blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
526a32a1fa1538ba1fef84eb6c724026bc5b3ba7 | Shell | daschaich/misc_scripts | /update | UTF-8 | 1,670 | 3.703125 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# ------------------------------------------------------------------
# Refresh the time-series data files and Web page for one ensemble <tag>.
# Must be run from inside a .../Run<dir> working directory.
if [ $# != 1 ]; then
  echo "Usage: $0 <tag>"
  exit 1
fi
tag=$1
temp=$(pwd)                       # $(...) instead of legacy backticks
dir=${temp##*Run}                 # Strip everything up to and including "Run"
path=/nfs/beowulf03/beowulf02/anna2/KSNHYP/FA-ks481216/Run$dir
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Construct list of files in given ensemble,
# including "Out/Thermalization" directory if it exists
# (all expansions quoted so tags containing shell metacharacters are safe)
rm -f "$tag.txt" "MISSING.$tag" "ERRORS.$tag"
if [ -e "$path/Out/Thermalization/out_$tag.1" ] ; then
  for i in "$path"/Out/out_"$tag".* "$path"/Out/Thermalization/out_"$tag".* ; do
    echo "${i##*.}" >> "temp.$tag" # Strip everything up to and including the last "."
  done
else
  for i in "$path"/Out/out_"$tag".* ; do
    echo "${i##*.}" >> "temp.$tag" # Strip everything up to and including the last "."
  done
fi
sort -n "temp.$tag" | uniq > "$tag.txt"
rm -f "temp.$tag"
# Use list of files in given ensemble to extract time series data
rm -f data/*."$tag".csv
echo "parse_files.pl $dir $tag"
/nfs/beowulf03/beowulf02/anna2/KSNHYP/FA-ks481216/scripts/parse_files.pl "$dir" "$tag"
chmod 664 data/*."$tag".csv
rm -f "$tag.txt" # Done with this
# Replace Web pages to update timestamp
# Assume index.html is OK
date=$(date)
rm -f "$tag.html"
echo writePage "$dir" "$tag" "$date"
/nfs/beowulf03/beowulf02/anna2/KSNHYP/FA-ks481216/scripts/writePage "$dir" "$tag" "$date"
chmod 664 "$tag.html"
# I like to monitor this: show MISSING.*/ERRORS.* files with non-zero line counts
wc -l M* | grep -v " 0 " | grep -v "^0 "
wc -l E* | grep -v " 0 " | grep -v "^0 "
echo
# ------------------------------------------------------------------
| true |
6ecd55a203cf8aea53b7661023f7144ab41fae71 | Shell | shivani987/basic-programs | /Day5,6/function3.sh | UTF-8 | 574 | 3.46875 | 3 | [] | no_license | #!/bin/bash -x
# Read a number and report whether its decimal digits form a palindrome.
# FIX: "read n" used to be the LAST line, so $n/$num were empty when used;
# the stray "c)" (an orphan fragment of a case statement) was a syntax error.
echo "enter the no"
read n
num=$n
rev=0
# Reverse the digits of num into rev.
while [ $num -ne 0 ]
do
rem=`expr $num % 10`
rev=`expr $rev \* 10 + $rem`
num=`expr $num / 10`
done
if [ $n -eq $rev ]
then
echo $n "number is palindrome"
else
echo $n "number is not palindrome"
fi
#!/bin/bash -x
# Check whether a number is a "palprime": both a palindrome and a prime.
echo "enter the no"
read n
p=$n
count=0
# Count divisors of p; a prime has exactly two (1 and itself).
# FIX: "[[ $p % i == 0 ]]" was a string comparison (never a modulo test)
# and "count++;" tried to run a command named count++.
for (( i=1; i<=p; i++ ))
do
if [ `expr $p % $i` -eq 0 ]
then
count=`expr $count + 1`
fi
done
# Reverse the digits of n into rev.
# FIX: rev was never initialized, "rev=rev=" was a typo, and
# "[[ n > 0 ]]" compared the literal strings "n" and "0" (infinite loop).
rev=0
while [ $n -gt 0 ]
do
rem=`expr $n % 10`
rev=`expr $rev \* 10 + $rem`
n=`expr $n / 10`
done
# FIX: "[[ p==rev && count==2 ]]" tested non-empty literal strings and
# was therefore always true.
if [ $p -eq $rev ] && [ $count -eq 2 ]
then
echo "the no is palprime"
else
echo "the no is not palprime"
fi
| true |
20711c2190d595af5706766272b5d62f9f440b9f | Shell | adrian-thurston/ragel | /test/ragel.d/perftest | UTF-8 | 737 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Benchmark harness: compile and run the same test cases with code
# generated by two different ragel builds, then report each runtime and
# the speedup ratio of build 1 vs build 2.
# Usage: perftest <ragel1> <ragel2> <target-seconds> [cases...]
set -e
ragel1=$1
ragel2=$2
targ_time=$3
shift 3
cases="$@"
# Default benchmark suite when no cases are named on the command line.
if test -z "$cases"; then
cases="mailbox1 strings1 strings2 rlscan cppscan1"
fi
CFLAGS="-O3 -Wall -Wno-unused-but-set-variable -Wno-unused-variable"
# tc <ragel> <compiler> <seconds> <root>
# Generate <root>.cpp from <root>.rl, compile, run under time(1), and
# print the user CPU time converted to plain seconds ("XmY.Zs" -> X*60+Y.Z).
tc()
{
ragel=$1
compiler=$2
seconds=$3
root=$4
$ragel -F1 -o $root.cpp $root.rl
$compiler $CFLAGS -DPERF_TEST -I../aapl -DS=${seconds}ll -o $root.bin $root.cpp
( time ./$root.bin ) 2>&1 | \
awk '/user/ { split( $2, a, "[ms]" ); printf( "%.3f\n", a[1] * 60 + a[2] ); }'
}
for c in $cases; do
time1=`tc $ragel1 g++ $targ_time $c`
time2=`tc $ragel2 g++ $targ_time $c`
speedup=`awk "BEGIN { printf( \"%.5f\n\", $time1 / $time2 ); }"`
echo -e "$c\t$time1 -> $time2\t$speedup" | expand -12,30
done
| true |
66971ecd61550495d968f6c1a2c514ef6bc8c734 | Shell | xisafe/scripts | /shell/TemplateShell/Shell_teamplate.sh | UTF-8 | 776 | 3.5 | 4 | [] | no_license | #!/bin/bash
# --------------------------------------------------
#Author: LJ
#Email: admin@attacker.club
#Site: blog.attacker.club
#Last Modified time: 2017-11-22 18:19:12
#Description:
# --------------------------------------------------
# color LEVEL TEXT
# Print TEXT wrapped in an ANSI color escape: bold red for "warn",
# bold yellow for "info"; any other level prints nothing.
function color()
{
	local code=""
	if [ "$1" = "warn" ]; then
		code="31"
	elif [ "$1" = "info" ]; then
		code="33"
	fi
	if [ -n "$code" ]; then
		echo -e "\e[1;${code}m$2\e[0m"
	fi
}
# Ask the operator for confirmation before the script proceeds.
# Y/y: print a banner and return; N/n: exit 0; anything else: exit 1.
# NOTE: this exits the whole script, not just the function, on N or bad input.
function confirm()
{
read -p 'Are you sure to Continue?[Y/n]:' answer
case $answer in
Y | y)
echo -e "\n\t\t\e[44;37m Running the script \e[0m\n";;
N | n)
echo -e "\n\t\t\033[41;36mExit the script \e[0m\n" && exit 0;;
*)
echo -e "\n\t\t\033[41;36mError choice \e[0m\n" && exit 1;;
esac
}
# Remove temporary artifacts. "xxxx" is a template placeholder --
# replace it with the real path(s) before using this template.
function Cleanup()
{
rm xxxx
}
# Main flow: confirm with the operator, then clean up.
confirm
Cleanup
| true |
0a6be5c8d01b3eb63f46915c73b4c83479031630 | Shell | Enstore-org/enstore | /doc/other/mk_inherit | UTF-8 | 1,175 | 3.203125 | 3 | [] | no_license | #!/bin/sh
#
# Scan the .py files under $ENSTORE_DIR/src and generate a Graphviz
# picture of the class inheritance hierarchy.
# Each numbered .out file is one stage of a sed pipeline that turns
# "class Child(Parent):" lines into 'Child->"Parent"' dot edges.
#
rm -f [1-9].out
rm -f obj.dot
rm -f obj.ps
grep -h '^class.*\:' ${ENSTORE_DIR}/src/[a-z]*.py > 1.out
sed -e 's/class//g' < 1.out > 2.out
sed -e 's/://g' < 2.out > 3.out
sed -e 's/pass//g' < 3.out > 4.out
sed -e 's/,/"->"/g' < 4.out > 5.out
sed -e 's/(/->"/g' < 5.out > 6.out # need quotes incase . in
sed -e 's/)/"/g' < 6.out > 7.out # name; remove white b/c
sed -e 's/[ ][ ]*//g' < 7.out > 8.out # using ""
# Assemble the dot file: edges first, then graph-level layout attributes.
echo 'digraph objs { ' > obj.dot
cat 8.out >> obj.dot
echo 'UDPServer -> TCPServer' >> obj.dot
echo 'size="8,10.5"' >> obj.dot # margin will be .25 on all sides
echo 'margin=0' >> obj.dot # of 8.5x11 sheet of paper
echo 'center=1' >> obj.dot
echo 'rotate=0' >> obj.dot
echo 'ordering=out' >> obj.dot
echo 'rankdir=LR' >> obj.dot
echo 'dir=back' >> obj.dot
echo '}' >> obj.dot
rm [1-9].out
| true |
1cb1d61652dd744f58bb39498dffdaabdf2c1263 | Shell | jrpedrianes/dotfiles | /config/shell-env | UTF-8 | 907 | 3.28125 | 3 | [
"WTFPL"
] | permissive | #!/usr/bin/env zsh
# `shell_env.sh` is a main file for any general-case utils.
#
# This file is used as a source file for zsh.
# === Plugin management ===
# antibody bundle < "$HOME/.antibody" > "$HOME/.zsh-plugins"
# source "$HOME/.zsh-plugins"
# === Shell parts ===
# Pull in the split-out pieces of the shell configuration.
source "$HOME/.shell/exports"
source "$HOME/.shell/aliases"
source "$HOME/.shell/functions"
source "$HOME/.shell/external"
export PATH="$PATH:$HOME/.shell/bin"
export PATH="$PATH:$HOME/.shell/bin-local"
# Source every *.sh drop-in from source.d (basename only; the directory
# is re-prefixed inside the loop).
SOURCED_FILES=$(find "$HOME/.shell/source.d" -type f -path "*\.sh" -exec basename -a {} \;)
for SOURCED_FILE in $(echo "${SOURCED_FILES}"); do
# shellcheck source=$HOME/.shell/source.d/kubeswitch_cleanup_handler_zsh.sh
source "$HOME/.shell/source.d/$SOURCED_FILE"
done
unset SOURCED_FILES SOURCED_FILE
# === Local variables and overrides ===
# Machine-local overrides are applied last so they win.
if [[ -f "$HOME/.shell-env-local" ]]; then
source "$HOME/.shell-env-local"
fi
| true |
f7e321e053b947ef7201945994221aeb93738504 | Shell | hmrc/binding-tariff-trader-frontend | /migrations/applied_migrations/AddConfidentialInformation.sh | UTF-8 | 3,245 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Scaffolding migration: wires a new yes/no question page
# ("AddConfidentialInformation") into the Play application by appending
# routes/messages and splicing generator/helper code into Scala sources.
echo "Applying migration AddConfidentialInformation"
echo "Adding routes to conf/app.routes"
echo "" >> ../conf/app.routes
echo "GET        /add-confidential-information               controllers.AddConfidentialInformationController.onPageLoad(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "POST       /add-confidential-information               controllers.AddConfidentialInformationController.onSubmit(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "GET        /change-add-confidential-information                        controllers.AddConfidentialInformationController.onPageLoad(mode: Mode = CheckMode)" >> ../conf/app.routes
echo "POST       /change-add-confidential-information                        controllers.AddConfidentialInformationController.onSubmit(mode: Mode = CheckMode)" >> ../conf/app.routes
echo "Adding messages to conf.messages"
echo "" >> ../conf/messages.en
echo "addConfidentialInformation.title = addConfidentialInformation" >> ../conf/messages.en
echo "addConfidentialInformation.heading = addConfidentialInformation" >> ../conf/messages.en
echo "addConfidentialInformation.checkYourAnswersLabel = addConfidentialInformation" >> ../conf/messages.en
echo "addConfidentialInformation.error.required = Select yes if addConfidentialInformation" >> ../conf/messages.en
# Each awk block below matches an anchor line in a Scala file, re-prints it,
# injects new code after it, and rewrites the file via a tmp file.
echo "Adding to UserAnswersEntryGenerators"
awk '/trait UserAnswersEntryGenerators/ {\
     print;\
     print "";\
     print "  implicit lazy val arbitraryAddConfidentialInformationUserAnswersEntry: Arbitrary[(AddConfidentialInformationPage.type, JsValue)] =";\
     print "    Arbitrary {";\
     print "      for {";\
     print "        page  <- arbitrary[AddConfidentialInformationPage.type]";\
     print "        value <- arbitrary[Boolean].map(Json.toJson(_))";\
     print "      } yield (page, value)";\
     print "    }";\
     next }1' ../test/generators/UserAnswersEntryGenerators.scala > tmp && mv tmp ../test/generators/UserAnswersEntryGenerators.scala
echo "Adding to PageGenerators"
awk '/trait PageGenerators/ {\
     print;\
     print "";\
     print "  implicit lazy val arbitraryAddConfidentialInformationPage: Arbitrary[AddConfidentialInformationPage.type] =";\
     print "    Arbitrary(AddConfidentialInformationPage)";\
     next }1' ../test/generators/PageGenerators.scala > tmp && mv tmp ../test/generators/PageGenerators.scala
echo "Adding to CacheMapGenerator"
awk '/val generators/ {\
     print;\
     print "    arbitrary[(AddConfidentialInformationPage.type, JsValue)] ::";\
     next }1' ../test/generators/CacheMapGenerator.scala > tmp && mv tmp ../test/generators/CacheMapGenerator.scala
echo "Adding helper method to CheckYourAnswersHelper"
awk '/class/ {\
     print;\
     print "";\
     print "  def addConfidentialInformation: Option[AnswerRow] = userAnswers.get(AddConfidentialInformationPage) map {";\
     print "    x => AnswerRow(\"addConfidentialInformation.checkYourAnswersLabel\", if(x) \"site.yes\" else \"site.no\", true, routes.AddConfidentialInformationController.onPageLoad(CheckMode).url)"; print "  }";\
     next }1' ../app/utils/CheckYourAnswersHelper.scala > tmp && mv tmp ../app/utils/CheckYourAnswersHelper.scala
echo "Migration AddConfidentialInformation completed"
| true |
17db74ae4e3d94251eac687e56929602d73c4a06 | Shell | speech998/uaspeech | /s5_segment/local/decode_segmentation_dnn.sh | UTF-8 | 7,097 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2014 Guoguo Chen, 2015 GoVivace Inc. (Nagendra Goel)
# 2017 Vimal Manohar
# Apache 2.0
# Some basic error checking, similar to steps/nnet/decode.sh is added.
#
# Kaldi "nnet1" decoding script for segmentation: forwards features
# through a DNN and decodes each utterance against its own biased graph
# (HCLG.fsts.scp). See the usage message below.
set -e
set -o pipefail
# Begin configuration section.
nnet= # non-default location of DNN (optional)
feature_transform= # non-default location of feature_transform (optional)
model= # non-default location of transition model (optional)
class_frame_counts= # non-default location of PDF counts (optional)
srcdir= # non-default location of DNN-dir (decouples model dir from decode dir)
stage=0 # stage=1 skips lattice generation
nj=4
cmd=run.pl
acwt=0.10 # note: only really affects pruning (scoring is on lattices).
beam=13.0
lattice_beam=6.0
min_active=200
max_active=7000 # limit of active tokens
max_mem=50000000 # approx. limit to memory consumption during minimization in bytes
nnet_forward_opts="--no-softmax=true --prior-scale=1.0"
scoring_opts=
allow_partial=true
# note: there are no more min-lmwt and max-lmwt options, instead use
# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20"
skip_scoring=false
num_threads=1 # if >1, will use latgen-faster-parallel
parallel_opts= # Ignored now.
use_gpu="no" # yes|no|optionaly
# End configuration section.
echo "$0 $@" # Print the command line for logging
[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;
# Exactly three positional arguments are required: graph dir, data dir, decode dir.
if [ $# != 3 ]; then
  echo "$0: This is a special decoding script for segmentation where we"
  echo "use one decoding graph per segment. We assume a file HCLG.fsts.scp exists"
  echo "which is the scp file of the graphs for each segment."
  echo "This will normally be obtained by steps/cleanup/make_biased_lm_graphs.sh."
  echo "This script does not estimate fMLLR transforms; you have to use"
  echo "the --transform-dir option if you want to use fMLLR."
  echo ""
  echo "Usage: $0 [options] <graph-dir> <data-dir> <decode-dir>"
  echo " e.g.: $0 exp/tri2b/graph_train_si284_split \\"
  echo "          data/train_si284_split exp/tri2b/decode_train_si284_split"
  echo ""
  echo "where <decode-dir> is assumed to be a sub-directory of the directory"
  echo "where the model is."
  echo ""
  echo "main options (for others, see top of script file)"
  echo "  --config <config-file>                   # config containing options"
  echo "  --nj <nj>                                # number of parallel jobs"
  echo "  --iter <iter>                            # Iteration of model to test."
  echo "  --model <model>                          # which model to use (e.g. to"
  echo "                                           # specify the final.alimdl)"
  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo "  --transform-dir <trans-dir>              # dir to find fMLLR transforms "
  echo "  --acwt <float>                           # acoustic scale used for lattice generation "
  echo "  --scoring-opts <string>                  # options to local/score.sh"
  echo "  --num-threads <n>                        # number of threads to use, default 1."
  exit 1;
fi
graphdir=$1
data=$2
dir=$3
mkdir -p $dir/log
# Locate the model directory: explicit --srcdir, else the decode dir or
# its parent (the conventional Kaldi layout).
if [ -z "$srcdir" ]; then
  if [ -e $dir/final.mdl ]; then
    srcdir=$dir
  elif [ -e $dir/../final.mdl ]; then
    srcdir=$(dirname $dir)
  else
    echo "$0: expected either $dir/final.mdl or $dir/../final.mdl to exist"
    exit 1
  fi
fi
sdata=$data/split$nj;
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs
# BUG FIX: this previously used $graph_dir (undefined -- the variable is
# $graphdir), so the phones compatibility check ran on empty arguments.
utils/lang/check_phones_compatible.sh $graphdir/phones.txt $srcdir/phones.txt
# Split HCLG.fsts.scp by input utterance
n1=$(cat $graphdir/HCLG.fsts.scp | wc -l)
n2=$(cat $data/feats.scp | wc -l)
if [ $n1 != $n2 ]; then
  echo "$0: expected $n2 graphs in $graphdir/HCLG.fsts.scp, got $n1"
fi
mkdir -p $dir/split_fsts
sort -k1,1 $graphdir/HCLG.fsts.scp > $dir/HCLG.fsts.sorted.scp
utils/filter_scps.pl --no-warn -f 1 JOB=1:$nj \
  $sdata/JOB/feats.scp $dir/HCLG.fsts.sorted.scp $dir/split_fsts/HCLG.fsts.JOB.scp
HCLG=scp:$dir/split_fsts/HCLG.fsts.JOB.scp
# Select default locations to model files (if not already set externally)
[ -z "$nnet" ] && nnet=$srcdir/final.nnet
[ -z "$model" ] && model=$srcdir/final.mdl
[ -z "$feature_transform" -a -e $srcdir/final.feature_transform ] && feature_transform=$srcdir/final.feature_transform
#
[ -z "$class_frame_counts" -a -f $srcdir/prior_counts ] && class_frame_counts=$srcdir/prior_counts # priority,
[ -z "$class_frame_counts" ] && class_frame_counts=$srcdir/ali_train_pdf.counts
# Check that files exist,
for f in $sdata/1/feats.scp $nnet $model $feature_transform $class_frame_counts; do
  [ ! -f $f ] && echo "$0: missing file $f" && exit 1;
done
# Possibly use multi-threaded decoder
thread_string=
[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"
# import config,
cmvn_opts=
delta_opts=
D=$srcdir
[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility,
[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts)
[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility,
[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts)
#
# Create the feature stream,
feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
# apply-cmvn (optional),
[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1
[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
# add-deltas (optional),
[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |"
# add-pytel transform (optional),
[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |"
# Run the decoding in the queue,
if [ $stage -le 0 ]; then
if [ -f "$graphdir/num_pdfs" ]; then
[ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \
{ echo "Mismatch in number of pdfs with $model"; exit 1; }
fi
$cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \
nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu "$nnet" "$feats" ark:- \| \
latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \
--lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=$allow_partial --word-symbol-table=$graphdir/words.txt \
$model "$HCLG" ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
fi
if ! $skip_scoring ; then
[ ! -x local/score.sh ] && \
echo "$0: Not scoring because local/score.sh does not exist or not executable." && exit 1;
local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir ||
{ echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; }
fi
exit 0;
| true |
51ce4e29df6125a77b0576fb9be42c72a50b3c18 | Shell | petervlk/dotfiles | /install.sh | UTF-8 | 934 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Paths used by the installer. Assumes it is run from the dotfiles
# checkout and that $XDG_CONFIG_HOME is already set in the environment.
DOTFILES_DIR=$(pwd)
ZDOTDIR=$XDG_CONFIG_HOME/zsh
VIMDIR=$XDG_CONFIG_HOME/vim
CLOJURE_TOOLS_HOME=$XDG_CONFIG_HOME/clojure
ZSHENV_LOCAL_FILE=$ZDOTDIR/zshenv_local
# Write $HOME/.zshenv so zsh picks up ZDOTDIR=$1 and sources its zshenv.
# FIX: redirection targets are now quoted (broke on paths with spaces).
createZshEnvInitFile () {
  local ZSHENV_INIT_FILE=$HOME/.zshenv
  echo "export ZDOTDIR=$1" > "$ZSHENV_INIT_FILE"
  echo 'source $ZDOTDIR/zshenv' >> "$ZSHENV_INIT_FILE"
}
# Create directory $1 (and any missing parents) unless it already exists.
# FIX: "$1" is now quoted; the unquoted form failed on paths with spaces.
createDirIfDoesntExist () {
  if [ ! -d "$1" ]; then
    mkdir -p "$1"
  fi
}
# Create an empty file at $1 unless it already exists (existing content
# is left untouched).
# FIX: "$1" is now quoted; the unquoted form failed on paths with spaces.
createFileIfDoesntExist () {
  if [ ! -f "$1" ]; then
    touch "$1"
  fi
}
# Bootstrap zsh, create the config directories, then symlink each
# dotfile from the checkout into its expected location.
createZshEnvInitFile $ZDOTDIR
createDirIfDoesntExist $ZDOTDIR
createDirIfDoesntExist $VIMDIR
createFileIfDoesntExist $ZSHENV_LOCAL_FILE
# create symlinks
ln -s $DOTFILES_DIR/zshenv $ZDOTDIR/zshenv
ln -s $DOTFILES_DIR/Xresources $HOME/.Xresources
ln -s $DOTFILES_DIR/vimrc $VIMDIR/vimrc
ln -s $DOTFILES_DIR/tmux.conf $HOME/.tmux.conf
ln -s $DOTFILES_DIR/ideavimrc $HOME/.ideavimrc
ln -s $DOTFILES_DIR/deps.edn $CLOJURE_TOOLS_HOME/deps.edn
| true |
452ccaa21fbc1758a5e84a691243e13921fc905b | Shell | ashwinigirase26/shell-Programming_Construct | /Dictionary/birthdayMonth.sh | UTF-8 | 1,571 | 3.5625 | 4 | [] | no_license | #Write a Program to generate a birth month of 50 individuals between the
#year 92 & 93. Find all the individuals having birthdays in the same month.
#Store it to finally print.
#!/bin/bash -x
# Assign each of 50 individuals a random birth month (1..12), then list,
# month by month, which individuals share that birth month.
declare -A Person
for i in {1..50}
do
Person[$i]=$(( $RANDOM % 12 + 1 ))
done
# FIX: the original repeated the same 12-line loop once per month with
# only the month name changed (and inconsistent message spacing/casing).
# A single loop over a month-name table produces the same grouping in
# the same order (all January people first, then February, ...), with
# the messages normalized to one consistent form.
months=(JANUARY FEBRUARY MARCH APRIL MAY JUNE JULY AUGUST SEPTEMBER OCTOBER NOVEMBER DECEMBER)
for m in {1..12}
do
for i in {1..50}
do
if [ ${Person[$i]} -eq $m ]
then
echo "person whos birthday in ${months[$((m-1))]}" $i
fi
done
done
| true |
b5fe098db4b2e0d9d6caf8c192e6bb795c014cf5 | Shell | salomonjohns/S3-Automation | /creates3websitebucket.sh | UTF-8 | 2,055 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Create an S3 bucket named $1 and configure it for public static
# website hosting (index.html / error.html, public-read policy).
# FIX: the original used "//" for comments, which is NOT shell comment
# syntax -- each such line actually executed "//" and printed an error.
BUCKET_NAME=$1
if [ -z "$BUCKET_NAME" ]
then
        echo -e "\e[31mOops! You cannot leave the bucket name empty... this isnt going to work \n"
        echo -e "\e[39m"
        # FIX: was "exit 0" -- a usage error must not report success.
        exit 1
else
        echo -e "\e[32mGreen Good... you gave your bucket a name! \n"
fi
# Create the bucket
# Reference: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html
aws s3api create-bucket --bucket "$BUCKET_NAME" --region eu-west-1 --create-bucket-configuration LocationConstraint=eu-west-1
echo -e "\e[32mBucket Created\n"
# Set the Configuration Options (allow public ACLs/policies)
# Reference: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-public-access-block.html
aws s3api put-public-access-block --bucket "$BUCKET_NAME" --public-access-block-configuration BlockPublicAcls=false,IgnorePublicAcls=false,BlockPublicPolicy=false,RestrictPublicBuckets=false
echo -e "\e[32mConfiguration Options Set\n"
# Configure for Static Website Hosting
# Reference: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-website.html
# Idea for temp file from: https://stuff-things.net/2017/05/03/creating-an-s3-website-redirect-from-the-cli/
# FIX: dropped the stray "$2" printf argument, which (when set) made
# printf reuse the format string and write the JSON twice.
printf '{"IndexDocument": {"Suffix": "index.html"},"ErrorDocument": {"Key": "error.html"}}' > website.json
aws s3api put-bucket-website --bucket "$BUCKET_NAME" --website-configuration file://website.json
echo -e "\e[32mStatic Hosting Options Set\n"
# Cleanup
rm -rf website.json
echo -e "\e[32mCleaned up website.json temp file\n"
# Configure Permissions
# Reference: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-policy.html
printf '{"Version": "2012-10-17","Statement": [{"Sid": "PublicReadForGetBucketObjects","Effect": "Allow","Principal": "*","Action": ["s3:GetObject"],"Resource": ["arn:aws:s3:::'"$BUCKET_NAME"'/*"]}]}' > policy.json
aws s3api put-bucket-policy --bucket "$BUCKET_NAME" --policy file://policy.json
echo -e "\e[32mPermissions Set\n"
rm -rf policy.json
echo -e "\e[32mCleaned up policy.json temp file\n"
echo -e "\e[32mProceed to upload a file and test access!\n\n"
echo -e "\e[39m"
| true |
b1e3a4b243fe6d99dc70f1bd1deb87fcd93aa635 | Shell | KaiChuan-Hsieh/wifitest | /mfg_script/wcn3610/cw_tx.sh | UTF-8 | 491 | 2.640625 | 3 | [] | no_license | #!/system/bin/sh
# Start a continuous-wave (CW) RF-generator TX test on wlan0 via iwpriv.
# NOTE(review): the original header said "Para1 = mode, Para2 = Channel,
# Para3 = Data Rate", which does not match the code below. From the code:
# Para1 = channel number
# Para2 = digital gain (dGain) -- presumably; confirm against iwpriv docs
# Para3 = RF gain (rGain)      -- presumably; confirm against iwpriv docs
# Log an info-level message to the Android log under tag "WifiTestCmd".
function logi {
    log -t "WifiTestCmd" -p i $1
}
channel=$1
dGain=$2
rGain=$3
# Stop any running TX/RX test before reconfiguring.
logi "iwpriv wlan0 tx 0"
iwpriv wlan0 tx 0
logi "iwpriv wlan0 rx 0"
iwpriv wlan0 rx 0
logi "iwpriv wlan0 set_channel $channel"
iwpriv wlan0 set_channel $channel
logi "iwpriv wlan0 set_tx_wf_gain $dGain $rGain"
iwpriv wlan0 set_tx_wf_gain $dGain $rGain
# Start the CW RF generator.
logi "iwpriv wlan0 tx_cw_rf_gen 1"
iwpriv wlan0 tx_cw_rf_gen 1
| true |
0b6acb7e7058bcfb9881b6268b4eb8e00ae9544d | Shell | mrhaav/PCEnginesAPU1 | /ME909s-120/huaweiME909s-120.sh | UTF-8 | 2,261 | 3.0625 | 3 | [] | no_license | #!/bin/sh
#
# mrhaav 2020-11-18
# Huawei ME909s-120 modem
# SIM PIN should be deactivated
# ^SYSCFGEX: "00",3FFFFFFF,1,2,7FFFFFFFFFFFFFFF
#
# Brings up an NDIS data session: sends AT commands through gcom,
# configures the PDP context, and checks SIM/registration state.
DEV=/dev/ttyUSB0
APN=internet
pdpType=IP
# Modem start up delay
sleep 1
# Set error codes to verbose (retry until the modem answers OK)
atOut=$(COMMAND="AT+CMEE=2" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2' | sed -e 's/[\r\n]//g')
while [ $atOut != 'OK' ]
do
    atOut=$(COMMAND="AT+CMEE=2" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2' | sed -e 's/[\r\n]//g')
done
# Check SIMcard and PIN status
# (spaces are replaced by "q" so the multi-word reply survives the
#  unquoted [ ] test below; they are restored before logging)
atOut=$(COMMAND="AT+CPIN?" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2' | awk -F : '{print $2}' | sed -e 's/[\r\n]//g' | sed 's/^ *//g' | sed 's/ /q/g')
if [ $atOut == 'READY' ]
# Initiate modem
then
    # Flight mode on
    atOut=$(COMMAND="AT+CFUN=0" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    # Disable unsolicted indications
    atOut=$(COMMAND="AT^CURC=0" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    # Modem manufacturer information
    atOut=$(COMMAND="AT+CGMI" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2')
    logger -t modem $atOut
    # Modem model information
    atOut=$(COMMAND="AT+CGMM" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2')
    logger -t modem $atOut
    # Configure PDPcontext
    atOut=$(COMMAND="AT+CGDCONT=0,\"$pdpType\",\"$APN\"" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    atOut=$(COMMAND="AT+CGDCONT=1,\"$pdpType\",\"$APN\"" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    # Flight mode off
    atOut=$(COMMAND="AT+CFUN=1" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    # Check service status (^SYSINFOEX fields: srv status/domain, SIM state)
    atOut=$(COMMAND="AT^SYSINFOEX" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | grep SYSINFOEX:)
    service="${atOut:12:3}"
    SIMstatus="${atOut:18:1}"
    if [ $SIMstatus != '1' ]
    then
        logger -t modem Invalid SIMcard
    elif [ $service != '2,3' ]
    then
        logger -t modem No service, check APN
    else
        # Check operator
        atOut=$(COMMAND="AT+COPS?" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom | awk 'NR==2' | awk -F , '{print $3}' | sed -e 's/\"//g')
        logger -t modem Connected to $atOut
        # Activate NDIS application
        atOut=$(COMMAND="AT^NDISDUP=1,1" gcom -d "$DEV" -s /etc/gcom/getruncommand.gcom)
    fi
else
    # Restore the spaces that were swapped to "q" above before logging.
    atOut=`echo "$atOut" | sed -e 's/q/ /g'`
    logger -t modem $atOut
fi
| true |
51e3a1e140228e560b353bcca0f83d7594c41f75 | Shell | skashtan/iComplyClaimCenter | /bin/gwcc.sh | UTF-8 | 383 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
# Deprecated wrapper: resolve the script's own directory, then delegate
# to the gwb build entry point in the repository root.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "WARNING: This is a deprecated build entry point. Use gwb in the root directory instead"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
cd $DIR/..
# Forward all original arguments to gwb.
bash gwb "$@"
| true |
bc24e553a3f7efc79227f1ccd1ec1bd54e8eaecf | Shell | GoneMobileCast/GoneMobileCast.github.io | /publish.sh | UTF-8 | 520 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Publish the Jekyll site: rebuild it into _site/ (checked out on the
# published "master" branch) and push the regenerated output.
bundle install
# Remove the previously generated site
rm -rf _site/
mkdir _site/
# Checkout current master (published github pages site) into _site
git -C _site/ init
git -C _site/ remote add origin git@github.com:GoneMobileCast/GoneMobileCast.github.io.git
git -C _site/ fetch
git -C _site/ checkout -t origin/master
# Generate our site into _site
bundle exec jekyll build
git -C _site/ add -A # add everything to commit
git -C _site/ commit -am "Regenerated Jekyll site"
git -C _site/ push origin master
| true |
bd7461bb97b0ea9e203b188f01a10e2e538321d3 | Shell | vzat/mongo-stratus-connection-manager | /scripts/setupMongos.sh | UTF-8 | 566 | 3.296875 | 3 | [
"MIT"
] | permissive | containerName=${1:-test}
rootUser=${2:-admin}
rootPass=${3:-admin}
shards=`cat shards.txt`
sleep 10
# Add Shards to router
while read -r shard
do
echo shard
sudo docker exec -t $containerName \
mongo --eval "sh.addShard('$shard');"
done <<< "$shards"
# Add Root User
sudo docker exec -t $containerName \
mongo admin --eval " \
db.createUser({ \
user: '$rootUser', \
pwd: '$rootPass', \
roles: [{ \
role: 'root', \
db: 'admin' \
}] \
});"
| true |
af60b621b63a1aa569593ec207e52ffc77ef687e | Shell | kylape/support-examples | /jaxws/cxfSsl.war/runClientEap5.sh | UTF-8 | 911 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Build and deploy the two-way-SSL example, guide the operator through
# JBoss setup, exercise the endpoint with curl, then clean up keystores.
ant setup-example-two-way-ssl
if [ "x$JBOSS_HOME" = "x" ]; then
    echo "Must set JBOSS_HOME"
    exit 1
fi
ant deploy #Build the project
# FIX: "if [ $? -gt 0 ]; then exit $?" exited with the status of the
# [ test (always 0); the build status must be captured first.
rc=$?
if [ $rc -gt 0 ]; then
    exit $rc
fi
echo "Copying jbossweb.keystore"
cp WEB-INF/classes/jbossweb.keystore $JBOSS_HOME/server/default/conf
rc=$?
if [ $rc -gt 0 ]; then
    exit $rc
fi
cat httpsConnector.xml
read -p "Please make sure you've installed the above HTTPS connector in $JBOSS_HOME/server/default/deploy/jbossweb.xml/server.xml.  Press enter to continue."
read -p "Please start JBoss now.  Press enter when the startup has completed."
curl -s -H "Content-Type: text/xml" -d "`cat request.xml`" http://localhost:8080/cxfSsl/clientEndpoint | xmllint --format -
echo
echo "Please stop JBoss before running the test again. "
sleep 1
#Clean things up so this script can be re-run more easily
rm WEB-INF/classes/jbossweb.keystore* WEB-INF/classes/client.keystore*
| true |
7eb3887f709f3a179d6d847abb835c70966056a0 | Shell | jgarte/dotfiles-23 | /dot_tmuxifier-layouts/backup.session.sh | UTF-8 | 335 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# tmuxifier session layout: initialize_session / load_window /
# finalize_and_go_to_session are provided by tmuxifier at load time.
# Create session with specified name if it does not already exist. If no
# argument is given, session name will be based on layout file name.
if initialize_session "backup"; then
  # Load a defined window layout.
  load_window "backup"
fi
# Finalize session creation and switch/attach to it.
finalize_and_go_to_session
| true |
350ba9f9912d39e450ebcdbd5b5d7bc04602a4bd | Shell | gitter-badger/akka-persistence-dynamodb | /scripts/dev-setup.sh | UTF-8 | 488 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Start an in-memory DynamoDB Local instance for development/tests,
# downloading it first if needed; records the PID for later shutdown.
cd $(dirname $0)/..
PID_FILE="target/dynamo.pid"
if [ ! -f dynamodb-local/DynamoDBLocal.jar ]; then
    scripts/get-dynamodb-local
fi
echo "Starting up dynamodb"
rm -f $PID_FILE
# Run detached; all output goes to target/dynamodb-output.log.
nohup java -Djava.library.path=./dynamodb-local -jar ./dynamodb-local/DynamoDBLocal.jar -inMemory > target/dynamodb-output.log 2>&1 &
echo $! > $PID_FILE
echo -n "Started up dynamodb, pid is "
cat $PID_FILE
echo "now start sbt and run 'test:run' for a load test or 'test' for the test suite"
| true |
c7b1ac7c5b4a18c5caa846e3b46a48e38af8e590 | Shell | Azure/sonic-build-tools | /scripts/common/sonic-mgmt-common-build/test.sh | UTF-8 | 794 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -ex
# Run sanity tests for sonic-mgmt-common.
# Assumes sonic-mgmt-common is already compiled and all dependencies
# are installed.
STATUS=0
DEBDIR=$(realpath sonic-mgmt-common/debian/sonic-mgmt-common)
[[ -f sonic-mgmt-common/tools/test/database_config.json ]] && \
export DB_CONFIG_PATH=${PWD}/sonic-mgmt-common/tools/test/database_config.json
# Run CVL tests
pushd sonic-mgmt-common/build/tests/cvl
CVL_SCHEMA_PATH=testdata/schema \
./cvl.test -test.v -logtostderr || STATUS=1
popd
# Run translib tests
pushd sonic-mgmt-common/build/tests/translib
export CVL_SCHEMA_PATH=${DEBDIR}/usr/sbin/schema
export YANG_MODELS_PATH=${DEBDIR}/usr/models/yang
./db.test -test.v -logtostderr || STATUS=1
./translib.test -test.v -logtostderr || STATUS=1
popd
exit ${STATUS}
| true |
f281d28e023a42ce45282d40bd879b48a452ded9 | Shell | andysan/hipe_tests | /arch.sh | UTF-8 | 785 | 2.78125 | 3 | [] | no_license | #! /bin/sh
##====================================================================
## File: arch.sh
## Author(s): Jesper W
## Purpose: To test the HiPE system with all memory architectures.
##
## $Id: arch.sh,v 1.2 2003/12/18 12:34:30 jesperw Exp $
##====================================================================
# Run the test suite once per Erlang heap architecture:
# private (default runtime), shared (-shared), hybrid (-hybrid).
OSH_DIR=$1
echo --------------------------------------------------------------------
echo "| Testing Private heap"
echo "|"
./testsuite.sh $OSH_DIR
echo --------------------------------------------------------------------
echo "| Testing Shared heap"
echo "|"
./testsuite.sh --rts_opts -shared $OSH_DIR
echo --------------------------------------------------------------------
echo "| Testing Hybrid heap"
echo "|"
./testsuite.sh --rts_opts -hybrid $OSH_DIR
| true |
e0a891921444e9bdfab001b37eb9dc470791d4ec | Shell | liuyu8721/csmd | /PF_P1.sh | UTF-8 | 2,751 | 3.4375 | 3 | [] | no_license | # All rights reserved by Liuyu
# Author: Liuyu
#!/bin/sh
# PF Phase I: validate inputs, then build a per-species reference
# database (concatenated, header-normalized genomes) and a bowtie2 index.
# Args: $1 = SEQDIR (RefSeq genomes + assembly_summary.txt)
#       $2 = SEQLST (one RefSeq accession per line)
#       $3 = OUTDIR (database output root)
source ${CSMD_HOME}/configs.sh
PF_WORKDIR_P1_SEQDIR=$1
PF_WORKDIR_P1_SEQLST=$2
PF_WORKDIR_P1_OUTDIR=$3
echo "#--------------------------------------------------------------#"
echo "Per-sample microbiome profiling"
if [ ! -d "${PF_WORKDIR_P1_SEQDIR}/SEQ/" ];then
    echo -e "\033[44;37;5m ERROR:\033[0m ${PF_WORKDIR_P1_SEQDIR}/SEQ/ not exist"
    echo "Please make sure all RefSeq bacteria genomes can be found in ${PF_WORKDIR_P1_SEQDIR}/SEQ/"
    exit
elif [ ! -f "${PF_WORKDIR_P1_SEQDIR}/assembly_summary.txt" ];then
    echo -e "\033[44;37;5m ERROR: \033[0m ${PF_WORKDIR_P1_SEQDIR}/assembly_summary.txt not exit"
    exit
else
    echo -e "\033[44;37;5m TIPS:\033[0m The SEQDIR is ready."
    echo "NOTE: All RefSeq bacteria genomes are expected in ${PF_WORKDIR_P1_SEQDIR}/SEQ/"
    echo "NOTE: RefSeq bacteria summary information is expected in ${PF_WORKDIR_P1_SEQDIR}/assembly_summary.txt"
fi
if [ ! -f "${PF_WORKDIR_P1_SEQLST}" ];then
    echo -e "\033[44;37;5m ERROR:\033[0m the sequence list ${PF_WORKDIR_P1_SEQLST} not exist"
    echo "Please make sure the sequence list is ready for database update, each line with a RefSeq accssion no. "
else
    echo -e "\033[44;37;5m TIPS:\033[0m The SEQLST is ready."
    echo "NOTE: Sequences for database update are expected in ${PF_WORKDIR_P1_SEQLST}, each line with a RefSeq accssion no."
fi
echo "PF Phase I: Genome colection and index"
# Refuse to overwrite a non-empty Genome directory.
if [ ! -d "${PF_WORKDIR_P1_OUTDIR}/DB/Genome/" ];then
    mkdir -p ${PF_WORKDIR_P1_OUTDIR}/DB/Genome
else
    if [ "`ls -A ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/`" != "" ];then
        echo -e "\033[44;37;5m ERROR:\033[0m ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/ not empty"
        exit
    fi
fi
# For each accession: look up taxid/organism in assembly_summary.txt,
# decompress the genome, replace every FASTA header line with an N-run
# contig separator, then set a single ">acc|taxid|organism" header.
for aa in $(cat ${PF_WORKDIR_P1_SEQLST})
do
    taxid=$(awk -F '\t' '{if($1=="'${aa}'")print $6}' ${PF_WORKDIR_P1_SEQDIR}/assembly_summary.txt)
    organism=$(awk -F '\t' '{if($1=="'${aa}'")print $8}' ${PF_WORKDIR_P1_SEQDIR}/assembly_summary.txt | sed 's/ /_/g' | sed 's/\//!/g')
    header=">${aa}|${taxid}|${organism}"
    echo ${header}
    fasta_genome=$(ls ${PF_WORKDIR_P1_SEQDIR}/SEQ/${aa}*.gz)
    if [ "$fasta_genome" != "" ]; then
        gzip -d ${fasta_genome} -c > ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/${aa}.fna
        sed -i 's/^>.*$/NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN/' ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/${aa}.fna
        sed -i "1s/^.*$/${header}/" ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/${aa}.fna
    else
        echo -e "\033[44;37;5m ERROR:\033[0m the fasta genome of ${aa} not exist"
        exit
    fi
done
# Concatenate all per-accession genomes and build the bowtie2 index.
cat ${PF_WORKDIR_P1_OUTDIR}/DB/Genome/*.fna > ${PF_WORKDIR_P1_OUTDIR}/DB/csmdSpecies
cd ${PF_WORKDIR_P1_OUTDIR}/DB
bowtie2-build csmdSpecies csmdSpecies;
13b5e63ea075050010fc062c3af6289c5df52c4e | Shell | reedliu/NGS-codes | /WES/2019-1-24-WES-one-sample/2-wes-single.sh | UTF-8 | 8,381 | 3.328125 | 3 | [] | no_license | #!/usr/bin/bash
###############################################
############# WES for one sample ##############
# Author: Reed Liu
# Mail: jieandze1314@gmail.com
# Date: 2019-01-24
## Usage:
#cat $wkd/raw/conf | while read i;do
# fqs=($i)
# fq1=${fqs[0]}
# fq2=${fqs[1]}
# sh wes-single.sh $fq1 $fq2
#done
###############################################
## 针对一个样本PE fq文件
# 软件、工具路径
# 可以用conda,也可以自己指定
# Arguments
wkd=/vol2/agis/xiaoyutao_group/liuyunze/project/single-sap-wes
#mkdir $wkd/{raw,ref}
#rsync -av ~/reference/genome/hg38/ $wkd/ref # fast copy files(hg38.fa + hg38.fai)
ref_dir=$wkd/ref/hg38
ref_gnm=$ref_dir/hg38.fa
ref_idx=$ref_dir/hg38/hg38
GATK_bundle=/vol2/agis/xiaoyutao_group/liuyunze/biosoft/GATK/resources/bundle/hg38
conda_wes=/vol2/agis/xiaoyutao_group/liuyunze/biosoft/miniconda3/envs/wes
## shell执行参数
fq1=$1
fq2=$2
GID=Test_gid
library=Wes
sample=Test_sample
outdir=$wkd/outdir
core=16
## 按样本设置目录
outdir=${outdir}/${sample}
## 利用fq1得到fq的前缀,假设名字是*.1.fq.gz
fq_name=`basename $fq1`
fq_name=${fq_name%%.*}
## output dir
# 存储过滤好的数据
if [ ! -d $outdir/clean ]
then mkdir -p $outdir/clean
fi
# 存储比对、索引直到BQSR前
if [ ! -d $outdir/bwa ]
then mkdir -p $outdir/bwa
fi
# 存储变异检测结果
if [ ! -d $outdir/gatk ]
then mkdir -p $outdir/gatk
fi
# 存储临时文件
if [ ! -d $outdir/tmp ]
then mkdir -p $outdir/tmp
fi
###############################################
## Trimmomatic 质控过滤
###############################################
source activate wes
time trimmomatic PE \
$fq1 $fq2 \
$outdir/clean/${fq_name}.paired.1.fq.gz $outdir/tmp/${fq_name}.unpaired.1.fq.gz \
$outdir/clean/${fq_name}.paired.2.fq.gz $outdir/tmp/${fq_name}.unpaired.2.fq.gz \
ILLUMINACLIP:$conda_wes/share/trimmomatic-0.38-1/adapters/TruSeq3-PE-2.fa:2:30:10:8:True \
SLIDINGWINDOW:5:15 LEADING:5 TRAILING:5 MINLEN:50 && echo "** fq QC done **"
source deactivate
###############################################
## bwa mem 比对,对长度大于40bp小于2000bp的read非常有效
###############################################
## index
source activate wes
cd $ref_dir
bwa index -a bwtsw $ref_gnm -p hg38 && echo "** ref index done **"
## align
time bwa mem -t $core -M -R "@RG\tID:$GID\tSM:$sample\tLB:$library\tPL:Illumina" $ref_idx \
$outdir/clean/${fq_name}.paired.1.fq.gz $outdir/clean/${fq_name}.paired.1.fq.gz |\
samtools view -Sb - > $outdir/bwa/${sample}.bam && \
echo "** BWA MEM done **"
time samtools sort -@ $core -m 100G -O bam -o $outdir/bwa/${sample}.sorted.bam $outdir/bwa/${sample}.bam && \
echo "** sort raw bamfile done **"
# 可以对sorted后的bam进行index【可选】
# time samtools index $outdir/bwa/${sample}.sorted.bam && echo "** ${sample}.sorted.bam index done **"
source deactivate
###############################################
## 标记重复序列
###############################################
source activate wes
gatk MarkDuplicates \
-I $outdir/bwa/${sample}.sorted.bam \
-M $outdir/bwa/${sample}.markup_metrics.txt \
-O $outdir/bwa/${sample}.sorted.markdup.bam && \
echo "** ${sample}.sorted.bam MarkDuplicates done **"
# 为${sample}.sorted.bam 构建index,后续需要
time samtools index $outdir/bwa/${sample}.sorted.markdup.bam && \
echo "** ${sample}.sorted.markdup.bam index done **"
source deactivate
###############################################
## BQSR
## 【注意】vcf文件需要有构建好的index
## GATK4不支持对新的vcf进行index
###############################################
source activate wes
time gatk BaseRecalibrator \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.bam \
--known-sites $GATK_bundle/1000G_phase1.indels.hg38.vcf \
--known-sites $GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf \
--known-sites $GATK_bundle/dbsnp_146.hg38.vcf \
-O $outdir/bwa/${sample}.sorted.markdup.recal_data.table && \
echo "** ${sample}.sorted.markdup.recal_data.table done **"
time gatk ApplyBQSR \
--bqsr-recal-file $outdir/bwa/${sample}.sorted.markdup.recal_data.table \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.bam \
-O $outdir/bwa/${sample}.sorted.markdup.BQSR.bam && \
echo "** ApplyBQSR done **"
## 为${sample}.sorted.markdup.BQSR.bam构建索引,后续需要
time samtools index $outdir/bwa/${sample}.sorted.markdup.BQSR.bam && \
echo "** ${sample}.sorted.markdup.BQSR.bam index done **"
source deactivate
###############################################
## 开始变异检测
## 【注意】单个样本有四种检测方式(结果一样)
###############################################
## 第一种,直接调用HaplotypeCaller 输出样本vcf(较大文件比较慢)
## 这适合于单样本,或者那种固定样本数量的情况,也就是执行一次HaplotypeCaller之后就老死不相往来了
source activate wes
time gatk HaplotypeCaller \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.BQSR.bam \
-O $outdir/gatk/${sample}.HC.vcf.gz && \
echo "** ${sample}.HC.vcf.gz done **"
source deactivate
## 第二种,输出每条染色体vcf,然后再合并结果。目的是提高速度,但仅仅是通过区分染色体提速
## -L 参数,通过这个参数我们可以指定特定的染色体,写成chr1还是1需要看基因组fa格式
source activate wes
chrom=( chr1 chr2 chr3 chr4 chr5 chr6 chr7 chr8 chr9 chr10 chr11 chr12 \
chr13 chr14 chr15 chr16 chr17 chr18 chr19 chr20 chr21 chr22 chrX chrY chrM )
for i in ${chrom[@]};do
time gatk HaplotypeCaller \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.BQSR.bam \
-L $i \
-O $outdir/gatk/${sample}.HC.${i}.vcf.gz && \
echo "** ${sample}.HC.${i}.vcf.gz done **" &
done && wait
merge_vcfs=""
for i in ${chrom[@]};do
merge_vcfs=${merge_vcfs}" -I $outdir/gatk/${sample}.HC.${i}.vcf.gz \\"\n
done && time gatk MegeVcfs ${merge_vcfs} -O $outdir/gatk/${sample}.HC.vcf.gz && \
echo "** MergeVcfs done **"
source deactivate
## 第三种,先输出样本的全gVCF,再进行GenotypeGCVFs。单样本非必需,多样本的标配,文件较大速度会慢
## gVCF全称是genome VCF,是每个样本用于变异检测的中间文件,格式类似于VCF,
## 它把joint-genotype过程中所需的所有信息都记录在这里面,文件无论是大小还是数据量都远远小于原来的BAM文件
## 一旦新增加样本也不需要再重新去读取所有人的BAM文件,只需为新样本生成一份gVCF,然后重新执行这个joint-genotype就行
source activate wes
time gatk HaplotypeCaller \
--emit-ref-confidence GVCF \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.BQSR.bam \
-O $outdir/gatk/${sample}.HC.g.vcf.gz && \
echo "** GVCF ${sample}.HC.g.vcf.gz done **"
time gatk GenotypeGCVFs \
-R $ref_gnm \
-V $outdir/gatk/${sample}.HC.g.vcf.gz \
-O $outdir/gatk/${sample}.HC.vcf.gz && \
echo "** GVCF ${sample}.HC.vcf.gz done **"
source deactivate
## 第四种,输出每个染色体的gvcf,然后对每个染色体单独进行GenotypeGVCFs。目的是提高速度,但仅仅是通过区分染色体提速
source activate wes
chrom=( chr1 chr2 chr3 chr4 chr5 chr6 chr7 chr8 chr9 chr10 chr11 chr12 \
chr13 chr14 chr15 chr16 chr17 chr18 chr19 chr20 chr21 chr22 chrX chrY chrM )
for i in ${chrom[@]};do
time gatk HaplotypeCaller \
--emit-ref-confidence GVCF \
-R $ref_gnm \
-I $outdir/bwa/${sample}.sorted.markdup.BQSR.bam \
-L $i \
-O $outdir/gatk/${sample}.HC.${i}.g.vcf.gz && \
time gatk GenotypeGCVFs \
-R $ref_gnm \
-V $outdir/gatk/${sample}.HC.${i}.g.vcf.gz \
-O $outdir/gatk/${sample}.HC.${i}.vcf.gz && \
echo "** ${sample}.HC.${i}.vcf.gz done **" &
done && wait
merge_vcfs=""
for i in ${chrom[@]};do
merge_vcfs=${merge_vcfs}" -I $outdir/gatk/${sample}.HC.${i}.vcf.gz \\"\n
done && time gatk MegeVcfs ${merge_vcfs} -O $outdir/gatk/${sample}.HC.vcf.gz && \
echo "** MergeVcfs done **"
source deactivate
###############################################
## 变异注释
## 使用VEP
###############################################
source activate wes
time VEP --fasta $ref_gnm \
--vcf --merged --fork 10 --hgvs --force_overwrite --everything \
--offline --dir_cache $outdir/tmp/ \
-i $outdir/gatk/${sample}.HC.VQSR.vcf.gz \
-o $outdir/gatk/${sample}.HC.VQSR.VEP.vcf.gz
source deactivate
| true |
cef33ce99a0d64b13ad2ed8ebe1bc28cb26f24e8 | Shell | soheilaGhayour/oslab-homework | /os10/1.sh | UTF-8 | 252 | 3.03125 | 3 | [] | no_license | #!/bin/bash
echo "*MERRY CHRISTMAS*"
echo "Please enter your age"
if [ $1 -gt 17 ]
then
echo "You may go to the party."
elif [ $2 = "yes" ]
then
echo "You may go to the party but be back before midnight."
else
echo "You may not go to the party."
fi
| true |
47896046474f8e92b6303604f21ce72285d27718 | Shell | dilanSachi/LASERtrain | /bucc/data/prepare-UNcorpus.sh | UTF-8 | 2,999 | 3.28125 | 3 | [] | no_license | #!/bin/bash
SRCS=(
"es"
"fr"
"ru"
"zh"
)
TGT="en"
ROOT=$(dirname "$0")
FAIRSEQ=$1
SCRIPTS=$FAIRSEQ/scripts
SPM_TRAIN=$SCRIPTS/spm_train.py
SPM_ENCODE=$SCRIPTS/spm_encode.py
BPESIZE=40000
DATA=$ROOT/UNv1
TRAIN_MINLEN=1 # remove sentences with <1 BPE token
TRAIN_MAXLEN=250 # remove sentences with >250 BPE tokens
#Tokenizing Chinese dev set with the library Jieba
cat $DATA/en-zh/UNv1.0.en-zh.zh | python -c 'import jieba
import sys
for line in sys.stdin:
print(" ".join(jieba.cut(line.strip(), cut_all=True)))' > $DATA/en-zh/UNv1.0.en-zh.lower.tok.zh
cat $DATA/en-zh/UNv1.0.en-zh.en | $ROOT/../../preprocess/moses/tokenizer/tokenizer.perl -l en | awk '{ print tolower($0) }' > $DATA/en-zh/UNv1.0.en-zh.lower.tok.en
#Tokenizing and lowercasing Spanish, French, Russian and English devsets
for LANG in "es" "fr" "ru"; do
cat $DATA/en-$LANG/UNv1.0.en-$LANG.$LANG | $ROOT/../../preprocess/moses/tokenizer/tokenizer.perl -l $LANG | awk '{ print tolower($0) }' > $DATA/en-$LANG/UNv1.0.en-$LANG.lower.tok.$LANG &
cat $DATA/en-$LANG/UNv1.0.en-$LANG.en | $ROOT/../../preprocess/moses/tokenizer/tokenizer.perl -l en | awk '{ print tolower($0) }' > $DATA/en-$LANG/UNv1.0.en-$LANG.lower.tok.en &
done
wait
# learn BPE with sentencepiece
TRAIN_FILES=$(for SRC in "${SRCS[@]}"; do echo $DATA/en-$SRC/UNv1.0.en-$SRC.lower.tok.$SRC; echo $DATA/en-$SRC/UNv1.0.en-$SRC.lower.tok.en; done | tr "\n" ",")
echo "learning joint BPE over ${TRAIN_FILES}..."
python "$SPM_TRAIN" \
--input=$TRAIN_FILES \
--model_prefix=$DATA/sentencepiece.bpe \
--vocab_size=$BPESIZE \
--character_coverage=1.0 \
--model_type=bpe \
--shuffle_input_sentence=true
# encode train/valid/test
echo "encoding train/valid with learned BPE..."
for SRC in "${SRCS[@]}"; do
for LANG in "$SRC" "$TGT"; do
python "$SPM_ENCODE" \
--model "$DATA/sentencepiece.bpe.model" \
--output_format=piece \
--inputs $DATA/en-$SRC/UNv1.0.en-$SRC.lower.tok.$SRC $DATA/en-$SRC/UNv1.0.en-$SRC.lower.tok.en \
--outputs $DATA/en-$SRC/UNv1.0.en-$SRC.bpe.$SRC $DATA/en-$SRC/UNv1.0.en-$SRC.bpe.en \
--min-len $TRAIN_MINLEN --max-len $TRAIN_MAXLEN
done
done
DATABIN=$ROOT/UNv1.0.bpe40k-bin
SRC="fr"
fairseq-preprocess --source-lang en --target-lang $SRC --trainpref $DATA/en-$SRC/UNv1.0.en-$SRC.bpe --joined-dictionary --destdir $DATABIN --workers 15
SRC="ru"
fairseq-preprocess --source-lang en --target-lang $SRC --trainpref $DATA/en-$SRC/UNv1.0.en-$SRC.bpe --joined-dictionary --destdir $DATABIN --workers 15 --srcdict $DATABIN/dict.en.txt
SRC="es"
fairseq-preprocess --source-lang en --target-lang $SRC --trainpref $DATA/en-$SRC/UNv1.0.en-$SRC.bpe --joined-dictionary --destdir $DATABIN --workers 15 --srcdict $DATABIN/dict.en.txt
SRC="zh"
fairseq-preprocess --source-lang en --target-lang $SRC --trainpref $DATA/en-$SRC/UNv1.0.en-$SRC.bpe --joined-dictionary --destdir $DATABIN --workers 15 --srcdict $DATABIN/dict.en.txt
| true |
d1f0973b92950e775410bcc51e6edabd0050b61a | Shell | aslok/bin | /bin/minerkill | UTF-8 | 968 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env bash
PATH='/usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl'
cd /home/dima/
git pull
git add .
git commit -m 'Autocommit'
git push
#PIDS=$(pgrep -l -f ssh | grep aslok | awk '{print $1}')
#kill -9 $PIDS
#TUNCMD1='ssh -o StrictHostKeyChecking=no -f -N -g aslok@178.62.242.13 -R 2222:127.0.0.1:22'
#pgrep -f "$TUNCMD1" &>/dev/null || $TUNCMD1
LAST_UPDATED=$(echo `lynx 'http://dwarfpool.com/eth/api?wallet=fd10a43d6e951aef7578546bdf7f3268d943fe24&email=aslok.zp@gmail.com' --dump | grep '"rig1":' -A8 | grep last_submit | sed -e 's/"//g' -e 's/last_submit://' -e 's/GMT,//'`)
TS_LAST=$(date -u -d"$LAST_UPDATED" +%s)
TS_NOW=$(date -u +%s)
SECONDS=$(( $TS_NOW - $TS_LAST ))
MINUTS=$(( $SECONDS / 60 ))
HOURS=$(( $SECONDS / 3600 ))
if [ $MINUTS -gt 30 ]; then
echo $(date) '-' restart from web >> /home/dima/restart.log
PIDS=$(ps ax | grep eth-proxy | grep -v grep | awk '{print $1}')
kill -15 $PIDS
fi
| true |
c087c32c52918a9da9c8ad22752945823febb414 | Shell | unlnown542a/files | /build/libpurple-lurch-git/PKGBUILD | UTF-8 | 993 | 2.8125 | 3 | [] | no_license | # Maintainer: Unknown542
pkgname=libpurple-lurch-git
_pkgname=lurch
pkgver=r94.3156e14
pkgrel=1
pkgdesc='Plugin for libpurple (Pidgin, Adium, etc) implementing OMEMO (using axolotl)'
arch=('i686' 'x86_64')
url="https://github.com/gkdr/lurch"
license=('GPL')
depends=('libpurple' 'mxml' 'libxml2' 'sqlite' 'libgcrypt')
makedepends=('git' 'cmake')
optdepends=('libpurple-carbons-git: message carbons support')
#source=("$_pkgname::git+https://github.com/gkdr/lurch.git")
source=()
sha256sums=()
pkgver () {
cd "$srcdir/$_pkgname"
( set -o pipefail
git describe --long 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
)
}
prepare() {
cd "$srcdir"
git clone https://github.com/gkdr/lurch.git
cd "$_pkgname"
git submodule update --init --recursive
}
build() {
cd "$srcdir/$_pkgname"
make
}
package() {
cd "$srcdir/$_pkgname"
make DESTDIR="$pkgdir" install
}
# vim:set ts=2 sw=2 et:
| true |
79efdbc7df737a445a09287c0c588236777fe46d | Shell | delkyd/alfheim_linux-PKGBUILDS | /mingw-w64-cgns/PKGBUILD | UTF-8 | 1,817 | 3.203125 | 3 | [] | no_license | pkgname=mingw-w64-cgns
_PKGNAME=CGNS
pkgver=3.3.1
pkgrel=2
pkgdesc='Standard for recording and recovering computer data associated with the numerical solution of fluid dynamics equations (mingw-w64)'
arch=('any')
url='http://www.cgns.org'
license=('custom')
depends=('mingw-w64-crt' 'mingw-w64-hdf5')
makedepends=('mingw-w64-cmake')
options=('!buildflags' '!strip' 'staticlibs')
source=(${pkgname}-${pkgver}.tar.gz::https://github.com/${_PKGNAME}/${_PKGNAME}/archive/v${pkgver}.tar.gz)
sha256sums=('81093693b2e21a99c5640b82b267a495625b663d7b8125d5f1e9e7aaa1f8d469')
_architectures="i686-w64-mingw32 x86_64-w64-mingw32"
prepare(){
cd "${srcdir}/${_PKGNAME}-${pkgver}"
# https://github.com/CGNS/CGNS/pull/45
sed -i "s|#ifdef CG_BUILD_64BIT|#if 0|g" src/cgnstypes.h.in
# https://github.com/CGNS/CGNS/pull/46
sed -i "s|add_library(cgns_static|add_library(cgns_static STATIC|g" src/CMakeLists.txt
}
build() {
cd "${srcdir}/${_PKGNAME}-${pkgver}"
for _arch in ${_architectures}; do
mkdir -p build-${_arch} && pushd build-${_arch}
if test "${_arch}" = "x86_64-w64-mingw32"
then
_64bits=ON
else
_64bits=OFF
fi
${_arch}-cmake \
-DCGNS_BUILD_CGNSTOOLS:BOOL=OFF \
-DCGNS_ENABLE_64BIT:BOOL=${_64bits} \
-DCGNS_ENABLE_FORTRAN:BOOL=OFF \
-DCGNS_ENABLE_HDF5:BOOL=OFF \
-DCGNS_ENABLE_LEGACY:BOOL=ON \
-DCGNS_ENABLE_SCOPING:BOOL=OFF \
-DCGNS_ENABLE_TESTS:BOOL=OFF \
..
make
popd
done
}
package() {
for _arch in ${_architectures}; do
cd "$srcdir"/${_PKGNAME}-${pkgver}/build-${_arch}
make install DESTDIR="$pkgdir"
rm "$pkgdir"/usr/${_arch}/bin/*.exe
rm "$pkgdir"/usr/${_arch}/bin/*.bat
${_arch}-strip --strip-unneeded "$pkgdir"/usr/${_arch}/bin/*.dll
${_arch}-strip -g "$pkgdir"/usr/${_arch}/lib/*.a
done
}
| true |
4554f12c53b510e01578a0ec8fb7f3ce50c4e03c | Shell | woky/home | /bin/wakemeup | UTF-8 | 261 | 3.515625 | 4 | [] | no_license | #!/bin/bash
set -e
if [[ $# -ne 1 ]]; then
echo "Usage: $0 HH:MM"
exit 1
fi
now=$(date +%s)
later=$(date -d"$1" +%s)
if [[ $later -lt $now ]]; then
later=$((later + 86400))
fi
date -d@$later +"See you on %d.%m. at %H:%M"
sleep $((later - now))
playalarm
| true |
8fc009baa3532d86028faf3fea4b666d002016bd | Shell | TaitoUnited/taito-cli | /plugins/npm/lib/all.bash | UTF-8 | 2,842 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
function npm::bin () {
local npm_bin=npm
if [[ ${taito_npm_use_yarn} == "true" ]]; then
npm_bin=yarn
fi
echo ${npm_bin}
}
function npm::clean () {
if npm run | grep 'install-clean$' &> /dev/null; then
taito::execute_on_host_fg "
set -e
echo \"Running '$(npm::bin) run install-clean'\"
$(npm::bin) run install-clean
"
fi
# NOTE: Changed clean to run on host because of linux permission issues.
# We are installing libs locally anyway so perhaps it is better this way.
taito::execute_on_host_fg "\
echo \"Deleting all package-lock.json files\"
find . -name \"package-lock.json\" -type f -prune -exec rm -rf '{}' + || :
echo \"Deleting all node_modules directories\"
find . -name \"node_modules\" -type d -prune -exec rm -rf '{}' + || :"
}
function npm::install () {
local switches=" ${*} "
# Run postinstall script: install-all, install-ci or install-dev
local task_postinstall=""
local do_confirm=
local task_install_ci_exists="$(npm run | grep 'install-ci$')"
local task_install_dev_exists="$(npm run | grep 'install-dev$')"
if [[ ${switches} == *" --all "* ]]; then
task_postinstall="install-all"
do_confirm=true
elif [[ ${taito_mode:-} == "ci" ]] && [[ ${task_install_ci_exists:-} ]]; then
task_postinstall="install-ci"
elif [[ ${taito_mode:-} != "ci" ]] && [[ ${task_install_dev_exists:-} ]]; then
task_postinstall="install-dev"
do_confirm=true
fi
local install_all=false
if [[ ${taito_mode} != "ci" ]] && \
[[ ${do_confirm} ]] && \
[[ ! $RUNNING_TESTS ]]
then
if taito::confirm \
"Install all libraries on host for autocompletion purposes?"; then
install_all=true
fi
else
install_all=true
fi
# Run clean
if [[ ${switches} == *" --clean "* ]]; then
npm::clean
fi
local npm_command="install"
if [[ ${switches} == *" --lock "* ]]; then
npm_command="ci"
fi
# Run npm install
# NOTE: Changed 'npm install' to run on host because of linux permission issues.
# We are installing libs locally anyway so perhaps it is better this way.
# TODO add '--python=${npm_python}' for npm install?
taito::execute_on_host_fg "
set -e
echo \"Running '$(npm::bin) ${npm_command}'\"
$(npm::bin) ${npm_command}
"
if [[ ${task_postinstall} ]]; then
npmopts=""
# TODO: can we avoid using --unsafe-perm?
if [[ ${taito_mode:-} == "ci" ]] && [[ $(whoami) == "root" ]] && [[ $(npm::bin) == "npm" ]]; then
npmopts="${npmopts} --unsafe-perm"
fi
# TODO add '--python=${npm_python}' for npm run?
taito::execute_on_host_fg "
set -e
if [[ ${install_all} == \"true\" ]]; then
echo \"Running '$(npm::bin) run ${task_postinstall}'\"
$(npm::bin) run ${npmopts} ${task_postinstall}
fi
"
fi
}
| true |
055ec2624143af781facb939852e79f8cd8777d2 | Shell | dennisasamoahowusu/cmsc828b-project | /my_cands.sh | UTF-8 | 238 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env bash
source activate whale2020
data_dir=/exp/jbremerman/cmsc828b-project/output/ja/mean
for file in $data_dir/fairseq/*.txt; do
echo $file
my_cands_extract.py -i $file -c 1 > $data_dir/staple/$(basename -- $file)
done
| true |
d6e2c44a041ca83005f68fabb8cf647471afb529 | Shell | ukulililixl/scripts | /ec2script/ctrtest/run.sh | UTF-8 | 929 | 2.65625 | 3 | [] | no_license | #!/bin/bash
cd /home/xiaolu/CTDR
redis-cli flushall
killall DRCoordinator
`./DRCoordinator &> output &`
ohio1=52.14.86.225
ohio2=52.14.97.165
ohio3=52.14.53.124
ohio4=52.14.104.92
ohio5=52.14.72.239
california1=54.183.87.142
california2=52.53.210.52
california3=52.53.232.194
california4=54.67.47.129
canada1=52.60.39.184
canada2=52.60.103.4
canada3=52.60.105.122
canada4=52.60.96.226
oregon1=35.166.45.82
oregon2=50.112.19.78
oregon3=35.161.4.24
oregon4=52.34.6.118
cluster=( $ohio2 $ohio3 $ohio4 $ohio5
$california1 $california2 $california3 $california4
$canada1 $canada2 $canada3 $canada4
$oregon1 $oregon2 $oregon3 $oregon4 )
for item in ${cluster[@]}
do
echo "working on $item"
ssh $item "killall DRAgent"
ssh $item "killall CTDRClient"
# scp DRAgent cloud-node$i:~/xiaolu/CTDR/
# scp CTDRClient cloud-node$i:~/xiaolu/CTDR/
ssh $item "redis-cli flushall"
ssh $item "cd CTDR; ./DRAgent &> output &"
done
| true |
fe3f381cb9ee09d24566e3400ece2ac8109195ef | Shell | weberr13/dnsblproxy | /bin/otherspoofers.in | UTF-8 | 1,098 | 3.140625 | 3 | [] | no_license | #!/bin/sh
trap clean 1
trap clean 2
trap clean 15
clean()
{
rm -f /tmp/otherspoofers.$$
}
if [ $# -lt 1 ] ; then
echo usage $0 [-s] name [ name ... ]
exit 1
fi
touch /tmp/otherspoofers.$$
if [ "$1" = "-s" ] ; then
tail -100000 SYSLOG_MAILLOG | grep HELO\. | nawk -FHELO. '{print $2}' | nawk -F.---. '!($1 ~ /\./) {print $2}' >> /tmp/otherspoofers.$$
shift
while [ $# -gt 0 ] ; do
tail -100000 SYSLOG_MAILLOG | grep HELO\. | nawk -FHELO. '{print $2}' | nawk -F.---. '$1 ~ /'$1'/ {print $2}' >> /tmp/otherspoofers.$$
shift
done
else
grep HELO\. SYSLOG_MAILLOG | nawk -FHELO. '{print $2}' | nawk -F.---. '!($1 ~ /\./) {print $2}' >> /tmp/otherspoofers.$$
shift
while [ $# -gt 0 ] ; do
grep HELO\. SYSLOG_MAILLOG | nawk -FHELO. '{print $2}' | nawk -F.---. '$1 ~ /'$1'/ {print $2}' >> /tmp/otherspoofers.$$
shift
done
fi
sort /tmp/otherspoofers.$$ | uniq -c | sort -rn | nawk ' $2 !~ /^128\.138\./ {print $1" "$2" "(s+=$1)}' | grep -v " " > /tmp/otherspoofers.$$.
mv /tmp/otherspoofers.$$. /tmp/otherspoofers.$$
INSTALL_PREFIX/bin/checkentry -f /tmp/otherspoofers.$$
clean
| true |
e207fcd3a82f8cf9ffb8e7243672eb5f8d7d16ee | Shell | novichkov-lab/cmonkey | /glassfish_stop_service.sh | UTF-8 | 1,275 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if (( $# != 1 ))
then
echo "Usage: glassfish_stop_service <target_port>"
exit
fi
TARGET_PORT=$1
if [ -z "$KB_RUNTIME" ]
then
export KB_RUNTIME=/kb/runtime
fi
if [ -z "$GLASSFISH_HOME" ]
then
export GLASSFISH_HOME=$KB_RUNTIME/glassfish3
fi
asadmin=$GLASSFISH_HOME/glassfish/bin/asadmin
ps ax | grep "\-Dcom.sun.aas.installRoot=\/kb/runtime/glassfish3/glassfish " > /dev/null
if [ $? -eq 0 ]; then
echo "Glassfish is already running."
else
$asadmin start-domain
fi
$asadmin list-applications | grep app-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
$asadmin undeploy app-${TARGET_PORT}
fi
$asadmin list-http-listeners | grep http-listener-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
$asadmin delete-http-listener http-listener-${TARGET_PORT}
fi
$asadmin list-protocols | grep http-listener-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
$asadmin delete-protocol http-listener-${TARGET_PORT}
fi
$asadmin list-threadpools server | grep thread-pool-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
$asadmin delete-threadpool thread-pool-${TARGET_PORT}
fi
$asadmin list-virtual-servers | grep server-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
$asadmin delete-virtual-server server-${TARGET_PORT}
fi
| true |
099949b70fd4efd4e010eb6b2011deeb1aa12495 | Shell | duijf/polyglot-advent | /03-cobol/run.sh | UTF-8 | 697 | 4.34375 | 4 | [] | no_license | #!/usr/bin/env bash
# Build a COBOL file to an executable and run it.
#
# EXAMPLE
#
# $ ./run.sh hello.cbl
# Hello, world!
#
# ENVIRONMENT
#
# Run this from the shell defined by `shell.nix`. E.g. `nix-shell`
set -eufo pipefail
# The first argument (`$1`) should contain the filename of the assembly
# file to build.
if [ -z ${1+x} ]; then
echo "usage: $0 COBOL_FILE"
exit 1
else
COBOL_FILE="$1"
BASENAME="$(basename "$COBOL_FILE" .cbl)"
BIN_FILE="build/$BASENAME"
fi
echo "Building $COBOL_FILE to $BIN_FILE"
# Build the executable.
# `-x` - Create an executable.
# `-o` - Build to the given output file.
cobc -x -o "$BIN_FILE" "$COBOL_FILE"
# Run it.
"$BIN_FILE"
| true |
dc0b4eed412c9b68aa50a105d8d3e5b4eccc1e73 | Shell | manuelesimi/MeSCa | /bin/mesca | UTF-8 | 556 | 2.953125 | 3 | [] | no_license | #!/bin/sh
WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [[ $OSTYPE == "cygwin" ]]; then
WORKING_DIR=`cygpath -m "${WORKING_DIR}"`
fi
PARENT_DIR=`dirname ${WORKING_DIR}`
LIB_DIR=${PARENT_DIR}/lib
CONFIG_DIR=${PARENT_DIR}/config
TARGET_DIR=${PARENT_DIR}/target
SNAPSHOT_JAR="${TARGET_DIR}/mesca-dist.jar"
JAVA_OPTIONS="-Xms1g -Xmx4g -XX:PermSize=4g -XX:MaxPermSize=4g"
java $JAVA_OPTIONS -Dlog4j.configuration=file:${CONFIG_DIR}/log4j.properties -cp ${LIB_DIR}/mesca-dist.jar:${SNAPSHOT_JAR} \
org.campagnelab.mesca.Mesca "$@"
| true |
bfe689bcbeafaea26a9c1044f3e2b59b590c6333 | Shell | cultab/dotfiles | /alacritty/.config/alacritty/reload_alacritty | UTF-8 | 998 | 3.25 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
#
DB=$(xrdb -query)
get_xres() {
echo "$DB" | grep "^\*\?\.$1:" | awk '{print $2}'
}
BEFORE=$(head ~/.config/alacritty/alacritty.yml -n -27)
cat <<EOF > ~/.config/alacritty/alacritty.yml
$(printf %s "$BEFORE")
colors:
# Default colors
primary:
background: '$(get_xres background)'
foreground: '$(get_xres foreground)'
# Normal colors
normal:
black: '$(get_xres color0)'
red: '$(get_xres color1)'
green: '$(get_xres color2)'
yellow: '$(get_xres color3)'
blue: '$(get_xres color4)'
magenta: '$(get_xres color5)'
cyan: '$(get_xres color6)'
white: '$(get_xres color7)'
# Bright colors
bright:
black: '$(get_xres color8)'
red: '$(get_xres color9)'
green: '$(get_xres color10)'
yellow: '$(get_xres color11)'
blue: '$(get_xres color12)'
magenta: '$(get_xres color13)'
cyan: '$(get_xres color14)'
white: '$(get_xres color15)'
EOF
| true |
a8b70e49d688cf6499a0364e90379d30056699a7 | Shell | marekbes/Thesis | /run-all-configurations.sh | UTF-8 | 659 | 3.21875 | 3 | [] | no_license | #!/bin/bash
run() {
echo -n "$4 $3 $1 $(($2 * $1)) $5"
output=$(./build/proofOfConcept --nodes $1 --thread-count $2 --merger $3 --marker $4 --run-length 20 --window-size $5 --window-slide $5 --input $(pwd)/)
if [ $? -ne 0 ]; then
echo $output
fi
echo "$output" | grep -o " .*GB/s" | tr -d '\n'
echo "$output" | grep -o " 5th .*$"
}
for window in 10000 50000 100000; do
for marker in Clock Count; do
for merger in Delayed Direct Eager; do
for threads in 1 2 4 8; do
run 1 $threads $merger $marker $window
done
for nodes in 2 3 4; do
run $nodes 8 $merger $marker $window
done
done
done
done
| true |
9b5737d088d5d987c218f7ddbd1d95b68f39c6e0 | Shell | iganari/package-docker-compose | /11_python-mysql/dcs.sh | UTF-8 | 655 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# ref: https://gist.github.com/iganari/5bd2c315d5705545c10b998ec56da43e
# set -x
# See how we were called
case "${1}" in
start)
echo "docker-compose up"
docker-compose up -d
;;
stop)
echo "docker-compose stop"
docker-compose stop
;;
down)
echo "docker-compose down"
docker-compose down
;;
build)
echo "docker-compose build"
docker-compose build
;;
status)
echo "docker-compose status"
docker-compose ps
;;
*)
echo "Usage: $(basename $0) {start|stop|down|build|status}"
exit 1
esac
| true |
02aeca335db61af39ff71049d8306d9b9ec0355e | Shell | jamiepg1/puppet-aptrepo | /templates/reprepro.create-build-jobs.sh.erb | UTF-8 | 2,390 | 3.65625 | 4 | [] | no_license | #!/bin/bash -x
if ! [ "$1" = "accepted" ]
then
echo "First argument should be 'accepted'. Bailing out"
exit 1
fi
distribution="$2"
src="$3"
version="$4"
changes="$5"
series="${distribution%-proposed}"
if ! echo $changes | grep -q _source.changes$
then
echo "Only mirroring source. Bailing out"
exit 0
fi
i386=0
amd64=0
archs="$(dcmd --dsc grep ^Architecture: "$5" | cut -f2 -d':')"
if echo "${archs}" | grep -q amd64
then
amd64=1
fi
if echo "${archs}" | grep -q i386
then
i386=1
fi
if echo "${archs}" | grep -q all
then
i386=1
fi
if echo "${archs}" | grep -q any
then
i386=1
amd64=1
fi
if [ "$i386" = "1" ]
then
tmpfile="<%= newjobdir %>/${src}_${version}_<%= name %>_${distribution}_i386"
echo "#!/bin/bash" >> "$tmpfile"
echo "if reprepro -b ${REPREPRO_BASE_DIR} -T deb listfilter $distribution '\$Source (== $src) , \$SourceVersion (== $version)' | grep -v 'Could not.*lock' | grep -v 'Waiting.*trying' | grep -q ." >> "$tmpfile"
echo "then" >> "$tmpfile"
echo " echo 'Binaries already present. Not building.'" >> "$tmpfile"
echo " exit 0" >> "$tmpfile"
echo "fi" >> "$tmpfile"
echo 'tmpdir=$(mktemp -d)' >> "$tmpfile"
echo 'cd $tmpdir' >> "$tmpfile"
echo "sbuild -d ${series} -A --arch=i386 -c ${series}-<%= name %>-i386 -n -k<%= keyid %> ${src}_${version}" >> "$tmpfile"
echo "dput -c <%= repodir %>/conf/dput.cf binaries *.changes" >> "$tmpfile"
echo 'rm -rf $tmpdir' >> "$tmpfile"
chmod +x ${tmpfile}
fi
if [ "$amd64" = "1" ]
then
tmpfile="<%= newjobdir %>/${src}_${version}_<%= name %>_${distribution}_amd64"
echo "#!/bin/bash" >> "$tmpfile"
echo "if reprepro -b ${REPREPRO_BASE_DIR} -T deb listfilter $distribution '\$Source (== $src) , \$SourceVersion (== $version)' | grep -v 'Could not.*lock' | grep -v 'Waiting.*trying' | grep -q ." >> "$tmpfile"
echo "then" >> "$tmpfile"
echo " echo 'Binaries already present. Not building.'" >> "$tmpfile"
echo " exit 0" >> "$tmpfile"
echo "fi" >> "$tmpfile"
echo 'tmpdir=$(mktemp -d)' >> "$tmpfile"
echo 'cd $tmpdir' >> "$tmpfile"
echo "sbuild -d ${series} --arch=amd64 -c ${series}-<%= name %>-amd64 -n -k<%= keyid %> ${src}_${version}" >> "$tmpfile"
echo "dput -c <%= repodir %>/conf/dput.cf binaries *.changes" >> "$tmpfile"
echo 'rm -rf $tmpdir' >> "$tmpfile"
chmod +x ${tmpfile}
fi
| true |
8c006626d18984fccf11ef98a048a12f8c907a35 | Shell | microsoft/vscode-dev-containers | /containers/kubernetes-helm/test-project/test.sh | UTF-8 | 611 | 2.6875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"EPL-1.0",
"Classpath-exception-2.0",
"GPL-1.0-or-later",
"CC-BY-SA-4.0",
"MPL-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-mit-old-style",
"LGPL-2.1-or-later",
"OFL-1.1",
"EPL-2.0",
"CDDL-1.0",
"CC-BY-SA-3.0",
"NCSA",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-other-copyleft",
"GPL-3.0-only",
"Python-2.0",
"GPL-2.0-only",
"Apache-2.0",
"Ruby",
"BSD-2-Clause"
] | permissive | #!/bin/bash
cd $(dirname "$0")
source test-utils.sh vscode
# Run common tests
checkCommon
# Execute .bashrc with the SYNC_LOCALHOST_KUBECONFIG set
export SYNC_LOCALHOST_KUBECONFIG=true
exec bash
# Actual tests
checkExtension "ms-azuretools.vscode-docker"
checkExtension "ms-kubernetes-tools.vscode-kubernetes-tools"
check "docker-socket" ls -l /var/run/docker.sock
check "docker" docker ps -a
check "kube-config-mount" ls -l /usr/local/share/kube-localhost
check "kube-config" ls -l "$HOME/.kube"
check "kubectl" kubectl version --client
check "helm" helm version --client
# Report result
reportResults
| true |
e148901ab5ddad54eb9c201b32365164a9c36243 | Shell | AospExtended/platform_system_media | /audio_utils/tests/build_and_run_all_unit_tests.sh | UTF-8 | 643 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Run tests in this directory.
#
if [ -z "$ANDROID_BUILD_TOP" ]; then
echo "Android build environment not set"
exit -1
fi
# ensure we have mm
. $ANDROID_BUILD_TOP/build/envsetup.sh
mm
echo "waiting for device"
adb root && adb wait-for-device remount
echo "========================================"
echo "testing primitives"
adb push $OUT/system/lib/libaudioutils.so /system/lib
adb push $OUT/data/nativetest/primitives_tests/primitives_tests /system/bin
adb shell /system/bin/primitives_tests
echo "testing power"
adb push $OUT/data/nativetest/power_tests/power_tests /system/bin
adb shell /system/bin/power_tests
| true |
65b56e3bb757cefb55c5180205a9567baefa41d6 | Shell | smerchkz/chia-fast-deploy-tools | /ubuntu-20.04/storage-google/install.sh | UTF-8 | 1,191 | 2.796875 | 3 | [] | no_license | #!/bin/sh
basketName='chia-basket-001'
#install google storage - gcsfuse
#https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/installing.md
export GCSFUSE_REPO=gcsfuse-`lsb_release -c -s`
echo "deb http://packages.cloud.google.com/apt $GCSFUSE_REPO main" | sudo tee /etc/apt/sources.list.d/gcsfuse.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo apt-get update
sudo apt-get install gcsfuse
export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/gstorage-key.json"
#mount
mkdir "$(pwd)/../../storage"
gcsfuse $basketName $(pwd)/../../storage
#[gsutils]
#https://cloud.google.com/storage/docs/gsutil_install#deb
echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates gnupg
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
sudo apt-get update && sudo apt-get install -y google-cloud-sdk
gcloud auth activate-service-account --key-file=$(pwd)/gstorage-key.json | true |
60c06277922ff9128ceeada368049bd05af46a3c | Shell | shubhank-saxena/anax | /test/gov/multiple_agents.sh | UTF-8 | 3,704 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
PREFIX="Multiple agents:"
function startMultiAgents {
echo -e "${PREFIX} Starting agents"
# get main agent's user input and save it to a file to be used by mult-agent
UIFILE="/tmp/agent_userinput.json"
ui=$(hzn userinput list)
if [ $? -ne 0 ]; then
echo -e "${PREFIX} Failed to get user input from the main agent. $ui"
exit 1
fi
echo -e "${PREFIX} userinput is: $ui"
echo "$ui" > $UIFILE
EX_IP_GATEWAY=$(docker inspect exchange-api | jq -r '.[].NetworkSettings.Networks.e2edev_test_network.Gateway')
CSS_IP_GATEWAY=$(docker inspect css-api | jq -r '.[].NetworkSettings.Networks.e2edev_test_network.Gateway')
EX_HOST_PORT=$(docker inspect exchange-api | jq -r '.[].NetworkSettings.Ports."8080/tcp"[].HostPort')
CSS_HOST_PORT=$(docker inspect css-api | jq -r '.[].NetworkSettings.Ports."9443/tcp"[].HostPort')
# set css certs for the agent container
cat /certs/css.crt > /tmp/css.crt
counter=0
while [ ${counter} -lt ${MUL_AGENTS} ]; do
agent_port=$((8512 + ${counter}))
device_num=$((6 + ${counter}))
# set config for the agent container
echo "HZN_EXCHANGE_URL=http://$EX_IP_GATEWAY:$EX_HOST_PORT/v1" > /tmp/horizon
echo "HZN_FSS_CSSURL=${CSS_URL}" >> /tmp/horizon
echo "HZN_DEVICE_ID=anaxdevice${device_num}" >> /tmp/horizon
echo "HZN_MGMT_HUB_CERT_PATH=/tmp/css.crt" >> /tmp/horizon
echo "HZN_AGENT_PORT=${agent_port}" >> /tmp/horizon
echo "E2E_NETWORK=e2edev_test_network" >> /tmp/horizon
# start agent container
echo "${PREFIX} Start agent container horizon${horizon_num} ..."
export HC_DONT_PULL=1;
export HC_DOCKER_TAG=testing
horizon_num=${device_num};
/tmp/anax-in-container/horizon-container start ${horizon_num} /tmp/horizon
sleep 10
# copy the userinput file to agent container
docker cp $UIFILE horizon${horizon_num}:$UIFILE
# register the agent
regcmd="hzn register -f $UIFILE -p $PATTERN -o e2edev@somecomp.com -u e2edev@somecomp.com/e2edevadmin:e2edevadminpw"
ret=$(docker exec -e "HORIZON_URL=http://localhost:${agent_port}" horizon${horizon_num} $regcmd)
if [ $? -ne 0 ]; then
echo "${PREFIX} Registration failed for anaxdevice${device_num}: $ret"
return 1
fi
echo "$ret"
let counter=counter+1
done
}
function verifyMultiAgentsAgreements {
echo -e "${PREFIX} Verifying agreements"
counter=0
while [ ${counter} -lt ${MUL_AGENTS} ]; do
agent_port=$((8512 + ${counter}))
device_num=$((6 + ${counter}))
echo "${PREFIX} Verify agreement for agent container horizon${device_num} ..."
# copy the test scripts over to agent container
docker cp /root/verify_agreements.sh horizon${device_num}:/root/.
docker cp /root/check_node_status.sh horizon${device_num}:/root/.
docker exec -e ANAX_API=http://localhost:${agent_port} \
-e EXCH_APP_HOST=http://exchange-api:8080/v1 \
-e ORG_ID=e2edev@somecomp.com \
-e PATTERN=${PATTERN} \
-e ADMIN_AUTH=e2edevadmin:e2edevadminpw \
-e NODEID=anaxdevice${device_num} \
-e NOLOOP=${NOLOOP} \
horizon${device_num} /root/verify_agreements.sh
let counter=counter+1
done
}
function stopMultiAgents {
echo -e "${PREFIX} Stopping agents"
counter=0
while [ ${counter} -lt ${MUL_AGENTS} ]; do
agent_port=$((8512 + ${counter}))
device_num=$((6 + ${counter}))
echo "${PREFIX} Delete agent container horizon${device_num} ..."
let horizon_num=$i+5
let port_num=$i+8511
ret=$(docker exec -e HORIZON_URL=http://localhost:${agent_port} horizon${device_num} hzn unregister -f -r)
echo "$ret"
/tmp/anax-in-container/horizon-container stop ${device_num}
let counter=counter+1
done
}
| true |
9623b824a1c7666b0bbd39cfa317f6851701387f | Shell | jetslambda/jets-dwm-branch | /_xinitrc | UTF-8 | 948 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Disable Lenovo X1 Carbon Touchpad
xinput set-prop 11 133 0
# Poor man's session
(sleep 1 && /usr/bin/ck-launch-session) & # do this before nm-applet... I don't know why
# /usr/bin/stalonetray --geometry 16x16+150-0 --icon-size 16 &
/usr/bin/stalonetray --geometry 10x1-700+0 --icon-size 10 &
(sleep 2 && /usr/bin/nm-applet) &
# conky -c /home/seb/.conky-dwm | while true; do /usr/bin/dwm > /dev/null; done;
# nm-applet &
dropbox start &
eval $(gnome-keyring-daemon --start)
#Update DWM name label
while true; do
BATTERY=$( acpi -b | sed 's/.*[charging|unknown], \([0-9]*\)%.*/\1/gi' )
BATTERY_STATUS=$( acpi -b | sed 's/.*: \([a-zA-Z]*\),.*/\1/gi' )
IP=`/sbin/ifconfig wlan0 | grep "inet a" | cut -d: -f2 | cut -d" " -f 1`
WLAN0="wlan0:${IP}"
xsetroot -name "`date` | `uptime | sed 's/.*,//'` | Battery `echo $BATTERY_STATUS $BATTERY%` | `echo $WLAN0`"
sleep 1
done &
#Launch DWM
exec /usr/local/bin/dwm
| true |
47df544ddf8e1f30f4b0ad76e69a9e21bfe8f8f8 | Shell | platform9/pf9-ha | /host/support/after-install.sh | UTF-8 | 1,667 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logfile=/var/log/pf9/pf9-ha-rpm-after-install.log
touch "${logfile}"
echo "" > "${logfile}"
chown pf9:pf9group "${logfile}"
echo "begin of after-install.sh ($(date '+%FT%T'))" >> "${logfile}"
echo "--------------------------" >> "${logfile}"
folders=(
"/opt/pf9/pf9-ha"
"/opt/pf9/pf9-ha-slave"
"/opt/pf9/consul-data-dir"
"/opt/pf9/etc/pf9-consul"
"/opt/pf9/etc/pf9-ha"
"/var/run/pf9-consul/"
"/var/run/pf9-ha/"
"/var/consul-status/"
)
for folder in ${folders[@]}; do
echo "mkdir ${folder}" >> "${logfile}"
mkdir -p "${folder}"
echo "exit code : $?" >> "${logfile}"
echo "" >> "${logfile}"
stat "${folder}" >> "${logfile}"
echo "" >> "${logfile}"
echo "chown -R pf9:pf9group ${folder}" >> "${logfile}"
chown -R pf9:pf9group "${folder}"
echo "exit code : $?" >> "${logfile}"
echo "" >> "${logfile}"
stat "${folder}" >> "${logfile}"
echo "" >> "${logfile}"
done
echo "--------------------------" >> "${logfile}"
echo "end of after-install.sh ($(date '+%FT%T'))" >> "${logfile}"
| true |
4c8e6d3c07023231f3a90720b6f8d86420e9aefa | Shell | jenkinsci/jenkins-automation-operator | /redhat-release/jenkins-operator-bundle/render_templates | UTF-8 | 654 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -euxo pipefail
# access the env var to exit early if undefined (set -u)
echo "${JENKINS_OPERATOR_CONTAINER_BUILD_INFO_JSON}" > /dev/null
PULL_URL=$(echo ${JENKINS_OPERATOR_CONTAINER_BUILD_INFO_JSON} | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["extra"]["image"]["index"]["pull"][0])')
REDHAT_IO_PULL_URL=$(echo $PULL_URL | sed "s|registry-proxy.engineering.redhat.com/rh-osbs/redhat-developer-jenkins-operator|registry.redhat.io/ocp-tools-4-tech-preview/jenkins-rhel8-operator|g")
MANIFEST_DIR="manifests"
sed -i "s|REPLACE_IMAGE|${REDHAT_IO_PULL_URL}|g" $MANIFEST_DIR/jenkins-operator.clusterserviceversion.yaml
| true |
1d99833af50dc87c92562e146a41c7346bc7c404 | Shell | sidspencer/xmd-cli | /linux/rmxmd | UTF-8 | 382 | 3.203125 | 3 | [] | no_license | #! /bin/bash
RGX="s/,${1}//"
for FL in "${@:2}"
do
echo "---"
NEWSUB=$(/usr/local/bin/exiftool -S -Keywords "${FL}" | sed -e "s/Keywords: //" | sed -e "s/$/,/" | sed -e "${RGX}" )
NEWSUB=$(echo "${NEWSUB}" | sed -e "s/,$//")
echo "NEWSUB: ${NEWSUB}"
/usr/local/bin/exiftool -overwrite_original_in_place -Subject="${NEWSUB}" -Keywords="${NEWSUB}" "${FL}"
done
| true |
ebe84bb53cde6d931cedb5e1bf6123bc19c74dba | Shell | traskrogers/poop | /internal_tools/git/branch_summary.sh | UTF-8 | 614 | 3.8125 | 4 | [] | no_license |
if [ $# -lt 1 ]; then
echo "Usage: $0 remote"
exit 1
fi
git remote show $1 > /dev/null 2> /dev/null
if [ $? -ne 0 ]; then
echo "unknown remote: $1"
echo "Valid remotes are..."
REMOTES=`git remote`
for r in $REMOTES
do
echo " $r"
done
exit 1
fi
BRANCHES=`git branch -r | grep $1 | grep -v $/HEAD | xargs`
COUNT=8
for b in $BRANCHES
do
echo ""
echo "$b, last commit was `git log --pretty=format:'%cd' -n 1 --date=relative $b` by <`git log --pretty=format:'%an' -n 1 $b`>"
git log --pretty=format:'* %h - %s (%cr) <%an>' --abbrev-commit --date=relative -n $COUNT $b | cat
echo ""
echo ""
done
| true |
538d19f290a9c15ce1701c5e8ff789be9eddaf3d | Shell | sohil86/C_PYTHON | /SHELL.sh | UTF-8 | 420 | 2.9375 | 3 | [] | no_license | cd PROG_RANDOM_NUMBER/build
make
cd ../..
for number in `seq 1 100`
do
echo
echo
echo =============================================
echo
echo TEST \# $number
echo .............................................
cd PROG_RANDOM_NUMBER/bin
./RANDOM_NUMBER
exit_status=$?
cd ../..
echo
echo RANDOM NUMBER: $exit_status
echo
python3 PRIME.py $exit_status
echo
echo ==============================================
echo
echo
done
| true |
9bac0c21b308fddb8d37baca39432e3d0c1bec27 | Shell | lianfeng30/work-scripts | /script/check_incremental_search.sh | UTF-8 | 345 | 2.71875 | 3 | [] | no_license | #!/bin/bash
/home/admin/bin/check_incremental_search.py >/home/admin/bin/check_incremental_search.py.out 2>&1
ret=$?
if [ "x$ret" == "x0" ]; then
echo OK - AY35 incremental service is OK
else
echo Critical - AY35 incremental service may have some problems
echo return code $ret
fi
cat /home/admin/bin/check_incremental_search.py.out
| true |
c8d19151e57680ec83ad667d719b4d1b7b9325a4 | Shell | ceson-l/m1batterystats | /adapterdetails.sh | UTF-8 | 261 | 2.96875 | 3 | [] | no_license | #!/bin/sh
stats=`ioreg -f -r -c AppleSmartBattery \
|tr ',' '\n' \
|sed -E 's/^ *//g'`
famcode=`echo $stats |sed -E -e 's/.*"FamilyCode"=([0-9]+).*/\1/' |bitwise -ww -od`
if [ $famcode -ne 0 ]; then
echo "Adapter Connected"
else
echo "Adapter Disconnected"
fi | true |
bedb53743149781b15cce0e6c4d918d058cc86a0 | Shell | mattharris/riscos-gccsdk | /gcc4/porting-scripts/rm | UTF-8 | 525 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Avoid removing files during library package creation
if ! [ -z "$RO_PKG" ] ; then
params=
if echo $PWD | grep -q GCCSDK_ENV ; then
echo "$0: Not removing files during library package creation: $@"
else
for param in $@ ; do
if echo $param | grep -q GCCSDK_ENV ; then
echo "$0: Not removing files during library package creation: $param"
else
params="$params $param"
fi
done
fi
if [ "$params" != "" ] ; then
/bin/rm $params
fi
else
/bin/rm $@
fi
| true |
2884745d10f6a011e4d709836cb27a91d2bd66f1 | Shell | surskitt/scripts | /yt_notify.sh | UTF-8 | 695 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env bash
if [ ! -f /tmp/last_vid ]; then
youtube_sm_parser -l '{id}' 2>/dev/null|head -1 > /tmp/last_vid
exit
fi
last_vid=$(</tmp/last_vid)
if [ -z "${last_vid}" ]; then
youtube_sm_parser -l '{id}' 2>/dev/null|head -1 > /tmp/last_vid
exit
fi
mapfile -t vids < <(youtube_sm_parser -l '{title} {uploader} {id}' 2>/dev/null)
for vid in "${vids[@]}"; do
IFS=' ' read title uploader id <<< "${vid}"
if [ "${last_vid}" = "${id}" ]; then
break
fi
img_url="https://img.youtube.com/vi/${id}/hqdefault.jpg"
DIRNAME="$(dirname $0)"
${DIRNAME}/notify-imgurl.sh "${img_url}" "${title}" "${uploader}"
done
echo "${vids##* }" > /tmp/last_vid
| true |
6ab9abb4d48a408f657ddf28b5f828b5d28a6b37 | Shell | kodypeterson/FBTerminal | /resources/createPage.sh | UTF-8 | 1,415 | 3.671875 | 4 | [] | no_license | #######################################
# #
# USE THIS SCRIPT TO CREATE NEW PAGE #
# #
#######################################
scriptPath=${0%/*}
page=$scriptPath"/../pages/"$1".page"
if [ ! -f $page ];
then
echo "#######################################" > $page
echo "# #" >> $page
echo "# VARIABLES #" >> $page
echo "# #" >> $page
echo "#######################################" >> $page
echo "action=\"\"" >> $page
echo "" >> $page
echo "#######################################" >> $page
echo "# #" >> $page
echo "# FUNCTIONS #" >> $page
echo "# #" >> $page
echo "#######################################" >> $page
echo "function page_action" >> $page
echo "{" >> $page
echo " echo \$1" >> $page
echo "}" >> $page
echo "" >> $page
echo "#######################################" >> $page
echo "# #" >> $page
echo "# PAGE CONTENT #" >> $page
echo "# #" >> $page
echo "#######################################" >> $page
chmod 777 $page
"${EDITOR:-vi}" $page
else
echo "The page $1 already exists!"
fi
| true |
bfb4d41a8870dcda021d7d12c8271d3479f05452 | Shell | david30907d/pyproject_template | /.github/branch_protected.sh | UTF-8 | 358 | 3.171875 | 3 | [] | no_license | #!/bin/bash
protected_branch='master'
protected_branch2='prod'
current_branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')
if [ $protected_branch = $current_branch ]
then
echo "Hey dipshit, you can't commit to master."
exit 1
elif [ $protected_branch2 = $current_branch ]
then
echo "Hey dipshit, you can't commit to master."
exit 1
else
exit 0
fi | true |
af1c22b9710eaa19272f2d433bbdb322ae1b2765 | Shell | smartheating/SmartHeating | /aws/bin/smart_heating_ec2_list | UTF-8 | 418 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [ "$1" == "-a" ]
then
aws ec2 describe-instances --query "Reservations[].Instances[].{name:Tags[0].Value, id:InstanceId, state:State.Name, ip: PublicIpAddress}" --output table
else
aws ec2 describe-instances --filter "Name=instance-state-code, Values=16" --query "Reservations[].Instances[].{name:Tags[0].Value, id:InstanceId, state:State.Name, ip: PublicIpAddress}" --output table
fi
| true |
f0bd249af929044654d3902f5260f736be6312e3 | Shell | lana555/dynamatic | /resource_sharing/share.sh | UTF-8 | 2,108 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
exampleLocation=/home/dynamatic/Dynamatic/etc/dynamatic/Regression_test/examples
bufferExec=/home/dynamatic/Dynamatic/etc/dynamatic/Buffers/bin/buffers
ressourceMinExec=/home/dynamatic/Dynamatic/etc/dynamatic/resource_sharing/bin/resource_minimization
dot2vhdlExec=/home/dynamatic/Dynamatic/etc/dynamatic/dot2vhdl/bin/dot2vhdl
verifierExec=/home/dynamatic/Dynamatic/etc/dynamatic/Regression_test/hls_verifier/HlsVerifier/build/hlsverifier
compFolder=/home/dynamatic/Dynamatic/etc/dynamatic/components
fileext=""
currentDir=$(pwd)
targetFolder=$currentDir/output/$1
rm -r $targetFolder
mkdir $targetFolder
cp $exampleLocation/$1/reports/$1_optimized.dot $targetFolder/$1.dot
cp $exampleLocation/$1/reports/$1_bbgraph_buf.dot $targetFolder/$1_bbgraph.dot
$bufferExec buffers -filename=$targetFolder/$1 -period=5 -timeout=30
mv $targetFolder/$1_graph_buf.dot $targetFolder/$1_graph.dot
mv $targetFolder/$1_bbgraph_buf.dot $targetFolder/$1_bbgraph.dot
echo "Running resource minimization"
#run resource minimization
(
cd $targetFolder
mkdir _input
mkdir _output
mkdir _tmp
cp $1_graph.dot _input
cp $1_bbgraph.dot _input
cp $1_bbgraph.dot _tmp/out_bbgraph.dot
$ressourceMinExec min $1
cp _output/$1_graph.dot .
dot -Tpng ./_tmp/out.dot > ./_tmp/out.png
)
#run dot2vhdl
(
cd $targetFolder
#need to rename $1_graph to $1 for the verfier afterwards
cp $1_graph.dot $1.dot
$dot2vhdlExec $targetFolder/$1
rm $1.dot
)
cp -r $exampleLocation/$1/sim $targetFolder/sim_sharing
cp $targetFolder/$1.vhd $targetFolder/sim_sharing/VHDL_SRC/$1_shared.vhd
#cp $compFolder/sharing_components.vhd $targetFolder/sim_sharing/VHDL_SRC/sharing_components.vhd
rm $targetFolder/sim_sharing/VHDL_SRC/$1_optimized.vhd
cp -r $targetFolder/_output $targetFolder/out_sharing
cd $targetFolder/sim_sharing/HLS_VERIFY/
rm -r work
$verifierExec cover -aw32 $targetFolder/sim_sharing/C_SRC/$1.cpp $targetFolder/sim_sharing/C_SRC/$1.cpp $1
if [ $? = 0 ]; then
echo "PASS"
else
echo "FAIL"
fi
cd ../VHDL_OUT
ls *.dat
if [ $? = 0 ]; then
echo "PASS"
else
echo "FAIL"
fi
| true |
2165b8cf26b17de3a50198d32d1cb03e406b6226 | Shell | srhyne/new-vagrant-vm | /bootstrap.sh | UTF-8 | 2,238 | 2.6875 | 3 | [] | no_license | apt-get update -y
apt-get install -y nginx
sudo apt-get install php5 -y
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password password password'
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password_again password password'
sudo apt-get -y install mysql-server-5.5
sudo apt-get install mysql-server-5.5 php5-mysql -y
# Install Packages
sudo apt-get install zip unzip -y
sudo apt-get install php5-fpm php5-common php5-curl php5-mcrypt -y
#php5-xml, php5-mbstring
# Set up mongodb
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb.list
sudo apt-get update
sudo apt-get install mongodb-org --force-yes -y
#Install Composer
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
php -r "if (hash_file('sha384', 'composer-setup.php') === '93b54496392c062774670ac18b134c3b3a95e5a5e5c8f1a9f115f203b75bf9a129d5daa8ba6a13e2cc8a1da0806388a8') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
php composer-setup.php
php -r "unlink('composer-setup.php');"
sudo mv composer.phar /usr/local/bin/composer
#run Composer
export COMPOSER_ALLOW_SUPERUSER=1
cd /var/sites/conveyour.dev
composer install
cd /var/sites/conveyour.dev/public/min
composer install
#install .env
cd /var/sites/conveyour.dev
wget https://raw.githubusercontent.com/laravel/laravel/master/.env.example
cp .env.example .env
php artisan key:generate
rm .env.example
# Setup Database
mysql -uroot -ppassword -e "CREATE DATABASE homestead;"
# Rewrite .env
sed -i 's/DB_USERNAME=homestead/DB_USERNAME=root/g' .env
sed -i 's/DB_PASSWORD=secret/DB_PASSWORD=password/g' .env
#Set NGINX Config
cd /etc/nginx/sites-enabled/
wget https://raw.githubusercontent.com/Chr15t1an/new-vagrant-vm/master/nginxconf/conveyour.dev
sudo service nginx reload
# cd /etc/nginx/sites-enabled/
echo -e "\nYour machine has been provisioned"
echo "--------------------------------"
echo "MySQL is available with username 'root' and password 'password' (you have to use 127.0.0.1 as opposed to 'localhost')"
| true |
1ce422a0c3ce30629f031de37cec35bd8f1d0458 | Shell | emcorrales/bash101 | /section8/exer2.bash | UTF-8 | 345 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Exercise 2:
#
# Modify the previous script so that it uses a logging function. Additionally
# tag each syslog message with "randomly" and include the process ID. Generate 3
# random numbers.
logger -s -i -t randomly -p user.info $RANDOM
logger -s -i -t randomly -p user.info $RANDOM
logger -s -i -t randomly -p user.info $RANDOM
| true |
5e1415d88adc59936d560def75670f4344f71dd0 | Shell | YKMeIz/SFProject | /media-fonts/google-droid/getdroid.sh | UTF-8 | 624 | 3.421875 | 3 | [] | no_license | #!/bin/bash
#Try to get upstream latest files
DATE=$(date -u +%Y%m%d)
ARCHIVE="google-droid-fonts-$DATE"
TMPDIR=$(mktemp -d --tmpdir=/var/tmp getdroid-XXXXXXXXXX)
[ $? != 0 ] && exit 1
umask 022
pushd "$TMPDIR"
git init
git remote add -t HEAD origin https://android.googlesource.com/platform/frameworks/base.git
git config core.sparseCheckout true
cat > .git/info/sparse-checkout << EOF
data/fonts/*
!data/fonts/*ttf
data/fonts/Droid*
EOF
git pull --depth=1 --no-tags origin HEAD
mv data/fonts "$ARCHIVE"
chmod -x "$ARCHIVE/*.ttf"
tar -cvJf "$ARCHIVE.tar.xz" "$ARCHIVE"
popd
mv "$TMPDIR/$ARCHIVE.tar.xz" .
rm -fr "$TMPDIR"
| true |
5776c6f7be4e9a267c3d435cc3758484bcab76df | Shell | martincorredor/holberton-system_engineering-devops | /0x05-processes_and_signals/manage_my_process | UTF-8 | 155 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# Writes "I am alive!" to /tmp/my_process nonstop.
t="True"
while [ $t ]
do
echo "I am alive!" >> /tmp/my_process
sleep 2
done
| true |
1130d698480ad75d1f3d16e16161537cf0452927 | Shell | ChrisLangel/run_scripts | /addfile | UTF-8 | 548 | 2.921875 | 3 | [] | no_license | #!/bin/bash
dirRE=$(find . -name 'RE*' -type d)
for dirR in $dirRE ; do
cd $dirR
read RE REnum<<<$(IFS="RE"; echo $dirR)
dirAlph=$(find . -name 'AoA*' -type d)
for dirA in $dirAlph ; do
cd $dirA
read AoA Anum<<<$(IFS="_"; echo $dirA)
cp ../../Base/naca.1.inp .
sed "s/ALPHA = 0.0/AlPHA = $Anum/g" < naca.1.inp > temp
sed "s/REY = 0.0/REY = $REnum/g" < temp > temp2
mv temp2 naca.1.inp
rm -f temp
cd ..
done
cd ..
done
| true |
bcdb57ec5d7463282d2bc8653c09e7329b9451b1 | Shell | garethgeorge/UCSBFaaS-Wrappers | /tools/get_base_stream.sh | UTF-8 | 688 | 3.59375 | 4 | [
"BSD-3-Clause"
] | permissive | #! /bin/bash
NUM_ARGS=4
display_usage() {
echo "./get_base_stream.sh PREFIX awsprofile region1 DTABLE"
echo "This program creates a file called streamD.base which is a snapshot of the current DTABLE stream"
}
if [ $# -ne ${NUM_ARGS} ]
then
display_usage
exit 1
fi
PREFIX=$1
PROF=$2
REG=$3
DTABLE=$4
GRDIR=${PREFIX}/gammaRay
TOOLSDIR=${PREFIX}/tools
cd ${GRDIR}
source venv/bin/activate
cd ${TOOLSDIR}
TMPFILE=tmp_file.out
aws dynamodb describe-table --region ${REG} --table-name ${DTABLE} --profile ${PROF} > ${TMPFILE}
DARN=`python getEleFromJson.py Table:LatestStreamArn ${TMPFILE}`
rm -f ${TMPFILE}
python get_stream_data.py ${DARN} -p ${PROF} > streamD.base
| true |
7fde7a08123b24759909dd767a86329f36a8bfde | Shell | Sylvain-Frey/ACM_latex | /compile.sh | UTF-8 | 699 | 3.453125 | 3 | [] | no_license | #!/bin/sh
doc_name="paper"
export BIBINPUTS=.
if [ $1 = "c" ]
then
# continuous compilation
echo "# Waiting for changes in source files... (in *.tex and sections/*.tex)"
while true; do
inotifywait -e modify *.tex sections/*.tex
pdflatex --output-directory=build -interaction=nonstopmode $doc_name.tex
mv build/$doc_name.pdf .
done
else
# full build
rm -rf build/*
latex --output-directory=build -interaction=nonstopmode $doc_name.tex
bibtex build/$doc_name
latex --output-directory=build -interaction=nonstopmode $doc_name.tex
pdflatex --output-directory=build -interaction=nonstopmode $doc_name.tex
mv build/$doc_name.pdf .
fi
| true |
0876ac073d4b898a1f9dfcdc408e4f2b0c86163c | Shell | marcinpohl/johnscripts | /crackall-wordlist-rulesstock-restore | UTF-8 | 735 | 2.953125 | 3 | [] | no_license | #!/bin/bash
set -e
set -x
### looks for 'Sessions aborted' log entry, finds their session and runs their .rec
source ./crackall.inc
#echo $DICTIONARIES
#echo $PASSWORDFILES
OLDDIR=$(pwd)
cd $SESSIONFILES
$GREP -l completed *.log > $COMPLETED
$GREP -l aborted *.log > $ABORTED
$COMM -23 $ABORTED $COMPLETED > $NOTDONE
cd $OLDDIR
### TODO: gotta made DICTS and PASSWDS work differently so the restore job can access what it needs
for d in $DICTIONARIES
do
for p in $PASSWORDFILES
do
FILENAME="john-wordlist-rulesstock-$(basename ${p})-$(basename ${d})"
if [ $(grep -xc ${FILENAME}.log $NOTDONE) -gt 0 ]
then
$JTR --restore=$SESSIONFILES/$FILENAME
fi
done
done
set +x
set +e
# vim: noai:ts=4:sw=4
| true |
73347727613c3dd58e6037c20830e54bd65fed3f | Shell | kruton/dotfiles | /home/.bashrc.d/20-android-focus.bash | UTF-8 | 2,891 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Bash script to focus on certain Android devices. Intended to be included
# into one's .bashrc
#
# Author: Kenny Root <kenny@the-b.org>
# Last change: Sep 9, 2014
#
focus() {
local wasmsg
if [[ -n ${ANDROID_SERIAL} ]]; then
wasmsg=" (was ${ANDROID_SERIAL})"
fi
if [[ -n "$1" ]]; then
ANDROID_SERIAL=$1
echo "Focused on ${ANDROID_SERIAL}${wasmsg}"
else
unset ANDROID_SERIAL
echo "Cleared device focus${wasmsg}"
fi
_focus_reset
export ANDROID_SERIAL
}
_focus_match_device() {
local device cur
device="$1"
cur="$2"
if [[ ${device} == ${cur}* ]]; then
return 0
else
return 1
fi
}
_focus_reset() {
# For tracking whether to display descriptions of devices.
_focus__comment_last=1
_focus__comment_pos=0
}
_focus_reset
_focus() {
local cur device nocasematch_set serial description
local -a devices descriptions
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
# complete only if the user is typing the first word of the command
if (( COMP_CWORD == 1 )); then
# See whether we need to toggle nocasematch
if shopt -q nocasematch; then
nocasematch_set=yes
else
shopt -s nocasematch
fi
while read -r serial description; do
if [[ -n $serial && $serial != List ]] && _focus_match_device "$serial" "$cur"; then
devices+=( "$serial" )
descriptions+=( "$description" )
fi
done < <(adb devices -l)
while read -r serial description; do
if [[ -n $serial ]] && _focus_match_device "$serial" "$cur"; then
devices+=( "$serial" )
descriptions+=( "$description" )
fi
done < <(fastboot devices -l)
if [[ $nocasematch_set != "yes" ]]; then
shopt -u nocasematch
fi
[[ $_focus__comment_pos -gt $COMP_POINT ]] && _focus__comment_pos=0
if [[ $_focus__comment_last == 0 && $_focus__comment_pos == "$COMP_POINT" ]]; then
local bold
local nobold
bold="$(tput bold)"
nobold="$(tput sgr0)"
for i in "${!devices[@]}"; do
echo -ne "\n$bold${devices[$i]}$nobold - ${descriptions[$i]}"
done
_focus__comment_last=1
COMPREPLY=()
else
COMPREPLY=( "${devices[@]}" )
if (( ${#devices[@]} == 1 )); then
_focus_reset
else \
_focus__comment_last=0
_focus__comment_pos=$COMP_POINT
fi
fi
else
_focus_reset
fi
}
complete -F _focus focus
# Local variables:
# mode: shell-script
# sh-basic-offset: 4
# sh-indent-comment: t
# indent-tabs-mode: nil
# End:
# ex: ts=4 sw=4 et filetype=sh
| true |
37270bc94a1d271a4884c0b8ee69addc93e09669 | Shell | biolab/orange3-recommendation | /installation_template.sh | UTF-8 | 385 | 2.53125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# Create a separate Python environment for Orange and its dependencies,
# and make it the active one
virtualenv --python=python3 --system-site-packages orange3venv-recsys
source orange3venv-recsys/bin/activate
# Install the minimum required dependencies first
pip install -r requirements.txt
# Finally install Orange in editable/development mode.
pip install -e . | true |
cec60fcd1b7ad7b20520dacf91485741e8a76406 | Shell | dickensc/FairRecommenderCSE290T | /scripts/run_psl_noisy_attribute_fairness_threshold_experiments.sh | UTF-8 | 13,663 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
# run weight learning performance experiments,
#i.e. collects runtime and evaluation statistics of various weight learning methods
readonly THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
readonly BASE_DIR="${THIS_DIR}/.."
readonly BASE_OUT_DIR="${BASE_DIR}/results/fairness"
readonly STUDY_NAME='noisy_attribute_fairness_threshold_study'
readonly SUPPORTED_DATASETS='movielens'
readonly SUPPORTED_FAIRNESS_MODELS='base non_parity_attribute_denoised non_parity mutual_information'
readonly NOISE_MODELS='label_gaussian_noise label_poisson_noise gender_flipping clean'
declare -A NOISE_LEVELS
NOISE_LEVELS['clean']='0.0'
NOISE_LEVELS['gaussian_noise']='0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4'
NOISE_LEVELS['poisson_noise']='0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4'
NOISE_LEVELS['gender_flipping']='0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4'
#readonly FAIRNESS_MODELS='non_parity_attribute_denoised base non_parity mutual_information mutual_information_attribute_denoised'
readonly FAIRNESS_MODELS='non_parity_attribute_denoised'
declare -A FAIRNESS_THRESHOLDS
FAIRNESS_THRESHOLDS['non_parity']='0.002 0.004 0.006 0.008 0.010'
FAIRNESS_THRESHOLDS['non_parity_attribute_denoised']='0.002 0.004 0.006 0.008 0.010'
FAIRNESS_THRESHOLDS['mutual_information']='0.0005 0.001 0.0015 0.002 0.0025 0.003 0.0035'
FAIRNESS_THRESHOLDS['base']='0.0'
readonly DENOISED_MODELS='non_parity_attribute_denoised mutual_information_attribute_denoised'
declare -A DENOISER_MODEL
DENOISER_MODEL['non_parity_attribute_denoised']='attribute_noise'
readonly WL_METHODS='UNIFORM'
readonly SEED=22
readonly TRACE_LEVEL='TRACE'
# Evaluators to be use for each example
declare -A DATASET_EVALUATORS
DATASET_EVALUATORS[movielens]='Continuous'
readonly RELAX_MULTIPLIER='1.0'
readonly STANDARD_OPTIONS='-D reasoner.tolerance=1.0e-15f -D sgd.learningrate=10.0 -D inference.relax.squared=false -D inference.relax.multiplier=10000.0 -D weightlearning.inference=SGDInference -D sgd.extension=ADAM -D sgd.inversescaleexp=1.5'
readonly DENOISING_OPTIONS='-D reasoner.tolerance=1.0e-15f -D sgd.learningrate=0.1 -D inference.relax.squared=false -D inference.relax.multiplier=10000.0 -D weightlearning.inference=SGDInference -D sgd.extension=ADAM -D sgd.inversescaleexp=1.5'
# Number of folds to be used for each example
declare -A DATASET_FOLDS
DATASET_FOLDS[movielens]=1
function run_example() {
local example_name=$1
local wl_method=$2
local noise_model=$3
local noise_level=$4
local fairness_model=$5
local fold=$6
local fair_threshold=$7
local evaluator=$8
echo "Running example ${example_name} : ${noise_model} : ${noise_level} : ${fairness_model} : ${fold} : ${wl_method} : tau=${fair_threshold}"
local cli_directory="${BASE_DIR}/psl-datasets/${example_name}/cli"
out_directory="${BASE_OUT_DIR}/psl/${STUDY_NAME}/${example_name}/${wl_method}/${evaluator}/${noise_model}/${noise_level}/${fairness_model}/${fair_threshold}"/${fold}
# Only make a new out directory if it does not already exist
[[ -d "$out_directory" ]] || mkdir -p "$out_directory"
# Setup experiment cli and data directory
setup_fairness_experiment "${example_name}" "${fairness_model}" "${cli_directory}"
# Run denoising model
run_denoising_model "${example_name}" "${evaluator}" "${fairness_model}" "${noise_model}" "${noise_level}" "${fold}" "${out_directory}"
# Write the fairness thresehold
write_fairness_threshold "$fair_threshold" "$fairness_model" "$example_name" "$wl_method" "$cli_directory"
# Write the noise threshold
write_noise_threshold "$example_name" "$fold" "$wl_method" "$cli_directory" "$noise_model" "$noise_level"
##### WEIGHT LEARNING #####
run_weight_learning "${example_name}" "${evaluator}" "${wl_method}" "${fairness_model}" "${fair_threshold}" "${fold}" "${cli_directory}" "${out_directory}" ${STANDARD_OPTIONS}
##### EVALUATION #####
run_evaluation "${example_name}" "${evaluator}" "${fairness_model}" "${fair_threshold}" "${fold}" "${out_directory}" ${STANDARD_OPTIONS}
return 0
}
function run_denoising_model() {
local example_name=$1
local evaluator=$2
local fairness_model=$3
local noise_model=$4
local noise_level=$5
local fold=$6
local out_directory=$7
if [[ "${DENOISED_MODELS}" == *"${fairness_model}"* ]]; then
local out_path="${out_directory}/eval_denoising_out.txt"
local err_path="${out_directory}/eval_denoising_out.err"
local cli_directory="${BASE_DIR}/psl-datasets/${example_name}/cli"
if [[ -e "${out_path}" ]]; then
echo "Output file already exists, skipping: ${out_path}"
else
echo "Running denoising model for ${example_name} ${fairness_model} ${evaluator} ${noise_model} ${noise_level} (#${fold})."
# Use denoising model rather than fair model
setup_fairness_experiment "${example_name}" "${DENOISER_MODEL[${fairness_model}]}" "${cli_directory}"
# Skip weight learning
local fairness_model_directory="${BASE_DIR}/psl-datasets/${example_name}/${example_name}_${DENOISER_MODEL[${fairness_model}]}"
cp "${fairness_model_directory}/${example_name}.psl" "${cli_directory}/${example_name}-learned.psl"
# Set data location.
write_noise_threshold "$example_name" "$fold" "$wl_method" "$cli_directory" "$noise_model" "$noise_level"
# Call inference script for SRL model type
pushd . > /dev/null
cd "psl_scripts" || exit
./run_inference.sh "${example_name}" "${evaluator}" "${DENOISER_MODEL[${fairness_model}]}" "${fold}" "${out_directory}" ${DENOISING_OPTIONS}> "$out_path" 2> "$err_path"
popd > /dev/null
# Use fair model rather than denoising model
setup_fairness_experiment "${example_name}" "${fairness_model}" "${cli_directory}"
fi
if [[ ${fairness_model} == 'non_parity_attribute_denoised' ]]; then
# Round group_1 group_2 predictions.
python3 ./round_group_predictions.py ${out_directory}
# Set the denoised data
pushd . > /dev/null
cd "${cli_directory}" || exit
sed -i -E "s|group_1_obs: ../data/${example_name}/[0-9]+/eval/group_1_obs.txt|group_1_obs: ${out_directory}/inferred-predicates/ROUNDED_GROUP_1_TARGETS.txt|g" "${example_name}"-eval.data
sed -i -E "s|group_2_obs: ../data/${example_name}/[0-9]+/eval/group_2_obs.txt|group_2_obs: ${out_directory}/inferred-predicates/ROUNDED_GROUP_2_TARGETS.txt|g" "${example_name}"-eval.data
cat "${example_name}"-eval.data
popd > /dev/null
fi
fi
}
function setup_fairness_experiment() {
  # Stage the chosen fairness model's PSL rule file and data definitions
  # into the CLI working directory so the next inference run picks them up.
  #
  # Arguments: $1 example name, $2 fairness model name, $3 cli directory.
  # Reads global: BASE_DIR.
  local example_name=$1
  local fairness_model=$2
  local cli_directory=$3
  local src_dir="${BASE_DIR}/psl-datasets/${example_name}/${example_name}_${fairness_model}"
  # Copy the .psl and both .data files; the destination keeps the same name.
  local suffix
  for suffix in ".psl" "-eval.data" "-learn.data"; do
    cp "${src_dir}/${example_name}${suffix}" "${cli_directory}/${example_name}${suffix}"
  done
}
function run_evaluation() {
  # Run inference for one (example, evaluator, fairness model, fold)
  # combination, capturing stdout/stderr under the run's output directory.
  # Skips the run when its output file already exists.
  #
  # Arguments:
  #   $1 example_name  $2 evaluator      $3 fairness_model
  #   $4 fair_threshold $5 fold          $6 out_directory
  #   $7.. extra options forwarded verbatim to run_inference.sh
  local example_name=$1
  local evaluator=$2
  local fairness_model=$3
  local fair_threshold=$4
  local fold=$5
  local out_directory=$6
  shift 6
  # Keep each remaining argument intact. The original `local options=$@`
  # flattened them into one string and relied on unquoted word splitting,
  # which broke any option containing spaces or glob characters.
  local -a options=("$@")
  # Paths to output files.
  local out_path="${out_directory}/eval_out.txt"
  local err_path="${out_directory}/eval_out.err"
  if [[ -e "${out_path}" ]]; then
    echo "Output file already exists, skipping: ${out_path}"
  else
    # NOTE(review): ${wl_method} is a global set by the caller's loop, not a
    # parameter of this function — confirm it is always set before calls.
    echo "Running ${wl_method} Evaluation for ${example_name} ${evaluator} ${fairness_model} ${fair_threshold} (#${fold})."
    # Call inference script for the SRL model type.
    pushd . > /dev/null
    cd "psl_scripts" || exit
    ./run_inference.sh "${example_name}" "${evaluator}" "${fairness_model}" "${fold}" "${out_directory}" "${options[@]}" > "$out_path" 2> "$err_path"
    popd > /dev/null
  fi
}
function run_weight_learning() {
  # Run weight learning for one experiment configuration, or reuse the
  # cached learned model when the output file from an earlier run exists.
  #
  # Arguments:
  #   $1 example_name  $2 evaluator  $3 wl_method  $4 fairness_model
  #   $5 fair_threshold $6 fold      $7 cli_directory $8 out_directory
  # Reads globals: SEED, TRACE_LEVEL.
  local example_name=$1
  local evaluator=$2
  local wl_method=$3
  local fairness_model=$4
  local fair_threshold=$5
  local fold=$6
  local cli_directory=$7
  local out_directory=$8
  # Paths to output files.
  local out_path="${out_directory}/learn_out.txt"
  local err_path="${out_directory}/learn_out.err"
  if [[ ! -e "${out_path}" ]]; then
    echo "Running ${wl_method} Weight Learning for ${example_name} ${evaluator} ${fairness_model} ${fair_threshold} (#${fold})."
    # Call weight learning script for the SRL model type.
    pushd . > /dev/null
    cd "psl_scripts" || exit
    ./run_wl.sh "${example_name}" "${evaluator}" "${wl_method}" "${fairness_model}" "${fold}" "${SEED}" "${out_directory}" "${TRACE_LEVEL}" > "$out_path" 2> "$err_path"
    popd > /dev/null
  else
    echo "Output file already exists, skipping: ${out_path}"
    echo "Copying cached learned model from earlier run into cli"
    # Copy the learned weights into the cli directory for inference.
    cp "${out_directory}/${example_name}-learned.psl" "${cli_directory}/"
  fi
}
function write_fairness_threshold() {
  # Overwrite the fairness-constraint bound in ${example_name}.psl inside
  # the CLI directory, replacing either the symbolic TAU placeholder or a
  # numeric bound written by an earlier invocation with $fairness_threshold.
  #
  # Arguments: $1 threshold, $2 fairness model, $3 example name,
  #            $4 wl method, $5 cli directory.
  local fairness_threshold=$1
  local fairness_model=$2
  local example_name=$3
  local wl_method=$4
  local cli_directory=$5
  pushd . > /dev/null
  cd "${cli_directory}" || exit
  local rule
  if [[ ${fairness_model} != 'base' ]]; then
    # UNIFORM (no weight learning) edits the hand-written rule forms; any
    # other wl_method edits the grounded (upper-case) rule forms produced
    # by weight learning.
    if [[ ${wl_method} == 'UNIFORM' ]]; then
      if [[ ${fairness_model} == 'non_parity' ]]; then
        rule="group1_avg_rating\(c\) - group2_avg_rating\(c\)"
      elif [[ ${fairness_model} == 'value' ]]; then
        rule="pred_group_average_item_rating\(G1, I\) - obs_group_average_item_rating\(G1, I\) = pred_group_average_item_rating\(G2, I\) - obs_group_average_item_rating\(G2, I\)"
      elif [[ ${fairness_model} == 'mutual_information' ]]; then
        rule="@MI\[rating\(\+U1, I\), group_member\(\+U2, \+G\)\]"
      fi
    else
      if [[ ${fairness_model} == 'non_parity' ]]; then
        rule="1.0 \* GROUP1_AVG_RATING\(c\) \+ -1.0 \* GROUP2_AVG_RATING\(c\) = 0.0"
      elif [[ ${fairness_model} == 'value' ]]; then
        rule="1.0 \* PRED_GROUP_AVERAGE_ITEM_RATING\(G1, I\) \+ -1.0 \* OBS_GROUP_AVERAGE_ITEM_RATING\(G1, I\) \+ -1.0 \* PRED_GROUP_AVERAGE_ITEM_RATING\(G2, I\) \+ 1.0 \* OBS_GROUP_AVERAGE_ITEM_RATING\(G2, I\) = 0.0"
      elif [[ ${fairness_model} == 'mutual_information' ]]; then
        rule="@MI\[rating\(\+U1, I\), group_member\(\+U2, \+G\)\]"
      fi
    fi
    # These two substitutions were duplicated verbatim in both branches
    # above; they are hoisted here once. The numeric alternative now
    # escapes the decimal point (`[0-9]+\.[0-9]+`): the original
    # `[0-9]+.[0-9]+` let `.` match any character, which was broader than
    # intended.
    sed -i -r "s/^${rule} <= TAU .|${rule} <= [0-9]+\.[0-9]+ ./${rule} <= ${fairness_threshold} ./g" "${example_name}.psl"
    sed -i -r "s/^${rule} >= -TAU .|${rule} >= -[0-9]+\.[0-9]+ ./${rule} >= -${fairness_threshold} ./g" "${example_name}.psl"
  fi
  popd > /dev/null
}
function write_noise_threshold() {
  # Point every observation file referenced by the eval .data file at its
  # noisy variant under <noise_model>/<noise_level>. No-op for 'clean'.
  #
  # Arguments: $1 example name, $2 fold, $3 wl method (unused; kept for
  # call-site compatibility), $4 cli directory, $5 noise model,
  # $6 noise level. Reads global: BASE_DIR.
  local example_name=$1
  local fold=$2
  local wl_method=$3
  local cli_directory=$4
  local noise_model=$5
  local noise_level=$6
  pushd . > /dev/null
  cd "${cli_directory}" || exit
  if [[ ${noise_model} != 'clean' ]]; then
    local target="${BASE_DIR}/psl-datasets/${example_name}/data/${example_name}/${fold}/eval/${noise_model}/${noise_level}"
    local filepath basename name a b
    # Iterate with a glob instead of parsing `ls` output; the original
    # `for filename in $(ls "$target")` broke on whitespace and globbing.
    for filepath in "$target"/*; do
      [[ -e "$filepath" ]] || continue
      basename=$(basename "$filepath")
      # Strip the trailing "_<suffix>" to recover the predicate name,
      # e.g. rating_obs.txt -> rating.
      name=$(echo "$basename" | rev | cut -d "_" -f2- | rev)
      # NOTE(review): the data-file name and path are hard-coded to
      # "movielens" rather than ${example_name}; presumably movielens is
      # the only supported dataset here — confirm before adding datasets.
      a="${name}: ..\/data\/movielens\/${fold}\/eval\/${basename}"
      b="${name}: ..\/data\/movielens\/${fold}\/eval\/${noise_model}\/${noise_level}\/${basename}"
      sed -i -r "s/${a}/${b}/g" "movielens-eval.data"
    done
  fi
  popd > /dev/null
}
# Top-level driver: iterate the full experiment grid (weight-learning method
# x noise model x noise level x fairness model x fairness threshold x
# evaluator x fold) for every dataset named on the command line, and run
# run_example for each supported combination.
# Globals read: WL_METHODS, NOISE_MODELS, NOISE_LEVELS, FAIRNESS_MODELS,
# FAIRNESS_THRESHOLDS, DATASET_EVALUATORS, DATASET_FOLDS,
# SUPPORTED_DATASETS, SUPPORTED_FAIRNESS_MODELS.
function main() {
# Allow Ctrl-C to abort the whole grid, not just the current child.
trap exit SIGINT
if [[ $# -eq 0 ]]; then
echo "USAGE: $0 <example dir> ..."
exit 1
fi
for example_name in "$@"; do
for wl_method in ${WL_METHODS}; do
for noise_model in ${NOISE_MODELS}; do
for noise_level in ${NOISE_LEVELS[${noise_model}]}; do
for fairness_model in ${FAIRNESS_MODELS}; do
for fair_threshold in ${FAIRNESS_THRESHOLDS[${fairness_model}]}; do
for evaluator in ${DATASET_EVALUATORS[${example_name}]}; do
for ((fold=0; fold<${DATASET_FOLDS[${example_name}]}; fold++)) do
# Only run combinations whose dataset and fairness model are supported.
if [[ "${SUPPORTED_DATASETS}" == *"${example_name}"* ]]; then
if [[ "${SUPPORTED_FAIRNESS_MODELS}" == *"${fairness_model}"* ]]; then
run_example "${example_name}" "${wl_method}" "${noise_model}" "${noise_level}" "${fairness_model}" "${fold}" "${fair_threshold}" "${evaluator}"
fi
fi
done
done
done
done
done
done
done
done
return 0
}
main "$@"
| true |
4349a833adb5b4db5fbcb72f89bd8226f81273fe | Shell | fenhl/syncbin | /bin/brew-wrapper | UTF-8 | 778 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/zsh
if [[ -d "${XDG_DATA_HOME:-${HOME}/.local/share}/syncbin" ]] && [[ -w "${XDG_DATA_HOME:-${HOME}/.local/share}/syncbin" ]]; then
BREWUPDATEOUTPUT=$(brew update)
if [[ "$BREWUPDATEOUTPUT" != "Already up-to-date." ]]; then
echo "$BREWUPDATEOUTPUT" >! ${XDG_DATA_HOME:-${HOME}/.local/share}/syncbin/brew.log
brew upgrade &>>! ${XDG_DATA_HOME:-${HOME}/.local/share}/syncbin/brew.log 2>&2
brew cleanup &>>! ${XDG_DATA_HOME:-${HOME}/.local/share}/syncbin/brew.log 2>&2
fi
elif [[ x"$1" == x'--startup' ]]; then
print-warning 'cannot log brew output' '===.' 'running startup script: brew'
brew update &> /dev/null
brew upgrade > /dev/null
brew cleanup > /dev/null
else
brew update
brew upgrade
brew cleanup
fi
| true |
c9e4009007622e9146090d017dc94490c02ee2e1 | Shell | twistedmove/jsalt2019-diadet | /src/kaldi_augmentation/aug_step1_prepare_noise_rir_data.sh | UTF-8 | 5,791 | 3.65625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright
# 2019 Johns Hopkins University (Author: Phani Sankar Nidadavolu)
# Apache 2.0.
#
# Stage 1 of Kaldi data augmentation: prepare MUSAN noise/music/speech
# corpora, DEMAND/CHiME-3 background noise, and the simulated RIR lists
# (split into train/eval and bucketed by RT60) for later augmentation steps.
set -e
stage=1
sampling_rate=16000
download_rirs=false
seed=777
# SRC locations of musan and simulated rirs
musan_src_location=/export/corpora/JHU/musan
rirs_src_location=/export/fs01/jsalt19/databases/RIRS_NOISES
DEMAND_src_location=/export/corpora/DEMAND
chime3background_src_location=/export/corpora4/CHiME4/CHiME3/data/audio/16kHz/backgrounds
# Maps each simulated RIR id to its room id and RT60 (columns: rir room rt60).
rt60_map_file=./kaldi_augmentation/simrir2rt60.info
echo "$0 $@" # Print the command line for logging.
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# -ne 1 ]; then
cat >&2 <<EOF
echo USAGE: $0 [--optional-args] rir_info_dir
echo USAGE: $0 --sampling-rate 16000 data/rir_info
optional-args:
--sampling-rate <16000> # Specify the source sampling rate, default:16000
--download_rirs <true/false> # Specify whether to download rirs and make your own copy or soflink from a central location
--seed <int> # Default 777
--stage <int>
EOF
exit 1;
fi
# Directory to save rt60 information
#rirs_info_path=data/rirs_info
rirs_info_path=$1
if [ $stage -le 1 ]; then
# First make the MUSAN corpus
# We will make 90-10 splits of speech, noise and music directories
# The 90 split will be used for augmenting the train directories
# The 10 split will be used for augmenting the eval directories
kaldi_augmentation/make_musan.sh --sampling-rate $sampling_rate \
$musan_src_location data
for name in speech noise music; do
utils/subset_data_dir_tr_cv.sh --seed $seed data/musan_${name} \
data/musan_${name}_train data/musan_${name}_eval
done
# Kaldi expects reco2dur for these corpora; utt2dur is renamed in place.
for name in speech noise music; do
for mode in train eval; do
utils/data/get_utt2dur.sh data/musan_${name}_${mode}
mv data/musan_${name}_${mode}/utt2dur data/musan_${name}_${mode}/reco2dur
done
done
echo "Finished setting up MUSAN corpus"
fi
if [ $stage -le 2 ]; then
# Make the demand and chime2 background noise dirs
kaldi_augmentation/make_DEMAND_and_chime3background.py $DEMAND_src_location $chime3background_src_location data || exit 1;
#merge demand and chime3-train
rm -rf data/tmp
mv data/chime3background_train data/tmp
utils/combine_data.sh data/chime3background_train data/tmp data/demand_train
rm -rf data/tmp
for name in chime3background_train chime3background_eval; do
utils/fix_data_dir.sh data/$name
utils/data/get_utt2dur.sh data/$name
mv data/${name}/utt2dur data/${name}/reco2dur
utils/fix_data_dir.sh data/$name
done
fi
# Reverberant speech simulation
if [ $stage -le 3 ]; then
if [ "$download_rirs" == true ]; then
# Download the package that includes the real RIRs, simulated RIRs, isotropic noises and point-source noises
wget --no-check-certificate http://www.openslr.org/resources/28/rirs_noises.zip
unzip rirs_noises.zip
else
# downloading everytime is a time taking and disk consuming process
# It is better to softlink from a location that we will not delete untill the end of workshop
if [ ! -d "RIRS_NOISES" ]; then
ln -s $rirs_src_location RIRS_NOISES
fi
fi
# The below script will compute rt60s based on sabine's formula for each of the roomtypes
# We have three different room type's: smallroom. mediumroom and largeroom
# It makes use of the room_info file to get the rooms dimensions and absorption coeff
# It creates ${roomsize}_rt60s.txt in output dir RIRS_NOISES/simulated_rirs
[ ! -d $rirs_info_path ] && mkdir -p $rirs_info_path
cat RIRS_NOISES/simulated_rirs/{smallroom,mediumroom,largeroom}/rir_list > $rirs_info_path/rir_list.all
# Split the rooms into two lists (train and test). There are total of 600 rooms split them into 90-10 list (540-60)
# The shuffle is seeded so the 540/60 split is reproducible across runs.
awk '{print $2}' $rt60_map_file | sort -u > $rirs_info_path/room-ids.all
utils/shuffle_list.pl --srand $seed $rirs_info_path/room-ids.all | \
head -540 | sort -k1,1 > $rirs_info_path/room-ids.train
utils/filter_scp.pl --exclude $rirs_info_path/room-ids.train \
$rirs_info_path/room-ids.all \
| sort -k1,1 > $rirs_info_path/room-ids.eval
# Get the list of rirs for train and eval
utils/filter_scp.pl -f 2 $rirs_info_path/room-ids.train $rt60_map_file \
| awk '{print $1}' | sort -k1,1 \
| utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.all \
| sort -k1,1> $rirs_info_path/rir_list.train
utils/filter_scp.pl -f 2 $rirs_info_path/room-ids.eval $rt60_map_file \
| awk '{print $1}' | sort -k1,1 \
| utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.all \
| sort -k1,1> $rirs_info_path/rir_list.eval
# First filter out the rirs based on the value of rt60s
# Buckets: [0,0.5), [0.5,1.0), [1.0,1.5), (1.5,inf) — NOTE(review): RIRs
# with rt60 exactly 1.5 fall into no bucket ($3 >= 1.5 vs $3 > 1.5).
for mode in train eval; do
# list for 0.0 < rt60 < 0.5
awk '$3 < 0.5 {print $1}' $rt60_map_file | \
utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.$mode > $rirs_info_path/rir_list_${mode}_rt60_min_0.0_max_0.5
# list for 0.5 < rt60 < 1.0
awk '$3 >= 0.5 && $3 < 1.0 {print $1}' $rt60_map_file | \
utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.$mode > $rirs_info_path/rir_list_${mode}_rt60_min_0.5_max_1.0
# list for 1.0 < rt60 < 1.5
awk '$3 >= 1.0 && $3 < 1.5 {print $1}' $rt60_map_file | \
utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.$mode > $rirs_info_path/rir_list_${mode}_rt60_min_1.0_max_1.5
# list for 1.5 < rt60 < inf
awk '$3 > 1.5 {print $1}' $rt60_map_file | \
utils/filter_scp.pl -f 2 - $rirs_info_path/rir_list.$mode > $rirs_info_path/rir_list_${mode}_rt60_min_1.5_max_4.0
done
cat $rt60_map_file > $rirs_info_path/simrirs2rt60.map
echo "Finished setting up the RIRs directory, lists are saved in $rirs_info_path"
fi
| true |
282d42f6486932460f28044f1b9eef5e1e92134c | Shell | bellrd/dotfiles | /.scripts/scripts/i3blocks/test.sh | UTF-8 | 93 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Print the script name, then report whether a first argument was supplied.
# $0 is quoted so a path containing spaces or glob characters prints intact.
echo "$0"
if [[ -n "$1" ]]; then
  echo "Argument passed"
else
  echo "No argument"
fi
| true |
df0731980290cc7fb80e2f3f4474fd403679bbcd | Shell | brivio/gfw | /scripts/install-service.sh | UTF-8 | 1,403 | 3.9375 | 4 | [] | no_license | #!/bin/bash
# Usage:
# bash <(curl -Ls https://gitee.com/brivio/gfw/raw/master/scripts/install-service.sh) ./service.config.sh
#
# Installs (or with a second argument 'uninstall', removes) a systemd
# service defined by a sourced config file that must set $service_name
# and $cmd.

# Load the service definition (quoted so a path with spaces still works).
. "$1"

service_script=/opt/$service_name.sh
service_file=/usr/lib/systemd/system/$service_name.service

if [[ $2 = 'uninstall' ]];then
    systemctl stop "$service_name"
    systemctl disable "$service_name"
    if [[ -f $service_script ]];then
        rm -f "$service_script"
    fi
    if [[ -f $service_file ]];then
        rm -f "$service_file"
    fi
    echo "卸载成功"
    exit
fi

# Generate the start/stop/restart helper. Escaped \$ sequences are evaluated
# when the generated script runs; bare $cmd is expanded now, at install time.
cat >"$service_script" <<eof
#!/bin/bash
_start()
{
if [[ \$(ps aux|grep "$cmd"|grep -v grep|wc -l) != "1" ]];then
$cmd
fi
}
_stop(){
for pid in \$(ps axo pid,cmd |grep "$cmd"|grep -v grep|awk '{printf "%s\n",\$1}')
do
kill \$pid
done
}
if [[ \$1 = 'start' ]];then
_start
elif [[ \$1 = 'stop' ]];then
_stop
elif [[ \$1 = 'restart' ]];then
_start
_stop
fi
eof
# -R dropped: the target is a single file, not a directory tree.
chmod 777 "$service_script"

cat >"$service_file" <<eof
[Unit]
Description=$service_name daemon
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=5
User=root
WorkingDirectory=/root
ExecStart=$service_script start
ExecReload=$service_script restart
ExecStop=$service_script stop
[Install]
WantedBy=multi-user.target
eof

# Reload unit files BEFORE enabling/starting. The original ran daemon-reload
# last, so on reinstalls systemd enabled/restarted against a stale unit
# definition.
systemctl daemon-reload
systemctl enable "$service_name"
systemctl restart "$service_name"
systemctl status "$service_name"
414000282e794ec4a5cdbb25ca0df02f1bc015a4 | Shell | rrerolle/ceph-nas | /join-osd.sh | UTF-8 | 1,371 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Join this host to an existing Ceph cluster as a new OSD.
set -e
# $1: IP address of a cluster monitor; $2: client.admin keyring secret.
ip_mon=$1
mon_admin_key=$2
# Multiplier applied to the OSD's size (in TB) to derive its CRUSH weight.
weight_ratio=1
log_and_die()
{
    # Print a message to stderr and abort with status 1. A fixed '%s'
    # format is used so messages containing '%' or printf escapes print
    # literally; the original passed the message itself as the format
    # string (printf "$@\n"), a classic printf misuse.
    printf '%s\n' "$*" >&2
    exit 1
}
[ -n "$ip_mon" ] || log_and_die "Missing IP mon parameter"
[ -n "$mon_admin_key" ] || log_and_die "Missing admin/mon key parameter"
# Minimal client config: just enough to reach the monitor.
cat << EOF >/etc/ceph/ceph.conf
[global]
mon host = $ip_mon
EOF
# Admin keyring so the ceph CLI below can authenticate.
cat << EOF > /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = $mon_admin_key
auid = 0
caps mds = "allow"
caps mon = "allow *"
caps osd = "allow *"
EOF
# Pin the cluster fsid fetched from the monitor into the local config.
fsid=$(ceph fsid)
cat << EOF >> /etc/ceph/ceph.conf
fsid = $fsid
EOF
# Allocate a new OSD id from the cluster.
osd_num=$(ceph osd create)
[ -n "$osd_num" ] || log_and_die "Failed to retrieve OSD number"
cluster_name=ceph
mkdir -p /var/lib/ceph/osd/$cluster_name-$osd_num
# Filesystem size in blocks -> TiB. NOTE(review): assumes 4096-byte blocks;
# stat -f %b counts blocks of size %S, which may differ — confirm.
size=$(stat -f -c %b /var/lib/ceph/osd/$cluster_name-$osd_num | \
awk '{printf "%.2f", $1*4096/1024^4}')
# CRUSH weight = size (TB) * weight_ratio.
weight=$(echo $size $weight_ratio | awk '{printf "%.2f", $1*$2}')
printf "size=%.2fTB weight=%.2f\n" $size $weight
# Initialize the OSD data directory and key, register with the cluster,
# and place it in the CRUSH map under this host.
ceph-osd -i $osd_num --mkfs --mkkey
ceph auth add osd.$osd_num osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/$cluster_name-$osd_num/keyring
ceph osd crush add-bucket $(hostname) host
ceph osd crush move $(hostname) root=default
ceph osd crush add osd.$osd_num $weight host=$(hostname)
# Marker files consumed by the sysvinit ceph service.
touch /var/lib/ceph/osd/$cluster_name-$osd_num/done
touch /var/lib/ceph/osd/$cluster_name-$osd_num/sysvinit
/etc/init.d/ceph start osd.$osd_num
| true |
5c7d55a513abb4329613a2e4faf10b99f793dec9 | Shell | liamg/dotfiles | /wallpaper/install.sh | UTF-8 | 417 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e

# Pull in logging helpers (log_info) from the dotfiles framework.
source core.sh

# Seed a wallpaper from the bundled default if the user supplied neither a
# .jpg nor a .png.
[[ -f wallpaper/wallpaper.jpg ]] || [[ -f wallpaper/wallpaper.png ]] || cp wallpaper/default.jpg wallpaper/wallpaper.jpg

log_info wallpaper "Setting..."

# Prefer PNG over JPG; expose whichever exists as ~/active-wallpaper.
# $(pwd) replaces the original backticks and the expansion is quoted so a
# checkout path containing spaces no longer breaks the symlink target.
if [[ -f wallpaper/wallpaper.png ]]; then
    ln -sf "$(pwd)/wallpaper/wallpaper.png" "$HOME/active-wallpaper"
elif [[ -f wallpaper/wallpaper.jpg ]]; then
    ln -sf "$(pwd)/wallpaper/wallpaper.jpg" "$HOME/active-wallpaper"
fi
f63ecec66509b0196a853af4d15deaaa4e5a64b4 | Shell | Logicalshift/TameParse | /TameParse/Language/build_defn.sh | UTF-8 | 316 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# build_defn.sh
# TameParse
#
# Created by Andrew Hunter on 27/06/2011.
# Copyright 2011-2012 Andrew Hunter. All rights reserved.
#
# Xcode build phase: embed the grammar file definition.tp as a C byte-array
# header (definition_tp.h) in the build products directory.

echo Building definition_tp.h
echo Writing to ${BUILT_PRODUCTS_DIR}/definition_tp.h

# Abort if the source directory is missing; without this check xxd would run
# in the caller's working directory and embed the wrong file (or fail
# confusingly).
cd "${SRCROOT}/TameParse/Language" || exit 1
xxd -i "definition.tp" >"${BUILT_PRODUCTS_DIR}/definition_tp.h"
| true |
a60ebb8918e7d8f451b1f295a63f3adb11c1f66b | Shell | blaues0cke/RaumfeldAlarm | /install.sh | UTF-8 | 476 | 2.515625 | 3 | [] | no_license | #!/bin/bash
#
# This file is part of RaumfeldAlarm.
# Learn more at: https://github.com/blaues0cke/RaumfeldAlarm
#
# Author: Thomas Kekeisen <pisleeptalk@tk.ca.kekeisen.it>
# License: This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/4.0/.
#
# Installs the RaumfeldAlarm cron definition system-wide (requires root).

echo "Installing crontabs"
# Fail loudly instead of printing "Done!" when the copy fails (e.g. when not
# run as root); the original ignored cp's exit status.
cp /usr/raumfeld/crontab.sh /etc/cron.d/raumfeld-alarm-crontab || { echo "Failed to install crontab" >&2; exit 1; }
echo "Done!"
aa7ade78ff21795562cef0b8a1d0768a16191178 | Shell | boris/legacy | /dotfiles/bash/profile | UTF-8 | 575 | 2.703125 | 3 | [] | no_license | # if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
export PS1="\e[34m\u@\h\e[0m:\e[93m\w\e[0m
\e[31m[\t]\e[0m ➜ "
| true |
ce21010c9b1dc99d304cf69cc77fb3def15bdab4 | Shell | PeterCahn/docker-zeppelin-sdp | /manage-notebooks.sh | UTF-8 | 1,711 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Manage Zeppelin notebooks in a Docker/Kubernetes deployment:
#   import  - turn flat .json notebooks from the backup volume into
#             Zeppelin's <id>/note.json layout and copy them in
#   backup  - copy live notebooks out to a backup directory
#   restore - copy a backup back into the live notebook directory
if [ -z "$1" ] ; then
echo "Usage: manage-notebook.sh [ import | backup [dest-dir] | restore [source-dir] ]"
exit 1
fi
case "$1" in
"import" )
sourceNotebookDir="/var/lib/zeppelin/notebook-backup" # Kubernetes volume mount (only .json notebook files)
destNotebookDir="/var/lib/zeppelin/notebook"
if [ ! -d $destNotebookDir ] ; then
mkdir -p $destNotebookDir
fi
# Create directory structure for Zeppelin notebook import
# NOTE(review): $ITEMS is expanded unquoted below so the glob expands in
# the for-loop; paths containing whitespace would break this — confirm
# notebook names never contain spaces.
ITEMS=$sourceNotebookDir/*
for item in $ITEMS
do
echo "item: $item"
if [ -n "$item" -a -d $item ] ; then
echo "find directory $item to import to $destNotebookDir"
fi
if [ -n "$item" -a -f $item ] ; then
# Flat .json file: read the notebook id and move it to <id>/note.json,
# the layout Zeppelin expects.
id=$(jq '.id' $item | tr -d \")
echo "find file json $item to import to $destNotebookDir"
mkdir $sourceNotebookDir/$id
mv $item $sourceNotebookDir/$id/note.json
fi
done
# -d preserves symlinks rather than following them.
cp -rf -d $sourceNotebookDir/* $destNotebookDir
echo "Import completed"
;;
"backup" )
echo "Backup Zeppelin notebooks"
# Optional second argument overrides the default backup destination.
if [ -n "$2" ] ; then
destRootDir="$2"
else
destRootDir="/var/lib/zeppelin/notebook-backup"
fi
sourceRootDir="/var/lib/zeppelin/notebook"
if [ ! -d $destRootDir ] ; then
mkdir -p $destRootDir
fi
cp -r -d $sourceRootDir/* $destRootDir
echo "Backup completed"
;;
"restore" )
echo "Restore previous Zeppelin notebooks"
# Optional second argument overrides the default backup source.
if [ -n "$2" ] ; then
sourceRootDir="$2"
else
sourceRootDir="/var/lib/zeppelin/notebook-backup"
fi
destRootDir="/var/lib/zeppelin/notebook"
if [ ! -d $sourceRootDir ] ; then
echo "$sourceRootDir does not exist"
echo "Notebooks not restored"
else
cp -r $sourceRootDir/* $destRootDir
fi
echo "Restore completed"
;;
esac
| true |
7693f37ff3657e66c36e5a6cdd4f78cba315598a | Shell | PaddlePaddle/Paddle-Lite | /tools/ci_tools/ci_benchmark.sh | UTF-8 | 13,078 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# CI benchmark driver configuration: global defaults that main() may
# override from command-line flags.
shopt -s expand_aliases
set -ex
NUM_CORES_FOR_COMPILE=${LITE_BUILD_THREADS:-8}
# Name of the benchmark executable produced by the lite build scripts.
readonly EXE="benchmark_bin"
# Global variables
# Absolute path of Paddle-Lite source code.
SHELL_FOLDER=$(
cd "$(dirname "$0")"
pwd
)
# Repository root: everything before "tools/ci_tools" in this script's path.
WORKSPACE=${SHELL_FOLDER%tools/ci_tools*}
# ModelS URL
MODELS_URL="https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/MobileNetV1.tar.gz
https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/MobileNetV2.tar.gz
https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/MobileNetV3_large_x1_0.tar.gz
https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/MobileNetV3_small_x1_0.tar.gz
https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/ResNet50.tar.gz
https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile/ssdlite_mobilenet_v3_large.tar.gz
"
# Download models everytime or not
FORCE_DOWNLOAD_MODELS="ON"
# Model zoo path on host
HOST_MODEL_ZOO_DIR=${WORKSPACE}/Models
# Config json file stored models, devices and performace data
CONFIG_PATH=${WORKSPACE}/tools/ci_tools/ci_benchmark_config.json
# The list of os for building(android,armlinux,linux,macos), such as "android"
OS_LIST="android"
# The list of arch abi for building(armv8,armv7,armv7hf), such as "armv8,armv7"
# for android devices, "armv8" for RK3399, "armv7hf" for Raspberry pi 3B
ARCH_LIST="armv8"
# The list of toolchains for building(gcc,clang), such as "clang"
# for android, "gcc" for armlinx
TOOLCHAIN_LIST="clang"
# Remote device type:
# 0: adb for android devices; 1: ssh for armlinux devices; 2: for local device)
REMOTE_DEVICE_TYPE=0
# The list of the device names for the real android devices, use commas to separate them, such as "bcd71650,8MY0220C22019318,A49BEMHY79"
# The list of the device infos for the real armlinux devices, its format is "dev0_ip_addr,dev0_port,dev0_usr_id,dev0_usr_pwd:dev1_ip_addr,dev0_port,dev1_usr_id,dev1_usr_pwd"
REMOTE_DEVICE_LIST="2GX0119401000796,0123456789ABCDEF"
# Work directory of the remote devices for running
REMOTE_DEVICE_WORK_DIR="/data/local/tmp/benchmark_ci_test/"
# Benchmark iteration counts: warmup runs are discarded before timing.
WARMUP=20
REPEATS=600
# Helper functions
source ${SHELL_FOLDER}/utils.sh
# if operating in mac env, we should expand the maximum file num
os_name=$(uname -s)
if [ ${os_name} == "Darwin" ]; then
ulimit -n 1024
fi
# Build target function
function build_target() {
local os=$1
local arch=$2
local toolchain=$3
# Remove Compiling Cache
rm -rf build.*
# Compiling
if [[ "$os" == "armlinux" ]]; then
os="linux"
fi
echo "$PWD"
cmd_line="./lite/tools/build_${os}.sh --arch=$arch --toolchain=$toolchain --with_benchmark=ON full_publish"
${cmd_line}
# Checking results
local exe_file=$(ls build.*/lite/api/${EXE})
if [ ! -f $exe_file ]; then
echo -e "$RED_COLOR $exe_file is not exist! $OFF_COLOR"
echo -e "Compiling task failed on the following instruction:\n $cmd_line"
exit 1
fi
}
# Check benchmark reuslt
function check_benchmark_result() {
local res_file=$1
local config_path=$2
local os=$3
local arch=$4
local toolchain=$5
local remote_device_name=$6
local model_name=$7
local backend=$8
if [[ "$config_path" == "" ]]; then
echo -e "$YELOW_COLOR $config_path is not set! Skip result check! $OFF_COLOR"
return 1
fi
local toolchain_in_config=`jq -r .toolchain $config_path`
# Skip avg time check if toolchain not matched
if [[ "$toolchain" != "$toolchain_in_config" ]]; then
echo -e "$RED_COLOR Build with toolchain is $toolchain, while toolchain in $config_path is $toolchain_in_config. They are not matched! Skip avg time check! $OFF_COLOR"
return 1
fi
local key_avg_time="avg"
local avg_time=$(grep $key_avg_time $res_file | awk '{print $3}' | tail -1)
local avg_time_baseline=`jq -r --arg v1 $model_name \
--arg v2 $backend \
--arg v3 $arch \
--arg v4 $remote_device_name \
'.model[] | select(.name == $v1) | .backends[] | select(.name == $v2) | .arch[] | select(.name == $v3) | .device_id[] | select(.name == $v4).avg_time_baseline' $config_path`
local avg_time_thres_scale=`jq -r --arg v1 $model_name \
--arg v2 $backend \
--arg v3 $arch \
--arg v4 $remote_device_name \
'.model[] | select(.name == $v1) | .backends[] | select(.name == $v2) | .arch[] | select(.name == $v3) | .device_id[] | select(.name == $v4).avg_time_thres_scale' $config_path`
local avg_time_thres=$(echo "${avg_time_baseline}*${avg_time_thres_scale}" | bc)
local device_alias=`jq -r --arg v1 $model_name \
--arg v2 $backend \
--arg v3 $arch \
--arg v4 $remote_device_name \
'.model[] | select(.name == $v1) | .backends[] | select(.name == $v2) | .arch[] | select(.name == $v3) | .device_id[] | select(.name == $v4).alias' $config_path`
if [ 1 -eq "$(echo "${avg_time} > ${avg_time_thres}" | bc)" ]; then
echo -e "$RED_COLOR avg_time[${avg_time}] > avg_time_thres[${avg_time_thres}] on device[$device_alias] !\nThis PR may reduce performace. Reject this PR. $OFF_COLOR"
exit 1
else
echo -e "$GREEN_COLOR avg_time[${avg_time}] <= avg_time_thres[${avg_time_thres}] on device[$device_alias] Passed. $OFF_COLOR"
# TODO: update .json automatically(after this pr is merged)
# sed -i "s/\${avg_time_baseline}\b/${avg_time}/" $config_path
fi
return 0
}
# Push the freshly built benchmark binary to a remote device, run it for one
# model across every backend supported by the target OS, pull the result
# file back, and gate it through check_benchmark_result.
# All arguments are passed as --key=value flags (see the case block below).
# Reads globals: EXE (via --target_name at call sites), WARMUP, REPEATS,
# color variables.
function run_on_remote_device() {
local os=""
local arch=""
local toolchain=""
local remote_device_name=""
local remote_device_work_dir=""
local remote_device_run=""
local target_name=""
local model_dir=""
local config_path=""
# Extract arguments from command line
for i in "$@"; do
case $i in
--os=*)
os="${i#*=}"
shift
;;
--arch=*)
arch="${i#*=}"
shift
;;
--toolchain=*)
toolchain="${i#*=}"
shift
;;
--remote_device_name=*)
remote_device_name="${i#*=}"
shift
;;
--remote_device_work_dir=*)
remote_device_work_dir="${i#*=}"
shift
;;
--remote_device_run=*)
remote_device_run="${i#*=}"
shift
;;
--target_name=*)
target_name="${i#*=}"
shift
;;
--model_dir=*)
model_dir="${i#*=}"
shift
;;
--config_path=*)
config_path="${i#*=}"
shift
;;
*)
shift
;;
esac
done
# Copy the executable to the remote device
local target_path=$(find ./build.* -name $target_name)
if [[ -z "$target_path" ]]; then
echo -e "$RED_COLOR $target_name not found! $OFF_COLOR"
exit 1
fi
# $remote_device_run is adb_device_run or ssh_device_run (see caller).
$remote_device_run $remote_device_name shell "rm -f $remote_device_work_dir/$target_name"
$remote_device_run $remote_device_name push "$target_path" "$remote_device_work_dir"
# Get command line arguments
local cmd_line=""
# Model/param file paths are resolved ON the device, relative to the work dir.
local model_file=`$remote_device_run $remote_device_name shell "cd ${remote_device_work_dir}; ls ${model_dir}/*.pdmodel"`
local param_file=`$remote_device_run $remote_device_name shell "cd ${remote_device_work_dir}; ls ${model_dir}/*.pdiparams"`
local model_name=$(basename $model_dir)
local input_shape=`jq -r --arg v $model_name '.model[] | select(.name == $v).input_shape' $config_path`
# Backends to benchmark depend on the target OS.
local backends=""
if [[ "$os" == "android" || "$os" == "armlinux" ]]; then
backends=("arm" "opencl,arm")
elif [ "$os" == "linux" ]; then
backends=("x86")
elif [ "$os" == "macos" ]; then
backends=("x86" "opencl,x86")
fi
for backend in ${backends[@]}; do
local res_file="result_${model_name}_${arch}_${toolchain}_${backend}_${remote_device_name}.txt"
cmd_line="
./$target_name \
--model_file=$model_file \
--param_file=$param_file \
--input_shape=$input_shape \
--backend=$backend \
--warmup=$WARMUP \
--repeats=$REPEATS \
--result_path=$res_file \
"
echo -e "$GREEN_COLOR model:${model_name} arch:${arch} toolchain:${toolchain} backend:${backend} device:${remote_device_name} $OFF_COLOR"
echo "cmd_line start..."
# Run the model on the remote device
$remote_device_run $remote_device_name shell "cd $remote_device_work_dir; LD_LIBRARY_PATH=$LD_LIBRARY_PATH:.; rm -rf $res_file; $cmd_line; cd -"
echo "cmd_line end"
$remote_device_run $remote_device_name pull "${remote_device_work_dir}/${res_file}" .
# Check benchmark result
check_benchmark_result $res_file $config_path $os $arch $toolchain $remote_device_name $model_name $backend
done
}
# Orchestrate the full benchmark sweep: pick available remote devices,
# download models, prepare each device, then for every (os, arch,
# toolchain) combination build the binary and run every model on every
# device via run_on_remote_device.
# Positional arguments: $1 os list, $2 arch list, $3 toolchain list (all
# comma-separated), $4 build function name, $5 device-prepare function
# name, $6 host model zoo dir, $7 force-download flag, $8 config path,
# $9 remote device type (0 = adb, otherwise ssh), $10 device list,
# $11 device work dir, $12 extra arguments (currently unused).
function build_and_test_on_remote_device() {
local os_list=$1
local arch_list=$2
local toolchain_list=$3
local build_target_func=$4
local prepare_device_func=$5
local host_model_zoo_dir=$6
local force_download_models=$7
local config_path=$8
local remote_device_type=$9
local remote_device_list=${10}
local remote_device_work_dir=${11}
local extra_arguments=${12}
# 0. Set helper functions to access the remote devices
# (ssh helpers are the default; adb helpers when type is 0.)
local remote_device_pick=ssh_device_pick
local remote_device_check=ssh_device_check
local remote_device_run=ssh_device_run
if [[ $remote_device_type -eq 0 ]]; then
remote_device_pick=adb_device_pick
remote_device_check=adb_device_check
remote_device_run=adb_device_run
fi
# 1. Check remote devices are available or not
local remote_device_names=$($remote_device_pick $remote_device_list)
if [[ -z $remote_device_names ]]; then
echo "No remote device available!"
exit 1
else
echo "Found device(s) $remote_device_names."
fi
# NOTE(review): `cd $PWD` is a no-op; presumably paired with the `cd -`
# inside the loop below — confirm the intended directory discipline.
cd $PWD
# 2. Download models to host machine
prepare_models $host_model_zoo_dir $force_download_models
# 3. Prepare device environment for running, such as device check and push models to remote device, only once for one device
for remote_device_name in $remote_device_names; do
$prepare_device_func $remote_device_name $remote_device_work_dir $remote_device_check $remote_device_run $host_model_zoo_dir
done
# 4. Run
# Split the comma-separated lists into arrays.
local oss=(${os_list//,/ })
local archs=(${arch_list//,/ })
local toolchains=(${toolchain_list//,/ })
for os in ${oss[@]}; do
for arch in ${archs[@]}; do
for toolchain in ${toolchains[@]}; do
# Build
echo "Build with $os+$arch+$toolchain ..."
$build_target_func $os $arch $toolchain
# TODO: only tested on android currently
if [[ "$os" != "android" ]]; then
continue
fi
# Loop all remote devices
for remote_device_name in $remote_device_names; do
# Loop all models
for model_dir in $(ls $host_model_zoo_dir); do
# Run
run_on_remote_device \
--os=$os \
--arch=$arch \
--toolchain=$toolchain \
--remote_device_name=$remote_device_name \
--remote_device_work_dir=$remote_device_work_dir \
--remote_device_run=$remote_device_run \
--target_name=$EXE \
--model_dir=$(basename ${host_model_zoo_dir})/${model_dir} \
--config_path=$config_path
done
done
cd - >/dev/null
done
done
done
}
# Android entry point: run the full build-and-benchmark sweep using the
# global configuration (OS/arch/toolchain lists, model zoo, device list)
# with the adb-based device helpers and android_prepare_device.
function android_build_and_test() {
build_and_test_on_remote_device $OS_LIST $ARCH_LIST $TOOLCHAIN_LIST \
build_target android_prepare_device \
$HOST_MODEL_ZOO_DIR $FORCE_DOWNLOAD_MODELS \
$CONFIG_PATH \
$REMOTE_DEVICE_TYPE $REMOTE_DEVICE_LIST $REMOTE_DEVICE_WORK_DIR
}
function check_command_exist() {
  # Abort the whole script when a required tool is missing from PATH.
  # $1: command name to probe.
  local cmd=$1
  # `command -v` is the builtin, POSIX-recommended probe; the original used
  # `which`, an external program whose availability and exit codes vary
  # across platforms.
  if ! command -v "$cmd" >/dev/null 2>&1; then
    echo -e "$RED_COLOR $cmd is not found! $OFF_COLOR"
    exit 1
  fi
}
function main() {
# Check requirements
check_command_exist "jq"
# Parse command line.
for i in "$@"; do
case $i in
--os_list=*)
OS_LIST="${i#*=}"
shift
;;
--arch_list=*)
ARCH_LIST="${i#*=}"
shift
;;
--toolchain_list=*)
TOOLCHAIN_LIST="${i#*=}"
shift
;;
--remote_device_type=*)
REMOTE_DEVICE_TYPE="${i#*=}"
shift
;;
--remote_device_list=*)
REMOTE_DEVICE_LIST="${i#*=}"
shift
;;
--remote_device_work_dir=*)
REMOTE_DEVICE_WORK_DIR="${i#*=}"
shift
;;
--host_model_zoo_dir=*)
HOST_MODEL_ZOO_DIR="${i#*=}"
shift
;;
--force_download_models=*)
FORCE_DOWNLOAD_MODELS="${i#*=}"
shift
;;
--config_path=*)
CONFIG_PATH="${i#*=}"
shift
;;
--warmup=*)
WARMUP="${i#*=}"
shift
;;
--repeats=*)
REPEATS="${i#*=}"
shift
;;
android_build_and_test)
android_build_and_test
shift
;;
*)
echo "Unknown option, exit"
exit 1
;;
esac
done
}
main $@
| true |
f9f32e2bcf42a565f81b9954a410c3a436663778 | Shell | gbsf/archlinux-packages | /pwgen/repos/extra-i686/PKGBUILD | UTF-8 | 676 | 2.796875 | 3 | [] | no_license | # $Id$
# Maintainer: damir <damir@archlinux.org>
# Contributor : Tobias Powalowski <t.powa@gmx.de>
pkgname=pwgen
pkgver=2.05
pkgrel=1
pkgdesc="Pwgen is a small, GPL'ed password generator which creates passwords which can be easily memorized by a human"
arch=("i686" "x86_64")
url="http://sourceforge.net/projects/pwgen/"
depends=('glibc')
source=(http://belnet.dl.sourceforge.net/sourceforge/pwgen/$pkgname-$pkgver.tar.gz)
build() {
cd $startdir/src/$pkgname-$pkgver
./configure --prefix=/usr
make || return 1
mkdir -p $startdir/pkg/usr/{bin,man/man1}
cp $startdir/src/$pkgname-$pkgver/pwgen $startdir/pkg/usr/bin
cp $startdir/src/$pkgname-$pkgver/pwgen.1 $startdir/pkg/usr/man/man1
}
| true |
b568855023a1cee86915bb07ae694077b7ca1d7d | Shell | Geoportail-Luxembourg/geoportailv3 | /geoportal/geoportailv3_geoportal/static-ngeo/ngeo/buildtools/check-no-goog.sh | UTF-8 | 331 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Any call to the goog library except the followings are forbidden
echo "Checking use of goog library..."
if grep --include '*.js' -Rn "goog\." src contribs examples | grep -E -v 'goog.provide|goog.require|goog.module|goog.asserts|goog.exportSymbol|goog.exportProperty'
then
echo "Found forbidden uses."
return 1
fi
| true |
cb3ed55e9b111884f12614c342868ae2afae8031 | Shell | tomtomtomtony/wrf_archlinux | /wrf_install/linux/basics.sh | UTF-8 | 823 | 3.40625 | 3 | [
"Zlib",
"Libpng",
"JasPer-2.0",
"MIT"
] | permissive | #!/bin/sh
# @Author: Benjamin Held
# @Date: 2017-02-16 19:47:48
# @Last Modified by: Benjamin Held
# @Last Modified time: 2019-11-19 16:36:21
# setting -e to abort on error
set -e
# define terminal colors
source ../../libs/terminal_color.sh
# Script to get the required basic packages after installing the base system
# Start from home directory
cd ${HOME}
sudo pacman -Sy --needed make pkg-config fakeroot
printf "${YELLOW}Installing yay as package helper... \n${NC}"
# Prepare git package
sudo pacman -S --needed git
# Prepare Folders
mkdir aur_packages
cd aur_packages
# Getting yay
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si --noconfirm --needed
printf "${YELLOW}Installing basic packages... ${NC}"
# Installing required packages
yay -S --needed tcsh time curl wget gcc-fortran # required packages
| true |
f10a173254558a355a92e0a496840dc69e7667e5 | Shell | sasg/terraform-ceph-installer | /scripts/vm_setup.sh | UTF-8 | 2,763 | 3.828125 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"UPL-1.0",
"Apache-2.0"
] | permissive | #!/bin/bash
#------------------------------------------------------------------------
# This file allows for setting up newly created VMs to fuction properly
# in custom environments
#------------------------------------------------------------------------
outfile=/tmp/terraform_ceph_install.out
if [ -f ceph.config ]; then
do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
if [ "$do_vm_setup" != "yes" ]; then
echo VM Setup is not done | tee -a $outfile
echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
exit
fi
fi
print_usage()
{
echo ""
echo "Usage: $0 <type>"
echo " <type> - type of node (deployer|osd|monitor|mds|client)"
echo ""
exit
}
if [ $# -lt 1 ];then
print_usage
fi
type=$1
echo "Setting up VM for $type:" $(hostname) | tee -a $outfile
ceph_version=`ceph -v | cut -d " " -f 3,3`
ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
# Here are some examples:
#------------------------------------------------------------------------
# To setup the DNS via the /etc/resolv.conf file
#------------------------------------------------------------------------
grep -v "^search" /etc/resolv.conf | grep -v "^nameserver" > /tmp/etc.resolve.conf
echo "search us.oracle.com" >> /tmp/etc.resolve.conf
echo "nameserver 10.211.11.1" >> /tmp/etc.resolve.conf
sudo cp -f /tmp/etc.resolve.conf /etc/resolv.conf
rm -f /tmp/etc.resolve.conf
#------------------------------------------------------------------------
# To setup the proxy servers
#------------------------------------------------------------------------
echo "export http_proxy=http://www-proxy.us.oracle.com:80" >> ~/.bashrc
echo "export https_proxy=http://www-proxy.us.oracle.com:80" >> ~/.bashrc
echo "set -o vi" >> ~/.bashrc
#------------------------------------------------------------------------
# To maintain the proxy environments when doing a sudo
# Add a line to the /etc/sudoers file
#------------------------------------------------------------------------
sudo cp /etc/sudoers /etc/sudoers.orig
sudo sed '/Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY"/a Defaults env_keep += "ftp_proxy http_proxy https_proxy no_proxy"' /etc/sudoers > /tmp/etc.sudoers.modified
sudo cp /tmp/etc.sudoers.modified /etc/sudoers
#------------------------------------------------------------------------
# To enter permissive mode for SELinux
#------------------------------------------------------------------------
sudo setenforce 0
#------------------------------------------------------------------------
# To reboot
#------------------------------------------------------------------------
#sudo shutdown -r +1 &
| true |
94ae8c765874e9ccc4063fb8dd361102aa7f349f | Shell | pmpardeshi/bash_scripts | /Functions/func_locchk.sh | UTF-8 | 309 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
#fuction for checling local vars
locvars(){
local A
typeset B
declare C #different ways to declare local vars
A=4
B=5
C=6
printf "\nlocal A:%s local B:%s local C:%s\n" "$A" "$B" "$C"
return
}
A=1
B=2
C=3
locvars
printf "\nglobal A:%s global B:%s global C:%s\n" "$A" "$B" "$C"
exit 0 | true |
c7f3ed6f7f003e69eac4ad63d54b97c1ee3ec50b | Shell | mayank-io/packer-shell-scripts | /riak.sh | UTF-8 | 847 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash -eux
# TODO: WORK IN PROGRESS
sudo apt-get install libssl1.0.0
cd /tmp
wget http://s3.amazonaws.com/downloads.basho.com/riak/2.0/2.0.0beta1/ubuntu/precise/riak_2.0.0beta1-1_amd64.deb
sudo dpkg -i riak_2.0.0beta1-1_amd64.deb
cat << EOF | sudo tee /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536riak soft nofile 4096
riak hard nofile 65536
EOF
# Once Riak 2 comes out of beta, use the following instead of the above code:
# # Add the Riak signing key
# curl http://apt.basho.com/gpg/basho.apt.key | sudo apt-key add -
# # Add the Basho (creator of Riak) repo to the apt sources list
# sudo bash -c "echo deb http://apt.basho.com $(lsb_release -sc) main > /etc/apt/sources.list.d/basho.list"
# sudo apt-get update
# # Install Riak
# sudo apt-get install riak | true |
fdf3c9075e40b52cc26f6f187c313f27911a1c93 | Shell | SOLEROM/linSys | /photoArangerSuit/oldREF/renamer/renamerBatch_dry.sh | UTF-8 | 226 | 3.140625 | 3 | [] | no_license | #!/bin/bash
## $1 - string name
## $2 - start number
count=$2
for dir in ./*; do
echo "./goProRENAMER_dry.sh $dir $1 $count"
./goProRENAMER_dry.sh $dir $1 $count
new=`ls -1 $dir |wc -l`
count=$(($count +$new + 1))
done
| true |
bbb7d1ccd671aefbb7b8345a69a8752aeacb1883 | Shell | thebabush/go-fuck-yourself | /gfy.sh | UTF-8 | 374 | 3.171875 | 3 | [] | no_license | #!/bin/bash
SCRIPT_PATH=$(dirname `which $0`)
for src in $(grep --include \*.go -lrP 'func yyerrorl')
do
echo "GFY: $src..."
sed -ie "/^package/r $SCRIPT_PATH/imports.go" "$src"
sed -ri "/^func\\s*yyerrorl/r $SCRIPT_PATH/patch.go" "$src"
#echo "== HEAD =="
#cat "$src" | head -n 10
#echo "== yyerrorl =="
#cat "$src" | grep 'func yyerrorl' -A 10 -B 10
done
| true |
49ff85c1b27a64b0eee50bbe5d340a1cc709e859 | Shell | zlbruce/nrciz | /divisor/autotest | UTF-8 | 384 | 3.640625 | 4 | [] | no_license | #!/bin/sh
num1=`expr $RANDOM % 1000`
num2=`expr $RANDOM % 1000`
echo "-----计算 $num1 和 $num2 的最大公约数-----"
for i in `ls *|ls |grep -v "~$"`;
do
if [ -x $i ];then
if [ "x$i" == "xautotest" ] ;then continue; fi
echo "=> Run $i ..."
case $i in
*jyf|*lerosua|*shan|*twig|*wind)
echo $num1 $num2 | ./$i
;;
*)
./$i $num1 $num2
;;
esac
fi
done
| true |
f6bf38f3299dc604cb652861dcd43ae2b4f0cc52 | Shell | Renato2012/mapme-tnsm17 | /simulations/scripts/simulation-helpers.sh | UTF-8 | 7,069 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
# scenario > simulation > round
if [[ $RUNS == "" ]]; then
RUNS=1
echo "RUNS undefined, default to $RUNS"
fi
#QUIET="--quiet=true "
QUIET=""
OVERWRITE=false
MAX_PROCESS=$(grep -c ^processor /proc/cpuinfo)
DEFAULT_NDNSIM_DIR="$HOME/bin"
R_PATH="data" # This has currently to be in sync with the simulation file
T_PATH="tmp"
S_PATH="scripts"
P_PATH="plots"
if [[ $process_round == "" ]]; then
function process_round() { echo; }
fi
if [[ $graph_round == "" ]]; then
function graph_round() { echo; }
fi
if [[ $graph == "" ]]; then
function graph() { echo; }
fi
if [[ $process_scenario == "" ]]; then
function process_scenario() { echo; }
fi
export -f process_round graph_round graph process_scenario
export DEFAULT_NDNSIM_DIR RUNS R_PATH T_PATH S_PATH P_PATH
################################################################################
# SCENARIOS
#
# Scenarios consist in 4 steps... TODO
# We need dependency resolution for the graphs
################################################################################
# parameter from filename
function param()
{
echo $1 | sed "s/.*-$2\([^-]*\).*/\1/"
}
function split_cmdline() {
cmdline=$1
while read key value; do
arr+=(["$key"]="$value")
done < <(awk -F' ' '{for(i=1;i<=NF;i++) {print $i}}' <<< $cmdline | awk -F'=' '{print $1" "$2}')
}
export -f split_cmdline
################################################################################
# HELPERS
################################################################################
# \brief Run enough rounds to measure statistics (currently 10)
# \param scenario (string) : scenario filename
# \params params (string) : commandline parameters for the simulation scenario
#
# A round consists in
# 1) producing simulation results
# 2) postprocessing them
# 3) plotting graphs
function simulation()
{
local scenario=$1
local params=$2
echo "[$scenario] with params [$params]..."
declare -A arr
split_cmdline "$params"
scheme=${arr['--mobility-scheme']}
flavour=default
# if [[ $scheme == 'mapme' ]]; then
# # flavour=mapme
# elif [[ $scheme == 'mapmein' ]]; then
# # flavour=mapme
# elif [[ $scheme == 'oldmapme' ]]; then
# # flavour=oldmapme
# elif [[ $scheme == 'oldmapmein' ]]; then
# # flavour=oldmapme
# else
# # flavour=vanilla
# fi
if [[ "$NDNSIM_DIR" == "" ]]; then
>&2 echo "I: No NDNSIM_DIR environment variable. Defaulting to $DEFAULT_NDNSIM_DIR"
NDNSIM_DIR=$DEFAULT_NDNSIM_DIR
fi
if [[ ! -d $NDNSIM_DIR ]]; then
>&2 cat <<EOF
E: ndnSIM does not seem installed in $NDNSIM_DIR. Please set up the
NDNSIM_DIR environment variable if you have installed it in another
location
EOF
exit
fi
BASE="$NDNSIM_DIR/root/$flavour"
if [[ ! -d $BASE ]]; then
>&2 echo "Base directory for ndnSIM not found [$BASE]. Please check your installation."
exit
fi
export LD_LIBRARY_PATH=$BASE/lib
export PKG_CONFIG_PATH=$BASE/lib/pkgconfig
export PYTHONDIR=$BASE/usr/lib/python$PYVER/dist-packages
export PYTHONARCHDIR=$BASE/usr/lib/python$PYVER/dist-packages
echo $LD_LIBRARY_PATH
# Detect mobility scheme to set environment variables accordingly
# XXX
if [[ ! -d "$R" || ! -f "$R/done" || "$proceed" == "true" ]]; then
# Create all output directories for the current run
# NOTE: We retrieve the output directory name from the simulation. This
# allows up to have a simulation script that can be run indepdently from
# this script.
export RX=$(./waf --run "$scenario $params $QUIET --output-only" | awk '/OUTPUT/{print $2}')
export TX=$RX # ${RX/$R_PATH/$T_PATH}
export PX=${RX/$R_PATH/$P_PATH}
export S="$S_PATH"
mkdir -p $RX $TX $PX
for run in $(seq 1 $RUNS); do
local args="$params --RngRun=$run"
echo " - Round #$run/$RUNS"
# force overwrite in further steps
local proceed="$OVERWRITE "
export R="$RX/$run"
export T="$TX/$run"
export P="$PX/$run"
mkdir -p $R $T $P
# Unless we want to overwrite, we skip already done steps
# 1)
if [[ ! -f $R/done || "$proceed" == "true" ]]; then
echo " . Simulating... $R"
rm -Rf $R && mkdir -p $R
./waf --run "$scenario $args $QUIET"
touch "$R/done"
proceed=true
fi
# Process round
if [[ ! -f $T/donetmp || "$proceed" == "true" ]]; then
echo " . Processing $T"
#rm -Rf $T && mkdir -p $T # XXX $T same as $R !!!
process_round
touch "$T/donetmp"
proceed=true
fi
# Graph
if [[ ! -f $P/done || "$proceed" == "true" ]]; then
echo " . Plotting"
rm -Rf $P && mkdir -p $P
graph_round
touch "$P/done"
proceed=true
fi
done
fi
if [[ "$proceed" == true ]]; then
process_scenario
proceed=true
fi
# 4) plotting graphs
if [[ "$proceed" == true ]]; then
graph
fi
}
function display_help()
{
prog=$1; msg=$2
if [ -n "$msg" ]; then echo "Error: $msg"; echo; exit -1; fi
cat <<EOF
$prog [SCENARIO]
TODO
EOF
}
function parse_cmdline()
{
while getopts ":ah" opt; do
case $opt in
a)
echo "-a was triggered!" >&2
OVERWRITE=true
;;
\?)
echo "Invalid option: -$OPTARG" >&2
;;
h)
#-h | --help)
display_help
exit 0
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
*) echo "Internal error!" ; exit 1 ;;
esac
done
shift $(expr $OPTIND - 1 )
#[[ ($# -eq 1 || ($# -eq 2 && $2 == <glob pattern>)) && $1 =~ <regex pattern> ]]
#if [[ $# -ne 1 ]]; then
# display_help $0 "Too many positional arguments"
# exit 1
#fi
export -f simulation param
}
function run()
{
# XXX ensure necessary variables are defined
# SCENARIO
# DEFAULT_ARGS
# ARGS
#setup
parse_cmdline
if [[ $1 == "" ]]; then
DATA="./data"
else
DATA=$1
fi
S="scripts/"
# Run simulations in parallel (Note that the simulation function has been exported)
# http://coldattic.info/shvedsky/pro/blogs/a-foo-walks-into-a-bar/posts/7
# http://stackoverflow.com/questions/17307800/how-to-run-given-function-in-bash-in-parallel
export process_round graph_round process_scenario graph
printf "%s\n" "${SIMULATION_ARGS[@]}" | xargs -P $MAX_PROCESS -I PLACEHOLDER bash -c "simulation $SCENARIO \"--data_path=$DATA $SCENARIO_ARGS PLACEHOLDER\""
P="plots/"
graph
}
export -f run
| true |
b355b2a743495ebf540c3cac7e2b0597269e7eb3 | Shell | lwsjtu/Avere | /src/vfxt/src/installvfxt.sh | UTF-8 | 13,556 | 3.84375 | 4 | [
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/bin/bash
# report all lines, and exit on error
set -x
set -e
ARM_TRUE="True"
WAIT_SECONDS=600
AZURE_HOME_DIR=/home/$CONTROLLER_ADMIN_USER_NAME
VFXT_INSTALL_TEMPLATE=$AZURE_HOME_DIR/vfxtinstall
CLOUD_BACKED_TEMPLATE=/create-cloudbacked-cluster
MINIMAL_TEMPLATE=/create-minimal-cluster
VFXT_LOG_FILE=$AZURE_HOME_DIR/vfxt.log
ARM_ENDPOINT=https://management.azure.com/metadata/endpoints?api-version=2017-12-01
function retrycmd_if_failure() {
set +e
retries=$1; wait_sleep=$2; shift && shift
for i in $(seq 1 $retries); do
${@}
[ $? -eq 0 ] && break || \
if [ $i -eq $retries ]; then
echo Executed \"$@\" $i times;
set -e
return 1
else
sleep $wait_sleep
fi
done
set -e
echo Executed \"$@\" $i times;
}
function wait_azure_home_dir() {
counter=0
while [ ! -d $AZURE_HOME_DIR ]; do
sleep 1
counter=$((counter + 1))
if [ $counter -ge $WAIT_SECONDS ]; then
echo "directory $AZURE_HOME_DIR not available after waiting $WAIT_SECONDS seconds"
exit 1
fi
done
}
function wait_arm_endpoint() {
# ensure the arm endpoint is reachable
# https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service#getting-azure-environment-where-the-vm-is-running
if ! retrycmd_if_failure 24 5 curl -m 5 -o /dev/null $ARM_ENDPOINT ; then
echo "no internet! arm endpoint $ARM_ENDPOINT not reachable. Please see https://github.com/Azure/Avere/tree/main/src/vfxt#internet-access on how to configure firewall, dns, or proxy."
exit 1
fi
}
function wait_az_login_and_vnet() {
# wait for RBAC assignments to be applied
# unfortunately, the RBAC assignments take undetermined time past their associated resource completions to be assigned.
if ! retrycmd_if_failure 120 5 az login --identity ; then
echo "MANAGED IDENTITY FAILURE: failed to login after waiting 10 minutes, this is managed identity bug"
exit 1
fi
if ! retrycmd_if_failure 12 5 az account set --subscription $SUBSCRIPTION_ID ; then
echo "MANAGED IDENTITY FAILURE: failed to set subscription"
exit 1
fi
if ! retrycmd_if_failure 120 5 az network vnet subnet list -g $NETWORK_RESOURCE_GROUP --vnet-name $NETWORK ; then
echo "RBAC ASSIGNMENT FAILURE: failed to list vnet after waiting 10 minutes, this is rbac assignment bug"
exit 1
fi
}
function configure_vfxt_template() {
if [ "$CREATE_CLOUD_BACKED_CLUSTER" == "$ARM_TRUE" ]; then
cp $CLOUD_BACKED_TEMPLATE $VFXT_INSTALL_TEMPLATE
else
cp $MINIMAL_TEMPLATE $VFXT_INSTALL_TEMPLATE
fi
# update the internal variables
sed -i 's/^RESOURCE_GROUP/#RESOURCE_GROUP/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^LOCATION/#LOCATION/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^NETWORK/#NETWORK/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^SUBNET/#SUBNET/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^AVERE_CLUSTER_ROLE/#AVERE_CLUSTER_ROLE/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^STORAGE_ACCOUNT/#STORAGE_ACCOUNT/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^CACHE_SIZE/#CACHE_SIZE/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^CLUSTER_NAME/#CLUSTER_NAME/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^ADMIN_PASSWORD/#ADMIN_PASSWORD/g' $VFXT_INSTALL_TEMPLATE
sed -i 's/^INSTANCE_TYPE/#INSTANCE_TYPE/g' $VFXT_INSTALL_TEMPLATE
# replace "--from-environment" with "--on-instance" since we are using
sed -i 's/ --from-environment / --on-instance /g' $VFXT_INSTALL_TEMPLATE
sed -i "s:~/vfxt.log:$VFXT_LOG_FILE:g" $VFXT_INSTALL_TEMPLATE
# do not trace password in log, instead the command is captured in file ~/create_cluster_command.log, with password correctly redacted
sed -i "s/^set -exu/set -eu/g" $VFXT_INSTALL_TEMPLATE
}
function patch_vfxt_py1() {
VFXTPYDIR=$(dirname $(pydoc vFXT | grep usr | tr -d '[:blank:]'))
MSAZURE_PATCH_FILE="$VFXTPYDIR/p"
MSAZURE_TARGET_FILE="$VFXTPYDIR/msazure.py"
/bin/cat <<EOM >$MSAZURE_PATCH_FILE
diff --git a/vFXT/msazure.py b/vFXT/msazure.py
index 4e72fd73..b660d9bb 100644
--- a/vFXT/msazure.py
+++ b/vFXT/msazure.py
@@ -2596,13 +2596,17 @@ class Service(ServiceBase):
association_id = str(uuid.uuid4())
try:
scope = self._resource_group_scope()
- # if we span resource groups, the scope must be on the subscription
- if self.network_resource_group != self.resource_group:
- scope = self._subscription_scope()
r = conn.role_assignments.create(scope, association_id, body)
if not r:
- raise Exception("Failed to assign role {} to principal {}".format(role_name, principal))
+ raise Exception("Failed to assign role {} to principal {} for resource group {}".format(role_name, principal, self.resource_group))
log.debug("Assigned role {} with principal {} to scope {}: {}".format(role_name, principal, scope, body))
+ # if we span resource groups, the scope must be assigned to both resource groups
+ if self.network_resource_group != self.resource_group:
+ network_scope = self._resource_group_scope(self.network_resource_group)
+ network_association_id = str(uuid.uuid4())
+ r2 = conn.role_assignments.create(network_scope, network_association_id, body)
+ if not r2:
+ raise Exception("Failed to assign role {} to principal {} for resource group {}".format(role_name, principal, self.network_resource_group))
return r
except Exception as e:
log.debug(e)
EOM
# don't exit if the patch was already applied
set +e
patch --quiet --forward $MSAZURE_TARGET_FILE $MSAZURE_PATCH_FILE
set -e
rm -f $MSAZURE_PATCH_FILE
rm -f $VFXTPYDIR/*\.pyc
rm -f $VFXTPYDIR/*\.orig
rm -f $VFXTPYDIR/*\.rej
}
function patch_vfxt_py2() {
VFXTPYDIR=$(dirname $(pydoc vFXT | grep usr | tr -d '[:blank:]'))
MSAZURE_PATCH_FILE="$VFXTPYDIR/p"
MSAZURE_TARGET_FILE="$VFXTPYDIR/msazure.py"
/bin/cat <<EOM >$MSAZURE_PATCH_FILE
diff --git a/vFXT/msazure.py b/vFXT/msazure.py
index 4e72fd73..b660d9bb 100644
--- a/vFXT/msazure.py
+++ b/vFXT/msazure.py
@@ -234,6 +234,11 @@ class Service(ServiceBase):
AZURE_ENVIRONMENTS = {
'usGovCloud': { 'endpoint': 'https://management.usgovcloudapi.net/', 'storage_suffix': 'core.usgovcloudapi.net'}
}
+ REGION_FIXUP = {
+ "centralindia": "indiacentral",
+ "southindia": "indiasouth",
+ "westindia": "indiawest",
+ }
def __init__(self, subscription_id=None, application_id=None, application_secret=None,
tenant_id=None, resource_group=None, storage_account=None,
@@ -547,6 +552,9 @@ class Service(ServiceBase):
# reconnect on failure
conn = httplib.HTTPConnection(connection_host, connection_port, source_address=source_address, timeout=CONNECTION_TIMEOUT)
+ instance_location = instance_data['compute']['location'].lower() # region may be mixed case
+ instance_location = cls.REGION_FIXUP.get(instance_location) or instance_location # region may be transposed
+
# endpoint metadata
attempts = 0
endpoint_conn = httplib.HTTPSConnection(cls.AZURE_ENDPOINT_HOST, source_address=source_address, timeout=CONNECTION_TIMEOUT)
@@ -558,7 +566,7 @@ class Service(ServiceBase):
endpoint_data = json.loads(response.read())
for endpoint_name in endpoint_data['cloudEndpoint']:
endpoint = endpoint_data['cloudEndpoint'][endpoint_name]
- if instance_data['compute']['location'] in endpoint['locations']:
+ if instance_location in [_.lower() for _ in endpoint['locations']]: # force lowercase comparison
instance_data['endpoint'] = endpoint
instance_data['token_resource'] = 'https://{}'.format(endpoint['endpoint']) # Always assume URL format
break
EOM
# don't exit if the patch was already applied
set +e
patch --quiet --forward $MSAZURE_TARGET_FILE $MSAZURE_PATCH_FILE
set -e
rm -f $MSAZURE_PATCH_FILE
rm -f $VFXTPYDIR/*\.pyc
rm -f $VFXTPYDIR/*\.orig
rm -f $VFXTPYDIR/*\.rej
}
function create_vfxt() {
#######################################################
# do not trace passwords
#######################################################
set -x
#######################################################
cd $AZURE_HOME_DIR
# enable cloud trace during installation
if [ "${ENABLE_CLOUD_TRACE_DEBUG}" == "${ARM_TRUE}" ] ; then
nohup /bin/bash /opt/avere/enablecloudtrace.sh > $AZURE_HOME_DIR/enablecloudtrace.log 2>&1 &
fi
# ensure the create cluster command is recorded for the future
sleep 2 && ps -a -x -o cmd | egrep '[v]fxt.py' | sed 's/--admin-password [^ ]*/--admin-password ***/' > create_cluster_command.log &
$VFXT_INSTALL_TEMPLATE
#######################################################
# re-enable tracing
#######################################################
set +x
#######################################################
}
function print_vfxt_vars() {
echo "VSERVER_IPS=$(sed -n "s/^.*Creating vserver vserver (\(.*\)\/255.255.255.255).*$/\1/p" $VFXT_LOG_FILE)"
echo "MGMT_IP=$(sed -n "s/^.*management address: \(.*\)/\1/p" $VFXT_LOG_FILE)"
}
function dump_env_vars() {
echo "start env dump"
echo $(pwd)
echo "export RESOURCE_GROUP=$RESOURCE_GROUP"
echo "export LOCATION=$LOCATION"
echo "export NETWORK_RESOURCE_GROUP=$NETWORK_RESOURCE_GROUP"
echo "export NETWORK=$NETWORK"
echo "export SUBNET=$SUBNET"
echo "export AVERE_CLUSTER_ROLE=$AVERE_CLUSTER_ROLE"
echo "export CREATE_CLOUD_BACKED_CLUSTER=$CREATE_CLOUD_BACKED_CLUSTER"
echo "export STORAGE_ACCOUNT=$STORAGE_ACCOUNT"
echo "export CACHE_SIZE=$CACHE_SIZE"
echo "export CLUSTER_NAME=$CLUSTER_NAME"
echo "export INSTANCE_TYPE=$INSTANCE_TYPE"
echo "export ADMIN_PASSWORD=$ADMIN_PASSWORD"
echo "finish env dump"
}
function apt_get_update() {
set +e
retries=10
apt_update_output=/tmp/apt-get-update.out
for i in $(seq 1 $retries); do
timeout 300 apt-get update 2>&1
[ $? -eq 0 ] && break
if [ $i -eq $retries ]; then
set -e
return 1
else sleep 30
fi
done
set +e
echo Executed apt-get update $i times
}
function apt_get_install() {
set +e
retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift
for i in $(seq 1 $retries); do
# timeout occasionally freezes
#echo "timeout $timeout apt-get install --no-install-recommends -y ${@}"
#timeout $timeout apt-get install --no-install-recommends -y ${@}
apt-get install --no-install-recommends -y ${@}
[ $? -eq 0 ] && break || \
if [ $i -eq $retries ]; then
set -e
return 1
else
sleep $wait_sleep
apt_get_update
fi
done
set -e
echo Executed apt-get install --no-install-recommends -y \"$@\" $i times;
}
function config_linux() {
#hostname=`hostname -s`
#sudo sed -ie "s/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/" /etc/hosts
export DEBIAN_FRONTEND=noninteractive
apt_get_update
apt_get_install 20 10 180 curl dirmngr python-pip nfs-common build-essential python-dev python-setuptools
# this is no longer need because it is not longer there (mar 2019 ubuntu)
# retrycmd_if_failure 12 5 apt remove --purge -y python-keyring
retrycmd_if_failure 12 5 pip install --requirement /opt/avere/python_requirements.txt
}
function install_vfxt() {
retrycmd_if_failure 12 5 pip install --no-deps vFXT
mv /opt/avere/averecmd.txt /usr/local/bin/averecmd
chmod 755 /usr/local/bin/averecmd
}
function install_vfxt_py_docs() {
pushd / &>/dev/null
curl --retry 5 --retry-delay 5 -o vfxtdistdoc.tgz https://averedistribution.blob.core.windows.net/public/vfxtdistdoc.tgz &>/dev/null || true
if [ -f vfxtdistdoc.tgz ]; then
tar --no-same-owner -xf vfxtdistdoc.tgz
rm -f vfxtdistdoc.tgz
fi
popd &>/dev/null
}
function main() {
# ensure waagent upgrade does not interrupt this CSE
retrycmd_if_failure 240 5 apt-mark hold walinuxagent
echo "wait arm endpoint"
wait_arm_endpoint
echo "wait azure home dir"
wait_azure_home_dir
if [ "$BUILD_CONTROLLER" == "$ARM_TRUE" ]; then
echo "configure linux"
config_linux
echo "install_vfxt_py"
install_vfxt
echo "install_vfxt_docs"
install_vfxt_py_docs
fi
echo "wait az login"
wait_az_login_and_vnet
#echo "dump env vars for debugging"
#dump_env_vars
echo "configure vfxt install template"
configure_vfxt_template
echo "patch vfxt.py"
patch_vfxt_py1
patch_vfxt_py2
echo "create_vfxt"
create_vfxt
echo "print vfxt vars"
print_vfxt_vars
# ensure waagent upgrade can proceed
retrycmd_if_failure 240 5 apt-mark unhold walinuxagent
echo "installation complete"
}
main
| true |
b0ab1307b6b704d9c3ac824e18ac2ead633a6ea6 | Shell | nukisashineko/my_program_warehouse | /linux/more_used/fileOP | UTF-8 | 709 | 3.28125 | 3 | [] | no_license | #!/bin/sh
DIR=~/Documents/unknow
DIR_TEXT_SAVE=~/Documents/save
DIR_MONTH=`date '+%Y_%m'`
DATE=`date '+%Y-%m-%d'`.txt
#$REPORT="$DIR/$DATE.txt"
#touch $REPORT
cd $DIR
echo "`ls $DATE`"
if [ ! `ls $DATE` ]
then
echo "">> $DATE
fi
if [ $# -eq 1 ]
then
$1 $DATE
else
vim $DATE
fi
#HEAD=`head --line 1 $DATE`
#echo "HEAD=${HEAD}"
if [ $HEAD ]
then
# echo "head is hit !!"
NEW_DATE=`basename $DATE .txt`_$HEAD.txt
# echo "NEW_DATE=${NEW_DATE}"
mv $DATE $NEW_DATE
mkdir $DIR_TEXT_SAVE/$HEAD
mkdir $DIR_TEXT_SAVE/$HEAD/$DIR_MONTH
mv $NEW_DATE $DIR_TEXT_SAVE/$HEAD/$DIR_MONTH
echo "file is saved in $DIR_TEXT_SAVE/$HEAD/$DIR_MONTH"
else
echo "head is no hit !!"
echo "you don't write !!"
rm $DATE
fi
| true |
63b9b89549040caa1b6039e1b17df91b03cbac60 | Shell | JamesOBenson/kolla | /docker/mariadb/mariadb-server/healthcheck_mariadb | UTF-8 | 420 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
MYSQL_USERNAME="${MYSQL_USERNAME:=-haproxy}"
MYSQL_TIMEOUT=10
MYSQL_CMDLINE="mysql -nNE --connect-timeout=${MYSQL_TIMEOUT} -u ${MYSQL_USERNAME}"
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state_comment';")
if [[ "${WSREP_STATUS}" == "Synced" ]]
then
echo "MariaDB Galera Cluster Node is synced."
exit 0
else
echo "MariaDB Galera Cluster Node is NOT synced"
exit 0
fi
| true |
168ffcc08da6ef9013b6ca4dfd5b3962a68f0709 | Shell | gachouchani1999/SecretStaking | /scripts/integration_tests.sh | UTF-8 | 8,973 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
docker_name=secretdev
# docker exec "$docker_name"
#function secretcli() {
# secretcli "$@";
#}
function wait_for_tx() {
until (secretcli q tx "$1"); do
sleep 5
done
}
# export SGX_MODE=SW
deployer_name=bob
deployer_address=$(secretcli keys show -a $deployer_name)
echo "Deployer address: '$deployer_address'"
# validator_address=$(docker exec -it secretdev secretcli q staking validators | jq '.[0].operator_address')
validator_address="secretvaloper1xey4ymz4tmlgy6pp54e2ccj307ff6kx647p3hq"
echo "Validator address: '$validator_address'"
validator_address2="secretvaloper1snzrncgy6p8aqtamr0w8gk7arhg2lmp2ecq5sm"
echo "Validator address 2: '$validator_address'"
# docker exec -it "$docker_name"
# /root/code
# Upload both contract wasm blobs, then read each code id / hash back from
# the chain (list-code returns codes in upload order, so [-1] is the newest).
secretcli tx compute store "../build/secretstaking_token.wasm" --from $deployer_name --gas 4000000 -b block -y
token_code_id=$(secretcli query compute list-code | jq '.[-1]."id"')
token_code_hash=$(secretcli query compute list-code | jq '.[-1]."data_hash"')
echo "Stored token: '$token_code_id', '$token_code_hash'"
# docker exec -it $docker_name
# /root/code
secretcli tx compute store "../build/secret_staking.wasm" --from $deployer_name --gas 4000000 -b block -y
factory_code_id=$(secretcli query compute list-code | jq '.[-1]."id"')
echo "Stored staking: '$factory_code_id'"
echo "Deploying contract..."
# Contract labels must be unique per instantiation; a timestamp is unique
# enough for a single local test run.
tokenlabel=$(date +"%T")
#export STORE_TX_HASH=$(
#    secretcli tx compute instantiate $token_code_id '{"admin": "'$deployer_address'", "symbol": "TST", "decimals": 6, "initial_balances": [], "prng_seed": "YWE", "name": "test"}' --from $deployer_name --gas 1500000 --label $label -b block -y |
#    jq -r .txhash
#)
#wait_for_tx "$STORE_TX_HASH" "Waiting for instantiate to finish on-chain..."
sleep 2
label=$(date +"%T")
# Instantiate the staking factory; it receives the token code id/hash and
# $tokenlabel, under which the derivative token (and a "-gov" governance
# token, queried below) becomes reachable after init.
export STORE_TX_HASH=$(
  secretcli tx compute instantiate $factory_code_id '{ "prng_seed": "YWE", "token_code_id": '$token_code_id', "token_code_hash": '$token_code_hash', "label": "'$tokenlabel'", "validator": "'$validator_address'"}' --label $label --from $deployer_name --gas 1500000 -y |
  jq -r .txhash
)
wait_for_tx "$STORE_TX_HASH" "Waiting for instantiate to finish on-chain..."
# tail -c 46 slices the trailing address (45 chars + newline) off the query
# output — assumes a fixed-length bech32 contract address; TODO confirm.
token_addr=$(secretcli q compute label "$tokenlabel" | tail -c 46)
echo "Token address: $token_addr"
gtoken_addr=$(secretcli q compute label "$token_addr"-gov | tail -c 46)
echo "governance token address: $gtoken_addr"
staking_contract=$(secretcli query compute list-contract-by-code $factory_code_id | jq '.[-1].address')
echo "staking address: '$staking_contract'"
# Register the second validator with the staking contract, then set viewing
# keys so balances on both privacy tokens can be queried below.
echo "Adding validator '$validator_address2'"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"add_validator": {"address": "'$validator_address2'"}}' -b block -y --from $deployer_name
# secretcli tx compute execute $(echo "$token_addr" | tr -d '"') '{"add_minters": {"minters": ['$staking_contract']}}' -b block -y --from $deployer_name
secretcli tx compute execute $token_addr '{"set_viewing_key": {"key": "yo"}}' -b block -y --from $deployer_name
secretcli tx compute execute $gtoken_addr '{"set_viewing_key": {"key": "yo"}}' -b block -y --from $deployer_name
# Snapshot balances, stake 1 SCRT (1,000,000 uscrt), and observe the
# derivative token balances afterwards.
balance=$(secretcli q account $deployer_address | jq '.value.coins[0].amount')
echo "USCRT balance before deposit: '$balance'"
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance before deposit: '$tbalance'"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"stake": {}}' --amount 1000000uscrt -b block -y --gas 1000000 --from $deployer_name
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after deposit: '$tbalance'"
# NOTE(review): this queries the governance token ($gtoken_addr), but the
# echoed label still says "Token balance" — misleading output.
tbalance=$(secretcli q compute query $gtoken_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after deposit: '$tbalance'"
balance=$(secretcli q account $deployer_address | jq '.value.coins[0].amount')
echo "USCRT balance after deposit: '$balance'"
# Give the chain a couple of blocks before re-reading the exchange rate.
echo "Waiting for 2 blocks"
sleep 13
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"exchange_rate": {}}'
echo "Depositing 1,000,000 uscrt"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"stake": {}}' --amount 1000000uscrt -b block -y --gas 1000000 --from $deployer_name
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after deposit2: '$tbalance'"
balance=$(secretcli q account $deployer_address | jq '.value.coins[0].amount')
echo "USCRT balance after deposit2: '$balance'"
# Send 1,000,000 derivative tokens back to the staking contract; the base64
# msg "eyJ3aXRoZHJhdyI6IHt9fQ" decodes to {"withdraw": {}}.
secretcli tx compute execute $token_addr '{"send": {"recipient": '$staking_contract', "amount": "1000000", "msg": "eyJ3aXRoZHJhdyI6IHt9fQ"}}' -b block -y --gas 1000000 --from $deployer_name
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after withdraw: '$tbalance'"
tbalance=$(secretcli q compute query $gtoken_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "gToken balance after withdraw: '$tbalance'"
balance=$(secretcli q account $deployer_address | jq '.value.coins[0].amount')
echo "USCRT balance after withdraw: '$balance'"
# Two back-to-back deposits followed by one withdrawal of the entire
# derivative balance, exercising multi-deposit accounting.
echo "Testing multiple withdraws"
echo "Depositing 1,000,000 uscrt"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"stake": {}}' --amount 1000000uscrt -b block -y --gas 1000000 --from $deployer_name
echo "Depositing 1,000,000 uscrt"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"stake": {}}' --amount 1000000uscrt -b block -y --gas 1000000 --from $deployer_name
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after deposits: '$tbalance'"
echo "Withdrawing 2,000,000 uscrt"
# $tbalance is already a quoted JSON string, so it is spliced in unquoted.
secretcli tx compute execute $token_addr '{"send": {"recipient": '$staking_contract', "amount": '$tbalance', "msg": "eyJ3aXRoZHJhdyI6IHt9fQ"}}' -b block -y --gas 1000000 --from $deployer_name
# Test exchange rate
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"exchange_rate": {}}'
echo "Waiting for 2 blocks"
sleep 7
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"exchange_rate": {}}'
# Test claims query
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"claims": {"address": "'$deployer_address'"}}'
echo "Current time: '$(date "+%s")'"
echo "Waiting 5 seconds..."
sleep 5
echo "Current time: '$(date "+%s")'"
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"claims": {"address": "'$deployer_address'", "current_time": '$(date "+%s")'}}'
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"claim": {}}' -b block -y --gas 1000000 --from $deployer_name
balance=$(secretcli q account $deployer_address | jq '.value.coins[0].amount')
echo "USCRT balance after claim: '$balance'"
# Test withdraw removed from claims
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"claims": {"address": "'$deployer_address'", "current_time": '$(date "+%s")'}}'
# Test failed to withdraw
echo "Depositing 1,000,000 uscrt"
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"stake": {}}' --amount 1000000uscrt -b block -y --gas 1000000 --from $deployer_name
tbalance=$(secretcli q compute query $token_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "Token balance after withdraw: '$tbalance'"
tbalance=$(secretcli q compute query $gtoken_addr '{"balance": {"address": "'$deployer_address'", "key": "yo"}}' | jq '.balance.amount')
echo "gToken balance after withdraw: '$tbalance'"
echo "Withdrawing 1,000,000 uscrt"
secretcli tx compute execute $token_addr '{"send": {"recipient": '$staking_contract', "amount": '$tbalance', "msg": "eyJ3aXRoZHJhdyI6IHt9fQ"}}' -b block -y --gas 1000000 --from $deployer_name
echo "Current time: '$(date "+%s")'"
secretcli q compute query $(echo "$staking_contract" | tr -d '"') '{"claims": {"address": "'$deployer_address'", "current_time": '$(date "+%s")'}}'
# Immediate claim right after the withdraw — per the "Test failed to
# withdraw" heading above, presumably expected to fail; TODO confirm.
secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"claim": {}}' -b block -y --gas 1000000 --from $deployer_name
# voting
# secretcli tx gov submit-proposal community-pool-spend /root/code/build/proposal.json -b block -y --from $deployer_name
# secretcli query gov proposal 1
# secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"vote": {"proposal": 1, "vote": "Yes"}}' -b block -y --gas 1000000 --from $deployer_name
# secretcli tx compute execute $(echo "$staking_contract" | tr -d '"') '{"tally": {"proposal": 1}}' -b block -y --gas 1000000 --from $deployer_name
# secretcli query gov votes 1
28f8db64e782123fe1fea0940cf7a78bedcd023a | Shell | dut3062796s/pigsty | /bin/reloadhba | UTF-8 | 2,241 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -euo pipefail
#==============================================================#
# File : bin/createdb
# Ctime : 2021-11-02
# Mtime : 2021-11-02
# Desc : Wrapper of pgsql playbook reload hba task
# Usage : bin/reloadhba <cluster>
# Path : bin/reloadhba
# Depend : pigsty playbook pgsql.yml
# Copyright (C) 2018-2021 Ruohang Feng
#==============================================================#
#==============================================================#
# environment
#==============================================================#
APP_NAME="$(basename $0))"
APP_DIR="$(cd $(dirname $0) && pwd)"
PIGSTY_HOME=$(cd $(dirname ${APP_DIR}) && pwd)
#==============================================================#
# log util
#==============================================================#
__CN='\033[0m' # no color
__CB='\033[0;30m' # black
__CR='\033[0;31m' # red
__CG='\033[0;32m' # green
__CY='\033[0;33m' # yellow
__CB='\033[0;34m' # blue
__CM='\033[0;35m' # magenta
__CC='\033[0;36m' # cyan
__CW='\033[0;37m' # white
function log_info() { printf "[${__CG} OK ${__CN}] ${__CG}$*${__CN}\n"; }
function log_warn() { printf "[${__CY}WARN${__CN}] ${__CY}$*${__CN}\n"; }
function log_error() { printf "[${__CR}FAIL${__CN}] ${__CR}$*${__CN}\n"; }
function log_debug() { printf "[${__CB}HINT${__CN}] ${__CB}$*${__CN}\n"; }
function log_input() { printf "[${__CM} IN ${__CN}] ${__CM}$*\n=> ${__CN}"; }
function log_hint() { printf "${__CB}$*${__CN}\n"; }
#==============================================================#
#==============================================================#
# parameters
#==============================================================#
PG_CLUSTER=${1-''}
if [[ -z "${PG_CLUSTER}" ]]; then
log_error "pg_cluster is empty"
log_hint "bin/reloadhba <pg_cluster> # define database in pg_databases first"
exit 1
fi
#==============================================================#
#==============================================================#
#==============================================================#
echo "reload ${PG_CLUSTER} hba"
#==============================================================#
# Run the executable pgsql.yml playbook from the pigsty root, limited (-l)
# to this cluster and to the pg_hba task tag (-t).
cd ${PIGSTY_HOME} >/dev/null
${PIGSTY_HOME}/pgsql.yml -l ${PG_CLUSTER} -t pg_hba
19bd40a48b2187a13bca3a5dcf849023db04878a | Shell | jiyeonkkk/private-terraform-enterprise | /examples/aws/user-data.tpl | UTF-8 | 876 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# create replicated unattended installer config
# Both heredocs emit JSON consumed by the Replicated installer, so they
# must parse. Fixes: added the missing comma after "LicenseFileLocation",
# added the missing comma after the "hostname" object, and removed the
# trailing comma after "letsencrypt_email" — each previously made the
# respective document invalid JSON.
cat > /etc/replicated.conf <<EOF
{
"DaemonAuthenticationType": "password",
"DaemonAuthenticationPassword": "ptfe-pwd",
"TlsBootstrapType": "self-signed",
"LogLevel": "debug",
"ImportSettingsFrom": "/tmp/replicated-settings.json",
"LicenseFileLocation": "/tmp/license.rli",
"BypassPreflightChecks": true
}
EOF

# Application settings imported via ImportSettingsFrom above.
cat > /tmp/replicated-settings.json <<EOF
{
"hostname": {
    "value": "jiyeon-ptfe-pes.hashidemos.io"
},
"installation_type": {
    "value": "production"
},
"production_type": {
    "value": "disk"
},
"disk_path": {
    "value": "/data"
},
"letsencrypt_auto": {
    "value": "1"
},
"letsencrypt_email": {
    "value": "null@null.com"
}
}
EOF

# install replicated
curl https://install.terraform.io/ptfe/beta > /home/ubuntu/install.sh
bash /home/ubuntu/install.sh no-proxy
| true |
f6d47f44a420cd33af2efe6fd293f1b00e7ffe9e | Shell | TomFrost/amet | /amet.sh | UTF-8 | 3,094 | 4 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
set -e

# Resolve the host's timezone name, in order of preference: an explicit
# $TZ, the Debian-style /etc/timezone file, then the /etc/localtime
# symlink (assumed to point into a zoneinfo tree, as the original
# ls-parsing branch also assumed).
getTimezone() {
  if [ -n "$TZ" ]; then
    echo "$TZ"
  elif [ -f /etc/timezone ]; then
    cat /etc/timezone
  else
    # Read the symlink target directly instead of parsing `ls -l` output.
    readlink /etc/localtime | sed 's/.*zoneinfo\///'
  fi
}
# DEFAULTS
# Every value below can be overridden by a command-line flag (see the
# getopts loop further down).
username=$(whoami)
password=password
shell=bash
# docker run mode: detached by default; the -i flag replaces this with -it.
runArgs="-d"
portRangeArgs=""
# NOTE(review): forceRebuild and mountHome are never referenced again in
# this script — apparently leftovers or reserved for future flags.
forceRebuild=0
appPort=3000
sshPort=3022
mountHome=1
sshKeyPath=$HOME/.ssh/id_rsa.pub
timezone=$(getTimezone)
syncFreq=900
# HELP, I NEED SOMEBODY
# Print the usage text; the "Defaults to ..." values reflect the variable
# state at the moment this is called.
showHelp() {
  cat <<EOF
Usage: $(basename $0) ARGUMENTS [-- docker run arguments]

 Available arguments:

 -a <port>   The port that code-server should be exposed on. Defaults to $appPort.
 -b <secs>   The frequency with which to back up the home folder, or 0 to disable.
             Defaults to $syncFreq.
 -h          Display this help, exit 0
 -i          Launch the container interactively and show logs. Otherwise, the container
             will run in daemon mode.
 -k <path>   The path to the public key used to authenticate over SSH, so that password
             authentication is not necessary. Defaults to $sshKeyPath
 -o <ranges> Opens the specified port ranges to the host machine. Ex: -o 8000-8100.
             Can be specified multiple times.
 -p <passwd> The password to set for the user and the code-server instance.
             Defaults to '$password'.
 -r <port>   The port that remote ssh connections can be established on.
             Defaults to $sshPort.
 -s <shell>  The shell to set for the user. Defaults to '$shell'.
 -t <zone>   The timezone to use. Defaults to '$timezone'.
 -u <user>   The user to create in the container. Defaults to '$username'.

 Any arguments included after the argument terminator (--) will be passed directly
 to the docker run command.

EOF
}
# PARSE COMMAND LINE ARGS
# Leading ':' puts getopts in silent error mode. The '?)' arm is an
# unquoted single-char glob, so it catches both unknown flags (OPT='?')
# and flags missing their argument (OPT=':').
while getopts ':a:b:hik:o:p:s:t:u:' OPT; do
  case "$OPT" in
    a) appPort="$OPTARG" ;;
    b) syncFreq="$OPTARG" ;;
    h) showHelp; exit 0 ;;
    i) runArgs="-it" ;;
    k) sshKeyPath="$OPTARG" ;;
    o) portRangeArgs+="-p $OPTARG:$OPTARG " ;; # accumulates; may repeat
    p) password="$OPTARG" ;;
    s) shell="$OPTARG" ;;
    t) timezone="$OPTARG" ;;
    u) username="$OPTARG" ;;
    ?) showHelp; exit 1 ;;
  esac
done
# Drop the parsed flags; whatever remains (after --) goes to docker run.
shift $(($OPTIND-1))
# BUILD
# Stage the public key as ./key.pub for the image build (presumably
# consumed by the Dockerfile — TODO confirm); an empty placeholder is
# created when no key file exists.
if [ -f $sshKeyPath ]; then
  cp "$sshKeyPath" ./key.pub
else
  touch ./key.pub
fi
# Bake the per-user settings into an image tagged amet-<user>.
docker build . -t amet-${username} \
  --build-arg username=$username \
  --build-arg password=$password \
  --build-arg shell=$shell \
  --build-arg timezone="$timezone" \
  --build-arg lang=${LANG:-en_US.UTF-8} \
  --build-arg syncFreq=$syncFreq
rm ./key.pub
# ENV VARS
[ -n "$timezone" ] && runArgs+=" -e TZ=$timezone"
# RUN
# Start the container with the host dir ./home-<user> mounted at /sync and
# the app/ssh ports mapped; remaining CLI args ($@, i.e. anything after
# the -- terminator) are forwarded to docker run verbatim.
docker run --privileged $runArgs \
  -v $PWD/home-${username}:/sync \
  --hostname=amet-${username} \
  --name=amet-${username} \
  -p ${appPort}:3000 \
  -p ${sshPort}:22 \
  $portRangeArgs \
  $@ \
  amet-${username}
| true |
e1005e01e2570457746e3e363af6bf36a60cb770 | Shell | oonisim/spark-hadoop-Installation | /installation/ansible/cluster/01_prerequisite/scripts/setup_ansible_user.sh | UTF-8 | 1,667 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#--------------------------------------------------------------------------------
# Run on the Ansible target machine.
# Setup the REMOTE_USER that Ansible master use to ssh-into the target.
#--------------------------------------------------------------------------------
set -u
# NOTE(review): this shadows the shell's own $USER environment variable for
# the rest of the script — works here, but rename if that surprises anyone.
USER='ansible'
GROUP='ansible'
SUDO_GROUP='sudo'
#--------------------------------------------------------------------------------
# Account setup
#--------------------------------------------------------------------------------
# Create the group and user only when absent; [ $(getent ...) ] tests for
# non-empty lookup output.
if [ $(getent group ${GROUP}) ]; then
    echo "group ${GROUP} exists."
else
    sudo groupadd ${GROUP}
fi
if [ $(getent passwd ${USER}) ]; then
    echo "user ${USER} exists."
else
    sudo useradd -m -s /bin/bash -G ${SUDO_GROUP} -g ${GROUP} ${USER}
fi
#--------------------------------------------------------------------------------
# SSH public authentication setup
#--------------------------------------------------------------------------------
# Read the Ansible master's public key from stdin and append it to the
# target user's authorized_keys (idempotently), then lock down permissions.
echo "Provide public key text>"
# Fix: -r keeps read from interpreting backslashes in the pasted key text.
read -r key
AUTH_KEY_DIR="$(sudo -i -u ${USER} pwd)/.ssh"
AUTH_KEY_FILE="${AUTH_KEY_DIR}/authorized_keys"
sudo -i -u ${USER} mkdir -p ${AUTH_KEY_DIR}
sudo -i -u ${USER} touch ${AUTH_KEY_FILE}
# Append only when this exact key is not already present (-F: literal match).
sudo -i -u ${USER} grep -q -F "${key}" ${AUTH_KEY_FILE}
if [ $? -ne 0 ]; then
    sudo -i -u ${USER} /bin/bash -c "echo ${key} >> ${AUTH_KEY_FILE}"
fi
sudo -i -u ${USER} chmod -R go-rwx ${AUTH_KEY_DIR}
#--------------------------------------------------------------------------------
# Sudoers setup
#--------------------------------------------------------------------------------
# Grant the ansible user passwordless sudo via a drop-in sudoers file.
echo "${USER} ALL=(ALL:ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/${USER}
echo "done"
688384e46938801551e50ed526a03b77e012dd05 | Shell | iigorr/devbox | /scripts/setup.sh | UTF-8 | 159 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Sync ~/dev/automation: clone it on first run, otherwise fast-forward it.
if [ ! -d ~/dev/automation ]; then
  git clone git@bitbucket.org:iigorr/automation.git ~/dev/automation
else
  cd ~/dev/automation && git pull --ff-only
fi
2e0c5a5b71872b9e360116031412752cd03bee70 | Shell | tzik/dev | /pkg/ninja/bin/package.sh | UTF-8 | 371 | 3.046875 | 3 | [] | no_license | set -eu
cd "$(dirname "${BASH_SOURCE}")/.."
base_dir="${PWD}"
toolchain_dir="$(realpath -s "${base_dir}/../..")"
src_dir="${base_dir}/src"
image_dir="${base_dir}/image"
prefix="${toolchain_dir}/out/usr"
package_name="$(basename "${base_dir}")"
package_file="${base_dir}/${package_name}.tar"
metadata_file="${prefix}/packages/${package_name}"
mkdir -p "${image_dir}"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.