blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
503aa0cf5dbc897bfd516ddeaf8c259ef04e3b2d
|
Shell
|
cms-analysis/DPGAnalysis-SiPixelTools
|
/GainCalibration/test/submit_gain_calib_template.sh
|
UTF-8
| 1,741
| 3.671875
| 4
|
[] |
no_license
|
#! /bin/bash
# Batch-job template for a SiPixel gain-calibration CMSSW run.
# Expects the submitter to export RUN, RUNDIR, INDIR, OUTDIR (and the batch
# system to provide TMPDIR); the FED id is passed as $1.
# peval: print a command, then execute it (poor man's per-call `set -x`)
function peval { echo -e ">>> $@"; eval "$@"; }
echo -e "Starting job at ..." `date`
DATE_START=`date +%s`
[ -z "$1" ] && echo ">>> FED is not given as input!" && exit 1
run=$RUN
fed=$1
ext='dmp'
rundir=$RUNDIR
indir=$INDIR
outdir=$OUTDIR
workdir="$TMPDIR/job_GainCalib_${run}_${fed}"
logdir="${rundir}/job_${fed}"
# copy to/from EOS via XRootD
cpcmd="xrdcp -f --nopbar"
cpurl="root://eoscms.cern.ch/"
# remove leading "/" to make eos cp work
#if [ ${outdir:0:1} = "/" ]; then outdir=${outdir:1}; fi
# FILES
script="gain_calib_cfg.py"
infile="GainCalibration_${fed}_${run}.${ext}"
sqlite="siPixelVCal.db" # VCal DB
outfile="GainCalibration.root"
# ENVIRONMENT
echo ">>> Setting the environment..."
peval "source /afs/cern.ch/cms/cmsset_default.sh"
peval "cd $rundir"
# NOTE: the backticks run scramv1 while the argument string is built,
# so the inner eval happens before peval is even called
peval "eval `scramv1 runtime -sh`"
# WORKDIR
peval "mkdir -p $workdir"
peval "cd $workdir"
# RETRIEVE SCRIPT, CALIBRATION & VCAL DB
echo ">>> Copying input files from storage to local ..."
peval "$cpcmd $cpurl$indir/$infile $infile"
peval "cp $rundir/../$sqlite $sqlite"
peval "cp $rundir/$script $script"
echo "************************"
peval "ls $rundir"
echo "************************"
peval "ls"
echo -e "************************\n"
# RUN SCRIPT
echo ">>> Running CMSSW job:"
peval "cmsRun $script run=$run fed=$fed"
peval "cat *.log"
# COPY BACK RESULT
echo ">>> Copying output to pnfs:"
peval "$cpcmd $outfile $cpurl${outdir}/$fed.root"
peval "mkdir $logdir"
peval "cp *.log $logdir/"
peval "cp *.txt $logdir/"
# CLEAN
cd $rundir
peval "rm -fr $workdir"
# DONE
echo -e ">>> Job done... \n\n"
DATE_END=`date +%s`
RUNTIME=$((DATE_END-DATE_START))
printf ">>> Wallclock running time: %02d:%02d:%02d" "$(( $RUNTIME / 3600 ))" "$(( $RUNTIME % 3600 /60 ))" "$(( $RUNTIME % 60 ))"
| true
|
1c32a0f724789c1fb91b0245497a230e21d21023
|
Shell
|
chef/gatherlogs-reporter
|
/habitat-packages/cli/plan.sh
|
UTF-8
| 1,738
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Habitat plan for packaging the gatherlogs_reporter CLI gem.
pkg_name=gatherlogs_reporter
pkg_origin=chef
pkg_maintainer="Chef Support <support@chef.io>"
pkg_license=('Apache-2.0')
# Runtime dependencies
pkg_deps=(
core/tar
core/bzip2
core/wget
core/ruby
core/gzip
core/file
core/grep
core/bash
core/findutils
core/git
core/coreutils
)
# Build-only dependencies
pkg_build_deps=(
core/gcc
core/make
)
pkg_bin_dirs=(bin)
# Version comes from the repo-level VERSION file, not from the plan itself.
pkg_version() {
cat "$SRC_PATH/../../VERSION"
}
do_before() {
do_default_before
update_pkg_version
}
# Point rubygems at the package's own lib dir for both build and run time.
do_setup_environment() {
update_pkg_version
export GEM_HOME="$pkg_prefix/lib"
export GEM_PATH="$GEM_HOME"
set_runtime_env GEM_HOME "$GEM_HOME"
set_buildtime_env GEM_HOME "$GEM_HOME"
push_runtime_env GEM_PATH "$GEM_PATH"
push_buildtime_env GEM_PATH "$GEM_PATH"
set_buildtime_env BUILD_GEM "true"
}
# Copy the whole repository (two levels above the plan) into the hab cache.
do_unpack() {
mkdir -pv "$HAB_CACHE_SRC_PATH/$pkg_dirname"
cp -RT "$PLAN_CONTEXT"/../.. "$HAB_CACHE_SRC_PATH/$pkg_dirname/"
}
do_build() {
pushd "$HAB_CACHE_SRC_PATH/$pkg_dirname/"
build_line "gem build $pkg_name.gemspec ${GEM_HOME}"
# rewrite "#!/usr/bin/env" shebangs to the core/coreutils env binary
fix_interpreter "bin/*" core/coreutils bin/env
gem build ${pkg_name}.gemspec
popd
}
do_install() {
pushd "$HAB_CACHE_SRC_PATH/$pkg_dirname/"
build_line "Gem install ${pkg_name} gem"
gem install ${pkg_name}-*.gem --no-document --force
popd
# NOTE(review): wrapped binary is 'gatherlog' (singular) -- confirm that is
# the gem's actual executable name
wrap_bin 'gatherlog'
}
# Need to wrap the gatherlogs binary to ensure GEM_HOME/GEM_PATH is correct
wrap_bin() {
local bin="$pkg_prefix/bin/$1"
local real_bin="$GEM_PATH/gems/${pkg_name}-${pkg_version}/bin/$1"
build_line "Adding wrapper $bin to $real_bin"
cat <<EOF > "$bin"
#!$(pkg_path_for core/bash)/bin/bash
set -e
source $pkg_prefix/RUNTIME_ENVIRONMENT
export GEM_PATH GEM_HOME PATH
exec $real_bin \$@
EOF
chmod -v 755 "$bin"
}
# Pure-Ruby package: nothing to strip.
do_strip() {
return 0
}
| true
|
23297c664648dc0f163501b5a2e217b04691aea6
|
Shell
|
Rombusevil/flixel-gdx
|
/utils/gdx-setup-ui.sh
|
UTF-8
| 1,462
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# The world's cheapest gdx-setup-ui!!!'
# Ask for the project settings with a zenity form, validate them, then run
# gen-project.sh behind a zenity progress dialog.
CONFIG=$(zenity --forms \
--title=" Create Flixel-GDX project " \
--text=" Configure " \
--add-entry="Package Name:" \
--add-entry="Game Name:" \
--add-entry="Output Path:" \
)
OK=$?
# If user clicked OK
if [[ $OK == 0 ]]; then
# Get every field (zenity joins the form entries with "|")
PACKAGE=$(echo "$CONFIG" | awk -F "|" '{print $1}')
NAME=$(echo "$CONFIG" | awk -F "|" '{print $2}')
OUTPUT=$(echo "$CONFIG" | awk -F "|" '{print $3}')
# Replace spaces with underscores
NAME=${NAME// /_}
# Validate searching for empty fields
if [[ ! $PACKAGE =~ [^[:space:]] || ! $NAME =~ [^[:space:]] || ! $OUTPUT =~ [^[:space:]] ]]; then
zenity --error --text "You need to fill all the fields.";
# re-run ourselves (was "$(pwd)gdx-setup-ui.sh" -- the missing "/" made
# this execute a nonexistent command)
"$(pwd)/gdx-setup-ui.sh"
exit 0
fi
# Run the offline-project-generator.sh and show its progress
(./gen-project.sh "$PACKAGE" "$NAME" "$OUTPUT") | zenity --progress \
--title=" Generating Flixel-GDX project " \
--text="Doing ..." \
--width=200 \
--percentage=0
# Status message
# NOTE(review): after the pipeline, $? is zenity's exit status; zenity
# --progress exits 1 on cancel, so the "-1" branch likely never fires -- confirm.
if [ "$?" = -1 ] ; then
zenity --error \
--title " Create Flixel-GDX project " \
--width=200 \
--text="An error occurred."
else
zenity --info --title " Create Flixel-GDX project "\
--width=200 \
--text " Finished Successfully! ";
fi
fi
| true
|
9ae315eaa65da772e71b3b8b4b561fade6fd0918
|
Shell
|
fxha/dotfiles
|
/.bashrc
|
UTF-8
| 529
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
# .bashrc
# load global definitions (first existing system file wins)
if [ -f /etc/bashrc ]; then . /etc/bashrc
elif [ -f /etc/bash.bashrc ]; then . /etc/bash.bashrc
fi
# load local definitions
if [ -f ~/.bashrc.local ]; then . ~/.bashrc.local; fi
# get environment variables
if [ -f ~/.env ]; then . ~/.env; fi
# stop here for a non-interactive shell
case $- in
*i*) ;;
*) return ;;
esac
# add tab completion
if [ -f /etc/bash_completion ]; then . /etc/bash_completion; fi
# load user configuration
if [ -f ~/.bash_profile ]; then . ~/.bash_profile; fi
| true
|
932deda30afe74246833c92ea5c779e1eafb2e9a
|
Shell
|
danielgrigg/sandbox
|
/bash/expect_args.sh
|
UTF-8
| 184
| 3.3125
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
#
# Exit with a usage message unless exactly $number_expected arguments
# were supplied to the script.
E_WRONG_ARGS=85
script_params="-a -h -m -z"
number_expected=4
if [ "$#" -ne "$number_expected" ]
then
# $(...) instead of backticks; quoting protects odd $0 values
echo "Usage: $(basename "$0") $script_params"
exit "$E_WRONG_ARGS"
fi
| true
|
95fa6ed85cc24930252112a8fab49e88c3200942
|
Shell
|
Xabster/Botster
|
/apidata/dlapi.sh
|
UTF-8
| 164
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the 27 JDK 8 API index pages; store each one as api_<i>.txt.
for i in {1..27}; do
  echo "downloading $i ..."
  wget -q "http://download.java.net/jdk8/docs/api/index-files/index-$i.html" -O "api_$i.txt"
done
| true
|
0b4aeaa3b366e15ca40bed758a024508e882ed22
|
Shell
|
vgeorgiev90/Ansible
|
/dynamic-hostpath-with-nfs/run.sh
|
UTF-8
| 3,240
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# CLI front-end for the ansible playbooks in this directory: manages a
# volume group + logical volume on an NFS server and mounts it on workers.
usage () {
clear
echo "============================== Usage ================================================"
echo " This script is built on top of ansible, make sure you have it installed "
echo " It sets: volume group, logical volume , nfs-export and mount by the worker "
echo ""
echo "run.sh vginit example present/absent ---> Create/Remove volume group from vars file "
echo "run.sh volume example present/absent ---> Create/Remove logical volume from vars file"
echo "run.sh mount example present/absent ---> Mount the volume on workers only "
echo "run.sh gen-vars ---> Generate vars file for ansible to use, "
echo " file name to be passed to vginit or volume "
echo ""
echo "====================================================================================="
}
# play <playbook> <state> -- run one playbook against the hosts/<file> inventory
play () {
ansible-playbook -i "hosts/${file}" "$1" --extra-vars "state=$2"
}
if [ "${1}" == 'vginit' ];then
file="${2}"
state="${3}"
case ${state} in
'present')
play vg-init.yml present
;;
'absent')
play vg-init.yml absent
;;
*)
usage
;;
esac
elif [ "${1}" == 'volume' ];then
file="${2}"
state="${3}"
case ${state} in
'present')
# create bottom-up: volume, then export, then worker mount
play provision.yml present
play nfs-export.yml present
play workers-mount.yml present
;;
'absent')
# tear down in reverse order
play workers-mount.yml absent
play nfs-export.yml absent
play provision.yml absent
;;
*)
usage
;;
esac
elif [ "${1}" == 'gen-vars' ];then
# interactively collect all variables, then write an inventory/vars file
read -p 'Volume group name: ' vg_name
read -p 'Devices for volume group specify list ["/dev/vd1", ..]: ' devices
read -p 'Logical volume name: ' lv_name
read -p 'Logical volume size: ' lv_size
read -p 'File system type: ' fs_type
read -p 'Directory to mount: ' mount_name
read -p 'Worker host IP: ' worker_host
read -p 'Nfs server IP: ' nfs_server
read -p 'Directory on worker: ' work_dir
read -p 'Name for the hosts file: ' file
cat > "hosts/${file}" << EOF
[nfs-server]
${nfs_server}
[workers]
${worker_host}
[all:vars]
######vg init
vg_name=${vg_name}
devices=${devices}
######lvm provision
lv_name=${lv_name}
vg_name=${vg_name}
lv_size=${lv_size}
fs_type=${fs_type}
mount_name=${mount_name}
#nfs export
worker_host=${worker_host}
nfs_server=${nfs_server}
mount_name=${mount_name}
#worker mount
mount_name=${mount_name}
work_dir=${work_dir}
nfs_server=${nfs_server}
EOF
elif [ "${1}" == 'mount' ];then
file="${2}"
state="${3}"
case ${state} in
'present')
play nfs-export.yml present
play workers-mount.yml present
;;
'absent')
play workers-mount.yml absent
play nfs-export.yml absent
;;
*)
usage
;;
esac
else
usage
fi
| true
|
54a64dd69169a05f5f02e7fe1c33a2b0e69fac9e
|
Shell
|
angvoz/evo-neural-network-agents
|
/remote-run.sh
|
UTF-8
| 1,372
| 3.453125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -x
# Build/run/monitor the evolution Runner on a remote box over SSH.
# Usage: ./remote-run.sh [load|run|status|get|abort|install-java]
CMD=$1
BOX=root@isp1.6350.lowes.com
export PATH=.:/bin:/usr/local/bin
# load the SSH key into an agent so the ssh/scp calls below are passwordless
eval $(keychain --eval ~/.ssh/id_rsa)
# skip host-key verification (host key may change between reinstalls)
SSH_ARGS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
SSH="ssh $SSH_ARGS"
SCP="scp -r $SSH_ARGS"
# remote workspace lives under the remote user's home directory
WORKSPACE="$($SSH $BOX 'pwd')/workspace"
# test connection
$SSH $BOX true || exit 1
echo "Connection test successful"
case $CMD in
"load")
# push the local build output to the remote workspace
$SSH $BOX "mkdir -p $WORKSPACE/target"
$SCP $PWD/target $BOX:$WORKSPACE
$SSH $BOX "ls -ld \$(find $WORKSPACE/target -type f)"
;;
"run")
# start the Runner detached on the remote host, then show it in ps
$SSH $BOX "cd $WORKSPACE; nohup java -classpath $WORKSPACE/target/classes com.lagodiuk.agent.evolution.Runner $WORKSPACE/world.xml > $WORKSPACE/Runner.out 2>&1 &"
$SSH $BOX "ps -ef | grep com[.]lagodiuk.agent.evolution.Runner"
;;
"status")
$SSH $BOX "ps -ef | grep com[.]lagodiuk.agent.evolution.Runner"
$SSH $BOX "head $WORKSPACE/world.xml && echo ... && tail -3 $WORKSPACE/world.xml"
;;
"get")
# fetch the world state locally and show a preview
$SCP $BOX:$WORKSPACE/world.xml $PWD
ls -ld $PWD/world.xml
bash -c "head $PWD/world.xml && echo ... && tail -3 $PWD/world.xml"
;;
"abort")
# NOTE(review): presumably the Runner stops when world.xml disappears -- confirm
$SSH $BOX "rm $WORKSPACE/world.xml"
echo "Told the program to Abort"
;;
"install-java")
$SSH $BOX "apt-get install default-jdk"
;;
*)
printf "Error: wrong parameters\n"
printf "Usage:\n $0 [load|run|status|get]\n"
exit 1
;;
esac
echo "$0 Finished" > /dev/null
| true
|
83a100d74080dd07288113d0548ddb666aad8c8d
|
Shell
|
QBFreak/TinyMARE
|
/src/dirsize
|
UTF-8
| 613
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Displays executable size and size of source code for TinyMARE
#
# Updated February 12 2006 Gandalf
# Pick whichever netmare binary exists (Windows/Cygwin .exe or plain).
if [ -f ../bin/netmare.exe ]; then
exe=../bin/netmare.exe
elif [ -f ../bin/netmare ]; then
exe=../bin/netmare
else
exe=""
fi
echo
if [ -n "$exe" ]; then
# `size` puts the decimal total in column 4, or column 7 when the output
# contains a "+" layout -- handle both and print the total in hex.
size $exe | awk '
{ if($4 != "+") { total = $4; } else { total = $7; }};
END { printf "System Size: %08x (hexadecimal)\n", total }'
fi
# Count lines/bytes of .c/.h files; the glob [a-np-tv-z]* skips directories
# whose names start with o or u.
wc -lc [a-np-tv-z]*/*.[ch] | awk '
{ if($3 == "total") { lines = $1; bytes = $2 }}; END {
printf "Source Size: %08x (hexadecimal)\n\n %d Lines of code, %d Bytes.\n", bytes, lines, bytes }'
echo
| true
|
9a9a44718ce920f8eb53e4265e99178040594178
|
Shell
|
ncodeitavinash/repo
|
/shortifthenelse.sh
|
UTF-8
| 291
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates the short "cond && then || else" idiom and its pitfall.
# NOTE: "condition" below is a literal non-empty string, so the [[ ]] test is
# always true and "true" is printed.
[[ condition ]] && echo "true" || echo "false" >&2
# Be sure the first command (echo "true") always exits with 0; otherwise the
# second command will also be executed!
# (The explanatory text and transcript below were previously bare lines that
# bash tried to execute as commands; they are comments now.)
#
# Example transcript showing the pitfall:
# $ [[ 1 -eq 1 ]] && { echo "it's true"; false; } || echo "but it's also false" >&2
# it's true
# but it's also false
| true
|
35e86c172432150bf357101ffc76ae1bb7ad80e0
|
Shell
|
neldridge/git-backup
|
/test/test-fill-repo.sh
|
UTF-8
| 940
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Clones a git repository from repo.bundle file if it does not exist yet,
# simulates changes and creates an incremental backup bundle
# using backup-git.sh.
#
# Optional args: $1 bundle file, $2 repo dir, $3 dir containing backup-git.sh,
# $4 backup output dir.
SRC_BUNDLE=${1:-"repo.bundle"}
REPO=${2:-"repoA"}
GBACKUP_DIR=${3:-"../src"}
BACKUP_DIR=${4:-"backups"}
# resolve to absolute paths before we start cd-ing around
GBACKUP_DIR=`readlink -f ${GBACKUP_DIR}`
BACKUP_DIR=`readlink -f ${BACKUP_DIR}`
if [ ! -f ${GBACKUP_DIR}/backup-git.sh ]; then
echo "${GBACKUP_DIR}/backup-git.sh does not exist!"
exit 1
fi
if [ ! -d ${BACKUP_DIR} ]; then
mkdir ${BACKUP_DIR}
fi
# clone repository from bundle if it does not exist yet
if [ ! -d ${REPO} ]; then
git clone ${SRC_BUNDLE} ${REPO}
fi
# switch to repository directory
oldDir=`pwd`
cd ${REPO}
# change files in repository
echo "AAA" >> A
# commit
datetime=`date +%Y%m%d-%H%M%S`
git commit -am "changes ${datetime}"
# return to original directory
cd ${oldDir}
# run backup
${GBACKUP_DIR}/backup-git.sh ${REPO} ${BACKUP_DIR}
| true
|
5ef7cd066f507c9c88c54cda5dbc21dbd761bd73
|
Shell
|
samyakpuri/scripts
|
/tools/mediactrl
|
UTF-8
| 1,071
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/env sh
# File : Media Control
# Author : Samyak Puri <samyakpuri@hotmail.com>
# Date : 08.10.2018
# Last Modified Date: 02.04.2020
# Dispatch media/volume commands to spotify (spotifycli) or mpd (mpc) and
# poke the running status bar so its block refreshes.
if [ "$(pidof -s i3blocks)" ]
then
update="pkill -RTMIN+11 i3blocks"
elif [ "$(pidof -s dwmblocks)" ]
then
update="pkill -RTMIN+11 dwmblocks"
else
update=""
fi
# Uncomment if i3mpdupdate isn't running:
# Prefer spotify when it is running, otherwise fall back to mpc.
if [ $(pidof -s spotify) ]; then
toggle() { spotifycli toggle ; }
pause() { spotifycli pause; }
next() { spotifycli next; }
prev() { spotifycli prev; }
# NOTE(review): no real seek via spotifycli here; "prev twice" looks like a
# restart-current-track workaround -- confirm intent
seek() { spotifycli prev; spotifycli prev; }
else
toggle() { mpc toggle ;}
pause() { mpc pause; }
next() { mpc next; }
prev() { mpc prev; }
seek() { mpc seek $1; }
fi
case "$1" in
"up") pamixer --allow-boost -i "$2" ; $update ;;
"down") pamixer --allow-boost -d "$2" ; $update ;;
"mute") pamixer --allow-boost -t ; $update ;;
"truemute") pamixer --allow-boost -m ; $update ;;
"toggle") toggle ;;
"pause") pause ; pauseallmpv ;;
"forward") seek +"$2" ;;
"back") seek -"$2" ;;
"next") next ;;
"prev") prev ;;
"replay") seek 0% ;;
esac
| true
|
c9583c3cb9d45d87b5c320fb60a644cff3dde166
|
Shell
|
remsflems/works-primz
|
/every.bash
|
UTF-8
| 305
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Chain algo.py over successive output directories: the first run processes
# prime.txt.bkp, and every later stage processes each file the previous
# stage produced.
# NOTE(review): each stage's input directory name ("cdb5", "57a4", ...) looks
# like the previous call's two arguments joined -- confirm against algo.py.
python algo.py prime.txt.bkp cd b5
for entry in "cdb5"/*
do
python algo.py "$entry" 57 a4
done
for entry in "57a4"/*
do
python algo.py "$entry" 54 16
done
for entry in "5416"/*
do
python algo.py "$entry" 12 f4
done
for entry in "12f4"/*
do
python algo.py "$entry" fb 7f
done
| true
|
a333106b7425953b75e403096f36fc9b60cb6e05
|
Shell
|
timmc/CCIS-utils
|
/uptime.sh
|
UTF-8
| 702
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Requires non-interactive SSH auth between CCIS machines, such as that provided by auto-ssh.sh
# Suggested usage: ./uptime.sh | sort -rg | head -n 20
# Prints "<days-up> <host>" for each reachable CCIS linux machine.
# First normalise "up HH:MM" (uptime under one day) to "up 0 days, HH:MM" ...
FILT_UPTIME_PRE="sed 's|:\([0-9]\+\) up \([0-9]\+\):|:\1 up 0 days, \2:|'"
# ... then extract the day count (two patterns for the two uptime layouts).
FILT_UPTIME_1="sed 's|.*[^0-9]\([0-9]\+\) day.*|\1|'"
FILT_UPTIME_2="sed 's|.*up \([0-9]\+\) day.*|\1|'"
# host list: drop virtual/account entries, keep ccs.neu.edu hosts, take first word
for x in `/ccis/bin/linuxmachines 2>&1 | grep -v virtual | grep -v account. | grep ccs.neu.edu | sed 's/^\([a-z0-9]\+\).*/\1/'`
do
# run uptime remotely and parse it there; never prompt for a password
days=`ssh -o NumberOfPasswordPrompts=0 -o StrictHostKeyChecking=false $x "uptime | $FILT_UPTIME_PRE | $FILT_UPTIME_1 | $FILT_UPTIME_2"`
SSH_EXIT="$?"
# only print hosts we could actually reach
if [ "$SSH_EXIT" -eq "0" ] ; then
echo "$days $x"
fi
done
| true
|
87737cc6bf23b7fa6faf26553be6ef2103016ceb
|
Shell
|
Murasakiiru/dotfiles
|
/zsh/alias
|
UTF-8
| 1,364
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Interactive-shell aliases; sourced from the main zsh config.
. ~/.zsh/vars
# verbose file operations; rm asks before every removal
alias mv='mv -v'
alias cp='cp -v'
alias rm='rm -iv'
#rebuild video indexes
alias mplayer='mplayer -idx'
# long listing plus a colored dir/file count summary line
alias ll='ls -ahl | more; echo "\e[1;32m --[\e[1;34m Dirs:\e[1;36m `ls -al | egrep \"^drw\" | wc -l` \e[1;32m|\e[1;35m Files: \e[1;31m`ls -al | egrep -v \"^drw\" | grep -v total | wc -l` \e[1;32m]--"'
# Colorized files
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# public IP lookup
alias myippub='echo $(curl -s http://ipwhats.appspot.com)'
## If it no longer works: wget -q0 - http://cfaj.freeshell.org/ipaddr.cgi
## (NOTE(review): "-q0" is likely a typo for "-qO")
alias pingg='ping -c 3 www.google.fr'
alias psaux='ps aux'
### adapt with install script
##alias 'pactest=~/Script/pactest'
##alias 'pacedit=vim ~/Scripts/pactest.py'
##alias 'sublime=/opt/sublime-text/sublime_text'
###
alias mount='mount | column -t'
alias du='cdu -idh'
alias free='freec -m'
alias disk_analyz='ncdu'
## Other stuffs
#alias rdesktopad='nohup rdesktop 10.5.1.205 -u administrateur -p olfeoolfeo -g 1820x980 &'
#alias rdesktopad2k8='nohup rdesktop 10.5.2.42 -u administrateur -p olfeoolfeo -g 1820x980 &'
#alias rdesktopseven='nohup rdesktop 10.5.2.44 -u administrateur -p olfeoolfeo -g 1820x980 &'
|
452eb2ed7ee3d9e247acf292ce0f3956fe2c252d
|
Shell
|
mendhak/pash
|
/scripts/workflow/parse.sh
|
UTF-8
| 478
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
# Collect benchmark results and dump the logs of every benchmark that failed.
bash ./get_results.sh > out
mv out log_results
cat log_results/out
# each line looks like "<name>: ... <passed> ... <total>"
while read -r p; do
  # pull the first two numbers out of the line
  PASSED=$(echo "$p" | awk -F'[^0-9]+' '{ print $2 }')
  TOTAL=$(echo "$p" | awk -F'[^0-9]+' '{ print $3 }')
  # was: $((passed - failed)) -- both lowercase vars were undefined, so
  # FAILED was always 0
  FAILED=$((TOTAL - PASSED))
  # failed, print to stdout
  if [ "$PASSED" -ne "$TOTAL" ]; then
    # get the benchmark name (first word)
    f=${p%% *}
    # strip the :
    f="${f%?}"
    # dump the failed tests
    cat "log_results/${f}_failed.log"
  fi
done < log_results/out
| true
|
3342646ac67f20d1c7c0c8da6803ab336f7e5694
|
Shell
|
tyagi619/Labs
|
/Utility_commands/shell/lab2_ans/script9.sh
|
UTF-8
| 233
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
# Print a reordered long listing (name first, then the date/time fields and
# the mode), then count plain files and directories in the current directory.
ls -l -1 | awk '{print $9,$8,$6,$7,$1}'

# count_entries: print "<#files> <#dirs>" for the current directory.
# Uses a glob instead of $(ls -1), so names containing whitespace are
# counted correctly; like ls, hidden entries are skipped.
count_entries() {
  local f=0 d=0 i
  for i in *; do
    [ -f "$i" ] && f=$((f + 1))
    [ -d "$i" ] && d=$((d + 1))
  done
  echo "$f $d"
}

read -r file dir <<< "$(count_entries)"
echo "Files = $file"
echo "Directories = $dir"
| true
|
aa84cd431a0af0cb213b358a1a4032b95969a7c9
|
Shell
|
Nodraak/DotFiles
|
/dotfiles/config/i3lock/lock
|
UTF-8
| 355
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Lock the screen: screenshot the desktop, blur it, stamp an unlock hint and
# the debian logo onto it, then hand the image to i3lock as the background.
set -e
# work from the script's own directory (debian.png lives next to it)
cd $(dirname $(realpath $0))
scrot -e 'mv $f _lockbg.png'
convert _lockbg.png \
-blur 15x15 \
-family Hack -fill '#FFFFFF' -stroke black -strokewidth 1 -pointsize 32 \
-gravity south -annotate +0+200 'Type password to unlock' \
debian.png -geometry +0+0 -composite _lockbg.png
i3lock -e -t -f -i _lockbg.png
| true
|
684bb82dd83ca057538052675c280cb7a5733315
|
Shell
|
csiro-dcfp/postprocess_ci
|
/make_CI.sh
|
UTF-8
| 633
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash -l
# scripts to build the climate indices files for verification analysis
# plot an ensemble of forecasts for a given initial start time
# include the observation index on the plot
conda activate ferret
conda env list
cd postprocess_ci || exit 1   # abort rather than run in the wrong directory
mkdir -p indices              # -p: don't error when the dir already exists
mkdir -p tmp
# year passed to the script at the command line; defaults to 1982
# (was: "year=1982" immediately overwritten by an unguarded "year=$1")
year=${1:-1982}
# builds climate indices for
# forecast ensemble members
# observed values on the same time axis as forecasts
./indices_1.sh "${year}"
# produce plot of the climate indices
conda activate notebook
python climate_indices.py "${year}"
mkdir -p plots
# move plot to a common directory
mv "${year}"_*.pdf plots
| true
|
7fb8aa32863debd04907eadc8d9ccc9b76b85f7c
|
Shell
|
dollalilz/2041_20T2
|
/lab10/which_webserver.sh
|
UTF-8
| 124
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# For every URL given on the command line, print "<url> <Server header value>"
# using a HEAD request.
for a in "$@"
do
  # printf instead of echo -n ("-n" is not portable under /bin/sh)
  printf '%s ' "$a"
  # -I: HEAD only; stderr is merged so curl errors are filtered out by egrep.
  # (The original also passed /dev/null as an extra URL, which curl merely
  # errored on -- dropped here with no change to the output.)
  curl -I "$a" 2>&1| egrep -i server: |cut -d ":" -f2| cut -c 2-
done
| true
|
a03c7865e16a537e0325b48f99d1e0c0f5e04613
|
Shell
|
bomc/hack
|
/bomc-kubernetes-event/bomc-hrm/run_istio_servicegraph.sh
|
UTF-8
| 832
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Start kubectl port-forwarding for the Istio servicegraph pod and print the
# local URLs to browse once the tunnel is up.
# (The banner echos below are quoted: an unquoted "echo ####..." prints
# nothing because '#' starts a comment. The URLs are now echoed too -- they
# previously ran as commands and always failed.)
echo "#################################################"
echo "Start port-forwarding for servicegraph"
echo "-------------------------------------------------"
echo "Navigate to servicegraph:"
echo .
echo "http://localhost:8088/force/forcegraph.html"
echo ..
echo "http://localhost:8088/dotviz"
echo ...
echo "http://localhost:8088/dotgraph"
# To run port-forwarding in background, use the following cli command, see end '&':
# kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath="{.items[0].metadata.name}") 8088:8088 &
# NOTE: e.g. running port-forwarding for a pod in a special namespace, add the namespace at the end: -n bomc-app to the command.
kubectl -n istio-system port-forward "$(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath="{.items[0].metadata.name}")" 8088:8088
| true
|
cd6e024de0b02289818ebdd5e0a1c41ba4ca3bb2
|
Shell
|
shutdown57/dotfiles
|
/.fzf.zsh
|
UTF-8
| 20,467
| 3.6875
| 4
|
[] |
no_license
|
# Setup fzf
# ---------
if [[ ! "$PATH" == */home/sam/.fzf/bin* ]]; then
export PATH="${PATH:+${PATH}:}/home/sam/.fzf/bin"
fi
# Auto-completion
# ---------------
[[ $- == *i* ]] && source "/home/sam/.fzf/shell/completion.zsh" 2> /dev/null
# Key bindings
# ------------
source "/home/sam/.fzf/shell/key-bindings.zsh"
# General
# ------------
# Use fd and fzf to get the args to a command.
# Works only with zsh
# Examples:
# f mv # To move files. You can write the destination after selecting the files.
# f 'echo Selected:'
# f 'echo Selected music:' --extention mp3
# fm rm # To rm files in current directory
function f() {
sels=( "${(@f)$(fd "${fd_default[@]}" "${@:2}"| fzf)}" )
test -n "$sels" && print -z -- "$1 ${sels[@]:q:q}"
}
# Like f, but not recursive.
function fm() f "$@" --max-depth 1
# Deps
alias fz="fzf-noempty --bind 'tab:toggle,shift-tab:toggle+beginning-of-line+kill-line,ctrl-j:toggle+beginning-of-line+kill-line,ctrl-t:top' --color=light -1 -m"
function fzf-noempty () {
local in="$(</dev/stdin)"
test -z "$in" && (
exit 130
) || {
ec "$in" | fzf "$@"
}
}
function ec () {
if [[ -n $ZSH_VERSION ]]
then
print -r -- "$@"
else
echo -E -- "$@"
fi
}
# Opening files
# ------------
# fe [FUZZY PATTERN] - Open the selected file with the default editor
# - Bypass fuzzy finder if there's only one match (--select-1)
# - Exit if there's no match (--exit-0)
function fe() {
local files
IFS=$'\n' files=($(fzf-tmux --query="$1" --multi --select-1 --exit-0))
[[ -n "$files" ]] && ${EDITOR:-vim} "${files[@]}"
}
# Modified version where you can press
# - CTRL-O to open with `open` command,
# - CTRL-E or Enter key to open with the $EDITOR
function fo() {
local out file key
IFS=$'\n' out=("$(fzf-tmux --query="$1" --exit-0 --expect=ctrl-o,ctrl-e)")
key=$(head -1 <<< "$out")
file=$(head -2 <<< "$out" | tail -1)
if [ -n "$file" ]; then
[ "$key" = ctrl-o ] && open "$file" || ${EDITOR:-vim} "$file"
fi
}
# vf - fuzzy open with vim from anywhere
# ex: vf word1 word2 ... (even part of a file name)
# zsh autoload function
function vf() {
local files
files=(${(f)"$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1 -m)"})
if [[ -n $files ]]
then
vim -- $files
print -l $files[1]
fi
}
# fuzzy grep open via ag
# NOTE(review): vg is defined twice; this first definition is immediately
# shadowed by the line-number variant below and is effectively dead code.
function vg() {
local file
file="$(ag --nobreak --noheading $@ | fzf -0 -1 | awk -F: '{print $1}')"
if [[ -n $file ]]
then
vim $file
fi
}
# fuzzy grep open via ag with line number
function vg() {
local file
local line
# ag prints file:line:match; take the first two colon-separated fields
read -r file line <<<"$(ag --nobreak --noheading $@ | fzf -0 -1 | awk -F: '{print $1, $2}')"
if [[ -n $file ]]
then
vim $file +$line
fi
}
# Changing directory
# ------------
# fd - cd to selected directory
# NOTE(review): defining a function named fd shadows the external `fd`
# binary used by function f earlier in this file, and this first definition
# is itself shadowed by the one below.
function fd() {
local dir
dir=$(find ${1:-.} -path '*/\.*' -prune \
-o -type d -print 2> /dev/null | fzf +m) &&
cd "$dir"
}
# Another fd - cd into the selected directory
# This one differs from the above, by only showing the sub directories and not
# showing the directories within those.
# NOTE(review): this second definition is the one that wins at runtime.
function fd() {
DIR=`find * -maxdepth 0 -type d -print 2> /dev/null | fzf-tmux` \
&& cd "$DIR"
}
# fda - including hidden directories
function fda() {
local dir
dir=$(find ${1:-.} -type d 2> /dev/null | fzf +m) && cd "$dir"
} # fdr - cd to selected parent directory
function fdr() {
local declare dirs=()
get_parent_dirs() {
if [[ -d "${1}" ]]; then dirs+=("$1"); else return; fi
if [[ "${1}" == '/' ]]; then
for _dir in "${dirs[@]}"; do echo $_dir; done
else
get_parent_dirs $(dirname "$1")
fi
}
local DIR=$(get_parent_dirs $(realpath "${1:-$PWD}") | fzf-tmux --tac)
cd "$DIR"
}
# cf - fuzzy cd from anywhere
# ex: cf word1 word2 ... (even part of a file name)
# zsh autoload function
function cf() {
local file
file="$(locate -Ai -0 $@ | grep -z -vE '~$' | fzf --read0 -0 -1)"
if [[ -n $file ]]
then
if [[ -d $file ]]
then
cd -- $file
else
cd -- ${file:h}
fi
fi
}
# cdf - cd into the directory of the selected file
function cdf() {
local file
local dir
file=$(fzf +m -q "$1") && dir=$(dirname "$file") && cd "$dir"
}
# Another CTRL-T script to select a directory and paste it into line
function __fzf_select_dir ()
{
builtin typeset READLINE_LINE_NEW="$(
command find -L . \( -path '*/\.*' -o -fstype dev -o -fstype proc \) \
-prune \
-o -type f -print \
-o -type d -print \
-o -type l -print 2>/dev/null \
| command sed 1d \
| command cut -b3- \
| env fzf -m
)"
if
[[ -n $READLINE_LINE_NEW ]]
then
builtin bind '"\er": redraw-current-line'
builtin bind '"\e^": magic-space'
READLINE_LINE=${READLINE_LINE:+${READLINE_LINE:0:READLINE_POINT}}${READLINE_LINE_NEW}${READLINE_LINE:+${READLINE_LINE:READLINE_POINT}}
READLINE_POINT=$(( READLINE_POINT + ${#READLINE_LINE_NEW} ))
else
builtin bind '"\er":'
builtin bind '"\e^":'
fi
}
# Searching file contents
# ------------
# alternative using ripgrep-all (rga) combined with preview
# allows to search in PDFs, E-Books, Office documents, zip, tar.gz, etc. (see https://github.com/phiresky/ripgrep-all)
# find-in-file - usage: fif <searchTerm>
function fif() {
if [ ! "$#" -gt 0 ]; then echo "Need a string to search for!"; return 1; fi
rga --ignore-case --files-with-matches --no-messages "$1" | fzf-tmux --preview "highlight -O ansi -l {} 2> /dev/null | rga --ignore-case --pretty --context 10 "$1" {}"
}
# Command history
# ------------
# fh - repeat history
function runcmd (){ perl -e 'ioctl STDOUT, 0x5412, $_ for split //, <>' ; }
function fh() {
([ -n "$ZSH_NAME" ] && fc -l 1 || history) | fzf +s --tac | sed -re 's/^\s*[0-9]+\s*//' | runcmd
}
# fhe - repeat history edit
function writecmd (){ perl -e 'ioctl STDOUT, 0x5412, $_ for split //, do{ chomp($_ = <>); $_ }' ; }
function fhe() {
([ -n "$ZSH_NAME" ] && fc -l 1 || history) | fzf +s --tac | sed -re 's/^\s*[0-9]+\s*//' | writecmd
}
# re-wrote the script above
function __fzf_history ()
{
__ehc $(history | fzf --tac --tiebreak=index | perl -ne 'm/^\s*([0-9]+)/ and print "!$1"')
}
function __ehc()
{
if
[[ -n $1 ]]
then
bind '"\er": redraw-current-line'
bind '"\e^": magic-space'
READLINE_LINE=${READLINE_LINE:+${READLINE_LINE:0:READLINE_POINT}}${1}${READLINE_LINE:+${READLINE_LINE:READLINE_POINT}}
READLINE_POINT=$(( READLINE_POINT + ${#1} ))
else
bind '"\er":'
bind '"\e^":'
fi
}
# Processes
# ------------
# fkill - kill processes - list only the ones you can kill. Modified the earlier script.
# Pick one or more processes in fzf and send them a signal
# (default SIGKILL; pass another signal number as $1).
function fkill() {
local pid
if [ "$UID" != "0" ]; then
# non-root: only offer this user's own processes
pid=$(ps -f -u $UID | sed 1d | fzf -m | awk '{print $2}')
else
pid=$(ps -ef | sed 1d | fzf -m | awk '{print $2}')
fi
# was: [ "x$pid" != "x" ] -- archaic idiom; -n does the same test.
# Quoting $pid keeps the newline-separated PIDs intact for xargs.
if [ -n "$pid" ]
then
echo "$pid" | xargs kill -${1:-9}
fi
}
# Git
# ------------
# fbr - checkout git branch
function fbr() {
local branches branch
branches=$(git --no-pager branch -vv) &&
branch=$(echo "$branches" | fzf +m) &&
git checkout $(echo "$branch" | awk '{print $1}' | sed "s/.* //")
}
# fbr - checkout git branch (including remote branches)
function fbr() {
local branches branch
branches=$(git branch --all | grep -v HEAD) &&
branch=$(echo "$branches" |
fzf-tmux -d $(( 2 + $(wc -l <<< "$branches") )) +m) &&
git checkout $(echo "$branch" | sed "s/.* //" | sed "s#remotes/[^/]*/##")
}
# fbr - checkout git branch (including remote branches), sorted by most recent commit, limit 30 last branches
function fbr() {
local branches branch
branches=$(git for-each-ref --count=30 --sort=-committerdate refs/heads/ --format="%(refname:short)") &&
branch=$(echo "$branches" |
fzf-tmux -d $(( 2 + $(wc -l <<< "$branches") )) +m) &&
git checkout $(echo "$branch" | sed "s/.* //" | sed "s#remotes/[^/]*/##")
}
# fco - checkout git branch/tag
function fco() {
local tags branches target
branches=$(
git --no-pager branch --all \
--format="%(if)%(HEAD)%(then)%(else)%(if:equals=HEAD)%(refname:strip=3)%(then)%(else)%1B[0;34;1mbranch%09%1B[m%(refname:short)%(end)%(end)" \
| sed '/^$/d') || return
tags=$(
git --no-pager tag | awk '{print "\x1b[35;1mtag\x1b[m\t" $1}') || return
target=$(
(echo "$branches"; echo "$tags") |
fzf --no-hscroll --no-multi -n 2 \
--ansi) || return
git checkout $(awk '{print $2}' <<<"$target" )
}
# fco_preview - checkout git branch/tag, with a preview showing the commits between the tag/branch and HEAD
function fco_preview() {
local tags branches target
branches=$(
git --no-pager branch --all \
--format="%(if)%(HEAD)%(then)%(else)%(if:equals=HEAD)%(refname:strip=3)%(then)%(else)%1B[0;34;1mbranch%09%1B[m%(refname:short)%(end)%(end)" \
| sed '/^$/d') || return
tags=$(
git --no-pager tag | awk '{print "\x1b[35;1mtag\x1b[m\t" $1}') || return
target=$(
(echo "$branches"; echo "$tags") |
fzf --no-hscroll --no-multi -n 2 \
--ansi --preview="git --no-pager log -150 --pretty=format:%s '..{2}'") || return
git checkout $(awk '{print $2}' <<<"$target" )
}
# fcoc - checkout git commit
function fcoc() {
local commits commit
commits=$(git log --pretty=oneline --abbrev-commit --reverse) &&
commit=$(echo "$commits" | fzf --tac +s +m -e) &&
git checkout $(echo "$commit" | sed "s/ .*//")
}
# fshow - git commit browser
function fshow() {
git log --graph --color=always \
--format="%C(auto)%h%d %s %C(black)%C(bold)%cr" "$@" |
fzf --ansi --no-sort --reverse --tiebreak=index --bind=ctrl-s:toggle-sort \
--bind "ctrl-m:execute:
(grep -o '[a-f0-9]\{7\}' | head -1 |
xargs -I % sh -c 'git show --color=always % | less -R') << 'FZF-EOF'
{}
FZF-EOF"
}
# fcoc_preview - checkout git commit with previews
function fcoc_preview() {
local commit
commit=$( glNoGraph |
fzf --no-sort --reverse --tiebreak=index --no-multi \
--ansi --preview="$_viewGitLogLine" ) &&
git checkout $(echo "$commit" | sed "s/ .*//")
}
# fshow_preview - git commit browser with previews
function fshow_preview() {
glNoGraph |
fzf --no-sort --reverse --tiebreak=index --no-multi \
--ansi --preview="$_viewGitLogLine" \
--header "enter to view, alt-y to copy hash" \
--bind "enter:execute:$_viewGitLogLine | less -R" \
--bind "alt-y:execute:$_gitLogLineToHash | xclip"
}
# fcs - get git commit sha
# example usage: git rebase -i `fcs`
function fcs() {
local commits commit
commits=$(git log --color=always --pretty=oneline --abbrev-commit --reverse) &&
commit=$(echo "$commits" | fzf --tac +s +m -e --ansi --reverse) &&
echo -n $(echo "$commit" | sed "s/ .*//")
}
# fstash - easier way to deal with stashes
# type fstash to get a list of your stashes
# enter shows you the contents of the stash
# ctrl-d shows a diff of the stash against your current HEAD
# ctrl-b checks the stash out as a branch, for easier merging
function fstash() {
local out q k sha
while out=$(
git stash list --pretty="%C(yellow)%h %>(14)%Cgreen%cr %C(blue)%gs" |
fzf --ansi --no-sort --query="$q" --print-query \
--expect=ctrl-d,ctrl-b);
do
mapfile -t out <<< "$out"
q="${out[0]}"
k="${out[1]}"
sha="${out[-1]}"
sha="${sha%% *}"
[[ -z "$sha" ]] && continue
if [[ "$k" == 'ctrl-d' ]]; then
git diff $sha
elif [[ "$k" == 'ctrl-b' ]]; then
git stash branch "stash-$sha" $sha
break;
else
git stash show -p $sha
fi
done
}
# fgst - pick files from `git status -s`
# Helper: succeed silently iff the cwd is inside a git repo with a HEAD.
function is_in_git_repo() {
  git rev-parse HEAD > /dev/null 2>&1
}
# fgst - pick files from `git status -s` (multi-select), print their paths.
function fgst() {
  # "Nothing to see here, move along"
  is_in_git_repo || return
  local cmd="${FZF_CTRL_T_COMMAND:-"command git status -s"}"
  # IFS= keeps the leading status column intact; cut -c4- drops the fixed
  # two-character status + space prefix. The previous awk '{print $2}'
  # truncated paths containing spaces and mangled renames ("R  old -> new").
  eval "$cmd" | FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} --reverse $FZF_DEFAULT_OPTS $FZF_CTRL_T_OPTS" fzf -m "$@" | while IFS= read -r item; do
    echo "$item" | cut -c4-
  done
  echo
}
# JRNL
# ------------
# fjrnl - Search JRNL headlines, then show the chosen entry by timestamp.
function fjrnl() {
  local title
  title=$(jrnl --short | fzf --tac --no-sort) &&
  # Quote $title: unquoted word-splitting collapsed runs of spaces and
  # corrupted the 16-char timestamp slice. ${1:+"$1"} forwards the optional
  # extra flag only when given (a bare "" arg would confuse jrnl).
  jrnl -on "$(echo "$title" | cut -c 1-16)" ${1:+"$1"}
}
# Tags
# ------------
# ftags - search ctags
# Fuzzy-search the `tags` file in the cwd and open the chosen tag's file
# in $EDITOR (default vim), jumping straight to the tag.
function ftags() {
  local line
  # awk reformats each entry as "KIND<TAB>name<TAB>file<TAB>address",
  # skipping the !_TAG_ header lines; fzf matches only on kind+name.
  [ -e tags ] &&
  line=$(
    awk 'BEGIN { FS="\t" } !/^!/ {print toupper($4)"\t"$1"\t"$2"\t"$3}' tags |
    cut -c1-80 | fzf --nth=1,2
  ) && ${EDITOR:-vim} $(cut -f3 <<< "$line") -c "set nocst" \
                                      -c "silent tag $(cut -f2 <<< "$line")"
}
# TMUX
# ------------
# zsh; needs setopt re_match_pcre. You can, of course, adapt it to your own shell easily.
# tmuxkillf - multi-select tmux sessions in fzf and kill each one.
# NOTE: zsh-only — uses the (f@) split flag and the $match regex array.
function tmuxkillf () {
    local sessions
    sessions="$(tmux ls|fzf --exit-0 --multi)" || return $?
    local i
    # ${(f@)sessions}: split fzf's output on newlines (zsh expansion flag).
    for i in "${(f@)sessions}"
    do
        # Session name is everything before the first ':' (PCRE capture).
        [[ $i =~ '([^:]*):.*' ]] && {
            echo "Killing $match[1]"
            tmux kill-session -t "$match[1]"
        }
    done
}
# tm - create new tmux session, or switch to existing one. Works from within tmux too. (@bag-man)
# `tm` will allow you to select your tmux session via fzf.
# `tm irc` will attach to the irc session (if it exists), else it will create it.
function tm() {
  local change session
  [[ -n "$TMUX" ]] && change="switch-client" || change="attach-session"
  # [[ -n "$1" ]] replaces the fragile unquoted `[ $1 ]`, which broke on
  # names containing spaces or looking like test operators (e.g. "-n").
  if [[ -n "$1" ]]; then
    tmux $change -t "$1" 2>/dev/null || (tmux new-session -d -s "$1" && tmux $change -t "$1")
    return
  fi
  session=$(tmux list-sessions -F "#{session_name}" 2>/dev/null | fzf --exit-0) &&
    tmux $change -t "$session" || echo "No sessions found."
}
# fs [FUZZY PATTERN] - switch to a tmux session picked via fzf
# - Bypass fuzzy finder if there's only one match (--select-1)
# - Exit if there's no match (--exit-0)
function fs() {
  local target
  target=$(
    tmux list-sessions -F "#{session_name}" |
      fzf --query="$1" --select-1 --exit-0
  ) || return
  tmux switch-client -t "$target"
}
# ftpane - switch pane (@george-b)
# Fuzzy-pick any pane in the server (excluding the current one) and jump
# to it, switching windows first when needed.
function ftpane() {
  local panes current_window current_pane target target_window target_pane
  panes=$(tmux list-panes -s -F '#I:#P - #{pane_current_path} #{pane_current_command}')
  current_pane=$(tmux display-message -p '#I:#P')
  current_window=$(tmux display-message -p '#I')
  target=$(echo "$panes" | grep -v "$current_pane" | fzf +m --reverse) || return
  # Selected line looks like "WIN:PANE - path cmd"; awk splits on ':' or '-'.
  target_window=$(echo $target | awk 'BEGIN{FS=":|-"} {print$1}')
  target_pane=$(echo $target | awk 'BEGIN{FS=":|-"} {print$2}' | cut -c 1)
  if [[ $current_window -eq $target_window ]]; then
    tmux select-pane -t ${target_window}.${target_pane}
  else
    tmux select-pane -t ${target_window}.${target_pane} &&
    tmux select-window -t $target_window
  fi
}
# In tmux.conf
# bind-key 0 run "tmux split-window -l 12 'bash -ci ftpane'"
# ASDF
# ------------
# Install one or more versions of specified language
# e.g. `vmi rust` # => fzf multimode, tab to mark, enter to install
# if no plugin is supplied (e.g. `vmi<CR>`), fzf will list them for you
# Mnemonic [V]ersion [M]anager [I]nstall
function vmi() {
  local lang=${1}
  if [[ ! $lang ]]; then
    lang=$(asdf plugin-list | fzf)
  fi
  if [[ $lang ]]; then
    # Split declaration from the substitution so fzf's status isn't masked.
    local versions
    versions=$(asdf list-all $lang | fzf -m)
    if [[ $versions ]]; then
      # NB: the original `do;` form is a zsh-only syntax error in bash;
      # a plain `do` works in both shells.
      local version
      for version in $(echo $versions); do
        asdf install $lang $version
      done
    fi
  fi
}
# Remove one or more versions of specified language
# e.g. `vmc rust` # => fzf multimode, tab to mark, enter to remove
# if no plugin is supplied (e.g. `vmc<CR>`), fzf will list them for you
# Mnemonic [V]ersion [M]anager [C]lean
function vmc() {
  local lang=${1}
  if [[ ! $lang ]]; then
    lang=$(asdf plugin-list | fzf)
  fi
  if [[ $lang ]]; then
    # Split declaration from the substitution so fzf's status isn't masked.
    local versions
    versions=$(asdf list $lang | fzf -m)
    if [[ $versions ]]; then
      # NB: the original `do;` form is a zsh-only syntax error in bash;
      # a plain `do` works in both shells.
      local version
      for version in $(echo $versions); do
        asdf uninstall $lang $version
      done
    fi
  fi
}
# V
# ------------
# fasd & fzf change directory - open best matched file using `fasd` if given argument, filter output of `fasd` using `fzf` else
v() {
  # With args: let fasd open its best match in $EDITOR.
  # NOTE(review): ${EDITOR} is deliberately(?) unquoted so a value such as
  # "code --wait" word-splits into command + flags — confirm before quoting.
  [ $# -gt 0 ] && fasd -f -e ${EDITOR} "$*" && return
  local file
  # No args: rank recent files via fasd, fuzzy-pick one (-1 auto-select
  # single match, -0 abort when empty) and open it in vi.
  file="$(fasd -Rfl "$1" | fzf -1 -0 --no-sort +m)" && vi "${file}" || return 1
}
# CD
# ------------
# Interactive `cd`: with arguments behaves exactly like builtin cd; with
# none, repeatedly offers ".." plus the subdirectories of $PWD in fzf
# (with an `ls` preview of the candidate) until the user cancels (ESC).
function cd() {
  if [[ "$#" != 0 ]]; then
    builtin cd "$@";
    return
  fi
  while true; do
    # Candidates: parent dir plus immediate subdirs (trailing / stripped).
    local lsd=$(echo ".." && ls -p | grep '/$' | sed 's;/$;;')
    local dir="$(printf '%s\n' "${lsd[@]}" |
      fzf --reverse --preview '
          __cd_nxt="$(echo {})";
          __cd_path="$(echo $(pwd)/${__cd_nxt} | sed "s;//;/;")";
          echo $__cd_path;
          echo;
          ls -p --color=always "${__cd_path}";
      ')"
    # Empty selection (ESC) ends the loop.
    [[ ${#dir} != 0 ]] || return 0
    builtin cd "$dir" &> /dev/null
  done
}
# Autojump
# ------------
# j - jump via autojump when given a query, otherwise fuzzy-pick from the
# autojump database (highest weight first).
function j() {
    if [[ "$#" -ne 0 ]]; then
        # Quote both expansions: an unquoted destination containing spaces
        # previously word-split and sent cd to the wrong place.
        cd "$(autojump "$@")"
        return
    fi
    cd "$(autojump -s | sort -k1gr | awk '$1 ~ /[0-9]:/ && $2 ~ /^\// { for (i=2; i<=NF; i++) { print $(i) } }' | fzf --height 40% --reverse --inline-info)"
}
# Z
# ------------
unalias z 2> /dev/null
# fasd & fzf change directory - jump using `fasd` if given argument, filter output of `fasd` using `fzf` else
function z() {
  if [ $# -gt 0 ]; then
    fasd_cd -d "$*" && return
  fi
  local dest
  dest="$(fasd -Rdl "$1" | fzf -1 -0 --no-sort +m)" || return 1
  cd "${dest}" || return 1
}
# mpd
# ------------
# fmpc - fuzzy-pick a song from the current MPD playlist and play it.
function fmpc() {
  local song_position
  # sed extracts the leading "NN)" playlist position from the chosen line.
  song_position=$(mpc -f "%position%) %artist% - %title%" playlist | \
    fzf-tmux --query="$1" --reverse --select-1 --exit-0 | \
    sed -n 's/^\([0-9]\+\)).*/\1/p') || return 1
  [ -n "$song_position" ] && mpc -q play $song_position
}
# Readline
# ------------
# CTRL-X-1 - Invoke Readline functions by name
# Lists every readline function name (`bind -l`) in fzf and binds the
# chosen one to a C-x key sequence via a dynamically built `bind` command.
# NOTE(review): the comment says CTRL-X-1 but the sequence bound below is
# "\C-x3" — confirm which is intended.
function __fzf_readline ()
{
  builtin eval "
      builtin bind ' \
        \"\C-x3\": $(
            builtin bind -l | command fzf +s +m --toggle-sort=ctrl-r
        ) \
      '
  "
}
# FZF-marker
# ------------
# marker templete select
# zsh ZLE widget: if the command line already contains a {{placeholder}},
# jump to/fill it; otherwise fuzzy-pick a command template from the marker
# config dir and insert it into LBUFFER (trailing # comment stripped).
function _fzf_marker_main_widget() {
  if echo "$BUFFER" | grep -q -P "{{"; then
    _fzf_marker_placeholder
  else
    local selected
    # The two sed expressions colorize the leading command word and any
    # trailing "# comment" using the configurable ANSI colors.
    if selected=$(cat ${FZF_MARKER_CONF_DIR:-~/.config/marker}/*.txt |
      sed -e "s/\(^[a-zA-Z0-9_-]\+\)\s/${FZF_MARKER_COMMAND_COLOR:-\x1b[38;5;255m}\1\x1b[0m /" \
          -e "s/\s*\(#\+\)\(.*\)/${FZF_MARKER_COMMENT_COLOR:-\x1b[38;5;8m} \1\2\x1b[0m/" |
      fzf --bind 'tab:down,btab:up' --height=80% --ansi -q "$LBUFFER"); then
      LBUFFER=$(echo $selected | sed 's/\s*#.*//')
    fi
    zle redisplay
  fi
}
# zsh ZLE helper: locate the first {{placeholder}} in $BUFFER.
# With an argument: strip only the braces, keeping the placeholder text as
# a default, and put the cursor after it. Without: delete the placeholder
# and leave the cursor at its position.
function _fzf_marker_placeholder() {
  local strp pos placeholder
  # grep -b yields "byteoffset:match"; keep only the first placeholder.
  strp=$(echo $BUFFER | grep -Z -P -b -o "\{\{[\w]+\}\}")
  strp=$(echo "$strp" | head -1)
  pos=$(echo $strp | cut -d ":" -f1)
  placeholder=$(echo $strp | cut -d ":" -f2)
  if [[ -n "$1" ]]; then
    BUFFER=$(echo $BUFFER | sed -e "s/{{//" -e "s/}}//")
    # -4 accounts for the removed "{{" and "}}".
    CURSOR=$(($pos + ${#placeholder} - 4))
  else
    BUFFER=$(echo $BUFFER | sed "s/$placeholder//")
    # NOTE(review): looks like a missing '$' — this only works because zsh
    # evaluates assignments to the integer parameter CURSOR arithmetically,
    # so the name `pos` resolves to $pos. Confirm before porting to bash.
    CURSOR=pos
  fi
}
function _fzf_marker_placeholder_widget() { _fzf_marker_placeholder "defval" }
# Buku
# ------------
# fb - fuzzy-pick one or more buku bookmarks; print and open each.
# NOTE: bash-only (mapfile).
function fb() {
  # save newline separated string into an array
  mapfile -t website <<< "$(buku -p -f 5 | column -ts$'\t' | fzf --multi)"
  # open each website
  for i in "${website[@]}"; do
    # First column of `buku -p -f 5` output is the bookmark index.
    index="$(echo "$i" | awk '{print $1}')"
    buku -p "$index"
    buku -o "$index"
  done
}
# Man pages
# ------------
# man-find - fuzzy-pick a page from section $1 (default 1) and open it.
function man-find() {
  local f
  # NOTE(review): assumes $MANPATH is a single directory — a colon-separated
  # MANPATH yields a bogus search root; confirm before relying on this.
  f=$(fd . "$MANPATH/man${1:-1}" -t f -x echo {/.} | fzf) && man "$f"
}
# fman - apropos-style listing of every man page, fuzzy-filtered; opens the pick.
function fman() {
  local pick
  pick=$(man -k . | fzf --prompt='Man> ' | awk '{print $1}')
  # xargs -r skips man entirely when the picker was cancelled (empty input).
  printf '%s' "$pick" | xargs -r man
}
| true
|
1d68358b55515088141d93ba0877c165b6074e29
|
Shell
|
dilawar/Scripts
|
/g
|
UTF-8
| 614
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Recursively find files and grep each one for a text pattern.
# Usage: g text_pattern [file_glob_pattern] [dir_pattern]
set -f  # no shell globbing: keep patterns like "*" literal until find sees them
if [ $# -eq 1 ]; then
    pattern="$1"
    file_glob_pattern="*"
    dir_pattern="."
elif [ $# -eq 2 ]; then
    pattern="$1"
    file_glob_pattern="$2"
    dir_pattern="."
elif [ $# -eq 3 ]; then
    pattern="$1"
    file_glob_pattern="$2"
    dir_pattern="$3"
else
    echo "Usage: $0 text_pattern [file_glob_pattern] [dir_pattern]"
    exit
fi
echo "Searching $dir_pattern for text $pattern in files $file_glob_pattern"
# -print0 / read -d '' keep paths containing spaces or newlines intact;
# the old `for f in $(find ...)` loop word-split them. grep -r on a single
# regular file was a no-op and has been dropped.
find "$dir_pattern" -type f -name "$file_glob_pattern" -print0 |
while IFS= read -r -d '' f; do
    grep -Hn -B 0 -A 0 "$pattern" "$f"
done
| true
|
b2377134c7dc44cbf51a6709e1712f3111bb87a1
|
Shell
|
Azure/azure-notificationhubs-ios
|
/Scripts/build-framework.sh
|
UTF-8
| 1,268
| 3.5
| 4
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/sh
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# Builds the framework for the specified target.
# Usage: build-framework.sh <target> <sdk>
# Note: it must be run from Xcode's build phase.
set -e

# Print only target name and configuration. Mimic Xcode output to make prettify tools happy.
echo "=== BUILD TARGET $1 OF PROJECT $PROJECT_NAME WITH CONFIGURATION $CONFIGURATION ==="

# OBJROOT must be customized to avoid conflicts with the current process.
# BUG FIX: '==' is a bashism rejected by POSIX /bin/sh (e.g. dash); use '='.
if [ "$2" = "maccatalyst" ]; then
    # Mac Catalyst is a special case - "destination" parameter must be used here.
    env -i "PATH=$PATH" xcodebuild \
        SYMROOT="$SYMROOT" OBJROOT="$BUILD_DIR/$CONFIGURATION-$2/$PROJECT_NAME" PROJECT_TEMP_DIR="$PROJECT_TEMP_DIR" \
        ONLY_ACTIVE_ARCH=NO \
        -project "$PROJECT_NAME.xcodeproj" -configuration "$CONFIGURATION" \
        -scheme "$1" -destination 'platform=macOS,variant=Mac Catalyst'
else
    env -i "PATH=$PATH" xcodebuild \
        SYMROOT="$SYMROOT" OBJROOT="$BUILD_DIR/$CONFIGURATION-$2/$PROJECT_NAME" PROJECT_TEMP_DIR="$PROJECT_TEMP_DIR" \
        ONLY_ACTIVE_ARCH=NO \
        -project "$PROJECT_NAME.xcodeproj" -configuration "$CONFIGURATION" \
        -target "$1" -sdk "$2"
fi
| true
|
97236d54a73a429af252b3b4094b972fdd0a56ec
|
Shell
|
IThawk/rust-project
|
/rust-master/src/ci/docker/disabled/dist-x86_64-dragonfly/build-toolchain.sh
|
UTF-8
| 3,324
| 3.796875
| 4
|
[
"MIT",
"LicenseRef-scancode-other-permissive",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"NCSA"
] |
permissive
|
#!/usr/bin/env bash
# Cross-toolchain build for x86_64 DragonFly BSD: binutils, then the
# DragonFly libc/headers sysroot, then gcc. Expects the gcc patch as $1.
set -ex
ARCH=x86_64
PATCH_TOOLCHAIN=$1  # path to the DragonFly gcc patch (see dports notes below)
BINUTILS=2.25.1
GCC=6.4.0
# Run "$@" with its output captured to /tmp/build.log, printing a keepalive
# line every 30s so CI doesn't kill the job for silence. On failure the log
# is dumped and the script exits 1 (via the ERR trap).
hide_output() {
  set +x
  on_err="
echo ERROR: An error was encountered with the build.
cat /tmp/build.log
exit 1
"
  trap "$on_err" ERR
  bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
  PING_LOOP_PID=$!
  # BUG FIX: "$@" must be quoted — the unquoted $@ re-split arguments, so
  # configure flags containing spaces were broken apart before execution.
  "$@" &> /tmp/build.log
  trap - ERR
  kill $PING_LOOP_PID
  set -x
}
# --- Stage 1/3: binutils cross assembler/linker for DragonFly ---
mkdir binutils
cd binutils
# First up, build binutils
curl https://ftp.gnu.org/gnu/binutils/binutils-$BINUTILS.tar.bz2 | tar xjf -
mkdir binutils-build
cd binutils-build
hide_output ../binutils-$BINUTILS/configure \
  --target=$ARCH-unknown-dragonfly
hide_output make -j10
hide_output make install
cd ../..
rm -rf binutils

# --- Stage 2/3: DragonFly sysroot (headers, libc and friends) ---
# Next, download the DragonFly libc and relevant header files
URL=http://mirror-master.dragonflybsd.org/iso-images/dfly-x86_64-5.0.0_REL.iso.bz2
mkdir dragonfly
curl $URL | bzcat | bsdtar xf - -C dragonfly ./usr/include ./usr/lib ./lib
dst=/usr/local/$ARCH-unknown-dragonfly
mkdir -p $dst/lib
cp -r dragonfly/usr/include $dst/
cp dragonfly/usr/lib/crt1.o $dst/lib
cp dragonfly/usr/lib/Scrt1.o $dst/lib
cp dragonfly/usr/lib/crti.o $dst/lib
cp dragonfly/usr/lib/crtn.o $dst/lib
cp dragonfly/usr/lib/libc.a $dst/lib
cp dragonfly/usr/lib/libutil.a $dst/lib
cp dragonfly/usr/lib/libm.a $dst/lib
cp dragonfly/usr/lib/librt.so.0 $dst/lib
cp dragonfly/usr/lib/libexecinfo.so.1 $dst/lib
cp dragonfly/lib/libc.so.8 $dst/lib
cp dragonfly/lib/libm.so.4 $dst/lib
cp dragonfly/lib/libutil.so.4 $dst/lib
cp dragonfly/usr/lib/libpthread.so $dst/lib/libpthread.so
cp dragonfly/usr/lib/thread/libthread_xu.so.2 $dst/lib/libpthread.so.0
# Unversioned .so symlinks so the linker's -l lookups resolve.
ln -s libc.so.8 $dst/lib/libc.so
ln -s libm.so.4 $dst/lib/libm.so
ln -s librt.so.0 $dst/lib/librt.so
ln -s libutil.so.4 $dst/lib/libutil.so
ln -s libexecinfo.so.1 $dst/lib/libexecinfo.so
rm -rf dragonfly

# --- Stage 3/3: gcc targeting the DragonFly sysroot ---
# Finally, download and build gcc to target DragonFly
mkdir gcc
cd gcc
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf -
cd gcc-$GCC

# The following three patches are taken from DragonFly's dports collection:
# https://github.com/DragonFlyBSD/DPorts/tree/master/lang/gcc5
# The dports specification for gcc5 contains a few more patches, but they are
# not relevant in this situation, as they are for a language we don't need
# (e.g. java), or a platform which is not supported by DragonFly (e.g. i386,
# powerpc64, ia64, arm).
#
# These patches probably only need to be updated in case the gcc version is
# updated.
patch -p0 < $PATCH_TOOLCHAIN
./contrib/download_prerequisites

mkdir ../gcc-build
cd ../gcc-build
hide_output ../gcc-$GCC/configure \
  --enable-languages=c,c++ \
  --target=$ARCH-unknown-dragonfly \
  --disable-multilib \
  --disable-nls \
  --disable-libgomp \
  --disable-libquadmath \
  --disable-libssp \
  --disable-libvtv \
  --disable-libcilkrts \
  --disable-libada \
  --disable-libsanitizer \
  --disable-libquadmath-support \
  --disable-lto
hide_output make -j10
hide_output make install
cd ../..
rm -rf gcc
| true
|
a32648840d4ab2a6192b5b2142311fb6a058fa3a
|
Shell
|
weijuwei/mygit
|
/scripts/deluser.sh
|
UTF-8
| 205
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bulk-delete throwaway accounts: remove every /etc/passwd user whose name
# matches ^user[0-9]+$ exactly (home dir removed too via -r).
while IFS=: read -r username _; do
  # grep -q: the original bare grep leaked each matched name to stdout.
  if printf '%s\n' "$username" | grep -qE "^user[0-9]+$"; then
    userdel -r "$username" && echo "$username is removed"
  fi
done < /etc/passwd
| true
|
1b698707cc04f7130f1a964f077d1cd52912b28a
|
Shell
|
njoubert/PerVERT
|
/pervert_debug
|
UTF-8
| 531
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Ensure the PerVERT backend server is running, then post an update
# request for the given executable and log path.
if [ $# -ne 2 ]; then
  echo "Incorrect usage!"
  echo "$0 <exec> <path>"
  exit 1
fi

HERE=$(dirname "$0")
SERVER=$HERE/backend/server
CURL=/usr/bin/curl

#killall server
# Start the daemon only if the server is not already answering pings.
RESPONSE=$("$CURL" localhost:8083/ping 2>/dev/null)
if [ "$RESPONSE" != "pong" ]; then
  cd "$HERE/backend" || exit 1
  ./server --daemon
  cd - || exit 1
  # TODO: Check that server has come up?
fi

URL="localhost:8083/pp/update?exec=$1&logs=$2"
if ! "$CURL" "$URL" > /dev/null 2>&1; then
  echo "Error sending update message to server!"
  exit 1
fi
exit 0
| true
|
cd82268ba1fa01d80edc57e9c2b4652861f8dfaa
|
Shell
|
mateusmcg/java-trabalho-final
|
/devops/start-app.sh
|
UTF-8
| 139
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the app, and only bring the compose stack up if the build succeeded.
# Testing the command directly replaces the fragile `cmd; [ $? -eq 0 ]` idiom.
if ./devops/build.sh; then
  docker-compose up --build -d
else
  echo "Ocorreu um erro ao buildar o app =("
fi
| true
|
73c014eab81dd26101da8c446e4f8aeb6f69ceec
|
Shell
|
sasdf/deapk-openshift
|
/bin/compile
|
UTF-8
| 1,216
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build, sign and push an apk.
#   $1 = checkout/work directory, $2 = git remote URL, $3 = branch name
#        (also used as the final apk filename).
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/bin/lib
echo "Compiling $3 ..."
apktool="$(pwd)/bin/apktool"
mkkey="$(pwd)/bin/mkkey.sh"
signapk="$(pwd)/bin/signapk.jar"
mkdir "$1"
cd "$1" || exit 1
echo "Initializing git..."
git init
git remote add origin "$2" > /dev/null
git checkout --orphan "$3"
if [ $? -ne 0 ]
then
    echo "Checkout failed." >&2
    exit 1
fi
echo 'Pulling from git server...'
git pull --depth=1 origin "$3"
if [ $? -ne 0 ]
then
    echo "Pull falied." >&2
    exit 1
fi
echo 'Building apk...'
$apktool b -f -o "$1/unsigned.apk" "$1" -p "/tmp" 2>&1 | tee -a "$1/apktool.log"
# BUG FIX: $? after the pipeline was tee's exit status, so apktool failures
# were never detected; check the apktool stage itself via PIPESTATUS.
if [ "${PIPESTATUS[0]}" -ne 0 ]
then
    echo "Build falied." >&2
    exit 1
else
    echo "Signing apk..."
    if [ ! -f "$1/unsigned.apk" ]
    then
        echo "apk not found."
        exit 1
    fi
    # Generate a signing keypair on first use.
    if [ ! -f key.x509.pem ] || [ ! -f key.pk8 ]
    then
        echo "Generating keys..."
        $mkkey key
    fi
    java -jar $signapk key.x509.pem key.pk8 "$1/unsigned.apk" "$1/$3"
    rm -f "$1/unsigned.apk"
fi
rm -rf "$1/build"
echo ""
echo "Uploading..."
echo "Adding commit..."
git add . > /dev/null
git commit -m 'Build apk' > /dev/null
echo "Pushing to remote..."
git push --all --force > /dev/null 2>&1
echo "Upload ended"
echo "Cleaning local..."
rm -rf "$1"
| true
|
9193df24cd3b6d87f597f970e19f831660da27a8
|
Shell
|
molleweide/dorothy
|
/commands/git-review
|
UTF-8
| 332
| 2.8125
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
source "$DOROTHY/sources/strict.bash"
source "$DOROTHY/sources/paths.sh"
# Open a git review tool, preferring GUI clients when installed:
# Gitfox.app, then Tower.app, then their CLI launchers (gf, tower).
# get-app / open-app / command-exists / fail are Dorothy helper commands.
if get-app "Gitfox.app"; then
	open-app "Gitfox.app" "$@"
elif get-app "Tower.app"; then
	open-app "Tower.app" "$@"
elif command-exists gf; then
	gf "$@"
elif command-exists tower; then
	tower "$@"
else
	fail "gf and tower are missing"
fi
| true
|
07612c4f663a64a28b39369f0e59eff81820ef07
|
Shell
|
lollipopman/bin
|
/mint-ventra-tx
|
UTF-8
| 1,418
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# vim: set noexpandtab:
# Pull Mint transactions and render a PDF report of all "Ventra" charges
# between <from_date> and <to_date>.
PROGRAM=$(basename "${0}")

if [[ -z "$1" ]] || [[ -z "$2" ]]; then
	printf "usage: %s <from_date> <to_date>\n" "${PROGRAM}"
	printf "e.g. %s 2016-01-01 2016-01-31\n" "${PROGRAM}"
	exit 0
fi

TRANSACTIONS=$(mktemp /tmp/tmp.mint.XXXXXX)
if ! mintapi --transactions jesse@mbuki-mvuki.org >"${TRANSACTIONS}"; then
	printf "\n\nUnable to pull down transactions from mint!\n\n"
	exit 1
fi

# Mint timestamps are in milliseconds since the epoch.
FROM_DATE=$(($(date --date="$1" +%s) * 1000))
TO_DATE=$(($(date --date="$2" +%s) * 1000))

# BUG FIX: the original filter had a stray ')' after map(...), which made
# jq fail with a syntax error and left TOTAL empty.
TOTAL=$(jq '
map(
	select(.description == "Ventra" and .date > '"${FROM_DATE}"' and .date < '"${TO_DATE}"').amount
) | add' "${TRANSACTIONS}")

# Emit a GitHub-flavored-markdown table of matching transactions plus the total.
function tx_to_markdown() {
	printf "## Transactions\n\n"
	printf "Date|Description|Amount\n"
	printf -- "---|---|---\n"
	jq -r '
	map(
		select(.description == "Ventra" and .date > '"${FROM_DATE}"' and .date < '"${TO_DATE}"')
	) | map(
		.date /= 1000
	) | map(.date = ((.date) |
		tostring |strptime("%s") |
		strftime("%F"))) |
		map([.date, .description, .amount ]) |
		.[]|@tsv' "${TRANSACTIONS}" |
		tr '\t' '|' |
		sort -n
	printf "\n## Total: \$%s\n" "${TOTAL}"
}

output_pdf="${HOME}/tmp/${PROGRAM}-${1}-${2}.pdf"
if tx_to_markdown | pandoc -f gfm -o "${output_pdf}"; then
	printf "PDF Created: %s\n" "${output_pdf}"
else
	printf "Unable to create PDF: %s\n" "${output_pdf}"
	exit 1
fi
| true
|
f5fe28e4923b778553352a19b52bc533e06c0a2d
|
Shell
|
francoisferrand/docker-scripts
|
/completion/docker-env.bash
|
UTF-8
| 1,229
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Bash programmable completion for the docker-env wrapper.
# First word: an env name or a mode flag; subsequent words depend on the
# preceding keyword, and unknown modes delegate to docker's own completion.
_docker_env_complete() {
    COMPREPLY=()
    local current="${COMP_WORDS[COMP_CWORD]}"
    # First word completion: can be a mode or an env
    if [ $COMP_CWORD -eq 1 ]; then
        COMPREPLY=( $(compgen -W "$(docker-env --list) --list --add --update --rm --help" -- ${current}) )
        return 0
    fi
    # Completion after each keyword
    case "${COMP_WORDS[COMP_CWORD-1]}" in
        --add)
            # Must be a new env name
            return 0
            ;;
        --update|--rm)
            COMPREPLY=( $(compgen -W "$(docker-env --list)" -- ${current}) )
            return 0
            ;;
        --cacert|--tlscert|--tlskey)
            # These take a file path argument.
            COMPREPLY=( $(compgen -f -- ${current}) )
            return 0
            ;;
        --version)
            # --> Docker version completion
            return 0
            ;;
    esac
    # Options available in each mode
    case "${COMP_WORDS[1]}" in
        --add)
            COMPREPLY=( $(compgen -W "--cacert --tlscert --tlskey --version" -A hostname -- ${current}) )
            ;;
        --update)
            COMPREPLY=( $(compgen -W "--version" -- ${current}) )
            ;;
        --list|--rm|--help)
            ;;
        *)
            # Unknown first word: delegate to docker's registered completion
            # function, rewriting COMP_* as if the line started with "docker".
            local docker_completion=$(complete -p docker | awk '{print $(NF-1)}')
            unset COMP_WORDS[1]
            COMP_WORDS[0]="docker"
            COMP_WORDS=( "${COMP_WORDS[@]}" )
            COMP_LINE="${COMP_WORDS[*]}"
            ((COMP_CWORD--))
            ${docker_completion}
            ;;
    esac
}
complete -F _docker_env_complete docker-env
| true
|
161ed58f0879b59d4c1180e0652b89321ab64830
|
Shell
|
geligeli/tf_gpu_bb
|
/bin/start_env.sh
|
UTF-8
| 853
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the tf-gpu development container for the current bazel workspace.
if [ ! -f WORKSPACE ]; then
  echo "No WORKSPACE file present, this should be called in the root directory of a bazel project"
  exit 1
fi

# Refresh and pull the development image.
pushd "$PWD"
cd "$(dirname "$0")/../development_image" || exit 1
make deploy
docker pull 192.168.0.13:5000/tf-gpu-env:latest
popd

# Stop a previously started container if the saved id points at a live one.
# (-n with a quoted substitution replaces the fragile unquoted `! -z $(...)`.)
if [ -f .bazel_docker_id ]; then
  if [ -n "$(docker ps -f "id=$(cat .bazel_docker_id)" -q)" ]; then
    echo "bazel contianer already running as $(cat .bazel_docker_id) shutting it down"
    docker stop "$(cat .bazel_docker_id)"
  fi
fi
docker run \
  --rm -d -it \
  --name bazel-container \
  --add-host buildfarm-server:192.168.0.13 \
  --add-host buildfarm-redis:192.168.0.13 \
  -v /home/geli/.bazel_in_docker:/home/geli/.bazel_in_docker \
  -v "$PWD:$PWD" \
  -v /tmp:/tmp \
  192.168.0.13:5000/tf-gpu-env \
  bash > .bazel_docker_id
# NOTE(review): `complete` in a non-sourced script has no lasting effect —
# presumably meant for an interactive shell; confirm intent.
complete -o nospace -F _bazel__complete blaze
| true
|
aecc1fe6734fae009b6dc52175c852a85e8a7eb6
|
Shell
|
nativelogix/xquerrail2.framework
|
/.circle/bootstrap.sh
|
UTF-8
| 3,383
| 4
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
################################################################
# Use this script to initialize the first (or only) host in
# a MarkLogic Server cluster. Use the options to control admin
# username and password, authentication mode, and the security
# realm. If no hostname is given, localhost is assumed. Only
# minimal error checking is performed, so this script is not
# suitable for production use.
#
# Usage: this_command [options] hostname
#
################################################################
# Defaults; all overridable via the command-line options parsed below.
BOOTSTRAP_HOST="localhost"
USER="admin"
PASS="password"
AUTH_MODE="anyauth"   # curl authentication mode (-a)
SEC_REALM="public"
N_RETRY=5             # restart_check: number of polling attempts
RETRY_INTERVAL=10     # restart_check: seconds between attempts
#######################################################
# restart_check(hostname, baseline_timestamp, caller_lineno)
#
# Use the timestamp service to detect a server restart, given a
# a baseline timestamp. Use N_RETRY and RETRY_INTERVAL to tune
# the test length. Include authentication in the curl command
# so the function works whether or not security is initialized.
# $1 : The hostname to test against
# $2 : The baseline timestamp
# $3 : Invokers LINENO, for improved error reporting
# Returns 0 if restart is detected, exits with an error if not.
#
# Detect a server restart: poll the timestamp endpoint up to N_RETRY times
# (RETRY_INTERVAL seconds apart) until it differs from the baseline ($2).
# $1=hostname, $2=baseline timestamp, $3=caller's LINENO for error reporting.
# $AUTH_CURL is intentionally unquoted so its embedded options word-split
# into separate arguments. (Backticks replaced with the nestable $() form.)
function restart_check {
  LAST_START=$($AUTH_CURL "http://$1:8001/admin/v1/timestamp")
  for i in $(seq 1 ${N_RETRY}); do
    if [ "$2" == "$LAST_START" ] || [ "$LAST_START" == "" ]; then
      sleep ${RETRY_INTERVAL}
      LAST_START=$($AUTH_CURL "http://$1:8001/admin/v1/timestamp")
    else
      return 0
    fi
  done
  echo "ERROR: Line $3: Failed to restart $1"
  exit 1
}
#######################################################
# Parse the command line
OPTIND=1
while getopts ":a:p:r:u:" opt; do
  case "$opt" in
    a) AUTH_MODE=$OPTARG ;;
    p) PASS=$OPTARG ;;
    r) SEC_REALM=$OPTARG ;;
    u) USER=$OPTARG ;;
    \?) echo "Unrecognized option: -$OPTARG" >&2; exit 1 ;;
  esac
done
shift $((OPTIND-1))
if [ $# -ge 1 ]; then
  BOOTSTRAP_HOST=$1
  shift
fi

# Suppress progress meter, but still show errors
CURL="curl -s -S"
# Add authentication related options, required once security is initialized
AUTH_CURL="${CURL} --${AUTH_MODE} --user ${USER}:${PASS}"

#######################################################
# Bring up the first (or only) host in the cluster. The following
# requests are sent to the target host:
#   (1) POST /admin/v1/init
#   (2) POST /admin/v1/instance-admin?admin-user=X&admin-password=Y&realm=Z
# GET /admin/v1/timestamp is used to confirm restarts.

# (1) Initialize the server
echo "Initializing $BOOTSTRAP_HOST..."
$CURL -X POST -d "" http://${BOOTSTRAP_HOST}:8001/admin/v1/init
# Fixed grace period for the post-init restart before configuring security.
sleep 30

# (2) Initialize security and, optionally, licensing. Capture the last
#     restart timestamp and use it to check for successful restart.
# The trailing sed extracts the <last-startup> element's text from the XML.
TIMESTAMP=`$CURL -X POST \
   -H "Content-type: application/x-www-form-urlencoded" \
   --data "admin-username=${USER}" --data "admin-password=${PASS}" \
   --data "realm=${SEC_REALM}" \
   http://${BOOTSTRAP_HOST}:8001/admin/v1/instance-admin \
   | grep "last-startup" \
   | sed 's%^.*<last-startup.*>\(.*\)</last-startup>.*$%\1%'`
if [ "$TIMESTAMP" == "" ]; then
  echo "ERROR: Failed to get instance-admin timestamp." >&2
  exit 1
fi

# Test for successful restart
restart_check $BOOTSTRAP_HOST $TIMESTAMP $LINENO

echo "Initialization complete for $BOOTSTRAP_HOST..."

exit 0
| true
|
815a7f1c6e325416ce1c5f09ca8d54841d6acfc8
|
Shell
|
JaySandesara/madminer
|
/examples/First_Project/mg_processes/signal1/SubProcesses/combine_root.sh
|
UTF-8
| 1,737
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Main driver for the combination.
# To be executed in ./SubProcesses; the upper-level command
# (the only one to be executed) is:
#
# steerall born_G*
# steerall all_G*
# steerall $1
#
#It is recommended to use a very recent version of root.
#On SLC6 maching, from a tcsh shell execute e.g.
# setenv ROOTSYS /afs/cern.ch/sw/lcg/app/releases/ROOT/5.34.11/x86_64-slc6-gcc46-dbg/root/
# set path= ( $ROOTSYS/bin $path )
# setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:${ROOTSYS}/lib
#
# Build temp_root_files.txt: a count, the working directory, then the
# absolute path of every ./P*/<arg>/MADatNLO.root input file.
function combine_root_files {
  #Removes target file MADatNLO.root if present,
  #and writes a file (temp_root_files.txt) with the list
  #of input files. These are searched in the directories
  # ./P*/$1
  if [ -f temp_root_files.txt ]
  then
    rm -f temp_root_files.txt
  fi
  # Remove target file if already present: no warning is issued
  if [ -f MADatNLO.root ]
  then
    rm -f MADatNLO.root
  fi
  thisdir=`pwd`  # NB: deliberately global — steerall reads it afterwards
  i=0
  for p in P* ; do
    cd $p
    # One entry per requested subdirectory argument.
    for el in $* ; do
      i=`expr $i + 1`
      echo $thisdir/$p/$el"/MADatNLO.root" >> ../temp_root_files.txt
    done
    cd ../
  done
  if [ -f definitely_temporary.txt ]
  then
    # \rm bypasses any interactive rm alias.
    \rm definitely_temporary.txt
  fi
  # Prepend count + working dir, then swap the temp file into place.
  echo $i >> definitely_temporary.txt
  echo $thisdir >> definitely_temporary.txt
  cat temp_root_files.txt >> definitely_temporary.txt
  mv -f definitely_temporary.txt temp_root_files.txt
}
# steerall: regenerate temp_root_files.txt from this invocation's argument
# list, then drive ROOT in batch mode over combine_root.C to merge histograms.
function steerall {
  combine_root_files $1
  rm -f temp_root_files.txt
  # $((...)) replaces the obsolete $[...] arithmetic syntax.
  echo $(($#-1)) >> temp_root_files.txt
  echo $thisdir >> temp_root_files.txt
  for i in ${@:2}
  do
    echo $i >> temp_root_files.txt
  done
  if [ -f rootinput.txt ]
  then
    rm -f rootinput.txt
  fi
  echo ".x combine_root.C" >> rootinput.txt
  echo ".q" >> rootinput.txt
  root -b < rootinput.txt
  rm -f rootinput.txt
}
# Quote "$@" so directory arguments containing spaces survive intact.
steerall "$@"
rm -f AutoDict_vector_TH1D*
| true
|
f01870aae796548e038430fec713fdf464fc201b
|
Shell
|
OpenVnmrJ/OpenVnmrJ
|
/src/scripts/vnmr_jplot.sh
|
UTF-8
| 457
| 2.765625
| 3
|
[
"Apache-2.0",
"GPL-3.0-only"
] |
permissive
|
#! /bin/bash
#
#
# Copyright (C) 2015 University of Oregon
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
#
# Launch the jplot PlotConfig GUI with the bundled JRE when available,
# falling back to whatever java is on $PATH.
javabin="$vnmrsystem/jre/bin/java"
# Quoted test: the unquoted $javabin misbehaved when $vnmrsystem was empty
# or contained spaces.
if [ ! -f "$javabin" ]
then
   javabin="java"
fi
# "$@" (not $*) preserves arguments that contain whitespace.
"$javabin" -Dsysdir="$vnmrsystem" -Duserdir="$vnmruser" -Duser=$USER -cp "$vnmrsystem"/java/jplot.jar PlotConfig "$@" &
| true
|
b04bcfa0786623ee4be417fd6296a9daf8aa34ee
|
Shell
|
framasoft/lufi_ynh
|
/scripts/install
|
UTF-8
| 2,820
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# YunoHost install script for Lufi (file-sharing app): installs build deps,
# clones + configures the app, registers a systemd unit and nginx config.

# Exit on command errors and treat unset variables as an error
set -eu

# Source YunoHost helpers
source /usr/share/yunohost/helpers

# Install arguments supplied by YunoHost.
app=$YNH_APP_INSTANCE_NAME
domain=$YNH_APP_ARG_DOMAIN
path=$YNH_APP_ARG_PATH
contact=$YNH_APP_ARG_CONTACT
is_public=$YNH_APP_ARG_IS_PUBLIC
# Random secret used to sign sessions in lufi.conf.
secret=$(ynh_string_random 24)
src_path=/var/www/$app
# Find the first free port at or above 8081 for the app's local listener.
port=8081
while ! sudo yunohost app checkport $port ; do
  port=$((port+1))
done
dir=$(pwd)

# Save app settings
ynh_app_setting_set "$app" version "7db7688b"
ynh_app_setting_set "$app" is_public "$is_public"
ynh_app_setting_set "$app" secret "$secret"

# Check domain/path availability
sudo yunohost app checkurl "${domain}${path}" -a "$app" \
    || ynh_die "Path not available: ${domain}${path}"

# Install dependencies
if ! ynh_package_is_installed "build-essential" ; then
  ynh_package_install build-essential
fi
if ! ynh_package_is_installed "libssl-dev" ; then
  ynh_package_install libssl-dev
fi
# Carton manages Lufi's Perl dependencies.
echo "yes" | sudo cpan Carton

# Create user
if ! ynh_system_user_exists "$app" ; then
  sudo adduser $app --home $src_path --no-create-home --disabled-password --gecos $app
fi

# Copy source files
sudo git clone https://framagit.org/luc/lufi.git $src_path
cd $src_path
sudo carton install
sudo cp lufi.conf.template lufi.conf
sudo chown -R $app: $src_path

# Configure: patch listen port, proxy mode, contact, secret and URL prefix.
sudo sed -e "s@listen *=>.*@listen => ['http://127.0.0.1:$port'],@" \
         -e "s@#proxy *=>.*@proxy => 1,@" \
         -e "s@#contact *=>.*@contact => '$contact',@" \
         -e "s@#secrets *=>.*@secrets => ['$secret'],@" \
         -e "s@#prefix *=>.*@prefix => '$path',@" \
         -i lufi.conf
sudo cp utilities/lufi.service /etc/systemd/system/
sudo sed -e "s@User=www-data@User=$app@" \
         -e "s@WorkingDirectory=/var/www/lufi/@WorkingDirectory=$src_path@" \
         -e "s@PIDFile=/var/www/lufi/script/hypnotoad.pid@PIDFile=$src_path/script/hypnotoad.pid@" \
         -i /etc/systemd/system/lufi.service
sudo systemctl daemon-reload
sudo systemctl enable lufi.service

# Start
sudo systemctl start lufi.service

# Modify Nginx configuration file and copy it to Nginx conf directory
cd $dir
nginx_conf=../conf/nginx.conf
sed -i "s@YNH_WWW_PATH@$path@g" $nginx_conf
sed -i "s@YNH_LUFI_PORT@$port/@g" $nginx_conf
sudo cp $nginx_conf /etc/nginx/conf.d/$domain.d/$app.conf

# If app is public, add url to SSOWat conf as skipped_uris
if [[ $is_public -eq 1 ]]; then
  # unprotected_uris allows SSO credentials to be passed anyway.
  ynh_app_setting_set "$app" unprotected_uris "/"
fi
# '%&' escapes '-' for SSOWat's Lua pattern matching.
domainregex=$(echo "$domain" | sed 's/-/\%&/g')
pathregex=$(echo "$path" | sed 's/-/\%&/g')
ynh_app_setting_set "$app" skipped_regex "$domainregex$pathregex/r/","$domainregex$pathregex/about","$domainregex$pathregex/download"

# Reload services
sudo service nginx reload
| true
|
3a2d3926fff2577e8c2b0e502de077d8336058a0
|
Shell
|
rahworkx/sftp-xfer
|
/sftp-xfer.sh
|
UTF-8
| 4,017
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision a chroot-jailed, SFTP-only user: creates the group/user, a
# random password, the Inbound/Outbound/Processed/Error folder layout, an
# optional worker SSH key, and an sshd Match-group jail stanza.
## Vars
sftp_user=$1
sftp_group=$2
JAILPATH="/home/$sftp_user"
# Random 15-char password from AWS Secrets Manager's generator.
user_pass="$(/usr/local/bin/aws secretsmanager get-random-password --password-length 15 --exclude-punctuation --query 'RandomPassword' --output text --region us-west-2)"
wrkr_key="$3"
#if [ $# -lt 2 ]
#then
#  echo "This script must be run with super-user privileges."
#  echo -e "\nUsage:\n sudo sh add_sftp_user.sh sftpuser sftpuser_sftpgrp /file/path/ssh.pub --if applicable-- \n"
#fi

## Make sure the Group Exists ##
/bin/egrep -i "^${sftp_group}" /etc/group
if [ $? -eq 0 ]; then
	echo "Great, group $sftp_group already exists in /etc/group"
else
	echo "Group does not exist, Creating Group $sftp_group..."
	groupadd $sftp_group
fi

## Make Sure User Exists ##
/bin/egrep -i "^${sftp_user}" /etc/passwd
if [ $? -eq 0 ]; then
	echo "User $sftp_user exists in /etc/passwd, aborting..."
	exit 1
else
	echo "Good, $sftp_user is a new user."
	if [ -d "/home/$sftp_user" ]; then
		# NOTE(review): "$sftp-user" expands the unset $sftp plus the literal
		# "-user", so this prints "-user already exists"; likely meant
		# ${sftp_user}. Left untouched here (runtime string).
		echo "/home/$sftp-user already exists, aborting..."
		exit 1
	else
		echo "Creating User $sftp_user"
		adduser $sftp_user
		echo "Seting User: $sftp_user Password to Pass: $user_pass"
		echo "$user_pass" | passwd --stdin $sftp_user
		echo "Creating Folder Directory Structure"
		mkdir -p /home/$sftp_user
		cd /home/ && chown root:$sftp_user $sftp_user
		# NOTE(review): each usermod -m -d below moves the account's home dir
		# to the just-created subfolder — the net effect is only the last one
		# sticking. Looks suspicious; confirm intent.
		mkdir -p /home/$sftp_user/Processed
		usermod -m -d /home/$sftp_user/Processed $sftp_user
		mkdir -p /home/$sftp_user/Error
		usermod -m -d /home/$sftp_user/Error $sftp_user
		mkdir -p /home/$sftp_user/Inbound
		usermod -m -d /home/$sftp_user/Inbound $sftp_user
		mkdir -p /home/$sftp_user/Outbound
		usermod -m -d /home/$sftp_user/Outbound $sftp_user
		echo "Done setting Directory Structure"
		echo "Setting Permissions on Folder Directory Structure"
		# $1/$2 are the same values as sftp_user/sftp_group.
		chown $1:$2 /home/$sftp_user/Processed/
		chmod ug+rwX /home/$sftp_user/Processed/
		chown $1:$2 /home/$sftp_user/Error/
		chmod ug+rwX /home/$sftp_user/Error/
		chown $1:$2 /home/$sftp_user/Inbound/
		chmod ug+rwX /home/$sftp_user/Inbound/
		chown $1:$2 /home/$sftp_user/Outbound/
		chmod ug+rwX /home/$sftp_user/Outbound/
		chmod 777 /home/$sftp_user/Outbound/
		echo "Done setting Permissions on Folder Directory Structure"
		echo "Adding User $sftp_user to $sftp_group"
		usermod -a -G $sftp_group $sftp_user
	fi
fi

## Disable all Login Access except SFTP Only ##
usermod -s /usr/sbin/nologin $sftp_user
chmod 755 /home/$sftp_user

## Add Pub Key from worker to User Authorized Keys ##
if [ -z "$wrkr_key" ]
then
	echo "Wrker Key is empty, Not adding a Key"
else
	echo "Adding the Worker key to the users authorized list"
	mkdir /home/$sftp_user/.ssh
	cd /home/$sftp_user && chmod 700 .ssh
	mv $wrkr_key /home/$sftp_user/.ssh/authorized_keys
	cd /home/$sftp_user/.ssh && chmod 600 authorized_keys
	cd /home/$sftp_user && chown -R $sftp_user:$sftp_user .ssh
fi

## Jail User to its own Home Folder ##
# Appended once per group: members get chrooted internal-sftp sessions
# with TCP/X11 forwarding disabled.
if ! grep -q "Match group $sftp_group" /etc/ssh/sshd_config
then
	echo "* jailing user $sftp_user to group $sftp_group *"
	echo "
## Sftp $sftp_user Group Jail ##
Match group $sftp_group
AuthorizedKeysFile /home/$sftp_user/.ssh/authorized_keys
ChrootDirectory $JAILPATH
AllowTCPForwarding no
X11Forwarding no
ForceCommand internal-sftp
" >> /etc/ssh/sshd_config
fi

echo "#### Completed Addition Of SFTP User ####"
echo "HOST: xfer.materialbank.com"
echo "USER: $sftp_user"
echo "PASS: $user_pass"
echo "### Restarting SSH Daemon for changes to take affect ####"
systemctl restart sshd
| true
|
cac51afc1162601b4afdccd4b61a370ce842d14a
|
Shell
|
joyanceguan/demo
|
/src/test/resources/server/shell/tomcat_setup.sh
|
UTF-8
| 801
| 3.65625
| 4
|
[] |
no_license
|
# Define base paths: the downloaded-package directory and the install target.
setup_path=/usr/setup/                   # directory holding downloaded packages
base_path=/usr/test/                     # installation root
tomcat_name=apache-tomcat-8.5.33.tar.gz  # package file name (joined with setup_path)
tomcat_path=tomcat/8.5                   # install sub-directory under base_path
echo 开始安装tomcat
# Create the install directory if missing; otherwise just open up permissions.
# (Paths are quoted: the original unquoted expansions would break on spaces.)
if [ ! -d "${base_path}${tomcat_path}" ];
then
    mkdir -m 777 -p "${base_path}${tomcat_path}"
else
    chmod 777 -R "${base_path}${tomcat_path}"
fi
# Download the package only when it is not already present locally.
if [ ! -e "${setup_path}${tomcat_name}" ];
then
    # Need to download the package
    wget -P "${setup_path}" https://mirrors.tuna.tsinghua.edu.cn/apache/tomcat/tomcat-8/v8.5.33/bin/apache-tomcat-8.5.33.tar.gz
    echo download finish
else
    echo have download
fi
tar -xvf "${setup_path}${tomcat_name}" -C "${base_path}${tomcat_path}"
echo tomcat安装成功
| true
|
bb93687ee12ae320b546f50f38e68b30e26b14d6
|
Shell
|
echohack/macbot
|
/crutil.sh
|
UTF-8
| 463
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Remove Unnecessary Certificates
# Delete every certificate whose common name matches the argument(s) from the
# macOS system root keychain. Relies on an externally defined `run` helper.
delete_certificates() {
    local keychain="/System/Library/Keychains/SystemRootCertificates.keychain"
    local cert_list
    # Collect the SHA-1 hashes of every matching certificate.
    cert_list=$(security find-certificate -c "$@" -a -Z "$keychain" | grep SHA-1 | awk '{print $NF}')
    # The original compared against the literal string "0"; the real
    # "nothing found" case is an empty list, so test for non-emptiness.
    if [[ -n $cert_list ]] ; then
        for cert in $cert_list
        do
            run sudo security delete-certificate -Z "$cert" -t "$keychain"
        done
    fi
}
delete_certificates "Izenpe.com"
| true
|
0542a718b7258902d62cf254c0b42dc551a9182f
|
Shell
|
xuexiao-weizi/Debian_on_Buffalo
|
/Buster/installer_images/build/armhf-payload/source/ifup-mac.sh
|
UTF-8
| 319
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Assign MAC addresses (read from the boot firmware environment via
# fw_printenv) to the network interfaces before they are brought up.
if ip link list | grep eth1; then
    # Two interfaces present: ethaddr -> eth0, eth1addr -> eth1.
    mac0="$(fw_printenv -n ethaddr)"
    mac1="$(fw_printenv -n eth1addr)"
    ip link set dev eth1 address "$mac1"
else
    # Single interface: the board stores its address under eth1addr.
    mac0="$(fw_printenv -n eth1addr)"
fi
ip link set dev eth0 address "$mac0"
exit 0
| true
|
da9ce68104bead4032b3b65bb0c48d365415af3d
|
Shell
|
aduermael/dockerblog
|
/test-build.sh
|
UTF-8
| 1,218
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the blog-router Docker image, tagged with the short (8-char)
# hash of the current HEAD commit.
commit=$(git rev-parse --verify HEAD)
commit_short=${commit:0:8}
echo "$commit_short"

image_tag="blog-router:$commit_short"
dockerfile="router.Dockerfile"
docker build -f "$dockerfile" -t "$image_tag" .

# Reference: equivalent docker-compose service definition.
# docker build -t blog-router
#  blog-router:
#    build:
#      context: .
#      dockerfile: router.Dockerfile
#    container_name: blog-router
#    ports:
#      - "80:80"
#    stdin_open: true
#    tty: true
#    restart: always
#    # environment: # PROD
#    #   - GIN_MODE=release # PROD
#    volumes:
#      # - blog-data:/blog-data # PROD
#      - ./sample/themes:/blog-data/themes # DEV
#      - ./sample/config.json:/blog-data/config.json # DEV
#      - ./sample/comment-answer-email.html:/blog-data/comment-answer-email.html # DEV
#      - ./sample/comment-answer-email.txt:/blog-data/comment-answer-email.txt # DEV
#      - ./sample/rss.tmpl:/blog-data/rss.tmpl # DEV
#      - ./sample/files:/blog-data/files # DEV
#      - ./router/initial-data/admin:/blog-data/admin # DEV
#      - ./router/initial-data/js:/blog-data/js # DEV
#      - ./router/initial-data/robots.txt:/blog-data/robots.txt # DEV
#      - ./go/src:/go/src # DEV
#    command: ash # DEV
| true
|
e9c10842505b6862c9edc948ef0ac3928d9f44b3
|
Shell
|
eben0/reddit-crypto-price-bot
|
/scripts/update_env.sh
|
UTF-8
| 494
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Regenerate the .env file from the current environment plus the git tag,
# stamp the build time, and stage the result into dist/.
env_file=.env
version=$(git describe --tags)

# Write every setting in one pass, truncating any previous file.
{
  echo "CLIENT_ID=${CLIENT_ID}"
  echo "CLIENT_SECRET=${CLIENT_SECRET}"
  echo "REFRESH_TOKEN=${REFRESH_TOKEN}"
  echo "CMC_API_KEY=${CMC_API_KEY}"
  echo "LOGGER_PATH=${LOGGER_PATH}"
  echo "NODE_ENV=${NODE_ENV}"
  echo "VERSION=${version}"
} > "$env_file"

# Record the build timestamp (epoch seconds).
date +"%s" > .timestamp

mkdir -p dist/
cp "$env_file" dist/

# On CI builds, drop local state that must not ship.
if [ "$CI" ]; then
  rm -rf db/*.json
  rm -rf logs/
fi
| true
|
8a69a6a72682d9546d395dce0921b71a4635484b
|
Shell
|
kybernetyk/linuxscripts
|
/polybat
|
UTF-8
| 143
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Emit the battery segment of a status-bar line:
# "]" when acpi yields no reading, otherwise " | <remaining> | <power>]".
remaining=$(acpi | awk '{print $5}')
power=$(batpower)

if [ -n "$remaining" ]; then
    echo " | $remaining | $power]"
else
    echo "]"
fi
| true
|
f048907bba0fb17ac315461b6f8268dbb290c8b4
|
Shell
|
mikeplavsky/docker-network
|
/run.sh
|
UTF-8
| 740
| 3.34375
| 3
|
[] |
no_license
|
# Bulk create/remove Docker test networks and services.
# Usage: ./run.sh <function-name> <count>
# NOTE(review): "$CMD" is executed verbatim at the bottom, so any command —
# not only the functions defined here — can be run; confirm this is intended.
CMD=$1
NUM=$2
# Create NUM bridge networks test_1..test_NUM with fixed 10.10.<i>.0/24 subnets.
function create_subnet {
for ((i=1;i<=$NUM;i++)) do
docker network create --subnet=10.10.$i.0/24 test_$i
done
}
# Create NUM default-driver networks test_1..test_NUM.
function create {
for ((i=1;i<=$NUM;i++)) do
docker network create test_$i
done
}
# Create NUM overlay networks test_1..test_NUM (swarm mode required).
function create_overlay {
for ((i=1;i<=$NUM;i++)) do
docker network create --driver=overlay test_$i
done
}
# Remove networks test_1..test_NUM.
function remove {
for ((i=1;i<=$NUM;i++)) do
docker network remove test_$i
done
}
# Create NUM long-sleeping services nice_1..nice_NUM, one per test network.
function create_srv {
for ((i=1;i<=$NUM;i++)) do
docker service create \
--network=test_$i --name=nice_$i \
python:3.5 sleep 2000
done
}
# Remove services nice_1..nice_NUM.
function remove_srv {
for ((i=1;i<=$NUM;i++)) do
docker service rm nice_$i
done
}
$CMD
| true
|
e92734a555b683db96952f32ddfb6478751c9d87
|
Shell
|
nicolasaguenot/nabackupdb
|
/config_sample.sh
|
UTF-8
| 602
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Sample configuration for the database backup scripts: copy this file and
# replace each [BRACKETED] placeholder with a real value before use.
## DB
DBNAME=[YOUR_DATABASE_NAME]
DBUSER=[YOUR_DATABASE_USER]
DBPSWD=[YOUR_DATABASE_PASSWORD]
## FOLDER
# All paths are relative to the script's working directory.
LASTBACKUPFOLDER=sql/lastbackup
TMPFOLDER=sql/tmpbackup
ARCHIVEFOLDER=sql/archives
RESTOREDFOLDER=sql/lastrestored
## SCRIPT PATH - [YOUR_MYSQLDUMP_COMMAND_PATH - In general : /usr/bin/mysqldump]
MYSQLDUMPPATH=/usr/bin/mysqldump
## DATEFORMAT - [YOUR DATE FORMAT - In General : %Y-%m-%d-%H-%M (Year-Month-Day-Hours-Minuts)]
DATEFORMAT=%Y-%m-%d-%H-%M
## MAX BACKUP STORED - [MAXBACKUP - In General : 5]
MAXBACKUP=5
## MAX RESTORED BACKUP - [MAXRESTORE - In General : 5]
MAXRESTORE=5
| true
|
b027f292bdd5a4c7e3035c501e2f8f9205cbaf12
|
Shell
|
kif/edna
|
/sandbox/id11/bin/edna-tango-server
|
UTF-8
| 1,646
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Project: ID11 EDNA stuff
# http://www.edna-site.org
#
# File: "$Id: $"
#
# Copyright (C) 2008-2009 European Synchrotron Radiation Facility
# Grenoble, France
#
# Principal author: Jerome Kieffer (kieffer@esrf.fr)
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# and the GNU Lesser General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#OAR -l nodes=1/core=4,walltime=144:00:00
#
# Locate the EDNA installation root from this script's own resolved path
# (strip the trailing /sandbox/<name>/bin component).
full_path=$(readlink -fn "$0")
export EDNA_HOME=$(dirname "$full_path" | sed 's/\/sandbox\/.*\/bin$//')
export PATH=/bliss/users/blissadm/bin:$PATH
export PYTHON=/bliss/users/blissadm/bin/python
# Per-run scratch directory, timestamped to avoid collisions.
export TestSpace=/tmp/edna-$(date +"%Y%m%dT%H%M%S")
export PYTHONPATH=${EDNA_HOME}/kernel/tests/src
# Fall back to site defaults when the environment does not provide them.
# (Quoted: the original unquoted `[ -z $VAR ]` only worked by accident.)
if [ -z "$EDNA_SITE" ]
then
    export EDNA_SITE=ESRF
fi
if [ -z "$TANGO_HOST" ]
then
    export TANGO_HOST=lid112:20000
fi
if [ ! -d "${TestSpace}" ]
then
   mkdir "${TestSpace}"
fi
PythonProg=${EDNA_HOME}/tango/bin/tango-EdnaDS.py
echo "Server Mode"
# Abort rather than launching the server from the wrong directory.
cd "$TestSpace" || exit 1
$PYTHON -u "$PythonProg" DAU11 -v2
| true
|
d528375d2661c12e43acabc4173743a13ca7f67c
|
Shell
|
parampavar/plash
|
/www/deploy
|
UTF-8
| 320
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the generated documentation site: render it into a temp directory,
# turn that directory into a fresh git repo, and force-push the single
# commit to the docs-deploy repository (history is intentionally discarded).
set -eu
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

tmpd=$(mktemp -d)
# Render the site into the scratch directory.
$SCRIPT_DIR/generate "$tmpd"
cd "$tmpd"
# echo -n 'plash.io' > CNAME # for github pages
git init
git add .
git commit -m 'deploy docs'
git push git@github.com:ihucos/plash-docs-deploy.git HEAD:master --force
| true
|
b54c31e212544ada56184ea8f3049931e82002e3
|
Shell
|
ocawley/ittsqa09
|
/x00114388v2.sh
|
UTF-8
| 995
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Test suite for grades_v2: runs the program with boundary inputs and
# compares its stdout against the expected classification string.
PASSCOUNT=0
FAILCOUNT=0

# run_case <number> <expected-output> <arg>...
# Runs `java grades_v2 <arg>...`, compares stdout with <expected-output>,
# prints the pass/fail line and updates the global counters.
run_case() {
	local num="$1" expected="$2"
	shift 2
	local actual
	actual=$(java grades_v2 "$@")
	if [ "$actual" == "$expected" ]; then
		echo Test Case $num Passed
		PASSCOUNT=$((PASSCOUNT + 1))
	else
		echo Test Case $num Failed
		FAILCOUNT=$((FAILCOUNT + 1))
	fi
	echo
}

run_case 1 "Component Fail" 100 20
run_case 2 "Pass" 50 50
run_case 3 "Fail" 40 40
run_case 4 "Pass with distinction" 100 100
run_case 5 "Invalid input" 110 100

echo ==================
echo Test Suite Summary
echo ==================
echo Passed: $PASSCOUNT
echo Failed: $FAILCOUNT
| true
|
2e712061d1f83cc01145b7dbffdd97abfc7557fc
|
Shell
|
moos3/macbook-setup
|
/run-setup.sh
|
UTF-8
| 1,196
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstrap a macOS machine: collect the Ansible Vault password, install
# pip/ansible and the Xcode CLI tools, then run the provisioning playbook.

# Scratch file for the dialog output. mktemp exists on macOS, unlike the
# Debian-only `tempfile` this script previously relied on (which left
# $data empty and broke the `2> $data` redirect below).
data=$(mktemp 2>/dev/null)
trap "rm -f $data" 0 1 2 5 15
# Fixed flags: dialog takes --insecure and --passwordbox (the original
# `-insecure` / `--paswordbox` were rejected by dialog).
# NOTE(review): this branch writes ~/.vault_pass2.txt while the playbook
# below reads ~/.vault_pass.txt — confirm the dialog path is still used.
dialog --title "Ansible Vault Password" \
--clear \
--insecure \
--passwordbox "Enter your password" 10 30 2> $data

ret=$?
case $ret in
  0)
    echo "$(cat $data)" > ~/.vault_pass2.txt;;
  1)
    echo "Cancelled!";;
  255)
    [ -s $data ] && cat $data || echo "ESC pressed";;
esac

echo "Enter ansible vault password: "
read password_vault
echo $password_vault >> ~/.vault_pass.txt

echo "Running Provisioning"
echo "##########################"
echo "\n"
echo "Accepting xcode license"
sudo xcodebuild -license accept
echo "Installing xcode cli tools"
sudo xcode-select --install
echo "installing pip"
sudo easy_install pip
echo "installing ansible"
sudo pip install ansible
echo "making provision directory"
mkdir .provision && cd .provision
echo "pulling down provisioning tools"
git clone https://github.com/moos3/macbook-setup.git
cd macbook-setup
echo "installing requirements"
ansible-galaxy install -r requirements.yml
echo "running provisioning"
ansible-playbook -i inventory -K --vault-password-file ~/.vault_pass.txt main.yml
rm ~/.vault_pass.txt
echo "########################"
echo "\n"
echo "Happing Computing!"
| true
|
d637378a85d49ccd9c080813304087969cbd0449
|
Shell
|
libretro/Lakka
|
/packages/mediacenter/kodi/scripts/kodi.sh
|
UTF-8
| 2,918
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
. /etc/profile

# On SIGTERM, do not exit right away: linger until kodi.bin is gone so the
# service manager observes an orderly shutdown.
trap cleanup TERM

SAVED_ARGS="$@"
CRASHLOG_DIR=/storage/.kodi/temp

cleanup() {
# make systemd happy by not exiting immediately but
# wait for kodi to exit
  while killall -0 kodi.bin &>/dev/null; do
    sleep 0.5
  done
}
# True when the given command name resolves to something runnable.
# ($1 is quoted so names containing spaces or empty input behave sanely.)
command_exists()
{
  command -v "$1" &>/dev/null
}
# Append a gdb backtrace for every kodi core dump found under $1 to $FILE
# ($FILE is set by print_crash_report), deleting each core file afterwards.
single_stacktrace()
{
# core filename is "core.*kodi.bin.*"
  find "$1" -name 'core.*kodi.bin.*' | while read core; do
    echo "=====> Core file: "$core"" >> $FILE
    echo "      =========================================" >> $FILE
    gdb /usr/lib/kodi/kodi.bin --core="$core" --batch -ex "thread apply all bt" 2>/dev/null >> $FILE
    rm -f "$core"
  done
}
# Assemble a timestamped crash report under $CRASHLOG_DIR containing system
# info, a stack trace of any core dumps (when gdb is available) and the
# current kodi log. Sets the global $FILE used by single_stacktrace.
print_crash_report()
{
  if [ ! -d $CRASHLOG_DIR ] ; then
    mkdir -p $CRASHLOG_DIR
  fi
  DATE=`date +%Y%m%d%H%M%S`
  FILE="$CRASHLOG_DIR/kodi_crashlog_$DATE.log"
  echo "############## kodi CRASH LOG ###############" > $FILE
  echo >> $FILE
  echo "################ SYSTEM INFO ################" >> $FILE
  echo -n " Date: " >> $FILE
  date >> $FILE
  echo " kodi Options: $SAVED_ARGS" >> $FILE
  echo -n " Arch: " >> $FILE
  uname -m >> $FILE
  echo -n " Kernel: " >> $FILE
  uname -rvs >> $FILE
  echo -n " Release: " >> $FILE
  # Sourcing os-release sets NAME/VERSION (and clobbers any same-named globals).
  . /etc/os-release
  echo $NAME $VERSION >> $FILE
  echo "############## END SYSTEM INFO ##############" >> $FILE
  echo >> $FILE
  echo "############### STACK TRACE #################" >> $FILE
  if command_exists gdb; then
    single_stacktrace /storage/.cache/cores
  else
    echo "gdb not installed, can't get stack trace." >> $FILE
  fi
  echo "############# END STACK TRACE ###############" >> $FILE
  echo >> $FILE
  echo "################# LOG FILE ##################" >> $FILE
  echo >> $FILE
  cat /storage/.kodi/temp/kodi.log >> $FILE
  echo >> $FILE
  echo "############### END LOG FILE ################" >> $FILE
  echo >> $FILE
  echo "############ END kodi CRASH LOG #############" >> $FILE
  echo "Crash report available at $FILE"
}
# Allow unlimited core dumps only when gdb is present to analyse them.
if command_exists gdb; then
  ulimit -c unlimited
fi

# clean up any stale cores. just in case
rm -f /storage/.cache/cores/*

/usr/lib/kodi/kodi.bin $SAVED_ARGS
RET=$?

# 131-136 and 139 are 128+signal for signals 3-8 (SIGQUIT..SIGFPE) and
# 11 (SIGSEGV): death by a signal that should have produced a core dump.
if [ $(( ($RET >= 131 && $RET <= 136) || $RET == 139 )) = "1" ] ; then
  # Crashed with core dump
  print_crash_report
  # Cleanup. Keep only youngest 10 reports
  rm -f $(ls -1t $CRASHLOG_DIR/kodi_crashlog_*.log | tail -n +11)
fi

exit $RET
| true
|
63970d20703cf45239b44afb1498b473128be232
|
Shell
|
dionearle/comp2041-ass1
|
/test09.sh
|
UTF-8
| 1,253
| 2.875
| 3
|
[] |
no_license
|
# further tests legit-status's implementation
# by Dion Earle (z5205292)
# Test fixture: exercises every file-state reported by legit-status by
# mutating the working directory, index and repository between calls.
legit-init
legit-add real.txt
legit-commit -m "added a real file"
# tests that if a file in directory is same as repository, status is same as repo
legit-status
echo 123 > new.txt
# tests if a file isn't in the index or repository, status is untracked
legit-status
legit-rm real.txt
# tests that if a file is deleted from the index and the directory, status is deleted
legit-status
legit-add new.txt
# NOTE(review): unlike every other commit here, this one omits -m — confirm
# whether it is intentionally testing a malformed commit invocation.
legit-commit "added new file"
rm new.txt
# tests that if a file is deleted only from the directory, status is file deleted
legit-status
echo test > hello.txt
legit-add hello.txt
# tests that if a file is added to the index, status is added to index
legit-status
legit-commit -m "third commit"
echo newline >> hello.txt
# tests that if a file is changed yet not added, status is changes not staged for commit
legit-status
legit-add hello.txt
# tests that if a file is changed and added to index, status is changed staged for commit
legit-status
echo another new line >> hello.txt
# tests that if file is different in directory, index and repository, status is different changes staged for commit
legit-status
| true
|
ba3b27f8dcab3ff480d96a5cf5f8f2c5b613bee1
|
Shell
|
mferpan/devops
|
/workaround/app/network.sh
|
UTF-8
| 332
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Description: Manage containers network

# Compute the next free container IP into the global IP variable:
# one past the highest last-octet currently assigned to a container,
# or $IP_START when no container has an address yet.
# Globals read: IP_START, NETWORK.  Global written: IP.
function getIP () {
    LAST_IP=`lxc-ls -f | grep -iv ipv | awk '{print $3}' | sort | tail -1 | awk -F. '{print $4}'`
    # Fixed: the original read `== ""]]` (missing space before ]]), which
    # is a bash syntax error and prevented the script from parsing at all.
    # NOTE(review): plain `sort` is lexicographic — verify octets > 9 sort
    # as intended.
    if [[ -z "${LAST_IP}" ]]; then
        IP=$IP_START
    else
        IP=$NETWORK.$((LAST_IP+1))
    fi
}

# Show the RX traffic counters of the host wlp2s0 interface.
function getContainerTraffic (){
    ifconfig wlp2s0 | grep 'RX'
}
| true
|
4f1ba701d7b5c16f0af672a15a954742355fac13
|
Shell
|
peterkinalex/curl-www
|
/mirror/mirror_curl.sh
|
UTF-8
| 831
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/ksh
# Mirror the curl download area: fetch files listed in the remote
# curldist.txt manifest and delete local files that are no longer listed.
PATH=/usr/bin:/usr/local/bin:/bin:/opt/bin export PATH
MIRROR_DIR=/virtual/cubic.ch/www/html/mirror/curl export MIRROR_DIR
REMOTE_URL=http://curl.haxx.se/download export REMOTE_URL
echo "`date` Starting mirroring $REMOTE_URL to $MIRROR_DIR"
# Abort if the mirror directory is unreachable: the deletion pass below
# must never run against the wrong working directory.
cd $MIRROR_DIR || exit 1
OLDLIST="`ls $MIRROR_DIR`" export OLDLIST
NEWLIST="`curl -s $REMOTE_URL/curldist.txt`" export NEWLIST
for file in $NEWLIST
do
  # Exact-name membership test; the previous substring grep treated
  # "foo.tar" as present when only "foo.tar.gz" existed locally.
  if ! printf '%s\n' $OLDLIST | grep -qxF -- "$file" || [ "$file" = curldist.txt ] || [ "$file" = README.curl ]
  then
    # File is new, get it
    printf "Getting remote file $file ... "
    curl -Os $REMOTE_URL/$file
    printf "done\n"
  fi
done
for file in $OLDLIST
do
  if ! printf '%s\n' $NEWLIST | grep -qxF -- "$file"
  then
    # File is not present on the server, delete it
    printf "Deleting obsolete local file $file ... "
    rm -f $file
    printf "done\n"
  fi
done
| true
|
9eed6b5adf69acd253b3c210d6e3498d43f4f8df
|
Shell
|
nxtime/MoniTech
|
/old/cgi-bin/aequip.cgi
|
UTF-8
| 545
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# CGI endpoint: read a URL-encoded form body from stdin and append the
# equipment record (name;location;user;ip) to equips.csv.
read EQUIP
echo "content-type: text/html"
echo
# Turn %XX escapes into \xXX and let echo -e expand them.
# (Fixed: the original script used '/s/%/\\x/g' — a sed *address*, not a
# substitution — so no decoding ever happened.)
urldecode(){ echo -e $(sed 's/%/\\x/g') ;}
EQUIP=$(echo $EQUIP | urldecode | tr + ' ')
NAME=$(echo $EQUIP | cut -d"&" -f1 | cut -d"=" -f2)
LOCL=$(echo $EQUIP | cut -d"&" -f2 | cut -d"=" -f2)
USER=$(echo $EQUIP | cut -d"&" -f3 | cut -d"=" -f2)
IPAS=$(echo $EQUIP | cut -d"&" -f4 | cut -d"=" -f2)
CIPA=$(grep ";$IPAS$" equips.csv)
# NOTE(review): CIPA holds the whole matching CSV line, so this equality
# only holds when no line matches AND IPAS is empty. Confirm whether the
# intent was `if ! grep -q ";$IPAS$" equips.csv` (append only when the IP
# is not yet registered).
if [[ $IPAS = $CIPA ]] ; then
	echo "$NAME;$LOCL;$USER;$IPAS" >> equips.csv
	echo "<h1>Deu Certo</h1>"
else
	echo "<h1>Equipamento não existente.</h1>"
fi
|
384c4669c1c9113a1110e4411e73fc2b84866327
|
Shell
|
greg-erskine/pCP-github
|
/tcz/pcp-bt6/pcp-bt6/usr/local/etc/init.d/pcp-bt6
|
UTF-8
| 3,897
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
#========================================================================================
# Bluetooth Speaker Init Script
#----------------------------------------------------------------------------------------
#
# Version 6.0.0
. /usr/local/etc/pcp/pcp.cfg
. /usr/local/bin/pcp-bt-functions
# One-time controller bring-up: attach the on-board RPi adapter (when
# enabled), start bluez/bluealsa and power the controller on. Creates
# /var/run/bt_init on success.
init() {
sudo modprobe hci_uart
if [ "$RPIBLUETOOTH" = "on" ]; then
HCIATTACH=/usr/local/bin/hciattach
# Derive the adapter MAC from the board serial: b8:27:eb plus the last
# three serial bytes each XOR'ed with 0xaa.
SERIAL=`cat /proc/device-tree/serial-number | cut -c9-`
B1=`echo $SERIAL | cut -c3-4`
B2=`echo $SERIAL | cut -c5-6`
B3=`echo $SERIAL | cut -c7-8`
BDADDR=`printf b8:27:eb:%02x:%02x:%02x $((0x$B1 ^ 0xaa)) $((0x$B2 ^ 0xaa)) $((0x$B3 ^ 0xaa))`
# Pick the UART speed/flow control from the device-tree wiring:
# full uart0 with flow-control pins -> 3 Mbaud, otherwise slower noflow.
uart0="`cat /proc/device-tree/aliases/uart0`"
serial1="`cat /proc/device-tree/aliases/serial1`"
if [ "$uart0" = "$serial1" ] ; then
uart0_pins="`wc -c /proc/device-tree/soc/gpio@7e200000/uart0_pins/brcm\,pins | cut -f 1 -d ' '`"
if [ "$uart0_pins" = "16" ] ; then
$HCIATTACH /dev/serial1 bcm43xx 3000000 flow - $BDADDR
else
$HCIATTACH /dev/serial1 bcm43xx 921600 noflow - $BDADDR
fi
else
$HCIATTACH /dev/serial1 bcm43xx 460800 noflow - $BDADDR
fi
rpi_bthelper
fi
#Set the name of the bluetooth controller
sudo sed -i '/\[General\]/a Name = '$(hostname) /usr/local/etc/bluetooth/main.conf
sudo /usr/local/etc/init.d/bluez start > /dev/null 2>&1
RET=$(pcp_bt_controller_address 2>/dev/null)
if [ "$RET" == "" ]; then
echo "Bluetooth loaded, but no controller detected."
exit 1
fi
case $RET in
#RPi bluetooth or USB
B8:27:EB*) echo "RPi Bluetooth Hardware Address: $RET";;
*) echo "USB Bluetooth Hardware Address: $RET";;
esac
# Persist the detected controller address into the device config file.
sed -i "s/\(\BTCONTROLLER=\).*/\1\"$RET\"/" $BTDEVICECONF
/usr/local/etc/init.d/pcp-pair-agent start
/usr/local/bin/bluealsa --profile=a2dp-source --profile=a2dp-sink --profile=hsp-ag --profile=hfp-ag > /dev/null 2>&1 &
# Enable Controller
RET=$(pcp_bt_power_status)
if [ $RET -ne 0 ]; then
echo "Powering on the BT controller"
POWER=0
CNT=0
# Retry the power-on a few times (CNT starts at 0, gives up after > 3).
until [ $POWER -eq 1 ]; do
if [ $((CNT++)) -gt 3 ]; then
echo "Error powering on Bluetooth Adapter"
break
else
RET=$(pcp_bt_power_enable)
RET=$(pcp_bt_power_status)
if [ $RET -eq 0 ]; then
echo "Success."
POWER=1
touch /var/run/bt_init
else
sleep 0.5
fi
fi
done
else
echo "BT Controller Ready"
touch /var/run/bt_init
fi
}
# Bring up hci0 and, for the on-board (B8:27:EB) adapter, route SCO packets
# over HCI to enable HFP/HSP.
# NOTE(review): the `|| exit 0` terminates the WHOLE script (not just this
# function) when the MAC is not the on-board prefix — confirm that is intended.
rpi_bthelper() {
# For on-board BT, route SCO packets to the HCI interface (enables HFP/HSP)
# Sound quality is horrible.....just puting this here for info
#echo "Usage: $0 <bluetooth hci device>"
dev="hci0"
# Need to bring hci up before looking at MAC as it can be all zeros during init
/usr/local/bin/hciconfig "$dev" up
/usr/local/bin/hciconfig "$dev" |grep -q "BD Address: B8:27:EB:" || exit 0
/usr/local/bin/hcitool -i "$dev" cmd 0x3f 0x1c 0x01 0x02 0x00 0x01 0x01 > /dev/null
}
# Daemon identity: script name, install path, and pid-file location.
PNAME="pcp-btspeaker-daemon.py"
DAEMON="/usr/local/bin/${PNAME}"
DAEMON_PID="/var/run/${PNAME}.pid"

# Run init() once (logged to $BT_LOG), then launch the speaker daemon in
# the background. Exits 0 on launch, 1 when controller init failed.
start() {
#	OPTIONS="$1"
	OPTIONS=""
	if [ ! -f /var/run/bt_init ]; then
		echo "Starting BT Controller" >> $BT_LOG
		init >>$BT_LOG
	fi
	echo "Starting pCP BT Speaker Daemon"
	# init() creates /var/run/bt_init only on success.
	if [ -f /var/run/bt_init ]; then
		start-stop-daemon --start --quiet -m -p ${DAEMON_PID} -b --exec $DAEMON -- $OPTIONS
		sleep 0.25
		exit 0
	else
		exit 1
	fi
}
# Stop the daemon and remove its pid file.
stop() {
	echo "Stopping pCP BT Speaker Daemon"
	start-stop-daemon --stop --quiet -p "${DAEMON_PID}"
	# -f: do not error when the pid file is already gone.
	rm -f "${DAEMON_PID}"
}
# Report whether the daemon recorded in the pid file is alive.
# Exits 0 (and prints the PID) when running, 1 otherwise.
status() {
	# Check if our squeezelite daemon is running.
	if [ -f "$DAEMON_PID" ]; then
		PID=`cat "$DAEMON_PID"`
		# Probe the exact PID. The previous `ps ax | grep $PID` also
		# matched any process whose listing merely contained the digits
		# (e.g. PID 12 matching 123). kill -0 is a builtin and portable.
		if kill -0 "$PID" 2>/dev/null; then
			echo "pCP BT Speaker Daemon is running. PID=$PID"
			exit 0
		fi
	fi
	echo "pCP BT Speaker Daemon not running."
	exit 1
}
# Dispatch the requested init-script action; anything else prints usage.
case $1 in
start) start;;
stop) stop;;
status) status;;
restart) stop; sleep 5; start;;
*) echo -e "\n$0 [start|stop|restart|status]\n";;
esac
| true
|
5a139eb7e1731fcc984918ab7eaea2dfa5e23718
|
Shell
|
Klortho/edirect
|
/ftp-ls
|
UTF-8
| 250
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Usage: ftp-ls SERVER PATH
# List PATH on an anonymous FTP SERVER, one entry per line; directory
# names get a trailing "/". The sibling `eaddress` script supplies the
# e-mail address used as the anonymous password.
mydir=`dirname "$0"`
addr=`"$mydir"/eaddress`
# Drive a single non-interactive anonymous FTP session ($1 = server,
# $2 = remote path).
list_file_names()
{
ftp -n -V -i "$1" <<EOF
user anonymous "$addr"
cd "$2"
ls *
exit
EOF
}
# Keep only the name column of the long listing: plain files from "-..."
# lines, directories from "d..." lines with "/" appended.
list_file_names "$@" | sed -ne 's,^-.*[ ],,p; s,^d.*[ ]\(.*\),\1/,p'
| true
|
08b2a638c7f7d4c2e3fae02e0389086486444081
|
Shell
|
librae8226/gmsv
|
/genver.sh
|
UTF-8
| 242
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate genver.h: a C string constant `genver` containing every #define
# from include/version.h plus a generation timestamp.
IN=./include/version.h
OUT=./genver.h
# Touch main.c so the next build picks up the regenerated header.
touch main.c
echo "char *genver=\"" > "$OUT"
# Strip the 8-char "#define " prefix, turn double quotes into single quotes
# and double every backslash so the text embeds safely inside a C string.
# (UUOC removed: grep reads the file directly instead of `cat | grep`.)
grep -e "^#define" "$IN" | cut -b 9- | sed "s/\"/\'/g" | \
	sed 's/\\/\\\\/g' >> "$OUT"
echo "<<Generated at "`date`">>" >> "$OUT"
echo "\";" >> "$OUT"
| true
|
f5c5bb9bb770ec4160ec1c151bacfeaf3882aa2c
|
Shell
|
brmzkw/forticrap
|
/run.sh
|
UTF-8
| 454
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the FortiClient SSL VPN CLI non-interactively: NAT all eth0
# traffic and answer the client's prompts with credentials sourced from
# /etc/forticrap (VPN_SERVER, VPN_USER, VPN_PASSWORD).
. /etc/forticrap

# The tunnel needs the PPP character device (major 108, minor 0).
mknod /dev/ppp c 108 0
eth0_net=$(ip a | grep eth0 | grep inet | awk '{print $2}')
iptables -t nat -A POSTROUTING -s "$eth0_net" -j MASQUERADE

# expect feeds the password and auto-accepts the server certificate;
# timeout -1 waits indefinitely for each prompt.
expect -c '
set timeout -1
spawn /opt/forticlient-sslvpn/64bit/forticlientsslvpn_cli --server "'${VPN_SERVER}'" --vpnuser "'${VPN_USER}'" --keepalive
expect "Password for VPN:"
send "'${VPN_PASSWORD}'\n"
expect "Would you like to connect to this server?"
send Y\n
expect eof
'
| true
|
134b0f5bfa59ce177e8069a4436d4d085ef7d375
|
Shell
|
sathyapulse/code-deploy
|
/scripts/config_generator.sh
|
UTF-8
| 307
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# Post-deploy hook: move the wp-config.php and .htaccess that the deploy
# staged under /tmp into the web root, replacing any previous copies.
# Abort if the web root is missing rather than operating on the wrong dir.
cd /var/www/html || exit 1
# -f: do not fail when there is no previous config to remove.
sudo rm -f wp-config.php
FILE=/tmp/wp-config.php
if test -f "$FILE"; then
	sudo cp -R /tmp/wp-config.php /var/www/html/wp-config.php
	sudo rm /tmp/wp-config.php
fi
FILE=/tmp/.htaccess
if test -f "$FILE"; then
	sudo cp -R /tmp/.htaccess /var/www/html/.htaccess
	sudo rm /tmp/.htaccess
fi
| true
|
436f3a214882ae7d0c1d4a31e7cf93ca39630d10
|
Shell
|
mdevaluate/md-demo-script
|
/setup.sh
|
UTF-8
| 1,195
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
# Install the mdevaluate demo environment next to this script: verify
# Gromacs 2016 is present, bootstrap Miniconda, then build and install
# pygmx and mdevaluate into that conda environment.
workdir=$(dirname $(realpath $0))
pushd $workdir
# Check that Gromacs 2016 is installed
if ! gmx &>/dev/null
then
  echo "[ERROR] Gromacs not found."
  exit 1
fi
if [[ ! $(gmx -version | grep "GROMACS version") == *"2016"* ]]
then
  echo "[ERROR] $(gmx -version | grep "GROMACS version")"
  echo "Make sure to install Gromacs 2016"
  exit 1
fi
# Install and activate miniconda... if necessary
# (the conda/ directory doubles as the "already installed" marker)
if [ ! -d conda ]
then
  wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
  bash $workdir/Miniconda3-latest-Linux-x86_64.sh -p $workdir/conda -b
fi
source $workdir/conda/bin/activate
# install python dependencies
conda install -y --file requirements.txt
# install pygmx and mdevaluate
source GMXRC
if [ ! -d pygmx ]
then
  git clone https://github.com/mdevaluate/pygmx.git
fi
pushd pygmx
python setup.py install
popd
if [ ! -d mdevaluate ]
then
  git clone https://github.com/mdevaluate/mdevaluate.git
fi
pushd mdevaluate
python setup.py install
popd
# Smoke-test the install and tell the user how to activate the environment.
if python -c "import mdevaluate"
then
  echo
  echo "*** ### ***"
  echo "Installation of mdevaluate successfull...!"
  echo "To activate this python version in a new session, run:"
  echo "source $workdir/conda/bin/activate"
fi
| true
|
ec51840f1a93b54b50986a470d515557ce5418a2
|
Shell
|
jjyy-debear/FDSSTTest
|
/make.sh
|
UTF-8
| 3,096
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build script for FDSSTTest: assembles include/lib/define flag strings
# from the local OpenCV/Caffe/TBB/FFmpeg installs and invokes g++ once.
# Environment expected from ~/.bashrc: OPENCV_INCLUDEPATH, OPENCV_LIBPATH,
# CAFFEROOT, TBBROOT (and optionally FFMPEG_PATH).

# Recover the 32-char bazel output-base hash for this user's TF build by
# resolving the bazel-bin symlink and slicing the path after "_bazel_$USER/".
function getbazel(){
LINE=`readlink -f /home/$USER/code1/tensorflow-1.4.0-rc0/bazel-bin/`
POS1="_bazel_$USER/"
STR=${LINE##*$POS1}
BAZEL=${STR:0:32}
echo $BAZEL
}
BAZEL=`getbazel`
APPN="FDSSTTest"
#add export OPENCV_INCLUDEPATH=/home/$USER/code/test/pp/opencvlib/include to .bashrc
#add export OPENCV_LIBPATH=/home/$USER/code/test/pp/opencvlib/lib to .bashrc
#add export CAFFEROOT="/home/xyz/code1/py-faster-rcnn-master/caffe-fast-rcnn" to .bashrc
#add export TBBROOT=.. to .bashrc
IINCLUDE="-I$OPENCV_INCLUDEPATH -I/usr/local/include -I/home/$USER/.cache/bazel/_bazel_$USER/$BAZEL/external/eigen_archive/Eigen -I$TBBROOT/include -I$TBBROOT/include/tbb"
TBBLIBWHOLEPATH=`find $TBBROOT -name "libtbb.so"`
TBBLIBPATH=${TBBLIBWHOLEPATH/\/libtbb.so/}
LLIBPATH="-L$OPENCV_LIBPATH -L/usr/local/lib -L/home/$USER/code1/$APPN/deepsort/FeatureGetter -L$TBBLIBPATH "
rm $APPN -rf
# Append FFmpeg include/lib flags when FFMPEG_PATH is set; defines FFMPEG33
# when the installed libavcodec is the .so.57 ABI.
function FF(){
# if has FFMPEG_PATH
if [ "A$FFMPEG_PATH" != "A" ]
then
IINCLUDE="$IINCLUDE -I$FFMPEG_PATH/include"
fi
# ========lib path================================================
LLIBPATH="$LLIBPATH -L$OPENCV_LIBPATH -L$FACETRACKER_PATH"
# if has FFMPEG_PATH
if [ "A$FFMPEG_PATH" != "A" ]
then
LLIBPATH="$LLIBPATH -L$FFMPEG_PATH/lib"
fi
LLIBS="$LLIBS -lavcodec"
LLIBS="$LLIBS -lavdevice"
LLIBS="$LLIBS -lavfilter"
LLIBS="$LLIBS -lavformat"
LLIBS="$LLIBS -lavutil"
LLIBS="$LLIBS -lswscale"
#
FFMPEG33=`ls $FFMPEG_PATH/lib | grep avcodec | grep so.57 | wc -l`
if [ $FFMPEG33 -gt 0 ]
then
DDEFINES="$DDEFINES -DFFMPEG33"
fi
}
# Core build step: detect the distro and OpenCV ABI (2.4 vs 3.2), finish
# assembling the flag strings via FF, then compile and link the app.
function BCore(){
LLIBS="$LLIBS -ltcmalloc -lDetector"
OOS=`cat /proc/version | grep "Red Hat" | wc -l`
if [ $OOS -gt 0 ]
then
DDEFINES="$DDEFINES -DCENTOS"
fi
IINCLUDE="$IINCLUDE -I$OPENCV_INCLUDEPATH/opencv -I$CAFFEROOT/include"
IINCLUDE="$IINCLUDE -I/usr/local/cuda/include"
IINCLUDE="$IINCLUDE -I$CAFFEROOT/build/src -I/usr/include/python2.7"
CV24=`ls $OPENCV_LIBPATH | grep opencv | grep so.2.4 | wc -l`
if [ $CV24 -gt 0 ]
then
DDEFINES="$DDEFINES -DOPENCV24"
LLIBS="$LLIBS -l:libopencv_core.so.2.4 -l:libopencv_imgproc.so.2.4 -l:libopencv_highgui.so.2.4 -lboost_system -lglog"
else
LLIBS="$LLIBS -lDeepirIO -lDeepirAlgorithm -lDeepirFaceAlgorithm -lcaffe -l:libopencv_imgcodecs.so.3.2 -l:libopencv_videoio.so.3.2 -lpython2.7 -lboost_python"
LLIBS="$LLIBS -l:libopencv_core.so.3.2 -l:libopencv_imgproc.so.3.2 -l:libopencv_highgui.so.3.2 -lboost_system -lglog"
fi
LLIBPATH="$LLIBPATH -L$OPENCV_LIBPATH -L$CAFFEROOT/distribute/lib -L/home/xyz/code1/FaceTracker/detectalign"
FF
g++ --std=c++14 -fopenmp $DDEFINES -o $APPN $IINCLUDE $LLIBPATH FaceTracker.cpp fdsst/fdssttracker.cpp fdsst/fhog.cpp Main.cpp $LLIBS
}
DDEFINES="$DDEFINES -DWITHDETECT -DUFDSST"
#DDEFINES="$DDEFINES -DUFDSST"
# Build variant using HOG features.
function BHOG(){
DDEFINES="$DDEFINES -DUHOG"
BCore
}
# Build variant using deep-learning features (links FeatureGetter).
function BDL(){
DDEFINES="$DDEFINES -DUDL"
LLIBS="$LLIBS -lFeatureGetter"
LLIBPATH="$LLIBPATH -L./deepsort/FeatureGetter"
BCore
}
DDEFINES="$DDEFINES -DUSETBB"
LLIBS="$LLIBS -ltbb"
BHOG
| true
|
7bec6c34e882c76e501327984f365cc961975f16
|
Shell
|
fitzgeraldr/ares
|
/Execute - Production 4.2 Metadata/FlyTracking_default/.svn/text-base/fotrak.sh.svn-base
|
UTF-8
| 1,694
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fly Olympiad tracking step: runs fo_trak over every sbfmf stack waiting
# in the pipeline's 01_sbfmf_compressed folder (building the tool first if
# needed) and optionally runs the SAGE-load follow-up scripts.
. /sge/current/default/common/settings.sh
source /usr/local/SOURCEME
# Resolve the pipeline layout relative to this script's location.
fotrak_dir=$(cd "$(dirname "$0")"; pwd)
pipeline_scripts_dir=$(dirname "$fotrak_dir")
pipeline_dir=$("$pipeline_scripts_dir/Tools/pipeline_settings.pl" pipeline_root)
avi_sbfmf_dir="$pipeline_scripts_dir"/SBFMFConversion
do_sage_load=$("$pipeline_scripts_dir/Tools/pipeline_settings.pl" do_sageload_str)
# Make sure the next folders in the pipeline exist.
mkdir -p "$pipeline_dir/01_quarantine_not_compressed"
mkdir -p "$pipeline_dir/02_fotracked"
if [ $do_sage_load = true ]
then
    # All SBFMF jobs have finished, run follow up scripts.
    "$avi_sbfmf_dir/store_sbfmf_stats.pl"
    "$avi_sbfmf_dir/avi_sbfmf_conversion_QC.pl"
fi
# Make sure each experiment has a "Logs" directory.
# (This normally happens at the transfer step but we're skipping that for re-tracking.)
for exp_name in `ls "$pipeline_dir/01_sbfmf_compressed" 2>/dev/null`
do
    mkdir -p "$pipeline_dir/01_sbfmf_compressed/$exp_name/Logs"
done
# Make sure the tracking tool has been built.
if [ ! -x "$fotrak_dir/build/distrib/fo_trak" ]
then
    echo "Doing one-time build of fo_trak tool..."
    cd "$fotrak_dir"
    "$fotrak_dir/build_fo_trak.sh"
    sleep 5 # Give the cluster nodes time to see the new file.
    echo "Build complete."
fi
# Now run fotrak on them.
cd "$pipeline_dir/01_sbfmf_compressed"
ls -d */*/*sbfmf 2>/dev/null > /tmp/stacks.flyolympiad_box_fotrak
if [ -s /tmp/stacks.flyolympiad_box_fotrak ]
then
    # Make sure we're in the directory where this script was run from so the xml, etc. files can be found.
    cd "$fotrak_dir"
    pipeline -v -config fotrak.xml -file /tmp/stacks.flyolympiad_box_fotrak
fi
| true
|
84ac69bdc42cc9a6f3c72057a6f0c7f92689051e
|
Shell
|
Beats/ye-bash4
|
/test/UnitTestTarget.sh
|
UTF-8
| 6,893
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Unit tests for the "target" demo script: each testXxx function runs the
# script and diffs its output against a golden file under expected/target/.
SCRIPT_NAME="target"
SCRIPT_HOME=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
SCRIPT_PATH="$SCRIPT_HOME/$SCRIPT_NAME"
# assertDiff / assertEquals helpers.
source "$SCRIPT_HOME/asserts.sh"
EXPECTED="$SCRIPT_HOME/expected/$SCRIPT_NAME"
ACTUAL="$SCRIPT_HOME/actual/$SCRIPT_NAME"
mkdir -p $ACTUAL
# Golden outputs shared by several tests below.
expectedUsage="$EXPECTED/ye_bash4_usage.txt"
expectedDebug="$EXPECTED/ye_bash4_debug.txt"
# Run the script under test with the given arguments.
function run() {
$SCRIPT_PATH "$@"
}
# Run the script in --debug mode with two extra arguments and extract the
# "NAME:value" line for variable $1 (xargs trims the surrounding whitespace).
function var() {
local __var="$1"
shift;
$SCRIPT_PATH --debug "$1" "$2" | grep $__var | xargs
}
function testStack() {
expected="$EXPECTED/ye_bash4_stack.txt"
actual="$ACTUAL/ye_bash4_stack.txt"
run --stack > $actual
assertDiff "Stack differs." $expected $actual
}
function testVersion() {
expected="$EXPECTED/ye_bash4_version.txt"
actual="$ACTUAL/ye_bash4_version.txt"
run --version > $actual
assertDiff "Explicit output differs." $expected $actual
}
function testUsage() {
actual="$ACTUAL/ye_bash4_usage.txt"
run -h > $actual
assertDiff "Explicit short output differs." $expectedUsage $actual
run --help > $actual
assertDiff "Explicit long output differs." $expectedUsage $actual
run -w > $actual
assertDiff "Invalid short output differs." $expectedUsage $actual
run --none > $actual
assertDiff "Invalid long output differs." $expectedUsage $actual
run -a -b > $actual
assertDiff "Multiple short output differs." $expectedUsage $actual
run --action1 --action2 > $actual
assertDiff "Multiple long output differs." $expectedUsage $actual
run -a --action2 > $actual
assertDiff "Multiple both output differs." $expectedUsage $actual
run --action1 -b > $actual
assertDiff "Multiple both output differs." $expectedUsage $actual
}
# shunit2 test cases for the ye_bash4 option parser.
# They rely on helpers defined elsewhere in the suite: run(), var(),
# assertDiff(), assertEquals(), the $ACTUAL/$EXPECTED fixture dirs and the
# $expectedDebug / $expectedUsage fixture paths -- assumed to be set by the
# harness before these run (TODO confirm).

# --debug must produce the same dump for every accepted option/argument
# ordering (short, long, attached and separated values).
function testDebug() {
  actual="$ACTUAL/ye_bash4_debug.txt"
  run --debug --action1 Argument1 -m "Missing value" "Argument Two" > $actual
  assertDiff "Explicit long output differs." $expectedDebug $actual
  run --debug "Argument1" -a "Argument Two" -m"Missing value" > $actual
  assertDiff "Explicit long output differs." $expectedDebug $actual
  run --debug Argument1 "Argument Two" --action1 --missing="Missing value" > $actual
  assertDiff "Explicit long output differs." $expectedDebug $actual
  run --debug Argument1 -a --missing "Missing value" "Argument Two" > $actual
  assertDiff "Explicit long output differs." $expectedDebug $actual
}

# Running with no arguments must produce the default output.
function testDefault() {
  expected="$EXPECTED/ye_bash4_default.txt"
  actual="$ACTUAL/ye_bash4_default.txt"
  run > $actual
  assertDiff "Implicit output differs." $expected $actual
}

# -a / --action1 are equivalent spellings.
function testAction1() {
  expected="$EXPECTED/yb4_action1.txt"
  actual="$ACTUAL/yb4_action1.txt"
  run -a > $actual
  assertDiff "Explicit short output differs." $expected $actual
  run --action1 > $actual
  assertDiff "Explicit long output differs." $expected $actual
}

# -b / --action2 are equivalent spellings.
function testAction2() {
  expected="$EXPECTED/yb4_action2.txt"
  actual="$ACTUAL/yb4_action2.txt"
  run -b > $actual
  assertDiff "Explicit short output differs." $expected $actual
  run --action2 > $actual
  assertDiff "Explicit long output differs." $expected $actual
}

# action3 only has a short form: the long spelling falls through to usage.
function testAction3() {
  expected="$EXPECTED/yb4_action3.txt"
  actual="$ACTUAL/yb4_action3.txt"
  run --action3 > $actual
  assertDiff "Explicit long output differs." $expectedUsage $actual
  run -c > $actual
  assertDiff "Explicit short output differs." $expected $actual
}

# action4 only has a long form: the short spelling falls through to usage.
function testAction4() {
  expected="$EXPECTED/yb4_action4.txt"
  actual="$ACTUAL/yb4_action4.txt"
  run -d > $actual
  assertDiff "Explicit short output differs." $expectedUsage $actual
  run --action4 > $actual
  assertDiff "Explicit long output differs." $expected $actual
}

# A flag that defaults to ON (1) is turned off by -f / --flag-no.
function testFlagON() {
  local __var="YB4_ON"
  actual=`var $__var`
  assertEquals "Implicit differs" "$__var:1" "$actual"
  actual=`var $__var -f`
  assertEquals "Explicit short differs" "$__var:0" "$actual"
  actual=`var $__var --flag-no`
  assertEquals "Explicit long differs" "$__var:0" "$actual"
}

# A flag that defaults to OFF (0) is turned on by -n / --flag-on.
function testFlagNO() {
  local __var="YB4_NO"
  actual=`var $__var`
  assertEquals "Implicit differs" "$__var:0" "$actual"
  actual=`var $__var -n`
  assertEquals "Explicit short differs" "$__var:1" "$actual"
  actual=`var $__var --flag-on`
  assertEquals "Explicit long differs" "$__var:1" "$actual"
}
# A parameter with a declared default: absent -> "Default value"; otherwise
# every attachment style (-dX, -d"X", -d X, --default=X, --default X) must
# yield the given value.
function testParameterDefault() {
  local __var="YB4_DEFAULT"
  actual=`var $__var`
  assertEquals "Implicit differs" "$__var:Default value" "$actual"
  actual=`var $__var -dCustom`
  assertEquals "Explicit short 1 differs" "$__var:Custom" "$actual"
  actual=`var $__var -d"Custom"`
  assertEquals "Explicit short 2 differs" "$__var:Custom" "$actual"
  actual=`var $__var -d "Custom value"`
  assertEquals "Explicit short 3 differs" "$__var:Custom value" "$actual"
  actual=`var $__var --default=Custom`
  assertEquals "Explicit long 1 differs" "$__var:Custom" "$actual"
  actual=`var $__var --default="Custom"`
  assertEquals "Explicit long 2 differs" "$__var:Custom" "$actual"
  actual=`var $__var --default="Custom value"`
  assertEquals "Explicit long 3 differs" "$__var:Custom value" "$actual"
  actual=`var $__var --default Custom`
  assertEquals "Explicit long 4 differs" "$__var:Custom" "$actual"
  actual=`var $__var --default "Custom"`
  assertEquals "Explicit long 5 differs" "$__var:Custom" "$actual"
  actual=`var $__var --default "Custom value"`
  assertEquals "Explicit long 6 differs" "$__var:Custom value" "$actual"
}

# A parameter without a default: absent -> empty value; same attachment
# styles as above.  NOTE(review): function name has a typo ("Mising");
# shunit2 still discovers it via the test* prefix, so it is left as-is.
function testParameterMising() {
  local __var="YB4_MISSING"
  actual=`var $__var`
  assertEquals "Implicit differs" "$__var:" "$actual"
  actual=`var $__var -mCustom`
  assertEquals "Explicit short 1 differs" "$__var:Custom" "$actual"
  actual=`var $__var -m"Custom"`
  assertEquals "Explicit short 2 differs" "$__var:Custom" "$actual"
  actual=`var $__var -m "Custom value"`
  assertEquals "Explicit short 3 differs" "$__var:Custom value" "$actual"
  actual=`var $__var --missing=Custom`
  assertEquals "Explicit long 1 differs" "$__var:Custom" "$actual"
  actual=`var $__var --missing="Custom"`
  assertEquals "Explicit long 2 differs" "$__var:Custom" "$actual"
  actual=`var $__var --missing="Custom value"`
  assertEquals "Explicit long 3 differs" "$__var:Custom value" "$actual"
  actual=`var $__var --missing Custom`
  assertEquals "Explicit long 4 differs" "$__var:Custom" "$actual"
  actual=`var $__var --missing "Custom"`
  assertEquals "Explicit long 5 differs" "$__var:Custom" "$actual"
  actual=`var $__var --missing "Custom value"`
  assertEquals "Explicit long 6 differs" "$__var:Custom value" "$actual"
}
. shunit2
| true
|
4f57b5b077ed32e904b56aefedf2eca826c4eaf7
|
Shell
|
siqiyyyy/monox_fit
|
/monov/scripts/limits.sh
|
UTF-8
| 363
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run Asimov (expected, -t -1) AsymptoticLimits for every datacard in
# ../cards, one background `combine` job per card, logging each to
# limit/log_<TAG>.txt.

#remove limit on stack size to prevent related segfault
ulimit -s unlimited

### Asimov limit
mkdir -p limit
pushd limit
for file in ../cards/*.root; do
  # TAG = card_<TAG>.root -> <TAG>
  TAG=$(basename "$file" | sed 's/card_//g;s/.root//g')
  combine -M AsymptoticLimits "$file" -t -1 -n "$TAG" --setParameters LUMISCALE=1 --freezeParameters LUMISCALE | tee "log_$TAG.txt" &
done
# BUGFIX: wait for all background combine jobs; previously the script
# popped the directory and exited while the fits were still running.
wait
popd
| true
|
2d01015804beb260672ec9ad17a1d38ac079bc95
|
Shell
|
ohyoungjooung2/u18kvk8s
|
/k8s/kvm_install.sh
|
UTF-8
| 640
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install KVM/QEMU + libvirt on Ubuntu 18.04 when hardware virtualization
# is available (as reported by kvm-ok), then enable and start libvirtd.

# Install cpu-checker (provides kvm-ok) if it is not present yet.
# BUGFIX: the original used [[ $?=!"0" ]] / [[ $?=="0" ]], which bash parses
# as a single non-empty string -- always true -- so neither check ever
# actually tested an exit status.
if ! command -v kvm-ok > /dev/null 2>&1
then
    sudo apt install -y cpu-checker
else
    echo "kvm-ok(cpu-checker) installed"
fi

# kvm-ok exits 0 only when /dev/kvm is usable.
if kvm-ok > /dev/null 2>&1
then
    echo "Using kvm is possible"
    echo "Installing kvm related packages!"
    sudo apt install -y qemu qemu-kvm libvirt-bin bridge-utils virt-manager
    echo "Enabling libvirtd service"
    sudo systemctl enable libvirtd
    echo "Stargint libvirtd service"
    sleep 1
    sudo systemctl start libvirtd
else
    echo "kvm is disabled"
    exit 1
fi

#Check libvirtd
ps -ef | grep libvirtd | grep -v grep
exit 0
| true
|
184e86b616f06111a7f28f41d2e2f4e48c1493ed
|
Shell
|
bsk01/origin-aggregated-logging
|
/hack/testing/test-fluentd-forward.sh
|
UTF-8
| 5,816
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
# test by having a fluentd forward securely to another fluentd (and not ES)
# have that second fluentd send logs to ES
# verify the same way we do now (for ES copy)
# need to create a custom configmap for both fluentd?
# Shell options: always exit-on-error; add command tracing when VERBOSE is set.
if [[ $VERBOSE ]]; then
  set -ex
else
  set -e
  VERBOSE=
fi
set -o nounset
set -o pipefail

# Pull in the deployer helpers (get_running_pod, wait_for_pod_ACTION, ...)
# unless the caller has already sourced them.
if ! type get_running_pod > /dev/null 2>&1 ; then
  . ${OS_O_A_L_DIR:-../..}/deployer/scripts/util.sh
fi

# $1 selects whether an OPS cluster is in use; "false"/absent -> no ops suffix.
if [[ $# -ne 1 || "$1" = "false" ]]; then
  # assuming not using OPS cluster
  CLUSTER="false"
  ops=
else
  CLUSTER="$1"
  ops="-ops"
fi

# Where diagnostic logs are written on failure.
ARTIFACT_DIR=${ARTIFACT_DIR:-${TMPDIR:-/tmp}/origin-aggregated-logging}
if [ ! -d $ARTIFACT_DIR ] ; then
  mkdir -p $ARTIFACT_DIR
fi
PROJ_PREFIX=project.

get_test_user_token
# Undo the forwarding setup: delete the forwarder daemonset, strip the
# <match **> forwarding stanza from the fluentd configmap, comment out the
# secure-forward.conf contents, and redeploy fluentd.
cleanup_forward() {
  # undeploy fluentd
  oc label node --all logging-infra-fluentd-
  wait_for_pod_ACTION stop $fpod

  # Clean up only if it's still around
  oc delete daemonset/logging-forward-fluentd || :

  # Revert configmap if we haven't yet
  if [ -n "$(oc get configmap/logging-fluentd -o yaml | grep '<match \*\*>')" ]; then
    oc get configmap/logging-fluentd -o yaml | sed -e '/<match \*\*>/ d' \
      -e '/@include configs\.d\/user\/secure-forward\.conf/ d' \
      -e '/<\/match>/ d' | oc replace -f -
  fi

  # Restore secure-forward.conf to its fully-commented default form.
  # `|| :` because the patch may already match the current state.
  oc patch configmap/logging-fluentd --type=json --patch '[{ "op": "replace", "path": "/data/secure-forward.conf", "value": "\
# @type secure_forward\n\
# self_hostname forwarding-${HOSTNAME}\n\
# shared_key aggregated_logging_ci_testing\n\
# secure no\n\
# <server>\n\
# host ${FLUENTD_FORWARD}\n\
# port 24284\n\
# </server>"}]' || :

  # redeploy fluentd
  oc label node --all logging-infra-fluentd=true

  # wait for fluentd to start
  wait_for_pod_ACTION start fluentd
  fpod=`get_running_pod fluentd`
}
# this will update it so the current fluentd does not send logs to an ES host
# but instead forwards to the forwarding fluentd
update_current_fluentd() {
  # undeploy fluentd
  oc label node --all logging-infra-fluentd-
  wait_for_pod_ACTION stop $fpod

  # edit so we don't send to ES: inject a catch-all match that includes
  # the secure-forward config after the "## matches" marker line.
  oc get configmap/logging-fluentd -o yaml | sed '/## matches/ a\
<match **>\
@include configs.d/user/secure-forward.conf\
</match>' | oc replace -f -

  # Target host = pod IP of the forward-fluentd pod.
  POD=$(oc get pods -l component=forward-fluentd -o name)
  FLUENTD_FORWARD=$(oc get $POD --template='{{.status.podIP}}')

  # update configmap secure-forward.conf
  # NOTE: the '"'...'"' quote-dance keeps the ruby #{ENV['...']} literals
  # un-expanded by the shell while ${FLUENTD_FORWARD} *is* expanded.
  oc patch configmap/logging-fluentd --type=json --patch '[{ "op": "replace", "path": "/data/secure-forward.conf", "value": "\
@type secure_forward\n\
self_hostname forwarding-${HOSTNAME}\n\
shared_key aggregated_logging_ci_testing\n\
secure no\n\
buffer_queue_limit \"#{ENV['"'BUFFER_QUEUE_LIMIT'"']}\"\n\
buffer_chunk_limit \"#{ENV['"'BUFFER_SIZE_LIMIT'"']}\"\n\
<server>\n\
host '${FLUENTD_FORWARD}'\n\
port 24284\n\
</server>"}]'

  # redeploy fluentd
  oc label node --all logging-infra-fluentd=true

  # wait for fluentd to start
  wait_for_pod_ACTION start fluentd
}
# Stand up a second fluentd ("logging-forward-fluentd") that receives
# forwarded logs on port 24284, cloned from the regular fluentd template.
create_forwarding_fluentd() {
  # create forwarding configmap named "logging-forward-fluentd"
  oc create configmap logging-forward-fluentd \
    --from-file=fluent.conf=../templates/forward-fluent.conf

  # create forwarding daemonset: rewrite the stock template's labels/names
  # and expose the secure-forward listener port.
  oc get template/logging-fluentd-template -o yaml | \
    sed -e 's/logging-infra-fluentd: "true"/logging-infra-forward-fluentd: "true"/' \
    -e 's/name: logging-fluentd/name: logging-forward-fluentd/' \
    -e 's/ fluentd/ forward-fluentd/' \
    -e '/image:/ a \
ports: \
- containerPort: 24284' | \
    oc new-app -f -

  oc label node --all logging-infra-forward-fluentd=true

  # wait for fluentd to start
  wait_for_pod_ACTION start forward-fluentd
}
# Drive one write/verify round-trip through the logging stack.
# $1 is the expected number of matches; it is recorded but the actual
# check is delegated entirely to wait_for_fluentd_to_catch_up.
# Returns 0 when fluentd caught up, 1 otherwise.
write_and_verify_logs() {
  # expected number of matches
  expected=$1
  rc=0
  wait_for_fluentd_to_catch_up "" "" || rc=1
  return $rc
}
# Bounce the fluentd daemonset by toggling the node label, refreshing the
# $fpod global with the new pod name.
# NOTE(review): not referenced anywhere in this script's visible flow --
# possibly kept for interactive/debug use.
restart_fluentd() {
  oc label node --all logging-infra-fluentd-
  # wait for fluentd to stop
  wait_for_pod_ACTION stop $fpod
  # create the daemonset which will also start fluentd
  oc label node --all logging-infra-fluentd=true
  # wait for fluentd to start
  wait_for_pod_ACTION start fluentd
  fpod=`get_running_pod fluentd`
}
TEST_DIVIDER="------------------------------------------"

# configure fluentd to just use the same ES instance for the copy
# cause messages to be written to a container - verify that ES contains
# two copies
# cause messages to be written to the system log - verify that OPS contains
# two copies

fpod=`get_running_pod fluentd`

# run test to make sure fluentd is working normally - no forwarding
# On failure, dump pod logs and cluster events to ARTIFACT_DIR for triage.
write_and_verify_logs 1 || {
  oc logs $fpod > $ARTIFACT_DIR/test-fluentd-forward.fluentd.log
  ffpod=`get_running_pod forward-fluentd`
  oc logs $ffpod > $ARTIFACT_DIR/test-fluentd-forward.forward-fluentd.log
  oc get events -o yaml > $ARTIFACT_DIR/all-events.yaml 2>&1
  exit 1
}

# Restore the original configuration and capture events; also installed as
# the exit trap so interrupted runs clean up after themselves.
cleanup() {
  # put back original configuration
  cleanup_forward
  oc get events -o yaml > $ARTIFACT_DIR/all-events.yaml 2>&1
}
trap "cleanup" INT TERM EXIT

# Set up forwarding and verify logs still arrive end-to-end.
create_forwarding_fluentd
update_current_fluentd

write_and_verify_logs 1 || {
  oc logs $fpod > $ARTIFACT_DIR/test-fluentd-forward.fluentd.log
  ffpod=`get_running_pod forward-fluentd`
  oc logs $ffpod > $ARTIFACT_DIR/test-fluentd-forward.forward-fluentd.log
  oc get events -o yaml > $ARTIFACT_DIR/all-events.yaml 2>&1
  exit 1
}

# put back original configuration
cleanup

# Final sanity check: the un-forwarded pipeline must still work.
write_and_verify_logs 1 || {
  oc logs $fpod > $ARTIFACT_DIR/test-fluentd-forward.fluentd.log
  ffpod=`get_running_pod forward-fluentd`
  oc logs $ffpod > $ARTIFACT_DIR/test-fluentd-forward.forward-fluentd.log
  oc get events -o yaml > $ARTIFACT_DIR/all-events.yaml 2>&1
  exit 1
}
| true
|
7142fcb19efffee897cbcd58c5fc6fc01b192da7
|
Shell
|
yw4509/DS-GA-1007-python-for-data-science
|
/homework/hw05/rename_files.sh
|
UTF-8
| 153
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename files under data/university/ whose names start with $1, replacing
# the first occurrence of $1 (old substring) with $2 (new substring).
cd data/university/ || exit 1
old_characters=$1
new_characters=$2
# BUGFIX: iterate a glob instead of parsing `ls` output, so filenames
# containing whitespace survive intact.
for str in "${old_characters}"*
do
    [ -e "$str" ] || continue   # no match: the glob stays literal, skip it
    mv -- "$str" "${str/$old_characters/$new_characters}"
done
| true
|
293051fef5027cf94055f049256e90b083a40c64
|
Shell
|
armoutihansen/dotfiles
|
/.local/bin/quickmark
|
UTF-8
| 178
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# Show the URLs from qutebrowser's quickmarks file in dmenu and open the
# chosen one in a new qutebrowser window; do nothing when dmenu is aborted.
selection=$(awk '{print $2}' ~/.config/qutebrowser/quickmarks | dmenu -i -l 5 -p "Open quickmark:")
if [ -z "$selection" ]; then
    exit
fi
qutebrowser --target window "$selection"
| true
|
21257f550bdeaa9a34709307cdccefd22e53e9e4
|
Shell
|
meghamohan/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/6-superstitious_numbers
|
UTF-8
| 269
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#displaying bad luck numbers
# For 1..20 print the country-specific "bad luck" note (5: China, 10: Japan,
# 18: Italy) before echoing the number itself.
for n in {1..20}; do
  case "$n" in
    5)  echo "bad luck from China" ;;
    10) echo "bad luck from Japan" ;;
    18) echo "bad luck from Italy" ;;
  esac
  echo $n
done
| true
|
23435b42dec63e96764902d070c24c01071854c5
|
Shell
|
tantra35/ec2-resource-agents
|
/elasticip
|
UTF-8
| 7,856
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#
OCF_ROOT=/usr/lib/ocf
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat}
. ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs
# Print the OCF resource-agent metadata XML (parameters + supported actions)
# to stdout, as required by the `meta-data` action.  The heredoc delimiter is
# unquoted but the body contains no shell expansions, so it is emitted
# verbatim.
metadata() {
	cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="elasticip">
<version>1.0</version>
<longdesc lang="en">
Resource agnet for managing an EC2 Elastic IP. It relies on Tim Kay's perl aws client
available @ http://timkay.com/aws/.
</longdesc>
<shortdesc lang="en">Manage an EC2 elastic ip</shortdesc>
<parameters>
<parameter name="address" required="1">
<longdesc lang="en">
EC2 elastic IP address
</longdesc>
<shortdesc lang="en">ec2 ip</shortdesc>
<content type="string" default="" />
</parameter>
<parameter name="api_tools" required="1">
<longdesc lang="en">
API tools to use.
</longdesc>
<shortdesc lang="en">api tools</shortdesc>
<content type="string" default="aws" />
</parameter>
<parameter name="credentials" required="0">
<longdesc lang="en">
Location of file containing appropriate credentials.
</longdesc>
<shortdesc lang="en">credentials</shortdesc>
<content type="string" default="/root/.cred.txt" />
</parameter>
<parameter name="iptype" required="0">
<longdesc lang="en">
type of ip (vpc|classic).
</longdesc>
<shortdesc lang="en">iptype</shortdesc>
<content type="string" default="classic" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="180" />
<action name="stop" timeout="180" />
<action name="notify" timeout="180" />
<action name="monitor" depth="0" timeout="30" interval="30" />
<action name="validate-all" timeout="5" />
<action name="meta-data" timeout="5" />
</actions>
</resource-agent>
END
}
# Agent-wide tunables.  debug != 0 turns on debugger() output.
debug=0
API_TIMEOUT=20
MAX_RETRY=10
AWS_COMMAND="aws"
AWS_TIMEOUT=60

# Log $1 via ocf_log info when debugging is enabled; always returns success
# so it is safe inside && chains.
debugger() {
	[[ $debug != 0 ]] && ocf_log info $1
	return $OCF_SUCCESS
}

# Populate EC2_INSTANCE_ID / EC2_PUBLIC_IP from the instance metadata service.
ec2ip_metadata() {
	EC2_INSTANCE_ID=$(ec2metadata --instance-id)
	EC2_PUBLIC_IP=$(ec2metadata --public-ipv4)
}
# Load API credentials for the configured tool flavor (ec2-api-tools, aws,
# or euca-2ools).  Exits OCF_ERR_INSTALLED when the credentials file is
# absent or any required variable is missing after sourcing it.
ec2ip_load_credentials() {
	debugger "load_credentials:"
	local missing_cred=0

	# BUGFIX: the original chained `ocf_log error "..." \` + `exit ...`
	# with a line continuation, which passed the words "exit" and the code
	# as extra arguments to ocf_log -- the agent never actually exited on a
	# missing credentials file.
	if [[ ! -e $OCF_RESKEY_credentials ]]; then
		ocf_log error "EC2: ERROR: Credentials file not found at $OCF_RESKEY_credentials"
		exit $OCF_ERR_INSTALLED
	fi

	case $OCF_RESKEY_api_tools in
		"ec2-api-tools")
			. $OCF_RESKEY_credentials
			# Any one missing variable marks the credentials incomplete.
			[[ -z $EC2_PRIVATE_KEY ]] || \
			[[ -z $EC2_CERT ]] || \
			[[ -z $EC2_KEY ]] || \
			[[ -z $EC2_KEYID ]] && missing_cred=1
			export EC2_PRIVATE_KEY
			export EC2_CERT
			export EC2_KEY
			export EC2_KEYID
			debugger "- Loaded ec2-api-tools credentials"
			debugger "-- EC2_PRIVATE_KEY = $EC2_PRIVATE_KEY"
			debugger "-- EC2_CERT = $EC2_CERT"
			debugger "-- EC2_KEY = $EC2_KEY"
			debugger "-- EC2_KEYID= $EC2_KEYID"
			;;
		"aws") # AWS credentials get loaded via file, not environment
			[[ $OCF_RESKEY_credentials != "/root/.awssecret" ]] && missing_cred=1
			debugger "- Found aws secrets @ $OCF_RESKEY_credentials"
			;;
		"euca-2ools")
			. $OCF_RESKEY_credentials
			[[ -z $EC2_ACCESS_KEY ]] || \
			[[ -z $EC2_SECRET_KEY ]] && missing_cred=1
			# TODO, load this from file OR set default
			export EC2_ACCESS_KEY
			export EC2_SECRET_KEY
			export EC2_URL="https://ec2.amazonaws.com"
			debugger "- Loaded euca-2ools credentials"
			debugger "-- EC2_ACCESS_KEY = $EC2_ACCESS_KEY"
			debugger "-- EC2_SECRET_KEY = $EC2_SECRET_KEY"
			;;
	esac

	if [[ $missing_cred -eq 1 ]]; then
		ocf_log error "EC2 ERROR: Missing credentials for $OCF_RESKEY_api_tools in $OCF_RESKEY_credentials"
		exit $OCF_ERR_INSTALLED
	fi
	return 0
}
# Validate agent configuration: require the address param, resolve the
# associate/disassociate commands for the chosen api_tools flavor, load
# credentials, and confirm every needed binary is executable.
ec2ip_validate() {
	debugger "validate"
	# NOTE(review): the message interpolates $OCF_RESKEY_ADDRESS (uppercase),
	# which is likely always empty -- the real parameter is lowercase
	# $OCF_RESKEY_address.  Left unchanged here (runtime string).
	[[ -z $OCF_RESKEY_address ]] && ocf_log error "EC2 ERROR: address param not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED
	case $OCF_RESKEY_api_tools in
		"ec2-api-tools")
			ASSOCIATE_CMD="ec2-associate-address"
			DISASSOCIATE_CMD="ec2-disassociate-address"
			EC2_COMMANDS="$ASSOCIATE_CMD $DISASSOCIATE_CMD"
			;;
		"aws")
			ASSOCIATE_CMD="aws associate-address"
			DISASSOCIATE_CMD="aws disassociate-address"
			EC2_COMMANDS="aws"
			;;
		"euca-2ools")
			ASSOCIATE_CMD="euca-associate-address"
			DISASSOCIATE_CMD="euca-disassociate-address"
			EC2_COMMANDS="euca-associate-address euca-disassociate-address"
			;;
		*) ocf_log error "EC2 ERROR: Invalid api tools flavor: $OCF_RESKEY_api_tools" && exit $OCF_ERR_CONFIGURED
			;;
	esac
	ec2ip_load_credentials
	EC2_COMMANDS="$EC2_COMMANDS ec2metadata"
	for i in $EC2_COMMANDS ; do
		debugger "- Locating command $i: "
		[[ ! -x $(which $i) ]] && ocf_log error "ERROR: Command $i not found/exectuable" && exit $OCF_ERR_INSTALLED
		debugger "found"
	done
}

# Running <=> the instance's current public IPv4 equals the managed address.
ec2ip_monitor() {
	debugger "monitor"
	ec2ip_metadata
	[[ $EC2_PUBLIC_IP == $OCF_RESKEY_address ]] && debugger "Running" && return $OCF_SUCCESS
	debugger "Not running"
	return $OCF_NOT_RUNNING
}
###### GENERIC
# Detach the elastic IP.  For iptype=vpc the association id must be looked
# up first (euca-2ools: column 6 of euca-describe-addresses); classic IPs
# are disassociated by address directly.
ec2ip_disassociate() {
	debugger "ec2ip_disassociate: $DISASSOCIATE_CMD"
	if [ "$OCF_RESKEY_iptype" == "vpc" ]; then
		ec2ip_metadata
		case $OCF_RESKEY_api_tools in
			"euca-2ools")
				l_cols=( `euca-describe-addresses --show-empty-fields --filter instance-id=$EC2_INSTANCE_ID $OCF_RESKEY_address` );
				IP_eipassoc=${l_cols[5]}
				;;
		esac
		$DISASSOCIATE_CMD --association-id $IP_eipassoc
	else
		$DISASSOCIATE_CMD $OCF_RESKEY_address
	fi
	if [ $? != 0 ]; then
		debugger "- failed"
		return $OCF_ERR_GENERIC
	fi
	debugger "- success"
	return $OCF_SUCCESS
}

# Attach the elastic IP to this instance.  For iptype=vpc the allocation id
# is looked up first (euca-2ools: column 5); classic IPs are associated by
# address.
ec2ip_associate() {
	debugger "ec2ip_associate: $ASSOCIATE_CMD"
	if [ "$OCF_RESKEY_iptype" == "vpc" ]; then
		ec2ip_metadata
		case $OCF_RESKEY_api_tools in
			"euca-2ools")
				l_cols=( `euca-describe-addresses --show-empty-fields $OCF_RESKEY_address` );
				IP_eipalloc=${l_cols[4]}
				;;
		esac
		$ASSOCIATE_CMD -i $EC2_INSTANCE_ID --allocation-id $IP_eipalloc
	else
		$ASSOCIATE_CMD -i $EC2_INSTANCE_ID $OCF_RESKEY_address
	fi
	if [ $? != 0 ]; then
		debugger "- failed, rc: $?"
		return $OCF_ERR_GENERIC
	fi
	debugger "-success"
	return $OCF_SUCCESS
}
# OCF stop: disassociate the IP, then poll (up to AWS_TIMEOUT seconds) until
# the instance metadata no longer reports the managed address.
ec2ip_stop() {
	ocf_log info "EC2: Bringing down elastic ip $OCF_RESKEY_address"
	local i
	ec2ip_monitor
	[[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "EC2: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS
	ocf_log info "EC2: Sending request to AWS via $OCF_RESKEY_api_tools"
	ec2ip_disassociate
	[[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC
	i=0
	ocf_log info "EC2: API request sent, waiting for IP to go down."
	debugger "Stop loop"
	while [[ $i -le $AWS_TIMEOUT ]] ; do
		ec2ip_monitor
		if [ $? == $OCF_NOT_RUNNING ]; then
			ocf_log info "EC2: Successfully brought down $OCF_RESKEY_address"
			return $OCF_SUCCESS
		fi
		sleep 1
		i=$[$i+1]
	done
	ocf_log error "EC2: ERROR timeout reached ($AWS_TIMEOUT) while waiting for IP to get released."
	return $OCF_ERR_GENERIC
}

# OCF start: associate the IP, then poll (up to AWS_TIMEOUT seconds) until
# the instance metadata reports the managed address.
ec2ip_start() {
	local i
	ocf_log info "EC2: Starting elastic ip $OCF_RESKEY_address"
	ec2ip_monitor
	[[ $? == $OCF_SUCCESS ]] && ocf_log info "EC2: $OCF_RESKEY_address already started" && return $OCF_SUCCESS
	ocf_log info "EC2: Sending request to AWS via $OCF_RESKEY_api_tools"
	ec2ip_associate
	[[ $? != 0 ]] && echo "ERROR: Received $? from 'aws'" && return $OCF_ERR_GENERIC
	i=0
	ocf_log info "EC2: API request sent, waiting for IP."
	while [[ $i -le $AWS_TIMEOUT ]] ; do
		ec2ip_monitor
		[[ $? == $OCF_SUCCESS ]] && return $?
		sleep 1
		i=$[$i+1]
	done
	ocf_log error "EC2: ERROR timeout reached ($AWS_TIMEOUT sec) while waiting for IP"
	return $OCF_ERR_GENERIC
}

# OCF action dispatch; start/stop validate first.  Unknown actions report
# OCF_ERR_UNIMPLEMENTED per the resource-agent API.
case $__OCF_ACTION in
	meta-data) metadata
		exit $OCF_SUCCESS;;
	monitor)
		ec2ip_monitor;;
	stop)
		ec2ip_validate && ec2ip_stop;;
	validate-all) ec2ip_validate;;
	start)
		ec2ip_validate && ec2ip_start;;
	*) exit $OCF_ERR_UNIMPLEMENTED;;
esac
exit $?
| true
|
79c0fdc2e499d85c8194ce303055916bac7a0487
|
Shell
|
pkgw/conda-recipes
|
/dockerfiles/forge-builder/build.sh
|
UTF-8
| 420
| 2.96875
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#! /bin/bash
# Copyright 2015-2019 Peter Williams
# Licensed under the MIT License.
# Build the "forge-builder" docker image from the Dockerfile next to this
# script, passing the invoking user's uid/gid as build args; exec replaces
# this shell with the docker process so its exit status propagates.

set -e

# XXXX: errors building the image related to resource limits, unless I change
# the user/group ID away from those of my user account? Mysterious but I worked
# around by just replace $(id -u) with 1111, etc.

exec docker build --network=host --build-arg=EXTUSERID=$(id -u) --build-arg=EXTGRPID=$(id -g) -t forge-builder $(dirname $0)
| true
|
5baf48432a09cf0fb1c766f5a0369d1375a6c38b
|
Shell
|
qgg-lab/metazExp-pipeline
|
/005-tag-exp-Stdy-to-comTrspts/0000.job1.tag.exp.stdy.to.cmbTrspts.sb
|
UTF-8
| 1,263
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Tag experiment/tissue/study information onto combined transcripts by
# driving the perl helper with the comparison-result list, sample info and
# transcript-origin mapping produced by step 004.
source ../../00-config/commonEnvironmentVars.cfg
#source $commonEnvir

export currDir=`pwd`
export combinedAssemblyList=$currDir/../004-combine-assemblies-and-annos/cutoff.info.of.assembled.experiment.tsv
export combinedGtf=$currDir/../004-combine-assemblies-and-annos/$finalCmbTrsptAnnGtfFile
export sampleInfoFile=$currDir/../004-combine-assemblies-and-annos/filtered.alignment.info.of.assembled.experiment.tsv

# The :<<block ... block construct below is a no-op heredoc fed to ':' --
# i.e. the pipeline that originally derived originNameMappingTrsptId.tsv is
# commented out and the pre-existing file is reused.
:<<block
grep -P "\ttranscript\t" $combinedGtf \
| awk -F '\t' '{print $2"; "$9}' \
| awk -F ';' '{s=$1;for(i=2;i<=NF;i++){if(match($i, "transcript_id")){s=s$i}};print s;}' \
| awk -F ' ' '{print $1"\t"$3}' \
| sed -n 's/^\(.*\)\t"\(.*\)"$/\1\t\2/p' \
> $currDir/originNameMappingTrsptId.tsv \
2> $currDir/log.e.tag.trspt.origin
block

export currentAssemblyLinkDir=$currDir/assemblyDir
mkdir -p $currentAssemblyLinkDir

perl $customizedSoftwareDir/tag.experiment.tissue.study.info.to.combined.trspts.pl.20191016 \
--cmpRltListFile $currDir/compared.Rlt.list.txt \
--sampleInfoFile $sampleInfoFile \
--trsptOriginFile $currDir/originNameMappingTrsptId.tsv \
--outputTrsptOrignTissueExptListFile $currDir/trspt.with.orign.tissue.exptList.tsv \
--outputTrsptOrignStudyExptListFile $currDir/trspt.with.orign.study.exptList.tsv
| true
|
fe4049a90bb7d6bae1096b4c80ddf7a92e7bd0fc
|
Shell
|
hakuna0829/next-abcam-beta-version-infra
|
/tools/scripts/deploy/helm.sh
|
UTF-8
| 2,197
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy a Helm chart to an EKS cluster (auth via aws eks update-kubeconfig).
set -euo pipefail

# Resolve the directory containing this script so sibling helpers can be sourced.
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null 2>&1 && pwd )"
source ${THIS_DIR}/log.sh

# Print CLI usage to stderr (log_warning comes from log.sh).
function help {
  log_warning "Usage: helm.sh [ -v ] EKS_CLUSTER_NAME HELM_CHART_DIR HELM_RELEASE_NAME HELM_VALUES_FILE HELM_ARGS"
  log_warning "   EKS_CLUSTER_NAME: name of the EKS cluster"
  log_warning "   HELM_CHART_DIR: directory where Helm chart is stored"
  log_warning "   HELM_RELEASE_NAME: name of the helm release"
  log_warning "   HELM_VALUES_FILE: helm variable file path relative to HELM_CHART_DIR"
  log_warning "   HELM_ARGS: helm additional args. i.e. '--set var1=value1 --set var2=val2'"
}
# Validate and capture the positional parameters into globals.
# $1 cluster, $2 chart dir, $3 release name, $4 values file; HELM_SET_ARGS
# is taken from the environment (defaults to empty).
function getArgs {
  # BUGFIX: validate the argument count *before* dereferencing ${1..4}.
  # The script runs under `set -u`, so the original assignments aborted
  # with "unbound variable" before the usage message could print; also
  # `[[ $# < 4 ]]` compared lexicographically, not numerically.
  if [[ $# -lt 4 ]]; then
    log_error "Missing mandatory params: '$0 $@'";
    help;
    exit 1;
  fi
  EKS_CLUSTER_NAME=${1}
  HELM_CHART_DIR=${2}
  HELM_RELEASE_NAME=${3}
  HELM_VALUES_FILE=${4}
  HELM_SET_ARGS=${HELM_SET_ARGS:-""}
}
# Install the helm and awscli binaries via the sibling helper scripts.
function install-deps {
  source ${THIS_DIR}/install-helm.sh
  source ${THIS_DIR}/install-awscli.sh
}

# In CI, write a kubeconfig for the target EKS cluster, assuming the
# deployer role (DEPLOYER_ROLE_ARN).  Outside CI the existing kubeconfig
# is used as-is.
function auth-k8s {
  if [[ "${CI}" == "true" ]]; then
    ROLE_ARN=${DEPLOYER_ROLE_ARN}
    role_arn_args="--role-arn ${ROLE_ARN}"
    aws eks \
      --region eu-west-1 \
      update-kubeconfig \
      --name ${EKS_CLUSTER_NAME} \
      ${role_arn_args}
  fi
}

# helm upgrade --install the release into the fixed "yeti" namespace and
# list the namespace's releases as JSON.  ${HELM_SET_ARGS} is intentionally
# unquoted so multiple --set arguments word-split into separate args.
function deploy {
  APP_NAMESPACE="yeti";
  log_header "Deploying Helm chart ${HELM_RELEASE_NAME}"
  log_debug "helm upgrade --install --wait --create-namespace --namespace ${APP_NAMESPACE} ${HELM_RELEASE_NAME} ${HELM_CHART_DIR} --values ${HELM_CHART_DIR}/${HELM_VALUES_FILE} ${HELM_SET_ARGS}"
  helm upgrade --install \
    --wait \
    --create-namespace \
    --namespace ${APP_NAMESPACE} \
    ${HELM_RELEASE_NAME} \
    ${HELM_CHART_DIR} \
    --values ${HELM_CHART_DIR}/${HELM_VALUES_FILE} \
    ${HELM_SET_ARGS}
  helm -n ${APP_NAMESPACE} list --all \
    --output json
}

function main {
  install-deps
  auth-k8s
  deploy
}

# Only run when executed directly (not when sourced, e.g. by tests).
if [ "${0}" = "${BASH_SOURCE}" ]; then
  getArgs $@
  main;
fi
|
5c0646e5c67def7234f0136d059859ca262e82b3
|
Shell
|
voltch/ramdisk
|
/ramdisk/res/synapse/json.gen.logs
|
UTF-8
| 1,260
| 2.796875
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Emit the Synapse JSON definition for the kernel-tuning "Logs" tab
# (save-log buttons plus the Android logger mode selector).
# The heredoc delimiter is unquoted, but the body contains no shell
# expansions, so the JSON below is printed verbatim.
cat << CTAG
{
name:Logs,
elements:[
{ SDescription:{
description:"To report any issue with the kernel, you should share the zip file of logs created by using below button. This will put logcat, dmesg, last_kmsg & other relevant information into one zip file which will be used only for debug purposes."
}},
{ SSpacer:{
height:1
}},
{ SButton:{
label:"Save All Logs",
action:"log alllogs"
}},
{ SSpacer:{
height:2
}},
{ SPane:{
title:"Android Logger",
description:"You must set 'Enabled' or 'Auto-Suspend' to save the latest logcat (app/system debug output) to file. Auto-Suspend mode will enable logcat whilst screen-ON & auto disable whilst screen-OFF."
}},
{ SSpacer:{
height:1
}},
{ SOptionList:{
title:"Enable Android logger",
description:" ",
default:2,
action:"generic /sys/module/logger/parameters/log_mode",
values:{ 0:"Enabled", 1:"Auto-Suspend", 2:"Disabled"
}
}},
{ SSpacer:{
height:1
}},
{ SButton:{
label:"Save logcat",
action:"log logcat"
}},
{ SSpacer:{
height:1
}},
{ SButton:{
label:"Save dmesg",
action:"log dmesg"
}},
{ SSpacer:{
height:1
}},
{ SButton:{
label:"Save last_kmsg",
action:"log kmsg"
}},
{ SSpacer:{
height:1
}},
]
}
CTAG
| true
|
25ebb5bda83c4b14cd4a442105c887b70d81e472
|
Shell
|
zundra/vagrantbootstrap
|
/1204/scala.sh
|
UTF-8
| 884
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: install Scala 2.11.6 and sbt 0.13.6 from .deb
# packages, then reconcile dependencies through apt.

echo "===================================================================="
echo "======================= Installing Scala ==========================="
echo "===================================================================="
sudo dpkg --purge scala
sudo dpkg --purge sbt
SCALA_ARCHIVE=scala-2.11.6.deb
SBT_ARCHIVE=sbt-0.13.6.deb
wget http://www.scala-lang.org/files/archive/$SCALA_ARCHIVE
sudo dpkg -i $SCALA_ARCHIVE
sudo apt-get update
# BUGFIX: -y added -- without it apt-get prompts for confirmation and a
# non-interactive vagrant provision hangs forever.
sudo apt-get install -y scala
wget http://dl.bintray.com/sbt/debian/$SBT_ARCHIVE
sudo dpkg -i $SBT_ARCHIVE
sudo apt-get update
sudo apt-get install -y sbt
rm -f $SCALA_ARCHIVE
rm -f $SBT_ARCHIVE
echo "==================================================================="
echo "===================== Scala Install Complete ======================"
echo "==================================================================="
echo
echo
| true
|
146509929dfa79c78a64e90595b4a453afc4b757
|
Shell
|
fladna9/PKCS11Explorer
|
/build.sh
|
UTF-8
| 1,234
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build self-contained PKCS11Explorer release zips for each runtime target
# via `dotnet publish`.

# VARIABLES
TARGETS=( "win-x64" "osx-x64" "linux-x64" "linux-arm")
RELEASEDIR="./release"
BASENAME="PKCS11Explorer"
BUILDTYPE="Release"
VERSIONNUMBER="0"   # 0 = auto-detect from App.xaml.cs in GetCurrentVersion

# START OF CODE
function Main()
{
	Setup
	#VerifyGitStatus
	GetCurrentVersion
	echo "Building version $VERSIONNUMBER"
	Build
}

# Ensure the release output directory exists.
function Setup()
{
	if [[ ! -d $RELEASEDIR ]];
	then
		mkdir $RELEASEDIR
	fi
}

# Abort if the working tree has uncommitted changes (currently disabled in Main).
function VerifyGitStatus()
{
	if ! git diff-index --quiet HEAD --; then
		echo "Seems that some changes have not been commited. Please commit and try again."
		exit 99
	fi
}

# Extract the version string from the marker line in App.xaml.cs
# (second double-quoted field) unless VERSIONNUMBER was set explicitly.
function GetCurrentVersion()
{
	if [[ $VERSIONNUMBER == "0" ]];
	then
		VERSIONNUMBER=`grep "ThisIsForScriptToFindVersionNumber" PKCS11Explorer/App.xaml.cs |cut -d"\"" -f2`
	fi
}

# For each target: publish self-contained, zip the output folder into
# $RELEASEDIR, then delete the unzipped folder.
function Build()
{
	for TARGET in ${TARGETS[@]}; do
		echo "Building target $TARGET"
		CURRENTDIRNAME="$BASENAME"_"$BUILDTYPE"_"$TARGET"_"$VERSIONNUMBER"
		CURRENTOUTPUTDIR="$RELEASEDIR"/$CURRENTDIRNAME
		echo Output is $CURRENTOUTPUTDIR
		mkdir $CURRENTOUTPUTDIR
		dotnet publish -v m -r $TARGET -c $BUILDTYPE --self-contained true -f netcoreapp2.1 -o "`pwd`"/"$CURRENTOUTPUTDIR"/
		cd "$RELEASEDIR" && zip -r "$CURRENTDIRNAME".zip $CURRENTDIRNAME && cd -
		rm -rf $CURRENTOUTPUTDIR
	done
}

Main
| true
|
9bd3af9de9389a9259c5f4311fe6dcf9a102388b
|
Shell
|
loigu/termuxAndroidHelpers
|
/bin/join-video.sh
|
UTF-8
| 526
| 3.515625
| 4
|
[] |
no_license
|
#! /bin/bash
# join video files together
# Remux each input to an MPEG-TS intermediate, then concatenate them via the
# ffmpeg "concat:" protocol into <out>.  extra=y in the environment injects
# extra ffmpeg args (deliberately unquoted so it word-splits).

if [ -z "${1:-}" -o "$#" -lt 3 ]; then
	echo "extra=y $0 [-k] <out> <first> <second> [...]"
	exit 0
fi

[ "$1" = "-k" ] && keep=1 && shift
out="$1"
shift

# BUGFIX: the original iterated `in="$(eval echo \$$i)"`, which re-splits
# and glob-expands filenames containing spaces or wildcards; iterate the
# remaining positional parameters directly instead, and collect the .ts
# intermediates in an array so the final cleanup is space-safe too.
ts_files=()
colist=
for src in "$@"; do
	ffmpeg -nostdin ${extra} -i "$src" -c copy -bsf:v h264_mp4toannexb -f mpegts "$src.ts"
	[ -n "${colist}" ] && colist="$colist|"
	colist="$colist$src.ts"
	ts_files+=("$src.ts")
done

ffmpeg ${extra} -i "concat:$colist" -c copy -bsf:a aac_adtstoasc "$out"

# Remove the intermediates unless -k (keep) was given.
[ -z "${keep:-}" ] && rm -f -- "${ts_files[@]}"
| true
|
c343750c77ab54ffe27a385c97ef366ed1740318
|
Shell
|
kaelzhan/conda_auto_activate
|
/conda_auto_activate.sh
|
UTF-8
| 1,324
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# conda_auto_activate automatically activates a conda environment when
# enterring a folder that contains a `environment.yml` or `.venv` file.
# The `environment.yml` can exported from a existed conda env and we
# use it to create a new conda env. The first line in the `.venv` file
# is the name of the conda env.
#
# To make this work you have to source conda_auto_activate.sh from your
# ~/.bashrc or ~/.zshrc.

# Activate (creating if needed) the env named by environment.yml, or
# activate the env named on the first line of .venv.  "Already active" is
# approximated by checking whether $PATH contains the env name -- a
# heuristic that can false-positive on substring matches.
function _conda_auto_activate() {
  if [ -e "environment.yml" ]; then
    # echo "environment.yml file found"
    # Env name = second space-separated field of the first line ("name: X").
    ENV=$(head -n 1 environment.yml | cut -f2 -d ' ')

    # Check if you are already in the environment
    if [[ $PATH != *$ENV* ]]; then
      source activate $ENV
      # Check if the environment exists
      if [ $? -eq 0 ]; then
        :
      else
        # Create the environment and activate
        echo "Conda env '$ENV' doesn't exist."
        conda env create -q
        source activate $ENV
      fi
    fi
  elif [ -e ".venv" ]; then
    # echo ".venv file found"
    ENV=$(head -n 1 .venv)

    # Check if you are already in the environment
    if [[ $PATH != *"$ENV"* ]]; then
      source activate $ENV
      # Unlike the yml path, a missing env is reported but not created here.
      if [ $? -eq 0 ]; then
        :
      else
        echo "Conda env '$ENV' doesn't exist."
      fi
    fi
  fi
}

# zsh hook: runs on every directory change.
function chpwd() {
  _conda_auto_activate
}
| true
|
a8c44fb29771086ec43565cec31263dd3a826e83
|
Shell
|
karlpokus/url-stash
|
/urst.sh
|
UTF-8
| 327
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# $1 must be a dir or symlink
# Grep the stash dir for URLs, pick one interactively with peco, open it in
# the default browser.

# BUGFIX: quote $1/$DATADIR -- the unquoted expansions broke on directory
# paths containing whitespace; [[ ]] also avoids the old `test $1` pitfalls.
if [[ $# -ne 1 || ! -d "$1" ]]; then
  echo "error: argument missing or is not a dir. please view readme"
  exit 1
fi
DATADIR="$1"

# pipe urls found in $DATADIR to peco where selected url will be opened in default browser
grep -ir http "$DATADIR" | peco | cut -d : -f 2- | xargs open
| true
|
0a6678824583f3a9ab4cc3e76c6e9714dcf7499a
|
Shell
|
Rombusevil/fgdx-template
|
/setup.sh
|
UTF-8
| 2,200
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a Flixel-GDX project from this template: copy the android/desktop/
# core skeletons, rewrite the template package name, and re-home the .java
# sources under the new package's folder structure.  CLI mode with two args,
# Zenity GUI mode with none.

# ----------------------------#
# CORE------------------------#
# ----------------------------#
function process {
	# Create projects folder structure
	mkdir $project;
	cp -r android $project/$project-android;
	cp -r desktop $project/$project-desktop;
	cp -r core $project/$project;
	cd $project;
	find . -type f -name "*.java" -exec sed -i'' -e 's/com.rombus.evilbones.template/'$package'/g' {} +

	# Create package folder structure
	# head -n1 of every .java yields interleaved "==> path <==" headers and
	# "package x.y.z;" lines; the loop below walks those tokens, remembering
	# the current file ($file/$proj) and moving it under src/<pkg-as-path>.
	for pkg in $(find . -type f -name "*.java" -exec head -n 1 {} +); do
		if [ "$pkg" != "package" ] && [[ "$pkg" != *"=="* ]]; then
			if [[ "$pkg" != *"java"* ]]; then
				folder=$(echo $pkg | sed "s/;$//")
				folder=$(echo $folder | sed "s/\./\//g")
				mkdir -p "$proj/src/$folder"
				mv $file "$proj/src/$folder/"
			else
				file=$(echo "$pkg" | sed "s/^\.\///")
				proj=$(echo "$file" | cut -d'/' -f 1)
			fi
		fi
	done

	# Delete old dirs
	find . -type d -empty -delete
}

# ----------------------------#
# CLI ------------------------#
# ----------------------------#
if [ $# -eq 2 ]; then
	project=$1
	package=$2
	process
	exit 0
elif [ $# -ne 0 ]; then
	echo ""
	echo "Creates a project folder using Flixel-GDX template."
	echo " ./setup.sh <project name> <package name>"
	echo " Run without arguments to launch the GUI (needs Zenity)."
	echo ""
	exit 1
fi

# ----------------------------#
# GUI ------------------------#
# ----------------------------#
title="Flixel-GDX template"
t=0.2

# Prompt with zenity; $1 = prompt text, $2 = optional prefill; echoes input.
function zenityInput {
	local result=$(zenity --entry --title "$title" --text "$1" --entry-text "$2")
	echo $result
}

# Abort with a zenity error dialog ($2) when $1 is empty (dialog cancelled).
function checkCancel {
	if [ -z "$1" ]; then
		zenity --error --title="$title" --text "$2"
		exit $?
	fi
}

project=$(zenityInput "Insert project name:")
checkCancel "$project" "Cannot continue without a project name."
package=$(zenityInput "Insert package name:" "com.rombus.evilbones.")
checkCancel "$package" "Cannot continue without a package name."

process

# Cosmetic progress bar (process already finished above).
(echo "10"; sleep $t;
echo "20"; sleep $t;
echo "35"; sleep $t;
echo "45"; sleep $t;
echo "60"; sleep $t;
echo "75"; sleep $t;
echo "90"; sleep $t) | zenity --progress --title="$title" --ok-label="Done!" --text="Configuring template" --percentage=0
| true
|
007cf1f163c67fe715433dcd7e522a810562ff22
|
Shell
|
ntwairay/bitbucket_server
|
/postgres/setup-bitbucket-db-.sh
|
UTF-8
| 656
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialize the Bitbucket PostgreSQL database inside the container:
# create bitbucketdb and a superuser role "bitbucket", open trusted network
# access for that role in pg_hba.conf, and (optionally) restore a dump from
# /tmp/dumps/bitbucket.dump.

echo "******CREATING Bitbucket DATABASE******"
psql --username postgres <<- EOSQL
CREATE DATABASE bitbucketdb;
CREATE USER bitbucket WITH PASSWORD 'insert_bitbucket_db_password_here';
ALTER USER bitbucket WITH SUPERUSER;
EOSQL
echo ""

# Trust the "bitbucket" role from any address (0.0.0.0/0).
{ echo; echo "host bitbucket bitbucket 0.0.0.0/0 trust"; } >> "$PGDATA"/pg_hba.conf

if [ -r '/tmp/dumps/bitbucket.dump' ]; then
	echo "**IMPORTING Bitbucket DATABASE BACKUP**"
	# Run a temporary postgres in the background just for the import.
	gosu postgres postgres &
	SERVER=$!; sleep 2
	gosu postgres psql bitbucket < /tmp/dumps/bitbucket.dump
	kill $SERVER; wait $SERVER
	echo "**Bitbucket DATABASE BACKUP IMPORTED***"
fi
echo "******Bitbucket DATABASE CREATED******"
| true
|
664972172019cf1cf980438279db52dbecd0ce03
|
Shell
|
lordkev/ssc-imputation
|
/mutation-rates/preprocess-vcfs/get_str_tmrcas_bysample.sh
|
UTF-8
| 972
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Per-sample STR/PSMC processing: compute the mean genome-wide heterozygosity
# from the sample's PSMC "DC" records, derive a per-sample scale factor
# against the genome-wide average (GWAVG), and write a scaled weighted-mean
# value per STR locus to ${TMPDIR}/${SAMPLE}.strpsmc.bed.
source params.sh

SAMPLE=$1
raw=${RAW_PSMC}/${SAMPLE}.psmc
out=${TMPDIR}/${SAMPLE}.strpsmc.bed

echo "Processing PSMC for sample ${SAMPLE} ${raw} ${out}"

# Get weighted average of all intervals overlapping each STR locus
# NOTE!! looks like PSMC numbers are missing last two digits? binned by 100?

# mean genome-wide het: length-weighted mean of column 6 over DC intervals
meanhet=$(cat ${raw} | awk '($1=="DC")' | awk '{print ($4-$3) "\t" ($4-$3)*$6}' | datamash sum 1 sum 2 | awk '{print $2/$1}')
scale=$(echo "${GWAVG}/${meanhet}" | bc -l)
# BUGFIX: the log line referenced ${sample} (lowercase), which is never set
# -- shell variables are case sensitive -- so the sample column in the log
# was always empty.
echo "${SAMPLE} ${meanhet} ${scale}" > ${LOGDIR}/${SAMPLE}.strpsmc.log

cat ${raw} | awk '($1=="DC")' | \
	awk '{print $2 "\t" $3"00" "\t" $4"99" "\t" $6}' | \
	intersectBed -a ${HIPREF} -b stdin -wb -wa | \
	awk '{print $1 "\t" $2 "\t" $3 "\t" ($9-$8) "\t" ($9-$8)*$10}' | \
	sort -k 1,1 -k2,2n | \
	datamash -g1,2,3 sum 4 sum 5 | \
	awk -v"sample=$SAMPLE" -v"scale=${scale}" '{print $1 "\t" $2 "\t" $3 "\t" ($5/$4)*scale "\t" sample}' > ${out}
| true
|
7415305374b35858dc199b31e34fadc69b538bcd
|
Shell
|
devs-in-the-cloud/action-deploy-wordpress
|
/main.sh
|
UTF-8
| 6,420
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy a WordPress site to hosts described in .github/hosts.yml.
hosts_file="$GITHUB_WORKSPACE/.github/hosts.yml"
export PATH="$PATH:$COMPOSER_HOME/vendor/bin"
export PROJECT_ROOT="$(pwd)"
export HTDOCS="$HOME/htdocs"
# Strip everything up to and including "heads/" to get the bare branch name.
export GITHUB_BRANCH=${GITHUB_REF##*heads/}
export CI_SCRIPT_OPTIONS="ci_script_options"
function init_checks() {
  # Validate the environment before doing any work.

  # A branch ref is mandatory.
  if [[ -z "$GITHUB_REF" ]]; then
    echo "\$GITHUB_REF is not set"
    exit 1
  fi

  # Jump-host deployments require an explicit private key; vault-signed
  # certificates are not supported through a jump host.
  if [[ -n "$JUMPHOST_SERVER" && -z "$SSH_PRIVATE_KEY" ]]; then
    echo "Jump host configuration does not work with vault ssh signing."
    echo "SSH_PRIVATE_KEY secret needs to be added."
    echo "The SSH key should have access to the server as well as jumphost."
    exit 1
  fi

  # GitHub marks the push event with "deleted": true when a branch is
  # removed; exit 78 (neutral) so no deployment happens.
  if [[ "true" == $(jq --raw-output .deleted "$GITHUB_EVENT_PATH") ]]; then
    echo 'Branch deletion trigger found. Skipping deployment.'
    exit 78
  fi
}
function setup_hosts_file() {
  # Copy the repository's hosts.yml to /hosts.yml (where the deployer
  # expects it) and echo its contents into the build log.
  # Setup hosts file
  rsync -av "$hosts_file" /hosts.yml
  cat /hosts.yml
}
function check_branch_in_hosts_file() {
  # Exit with GitHub's "neutral" status (78) unless the pushed branch is
  # one of the top-level keys of hosts.yml.
  match=0
  for branch in $(cat "$hosts_file" | shyaml keys); do
    if [[ "$GITHUB_REF" = "refs/heads/$branch" ]]; then
      echo "$GITHUB_REF matches refs/heads/$branch"
      match=1
    fi
  done

  # No branch matched: nothing to deploy.
  if [[ "$match" -eq 0 ]]; then
    echo "$GITHUB_REF does not match with any given branch in 'hosts.yml'"
    exit 78
  fi
}
function setup_private_key() {
  # Install the user-provided SSH key (if any) into a fresh agent, or
  # generate a throwaway key pair (whose public half can later be signed).
  if [[ -n "$SSH_PRIVATE_KEY" ]]; then
    # Strip CR characters in case the secret was pasted with CRLF endings.
    echo "$SSH_PRIVATE_KEY" | tr -d '\r' > "$SSH_DIR/id_rsa"
    chmod 600 "$SSH_DIR/id_rsa"
    eval "$(ssh-agent -s)"
    ssh-add "$SSH_DIR/id_rsa"

    # Pre-trust the jump host so ProxyJump connections do not prompt.
    if [[ -n "$JUMPHOST_SERVER" ]]; then
      ssh-keyscan -H "$JUMPHOST_SERVER" >> /etc/ssh/known_hosts
    fi
  else
    # Generate a key-pair
    # NOTE(review): this branch writes to $HOME/.ssh while the branch above
    # uses $SSH_DIR; the caller sets SSH_DIR="$HOME/.ssh" so they coincide.
    ssh-keygen -t rsa -b 4096 -C "GH-actions-ssh-deploy-key" -f "$HOME/.ssh/id_rsa" -N ""
  fi
}
function maybe_get_ssh_cert_from_vault() {
  # Optionally authenticate to Vault and fetch a signed SSH certificate.

  # Log in with the GitHub token when provided; drop any stale token first.
  if [[ -n "$VAULT_GITHUB_TOKEN" ]]; then
    unset VAULT_TOKEN
    vault login -method=github token="$VAULT_GITHUB_TOKEN" > /dev/null
  fi

  # When a Vault address is configured, have Vault sign our public key.
  if [[ -n "$VAULT_ADDR" ]]; then
    vault write -field=signed_key ssh-client-signer/sign/my-role \
      public_key=@$HOME/.ssh/id_rsa.pub > $HOME/.ssh/signed-cert.pub
  fi
}
function configure_ssh_config() {
  # Write a system-wide ssh_config entry for the target host, optionally
  # routed through a jump host. Uses $hostname/$ssh_user set by
  # setup_ssh_access.
  if [[ -z "$JUMPHOST_SERVER" ]]; then
    # Create ssh config file. `~/.ssh/config` does not work.
    cat > /etc/ssh/ssh_config <<EOL
Host $hostname
HostName $hostname
IdentityFile ${SSH_DIR}/signed-cert.pub
IdentityFile ${SSH_DIR}/id_rsa
User $ssh_user
EOL
  else
    # Create ssh config file. `~/.ssh/config` does not work.
    # Route the connection through the jump host via ProxyJump.
    cat > /etc/ssh/ssh_config <<EOL
Host jumphost
HostName $JUMPHOST_SERVER
UserKnownHostsFile /etc/ssh/known_hosts
User $ssh_user
Host $hostname
HostName $hostname
ProxyJump jumphost
UserKnownHostsFile /etc/ssh/known_hosts
User $ssh_user
EOL
  fi
}
function setup_ssh_access() {
  # Read the branch's target host and SSH user from hosts.yml, then
  # prepare keys, optional Vault certs, and the ssh config.
  # get hostname and ssh user
  export hostname=$(cat "$hosts_file" | shyaml get-value "$GITHUB_BRANCH.hostname")
  export ssh_user=$(cat "$hosts_file" | shyaml get-value "$GITHUB_BRANCH.user")
  printf "[\e[0;34mNOTICE\e[0m] Setting up SSH access to server.\n"
  # SSH_DIR is used by setup_private_key and configure_ssh_config below.
  SSH_DIR="$HOME/.ssh"
  mkdir -p "$SSH_DIR"
  chmod 700 "$SSH_DIR"
  setup_private_key
  maybe_get_ssh_cert_from_vault
  configure_ssh_config
}
function maybe_install_submodules() {
  # Check and update submodules if any
  if [[ -f "$GITHUB_WORKSPACE/.gitmodules" ]]; then
    # add github's public key
    echo "|1|qPmmP7LVZ7Qbpk7AylmkfR0FApQ=|WUy1WS3F4qcr3R5Sc728778goPw= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" >> /etc/ssh/known_hosts

    # Use a dedicated deploy key for submodule checkout when provided.
    # NOTE(review): ssh-add needs a running agent; one is only started in
    # setup_private_key's SSH_PRIVATE_KEY branch - confirm ordering.
    identity_file=''
    if [[ -n "$SUBMODULE_DEPLOY_KEY" ]]; then
      echo "$SUBMODULE_DEPLOY_KEY" | tr -d '\r' > "$SSH_DIR/submodule_deploy_key"
      chmod 600 "$SSH_DIR/submodule_deploy_key"
      ssh-add "$SSH_DIR/submodule_deploy_key"
      identity_file="IdentityFile ${SSH_DIR}/submodule_deploy_key"
    fi

    # Setup config file for proper git cloning
    cat >> /etc/ssh/ssh_config <<EOL
Host github.com
HostName github.com
User git
UserKnownHostsFile /etc/ssh/known_hosts
${identity_file}
EOL
    git submodule update --init --recursive
  fi
}
function setup_wordpress_files() {
  # Assemble the WordPress tree under $HTDOCS: download core, overlay the
  # repository as wp-content, apply optional webroot files and mu-plugins.
  mkdir -p "$HTDOCS"
  cd "$HTDOCS"
  export build_root="$(pwd)"

  hosts_wp_version=$(cat "$hosts_file" | shyaml get-value "$GITHUB_BRANCH.WP_VERSION" 2> /dev/null)
  # Check if WP_VERSION is already defined in hosts.yml
  # Priority: 1. hosts.yml, 2. workflow file, else use latest
  if [[ -n $hosts_wp_version ]]; then
    WP_VERSION="$hosts_wp_version"
  elif [[ -z $WP_VERSION ]]; then
    WP_VERSION="latest"
  fi

  # Optionally bump a pinned version to its latest minor/security release
  # as reported by the wordpress.org version-check API.
  if [[ "$WP_MINOR_UPDATE" == "true" ]] && [[ "$WP_VERSION" != "latest" ]]; then
    LATEST_MINOR_VERSION=$(\
      curl -s "https://api.wordpress.org/core/version-check/1.7/?version=$WP_VERSION" | \
      jq -r '[.offers[]|select(.response=="autoupdate")][-1].version'
    )
    # assumes x.y.z versions, so chars 1-3 give the "x.y" prefix
    # -- TODO confirm behavior for double-digit minors (e.g. 5.10)
    MAJOR_DOT_MINOR=$(echo "$WP_VERSION" | cut -c1-3)
    if [[ "$LATEST_MINOR_VERSION" == "$MAJOR_DOT_MINOR"* ]]; then
      WP_VERSION="$LATEST_MINOR_VERSION"
      echo "Using $LATEST_MINOR_VERSION as the latest minor version."
    else
      echo "$WP_VERSION is the latest minor version."
    fi
  fi

  wp core download --version="$WP_VERSION" --allow-root
  # The repository supplies wp-content, so drop the bundled one.
  rm -r wp-content/

  # Include webroot-files in htdocs if they exists
  if [[ -d "$GITHUB_WORKSPACE/webroot-files" ]]; then
    rsync -av "$GITHUB_WORKSPACE/webroot-files/" "$HTDOCS/" > /dev/null
    rm -rf "$GITHUB_WORKSPACE/webroot-files/"
  fi

  rsync -av "$GITHUB_WORKSPACE/" "$HTDOCS/wp-content/" > /dev/null

  # Remove uploads directory
  cd "$HTDOCS/wp-content/"
  rm -rf uploads

  # Setup mu-plugins if VIP
  if [[ -n "$MU_PLUGINS_URL" ]]; then
    if [[ "$MU_PLUGINS_URL" = "vip" ]]; then
      MU_PLUGINS_URL="https://github.com/Automattic/vip-mu-plugins-public"
    fi
    MU_PLUGINS_DIR="$HTDOCS/wp-content/mu-plugins"
    echo "Cloning mu-plugins from: $MU_PLUGINS_URL"
    git clone -q --recursive --depth=1 "$MU_PLUGINS_URL" "$MU_PLUGINS_DIR"
  fi
}
function deploy() {
  # Run Deployer ("dep") for the current branch from the workspace root.
  # Fail hard if the workspace is missing instead of silently running
  # "dep deploy" from whatever the current directory happens to be.
  cd "$GITHUB_WORKSPACE" || exit 1
  dep deploy "$GITHUB_BRANCH"
}
function main() {
  # Deployment pipeline: validate env, prepare hosts/SSH/sources, deploy.
  init_checks
  setup_hosts_file
  check_branch_in_hosts_file
  setup_ssh_access
  maybe_install_submodules
  setup_wordpress_files
  deploy
}
main
| true
|
e212c7e291a6abdb0d3559929d81fd3679171dfb
|
Shell
|
xsystems/windhappers-cms
|
/build.sh
|
UTF-8
| 1,163
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and tag Docker images for the windhappers CMS API and GUI.
# VERSION may be supplied via the environment; defaults to "latest".
VERSION=${VERSION:-latest}

BUILD_DIR_API=build-api
BUILD_DIR_GUI=build

# Recreate the API staging directory from scratch.
# --force fixes the first-run failure: plain "rm --recursive" errors out
# when the directory does not exist yet.
rm --recursive --force "${BUILD_DIR_API}"
mkdir --parents "${BUILD_DIR_API}"
cp --recursive \
  package.json \
  favicon.ico \
  api \
  components \
  config \
  extensions \
  public \
  "${BUILD_DIR_API}"

docker build \
  --file docker/api/Dockerfile \
  --tag "xsystems/windhappers-cms-api:${VERSION}" \
  "${BUILD_DIR_API}"
docker tag \
  "xsystems/windhappers-cms-api:${VERSION}" \
  xsystems/windhappers-cms-api:latest

npm install

# Build the GUI once per target environment, each pointing at its own
# API endpoint (prd uses the bare "api" subdomain).
for ENVIRONMENT in prd acc dev ; do
  case ${ENVIRONMENT} in
    prd) API_SUB_DOMAIN="api" ;;
    *) API_SUB_DOMAIN="api.${ENVIRONMENT}" ;;
  esac

  NODE_ENV=production \
  CMS_API_EXTERNAL_URL="https://${API_SUB_DOMAIN}.windhappers.nl" \
  CMS_GUI_EXTERNAL_PATH="/" \
  npm run build

  cp favicon.ico \
    docker/gui/httpd-windhappers-cms-gui.conf \
    "${BUILD_DIR_GUI}"

  docker build \
    --file docker/gui/Dockerfile \
    --tag "xsystems/windhappers-cms-gui:${VERSION}-${ENVIRONMENT}" \
    "${BUILD_DIR_GUI}"
  docker tag \
    "xsystems/windhappers-cms-gui:${VERSION}-${ENVIRONMENT}" \
    "xsystems/windhappers-cms-gui:latest-${ENVIRONMENT}"
done
| true
|
4cc4b2c246937045e8a7d7ffa02282ec7531a11f
|
Shell
|
dwxie/salt
|
/state/prod/application/cs/storage/files/control.sh
|
UTF-8
| 1,282
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Service control script for the CStorage daemon (start/stop/status).
# Installation directory containing the CStorage binary.
storage_path="/opt/storage"
start() {
    # Launch the CStorage daemon and verify the expected process count.
    # $1: expected number of CStorage processes (default 6).
    local expected="${1:-6}"

    cd "${storage_path}"
    echo -n "Start CStorage ... "
    ./CStorage > /dev/null 2>&1 &
    sleep 1

    if [ "$(status)" == "${expected}" ];
    then
        echo '[OK]'
        return 0
    fi
    echo '[FAILED]'
    return 2
}
stop() {
    # Terminate all CStorage processes, retrying up to three attempts
    # before reporting failure.
    # $1: current attempt number (internal; defaults to 1).
    local attempt="${1:-1}"

    cd "${storage_path}"
    # Only print the banner on the first attempt.
    if [ "${attempt}" -eq 1 ];
    then
        echo -n "Stop CStorage ... "
    fi

    killall CStorage
    sleep 1

    if [ "$(status)" == 0 ];
    then
        echo '[OK]'
        return 0
    fi

    if [ "${attempt}" -le 2 ];
    then
        # Some processes survived; recurse for another attempt.
        stop "$((attempt + 1))"
    else
        echo '[FAILED]'
        return 2
    fi
}
status() {
    # Print the number of running CStorage processes.
    # The "[C]Storage" pattern cannot match grep's own command line, so the
    # old "grep -v grep | wc -l" stages are unnecessary; grep -c yields the
    # same count with one process instead of three.
    local proc_num
    proc_num="$(ps -eH | grep -c "[C]Storage")"
    echo "${proc_num}"
}
status_detail() {
    # Print the full ps lines (hierarchical view) of CStorage processes.
    # "[C]Storage" avoids matching the grep command itself, replacing the
    # old "grep CStorage | grep -v grep" pair.
    local procs
    procs="$(ps -eH | grep "[C]Storage")"
    echo "${procs}"
}
# Optional second argument overrides the expected process count for start.
proc_num="${2:-6}"

# Dispatch on the requested sub-command; unknown commands exit 255.
case "${1}" in
    start) start "${proc_num}" ;;
    stop) stop ;;
    status) status ;;
    status_detail) status_detail ;;
    *) exit 255 ;;
esac
| true
|
717376b085c59dfdc05dd71daf334ccfef442d21
|
Shell
|
trashSydowDev/dotfiles-3
|
/runcom/.bash_profile
|
UTF-8
| 2,277
| 3.546875
| 4
|
[] |
no_license
|
# If not running interactively, don't do anything
#
[ -z "$PS1" ] && return
# Shell
# Flags consumed by the sourced dotfiles to branch on shell type.
SHELL_BASH=true
SHELL_ZSH=false
# OS
if [ "$(uname -s)" = "Darwin" ]; then
    OS="OSX"
else
    OS=$(uname -s)
fi
# Resolve DOTFILES_DIR (assuming ~/.dotfiles on distros without readlink and/or $BASH_SOURCE/$0)
READLINK=$(which greadlink || which readlink)
if $SHELL_BASH; then
    CURRENT_SCRIPT=$BASH_SOURCE
else
    CURRENT_SCRIPT=$0
fi
if [[ -n $CURRENT_SCRIPT && -x "$READLINK" ]]; then
    SCRIPT_PATH=$($READLINK -n "$CURRENT_SCRIPT")
    DOTFILES_DIR=$(dirname "$(dirname "$SCRIPT_PATH")")
elif [ -d "$HOME/.dotfiles" ]; then
    DOTFILES_DIR="$HOME/.dotfiles"
else
    echo "Unable to find dotfiles, exiting."
    return # `exit 1` would quit the shell itself
fi
# Finally we can source the dotfiles (order matters)
for DOTFILE in "$DOTFILES_DIR"/system/.{env,grep,prompt}; do
    [ -f "$DOTFILE" ] && . "$DOTFILE"
done
if [ "$OS" = "OSX" ]; then
    for DOTFILE in "$DOTFILES_DIR"/system/.{env,}.osx; do
        [ -f "$DOTFILE" ] && . "$DOTFILE"
    done
fi
if $SHELL_BASH; then
    for DOTFILE in "$DOTFILES_DIR"/system/.*.bash; do
        [ -f "$DOTFILE" ] && . "$DOTFILE"
    done
fi
# Hook for extra/custom stuff
EXTRA_DIR="$HOME/.extra"
if [ -d "$EXTRA_DIR" ]; then
    for EXTRAFILE in "$EXTRA_DIR"/runcom/*.sh; do
        [ -f "$EXTRAFILE" ] && . "$EXTRAFILE"
    done
fi
# Source git autocomplete
# NOTE(review): unguarded source - errors if ~/git-completion.bash is absent.
source ~/git-completion.bash
# Source aditional functions
source $DOTFILES_DIR/terminal/colors.sh
# Terminal settings
terminal_set_foreground_color white
terminal_set_background_color SuperGray
terminal_set_font "Andale Mono" 16
# Vim
# Copies (not symlinks) the vim config on every login shell start.
cp $DOTFILES_DIR/vim/vimrc ~/.vimrc
cp $DOTFILES_DIR/vim/vundle.vim ~/.vim
# Clean up
unset READLINK CURRENT_SCRIPT SCRIPT_PATH DOTFILE
# Export
export SHELL_BASH SHELL_ZSH OS DOTFILES_DIR EXTRA_DIR
#################################################################
# Netshoes
#################################################################
export CATALINA_HOME=/Users/$(whoami)/bin/apache-tomcat-6.0.43
export JRE_HOME="/Library/Java/Home"
export JAVA_HOME=$JRE_HOME
# Services
alias ns-up="cd $CATALINA_HOME/bin && . catalina.sh start && cd -"
alias ns-down="cd $CATALINA_HOME/bin && . catalina.sh stop && cd -"
| true
|
2973e978bc2ab8c9dfc6689831c02393b59f9bd2
|
Shell
|
segmentio/pkgsite
|
/devtools/docker_ci.sh
|
UTF-8
| 1,081
| 3.75
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-google-patent-license-golang"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2020 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
set -e
usage() {
  # Print usage text; $0 expands to the invoked script path.
  cat <<EOUSAGE
Usage: $0 [--sudo]
Run standard CI (tests and linters) using local docker. If --sudo is set, run
docker with sudo.
EOUSAGE
}
# Parse flags: -h/--help prints usage, --sudo prefixes docker with sudo,
# anything else is an error.
maybe_sudo=
while [[ $# -gt 0 ]]; do
  case "$1" in
    "-h" | "--help" | "help")
      usage
      exit 0
      ;;
    "--sudo")
      maybe_sudo="sudo "
      shift
      ;;
    *)
      usage
      exit 1
      ;;
  esac
done
# Find the repo root.
script_dir=$(dirname "$(readlink -f "$0")")
pkgsite_dir=$(readlink -f "${script_dir}/..")
# Run postgres.
# NOTE(review): LANG=C presumably pins collation for reproducible test
# databases - confirm against the test suite's expectations.
pg_container=$(${maybe_sudo}docker run --rm -d -e LANG=C postgres:11.4)
# Always stop the database container on exit, success or failure.
trap "${maybe_sudo} docker stop ${pg_container}" EXIT
# Run all.bash. To avoid any port conflict, run in the postgres network.
cd "${pkgsite_dir}"
${maybe_sudo}docker run --rm -t \
  --network container:${pg_container} \
  -v $(pwd):"/workspace" -w "/workspace" \
  -e GO_DISCOVERY_TESTDB=true golang:1.14 ./all.bash ci
| true
|
269065b818d72bf6f7a1d082f16c7b90a097b531
|
Shell
|
vanloswang/oVirt-iso-build
|
/iso-building/recipe/common-post.ks
|
UTF-8
| 9,436
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*-Shell-script-*-
# Kickstart %post section: prepares the oVirt node image after package
# installation (runs inside the freshly-installed chroot).
echo "Starting Kickstart Post"
PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH
# cleanup rpmdb to allow non-matching host and chroot RPM versions
echo "Removing yumdb data"
rm -f /var/lib/rpm/__db*
echo "Creating shadow files"
# because we aren't installing authconfig, we aren't setting up shadow
# and gshadow properly. Do it by hand here
pwconv
grpconv
echo "Lock root account"
passwd -l root
echo "Relabeling files"
# Restore SELinux contexts across the whole tree.
restorecon -R /
echo "Configuring libvirt"
# make sure we don't autostart virbr0 on libvirtd startup
rm -f /etc/libvirt/qemu/networks/autostart/default.xml
# rhevh uses libvirtd upstart job, sysv initscript must not interfere
rm -f /etc/rc.d/init.d/libvirtd
# Remove the default logrotate daily cron job
# since we run it every 10 minutes instead.
rm -f /etc/cron.daily/logrotate
# Logrotate more judiciously so the size of syslog stays under control
sed -i '/^.*sharedscripts/a \ rotate 5\n size 15M\n compress' /etc/logrotate.d/syslog
# Point the RHN virtualization cron job at .pyc files and drop stray '<'.
if rpm -q --quiet rhn-virtualization-host; then
    sed -i -e 's/\.py/\.pyc/' -e 's/<//' /etc/cron.d/rhn-virtualization.cron
fi
# root's bash profile
cat >> /root/.bashrc << \EOF_bashrc
# aliases used for the temporary
function mod_vi() {
/bin/vi $@
restorecon -v $@ >/dev/null 2>&1
}
function mod_yum() {
if [ "$1" == "--force" ]; then
echo $@ > /dev/null
shift
/usr/bin/yum $@
else
printf "\nUsing yum is not supported\n\n"
fi
}
function mod_less() {
cat $1 | less
}
alias ping='ping -c 3'
alias yum="mod_yum"
alias less="mod_less"
export MALLOC_CHECK_=1
export LVM_SUPPRESS_FD_WARNINGS=0
EOF_bashrc
# directories required in the image with the correct perms
# config persistance currently handles only regular files
mkdir -p /root/.ssh
chmod 700 /root/.ssh
mkdir -p /boot
mkdir -p /boot-kdump
mkdir -p /config
mkdir -p /data
mkdir -p /data2
mkdir -p /live
mkdir -p /liveos
mkdir -p /root/.uml
mkdir -p /var/cache/multipathd
touch /var/lib/random-seed
echo "/dev/HostVG/Config /config ext4 defaults,noauto,noatime 0 0" >> /etc/fstab
# Create wwids file to prevent an error on boot, rhbz #805570
mkdir -p /etc/multipath
touch /etc/multipath/wwids
chmod 0600 /etc/multipath/wwids
# prepare for STATE_MOUNT in rc.sysinit
augtool << \EOF_readonly-root
set /files/etc/sysconfig/readonly-root/STATE_LABEL CONFIG
set /files/etc/sysconfig/readonly-root/STATE_MOUNT /config
set /files/etc/sysconfig/readonly-root/READONLY yes
save
EOF_readonly-root
# comment out /etc/* entries in rwtab to prevent overlapping mounts
sed -i '/^files \/etc*/ s/^/#/' /etc/rwtab
cat > /etc/rwtab.d/ovirt << \EOF_rwtab_ovirt
files /etc
dirs /var/lib/multipath
files /var/lib/net-snmp
dirs /var/lib/dnsmasq
files /root/.ssh
dirs /root/.uml
files /var/cache/libvirt
files /var/empty/sshd/etc/localtime
files /var/lib/libvirt
files /var/lib/multipath
files /var/cache/multipathd
empty /mnt
files /boot
empty /boot-kdump
empty /cgroup
files /var/lib/yum
files /var/cache/yum
files /usr/share/snmp/mibs
files /var/lib/lldpad
dirs /var/cache/rpcbind
files /usr/share/snmp/mibs
files /var/lib/lldpad
dirs /var/cache/rpcbind
EOF_rwtab_ovirt
# fix iSCSI/LVM startup issue
# Allow up to 60 login retries so LVM-on-iSCSI has time to come up.
sed -i 's/node\.session\.initial_login_retry_max.*/node.session.initial_login_retry_max = 60/' /etc/iscsi/iscsid.conf
# replace Red Hat with OCselected in initiatorname.iscsi
sed -i 's/1994-05\.com\.redhat/2011-06\.com\.ocselected/' /etc/iscsi/initiatorname.iscsi
#lvm.conf should use /dev/mapper and /dev/sdX devices
# and not /dev/dm-X devices
sed -i 's/preferred_names = \[ "^\/dev\/mpath\/", "^\/dev\/mapper\/mpath", "^\/dev\/\[hs\]d" \]/preferred_names = \[ "^\/dev\/mapper", "^\/dev\/\[hsv\]d" \]/g' /etc/lvm/lvm.conf
# unset AUDITD_LANG to prevent boot errors
sed -i '/^AUDITD_LANG*/ s/^/#/' /etc/sysconfig/auditd
# kdump configuration
augtool << \EOF_kdump
set /files/etc/sysconfig/kdump/KDUMP_BOOTDIR /boot-kdump
set /files/etc/sysconfig/kdump/MKDUMPRD_ARGS --allow-missing
save
EOF_kdump
# add admin user for configuration ui
useradd admin
usermod -G wheel admin
usermod -s /usr/libexec/ovirt-admin-shell admin
# Passwordless sudo for wheel: the admin UI shell needs root actions.
echo "%wheel ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# load modules required by crypto swap
cat > /etc/sysconfig/modules/swap-crypt.modules << \EOF_swap-crypt
#!/bin/sh
modprobe aes >/dev/null 2>&1
modprobe dm_mod >/dev/null 2>&1
modprobe dm_crypt >/dev/null 2>&1
modprobe cryptoloop >/dev/null 2>&1
modprobe cbc >/dev/null 2>&1
modprobe sha256 >/dev/null 2>&1
EOF_swap-crypt
chmod +x /etc/sysconfig/modules/swap-crypt.modules
#strip out all unncesssary locales
localedef --list-archive | grep -v -i -E 'en_US.utf8' |xargs localedef --delete-from-archive
mv /usr/lib/locale/locale-archive /usr/lib/locale/locale-archive.tmpl
/usr/sbin/build-locale-archive
# use static RPC ports, to avoid collisions
augtool << \EOF_nfs
set /files/etc/sysconfig/nfs/RQUOTAD_PORT 875
set /files/etc/sysconfig/nfs/LOCKD_TCPPORT 32803
set /files/etc/sysconfig/nfs/LOCKD_UDPPORT 32769
set /files/etc/sysconfig/nfs/MOUNTD_PORT 892
set /files/etc/sysconfig/nfs/STATD_PORT 662
set /files/etc/sysconfig/nfs/STATD_OUTGOING_PORT 2020
save
EOF_nfs
# XXX someting is wrong with readonly-root and dracut
# see modules.d/95rootfs-block/mount-root.sh
sed -i "s/defaults,noatime/defaults,ro,noatime/g" /etc/fstab
echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
#mount kernel debugfs
echo "debugfs /sys/kernel/debug debugfs auto 0 0" >> /etc/fstab
#symlink ovirt-node-setup into $PATH
ln -s /usr/bin/ovirt-node-setup /usr/sbin/setup
#set NETWORKING off by default
augtool << \EOF_NETWORKING
set /files/etc/sysconfig/network/NETWORKING no
save
EOF_NETWORKING
# disable SSH password auth by default
# set ssh timeouts for increased security
augtool << \EOF_sshd_config
set /files/etc/ssh/sshd_config/PasswordAuthentication no
set /files/etc/ssh/sshd_config/ClientAliveInterval 900
set /files/etc/ssh/sshd_config/ClientAliveCountMax 0
save
EOF_sshd_config
echo "
disable yum repos by default"
# Build one augtool batch that flips enabled=0 for every enabled repo.
rm -f /tmp/yum.aug
for i in $(augtool match /files/etc/yum.repos.d/*/*/enabled 1); do
    echo "set $i 0" >> /tmp/yum.aug
done
if [ -f /tmp/yum.aug ]; then
    echo "save" >> /tmp/yum.aug
    augtool < /tmp/yum.aug
    rm -f /tmp/yum.aug
fi
echo "cleanup yum directories"
rm -rf /var/lib/yum/*
echo "enable strong random number generation"
sed -i '/SSH_USE_STRONG_RNG/d' /etc/sysconfig/sshd
# sosreport fixups for node image:
echo "use .pyc for plugins enumeration, .py is blacklisted"
# include *-release
if [[ $(rpm -E "%{?fedora}") = 20 ]] ||
[[ $(rpm -E "%{?rhel}") = 7 ]] ||
[[ $(rpm -E "%{?centos}") = 7 ]]; then
patch --fuzz 3 -d /usr/lib/python2.7/site-packages/sos -p0 << \EOF_sos_patch
--- utilities.py.orig 2013-08-04 08:36:51.000000000 -0700
+++ utilities.py 2014-03-18 15:25:02.675059445 -0700
@@ -296,13 +296,13 @@
plugins = [self._plugin_name(plugin)
for plugin in list_
if "__init__" not in plugin
- and plugin.endswith(".py")]
+ and plugin.endswith(".pyc")]
plugins.sort()
return plugins
def _find_plugins_in_dir(self, path):
if os.path.exists(path):
- py_files = list(find("*.py", path))
+ py_files = list(find("*.pyc", path))
pnames = self._get_plugins_from_list(py_files)
if pnames:
return pnames
--- plugins/general.py.orig 2014-03-18 15:07:20.570811354 -0700
+++ plugins/general.py 2014-03-18 15:28:49.371866760 -0700
@@ -51,8 +51,7 @@
super(RedHatGeneral, self).setup()
self.add_copy_specs([
- "/etc/redhat-release",
- "/etc/fedora-release",
+ "/etc/*-release",
])
EOF_sos_patch
else
patch --fuzz 3 -d /usr/lib/python2.*/site-packages/sos -p0 << \EOF_sos_patch
--- sosreport.py.orig 2011-04-07 11:51:40.000000000 +0000
+++ sosreport.py 2011-07-06 13:26:44.000000000 +0000
@@ -428,8 +428,8 @@
# validate and load plugins
for plug in plugins:
- plugbase = plug[:-3]
- if not plug[-3:] == '.py' or plugbase == "__init__":
+ plugbase = plug[:-4]
+ if not plug[-4:] == '.pyc' or plugbase == "__init__":
continue
try:
if GlobalVars.policy.validatePlugin(pluginpath + plug):
--- plugins/general.py.orig 2011-02-09 15:25:48.000000000 +0000
+++ plugins/general.py 2011-07-06 23:13:32.000000000 +0000
@@ -25,8 +25,7 @@
("all_logs", "collect all log files defined in syslog.conf", "", False)]
def setup(self):
- self.addCopySpec("/etc/redhat-release")
- self.addCopySpec("/etc/fedora-release")
+ self.addCopySpec("/etc/*-release")
self.addCopySpec("/etc/inittab")
self.addCopySpec("/etc/sos.conf")
self.addCopySpec("/etc/sysconfig")
EOF_sos_patch
fi
# Byte-compile the patched sos tree so .pyc-based plugin discovery works.
python -m compileall /usr/lib/python2.*/site-packages/sos
# https://bugzilla.redhat.com/show_bug.cgi?id=1168582
rm -vf /usr/lib64/python2.*/site-packages/backports/*
# https://bugzilla.redhat.com/show_bug.cgi?id=1167620
# Ensure that mpath is enabled and find_multipaths is y
mpathconf --enable --find_multipaths y
# Then ensure that getuid_callout is set for b/c
sed -i \
    -e "/find_multipaths / a getuid_callout \"/lib/udev/scsi_id --replace-whitespace --whitelisted --device=/dev/%n\"" \
    -e "/^#/ d" \
    -e "/user_friendly_names/ d" \
    /etc/multipath.conf
| true
|
e81f65740d76e5d6221c029f621123dee1636486
|
Shell
|
polfliet/instruqt
|
/kubernetes-new-relic-university/nr-prometheus/check-kubernetes
|
UTF-8
| 173
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Instruqt challenge check: verify the New Relic Prometheus integration
# (nri-prometheus) pod exists in the default namespace.

# Test the pipeline status directly instead of inspecting $? afterwards,
# which silently breaks if any statement is ever inserted in between.
# grep output is intentionally not suppressed so matching pods are logged.
if ! kubectl get pods | grep nri-prometheus; then
    fail-message "Could not find the New Relic Prometheus integration. Please follow the steps."
fi
| true
|
62593405e629b32ad73b898a8d3725f10e65bd41
|
Shell
|
rimusz/deis-workflow-aws
|
/install_workflow_2_aws.sh
|
UTF-8
| 10,839
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set_settings() {
  # Load deployment settings (S3 region, AWS credentials) from ./settings.
  # check for settings file
  if [[ ! -f settings ]]
  then
    echo "'settings' file is missing!!!"
    echo "Rename/copy 'settings.tpl' file to 'settings', then set S3 region, and AWS keys there"
    # Bug fix: previously "exit 0", which made callers (and CI) treat a
    # missing configuration as success.
    exit 1
  fi
  # Overall Workflow settings
  # S3 region
  # AWS credentials
  source settings
}
install() {
  # Full Workflow install: fetch CLIs, add the Deis chart repo, generate
  # manifests configured for AWS S3/ECR, install, and wait for readiness.
  # get seeitngs
  set_settings
  # get k8s cluster name
  cluster
  # get lastest macOS helmc cli version
  install_helmc
  # get lastest macOS deis cli version
  install_deis
  # add Deis Chart repo
  echo "Adding Deis Chart repository ... "
  helmc repo add deis https://github.com/deis/charts
  # get the latest version of all Charts from all repos
  echo " "
  echo "Get the latest version of all Charts from all repos ... "
  helmc up
  # get latest Workflow version
  echo " "
  echo "Getting latest Deis Workflow version ..."
  # Newest cached workflow-vX.Y.Z chart, excluding the e2e test charts.
  WORKFLOW_RELEASE=$(ls ~/.helmc/cache/deis | grep workflow-v2. | grep -v -e2e | sort -rn | head -1 | cut -d'-' -f2)
  echo "Got Deis Workflow ${WORKFLOW_RELEASE} ..."
  # delete the old folder if such exists
  rm -rf ~/.helmc/workspace/charts/workflow-${WORKFLOW_RELEASE}-${K8S_NAME} > /dev/null 2>&1
  # fetch Deis Workflow Chart to your helmc's working directory
  echo " "
  echo "Fetching Deis Workflow Chart to your helmc's working directory ..."
  helmc fetch deis/workflow-${WORKFLOW_RELEASE} workflow-${WORKFLOW_RELEASE}-${K8S_NAME}
  ####
  # set env vars
  # so we do not have to edit generate_params.toml in chart’s tpl folder
  # set storage to AWS S3
  STORAGE_TYPE=s3
  S3_REGION=${BUCKETS_S3_REGION}
  AWS_ACCESS_KEY=${AWS_ACCESS_KEY_ID}
  AWS_SECRET_KEY=${AWS_SECRET_ACCESS_KEY}
  AWS_REGISTRY_BUCKET=${K8S_NAME}-deis-registry
  AWS_DATABASE_BUCKET=${K8S_NAME}-deis-database
  AWS_BUILDER_BUCKET=${K8S_NAME}-deis-builder
  # set off-cluster registry
  REGISTRY_LOCATION=ecr
  ECR_REGION=${BUCKETS_S3_REGION}
  ECR_ACCESS_KEY=${AWS_ACCESS_KEY_ID}
  ECR_SECRET_KEY=${AWS_SECRET_ACCESS_KEY}
  # export as env vars
  export STORAGE_TYPE S3_REGION AWS_ACCESS_KEY AWS_SECRET_KEY AWS_REGISTRY_BUCKET AWS_DATABASE_BUCKET AWS_BUILDER_BUCKET REGISTRY_LOCATION ECR_ACCESS_KEY ECR_SECRET_KEY ECR_REGION
  ####
  # set off-cluster Postgres
  set_database
  # generate manifests
  echo " "
  echo "Generating Workflow ${WORKFLOW_RELEASE}-${K8S_NAME} manifests ..."
  helmc generate -x manifests -f workflow-${WORKFLOW_RELEASE}-${K8S_NAME}
  # set intenal AWS LB - WIP
  # NOTE(review): this branch only prints a message; no ILB configuration
  # is actually applied yet.
  if [[ ! -z "$ILB" ]]
  then
    echo "Enabling internal LoadBalancer for Workflow Router ..."
  fi
  # install Workflow
  echo " "
  echo "Installing Workflow ..."
  helmc install workflow-${WORKFLOW_RELEASE}-${K8S_NAME}
  # Waiting for Deis Workflow to be ready
  wait_for_workflow
  #
  # get router's external IP
  echo " "
  echo "Fetching Router's LB IP:"
  LB_IP=$(kubectl --namespace=deis get svc | grep [d]eis-router | awk '{ print $3 }')
  echo "$LB_IP"
  echo " "
  echo "Workflow install ${WORKFLOW_RELEASE} is done ..."
  echo " "
}
upgrade() {
  # Upgrade Workflow in place: back up database credentials and builder SSH
  # keys, uninstall the previous release, install the newest cached chart
  # with the backed-up secrets restored.
  # NOTE(review): uninstall-then-install implies downtime between releases.
  # get seeitngs
  set_settings
  # get k8s cluster name
  cluster
  # get lastest macOS helmc cli version
  install_helmc
  # get lastest macOS deis cli version
  install_deis
  # get the latest version of all Charts from all repos
  echo " "
  echo "Get the latest version of all Charts from all repos ... "
  helmc up
  echo " "
  # Fetch the current database credentials
  echo " "
  echo "Fetching the current database credentials ..."
  kubectl --namespace=deis get secret database-creds -o yaml > ~/tmp/active-deis-database-secret-creds.yaml
  # Fetch the builder component ssh keys
  echo " "
  echo "Fetching the builder component ssh keys ..."
  kubectl --namespace=deis get secret builder-ssh-private-keys -o yaml > ~/tmp/active-deis-builder-secret-ssh-private-keys.yaml
  # export environment variables for the previous and latest Workflow versions
  export PREVIOUS_WORKFLOW_RELEASE=$(cat ~/tmp/active-deis-builder-secret-ssh-private-keys.yaml | grep chart.helm.sh/version: | awk '{ print $2 }')
  export DESIRED_WORKFLOW_RELEASE=$(ls ~/.helmc/cache/deis | grep workflow-v2. | grep -v -e2e | sort -rn | head -1 | cut -d'-' -f2)
  # delete the old chart folder if such exists
  rm -rf ~/.helmc/workspace/charts/workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME} > /dev/null 2>&1
  # Fetching the new chart copy from the chart cache into the helmc workspace for customization
  echo " "
  echo "Fetching Deis Workflow Chart to your helmc's working directory ..."
  helmc fetch deis/workflow-${DESIRED_WORKFLOW_RELEASE} workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME}
  ####
  # set env vars
  # so we do not have to edit generate_params.toml in chart’s tpl folder
  # set storage to AWS S3
  STORAGE_TYPE=s3
  S3_REGION=${BUCKETS_S3_REGION}
  AWS_ACCESS_KEY=${AWS_ACCESS_KEY_ID}
  AWS_SECRET_KEY=${AWS_SECRET_ACCESS_KEY}
  AWS_REGISTRY_BUCKET=${K8S_NAME}-deis-registry
  AWS_DATABASE_BUCKET=${K8S_NAME}-deis-database
  AWS_BUILDER_BUCKET=${K8S_NAME}-deis-builder
  # set off-cluster registry
  REGISTRY_LOCATION=ecr
  ECR_REGION=${BUCKETS_S3_REGION}
  ECR_ACCESS_KEY=${AWS_ACCESS_KEY_ID}
  ECR_SECRET_KEY=${AWS_SECRET_ACCESS_KEY}
  # export as env vars
  export STORAGE_TYPE S3_REGION AWS_ACCESS_KEY AWS_SECRET_KEY AWS_REGISTRY_BUCKET AWS_DATABASE_BUCKET AWS_BUILDER_BUCKET REGISTRY_LOCATION ECR_ACCESS_KEY ECR_SECRET_KEY ECR_REGION
  ####
  # set off-cluster Postgres
  set_database
  # Generate templates for the new release
  echo " "
  echo "Generating Workflow ${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME} manifests ..."
  helmc generate -x manifests workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME}
  # Copy your active database secrets into the helmc workspace for the desired version
  cp -f ~/tmp/active-deis-database-secret-creds.yaml \
    $(helmc home)/workspace/charts/workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME}/manifests/deis-database-secret-creds.yaml
  # Copy your active builder ssh keys into the helmc workspace for the desired version
  cp -f ~/tmp/active-deis-builder-secret-ssh-private-keys.yaml \
    $(helmc home)/workspace/charts/workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME}/manifests/deis-builder-secret-ssh-private-keys.yaml
  # Uninstall Workflow
  echo " "
  echo "Uninstalling Workflow ${PREVIOUS_WORKFLOW_RELEASE} ... "
  helmc uninstall workflow-${PREVIOUS_WORKFLOW_RELEASE}-${K8S_NAME} -n deis
  sleep 3
  # Install of latest Workflow release
  echo " "
  echo "Installing Workflow ${DESIRED_WORKFLOW_RELEASE} ... "
  helmc install workflow-${DESIRED_WORKFLOW_RELEASE}-${K8S_NAME}
  # Waiting for Deis Workflow to be ready
  wait_for_workflow
  echo " "
  echo "Workflow upgrade to ${DESIRED_WORKFLOW_RELEASE} is done ..."
  echo " "
}
set_database() {
  # Select on-cluster vs off-cluster PostgreSQL based on the presence of a
  # local postgres_settings file; off-cluster credentials get exported for
  # the chart templates.
  if [[ -f postgres_settings ]]
  then
    echo " "
    echo "postgres_settings file found !!!"
    echo "PostgreSQL database will be set to off-cluster ..."
    DATABASE_LOCATION="off-cluster"
    # import values from file
    source postgres_settings
    # export values as environment variables
    export DATABASE_LOCATION DATABASE_HOST DATABASE_PORT DATABASE_NAME DATABASE_USERNAME DATABASE_PASSWORD
  else
    echo " "
    echo "No postgres_settings file found !!! "
    echo "PostgreSQL database will be set to on-cluster ..."
  fi
}
cluster() {
  # Derive the Kubernetes cluster name from the current kubectl context.
  # get k8s cluster name
  echo " "
  echo "Fetching Kubernetes cluster name ..."
  # Drops the first 4 characters of the context name - assumes a fixed
  # 4-character prefix in the context naming scheme; TODO confirm.
  K8S_NAME=$(kubectl config current-context | cut -c 5-)
  echo "Kubernetes cluster name is ${K8S_NAME} ..."
  echo " "
}
install_deis() {
  # Install the latest macOS deis CLI into ~/bin.
  # NOTE(review): installer fetched over plain HTTP and piped straight to
  # bash with no checksum verification - consider https + verification.
  # get lastest macOS deis cli version
  echo "Downloading latest version of Workflow deis cli ..."
  curl -sSL http://deis.io/deis-cli/install-v2.sh | bash
  mv -f deis ~/bin/
  chmod +x ~/bin/deis
  echo " "
  echo "Installed deis cli to ~/bin ..."
  echo " "
}
install_helmc() {
  # Install the latest macOS helmc (Helm Classic) CLI into ~/bin.
  # NOTE(review): download is unauthenticated and has no checksum check.
  # get lastest macOS helmc cli version
  echo "Downloading latest version of helmc cli ..."
  curl -o ~/bin/helmc https://storage.googleapis.com/helm-classic/helmc-latest-darwin-amd64
  chmod +x ~/bin/helmc
  echo " "
  echo "Installed helmc cli to ~/bin ..."
  echo " "
}
install_helm() {
  # Install or refresh the latest macOS helm CLI in ~/bin and re-init the
  # Tiller deployment when a newer release is available.
  # get lastest macOS helm cli version
  echo " "
  echo "Checking for latest Helm version..."
  mkdir ~/tmp > /dev/null 2>&1
  # Latest release tag from the GitHub API (strips surrounding quotes).
  LATEST_HELM=$(curl -s https://api.github.com/repos/kubernetes/helm/releases/latest | grep "tag_name" | awk '{print $2}' | sed -e 's/"\(.*\)"./\1/')
  # check if the binary exists
  if [ ! -f ~/bin/helm ]; then
    INSTALLED_HELM=v0.0.0
  else
    INSTALLED_HELM=$(~/bin/helm version)
  fi
  #
  # Non-zero match count means the installed version already is the latest.
  MATCH=$(echo "${INSTALLED_HELM}" | grep -c "${LATEST_HELM}")
  if [ $MATCH -ne 0 ]; then
    echo " "
    echo "Helm is up to date !!!"
  else
    echo " "
    echo "Downloading latest ${LATEST_HELM} of 'helm' cli for macOS"
    # NOTE(review): http URL plus "curl -k" disables TLS verification;
    # the tarball is not checksum-verified either.
    curl -k -L http://storage.googleapis.com/kubernetes-helm/helm-${LATEST_HELM}-darwin-amd64.tar.gz > ~/tmp/helm.tar.gz
    tar xvf ~/tmp/helm.tar.gz -C ~/tmp --strip=1 darwin-amd64/helm > /dev/null 2>&1
    chmod +x ~/tmp/helm
    mv -f ~/tmp/helm ~/bin/helm
    rm -f ~/tmp/helm.tar.gz
    echo " "
    echo "Installed latest ${LATEST_HELM} of 'helm' cli to ~/bin ..."
    echo " "
    echo "Installing new version of Helm Tiller..."
    kubectl --namespace=kube-system delete deployment tiller-deploy > /dev/null 2>&1
    ~/bin/helm init
    echo "Helm is ready to sail ..."
  fi
}
wait_for_workflow() {
  # Spin (with a console spinner) until each Deis component pod reports
  # 1/1 ready. NOTE(review): these loops have no timeout - a wedged pod
  # blocks the script forever.
  echo " "
  echo "Waiting for Deis Workflow to be ready... but first, coffee! "
  spin='-\|/'
  i=1
  # The "[d]eis-..." pattern keeps grep from matching its own command line.
  until kubectl --namespace=deis get po | grep [d]eis-builder- | grep "1/1" >/dev/null 2>&1; do i=$(( (i+1) %4 )); printf "\r${spin:$i:1}"; sleep .1; done
  until kubectl --namespace=deis get po | grep [d]eis-registry- | grep "1/1" >/dev/null 2>&1; do i=$(( (i+1) %4 )); printf "\r${spin:$i:1}"; sleep .1; done
  # The database pod only exists for on-cluster deployments.
  if [[ ! -f postgres_settings ]]
  then
    until kubectl --namespace=deis get po | grep [d]eis-database- | grep "1/1" >/dev/null 2>&1; do i=$(( (i+1) %4 )); printf "\r${spin:$i:1}"; sleep .1; done
  fi
  until kubectl --namespace=deis get po | grep [d]eis-router- | grep "1/1" >/dev/null 2>&1; do i=$(( (i+1) %4 )); printf "\r${spin:$i:1}"; sleep .1; done
  until kubectl --namespace=deis get po | grep [d]eis-controller- | grep "1/1" >/dev/null 2>&1; do i=$(( (i+1) %4 )); printf "\r${spin:$i:1}"; sleep .1; done
  echo " "
}
usage() {
  # List the supported sub-commands.
  printf '%s\n' "Usage: install_workflow_2_aws.sh install | upgrade | deis | helmc | helm | cluster"
}
# Dispatch on the requested sub-command; anything unrecognized prints usage.
case "$1" in
  install) install ;;
  upgrade) upgrade ;;
  deis) install_deis ;;
  helmc) install_helmc ;;
  helm) install_helm ;;
  cluster) cluster ;;
  *) usage ;;
esac
| true
|
619b10251a68fdaec24aaa50bc4b5e091fc22ccd
|
Shell
|
obonobo/PayStubs
|
/zzz_deprecated/build.sh
|
UTF-8
| 859
| 3.953125
| 4
|
[] |
no_license
|
# Root is required because this script drives Docker directly.
if [ "$EUID" -ne 0 ]; then
    echo 'This script must be run as root...'
    exit 1
fi

# A MongoDB username and password must both be supplied.
# Quoting the positional parameters fixes the unquoted "[ -z $1 ]" tests,
# which misbehave when an argument contains whitespace.
if [ -z "$1" ] || [ -z "$2" ]; then
    echo 'You need to pass MongoDB username and password for the app to connect:'
    echo '    ./build.sh <username> <password>'
    exit 1
fi
# Builds a docker image for PayStubs
main() {
    # $1: MongoDB username, $2: MongoDB password.
    echo 'Removing running containers...'
    docker rm --force ppp

    echo 'Running npm install...'
    npm install

    # Compile typscript
    echo 'Transpiling JS...'
    ./node_modules/typescript/bin/tsc

    # Builds the docker container.
    # Bug fix: credentials are now quoted so values containing spaces are
    # passed as single arguments instead of being word-split.
    buildPayStubsContainer "$1" "$2"
}
buildPayStubsContainer() {
    # Build and launch the PayStubs image.
    # $1: MongoDB username, $2: MongoDB password.
    # Bug fix: build-args are quoted so credentials with spaces survive.
    # NOTE(review): passing secrets via --build-arg bakes them into image
    # metadata/history; prefer runtime env vars or Docker secrets.
    echo 'Building docker image...'
    docker build -t paystubs -f paystubs.dockerfile --build-arg USER="$1" --build-arg PASS="$2" .
    echo 'Running docker image...'
    docker run -d --name ppp -p 3000:3000 paystubs
}
# Quote the credentials so whitespace-containing values reach main intact.
main "$1" "$2"
| true
|
f9be5f1f31a67215d2ffac1467f2107b2f40a73a
|
Shell
|
boyle/2018-measure-stress
|
/www/deploy
|
UTF-8
| 3,136
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
S=$(dirname $0)
D=/var/www/html
R=root@saans.ca
# Print usage to stdout; the here-doc interpolates the current $S/$R/$D
# values so the "tx" line shows the actual source and destination.
function help() {
cat <<EOF
usage: $(basename $0) <cmd>
<none> do the following steps:
tx transfer $S --> $R:$D
venv update remote's virtualenv dependencies
migrate migrate remote's database
restart restart the webserver
EOF
}
# "help" prints usage and exits successfully.
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
help
exit 0
fi
# More than one argument is always an error.
if [ $# -gt 1 ]; then
help
exit 1
fi
# With exactly one argument, it must be one of the known sub-commands.
if [ $# == 1 ]; then
case $1 in
tx) ;;
venv) ;;
migrate) ;;
restart) ;;
venv_remote) ;;
migrate_remote) ;;
restart_remote) ;;
announce) ;;
*)
echo "error: $1: unrecognized cmd" 1>&2
help
exit 1;;
esac
fi
# Shared ssh/scp/rsync command strings, expanded later via eval.
SSH="ssh -o 'ControlMaster=auto'"
SCP="scp -o 'ControlMaster=auto'"   # NOTE(review): defined but unused in this chunk
# NOTE(review): 'ControlPath=auto' looks like a typo for 'ControlMaster=auto'
# (ControlPath expects a socket path) — confirm before relying on it.
RSYNC="rsync -e \"ssh -o 'ControlPath=auto'\""
# tx step: stamp version strings into config.py, then sync the tree remote.
if [ $# == 0 ] || [ $1 == "tx" ]; then
echo "$S -> $R:$D"
# Pull the live remote config first so local edits don't clobber it.
eval ${RSYNC} -au $R:/var/www/rawdata/config.py $S/
# Rewrite WEBSITE_VERSION / APPLICATION_VERSION from git tags (www-* / app-*):
# the inner sed compresses "-<n>-g<hash>" to "-<n>" and keeps a trailing "+"
# from --dirty; the tag prefix is then stripped.
sed -i -e "s/^\\(WEBSITE_VERSION\\) = .*/\\1 = '$(git describe --tags --match='www-*' --dirty=+ | sed -e 's/\(-[0-9][0-9]*\)-[^+]*\(\+\)*/\1\2/' -e 's/^www-//')'/" config.py
sed -i -e "s/^\\(APPLICATION_VERSION\\) = .*/\\1 = '$(git describe --tags --match='app-*' --dirty=+ | sed -e 's/\(-[0-9][0-9]*\)-[^+]*\(\+\)*/\1\2/' -e 's/^app-//')'/" config.py
# Push the stamped config back, then mirror the source tree (deleting
# remote files that no longer exist locally, minus excluded dirs).
[ -f $S/config.py ] && eval ${RSYNC} -au $S/config.py $R:/var/www/rawdata/
eval ${RSYNC} -raq --delete --delete-excluded \
--exclude '__pycache__' --exclude "/deploy" --exclude "/instance" --exclude "/venv" --exclude "/node_modules" \
--exclude '.pytest_cache' --exclude '/htmlcov' --exclude "/.well-known" \
"$S/" $R:$D/
fi
# The venv / migrate / restart steps are identical except for the remote
# sub-command name: copy this script to the remote, then invoke it there
# as "<step>_remote". Deduplicated from three copy-pasted if-blocks.
for step in venv migrate restart; do
if [ $# == 0 ] || [ "${1:-}" == "$step" ]; then
set -ex
eval $RSYNC $0 $R:~/
eval $SSH $R "./$(basename $0) ${step}_remote"
set +ex
fi
done
# Remote-side: create/refresh the Python virtualenv and node env inside $D.
if [ $# == 1 ] && [ $1 == "venv_remote" ]; then
cd $D
[ -d venv ] || python3 -m venv venv
source venv/bin/activate
set -ex
pip install -r requirements.txt
# nodeenv -p installs node into the currently active virtualenv
nodeenv -p
npm install
set +ex
fi
# Remote-side: initialise the database once; no-op if it already exists.
if [ $# == 1 ] && [ $1 == "migrate_remote" ]; then
cd $D
[ -f /var/www/rawdata/bikeshed.db ] && echo "migration: db exists... nop" && exit 0
source venv/bin/activate
set -ex
BIKESHED_SETTINGS=/var/www/rawdata/config.py FLASK_APP=bikeshed flask init-db
# Restrict the fresh DB file to the web server user.
chown www-data:www-data /var/www/rawdata/bikeshed.db
chmod 750 /var/www/rawdata/bikeshed.db
set +ex
fi
# Remote-side: fix ownership of the deployed tree and reload the web server.
if [ $# == 1 ] && [ $1 == "restart_remote" ]; then
set -ex
chown -R www-data:www-data $D
service lighttpd force-reload
set +ex
fi
# Optional Slack announcement. SLACKHOOK comes from slack.cfg; the BADHOOK
# fallback makes the curl fail visibly (under set -x) when it's unconfigured.
[ -f slack.cfg ] && source slack.cfg
SLACKHOOK=${SLACKHOOK:-BADHOOK}
if [ $# == 0 ] || [ $1 == "announce" ]; then
set -ex
HASH=$(git describe --tags --match='www-*' --dirty=+)
curl -X POST -H 'Content-type: application/json' \
--data "{\"text\":\"Commit \`${HASH}\` is live.\"}" \
${SLACKHOOK}
fi
| true
|
3ef365cea147a0d4717fae16c8d470b5ffde69b8
|
Shell
|
git-for-windows/MSYS2-packages
|
/libyaml/PKGBUILD
|
UTF-8
| 1,386
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <alexpux@gmail.com>
# Split package: 'libyaml' ships the runtime DLL, 'libyaml-devel' the
# headers and link libraries.
pkgname=('libyaml' 'libyaml-devel')
pkgver=0.1.7
pkgrel=1
pkgdesc="YAML 1.1 library"
arch=('i686' 'x86_64')
url="https://pyyaml.org/wiki/LibYAML"
license=('MIT')
# Upstream tarball plus two local patches; sha256sums are in the same order.
source=(https://pyyaml.org/download/libyaml/yaml-$pkgver.tar.gz
'yaml-0.1.7-fix-undefined.patch'
'yaml-0.1.7-msys2.patch')
sha256sums=('8088e457264a98ba451a90b8661fcb4f9d6f478f7265d48322a196cec2480729'
'6fae0d088cd67ad61a3ec83c03116bfb829276201a18ba583a9acc230e2f57f6'
'bea0e3d9d01b2769abb71c358babb2c2c98cc98ddb9ac2cd5dd77ed2932c2212')
# Apply the local patches and regenerate the autotools build system.
prepare() {
cd "$srcdir/yaml-$pkgver"
patch -p1 -i ${srcdir}/yaml-0.1.7-fix-undefined.patch
patch -p1 -i ${srcdir}/yaml-0.1.7-msys2.patch
autoreconf -fi
}
# Configure and compile, then stage the install tree via setup().
build() {
cd "$srcdir/yaml-$pkgver"
./configure --build=${CHOST} --prefix=/usr
make
# NOTE(review): staging (make install) from build() is unconventional for a
# PKGBUILD — usually done in the package_*() functions; confirm intentional.
setup
}
# Stage the full install into $srcdir/dest; package_*() copy from there.
setup() {
cd "$srcdir/yaml-$pkgver"
make DESTDIR="$srcdir/dest" install
# $pkgname is an array; unsubscripted it expands to its first element, "libyaml".
install -m644 -D LICENSE $srcdir/dest/usr/share/licenses/$pkgname/LICENSE
}
# Runtime package: the DLL plus shared data (including the staged license).
package_libyaml() {
groups=('libraries')
install -dm755 $pkgdir/usr/bin
install -m755 -t ${pkgdir}/usr/bin $srcdir/dest/usr/bin/*.dll
cp -rf $srcdir/dest/usr/share ${pkgdir}/usr/
}
# Development package: headers and link libraries; pins the matching runtime.
package_libyaml-devel() {
groups=('development')
depends=("libyaml=${pkgver}")
install -dm755 $pkgdir/usr
cp -rf $srcdir/dest/usr/include ${pkgdir}/usr/
cp -rf $srcdir/dest/usr/lib ${pkgdir}/usr/
}
| true
|
f3e487fbefcaee63275bf71c46d8ff891616f5be
|
Shell
|
matthewmcnew/dotfiles
|
/zsh/upgrade.zsh
|
UTF-8
| 374
| 2.921875
| 3
|
[] |
no_license
|
# Update Homebrew metadata, list outdated formulae/casks, then upgrade both.
function upgrade_brew(){
  echo "upgrading brew"
  brew update
  brew outdated
  # `brew cask <cmd>` was removed from Homebrew (2.6+); casks are now
  # handled via --cask flags on the main commands.
  brew outdated --cask
  brew upgrade
  brew upgrade --cask
}
# Run every upgrade step: dotfiles and brew (traced via set -x), then
# antibody and macOS software updates.
function upgrade_all() {
  set -x
  upgrade_dotfiles
  upgrade_brew
  set +x
  upgrade_antibody   # NOTE(review): not defined in this file — presumably sourced elsewhere
  softwareupdate -i -a
}
# Pull the latest dotfiles. Bail out if the checkout is missing so that
# `git pull` cannot run against whatever repo the current directory is in.
function upgrade_dotfiles(){
  pushd ~/workspace/dotfiles || return 1
  git pull
  popd
}
| true
|
751e4b150695b457f183cc18b20bf2979ba53d7c
|
Shell
|
trialuser/aws-tricks
|
/scripts/housekeeper/orphaned_ebs_snapshots.sh
|
UTF-8
| 2,497
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Counting and showing list of orphaned EBS Snapshots.
#
# Reads:  AWS_REGION, AWS_CLI_PROFILE, AWS_SNAP_PRICE, BLUE/GREEN/RESET colors.
# Sets:   reply=(<rounded monthly cost> original args...) for the caller.
aws_counting_orphaned_ebs_snapshots() {
printf "${BLUE}=== Orphaned EBS Snapshots ===${RESET}\n"
# Volume IDs referenced by snapshots but absent from current volumes
# (comm -23 of the two sorted ID lists), joined with commas for --filters.
AWS_EBS_ORF=$(comm -23 <(echo $(aws --region "${AWS_REGION}" --profile \
"${AWS_CLI_PROFILE}" ec2 describe-snapshots --owner-ids self --query \
'Snapshots[*].[VolumeId]' --output text | sort | uniq) | \
tr ' ' '\n') <(echo $(aws --region "${AWS_REGION}" --profile \
"${AWS_CLI_PROFILE}" ec2 describe-volumes --query \
'Volumes[*].[VolumeId]' --output text | sort | uniq) | tr ' ' '\n')\
| tr '\n' ',' | sed 's/\,$//')
# Same comparison, counted instead of joined.
AWS_EBS_ORF_COUNT=$(comm -23 <(echo $(aws --region "${AWS_REGION}" --profile \
"${AWS_CLI_PROFILE}" ec2 describe-snapshots --owner-ids self \
--query 'Snapshots[*].[VolumeId]' --output text | sort | \
uniq) | tr ' ' '\n') <(echo $(aws --region "${AWS_REGION}" \
--profile "${AWS_CLI_PROFILE}" ec2 describe-volumes --query \
'Volumes[*].[VolumeId]' --output text | sort | uniq) | \
tr ' ' '\n') | wc -l)
# Snapshot details for the orphaned volume IDs: JSON for size math,
# table form for display.
AWS_EBS_ORF_DRAFT=$(aws --region "${AWS_REGION}" --profile "${AWS_CLI_PROFILE}" \
ec2 describe-snapshots --owner-ids self --query \
'Snapshots[*].{volid:VolumeId,desc:Description,size:VolumeSize,created:StartTime,id:SnapshotId}' --filters Name=volume-id,Values="${AWS_EBS_ORF}")
AWS_EBS_ORF_SIZE=$(jq '[.[] .size] | add' <<< "${AWS_EBS_ORF_DRAFT}")
AWS_EBS_ORF_LIST=$(aws --region "${AWS_REGION}" --profile "${AWS_CLI_PROFILE}" \
ec2 describe-snapshots --owner-ids self --query \
'Snapshots[*].{volid:VolumeId,desc:Description,size:VolumeSize,created:StartTime,id:SnapshotId}' --filters Name=volume-id,Values="${AWS_EBS_ORF}" --output table)
if ((AWS_EBS_ORF_COUNT>0)); then
AWS_SNAP_SUMM=$(jq -n "${AWS_EBS_ORF_SIZE:=0} * ${AWS_SNAP_PRICE}")
printf "You have orphaned EBS snapshots, which cost you approximately \
${GREEN}\$%.2f${RESET} per month \n" "${AWS_SNAP_SUMM}"
printf "These snapshots are:\n"
printf "%s\n" "${AWS_EBS_ORF_LIST}"
else
printf "${GREEN}You have no orphaned EBS snapshots${RESET}"
fi
# Fix: AWS_SNAP_SUMM is only set in the "orphans found" branch; default it
# to 0 so printf doesn't receive an empty numeric argument when none exist.
COST=$(printf "%.0f\n" "${AWS_SNAP_SUMM:-0}")
reply=(${COST} "$@")
}
| true
|
d65c980126d92184b2ee619a577896757498167d
|
Shell
|
cs-rodrigo-morais/trainning-devops
|
/exec-shell/iterando-itens.sh
|
UTF-8
| 501
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#####################################################################
# Program:
# Description:
#   Iterate over every entry in the home directory and report whether
#   each one is a directory or a regular file.
# Author: Rodrigo Morais <rodrigo.morais@concretesolutions.com.br>
#####################################################################
for entry in /home/rmorais/*; do
  if [ -d "$entry" ]; then
    # Existing entry that is a directory
    echo "O item $entry é um diretório"
  elif [ -f "$entry" ]; then
    # Existing entry that is a regular file
    echo "O item $entry é um arquivo"
  fi
done
| true
|
6491846abacb37530536f90d0e8a18b0759512c2
|
Shell
|
hollychen503/dockcross
|
/tools/container-diff.sh
|
UTF-8
| 969
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -euo pipefail

# More info: https://github.com/GoogleContainerTools/container-diff
#
# Usage: container-diff.sh <img1> [<img2>]
#   one image  -> run `container-diff analyze` once per analyzer type
#   two images -> run `container-diff diff` between them, once per type
if (( $# < 1 || $# > 2 )); then
  echo "Need 1 or 2 arguments: <img1> <img2>"
  exit 1
fi

# Analyzer types shared by both modes; output is appended to one log per type.
types=(history file size apt pip)

if (( $# == 1 )); then
  for t in "${types[@]}"; do
    container-diff analyze "$1" --type="$t" 2>&1 | tee -a "analyze-$t.txt"
  done
fi

if (( $# == 2 )); then
  for t in "${types[@]}"; do
    container-diff diff "$1" "$2" --type="$t" 2>&1 | tee -a "diff-$t.txt"
  done
fi
| true
|
d28b8b7703012d2afdb28ccca84a3e6cca1acebb
|
Shell
|
YuSangHuck/install
|
/mac/fswatch.sh
|
UTF-8
| 679
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Watch a list of dotfiles in $HOME and mirror every change into the
# git checkout so the repo always has the latest copy.

if [ -z "$(command -v fswatch)" ]; then
    echo "fswatch not installed."
    echo "In most distros, it is available in the inotify-tools package."
    exit 1
fi

# Fix: the original used GITHUB_DIR="~/_/install" — tilde does not expand
# inside quotes, so cp targeted a literal "~" directory. Use $HOME instead.
GITHUB_DIR="$HOME/_/install"
WATCH_LIST=".zshrc .p10k.zsh .fzf.zsh .fzf.bash .vimrc .gitconfig"

for FILE in $WATCH_LIST; do
    echo "$FILE"
    ABS_PATH_TO_FILE="$HOME/$FILE"
    # Skip files that already have a watcher process running.
    found=$(ps -ef | grep "fswatch $ABS_PATH_TO_FILE" | grep -v grep)
    if [ -n "$found" ]; then
        echo "fswatch proc found: '$found'"
    else
        # Background watcher: on each event, echo the path and copy the
        # changed file into the git checkout.
        fswatch "$ABS_PATH_TO_FILE" \
            | xargs -t -n 1 -I {} sh -c "echo {}; cp {} ${GITHUB_DIR}/${FILE}" &
    fi
done

# zombie?
# trap 'kill $(jobs -p)' EXIT #INT QUIT KILL TERM
| true
|
f74727b44e67363ad33f2d4246cddd82c42b581f
|
Shell
|
plk/biber
|
/dist/darwinlegacy_x86_64/build.sh
|
UTF-8
| 4,009
| 2.96875
| 3
|
[
"Artistic-2.0",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
#!/bin/bash
# The cp/rm steps are so that the packed biber main script is not
# called "biber" as on case-insensitive file systems, this clashes with
# the Biber lib directory and generates a (harmless) warning on first run
# Have to explicitly include the Input* modules as the names of these are dynamically
# constructed in the code so Par::Packer can't auto-detect them.
# Same with some of the output modules.
# Unicode::Collate is bundled with perl but is often updated and this is a critical module
# for biber. There are some parts of this module which must be explicitly bundled by pp.
# Unfortunately, updates to the module go into site_perl and we must bundle the right version
# and so we check if there are any newer versions than came with the version of perl we are using
# by looking to see if there is a site_perl directory for the module. If there is, we use that
# version.
declare -r perlv='5.32'
declare ucpath="/opt/local/lib/perl5/${perlv}/Unicode/Collate"
# Unicode::Collate has a site_perl version so has been updated since this
# perl was released
if [ -d "/opt/local/lib/perl5/site_perl/${perlv}/darwin-thread-multi-2level/Unicode/Collate" ]
then
ucpath="/opt/local/lib/perl5/site_perl/${perlv}/darwin-thread-multi-2level/Unicode/Collate"
fi
echo "USING Unicode::Collate at: ${ucpath}"
# Stage the biber script under a different name (see case-insensitive FS note above).
cp /opt/local/libexec/perl${perlv}/sitebin/biber /tmp/biber-darwin
# Pack the staged script plus dynamically-loaded Perl modules (--module),
# native shared libraries (--link), and data/schema files (--addfile)
# into a single self-contained binary.
PAR_VERBATIM=1 pp \
--module=deprecate \
--module=Biber::Input::file::bibtex \
--module=Biber::Input::file::biblatexml \
--module=Biber::Output::dot \
--module=Biber::Output::bbl \
--module=Biber::Output::bblxml \
--module=Biber::Output::bibtex \
--module=Biber::Output::biblatexml \
--module=Pod::Simple::TranscodeSmart \
--module=Pod::Simple::TranscodeDumb \
--module=List::MoreUtils::XS \
--module=List::SomeUtils::XS \
--module=List::MoreUtils::PP \
--module=HTTP::Status \
--module=HTTP::Date \
--module=Encode:: \
--module=File::Find::Rule \
--module=IO::Socket::SSL \
--module=IO::String \
--module=PerlIO::utf8_strict \
--module=Text::CSV_XS \
--module=DateTime \
--link=/opt/local/lib/libz.1.dylib \
--link=/opt/local/lib/libiconv.2.dylib \
--link=/opt/local/libexec/perl${perlv}/sitebin/libbtparse.dylib \
--link=/opt/local/lib/libxml2.2.dylib \
--link=/opt/local/lib/libxslt.1.dylib \
--link=/opt/local/lib/libgdbm.6.dylib \
--link=/opt/local/lib/libexslt.0.dylib \
--link=/opt/local/libexec/openssl3/lib/libssl.3.dylib \
--link=/opt/local/libexec/openssl3/lib/libcrypto.3.dylib \
--link=/opt/local/lib/liblzma.5.dylib \
--link=/opt/local/lib/libintl.8.dylib \
--link=/opt/local/lib/libicui18n.67.dylib \
--link=/opt/local/lib/libicuuc.67.dylib \
--link=/opt/local/lib/libicudata.67.dylib \
--addfile="../../data/biber-tool.conf;lib/Biber/biber-tool.conf" \
--addfile="../../data/schemata/config.rnc;lib/Biber/config.rnc" \
--addfile="../../data/schemata/config.rng;lib/Biber/config.rng" \
--addfile="../../data/schemata/bcf.rnc;lib/Biber/bcf.rnc" \
--addfile="../../data/schemata/bcf.rng;lib/Biber/bcf.rng" \
--addfile="../../lib/Biber/LaTeX/recode_data.xml;lib/Biber/LaTeX/recode_data.xml" \
--addfile="../../data/bcf.xsl;lib/Biber/bcf.xsl" \
--addfile="${ucpath}/Locale;lib/Unicode/Collate/Locale" \
--addfile="${ucpath}/CJK;lib/Unicode/Collate/CJK;lib/Unicode/Collate/CJK" \
--addfile="${ucpath}/allkeys.txt;lib/Unicode/Collate/allkeys.txt" \
--addfile="${ucpath}/keys.txt;lib/Unicode/Collate/keys.txt" \
--addfile="/opt/local/lib/perl5/site_perl/${perlv}/Mozilla/CA/cacert.pem;lib/Mozilla/CA/cacert.pem" \
--addfile="/opt/local/lib/perl5/site_perl/${perlv}/Business/ISBN/RangeMessage.xml;lib/Business/ISBN/RangeMessage.xml" \
--addfile="/opt/local/lib/perl5/site_perl/${perlv}/darwin-thread-multi-2level/auto/Unicode/LineBreak/LineBreak.bundle;lib/auto/Unicode/LineBreak/LineBreak.bundle" \
--cachedeps=scancache \
--output=biber-darwinlegacy_x86_64 \
/tmp/biber-darwin
# Remove the staged copy; \rm bypasses any shell alias for rm.
\rm -f /tmp/biber-darwin
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.