blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4cd6d7339778eb3f4b4e0164a2c1d28f07051cbb | Shell | schanur/libbivalvia | /bivalvia/color.sh | UTF-8 | 645 | 3.59375 | 4 | [
"MIT"
] | permissive | declare -A COLOR_TABLE
# ANSI foreground color codes, keyed by human-readable name.
# (declare -A is repeated defensively; it is a no-op if the associative
# array already exists, and guarantees string subscripts work below.)
declare -A COLOR_TABLE
COLOR_TABLE['red']=31
COLOR_TABLE['green']=32
COLOR_TABLE['yellow']=33
COLOR_TABLE['blue']=34
COLOR_TABLE['magenta']=35
COLOR_TABLE['cyan']=36
COLOR_TABLE['white']=37
COLOR_TABLE['color_reset']=0

# Print the numeric ANSI code for a color name (empty line if unknown).
function color_name_to_color_code {
    local COLOR_NAME="${1}"
    printf '%s\n' "${COLOR_TABLE["${COLOR_NAME}"]}"
}

# Emit the escape sequence that switches the terminal to the named color.
# No trailing newline, so it can be interleaved with text.
function color_escape_sequence {
    local COLOR_NAME="${1}"
    printf '\033[%sm' "$(color_name_to_color_code "${COLOR_NAME}")"
}

# Print all remaining arguments in the named color, then reset.
# No trailing newline is emitted, matching the original behavior.
function with_color {
    local COLOR_NAME="${1}"
    shift
    local STR="$*"
    color_escape_sequence "${COLOR_NAME}"
    printf '%s' "${STR}"
    color_escape_sequence color_reset
}
| true |
520ed110a8fa71fbd29a1a1141e25e78e471608b | Shell | udhayakumar2507/run-time-scripts | /acr-auto-build.sh | UTF-8 | 451 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Pull a public image, retag it for the private ECR registry, and push it.
# Fail fast so we never tag/push after an earlier step (e.g. login) failed;
# pipefail catches a failed 'aws ecr get-login-password' in the pipeline.
set -euo pipefail

docker_imagename="httpd"
ecrRepoName="node-api"
ecrURL="xxxxxxxxx.dkr.ecr.eu-west-1.amazonaws.com"
awsECR="$ecrURL/$ecrRepoName"

docker pull "$docker_imagename"
docker tag "$docker_imagename" "$awsECR"
# Authenticate; the password goes via stdin and never appears in argv.
aws ecr get-login-password --region eu-west-1 | docker login --username AWS --password-stdin "$ecrURL"
docker push "$awsECR"
docker logout "$ecrURL"
# Require pakages
# 1. awscli
# 2. iam access key and secret key
# 3. execute the scripts
| true |
418eb46e37ead04fed0513793b47099eccd436c9 | Shell | ololduck/Dotfiles | /install.sh | UTF-8 | 1,273 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Bootstrap a user environment (dotfiles, Vundle, dmenu2, autojump,
# i3pystatus); everything installs under $HOME/.local without root.
export PATH=$HOME/.local/bin:$PATH
PREFIX=$HOME/.local
SRC_SOFTS=$PREFIX/src

# Clone the dotfiles repo and symlink it into place (skipped if present).
install_dotfiles() {
    if test -d ~/dotfiles; then
        echo "dotfiles already there"
        return 1
    fi
    pip install --user dotfiles
    git clone https://github.com/paulollivier/Dotfiles.git ~/dotfiles
    dotfiles -s
}

# Install the Vundle plugin manager and let vim fetch all plugins.
install_vundle(){
    if test -d ~/.vim/bundle/Vundle.vim; then
        echo "vundle already installed"
        return 1
    fi
    git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim
    vim -c "execute \"PluginInstall\" | qa"
}

# Build and install dmenu2 from source into $PREFIX.
install_dmenu2() {
    # 'command -v' replaces the non-portable 'which'.
    if test "$(command -v dmenu_run)" = "$HOME/.local/bin/dmenu_run"; then
        echo "dmenu2 already installed"
        return 1
    fi
    git clone https://github.com/mrshankly/dmenu2 "$SRC_SOFTS/dmenu2"
    # Bail out if the clone/cd failed instead of running make elsewhere.
    cd "$SRC_SOFTS/dmenu2" || return 1
    make
    PREFIX=$PREFIX make install
}

# Clone autojump and run its installer (skipped if already installed).
install_autojump() {
    if test -d ~/.autojump; then
        echo "autojump already installed"
        return 1;
    fi
    git clone https://github.com/joelthelion/autojump.git "$SRC_SOFTS/autojump"
    cd "$SRC_SOFTS/autojump" || return 1
    ./install.py
}

# Python status bar for i3 plus its runtime dependencies.
install_i3pystatus() {
    pip3 install --user i3pystatus colour psutil
}

install_dotfiles
install_vundle
install_dmenu2
install_i3pystatus
install_autojump
| true |
a95f39f39360993cfeccb95f19c88e131b833a7e | Shell | Dyr-El/advent_of_code_2017 | /tethik-python3/tests/12a.bats | UTF-8 | 229 | 2.609375 | 3 | [] | no_license |
# Bats test: feed the day-12 sample pipe-adjacency graph to 12a.py and
# expect it to report 6 programs in the group containing program 0.
@test "12a.py sample 1" {
# Sample puzzle input; the heredoc content must stay verbatim.
input=$(cat << EOF
0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5
EOF
)
# Expected answer for the sample input.
output=$(cat << EOF
6
EOF
)
# Bats passes the test iff this final comparison succeeds.
result=$(echo "$input" | python 12a.py)
[[ "$result" == "$output" ]]
}
| true |
de22d6e0ec08e61eb83e7a67d3ddf586c76448a3 | Shell | jafingerhut/clojure-benchmarks | /bin/tmcurve-all.sh | UTF-8 | 4,541 | 3.1875 | 3 | [
"BSD-3-Clause"
] | permissive | #! /bin/bash
# Driver: run bin/tmcurve (a time/memory-curve sweeper) over each Clojure
# benchmark directory, for each of three Clojure versions, with per-benchmark
# arguments and optional input/expected-output files.
# Uncomment this line to make the script only do echoing, no "real work"
#ECHO_ONLY="echo"
# Uncomment this line to make the script do real work
ECHO_ONLY=""
ROOT=`pwd`
# 1>&2 echo "usage: `basename $0` [ benchmark-name ... ]"
# With no arguments, sweep every known benchmark.
if [ $# -ge 1 ]; then
BENCHMARK="$@"
else
BENCHMARK="binarytrees fannkuch fannkuchredux fasta knucleotide mandelbrot nbody regexdna revcomp revlines spectralnorm"
fi
#echo "ROOT: $ROOT BENCHMARK: $BENCHMARK"
#exit 0
OS=`uname -o 2>/dev/null`
if [ $? != 0 ]; then
# Then likely we are running on a Mac OS X system with the default
# uname installation, which accepts -s but not -o option.
OS=`uname -s 2>/dev/null`
fi
JROCKIT=0
#set -ex
COMMON_ARGS="--verbose --jvm-gc-stats --alpha 1.05 --sorted-summary --results-file ${ROOT}/results.xml"
#COMMON_ARGS="--verbose --jvm-gc-stats --alpha 1.05 --sorted-summary --results-file ${ROOT}/results.xml --sweep-only --num-mb-values 2"
# Useful when testing hello.clj memory use
EXTRA_ARGS="--min 1 --precision 1"
for b in $BENCHMARK
do
cd "${ROOT}/$b"
#${ECHO_ONLY} ./java-compile.sh
${ECHO_ONLY} ./clj-compile.sh clj-1.2
${ECHO_ONLY} ./clj-compile.sh clj-1.3-alpha1
${ECHO_ONLY} ./clj-compile.sh clj-1.3-alpha3
# Per-benchmark command-line arguments and (optional) input file.
case $b in
binarytrees)
BENCHMARK_ARGS="binarytrees 20"
INFILE=""
;;
fannkuch)
BENCHMARK_ARGS="fannkuch 12"
INFILE=""
;;
fannkuchredux)
BENCHMARK_ARGS="fannkuchredux 12"
INFILE=""
;;
fasta)
BENCHMARK_ARGS="fasta 25000000"
INFILE=""
;;
knucleotide)
# TBD: Would be nice to have infrastructure that makes it easy
# to run the same source file with same "size" but different
# command line args, like having "1" below present, or absent.
BENCHMARK_ARGS="knucleotide 1"
INFILE="input/long-input.txt"
;;
mandelbrot)
BENCHMARK_ARGS="mandelbrot 1"
INFILE=""
;;
nbody)
BENCHMARK_ARGS="nbody"
INFILE=""
;;
regexdna)
BENCHMARK_ARGS="regexdna"
INFILE="input/long-input.txt"
;;
revcomp)
BENCHMARK_ARGS="revcomp"
INFILE="input/long-input.txt"
;;
revlines)
BENCHMARK_ARGS="revlines"
INFILE="input/long-input.txt"
;;
spectralnorm)
BENCHMARK_ARGS="spectralnorm 5500"
INFILE=""
;;
*)
1>&2 echo "Unrecognized value of variable b: $b"
exit 1
;;
esac
# TBD: Add Java support
for lang in clj-1.2 clj-1.3-alpha1 clj-1.3-alpha3
do
echo "----------------------------------------"
echo $b $lang
echo "----------------------------------------"
echo ""
OUTFILE="output/long-${lang}-output.txt"
# Classpath for each Clojure version (jar + compiled benchmark classes).
case $lang in
clj-1.2)
CP1="${HOME}/lein/swank-clj-1.2.0/lib/clojure-1.2.0.jar"
CP2="./obj/${lang}"
;;
clj-1.3-alpha1)
CP1="${HOME}/lein/clj-1.3.0-alpha1/lib/clojure-1.3.0-alpha1.jar"
CP2="./obj/${lang}"
;;
clj-1.3-alpha3)
CP1="${HOME}/lein/clj-1.3.0-alpha3/lib/clojure-1.3.0-alpha3.jar"
CP2="./obj/${lang}"
;;
*)
;;
esac
EXP_OUTFILE="output/long-expected-output.txt"
# On Cygwin the JVM needs Windows-style paths; convert with cygpath.
if [ "$OS" == "Cygwin" ]
then
PS_INFILE=`cygpath -w ${INFILE}`
PS_OUTFILE=`cygpath -w ${OUTFILE}`
PS_EXP_OUTFILE=`cygpath -w ${EXP_OUTFILE}`
PS_CLASSPATH="`cygpath -w ${CP1};`cygpath -w ${CP2}"
else
PS_INFILE=${INFILE}
PS_OUTFILE=${OUTFILE}
PS_EXP_OUTFILE=${EXP_OUTFILE}
PS_CLASSPATH="${CP1}:${CP2}"
fi
if [ "${INFILE}" == "" ]
then
INFILE_ARGS=""
else
INFILE_ARGS="--input ${PS_INFILE}"
fi
OUTFILE_ARGS="--output ${PS_OUTFILE}"
# Invoke tmcurve; %mb in -Xmx%mbm is substituted by tmcurve per sweep step.
if [ "$OS" == "Cygwin" ]
then
if [ $JROCKIT -eq 1 ]
then
${ECHO_ONLY} "${ROOT}/bin/tmcurve" ${COMMON_ARGS} ${EXTRA_ARGS} ${INFILE_ARGS} ${OUTFILE_ARGS} --check-output-cmd "diff --strip-trailing-cr --brief ${EXP_OUTFILE} ${OUTFILE}" \\Program\ Files\\Java\\jrmc-4.0.1-1.6.0\\bin\\java -server -Xmx%mbm -classpath "${PS_CLASSPATH}" ${BENCHMARK_ARGS}
else
${ECHO_ONLY} "${ROOT}/bin/tmcurve" ${COMMON_ARGS} ${EXTRA_ARGS} ${INFILE_ARGS} ${OUTFILE_ARGS} --check-output-cmd "diff --strip-trailing-cr --brief ${EXP_OUTFILE} ${OUTFILE}" \\Program\ Files\\Java\\jdk1.6.0_21\\bin\\java -server -Xmx%mbm -classpath "${PS_CLASSPATH}" ${BENCHMARK_ARGS}
fi
else
${ECHO_ONLY} "${ROOT}/bin/tmcurve" ${COMMON_ARGS} ${EXTRA_ARGS} ${INFILE_ARGS} ${OUTFILE_ARGS} --check "diff --strip-trailing-cr --brief ${EXP_OUTFILE} ${OUTFILE}" java -server -Xmx%mbm -classpath "${PS_CLASSPATH}" ${BENCHMARK_ARGS}
fi
done
cd "${ROOT}"
done
| true |
850b1bade3c2bec6a43c4fee6406802bad6b495f | Shell | hanksudo/osx-wifi-proxy-switch | /osx-wifi-proxy.sh | UTF-8 | 687 | 4.09375 | 4 | [] | no_license | #!/bin/sh
# Toggle the macOS Wi-Fi HTTP/HTTPS web proxy on or off via networksetup.

# Set paths to our utilities
NETWORKSETUP=/usr/sbin/networksetup

# Print usage and exit non-zero (reaching this means bad arguments).
usage ()
{
printf 'Usage :\n'
printf '\t%s on\n' "$0"
printf '\t%s off\n' "$0"
exit 1
}

# Show the current HTTP and HTTPS proxy settings for the wi-fi service.
current_state ()
{
echo 'Web Proxy(HTTP) state:'
"$NETWORKSETUP" -getwebproxy wi-fi
printf '\n'
echo 'Secure Web Proxy(HTTPS) state:'
"$NETWORKSETUP" -getsecurewebproxy wi-fi
printf '\n'
}

current_state

if [ "$#" -ne 1 ]
then
usage
fi

STATE=$1

if [ "$STATE" = "on" ] || [ "$STATE" = "off" ]
then
echo "Turning $STATE proxy..."
printf '\n'
# Run the commands directly; the original wrapped them in backticks,
# which would try to execute their own output as a command.
sudo "$NETWORKSETUP" -setwebproxystate wi-fi "$STATE"
sudo "$NETWORKSETUP" -setsecurewebproxystate wi-fi "$STATE"
current_state
exit 0
fi

usage
| true |
6a397da23f44edd6a2108d378a8698bf14d80231 | Shell | jusa/utils | /vim-grep | UTF-8 | 6,013 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Works with any editor but best with vim :)
#
# Add to your .vimrc:
#
# au BufRead *.gitopenbuffer set filetype=gitopenbuffer
# au BufRead *.gitopenbuffer hi openbuffer_left ctermfg=lightblue
# au BufRead *.gitopenbuffer syn match openbuffer_left /\zs.*:[0-9]*: \ze/
# autocmd FileType gitopenbuffer map <buffer> t ^<C-w>gF
# autocmd FileType gitopenbuffer map <buffer> j <Down> ^
# autocmd FileType gitopenbuffer map <buffer> k ^<Backspace><Backspace>^
# autocmd FileType gitopenbuffer map <buffer> q :q!<CR>
# autocmd FileType gitopenbuffer map <buffer> <ENTER> ^gF
#
# If using just git for grepping use following:
#
# command! -nargs=* -complete=shellcmd G tabnew | setlocal filetype=gitopenbuffer buftype=nofile bufhidden=hide noswapfile | r !vim-grep "<args>"
# nnoremap <C-F> :G <C-R><C-W><CR>
#
# With silversearcher-ag you can use:
#
# command -nargs=* G :Ack! <args>
# nnoremap <C-F> :Ack! <C-R><C-W><CR>
#
# With this configuration it is possible to invoke grepping from inside vim with command :G
# or by over any text under cursor with keyboard Ctrl+F
# Bootstrap: pull in the shared helper library (handle_options,
# need_binaries, load_config, check_config, common_tempfile, ...).
SCRIPTPATH="$(dirname "`readlink -f $0`")"
source "$SCRIPTPATH/common.sh" || exit 1
SCRIPT_VERSION="1.4.0"
expect_common_version 3
load_config
# Defaults; each can be overridden via the config file or CLI options.
check_config EDITOR=less FORCE_GREP=0 IGNORE_CASE=0 USE_AG=0
if [ $USE_AG -eq 1 ]; then
need_binaries ag
else
need_binaries git grep
fi
# Print CLI help (wired to -h/--help in the option table below).
print_usage() {
echo "$(basename $0) v$SCRIPT_VERSION"
echo ""
echo " Configuration can be saved to config file to avoid using arguments,"
echo " in $COMMON_CONFIG_LOCATION"
echo ""
echo " -h|--help Print this help"
echo " -g|--grep Use grep even when grepping from git tree"
echo " -i|--ignore-case Use case insensitive grepping"
echo " -e|--editor Override default editor used to view results"
echo " (default: $EDITOR)"
echo ""
exit 0
}
# Invoked by handle_options when a required value or the pattern is absent.
args_missing() {
case $1 in
EDITOR)
echo "fatal: value missing for editor"
exit 1 ;;
*)
echo "fatal: no pattern given. $1"
# same exit code as with git grep
exit 128 ;;
esac
}
# Option table for common.sh's handle_options; remaining words become
# the search pattern (stored into $search).
handle_options \
"default: handle_options_store_to = search " \
"min-arguments: 1 " \
"missing: args_missing " \
"-h,--help, 0, print_usage " \
"-g,--grep, 0, FORCE_GREP " \
"-e,--editor, 1, EDITOR " \
"-i,--ignore-case, 0, IGNORE_CASE " \
--- \
"$@"
# Check editor after handling options to take override into account
need_binaries $EDITOR
# Path of the result buffer file; lazily created by write_output.
result=""
# Status logger: blanks the previous message with spaces, then rewrites
# the line in place using \r.  Suppressed when running inside vim.
print_log() {
# only print when outside vim
if [ -z "$VIMRUNTIME" ]; then
echo -e -n "${last_log//?/ }\r"
last_log="$@"
echo -e -n "$last_log\r"
fi
}
# Emit one result line: into the temp result file normally, or straight
# to stdout when the script is being run from inside vim.
write_output() {
# write to file when outside vim
if [ -z "$VIMRUNTIME" ]; then
if [ -z "$result" ]; then
common_tempfile result grep.XXXXXX.gitopenbuffer
fi
echo "$@" >> "$result"
else
echo "$@"
fi
}
# Space-separated list of submodule paths of the current git repository.
git_submodules() {
git submodule status | awk 'BEGIN { ORS=" " } { print $2 }'
}
print_log ">> Grepping..."
# Prefer 'git grep' when we are inside a git work tree, unless the user
# forced plain grep or the silver searcher.
use_git=0
if [ $USE_AG -eq 0 ] && [ $FORCE_GREP -eq 0 ]; then
git branch > /dev/null 2>&1
if [ $? -eq 0 ]; then
use_git=1
fi
fi
# Select the grep backend, its trailing argument and the directory list.
if [ $use_git -eq 1 ]; then
need_binaries awk
grepcommand="git grep -I -n -E"
grepcommandpost=""
grepdirectories=". $(git_submodules)"
elif [ $USE_AG -eq 1 ]; then
grepcommand="ag"
grepcommandpost=""
grepdirectories="."
else
grepcommand="grep -r -n"
grepcommandpost="*"
grepdirectories="."
fi
if [ $IGNORE_CASE -eq 1 ]; then
grepcommand="$grepcommand -i"
fi
common_tempfile output
# Remove the temp files on every exit path (normal exit, error, signal).
finish() {
if [ -e "$output" ]; then
rm -f "$output"
fi
if [ -e "$result" ]; then
rm -f "$result"
fi
}
trap finish EXIT
# Grep one directory and, in git mode, recurse into its submodules.
# $1 = directory, $2 = accumulated path prefix for nested submodules.
grep_directory() {
local dir=$1
local last=$2
if [[ ! -d "$dir" || "$dir" == ..* ]]; then
# Skip submodule if the submodule directory doesn't exist
# or the submodule path is not below our current path.
return
fi
pushd "$dir" >/dev/null
local path_prefix=
if [ "$dir" != "." ]; then
print_log ">> Grepping $last$dir..."
# Escape '/' so the prefix can be spliced into the sed expression below.
path_prefix="${last//\//\\/}${dir//\//\\/}\/"
fi
# Prefix every "file:line:" hit with the submodule path, so the editor
# can open the file relative to the top-level directory.
$grepcommand -- "$search" $grepcommandpost | sed -e 's/\(.*:[0-9]\+\):/'"$path_prefix"'\1: /' >> "$output"
if [[ $use_git -eq 1 && "$dir" != "." && -f .gitmodules ]]; then
if [ -n "$last" ]; then
last="$last/$dir/"
else
last="$dir/"
fi
for add_d in $(git_submodules); do
grep_directory $add_d $last
done
fi
popd >/dev/null
}
# Run the search over every requested directory.
for d in $grepdirectories; do
grep_directory $d
done
hits=$(wc -l < "$output")
if [ $hits -eq 0 ]; then
exit 1
fi
# Going through all the lines with Bash is going
# to take a while...
if [ $hits -gt 1000 ]; then
print_log ">> Sorting..."
fi
plural=""
if [ $hits -gt 1 ]; then
plural="s"
fi
write_output "== ${hits} hit${plural} =="
# Disable globbing: hit lines may contain shell wildcard characters.
set -f
# Group the hits by file, separating files with a blank line.
old_filename=""
while IFS='' read -r line || [[ -n "$line" ]]; do
filename="${line%%:*}"
if [ "$filename" != "$old_filename" ]; then
write_output ""
old_filename="$filename"
fi
write_output "$line"
done < "$output"
print_log ""
# Hand the result buffer to the editor; (g)vim also jumps to the first
# match via the +/pattern argument (';&' falls through from gvim to vim).
case $EDITOR in
gvim)
EDITOR="gvim -f"
;&
vim)
if [ -z "$VIMRUNTIME" ]; then
search_case=""
if [ $IGNORE_CASE -eq 1 ]; then
search_case="\c"
fi
$EDITOR "+/${search_case}${search//\//\\/}" "$result"
exit $?
fi
;;
*)
$EDITOR "$result"
exit $?
;;
esac
| true |
f317c91a679b57298e639b0a879eaa27a530021d | Shell | ChrisGottsacker/concept_demos | /bash/functions.sh | UTF-8 | 149 | 2.59375 | 3 | [] | no_license | # Easy double-spaced printing
# Print all arguments on one line followed by a blank line ("double-spaced").
# printf avoids echo -e's surprise interpretation of backslashes inside the
# arguments and the stray trailing space the old version emitted.
function printds {
    printf '%s\n\n' "$*"
}
# Demo: a short double-spaced letter.
printds "Dear bash"
printds "I'm sorry it turned out this way"
printf 'love,\n'
printf 'Mom\n'
| true |
aa3ea8e2cffab98188f1daab53b5ba139427daec | Shell | hminh0407/dotfiles | /scripts/installation/tmux.sh | UTF-8 | 740 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
# Install tmux, the tmux plugin manager (tpm), its plugins and the tmuxp
# session manager (via whichever pip is available).
install () {
    echo "... Installing Tmux ..."
    sudo apt-fast install --no-install-recommends -y tmux

    echo "... Installing Tmux Plugin Manager ..."
    # 'git cl' is a personal alias; aliases are not expanded in scripts,
    # so spell out 'clone'.
    git clone https://github.com/tmux-plugins/tpm.git ~/.tmux/plugins/tpm

    echo "... Installing Tmux Plugins ..."
    ~/.tmux/plugins/tpm/bin/install_plugins

    # Prefer pip, fall back to pip3; both branches did identical work, so
    # pick the command once and run the shared steps.
    local pip_cmd=""
    if [ -x "$(command -v pip)" ]; then
        pip_cmd=pip
    elif [ -x "$(command -v pip3)" ]; then
        pip_cmd=pip3
    fi
    if [ -n "$pip_cmd" ]; then
        echo "... Installing Tmux Profile Tool tmuxp ..."
        sudo "$pip_cmd" install wheel tmuxp
        ~/.tmux/plugins/tpm/scripts/install_plugins.sh
    fi
}

install
| true |
7ac67076fbaa586dbfa8a269c8f7a7711a3bcf9c | Shell | movabletype/mt-docs-data-api-reference | /redoc-build.sh | UTF-8 | 356 | 2.5625 | 3 | [] | no_license | #!/bin/sh
# Build a static ReDoc HTML page for every Data API version.
# Stop at the first failed build instead of silently continuing.
set -e

for v in v1 v2 v3 v4 v5 v6; do
    npx redoc-cli build "src/openapi/${v}.json" \
        --template src/openapi/data-api-redoc.hbs \
        --disableGoogleFont \
        --title "Movable Type Data API ${v}" \
        --templateOptions.metaDescription "Movable Type Data API ${v}" \
        --options.sortTagsAlphabetically=true \
        --output "docs/${v}.html"
done
| true |
bdb4a69aec78cd503474bc1fc87ee0679eef8309 | Shell | ziseputi/upf-epc | /scripts/build.sh | UTF-8 | 378 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2019 Intel Corporation

# Fail fast: abort on errors, failed pipeline stages, and unset variables.
set -eo pipefail
set -u

# Default to parallel make inside the images and BuildKit on the host,
# unless the caller already set these in the environment.
export MAKEFLAGS="${MAKEFLAGS:--j$(nproc)}"
export DOCKER_BUILDKIT="${DOCKER_BUILDKIT:-1}"

# Build the BESS dataplane image and the control-plane interface image.
docker build --pull --build-arg MAKEFLAGS --target=bess -t spgwu .
docker build --pull --build-arg MAKEFLAGS --target=cpiface -t cpiface .
| true |
025c53e85f05539f38c5e999a5c017e5769b6077 | Shell | bjucps/cps209-tests | /lab2/_lab2.sh | UTF-8 | 643 | 2.59375 | 3 | [] | no_license |
# Grade lab2: build the student's project, run their unit tests, then swap
# in the official test class and run again.  The hyphenated commands
# (require-src-folder, do-compile, run-program, require-files) come from
# the course grading harness sourced before this script.
require-src-folder
copy-gradle-buildfiles
# Appended verbatim to build.gradle: kill any unit test after 6 seconds
# and log pass/fail events.
cat >> build.gradle <<EOF
// Fail unit tests that do not complete within specified duration
test {
timeout = Duration.ofMillis(6000)
testLogging {
events "passed", "failed"
}
}
EOF
do-compile gradle jar || exit
echo -e "\nExecuting your tests..."
run-program --test-category "Unit Tests" --test-message "Your tests run without error" gradle test
# Replace the student's test with the official one and re-run.
rm src/GuessTest.java
cp $TEST_DIR/MyGuessTest.java src
echo -e "\nExecuting official tests..."
run-program --test-category "Unit Tests" --test-message "Official tests run without error" gradle test
require-files src/report.md
| true |
5cd87ae2fb59a6e404a4bd2408d3c3fa8581bf52 | Shell | TunisianWhiteHatSecurity/R2ES-Reverse_Engineering_Environment_Setup | /main.sh | UTF-8 | 3,970 | 3.203125 | 3 | [] | no_license | #!/bin/bash
#Global Variables
# Index 0 of each array is a "NoThing" sentinel; real entries start at 1.
# tabToInstall collects missing RE tools, tabDependencies missing build deps.
WORKING_DIRECTORY=${PWD}
tabToInstall=()
tabDependencies=()
tabToInstall[0]="NoThing"
tabDependencies[0]="NoThing"
#includes
# Colors.sh defines BGreen/BRed/BCyan; the other two files define the
# vCheck_* probes and the vInstall_* installers used below.
source Colors.sh
source Check_installation.sh
source Install_tools.sh
# Report one tool's install status and queue it if missing.
# _vMain_iCheckTool <flag> <display-name> <item-name> <array-name>
#   flag:         "YES" if the tool was detected, anything else otherwise
#   display-name: human-readable name used in the status message
#   item-name:    value appended to the target array when missing
#   array-name:   tabToInstall or tabDependencies (passed by name)
_vMain_iCheckTool()
{
	local installed="$1"
	local display="$2"
	local item="$3"
	local -n target="$4"   # nameref (bash 4.3+): append to the caller's array
	if [ "${installed}" = "YES" ]; then
		echo -e "\t${BGreen}[+] ${display} is already installed."
	else
		# (Original printed the doubled "not installed installed.")
		echo -e "\t${BRed}[-] ${display} is not installed."
		target[${#target[@]}]="${item}"
	fi
}

# Check every flag set by the vCheck_* probes and fill the global queues:
# build prerequisites go to tabDependencies, RE tools to tabToInstall.
# (This replaces fourteen copy-pasted if/else blocks; quoting the flag
# also avoids the '[ == "YES" ]' error the original hit on unset flags.)
vMain_iAppendToolsToInstall()
{
	_vMain_iCheckTool "${WINE_IS_INSTALLED}"          "WINE"                 "WINE"             tabDependencies
	_vMain_iCheckTool "${GDB_INSTALLED}"              "GDB"                  "GDB"              tabToInstall
	_vMain_iCheckTool "${NASM_INSTALLED}"             "NASM"                 "NASM"             tabToInstall
	_vMain_iCheckTool "${COMPILER_INSTALLED}"         "Compiler"             "Compiler"         tabDependencies
	_vMain_iCheckTool "${QMAKE_INSTALLED}"            "QMAKE"                "QMAKE"            tabDependencies
	_vMain_iCheckTool "${MAKE_INSTALLED}"             "MAKE"                 "MAKE"             tabDependencies
	_vMain_iCheckTool "${DECOMPRESSOR_INSTALLED}"     "Decompressor"         "Decompressor"     tabDependencies
	_vMain_iCheckTool "${PYTHON27_WINDOWS_INSTALLED}" "PYTHON27 for windows" "PYTHON27_WINDOWS" tabDependencies
	_vMain_iCheckTool "${IMMUNITY_DBUGGER_INSTALLED}" "IMMUNITY_DBUGGER"     "IMMUNITY_DBUGGER" tabToInstall
	_vMain_iCheckTool "${WIN_DBG_INSTALLED}"          "WIN_DBG"              "WIN_DBG"          tabToInstall
	_vMain_iCheckTool "${PE_BROWSE_INSTALLED}"        "PE_BROWSE"            "PE_BROWSE"        tabToInstall
	_vMain_iCheckTool "${EDB_INSTALLED}"              "EDB"                  "EDB"              tabToInstall
	_vMain_iCheckTool "${X64_DBG_INSTALLED}"          "X64_DBG"              "X64_DBG"          tabToInstall
	_vMain_iCheckTool "${IDA_INSTALLED}"              "IDA"                  "IDA"              tabToInstall
}
# Probe every tool (each vCheck_* sets its *_INSTALLED flag), then sort
# the missing ones into the install/dependency queues.  The actual
# installation step is still disabled below.
main()
{
vCheck_eIsGdbInstalled
vCheck_eIsWineInstalled
vCheck_eIsImmunityDebuggerInstalled
vCheck_eIsQmakeInstalled
vCheck_eIsMakeInstalled
vCheck_eIsDecompressorInstalled
vCheck_eIsPython27WindowsInstalled
vCheck_eIsCompilerInstalled
vCheck_eIsWinDbgInstalled
vCheck_eIsPeBrowseInstalled
vCheck_eIsEdbInstalled
vCheck_eIsX64DbgInstalled
vCheck_eIsIdaInstalled
vMain_iAppendToolsToInstall
#vInstall_eInstallDependencies
}
main
# List everything queued for installation (index 0 is the "NoThing" sentinel).
i=1
while [ "$i" -le $(( ${#tabToInstall[@]} - 1 )) ]; do
    echo -e "\t${BCyan}Element $i to install : '${tabToInstall[i]}'"
    i=$((i + 1))
done
| true |
ca8c9d219b05469c7ed1c55ca62b8a366d1f250e | Shell | erikarn/freebsd-wifi-build | /build/files/rc2.11a | UTF-8 | 2,261 | 2.921875 | 3 | [] | no_license | #!/bin/sh
# rc.local-style bring-up for a FreeBSD 802.11a test access point:
# populate /var, configure networking/bridging, and start two hostapd
# instances on Atheros radios.  Command order is deliberate.
# This is run once rc has setup /tmp, /etc and /var; it's our
# responsibility to set things up to run.
# Directories
echo "*** Populating /var .."
mkdir -p /var/run/hostapd
mkdir -p /var/log
mkdir -p /var/tmp
mkdir -p /var/db
mkdir -p /var/empty
mkdir -p /var/cron
mkdir -p /var/cron/tabs
echo "*** setting up hostname"
/bin/hostname TEST_AP_11A
# Bring up loopback
echo "*** bringing up loopback .."
/sbin/ifconfig lo0 inet 127.0.0.1/8 up
echo "*** Password/login databases .."
/usr/sbin/pwd_mkdb /etc/master.passwd
/usr/bin/cap_mkdb /etc/login.conf
# Bridge wired (arge0) and both wireless interfaces onto one L2 segment.
echo "*** bringing up bridge .."
/sbin/ifconfig bridge0 create
/sbin/ifconfig bridge0 addm arge0
/sbin/ifconfig bridge0 inet 10.61.8.9/24
/sbin/ifconfig bridge0 up
echo "*** bringing up arge0 .."
/sbin/ifconfig arge0 up
# debugging
/sbin/sysctl net.wlan.cac_timeout=5
/sbin/sysctl net.wlan.nol_timeout=30
echo "*** bringing up wlan0/hostapd .. "
/sbin/ifconfig wlan0 create wlandev ath0 wlanmode ap
/sbin/ifconfig wlan0 country US regdomain FCC3
/usr/sbin/wlandebug +state
# newstate + beacon proc
# /sbin/sysctl dev.ath.0.debug=0x40080
# HT/40 - test interop with HT/20 and 11a
# /sbin/ifconfig wlan0 channel 157:ht/40
/sbin/ifconfig wlan0 channel 116:a
# disable ampdutx - we just don't support it
# disable amsdu - again, just to be safe
/sbin/ifconfig wlan0 -ampdutx -amsdu
# Allow larger RX A-MPDU burst sizes in hostap mode
/sbin/ifconfig wlan0 ampdulimit 64k
# atheros NICs require more A-MPDU spacing than '0'
/sbin/ifconfig wlan0 ampdudensity 8
# Make sure ampdu-age is set 500 ticks; recent changes to the
# ath/net80211 code seem to have fixed this.. :)
/sbin/sysctl net.wlan.ampdu_age=500
/sbin/ifconfig wlan0 up
/usr/sbin/hostapd -B /etc/hostapd.conf
/sbin/ifconfig bridge0 addm wlan0 -stp wlan0
# Bring up wlan1 - channel 149:a for testing
/sbin/ifconfig wlan1 create wlandev ath1 wlanmode ap
/sbin/ifconfig wlan1 country US regdomain FCC3
/sbin/ifconfig wlan1 channel 149:a
/sbin/ifconfig wlan1 up
/usr/sbin/hostapd -B /etc/hostapd.wlan1.conf
# disable interference mitigation (ANI) here; doing it too
# early causes a panic
/sbin/sysctl dev.ath.0.intmit=0
/sbin/ifconfig bridge0 addm wlan1 -stp wlan1
echo "*** inetd"
/usr/sbin/inetd
echo "*** Done!"
exit 0
| true |
69a67ebb41dfedc5c02ddcd721b73b6ccd4718a4 | Shell | petronny/aur3-mirror | /libopensync-plugin-sunbird/PKGBUILD | UTF-8 | 755 | 2.703125 | 3 | [] | no_license | # Contributor: Andreas Schönfelder <passtschu@freenet.de>
pkgname=libopensync-plugin-sunbird
pkgver=0.22
pkgrel=1
pkgdesc='Sunbird plugin for OpenSync'
url='http://www.opensync.org/'
license=('LGPL')
arch=('i686' 'x86_64')
depends=('libopensync')
source=(http://www.opensync.org/attachment/wiki/download/$pkgname-$pkgver.tar.bz2?format=raw)
options=('!libtool')
md5sums=('c23d0cc6c128831c8a129d0b21aa4fe9')

build()
{
  cd "${startdir}/src"
  # Strip the '?format=raw' suffix from the downloaded tarball.
  # The original used ${pkgname-$pkgver} (a default-value expansion, not a
  # hyphenated name), which dropped the version and renamed the tarball to
  # a file the tar call below never matched.
  mv "${pkgname}-${pkgver}.tar.bz2?format=raw" "${pkgname}-${pkgver}.tar.bz2"
  tar xjf "${pkgname}-${pkgver}.tar.bz2"
  cd "${pkgname}-${pkgver}"
  ./configure --prefix=/usr
  # add compatibility for inline functions
  sed -i -e "s/^CFLAGS = /CFLAGS = -fgnu89-inline /g" src/Makefile
  make || return 1
  make DESTDIR="${startdir}/pkg" install
}
| true |
83e8d2dffb5528f68476e440678095e4cf989d12 | Shell | vdmeer/project-manager-maven-plugin | /src/main/bash/skb-bin/create-env-settings.sh | UTF-8 | 2,024 | 3.15625 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
## Copyright 2014-2016 Sven van der Meer <vdmeer.sven@mykolab.com>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##
## Script to create environment settings required by the modules script.
##
## @package de.vandermeer.skb
## @author Sven van der Meer <vdmeer.sven@mykolab.com>
## @copyright 2014-2016 Sven van der Meer
## @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
## @version v2.1.0 build 160226 (26-Feb-16)
##
# Generate the environment settings file consumed by the module scripts.
pwd=$(pwd)
file=bin/env.settings

echo "creating environment settings"

# Header: format documentation for the generated settings file.
# The quoted 'EOF' keeps ${NAME} literal; the settings reader resolves it.
cat > "${file}" <<'EOF'

# File containing environment settings or modules
#
# - blank lines are ignored
# - lines starting with '#' are ignored (i.e. are comments)
# - other lines are read as "NAME VALUE"
# -> reference to other variables using ${NAME}
#
# -> use source-bash or source-tcsh to set in shell
#
#
EOF

{
echo ""
echo "PROJECT_HOME ${pwd}"
echo "PROJECT_APPS_DIR ${pwd}/../applications"
} >> "${file}"

if [ -f src/module/build-versions.skb ]; then
    # NOTE(review): the original interpolated the shell variable
    # $PROJECT_HOME here, which is normally unset when this script runs,
    # producing "/src/module/...".  The settings format resolves ${NAME}
    # itself (PROJECT_HOME is defined two lines up), so write the
    # reference literally — confirm against the settings reader.
    echo 'PROJECT_BUILD_VERSION_FILE ${PROJECT_HOME}/src/module/build-versions.skb' >> "${file}"
else
    echo "no build-versions.skb file found, automatic external dependency generation will not work"
fi

echo "MVN_GROUP_ID de.vandermeer" >> "${file}"
echo "" >> "${file}"

# Create the expected work directories; -p makes reruns quiet no-ops.
mkdir -p ../applications /tmp/logs/skb
exit 0
| true |
719ab9d4946fb204efe6e28dc6e212c85bf634c3 | Shell | Tristan2252/.dotfiles | /install/roles/setup/files/scripts/de_connect.sh | UTF-8 | 349 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# Attach an interactive shell (with X forwarding) to the CSE-326 LXD
# container, starting the container first if it is not already running.
CONTAINER=CSE-326

# Open a login shell as 'ubuntu' inside the container, passing DISPLAY
# through so GUI programs can reach the host X server.
attach_shell() {
    lxc exec "$CONTAINER" -- sudo --login --user ubuntu -i env DISPLAY="$DISPLAY" bash
}

STATE=$(lxc info "$CONTAINER" | grep -o Running)

if [ "$STATE" != "Running" ]; then
    lxc start "$CONTAINER"
    echo "Connecting..."
    sleep 2
fi

attach_shell
exit 0
| true |
234e43f5e54814d71cd8d8509da118e7fb3d312c | Shell | msneddon/search | /scripts/start_service.sh | UTF-8 | 989 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# ASCII-art "KBase Search2" startup banner.  Plain echo (no -e) is
# intentional: the art's backslashes must print literally.
echo
echo " _ ______ _____ _ "
echo " | |/ / _ \ / ____| | | "
echo " | ' /| |_) | __ _ ___ ___ | (___ ___ __ _ _ __ ___| |__ "
echo " | < | _ < / _' / __|/ _ \ \___ \ / _ \/ _' | '__/ __| '_ \ "
echo " | . \| |_) | (_| \__ \ __/ ____) | __/ (_| | | | (__| | | | "
echo " |_|\_\____/ \__,_|___/\___| |_____/ \___|\__,_|_| \___|_| |_|2"
echo
# Print an aligned, colorized "label : message" line.
# $1 = label (right-aligned in 10 columns, cyan), $2 = message (grey).
# Quoting "$1" keeps multi-word labels in one printf field.
log() {
    printf " \033[36m%10s\033[0m : \033[90m%s\033[0m\n" "$1" "$2"
}
# Print a red error message and abort with status 1.
# The message goes through %s, so '%' characters in it are safe (the
# original interpolated "$@" into the printf format string).
error() {
    printf '\n \033[31mError: %s\033[0m\n\n' "$*"
    exit 1
}
# Configure the environment and launch the search service under uwsgi.
# Uses the sibling log()/error() helpers defined above in this file.
start_service() {
    log starting "search service"
    export PYTHONPATH="$PYTHONPATH:$(pwd)/lib"
    export SEARCH_CONFIG_DIRECTORY="$(pwd)/config"
    # Test the command directly instead of inspecting $? afterwards.
    if uwsgi --ini "$SEARCH_CONFIG_DIRECTORY/uwsgi_search.ini"; then
        log status "Service successfully started"
    else
        error "Could not start service"
    fi
}
# Show where we are running from, start the service, end with a blank line.
log pwd $(pwd)
start_service
echo ;
| true |
30e2b05fbb554f3ce9714c2a1840179d993f5cc4 | Shell | Dynamedia/docker-redis | /entrypoint.sh | UTF-8 | 558 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# Fill in the redis.conf placeholders from the environment (with sane
# defaults), scrub inherited MySQL secrets, then exec the real command.
: "${REDIS_MAXMEMORY:=64M}"
: "${REDIS_MAXMEMORY_POLICY:=allkeys-lfu}"

# Modify the redis config: one sed pass with two substitutions instead
# of two passes over the same file.
sed -i \
    -e "s#REDIS_MAXMEMORY_PLACEHOLDER#maxmemory $REDIS_MAXMEMORY#g" \
    -e "s#REDIS_MAXMEMORY_POLICY_PLACEHOLDER#maxmemory-policy $REDIS_MAXMEMORY_POLICY#g" \
    /usr/local/etc/redis/redis.conf

# Drop the sensitive variables that leak in from the shared .env file.
unset MYSQL_ROOT_PASSWORD MYSQL_DATABASE MYSQL_USER MYSQL_PASSWORD

exec "$@"
| true |
1177e4a9adc9a86d4790567130ade6eadcf19611 | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/4956517.txt | UTF-8 | 993 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Purge every other Flash plugin, install the bundled libflashplayer.so
# into the Mozilla plugin directory, and link it for Firefox/XULRunner.
echo “Stopping any Firefox that might be running”
sudo killall -9 firefox

echo “Removing any other flash plugin previously installed:”
# NOTE: the original had an en-dash ('–purge'); apt-get needs '--purge'.
sudo apt-get remove -y --purge flashplugin-nonfree gnash gnash-common mozilla-plugin-gnash swfdec-mozilla libflashsupport nspluginwrapper
sudo rm -f /usr/lib/mozilla/plugins/*flash*
sudo rm -f ~/.mozilla/plugins/*flash*
sudo rm -f /usr/lib/firefox/plugins/*flash*
sudo rm -f /usr/lib/firefox-addons/plugins/*flash*
sudo rm -rf /usr/lib/nspluginwrapper

echo “Installing Flash Player 10″
#cd ~
sudo cp /home/libflashplayer.so /usr/lib/mozilla/plugins/

echo “Linking the libraries so Firefox and apps depending on XULRunner.”
sudo ln -sf /usr/lib/mozilla/plugins/libflashplayer.so /usr/lib/firefox-addons/plugins/
sudo ln -sf /usr/lib/mozilla/plugins/libflashplayer.so /usr/lib/xulrunner-addons/plugins/

# now doing some cleaning up:
sudo rm -f libflashplayer.so
sudo rm -f libflashplayer-10.0.32.18.linux-x86_64.so.tar.gz
| true |
2c850aadde27bb671bc947d2342d74ef933e7aa9 | Shell | disableNoise/bash-login-script | /sshd and httpd checker | UTF-8 | 320 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Report whether the sshd and httpd daemons are currently running.
# pgrep matches on the exact process name, replacing the fragile
# 'ps aux | grep | awk | grep -v grep' pipelines of the original.
if pgrep -x sshd > /dev/null; then
    echo "sshd is active"
else
    echo "sshd is NOT active"
fi

if pgrep -x httpd > /dev/null; then
    echo "httpd is active"
else
    echo "httpd is NOT active"
fi
| true |
09fe09b2319984c30eb6b656ea3472b9eebf5efa | Shell | aks60808/COMP9044 | /9044AS1/legit/test03.sh | UTF-8 | 447 | 2.734375 | 3 | [] | no_license | #!/bin/dash
# author: Heng-Chuan Lin (z5219960@unsw.edu.au)
# class: 9041 soft-con
# file description: error test for branch and status
# written in 14/07/2019
# Start from a clean slate; .legit may be left over from a previous run.
if [ -d .legit ]
then
rm -r .legit #just in case
fi
# Exercise branch + status on a fresh repo with one untracked file; the
# ./legit-* tools are the project scripts under test in this directory.
./legit-init
touch a
./legit-branch b1
./legit-status # error occured : didn't create such folders inside commit cause ls error - ls: cannot access .legit/repo/master/commit: No such file or directory
echo "done the script - passed"
# Clean up the test artifacts.
rm -r .legit
rm a
| true |
63c373408490f810da7cddcb5e8ccbcfa6c6ea9b | Shell | yomox9/nmrih | /c.sh | UTF-8 | 355 | 2.875 | 3 | [] | no_license | #!/bin/sh
# Once per second, print a timestamped status line: whether the NMRiH
# server lock file exists and whether srcds_linux is running
# ("O" = present/running, "X" = absent).  Runs until interrupted.
while :
do
    # %s keeps the timestamp out of the printf format-string position.
    printf '%s' "$(date +%Y%m%d_%H%M%S)"

    if [ -e nmrih_d.lock ]; then
        printf ' lock O'
    else
        printf ' lock X'
    fi

    # pgrep -f matches against the full command line, replacing the
    # fragile 'ps | grep | cut -d " " -f 15' field counting (which broke
    # whenever ps aligned columns with extra spaces).
    if pgrep -f srcds_linux > /dev/null; then
        printf ' proc O'
    else
        printf ' proc X'
    fi

    echo
    sleep 1
done
| true |
4f62981d3ebb18e3cc1ed358f610b03c662c650b | Shell | eol-uchile/backup-container | /scripts/services/mongodb_comments.sh | UTF-8 | 874 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
# Back up the cs_comments_service MongoDB database: dump + gzip, encrypt,
# upload to the NAS, then either keep a local copy or delete it.
# $1 = scratch folder, $2 = remote folder name, $3 = 'keep' to retain a copy.
# pipefail makes a failed mongodump abort the script even though gzip
# (the last pipeline stage) would otherwise report success.
set -euo pipefail

folder=$1
remote_folder=$2
keep_mode=$3

archive="$folder/mongodb_cs_comment_service.gz"

# Dump mongodb
mongodump --host "$PLATFORM_MONGODB_HOST" --port "$PLATFORM_MONGODB_PORT" --username "$PLATFORM_MONGODB_USER" --password "$PLATFORM_MONGODB_PASSWORD" --authenticationDatabase edxapp --archive --db cs_comments_service | gzip > "$archive"

# Encrypt the dump, then drop the plaintext archive.
openssl aes-256-cbc -md sha256 -salt -out "$archive.enc" -in "$archive" -pass pass:"$BACKUP_PASSWORD"
rm "$archive"

echo "Uploading to NAS"
rclone copy "$archive.enc" "nas:/share/eol_backup/$PLATFORM_NAME/$remote_folder"

# (The closing 'fi' is on the following line of the file.)
if [ "$keep_mode" = 'keep' ]
then
    mkdir -p "$HOST_MOUNT/$PLATFORM_NAME/mongodb"
    mv "$archive.enc" "$HOST_MOUNT/$PLATFORM_NAME/mongodb"
else
    rm -f "$archive.enc"
fi | true |
9000f50dc9598d3629a99fb863f497912e63bd03 | Shell | atorero/sp | /deploy.sh | UTF-8 | 213 | 2.609375 | 3 | [] | no_license | FOLDER=$(cat DEPLOY_PATH)
# Build the sbt distribution zip and unpack it into $FOLDER (read from
# DEPLOY_PATH above).  Abort on any failed step instead of deploying a
# stale or partial build.
set -e

git pull
./sbt dist

DISTPATH=target/universal/
# Pick the built zip; quoting keeps paths with spaces intact.
DISTNAME=$(basename "$(find "$DISTPATH" -name "*.zip")")

mv "$DISTPATH$DISTNAME" "$FOLDER"
unzip "$FOLDER$DISTNAME" -d "$FOLDER"
rm -f "$FOLDER$DISTNAME"
| true |
9addb3a6b76aea33e0e406f21726afae74f9021c | Shell | MarkAYoder/BeagleBoard-exercises | /setup/setMandi.sh | UTF-8 | 127 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Copy remote's Mandi server date to local
REMOTE=14.139.34.32

# Read the remote clock; abort rather than run 'date -s ""' if ssh fails.
DATE=$(ssh "yoder@$REMOTE" date) || exit 1
sudo date -s "$DATE"
| true |
0fb8f0a8c6f78eccbb899bda91653eada837d3f1 | Shell | AlexRogalskiy/shell | /case_30.sh | UTF-8 | 216 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Demo: launch case_01 in the background, wait for it, then exit.
# $$ is this (parent) shell's PID; the original printed $pid here before
# it was ever assigned, so the first line always showed an empty PID.
echo "Parent process (PID=$$) is running"

case_01 & pid=$!
echo "Child process (PID=$pid) is running"

sleep 20
# Block until the child finishes (wait propagates its exit status).
wait "$pid"
echo "Parent process: child process is finished"
echo "Parent process: exit"
| true |
81e91df64cbd7799df96861e77c278396563af52 | Shell | prhys/commandgui | /scripts/Operations with files/linux .txt files into windows .txt files.sh | UTF-8 | 228 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
#Description = This command will turn plain text files (like .txt) made in Linux into a format that can be read on Windows
#Danger = None
#Risks = None
#Commands
awk 'sub("$", "\r")' "$1" > "$2"
exit "$?" | true |
d1126a06a2d013e9c8885bbd7743dfd7c189ae1e | Shell | rojajullapalli/Batch325 | /day5/weekdisplay.sh | UTF-8 | 1,017 | 3.296875 | 3 | [] | no_license | #!/bin/bash -x
echo "enter the number to display week"
read number
if [ $number -eq 1 ] || [ $number -eq 8 ] || [ $number -eq 15 ] || [ $number -eq 22 ] || [ $number -eq 29 ]
then
echo "sunday"
elif [ $number -eq 2 ] || [ $number -eq 9 ] || [ $number -eq 16 ] || [ $number -eq 23 ] || [ $number -eq 30 ]
then
echo "monday"
elif [ $number -eq 3 ] || [ $number -eq 10 ] || [ $number -eq 17 ] || [ $number -eq 24 ] || [ $number -eq 31 ]
then
echo "tuesday"
elif [ $number -eq 4 ] || [ $number -eq 11 ] || [ $number -eq 18 ] || [ $number -eq 25 ]
then
echo "wednesday"
elif [ $number -eq 5 ] || [ $number -eq 12 ] || [ $number -eq 19 ] || [ $number -eq 26 ]
then
echo "thrusday"
elif [ $number -eq 6 ] || [ $number -eq 13 ] || [ $number -eq 20 ] || [ $number -eq 27 ]
then
echo "friday"
elif [ $number -eq 7 ] || [ $number -eq 14 ] || [ $number -eq 21 ] || [ $number -eq 28 ]
then
echo "saturday"
else
echo "enter the correct number to display the week"
fi
| true |
de3fb0178580af9bbd82f0272875dac42bbc76ba | Shell | Ivanyanqi/shell-learn | /简单语法/iteartor.sh | UTF-8 | 826 | 3.875 | 4 | [] | no_license | #!/bin/bash
#对一系列值进行迭代,循环非常有用,bash提供了多种类型的循环
#for 循环
#for var in list
#do
# commands 使用变量$var
#done
#list可以是一个字符串,也可以是一个序列
#我们可以轻松地生成不同的序列
#echo {1..50}能够生成一个从1~50的数字列表
echo {1..50}
#生成字母序列
echo {a..z}
echo {A..Z}
echo {A..z}
#for循环也可以采用C语言中for循环的格式。例如:
## for((i=0;i<10;i++))
{
# commands; #使用变量$i
#}
#while循环
# while condition
# do
# commands;
# done
#用true作为循环条件能够产生无限循环。
#在Bash中还可以使用一个特殊的循环until。它会一直执行循环,直到给定的条件为真
x=0;
until [ $x -eq 9 ]; #条件是[$x -eq 9 ]
do
let x++; echo $x;
done | true |
3fa9f45875030e7c6b4784f6cea17b955cdc3e30 | Shell | declanmalone/ODROID-SHOW | /example/images.sh | UTF-8 | 868 | 2.734375 | 3 | [] | no_license | #!/bin/bash
flag=0
serial="/dev/ttyUSB0"
trap "flag=1" SIGINT SIGKILL SIGTERM
./port_open &
subppid=$!
sleep 0.1
echo -ne "\ec\e[0r" > $serial
sleep 0.3
while true
do
if [ $flag -ne 0 ] ; then
echo -ne "\ec\e[1r" > $serial
kill $subppid
exit
fi
echo -ne "\e[0r" > $serial
sleep 0.2
echo -ne "\e[0;0,239;319i" > $serial
cat penguin.raw > $serial
sleep 0.1
echo -ne "\e[1r" > $serial
sleep 0.2
echo -ne "\e[0;0,319;239i" > $serial
cat butterfly.raw > $serial
sleep 0.1
echo -ne "\e[0r" > $serial
sleep 0.2
echo -ne "\e[0;0,239;319i" > $serial
cat woof.raw > $serial
sleep 0.1
echo -ne "\ec\e[0r" > $serial
sleep 0.3
echo -ne "\e[40;10,219;199i" > $serial
cat paint.raw > $serial
sleep 0.1
echo -ne "\ec\e[1r" > $serial
sleep 0.3
echo -ne "\e[10;10,189;199i" > $serial
cat paint.raw > $serial
sleep 0.1
done
| true |
a347ab8e2a86ffa29442efd8301bedfe120c4de6 | Shell | diadatp/nasti-ddrx-mc | /install-verilator.sh | UTF-8 | 365 | 3.1875 | 3 | [] | no_license | #!/bin/sh
set -e
if [ ! -d "${HOME}/verilator/bin" ]; then
wget https://github.com/diadatp/verilator/archive/verilator_3_882.tar.gz
tar -xzvf verilator_3_882.tar.gz && cd verilator-verilator_3_882
autoconf && ./configure --prefix="${HOME}/verilator" && make && make test && make install
else
echo "Using Verilator 3.882 from cached directory."
fi
| true |
3d3ac0cc84ad306a3bb0e72c24ac28df8cb74b95 | Shell | jessedmarshall/MarkerBasedImputation | /mbi/cluster/submit_training_gpu_requeue.sh | UTF-8 | 1,816 | 2.84375 | 3 | [] | no_license | #!/bin/bash
#SBATCH -J MultiGpuTrain
#SBATCH -p gpu_requeue # partition (queue)
#SBATCH -N 1 # number of nodes
#SBATCH -n 2 # number of tasks
#SBATCH --gres=gpu:4 # number of total gpus
#SBATCH --mem 160000 # memory for all cores
#SBATCH -t 0-7:00 # time (D-HH:MM)
#SBATCH --export=ALL
#SBATCH -o Job.%N.%j.out # STDOUT
#SBATCH -e Job.%N.%j.err # STDERR
srun -l -n1 hostname
srun -l -n1 echo $CUDA_VISIBLE_DEVICES
# Specify paths and variables for training. Be sure all arrays have the same length.
FUNC="/n/holylfs02/LABS/olveczky_lab/Diego/code/MarkerBasedImputation/mbi/training.py"
DATAPATH=(\
"/n/holylfs02/LABS/olveczky_lab/Diego/data/JDM25_caff_imputation_test/JDM25_fullDay.h5")
BASEOUTPUTPATH=(\
"/n/holylfs02/LABS/olveczky_lab/Diego/data/JDM25_caff_imputation_test/models")
# Run training with parameters specified above.
count=0
while [ "x${DATAPATH[count]}" != "x" ]
do
srun -l --gres=gpu:1 -n1 --mem=40000 cluster/py.sh $FUNC ${DATAPATH[count]} --base-output-path=${BASEOUTPUTPATH[count]} &
count=$(( $count + 1 ))
done
# Rather than always specifying all parameter, use this instead of the loop if you want to specify less common parameters.
# count=0
# srun -l --gres=gpu:1 -n1 --mem=40000 cluster/py.sh $FUNC ${DATAPATH[count]} --base-output-path=${BASEOUTPUTPATH[count]} &
# count=$(( $count + 1 ))
# srun -l --gres=gpu:1 -n1 --mem=40000 cluster/py.sh $FUNC ${DATAPATH[count]} --base-output-path=${BASEOUTPUTPATH[count]} &
# count=$(( $count + 1 ))
# srun -l --gres=gpu:1 -n1 --mem=40000 cluster/py.sh $FUNC ${DATAPATH[count]} --base-output-path=${BASEOUTPUTPATH[count]} &
# count=$(( $count + 1 ))
# srun -l --gres=gpu:1 -n1 --mem=40000 cluster/py.sh $FUNC ${DATAPATH[count]} --base-output-path=${BASEOUTPUTPATH[count]} &
wait
| true |
47f53cee590097685cfb38ea3782f054d0c8e7f6 | Shell | century-arcade/src | /c64/vice-2.4/src/arch/sdl/make-bindist_dingoo.sh | UTF-8 | 2,443 | 3.5 | 4 | [
"GPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# make-bindist.sh for the DINGOO SDL port
#
# written by Marco van den Heuvel <blackystardust68@yahoo.com>
#
# make-bindist.sh <strip> <vice-version> <--enable-arch> <zip|nozip> <top-srcdir>
# $1 $2 $3 $4 $5
STRIP=$1
VICEVERSION=$2
ENABLEARCH=$3
ZIPKIND=$4
TOPSRCDIR=$5
EMULATORS="x64 x64dtv x128 xcbm2 xcbm5x0 xpet xplus4 xvic vsid"
CONSOLE_TOOLS="c1541 cartconv petcat"
EXECUTABLES="$EMULATORS $CONSOLE_TOOLS"
for i in $EXECUTABLES
do
if [ ! -e src/$i ]
then
echo Error: executable file\(s\) not found, do a \"make\" first
exit 1
fi
done
echo Generating DINGOO SDL port binary distribution.
rm -f -r SDLVICE-dingoo-$VICEVERSION
mkdir SDLVICE-dingoo-$VICEVERSION
for i in $EXECUTABLES
do
$STRIP src/$i
cp src/$i SDLVICE-dingoo-$VICEVERSION
done
cp $TOPSRCDIR/src/arch/sdl/dingoo-files/*.dge SDLVICE-dingoo-$VICEVERSION
cp $TOPSRCDIR/src/arch/sdl/dingoo-files/sdl-vicerc* SDLVICE-dingoo-$VICEVERSION
cp -a $TOPSRCDIR/data/C128 $TOPSRCDIR/data/C64 SDLVICE-dingoo-$VICEVERSION
cp -a $TOPSRCDIR/data/C64DTV SDLVICE-dingoo-$VICEVERSION
cp -a $TOPSRCDIR/data/CBM-II SDLVICE-dingoo-$VICEVERSION
cp -a $TOPSRCDIR/data/DRIVES $TOPSRCDIR/data/PET SDLVICE-dingoo-$VICEVERSION
cp -a $TOPSRCDIR/data/PLUS4 $TOPSRCDIR/data/VIC20 SDLVICE-dingoo-$VICEVERSION
cp $TOPSRCDIR/doc/readmes/Readme-SDL.txt SDLVICE-dingoo-$VICEVERSION
rm -rf `find SDLVICE-dingoo-$VICEVERSION -name ".svn"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "Makefile*"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "amiga_*.vkm"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "beos_*.vkm"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "dos_*.vkm"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "os2*.vkm"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "win_*.v*"`
rm `find SDLVICE-dingoo-$VICEVERSION -name "*.vsc"`
if test x"$ZIPKIND" = "xzip"; then
if test x"$ZIP" = "x"; then
zip -r -9 -q SDLVICE-dingoo-$VICEVERSION.zip SDLVICE-dingoo-$VICEVERSION
else
$ZIP SDLVICE-dingoo-$VICEVERSION.zip SDLVICE-dingoo-$VICEVERSION
fi
rm -f -r SDLVICE-dingoo-$VICEVERSION
echo DINGOO SDL port binary distribution archive generated as SDLVICE-dingoo-$VICEVERSION.zip
else
echo DINGOO SDL port binary distribution directory generated as SDLVICE-dingoo-$VICEVERSION
fi
if test x"$ENABLEARCH" = "xyes"; then
echo Warning: binaries are optimized for your system and might not run on a different system, use --enable-arch=no to avoid this
fi
| true |
3a6476864e11ba762dcc8efef460f8063e747916 | Shell | jleluyer/sibship_SNPpanel | /00_scripts/01_makevcf_freebayes.sh | UTF-8 | 830 | 2.65625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/env bash
#PBS -N trimmomatic__BASE__
#PBS -o freebayes.out
#PBS -q omp
#PBS -l walltime=36:00:00
#PBS -l ncpus=2
#PBS -l mem=20g
cd $PBS_O_WORKDIR
#Working directories, input and output files
data=list.bam.files
outdir=02_data
ref="reference.fa"
tag="output"
ls "$outdir"/*bam >01_info_files/"$data"
#Freebayes parameters
minMapQ="30"
minCOV=10
Ploidy=2
# load Freebayes module
#Calling SNP with Freebayes on trimmed bam file
echo "Running Freebayes on ${data} samples..."
time freebayes -f $ref \
--min-mapping-quality $minMapQ \
--no-indels \
--no-complex \
--min-coverage $minCOV \
--genotype-qualities \
--bam-list 01_info_files/${data} \
--vcf ${outdir}/${tag}.vcf
echo "Running Freebayes on ${data} samples done."
| true |
f04c6d23d5ee42b946e268d15241d20a22bae528 | Shell | cedric-dufour/custom-conf | /generic/all/custom-conf-docker/usr/share/custom-conf-docker/config/etc/iptables/hooks.d/ipv4/post-start.d/50_custom-conf-docker | UTF-8 | 395 | 2.921875 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
## CUSTOM-CONF: File automatically created/updated [custom-conf-docker]
# (Please use 'dpkg-reconfigure custom-conf-docker' to change this setting)
%{CUSTOM_CONF_DOCKER_IPTABLES_MANUAL_EXIT}
# Start Docker daemon
[ -e /var/run/iptables-control.hook.docker ] || exit 0
echo "WARNING: Docker daemon service MUST be restarted MANUALLY!" >&2
rm -f /var/run/iptables-control.hook.docker
| true |
6ca2c3d4df3a1f2648e496235192039467b208cf | Shell | CloudSen/archlinux_installer_vm | /install.sh | UTF-8 | 433 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Author: CloudS3n https://yangyunsen.com
# Description: Install daily soft
source ./conf/config.sh
source ./src/post_install.sh
ROOT_UID=0
function checkConfig() {
if [[ "$UID" -ne "$ROOT_UID" ]]; then
echo "[ POST-INSTALL ] Must be root to run this script!" >> ./log/error.log
killall tail
exit 87
fi
}
function doInstall() {
clear
checkConfig
doPostInstall
}
doInstall | true |
182791145e5260c765af7471294746b95efcd357 | Shell | habitat-sh/core-plans | /luajit/tests/test.bats | UTF-8 | 252 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | source "${BATS_TEST_DIRNAME}/../plan.sh"
@test "Version matches" {
result=$(luajit -v | awk '{print $2}')
[ "$result" = "${pkg_version}" ]
}
@test "Hello World" {
result=$(luajit -e 'print("Hello Habitat")')
[ "$result" = "Hello Habitat" ]
}
| true |
5a1d69575233686ecf590e9cc552a75d319ad0af | Shell | probonopd/Tools | /dies/dies | UTF-8 | 5,038 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# dies
# v1.0.1
# (C) 2017 Joss Brown
# Shell script utility to convert English to Latin weekday names
LANG=en_US.UTF-8
VERSION="1.0.1"
DATE="2017.08"
echoerr() {
echo "$1" 1>&2
}
usage() {
echo "dies v$VERSION ($DATE) by Joss Brown"
echo "Shell script utility to convert English to Latin weekday names"
echo ""
echo "USAGE"
echo -e "\tdies [-i | -s | -u | -v] <WEEKDAY>"
echo -e "\tdies [-h | -V]"
echo ""
echo "MAIN OPTIONS"
echo -e "\t-i, --j2i"
echo -e "\t\tPrint 'I' instead of 'J', i.e. 'Iovis' instead of 'Jovis'"
echo ""
echo -e "\t-s, --short"
echo -e "\t\tPrint short-form names, e.g. 'Mar' instead of 'dies Martis'"
echo ""
echo -e "\t-u, --upper"
echo -e "\t\tPrint output in uppercase, e.g. 'DIES SOLIS' or 'MAR'"
echo ""
echo -e "\t-v, --u2v"
echo -e "\t\tPrint 'V' instead of 'U', e.g. 'SATVRNI' instead of 'SATURNI'"
echo -e "\t\tThis option is ignored in lowercase."
echo ""
echo "ADDITIONAL OPTIONS"
echo -e "\t-h, --help"
echo -e "\t\tThis help page"
echo ""
echo -e "\t-V, --version"
echo -e "\t\tVersion number and release date"
echo ""
echo "FORMATS"
echo -e "\tUse the following conventions for the days of the week:"
echo -e "\t\tSunday | Sun. | Sun | Su | U | 0"
echo -e "\t\tMonday | Mon. | Mon | Mo | M | 1"
echo -e "\t\tTuesday | Tues. | Tues | Tue. | Tue | Tu | T | 2"
echo -e "\t\tet cetera"
echo ""
echo -e "\tdies also accepts:"
echo -e "\t\ttoday | now | yesterday | tomorrow"
echo ""
echo "EXAMPLES"
echo -e "\tdies 6"
echo -e "\tdies -u Sunday"
echo -e "\tdies -s -i Thu"
echo -e "\tdies -u -i -v U M T W R F S"
echo -e "\tdies \$(date +%w)"
echo -e "\tdate -f '%Y %m %d' -j \"1969 07 20\" +%a | xargs -I {} dies -u -s -i -v {}"
echo ""
echo -e "\tRegarding numeral input, dies presupposes the %w option (GNU & BSD date) with Sunday as '0'"
echo ""
echo -e "\tTo avoid the printing of negative results and errors, redirect stderr to /dev/null:"
echo -e "\tdies <WEEKDAY> 2>/dev/null"
exit 0
}
daycheck () {
case $1 in
Sunday|Sun.|Sun|Su|U|0)
if ! $SHORT ; then
if $UPPER ; then
echo "DIES SOLIS"
else
echo "dies Solis"
fi
else
if $UPPER ; then
echo "SOL"
else
echo "Sol"
fi
fi
;;
Monday|Mon.|Mon|Mo|M|1)
if ! $SHORT ; then
if $UPPER ; then
if $U2V ; then
echo "DIES LVNAE"
else
echo "DIES LUNAE"
fi
else
echo "dies Lunae"
fi
else
if $UPPER ; then
if $U2V ; then
echo "LVN"
else
echo "LUN"
fi
else
echo "Lun"
fi
fi
;;
Tuesday|Tues.|Tues|Tue.|Tue|Tu|T|2)
if ! $SHORT ; then
if $UPPER ; then
echo "DIES MARTIS"
else
echo "dies Martis"
fi
else
if $UPPER ; then
echo "MAR"
else
echo "Mar"
fi
fi
;;
Wednesday|Wed.|Wed|We|W|3)
if ! $SHORT ; then
if $UPPER ; then
if $U2V ; then
echo "DIES MERCVRII"
else
echo "DIES MERCURII"
fi
else
echo "dies Mercurii"
fi
else
if $UPPER ; then
echo "MER"
else
echo "Mer"
fi
fi
;;
Thursday|Thurs.|Thurs|Thur.|Thur|Thu.|Thu|Th|R|4)
if ! $SHORT ; then
if $UPPER ; then
if $J2I ; then
echo "DIES IOVIS"
else
echo "DIES JOVIS"
fi
else
if $J2I ; then
echo "dies Iovis"
else
echo "dies Jovis"
fi
fi
else
if $UPPER ; then
if $J2I ; then
echo "IOV"
else
echo "JOV"
fi
else
if $J2I ; then
echo "Iov"
else
echo "Jov"
fi
fi
fi
;;
Friday|Fri.|Fri|Fr|F|5)
if ! $SHORT ; then
if $UPPER ; then
echo "DIES VENERIS"
else
echo "dies Veneris"
fi
else
if $UPPER ; then
echo "VEN"
else
echo "Ven"
fi
fi
;;
Saturday|Sat.|Sat|Sa|S|6)
if ! $SHORT ; then
if $UPPER ; then
if $U2V ; then
echo "DIES SATVRNI"
else
echo "DIES SATURNI"
fi
else
echo "dies Saturni"
fi
else
if $UPPER ; then
echo "SAT"
else
echo "Sat"
fi
fi
;;
?*)
echoerr "Error: false weekday string"
echoerr ""
usage
;;
*)
break
esac
}
SHORT=false
UPPER=false
J2I=false
U2V=false
while :; do
case $1 in
-h|-\?|--help)
usage
;;
-i|--j2i)
J2I=true
;;
-s|--short)
SHORT=true
;;
-u|--upper)
UPPER=true
;;
-v|--u2v)
U2V=true
;;
-V|--version)
echo "$VERSION ($DATE)"
exit 0
;;
--)
shift
break
;;
-?*)
echoerr "Error: invalid option: $1"
echoerr ""
usage
;;
*)
break
esac
shift
done
[[ $# == 0 ]] && echoerr "Error: dies requires a weekday string." && echoerr "" && usage
while :; do
if [[ $1 == "today" ]] || [[ $1 == "now" ]] ; then
DAY=$(/bin/date +%w)
daycheck $DAY
elif [[ $1 == "tomorrow" ]] ; then
DAY=$(/bin/date -j -v+1d +%w)
daycheck $DAY
elif [[ $1 == "yesterday" ]] ; then
DAY=$(/bin/date -j -v-1d +%w)
daycheck $DAY
else
daycheck $1
fi
shift
done
| true |
02dcf52b6f277064d494a31f837056403497e612 | Shell | r2p2/dot | /zsh/install.sh | UTF-8 | 254 | 3.359375 | 3 | [] | no_license | #!/bin/bash
DATE=`date +%Y-%m-%d:%H:%M:%S`
if [ -h ~/.zshrc ]; then
echo "Your zshrc is a link: removed"
rm ~/.zshrc
fi
if [ -f ~/.zshrc ]; then
echo "Create backup of ~/.zshrc"
mv ~/.zshrc ~/zshrc_backup_$DATE
fi
ln -s ~/dot/zsh/zshrc ~/.zshrc
| true |
ce0fcb0e42a48c57d3867d09287309967a263b25 | Shell | zhangsirsdo/openstack-install | /roles/role-controller/install.sh | UTF-8 | 9,263 | 3.515625 | 4 | [] | no_license | #!/bin/bash
readValue(){
if [ "$1"x = "null"x ];then
echo $2
else
echo $1
fi
}
ADVERTISEMENT_URL=`cat ../global.conf |grep ADVERTISEMENT_URL|awk -F '=' '{print $2}'`
ADVERTISEMENT_URL=${ADVERTISEMENT_URL:=127.0.0.1:2379}
compose_path="/etc/docker_compose/"
if [ ! -x "$compose_path" ]; then
mkdir -p $compose_path
fi
#install rabbitmq container
RABBITMQ_PORT1=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_port1|jq -r '.node.value'`
RABBITMQ_PORT2=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_port2|jq -r '.node.value'`
RABBITMQ_PORT3=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_port3|jq -r '.node.value'`
RABBITMQ_PORT4=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_port4|jq -r '.node.value'`
RABBITMQ_USER=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_user|jq -r '.node.value'`
RABBITMQ_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/rabbitmq_pass|jq -r '.node.value'`
RABBITMQ_HOST=`echo $ADVERTISEMENT_URL|awk -F ':' '{print $1}'`
RABBITMQ_PORT1=`readValue $RABBITMQ_PORT1 "4369"`
RABBITMQ_PORT2=`readValue $RABBITMQ_PORT2 "5671"`
RABBITMQ_PORT3=`readValue $RABBITMQ_PORT3 "5672"`
RABBITMQ_PORT4=`readValue $RABBITMQ_PORT4 "25672"`
RABBITMQ_USER=`readValue $RABBITMQ_USER "rabbit"`
RABBITMQ_PASS=`readValue $RABBITMQ_PASS "root"`
#=`readValue $ ""`
cp openstack_rabbitmq_compose_template.yml $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_PORT1#$RABBITMQ_PORT1#g" $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_PORT2#$RABBITMQ_PORT2#g" $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_PORT3#$RABBITMQ_PORT3#g" $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_PORT4#$RABBITMQ_PORT4#g" $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_USER#$RABBITMQ_USER#g" $compose_path"openstack_rabbitmq_compose.yml"
sed -i "s#RABBITMQ_PASS#$RABBITMQ_PASS#g" $compose_path"openstack_rabbitmq_compose.yml"
docker-compose -f $compose_path"openstack_rabbitmq_compose.yml" up -d
rabbitmq_container_id=`docker ps -a|grep rabbitmq|awk '{print $1}'`
while [ $rabbitmq_container_id == "" ]
do
sleep 1
rabbitmq_container_id=`docker ps -a|grep rabbitmq|awk '{print $1}'`
done
docker exec -it $rabbitmq_container_id rabbitmqctl set_permissions $RABBITMQ_USER ".*" ".*" ".*"
# save configuration of rabbit to etcd
curl -L -XPUT http://$ADVERTISEMENT_URL/v2/keys/endpoints/rabbitmq/host -d value="$RABBITMQ_HOST"
# install keystone container
KEYSTONE_HOST=`echo $ADVERTISEMENT_URL|awk -F ':' '{print $1}'`
KEYSTONE_ADMIN_PORT=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/admin_port|jq -r '.node.value'`
KEYSTONE_INTERNAL_PORT=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/internal_port|jq -r '.node.value'`
ADMIN_TOKEN=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/admin_token|jq -r '.node.value'`
KEYSTONE_DB_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/mariadb/keystone_db_pass|jq -r '.node.value'`
DB_HOST=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/mariadb/host|jq -r '.node.value'`
DB_PORT=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/mariadb/port|jq -r '.node.value'`
DB_USER=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/mariadb/user|jq -r '.node.value'`
DB_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/mariadb/pass|jq -r '.node.value'`
KEYSTONE_ADMIN_PORT=`readValue $KEYSTONE_ADMIN_PORT "35357"`
KEYSTONE_INTERNAL_PORT=`readValue $KEYSTONE_INTERNAL_PORT "5000"`
ADMIN_TOKEN=`readValue $ADMIN_TOKEN "016f77abde58da9c724b"`
KEYSTONE_DB_PASS=`readValue $KEYSTONE_DB_PASS "root"`
DB_HOST=`readValue $DB_HOST $KEYSTONE_HOST`
DB_PORT=`readValue $DB_PORT "3306"`
DB_USER=`readValue $DB_USER "root"`
DB_PASS=`readValue $DB_PASS "root"`
cp openstack_keystone_compose_template.yml $compose_path"openstack_keystone_compose.yml"
sed -i "s#KEYSTONE_ADMIN_PORT#$KEYSTONE_ADMIN_PORT#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#KEYSTONE_INTERNAL_PORT#$KEYSTONE_INTERNAL_PORT#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#ADMIN_TOKEN_VAR#$ADMIN_TOKEN#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#KEYSTONE_DB_PASS_VAR#$KEYSTONE_DB_PASS#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#MARIADB_HOST_VAR#$DB_HOST#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#MARIADB_PORT_VAR#$DB_PORT#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#MARIADB_USER_VAR#$DB_USER#g" $compose_path"openstack_keystone_compose.yml"
sed -i "s#MARIADB_PASS_VAR#$DB_PASS#g" $compose_path"openstack_keystone_compose.yml"
docker-compose -f $compose_path"openstack_keystone_compose.yml" up -d
# save configuration of keystone to etcd
curl -L -XPUT http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/host -d value="$KEYSTONE_HOST"
# install keystone_setup container
ADMIN_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/admin_pass|jq -r '.node.value'`
DEMO_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/demo_pass|jq -r '.node.value'`
OS_IDENTITY_API_VERSION=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/keystone/os_identity_api_version|jq -r '.node.value'`
ADMIN_PASS=`readValue $ADMIN_PASS "root"`
DEMO_PASS=`readValue $DEMO_PASS "root"`
OS_IDENTITY_API_VERSION=`readValue $OS_IDENTITY_API_VERSION "3"`
cp openstack_keystone_setup_compose_template.yml $compose_path"openstack_keystone_setup_compose.yml"
sed -i "s#ADMIN_TOKEN_VAR#$ADMIN_TOKEN#g" $compose_path"openstack_keystone_setup_compose.yml"
sed -i "s#ADMIN_PASS_VAR#$ADMIN_PASS#g" $compose_path"openstack_keystone_setup_compose.yml"
sed -i "s#DEMO_PASS_VAR#$DEMO_PASS#g" $compose_path"openstack_keystone_setup_compose.yml"
sed -i "s#OS_IDENTITY_API_VERSION_VAR#$OS_IDENTITY_API_VERSION#g" $compose_path"openstack_keystone_setup_compose.yml"
sed -i "s#ADVERTISEMENT_URL_VAR#$ADVERTISEMENT_URL#g" $compose_path"openstack_keystone_setup_compose.yml"
docker-compose -f $compose_path"openstack_keystone_setup_compose.yml" up -d
# save configuration of keystone_setup to etcd
# install glance_api container
GLANCE_HOST=`echo $ADVERTISEMENT_URL|awk -F ':' '{print $1}'`
GLANCE_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/pass|jq -r '.node.value'`
GLANCE_DB_PASS=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/db_pass|jq -r '.node.value'`
GLANCE_API_PORT=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/api_port|jq -r '.node.value'`
GLANCE_REGISTRY_PORT=`curl -L -XGET \
http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/registry_port|jq -r '.node.value'`
GLANCE_PASS=`readValue $GLANCE_PASS "root"`
GLANCE_DB_PASS=`readValue $GLANCE_DB_PASS "root"`
GLANCE_API_PORT=`readValue $GLANCE_API_PORT "9292"`
GLANCE_REGISTRY_PORT=`readValue $GLANCE_REGISTRY_PORT "9191"`
cp openstack_glance_api_compose_template.yml $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#GLANCE_PASS_VAR#$GLANCE_PASS#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#GLANCE_DB_PASS_VAR#$GLANCE_DB_PASS#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#KEYSTONE_INTERNAL_PORT_VAR#$KEYSTONE_INTERNAL_PORT#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#KEYSTONE_ADMIN_PORT_VAR#$KEYSTONE_ADMIN_PORT#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#ADMIN_PASS_VAR#$ADMIN_PASS#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#ADVERTISEMENT_URL_VAR#$ADVERTISEMENT_URL#g" $compose_path"openstack_glance_api_compose_template.yml"
sed -i "s#GLANCE_API_PORT_VAR#$GLANCE_API_PORT#g" $compose_path"openstack_glance_api_compose_template.yml"
docker-compose -f $compose_path"openstack_glance_api_compose_template.yml" up -d
# save configuration of keystone_setup to etcd
curl -L -XPUT http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/api_host -d value="$GLANCE_HOST"
# install glance-registry container
cp openstack_glance_registry_compose_template.yml $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#GLANCE_PASS_VAR#$GLANCE_PASS#g" $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#GLANCE_DB_PASS_VAR#$GLANCE_DB_PASS#g" $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#KEYSTONE_INTERNAL_PORT_VAR#$KEYSTONE_INTERNAL_PORT#g" $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#KEYSTONE_ADMIN_PORT_VAR#$KEYSTONE_ADMIN_PORT#g" $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#ADVERTISEMENT_URL_VAR#$ADVERTISEMENT_URL#g" $compose_path"openstack_glance_registry_compose_template.yml"
sed -i "s#GLANCE_REGISTRY_PORT_VAR#$GLANCE_REGISTRY_PORT#g" $compose_path"openstack_glance_registry_compose_template.yml"
docker-compose -f $compose_path"openstack_glance_registry_compose_template.yml" up -d
# save configuration of keystone_setup to etcd
curl -L -XPUT http://$ADVERTISEMENT_URL/v2/keys/endpoints/glance/registry_host -d value="$GLANCE_HOST"
| true |
b2dafeecb76d146c7aed2e530b147de6962655c0 | Shell | HiMyNameIsIlNano/linux-makemkv-lite | /oss/play-stream | UTF-8 | 2,014 | 3.484375 | 3 | [] | no_license | #!/bin/sh
#
# Depends on mpv/mplayer.
#
# Revision History:
#
# 26-MAY-2016: Initial release.
#
# Begin functions
# Print out the help
usage(){
echo "usage: play-stream [-fp value] | [[-h]]"
}
# End functions
INSTALL_PATH=/usr/local/bin
FP_FLAG=0
TIMESTAMP=$(date +%s)
LOG_FILE="make_mkv-$TIMESTAMP"
while [ "$1" != "" ]; do
case $1 in
-fp | --force-player ) shift
PLAYER=$1
FP_FLAG=1
;;
-h | --help ) usage
exit
;;
* ) usage
exit 1
esac
shift
done
if [ -e $INSTALL_PATH/mplayer -a $FP_FLAG == 0 ] || [ $FP_FLAG == 1 -a "$PLAYER" == "mplayer" ]
then
#mplayer -fs -demuxer lavf -vo vdpau:hqscaling=1 -vc ffmpeg12vdpau,ffh264vdpau,ffvc1vdpau,ffwmv3vdpau, \
#-vfm ffmpeg, -ao pulse -ac fftruehd,ffdca,ffeac3,ffac3,fflpcm, -afm ffmpeg, -alang en -slang en \
#-forcedsubsonly -channels 6 -af volnorm -cache 8192 -dvd-device /dev/cd0 \
#http://localhost:51000/stream/title0.ts
mplayer -fs -demuxer lavf -vo x11 -vc ffmpeg12vdpau,ffh264vdpau,ffvc1vdpau,ffwmv3vdpau, \
-vfm ffmpeg, -ac fftruehd,ffdca,ffeac3,ffac3,fflpcm, -afm ffmpeg, -alang en -slang en \
-forcedsubsonly -channels 6 -af volnorm -cache 8192 \
http://localhost:51000/stream/title0.ts > /tmp/$LOG_FILE
exit 0
fi
if [ -e $INSTALL_PATH/mpv -a $FP_FLAG == 0 ] || [ $FP_FLAG == 1 -a "$PLAYER" == "mpv" ]
then
mpv --vo x11 --demuxer-lavf-probesize=1000 --demuxer-lavf-buffersize=16000 \
http://localhost:51000/stream/title0.ts > /tmp/$LOG_FILE
exit 0
fi
# It the script reaches this statements it means that none of the two above mentioned multimedia players is installed
echo "No media player was found. Please consder installing either mplayer or mpv or feel free to extend the support of this script to your favourite media player."
# Shut down MakeMKV after Mplayer closes.
# killall makemkvcon
| true |
93ce763a68566303f6a0be0236a797bc85d904e7 | Shell | ProbablePattern/Teaching | /OpenBB/install.sh | UTF-8 | 641 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Instructions from https://docs.openbb.co/terminal/quickstart/installation
# Linux
sudo apt install -y gcc cmake
# MacOS
brew install cmake
# Clone the repository
cd ~/
git clone https://github.com/OpenBB-finance/OpenBBTerminal.git OpenBB
# Create Virtual Environment
cd OpenBB/
conda env create -n OpenBB --file build/conda/conda-3-9-env-full.yaml
conda activate OpenBB
# Install Dependencies
# M1 Mac ML Toolkit and Optimization
conda install -c conda-forge lightgbm=3.3.3 cvxpy=1.2.2 -y
# All
poetry install
# Portfolio Optimization
poetry install -E optimization
# ML Toolkit
poetry install -E forecast
# Start
openbb
| true |
a0b0aa6687891bf21e090c6ed71d53b61a3b1014 | Shell | parantapa/dotfiles | /bin/sample-lines-whead | UTF-8 | 240 | 3.390625 | 3 | [] | no_license | #!/bin/bash
#
# Randomly sample lines from files
# http://stackoverflow.com/a/692321
if [[ -z $1 ]] ; then
echo "Need sampling fraction"
exit 1
fi
frac=$1
awk 'BEGIN {srand()} !/^$/ { if (rand() <= '$frac' || FNR==1) print $0}'
| true |
93e4556b4584e8b36a10963a734c4c1bcc2ae9f6 | Shell | cysce/grafana-ppc64le | /k8s/start.sh | UTF-8 | 1,513 | 3.609375 | 4 | [] | no_license |
#!/bin/bash
while [ $# -gt 0 ]; do
case "$1" in
--base=*)
base="${1#*=}"
;;
--host=*)
host="${1#*=}"
;;
*)
printf "***************************\n"
printf "* Error: Invalid argument.*\n"
printf "***************************\n"
exit 1
esac
shift
done
echo base $base
echo host $host
if [[ $base == '' ]]
then
printf "*******************************************************************************\n"
printf "* Error: Invalid argument. missing --base=/Volumes/DATA/CYSCE/COC2 --base=/home/cocdata/grafana \n"
printf "*******************************************************************************\n"
exit 1
fi
if [[ $host == '' ]]
then
printf "*******************************************************************************\n"
printf "* Error: Invalid argument. missing --host=grafana.cysce.com \n"
printf "*******************************************************************************\n"
exit 1
fi
rm -rf ${base}/grafana-ppc64le/temp
mkdir ${base}/grafana-ppc64le/temp
cat ${base}/grafana-ppc64le/k8s/grafana.template | sed "s#<HOST>#$host#g" > ${base}/grafana-ppc64le/temp/grafana01.template
cat ${base}/grafana-ppc64le/temp/grafana01.template | sed "s#<PATH>#$base#g" > ${base}/grafana-ppc64le/temp/deployment.yaml
kubectl create configmap cysce-grafana --from-file=${base}/grafana-ppc64le/k8s/grafana.ini
kubectl apply -f ${base}/grafana-ppc64le/temp/deployment.yaml
kubectl apply -f ${base}/grafana-ppc64le/k8s/ingress.yaml
| true |
e4f98245c5f9c15ad98e5db0b6c8483ce17b49f3 | Shell | cPJerry/Tweaks | /installAutoSSH | UTF-8 | 1,214 | 3.140625 | 3 | [] | no_license | #!/bin/bash
sudo mv /usr/bin/telnet /usr/bin/telnet.bak
sudo echo '#!/bin/bash
if [ "$2" == "ticket" ]; then
~/bin/t $3 $4;
else
/usr/bin/telnet.bak $@
fi;' > /usr/bin/telnet
chmod 755 /usr/bin/telnet
mkdir ~/bin
chmod 755 ~/bin
echo '#!/usr/bin/expect -f
set ticket [lrange $argv 0 0]
set server [lrange $argv 0 1]
set timeout -1
spawn ssh jerald.johnson@pacha.cpanel.net
match_max 100000
expect "*jerald.johnson@pacha*"
send -- "ticket --nossp $ticket $server\r"
# Feel free to remove -nossp, my script runs ESP then SSP
expect "*going interactive*"
send -- "screen 2> /dev/null || (chmod 1755 /var/run/screen 2> /dev/null && screen 2> /dev/null) || (chmod 1777 /var/run/screen 2> /dev/null && screen 2> /dev/null) || echo \"Could not execute screen. :(\"\r"
#Also feel free to remove this, this makes all attempts to enter a screen session
send -- "source /dev/stdin <<< \"\$(curl -sL https://raw.githubusercontent.com/cPanelTechs/ESP/master/esp)\"\r"
#Again, feel free to remove if you dont want ESP
send -- "export PS1=\$PS1\"\\n> \"\r"
# If you use ESP, keep this in place to resolve a current known bug with viewing your history with the up/down arrows
send -- "ssp\r"
# Remove if desired
interact' > ~/bin/t
chmod 755 ~/bin/t
| true |
b45f7516522ae93c63e4308c14cc57456dc779ee | Shell | fizzoo/dotfiles | /.zshrc | UTF-8 | 7,281 | 2.828125 | 3 | [
"MIT"
] | permissive | # If AUTO_TMUX is set (probably as part of ssh command sent to the remote desktop), exec tmux in a default session.
# Hand off to fish running tmux when AUTO_TMUX is set (typically forced via
# the ssh command used to reach this machine).  `whence -p fish` doubles as
# the availability check: the assignment only succeeds if a fish binary is
# found, so the branch is skipped when fish is absent.
if [[ -n "$AUTO_TMUX" ]] && WHICH_FISH=$(whence -p fish); then
  echo "Starting tm(ux) through fish."
  export WHICH_FISH
  # exec replaces this zsh process; "tm new -A -s main" attaches to (or
  # creates) a tmux session named "main" through the `tm` wrapper in fish.
  exec "$WHICH_FISH" -i -c "tm new -A -s main"
fi
# start fish shell if interactive, it's available, and safeguard not already defined.
# doing this at the start since the rest of the file affects interactive (zsh) mode only.
# ($- contains 'i' only in interactive shells; a pre-set WHICH_FISH acts as
# the recursion guard so fish-spawned zsh sessions don't bounce straight back.)
if [[ $- = *i* && -z $WHICH_FISH ]] && WHICH_FISH=$(whence -p fish); then
  export WHICH_FISH
  exec "$WHICH_FISH" -i
fi
#completers, _approximate tolerates 1 - max-errors faults
# (_complete runs first; _approximate then retries, accepting typos in the
# typed word up to the max-errors budget below)
zstyle ':completion:*' completer _complete _approximate
zstyle ':completion:*' max-errors 4
#menu + color on cd and recommended group-name for that
# menu select=1  -> arrow-key selectable menu once there is more than 1 match
# list-colors '' -> default LS_COLORS-style coloring of file matches
# group-name ''  -> group matches by tag (needed for per-group styling)
zstyle ':completion:*' menu select=1
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' group-name ''
#show it all, tab again to see the start of it, less keystrokes
LISTMAX=800
#ignore case if nothing found, allow writing something in the middle of a word
# (matcher-list: first pass matches verbatim (''); fallback pass folds
# lower->upper case and matches as a substring on both sides of the cursor)
zstyle ':completion:*' matcher-list '' 'm:{[:lower:]}={[:upper:]} l:|=* r:|=*'
#load the completing
# (compinit must run after the zstyles above are in place)
autoload -Uz compinit && compinit
HISTFILE=~/.zhistory
HISTSIZE=800
SAVEHIST=800
setopt extendedglob histverify autopushd pushdsilent nobeep hist_ignore_all_dups hist_ignore_space inc_append_history
export PROMPT="%F{blue}${RANGER_LEVEL:+(r${RANGER_LEVEL})}%B%(!,%F{red},%F{green})%(0?,>,!)> %f%b"
bindkey -v
export KEYTIMEOUT=1
bindkey "^?" backward-delete-char
bindkey "^w" backward-kill-word
bindkey "^r" history-incremental-search-backward
bindkey "^a" beginning-of-line
bindkey "^e" end-of-line
bindkey "^[[3~" delete-char
bindkey "^[[A" up-line-or-search && bindkey "^[[B" down-line-or-search
bindkey '^xa' _expand_alias
bindkey '^[*' _expand_alias
# Exports, path and stuff
# Make path an array-unique, so no duplicates
typeset -U PATH path
# Then pathmunge is trivial
# pathmunge [DIR [after]] -- manage the zsh $path array.
#   no args : print the current path array (debug aid)
#   DIR     : prepend DIR (takes precedence over existing entries)
#   DIR after : append DIR (lowest precedence)
# Duplicates are impossible because $path is declared with typeset -U above.
pathmunge () {
if (( $# == 0 )); then
echo $path
return
elif (( $# == 1 )); then
# Prepend: new entry wins lookups.
path=($1 $path)
return
elif (( $# == 2 )); then
if [[ $2 == "after" ]]; then
path=($path $1)
else
# Anything other than "after" as $2 is a usage error; nothing is added.
echo "Unknown \$2: $2"
fi
fi
}
# Default programs and per-user library paths.
export EDITOR='vim'
export VISUAL='vim'
export PAGER='less'
export R_LIBS_USER='/opt/R-user/'
# `less` colors, mainly for `man`.
export LESS_TERMCAP_mb=$(tput bold; tput setaf 3) # blink, rarely used
export LESS_TERMCAP_md=$(tput bold; tput setaf 1) # h1, bold
export LESS_TERMCAP_me=$(tput sgr0) # end bold, blink, underline
export LESS_TERMCAP_so=$(tput bold; tput setaf 5; tput setab 0) # help text on bottom
export LESS_TERMCAP_se=$(tput sgr0) # end standout
export LESS_TERMCAP_us=$(tput bold; tput setaf 2) # h2, underline
export LESS_TERMCAP_ue=$(tput sgr0) # end underline
export LESS_TERMCAP_mr=$(tput rev)
export LESS_TERMCAP_mh=$(tput dim)
export LESS_TERMCAP_ZN=$(tput ssubm)
export LESS_TERMCAP_ZV=$(tput rsubm)
export LESS_TERMCAP_ZO=$(tput ssupm)
export LESS_TERMCAP_ZW=$(tput rsupm)
# Local binaries and CUDA tools; anaconda is opt-in via the `an` alias
# so it does not shadow the system python by default.
pathmunge "$HOME/.local/bin"
pathmunge "/opt/cuda/bin"
alias an='pathmunge "/opt/anaconda3/bin"'
# ensure_n_args EXPECTED ACTUAL -- complain when a function got the wrong
# number of arguments. $funcstack[2] (zsh) names the calling function.
# Returns non-zero on mismatch ("return -1" is reported as status 255).
ensure_n_args () {
if (( $1 != $2 )); then
printf "Function %s wants %d parameters, got %d\n" $funcstack[2] $1 $2
return -1
fi
}
# nargsN aliases expand *inside* a function body to enforce exactly N
# arguments, e.g. `nargs1` becomes `ensure_n_args 1 $# || return $?`.
alias nargs0='ensure_n_args 0 $# || return $?'
alias nargs1='ensure_n_args 1 $# || return $?'
alias nargs2='ensure_n_args 2 $# || return $?'
alias nargs3='ensure_n_args 3 $# || return $?'
alias nargs4='ensure_n_args 4 $# || return $?'
alias nargs5='ensure_n_args 5 $# || return $?'
# functions & aliases
# color: print all 256 background colors with their index numbers.
color () {
for i in {0..255}
do
tput setab $i
printf "%8s" $i
done
tput op
echo
}
# csv FILE...: view comma-separated data as aligned columns in less
# (-S: no line wrapping, -#8: horizontal scroll step of 8).
csv () {
column -s, -t $@ | less -\#8 -S
}
# perm PATH: show owner/mode of every path component (resolves symlinks).
perm () {
namei -mo $(readlink -f $*)
}
# jsondiff A B: colored unified diff of two JSON files, key-sorted first
# so ordering differences do not show up as changes.
jsondiff () {
diff -u --color <(jq -S . $1) <(jq -S . $2)
}
# serve: share the current directory over HTTP with nginx until Ctrl-C.
# TRAPINT is zsh's SIGINT handler; it stops nginx and breaks the loop.
# NOTE(review): the generated config hardcodes "user fizzo a" -- this only
# works for that particular user/group; adjust before reuse.
serve () {
TRAPINT () { sudo nginx -s stop; return 42; }
nginxfile='/tmp/nginx.conf'
printf "user fizzo a; events { worker_connections 1024; } http { server { root \"$PWD\"; autoindex on; } }" >$nginxfile
sudo nginx -c $nginxfile
printf "Started server on directory '$PWD'\n"
while true; do sleep 1; done
}
cpr () {
    # Recursive verbose copy whose last argument is the destination
    # directory; the destination is created (mkdir -p) when missing.
    (( $# >= 2 )) || { echo "Requires at least 2 arguments."; return; }
    local dest="${@: -1}"
    [ -d "$dest" ] || mkdir -p "$dest"
    cp -rv $*
}
# manrg PATTERN: ripgrep through the (gzipped) man pages.
manrg () {
rg -z "$@" /usr/share/man/man{0,1,2,4,5,6,7,8,n}
}
# c [DIR...]: fuzzy-pick any path below the given roots and cd there
# (to the containing directory when a file is picked).
c () {
dir=$(find "$@" -xdev -print 2> /dev/null | fzf)
if [[ -z "$dir" ]]; then return; fi
if [[ ! -d "$dir" ]]; then dir=$(dirname "$dir"); fi
cd "$dir" || return -1
}
alias ck='c /k/ && pwd'
alias cm='c /media/* && pwd'
# f [DIR]: fuzzy multi-select paths below DIR (default: cwd) and print them.
f () {
rootfind="."
if [[ ! -z "$1" && -d "$1" ]]; then rootfind="$1"; fi
find $rootfind -xdev -print 2> /dev/null | fzf -m
}
# res: restore a sane terminal after a program leaves it in a raw state.
res () {
stty sane iutf8
}
# cl: reset terminal modes and clear the screen.
cl () {
res
clear
}
# wifi: pick a network from nmcli's scan list with fzf and connect to it.
# The sed calls strip the "in use" marker and the trailing columns to
# recover the SSID from the chosen line.
wifi () {
line=$(nmcli d wifi | tac | fzf +s)
[[ -z $line ]] && return
ssid=$(echo $line | sed 's/^. //' | sed 's/ \+Infra.*//')
nmcli -a d wifi connect "$ssid"
}
# checkhs FILE.hs: copy the file, append a QuickCheck runner and execute
# all prop_* properties in ghci.
checkhs () {
cp $1 /tmp/lel.hs && \
echo "return []\nrunchecks = \$quickCheckAll" >> /tmp/lel.hs && \
echo runchecks | ghci -XTemplateHaskell /tmp/lel.hs
}
# e / ew: emacs in the terminal / detached in a window (&! disowns in zsh).
e () {
emacs -nw $*
}
ew () {
emacs $* &!
}
# gs: open magit status full-screen in terminal emacs.
gs () {
emacs -nw --eval "(progn (magit-status)(delete-other-windows))"
}
# cmak: out-of-source cmake build that also emits compile_commands.json.
# Note: leaves the shell inside build/ afterwards.
cmak () {
mkdir build
cd build
cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ..
make
}
alias syn='rsync --size-only --del -vrun '
alias crash='coredumpctl info -1'
alias g='g++ -std=c++14 -g '
_l_help () {
    # Print the absolute path being listed: the cwd with no argument,
    # the resolved target with exactly one argument, nothing otherwise.
    case $# in
        0) pwd ;;
        1) readlink -f $1 ;;
    esac
}
# l / ll: ls -lh (ll includes dotfiles), prefixed with the absolute path
# being listed via _l_help.
l () {
_l_help $*
ls --color=always -lh $*
}
ll () {
_l_help $*
ls --color=always -Alh $*
}
alias d='du -had1 | sort -h'
alias s='ranger'
# ats / at: host and guest side of a shared tmux session on a world
# visible socket, run under the "a" group (sg) so both users can access it.
ats () { sg a "tmux -S /tmp/tmuxs"; }
at () {
if [[ ! -S /tmp/tmuxs ]]; then
echo "no socket yet, wait for host to create."
else
sg a "tmux -S /tmp/tmuxs attach"
fi
}
# own PATH...: recursively take ownership of the given paths.
own () { sudo chown -R $USER: $*; }
alias ana='make clean && scan-build -enable-checker alpha --view make'
# nixi PATTERN: search nix packages (name + description).
nixi () { nix-env -qaP --description ".*$1.*"; }
# haskelly PKG...: nix-shell with GHC plus the named haskell packages.
haskelly () {
nix-shell -p "haskellPackages.ghcWithPackages (pkgs: with pkgs; [ $* ])"
}
# pythony PKG...: nix-shell with the named python35 packages available.
pythony () {
nix-shell -E "with import <nixpkgs> { }; with python35Packages; runCommand \"dummy\" { buildInputs = [ $* ]; } \"\""
}
spam () {
    # Print the argument line 100 times.
    local n
    for (( n = 0; n < 100; n++ )); do
        echo $*
    done
}
# pyprofile SCRIPT [ARGS]: profile a python program, hottest cumulative first.
pyprofile () {
python -m cProfile -s cumtime $*
}
# clean: prune the pacman cache (keep 1 version of installed, 0 of
# uninstalled packages) and empty the desktop trash.
clean () {
paccache -rvk 1
paccache -urvk 0
rm -vrf ~/.local/share/Trash
}
# gitclean: dry-run `git clean -xd`, then really delete after confirmation.
# NOTE(review): the quoted =~ pattern is treated as a regex by zsh; in bash
# a quoted right-hand side would be matched literally -- zsh-only idiom.
gitclean () {
git clean -xdn
echo -n "Proceed? (y)"
read YESNO
if [[ $YESNO =~ "^[Yy]$" ]]; then
git clean -xdf
fi
}
# spectroview AUDIOFILE: render a spectrogram of channel 1 with sox and
# display it with sxiv; the temporary image is removed afterwards.
spectroview () {
nargs1
file=`mktemp --suffix .png`
sox $1 -n remix 1 spectrogram -x 3000 -y 513 -z 120 -w Kaiser -o $file
sxiv $file
rm $file
}
test_and_src () {
    # Source $1 only when it is a readable regular file; otherwise
    # return non-zero (mirrors the failed test's status).
    if [[ -f $1 && -r $1 ]]; then
        source $1
    else
        return 1
    fi
}
# Load optional extras: dircolors, machine-local settings, fzf bindings.
test -f $HOME/.dircolors && eval $( dircolors -b $HOME/.dircolors )
test_and_src $HOME/.zshtmp
test_and_src /usr/share/fzf/completion.zsh
test_and_src /usr/share/fzf/key-bindings.zsh
# In case tests fail, do not produce an error code on startup
true
| true |
5adbb37e5840e914904653c35357e4f492603641 | Shell | tiagoaf5/lambda-zsh-theme | /tiagoaf5-lambda.zsh-theme | UTF-8 | 976 | 3.40625 | 3 | [
"MIT"
] | permissive | MNML_ELLIPSIS_CHAR="${MNML_ELLIPSIS_CHAR:-..}"
function mnml_cwd {
local echar="$MNML_ELLIPSIS_CHAR"
local segments="${1:-2}"
local seg_len="${2:-0}"
local _w="%{\e[0m%}"
local _g="%{\e[38;5;244m%}"
if [ "$segments" -le 0 ]; then
segments=0
fi
if [ "$seg_len" -gt 0 ] && [ "$seg_len" -lt 4 ]; then
seg_len=4
fi
local seg_hlen=$((seg_len / 2 - 1))
local cwd="%${segments}~"
cwd="${(%)cwd}"
cwd=("${(@s:/:)cwd}")
local pi=""
for i in {1..${#cwd}}; do
pi="$cwd[$i]"
if [ "$seg_len" -gt 0 ] && [ "${#pi}" -gt "$seg_len" ]; then
cwd[$i]="${pi:0:$seg_hlen}$_w$echar$_g${pi: -$seg_hlen}"
fi
done
printf '%b' "$_g${(j:/:)cwd//\//$_w/$_g}$_w"
}
#local ret_status="%(?:%{$fg_bold[cyan]%}λ :%{$fg_bold[red]%}λ )" # with bold lambda
local ret_status="%(?:%{$fg[cyan]%}λ :%{$fg[red]%}λ )" # with normal lambda
PROMPT='$(mnml_cwd 10 20) ${ret_status}%f› '
| true |
59c942ff68ed4d262267b6807449abdc43d20759 | Shell | clamsey/dotfiles | /bash/shortcuts.bash | UTF-8 | 456 | 2.84375 | 3 | [
"MIT"
] | permissive | # helpful command line shortcut aliases
# get out of directories quickly
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
# do openssl hashing easily (reads stdin or the given files)
alias sha1="openssl sha1"
alias md5="openssl md5"
# don't type sudo <package manager> every time
alias yum="sudo yum"
alias apt="sudo apt"
alias apt-get="sudo apt-get"
# ls shortcuts
alias ll="ls -l"
# jupyter notebook shortcut
alias jnb="jupyter notebook"
| true |
6b64013ede5e737e6c0f6cddf12f1481f6519c60 | Shell | tenk224/newton | /controller.sh | UTF-8 | 16,765 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Positional arguments: $1 = controller/management IP address,
# $2 = provider (external) network interface name.
# NOTE(review): neither argument is validated -- both must be supplied.
ipadd=$1
ifname=$2
#----------------------------------------------------------------------------------
# Install everything the controller node needs: database, message queue,
# cache, identity (keystone), image (glance), compute (nova) control
# services, networking (neutron) with the linuxbridge ML2 driver, the
# dashboard, and expect for driving interactive tools from scripts.
yum install mariadb mariadb-server python2-PyMySQL \
rabbitmq-server \
memcached python-memcached \
openstack-keystone httpd mod_wsgi \
openstack-glance \
openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler \
openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables \
openstack-dashboard \
expect -y
#----------------------------------------------------------------------------------
# Message broker: start RabbitMQ and create the "openstack" user
# (password 123456) with full configure/write/read permissions.
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack 123456
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
#----------------------------------------------------------------------------------
# Database: bind MariaDB to the management IP with InnoDB/UTF-8 defaults.
cat > /etc/my.cnf.d/openstack.cnf <<EOF
[mysqld]
bind-address = $ipadd
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
systemctl enable mariadb.service
systemctl start mariadb.service
# NOTE(review): mysql_secure_installation is interactive (expect is
# installed above, presumably to drive it -- confirm the intended flow).
mysql_secure_installation
# Create the OpenStack databases and grants from the bundled SQL file.
mysql -u "root" "-p123456" < ./db.sql
#----------------------------------------------------------------------------------
# Token/cache backend: memcached listening on the management IP and ::1.
cat > /etc/sysconfig/memcached <<EOF
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l $ipadd,::1"
EOF
systemctl enable memcached.service
systemctl start memcached.service
#----------------------------------------------------------------------------------
# Identity service (keystone): configure the database and Fernet tokens,
# initialize the key repositories, bootstrap the admin identity, front the
# API with httpd/mod_wsgi, then populate the service catalog (projects,
# users, roles, services, endpoints) and write client rc files.
sed -i "/^\[database\]$/a connection = mysql+pymysql://keystone:123456@controller/keystone" /etc/keystone/keystone.conf
sed -i "/^\[token\]$/a provider = fernet" /etc/keystone/keystone.conf
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password 123456 \
  --bootstrap-admin-url http://controller:35357/v3/ \
  --bootstrap-internal-url http://controller:35357/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
sed -i "/^#ServerName www.example.com:80$/a ServerName controller" /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service
systemctl start httpd.service

# Helper: run the openstack client authenticated as the bootstrap admin.
# Every catalog command below previously repeated these eight --os-* flags;
# factoring them out keeps the credentials in one place.
os_admin() {
    openstack \
        --os-project-domain-name default \
        --os-user-domain-name default \
        --os-project-name admin \
        --os-username admin \
        --os-password 123456 \
        --os-auth-url http://controller:35357/v3 \
        --os-identity-api-version 3 \
        "$@"
}

# Helper: register the public, internal and admin endpoints of a service
# type at the same URL.  $1 = service type, $2 = endpoint URL.
os_endpoints() {
    local iface
    for iface in public internal admin; do
        os_admin endpoint create --region RegionOne "$1" "$iface" "$2"
    done
}

# Projects, the unprivileged demo user and the "user" role.
os_admin project create --domain default --description "Service Project" service
os_admin project create --domain default --description "Demo Project" demo
os_admin user create --domain default --password 123456 demo
os_admin role create user
os_admin role add --project demo --user demo user

# Smoke-test token issuance for the demo user (public endpoint, port 5000)
# and for the admin user (admin endpoint).
openstack \
    --os-project-domain-name default \
    --os-user-domain-name default \
    --os-project-name demo \
    --os-username demo \
    --os-password 123456 \
    --os-auth-url http://controller:5000/v3 \
    --os-identity-api-version 3 \
    token issue
os_admin token issue

# Image service (glance): service account, catalog entry and endpoints.
os_admin user create --domain default --password 123456 glance
os_admin role add --project service --user glance admin
os_admin service create --name glance --description "OpenStack Image" image
os_endpoints image http://controller:9292

# Compute service (nova): service account, catalog entry and endpoints.
os_admin user create --domain default --password 123456 nova
os_admin role add --project service --user nova admin
os_admin service create --name nova --description "OpenStack Compute" compute
os_endpoints compute 'http://controller:8774/v2.1/%(tenant_id)s'

# Network service (neutron): service account, catalog entry and endpoints.
os_admin user create --domain default --password 123456 neutron
os_admin role add --project service --user neutron admin
os_admin service create --name neutron --description "OpenStack Networking" network
os_endpoints network http://controller:9696

# Client environment files for the admin and demo users.
cat > admin-openrc <<EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
cat > demo-openrc <<EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
#----------------------------------------------------------------------------------
# Image service (glance): point glance-api and glance-registry at the
# glance database and keystone, store images on the local filesystem.
sed -i "/^\[database\]$/a connection = mysql+pymysql://glance:123456@controller/glance" /etc/glance/glance-api.conf
# Keystone credentials for glance-api (inserted right after the
# [keystone_authtoken] section header).
sed -i "/^\[keystone_authtoken\]$/a auth_uri = http://controller:5000\n\
auth_url = http://controller:35357\n\
memcached_servers = controller:11211\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
project_name = service\n\
username = glance\n\
password = 123456" /etc/glance/glance-api.conf
sed -i "/^\[paste_deploy\]$/a flavor = keystone" /etc/glance/glance-api.conf
# Local file store for uploaded images.
sed -i "/^\[glance_store\]$/a stores = file,http\n\
default_store = file\n\
filesystem_store_datadir = /var/lib/glance/images/" /etc/glance/glance-api.conf
# Same database and keystone settings for the registry service.
sed -i "/^\[database\]$/a connection = mysql+pymysql://glance:123456@controller/glance" /etc/glance/glance-registry.conf
sed -i "/^\[keystone_authtoken\]$/a auth_uri = http://controller:5000\n\
auth_url = http://controller:35357\n\
memcached_servers = controller:11211\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
project_name = service\n\
username = glance\n\
password = 123456" /etc/glance/glance-registry.conf
sed -i "/^\[paste_deploy\]$/a flavor = keystone" /etc/glance/glance-registry.conf
# Create the schema and start both services.
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
#----------------------------------------------------------------------------------
# Compute service (nova), controller side: API/database/keystone/VNC
# configuration, schema creation, then start the control services.
sed -i "/^\[DEFAULT\]$/a enabled_apis = osapi_compute,metadata\n\
transport_url = rabbit://openstack:123456@controller\n\
auth_strategy = keystone\n\
my_ip = $ipadd\n\
use_neutron = True\n\
firewall_driver = nova.virt.firewall.NoopFirewallDriver" /etc/nova/nova.conf
sed -i "/^\[api_database\]$/a connection = mysql+pymysql://nova:123456@controller/nova_api" /etc/nova/nova.conf
sed -i "/^\[database\]$/a connection = mysql+pymysql://nova:123456@controller/nova" /etc/nova/nova.conf
sed -i "/^\[keystone_authtoken\]$/a auth_uri = http://controller:5000\n\
auth_url = http://controller:35357\n\
memcached_servers = controller:11211\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
project_name = service\n\
username = nova\n\
password = 123456" /etc/nova/nova.conf
# BUG FIX: the original wrote "$my_ip" inside double quotes, but no shell
# variable named my_ip exists, so both VNC options were written with empty
# values.  Escaping the dollar writes a literal $my_ip into nova.conf,
# which oslo.config resolves against the my_ip option set in [DEFAULT].
sed -i "/^\[vnc\]$/a vncserver_listen = \$my_ip\n\
vncserver_proxyclient_address = \$my_ip" /etc/nova/nova.conf
sed -i "/^\[glance\]$/a api_servers = http://controller:9292" /etc/nova/nova.conf
sed -i "/^\[oslo_concurrency\]$/a lock_path = /var/lib/nova/tmp" /etc/nova/nova.conf
# Populate both nova databases, then bring up the controller services.
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
#----------------------------------------------------------------------------------
# Network service (neutron): provider (flat) networks with the linuxbridge
# ML2 driver, DHCP and metadata agents, plus the nova<->neutron wiring.
sed -i "/^\[database\]$/a connection = mysql+pymysql://neutron:123456@controller/neutron" /etc/neutron/neutron.conf
sed -i "/^\[DEFAULT\]$/a core_plugin = ml2\n\
service_plugins =\n\
transport_url = rabbit://openstack:123456@controller\n\
auth_strategy = keystone\n\
notify_nova_on_port_status_changes = True\n\
notify_nova_on_port_data_changes = True" /etc/neutron/neutron.conf
sed -i "/^\[keystone_authtoken\]$/a auth_uri = http://controller:5000\n\
auth_url = http://controller:35357\n\
memcached_servers = controller:11211\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
project_name = service\n\
username = neutron\n\
password = 123456" /etc/neutron/neutron.conf
# Credentials neutron uses to notify nova of port changes.
sed -i "/^\[nova\]$/a auth_url = http://controller:35357\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
region_name = RegionOne\n\
project_name = service\n\
username = nova\n\
password = 123456" /etc/neutron/neutron.conf
sed -i "/^\[oslo_concurrency\]$/a lock_path = /var/lib/neutron/tmp" /etc/neutron/neutron.conf
# ML2 plugin: flat/vlan provider networks only (no tenant networks),
# linuxbridge mechanism, port-security extension.  Each sed appends right
# after the section header, so the options land in reverse order.
sed -i "/^\[ml2\]$/a type_drivers = flat,vlan" /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "/^\[ml2\]$/a tenant_network_types =" /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "/^\[ml2\]$/a mechanism_drivers = linuxbridge" /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "/^\[ml2\]$/a extension_drivers = port_security" /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "/^\[securitygroup\]$/a enable_ipset = True" /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "/^\[ml2_type_flat\]$/a flat_networks = provider" /etc/neutron/plugins/ml2/ml2_conf.ini
# Linuxbridge agent: map the "provider" network to the physical interface,
# disable VXLAN and security groups (Noop firewall).
sed -i "/^\[linux_bridge\]$/a physical_interface_mappings = provider:$ifname" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
sed -i "/^\[securitygroup\]$/a enable_security_group = False\n\
firewall_driver = neutron.agent.firewall.NoopFirewallDriver" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
sed -i "/^\[vxlan\]$/a enable_vxlan = False" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# BUG FIX: the original wrote "einterface_driver", which the DHCP agent
# ignores; the option name is "interface_driver".
sed -i "/^\[DEFAULT\]$/a interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver\n\
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq\n\
enable_isolated_metadata = True" /etc/neutron/dhcp_agent.ini
sed -i "/^\[DEFAULT\]$/a nova_metadata_ip = controller\n\
metadata_proxy_shared_secret = 123456" /etc/neutron/metadata_agent.ini
# Tell nova how to reach neutron and the metadata proxy secret.
sed -i "/^\[neutron\]$/a url = http://controller:9696\n\
auth_url = http://controller:35357\n\
auth_type = password\n\
project_domain_name = Default\n\
user_domain_name = Default\n\
region_name = RegionOne\n\
project_name = service\n\
username = neutron\n\
password = 123456\n\
\n\
service_metadata_proxy = True\n\
metadata_proxy_shared_secret = 123456" /etc/nova/nova.conf
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# nova-api must be restarted to pick up the new [neutron] section.
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl start neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
| true |
f0a66d885ccd4b88b263db7109e2be3fe7050d27 | Shell | michalsarna/docker-useful-scripts | /clean_none_images.sh | UTF-8 | 229 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Collect the IDs of untagged ("dangling") images, which `docker images`
# shows with a literal <none> repository or tag.  Matching the full
# "<none>" marker (instead of bare "none") avoids deleting images whose
# repository name merely contains that substring.
LIST=$(sudo docker images | grep '<none>' | awk '{print $3}' | xargs)
if [[ -z $LIST ]]; then
    # Original message said "exited list" (copy/paste from a container
    # cleanup script); this script removes images.
    echo "Nothing to remove: no <none> images found"
else
    echo "Removing <none> images"
    sudo docker rmi $LIST
fi
| true |
6f2af5072b8e65494c9ac098de8254321048a05c | Shell | sunnysideup/silverstripe-easy-coding-standards | /bin/sake-ss-dump-database | UTF-8 | 1,097 | 3.765625 | 4 | [] | no_license | #!/bin/bash
############################################ BASICS
# COMPOSER_RUNTIME_BIN_DIR is set by composer when this runs as a
# composer-installed binary; sake-self-methods provides echohead/echonice
# and friends used below -- TODO confirm against that script.
SCRIPT_DIR="$COMPOSER_RUNTIME_BIN_DIR"
WORKING_DIR=$(pwd)
source $SCRIPT_DIR/sake-self-methods
############################################ DIR
dir='./'
############################################ SETTINGS
# .env supplies SS_DATABASE_NAME (SilverStripe database credentials).
source $dir/.env
help='no'
name="$SS_DATABASE_NAME"
# Option parsing: -n/--db-name overrides the dump file name, -h shows
# help; an unknown -flag also falls through to help, and any bare
# argument is taken as the working directory.
while (( $# )); do
case $1 in
-n|--db-name) name=$2;shift ;;
-h|--help) help='yes';shift ;;
-*) printf 'Unknown option: %q\n\n' "$1";
help='yes' ;;
*) dir=$1;;
esac
shift
done
# Print usage information (via sake-self-methods helpers) and exit.
help_and_exit() {
echohead "Dump database";
echohead "Available settings:";
echonice "-h, --help show help information"
echonice "-n, --db-name name for file"
echohead "Example usage:"
echonice "sake-ss-dump-database -n mydatabase";
echofunctions
exit;
}
############################################ HELP ONLY
if [[ "$help" == "yes" ]]; then
help_and_exit
fi
# Dump only the database of the SilverStripe site in the cwd to NAME.sspak.
sspak save . "$name.sspak" --db
| true |
6939c769f25d218f4bb8043f0f256cb23df401d5 | Shell | neilgiri/PhylodynamicSimulation | /scripts/calib.sh | UTF-8 | 8,219 | 4.21875 | 4 | [] | no_license | #!/bin/bash
# Global variables for min, max, and step values specific to this
# script. These values are changed in main() and setupRange() function.
paramFile=""
param="?"
min=0
max=0
step=0
country=""
refPhyTree=""
# Number of simulation repetitions per parameter setting (also the
# divisor used by getAverage below).
reps=20
# Import the shared variables and file names between qsub_job.sh and
# calib.sh (to keep names consistent across scripts)
# NOTE(review): env.sh is expected to define outDir/rawResultsFile/
# avgResultsFile and setOutputDir; param_ranges.sh defines
# fillParamArrayVals/changeParamRange/processRange -- confirm.
source ${ANTIGEN_PATH}/scripts/env.sh
source ${ANTIGEN_PATH}/scripts/param_ranges.sh
# Setup the range of values for this script to iterate on. This
# function gets all the parameters to explore (specified as
# command-line parameters) passed to this function. This function
# uses helper functions defined in the param_ranges.sh script to setup
# parameters and adjust their ranges based on parallel job
# configuration.
#
# Returns: 0 on success, 1 on bad parameters, 2 on an incompatible
# parallel job configuration.
function setupRange() {
# Setup output directory based on PBS job id and MPI rank using
# helper function in env.sh
setOutputDir "0"
# Have helper script process parameters
fillParamArrayVals $*
if [ $? -ne 0 ]; then
# Parameters specified are incorrect.
return 1
fi
# Adjust range of first parameter based on parallel job config.
changeParamRange 1
if [ $? -ne 0 ]; then
# Job configuration incompatible
return 2
fi
# Everything went well
return 0
}
# Run the simulation using the specified parameters and values. The
# parameters and values are passed on as parameters to the Java
# simulation.
#
# Globals: ANTIGEN_PATH (jar locations), outDir (output/log directory).
# Returns: the Java process's exit code (0 on success).
function runSim() {
local dir="${ANTIGEN_PATH}"
local javaOut="${outDir}/java_output.txt"
local cp="${dir}/classmexer.jar:${dir}/colt-1.2.0.jar:${dir}/snakeyaml-1.8.jar:${dir}:."
# Run the java code; stdout is appended to java_output.txt so repeated
# repetitions accumulate in one log.
echo -n "At time: "
date
echo "Running simulation with: $*"
java -Xmx4G -Xss5M -cp "$cp" Antigen outputDir "$outDir" $* >> "$javaOut"
local exitCode=$?
if [ $exitCode -ne 0 ]; then
echo "Java simulation did not complete successfully."
return $exitCode
fi
# Everything went well
return 0
}
# Convenience method to run scripts to compare phylogenetic trees.
# This function assumes parameters are supplied in the following order:
# $1 - Country name
# $2 - File with reference phylogenetic tree
# $3 - Input parameters configuration YML file.
#
# Remaining parameters are just passed onto the simulator.
#
function simAndCompare() {
# Set-up variables to make function readable.
local script="${ANTIGEN_PATH}/scripts/ComparePhyTrees.py"
local simTree="${outDir}/out.trees"
local resultsFile="${outDir}/${rawResultsFile}"
local country="$1"
local refTree="$2"
local paramFile="$3"
# Consume the country and reference tree parameters.
shift 3
# Now run the simulation with remaining parameters; the literal
# "paramFile" token is the key the simulator expects before the value.
runSim "paramFile" "$paramFile" $*
local exitCode=$?
if [ $exitCode -ne 0 ]; then
return $exitCode
fi
# With very small contacts trees may not get created
if [ ! -f "$simTree" ]; then
# Create a dummy tree file
echo ";" > "$simTree"
fi
# Now run the comparison scripts and dump output; each run appends one
# line of statistics (prefixed by country and parameter values).
echo "Generating comparison stats for simulated tree with $refTree"
echo -n "$country $* " >> "$resultsFile"
python ${script} "$country" "$refTree" "$simTree" >> "$resultsFile"
# Everything went well?
return $?
}
# Convenience function to compute the average of one raw-result column
# over all lines matching a search key.
#
# Globals: outDir, rawResultsFile (location of the raw data), reps
#          (the divisor -- repetitions per parameter setting).
# $1 - Columns to skip
# $2 - Column to average (after skipping $1 cols)
# $* - Search key (grep pattern matched against each raw-data line)
#
# Prints the average (sum of matching values / $reps) on stdout.
function getAverage() {
local rawDataFile="${outDir}/${rawResultsFile}"
local skipCols="$1"
local col="$2"
shift 2
local key="$*"
col=$(( skipCols + col ))
# BUG FIX: the original computed a line count here with the undefined
# variable $srchKey; the count was never used, so the dead (and broken)
# statement has been removed.
# Join the matching column values with '+'; the trailing "+ 0"
# terminates the expression handed to bc.
local vals
vals=$(grep "$key" "$rawDataFile" | cut -d" " -f $col | tr "\n" "+")
local avg
avg=$(echo "(${vals} 0) / $reps" | bc -l)
echo $avg
return 0
}
#
# Convenience method to run experiments for a given parameter setting
# $reps times and generate average difference/error values. This
# function is called by the processRange function in the
# param_ranges.sh script.
#
# This function assumes parameters are supplied in the following order:
# $1 - Country name
# $2 - File with reference phylogenetic tree
# $3 - Input parameters configuration YML file.
#
# Other parameters are settings for different parameters for simulation
#
# Appends one summary line per setting to ${outDir}/${avgResultsFile}.
# Returns 0 on success, 3 when the raw data has an unexpected line
# count, or the failing repetition's exit code.
function simAndCompareMany() {
local rawDataFile="${outDir}/${rawResultsFile}"
local resultsFile="${outDir}/${avgResultsFile}"
local country="$1"
local refTree="$2"
local paramFile="$3"
shift 3
# Run $reps repetitions of the simulation
for rep in `seq 1 $reps`
do
# Run the simulation and compare results
simAndCompare "$country" "$refTree" "$paramFile" $*
local exitCode=$?
if [ $exitCode -ne 0 ]; then
return $exitCode
fi
done
echo -n "----- Generating average diff-stats for '$*' at "
date
# Check to ensure we have $reps lines of output to process
local key="$country $*"
local lineCount=`grep -c "$key" "$rawDataFile"`
if [ $lineCount -ne $reps ]; then
echo "Expected $reps lines for '$country $*' but got $lineCount"
return 3
fi
# Now compute average errors from $reps for various statistics.
# The first (numParams + 2) columns are the country/parameter prefix;
# the even columns after that hold the values (odd ones are labels).
local numParams=$#
local skipCols=$(( numParams + 2 ))
local avgRefClusts=`getAverage $skipCols 2 "$key"`
local avgSimClusts=`getAverage $skipCols 4 "$key"`
local avgInterClusts=`getAverage $skipCols 6 "$key"`
local avgIntraClusts=`getAverage $skipCols 8 "$key"`
local avgChilds=`getAverage $skipCols 10 "$key"`
local avgDist=`getAverage $skipCols 12 "$key"`
local avgNodeDepth=`getAverage $skipCols 14 "$key"`
# Finally print summary statistics into the results file.
echo "$key avg_ref_clusters $avgRefClusts \
avg_sim_clusters $avgSimClusts avg_inter_clust_dist_diff $avgInterClusts \
avg_intra_clust_dist_diff $avgIntraClusts avg_node_size_diff $avgChilds \
avg_distance_diff $avgDist avg_depth_diff $avgNodeDepth " >> "$resultsFile"
# Everything went well?
return $?
}
# Convenience function to check if necessary environment variables are
# defined and that enough command-line arguments were supplied.
# Returns 0 when everything looks good, 2 for missing environment
# variables, 1 for missing arguments.
function checkEnvParams {
# The valid flag is changed to 0 if any of the checks fail below.
valid=1
# Check for values that are expected
if [ -z $ANTIGEN_PATH ]; then
echo "Ensure ANTIGEN_PATH enviornment variable is set."
echo "ANTIGEN_PATH must be set to the top-level directory as in:"
echo "/home/${USER}/research/phyloDynH5N1/antigen"
valid=0
fi
# If any of the checks above fail report an error
if [ $valid -ne 1 ]; then
echo "Necessary environment variables not found. Exiting!"
return 2
fi
# Check to ensure that necessary parameters have been supplied.
if [ $# -lt 7 ]; then
echo "Specify 7 command-line arguments in the following order"
echo "Order : <Cntry> <RefPhyTree> <ParamFile> <Parameter> <MinVal> <MaxVal> <Step> ..."
echo "Example: vietnam vietnam.py parameters.yml contact 0.1 3.5 0.1 stepSize 0.01 0.009 0.001"
return 1
fi
# Everything looks good so far
return 0
}
# The main function that performs necessary operations: validates the
# environment, fixes the working directory under PBS, sets up the
# parameter range and iterates over it via processRange.
function main() {
# Check to ensure environment variables are defined.
checkEnvParams $*
if [ $? -ne 0 ]; then
exit
fi
# Switch to working directory if running as PBS job
if [ ! -z "$PBS_O_WORKDIR" ]; then
cd "$PBS_O_WORKDIR"
echo "Changed working directory to:"
pwd
fi
# Save command-line arguments for future use
country="$1"
refPhyTree="$2"
paramFile="$3"
shift 3
# Store parameters into arrays and adjust range based on parallel
# job configuration.
setupRange $*
if [ $? -ne 0 ]; then
# Error processing parameter range.
return 1
fi
# Use helper function in params_ranges.sh to iterate over the
# specified ranges of parameters.
processRange simAndCompareMany "$country" "$refPhyTree" "$paramFile"
# Use return value as exit code
return $?
}
# Let the main function perform all the necessary task.
main $*
# End of script
| true |
c8b65fc74aaf9a60fd49f581f30f9a517048dc3c | Shell | strbum/webc | /lib/live/config/1030-gnome-power-manager | UTF-8 | 1,085 | 3.8125 | 4 | [] | no_license | #!/bin/sh
## live-config(7) - System Configuration Components
## Copyright (C) 2006-2015 Daniel Baumann <mail@daniel-baumann.ch>
##
## This program comes with ABSOLUTELY NO WARRANTY; for details see COPYING.
## This is free software, and you are welcome to redistribute it
## under certain conditions; see COPYING for details.
#set -e
Cmdline ()
{
	# Walk the kernel command line word by word and remember the
	# requested live user name, if any. Word splitting of the
	# unquoted expansion is intentional here.
	set -- ${LIVE_CONFIG_CMDLINE}

	while [ $# -gt 0 ]
	do
		case "${1}" in
			live-config.username=*|username=*)
				LIVE_USERNAME="${1#*username=}"
				;;
		esac

		shift
	done
}
Init ()
{
# Checking if package is installed or already configured.
# The dpkg .list file only exists when gnome-power-manager is installed,
# and the state file under /var/lib/live/config marks a previous run of
# this component. Note: "exit 0" terminates the whole script here, not
# just this function — that is how live-config components bail out.
if [ ! -e /var/lib/dpkg/info/gnome-power-manager.list ] || \
[ -e /var/lib/live/config/gnome-power-manager ]
then
exit 0
fi
# Announce this component on the boot progress line (no newline).
echo -n " gnome-power-manager"
}
Config ()
{
# Not authorizing the user to hibernate the computer
# (might damage existing swap partitions).
# LIVE_USERNAME is filled in by Cmdline() from the kernel command line;
# presumably live-config guarantees it is set by now — TODO confirm.
sudo -u "${LIVE_USERNAME}" gconftool-2 -s -t bool /apps/gnome-power-manager/general/can_hibernate false
# Creating state file so Init() skips this component on the next boot.
touch /var/lib/live/config/gnome-power-manager
}
Cmdline
Init
Config
| true |
e69c285cad12d8fbd6cb5f3372a6d974d3fcc24e | Shell | t2t-io/toe-example-plugins | /scripts/run-sensorweb3 | UTF-8 | 3,780 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
function generate_sensorweb3_communicator_connection {
    # Emit the JSON snippet describing one SensorWeb3 communicator
    # connection; $1 is the peer host and $2 the TCP port.
    local host="$1"
    local port="$2"
    printf '{\n"enabled": true,\n"url": "tcp://%s:%s",\n"channel": null,\n"broadcast": false\n}\n' "${host}" "${port}"
}
# Emit the JSON snippet for the SensorWeb3 socket server: listen on all
# interfaces at TCP port 10000. ("line": true presumably selects
# line-delimited framing — confirm against the sock module.)
function generate_sensorweb3_sock {
cat <<__EOF__
{
"uri": "tcp://0.0.0.0:10000",
"line": true
}
__EOF__
}
# Emit the JSON snippet for a SensorWeb3 tcp-proxy bridge. $1 becomes the
# default "app" name in the bridge metadata. ("guess": false presumably
# disables device auto-detection — confirm against the tcp-proxy module.)
function generate_sensorweb3_tcp_proxy_bridge {
cat <<__EOF__
{
"metadata": {
"guess": false,
"defaults": {
"device": "remote",
"app": "$1"
}
}
}
__EOF__
}
# Launch SensorWeb3 through the shared start-yapps-docker helper.
# CONF points the launcher at a YAML file named after this script
# ($0.yml); SCRIPT_DIR and PLUGIN_DIR are exported by wrapper().
# The comma-separated numbers are presumably the ports to expose —
# confirm against start-yapps-docker. The trailing "$@" forwards any
# extra flags supplied by the calling run_* function.
function run_sensorweb3 {
CONF=$0.yml \
${SCRIPT_DIR}/start-yapps-docker \
sensor-web3 \
6020,6021,6022,6023,6024 \
${PLUGIN_DIR} \
YAPPS_EXTRA_PERIPHERAL_SERVICES \
-b "ps-manager.handlers.console.enabled=true" \
"$@"
}
function echoerr {
    # Like echo, but the message goes to stderr instead of stdout.
    >&2 echo "$@"
}
# Resolve 127.0.0.1 to a LAN-reachable address so a Docker container can
# reach services on the host; any other address passes through unchanged.
# Relies on legacy `ifconfig` output — TODO confirm on systems that only
# ship `ip`. OS_NAME is exported by wrapper().
function transform_address {
local ADDR=$1
if [ "127.0.0.1" == "${ADDR}" ]; then
if [ "darwin" == "${OS_NAME}" ]; then
# macOS: collect non-loopback IPv4 address(es) from ifconfig.
# NOTE(review): may print several lines if multiple interfaces are
# up — confirm that callers tolerate that.
ADDR=$(ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')
echoerr "replace 127.0.0.1 with ${ADDR} for container to access"
else
# Linux: take the first eth* interface and parse its "inet addr:" line
# (old net-tools ifconfig format).
ADDR=$(ifconfig | grep "^eth" | head -n1 | awk '{print $1}' | xargs -I{} sh -c "ifconfig {}" | grep "inet addr" | awk '{print $2}' | awk -F':' '{print $2}')
echoerr "replace 127.0.0.1 with ${ADDR} for container to access"
fi
fi
# Result is returned on stdout (diagnostics above go to stderr).
echo ${ADDR}
}
# Run SensorWeb3 against a remote UART-over-TCP server.
# $1 = server address (required); remaining args are forwarded.
# PORT is a global override — it defaults to 10001 when empty or unset.
function run_with_remote_uart {
local ADDR=$1; shift
[ "" == "${ADDR}" ] && echo "please specify ip address of remote UART Tcp server" && exit 1
[ "" == "${PORT}" ] && PORT="10001"
# Rewrite 127.0.0.1 to an address the container can reach.
ADDR=$(transform_address ${ADDR})
# Enable the sb0 communicator connection and point it at the server.
run_sensorweb3 \
-b "communicator.connections.sb0.enabled=true" \
-s "communicator.connections.sb0.url=tcp://${ADDR}:${PORT}" \
$@
}
# Run SensorWeb3 with one extra named TCP connection wired through the
# communicator, sock-server and tcp-proxy bridge modules.
# $1 = connection name, $2 = remote address, $3 = remote port; remaining
# arguments are forwarded to run_sensorweb3.
function run_with_extra_tcp {
local NAME=$1; shift
local ADDR=$1; shift
local PORT=$1; shift
[ "" == "${NAME}" ] && echo "please specify name for the extra tcp connection" && exit 1
[ "" == "${ADDR}" ] && echo "please specify remote ip address for the extra tcp connection to connect" && exit 1
[ "" == "${PORT}" ] && echo "please specify remote port number for the extra tcp connection to connect" && exit 1
# Rewrite 127.0.0.1 to an address the container can reach.
ADDR=$(transform_address ${ADDR})
# Each -o option carries "^<config-path>:<base64-encoded JSON snippet>".
# BASE64_OPTS is set by wrapper() ("-w 0" on GNU base64, empty on macOS).
local OPT1="^communicator.connections.${NAME}:$(generate_sensorweb3_communicator_connection ${ADDR} ${PORT} | base64 ${BASE64_OPTS})"
local OPT2="^sock.servers.${NAME}:$(generate_sensorweb3_sock | base64 ${BASE64_OPTS})"
local OPT3="^tcp-proxy.bridges.${NAME}:$(generate_sensorweb3_tcp_proxy_bridge ${NAME} | base64 ${BASE64_OPTS})"
# Echo the generated options for debugging before launching.
echo "-o ${OPT1}"
echo "-o ${OPT2}"
echo "-o ${OPT3}"
# NOTE(review): OPT_ADVANCED is never assigned anywhere in this script —
# presumably inherited from the caller's environment; confirm.
echo "${OPT_ADVANCED}"
run_sensorweb3 \
-o ${OPT1} \
-o ${OPT2} \
-o ${OPT3} \
${OPT_ADVANCED} \
$@
#local CONFS=($(find ${PLUGIN_DIR}/confs -type f -name '*.ls'))
#for c in "${CONFS[@]}"; do
# echo "-o '$(basename $c).${NAME}=$(cat $c | lsc -cjp | base64)'"
#done
}
# Run SensorWeb3 with no extra connections, forwarding all CLI arguments.
function run_standalone {
run_sensorweb3 "$@"
}
# Succeed iff $1 names a shell function in the current environment.
# $1 is quoted: with the original unquoted form an empty/missing argument
# made `declare -f -F` list all functions and falsely return success.
function fn_exist {
    declare -f -F -- "$1" > /dev/null 2>&1
}
# Entry-point dispatcher: derives a sub-command from the name this script
# was invoked under (presumably a symlink name minus the base script
# name) and calls the matching run_<subcommand> function.
function wrapper {
# Lower-cased kernel name, e.g. "linux" or "darwin".
export OS_NAME=$(uname -s | tr '[A-Z]' '[a-z]')
local READLINK_OPTS=""
# GNU readlink needs -f to canonicalize; BSD/macOS readlink lacks it.
# BASE64_OPTS disables line wrapping for GNU base64 (global — consumed
# by run_with_extra_tcp).
if [ "darwin" != "${OS_NAME}" ]; then
READLINK_OPTS="-f"
BASE64_OPTS="-w 0"
fi
local CURRENT=$(pwd)
local SCRIPT_CURRENT_NAME=$(basename $0)
local SCRIPT_BASE_NAME=$(basename $(readlink ${READLINK_OPTS} $0))
# Sub-command = invoked name with "<base-name>-" stripped, then
# hyphens mapped to underscores to form a function name.
local SCRIPT_SUBCOMMAND=$(echo ${SCRIPT_CURRENT_NAME} | sed "s/${SCRIPT_BASE_NAME}-//g")
local FUNC="run_$(echo ${SCRIPT_SUBCOMMAND} | tr '-' '_')"
echo "SCRIPT_CURRENT_NAME = ${SCRIPT_CURRENT_NAME}"
echo "SCRIPT_BASE_NAME = ${SCRIPT_BASE_NAME}"
echo "SCRIPT_SUBCOMMAND = ${SCRIPT_SUBCOMMAND}"
echo "FUNC = ${FUNC}"
# NOTE(review): CURRENT is declared twice; this second declaration is
# redundant.
local CURRENT=$(pwd)
# PLUGIN_DIR = directory of the invoking (symlink) path; SCRIPT_DIR =
# directory of the resolved script. Both are exported for run_sensorweb3.
cd $(dirname $0)
export PLUGIN_DIR=$(pwd)
cd $(dirname $(readlink ${READLINK_OPTS} $0))
export SCRIPT_DIR=$(pwd)
cd ${CURRENT}
# Dispatch to run_<subcommand> if it exists, otherwise report it.
fn_exist ${FUNC} && ${FUNC} $@ || echo "no such function: ${FUNC}"
}
wrapper $@
| true |
039314f3986e8d9c46ebcce84b34dd4a0e8e5365 | Shell | ushavyas/data-extract | /shell/cited_refs_script.sh | UTF-8 | 915 | 3.640625 | 4 | [] | no_license | ###############################################
# #
# Name : cited_refs_script.sh #
# Desc : Executes cited_refs.py #
# #
###############################################
PYTHON_DIR="/home/usha/wos/python"
LOG_DIR="/home/usha/wos/log"

# Exactly four file arguments are required (input plus three outputs).
if [ $# -ne 4 ]; then
    echo "Error: cited_refs_script.sh - Number of arguments"
    exit 1
fi

cited_ref_file="$1"
cited_ref_res_file="$2"
cited_ref_dat_file="$3"
cited_ref_link_file="$4"

# The input file must exist before handing it to the Python step.
# All path variables are quoted so names with spaces cannot break the
# test or the command line below.
if [ ! -f "$cited_ref_file" ]; then
    echo "Error: cited_refs_script.sh - $cited_ref_file does not exist"
    exit 1
fi

# Run the extraction and surface its failure explicitly.
python "${PYTHON_DIR}/cited_refs.py" -i "$cited_ref_file" -1 "$cited_ref_res_file" -2 "$cited_ref_dat_file" -3 "$cited_ref_link_file" -l "${LOG_DIR}/cited_refs.log"
ret=$?
if [ $ret -ne 0 ]; then
    echo "Error: cited_refs_script.sh - cited_refs.py failed"
    exit 1
fi
| true |
6ff5640083a5b3099b3dd5f3ccfc09190f43322d | Shell | Python-tao/Linux_learn | /shell_scripts/add_even_odd_number.sh | UTF-8 | 515 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#求100以内奇数Odd之和。
declare -i sum=0
for i in $(seq 1 2 100); do
#echo "\$sum is $sum,\$i is $i"
sum=$[$sum+$i]
done
echo "With For loop,result is $sum"
##While循环##
declare -i sum=0
declare -i i=1
while [ $i -le 100 ]; do
#echo "\$sum is $sum,\$i is $i"
sum=$[ $sum+$i ]
let i=$i+2
done
echo "With While循环 loop,result is $sum"
##until循环##
declare -i sum=0
declare -i i=1
until [ $i -gt 100 ]; do
let sum+=$i
let i=$i+2
done
echo "With Until循环 loop,result is $sum" | true |
45af075e6046e20ebf37b94680fb062ae8539a59 | Shell | snarfmason/dotfiles | /bashrc | UTF-8 | 173 | 2.625 | 3 | [
"MIT"
] | permissive | if [ -d ~/.asdf ]; then
. $HOME/.asdf/asdf.sh
fi
if [ -e /usr/local/bin/direnv ]; then
eval "$(direnv hook bash)"
fi
export PS1='\[\033[01;32m\]\w\[\033[00m\] bash: '
| true |
c439c306b0e11acd0819ad316ce5920f39df89be | Shell | Shivanirudh/UNIX | /ShellScript/passCheck.sh | UTF-8 | 191 | 3.25 | 3 | [] | no_license | #!/bin/bash
read -p "Enter password " pw
if [[ $pw =~ [0-9]+ && $pw =~ [a-z]+ && $pw =~ [A-Z]+ && $pw =~ ^[A-Za-z0-9]{8,} ]];
then
echo "Strong password "
else
echo "Weak password "
fi
| true |
3e47d41551af90eb94b3e359efc6b840f4b4d7cc | Shell | fnord0/blackarch | /packages/tiger/PKGBUILD | UTF-8 | 1,305 | 2.734375 | 3 | [] | no_license | pkgname=tiger
pkgver=3.2.3
pkgrel=1
epoch=100
groups=('blackarch' 'blackarch-scanner' 'blackarch-automation')
pkgdesc="A security scanner, that checks computer for known problems. Can also use tripwire, aide and chkrootkit."
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url='http://www.nongnu.org/tiger/'
license=('GPL')
depends=('perl')
makedepends=('patch')
backup=('etc/tiger/tigerrc' 'etc/tiger/cronrc')
source=("http://download.savannah.gnu.org/releases/tiger/tiger-$pkgver.tar.gz"
tiger-makefile.diff
build-fix.diff)
md5sums=('f41076f645da9de937819bf6d516e546'
'aadd12186c717dfe202d55d1192851c2'
'9e516291e4081733ca551fb46ad37c06')
# Apply the bundled patches (listed in source=()) to the unpacked
# tiger sources before configuring.
prepare() {
cd "$srcdir/tiger-$pkgver"
patch -Np0 -i "$srcdir/tiger-makefile.diff"
patch -Np0 -i "$srcdir/build-fix.diff"
}
# Configure and compile tiger with Arch-style paths (home under
# /usr/share/tiger, config under /etc/tiger, logs under /var/log/tiger).
build() {
cd "$srcdir/tiger-$pkgver"
./configure --prefix=/usr \
--mandir=/usr/share/man \
--with-tigerhome=/usr/share/tiger \
--with-tigerconfig=/etc/tiger \
--with-tigerwork=/var/run/tiger \
--with-tigerlog=/var/log/tiger \
--with-tigerbin=/usr/bin
make
}
# Install the built tree into $pkgdir, pre-creating the directories the
# install target expects.
package() {
cd "$srcdir/tiger-$pkgver"
# Base directories.
install -dm755 "$pkgdir/usr/share/tiger"
install -dm755 "$pkgdir/usr/bin"
make "DESTDIR=$pkgdir" install
}
| true |
dd018206e1b047505f1482fd1f268788cab2e8b3 | Shell | Monkey0720/Singularity | /updatebuild.sh | UTF-8 | 5,387 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Default parameter
build='new';
URLBase='https://www.phenix.bnl.gov/WWW/publish/phnxbld/EIC/Singularity';
sysname='x8664_sl7'
DownloadBase='cvmfs/eic.opensciencegrid.org';
CleanDownload=false
# Parse input parameter
for i in "$@"
do
case $i in
-b=*|--build=*)
build="${i#*=}"
shift # past argument=value
;;
--sysname=*)
sysname="${i#*=}"
shift # past argument=value
;;
-s=*|--source=*)
URLBase="${i#*=}"
shift # past argument=value
;;
-t=*|--target=*)
DownloadBase="${i#*=}"
shift # past argument=value
;;
-c|--clean)
CleanDownload=true
shift # past argument=value
;;
--help|-h|*)
echo "Usage: $0 [--build=<new>] [--sysname=<x8664_sl7|gcc-8.3>] [--source=URL] [--target=directory] [--clean]";
exit;
shift # past argument with no value
;;
esac
done
echo "This macro download/update EIC ${build} build to $DownloadBase"
echo "Source is at $URLBase"
echo ""
echo "If you have CVMFS file system directly mounted on your computer,"
echo "you can skip this download and mount /cvmfs/eic.opensciencegrid.org to the singularity container directly."
# Cache-check helper: compare a remote md5 file against a local cached copy.
#
# Arguments: $1 - URL of the remote .md5 file
#            $2 - path of the locally cached .md5 file
# Returns:   0 when the cached checksum matches the remote one (no
#            re-download needed), 1 otherwise (missing cache or mismatch).
md5_check ()
{
    local target_file="$1"
    local md5_cache="$2"
    local new_md5 cached_md5
    # Fetch the current remote checksum, bypassing any HTTP caches.
    new_md5=$(curl -H 'Cache-Control: no-cache' -ks "$target_file")
    if [ -f "$md5_cache" ]; then
        # Read the previously stored checksum into its own variable
        # instead of shadowing the cache-file path (the original reused
        # the name md5_cache for both).
        cached_md5=$(cat "$md5_cache")
        if [ "$cached_md5" = "$new_md5" ]; then
            # Unchanged since the last download.
            return 0
        fi
    fi
    return 1
}
if [ $CleanDownload = true ]; then
echo "--------------------------------------------------------"
echo "Clean up older download"
echo "--------------------------------------------------------"
if [ -d "$DownloadBase" ]; then
echo "First, wiping out previous download at $DownloadBase ..."
/bin/rm -rf $DownloadBase
else
echo "Previous download folder is empty: $DownloadBase"
fi
fi
echo "--------------------------------------------------------"
echo "Singularity image"
echo "--------------------------------------------------------"
#echo "${URLBase}/rhic_sl7_ext.simg -> ${DownloadBase}/singularity/"
mkdir -p ${DownloadBase}/singularity
md5_check ${URLBase}/rhic_sl7_ext.simg.md5 ${DownloadBase}/singularity/rhic_sl7_ext.simg.md5
if [ $? != 0 ]; then
echo "Downloading ${URLBase}/rhic_sl7_ext.simg -> ${DownloadBase}/singularity/ ..."
curl -H 'Cache-Control: no-cache' -k ${URLBase}/rhic_sl7_ext.simg > ${DownloadBase}/singularity/rhic_sl7_ext.simg
curl -H 'Cache-Control: no-cache' -ks ${URLBase}/rhic_sl7_ext.simg.md5 > ${DownloadBase}/singularity/rhic_sl7_ext.simg.md5
else
echo "${URLBase}/rhic_sl7_ext.simg has not changed since the last download"
echo "- Its md5 sum is ${DownloadBase}/singularity/rhic_sl7_ext.simg.md5 : " `cat ${DownloadBase}/singularity/rhic_sl7_ext.simg.md5`
fi
echo "--------------------------------------------------------"
echo "Monte Carlos"
echo "--------------------------------------------------------"
#echo "${URLBase}/rhic_sl7_ext.simg -> ${DownloadBase}/singularity/"
mkdir -p ${DownloadBase}/singularity
md5_check ${URLBase}/MCEG.tar.bz2.md5 ${DownloadBase}/singularity/MCEG.tar.bz2.md5
if [ $? != 0 ]; then
echo "Downloading ${URLBase}/MCEG.tar.bz2 -> ${DownloadBase}/singularity/ ..."
curl -H 'Cache-Control: no-cache' -k ${URLBase}/MCEG.tar.bz2 | tar xjf -
curl -H 'Cache-Control: no-cache' -ks ${URLBase}/MCEG.tar.bz2.md5 > ${DownloadBase}/singularity/MCEG.tar.bz2.md5
else
echo "${URLBase}/MCEG.tar.bz2 has not changed since the last download"
echo "- Its md5 sum is ${DownloadBase}/singularity/MCEG.tar.bz2.md5 : " `cat ${DownloadBase}/singularity/MCEG.tar.bz2.md5`
fi
echo "--------------------------------------------------------"
echo "EIC build images"
echo "--------------------------------------------------------"
declare -a images=("opt.tar.bz2" "offline_main.tar.bz2" "utils.tar.bz2")
mkdir -p ${DownloadBase}/.md5/${build}/
## now loop through the above array
for tarball in "${images[@]}"
do
# echo "Downloading and decompress ${URLBase}/${build}/${tarball} ..."
md5file="${DownloadBase}/.md5/${build}/${tarball}.md5";
md5_check ${URLBase}/${sysname}/${build}/${tarball}.md5 ${md5file}
if [ $? != 0 ]; then
echo "Downloading ${URLBase}/${sysname}/${build}/${tarball} -> ${DownloadBase} ..."
curl -H 'Cache-Control: no-cache' -k ${URLBase}/${sysname}/${build}/${tarball} | tar xjf -
curl -H 'Cache-Control: no-cache' -ks ${URLBase}/${sysname}/${build}/${tarball}.md5 > ${md5file}
else
echo "${URLBase}/${sysname}/${build}/${tarball} has not changed since the last download"
echo "- Its md5 sum is ${md5file} : " `cat ${md5file}`
fi
done
echo "--------------------------------------------------------"
echo "Done! To run the EIC container in shell mode:"
echo ""
echo "singularity shell -B cvmfs:/cvmfs cvmfs/eic.opensciencegrid.org/singularity/rhic_sl7_ext.simg"
echo "source /cvmfs/eic.opensciencegrid.org/$sysname/opt/fun4all/core/bin/eic_setup.sh -n $build"
echo ""
echo "More on singularity tutorials: https://www.sylabs.io/docs/"
echo "More on directly mounting cvmfs instead of downloading: https://github.com/eic/Singularity"
echo "--------------------------------------------------------"
| true |
797922c8c468ed13caf3f8501f8c61993e1920a6 | Shell | vyesubabu/WF1 | /AOD2BUFR/run_check_modis.ksh | UTF-8 | 406 | 3.03125 | 3 | [] | no_license | #!/bin/ksh
set -euax
day=20190806
for hh in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 22 23
do
file="AOD.Prepbufr.${hh}.QC00"
if [[ -f ../Data/PrepBUFR/${day}/$file ]]; then
ln -sf ../Data/PrepBUFR/${day}/${file} AODbufr
aod_decode.x > log.${file}.${day} 2>&1
grep hdr= log.${file}.${day} | awk '{print $4,$5}' > log.${file}.${day}.latlon
fi
done
| true |
8868ed68f971dd6a1bf1aeeae10b99fc3194c4fd | Shell | mlpinit/dotfiles-1 | /bin/rio | UTF-8 | 1,853 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Rio: Load CSV from stdin into R as a data frame, execute given commands, and get the output as CSV or PNG on stdout
#
# Example usage:
# curl -s 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv' > iris.csv
# < iris.csv Rio 'summary(df)'
# < iris.csv Rio -s 'sqldf("select Name from df where df.SepalLength > 7")'
# < iris.csv Rio -g 'g+geom_point(aes(x=SepalLength,y=SepalWidth,colour=Name))' > iris.png
#
# Dependency: R (with optionally the R packages ggplot2, plyr, and sqldf)
#
# Original Author: http://jeroenjanssens.com
# Modifications by: Stefan Novak
usage() {
cat << EOF
Rio: Load CSV from stdin into R as a data frame, execute given commands, and get the output as CSV on stdout
usage: Rio OPTIONS
OPTIONS:
-g Import ggplot2
-p Import plyr
-s Import sqldf
-v Verbose
EOF
}
# Build and run the R one-liner: load the CSV from $IN into data frame
# `df`, evaluate the user-supplied $REQUIRES/$SCRIPT code, then write
# .Last.value to $OUT_PNG — as delimited text when it is a data frame,
# as a PNG via ggsave when it is a ggplot, otherwise as printed text
# via sink(). $HEADER and $DELIMITER come from the option parsing above.
callR() {
Rscript --vanilla -e "df<-read.csv('${IN}',header=${HEADER},sep='${DELIMITER}',stringsAsFactors=F);${REQUIRES}${SCRIPT};last<-.Last.value;if(is.data.frame(last)){write.table(last,'${OUT_PNG}',sep='${DELIMITER}',quote=F,row.names=F,col.names=${HEADER});}else if(exists('is.ggplot')&&is.ggplot(last)){ggsave(filename='${OUT_PNG}',last,dpi=72);}else{sink('${OUT_PNG}');print(last);}"
}
SCRIPT="${!#}"
REQUIRES=
DELIMITER="\\\\t"
HEADER="F"
VERBOSE=false
IN=$(mktemp /tmp/temp.XXXX)
OUT=$(mktemp /tmp/temp.XXXX)
OUT_PNG="${OUT}.png"
touch $OUT_PNG
while getopts "gpsv:" OPTION
do
case $OPTION in
h)
usage
exit 1
;;
g)
REQUIRES="${REQUIRES}require(ggplot2);g<-ggplot(df);"
;;
p)
REQUIRES="${REQUIRES}require(plyr);"
;;
s)
REQUIRES="${REQUIRES}require(sqldf);"
;;
v)
VERBOSE=true
;;
?)
usage
exit
;;
esac
done
cat /dev/stdin > $IN
if $VERBOSE
then
callR
else
callR >/dev/null 2>&1
fi
cat $OUT_PNG
rm $IN $OUT $OUT_PNG
rm -f Rplots.pdf
| true |
de493310b198b4f02a16710bd47de1cab396abbc | Shell | Romop5/holoinjector-tests | /dataset_helpers/nehe/nehe.sh | UTF-8 | 386 | 3.453125 | 3 | [] | no_license | #!/bin/bash
SCRIPT_PATH=`dirname $0`
ABS_PATH=`realpath -s ${SCRIPT_PATH}`
if [[ -z $1 ]]
then
echo "USAGE: $0 <ID>"
echo "where ID is number from 1-30"
exit 1
fi
LESSON_ID=`printf "%2d" $1 | tr " " "0"`
LESSON_NAME=$(echo "lesson${LESSON_ID}")
START_DIR=`pwd`
LESSON_PATH=`echo "${ABS_PATH}/linuxglx/${LESSON_NAME}"`
cd ${LESSON_PATH}
LD_PRELOAD=$2 ./${LESSON_NAME}
| true |
342efe8dec3f25d47eecb33b1bcf14f2336a3a1d | Shell | openshift/release | /ci-operator/step-registry/network/conf/enable-nm-trace/network-conf-enable-nm-trace-commands.sh | UTF-8 | 864 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -o nounset
set -o errexit
set -o pipefail

# Base64-encoded NetworkManager drop-in that raises logging to TRACE.
nm_config=$(cat <<EOF | base64 -w0
[logging]
level=TRACE
EOF
)

# Render one MachineConfig manifest per node role so the drop-in is
# installed on both control-plane and worker nodes, and log each
# rendered manifest for debugging.
for role in master worker; do
cat << EOF > "${SHARED_DIR}/manifest_${role}-networkmanager-configuration.yaml"
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: ${role}
  name: ${role}-nm-trace-logging
spec:
  config:
    ignition:
      version: 3.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,${nm_config}
        mode: 0644
        overwrite: true
        path: /etc/NetworkManager/conf.d/99-trace-logging.conf
EOF
# Fix: the original printed a hard-coded "master" label once after the
# loop while cat'ing only the last (worker) manifest; print each
# manifest under its own role name instead.
echo "${role}-networkmanager-configuration.yaml"
echo "---------------------------------------------"
cat "${SHARED_DIR}/manifest_${role}-networkmanager-configuration.yaml"
done
| true |
faec6f9a647352964e53115cc9200981a8ce1634 | Shell | Branchout/branchout | /output/escape-text | UTF-8 | 229 | 3.40625 | 3 | [] | no_license | #!/bin/bash
for each in $(find output -name *.txt); do
mkdir -p $(dirname "target/${each}")
while IFS= read -r line; do echo -e "$line"; done < "$each" | sed -e "s,BASE_DIRECTORY,$(pwd)," > "target/${each%.txt}.output"
done
| true |
d1ae43e4fc184b23c27e66aa2eec460254949fd7 | Shell | Azintelecom/azcloud-apps | /modules/mariadb/main | UTF-8 | 497 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env bash
# Provision a MariaDB instance for azcloud-apps: export the storage path,
# database password and proxy settings, then drive the shared setup
# stages. All helpers used here (_get_db_args, get_proxy, set_proxy,
# setup_app_storage, setup_firewall_and_selinux,
# _install_and_setup_mariadb, stage_finished, _finish) are defined
# elsewhere in the framework — behavior is inferred from their names
# only; confirm against their definitions.
azapps_databases_mariadb()
{
AZCLOUD_APPS_STORAGE="/var/lib/mysql"
export AZCLOUD_APPS_STORAGE
DB_PASS="$(_get_db_args)"
export DB_PASS
PROXY="$(get_proxy)"
export PROXY
set_proxy
# Prepare the mariadb storage area, then record the stage as done.
setup_app_storage 'mariadb' \
&& stage_finished setup_app_storage
# 3306/tcp is MariaDB's standard port; mysqld_t is put in permissive
# SELinux mode.
setup_firewall_and_selinux '3306:tcp' 'mysqld_t:permissive' \
&& stage_finished firewall_and_selinux_setup
_install_and_setup_mariadb \
&& stage_finished install_and_setup_mariadb
_finish
}
| true |
95d234d489858f19826baf63bd992e140efc5ee7 | Shell | cameronbracken/sites | /cb/files/swfit.sh | UTF-8 | 192 | 2.828125 | 3 | [] | no_license | #!/bin/bash
for file in pdf/*.pdf
do
base=$(basename $file .pdf)
pdf2swf $file -o swf/$base.swf
swfcombine -X 600 -Y 800 swf/rfxview.swf swf=swf/$base.swf -o swf/$base.swf
done
| true |
96f553736d56b617c223a8848b7814913b1a2907 | Shell | raphigaziano/.dotfiles | /scripts/getpkgs.sh | UTF-8 | 699 | 3.703125 | 4 | [] | no_license | #! /bin/bash
#
# Automagically install the listed packages.
# Update the package list below accordingly.
#
# Should run on any Debian-like system (needs apt-get)
#
# raphigaziano <r.gaziano@gmail.com>
# Created: 05-22-2013
#
##############################################################################
packages=(
tmux
terminator
tree
python-pip
exuberant-ctags
curl
autojump
)
# Check if user is root
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
echo "Updating package index..."; echo
apt-get update
echo "Done."
for pname in ${packages[@]}
do
echo "Installing $pname..."
apt-get install $pname
echo "Done."
done
exit 0
| true |
ce7e7a9db1029ca6173a88d3f9cde51bc5cb5c89 | Shell | jrjhealey/BadassBash | /BadassBash.sh | UTF-8 | 5,555 | 4.375 | 4 | [] | no_license | #!/bin/bash
# An all-singing, all-dancing example script to write pretty and versatile commandline scripts
# A useful construct to control flow in the script (the script will exit if any processes within
# fail or exit abnormally.
set -eo pipefail
# Make your output colorful! This checks to see that there is an active terminal/interactive session
# and uses tput to assign colors. It's a bit less versatile (fewer colors) but supposedly more portable
# than ANSI escape characters. The use of a HEREDOC for `cat` below for the script usage requires this
# as ANSI characters cannot be used. If you want colorful --help, tput is the way to go
if [ -t 1 ] ; then
ncols=$(tput colors)
if [ -n "$ncols" ] && [ "$ncols" -ge 8 ] ; then
bold="$(tput bold)"
underline="$(tput smul)"
rmunderline="$(tput rmul)"
standout="$(tput smso)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
default="$(tput sgr0)"
fi
fi
# Some handy general logging and warning functions. They can be used as pipes or function calls
log(){
    # Emit an informational message on stdout (white). The text comes
    # from $1, or from stdin when the function is used as a pipe target.
    local message="${1:-$(</dev/stdin)}"
    echo -e "${white}${underline}INFO:${rmunderline} ${message}${default}" >&1
}
err(){
    # Emit an error message on stderr (red); takes an argument or reads
    # stdin when used as a pipe target.
    local message="${1:-$(</dev/stdin)}"
    echo -e "${red}${underline}ERROR:${rmunderline} ${message}${default}" >&2
}
warn(){
    # Emit a warning message on stdout (yellow/orange); takes an argument
    # or reads stdin when used as a pipe target.
    local message="${1:-$(</dev/stdin)}"
    echo -e "${yellow}${underline}WARNING:${rmunderline} ${message}${default}" >&1
}
# Using a cat HEREDOC (EOF) to write the usage of the script. cat supports tput colors.
# Using $0 means the scriptname will autopopulate itself when run.
usage(){
cat << EOF >&2
Usage: $0
This is a boilerplate/cookiecutter bash script to handle arguments from the commandline.
This is the help/usage information for the script. It might normally look something like...
$ bash $0 -f -x arg positional
OPTIONS:
-h | --help Show this message.
-f | --flag Some boolean (truthy/falsy) flag.
-x | --xarg Some flag that takes an argument.
-a | --array Some argument that creates an array of items.
Through the use of tput, it also handles ${underline}${red}C${green}O${yellow}L${blue}O${magenta}R${cyan}F${white}U${black}L${default} ${underline}${red}O${green}U${yellow}T${blue}P${magenta}U${cyan}T${default}
EOF
}
# EOF demarcates end of the HEREDOC
# Now begin handling arguments from the commandline. One option for providing the GNU style
# long and short arguments, is to simply reset the long arg to its counterpart short arg
# which is what the case-loop below does.
for arg in "$@"; do # for every arg in the commandline array ("$@")
shift # Shift by one so as to skip the script name
case "$arg" in
"--help") set -- "$@" "-h" ;; # Match the args, and reset that particular "$@"
"--flag") set -- "$@" "-f" ;; # value to the equivalent short arg
"--xarg") set -- "$@" "-x" ;; #
"--arr") set -- "$@" "-a" ;; #
*) set -- "$@" "$arg" ;; # Lastly, deal with any unmatched args.
esac
done
# Call getopts assigns the arguments to variables for use in the script
while getopts "hfx:a:" OPTION ; do # Letters in quotes correspond to the short arguments.
case $OPTION in # Letters followed by a ":" are expecting an argument.
h) usage; exit 0 ;; # -> Output usage and exit normally.
f) flag="True" ;; # -> Flag is boolean, so if it exists, set it to true (or false)
x) xarg=$OPTARG ;; # -> Set the argument of xarg to the variable $xarg (contained in $OPTARG)
a) arr+=($OPTARG) ;; # -> Append -a arguments to an array (useful for gathering filenames etc).
# This requires that -a be given for each filename though.
esac
done
# Lastly, check for the case where no arguments were provided at all, and take this as a signal to just
# print the script usage to the screen (with an error code).
if [[ $# -eq 0 ]] ; then
usage ; exit 1
fi
# Now, check for the existence of required arguments, and/or populate with with default behaviours
# if required.
if [[ -z $flag ]]; then
warn "Flag is not set, assuming default of FALSE"
flag="False"
fi
# Exit the script if a required argument is not given (for example)
if [[ -z $xarg ]]; then
usage ; err "xarg not supplied. Exiting." ; exit 1
fi
# If an optional argument wasn't given, perhaps warn and continue on or set some default.
if [[ -z $arr ]]; then
warn "No array name was specified. Continuing with an empty array."
fi
# All helper functions etc must be declared above, before they are called to do any actual work.
# Below this line the actual script begins and performs tasks.
log "I am now running the script processes!"
log "Hi, I'm flag: ${flag}"
warn "Hi, I'm xarg: ${xarg}"
err "Hi, I'm array: ${arr[*]}"
# A useful block for dealing with Truthy/Falsey booleans (covers all cases of T/TRUE/True/true etc
if [[ $flag =~ ^[Tt][Rr][Uu][Ee]$ ]] || [[ $flag =~ ^[Tt]$ ]] || [[ -z $flag ]] ; then
log "Doing something"
elif [[ $flag =~ ^[Ff][Aa][Ll][Ss][Ee]$ ]] || [[ $flag =~ ^[Ff]$ ]] ; then
log "Doing nothing because false"
: # Do nothing if false-y
else
err 'Unrecognised argument to flag (should be T(rue), F(alse) or empty - case insensitive).'
fi
| true |
4301ed932e3eab1d62ea32531b6dca716331c533 | Shell | link-webcreations/moonlight-config | /autostart.sh | UTF-8 | 322 | 2.59375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Start PC
wakeonlan D4:3D:7E:4D:DD:13
# Wait until steam is reachable
echo Connecting to the PC ...
while ! nc -z -w5 192.168.1.221 47989 &>/dev/null; do :; done
# Start Moonlight
echo "PC available, starting moonlight"
cd ~/git/moonlight-config
moonlight -mapping xbox_one.map -60fps stream 192.168.1.221
| true |
a966c90ed381306ec7ca0aebe1a15975c3c50b75 | Shell | ahorn/benchmarks | /sw-hw/linux/tmp105_x86/get_result.sh | UTF-8 | 1,799 | 4.1875 | 4 | [] | no_license | #!/bin/sh
outputFolder=$1
if [ -z $1 ]
then
echo "Error: requires one parameter: $0 [Data folder]"
exit
fi
if [ ! -d $outputFolder ]; then
echo "ERROR: Folder $outputFolder doesn't exist."
exit
fi
# Compute total runtime in average
sum=0
size=`ls $outputFolder/data* | wc -l`
tmpFile=bc_tmp.txt
for f in `ls $outputFolder/data*`
do
#echo "file: $f"
time=`cat $f | grep "decision procedure" | cut -d \ -f 4 | cut -d s -f 1`
#echo "$time + $sum"
echo "$time + $sum" > $tmpFile
echo "quit" >> $tmpFile
sum=`bc $tmpFile | tail -n 1`
#echo $sum
done
# Compute decision runtime in average
min_sum=0
sec_sum=0
tmpFile2=bc_tmp2.txt
grep real $outputFolder/runtime*.txt |
while read line
do
#echo $line
time=`echo $line | cut -d \ -f 2`
min=`echo $time | cut -d m -f 1`
sec=`echo $time | cut -d m -f 2 | cut -d s -f 1`
#echo $time
#echo $min
#echo $sec
echo "$min + $min_sum" > $tmpFile
echo "quit" >> $tmpFile
min_sum=`bc $tmpFile | tail -n 1`
echo "$sec + $sec_sum" > $tmpFile2
echo "quit" >> $tmpFile2
sec_sum=`bc $tmpFile2 | tail -n 1`
#echo $min_sum
#echo $sec_sum
done
# Values of min_sum and sec_sum will remain after the for loop finishes
#for line in `grep real $outputFolder/runtime-$1.txt`
#do
# if [ $line != "real" ]
# then
# echo $line
# fi
#done
#echo "($sum*1000)/$size" > $tmpFile
#echo "quit" >> $tmpFile
#avg=`bc $tmpFile | tail -n 1`
#echo min_sum
#echo sec_sum
# Values lost after the while loop finishes
min_sum=`bc $tmpFile | tail -n 1`
sec_sum=`bc $tmpFile2 | tail -n 1`
# Output results
echo "Runing experiment $size times took $min_sum minutes $sec_sum seconds in total."
echo "The decision procedure took $sum seconds in total."
#clean up
rm $tmpFile
rm $tmpFile2
| true |
4a18a4bd6155d3572a80068bf8aed3684b92c142 | Shell | hyleung/docker-multi-app | /start.sh | UTF-8 | 1,064 | 3.078125 | 3 | [] | no_license | if [[ -z "$1" ]];
then
echo -e "Usage: ./start.sh <repository prefix>"
echo -e "\twhere 'repository prefix' is the bit before the slash."
echo -e "\te.g../start.sh jsmith => start all your containers assuming they are named jsmith/<something>"
else
#Start the Redis cluster
docker run -d -h redis_primary --name redis_primary $1/redis_primary
docker run -d -h redis_replica1 --name redis_replica1 --link redis_primary:redis_primary $1/redis_replica
docker run -d -h redis_replica2 --name redis_replica2 --link redis_primary:redis_primary $1/redis_replica
#Start the NodeJS app
docker run -d --name nodeapp -p 3000:3000 --link redis_primary:redis_primary $1/nodejs
#Start ElasticSearch
docker run -d -h elasticsearch --name elasticsearch -p 9200:9200 -p 9300:9300 $1/elasticsearch
#Start Logstash
docker run -d --name logstash --volumes-from nodeapp --volumes-from redis_primary --link elasticsearch:elasticsearch $1/logstash
#Start Kibana
docker run -d --name kibana -p 8888:8888 $1/kibana
fi
| true |
26a5db2e23d2c3d02ce430572d1acd76b8af5cf4 | Shell | FauxFaux/debian-control | /s/solid-pop3d/solid-pop3d_0.15-30_amd64/prerm | UTF-8 | 327 | 3.25 | 3 | [] | no_license | #!/bin/sh
# pre-removal script for Solid POP3
set -e
# Automatically added by dh_installinit/11.3.5
if [ -x "/etc/init.d/solid-pop3d" ] && [ "$1" = remove ]; then
invoke-rc.d solid-pop3d stop || exit 1
fi
# End automatically added section
case "X$1" in
Xupgrade|Xremove)
update-inetd --disable pop3
;;
*)
;;
esac
| true |
5873acffa9538383517c97f27cc61a5d56ea09d0 | Shell | solutionDrive/docker-composer-container | /docker-test.sh | UTF-8 | 822 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
ATTRIBUTES_TEMPLATE_FILE="tests/inspec/composer-container/attributes.yml.template"
ATTRIBUTES_FILE="tests/inspec/composer-container/attributes.yml"
cp ${ATTRIBUTES_TEMPLATE_FILE} ${ATTRIBUTES_FILE}
printf '%s\n' ",s~{{ composer_version }}~${COMPOSER_VERSION}~g" w q | ed -s "${ATTRIBUTES_FILE}"
printf '%s\n' ",s~{{ php_version }}~${PHP_VERSION}~g" w q | ed -s "${ATTRIBUTES_FILE}"
if [ "$COMPOSER2" = "1" ]; then
DOCKER_CONTAINER_ID=`docker run -d solutiondrive/docker-composer2-container:php$PHP_VERSION`
else
DOCKER_CONTAINER_ID=`docker run -d solutiondrive/docker-composer-container:php$PHP_VERSION`
fi
bundle exec inspec exec tests/inspec/composer-container --attrs tests/inspec/composer-container/attributes.yml -t docker://${DOCKER_CONTAINER_ID}
docker stop ${DOCKER_CONTAINER_ID}
| true |
67cc78526695aa82ce867547cf9d15c2ed5e81b1 | Shell | ishine/kaldi_offline | /model_SAD_diarize_transcribe_v2.sh | UTF-8 | 5,704 | 2.515625 | 3 | [] | no_license | #!/bin/bash
AUDIODIR='/opt/aspire/audio'
TRANSCRIPTDIR='/opt/aspire/transcripts'
mkdir -p /opt/aspiretemp
TEMPDIR='/opt/aspiretemp'
paste <(ls $AUDIODIR/*.wav | xargs -n 1 basename | sed -e 's/\.wav$//') <(ls -d $AUDIODIR/*.wav) > $TEMPDIR/wav.scp && \
paste <(ls $AUDIODIR/*.wav | xargs -n 1 basename | sed -e 's/\.wav$//') <(ls $AUDIODIR/*.wav | xargs -n 1 basename | sed -e 's/\.wav$//') > $TEMPDIR/utt2spk && \
paste <(ls $AUDIODIR/*.wav | xargs -n 1 basename | sed -e 's/\.wav$//') <(ls $AUDIODIR/*.wav | sed -e 's/.*/2/g' ) > $TEMPDIR/reco2num_spk && \
cd /opt/kaldi/egs/aspire/s5 && \
. cmd.sh && \
. path.sh && \
utils/utt2spk_to_spk2utt.pl $TEMPDIR/utt2spk > $TEMPDIR/spk2utt && \
rm -rf data/eval2000_* && \
rm -rf exp/nnet3/ivectors_eval2000 && \
rm -rf exp/chain/tdnn_7b/decode_eval2000_pp_tg && \
utils/copy_data_dir.sh $TEMPDIR data/eval2000_hires && \
cd /opt/kaldi/egs/wsj/s5 && \
steps/segmentation/detect_speech_activity.sh \
--nj 4 \
--cmd "/opt/kaldi/egs/wsj/s5/utils/run.pl" \
--extra-left-context 79 --extra-right-context 21 \
--extra-left-context-initial 0 --extra-right-context-final 0 \
--frames-per-chunk 150 --mfcc-config /opt/kaldi/egs/callhome_diarization/conf/mfcc_hires.conf \
/opt/kaldi/egs/aspire/s5/data/eval2000_hires \
/opt/kaldi/egs/callhome_diarization/exp/segmentation_1a/tdnn_stats_asr_sad_1a \
/opt/kaldi/egs/aspire/s5/data/eval2000_hires_seg \
/opt/kaldi/egs/aspire/s5/data/eval2000_hires_temp \
/opt/kaldi/egs/aspire/s5/data/eval2000_hires && \
cd /opt/kaldi/egs/aspire/s5 && \
steps/make_mfcc.sh --mfcc-config /opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/conf/mfcc.conf \
--nj 4 --write-utt2num-frames true \
data/eval2000_hires_seg exp/make_mfcc data/eval2000_hires_seg_mfcc && \
utils/fix_data_dir.sh data/eval2000_hires_seg && \
/opt/kaldi/egs/callhome_diarization/v1/local/nnet3/xvector/prepare_feats.sh --nj 4 data/eval2000_hires_seg data/eval2000_hires_seg_cmn exp/eval2000_hires_seg && \
cp data/eval2000_hires_seg/segments data/eval2000_hires_seg_cmn/ && \
utils/fix_data_dir.sh data/eval2000_hires_seg_cmn && \
/opt/kaldi/egs/callhome_diarization/v1/diarization/nnet3/xvector/extract_xvectors.sh \
--cmd "/opt/kaldi/egs/wsj/s5/utils/run.pl" \
--nj 4 --window 1.5 --period 0.75 --apply-cmn false \
--min-segment 0.5 \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a \
data/eval2000_hires_seg_cmn \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg && \
/opt/kaldi/egs/callhome_diarization/v1/diarization/nnet3/xvector/score_plda.sh \
--cmd "/opt/kaldi/egs/wsj/s5/utils/run.pl" \
--target-energy 0.9 --nj 4 \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_callhome2 \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg/plda_scores && \
cd /opt/kaldi/egs/callhome_diarization/v1/ && \
/opt/kaldi/egs/callhome_diarization/v1/diarization/cluster.sh \
--cmd "/opt/kaldi/egs/wsj/s5/utils/run.pl" \
--nj 4 \
--reco2num-spk $TEMPDIR/reco2num_spk \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg/plda_scores \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg/plda_scores_num_speakers && \
cp /opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg/plda_scores_num_speakers/rttm $TRANSCRIPTDIR && \
cd /opt/kaldi/egs/aspire/s5 && \
mkdir data/eval2000_hires1 && \
cp $TEMPDIR/{wav.scp,utt2spk,spk2utt} data/eval2000_hires1 && \
utils/fix_data_dir.sh data/eval2000_hires1
mkdir data/eval2000_hires_seg1 && \
cp $TEMPDIR/wav.scp data/eval2000_hires_seg1 && \
awk '{print $2 "-" sprintf("%07i", $4*100) "-" sprintf("%07i", ($4 + $5)*100) "-" $8, $2, $4, $4+$5 }' \
/opt/kaldi/egs/callhome_diarization/0006_callhome_diarization_v2_1a/exp/xvector_nnet_1a/xvectors_eval2000_hires_seg/plda_scores_num_speakers/rttm > \
data/eval2000_hires_seg1/segments && \
awk '{print $1,$2 }' data/eval2000_hires_seg1/segments > data/eval2000_hires_seg1/utt2spk && \
utils/fix_data_dir.sh data/eval2000_hires_seg1 && \
steps/make_mfcc.sh --mfcc-config conf/mfcc_hires.conf --nj 4 data/eval2000_hires_seg1 && \
steps/compute_cmvn_stats.sh data/eval2000_hires_seg1 && \
utils/fix_data_dir.sh data/eval2000_hires_seg1 && \
steps/online/nnet2/extract_ivectors.sh --nj 4 --cmd "run.pl" \
data/eval2000_hires_seg1 \
data/lang_pp_test \
exp/nnet3/extractor \
exp/nnet3/ivectors_eval2000 && \
steps/nnet3/decode.sh --nj 4 --cmd 'run.pl' --config conf/decode.config \
--acwt 1.0 --post-decode-acwt 10.0 \
--beam 15 --lattice_beam 6 \
--frames-per-chunk 50 --skip-scoring true \
--online-ivector-dir exp/nnet3/ivectors_eval2000 \
exp/tdnn_7b_chain_online/graph_pp \
data/eval2000_hires_seg1 \
exp/chain/tdnn_7b/decode_eval2000_pp_tg && \
SRC_DT="$(date +%Y%m%d%H)" && \
SRC_TM="$(date +%M%S%6N)" && \
FILE_NM=$(echo "$TRANSCRIPTDIR"'/transcript_'"$SRC_DT"'_'"$HOSTNAME"_"$SRC_TM"'.txt') && \
for i in exp/chain/tdnn_7b/decode_eval2000_pp_tg/lat.*.gz; \
do lattice-best-path ark:"gunzip -c $(echo "$i") |" "ark,t:|int2sym.pl -f 2- exp/tdnn_7b_chain_online/graph_pp/words.txt" | sed -r 's/\s+/|/' | awk -F'|' -v OFS='|' '{x=$1;y=gsub(/-/,"|",$1);z='"$SRC_DT"';print $1,$2,z}'; \
done | \
( echo -e "sourcemediaid|startoffset|endoffset|party|phrase|src_file_date"; cat ) \
> "$FILE_NM"
| true |
2b935a29692bf24f0a4d01b5c97179721bb2d704 | Shell | RakhithJK/Octopus-1 | /Subsystem_WIN.sh | UTF-8 | 3,783 | 3.828125 | 4 | [] | no_license | #!/bin/bash
########################################################################################################################################
#
# This script is made for installing the dependencies necessary for Octopus for Windows systems which run Kali Linux from the subsystem.
#
# @Author : UnknowUser50
#
# @For : Octopus script
#
# @Release : September 2020
#
##########################################################################################################################################
# Colors declaration :
export BLUE='\033[1;94m'
export RED='\033[1;91m'
export YELLOW='\033[1;93m'
export GREEN='\033[1;92m'
export RESETCOLOR='\033[1;00m'
# Informations :
sys_name=$(uname -a | grep "Linux *" | cut -d# -f1)
hostname=$(hostname)
date=$(date +%c)
__ROOT() {
if [[ $UID != 0 ]]; then
printf "$YELLOW[$RED!$YELLOW] Please, run this script as sudo $RESETCOLOR\n"
exit 1
fi
}
bannerscreen() {
clear
printf "\n"
printf "$BLUE .=====================================================. \n"
printf "$BLUE || || \n"
printf "$BLUE ||$RED _ $RED _--'$GREEN'--_ $BLUE|| \n"
printf "$BLUE ||$RED ' $RED--'' |$GREEN | $RESETCOLOR .--. | || $BLUE|| \n"
printf "$BLUE ||$RED ' . $RED_| |$GREEN | $RESETCOLOR | | | || $BLUE|| \n"
printf "$BLUE ||$RED _ $RED| _--'$GREEN'--_| $RESETCOLOR |----| |.- .-i |.-. || $BLUE|| \n"
printf "$BLUE || ' $BLUE --'' |$YELLOW | |$RESETCOLOR | | | | | | $BLUE || \n"
printf "$BLUE || ' $BLUE. _| |$YELLOW | |$RESETCOLOR | | '-( | | () $BLUE || \n"
printf "$BLUE || $BLUE_ | _--'$YELLOW'--_| $RESETCOLOR | | $BLUE || \n"
printf "$BLUE || $BLUE ' --'' $RESETCOLOR '--' $BLUE|| \n"
printf "$BLUE || || \n"
printf "$BLUE .=====================================================. \n"
printf "\n"
echo -e -n " $BLUE[$GREEN!$BLUE] Enter current user : $RESETCOLOR"
read current_user
sleep 10
}
global_conf() {
printf "$BLUE [$GREEN*$BLUE] Starting SSH on subsystem ... $RESETCOLOR \n"
sudo /etc/init.d/ssh start &>/dev/null
printf "$BLUE [$GREEN*$BLUE] SSH service started at : $GREEN$date $RESETCOLOR \n"
sleep 1
# Nmap installation, checking in /bin
printf "$BLUE [$GREEN*$BLUE] Installing NMAP for you $RESETCOLOR \n"
if [[ -e /bin/nmap ]]; then
printf "$BLUE [$GREEN!$BLUE] Nmap was already installed $RESETCOLOR \n"
sleep 1
else
sudo apt install -y nmap &>/dev/null
printf "$BLUE [$GREEN!$BLUE] Nmap is now installed $RESETCOLOR \n"
sleep 1
fi
# Wireshark installation, checking in /bin
printf "$BLUE [$GREEN*$BLUE] Installing Wireshark for you $RESETCOLOR \n"
if [[ -e /bin/wireshark ]]; then
printf "$BLUE [$GREEN*$BLUE] WireShark was already installed $RESETCOLOR \n"
sleep 1
else
sudo apt install -y wireshark &>/dev/null
printf "$BLUE [$GREEN*$BLUE] Wireshark is now installed $RESETCOLOR \n"
sleep 1
fi
# SQLMAP installation, checking in /bin
printf "$BLUE [$GREEN*$BLUE] Installing SqlMap for you $RESETCOLOR \n"
if [[ -e /bin/sqlmap && -e /bin/sqlmapapi ]]; then
printf "$BLUE [$GREEN*$BLUE] SqlMap was already installed $RESETCOLOR \n"
sleep 1
else
sudo apt install -y sqlmap &>/dev/null
printf "$BLUE [$GREEN*$BLUE] SqlMap is now installed $RESETCOLOR \n"
sleep 1
fi
}
__ROOT
bannerscreen
global_conf
| true |
0b96ef1bf706b2f887e504ba7dbbd92b0e4bca2f | Shell | Stale-1/V2ray | /domain.sh | UTF-8 | 291 | 2.765625 | 3 | [] | no_license | #!/bin/bash
clear
echo "Enter your server hostname or domain"
echo ""
echo -e "Enter your response \c"
read -r n
case $n in
$n) sed -i "s/stale_domain/$n/g" /root/v2ray.sh && clear
sed -i "s/hostname/$n/g" /root/v2rayserver && cd /root && ./uuid.sh;;
esac
| true |
8f7e936cfe9917050c02137b60213ec9626dacf9 | Shell | nikotrone/Linux-Scripts | /newFedora_installation.sh | UTF-8 | 1,170 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# In order to run the script needs the sudo password as the first argument in the command line
# TODO: check that the password is provided
#if[ $? ]
echo -e "Welcome to your new Fedora environment!\\n"
# Upgrading system
echo -e "Upgrading the system\\t\\t"
if [[ $("echo -e $0 | sudo -S dnf -y update | cat > dnf_update_log") ]]; then
echo -e "[ OK ]\\n"
else
echo -e "[ FAIL ]\\n"
fi
# Installing basic tools
echo -e "Installing git\\t\\t"
if [[ $("echo $0 | sudo -S dnf install git nano | cat >> dnf_install_basic_log") ]]; then
echo -e "[ OK ]\\n"
else
echo -e "[ FAIL ]\\n"
fi
echo -e "Checking if ll shortcut is in place\\t\\t"
if [[ $("ll > temp.log") ]]; then
echo -e "[ OK ]\\n"
else
echo -e "\\n ll shortcut does not exist, creating shortcut now\\n" >> dnf_install_basic_log
if [ -e ~/.bash_profile ]
then
echo "alias ll='ls -lGh \$@'" >> ~/.bash_profile
else
echo "alias ll='ls -lGh \$@'" > ~/.bash_profile
fi
echo -e "[ DONE ]\\n"
fi
mkdir ~/installation_tmp
cd ~/installation_tmp || exit
git clone https://github.com/nikotrone/Linux-Scripts.git
cd Linux-Scripts || exit | true |
ecdd6fb42684b4c5d2fcd795bf9b727c9b4c4a63 | Shell | hezhaoqing/Shell | /YJ/history.sh | UTF-8 | 628 | 3.078125 | 3 | [] | no_license | 编辑/etc/profile文件,在文件末尾加入下面代码:
[root@~ /]# vim /etc/profile
history
USER=`whoami`
USER_IP=`who -u am i 2>/dev/null| awk '{print $NF}'|sed -e 's/[()]//g'`
if [ "$USER_IP" = "" ]; then
USER_IP=`hostname`
fi
if [ ! -d /var/log/history ]; then
mkdir /var/log/history
chmod 777 /var/log/history
fi
if [ ! -d /var/log/history/${LOGNAME} ]; then
mkdir /var/log/history/${LOGNAME}
chmod 300 /var/log/history/${LOGNAME}
fi
export HISTSIZE=4096
DT=`date +"%Y%m%d_%H:%M:%S"`
export HISTFILE="/var/log/history/${LOGNAME}/${USER}@${USER_IP}_$DT"
chmod 600 /var/log/history/${LOGNAME}/*history* 2>/dev/null
| true |
d677cb31c30ecde8f97c9aca9a01fcaa68ca12bf | Shell | delight09/gadgets | /filter/bilivideo_resource_filter.sh | UTF-8 | 1,899 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash --
# Filter cURL commands of bilibili video webpage
# return cURL commands with all slices from video resource
# Filter keywords
FILTER_SITE_METADATA="api.bilibili.com|data.bilibili.com|live.bilibili.com|cnzz.com|qq.com"
FILTER_TYPE_METADATA=".css|.js|.png|.gif|.webp|.jpg"
FILTER_CONTENT_METADATA=";base64|comment"
FILTER_TYPE_VIDEO=".mp4|.flv"
FILTER_KEYWORD_VIDEO="Range: bytes"
# Global variables
FD_TEMPFILE=/tmp/filtered.raw
FD_TEMPFILE_FIN=/tmp/filtered.fin.raw
FD_ALL_CURL=$1
patch_unify_content() {
sed -i -r 's>Range: bytes=[[:digit:]]+->Range: bytes=0->g' $1
}
escape_string() {
echo "$1" | sed 's=\.=\\.=g'
}
is_not_video_resource() {
local _url="$1"
echo $_url |grep -qiE "${FILTER_TYPE_METADATA}"'|'"${FILTER_SITE_METADATA}"'|'"${FILTER_CONTENT_METADATA}"
}
# Init
if [[ -z $1 ]];then
echo "USAGE: $0 <cURL_commands file>"
exit 1
fi
rm -f $FD_TEMPFILE
rm -f $FD_TEMPFILE_FIN
# escape filter keywords for regexp
FILTER_SITE_METADATA=$(escape_string "$FILTER_SITE_METADATA")
FILTER_TYPE_METADATA=$(escape_string "$FILTER_TYPE_METADATA")
FILTER_CONTENT_METADATA=$(escape_string "$FILTER_CONTENT_METADATA")
FILTER_TYPE_VIDEO=$(escape_string "$FILTER_TYPE_VIDEO")
FILTER_KEYWORD_VIDEO=$(escape_string "$FILTER_KEYWORD_VIDEO")
# Main
while read i
do
if is_not_video_resource "$i";then
sleep 0
else
echo $i >> $FD_TEMPFILE
fi
done < $FD_ALL_CURL
# Unify cURL contents then sort, uniq the result
patch_unify_content $FD_TEMPFILE
grep -iE "${FILTER_TYPE_VIDEO}" $FD_TEMPFILE | grep -iE "${FILTER_KEYWORD_VIDEO}" | sort | uniq > $FD_TEMPFILE_FIN
patch_filename() {
local _line="$1"
local _fn=$(echo $_line | grep -oE "[^/]*(${FILTER_TYPE_VIDEO})\?" | tr -d '?')
echo $_line | sed "s>;$>-o $_fn ;>"
}
# Add filename and output commands
while read i
do
patch_filename "$i"
done < $FD_TEMPFILE_FIN
| true |
6df03bd259fd1220790ab9679f99e489957d8987 | Shell | dmbursty/school | /3B/cs343/A3/test.sh | UTF-8 | 1,272 | 2.90625 | 3 | [] | no_license | echo "Testing no arguments"
./tokenring 2> myout
./q1 2> out
diff myout out
echo "Testing too many arguments"
./tokenring 1 2 3 4 5 6 7 2> myout
./q1 1 2 3 4 5 6 7 2> out
diff myout out
echo "Testing bad input file"
./tokenring 1 xxxxxxx 2> myout
./q1 1 xxxxxxx 2> out
diff myout out
echo "Testing bad number of stations"
echo "Please specify 1-100 stations" > out
./tokenring 0 t_empty 2> myout
diff myout out
./tokenring -1 t_empty 2> myout
diff myout out
./tokenring 1000 t_empty 2> myout
diff myout out
./tokenring abcd t_empty 2> myout
diff myout out
echo "Testing invalid requests"
./tokenring 5 t_invalid > myout 2> /dev/null
./q1 5 t_invalid > out 2> /dev/null
diff myout out
echo "Testing no requests"
./tokenring 5 t_empty > myout 2> /dev/null
./q1 5 t_empty > out 2> /dev/null
diff myout out
echo "Testing standard funtionality"
./tokenring 8 t_test > myout 2> /dev/null
./q1 8 t_test > out 2> /dev/null
diff myout out
echo "Testing random request order in input"
./tokenring 8 t_bad_order > myout 2> /dev/null
./q1 8 t_bad_order > out 2> /dev/null
diff myout out
echo "Testing huge specification"
python tester.py 200 100 > t_huge
./tokenring 100 t_huge > myout 2> /dev/null
./q1 100 t_huge > out 2> /dev/null
diff myout out
echo "Finished all tests"
| true |
84d93f81a4413b132963d9abd33929334cf5affb | Shell | aditya21891/LearnBash | /shellscript/loop.sh | UTF-8 | 171 | 2.75 | 3 | [] | no_license | # this shell script is top practise linux performance tuning
#! /bin/bash
while true
do
x=$((x+1))
/bin/pwd > /dev/null
if [ $x -gt 50000 ]
then
exit
fi
done
| true |
5f24a7a0e62fbb64932e32d49b3ea4867cbade3a | Shell | sguermond/offerup | /django-python3-vagrant/vagrant_bootstrap.sh | UTF-8 | 1,098 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Install git for version control, pip for install python packages
echo 'Installing git and python3-pip...'
sudo apt-get -qq install git python3-pip > /dev/null 2>&1
# Install virtualenv / virtualenvwrapper
echo 'Installing virtualenv and virtualenvwrapper...'
pip3 install --quiet virtualenv
pip3 install --quiet virtualenvwrapper
mkdir ~vagrant/.virtualenvs
chown vagrant:vagrant ~vagrant/.virtualenvs
printf "\n\n# Virtualenv settings\n" >> ~vagrant/.bashrc
printf "export PYTHONPATH=/usr/lib/python3.4" >> ~vagrant/.bashrc
printf "export WORKON_HOME=~vagrant/.virtualenvs\n" >> ~vagrant/.bashrc
printf "export PROJECT_HOME=/vagrant\n" >> ~vagrant/.bashrc
printf "export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3.4\n" >> ~vagrant/.bashrc
printf "source /usr/local/bin/virtualenvwrapper.sh\n" >> ~vagrant/.bashrc
printf "alias python='/usr/bin/python3.4'\n" >> ~vagrant/.bashrc
printf "alias menu='cat /etc/motd'\n" >> ~vagrant/.bashrc
# Complete
cp /vagrant/examples/motd.txt /etc/motd
echo ""
echo "Vagrant install complete."
echo "Now try logging in:"
echo " $ vagrant ssh"
| true |
8607ef011694df7754ce787982e5b58f1ce3b5dc | Shell | agladstein/AJ_ABC | /run_post_process.sh | UTF-8 | 1,069 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
IN_PATH=$1
OUT_PATH=$2
for MODEL in {1..3}
do
if (( ${MODEL}==1 )); then
HEADER=header_M${MODEL}_222.txt
else
HEADER=header_M${MODEL}.txt
fi
for BUCKET in $(ls ${IN_PATH}/sim_values_AJ_M${MODEL})
do
echo "python /vol_c/src/macsswig_simsaj/post_process.py ${IN_PATH} ${OUT_PATH} ${MODEL} ${BUCKET} ${HEADER} same &"
python /vol_c/src/macsswig_simsaj/post_process.py ${IN_PATH} ${OUT_PATH} ${MODEL} ${BUCKET} ${HEADER} same &
# if it starts to be too slow. do something else fancy to not make files that have already been made.
# if [ ! -f ${OUT_PATH}/input_ABCtoolbox_M${MODEL}_${BUCKET}.txt ]; then
# echo "python /vol_c/src/macsswig_simsaj/post_process.py ${IN_PATH} ${OUT_PATH} ${MODEL} ${BUCKET} ${HEADER} same &"
# python /vol_c/src/macsswig_simsaj/post_process.py ${IN_PATH} ${OUT_PATH} ${MODEL} ${BUCKET} ${HEADER} same &
# else
# echo "${OUT_PATH}/input_ABCtoolbox_M${MODEL}_${BUCKET}.txt is already created"
# fi
done
done
| true |
e4935baf4fdc8bea7ccd5605b51dc2c596ef25f8 | Shell | radanalyticsio/oshinko-s2i | /test/travis-help/travis-check-pods.sh | UTF-8 | 2,104 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
oc login -u system:admin
oc project default
while true; do
V=$(oc get dc docker-registry --template='{{index .status "latestVersion"}}')
P=$(oc get pod docker-registry-$V-deploy --template='{{index .status "phase"}}')
if [ "$?" -eq 0 ]; then
echo phase is $P for docker-registry deploy $V
if [ "$P" == "Failed" ]; then
echo "registry deploy failed, try again"
oc get pods
oc rollout retry dc/docker-registry
sleep 10
continue
fi
fi
REG=$(oc get pod -l deploymentconfig=docker-registry --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -eq 0 ]; then
break
fi
oc get pods
echo "Waiting for registry pod"
sleep 10
done
while true; do
REG=$(oc get pod -l deploymentconfig=docker-registry --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -ne 0 -o "$REG" == "Error" ]; then
echo "Registy pod is in error state..."
exit 1
fi
if [ "$REG" == "Running" ]; then
break
fi
sleep 5
done
while true; do
V=$(oc get dc router --template='{{index .status "latestVersion"}}')
P=$(oc get pod router-$V-deploy --template='{{index .status "phase"}}')
if [ "$?" -eq 0 ]; then
echo phase is $P for router deploy $V
if [ "$P" == "Failed" ]; then
echo "router deploy failed, try again"
oc get pods
oc rollout retry dc/router
sleep 10
continue
fi
fi
REG=$(oc get pod -l deploymentconfig=router --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -eq 0 ]; then
break
fi
oc get pods
echo "Waiting for router pod"
sleep 10
done
while true; do
REG=$(oc get pod -l deploymentconfig=router --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -ne 0 -o "$REG" == "Error" ]; then
echo "Router pod is in error state..."
exit 1
fi
if [ "$REG" == "Running" ]; then
break
fi
sleep 5
done
echo "Registry and router pods are okay"
| true |
dd4781248329bb31c2edcad98822fc08d9e7658c | Shell | ekumenlabs/roscpp_android | /get_ros_stuff.sh | UTF-8 | 749 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Abort script on any failures
set -e
my_loc="$(cd "$(dirname $0)" && pwd)"
source $my_loc/config.sh
source $my_loc/utils.sh
if [ $# != 1 ] || [ $1 == '-h' ] || [ $1 == '--help' ]; then
echo "Usage: $0 prefix_path"
echo " example: $0 /home/user/my_workspace"
exit 1
fi
cmd_exists git || die 'git was not found'
prefix=$(cd $1 && pwd)
[ "$CMAKE_PREFIX_PATH" = "" ] && die 'could not find target basedir. Have you run build_catkin.sh and sourced setup.bash?'
#cd $CMAKE_PREFIX_PATH
cd $prefix
mkdir -p catkin_ws/src && cd catkin_ws
if [ -f src/.rosinstall ]; then
cd src/
wstool merge $my_loc/ndk.rosinstall --merge-replace
wstool update
cd ..
else
wstool init -j$PARALLEL_JOBS src $my_loc/ndk.rosinstall
fi
| true |
c6b3b4af6c77ecd57ab8663be0d4938564698cf4 | Shell | googledatalab/datalab | /containers/gateway/prepare.sh | UTF-8 | 1,586 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepares the local filesystem to build the Google Cloud DataLab
# gateway docker image. Note that invocations of this should generally
# be followed by a `docker build` and then `cleanup.sh`.
#
# Usage:
# prepare.sh
# docker build -t datalab-gateway ./
# cleanup.sh
#
# If [path_of_pydatalab_dir] is provided, it will copy the content of that dir into image.
# Otherwise, it will get the pydatalab by "git clone" from pydatalab repo.
pushd $(pwd) >> /dev/null
HERE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
cd ${HERE}
# Create a versioned Dockerfile based on current date and git commit hash
export REVISION_ID="${1:-}"
source ../../tools/release/version.sh
VERSION_SUBSTITUTION="s/_version_/$DATALAB_VERSION/"
COMMIT_SUBSTITUTION="s/_commit_/$DATALAB_COMMIT/"
cat Dockerfile.in | sed $VERSION_SUBSTITUTION | sed $COMMIT_SUBSTITUTION > Dockerfile
# Copy the license file into the container
cp ../../third_party/license.txt content/license.txt
popd >> /dev/null
| true |
50072ed14e3adb608c1e2af142df6b4c982be85b | Shell | GauravWalia19/TETRIS | /conf/release.bash | UTF-8 | 693 | 2.734375 | 3 | [
"MIT"
] | permissive | #!bin/bash
# this script is used for releasing new tetris jar file for use
cd ../build # move to build directory for application build
bash build.bash # build the whole application
cd ../bin # move to bin directory
echo "Main-Class: Main" > Manifest.txt # write manifest file
jar cfm Tetris.jar Manifest.txt *.class RAINBOW/*.class # make jar file
echo "jar file created ..."
rm Manifest.txt # remove manifest file
mv Tetris.jar ../release # move jar file to release | true |
9728964887a6c7b3c717d88d00a03e50c0d91e96 | Shell | waldirio/errata_report | /errata_report.sh | UTF-8 | 1,169 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Waldirio M Pinheiro <waldirio@gmail.com> / <waldirio@redhat.com>
# Some variables
FULL_LIST="/tmp/full_list.log"
FULL_PARSED_LIST="/tmp/full_list_parsed.log"
ERRATA_CH="/tmp/errata_ch.log"
# Zeroing all the files
>$FULL_LIST
>$FULL_PARSED_LIST
>$ERRATA_CH
# Script to generate a complete Errata Report
for b in $(hammer --csv host list --search 'name !~ virt-who*' --thin 1 | grep -v ^Id | cut -d, -f2); do err_list=$(hammer --csv erratum list --host $b | grep -v ^ID); echo "$err_list" | sed -e "s/^/$b,/g"; done >$FULL_LIST
cat $FULL_LIST | awk 'FS="," {print $4","$3","$2","$5","$1}' >$FULL_PARSED_LIST
cat $FULL_PARSED_LIST | sort -k 2 -t , | cut -d, -f2,3,5 | grep ^R >$ERRATA_CH
echo "Type,Id,Title,Issued,Affected Hosts"
while read line
do
errata_id=$(echo $line | cut -d, -f1)
internal_id=$(echo $line | cut -d, -f2)
fqdn=$(echo $line | cut -d, -f3)
issue_date=$(hammer erratum info --id $internal_id | grep ^Issued | awk '{print $2}')
type=$(grep -E -o "$errata_id.*" /tmp/full_list.log | cut -d, -f2 | sort -u)
title=$(grep -E -o "$errata_id.*" /tmp/full_list.log | cut -d, -f3- | sort -u)
echo "$type,$errata_id,$title,$issue_date,$fqdn"
done < $ERRATA_CH
| true |
b9c62c2692d1f4e20ea5d1ae23353c7bfa54680b | Shell | ComplianceAsCode/content | /linux_os/guide/system/network/network-ipv6/disabling_ipv6/grub2_ipv6_disable_argument/tests/wrong_value_etcdefaultgrub_recovery_disabled.fail.sh | UTF-8 | 1,022 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# platform = Red Hat Enterprise Linux 7
# Break the ipv6.disable argument in kernel command line in /etc/default/grub
if grep -q '^GRUB_CMDLINE_LINUX_DEFAULT=.*ipv6\.disable=.*"' '/etc/default/grub' ; then
# modify the GRUB command-line if an ipv6.disable= arg already exists
sed -i 's/\(^GRUB_CMDLINE_LINUX_DEFAULT=".*\)ipv6\.disable=[^[:space:]]*\(.*"\)/\1 ipv6\.disable=0 \2/' '/etc/default/grub'
else
# no ipv6\.disable=arg is present, append it
sed -i 's/\(^GRUB_CMDLINE_LINUX_DEFAULT=".*\)"/\1 ipv6\.disable=0"/' '/etc/default/grub'
fi
# removing the parameter from the no recovery kernel parameters as well
sed -i 's/\(^GRUB_CMDLINE_LINUX=".*\)ipv6\.disable=[^[:space:]]*\(.*"\)/\1 \2/' '/etc/default/grub'
# disabling recovery
sed -i 's/\(^.*GRUB_DISABLE_RECOVERY=\).*/\1true/' '/etc/default/grub'
#if the line is not present at all, add it
if ! grep -q '^GRUB_CMDLINE_LINUX_DEFAULT=.*$' '/etc/default/grub'; then
echo 'GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=0"' >> /etc/default/grub
fi
| true |
e4b3b53a3d75a26e8bc0b26c6bf7d431d3189b4e | Shell | pengdan01/spider | /crawler/crawler2/general_crawler/tools/deploy_crawler/deploy_scheduler.sh | UTF-8 | 784 | 2.875 | 3 | [] | no_license | #!/bin/bash
# 本脚本生成 scheduler 运行时目录.
# 注意: 有两个符号链接
# 1. data/bin_index_model: 指向 ../../../dict/bin_index_model
# 2. web: 指向 ../../../dict/web
rm -rf scheduler
# bin
mkdir -p scheduler/bin && cp /home/pengdan/workplace/wly/.build/opt/targets/crawler2/general_crawler/scheduler scheduler/bin/
# shell
mkdir -p scheduler/shell && cp /home/pengdan/workplace/wly/crawler2/general_crawler/script/scheduler/* scheduler/shell/
# data
mkdir -p scheduler/data && \
cp /home/pengdan/workplace/wly/crawler2/general_crawler/data/scheduler/* scheduler/data/ && \
cd scheduler/data && ln -s ../../../dict/bin_index_model bin_index_model && cd -
mkdir -p scheduler/log && mkdir -p scheduler/run
cd scheduler && ln -s ../../dict/web web
exit 0
| true |
a12a55f2c846ac9f16e7e8855aff71bc24a994b4 | Shell | ppassion/myShell | /bin/all-check.sh | UTF-8 | 904 | 3.578125 | 4 | [] | no_license | #!/bin/bash
###
# @Author : chyh
# @Date : 2021-04-07 22:03:20
# @LastEditTime : 2021-04-23 22:27:36
# @Description : 调用其他脚本,检查所有组件的启动情况
###
# hadoop
./hadoop/hadoop-check.sh >/dev/null
hadoop_result=$?
#zookeeper
./zookeeper/zookeeper-check.sh > /dev/null
zookeeper_result=$?
#mysql
./mysql/mysql-check.sh > /dev/null
mysql_result=$?
#hive
./hive/hive-check.sh > /dev/null
hive_result=$?
#hbase
./hbase/hbase-check.sh > /dev/null
hbase_result=$?
function printStatus() {
component_name=$1
component_result=$(eval echo '$'${component_name}_result)
case "${component_result}" in
0)
echo "$component_name success"
;;
1)
echo "$component_name failed"
;;
esac
}
echo '====================================='
printStatus hadoop
printStatus zookeeper
printStatus mysql
printStatus hive
printStatus hbase
| true |
52a1583184be24c85d0ad553b31f52ecb40619ff | Shell | STAMP-project/evosuite-runner | /pitest/scripts/bash/parsing.sh | UTF-8 | 1,476 | 3.515625 | 4 | [] | no_license | ## inputs
pid=$1
execution_id=$2
project=$3
classPaths=$4
outDir=$5
target_class=$6
sourceDirs=$7
mutableCPs=$8
logFile="$9"
resultDir="${10}"
outDir="${11}"
##
## wait for the process to finish
while kill -0 "$pid"; do
sleep 1
done
exists=$(python pitest/scripts/python/exists_in_file.py "$logFile" "did not pass without mutation")
if [[ "$exists" == "TRUE" ]]
then
echo "A problem in the PIT execution has been found."
echo "Fixing the problem ..."
failedClass=$(python pitest/scripts/python/detect_failing_class.py "$logFile" "did not pass without mutation")
failedTest=$(python pitest/scripts/python/detect_failing_test.py "$logFile" "did not pass without mutation")
# Add @Ignore to failing tests
for mainTest in `find $resultDir -name "*_ESTest.java" -type f`; do
java -jar pitest/libs/flaky_related/IgnoreAdder.jar $mainTest "$failedTest"
projectCP=$(ls -d -1 "$PWD/bins/$project/"* | tr '\n' ':')
test_execution_libs=$(ls -p "$PWD/pitest/libs/test_execution/"* | tr '\n' ':')
preparedCPs="$projectCP:$test_execution_libs"
javac -cp "$preparedCPs:$resultDir" $mainTest
done
echo "Problem has been fixed."
echo "Rerunning the PIT execution ..."
. pitest/scripts/bash/execution.sh $execution_id $project $classPaths $outDir $target_class $sourceDirs "$mutableCPs" "$logFile" "$resultDir" "$outDir" &
else
echo "PIT execution is finished. Execution log: $logFile"
fi | true |
9926635748ae78573127d4e187a07eef603297d4 | Shell | amezin/zdotdir | /zprofile.d/50prezto.zsh | UTF-8 | 253 | 2.734375 | 3 | [] | no_license | #
# Executes commands at login pre-zshrc.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
#
# Language
#
if [[ -z "$LANG" ]]; then
export LANG='en_US.UTF-8'
fi
#
# Paths
#
# Ensure path arrays do not contain duplicates.
typeset -gU cdpath fpath mailpath path
| true |
3d644125cc6f7f6930b689bb71753ce44114c886 | Shell | NaokiMizutani/FirewallSetting | /dns_a.sh | UTF-8 | 565 | 3.671875 | 4 | [] | no_license | #!/bin/sh
HOST=$1
NEST=$2
# DNSレコードを取得します
DNS=$(dig a $HOST | grep "^$1")
if [ "$DNS" = "" ] ; then
[ $NEST -eq 1 ] && echo "[$HOST] DNS does not exist!"
exit
fi
# DNSレコードからipsetのTRUST-LISTにエントリを追加します
IFS_BACKUP=$IFS
IFS=$'\n'
for LINE in $(echo "$DNS" | sed "s/[[:blank:]]\+/ /g"); do
TYPE=$(echo $LINE | cut -d " " -f 4)
ADDR=$(echo $LINE | cut -d " " -f 5)
case $TYPE in
A)
ipset -q add TRUST-LIST $ADDR
;;
CNAME)
./dns_a.sh $ADDR $((NEST + 1))
;;
esac
done
IFS=$IFS_BACKUP
| true |
61c639ff6a848c689ab0785df6dfd713e19da5cf | Shell | sagravat/gistic-x | /average.sh | UTF-8 | 144 | 2.8125 | 3 | [] | no_license | #!/bin/sh
total=18743
for i in {1..22}
do
count=`cut -d"," -f2 same_corr.csv | sort | grep "^${i}$" | wc -l`
echo "$count/$total"
done
| true |
cc7550675e70eedec786e33b47a4242b208fc3ad | Shell | ballestr/play_vagrant | /netbox_demo/bootstrap_common.sh | UTF-8 | 5,736 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env bash
## original from https://github.com/ryanmerolle/netbox-vagrant
## Port to Centos7, sergio.ballestrero@protonmail.com, January 2018
PATH=$PATH:/usr/local/bin/
NETBOX_BRANCH=master
if [ -e /etc/redhat-release ] ; then
## Assume Centos7
OS=Centos7
CFG="/tmp/netbox-vagrant/config_files_$OS"
## for Centos7, the package install part is done in a separate script
## run from Vagrant
#bash /vagrant/bootstrap_Centos7.sh
else
OS=Ubuntu
CFG="/tmp/netbox-vagrant/config_files"
# Prevent
export DEBIAN_FRONTEND=noninteractive
fi
# Install Git
printf "Step 1 of 20: Installing git & cloning netbox-vagrant...\n"
#apt-get install git -y -qq > /dev/null
cd /tmp/ || exit
[ -d netbox-vagrant/.git ] || git clone -b master https://github.com/ryanmerolle/netbox-vagrant.git
## use /vagrant on top of checking out from git, faster dev cycle
rsync -av /vagrant/ netbox-vagrant/
# Update Ubuntu
#printf "Step 2 of 20: Updating Ubuntu...\n"
#apt-get update -y -qq > /dev/null
# Install Postgres & start service
printf "Step 3 of 20: Installing & starting Postgres...\n"
#apt-get install postgresql libpq-dev -y -qq > /dev/null
#sudo service postgresql start
# Setup Postgres with netbox user, database, and permissions
printf "Step 4 of 20: Setup Postgres with netbox user, database, & permissions."
sudo -u postgres psql -c "CREATE DATABASE netbox"
sudo -u postgres psql -c "CREATE USER netbox WITH PASSWORD 'J5brHrAXFLQSif0K'"
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE netbox TO netbox"
# Install nginx
printf "Step 5 of 20: Installing nginx...\n"
#apt-get install nginx -y -qqm> /dev/null
# Install Python 2
printf "Step 6 of 20: Installing Python 3 dependencies...\n"
#apt-get install python3 python3-dev python3-pip libxml2-dev libxslt1-dev libffi-dev graphviz libpq-dev libssl-dev -y -qq > /dev/null
# Upgrade pip
printf "Step 7 of 20: Upgrade pip\n"
pip3 install --upgrade pip
# Install gunicorn & supervisor
printf "Step 8 of 20: Installing gunicorn & supervisor...\n"
pip3 install gunicorn
#apt-get install supervisor -y -qq > /dev/null
printf "Step 9 of 20: Cloning NetBox repo...\n"
# Create netbox base directory & navigate to it
mkdir -p /opt/netbox/ && cd /opt/netbox/
# git clone netbox master branch
[ -d .git ] || git clone -b master https://github.com/digitalocean/netbox.git .
## exit on error
#set -e
# Install NetBox requirements
printf "Step 10 of 20: Installing NetBox requirements...\n"
# pip install -r requirements.txt
pip3 install -r requirements.txt
# Use configuration.example.py to create configuration.py
printf "Step 11 of 20: Configuring Netbox...\n"
cp /opt/netbox/netbox/netbox/configuration.example.py /opt/netbox/netbox/netbox/configuration.py
# Update configuration.py with database user, database password, netbox generated SECRET_KEY, & Allowed Hosts
sed -i "s/'USER': '', /'USER': 'netbox', /g" /opt/netbox/netbox/netbox/configuration.py
sed -i "s/'PASSWORD': '', /'PASSWORD': 'J5brHrAXFLQSif0K', /g" /opt/netbox/netbox/netbox/configuration.py
sed -i "s/ALLOWED_HOSTS \= \[\]/ALLOWED_HOSTS \= \['netbox.internal.local', 'netbox.localhost', 'localhost', '127.0.0.1'\]/g" /opt/netbox/netbox/netbox/configuration.py
SECRET_KEY=$( python3 /opt/netbox/netbox/generate_secret_key.py )
sed -i "s~SECRET_KEY = ''~SECRET_KEY = '$SECRET_KEY'~g" /opt/netbox/netbox/netbox/configuration.py
# Clear SECRET_KEY variable
unset SECRET_KEY
# Setup apache, gunicorn, & supervisord config using premade examples (need to change netbox-setup)
SRCDIR="/tmp/netbox-vagrant/config_files_$OS"
printf "Step 12 of 20: Configuring nginx... \n"
#cp $SRCDIR/nginx-netbox.example /etc/nginx/sites-available/netbox
cp $SRCDIR/nginx-netbox.example /etc/nginx/sites-available/netbox.conf ## Centos7
printf "Step 13 of 20: Configuring gunicorn...\n"
cp $SRCDIR/gunicorn_config.example.py /opt/netbox/gunicorn_config.py
printf "Step 14 of 20: Configuring supervisor...\n"
#cp $SRCDIR/supervisord-netbox.example.conf /etc/supervisor/conf.d/netbox.conf
cp $SRCDIR/supervisord-netbox.example.conf /etc/supervisord.d/netbox.ini ## Centos7
# Apache Setup (enable the proxy and proxy_http modules, and reload Apache)
printf "Step 15 of 20: Completing web service setup...\n"
cd /etc/nginx/sites-enabled/
[ -e default ] && rm -f default
[ -e netbox ] || ln -s /etc/nginx/sites-available/netbox.conf ## Centos7
service nginx restart
#service supervisor restart
service supervisord restart ## Centos7
# Install the database schema
printf "Step 16 of 20: Install the database schema...\n"
python3 /opt/netbox/netbox/manage.py migrate
# Create admin / admin superuser
printf "Step 17 of 20: Create NetBox superuser...\n"
echo "from django.contrib.auth.models import User; User.objects.create_superuser('admin', 'admin@example.com', 'admin')" | python3 /opt/netbox/netbox/manage.py shell --plain
# Collect Static Files
printf "Step 18 of 20: collectstatic\n"
python3 /opt/netbox/netbox/manage.py collectstatic --no-input <<<yes
#
printf "Step 18B of 20: Permissions on MEDIA_DIR...\n"
chgrp -R nginx /opt/netbox/netbox/media/ ## Centos7
chmod -R g+rwx /opt/netbox/netbox/media/
# Load Initial Data (Optional) Comment out if you like
printf "Step 19 of 20: Load intial data.\n"
python3 /opt/netbox/netbox/manage.py loaddata initial_data
# Cleanup netbox-vagrant setup
printf "Step 20 of 20: Cleaning up netbox-vagrant setup files...\n"
rm -rf /tmp/netbox-vagrant/
printf "netbox-vagrant setup files deleted...\n"
# Status Complete
printf "%s\nCOMPLETE: NetBox-Demo Provisioning COMPLETE!!\n"
printf "%s\nTo login to the Vagrant VM use vagrant ssh in the current directory\n"
printf "%s\nTo login to the Netbox-Demo web portal go to http://netbox.localhost:8080\n"
printf "%s\nWeb portal superuser credentials are admin / admin\n" | true |
69be6cb0ddd2dcd6f3da98a27c45ccc71fae5e20 | Shell | levasc/docker-icinga2 | /install.sh | UTF-8 | 5,526 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
export DEBIAN_FRONTEND=noninteractive
#Initial install
apt update
apt upgrade -y
apt-get install -y --no-install-recommends apache2 ca-certificates curl dnsutils gnupg locales lsb-release mailutils mariadb-client mariadb-server php-curl php-ldap php-mysql procps pwgen supervisor unzip wget libdbd-mysql-perl
#start mysql
service mysql start
#Add icinga2 key
curl -s https://packages.icinga.com/icinga.key | apt-key add -
echo "deb http://packages.icinga.org/ubuntu icinga-$(lsb_release -cs) main" > /etc/apt/sources.list.d/icinga2.list
apt update
apt-get install -y --no-install-recommends icinga2 icinga2-ido-mysql icingacli icingaweb2 icingaweb2-module-doc icingaweb2-module-monitoring monitoring-plugins nagios-nrpe-plugin nagios-plugins-contrib nagios-snmp-plugins nsca
cat > /etc/icinga2/features-available/ido-mysql.conf << EOF
library "db_ido_mysql"
object IdoMysqlConnection "ido-mysql" {
user = "root"
password = "root"
host = "localhost"
database = "icinga2idomysql"
}
EOF
#Configure icinga2 ido-mysql
mysqladmin -u root password root
#enable ido
ln -s /etc/icinga2/features-available/ido-mysql.conf /etc/icinga2/features-available/ido-myql.conf
icinga2 feature enable ido-mysql
#enable apache mod
a2enmod rewrite
usermod -a -G icingaweb2 www-data;
icingacli setup config directory --group icingaweb2;
icinga2 api setup
#create db for ido
mysql -uroot -proot -e "CREATE DATABASE icinga2idomysql CHARACTER SET latin1;"
mysql -uroot -proot -e "update mysql.user set password=password('root') where user='root';"
mysql -uroot -proot -e "update mysql.user set plugin='' where user='root';"
mysql -uroot -proot -e "flush privileges;"
mysql -uroot -proot icinga2idomysql < /usr/share/icinga2-ido-mysql/schema/mysql.sql
echo "date.timezone =Europe/Berlin" >> /etc/php/7.2/apache2/php.ini
#create icingaweb db
mysql -uroot -proot -e "CREATE DATABASE icingaweb;"
mysql -uroot -proot icingaweb < /usr/share/icingaweb2/etc/schema/mysql.schema.sql
#create user for icingaweb2
#icingaadmin:icinga
export pass=$(openssl passwd -1 icinga)
mysql -uroot -proot -e "INSERT INTO icingaweb.icingaweb_user (name, active, password_hash) VALUES ('icingaadmin', 1, '$pass');"
#authentication.ini
cat > /etc/icingaweb2/authentication.ini << EOF
[icingaweb2]
backend = "db"
resource = "icingaweb_db"
EOF
# #config.ini
cat > /etc/icingaweb2/config.ini << EOF
[global]
show_stacktraces = "1"
show_application_state_messages = "1"
config_backend = "db"
config_resource = "icingaweb_db"
[logging]
log = "syslog"
level = "ERROR"
application = "icingaweb2"
facility = "user"
EOF
#groups.ini
cat > /etc/icingaweb2/groups.ini << EOF
[icingaweb2]
backend = "db"
resource = "icingaweb_db"
EOF
#resources.ini
cat > /etc/icingaweb2/resources.ini << EOF
[icingaweb_db]
type = "db"
db = "mysql"
host = "localhost"
port = "3306"
dbname = "icingaweb"
username = "root"
password = "root"
charset = "latin1"
use_ssl = "0"
[icinga_ido]
type = "db"
db = "mysql"
host = "localhost"
port = "3306"
dbname = "icinga2idomysql"
username = "root"
password = "root"
charset = "latin1"
use_ssl = "0"
EOF
#roles.ini
cat > /etc/icingaweb2/roles.ini << EOF
[Administrators]
users = "icingaadmin"
permissions = "*"
groups = "Administrators"
EOF
#Configuration Icingaweb Modules
mkdir -p /etc/icingaweb2/modules/monitoring
mkdir -p /etc/icingaweb2/enabledModules
#Enable Monitoring Modules
ln -s /usr/share/icingaweb2/modules/monitoring/ /etc/icingaweb2/enabledModules/monitoring
#backends.ini
cat > /etc/icingaweb2/modules/monitoring/backends.ini << EOF
[icinga]
type = "ido"
resource = "icinga_ido"
EOF
#config.ini
cat > /etc/icingaweb2/modules/monitoring/config.ini << EOF
[security]
protected_customvars = "*pw*,*pass*,community"
EOF
#Module installation - Graphite, Director
#Director
# mkdir -p /usr/share/icingaweb2/modules/
# mkdir -p /usr/share/icingaweb2/modules/director/
# wget -q --no-cookies -O - https://github.com/Icinga/icingaweb2-module-director/archive/v1.4.3.tar.gz | tar xz --strip-components=1 --directory=/usr/share/icingaweb2/modules/director --exclude=.gitignore -f -
# ln -s /usr/share/icingaweb2/modules/director/ /etc/icingaweb2/enabledModules/director
#Graphite
mkdir -p /usr/share/icingaweb2/modules/graphite
wget -q --no-cookies -O - "https://github.com/Icinga/icingaweb2-module-graphite/archive/v1.0.1.tar.gz" | tar xz --strip-components=1 --directory=/usr/share/icingaweb2/modules/graphite -f -
rm /etc/icinga2/features-available/graphite.conf
#config will be written in run.sh
ln -s /usr/share/icingaweb2/modules/graphite/ /etc/icingaweb2/enabledModules/graphite
mkdir -p /etc/icingaweb2/modules/graphite
#fix https://github.com/Icinga/icingaweb2-module-graphite/pull/171/files
sed -i '33s/protected $handles/protected $handles = []/' /etc/icingaweb2/enabledModules/graphite/library/vendor/iplx/Http/Client.php
sed -i '33s/$ch = $this->handles ? array_pop($this->handles) : curl_init()/$ch = ! empty($this->handles) ? array_pop($this->handles) : curl_init()/' /etc/icingaweb2/enabledModules/graphite/library/vendor/iplx/Http/Client.php
#graphite config will be enabled and wrote in run.sh
#Add NSCA Config
icinga2 feature enable command
sed -i 's#command_file.*#command_file=/run/icinga2/cmd/icinga2.cmd#g' /etc/nsca.cfg
#disable main log
icinga2 feature disable mainlog
#Add /icinga2conf
echo "include_recursive \"/icinga2conf\"" >> /etc/icinga2/icinga2.conf
apt clean
rm -rf /var/lib/apt/lists/*
| true |
0da1c7c2734c214e264050bc8503f9180630b531 | Shell | rafaqz/.dotfiles | /bin/mdinit | UTF-8 | 935 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Copy startup markdown files and rename as the current dir name.
set -e
yaml=~/Uni/Templates/yaml.md
template=~/Uni/Templates/template.md
rsetup=~/Uni/Templates/r_setup.rmd
makefile=~/Uni/Templates/Makefile
# Filetype is set in first argument
filetype=${1:-md}
# Repo is the present working directory
repo=${PWD##*/}
filename="$repo.$filetype"
# Get unit name from parent dir, removing underscores
pushd '..'
pwd=${PWD##*/}
unit=${pwd//_/ }
popd
# Copy yaml header to file
cp $yaml $filename
if [ "${filetype,,}" = "rmd" ]; then
# Add rmd setup code block for Rmd or rmd filetypes
cat $rsetup >> $filename
fi
# Add section header template
cat $template >> $filename
# Run templating
sed -i "s/{{unit}}/$unit/g" $filename
# Tutor
if [ $# -eq 2 ]
then
$tutor=$2
sed -i "s/{{tutor}}/$tutor/g" $filename
else
sed -i "s/{{tutor}}//g" $filename
fi
# Symlink standard makefile
ln -s $makefile Makefile
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.