blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
162b8a04809bbe4818e8386d1b71eea2c5e67fe7 | Shell | dafvid/amplify-freebsd-install | /amplify-freebsd-install.sh | UTF-8 | 7,402 | 3.734375 | 4 | [] | no_license | #!/bin/sh
# Author: David W (david at dafnet.se)
# Based on install-source.sh
#
# Differences:
# - FreeBSD only
# - No auto installs
# - Requires virtualenv
pip_url="https://bootstrap.pypa.io/get-pip.py"
agent_url="https://github.com/nginxinc/nginx-amplify-agent"
agent_conf_path="/usr/local/etc/amplify-agent"
agent_conf_file="${agent_conf_path}/agent.conf"
nginx_conf_file="/usr/local/etc/nginx/nginx.conf"
virtualenv="/usr/local/pyvenv/amplify" # FILL ME IN
python_bin_path="${virtualenv}/bin"
pip_command=${python_bin_path}/pip
set -e
if [ -z "${virtualenv}" ]; then
echo "Virtualenv must be set"
exit 1
fi
# Warn the user that a dependency check is about to run and require an
# explicit y/Y confirmation; any other answer aborts the whole script.
install_warn1 () {
    echo "The script will CHECK for amplify dependencies"
    echo ""
    printf "Continue (y/n)? "
    read reply
    case "${reply}" in
        y|Y) : ;;          # confirmed -- fall through
        *)   exit 1 ;;     # anything else aborts immediately
    esac
    echo ""
}
# Probe the environment for everything the agent build needs and record
# each result in a found_* variable ('yes'/'no').
# Globals read:  python_bin_path (virtualenv bin dir searched for python)
# Globals set:   py_command, found_python, found_git, found_wget,
#                found_curl, found_gcc, found_python_dev
check_packages () {
printf 'Checking if python 2.6 or 2.7 exists ... '
# Try the preferred version first; 'break 2' below stops both loops as
# soon as one interpreter both exists and reports a matching version.
for version in '2.7' '2.6'
do
# checks for python2.7, python2, python, etc
# major = first character of the version string (e.g. '2' from '2.7')
major=`echo $version | sed 's/\(.\).*/\1/'`
for py_base_command in "python${version}" "python${major}" 'python'
do
py_command="${python_bin_path}/${py_base_command}"
# checks if it's a valid command
if ! command -V "${py_command}" >/dev/null 2>&1; then
py_command=''
# checks what python version it runs
elif [ "$(${py_command} -c 'import sys; print(".".join(map(str, sys.version_info[:2])))' 2>&1)" != "${version}" ]; then
py_command=''
else
break 2
fi
done
done
# A non-empty py_command means the loops above found a usable interpreter.
if [ -n "${py_command}" ]; then
found_python='yes'
echo 'yes'
else
found_python='no'
echo 'no'
fi
# Check each build/download tool; eval writes the dynamically-named
# variables found_git, found_wget, found_curl, found_gcc.
for i in git wget curl gcc
do
printf "Checking if ${i} exists ... "
if command -V ${i} >/dev/null 2>&1; then
eval "found_${i}='yes'"
echo 'yes'
else
eval "found_${i}='no'"
echo 'no'
fi
done
# python-dev is detected by asking the interpreter for its C header
# directory (distutils INCLUDEPY) and testing that the path exists.
printf 'Checking if python-dev exists ... '
if [ "${found_python}" = 'no' ]; then
found_python_dev='no'
echo 'no'
elif [ ! -e "$(${py_command} -c 'from distutils import sysconfig as s; print(s.get_config_vars()["INCLUDEPY"])' 2>&1)" ]; then
found_python_dev='no'
echo 'no'
else
found_python_dev='yes'
echo 'yes'
fi
echo
}
# Detect the user for the agent to use
# Decide which user the agent should run as.
# Precedence: 'user =' from the [nginx] section of agent.conf, then the
# 'user' directive from nginx.conf, then the literal fallback "nginx".
# Globals read:  agent_conf_file, nginx_conf_file
# Globals set:   amplify_user, nginx_conf_file (may be overridden from
#                agent.conf), nginx_user
detect_amplify_user() {
if [ -f "${agent_conf_file}" ]; then
# Pull 'user = ...' from the lines following the [nginx] section header,
# ignoring comment lines and stripping spaces around the value.
amplify_user=`grep -v '#' ${agent_conf_file} | \
grep -A 5 -i '\[.*nginx.*\]' | \
grep -i 'user.*=' | \
awk -F= '{print $2}' | \
sed 's/ //g' | \
head -1`
# The same section may also point at a different nginx.conf via
# 'configfile = ...'; if so it overrides the default path.
nginx_conf_file=`grep -A 5 -i '\[.*nginx.*\]' ${agent_conf_file} | \
grep -i 'configfile.*=' | \
awk -F= '{print $2}' | \
sed 's/ //g' | \
head -1`
fi
if [ -f "${nginx_conf_file}" ]; then
# Extract the argument of the nginx 'user' directive, skipping commented
# directives and unrelated names that merely end in '_user'.
nginx_user=`grep 'user[[:space:]]' ${nginx_conf_file} | \
grep -v '[#].*user.*;' | \
grep -v '_user' | \
sed -n -e 's/.*\(user[[:space:]][[:space:]]*[^;]*\);.*/\1/p' | \
awk '{ print $2 }' | head -1`
fi
# Fall back: nginx.conf user if found, otherwise "nginx".
if [ -z "${amplify_user}" ]; then
test -n "${nginx_user}" && \
amplify_user=${nginx_user} || \
amplify_user="nginx"
fi
}
printf "\n --- This script will install the NGINX Amplify Agent from source ---\n\n"
# Detect root
if [ "`id -u`" = "0" ]; then
sudo_cmd=""
else
if command -V sudo >/dev/null 2>&1; then
sudo_cmd="sudo "
echo "HEADS UP - will use sudo, you need to be in sudoers(5)"
echo ""
else
echo "Started as non-root, sudo not found, exiting."
exit 1
fi
fi
if [ -n "$API_KEY" ]; then
api_key=$API_KEY
else
echo " What's your API key? Please check the docs and the UI."
echo ""
printf " Enter your API key: "
read api_key
echo ""
fi
if uname -m | grep "_64" >/dev/null 2>&1; then
arch64="yes"
else
arch64="no"
fi
os="freebsd"
install_warn1
check_packages
# Abort unless check_packages found python, git, and at least one of
# curl/wget.  The original test read:
#   ... -o ${found_curl}" = "no" -a ${found_wget}" = "no" ]
# where the misplaced quotes glued the curl/wget clauses into a single
# string comparison, so a missing curl+wget pair was never detected.
# The obsolescent -a/-o test operators are replaced with || / && chains
# of separate [ ] invocations, per the POSIX test() recommendation.
if [ "${found_python}" = "no" ] || [ "${found_git}" = "no" ] || \
   { [ "${found_curl}" = "no" ] && [ "${found_wget}" = "no" ]; }; then
    echo "Missing dependencies. Exiting. Bye bye!"
    exit 0
else
    echo "All dependencies found, continuing"
fi
if command -V curl >/dev/null 2>&1; then
downloader="curl -fs -O"
else
if command -V wget >/dev/null 2>&1; then
downloader="wget -q --no-check-certificate"
else
echo "no curl or wget found, exiting."
exit 1
fi
fi
# Set up Python stuff
rm -f get-pip.py
${downloader} ${pip_url}
${py_command} get-pip.py
${pip_command} install setuptools -U
# Clone the Amplify Agent repo
${sudo_cmd} rm -rf nginx-amplify-agent
git clone ${agent_url}
# Install the Amplify Agent
cd nginx-amplify-agent
rel=`uname -r | sed 's/^\(.[^.]*\)\..*/\1/'`
test "${rel}" = "11" && opt='-std=c99'
grep -v gevent packages/nginx-amplify-agent/requirements.txt > packages/nginx-amplify-agent/req-nogevent.txt
grep gevent packages/nginx-amplify-agent/requirements.txt > packages/nginx-amplify-agent/req-gevent.txt
${pip_command} install --upgrade --no-compile -r packages/nginx-amplify-agent/req-nogevent.txt
CFLAGS=${opt} ${pip_command} install --upgrade --no-compile -r packages/nginx-amplify-agent/req-gevent.txt
${sudo_cmd} cp ../setup.py . # use a better setup.py
${sudo_cmd} ${py_command} setup.py install
${sudo_cmd} cp nginx-amplify-agent.py /usr/bin
${sudo_cmd} chown root /usr/bin/nginx-amplify-agent.py
if [ ! -d "${agent_conf_path}" ]; then
${sudo_cmd} mkdir -p "${agent_conf_path}"
fi
${sudo_cmd} cp etc/agent.conf.default ${agent_conf_path}
# Generate new config file for the agent
${sudo_cmd} rm -f ${agent_conf_file}
${sudo_cmd} sh -c "sed -e 's/api_key.*$/api_key = $api_key/' ${agent_conf_file}.default > ${agent_conf_file}"
${sudo_cmd} chmod 644 ${agent_conf_file}
detect_amplify_user
if ! grep ${amplify_user} /etc/passwd >/dev/null 2>&1; then
${sudo_cmd} pw user add ${amplify_user}
fi
${sudo_cmd} chown ${amplify_user} ${agent_conf_path} >/dev/null 2>&1
${sudo_cmd} chown ${amplify_user} ${agent_conf_file} >/dev/null 2>&1
# Create directories for the agent in /var/log and /var/run
${sudo_cmd} mkdir -p /var/log/amplify-agent
${sudo_cmd} chmod 755 /var/log/amplify-agent
${sudo_cmd} chown ${amplify_user} /var/log/amplify-agent
${sudo_cmd} mkdir -p /var/run/amplify-agent
${sudo_cmd} chmod 755 /var/run/amplify-agent
${sudo_cmd} chown ${amplify_user} /var/run/amplify-agent
echo ""
echo " --- Finished successfully! --- "
echo ""
echo " To start the Amplify Agent use:"
echo ""
echo " # sudo -u ${amplify_user} ${py_command} /usr/bin/nginx-amplify-agent.py start \ "
echo " --config=${agent_conf_file} \ "
echo " --pid=/var/run/amplify-agent/amplify-agent.pid"
echo ""
echo " To stop the Amplify Agent use:"
echo ""
echo " # sudo -u ${amplify_user} ${py_command} /usr/bin/nginx-amplify-agent.py stop \ "
echo " --config=${agent_conf_file} \ "
echo " --pid=/var/run/amplify-agent/amplify-agent.pid"
echo ""
exit 0
| true |
916f0e464c5037582523fd834bf543aa053fb421 | Shell | EthanSmo02/dotfiles-2 | /local/bin/i3cmd/screencapture | UTF-8 | 2,178 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
# Help: https://wiki.archlinux.org/index.php/Sway#Screen_capture
#Screen capture
#Capturing the screen can be done using grim or swayshotAUR for screenshots and wf-recorder-gitAUR for video. Optionally, slurp can be used to select the part of the screen to capture.
# To stop recording
# PID=`ps -ef | grep 'wf-recorder' | awk '{print $2}'`
# kill -2 $PID 2>&1 > /dev/null
pgrep -x wf-recorder && killall -s 2 wf-recorder > /dev/null 2>&1
btn="$1"
[ -n "${btn}" ] || {
pgrep -x wofi && exit
btnstr=$(printf "screen\npart+screen\nrecorder\naudio+recorder\npart+recorder" | wofi -L 5 -b -x 0 -y 0 -W 190 --show dmenu -i -p "Screen Capture Select:") || exit
case "$btnstr" in
"screen")
btn="sw"
;;
"part+screen")
btn="sp"
;;
"recorder")
btn="rw"
;;
"audio+recorder")
btn="raw"
;;
"part+recorder")
btn="rp"
;;
*)
notify-check-send "📹Screen Capture:" "Pls select option!"
exit
;;
esac
}
# Timestamped output paths for screenshots (.webp) and recordings (.mkv).
# $(...) replaces the legacy backtick command substitution, and the double
# quotes keep the paths intact even if $HOME contains spaces.
FILE="$HOME/screenshot/screenshot-$(date +%F-%H%M%S).webp"
VFILE="$HOME/screenshot/recorder-$(date +%F-%H%M%S).mkv"
case "$btn" in
sw)
grim - | tee $FILE > /dev/null 2>&1
;;
sp)
grim -g "$(slurp)" - | tee $FILE > /dev/null 2>&1
;;
rw)
{ sleep 1 && pkill -RTMIN+3 -x waybar; } &
wf-recorder -f $VFILE
pkill -RTMIN+3 -x waybar
;;
raw)
{ sleep 1 && pkill -RTMIN+3 -x waybar; } &
wf-recorder -a -f $VFILE
pkill -RTMIN+3 -x waybar
;;
rp)
notify-send -t 2000 'Screen sharing' 'Select an area to start the recording...'
geometry="$(slurp)"
{ sleep 1 && pkill -RTMIN+3 -x waybar; } &
wf-recorder -g "$geometry" -f $VFILE
pkill -RTMIN+3 -x waybar
;;
*)
echo "Usage: $0 {sw|sp|rw|raw|rp}"
exit 2
esac
[ ! -f ${FILE} ] || {
echo -n "${FILE}" | wl-copy
notify-check-send "📹Screen Capture:" "copy screen capture file path success,${FILE}\nWait 5s show..."
sleep 5
feh ${FILE}
}
[ ! -f ${VFILE} ] || {
echo -n "${VFILE}" | wl-copy
notify-check-send "📹Screen Capture:" "copy screen capture video file path success,${VFILE}\nWait 5s play..."
sleep 5
mpv ${VFILE}
}
| true |
724d4c4bc78b8895520243359c8478d7e32ccbb2 | Shell | zitadel/zitadel | /build/entrypoint.sh | UTF-8 | 238 | 2.984375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
# Dispatch on the container's argv to decide what to exec.
# NOTE(review): $@ is matched here as one joined string, and the sh*/bash*
# arms strip a fixed number of leading words before running the rest --
# this assumes a fixed invocation shape (e.g. "sh <opt> <cmd...>"); confirm
# against the image's documented command forms.
case $@ in
sh*)
# Run everything from the 3rd argument onward.
${@:3}
;;
bash*)
# Run everything from the 5th argument onward.
${@:5}
;;
*)
# Any other argv is forwarded to the zitadel binary; when argv is empty,
# a pre-set ZITADEL_ARGS environment value (if any) is used instead.
if [[ ! -z "$@" ]]
then
ZITADEL_ARGS="$@"
fi
/app/zitadel ${ZITADEL_ARGS}
;;
esac
| true |
507694b0c6a6f5ba0680cbbe0adeb68011ac164d | Shell | olethrosdc/beliefbox | /src/algorithms/tests/scripts/pomdp_test.sh | UTF-8 | 994 | 2.953125 | 3 | [] | no_license | n_runs=10000
n_episodes=1
n_steps=100000
gamma=0.75
lambda=0.5
random=0.01
n_actions=2
for n_states in 4 8 6
do
#environment=Gridworld
#resdir=~/experiments/aggregate/results/maze2_g${gamma}
environment=OneDMaze
resdir=~/experiments/pomdp/results/1dmaze_${n_states}_g${gamma}
#environment=ContextBandit
#resdir=~/experiments/aggregate/results/context
mkdir -p $resdir
cd $resdir
exc=~/projects/beliefbox/src/algorithms/tests/bin/pomdp_algorithms
echo "Run parameters" >run.params
echo "exc n_states n_actions gamma lambda random n_runs n_episodes n_steps" >>run.params
echo ${exc} $n_states $n_actions $gamma $lambda $random $n_runs $n_episodes $n_steps >>run.params
for model in Sarsa QLearning Model
do
time ${exc} $n_states $n_actions $gamma $lambda $random $n_runs $n_episodes $n_steps ${model} $environment >${model}.out
grep REWARD ${model}.out >${model}.reward
grep PAYOFF ${model}.out >${model}.payoff
done
done | true |
6942d1877e4ca4406094cc8a8b6d467b06ba519f | Shell | pcasaretto/dotfiles | /git/alias.zsh | UTF-8 | 690 | 3.46875 | 3 | [] | no_license | # Aliases
alias gst='git status'
alias gsw='git switch'
alias gl='git pull --prune'
alias gup='git pull --prune --rebase'
alias gp='git push'
alias gd='git diff'
alias gc='git commit -v'
alias gwc='git whatchanged -p --abbrev-commit --pretty=medium'
# Will return the current branch name
# Usage example: git pull origin $(current_branch)
#
# Print the current git branch name, or the short commit hash when HEAD is
# detached.  Prints nothing and returns non-zero outside a git repository.
function current_branch() {
  local ref
  ref=$(git symbolic-ref HEAD 2> /dev/null) || \
    ref=$(git rev-parse --short HEAD 2> /dev/null) || return
  # Quote the expansion so branch names cannot be glob-expanded or
  # word-split by the shell; 'local' keeps ref out of the caller's scope.
  echo "${ref#refs/heads/}"
}
# Print the repository path portion of the configured git remotes.
# Returns early, printing nothing, when not inside a git repository.
function current_repository() {
ref=$(git symbolic-ref HEAD 2> /dev/null) || \
ref=$(git rev-parse --short HEAD 2> /dev/null) || return
# NOTE(review): field 2 of the colon-split `git remote -v` output assumes
# scp-style remote URLs (git@host:path) -- confirm behavior for https
# remotes, where the colon sits inside the scheme.
echo $(git remote -v | cut -d':' -f 2)
}
| true |
798ba712aa272999122e0d81200845261d4265c7 | Shell | cmoose/cmsc773_final_project | /try_corenlp.sh | UTF-8 | 2,910 | 2.703125 | 3 | [] | no_license | #!/bin/bash
#Dependencies: http://nlp.stanford.edu/software/stanford-corenlp-full-2015-01-29.zip
#To visualize dep tree: http://chaoticity.com/dependensee-a-dependency-parse-visualisation-tool/
scriptdir="./stanford-corenlp-full-2015-01-29"
cp="$scriptdir/*"
#Some weird pipeline thing that is supposed to do "all" processing - produces really verbose output
#echo java -mx3g -cp \"$scriptdir/*\" edu.stanford.nlp.pipeline.StanfordCoreNLP $*
#java -mx3g -cp "$scriptdir/*" edu.stanford.nlp.pipeline.StanfordCoreNLP $*
#java -Xmx3g -cp stanford-corenlp-full-2015-01-29/stanford-corenlp-3.5.1.jar:stanford-corenlp-full-2015-01-29/stanford-corenlp-3.5.1-models.jar:stanford-corenlp-full-2015-01-29/xom.jar:stanford-corenlp-full-2015-01-29/joda-time.jar:stanford-corenlp-full-2015-01-29/jollyday.jar:stanford-corenlp-full-2015-01-29/ejml-0.23.jar edu.stanford.nlp.pipeline.StanfordCoreNLP -props corenlp.properties -filelist test_filelist.txt -outputDirectory testing -sentences
#POS tagger - tokenizes and parses
#java -mx3g -cp "$scriptdir/*" edu.stanford.nlp.tagger.maxent.MaxentTagger -model edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger -textFile reddit/depressed.txt -outputFormat slashTags > depressed3_pos.txt
java -mx3g -cp "$scriptdir/*" edu.stanford.nlp.tagger.maxent.MaxentTagger -model edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger -textFile depressed.txt -outputFormat slashTags > depressed_pos.txt
#CFG parsing - assumes POS tagged input
#java -mx1g -cp "$scriptdir/*" edu.stanford.nlp.parser.lexparser.LexicalizedParser -sentences newline -tokenized -tagSeparator _ -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerMethod newCoreLabelTokenizerFactory edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz depressed_pos.txt > depressed_cfg.txt
#Dependency parse
java -mx3g -cp "$scriptdir/*" edu.stanford.nlp.parser.nndep.DependencyParser -model edu/stanford/nlp/models/parser/nndep/english_SD.gz -textFile depressed.txt -outFile depressed_dependency.txt
#Visualize the parse tree
#java -cp "$cp" com.chaoticity.dependensee.Main -t depressed_dependency.txt out.png
#NER
#java -cp "$cp" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier "${scriptdir}/classifiers/english.all.3class.distsim.crf.ser.gz" -textFile sentence.txt -outFile depressed_ner.txt
#java -Xmx3g -cp stanford-corenlp-full-2014-08-27/stanford-corenlp-3.4.1.jar:stanford-corenlp-full-2014-08-27/stanford-corenlp-3.4.1-models.jar:stanford-corenlp-full-2014-08-27//xom.jar:stanford-corenlp-full-2014-08-27//joda-time.jar:stanford-corenlp-full-2014-08-27//jollyday.jar:stanford-corenlp-full-2014-08-27/ejml-0.23.jar edu.stanford.nlp.pipeline.StanfordCoreNLP -props /Users/chris/School/UMCP/CMSC773-S15/final_project/project_materials/corenlp-python/corenlp/default.properties -filelist /var/folders/yj/t9z4w2kd7fbcykwwpk4_hhn00000gn/T/tmpnlMdW1 -outputDirectory /var/folders/yj/t9z4w2kd7fbcykwwpk4_hhn00000gn/T/tmpw5ydeI
| true |
13f8f16cc961b6edea467cf86aa1829c9ef1c9b6 | Shell | ptdecker/chosen-reference | /launch | UTF-8 | 1,627 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#A simple bash script to check prerequisite services and then launch
#node.js in a MySQL-NodeJS stack with Express session persistance using
#redis.
#
# Assumes:
# - apps are stored in './app'
#
# Usage: launch <app name>
#
# Define ANSI (ISO 6429) escape sequences for colorization of feedback messages
blue='\E[34m'
yellow='\E[33m'
green='\E[32m'
red='\E[31m'
reset='\E[0m'
bold='\E[1m'
# Define standard feedback colorized text
info=$blue'[INFO]'$reset
warn=$yellow'[WARNING]'$reset
error=$red'\n[ERROR]'$reset
check=$bold$green'check'$reset
negative=$red'negative'$reset
# Get things started
clear
echo -e $info'Welcome to the '$bold$green'Chosen Demo'$reset' startup bash script.'
echo -e $info'Running pre-launch checklist...'
# Validate that the Redis service is running
#echo -en $info' - Redis service running? '
#upcheck=$(pgrep redis | wc -l);
#if [ $upcheck -ge 1 ]; then
# echo -e $check
#else
# echo -e $negative
# echo -e $error'redis service is not running'
# exit 1
#fi
# Validate that the MySQL service is running
#echo -en $info' - MySQL service running? '
#upcheck=$(pgrep mysql | wc -l);
#if [ $upcheck -ge 1 ]; then
# echo -e $check
#else
# echo -e $negative
# echo -e $error'MySQL status check indicates an error'
# exit 1
#fi
# If an application name was provided on the command line, pass it to node.js;
# otherwise, start node.js in REPL mode.
if [ -z "$1" ]; then
echo -e $info'Starting Node.js REPL interface (Ctrl-C twice to quit)...'
nodejs
else
echo -e $info'Starting Node.js with '$1'...'
cd ./app
nodejs $1
cd ..
fi
| true |
63a54d872a3cc257a932b4b67e55f7013e6428c7 | Shell | postmodern/ruby-install | /share/ruby-install/truffleruby-graalvm/functions.sh | UTF-8 | 2,357 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
case "$os_platform" in
Linux) graalvm_platform="linux" ;;
Darwin) graalvm_platform="darwin" ;;
*) fail "Unsupported platform $os_platform" ;;
esac
case "$os_arch" in
x86_64) graalvm_arch="amd64" ;;
aarch64) graalvm_arch="aarch64" ;;
arm64) graalvm_arch="aarch64" ;;
*) fail "Unsupported platform $os_arch" ;;
esac
if [ "$ruby_version" = "23.0.0" ]; then
log "TruffleRuby-GraalVM 23.0 and later installed by ruby-install use the faster Oracle GraalVM distribution"
log "Oracle GraalVM uses the GFTC license, which is free for development and production use, see https://medium.com/graalvm/161527df3d76"
ruby_dir_name="graalvm-jdk-17.0.7+8.1"
ruby_archive="${ruby_archive:-graalvm-jdk-17.0.7_${graalvm_platform/darwin/macos}-${graalvm_arch/amd64/x64}_bin.tar.gz}"
ruby_mirror="${ruby_mirror:-https://download.oracle.com/graalvm/17/archive}"
ruby_url="${ruby_url:-$ruby_mirror/$ruby_archive}"
else
ruby_dir_name="graalvm-ce-java11-$ruby_version"
ruby_archive="${ruby_archive:-graalvm-ce-java11-$graalvm_platform-$graalvm_arch-$ruby_version.tar.gz}"
ruby_mirror="${ruby_mirror:-https://github.com/graalvm/graalvm-ce-builds/releases/download}"
ruby_url="${ruby_url:-$ruby_mirror/vm-$ruby_version/$ruby_archive}"
fi
#
# Install GraalVM into $install_dir.
#
function install_ruby()
{
# TruffleRuby cannot be installed under /usr/local (see the linked
# upstream issue), so refuse early with a pointer to the discussion.
if [[ "$install_dir" == '/usr/local' ]]; then
error "Unsupported see https://github.com/oracle/truffleruby/issues/1389"
return 1
fi
log "Installing GraalVM $ruby_version ..."
# copy_into is a ruby-install helper defined elsewhere; presumably it
# copies the unpacked build tree into $install_dir/graalvm -- any failure
# status is propagated to the caller.
copy_into "$ruby_build_dir" "$install_dir/graalvm" || return $?
}
#
# Post-install tasks.
#
function post_install()
{
cd "$install_dir/graalvm" || return $?
# On macOS the JDK payload lives under the app-bundle layout.
if [[ "$graalvm_platform" == "darwin" ]]; then
run cd Contents/Home || return $?
fi
log "Installing the Ruby component ..."
# gu is GraalVM's component updater; this pulls in the Ruby language.
run ./bin/gu install ruby || return $?
# Ask the freshly installed interpreter where its prefix is; an empty
# answer means the component install did not produce a usable ruby.
local ruby_home="$(./bin/ruby -e 'print RbConfig::CONFIG["prefix"]')"
if [[ -z "$ruby_home" ]]; then
error "Could not determine TruffleRuby home"
return 1
fi
# Make gu available in PATH (useful to install other languages)
run ln -fs "$PWD/bin/gu" "$ruby_home/bin/gu" || return $?
# Expose a relative bin/ symlink at the top of the install dir so the
# usual ruby-install layout keeps working.
run cd "$install_dir" || return $?
run ln -fs "${ruby_home#"$install_dir/"}/bin" . || return $?
log "Running truffleruby post-install hook ..."
run "$ruby_home/lib/truffle/post_install_hook.sh" || return $?
}
| true |
b06773d7ed6094cfbb563c1dc2cccfacc9b286fb | Shell | petronny/aur3-mirror | /fasttrack/PKGBUILD | UTF-8 | 1,828 | 2.78125 | 3 | [] | no_license | # Contributor: fnord0 < fnord0 AT riseup DOT net >
pkgname=fasttrack
pkgver=4.0.1
pkgrel=5
pkgdesc='Automated Penetration Testing'
arch=('i686' 'x86_64')
url=https://www.trustedsec.com/
license=('BSD')
depends=('metasploit' 'subversion' 'python2' 'nmap' 'setuptools' 'freetds' 'python2-pexpect' 'tcl' 'sqlite3' 'ruby-sqlite3' 'python-clientform' 'proftpd' 'python2-beautifulsoup3' 'pymssql' 'pymills' 'winexe')
optdepends=('psyco2-svn: for increased speeds (i686 only)')
_svntrunk=http://svn.secmaniac.com/fasttrack
install=fasttrack.install
build() {
if [ -d ${srcdir}/.svn ]; then
msg 'Updating...'
svn up ${srcdir}
else
msg 'Checking out...'
svn co ${_svntrunk} ${srcdir}
fi
mkdir -p ${pkgdir}/usr/{bin,src} || return 1
install -d ${pkgdir}/usr/share/licenses/fasttrack || return 1
cd ${pkgdir}/usr/src
svn export ${srcdir} ${pkgname} || return 1
cd ${pkgname}
#check if metasploit-svn or metasploit stable is installed (metasploit-svn takes presidence)
#apply archlinux metasploit home directory to fasttrack_config
if [ -d /usr/src/metasploit ]; then
sed -i 's|/pentest/exploits/framework3|/usr/src/metasploit|g' ./bin/config/config || return 1
else
sed -i 's|/pentest/exploits/framework3|/opt/metasploit|g' ./bin/config/config || return 1
fi
echo -e ""
echo -e "\e[1;34m>>>\e[0m \e[1;31march linux users\e[0m"
echo -e "\e[1;34m>>>\e[0m \e[1;31manswer '\e[0m\e[1;34mno\e[0m\e[1;31m' to the install question about fasttrack dependency resolving\e[0m"
python2 setup.py install --root=${pkgdir}/ --optimize=1 || return 1
install -Dm755 ${startdir}/${pkgname} ${pkgdir}/usr/bin/${pkgname} || return 1
cd ${pkgdir}/usr/src/${pkgname}/readme
cp -pR LICENSE ${pkgdir}/usr/share/licenses/${pkgname}/ || return 1
}
# vim:syntax=sh
| true |
6c53d15a1ba283725fde34926ca60ee1446f6936 | Shell | rowoflo/Templates | /new_cpp_project | UTF-8 | 1,807 | 3.5 | 4 | [] | no_license | #!/bin/bash
# NAME
# new_cpp_project -- Creates a new C++ project
#
# USAGE
# new_cpp_project ?project_name?
#
# Written by: Rowland O'Flaherty (rowlandoflaherty.com)
# Created on: 2014 DEC 11
project_name=$1
if [ -d $project_name ]; then
echo ERROR: directory $project_name exist
exit 1
fi
mkdir $project_name
cd $project_name
mkdir build
mkdir doc
mkdir src
mkdir test
cp ~/Dropbox/Templates/cpp_project/CMakeLists_txt_root_template.txt CMakeLists.txt
sed -i "" s/___project_name___/$project_name/g CMakeLists.txt
cp ~/Dropbox/Templates/cpp_project/CMakeLists_txt_src_template.txt src/CMakeLists.txt
sed -i "" s/___project_name___/$project_name/g src/CMakeLists.txt
cp ~/Dropbox/Templates/cpp_simple_project/main_cpp_template.txt src/main.cpp
sed -i "" s/___project_name___/$project_name/g src/main.cpp
cp ~/Dropbox/Templates/cpp_project/CMakeLists_txt_test_template.txt test/CMakeLists.txt
sed -i "" s/___project_name___/$project_name/g test/CMakeLists.txt
cp ~/Dropbox/Templates/cpp_project/test_main_cpp_template.txt test/main.cpp
sed -i "" s/___project_name___/$project_name/g test/main.cpp
cp ~/Dropbox/Templates/cpp_project/dotGitignore_template.txt .gitignore
sed -i "" s/___project_name___/$project_name/g .gitignore
cp ~/Dropbox/Templates/cpp_project/Doxyfile_template.txt Doxyfile
sed -i "" s/___project_name___/$project_name/g Doxyfile
cp ~/Dropbox/Templates/cpp_project/LICENSE_template.txt LICENSE
cp ~/Dropbox/Templates/cpp_project/README_md_template.txt README.md
sed -i "" s/___project_name___/$project_name/g README.md
cp ~/Dropbox/Templates/cpp_project/sublime-project_template.txt $project_name.sublime-project
sed -i "" s/___project_name___/$project_name/g $project_name.sublime-project
doxygen
git init
git add -A
git commit -m "Initial commit"
open $project_name.sublime-project
| true |
c333716e350020d7bad2cac38fba4a21a1d947f5 | Shell | rickysos/openvpn-server-ldap-otp | /files/configuration/setup_networking.sh | UTF-8 | 1,757 | 3.8125 | 4 | [
"MIT"
] | permissive | #Create the VPN tunnel interface
mkdir -p /dev/net
if [ ! -c /dev/net/tun ]; then
mknod /dev/net/tun c 10 200
fi
ovpn_net_net=`whatmask ${OVPN_NETWORK} | grep 'Network Address' | awk '{ print $5 }'`
ovpn_net_cidr=`whatmask ${OVPN_NETWORK} | grep 'CIDR' | awk '{ print $4 }'`
ovpn_net="${ovpn_net_net}${ovpn_net_cidr}"
export this_natdevice=`route | grep '^default' | grep -o '[^ ]*$'`
#Set up routes to push to the client.
if [ "${OVPN_ROUTES}x" != "x" ] ; then
IFS=","
read -r -a route_list <<< "$OVPN_ROUTES"
echo "" >/tmp/routes_config.txt
for this_route in ${route_list[@]} ; do
echo "routes: adding route $this_route to server config"
echo "push \"route $this_route\"" >> /tmp/routes_config.txt
if [ "$OVPN_NAT" == "true" ]; then
IFS=" "
this_net=`whatmask $this_route | grep 'Network Address' | awk '{ print $5 }'`
this_cidr=`whatmask $this_route | grep 'CIDR' | awk '{ print $4 }'`
IFS=","
to_masquerade="${this_net}${this_cidr}"
echo "iptables: masquerade from $ovpn_net to $to_masquerade via $this_natdevice"
iptables -t nat -C POSTROUTING -s "$ovpn_net" -d "$to_masquerade" -o $this_natdevice -j MASQUERADE || \
iptables -t nat -A POSTROUTING -s "$ovpn_net" -d "$to_masquerade" -o $this_natdevice -j MASQUERADE
fi
done
IFS=" "
else
#If no routes are set then we'll redirect all traffic from the client over the tunnel.
echo "push \"redirect-gateway def1\"" >> /tmp/routes_config.txt
echo "iptables: masquerade from $ovpn_net to everywhere via $this_natdevice"
if [ "$OVPN_NAT" == "true" ]; then
iptables -t nat -C POSTROUTING -s "$ovpn_net" -o $this_natdevice -j MASQUERADE || \
iptables -t nat -A POSTROUTING -s "$ovpn_net" -o $this_natdevice -j MASQUERADE
fi
fi
| true |
fc68f2be7c7d633155a448c535d89316674ed9a0 | Shell | hakancar/my-projects | /linux_age-test | UTF-8 | 461 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Ask for name, age, and ale (presumably assumed life expectancy -- TODO
# confirm), then classify the person as student / worker / retired / dead.
read -r -p "enter your name: " name
read -r -p "enter your age: " age
read -r -p "enter your ale: " ale
# $(( )) replaces 'let', which raised a syntax error when a value was
# left empty; in arithmetic expansion an unset/empty var evaluates to 0.
y=$((65 - age))
z=$((ale - age))
echo "$name"
if [[ $age -lt 18 ]]
then
    x=$((18 - age))
    echo "student"
    echo "At least $x year to become a worker"
elif [[ $age -lt 65 ]]
then
    # The original branch required age > 18, so exactly 18 fell through
    # every test and printed "already died"; -lt 65 now covers 18..64.
    echo "worker"
    echo "$y years to retire"
elif [[ $ale -ge $age ]]
then
    # Reaching this arm already implies age >= 65, so the old redundant
    # age check is dropped; the $ sigils are made explicit (the original
    # relied on [[ ]]'s implicit arithmetic variable resolution).
    echo "retired"
    echo "$z years to die"
else
    echo "already died"
fi
| true |
46707f36c99c1c6fe7271438bee26af3d76c5ba4 | Shell | TechLabs-Berlin/st21-birdsAI | /backend/python/run_flask.sh | UTF-8 | 363 | 3.328125 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Start the Flask dev server, but only when run from the project's
# .../python folder.  (The redundant PWD=$(pwd) is gone: PWD is already
# maintained by the shell itself.)
FOLDER="/python"
# [[ ... == *pat* ]] is a quiet substring test; the original piped the
# check through grep, which also echoed the matched path to stdout.
if [[ "$PWD" == *"$FOLDER"* ]]
then
    echo "starting up venv..."
    source venv/bin/activate
    echo "starting up server.."
    export FLASK_APP=app.py
    export FLASK_ENV=development
    flask run
else
    echo "wrong folder. Are you in the birds_ai/python folder?"
    echo "here is your current folder: ${PWD}"
fi
| true |
c7acad8f474c54c41a3da31f2ccf648e094bcd5f | Shell | fteychene/dokku-alt | /plugins/dokku-tag/commands | UTF-8 | 958 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
source "$(dirname $0)/../dokku_common"
case "$1" in
tag:add)
verify_app_name "$2"
verify_max_args 3 "$@"
TAG="$3"
tag_image "$IMAGE_GENERIC:latest" "$IMAGE_GENERIC:$TAG"
info2 "Tagged latest image of $IMAGE_GENERIC as $TAG"
;;
tag:rm)
verify_app_name "$2"
verify_max_args 3 "$@"
TAG="$3"
case "$TAG" in
build|latest|release)
fail "You can't remove internal dokku tag ($TAG) for $IMAGE_GENERIC"
;;
*)
docker rmi "$IMAGE_GENERIC:$TAG"
;;
esac
;;
tag:list)
verify_app_name "$2"
docker images "$IMAGE_GENERIC"
;;
help)
cat && cat<<EOF
tag:add <app> <tag> Tag latest running image using specified name
tag:rm <app> <tag> Tag latest running image using specified name
tag:list <app> List all image tags
EOF
;;
*)
exit $DOKKU_NOT_IMPLEMENTED_EXIT
;;
esac
| true |
c239fe54b7f83bc35de35e791dabe0a173277d38 | Shell | AntonKokschool/LinuxMachine | /Weekopdracht2/order_directorie.sh | UTF-8 | 347 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Move every file from a source directory ($1) into a destination
# directory ($2), creating the destination on demand.
source=$1
dest=$2
echo "source is $source"
echo "destination is $dest"
# Guard against a missing/empty source: previously "$source"/* with an
# empty $source expanded to /* and the loop walked the root directory.
if [ ! -d "$source" ]; then
    echo "source directory '$source' does not exist" >&2
else
    for file in "$source"/*
    do
        # With an empty directory the glob stays literal; skip it.
        [ -e "$file" ] || continue
        echo "$file"
        if [ ! -d "$dest" ]; then
            mkdir "$dest"
            # These messages were single-quoted, so the literal text
            # "$dest" was printed instead of the value (plus typos).
            echo "directory $dest has been made"
        else
            echo "directory $dest already exists"
        fi
        cp "$file" "$dest/${file##*/}"
        echo 'made file'
        rm "$file"
        echo 'removed file'
    done
fi
cc2c49b6e26a086085684857ea180b2879c69fd3 | Shell | dowjames/Cubox-i-Debian | /build.sh | UTF-8 | 2,776 | 2.984375 | 3 | [] | no_license | #!/bin/bash
#--------------------------------------------------------------------------------------------------------------------------------
# Which board are we building for?
# choose "cubox-i" for hummingboard;
### valid options: "bananapi", "cubietruck", "cubox-i", "bananapi-next", "cubietruck-next"
BOARD="cubox-i"
# Branch
# don't change this if you don't know what this is
### valid options:
# "default" = 3.4.x
# "mainline" = next
BRANCH="default"
# Which Debian release are we building?
### valid options:
# "jessie" "wheezy"
### default: "wheezy"
RELEASE="wheezy"
# Hostname
### default: "hummingboard"
# hostname you'd like to use
HOST="hummingboard"
#--------------------------------------------------------------------------------------------------------------------------------
# SD size in MB
### default: "1200"
# (1.2GB) will be automatically expanded to fit your SD card on first boot.
SDSIZE="1200"
# image release version
REVISION="2.4"
#--------------------------------------------------------------------------------------------------------------------------------
# compile sources
### default: "yes"
SOURCE_COMPILE="yes"
# change default configuration
### default: "no"
KERNEL_CONFIGURE="no"
# MAKE clean before kernel compilation
### default: "yes"
KERNEL_CLEAN="yes"
# Use all CPU cores for compiling
### default: "yes"
USEALLCORES="yes"
#--------------------------------------------------------------------------------------------------------------------------------
# Locale settings
# set your locale
### default: "en_US.UTF-8"
DEST_LANG="en_US.UTF-8"
# Time zone settings
# set your timezone
### default: "America/Vancouver"
TZDATA="America/Vancouver"
# Root password
# must be changed on first login
### default: "root"
ROOTPWD="root"
MAINTAINER="dowjames" # deb signature
MAINTAINERMAIL="" # deb signature
#--------------------------------------------------------------------------------------------------------------------------------
# Linux Framebuffer drivers for small TFT LCD display modules
# https://github.com/notro/fbtft
### default: "no"
FBTFT="no"
# compile external drivers?
### default: "no"
EXTERNAL="no"
#--------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------
# DON'T EDIT BELOW
#--------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------
SRC=$(pwd)
DEST=$(pwd)"/output"
source $SRC/lib/main.sh
| true |
adac78bdd089813c48c610becbe5bd4aca72b9d0 | Shell | funalab/PredictMovingDirection | /download_datasets.sh | UTF-8 | 404 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Fetch and unpack the MDPredictor image datasets, preferring wget and
# falling back to curl.
baseURL='https://fun.bio.keio.ac.jp/software/MDPredictor/'
zipfile='datasets.zip'
concatURL="${baseURL}${zipfile}"
echo "downloading the image datasets..."
# 'command -v' is the POSIX-recommended existence check; all expansions
# are quoted to guard against accidental word splitting / globbing.
if command -v wget > /dev/null 2>&1; then
    wget "$concatURL"
elif command -v curl > /dev/null 2>&1; then
    curl -O "$concatURL"
else
    echo "both 'wget' and 'curl' command were not found, please install"
    exit 1
fi
unzip "$zipfile"
rm "$zipfile"
| true |
d08cbcfe8f1d52c0974ffbe635e50991da5770e1 | Shell | velkjaer/openshift-notes | /install/environment.sh | UTF-8 | 5,169 | 2.703125 | 3 | [] | no_license | #Env variables for native online OpenShift install
#path to pull-secret.json
export OCP_PULL_SECRET_FILE=/root/pull-secret.json
#when instructed during installation
#set this to no when removing bootstrap from haproxy
export OCP_NODE_HAPROXY_ADD_BOOTSTRAP=yes
#OCP version to install/upgrade
#check https://mirror.openshift.com/pub/openshift-v4/clients/ocp/
#for desired version
export OCP_VERSION=4.6.8
#Find correct RHCOS major release and version from
#https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/
#match RHCOS with chosen OCP
export OCP_RHCOS_MAJOR_RELEASE=4.6
export OCP_RHCOS_VERSION=4.6.8
#set this variable is cluster is only three nodes (that is, 3 masters)
#use values 'yes' or 'no'
export OCP_THREE_NODE_CLUSTER=yes
#OCP_DOMAIN is your domain where OpenShift is installed
export OCP_DOMAIN=forum.fi.ibm.com
export OCP_CLUSTER_NAME=cluster2
#OpenShift install user, created in bastion server
export OCP_INSTALL_USER=ocp
export OCP_RELEASE="${OCP_VERSION}-x86_64"
export OCP_LOCAL_REPOSITORY='ocp/openshift4'
export OCP_PRODUCT_REPO='openshift-release-dev'
export OCP_RELEASE_NAME="ocp-release"
#Bastion IP address, used in other variables
export OCP_NODE_BASTION_IP_ADDRESS=192.168.47.20
#host and port for Apache servers that hold RHCOS and ignition files
#typically bastion is the Apache host
export OCP_APACHE_HOST=${OCP_NODE_BASTION_IP_ADDRESS}
export OCP_APACHE_PORT=8080
#network CIDR for OCP nodes, used in install-config.yaml
export OCP_NODE_NETWORK_CIDR=192.168.47.0/24
#bastion and haproxy hostname and IP (MAC is not required)
export OCP_NODE_BASTION="bastion ${OCP_NODE_BASTION_IP_ADDRESS}"
export OCP_NODE_HAPROXY="haproxy ${OCP_NODE_BASTION_IP_ADDRESS}"
#bootstrap and master nodes, hostname, IP and MAC required
export OCP_NODE_BOOTSTRAP="bootstrap 192.168.47.21 00:50:56:b3:0c:7b"
export OCP_NODE_MASTER_01="master2-01 192.168.47.22 00:50:56:b3:01:7d"
export OCP_NODE_MASTER_02="master2-02 192.168.47.23 00:50:56:b3:7a:6d"
export OCP_NODE_MASTER_03="master2-03 192.168.47.24 00:50:56:b3:1c:65"
#OCP worker nodes that are served by DNS and DHCP server
#hostname, IP and MAC required
#syntax: "<HOSTNAME> <IP> <MAC>; <HOSTNAME> <IP> <MAC>;"
#where hostname, ip and mac are separated by space and followed by ;
#note: if OCP_THREE_NODE_CLUSTER is yes, then this variable is ignored
export OCP_NODE_WORKER_HOSTS=" \
worker-01 192.168.47.111 00:50:56:b3:93:4f; \
worker-02 192.168.47.112 00:50:56:b3:33:f1; \
worker-03 192.168.47.113 00:50:56:b3:7e:23; \
"
#hosts used in HAProxy configuration
#these are configured in HAProxy to receive workload requests
#use all worker nodes or chosen worker if
#note: if OCP_THREE_NODE_CLUSTER is yes, then this variable is ignored
export OCP_HAPROXY_WORKER_HOSTS=" \
worker-01 192.168.47.111 00:50:56:b3:93:4f; \
worker-02 192.168.47.112 00:50:56:b3:33:f1; \
"
#hosts that are not OCP worker nodes but need to be in DNS and DHCP
#syntax: "<HOSTNAME> <IP> <MAC>; <HOSTNAME> <IP> <MAC>;"
#where hostname, ip and mac are separated by space and followed by ;
export OCP_OTHER_HOSTS_DHCP=" \
dummy-test 192.168.47.254 DE:AD:C0:DE:CA:FE;\
test-bootstrap 192.168.47.200 00:50:56:b3:4d:f4; \
"
#other hosts in the OCP environment these are in DNS but not in DHCP
#syntax: "<HOSTNAME> <IP>; <HOSTNAME> <IP>;"
export OCP_OTHER_DNS_HOSTS=" \
mirror-registry 192.168.47.100; \
registry 192.168.47.100; \
ocp-registry 192.168.47.100; \
external-registry 192.168.47.100;\
"
#DNS config
#OCP_DNS_FORWARDERS format: <ip>;<ip>; (semicolon separated list of DNS servers)"
export OCP_DNS_FORWARDERS="10.31.0.10;10.31.11.10;"
#OC_DNS_ALLOWED_NETWORKS format: ip/mask;ip/mask; (semicolon separated list of networks)"
export OCP_DNS_ALLOWED_NETWORKS="127.0.0.0/8;10.0.0.0/8;192.0.0.0/8;172.0.0.0/8;"
#Network information for DHCP server
#network interface that is used by DHCP, this interface is in the host where DHCP container is running
#make sure to set DHCP network interface to correct interface
export OCP_DHCP_NETWORK_INTERFACE=ens224
export OCP_DHCP_NETWORK=192.168.47.0
export OCP_DHCP_NETWORK_MASK=255.255.255.0
export OCP_DHCP_NETWORK_BROADCAST_ADDRESS=192.168.47.255
#if having more than one router, NTP or DNS server, separate them using comma ','
export OCP_DHCP_NETWORK_ROUTER=192.168.47.1
export OCP_DHCP_NTP_SERVER=${OCP_NODE_BASTION_IP_ADDRESS}
export OCP_DHCP_DNS_SERVER=${OCP_NODE_BASTION_IP_ADDRESS}
#PXE variables, RHCOS files
export OCP_PXE_RHCOS_KERNEL_URL=http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/rhcos/rhcos-${OCP_RHCOS_VERSION}-x86_64-live-kernel-x86_64
export OCP_PXE_RHCOS_INITRAMFS_URL=http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/rhcos/rhcos-${OCP_RHCOS_VERSION}-x86_64-live-initramfs.x86_64.img
export OCP_PXE_RHCOS_ROOTFS_URL=http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/rhcos/rhcos-${OCP_RHCOS_VERSION}-x86_64-live-rootfs.x86_64.img
#Ignition files served over HTTP during PXE boot.
#Fixed: the master and worker URLs previously contained a doubled slash
#("/ignition//"), and the values are now quoted like a normal shell string.
export OCP_IGNITION_URL_BOOTSTRAP="http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/ignition/bootstrap.ign"
export OCP_IGNITION_URL_MASTER="http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/ignition/master.ign"
export OCP_IGNITION_URL_WORKER="http://${OCP_APACHE_HOST}:${OCP_APACHE_PORT}/ignition/worker.ign"
| true |
8bea6e736ea776841c9edc7de88c5aec7f5c59c6 | Shell | jackdempsey/oh-my-zsh | /plugins/git/git.plugin.zsh | UTF-8 | 1,226 | 2.859375 | 3 | [] | no_license | # Aliases
# Short git aliases for interactive zsh use.
alias git='noglob git'
alias g='nocorrect git'
alias gst='git status'
alias gl='git log'
alias glp='git log -p'
alias gup='git fetch && git rebase'
alias gap='git add -p'
alias gp='git pull'
alias gpr='git pull --rebase'
alias gd='git diff'
alias gdc='git diff --cached'
# Fixed: positional parameters such as "$@" are never expanded inside an
# alias body, so the old `alias gdv='git diff -w "$@" | vim -R -'` silently
# dropped its arguments.  A function receives them correctly.
function gdv() { git diff -w "$@" | vim -R -; }
alias gc='git commit -v'
alias ga='nocorrect git add'
alias gca='git commit -v -a'
alias gcam='git commit -a -m'
alias gb='git branch'
alias gba='git branch -a'
alias gcount='git shortlog -sn'
alias gcp='git cherry-pick'
alias glg='git log --stat --max-count=5'
alias gco='git checkout'
alias gsb='git show-branch'
alias gri='git rebase -i'
alias gf='git fetch'
alias grm='git rebase master'
# Git and svn mix
alias git-svn-dcommit-push='git svn dcommit && git push github master:svntrunk'
#
# Print the name of the branch that HEAD currently points at; produces no
# output (and returns non-zero) when HEAD is detached or we are not in a
# git repository.
# Usage example: git pull origin $(current_branch)
#
current_branch() {
  local symbolic_ref
  symbolic_ref=$(git symbolic-ref HEAD 2> /dev/null) || return
  printf '%s\n' "${symbolic_ref#refs/heads/}"
}
# These aliases build on current_branch() above; the single quotes defer
# the $(current_branch) expansion until the alias is actually invoked, so
# the branch name is resolved at run time, not at definition time.
alias ggpull='git pull origin $(current_branch)'
alias ggpush='git push origin $(current_branch)'
alias ggpnp='git pull origin $(current_branch) && git push origin $(current_branch)'
| true |
9edae79f80d32e5a7abc82552043eda24cbfe530 | Shell | Vish36/CDOT-MCU | /conference/custom/deb/DEBIAN/postinst | UTF-8 | 879 | 3.40625 | 3 | [] | no_license | #!/bin/sh
# Debian maintainer script (postinst) for the openmcu-ru daemon: creates
# the service account, its home/log directories and permissions, then
# registers and starts the init script.
DAEMON_USER="mcu"
DAEMON_NAME="openmcu-ru"
DAEMON_HOMEDIR="/var/lib/$DAEMON_NAME"
DAEMON_LOGDIR="/var/log/$DAEMON_NAME"
DAEMON_CONFIG="/etc/$DAEMON_NAME"
# dpkg invokes postinst with an action argument; "configure" is the
# normal install/upgrade path.
case "$1" in
  configure)
    # Create the system user/group that the daemon runs as.
    adduser --home $DAEMON_HOMEDIR --no-create-home --quiet --system --group $DAEMON_USER
    # Home directory: private to the daemon user.
    mkdir -p $DAEMON_HOMEDIR
    chown $DAEMON_USER: $DAEMON_HOMEDIR
    chmod 0700 $DAEMON_HOMEDIR
    # Log directory: group "adm" may read the logs.
    mkdir -p $DAEMON_LOGDIR
    chown $DAEMON_USER:adm $DAEMON_LOGDIR
    chmod 0750 $DAEMON_LOGDIR
    # Hand ownership of /etc/openmcu-ru to the daemon user.
    chown -R $DAEMON_USER: $DAEMON_CONFIG
  ;;
  abort-upgrade|abort-remove|abort-deconfigure)
  ;;
  *)
    echo "postinst called with unknown argument '$1'" >&2
    exit 1
  ;;
esac
# Register the SysV init script (start priority 95) and start the daemon.
update-rc.d $DAEMON_NAME defaults 95 >/dev/null
/etc/init.d/$DAEMON_NAME start
exit 0
| true |
3dd3e319f71eac49f6c45991fa64276f7fcc5698 | Shell | nevir/code-style | /.scripts/templates/bootstrap/scripts/test-style.sh | UTF-8 | 813 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
set -e

# The shared globbing helpers populate the FILES, TYPESCRIPT_FILES,
# ESLINT_FILES and PRETTIER_FILES arrays with the paths to check.
source ./node_modules/@nevir/code-style/.scripts/include/globbing.sh

# Type-check when there are TypeScript sources, or when no explicit file
# list was given at all (a full-project run).
if [[ "${#TYPESCRIPT_FILES[@]}" != "0" || "${#FILES[@]}" == "0" ]]; then
  ./node_modules/.bin/tsc --noEmit "${TYPESCRIPT_FILES[@]}"
fi

if [[ "${#ESLINT_FILES[@]}" != "0" ]]; then
  ./node_modules/.bin/eslint "${ESLINT_FILES[@]}"
fi

if [[ "${#PRETTIER_FILES[@]}" != "0" ]]; then
  # Fixed: the old `UGLY_FILES=($( ... ))` capture word-split file names
  # containing spaces.  mapfile reads one path per output line instead,
  # and the process substitution keeps a non-zero prettier exit status
  # from tripping `set -e` (replacing the old set +e / set -e dance).
  mapfile -t UGLY_FILES < <(
    ./node_modules/.bin/prettier --list-different "${PRETTIER_FILES[@]}"
  )

  if [[ "${#UGLY_FILES[@]}" != "0" ]]; then
    echo
    echo -e "\033[4m\033[33mThe following files are not well formatted:\033[0m"
    echo
    for file in "${UGLY_FILES[@]}"; do
      echo "  ${file}"
    done
    echo
    echo -e "\033[31mPlease fix via: \033[33mnpm run fix-style\033[0m"
    echo
    exit 1
  fi
fi
| true |
da914cfd3f11a19ecf24775dc3dacd5477c15590 | Shell | JoelAtDeluxe/local-playground | /go/build.sh | UTF-8 | 298 | 3.265625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
set -e

# Directory containing this script (and hello_world.go).  `cd + pwd` is
# correct for both relative and absolute invocations, whereas the old
# `$(pwd)/$(dirname "$0")` produced a bogus path when $0 was absolute.
here="$(cd "$(dirname "$0")" && pwd)"

# Target scratch-project directory, taken from the first argument.
project=$1
if [ -z "$project" ]; then
    echo "usage: $0 <project-dir>" >&2
    exit 1
fi

mkdir -p "$project"
cd "$project"
go mod init scratch

filename=main.go
touch "$filename"
echo "// Scratch file located here: $(pwd)" >> "$filename"
echo "" >> "$filename"
cat "$here/hello_world.go" >> "$filename"

# Print the absolute path of the generated scratch file for the caller.
echo "$(pwd)/$filename"
| true |
f32957beea41102ed44fca3eae63d24327a3801f | Shell | leonardonc/script | /lista_11/lista_11_q03.sh | UTF-8 | 170 | 2.859375 | 3 | [] | no_license | #!/bin/bash
#substituir num de cpf por **CENSURADO**
# Reads a file name from the user and prints the file with every CPF
# number (NNN.NNN.NNN-NN) replaced by **CENSURADO**.
read -p "Digite o nome do arquivo: " arq
# Fixed: the dots are now escaped (\.) -- an unescaped "." in an ERE
# matches ANY character, so strings such as "123x456y789-09" were also
# being censored.  "$arq" is quoted so paths with spaces work.
sed -E 's/([0-9]{3}\.[0-9]{3}\.[0-9]{3}-[0-9]{2})/**CENSURADO**/g' < "$arq"
| true |
ea0dd37f11aacbeb363cc07b8532467c38946f45 | Shell | shweppsie/520-reporting-tools | /plot_wc.sh | UTF-8 | 1,433 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# One entry per student, formatted "Display Name:/path/to/wordcount.wc".
# The gnuplot section below plots column 1 (date) against column 3
# (word count) of each .wc file.
students=(
	"Name:/path/to/wordcountfile.wc"
)
# PNG the graph is rendered into (should live under the web root).
outputfile='/path/to/webroot/progress.png'
# URL whose body lists teabreak intervals, two 17-character
# "dd/mm/yy|HH:MM:SS" timestamps per line (start, then end).
teabreakfile="http://someurl/"

# subtract the contents from the points
# best attempt to fix the values produced by the makefile :(
#count=`cat build/thesis.pdf.txt | sed -e '/Chapter 1/q' | wc -w`

# Bump the max time on the x axis an hour ahead of now.
# NOTE(review): `print d.strftime(...)` is Python 2 statement syntax, so
# /usr/bin/python must be a Python 2 interpreter here -- confirm.
date="`echo "
from datetime import datetime, timedelta
d=datetime.today()+timedelta(hours=1)
print d.strftime('%d/%m/%y|%H:%M:%S')
" | /usr/bin/python`"
echo 'set terminal png nocrop size 1920,1080 font "/home/no15/Helvetica.ttf" 20 linewidth 2
set output "'${outputfile}'"
set key left top
set xdata time
set xtics format "%b %d"
set timefmt "%d/%m/%y|%H:%M:%S"
set xrange ["22/09/12|00:00:00":"'${date}'"]
set xlabel "Date" offset 0,-1
set ylabel "Word count"
set label "Teabreaks: " at graph 0.125, graph 0.95
set arrow from graph(0,0.205),graph(0,0.93) to graph(0,0.205),graph(0,0.97) linecolor rgb "#708090" nohead
'`
curl -s -L "${teabreakfile}" | while read line
do
start=${line:0:17}
end=${line:18:17}
echo 'set arrow from "'${start}'",graph(0,0) to "'${start}'",graph(1,1) linecolor rgb "#708090" nohead;'
done
`'
plot '`
i=0
for student in ${students[@]}
do
path=${student#*:}
name=${student%%:*}
echo '"'${path}'" using 1:($3 - 0) with steps \
title "'${name}'"'
i=$(($i+1))
if [ $i -ne ${#students[@]} ]; then
echo ","
fi
done` | gnuplot -persist
| true |
f99e23463ca3828b551514bccb1319724ce0442f | Shell | superDross/autoTransmission | /test/test_autoTransmission.sh | UTF-8 | 2,073 | 3.359375 | 3 | [] | no_license | oneTimeSetUp() {
    # shunit2 hook body: runs once before the whole suite.  Creates a
    # scratch torrent directory and sources autoTransmission.sh so its
    # functions (apply_defaults, startup_app, add_torrents, scheduler,
    # exit_transmission, ...) are available to the tests below.
    TEMP_DIR="/tmp/autoTransmision/"
    mkdir -p $TEMP_DIR
    . ../autoTransmission.sh --torrent_dir $TEMP_DIR
    apply_defaults
    # Well-known public torrent name used by the download assertions.
    TEST_TORRENT_NAME="ubuntu-18.04-desktop-amd64.iso"
}
get_test_torrent_id() {
    # Echo the transmission ID of the most recently listed torrent.
    # `transmission-remote -l` prints a header line, one row per torrent
    # and a "Sum:" footer; drop both, strip leading blanks, then keep the
    # leading ID column and take its last entry.
    local id_column last_id
    id_column=$(transmission-remote -l | sed -e '1d;$d;s/^ *//' | \
        cut --only-delimited --delimiter ' ' --fields 1)
    last_id=$(printf '%s\n' $id_column | tail -n 1)
    echo $last_id
}
test_exit() {
    # Start the daemon, shut it down, then assert that no
    # transmission-daemon process survives.
    startup_app
    exit_transmission
    # Fixed: the old `if $(ps aux | ... | grep ...)` executed grep's
    # OUTPUT as a command instead of testing the pipeline's exit status;
    # `grep -q` in the condition tests the status directly and quietly.
    if ps aux | grep -v grep | grep -q transmission-daemon; then
        fail "The exit transmission function did not work"
    fi
}
# Verify that startup_app actually leaves a transmission process running.
test_startup() {
    # ensure startup results in transmission-daemon being active
    startup_app
    # NOTE: below code sometimes states inactive despite being active
    ## status=$(systemctl is-active transmission-daemon.service)
    ## assertEquals "active" $status
    # pgrep exits non-zero (failing the test) if it retrieves nothing
    pgrep transmission
}

# Download a known .torrent file, feed it to add_torrents, then confirm
# transmission reports a torrent with the expected name.
test_add_torrents() {
    wget http://releases.ubuntu.com/18.04/ubuntu-18.04-desktop-amd64.iso.torrent -P $TEMP_DIR
    add_torrents
    local torrent_id=$(get_test_torrent_id)
    # grep -o extracts just the matching name from the --info dump.
    local name=$(transmission-remote -t $torrent_id --info | grep -o $TEST_TORRENT_NAME)
    assertEquals $TEST_TORRENT_NAME $name
}
# Exercise scheduler(): it should install an autoTransmission crontab
# entry at the requested time.  Any pre-existing entry is saved first and
# restored at the end so the user's real schedule is preserved.
test_scheduler() {
    # remove current autoTransmission entry from crontab
    if [ ! -z "$(crontab -l | grep autoTransmission)" ]; then
        local original_entry=$(crontab -l | grep autoTransmission)
        crontab -l | grep -v autoTransmission | crontab -
    fi
    # test with 10:33 schedule
    scheduler 10:33
    # ensure test entry is within crontab
    # NOTE(review): test_entry is captured but never asserted on, so this
    # test cannot fail on a missing entry -- confirm an assert is wanted.
    local test_entry=$(crontab -l | grep "33 10.*autoTransmission")
    # remove test entry from crontab
    crontab -l | grep -v "33 10.*autoTransmission" | crontab -
    # re-add original autoTransmission schedule
    if [ ! -z "$original_entry" ]; then
        (crontab -l; echo "$original_entry") | crontab -
    fi
}
# shunit2 hook: runs once after the suite.  Removes the scratch download
# directory, deletes the test torrent (with its data) from transmission,
# and stops the daemon.
oneTimeTearDown() {
    rm -r $TEMP_DIR
    local torrent_id=$(get_test_torrent_id)
    transmission-remote -t $torrent_id --remove-and-delete
    exit_transmission
}
. shunit2
| true |
5650ccafd655d803cf87b12a39798438d0c930a3 | Shell | lvapeab/scripts | /tm_combination/merge_models.sh | UTF-8 | 6,094 | 3.5625 | 4 | [] | no_license | #!/bin/bash
SRILM_DIR=/home/alvaro/smt/software/srilm/bin/i686-m64
AWK=/usr/bin/awk
SORT=/usr/bin/sort
PYTHON=/usr/bin/python
sortpars="-S 131072"
sortT="yes"
sortm="yes"
SPLIT=/usr/bin/split
SED=/bin/sed
GREP=/bin/grep
SED=/bin/sed
UNIQ=/usr/bin/uniq
BASENAME=/usr/bin/basename
SSH=/usr/bin/ssh
HEAD=/usr/bin/head
TAIL=/usr/bin/tail
MKTEMP=/bin/mktemp
usage(){
    # Print the command-line help for merge_models on stdout.  A quoted
    # heredoc emits the text verbatim, with no expansion.
    cat <<'EOF'
Usage: merge_models -main <trans1> -backoff <trans2> [-max_unk <int>] [-unk_sym <string>] [-tmpdir <dir>] [-out <output>] [-v] [-t <string>] [-lm <string>] [-order <int>]
 -main <string>     : File with sentences generated by the main TM.
 -backoff <string>  : File with sentences generated by the secondary TM
 -max_unk <int>     : Number of unknown words per sentence allowed in the main translation file.
 -unk_sym <string>  : Unknown word symbol (default UNK).
 -tmpdir <string>   : Temporal directory (default current directory).
 -out <string>      : Output file (default, standard output).
 -v                 : Verbose mode.
 -t <string>        : Technique to select the translations. One of: "unk", "ppl", "combined" (default "unk").
 -lm <string>       : Language model for computing perplexities.
 -order <int>       : Order of the language model (default 4).
EOF
}
# Print "1" when the argument looks like a command-line option (contains
# "-" immediately followed by a letter anywhere in the string), "0"
# otherwise.  Fixed: $1 is now quoted, so arguments containing spaces or
# shell glob characters no longer break the awk invocation.
str_is_option()
{
    echo "" | ${AWK} -v s="$1" '{if(!match(s,"-[a-zA-Z]")) print "0"; else print "1"}'
}
# Defaults for all command-line options; the *_given flags record whether
# the user supplied the corresponding option on the command line.
main=""
tmpdir="./"
main_given=0
backoff=""
backoff_given=0
max_unk=""
max_unk_given=0
unk_sym="UNK"
lm_given=0
technique="unk"
# Language-model order used by the "ppl"/"combined" techniques.
order=4
# NOTE(review): the default output value "1" is later used as a file name
# in a `>& $output` redirection, which creates a file literally named "1"
# in the working directory rather than writing to stdout -- confirm.
output="1"
while [ $# -ne 0 ]; do
case $1 in
"--help") usage
exit 0
;;
"-tdir") shift
if [ $# -ne 0 ]; then
tmpdir=$1
else
tmpdir="./"
fi
;;
"-main") shift
if [ $# -ne 0 ]; then
main=$1
main_given=1
else
main_given=0
fi
;;
"-backoff") shift
if [ $# -ne 0 ]; then
backoff=$1
backoff_given=1
else
backoff_given=0
fi
;;
"-max_unk") shift
if [ $# -ne 0 ]; then
max_unk=$1
max_unk_given=1
else
max_unk_given=0
fi
;;
"-unk_sym") shift
if [ $# -ne 0 ]; then
unk_sym=$1
fi
;;
"-out") shift
if [ $# -ne 0 ]; then
output=$1
else
tmpdir="1"
fi
;;
"-t") shift
if [ $# -ne 0 ]; then
technique=$1
else
technique="unk"
fi
;;
"-lm") shift
if [ $# -ne 0 ]; then
lm=$1
lm_given=1
else
lm_given=0
fi
;;
"-order") shift
if [ $# -ne 0 ]; then
order=$1
else
order=4
fi
;;
"-v") verbose_opt="-v"
;;
esac
shift
done
#### Verify parameters
# -main and -backoff are mandatory; bail out with a message otherwise.
if [ ${main_given} -eq 0 ]; then
    # invalid parameters 
    echo "Error: -main option not given"
    exit 1
fi
if [ ${backoff_given} -eq 0 ]; then
    # invalid parameters
    echo "Error: -backoff option not given"
    exit 1
fi
#Parameters are OK
# Make sure the temporary/working directory exists before any technique
# writes its intermediate files there.
mkdir -p $tmpdir
### Script selection
if [ "${technique}" == "unk" ]; then
if [ ${max_unk_given} -eq 0 ]; then
# invalid parameters
echo "Error: -max_unk option not given"
exit 1
fi
MERGE_SCRIPT=/home/alvaro/smt/software/scripts/tm_combination/combine_tm_unk.py
cat $main | awk -v unk=$unk_sym '{counter=0;
for(i=1;i<=NF;i++){
if($i == unk)
counter++;
}
print counter}' > ${tmpdir}/unk_count
$PYTHON $MERGE_SCRIPT $main $backoff ${tmpdir}/unk_count $max_unk $verbose_opt >& $output
exit 1
fi
if [ "${technique}" == "ppl" ]; then
if [ ${lm_given} -eq 0 ]; then
echo "Error: -lm option not given"
exit 1
fi
MERGE_SCRIPT=/home/alvaro/smt/software/scripts/tm_combination/combine_tm_ppl.py
$SRILM_DIR/ngram -ppl $main -order $order -lm $lm -debug 1 |grep logprob |awk '{print $4}' > ${tmpdir}/nmt.probs
$SRILM_DIR/ngram -ppl $backoff -order $order -lm $lm -debug 1 |grep logprob |awk '{print $4}' > ${tmpdir}/pbmt.probs
$PYTHON $MERGE_SCRIPT $main $backoff ${tmpdir}/nmt.probs ${tmpdir}/pbmt.probs $verbose_opt >& $output
exit 1
fi
if [ "${technique}" == "combined" ]; then
if [ ${lm_given} -eq 0 ]; then
echo "Error: -lm option not given"
exit 1
fi
MERGE_SCRIPT=/home/alvaro/smt/software/scripts/tm_combination/combine_tm_pplUnk.py
cat $main | awk -v unk=$unk_sym '{counter=0;
for(i=1;i<=NF;i++){
if($i == unk)
counter++;
}
print counter}' > ${tmpdir}/unk_count
$SRILM_DIR/ngram -ppl $main -order $order -lm $lm -debug 1 |grep logprob |awk '{print $4}' > ${tmpdir}/nmt.probs
$SRILM_DIR/ngram -ppl $backoff -order $order -lm $lm -debug 1 |grep logprob |awk '{print $4}' > ${tmpdir}/pbmt.probs
$PYTHON $MERGE_SCRIPT $main $backoff ${tmpdir}/nmt.probs ${tmpdir}/pbmt.probs ${tmpdir}/unk_count $max_unk $verbose_opt >& $output
exit 1
fi
| true |
4f79a54f43dc95fce1f53d1256f80dd6dda3c6b2 | Shell | alok1929/bashscripting | /loop.sh | UTF-8 | 91 | 2.578125 | 3 | [] | no_license | # !/bin/bash
# Print a greeting for every name listed in names.txt, one per line.
# Fixed: iterating over $(cat names.txt) word-split multi-word names and
# glob-expanded any wildcards; reading line-by-line treats each line as
# exactly one name.
while IFS= read -r NAMES; do
	echo " the names are $NAMES "
done < names.txt
| true |
e5178e931996ae34380ff13a4e733799046915e0 | Shell | openstack/openstack-helm | /tools/deployment/multinode/130-libvirt.sh | UTF-8 | 1,593 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Trace every command and abort on the first failure.
set -xe

export OS_CLOUD=openstack_helm

# Enable Ceph-backed volumes only when the cloud exposes a volume service
# AND an "rbd" volume type.
CEPH_ENABLED=false
if openstack service list -f value -c Type | grep -q "^volume" && \
    openstack volume type list -f value -c Name | grep -q "rbd"; then
  CEPH_ENABLED=true
fi

#NOTE: Get the over-rides to use (defaults apply when the env vars are unset)
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}

#NOTE: Lint and package chart
make -C ${OSH_INFRA_PATH} libvirt

#NOTE: Get resource_type of kubernetes cgroup
# `grep -q systemd` consumes the awk output: systemd driver -> kubepods.slice,
# anything else -> kubepods.
KUBERNETES_CGROUP=$(sudo docker info | grep "Cgroup Driver" | awk -F': ' '{print $2}' | grep -q systemd && echo kubepods.slice || echo kubepods)

#NOTE: Write the chart values override (tee also echoes it to the log)
tee /tmp/libvirt.yaml << EOF
conf:
  kubernetes:
    cgroup: ${KUBERNETES_CGROUP}
EOF

#NOTE: Deploy libvirt
helm upgrade --install libvirt ${OSH_INFRA_PATH}/libvirt \
    --namespace=openstack \
    --values=/tmp/libvirt.yaml \
    --set conf.ceph.enabled=${CEPH_ENABLED} \
    ${OSH_EXTRA_HELM_ARGS} \
    ${OSH_EXTRA_HELM_ARGS_LIBVIRT}
| true |
cce7fe734dbd7e5f7a8283002252087cc4abfb2b | Shell | pinf-io/io.pinf.pio.postsync | /bin/link-to | UTF-8 | 383 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Source https://github.com/cadorn/bash.origin
# Pull in the bash.origin bootstrap; it provides the BO_* helpers and
# variables used below (BO_READ_SELF_BASH_SOURCE, BO_deriveSelfDir, BO_log).
. "$BO_ROOT_SCRIPT_PATH"

# Replace the path given as $1 with a symlink to the io-pinf-pio-postsync
# script that lives next to this file.
function init {
	# Resolve this script's own path/directory via bash.origin helpers.
	eval BO_SELF_BASH_SOURCE="$BO_READ_SELF_BASH_SOURCE"
	BO_deriveSelfDir ___TMP___ "$BO_SELF_BASH_SOURCE"
	PGS_DIR="$___TMP___"

	# Remove any existing file/link at the target; ignore failures so a
	# missing target does not abort the script.
	rm -f "$1" > /dev/null || true
	BO_log "$VERBOSE" "Linking '$PGS_DIR/io-pinf-pio-postsync' to '$1'"
	ln -s "$PGS_DIR/io-pinf-pio-postsync" "$1"
}
init $@ | true |
3de04d0eda200e2a30f2f483c74711647efc9f1e | Shell | pcantrell/sweetxml | /ant/bin/ant | UTF-8 | 793 | 3.765625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Wrapper around Ant that transparently converts a SweetXML buildfile
# (build.sxml) to build.xml before launching Ant, then cleans up the
# generated file afterwards.  Pass --xml as the first argument to skip
# the conversion step.

# Locate the sweetxml converter: explicit SWEETXML_HOME, else rely on PATH.
if [ "$SWEETXML_HOME" ]; then
    sweetxml="$SWEETXML_HOME/bin/sweetxml"
else
    sweetxml=sweetxml
fi

# Ant itself is mandatory; refuse to run without ANT_HOME.
if [ "$ANT_HOME" ]; then
    ant="$ANT_HOME/bin/ant"
else
    echo "ANT_HOME environment variable not set" >&2
    exit 1
fi

unset generated_buildfile
if [ "$1" == "--xml" ]; then
    shift
else
    if [ -f build.sxml ]; then
        echo "Converting build.sxml..."
        generated_buildfile=build.xml
        # If build.xml already existed it was NOT generated by us, so
        # clear the flag to avoid deleting the user's file later.
        if [ -f "${generated_buildfile}" ]; then
            unset generated_buildfile
        fi
        "$sweetxml" --overwrite build.sxml || exit 1
        echo "Launching Ant..."
    fi
fi

"$ant" "$@"
# Single command, so PIPESTATUS[0] is simply ant's exit status.
result=${PIPESTATUS[0]}

# Remove build.xml only when we generated it above.
if [ "${generated_buildfile}" ]; then
    echo "Cleaning generated ${generated_buildfile}."
    rm "${generated_buildfile}"
fi
exit $result
| true |
47755088b605a6e5574f787622bb9d641dc5035e | Shell | julioohhhh/api-rest | /build-and-deploy.sh | UTF-8 | 2,398 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
# Substitute the CI-provided placeholders into this script and the ECS
# task-definition template.
# NOTE(review): the first sed rewrites build-and-deploy.sh itself while it
# is executing; bash may re-read the modified file mid-run -- confirm this
# self-modification is intentional.
setting_variable () {
    sed -i "s/IMAGE_NAME/${IMAGE_NAME}/g" ./build-and-deploy.sh
    sed -i "s/MEMORY_RESERVATION/${MEMORY_RESERVATION}/g" ./deploy/apirest.json
    sed -i "s/CPU_UNITY/${CPU_UNITY}/g" ./deploy/apirest.json
    sed -i "s/HOST_PORT/${HOST_PORT}/g" ./deploy/apirest.json
    sed -i "s/IMAGE_REPOSITORY/${IMAGE_REPOSITORY}/g" ./deploy/apirest.json
    sed -i "s/TASK_NAME/${TASK_NAME}/g" ./deploy/apirest.json
}
#############
### BUILD ###
#############
#Build docker image
# Builds the Docker image from the Dockerfile in the current directory and
# tags it with the CI-provided IMAGE_NAME.
build_docker () {
    docker build -t ${IMAGE_NAME} .
}
#Compile the project to generate the jar file
# Runs the Maven build (tests skipped) and aborts the pipeline on failure.
# NOTE(review): `mvn clean install` followed by `mvn clean package` builds
# twice, and $? only reflects the second command -- confirm both runs are
# really needed.
compile_docker_image () {
    echo "Compiling the project to generate .jar"
    mvn clean install -DskipTests
    mvn clean package -DskipTests
    if [ $? -eq 0 ]; then
        echo "Compiled with Sucess, building the new docker image"
    else
        echo "Maven FAILED to compile, please check Jenkins errors"
        exit 1
    fi
}
#Check if docker build was completed
# Runs build_docker and aborts the pipeline when the image build fails.
build_docker_image () {
    build_docker
    if [ $? -eq 0 ]; then
        echo "Build executed, pushing the image to a Repository"
    else
        # Fixed: the failure message here wrongly blamed Maven; this
        # branch reports a failed *docker build*, not a compile error.
        echo "Docker build FAILED, please check Jenkins errors"
        exit 1
    fi
}
#Push image to a repository, could be a Docker Hub or AWS ECR for example
# Pushes the built image and aborts the pipeline when the push fails.
push_docker_image () {
    # Fixed: "{REPOSITORY}" was missing its "$", so docker was asked to
    # push a literal "{REPOSITORY}" tag instead of the configured repo.
    docker push "${REPOSITORY}"
    if [ $? -eq 0 ]; then
        echo "Image pushed to Repository"
    else
        echo "push FAILED, please check Jenkins errors"
        exit 1
    fi
}
#############
### DEPLOY ###
#############
#To deploy image in an AWS ECS environment
# Registers a new revision of the task definition from the templated JSON
# and rolls the ECS service onto it.
deploy_docker_image () {
    # Stamp the image version into the task-definition template.
    sed -i "s/IMAGE_VERSION/${IMAGE_VERSION}/g" ./deploy/apirest.json
    # Register the revision and capture its number via jq.
    TASK_DEFINITION=`aws ecs register-task-definition \
        --cli-input-json file://./deploy/apirest.json \
        --network-mode host \
        --profile "${AWS_PROFILE}" \
        | jq '.taskDefinition | .revision'`
    #The parameters needs to be declared on Jenkins task
    aws ecs update-service \
        --cluster "${ECS_CLUSTER}" \
        --service "${ECS_SERVICE}" \
        --desired-count "${ECS_TASK_NUMBER}" \
        --task-definition "${ECS_SERVICE}":"${TASK_DEFINITION}" \
        --deployment-configuration maximumPercent="${MAXIMUM_PERCENT}",minimumHealthyPercent="${MINIMUM_HEALTH}" \
        --profile "${AWS_PROFILE}"
}
# Pipeline entry sequence: template substitution, Maven build, Docker
# build, registry push, then ECS deploy.  The build/push/deploy stages
# exit 1 on failure, aborting the remaining stages (setting_variable
# performs no status check of its own).
setting_variable
compile_docker_image
build_docker_image
push_docker_image
deploy_docker_image
| true |
170117144f4422d35e05097b4abdb6784b349a2b | Shell | king4Come/shell | /shell安装sakila/install_sakila.sh | UTF-8 | 740 | 3.125 | 3 | [] | no_license | #!/bin/sh
# NOTE(review): `su root` starts an interactive root shell; the rest of
# this script does NOT run inside it and continues as the original user
# once that shell exits -- confirm whether `sudo` per command was meant.
su root
# Pause for the user, show the current directory, then clear the screen.
prompt(){
	read -p "请回车==============开始=======$1"
	CURRENT_PATH=$(pwd);
	echo "当前路径为==========$CURRENT_PATH"
	history -c
	unset CURRENT_PATH;
	clear
}
prompt 'centos7 安装sakila'
prompt 'https://github.com/MyCATApache/Mycat-download'
prompt '下载解压sakila'
# Download and unpack the sakila sample database under /usr/local.
down_sakila(){
	cd /usr/local
	wget http://downloads.mysql.com/docs/sakila-db.zip
	unzip sakila-db.zip
}
down_sakila
prompt '导入sakila结构和数据'
# Import the sakila schema and data into MySQL (prompts for the root
# password twice).
# NOTE(review): import_sakila is defined but never invoked anywhere in
# this script, so the import silently never happens -- confirm a call was
# intended after the prompt above.
import_sakila(){
	cd /usr/local/sakila-db
	mysql -uroot -p <sakila-schema.sql
	mysql -uroot -p <sakila-data.sql
}
prompt '验证结果'
# Sanity-check the import by running a few queries against the sakila DB.
# Fixed: the SQL statements previously followed a bare `mysql` call, so
# the shell tried to execute "show databases;" etc. as shell commands
# after the interactive mysql session ended.  They are now fed to mysql
# on stdin via a quoted heredoc.
verity_mysql(){
	mysql <<'SQL'
show databases;
use sakila;
show tables;
select count(*) from customer;
SQL
}
verity_mysql
prompt '执行完成'
| true |
093de1846352d78365c0edff3a7f1b93735d6540 | Shell | supriya-ops/uc-employee | /firstname-uc.sh | UTF-8 | 144 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Accept input consisting of at most three ASCII letters.
# Fixed: the old class [A-z] also matched the six punctuation characters
# between 'Z' and 'a' in ASCII ([ \ ] ^ _ `); [A-Za-z] matches letters only.
pat="^[A-Za-z]{0,3}$"
echo "give a input"
read input
if [[ $input =~ $pat ]]
then
echo "matching"
else
echo "not matching"
fi
| true |
7e4654e7c1da7efebdc84c64cc5fff14d6e78ef6 | Shell | jwhulette/dotfiles | /shell/.aliases | UTF-8 | 11,375 | 2.890625 | 3 | [] | no_license | #!/bin/bash
############################################################
####
#### Exports
####
export MY_EDITOR=joe
export EDITOR=joe
export HISTFILESIZE=50000
export COMPOSER_HOME=$_COMPOSER_HOME
############################################################
####
#### System
####
alias hostfile="ed /etc/hosts"
alias sshconfig="ed ~/.ssh/config"
alias copykey='command cat ~/.ssh/id_rsa.public | pbcopy'
# Enable aliases to be sudo’ed
alias sudo='sudo '
# Fast open
alias o="open ."
# Show/hide hidden files in Finder
alias show="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hide="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
# IP addresses
alias ip="curl https://diagnostic.opendns.com/myip ; echo"
alias localip="ifconfig -a | grep -o 'inet6\? \(addr:\)\?\s\?\(\(\([0-9]\+\.\)\{3\}[0-9]\+\)\|[a-fA-F0-9:]\+\)' | awk '{ sub(/inet6? (addr:)? ?/, \"\"); print }'"
# Flush Directory Service cache
alias flushdns="sudo dscacheutil -flushcache; sudo killall -HUP mDNSResponder"
# Lock the screen
alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend"
# Empty the Trash on all mounted volumes and the main HDD
# Also, clear Apple’s System Logs to improve shell startup speed
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl"
# Scape webpage
alias scrape="scrapeUrl"
############################################################
####
#### Change Directory
####
alias cd..='cd ..'
alias ..='cd ..'
alias ...='cd ../../../'
alias ....='cd ../../../../'
alias .....='cd ../../../../'
alias .4='cd ../../../../'
alias .5='cd ../../../../..'
############################################################
####
#### PHPUnit
####
alias t="clear; phpunit"
alias phpunit="vendor/bin/phpunit"
alias phpunitw="phpunit-watcher watch"
alias p="phpunit"
alias pf="phpunit --filter "
############################################################
####
#### Git
####
# Git
alias gm="git merge"
alias glog="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias nah='git reset --hard;git clean -df'
alias ga='git add'
alias gaa='git add -A'
alias gaac="git add -A; git commit -m "
alias gb='git branch'
alias gc='git commit -m '
alias gca='git commit -a'
alias gcam='git commit -a -m'
alias gce='git config --global -e'
alias gcm='git commit -m'
alias gco='git checkout'
alias gcob='git checkout -b '
alias gcp='git cherry-pick'
alias gd='git diff'
alias gdc='git diff --cached'
alias gf='git fetch --all'
alias gl='git log'
alias gll='git log --all --decorate --oneline -n 30'
alias gpo="git push origin"
alias gpom='git push origin master'
alias gpod="git push origin develop"
alias gpomp='git push origin master:production'
alias gr='git remote'
alias gra='git remote add'
alias grr='git remote rename'
alias gs='git status'
alias gsr='gstatus'
alias gss='gstatus'
alias gt='git tag'
alias gdisc='git clean -df; git checkout -- .'
alias gcl='git clone'
alias clone='gcl'
alias gup='git pull --rebase --prune; git submodule update --init --recursive'
alias gundo='reset HEAD~1 --mixed'
alias gamend='git commit -a --amend'
alias gwipe="git add -A ; git commit -qm 'WIPE SAVEPOINT' ; git reset HEAD~1 --hard"
alias gconf="ed .git/config"
# remove files that are not under version control
alias gcf="git clean -f"
# discard changes in the working directory
alias gcod="git checkout -- ."
# grab the latest upstream version
alias gpum="git pull upstream master"
# delete branch from github. follow with branch name
alias gpodelete="git push origin --delete"
# show git status without untracked files
alias gsu="git status -uno"
############################################################
####
#### Date and Time
####
alias path='echo -e ${PATH//:/\\n}'
alias now='date +"%T'
alias nowtime=now
alias nowdate='date +"%d-%m-%Y"'
############################################################
####
#### Ping and Ports
####
# Do not wait interval 1 second, go fast #
alias fastping='ping -c 100 -s.2'
alias ports='netstat -tulanp'
############################################################
####
#### Filesystem Commands Protection
####
# do not delete / or prompt if deleting more than 3 files at a time #
# NOTE(review): the comment above no longer matches the alias below --
# 'sudo rm' adds no -I/--preserve-root protection and actually escalates
# every delete to root.  Confirm this is intentional.
alias rm='sudo rm'
# confirmation #
alias mv='mv -i'
alias cp='cp -i'
alias ln='ln -i'
# Parenting changing perms on / #
# The trailing space in each value makes the shell also check the word
# following the alias for alias expansion.
alias chown='chown '
alias chmod='chmod '
alias chgrp='chgrp '
############################################################
####
#### Apt (Debian / Ubuntu)
####
alias apt-get="sudo apt-get"
alias updatey="sudo apt-get --yes"
alias ag="sudo apt-get"
alias agi="sudo apt-get install"
alias agiy="sudo apt-get --yes install"
alias agu="sudo apt-get update"
alias acs="sudo apt-cache search"
alias ags=acs # sometimes apt-get search is just easier to remember :)
alias acsh='sudo apt-cache show'
alias afs='sudo apt-file show'
alias afl='sudo apt-file list'
alias upgrade='sudo apt-get update && sudo apt-get upgrade'
############################################################
####
#### Reboot / Shutdown
####
# reboot / halt / poweroff
alias reboot='sudo /sbin/reboot'
alias poweroff='sudo /sbin/poweroff'
alias halt='sudo /sbin/halt'
alias shutdown='sudo /sbin/shutdown'
############################################################
####
#### Wget Protection
####
## this one saved by butt so many times ##
alias wget='wget -c'
############################################################
####
#### Pretty disk free and usage
####
## set some other defaults ##
alias df='df -H'
alias du='du -ch'
############################################################
####
#### ls - The 'sudo ls' family (this assumes you use a recent GNU ls).
####
alias ls='sudo \ls -laFG'
alias l=ls
# Add colors for filetype and human-readable sizes by default on 'sudo ls':
alias lx='sudo ls -lXB' # Sort by extension.
alias lk='sudo ls -lSr' # Sort by size, biggest last.
alias lt='sudo ls -ltr' # Sort by date, most recent last.
alias lc='sudo ls -ltcr' # Sort by/show change time,most recent last.
alias lu='sudo ls -ltur' # Sort by/show access time,most recent last.
# The ubiquitous 'll': directories first, with alphanumeric sorting:
alias ll="sudo ls -lv --group-directories-first"
alias lm='sudo ll |more' # Pipe through 'more'
alias lr='sudo ll -R' # Recursive ls.
alias la='sudo ll -A' # Show hidden files.
alias tree='sudo tree -Csuh' # Nice alternative to 'recursive ls' ...
#List only directories
alias lsd='sudo ls -l | grep "^d"'
############################################################
####
#### Spelling typos - highly personal and keyboard-dependent :-)
####
alias xs='cd'
alias vf='cd'
alias moer='more'
alias moew='more'
alias kk='ll'
############################################################
####
#### File & strings related functions:
####
# Find a file with a pattern in name:
# (all arguments are joined by "$*" into one case-insensitive substring)
function ff() { find . -type f -iname '*'"$*"'*' -ls ; }

# Find a file with pattern $1 in name and Execute $2 on it:
# (runs `file` on each match when no command is given)
function fe() { find . -type f -iname '*'"${1:-}"'*' \
                      -exec ${2:-file} {} \;  ; }
############################################################
####
#### Unzip / Extract
####
# Extract an archive based on its file-name extension.
# Usage: extract <archive>
# Fixed: "$1" is now quoted throughout, so archive paths containing
# spaces or glob characters work correctly.
function extract()      # Handy Extract Program
{
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2)   tar xvjf "$1"     ;;
            *.tar.gz)    tar xvzf "$1"     ;;
            *.bz2)       bunzip2 "$1"      ;;
            *.rar)       unrar x "$1"      ;;
            *.gz)        gunzip "$1"       ;;
            *.tar)       tar xvf "$1"      ;;
            *.tbz2)      tar xvjf "$1"     ;;
            *.tgz)       tar xvzf "$1"     ;;
            *.zip)       unzip "$1"        ;;
            *.Z)         uncompress "$1"   ;;
            *.7z)        7z x "$1"         ;;
            *)           echo "'$1' cannot be extracted via >extract<" ;;
        esac
    else
        echo "'$1' is not a valid file!"
    fi
}
# Creates an archive (*.tar.gz) from given directory.
function maketar() { tar cvzf "${1%%/}.tar.gz" "${1%%/}/"; }
# Create a ZIP archive of a file or folder.
function makezip() { zip -r "${1%%/}.zip" "$1" ; }
# Make your directories and files access rights sane.
function sanitize() { chmod -R u=rwX,g=rX,o= "$@" ;}
############################################################
####
#### MS-DOS aliases
####
alias md='sudo mkdir -p'
alias copy='cp'
alias rd='sudo rmdir'
alias del='sudo rm'
alias cls='clear'
alias dir='l'
alias move='sudo mv'
alias locate='locate -i'
if [ "$EDITOR" = "" ]; then
alias ed='sudo $MY_EDITOR'
else
alias ed='sudo $EDITOR'
fi
############################################################
####
#### PHP check
####
# Check PHP For Errors
alias phpcheck='find ./ -name \*.php | xargs -n 1 php -l'
############################################################
####
#### chmod
####
#chmod train
alias mx='sudo chmod a+x'
alias 000='sudo chmod 000'
alias 400='sudo chmod 400'
alias 644='sudo chmod 644'
alias 755='sudo chmod 755'
############################################################
####
#### Show all IPs in the current box
####
alias ips="ifconfig -a | perl -nle'/(\d+\.\d+\.\d+\.\d+)/ && print $1'"
############################################################
####
#### Composer
####
alias c="composer"
alias cda="composer dump-autoload"
alias cdo="composer dump-autoload --optimize"
alias cu="composer update"
alias cus="composer update --prefer-source"
alias cups="composer update --prefer-source"
alias cud="composer update --prefer-dist"
alias cupd="composer update --prefer-dist"
alias csu="sudo composer self-update"
alias cr="composer require"
alias ci="composer install"
############################################################
####
#### Laravel
####
# Tail Laravel and Webserver (NGINX & Apache 2) log files
# Compatible with Laravel 4 & 5
#
# Skips rotated logs (names ending in 'gz' or '1') and follows the rest.
alias tl="/bin/ls -d /var/log/nginx/* /var/log/apache2/* storage/logs/* app/storage/logs/* storage/laravel.log | grep -v 'gz$' | grep -v '1$' | xargs tail -f"
##### If you don't have artisan anywhere installed, uncomment the next line
#alias artisan="php artisan"
alias a="artisan"
alias d="php artisan dusk"
alias df="php artisan dusk --filter"
# List Laravel routes; with an argument, filter the list by that pattern.
# Usage: routes [pattern]
function routes()
{
    if [ $# -eq 0 ]; then
        php artisan route:list
    else
        # Quote the pattern and terminate option parsing with '--' so
        # patterns containing spaces or a leading '-' reach grep intact.
        php artisan route:list | grep -- "$1"
    fi
}
############################################################
####
#### System
####
# 'ulimit' operates on soft limits; 'less -r' passes raw control
# characters through so colored output renders correctly.
alias ulimit='ulimit -S'
alias less='less -r'
# Re-run the previous command under sudo ('history -p !!' expands it).
alias fuck='sudo $(history -p \!\!)'
############################################################
####
#### JavaScript
####
# Jest
# Run the project-local Jest binary instead of a global install.
alias jest="./node_modules/.bin/jest"
# Switch NPM
# Toggle the npm registry between the public default and Spatie's.
alias npmnpm="npm config set registry https://registry.npmjs.org"
alias npmspatie="npm config set registry https://npm.spatie.be"
############################################################
####
#### Editors
####
# Open the current working directory in the given macOS application.
# PhpStorm
alias phpstorm='open -a /Applications/PhpStorm.app "`pwd`"'
# VSCode
alias code='open -a "/Applications/Visual Studio Code.app" "`pwd`"'
############################################################
####
#### Misc
####
# Redis
# Flush every key in every Redis database -- destructive.
alias flush-redis="redis-cli FLUSHALL"
| true |
0ae80ec23f8f1240b85777988e50fcbd3bfdfc96 | Shell | aogilvie/safety-first | /scripts/list.sh | UTF-8 | 462 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# List LVM snapshot volumes.
# With a selected origin (set by getParams) lists snapshots of that
# origin; otherwise lists LVs whose name starts with "<prefix>_"
# (default prefix: SNAP).

# Quote the path and abort if the chdir fails, so common.sh is never
# sourced from the wrong directory (and paths with spaces work).
cd "${0%/*}/.." || exit 1
. ./scripts/common.sh
###############################################################################
# printHeader/echowhite/getParams/completed come from scripts/common.sh.
printHeader
echo -e "$(echowhite 'List LV Snapshots') \n"
getParams "$@"
# Default snapshot-name prefix when none was supplied.
# NOTE(review): $prefix and $selected_origin are presumably set by
# getParams -- verify against scripts/common.sh.
if [[ -z "$prefix" ]]; then
  prefix="SNAP"
fi
if [[ -n $selected_origin ]]; then
  lvs -olv_name,vg_name,lv_size,origin,lv_time -S "origin = $selected_origin"
else
  lvs -olv_name,vg_name,lv_size,origin,lv_time -S "lv_name =~ ^${prefix}_"
fi
completed
| true |
0de61bbcd18d3735c1bf7041bdac15a2189f937e | Shell | dockerfornovices/DockerSimpleDemo | /speakerSetup | UTF-8 | 1,422 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# This material is free software for demonstration purposes only.
# Copyright (c) 2018 Alec Clews <alecclews@gmail.com>
#
#
# Use of this source code is governed by an MIT license.
# See the project's LICENSE file for more information.
# Setup local docker envirobment to start the talk
# Images to keep: a fixed base set plus every image referenced by the
# dev compose file (bare names get ":latest" appended so they compare
# equal to docker's "repo:tag" output).
IMAGES="alpine:latest \
dockerfornovices/sqlite:0.1 \
dockerfornovices/lua-dev:latest \
dockerfornovices/api-server:latest \
gcr.io/google.com/cloudsdktool/cloud-sdk:slim \
$(sed -nEe '/^[[:space:]]*image:[[:space:]]+(.+)$/s//\1/p' \
    docker-compose/docker/dev-compose.yaml | sort -u |
sed -Ee 's/^([^:]+)$/\1:latest/' | tr "\n" " ")"

# Keep data out of the printf format string (it could contain '%').
printf 'Image list is:\n%s\n' "$(echo $IMAGES | tr ' ' '\n')"

# Remove every container and every volume so the demo starts clean.
[[ $(docker container ls -aq | wc -l) -gt 0 ]] && echo Remove all running containers && docker container rm -f $(docker container ls -aq)
[[ $(docker volume ls -q | wc -l) -gt 0 ]] && echo Remove all volumes && docker volume rm $(docker volume ls -q)

# Delete every network except docker's built-in defaults.
# NOTE(review): '=~' is a substring/regex match, so a network whose name
# is contained in "bridge host none" (e.g. "host") is kept, but so would
# any name that happens to be a substring -- confirm acceptable.
REQUIRED_NETWORKS="bridge host none"
for n in $(docker network ls --format '{{.Name}}') ; do
    if [[ $REQUIRED_NETWORKS =~ $n ]] ;then
        echo keep network "$n"
    else
        echo delete network "$n"
        docker network rm "$n"
    fi
done

# Delete every image that is not on the keep list, then (re-)pull the
# keep list so the local cache is warm for the talk.
for i in $(docker image ls --format '{{.Repository}}:{{.Tag}}') ; do
    if [[ ! $IMAGES =~ $i ]] ; then
        echo Remove image "$i"
        docker image rm -f "$i"
    fi
done

for i in $IMAGES ; do echo ; docker image pull "$i"; done
| true |
c9ac182a6f7308fc41bcba18fc9868e1762dc5a3 | Shell | spb-metrics/curupira | /usr/share/curupira/scripts/insere-usuario | UTF-8 | 7,449 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# VERSAO 1.0
# 10/04/2006
grupo_padrao="Nivel1" # default group for inclusion in the database
dir_base="/etc/pykota" # base directory of the script
editor_pykota=`which edpykota` # location of edpykota
arquivo_log=/var/tmp/script_insersao.log
###############################################################################
# Funcoes
# Print the usage/help message (Portuguese, user-facing) and exit.
# NOTE(review): always exits 0, even when called from error paths such
# as an unknown option -- the callers' subsequent 'exit 1' is therefore
# unreachable; confirm whether a non-zero status was intended there.
function ajuda() {
	echo " as opcoes sao:
	-h: exibe essa ajuda
	-u nome-usuario (login do usuario na rede)
	-g grupo-usuario (grupo do usuario no curupira)
	-e email-usuario (email do usuario)
	-q quota-usuario (quota do usuario, -1 para quota infinita)
	Exemplo:
	$0 -u p523496 -e fulano@mail.caixa -g \"Nivel1\" -q -1
	"
	exit 0
}
# Test whether a group exists in the PyKota database.
# Arguments: $1 - group name
# Returns 0 if it exists, 1 otherwise.
function testa_grupo() {
	local fgrupo=$1
	local res=""
	SQL="select id from groups where groupname='${fgrupo}';"
	# Grab the first digit of psql's "(N rows)" footer line.
	# NOTE(review): only a single digit is captured and any output line
	# starting with '(' matches -- fragile against psql formatting and
	# row counts >= 10; verify against the psql version in use.
	lin=`echo ${SQL} | psql -U pykotaadmin pykota | grep "^(" \
		| sed 's/(\([[:digit:]]\).*/\1/'`
	# When there are no records, lin is empty -- treat as 0.
	if [ -z "$lin" ]; then
		lin=0
	fi
	# 1 row found -> res = 0 (exists); 0 rows -> res = 1 (missing).
	res=$((1-$lin))
	return $res
}
# Look up a user's information in the Active Directory global catalog
# (port 3268) and export it via globals: nome_usuario, cargo_usuario,
# cod_unidade, nome_unidade, endereco_unidade, email_usuario.
# Arguments: $1 - network login (sAMAccountName).
# NOTE(review): server/dominio/user/pass are empty here -- they must be
# filled in for ldapsearch to authenticate; confirm deployment config.
function pesquisa_ad() {
	local server=""
	local porta=3268
	local dominio=""
	local user=""
	local pass=""
	local tmpfile=/tmp/adsearch.tmp
	local x=""
	local y=""
	# NOTE(review): with $1 unquoted, an empty login degenerates to
	# '[ -z ]', which is true only by accident of test's one-argument
	# form -- quoting "$1" would make the intent explicit.
	if [ -z $1 ]; then
		echo "(pesquisa_ad): Parametro \"login\" nao especificado"
		exit 1
	fi
	ldapsearch -h $server -p $porta -b dc=caixa \
		-D cn=$user,cn=Users,dc=$dominio,dc=caixa \
		-x -w $pass \
		sAMAccountName=$1 > $tmpfile
	nome_usuario=$(cat $tmpfile | grep ^displayName | sed 's/.*: //')
	# Check whether the user's name is valid: "==" in the value
	# presumably marks a base64-encoded LDAP attribute -- TODO confirm.
	echo $nome_usuario | egrep "==" > /dev/null 2> /dev/null
	x=$?
	# Logins matching ^s[0-9]{6}$ are treated as service accounts.
	echo "$1" | egrep "^s[0-9]{6}$" > /dev/null 2> /dev/null
	y=$?
	# Fall back to "Desconhecido" (unknown), or to the raw login for
	# encoded names and service accounts.
	if [ $x -eq 0 ] || [ -z "$nome_usuario" ] || [ $y -eq 0 ] ; then
		nome_usuario="Desconhecido"
		if [ $y -eq 0 ] || [ $x -eq 0 ]; then
			nome_usuario="$1"
		fi
	fi
	cargo_usuario=$(cat $tmpfile | grep ^title | sed 's/.*: //')
	echo $cargo_usuario | egrep "==" > /dev/null 2> /dev/null
	x=$?
	# Same service-account check, applied to the job title.
	echo "$1" | egrep "^s[0-9]{6}$" > /dev/null 2> /dev/null
	y=$?
	# Fall back to unknown title, or "Conta de Servico" for service accounts.
	if [ $x -eq 0 ] || [ -z "$cargo_usuario" ] || [ $y -eq 0 ]; then
		cargo_usuario="Desconhecido"
		if [ $y -eq 0 ]; then
			cargo_usuario="Conta de Servico"
		fi
	fi
	# Unit code is parsed out of extensionAttribute1 ("... - NNNN ...").
	cod_unidade=$(cat $tmpfile | grep ^"extensionAttribute1:" | sed 's/^.*: //' \
		| cut -d '-' -f 2 | cut -d ' ' -f 2)
	echo $cod_unidade | egrep "^[0-9]+$" > /dev/null 2> /dev/null
	x=$?
	# Default to unit 0 when no numeric code could be extracted.
	if [ $x -ne 0 ]; then
		cod_unidade=0
	fi
	nome_unidade=$(cat $tmpfile | grep ^department | sed 's/^.*: //')
	echo $nome_unidade | egrep "==" > /dev/null 2> /dev/null
	x=$?
	# Default to unknown unit for encoded or missing department values.
	if [ $x -eq 0 ] || [ -z "$nome_unidade" ]; then
		nome_unidade="Desconhecido"
	fi
	endereco_unidade=$(cat $tmpfile | grep ^streetAddress | sed 's/^.*: //')
	email_usuario=$(cat $tmpfile | grep ^proxyAddresses | grep smtp \
		| sed 's/^.*smtp://')
	rm -f $tmpfile
}
# Thin dispatch wrapper: resolve user information via Active Directory.
# Arguments: $1 - network login.
function pesquisa_usuario() {
	# Quote the login so an empty or whitespace-containing argument is
	# passed through intact to pesquisa_ad's own validation.
	pesquisa_ad "$1"
}
# Insert the user into PyKota's PostgreSQL database, handling the print
# quota.  Uses globals: usuario, email, grupo, c_grupo.
function insere_usuario() {
	local teste_id
	local group_id
	# ATTENTION: detect whether this is PyKota 1.25+ or older, since the
	# CLI tools differ (pkusers vs edpykota --add).
	versao=$(edpykota -v | sed 's/1\.\([0-9][0-9]\).*/\1/')
	if [ $versao -ge 25 ] ; then
		pykota="novo"
	else
		pykota="velho"
	fi
	# Check whether the user already exists in the database.
	# NOTE(review): grabs the first field of any line containing blanks
	# followed by digits in psql's output -- verify this reliably
	# isolates the id column.
	teste_id=$(echo "SELECT id FROM users where username='${usuario}'" | \
		psql -U postgres pykota | egrep "[[:blank:]]+[[:digit:]]+"\
		| awk '{ print $1}')
	#TODO: COLOCAR INCLUSAO DE QUOTA!
	# Perform the insertion in the PyKota database; if the user is
	# already present, do an update instead.
	if [ ! -z "$teste_id" ]; then
		edpykota -a ${usuario}
		# Only change the group if it was passed as a parameter
		# (c_grupo is set to 1 by the main flow when -g was omitted,
		# so an empty c_grupo means -g was supplied).
		if [ -z "$c_grupo" ]; then
			# Fetch the groupid of the group in question.
			group_id=$(echo "select id from groups where groupname='$grupo'" \
				| psql -U postgres pykota | egrep "[[:blank:]]+[[:digit:]]+" | \
				awk '{ print $1}')
			# Update the groupsmembers entry.
			echo "update groupsmembers set groupid=${group_id} where userid=${teste_id}" \
				| psql -U postgres pykota
		fi
	else
		# New user: the invocation differs between PyKota generations.
		if [ "$pykota" == "velho" ]; then
			edpykota --add --ingroups $grupo ${usuario}/${email}
		elif [ "$pykota" == "novo" ]; then
			pkusers -a ${usuario}/${email}
			pkusers -i "${grupo}" ${usuario}
			edpykota -a ${usuario}
		fi
	fi
}
# Insert the unit's information into the PostgreSQL database.
# If the unit already exists, no operation is performed (guarded by the
# NOT EXISTS subquery).  Uses globals set by pesquisa_ad.
# NOTE(review): values are interpolated into the SQL unescaped -- a
# quote in e.g. nome_unidade breaks the statement (injection risk).
function insere_unidade() {
	cod_ilha=0
	SQL="INSERT INTO unidades SELECT '"${cod_unidade}"','"${nome_unidade}"','"${endereco_unidade}"','"${cod_ilha}"' WHERE NOT EXISTS (SELECT TRUE FROM unidades WHERE codunidade = '"${cod_unidade}"');"
	echo $SQL | psql -U pykotaadmin pykota
}
# Update the user's full name, unit code and job title in PyKota's
# users table.  Uses globals set by pesquisa_ad / the main flow.
# NOTE(review): same unescaped SQL interpolation caveat as
# insere_unidade; also uses LIKE with a leading '%' on the username.
function insere_nome_completo_usuario() {
	SQL="UPDATE users SET nome = '"${nome_usuario}"', codunidade = '"${cod_unidade}"', description = '"${cargo_usuario}"' WHERE username like '%"${usuario}"'";
	echo $SQL | psql -U pykotaadmin pykota
}
###############################################################################
# Main flow
# Verify that edpykota was found; if not, the PyKota installation must
# be broken, so abort immediately.
if [ -z $editor_pykota ]; then
	echo "[31;1m[FATAL] Nao foi possivel encontrar o executavel edpykota."
	echo "Verifique se a instalacao do pykota foi feita corretamente [0m"
	exit 1
fi
# If no parameters were passed, show the help.
# NOTE(review): ajuda exits 0 itself, so the 'exit 1' below never runs.
if [ $# -lt 1 ]; then
	ajuda
	exit 1;
fi
# Parse the arguments; on a bad argument, show the help.
while getopts "hu:g:q:e:" OPT; do
	case "$OPT" in
		"h") ajuda;; # show the help
		"u") usuario=${OPTARG};;
		"e") email=${OPTARG};;
		"g") grupo=${OPTARG};;
		"q") quota=${OPTARG};;
		"?") ajuda;;
	esac
done
# If the login argument was not supplied, this is a hard error: show
# the help (which terminates the script).
if [ -z $usuario ]; then
	echo "[31;1m[ERRO] Nao foi informado o usuario.[0m"
	ajuda
fi
# When -e was not used, assume the default mail address.
if [ -z $email ]; then
	echo "Assumindo email $usuario@mail.caixa"
	email="${usuario}@mail.caixa"
fi
# When -g was not given, place the user in the default group; when it
# was given, verify the group exists (falling back to the default).
# NOTE(review): c_grupo=1 marks "-g NOT passed"; insere_usuario only
# rewrites groupsmembers when c_grupo is empty. An invalid -g leaves
# c_grupo empty while grupo is the default -- confirm that is intended.
if [ -z "$grupo" ]; then
	echo "Assumindo grupo ${grupo_padrao}"
	grupo=$grupo_padrao
	c_grupo=1
else
	# Check that the given group exists in the database.
	testa_grupo $grupo
	if [ $? -ne 0 ]; then
		echo "Grupo $grupo nao existe. Assumindo grupo $grupo_padrao"
		grupo=$grupo_padrao
	fi
fi
pesquisa_usuario $usuario
insere_usuario
insere_unidade
insere_nome_completo_usuario
#ajusta_log
exit 0
| true |
b00531246c9c95d728a71f90647bee36d812e5f3 | Shell | EpicEric/pony-mqtt | /.ci-scripts/deploy_docs.bash | UTF-8 | 411 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex
# Deploy the generated MkDocs site to the pony-mqtt-docs repository.
# Fail fast with a clear message when the deploy token is missing;
# otherwise an empty token would be silently embedded in the remote URL
# and the push would fail much later with a confusing auth error.
: "${GITHUB_TOKEN:?GITHUB_TOKEN must be set}"
echo "Installing MkDocs, Pony theme and PyYAML..."
pip install mkdocs-ponylang pyyaml
echo "Fixing docs..."
make docs-online
echo "Uploading docs using MkDocs..."
# Use a token-authenticated remote; reset to its master so gh-deploy
# pushes a fast-forward commit.
git remote add gh-token "https://${GITHUB_TOKEN}@github.com/epiceric/pony-mqtt-docs"
git fetch gh-token
git reset gh-token/master
pushd mqtt-docs
mkdocs gh-deploy -v --clean --remote-name gh-token --remote-branch master
popd
| true |
5ae6e6b7d7f9ac07e912db9e0d7b90550b339a81 | Shell | weidonggg/ovpn-with-openvpn-admin | /bin/makeclient | UTF-8 | 385 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Usage: makeclient <client-name> [nopass]
# Create an OpenVPN client certificate via easy-rsa inside the
# openvpn-server compose service; "nopass" skips the key passphrase.
client="$1"
nopass="$2"
if [ -z "$client" ]; then
	echo "first argument can't be null."
	echo ""
	echo "  Example:"
	echo "  $0 client"
	exit 1
fi
case "$nopass" in
	nopass)
		docker-compose run --rm openvpn-server easyrsa build-client-full $client nopass
		;;
	*)
		docker-compose run --rm openvpn-server easyrsa build-client-full $client
		;;
esac
| true |
663653ee0b1fefa9ea48462f7d84d5bdcf6fe546 | Shell | FrauBSD/pkgcenter-R | /depend/vcr/libexec/rm | UTF-8 | 24,930 | 3.671875 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
############################################################ IDENT(1)
#
# $Title: Script to uninstall R libraries $
# $Copyright: 2019-2020 Devin Teske. All rights reserved. $
# $FrauBSD: pkgcenter-R/depend/vcr/libexec/rm 2020-07-16 16:40:51 -0700 freebsdfrau $
#
############################################################ CONFIGURATION
#
# Where to install packages
#
VCR_DESTDIR=library
############################################################ ENVIRONMENT
#
# Debugging
#
: ${VCR_DEBUG:=}
#
# Inherited from vcr
#
: ${VCR_PROGRAM:=vcr}
############################################################ GLOBALS
#
# Program basename
#
# readlink -f resolves symlinks; that flag is GNU-specific, hence the
# Linux-only branch (other systems fall back to plain basename logic).
case "${UNAME_s:=$( uname -s )}" in
Linux)
	pgm=$( readlink -f "$0" )
	pgm="${pgm##*/}"
	;;
*)
	pgm="${0##*/}"
esac
#
# Global exit status
#
SUCCESS=0
FAILURE=1
#
# Command-line options
#
R=		# -B R
DESTDIR=	# -d dir
DRYRUN=		# -n
FORCE=		# -f
#
# Literals
#
# NL holds a literal newline, used to build/split package name lists.
NL="
" # END-QUOTE
#
# Miscellaneous
#
# INSTALLED: packages present in DESTDIR; REMOVE: packages to delete;
# REQUIREDBY: installed packages that depend on a pending removal.
INSTALLED=
REMOVE=
REQUIREDBY=
############################################################ I18N
case "${LANG:-$LC_ALL}" in
de|de_DE|de_DE.*) # German
fmt_base_library="Basisbibliothek (-B %s)"
fmt_invalid_package_name="%s: Ungültiger paketname"
fmt_no_such_file_or_directory="%s: Keine solche datei oder ordner"
fmt_no_such_r_interpreter="%s: Kein solcher R interpreter"
fmt_option_cannot_be_combined_with="%s: option kann nicht kombiniert werden mit \`%s'"
fmt_something_went_wrong_in="Etwas ist schief gelaufen in %s"
fmt_unable_to_remove="Entfernen nicht möglich %s"
msg_all_good="Alles gut"
msg_check_for_removal_dependencies="Überprüfen sie ob entfernungsabhängigkeiten vorliegen"
msg_debug="Debuggen"
msg_default="Standard"
msg_destination="Ziel"
msg_destination_directory="Zielverzeichnis."
msg_dry_run_dont_actually_remove_anything="Probelauf. Entferne eigentlich nichts."
msg_dry_run_enabled="Probelauf aktiviert (-n). Die endgültige deinstallation wird nicht durchgeführt."
msg_enable_additional_debugging_statements="Aktivieren sie zusätzliche debuggen-Anweisungen."
msg_exiting="Verlassen."
msg_fatal="TÖDLICH"
msg_force_remove_even_if_required="Macht. Entfernen selbst wenn von einer paket benötigt."
msg_library="bibliothek"
msg_load_dependencies="Abhängigkeiten laden"
msg_missing_package_argument="Fehlendes paketargument"
msg_options="Optionen"
msg_print_help_message="Hilfemeldung an stderr ausgeben und beenden."
msg_proceeding_anyway="Trotzdem weitermachen (-f gegeben)"
msg_removal_prevented_by="Entfernung verhindert durch"
msg_remove_packages="Pakete entfernen"
msg_success="ERFOLG"
msg_unable_to_extract_description_file="DESCRIPTION-datei kann nicht extrahiert werden"
msg_usage="Verwendungszweck"
msg_use_r_base_library_as_destination="Verwenden sie die R base bibliothek als ziel."
msg_warning="ACHTUNG"
;;
he|he_IL|he_IL.*) # Hebrew
fmt_base_library="(-B %s) ספריית בסיס"
fmt_invalid_package_name="%s: שם חבילה לא חוקי"
fmt_no_such_file_or_directory="%s: אין כזה קובץ או תקייה"
fmt_no_such_r_interpreter="%s: אין מתורגמן מסוג R כזה"
fmt_option_cannot_be_combined_with="%s: \`%s' לא ניתן לשלב את האפשרות עם"
fmt_something_went_wrong_in="%s משהו השתבש ב"
fmt_unable_to_remove="%s לא ניתן להסיר"
msg_all_good="הכל טוב"
msg_check_for_removal_dependencies="בדוק אם יש תלות בהסרה"
msg_debug="אשפוז"
msg_default="ברירת מחדל"
msg_destination="יעד"
msg_destination_directory="ספריית יעד."
msg_dry_run_dont_actually_remove_anything="ריצה יבשה. אל תסיר שום דבר."
msg_dry_run_enabled="הפעלה יבשה מופעלת (-n). הסרת ההתקנה הסופית לא תתבצע."
msg_enable_additional_debugging_statements="הפעל הצהרות באגים נוספות."
msg_exiting="יציאה."
msg_fatal="פאטאל"
msg_force_remove_even_if_required="כוח. הסר גם אם נדרש על ידי חבילה כלשהי."
msg_library="ספריה"
msg_load_dependencies="טען תלות"
msg_missing_package_argument="ארגומנט חבילה חסר"
msg_options="אפשרויות"
msg_print_help_message="הדפס את הודעת העזרה ל stdr ולצאת."
msg_proceeding_anyway="ההליך בכל מקרה (-f נתון)"
msg_removal_prevented_by="הסרה מנעה"
msg_remove_packages="הסר חבילות"
msg_success="הצלחה"
msg_unable_to_extract_description_file="אין אפשרות לחלץ קובץ DESCRIPTION"
msg_usage="שימוש"
msg_use_r_base_library_as_destination="השתמש בספריית הבסיס R כיעד."
msg_warning="אזהרה"
;;
ja_JP.UTF8) # Japanese (Kanji)
fmt_base_library="ベースライブラリ(-B %s)"
fmt_invalid_package_name="%s:無効なパッケージ名"
fmt_no_such_file_or_directory="%s:そのようなファイルまたはディレクトリはありません"
fmt_no_such_r_interpreter="%s:そのようなRインタープリターはありません"
fmt_option_cannot_be_combined_with="%s:オプションを\`%s'と組み合わせることはできません"
fmt_something_went_wrong_in="%sで問題が発生しました"
fmt_unable_to_remove="%sを削除できません"
msg_all_good="すべて良い"
msg_check_for_removal_dependencies="削除の依存関係を確認する"
msg_debug="デバッグ"
msg_default="デフォルト"
msg_destination="先"
msg_destination_directory="宛先ディレクトリ。"
msg_dry_run_dont_actually_remove_anything="ドライラン。 実際には何も削除しないでください。"
msg_dry_run_enabled="ドライランが有効(-n)。 最終アンインストールは実行されません。"
msg_enable_additional_debugging_statements="追加のデバッグステートメントを有効にします。"
msg_exiting="終了します。"
msg_fatal="致命的"
msg_force_remove_even_if_required="力。 一部のパッケージで必要な場合でも削除します。"
msg_library="図書館"
msg_load_dependencies="依存関係の読み込み"
msg_missing_package_argument="パッケージ引数がありません"
msg_options="オプション"
msg_print_help_message="ヘルプメッセージをstderrに出力して終了します。"
msg_proceeding_anyway="続行します(-fが指定されています)"
msg_removal_prevented_by="除去は"
msg_remove_packages="パッケージを削除する"
msg_success="成功"
msg_unable_to_extract_description_file="ディスクリプションファイルを抽出できません"
msg_usage="使用法"
msg_use_r_base_library_as_destination="宛先としてRベースライブラリを使用します。"
msg_warning="警告"
;;
ja|ja_JP|ja_JP.*) # Japanese (Latin)
fmt_base_library="Bēsuraiburari (-B %s)"
fmt_invalid_package_name="%s: Mukōna pakkēji-mei"
fmt_no_such_file_or_directory="%s: Sono yōna fairu matawa direkutori wa arimasen"
fmt_no_such_r_interpreter="%s: Sono yōna R intāpuritā wa arimasen"
fmt_option_cannot_be_combined_with="%s: Opushon o\`% s' to kumiawaseru koto wa dekimasen"
fmt_something_went_wrong_in="%s de mondai ga hassei shimashita"
fmt_unable_to_remove="%s o sakujo dekimasen"
msg_all_good="Subete yoi"
msg_check_for_removal_dependencies="Sakujo no isonkankei o kakunin suru"
msg_debug="DEBAGGU"
msg_default="Deforuto"
msg_destination="Saki"
msg_destination_directory="Atesaki direkutori."
msg_dry_run_dont_actually_remove_anything="Dorairan. Jissai ni wa nani mo sakujo shinaide kudasai."
msg_dry_run_enabled="Dorairan ga yūkō (-n). Saishū an'insutōru wa jikkō sa remasen."
msg_enable_additional_debugging_statements="Tsuika no debaggusutētomento o yūkō ni shimasu."
msg_exiting="Shūryō shimasu."
msg_fatal="CHIMEI-TEKI"
msg_force_remove_even_if_required="Chikara. Ichibu no pakkēji de hitsuyōna baai demo sakujo shimasu."
msg_library="Toshokan"
msg_load_dependencies="Isonkankei no yomikomi"
msg_missing_package_argument="Pakkēji hikisū ga arimasen"
msg_options="Opushon"
msg_print_help_message="Herupumessēji o stderr ni shutsuryoku shite shūryō shimasu."
msg_proceeding_anyway="Zokkō shimasu (-f ga shitei sa rete imasu)"
msg_removal_prevented_by="Jokyo wa"
msg_remove_packages="Pakkēji o sakujo suru"
msg_success="SEIKŌ"
msg_unable_to_extract_description_file="Disukuripushonfairu o chūshutsu dekimasen"
msg_usage="Shiyō-hō"
msg_use_r_base_library_as_destination="Atesaki to shite R bēsuraiburari o shiyō shimasu."
msg_warning="KEIKOKU"
;;
pl|pl_PL|pl_PL.*) # Polish
fmt_base_library="Biblioteka bazowa (-B %s)"
fmt_invalid_package_name="%s: Nieprawidłowa nazwa pakietu"
fmt_no_such_file_or_directory="%s: Brak takiego pliku lub katalogu"
fmt_no_such_r_interpreter="%s: Nie ma takiego tłumacza R"
fmt_option_cannot_be_combined_with="%s: opcja nie może być łączona z \`%s'"
fmt_something_went_wrong_in="Coś poszło nie tak %s"
fmt_unable_to_remove="Nie można usunąć %s"
msg_all_good="Wszystko dobrze"
msg_check_for_removal_dependencies="Sprawdź zależności od usuwania"
msg_debug="ODPLUSKWIĆ"
msg_default="Domyślna"
msg_destination="Przeznaczenie"
msg_destination_directory="Katalog docelowy."
msg_dry_run_dont_actually_remove_anything="Próba. Nie usuwaj niczego."
msg_dry_run_enabled="Próba włączony (-n). Ostateczne odinstalowanie nie zostanie wykonane."
msg_enable_additional_debugging_statements="Włącz dodatkowe instrukcje debugowania."
msg_exiting="Wyjście."
msg_fatal="FATALNY"
msg_force_remove_even_if_required="Siła. Usuń, nawet jeśli wymaga tego jakiś pakiet."
msg_library="Biblioteka"
msg_load_dependencies="Załaduj zależności"
msg_missing_package_argument="Brakujący argument pakietu"
msg_options="Opcje"
msg_print_help_message="Wydrukuj komunikat pomocy na stderr i wyjdź."
msg_proceeding_anyway="Postępowanie mimo to (-f podane)"
msg_removal_prevented_by="Usunięcie uniemożliwiono"
msg_remove_packages="Usuń pakiety"
msg_success="POWODZENIE"
msg_unable_to_extract_description_file="Nie można wyodrębnić pliku DESCRIPTION"
msg_usage="Stosowanie"
msg_use_r_base_library_as_destination="Użyj biblioteki podstawowej R jako miejsca docelowego."
msg_warning="OSTRZEŻENIE"
;;
ru_RU.UTF8) # Russian (Cyrillic)
fmt_base_library="базовая библиотека (-B %s)"
fmt_invalid_package_name="%s: Неверное пакета имя"
fmt_no_such_file_or_directory="%s: Данный файл или каталог отсутствует"
fmt_no_such_r_interpreter="%s: Нет такого R переводчика"
fmt_option_cannot_be_combined_with="%s: опция не может быть объединена с \`%s'"
fmt_something_went_wrong_in="Что-то пошло не так в %s"
fmt_unable_to_remove="Невозможно удалить %s"
msg_all_good="Все хорошо"
msg_check_for_removal_dependencies="Проверьте для удаления зависимости"
msg_debug="ОТЛАЖИВАТЬ"
msg_default="По умолчанию"
msg_destination="Место назначения"
msg_destination_directory="Целевой каталог."
msg_dry_run_dont_actually_remove_anything="Пробный прогон. На самом деле ничего не удаляйте."
msg_dry_run_enabled="Пробный запуск включен (-n). Окончательное удаление не будет выполнено."
msg_enable_additional_debugging_statements="Включить дополнительные операторы отладки."
msg_exiting="Выход."
msg_fatal="ФАТАЛЬНЫЙ"
msg_force_remove_even_if_required="Силы. Удалите, даже если требуется какой-то пакет."
msg_library="библиотека"
msg_load_dependencies="Загрузить зависимости"
msg_missing_package_argument="Отсутствует аргумент пакета"
msg_options="Опции"
msg_print_help_message="Распечатать справочное сообщение в stderr и выйти."
msg_proceeding_anyway="Продолжая в любом случае (-f дано)"
msg_removal_prevented_by="Удаление предотвращено"
msg_remove_packages="Удалить пакеты"
msg_success="УСПЕХ"
msg_unable_to_extract_description_file="Невозможно извлечь DESCRIPTION файл"
msg_usage="Использование"
msg_use_r_base_library_as_destination="Используйте базовую библиотеку R в качестве места назначения."
msg_warning="ПРЕДУПРЕЖДЕНИЕ"
;;
ru|ru_RU|ru_RU.*) # Russian (Latin)
fmt_base_library="Bazovaya biblioteka (-B %s)"
fmt_invalid_package_name="%s: Nevernoye paketa imya"
fmt_no_such_file_or_directory="%s: Dannyy fayl ili katalog otsutstvuyet"
fmt_no_such_r_interpreter="%s: Net takogo R perevodchika"
fmt_option_cannot_be_combined_with="%s: optsiya ne mozhet byt' ob\"yedinena s \`%s'"
fmt_something_went_wrong_in="Chto-to poshlo ne tak v %s"
fmt_unable_to_remove="Nevozmozhno udalit' %s"
msg_all_good="Vse khorosho"
msg_check_for_removal_dependencies="Prover'te dlya udaleniya zavisimosti"
msg_debug="OTLAZHIVAT'"
msg_default="Po umolchaniyu"
msg_destination="Mesto naznacheniya"
msg_destination_directory="Tselevoy katalog."
msg_dry_run_dont_actually_remove_anything="Probnyy progon. Na samom dele nichego ne udalyayte."
msg_dry_run_enabled="Probnyy zapusk vklyuchen (-n). Okonchatel'noye udaleniye ne budet vypolneno."
msg_enable_additional_debugging_statements="Vklyuchit' dopolnitel'nyye operatory otladki."
msg_exiting="Vykhod."
msg_fatal="FATAL'NYY"
msg_force_remove_even_if_required="Sily. Udalite, dazhe yesli trebuyetsya kakoy-to paket."
msg_library="Biblioteka"
msg_load_dependencies="Zagruzit' zavisimosti"
msg_missing_package_argument="Otsutstvuyet argument paketa"
msg_options="Optsii"
msg_print_help_message="Raspechatat' spravochnoye soobshcheniye v stderr i vyyti."
msg_proceeding_anyway="Prodolzhaya v lyubom sluchaye (-f dano)"
msg_removal_prevented_by="Udaleniye predotvrashcheno"
msg_remove_packages="Udalit' pakety"
msg_success="USPEKH"
msg_unable_to_extract_description_file="Nevozmozhno izvlech' DESCRIPTION fayl"
msg_usage="Ispol'zovaniye"
msg_use_r_base_library_as_destination="Ispol'zuyte bazovuyu biblioteku R v kachestve mesta naznacheniya."
msg_warning="PREDUPREZHDENIYe"
;;
zh_CN.UTF8) # Chinese (Simplified)
fmt_base_library="基础库 (-B %s)"
fmt_invalid_package_name="%s: 包名称无效"
fmt_no_such_file_or_directory="%s: 没有相应的文件和目录"
fmt_no_such_r_interpreter="%s: 没有这样的R翻译"
fmt_option_cannot_be_combined_with="%s: 选项不能与 \`%s'"
fmt_something_went_wrong_in="出了点问题 %s"
fmt_unable_to_remove="无法删除 %s"
msg_all_good="都好"
msg_check_for_removal_dependencies="检查删除依赖项"
msg_debug="调试"
msg_default="默认"
msg_destination="目的地"
msg_destination_directory="目标目录。"
msg_dry_run_dont_actually_remove_anything="干运行。 实际上不要删除任何东西。"
msg_dry_run_enabled="启用空运行(-n)。 最终卸载将不会执行。"
msg_enable_additional_debugging_statements="启用其他调试语句。"
msg_exiting="退出。"
msg_fatal="致命"
msg_force_remove_even_if_required="力。 即使某些包裹需要,也要删除。"
msg_library="图书馆"
msg_load_dependencies="加载依赖项"
msg_missing_package_argument="缺少包参数"
msg_options="选项"
msg_print_help_message="将帮助消息打印到stderr并退出。"
msg_proceeding_anyway="无论如何都要进行(-f给出)"
msg_removal_prevented_by="去除阻止"
msg_remove_packages="删除包"
msg_success="成功"
msg_unable_to_extract_description_file="无法提取DESCRIPTION文件"
msg_usage="用法"
msg_use_r_base_library_as_destination="使用R库作为目标。"
msg_warning="警告"
;;
zh|zh_CN|zh_CN.*) # Chinese (Latin)
fmt_base_library="Jīchǔ kù (-B %s)"
fmt_invalid_package_name="%s: Bāo míngchēng wúxiào"
fmt_no_such_file_or_directory="%s: Méiyǒu xiāngyìng de wénjiàn hé mùlù"
fmt_no_such_r_interpreter="%s: Méiyǒu zhèyàng de R fānyì"
fmt_option_cannot_be_combined_with="%s: Xuǎnxiàng bùnéng yǔ \`%s'"
fmt_something_went_wrong_in="Chūle diǎn wèntí %s"
fmt_unable_to_remove="Wúfǎ shānchú %s"
msg_all_good="Dōu hǎo"
msg_check_for_removal_dependencies="Jiǎnchá shānchú yīlài xiàng"
msg_debug="TIÁOSHÌ"
msg_default="Mòrèn"
msg_destination="Mùdì de"
msg_destination_directory="Mùbiāo mùlù."
msg_dry_run_dont_actually_remove_anything="Gàn yùnxíng. Shíjì shang bùyào shānchú rènhé dōngxī."
msg_dry_run_enabled="Qǐyòng kōng yùnxíng (-n). Zuìzhōng xièzài jiāng bù huì zhíxíng."
msg_enable_additional_debugging_statements="Qǐyòng qítā tiáoshì yǔjù."
msg_exiting="Tuìchū."
msg_fatal="ZHÌMÌNG"
msg_force_remove_even_if_required="Lì. Jíshǐ mǒu xiē bāoguǒ xūyào, yě yào shānchú."
msg_library="Túshū guǎn"
msg_load_dependencies="Jiāzài yīlài xiàng"
msg_missing_package_argument="Quēshǎo bāo cānshù"
msg_options="Xuǎnxiàng"
msg_print_help_message="Jiāng bāngzhù xiāoxī dǎyìn dào stderr bìng tuìchū."
msg_proceeding_anyway="Wúlùn rúhé dōu yào jìnxíng (-f gěi chū)"
msg_removal_prevented_by="Qùchú zǔzhǐ"
msg_remove_packages="Shānchú bāo"
msg_success="CHÉNGGŌNG"
msg_unable_to_extract_description_file="Wúfǎ tíqǔ DESCRIPTION wénjiàn"
msg_usage="Yòngfǎ"
msg_use_r_base_library_as_destination="Shǐyòng R kù zuòwéi mùbiāo."
msg_warning="JǏNGGÀO"
;;
*) # English
fmt_base_library="Base library (-B %s)"
fmt_invalid_package_name="%s: Invalid package name"
fmt_no_such_file_or_directory="%s: No such file or directory"
fmt_no_such_r_interpreter="%s: No such R interpreter"
fmt_option_cannot_be_combined_with="%s: option cannot be combined with \`%s'"
fmt_something_went_wrong_in="Something went wrong in %s"
fmt_unable_to_remove="Unable to remove %s"
msg_all_good="All good"
msg_check_for_removal_dependencies="Check for removal dependencies"
msg_debug="DEBUG"
msg_default="Default"
msg_destination="Destination"
msg_destination_directory="Destination directory."
msg_dry_run_dont_actually_remove_anything="Dry run. Don't actually remove anything."
msg_dry_run_enabled="Dry run enabled (-n). Final uninstall will not be performed."
msg_enable_additional_debugging_statements="Enable additional debugging statements."
msg_exiting="Exiting."
msg_fatal="FATAL"
msg_force_remove_even_if_required="Force. Remove even if required by some package."
msg_library="Library"
msg_load_dependencies="Load dependencies"
msg_missing_package_argument="Missing package argument"
msg_options="Options"
msg_print_help_message="Print help message to stderr and exit."
msg_proceeding_anyway="Proceeding anyway (-f given)"
msg_removal_prevented_by="Removal prevented by"
msg_remove_packages="Remove packages"
msg_success="SUCCESS"
msg_unable_to_extract_description_file="Unable to extract DESCRIPTION file"
msg_usage="Usage"
msg_use_r_base_library_as_destination="Use R base library as destination."
msg_warning="WARNING"
esac
############################################################ FUNCTIONS
# have -- succeed iff every named command/function/builtin is known to
# the shell (silent wrapper around `type`).
have()
{
	type "$@" >/dev/null 2>&1
}
# matches -- read stdin and succeed iff some input line equals $1 exactly.
# (awk's `exit` still runs the END rule, which sets the final status.)
matches()
{
	awk -v want="$1" '
		$0 == want { hit = 1; exit }
		END { exit hit ? 0 : 1 }
	'
}
# Print the (localized) usage synopsis and option summary to stderr,
# then exit with failure.  Relies on the msg_*/fmt_* i18n globals.
usage()
{
	local optfmt="\t%-9s %s\n"
	exec >&2
	# Everything below goes to stderr because of the exec above.
	printf "$msg_usage: %s %s [-Dfhn] [-B R | -d dir] [--] pkg ...\n" \
		"$VCR_PROGRAM" "$pgm"
	printf "$msg_options:\n"
	printf "$optfmt" "-B R" "$msg_use_r_base_library_as_destination"
	printf "$optfmt" "-D" "$msg_enable_additional_debugging_statements"
	printf "$optfmt" "-d dir" \
		"$msg_destination_directory $msg_default \`$VCR_DESTDIR'."
	printf "$optfmt" "-f" "$msg_force_remove_even_if_required"
	printf "$optfmt" "-h" "$msg_print_help_message"
	printf "$optfmt" "-n" "$msg_dry_run_dont_actually_remove_anything"
	# When -B was given, also show that interpreter's base library path
	# (R here is the wrapper function, which logs via fd 3).
	if [ "$R" ]; then
		printf "$fmt_base_library:\n" "$R"
		printf "\t%s\n" \
			"$( R -e "'cat(.libPaths(.Library))'" 3> /dev/null )"
	fi
	exit $FAILURE
}
# Duplicate stdout on fd 3 so eval2 can echo commands to the original
# stdout even when a caller has redirected fds 1/2 (e.g. usage's exec).
exec 3<&1
# Define the logging helpers twice: with ANSI color when stdout is a
# terminal, plain text otherwise.  Same names/contract in both branches:
#   eval2  - echo a command to fd 3, then eval it
#   step   - announce a major step
#   step2  - announce a minor step (step2 only when VCR_DEBUG is set... 
#            NB: in the tty branch it always prints; only the non-tty
#            wording differs -- both gate on VCR_DEBUG)
#   warn   - warning to stderr
#   die    - optional printf-style message to stderr, then exit FAILURE
#   debug  - printf-style message to stderr when VCR_DEBUG is set
if [ -t 1 ]; then # stdout is a tty
	eval2(){ printf "\e[2m%s\e[m\n" "$*" >&3; eval "$@"; }
	step(){ printf "\e[32;1m==>\e[39m %s\e[m\n" "$*"; }
	step2(){ [ ! "$VCR_DEBUG" ] ||
		printf "\e[32;1m->\e[39m %s\e[m\n" "$*"; }
	warn(){ printf "\e[33;1m$msg_warning!\e[m %s\n" "$*" >&2; }
	die()
	{
		local fmt="$1"
		if [ "$fmt" ]; then
			shift 1 # fmt
			printf "\e[1;31m$msg_fatal!\e[m $fmt\n" "$@" >&2
		fi
		exit $FAILURE
	}
	debug()
	{
		[ ! "$VCR_DEBUG" ] && return
		local fmt="$1"
		shift 1 # fmt
		printf "\e[35m$msg_debug\e[m $fmt\n" "$@" >&2
	}
else # stdout is not a tty
	eval2(){ printf "%s\n" "$*" >&3; eval "$@"; }
	step(){ printf "==> %s\n" "$*"; }
	step2(){ [ ! "$VCR_DEBUG" ] || printf "%s %s\n" "->" "$*"; }
	warn(){ printf "$msg_warning! %s\n" "$*" >&2; }
	die()
	{
		local fmt="$1"
		if [ "$fmt" ]; then
			shift 1 # fmt
			printf "$msg_fatal! $fmt\n" "$@" >&2
		fi
		exit $FAILURE
	}
	debug()
	{
		[ ! "$VCR_DEBUG" ] && return
		local fmt="$1"
		shift 1 # fmt
		printf "$msg_debug $fmt\n" "$@" >&2
	}
fi
# R -- run the configured R interpreter ($R, from -B) with the given
# arguments, echoing the full command via eval2 (which logs to fd 3).
R()
{
	eval2 command $R --slave --no-restore "$@"
}
# Rcat -- evaluate an R expression and print its value plus a newline.
# $1 is interpolated into the R code, so callers pass R syntax.
Rcat()
{
	R -e "'cat(paste0($1, \"\n\"))'"
}
############################################################ MAIN
set -e # errexit
#
# Process command-line options
#
# -B and -d are mutually exclusive: -B derives DESTDIR from the R
# interpreter's base library; -d names it explicitly.
while getopts B:Dd:fhn flag; do
	case "$flag" in
	B) if [ "$DESTDIR" ]; then
		die "$fmt_option_cannot_be_combined_with" "-B" "-d dir"
	   else
		R="$OPTARG"
	   fi ;;
	D) VCR_DEBUG=$(( ${VCR_DEBUG:-0} + 1 )) ;;
	d) if [ "$R" ]; then
		die "$fmt_option_cannot_be_combined_with" "-d" "-B R"
	   else
		DESTDIR="$OPTARG"
	   fi ;;
	f) FORCE=1 ;;
	n) DRYRUN=1
	   warn "$msg_dry_run_enabled" ;;
	*) usage # NOTREACHED
	esac
done
shift $(( $OPTIND - 1 ))
#
# Check command-line arguments
#
[ $# -ge 1 ] || die "$msg_missing_package_argument"
: ${DESTDIR:=$VCR_DESTDIR}
if [ "$R" ]; then
	have "$R" || die "$fmt_no_such_r_interpreter" "$R"
	DESTDIR=$( Rcat ".libPaths(.Library)" 3> /dev/null ) || die
fi
[ -e "$DESTDIR" ] || die "$fmt_no_such_file_or_directory" "$DESTDIR"
# NOTE(review): $fmt_not_a_directory is not defined in the i18n tables
# above -- this die would print an empty message; add the string.
[ -d "$DESTDIR" ] || die "$fmt_not_a_directory" "$DESTDIR"
#
# Process command-line arguments
#
# Reject names containing quotes, whitespace, '==', or URLs, then
# require each package to exist as a directory under DESTDIR; valid
# names are accumulated (newline-separated) in REMOVE.
while [ $# -gt 0 ]; do
	case "$1" in
	*"'"*|*[$IFS]*|*==*|*://*)
		die "$fmt_invalid_package_name" "$1"
		# NOTREACHED
		;;
	esac
	package="$DESTDIR/$1"
	[ -e "$package" ] || die "$fmt_no_such_file_or_directory" "$package"
	[ -d "$package" ] || die "$fmt_not_a_directory" "$package"
	REMOVE="$REMOVE$NL$1"
	shift 1
done
# Strip the leading newline introduced by the first append.
REMOVE="${REMOVE#$NL}"
##
## Check package dependencies
##
step "$msg_load_dependencies"
#
# Get a list of libraries installed in the destination directory
#
# A library counts as installed if its directory holds a DESCRIPTION
# file; packages slated for removal are excluded from the list.
step2 "$msg_destination $msg_library"
debug "DESTDIR=[$DESTDIR]"
[ ! -d "$DESTDIR" ] || INSTALLED=$(
	for dir in "$DESTDIR"/*; do
		n=0
		name="${dir##*/}"
		echo "$REMOVE" | matches "$name" && continue
		[ -e "$dir/DESCRIPTION" ] || continue
		printf "%s\n" "$name"
	done
) || die "$fmt_something_went_wrong_in" "$DESTDIR"
debug "INSTALLED=[$( echo $INSTALLED )]"
#
# Verify no installed packages require pending removals
#
# For each remaining package, pull the Depends:/Imports: continuation
# blocks out of its DESCRIPTION, flatten them into bare package names
# (version constraints in parentheses are dropped), and record the
# package in REQUIREDBY if any of its deps is scheduled for removal.
n=0
set -- $INSTALLED
for name in "$@"; do
	n=$(( $n + 1 ))
	step2 "$name [$n/$#]"
	package="$DESTDIR/$name"
	descr=$( eval2 cat "$package/DESCRIPTION" ) ||
		die "$msg_unable_to_extract_description_file"
	# Range-pattern state machine: start at a Depends:/Imports: header,
	# stop at the next non-indented field that is not such a header.
	depinfo=$(
		[ "$VCR_DEBUG" ] || exec 2> /dev/null
		echo "$descr" | awk '
		BEGIN { catch = "^(Depends|Imports):" }
		$0 ~ catch && ++start, $0 ~ /^[^[:space:]]/ &&
			$1 !~ catch && stop = 1 { }
		!start { next }
		!stop { print; next }
		{ start = stop = 0 }
		' | tee /dev/stderr
	)
	# Flatten to one unique dependency name per line.
	deps=$( echo "$depinfo" | awk '
	{
		sub(/^[^[:space:]]+:/, "")
		buf = buf " " $0
	}
	END {
		gsub(/\([^)]+\)/, "", buf)
		gsub(/,/, " ", buf)
		sub(/^[[:space:]]*/, "", buf)
		sub(/[[:space:]]*$/, "", buf)
		ndeps = split(buf, deps, /[[:space:]]+/)
		delete seen
		for (i = 1; i <= ndeps; i++) {
			if (!((dep = deps[i]) in seen))
				print dep
			seen[dep]
		}
	}
	' )
	debug "deps=[$deps]"
	for dep in $deps; do
		echo "$REMOVE" | matches "$dep" || continue
		REQUIREDBY="$REQUIREDBY$NL$name"
	done
done
REQUIREDBY="${REQUIREDBY#$NL}"
#
# Produce list of packages requirements
#
# If anything still depends on a pending removal, warn (and die unless
# -f was given).
step2 "$msg_check_for_removal_dependencies"
if [ ! "$REQUIREDBY" ]; then
	printf "%s\n" "$msg_all_good"
else
	warn "$msg_removal_prevented_by"
	echo "$REQUIREDBY" | sort | awk '$0="\t"$0' >&2
	if [ "$FORCE" ]; then
		warn "$msg_proceeding_anyway"
	else
		die "$msg_exiting"
	fi
fi
##
## Remove requested packages
##
[ ! "$VCR_DEBUG" ] || step "$msg_remove_packages"
n=0
set -- $REMOVE
for name in "$@"; do
n=$(( $n + 1 ))
step${VCR_DEBUG:+2} "$name [$n/$#]"
# Skip remaining actions if given `-n'
[ ! "$DRYRUN" ] || continue
# NB: Actual removal process
package="$DESTDIR/$name"
eval2 rm -Rf "$package" || die "$fmt_unable_to_remove" "$package"
done
step "$msg_success"
exit $SUCCESS
################################################################################
# END
################################################################################
| true |
d6076dcb1b5c04fab1b48d9fa7875da34fde8e66 | Shell | assout/scripts | /tmux_pane.sh | UTF-8 | 695 | 3.34375 | 3 | [] | no_license | #!/bin/sh -ue
# Open a tmux window split into a 2x2 grid of panes and start one command
# in each pane.
#
# Usage: tmux_pane.sh [-n window-name] [cmd1] [cmd2] [cmd3] [cmd4]
win_name=""
while getopts n: opt ; do
	case "${opt}" in
	n) win_name=${OPTARG} ;;
	\?) exit 1 ;;
	esac
done
shift $((OPTIND - 1))

# Create the window, naming it only when -n was supplied.
if [ -n "${win_name}" ] ; then
	tmux new-window -n "${win_name}"
else
	tmux new-window
fi

# Split the fresh window into four panes (2 columns x 2 rows).
tmux split-window -h -t '{end}.{top-left}'
tmux split-window -v -t '{end}.{top-left}'
tmux split-window -v -t '{end}.{top-right}'

# Type one command (possibly empty) into each pane and press Enter.
tmux send-keys -t '{end}.{top-left}' "${1:-}" C-m
tmux send-keys -t '{end}.{top-right}' "${2:-}" C-m
tmux send-keys -t '{end}.{bottom-left}' "${3:-}" C-m
tmux send-keys -t '{end}.{bottom-right}' "${4:-}" C-m
tmux select-pane -t '{end}.{top-left}'
| true |
34489a8fd78a2a890771397db4a4d48a3f1e8e35 | Shell | tianyi-ge/cloudlab-profiles | /CloudSLOStore/ubuntu-pango-4/primary/boot.sh | UTF-8 | 1,084 | 2.8125 | 3 | [
"MIT"
] | permissive | # ssd
# --- ssd: format the data partition and mount it under the user's mnt dir ---
# Fixed: the original read '/dev/$ssd/sda4' with $ssd unset, which only worked
# because the empty path segment collapses to '/dev//sda4'.
ssd=/dev/sda4
sudo mkfs.ext4 "$ssd"
dir=/users/tianyige/mnt
sudo mkdir -p "$dir"
sudo mount "$ssd" "$dir"
sudo chown -R tianyige "$dir"
# Persist the mount across reboots.
echo "$ssd $dir ext4 defaults 0 0" | sudo tee -a /etc/fstab

# --- gcc: install gcc/g++ 9 and make them the system default compilers ---
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
sudo apt update
sudo apt install -y gcc-9 g++-9 liblzma-dev
cd /usr/bin
sudo mv gcc gcc.back
sudo mv g++ g++.back
sudo ln -s gcc-9 gcc
sudo ln -s g++-9 g++

# --- isa-l: build and install Intel ISA-L 2.29.0 from source ---
sudo apt update
sudo apt install -y nasm dh-autoreconf
cd ~/mnt
wget https://github.com/intel/isa-l/archive/v2.29.0.zip && unzip ./*.zip
cd isa-l-2.29.0
./autogen.sh
./configure --prefix=/usr --libdir=/usr/lib
make
sudo make install

# --- mongo: install MongoDB 4.4.0 from the official repository ---
wget -qO - https://www.mongodb.org/static/pgp/server-4.4.asc | sudo apt-key add -
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/4.4 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.4.list
sudo apt update
sudo apt install -y mongodb-org=4.4.0 mongodb-org-server=4.4.0 mongodb-org-shell=4.4.0 mongodb-org-mongos=4.4.0 mongodb-org-tools=4.4.0
| true |
bceead0f6b9da974f6c6d5d3355ee91163ac8213 | Shell | aliwithay/Fortran-Modernization | /scripts/fixedtofree/f2f/f2f.sh | UTF-8 | 879 | 3.59375 | 4 | [] | no_license | #! /bin/sh
# Developed by Aly Ammar at NCAR
# as a SIParCS intern summer 2019
#
# Convert every fixed-form Fortran file (*.f) in the current directory to
# free form (*.f90) with f2f.pl, then syntax-check each converted file with
# gfortran and report how many failed.  Conversion output goes to $LOG,
# syntax diagnostics to $CHECK.
DIRECTORY="f2foutput"
if [ ! -d "$DIRECTORY" ]; then
	mkdir "$DIRECTORY"
fi
LOG=$DIRECTORY/convert_log.txt
# Environment-modules command; assumes the cluster provides 'ftools'.
module load ftools
n=0
echo "Converting files..."
for f in *.f; do
	# Guard against an unmatched glob (no *.f files leaves the literal '*.f').
	[ -e "$f" ] || continue
	n=$((n+1))
	f=${f%.f}
	echo " "
	echo "$f.f"
	perl /glade/work/alyammar/ff/f2f.pl "$f.f" "$f.f90"
done>"$LOG"
echo "Converted" $n "files."
mv ./*.f90 "$DIRECTORY"

CHECK=$DIRECTORY/syntax_log.txt
scount=0
pcount=0
echo "Analyzing converted files..."
for f in "$DIRECTORY"/*.f90; do
	[ -e "$f" ] || continue
	scount=$((scount+1))
	# gfortran prints nothing on a clean syntax-only pass.
	var=$(gfortran -std=f2008 -fsyntax-only "$f" 2>&1)
	if [ -z "$var" ]; then
		pcount=$((pcount+1))
	else
		echo "$var"
		echo " "
	fi
done>"$CHECK"
ecount=$((scount-pcount))
echo "Analyzed" $scount "files."
echo $ecount "files failed."
echo "Check syntax_log.txt for details on the files."
| true |
b8304e585fa13592bd8109c6b87c571a98790df8 | Shell | min9nim/if-logger | /publish.sh | UTF-8 | 272 | 2.609375 | 3 | [] | no_license |
#!/bin/sh
# Run the test suite first; abort the publish on any failure.
if ! yarn test; then
	echo "test failed"
	exit 1
fi

# Rebuild the distribution from scratch.
rm -r dist
if ! yarn build; then
	echo "build failed"
	exit 1
fi

# Temporarily swap in the mgsong npm credentials for publishing, then
# restore the original ~/.npmrc afterwards.
mv ~/.npmrc ~/.npmrc.tmp
mv ~/.npmrc.mgsong ~/.npmrc
npm publish
mv ~/.npmrc ~/.npmrc.mgsong
mv ~/.npmrc.tmp ~/.npmrc | true |
90346241c677dc680017776efaefe3830e70c368 | Shell | mdcallag/mytools | /bench/run_linkbench/nuc/zmy1.sh | UTF-8 | 296 | 2.734375 | 3 | [] | no_license | nrows=$1
nsecs=$2
bdir=$3
dev=$4
wdop=$5
ldop=$6
for d in pg12.7 in80.9 in57.9 in56.9 rx56.5 ; do
echo Run $d at $( date ) with $nrows rows and $nsecs secs
bash rall.sh $nrows $dev $wdop $nsecs 127.0.0.1 $ldop $d no $bdir 1 1 1 1 1 1
echo Sleep 20 minutes to let HW rest
sleep 1200
done
| true |
381d8942a95c6740adbcaa27a8e7d6bb9618b6ce | Shell | woto/avtorif | /system/emex_replacements/rename-and-copy.sh | UTF-8 | 332 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Move every file in ../emex into the server-price archive, replacing each
# '|' in its (relative) path with '#' on the way.
#
# Reworked: iterate the glob directly instead of parsing 'ls -1' output with
# an IFS hack.  NOTE(review): assumes ../emex contains regular files only
# (with directory entries, 'ls dir' used to list the directory's contents).
for src in ../emex/*; do
	# Replace all '|' characters; identical to the old 'sed s/|/#/g'.
	dst=${src//|/#}
	# Only move entries whose name actually changed.
	if [ "$src" != "$dst" ]; then
		mv "$src" /mnt/emex/Корнев/server-price/emex/"$dst"
	fi
done
| true |
7da287c5265f60ca45338d88cbfac150cd7e1d4b | Shell | arclogicsoftware/arcshell | /sh/core/_ssh_check.sh | UTF-8 | 4,407 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive |
returnTrue="return 0"
returnFalse="return 1"
_ssh_check_test_text=
_ssh_check_fix=${_ssh_check_fix:-0}
while (( $# > 0)); do
case "${1}" in
"-fix"|"-f") _ssh_check_fix=1 ;;
*) break ;;
esac
shift
done
function _ssh_check_pass {
echo "[OK] ${_ssh_check_test_text}"
}
function _ssh_check_fail {
echo "[FAIL] ${_ssh_check_test_text}"
}
function _ssh_check_fixed {
echo "[FIXED] ${_ssh_check_test_text}"
}
function _ssh_check_setup {
_ssh_check_test_text="${1} "
}
function _ssh_check_is_dir_writable_by_non_owner {
typeset d x
d="${1}"
x=$(ls -al ${d} 2>/dev/null | grep "\ \.$" | cut -d" " -f1)
if [[ "${x:5:1}" == "w" ]]; then
${returnTrue}
fi
if [[ "${x:8:1}" == "w" ]]; then
${returnTrue}
fi
${returnFalse}
}
function _ssh_check_is_file_secure {
typeset f x
f="${1}"
x=$(ls -l ${f} 2>/dev/null | cut -d" " -f1)
if (( $(echo "${x:1:9}" | grep "r.*------" | wc -l) )); then
${returnTrue}
else
${returnFalse}
fi
}
_ssh_check_setup "${HOME} directory not writable by group or others"
if ! _ssh_check_is_dir_writable_by_non_owner "${HOME}"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 755 "${HOME}"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
_ssh_check_setup ".ssh directory exists"
if [[ -d "${HOME}/.ssh" ]]; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
mkdir "${HOME}/.ssh"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
if [[ -d "${HOME}/.ssh" ]]; then
_ssh_check_setup ".ssh directory not writable by group or others"
if ! _ssh_check_is_dir_writable_by_non_owner "${HOME}/.ssh"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 700 "${HOME}/.ssh"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
fi
_ssh_check_setup "authorized_keys file exists"
if [[ -f "${HOME}/.ssh/authorized_keys" ]]; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
touch "${HOME}/.ssh/authorized_keys"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
if [[ -f "${HOME}/.ssh/authorized_keys" ]]; then
_ssh_check_setup "authorized_keys is secure"
if _ssh_check_is_file_secure "${HOME}/.ssh/authorized_keys"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 600 "${HOME}/.ssh/authorized_keys"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
fi
_ssh_check_setup "known_hosts exists"
if [[ -f "${HOME}/.ssh/known_hosts" ]]; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
touch "${HOME}/.ssh/known_hosts"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
if [[ -f "${HOME}/.ssh/known_hosts" ]]; then
_ssh_check_setup "known_hosts is secure"
if _ssh_check_is_file_secure "${HOME}/.ssh/known_hosts"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 600 "${HOME}/.ssh/known_hosts"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
fi
_ssh_check_setup "public key file exists"
if [[ -f "${HOME}/.ssh/id_rsa.pub" ]]; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
rm "${HOME}/.ssh/id_rsa" 2> /dev/null
ssh-keygen -f "${HOME}/.ssh/id_rsa" -t rsa -N ''
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
_ssh_check_setup "public key file is secure"
if [[ -f "${HOME}/.ssh/id_rsa.pub" ]]; then
if _ssh_check_is_file_secure "${HOME}/.ssh/id_rsa.pub"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 600 "${HOME}/.ssh/id_rsa.pub"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
fi
_ssh_check_setup "private key file exists"
if [[ -f "${HOME}/.ssh/id_rsa" ]]; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
rm "${HOME}/.ssh/id_rsa.pub" 2> /dev/null
ssh-keygen -f id_rsa -t rsa -N ''
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
_ssh_check_setup "private key file is secure"
if [[ -f "${HOME}/.ssh/id_rsa" ]]; then
if _ssh_check_is_file_secure "${HOME}/.ssh/id_rsa"; then
_ssh_check_pass
else
if (( ${_ssh_check_fix} )); then
chmod 600 "${HOME}/.ssh/id_rsa"
_ssh_check_fixed
else
_ssh_check_fail
fi
fi
fi
| true |
3bd8022d860e8ff3b2fe7778384c6807da6e61d4 | Shell | open-estuary/test-definitions | /auto-test/middleware/tool/pointer_tagging/pointer_tagging.sh | UTF-8 | 1,291 | 3.71875 | 4 | [] | no_license | #!/bin/sh
set -x
# shellcheck disable=SC1091
cd ../../../../utils
. ./sys_info.sh
. ./sh-test-lib
cd -
OUTPUT="$(pwd)/output"
RESULT_FILE="${OUTPUT}/result.txt"
export RESULT_FILE
usage() {
echo "Usage: $0 [-s <true>]" 1>&2
exit 1
}
while getopts "s:" o; do
case "$o" in
s) SKIP_INSTALL="${OPTARG}" ;;
*) usage ;;
esac
done
pointer_tagging_build_test() {
wget ${ci_http_addr}/test_dependents/pointer-tagging-tests.zip
unzip pointer-tagging-tests.zip && rm -rf pointer-tagging-tests.zip
sleep 20
cd pointer-tagging-tests
make all
# Run tests
for tests in $(./pointer_tagging_tests -l) ; do
./pointer_tagging_tests -t "${tests}"
print_info $? "${tests}"
# check_return "${tests}"
done
}
# Test run.
! check_root && error_msg "This script must be run as root"
create_out_dir "${OUTPUT}"
info_msg "About to run pointer-tagging-tests test..."
info_msg "Output directory: ${OUTPUT}"
# Install packages
pkgs="binutils gcc make glibc-static wget unzip"
install_deps "${pkgs}" "${SKIP_INSTALL}"
# Build pointer tagging tests and run tests
pointer_tagging_build_test
#remove_deps "${pkgs}
cd ../
rm -rf pointer-tagging-tests
if [ $? ];then
print_info 0 remove
else
print_info 1 remove
fi
| true |
337920170d89b473a44e728ff5ff5c4d5aca9ff5 | Shell | skewwhiffy/ossetup | /arch/add.budgie.sh | UTF-8 | 267 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
echo Installing Budgie
toInstall=(
x
budgie-desktop
budgie-extras
)
./add.sh ${toInstall[@]}
echo export XDG_CURRENT_DESKTOP=Budgie:GNOME > $HOME/.xinitrc
echo exec budgie-desktop >> $HOME/.xinitrc
echo Reboot. Start cinnamon with startx.
| true |
8d3d19c5fdc6c6460fd55572258d947427ec69fe | Shell | beatkyo/docker-haproxy | /build.sh | UTF-8 | 929 | 3.5 | 4 | [] | no_license | #!/bin/bash
set -e
source "version"
ARCH=${1:-$(uname -m)}
DIST="haproxy-${VERSION}"
function build {
echo
echo "+ build"
echo "+ arch: ${ARCH:?}"
echo "+ image: ${IMAGE:?}"
echo "+ version: ${VERSION:?}"
echo "+ dist: ${DIST:?}"
echo
export IMAGE
export VERSION
docker build \
--pull \
--build-arg "IMAGE=$IMAGE" \
--build-arg "VERSION=$VERSION" \
--build-arg "DIST=$DIST" \
--tag "dalexandre/haproxy-$ARCH:$VERSION" \
--tag "dalexandre/haproxy-$ARCH:latest" \
.
}
function build-i386 {
ARCH="i386"
IMAGE="i386/haproxy:$VERSION-alpine"
DIST="$DIST-linux-386"
build
}
function build-amd64 {
ARCH="amd64"
IMAGE="amd64/haproxy:$VERSION-alpine"
DIST="$DIST-linux-amd64"
build
}
function build-aarch64 {
ARCH="arm64v8"
IMAGE="arm64v8/haproxy:$VERSION-alpine"
DIST="$DIST-linux-arm64"
build
}
function build-x86_64 {
build-amd64
}
build-${ARCH:?}
| true |
343e8728cacb57827141890d017be65695e4ce46 | Shell | sam-hilliard/kali-scripts | /wifi/grab_wpa_handshake.sh | UTF-8 | 1,478 | 4.21875 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# Use this script to capture a wpa-handshake file to be
# cracked later.
# places the specified interface into monitor mode
monitor_mode() {
echo "[+] Putting $1 into monitor mode..."
ifconfig $1 down
iwconfig $1 mode monitor
ifconfig $1 up
echo "[+] Killing NetworkManager"
systemctl stop NetworkManager
}
# captures the handshake of a specified wpa network
get_handshake() {
airodump-ng $1
echo -e "\nEnter the BSSID of the target network: "
read bssid
echo -e "\nEnter the channel of the target network: "
read channel
konsole --hold -e "airodump-ng -c $channel --bssid $bssid -w ./captured_key --output-format cap $1" &> /dev/null &
pid=$!
echo "Enter the mac of the client to boot: "
read client
echo -e "\n[+] Deauthing $client..."
aireplay-ng --deauth 4 -a $bssid $1
echo -e "\nPress ENTER to continue begin exit process"
read
kill $!
}
# puts the interface back into managed mode
revert_interface() {
echo "[+] Putting $1 back into managed mode..."
ifconfig $1 down
iwconfig $1 mode managed
ifconfig $1 up.
systemctl start NetworkManager
}
# forces script to be run as root
if [[ $EUID -ne 0 ]]; then
echo "[-] This script must be run as root."
echo "[-] Exiting..."
exit 1
fi
if [ $# -ne 1 ]; then
echo "[-] Interface not specified."
echo " Usage: ./grab_wpa_handshake.sh <interface name>"
echo "[-] Exiting..."
exit 1
fi
iface=$1
monitor_mode $iface
get_handshake $iface
revert_interface $iface | true |
3fe640d8ec25b19e72ec4ebb2950823177a89274 | Shell | ntaka19/network_analysis | /network_model/autonomous spatial distribution model/heatmap/heatmap-cycbound.sh | UTF-8 | 854 | 2.875 | 3 | [] | no_license | #!/bin/bash
gcc 2darray-cycbound.c -lm
awk '{print $2,$3}' coordinates30000-2.2.dat > coordinates.dat
./a.out coordinates.dat > list.dat
starte=`grep -e "clusters" -n list.dat| sed "s/:/ /g" |awk '{print $1}'`
awk 'NR<'${starte}'{print}' list.dat > matrix.dat
cut -d " " -f2- matrix.dat | awk 'NR>1{if (NR<'${starte}'-1) print}' > matrix1.dat
gnuplot -p << EOF
set term png
set output 'heatmap.png'
set pm3d map
set palette rgbformula 21,22.23
YTICS="`awk 'BEGIN{getline}{printf "%s ",$1}' matrix.dat`"
XTICS="`head -1 matrix.dat`"
#set xrange[-1:10]
set for [i=1:words(XTICS)] xtics ( word(XTICS,i) i-1.5 )
set for [i=1:words(YTICS)] ytics ( word(YTICS,i) i-1.5 )
#set pm3d interpolate 2,2
plot "matrix1.dat" matrix with image
replot
EOF
#matrix1.dat
#splot "<awk '{${1}=\"\"}1' matrix.dat | sed '1 d'" matrix
#set pm3d interpolate 2,2
| true |
fe3fa287a341b540090ee6dcd7455c955bfcb2a4 | Shell | isghe/btcrpcapi | /generate-tag-indexes.sh | UTF-8 | 186 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Generates an index webpage for each release tag.
for tagpath in apis/*; do
tag=$(basename $tagpath)
./generate-tag-index.py "$tag" > "tags/$tag.html"
done
| true |
d7126c9fa17b4f60b6c57e49db70dfe32fd09745 | Shell | jdebp/nosh | /source/convert/mysql@.service.do | UTF-8 | 429 | 3.15625 | 3 | [
"MIT",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause"
] | permissive | #!/bin/sh -e
## **************************************************************************
## For copyright and licensing terms, see the file named COPYING.
## **************************************************************************
# vim: set filetype=sh:
case "`mysqld --version`" in
*MariaDB*) ext=mariadb ;;
*Percona*) ext=percona ;;
*) ext=mysql ;;
esac
redo-ifchange "$1.${ext}"
ln -s -f "`basename \"$1\"`.${ext}" "$3"
| true |
09b2389c2237b37c90acac4a9916abafeed927df | Shell | spring01/molecule | /zmat2cart.sh | UTF-8 | 4,043 | 3 | 3 | [] | no_license | # above is based on gjftogcrt from: http://charles.karney.info/b2d-scripts/
#! /bin/sh
ID='$Id: gjftogms.awk 5724 2004-12-03 15:28:39Z ckarney $'
usage="
$ID
(c) 2004, Sarnoff Corporation, Princeton, NJ, USA
This shell script converts a Gaussian input file to a pure Cartesian
representation.
Run as a filter:
$0 [-h] < input > output
Optional argument -h prints this message.
For more info see:
http://www.gaussian.com
"
while getopts h c; do
case $c in
h ) echo "usage: $usage"; exit;;
* ) echo "usage: $usage" 1>&2; exit 1;;
esac
done
shift `expr $OPTIND - 1`
# If more than zero arguments passed
if [ $# -ne 0 ]; then
echo "usage: $usage" 1>&2
exit 1
fi
#
# Beginning of main shell script
#
awk '
BEGIN {
proc = 0;
track = 0;
comment = "";
control = "";
natoms = 0;
nvars = 0;
charge = 0;
multiplicity = 1;
delete line;
delete value;
delete id;
delete ind;
deg = atan2(1,1)/45;
}
{
if (proc == 1) {
if ($1 == "" || $1 == "Variables:")
proc = 2;
else { # Processing atoms
natoms++;
line[natoms] = $0;
id[natoms] = $1;
ind[$1] = natoms;
}
} else if (proc == 2) { # Processing variables
if ($1 == "")
proc = 3;
else {
nvars++;
sub(/=/, " = ");
value[$1] = $3;
}
} else if (proc > 2) {
# At the end; do nothing
} else { # Processing header
printf "%s\n", $0;
if (substr($1, 1, 1) == "#") {
control = $0;
track = 1;
} else if (track > 0) {
track++;
if (track == 3)
comment = $0;
else if (track == 5) {
charge = $1;
multiplicity = $2;
proc = 1;
}
}
}
}
END {
delete x; delete y; delete z;
x[0] = 0; y[0] = 1; z[0] = 0; # Dummy atom positions
x[-1] = 1; y[-1] = 1; z[-1] = 0;
for (i = 1; i <= natoms; i++) {
$0 = line[i];
if (NF == 1) { # Starting position
x[i] = 0; y[i] = 0; z[i] = 0;
} else if (NF == 4) { # Cartesian line
x[i] = $2; y[i] = $3; z[i] = $4;
} else if (NF == 3 || NF == 5 || NF == 7 ) { # Z-matrix line
# Look up atom indices
k1 = lookupatom($2);
k2 = lookupatom($4);
k3 = lookupatom($6);
# Look up values. Note: only one level of evaluation and only var,
# -var, +var suported (Gamess supports -var)
b = lookupvar($3);
a = lookupvar($5);
t = lookupvar($7);
# Support initial "partial" Z-matrix entries
if (NF == 3) {
k2 = i - 2; a = 90; # 2nd atom on x axis
}
if (NF <= 5) {
k3 = i - 3; t = 0; # 3rd atom in x,y plane
}
a *= deg; t *= deg; # Convert to radians
x[i] = x[k1]; y[i] = y[k1]; z[i] = z[k1];
# First reference vector
x1 = x[k2] - x[k1]; y1 = y[k2] - y[k1]; z1 = z[k2] - z[k1];
norm = sqrt(x1^2 + y1^2 + z1^2);
x1 /= norm; y1 /= norm; z1 /= norm;
# Second reference vector
x2 = x[k3] - x[k2]; y2 = y[k3] - y[k2]; z2 = z[k3] - z[k2];
norm = x1 * x2 + y1 * y2 + z1 * z2; # Project into perp plane
x2 -= norm*x1; y2 -= norm*y1; z2 -= norm*z1;
norm = sqrt(x2^2 + y2^2 + z2^2); # Normalize if possible.
if (norm > 0) { # Can skip if sin(a) == 0.
x2 /= norm; y2 /= norm; z2 /= norm;
}
# Third reference vector
x3 = y1 * z2 - y2 * z1;
y3 = z1 * x2 - z2 * x1;
z3 = x1 * y2 - x2 * y1;
# Compute final position
x[i] += b * (cos(a) * x1 + sin(a) * (cos(t) * x2 - sin(t) * x3));
y[i] += b * (cos(a) * y1 + sin(a) * (cos(t) * y2 - sin(t) * y3));
z[i] += b * (cos(a) * z1 + sin(a) * (cos(t) * z2 - sin(t) * z3));
}
}
for (i = 1; i <= natoms; i++) {
printf " %-6s %14.6f %14.6f %14.6f\n", id[i], x[i], y[i], z[i];
}
printf "\n";
}
function lookupatom(id) {
# Look up id in ind array
if (ind[id] == "")
return id;
else
return ind[id];
}
function lookupvar(var) {
# Look up var in value array
if (var == "")
return 0;
s = substr(var,1,1);
if (s ~ /[-+]/) {
s = s 1;
var = substr(var,2);
} else
s = 1;
if (value[var] == "")
return s * var;
return s * value[var];
}
'
| true |
b0f2605aad8d9221c8c67da28a6a57862606010e | Shell | dottww/Corewar | /sh/asm_cor_list.sh | UTF-8 | 1,047 | 2.96875 | 3 | [] | no_license | # #!/bin/sh
# Assemble up to four corewar champion sources ($1..$4, each a .s file) with
# asm42 and print shell-ready snippets referencing the produced .cor files.
# The commented-out lines are leftovers from a differential-testing workflow
# (comparing this VM against the reference one) kept for reference.
# make
# ./corewar $1 $2 $
# ./corewar42 $i $1 $2 $3 $4 > zzofi.txt
# result=`diff zzmine.txt zzofi.txt -s`
# if [ "${result}" != "Files zzmine.txt and zzofi.txt are identical" ];then
# echo "Dump diff at cycle= $i"
# break
# fi
# done
# diff zzmine.txt zzofi.txt -s > zzdiff.txt
./asm42 $1
./asm42 $2
./asm42 $3
./asm42 $4
# Print the four .cor paths, then assignment snippets for pasting into a shell.
echo ""${1%.s}.cor" "${2%.s}.cor" "${3%.s}.cor" "${4%.s}.cor""
echo "1=\""${1%.s}.cor"\" ; 2=\""${2%.s}.cor"\" ; 3=\""${3%.s}.cor"\" ; 4=\""${4%.s}.cor"\""
# echo 2=\""${2%.s}.cor"\"
echo 3=\""${3%.s}.cor"\"
echo 4=\""${4%.s}.cor"\"
# echo "./corewar42 -d $i $1 $2 $3 $4"
# open -a Visual\ Studio\ Code zzmine.txt
# open -a Visual\ Studio\ Code zzofi.txt
# open -a Visual\ Studio\ Code zzdiff.txt
# ./corewar -n 2 p1.cor -n 1 zork.cor
# test() {./asm42 $1 && make && ./corewar "${1%.s}.cor" ;}
# ./corewar seg.cor
# ./asm $1
# cp "${1%.s}.cor" "zmine.cor"
# ./42asm_mac $1
# cp "${1%.s}.cor" "zofi.cor"
# hexdump -vC zmine.cor > zmine
# hexdump -vC zofi.cor > zofi
# diff zmine zofi -s
5029673159d371593faa99db2378f37a45099778 | Shell | AbdullahGhani1/shell-scripting | /practice/05-exitstatus.sh | UTF-8 | 424 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# In shell scripting we usually don't focus on a command's output; instead we
# check the exit status of the command that was executed
# to determine whether it succeeded or failed.
# The exit status is a number in the range 0-255:
# 0     - success
# 1-255 - failure (or partial success / partial failure)
# exit takes a value from 0 to 255.
# The system reserves the values from 126 upward for its own use
# (e.g. 127 = command not found), so user scripts should use 1-125.
e60197bf4771f0bcbe6bc6cb97f5bd5156e1da6d | Shell | MarcPorto/Scrips2020 | /scripts/03-exemple-if.sh | UTF-8 | 213 | 2.9375 | 3 | [] | no_license | #! /bin/bash
# MarcPorto ASIX M01-ISO
# Febrer 2021
# Exemple if
# $ prog edat
# ------------------------------
# Fixed: the original 'if' line was missing 'then', which is a syntax error.
if [ $# -ne 1 ]; then
  echo "Error: nºarguments incorrecte"
  echo "Usage: $0 edat"
  exit 1
fi
# Quote the argument so it is echoed verbatim even if it contains spaces.
echo "$1"
| true |
75a503ee566becf47cc026660217bc6d77b0f828 | Shell | denjukebox/WinRT_Recovery_Toolkit | /CompilationScript.sh | UTF-8 | 1,025 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Name of device recovery image for resources folderDEFAULT Surface_RT1
DEVICE_NAME=$1
# Version of windows to use
WIN_VERSION=$2
# Type of image for device in device recovery fs folder
IMAGE_VARIANT=$3

#Compound image name & location
IMAGE_NAME="${DEVICE_NAME}_WinRT_${WIN_VERSION}_${IMAGE_VARIANT}"
IMAGE_LOCATION="ISO/${IMAGE_NAME}.iso"
echo $IMAGE_NAME

# Next free loop device.  NOTE(review): there is a race between this lookup
# and the 'losetup --find' inside the heredoc below; both are assumed to pick
# the same device -- confirm.
LODEV=$(sudo losetup -f)

# Pre-allocate the 500 MiB image file that will back the loop device.
fallocate -l 500MiB ${IMAGE_LOCATION}

# The heredoc delimiter is unquoted, so ${IMAGE_LOCATION}, $LODEV, and the
# $(...) substitutions expand in THIS shell before the root shell runs the
# commands.  The fdisk echo sequence scripts: new DOS table, one primary
# partition, type 0x0b (FAT32), write, quit.
sudo -s <<EOF
losetup --partscan --show --find "${IMAGE_LOCATION}"
(echo o; echo n; echo p; echo 1; echo ""; echo ""; echo "t"; echo "b"; echo w; echo q) | fdisk $(echo $LODEV)
mkfs.vfat -n "RECOVERY" -M 0xF9 -v "${LODEV}p1"
mount "${LODEV}p1" mount
unzip "Resources/RecoveryFS/${DEVICE_NAME}/WinRT_${WIN_VERSION}_${IMAGE_VARIANT}.zip" -d mount
cp -r -v Resources/Windows mount
cp -r -v Resources/Linux mount
cp -r -v Scripts mount
cp -v menu.cmd mount
umount mount
losetup -d $(echo $LODEV)
EOF
exit 0
#unzip "Resources/Installs/WinRT_${WIN_VERSION}_${IMAGE_VARIANT}.zip" -d mount/sources
66a30a23379a01bf8cff226904bc1ff346dedf9d | Shell | IsaacElenbaas/dotfiles | /dest/zsh/plugins/you-should-use.plugin.zsh | UTF-8 | 3,381 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/bin/zsh
# ANSI SGR escape sequences used to colour the advice messages
# (expanded later by printf in _flush_ysu_buffer).
NONE="\033[0m"
BOLD="\033[1m"
RED="\033[0;31m"
LIGHT_RED="\033[1;31m"
YELLOW="\033[0;33m"
# Writing to a buffer rather than directly to stdout/stderr allows us to decide if we want to write before or after a command has been executed
# Appends its arguments to the global _YSU_BUFFER.
# NOTE(review): inside a scalar assignment "$@" is joined into one word like
# "$*" -- confirm this is the intended zsh behaviour.
function _write_ysu_buffer() {
_YSU_BUFFER+="$@"
}
# Print the buffered advice to stderr and reset the buffer, then normalise
# YSU_MESSAGE_POSITION to a valid value.
function _flush_ysu_buffer() {
	# Passing as first argument to interpret escape sequences
	printf "$_YSU_BUFFER" >&2
	_YSU_BUFFER=""
	# Validate YSU_MESSAGE_POSITION, falling back to "after" on bad values.
	if [ "$YSU_MESSAGE_POSITION" != "before" ] && [ "$YSU_MESSAGE_POSITION" != "after" ]; then
		# Fixed: the warning previously interpolated an undefined variable
		# ($position) instead of the rejected value, so capture it first.
		local bad_position="$YSU_MESSAGE_POSITION"
		YSU_MESSAGE_POSITION="after"
		printf "$RED${BOLD}Unknown value for YSU_MESSAGE_POSITION '$bad_position.' Expected value 'before' or 'after'$NONE\n" >&2
	fi
}
# Format a "you should use X" advice line and queue it in the buffer.
# $1: label for the alias type ("alias" / "git alias")
# $2: the full command the user typed
# $3: the shorter alias they should have used
function ysu_message() {
local DEFAULT_MESSAGE_FORMAT="${BOLD}${YELLOW}Found existing %alias_type for ${LIGHT_RED}\"%command\"${YELLOW} - you should use ${LIGHT_RED}\"%alias\"${NONE}"
local alias_type_arg="${1}"
local command_arg="${2}"
local alias_arg="${3}"
# Escape '%' and '\' in the typed command: the message is later expanded by
# printf in _flush_ysu_buffer, and '%command' must not clash with the
# placeholders substituted below.
command_arg="${command_arg//\%/%%}"
command_arg="${command_arg//\\/\\\\}"
# The user may override the template via YSU_MESSAGE_FORMAT.
local MESSAGE="${YSU_MESSAGE_FORMAT:-"$DEFAULT_MESSAGE_FORMAT"}"
MESSAGE="${MESSAGE//\%alias_type/$alias_type_arg}"
MESSAGE="${MESSAGE//\%command/$command_arg}"
MESSAGE="${MESSAGE//\%alias/$alias_arg}"
_write_ysu_buffer "$MESSAGE\n"
}
# In "hardcore" mode (YSU_HARDCORE=1), interrupt the pending command by
# sending SIGINT to this shell process.
function _check_ysu_hardcore() {
[ "$YSU_HARDCORE" = 1 ] && kill -s INT $$
}
# preexec hook: warn when the typed "git ..." command matches a configured
# git alias.  $1: the command line as typed; $2: its alias-expanded form.
function _check_git_aliases() {
local typed="$1"
# Strip leading whitespace.
typed="${typed#${typed%%[![:space:]]*}}"
local expanded="$2"
local found=false
# sudo will use another user's profile and so aliases would not apply
if [[ "$typed" == "sudo "* ]]; then
return
fi
if [[ "$typed" == "git "* ]]; then
# NOTE(review): in zsh the last component of a pipeline runs in the current
# shell, so 'found' set inside this loop is visible afterwards (this would
# NOT hold under bash) -- this file is a zsh plugin, so that is relied on.
git config --get-regexp "^alias\..+$" | sort | while read key value; do
key="${key#alias.}"
if [[ "$expanded" == "git $value" || "$expanded" == "git $value "* ]]; then
ysu_message "git alias" "$value" "git $key"
found=true
fi
done
# In hardcore mode, abort the command when any match was reported.
$found && _check_ysu_hardcore
[ "$YSU_MESSAGE_POSITION" = "before" ] && _flush_ysu_buffer
fi
}
# preexec hook: warn when the typed command matches a defined shell alias.
# $1: the command line as typed; $2: its alias-expanded form.
function _check_aliases() {
local typed="$1"
# Strip leading whitespace.
typed="${typed#${typed%%[![:space:]]*}}"
local expanded="$2"
local found=false
local key
local value
local entry
# sudo will use another user's profile and so aliases would not apply
if [[ "$typed" = "sudo "* ]]; then
return
fi
# NOTE(review): as in _check_git_aliases, zsh runs the last pipeline stage
# in the current shell, so the 'return' and variable writes inside the loop
# affect this function directly (bash would run them in a subshell).
{ alias -g; alias -r; } | sort | while read entry; do
key="${entry%%=*}"
value="${entry#*=}"
# Remove leading and trailing ' if they exist
# (${(Q)...} is zsh's "unquote" expansion flag)
value="${(Q)value}"
# Skip ignored aliases
# ([(r)...] is zsh's reverse-subscript lookup in the ignore array)
[ "${YSU_IGNORED_ALIASES[(r)$key]}" = "$key" ] && continue
if [[ "$typed" == "$value" || "$typed" == "$value "* ]]; then
# An alias was used
[[ "$typed" == "$key" || "$typed" == "$key "* ]] && { _YSU_BUFFER=""; return; }
# Aliases longer than or equal in length to the original command are likely for typos
[ ${#key} -ge ${#value} ] && continue
ysu_message "alias" "$value" "$key"
found=true
fi
done
# In hardcore mode, abort the command when any match was reported.
$found && _check_ysu_hardcore
[ "$YSU_MESSAGE_POSITION" = "before" ] && _flush_ysu_buffer
}
# Unregister all you-should-use hooks.
function disable_you_should_use() {
add-zsh-hook -D preexec _check_aliases
add-zsh-hook -D preexec _check_git_aliases
add-zsh-hook -D precmd _flush_ysu_buffer
}
# (Re-)register the hooks: alias checks run before each command (preexec);
# buffered messages are flushed before each prompt (precmd).
function enable_you_should_use() {
# Remove first so repeated calls do not register duplicate hooks.
disable_you_should_use
add-zsh-hook preexec _check_aliases
add-zsh-hook preexec _check_git_aliases
add-zsh-hook precmd _flush_ysu_buffer
}
autoload -Uz add-zsh-hook
enable_you_should_use
| true |
e856769b42b605003d5cecb343541ca6c9ecfb72 | Shell | phdoerfler/system | /bin/pyro.sh | UTF-8 | 974 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
#set -x xtrace
set -e
set -o pipefail
channel=Pyropuncher
key=AIzaSyBcjoRQhzYG7O8tTr2dwzgbkSlk0-vhAic
idDir=/home/infinisil/ids
if [ ! -d $idDir ]; then
mkdir -p $idDir
fi
touch $idDir/$channel
uploadsId=$(curl -s https://www.googleapis.com/youtube/v3/channels\?key\=$key\&forUsername\=$channel\&part\=contentDetails | jq -r ".items[0].contentDetails.relatedPlaylists.uploads")
videoIds=$(curl -s https://www.googleapis.com/youtube/v3/playlistItems\?key\=$key\&playlistId\=$uploadsId\&part\=contentDetails\&maxResults\=5 | jq -r ".items[].contentDetails.videoId" | tr " " "\n")
videoIds=$(tac <(echo $videoIds))
oldIds=$(cat $idDir/$channel)
newIds=$(diff --changed-group-format='%>' --unchanged-group-format='' <(echo $oldIds) <(echo $videoIds) || true)
for id in $newIds; do
echo Downloading $id
youtube-dl -x -f m4a --add-metadata --embed-thumbnail --xattrs -o "/youtube/%(title)s.%(ext).s" $id
beet import -s /youtube/*
rm /youtube/*
echo $id >> $idDir/$channel
done
| true |
f31b848895f0364366ea4c1d9446fc060f28aab0 | Shell | hvdthong/DEFECT_PREDICTION | /PROMISE/archives/log4j/1.1/org/apache/log4j/test/minreg | UTF-8 | 822 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Regression test script.
# Something is fishy if file $1.minreg != current.minreg
# The file $1.minreg was created by a supposedly correct code.
# The file current.minreg is created by running the current code.
# Read the .functions file
. .functions
# ==============================================
function usage {
echo "Usage minreg simple|ttcc"
exit 1
}
# ==============================================
if [ -z "$PERL" ]
then
PERL=perl
fi
case $1 in
simple|ttcc )
;;
* ) usage
;;
esac
java org.apache.log4j.test.Min $1 > $TEMP
$PERL minreg.pl $1 < $TEMP > $OUTPUT
if [ $? != 0 ]
then
echo "The output is not in expected format. See the file [$OUTPUT]."
popd;
exit 1
fi
check witness/$1.minreg $OUTPUT; echo "minreg $1 - OK."
| true |
b3bf2bb86b7876e659a45f199ef53974184593a5 | Shell | andornaut/docker-pgbouncer-postgresql | /docker-entrypoint-initdb.d/0-create-user-and-db.sh | UTF-8 | 492 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Create an application role and database during PostgreSQL container
# initialisation, unless the required environment variables are missing.
if [[ -z "${DB_NAME}" || -z "${DB_USER}" || -z "${DB_PASS}" ]]; then
	echo "\$DB_NAME, \$DB_USER and \$DB_PASS environment variables are not set." >&2
	echo "Skipping user and database creation." >&2
else
	echo "Creating user: ${DB_USER}"
	echo "Creating database: ${DB_NAME}"
	# ON_ERROR_STOP=0 keeps going past individual SQL errors (e.g. when the
	# role already exists).  NOTE(review): '<<-' strips leading TABS only, so
	# the heredoc body/terminator must stay tab-indented; the values are
	# interpolated unescaped into SQL -- do not feed untrusted input.
	psql -v ON_ERROR_STOP=0 --username "${POSTGRES_USER}" <<-EOL
	CREATE USER ${DB_USER};
	ALTER USER ${DB_USER} WITH PASSWORD '${DB_PASS}';
	CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};
	EOL
fi
| true |
ffb929f53934eef844ef1f9f69805e6d3ef6e941 | Shell | AgustinParmisano/iso-unlp | /p2f/ej15.sh | UTF-8 | 575 | 4.125 | 4 | [] | no_license | #!/bin/bash
if [ $# -ne 1 ]; then
	echo "Se debe recibir un parámetro"
	exit 1
fi
if [ "$1" == "c" ]; then
	echo "Ingrese el valor de la temperatura para convertir a Celsius"
	read numero
	echo "El número ingresado es $numero"
	# Fahrenheit -> Celsius: C = 5 * (F - 32) / 9 (integer arithmetic).
	# Fixed: the previous 'expr 3 \* \( numero + 1 \) / 9' did not implement
	# the conversion its own comment documented.
	aux=$((5 * (numero - 32) / 9))
	echo "La temperatura es $aux C"
fi
if [ "$1" == "F" ]; then
	echo "Ingrese el valor de la temperatura para convertir a Farenheit"
	read numero
	# Celsius -> Fahrenheit: F = 9 * C / 5 + 32 (integer arithmetic).
	aux=$((((9 * $numero) / 5) + 32))
	echo "La temperatura es $aux F"
fi
| true |
4834d5c1425a47bc483183cf6eb39a4040941685 | Shell | MarcoSantonastasi/nailted | /front/scripts/docker/install.sh | UTF-8 | 196 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Getting .env values to use on the script
CONTAINER_ID=$(grep FRONT_NODE_CONTAINER_NAME .env | cut -d '=' -f2)
# NOTE(review): only 'npm install' runs inside the container; 'npm audit fix'
# runs on the HOST because the '&&' is outside 'docker exec' -- confirm
# whether both were meant to run in the container.
docker exec -it "$CONTAINER_ID" npm install && npm audit fix
# Fixed: 'return' is only valid in a function or a sourced script; when this
# file is executed directly it errors and leaves a non-zero status.
exit 0
| true |
082e25f4c81fd83caf44117567938e26c949c8d7 | Shell | mbainter/vim-terraform | /get_providers.sh | UTF-8 | 1,467 | 3.796875 | 4 | [
"ISC"
] | permissive | #!/bin/bash
# AUTHOR: Phil Porada - philporada@gmail.com
# TICKET: https://github.com/hashivim/vim-terraform/issues/40
# WHAT: As of Terraform 0.10.0, Hashicorp split out each provider into its own
#       separate terraform-provider-* project. As a result, we have to hunt
#       for all of the resources that each provider provides. This is a PITA
#       but I guess good for Terraform. ¯\_(ツ)_/¯

# jq is required to parse the GitHub API responses below.
if ! command -v jq >/dev/null 2>&1; then
    echo "You need to install jq. Exiting..."
    exit 1
fi
function get_providers() {
mkdir -p terraform-providers
# Make a ramdisk because there is a ton of stuff to download
sudo mount -t tmpfs -o size=512m tmpfs $(pwd)/terraform-providers
cd terraform-providers
for PAGE in {1..2}; do
for REPO in $(curl -sL https://api.github.com/users/terraform-providers/repos?page=${PAGE}\&per_page=100 | jq -r .[].name); do
if [ ! -d ${REPO} ]; then
git clone --depth 1 https://github.com/terraform-providers/${REPO}
# Only get the folder/files we need. There's probably a better way checkout only the files we need, but I don't know it.
cd ${REPO}
find . -type f -not -name "*provider*.go" -delete
cd ..
else
cd ${REPO}
git pull --hard --depth 1 https://github.com/terraform-providers/${REPO}
cd ..
fi
done
done
}
get_providers
| true |
e647f6dda63834741b46d91b6ce03af058ed65b0 | Shell | Chernoslav89/che | /update.sh | UTF-8 | 371 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
branch_name=$(git symbolic-ref -q HEAD)
branch_name=${branch_name##refs/heads/}
branch_name=${branch_name:-HEAD}
ARG1=${1:-$branch_name}
echo "git fetching ..."
git fetch
echo 'go to git branch ->' $ARG1
git checkout $ARG1
echo 'git pull' $ARG1
git pull origin $ARG1
echo "update composer"
composer update
echo "yii migration"
php console/yii migrate
| true |
da91a06b1ffae92847fc223a98b15e329716e3ee | Shell | AndrewHaluza/zsh-update-plugin | /update-plugin.plugin.zsh | UTF-8 | 434 | 3.25 | 3 | [] | no_license | function zupdate (){
plugins_path=~/.oh-my-zsh/custom/plugins/;
if ([[ $1 == 'ls' ]]); then
echo $(ls $plugins_path);
return;
fi
if ([[ $1 == '' ]]); then
list=$(ls $plugins_path);
for plugin in $plugins_path*/;
do
result=$(cd $plugin && git pull);
echo $plugin $result;
done
return;
fi
result=$(cd $plugins_path/$1 && git pull);
echo $1 $result;
}
| true |
14a03ed9c0ffdf60d5114af500513d7081827641 | Shell | nkojima/shell-scripts | /pw_expiration.sh | UTF-8 | 552 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#-------------------------------------------------------------------------------
# ログイン可能なユーザーを全て取得して、
# 各ユーザーのパスワード有効期限を一覧表示する。
# https://qiita.com/nkojima/items/502119f460017e9e7c1f
#-------------------------------------------------------------------------------
users=$(cat /etc/passwd | grep ':/bin/bash$' | sed s/:.*//)
for user in ${users[@]}; do
expiration=$(chage -l $user | sed -n 2p | sed s/.*://)
echo $user,$expiration
done | true |
e87b6b5853b908096fe978230df1669aa2fcc393 | Shell | juhoffma/jboss-virtual-environments | /vagrant-vms/openshift3/scripts/all.sh | UTF-8 | 2,498 | 3.109375 | 3 | [] | no_license | #!/bin/sh
#
# Params
# $1: hostname
# $2: dnsmasq_server_ip
# $3: poolID
#
_HOSTNAME=$1
_DNSMASQ_SERVER_IP=$2
_POOLID=$3
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Copy ssh key and add to authorized_keys. All boxes same key.
mkdir -p ~/.ssh
cp ${DIR}/id_rsa ~/.ssh/
cat ${DIR}/id_rsa.pub >> ~/.ssh/authorized_keys
echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
subscription-manager attach --pool=${_POOLID}
subscription-manager repos --disable='*'
subscription-manager repos --enable rhel-7-server-rpms --enable rhel-7-server-extras-rpms --enable rhel-7-server-optional-rpms --enable rhel-7-server-ose-3.0-rpms
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
yum -y install deltarpm
#yum -y remove NetworkManager*
yum -y install wget vim-enhanced net-tools bash-completion bind-utils system-storage-manager tree python-virtualenv
yum -y update
# Setup hostnames
hostnamectl --static set-hostname ${_HOSTNAME}.example.com
# EXTEND STORAGE FOR DOCKER: http://unpoucode.blogspot.com.es/2015/06/docker-and-devicemappers-thinpool-in.html
pvcreate /dev/vdb
vgextend VolGroup00 /dev/vdb
lvextend -l 100%FREE /dev/VolGroup00/docker-pool
# Install docker
yum -y install docker
# Set registry to use
#sed -i -e "s/registry\.access\.redhat\.com/ose3-registry:5000/" /etc/sysconfig/docker
#sed -i -e "s/^# BLOCK_REGISTRY=.*/BLOCK_REGISTRY='--block-registry registry\.access\.redhat\.com --block-registry docker\.io '/" /etc/sysconfig/docker
sed -i -e "s/^# INSECURE_REGISTRY=.*/INSECURE_REGISTRY='--insecure-registry 0\.0\.0\.0\/0 '/" /etc/sysconfig/docker
systemctl stop docker > /dev/null 2>&1 || :
# usermod -a -G docker vagrant
systemctl enable docker && sudo systemctl start docker
# chown root:docker /var/run/docker.sock
# Configure networking
echo "dns=none" >> /etc/NetworkManager/NetworkManager.conf
cp /etc/resolv.conf /etc/resolv.conf.ori
echo "#Custom resolv.conf made for Openshift" > /etc/resolv.conf
echo "search example.com" >> /etc/resolv.conf
echo "nameserver ${_DNSMASQ_SERVER_IP}" >> /etc/resolv.conf
cat /etc/resolv.conf.ori >> /etc/resolv.conf
systemctl restart NetworkManager
# Add aliases
echo "alias tailfmaster='journalctl -f -u openshift-master' " >> ~/.bashrc
echo "alias tailfnode='journalctl -f -u openshift-node' " >> ~/.bashrc
# Add My docker function aliases
curl https://raw.githubusercontent.com/jorgemoralespou/scripts/master/docker/bash_aliases_docker.txt -o ~/.docker_aliases
echo "source ~/.docker_aliases" >> ~/.bashrc
| true |
2acaa7ccaf86175a33ae0d0081ef73928613c447 | Shell | DeepikaV/Test | /SeleniumServer/postAndResponseRunTest.sh | UTF-8 | 252 | 2.5625 | 3 | [] | no_license | while (! test -e /Users/Shared/Jenkins/Home/jobs/postAndResponse/workspace/Output/report.html)
do
echo "Waiting report.html ..."
sleep 10
done
echo "Found report.html ... Proceed to kill all Terminals"
osascript -e 'tell app "Terminal" to quit' | true |
ffd29065ab82da9c21e6670d5e219b0b991b8aec | Shell | cpausmit/MitAna | /bin/clean.sh | UTF-8 | 1,408 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#----------------------------------------------------------------------------------------------------
# Script to cleanup certain files that follow certain patterns like backup file or core files shared
# libraries and dependenciy file generated from the root compiler (AClibC).
#
# C.Paus V0 (Mar 05, 2014)
#----------------------------------------------------------------------------------------------------
if [ ".$1" == ".backup" ]
then
find $HOME \( -name \*.~\*~ -o -name \*~ -o -name \*.bak -o \
-name .\*.~\*~ -o -name .\*~ -o -name .\*.bak \) \
-exec ls -s {} \; -exec rm {} \;
elif [ ".$1" == ".backup-local" ]
then
find ./ \( -name \*.~\*~ -o -name \*~ -o -name \*.bak -o \
-name .\*.~\*~ -o -name .\*~ -o -name .\*.bak \) \
-exec ls -s {} \; -exec rm {} \;
elif [ ".$1" == ".so-d" ]
then
find ./ \( -name \*_C.so -o -name \*~ -o -name \*_C.d \) \
-exec ls -s {} \; -exec rm {} \;
elif [ ".$1" == ".core" ]
then
find $HOME -type f \( -name core -o -name core.\[0-9\]\* \) \
-exec ls -s {} \; -exec rm {} \;
elif [ ".$1" == ".tex" ]
then
find $HOME/tex $HOME/teaching -type f \( -name \*.aux -o -name \*~ -o -name \*.dvi -o -name \*.log \) \
-exec ls -s {} \; -exec rm {} \;
else
echo ERROR - unknown request: \"$1\"
fi
exit 0
| true |
95d0258c4f70cc7f2e314fc3897dc97977c6f7eb | Shell | ndmanvar-prbuild/demo_kevin | /deploy_and_test.sh | UTF-8 | 402 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# deploy and keep track of pid
echo "Deploying via SimpleHTTPServer..."
python -m SimpleHTTPServer &
FOO_PID=$!
sleep 3
# run tests
echo "Running Functional Tests using Protractor"
if ./node_modules/.bin/protractor conf.js ; then
# shut down server
kill $FOO_PID
# return appropriate exit code
exit 0
else
# shut down server
kill $FOO_PID
# return appropriate exit code
exit 1
fi
| true |
a59a99f97ac030d17e6f8750a6e4b1789abf22e9 | Shell | hstalker/dotfiles | /config/shells/.config/zsh/logout | UTF-8 | 422 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env zsh
###############################################################################
# Perform actions required on logout for login zsh shells
# Shouldn't need much if any further configuration beyond the shared
# configuration present in $XDG_CONFIG_HOME/shell/.
require_source "${XDG_CONFIG_HOME:-$HOME/.config}/shell/logout"
# Load shell specific logout modules
load_per_application_modules logout zsh
| true |
9f4e0ff85c661ee1d91e3fa23654c31ceebf9acb | Shell | zmughal/dotfiles | /ctags/ctags_install | UTF-8 | 90 | 2.640625 | 3 | [] | no_license | #!/bin/sh
CURDIR=`dirname "$0"`
cd "$CURDIR"
CUR="`pwd -P`"
ln -sf "$CUR/.ctags" "$HOME"
| true |
29c29090562e1e7609e53c1dd2e4b4d3b552223a | Shell | williamjxj/craiglist-scrape | /uuu.sh | UTF-8 | 1,185 | 2.984375 | 3 | [] | no_license | #!/bin/bash
MYSQL="mysql -u craig -pwilliam -D craig"
if [ $# -ne 1 ]; then
echo "What date to caculate ? like: 2010-06-16, or 2010-06-18."
exit;
fi
date1=$1
$MYSQL <<- __EOT__
select count(distinct email) as "$date1's emails (USJOBS):" from craigslist_usjobs where date like '$date1%';
select count(distinct email) as "$date1's emails not @craigslist.org (USJOBS):" from craigslist_usjobs where date like '$date1%' and ( email !='' and email not like '%@craigslist.org%' );
select "";
select count(distinct email) as "$date1's emails (USGIGS):" from craigslist_usgigs where date like '$date1%';
select count(distinct email) as "$date1's emails not @craigslist.org (USGIGS):" from craigslist_usgigs where date like '$date1%' and ( email !='' and email not like '%@craigslist.org%' );
select "";
select count(distinct email) as "$date1's emails (USSERVICES):" from craigslist_usservices where date like '$date1%';
select count(distinct email) as "$date1's emails not @craigslist.org (USSERVICES):" from craigslist_usservices where date like '$date1%' and ( email !='' and email not like '%@craigslist.org%' );
select "";
__EOT__
| true |
2d21262fe318e514d1ca3be8f92955030a98a5a3 | Shell | ganezasan/shumatsu.github.io | /.circleci/deploy-github-pages.sh | UTF-8 | 434 | 2.609375 | 3 | [
"MIT"
] | permissive | git config user.name "$USER_NAME"
git config user.email "$USER_EMAIL"
git checkout -b gh-pages origin/gh-pages
find . -maxdepth 1 ! -name '_site' ! -name '.git' ! -name '.gitignore' ! -name 'CNAME' ! -name '.' ! -name '.circleci' -exec rm -rf {} \;
mv _site/* .
rm -R _site/
cat .circleci/config.yml
git add -fA
git commit --allow-empty -m "$(git log master -1 --pretty=%B)"
git push origin gh-pages
echo "deployed successfully"
| true |
49c7389d9ba7d7baf372fd74aa04a816ebceb2fe | Shell | FransUrbo/Openstack-BladeCenter | /install_images.sh | UTF-8 | 9,597 | 3.40625 | 3 | [] | no_license | #!/bin/sh
# Import a bunch of external images.
# http://docs.openstack.org/image-guide/obtain-images.html
# http://docs.openstack.org/cli-reference/glance.html
if [ ! -e "/root/admin-openrc" ]; then
echo "The admin-openrc file don't exists."
exit 1
else
set +x
. /root/admin-openrc
if [ -z "${OS_AUTH_URL}" ]; then
echo "Something wrong with the admin-openrc!"
exit 1
fi
fi
echo "=> Starting install_images.sh: $(date) <="
set -ex
GENERAL_OPTS="--public --protected
--project admin
--disk-format qcow2
--container-format docker
--property architecture=x86_64
--property hypervisor_type=kvm
--property hw_watchdog_action=reset"
mkdir -p /var/tmp/Images
cd /var/tmp/Images
# Find out minimum disk size:
# bladeA01b:/var/tmp/Images# qemu-img info CentOS-6-x86_64-GenericCloud-1605.qcow2 | grep 'virtual size'
# virtual size: 8.0G (8589934592 bytes)
# Then round up to nearest GB (in this case '9').
if [ ! -e "CentOS-6-x86_64-GenericCloud-1605.qcow2" ]; then
#wget --quiet http://cloud.centos.org/centos/6/images/CentOS-6-x86_64-GenericCloud-1605.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/CentOS-6-x86_64-GenericCloud-1605.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 9 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=centos --property os_version=6 \
--file CentOS-6-x86_64-GenericCloud-1605.qcow2 centos6
fi
if [ ! -e "CentOS-7-x86_64-GenericCloud-1605.qcow2" ]; then
#wget --quiet http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1605.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/CentOS-7-x86_64-GenericCloud-1605.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 9 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=centos --property os_version=7 \
--file CentOS-7-x86_64-GenericCloud-1605.qcow2 centos7
fi
if [ ! -e "cirros-0.3.4-x86_64-disk.img" ]; then
#wget --quiet http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/cirros-0.3.4-x86_64-disk.img
openstack image create ${GENERAL_OPTS} --min-disk 1 \
--property os_command_line='/usr/sbin/sshd -D' \
--file cirros-0.3.4-x86_64-disk.img cirros
fi
if [ ! -e "trusty-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/trusty-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=14.04 \
--file trusty-server-cloudimg-amd64-disk1.img trusty
fi
if [ ! -e "precise-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/precise-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=12.04 \
--file precise-server-cloudimg-amd64-disk1.img precise
fi
if [ ! -e "quantal-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/quantal/current/quantal-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/quantal-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=12.10 \
--file quantal-server-cloudimg-amd64-disk1.img quantal
fi
if [ ! -e "raring-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/raring/current/raring-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/raring-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=13.04 \
--file raring-server-cloudimg-amd64-disk1.img raring
fi
if [ ! -e "saucy-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/saucy/current/saucy-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/saucy-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=13.10 \
--file saucy-server-cloudimg-amd64-disk1.img saucy
fi
if [ ! -e "utopic-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/utopic/current/utopic-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/utopic-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=14.10 \
--file utopic-server-cloudimg-amd64-disk1.img utopic
fi
if [ ! -e "vivid-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/vivid/current/vivid-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/vivid-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=15.04 \
--file vivid-server-cloudimg-amd64-disk1.img vivid
fi
if [ ! -e "wily-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/wily/current/wily-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/wily-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=15.10 \
--file wily-server-cloudimg-amd64-disk1.img wily
fi
if [ ! -e "xenial-server-cloudimg-amd64-disk1.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/xenial-server-cloudimg-amd64-disk1.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=16.04 \
--file xenial-server-cloudimg-amd64-disk1.img xenial
fi
if [ ! -e "yakkety-server-cloudimg-amd64.img" ]; then
#wget --quiet http://cloud-images.ubuntu.com/yakkety/current/yakkety-server-cloudimg-amd64.img
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/yakkety-server-cloudimg-amd64.img
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=ubuntu --property os_version=16.10 \
--file yakkety-server-cloudimg-amd64.img yakkety
fi
if [ ! -e "Fedora-Cloud-Base-23-20151030.x86_64.qcow2" ]; then
#wget --quiet https://download.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-23-20151030.x86_64.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/Fedora-Cloud-Base-23-20151030.x86_64.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 4 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=fedora --property os_version=23 \
--file Fedora-Cloud-Base-23-20151030.x86_64.qcow2 fedora23
fi
if [ ! -e "Fedora-Cloud-Base-22-20150521.x86_64.qcow2" ]; then
#wget --quiet https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 4 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=fedora --property os_version=22 \
--file Fedora-Cloud-Base-22-20150521.x86_64.qcow2 fedora22
fi
if [ ! -e "openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.77.qcow2" ]; then
#wget --quiet http://download.opensuse.org/repositories/Cloud:/Images:/openSUSE_13.2/images/openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.77.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.77.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 11 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=opensuse --property os_version=13 \
--file openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.77.qcow2 opensuse13
fi
if [ ! -e "debian-8.5.0-openstack-amd64.qcow2" ]; then
#wget --quiet http://cdimage.debian.org/cdimage/openstack/8.5.0/debian-8.5.0-openstack-amd64.qcow2
wget --quiet http://${LOCALSERVER}/PXEBoot/Images/debian-8.5.0-openstack-amd64.qcow2
openstack image create ${GENERAL_OPTS} --min-disk 3 \
--property os_command_line='/usr/sbin/sshd -D' \
--property os_distro=debian --property os_version=8 \
--file debian-8.5.0-openstack-amd64.qcow2 jessie
fi
echo "=> W E ' R E A L L D O N E : $(date) <="
| true |
0f76d3df0a793b8db7fe72c5c6ae405fed441623 | Shell | openshift/descheduler | /hack/verify-gofmt.sh | UTF-8 | 1,773 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
function cleanup() {
return_code=$?
os::util::describe_return_code "${return_code}"
exit "${return_code}"
set -o errexit
set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename './.git' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
-o -wholename '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
trap "cleanup" EXIT
bad_files=$(os::util::list_go_src_files | xargs gofmt -s -l)
if [[ -n "${bad_files}" ]]; then
os::log::warning "!!! gofmt needs to be run on the listed files"
echo "${bad_files}"
os::log::fatal "Try running 'gofmt -s -d [path]'
Or autocorrect with 'hack/verify-gofmt.sh | xargs -n 1 gofmt -s -w'"
fi
| true |
0c1a26837cd821f86e9300e4af0090f6cb3f048a | Shell | bartoszmajsak/istio | /bin/update_maistra_deps.sh | UTF-8 | 1,575 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eux
UPDATE_BRANCH=${UPDATE_BRANCH:-"maistra-2.2"}
# Update go dependencies
go get -d "maistra.io/api@${UPDATE_BRANCH}"
go mod tidy
go mod vendor
# FIXME: https://issues.redhat.com/browse/MAISTRA-2353
# For now we are just copying the files that already exist in istio, i.e., we are not adding any new files.
# We should copy all CRD's from api repo, i.e., uncomment the lines below and delete the other copy commands
# rm -f manifests/charts/base/crds/maistra*
# cp "${dir}"/manifests/* manifests/charts/base/crds
cp ./vendor/maistra.io/api/manifests/federation.maistra.io_servicemeshpeers.yaml manifests/charts/base/crds
cp ./vendor/maistra.io/api/manifests/federation.maistra.io_exportedservicesets.yaml manifests/charts/base/crds
cp ./vendor/maistra.io/api/manifests/federation.maistra.io_importedservicesets.yaml manifests/charts/base/crds
cp ./vendor/maistra.io/api/manifests/maistra.io_servicemeshextensions.yaml manifests/charts/base/crds
# Regenerate files
make clean gen
| true |
6491a9b2f43d1a442dc34b2a754db5d82a226b36 | Shell | Colin-Ragush/AWSR | /scripts/13_create_deployment_package.sh | UTF-8 | 2,168 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# run on EC2 to create deployment package
# preparing project folder, created when files were copied
echo -e "$INFO Creating deployment package"
sudo chmod -R a+w ~/${PRJ_NAME}
cd ~/${PRJ_NAME}
echo -e "$INFO PWD: $(pwd)"
# Python 3 packages transfer
echo -e "$INFO Transferring Python 3.6 packages to the deployment package"
source ~/env/bin/activate
cd ~/${PRJ_NAME}
cp -r ${CP_VERBOSE} ~/env/lib64/python3.6/site-packages/* ~/${PRJ_NAME}
deactivate
cp /usr/lib64/python3.6/lib-dynload/_sqlite3.cpython-36m-x86_64-linux-gnu.so \
~/${PRJ_NAME}
# Copy R needed libraries into project directory
echo -e "$INFO Copy R libraries into project directory."
ls /usr/lib64/R | \
grep -v library | \
xargs -I '{}' \
cp -r ${CP_VERBOSE} /usr/lib64/R/'{}' ~/${PRJ_NAME}/
cp -r ${CP_VERBOSE} /usr/lib64/R/library ~/${PRJ_NAME}/library/
ldd /usr/lib64/R/bin/exec/R | \
grep "=> /" | \
awk '{print $3}' | \
grep 'libgomp.so.1\|libgfortran.so.3\|libquadmath.so.0\|libtre.so.5' | \
xargs -I '{}' cp ${CP_VERBOSE} '{}' ~/${PRJ_NAME}/lib/
echo -e "$INFO R libraries copy finished."
echo -e "$INFO PWD: $(pwd)"
sudo chmod -R a+w ~/${PRJ_NAME}/library
echo -e "$INFO Changed permissions"
# Organizing libraries for deployment package
cp -r ${CP_VERBOSE} ~/library/* ~/${PRJ_NAME}/library
cp ~/${PRJ_NAME}/bin/exec/R ~/${PRJ_NAME}
cp /usr/lib64/libblas.so.3 ~/${PRJ_NAME}/lib
cp /usr/lib64/liblapack.so.3 ~/${PRJ_NAME}/lib
cp ~/${PRJ_NAME}/rpy2/rinterface/_rinterface.cpython-36m-x86_64-linux-gnu.so \
~/${PRJ_NAME}/rpy2/rinterface/_rinterface.so
mkdir ~/${PRJ_NAME}/lib/external
# uncomment the following lines in case mysql is needed
# cp /usr/lib64/mysql/libmysqlclient.so.18.0.0 \
# ~/${PRJ_NAME}/lib/external/libmysqlclient.so.18
# Check package file size
maxsize=250
size=$(du -sm | awk '{ print $1 }')
if [ $size -ge $maxsize ]; then
echo -e "$ERROR File size exceeds 250MB."
exit 1
fi
echo -e "$INFO Zipping the deployment package ..."
LAMBDA_ZIP_NAME="${LAMBDA_FUNCTION_NAME}.zip"
zip -qr9 ~/${LAMBDA_ZIP_NAME} *
echo -e "$INFO Finished zipping the deployment package to" \
"$(FC $LAMBDA_ZIP_NAME)"
| true |
d5a7364e3f1e8f1be14c12a059c58b034ec4c4d6 | Shell | njsoly/miscellany | /bash.fxns.d/full_paths.fxn | UTF-8 | 213 | 3.46875 | 3 | [] | no_license | #!/bin/bash
full_paths ()
{
[[ -n "$@" ]] && {
path=$(printf "%s\n" "$@")
} || {
path="$PWD"
};
echo "the path to get absolute is \"$path\".";
realpath $(printf "%s\n" $@)
}
| true |
d28cf70c594446a5552452bd8c41220208781762 | Shell | JarlPenguin/releases-kernel | /sync.sh | UTF-8 | 1,327 | 3.109375 | 3 | [] | no_license | #!/bin/bash
source config.sh
SYNC_START=$(date +"%s")
telegram -M "Sync started for [${name}](${kernel})"
git clone "${kernel}" --depth 1 -b "${branch}" kernel
git clone git://github.com/JarlPenguin/AnyKernel3 --depth 1 AnyKernel
if [ "${clang}" == "true" ]; then
git clone https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86 --depth 1 clang
fi
if [ "${ARCH}" == "arm" ]; then
git clone git://github.com/LineageOS/android_prebuilts_gcc_linux-x86_arm_arm-linux-androideabi-4.9 --depth 1 gcc
elif [ "${ARCH}" == "arm64" ]; then
git clone git://github.com/LineageOS/android_prebuilts_gcc_linux-x86_arm_arm-linux-androideabi-4.9 --depth 1 gcc32
git clone git://github.com/LineageOS/android_prebuilts_gcc_linux-x86_aarch64_aarch64-linux-android-4.9 --depth 1 gcc
fi
SYNC_END=$(date +"%s")
SYNC_DIFF=$((SYNC_END - SYNC_START))
if [ -d "kernel" ] && [ -d "gcc" ] && [ -d "AnyKernel" ]; then
telegram -M "Sync completed successfully in $((SYNC_DIFF / 60)) minute(s) and $((SYNC_DIFF % 60)) seconds"
else
telegram -M "Sync failed in $((SYNC_DIFF / 60)) minute(s) and $((SYNC_DIFF % 60)) seconds"
curl --data parse_mode=HTML --data chat_id=$TELEGRAM_CHAT --data sticker=CAADBQADGgEAAixuhBPbSa3YLUZ8DBYE --request POST https://api.telegram.org/bot$TELEGRAM_TOKEN/sendSticker
exit 1
fi
| true |
a026453b836a5b3e342ec668f7e9e81226ed5d1f | Shell | rust-kr/doc.rust-kr.org | /tools/doc-to-md.sh | UTF-8 | 1,109 | 4.03125 | 4 | [
"Apache-2.0",
"MIT"
] | permissive | #!/bin/bash
set -eu
# Get all the docx files in the tmp dir.
find tmp -name '*.docx' -print0 | \
# Extract just the filename so we can reuse it easily.
xargs -0 basename -s .docx | \
while IFS= read -r filename; do
# Truncate the `nostarch` dir file and put the "no editing" warning back.
# Tell shellcheck to ignore this because I want the `/src/` printed
# literally, not expanded.
# shellcheck disable=SC2016
echo '<!-- DO NOT EDIT THIS FILE.
This file is periodically generated from the content in the `/src/`
directory, so all fixes need to be made in `/src/`.
-->' > "nostarch/$filename.md"
# Make a directory to put the XML in.
mkdir -p "tmp/$filename"
# Unzip the docx to get at the XML.
unzip -o "tmp/$filename.docx" -d "tmp/$filename"
# Convert to markdown with XSL.
xsltproc tools/docx-to-md.xsl "tmp/$filename/word/document.xml" | \
# Hard wrap at 80 chars at word bourdaries.
fold -w 80 -s | \
# Remove trailing whitespace and append to the file in the `nostarch` dir for comparison.
sed -e "s/ *$//" >> "nostarch/$filename.md"
done
| true |
499efdd8a01fae150705914b094cff4a72e0d489 | Shell | ostefano/process-snapshots-toolkit | /black.sh | UTF-8 | 1,819 | 3.296875 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -e
# files/directories to test
FILES="process_snapshot_toolkit process_snapshot_toolkit_test ghidra_scripts scripts"
# Print usage if no arguments passed
if [ "$#" -ne 1 ]; then
echo "Usage: $0 [--inline|OUTPUT_FILE]"
exit 1
fi
# Run black according to arguments passed.
if [ $1 = '--inline' ]; then
black $FILES
else
# If black encounters changes that need to be made, it will exit with status=1
black --check --diff $FILES &> "$1"
fi
exit 0
| true |
e341b9463f99a8c8ec21116d8bf392d942dfe17a | Shell | pavlov99/presentations | /2017-10-22-codeconf/example3-stock-market/scripts/2-calculate-metrics.sh | UTF-8 | 1,778 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Calculate financial metrics based on open/high/low/close/volume data.
# Usage: bash 2-calculate-metrics.sh file.tsv
cat $1 \
| tawk -o 'Date; Open; High; Low; Close; Volume' \
-o 'MA200 = AVG(Close, 200)' \
-o 'MA50 = AVG(Close, 50)' \
-o 'EMA26 = EMA(Close, 26)' \
-o 'EMA13 = EMA(Close, 13)' \
-o 'FastMACD = EMA(Close, 12) - EMA26' \
-o 'SlowMACD = EMA(FastMACD, 9)' \
-o 'MACDHistogram = FastMACD - SlowMACD' \
-o '_MACDSlope = MACDHistogram - PREV(MACDHistogram)' \
-o '_EMA13Slope = EMA13 - PREV(EMA13)' \
-o 'ImpulseIndex = 1 if (_MACDSlope > 0 and _EMA13Slope > 0) else -1 if (_MACDSlope < 0 and _EMA13Slope < 0) else 0' \
-o '_PrevClose = PREV(Close)' \
-o 'CloseSlope = Close - _PrevClose' \
-o 'CloseMin14 = MIN(Close, 14)' \
-o 'CloseMax14 = MAX(Close, 14)' \
-o '_RawStochasticNumerator = Close - CloseMin14' \
-o '_RawStochasticDenominator = CloseMax14 - CloseMin14' \
-o '_RawStochasticNumerator3 = SUM(_RawStochasticNumerator, 3)' \
-o '_RawStochasticDenominator3 = SUM(_RawStochasticDenominator, 3)' \
-o 'FastStochasticK = _RawStochasticNumerator / _RawStochasticDenominator * 100 if _RawStochasticDenominator else 0' \
-o 'FastStochasticD = _RawStochasticNumerator3 / _RawStochasticDenominator3 * 100 if _RawStochasticDenominator3 else 0' \
-o '_SlowStochasticNumerator3 = SUM(_RawStochasticNumerator3, 3)' \
-o '_SlowStochasticDenominator3 = SUM(_RawStochasticDenominator3, 3)' \
-o 'SlowStochasticK = FastStochasticD' \
-o 'SlowStochasticD = _SlowStochasticNumerator3 / _SlowStochasticDenominator3 * 100 if _SlowStochasticDenominator3 else 0'
| true |
4c4d1e699e8f2edd36fcebdd871b707fa00eb941 | Shell | kccarbone/gadget-server-oled | /install.sh | UTF-8 | 3,904 | 3.640625 | 4 | [
"MIT"
] | permissive | ### Background service installation ###
serviceName="gadget-server-oled"
serviceRepo="https://github.com/kccarbone/gadget-server-oled.git"
serviceHome="/etc/$serviceName"
serviceFile="/lib/systemd/system/$serviceName.service"
printf "\033[0;97;104m[ Installing $serviceName ]\033[0m\n\n"
# Start from a clean slate: remove any previous checkout and unit file.
sudo rm -rf "$serviceHome"
sudo mkdir -p "$serviceHome"
sudo rm -f "$serviceFile"
# Setup NPM
printf '\033[0;36mChecking Node.js\033[0m\n'
if ! type node > /dev/null 2>&1;
then
  curl -sL https://deb.nodesource.com/setup_10.x | sudo bash -
  sudo apt install -y nodejs
fi
printf "Node $(node -v) installed\n"
printf "NPM $(npm -v) installed\n\n"
# Setup git
printf '\033[0;36mChecking git\033[0m\n'
if ! type git > /dev/null 2>&1;
then
  printf '\033[0;36m\nInstalling Git...\033[0m\n'
  sudo apt install -y git-all
fi
printf "$(git --version) installed\n\n"
# Enable i2c: load the kernel modules and boot-time device-tree parameters
# the OLED display needs. Every check is idempotent, so re-running is safe.
printf '\033[0;36mSetting up hardware\033[0m\n'
if grep -q 'i2c-bcm2708' /etc/modules;
then
  printf 'i2c-bcm2708 is enabled\n'
else
  printf 'Enabling i2c-bcm2708\n'
  echo 'i2c-bcm2708' | sudo tee -a /etc/modules > /dev/null
fi
if grep -q 'i2c-dev' /etc/modules;
then
  printf 'i2c-dev is enabled\n'
else
  printf 'Enabling i2c-dev\n'
  echo 'i2c-dev' | sudo tee -a /etc/modules > /dev/null
fi
if grep -q 'dtparam=i2c1=on' /boot/config.txt;
then
  printf 'i2c1 parameter is set\n'
else
  printf 'Setting i2c1 parameter\n'
  echo 'dtparam=i2c1=on' | sudo tee -a /boot/config.txt > /dev/null
fi
if grep -q 'dtparam=i2c_arm=on' /boot/config.txt;
then
  printf 'i2c_arm parameter is set\n'
else
  printf 'Setting i2c_arm parameter\n'
  echo 'dtparam=i2c_arm=on' | sudo tee -a /boot/config.txt > /dev/null
fi
if [ -f /etc/modprobe.d/raspi-blacklist.conf ];
then
  printf 'Removing blacklist entries\n'
  sudo sed -i 's/^blacklist spi-bcm2708/#blacklist spi-bcm2708/' /etc/modprobe.d/raspi-blacklist.conf
  sudo sed -i 's/^blacklist i2c-bcm2708/#blacklist i2c-bcm2708/' /etc/modprobe.d/raspi-blacklist.conf
fi
printf '\n'
# Download app
printf '\033[0;36mDownloading service\033[0m\n'
sudo git clone "$serviceRepo" "$serviceHome"
printf "\n"
# Install dependencies
printf '\033[0;36mInstalling dependencies\033[0m\n'
sudo npm config set user 0
sudo npm --prefix "$serviceHome" install
sudo npm config set user "$UID"
printf "\n"
# Create local service
printf '\033[0;36mEnabling background service\033[0m\n'
# One here-doc instead of twenty 'echo | sudo tee -a' invocations; tee
# truncates, so the previous rm/touch dance is unnecessary.
# NOTE(review): systemd normally requires an absolute path in ExecStart;
# 'npm start' is preserved from the original but may need /usr/bin/npm.
sudo tee "$serviceFile" > /dev/null <<EOF
[Unit]
Description=Local server for controlling a pi oled display
After=network-online.target

[Service]
User=root
Type=simple
WorkingDirectory=$serviceHome
Environment="PORT=33301"
ExecStart=npm start
Restart=on-failure
RestartSec=10
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=$serviceName

[Install]
WantedBy=multi-user.target
EOF
# Bug fix: the unit used to be symlinked by hand under
# "$rootDir/etc/systemd/..." but $rootDir was never defined anywhere.
# Let systemd manage the multi-user.target.wants link itself.
sudo systemctl daemon-reload
sudo systemctl enable "$serviceName"
printf 'Service enabled\n'
sudo systemctl start "$serviceName"
printf 'Service started\n\n'
printf '\033[0;32mDone!\033[0m\n' | true |
a13b5d9f3b2a2cb9a619bb60a9c818dbe0f7a5f7 | Shell | cacalote/honeywall | /rpm-devel/roo-base/src/usr/local/bin/hwvarcheck | UTF-8 | 242 | 3.046875 | 3 | [] | no_license | #!/bin/sh
# Seed /hw/conf with any Hw* variables from the pristine config that do not
# already have a per-variable file.
grep '^Hw' /etc/honeywall.conf.orig | while read -r LINE; do
	# POSIX parameter expansion instead of two awk forks per line.
	VAR=${LINE%%=*}    # text before the first '='
	if [ ! -f "/hw/conf/${VAR}" ]; then
		# Everything after the first '='; unlike the old awk -F= '{print $2}',
		# this keeps values that themselves contain an '=' intact.
		VAL=${LINE#*=}
		echo "${VAL}" > "/hw/conf/${VAR}"
	fi
done
| true |
911542104d113752ab45596e2b1bafca7f625eb8 | Shell | alombarte/dotfiles | /packages/common/terraform.sh | UTF-8 | 666 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Manual installation of terraform (packages are way behind)
VERSION=0.12.16
PLATFORM="$(uname | tr '[:upper:]' '[:lower:]')"
echo "What version of terraform do you wish to install?"
echo "See https://releases.hashicorp.com/terraform/"
# -r added: without it read mangles backslashes in the input.
read -r -p "Version: [${VERSION}] " target
if test -z "$target"
then
    target=$VERSION
fi
read -r -p "Proceed with $target? (Y/N): " confirm && [[ $confirm == [yY] ]] || exit 0
# Name the artifact once instead of repeating the interpolation three times.
zipfile="terraform_${target}_${PLATFORM}_amd64.zip"
wget "https://releases.hashicorp.com/terraform/${target}/${zipfile}"
unzip "$zipfile"
rm "$zipfile"
sudo mv terraform /usr/local/bin
terraform -v
| true |
e45542ee5bb377ca903e369d8e2b3a23252dd760 | Shell | libdx/AigDownloader | /tools/classGenerator/class-generator | UTF-8 | 990 | 3.46875 | 3 | [] | no_license | #!/bin/bash
printHelp()
{
    # Usage line: script name followed by the seven expected placeholder values.
    printf '%s PREFIX prefix SUFFIX Suffix suffix ParentType PARENT_TYPE_MACROS\n' "$0"
}
catTemplate()
{
    # Render one template file into an output file by substituting the seven
    # placeholder tokens (PREFIX/prefix/SUFFIX/Suffix/suffix/ParentType/
    # PARENT_TYPE_MACROS) with the caller-supplied values.
    # $1 template path, $2 output path, $3..$9 replacement values.
    # NOTE(review): values are interpolated into sed scripts verbatim, so they
    # must not contain '/', '&' or '\'. Substitutions run in the order below;
    # a value that itself contains a later token (e.g. a PREFIX value that
    # contains "prefix") would be rewritten again - confirm inputs are safe.
    local _TEMPLATE_NAME=$1
    local _OUTPUT_NAME=$2
    local _PREFIX=$3
    local _prefix=$4
    local _SUFFIX=$5
    local _Suffix=$6
    local _suffix=$7
    local _ParentType=$8
    local _PARENT_TYPE_MACROS=$9
    # sed reads the template directly (no useless cat); paths are quoted so
    # template/output names containing spaces work.
    sed "s/PREFIX/$_PREFIX/g" "$_TEMPLATE_NAME" | \
    sed "s/prefix/$_prefix/g" | \
    sed "s/SUFFIX/$_SUFFIX/g" | \
    sed "s/Suffix/$_Suffix/g" | \
    sed "s/suffix/$_suffix/g" | \
    sed "s/ParentType/$_ParentType/g" | \
    sed "s/PARENT_TYPE_MACROS/$_PARENT_TYPE_MACROS/g" > \
    "$_OUTPUT_NAME"
}
# Require all seven substitution values; otherwise show usage and abort
# (this also puts the previously-unused printHelp function to work).
if [ $# -ne 7 ]; then
    printHelp
    exit 1
fi
_PREFIX=$1
_prefix=$2
_SUFFIX=$3
_Suffix=$4
_suffix=$5
_ParentType=$6
_PARENT_TYPE_MACROS=$7
# Quote every value so arguments containing spaces survive intact.
catTemplate "templates/class.h" generatedClass.h "$_PREFIX" "$_prefix" "$_SUFFIX" "$_Suffix" "$_suffix" "$_ParentType" "$_PARENT_TYPE_MACROS"
catTemplate "templates/class.c" generatedClass.c "$_PREFIX" "$_prefix" "$_SUFFIX" "$_Suffix" "$_suffix" "$_ParentType" "$_PARENT_TYPE_MACROS"
| true |
42c2ab42fd82684d6d7ccc036887f268c7a7c540 | Shell | CSIRO-enviro-informatics/loci-infrastructure | /packer/loci-integration-db/prov.sh | UTF-8 | 1,048 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Provision an Amazon Linux EC2 instance for the LOCI integration database:
# install docker + docker-compose, fetch the cache scripts and bring up the
# postgis/linksets stack. Runs as root from packer/cloud-init.
yum update -y
amazon-linux-extras install docker
yum -y install git
# docker-compose is installed as a static binary (not packaged on AL1/AL2).
curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/bin/docker-compose
chmod +x /usr/bin/docker-compose
systemctl enable docker
# Give the docker daemon a few seconds to come up before it is used below.
systemctl start docker && sleep 5
usermod -a -G docker ec2-user
# NOTE(review): the clone lands in the provisioner's current directory, but
# the cd below expects /home/ec2-user/loci-cache-scripts - confirm the
# working directory when this script is invoked.
git clone --single-branch --branch master https://github.com/CSIRO-enviro-informatics/loci-cache-scripts.git
# instance.sh was uploaded to /tmp by packer; per-boot scripts re-run on
# every boot of the resulting AMI.
mv /tmp/instance.sh /var/lib/cloud/scripts/per-boot/instance.sh
chmod +x /var/lib/cloud/scripts/per-boot/instance.sh
printenv
# Bring up postgis, then run the linksets builder once postgis accepts
# connections (wait-for-it.sh polls port 5432 before starting the build).
cd /home/ec2-user/loci-cache-scripts/docker/linksets/asgs2geofab/ && pwd && ls && source ../../../common/common.sh && docker-compose -f docker-compose.base.yml up -d postgis && docker-compose -f docker-compose.base.yml run linksets /bin/sh -c 'curl https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh -o ./wait-for-it.sh && chmod +x ./wait-for-it.sh && ./wait-for-it.sh postgis:5432 && sleep 10 && cd /app/mb2cc && python linksets_mb_cc_builder.py'
| true |
6633897151c96f99c06bd8cbadbfdaf02c38e679 | Shell | hm1365166/opencsw | /csw/mgar/pkg/postgrey/trunk/files/CSWpostgrey.init | UTF-8 | 1,384 | 3.34375 | 3 | [] | no_license | #!/sbin/sh
#RC_KLEV 0,1,2,S
#RC_SLEV 3
# rc-script for CSWpostgrey
# Peter Bonivart, 2010-02-01
# Solaris SysV init / SMF wrapper for the postgrey policy daemon.
# Runs under /sbin/sh, so 'echo "\c"' (suppress newline) is intentional.
# Source SMF includes
[ -r /lib/svc/share/smf_include.sh ] && . /lib/svc/share/smf_include.sh
# Source config file
[ -r /etc/opt/csw/postgrey ] && . /etc/opt/csw/postgrey
SERVICE=postgrey
PATH=$PATH:/usr/bin:/opt/csw/bin
# Use /etc/opt/csw/postgrey to configure the options instead of editing this file
#OPTIONS="--inet=10023 -d --whitelist-clients=/etc/opt/csw/postfix/postgrey_whitelist_clients --whitelist-recipients=/etc/opt/csw/postfix/postgrey_whitelist_recipients --pidfile=/var/opt/csw/postgrey/postgrey.pid"
ZONE= # used for initialization, do not change
case "$1" in
start)
	echo "Starting $SERVICE ..."
	/opt/csw/sbin/postgrey $OPTIONS
	;;
stop)
	echo "Stopping $SERVICE ..."
	# In the global zone, restrict pkill to global-zone processes so we do
	# not kill postgrey instances running inside non-global zones.
	if [ -x /usr/bin/zonename ]; then
		if [ "`/usr/bin/zonename`" = "global" ]; then
			ZONE="-z global"
		fi
	fi
	pkill $ZONE -x $SERVICE
	;;
restart)
	echo "Restarting $SERVICE ... "
	$0 stop
	# Poll until the daemon has actually exited before starting it again,
	# printing a dot per second as progress feedback.
	echo "Waiting for $SERVICE to stop: \c"
	while ( pgrep $SERVICE > /dev/null )
	do
		echo ".\c"
		sleep 1
	done
	echo
	sleep 1
	$0 start
	;;
*)
	echo "Usage: `basename $0` { start | stop | restart}"
	exit 1
esac
exit 0
| true |
978e3c73b30ec61fa5bec0f7156d5bccd969d96a | Shell | 1337hunter/services | /srcs/ftps/ftpstart.sh | UTF-8 | 465 | 2.640625 | 3 | [] | no_license | telegraf &
# Container entrypoint tail: start vsftpd (telegraf was started on the
# preceding line) and watchdog both processes; exit when either dies so the
# container goes down with them.
chown -R vsftp:vsftp /var/lib/ftp
vsftpd /etc/vsftpd/vsftpd.conf &
sleep 3
while true
do
	# Bug fix: 'ps axu | pgrep ...' piped ps into pgrep, but pgrep ignores
	# stdin and scans the process table itself - the ps fork was pure waste.
	if [[ -z "$(pgrep "telegraf")" ]];
	then
		echo "telegraf process is dead"
		sleep 5
		echo "container is going down too :("
		break
	fi
	if [[ -z "$(pgrep "vsftpd")" ]];
	then
		echo "vsftpd process is dead"
		echo "container is going down too :("
		break
	fi
	# Typo fix in the log message ("conteiner" -> "container").
	echo "All services of container is up! Keep going :D"
	sleep 5
done
| true |
cfc2e0462df20703250cdc8ed862cc3d746095d0 | Shell | pfritzgerald/nusassifi | /test.sh | UTF-8 | 3,731 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Driver for a full SASSIFI error-injection campaign: build the app, profile
# it, prepare the injectors, generate the injection list, run the campaign
# and parse the results. Quoting and status handling fixed; logic unchanged.
#printf "Steps 1, 2, 3, 4(b), 4(c), and 7(a) mentioned in \"Setting up and running SASSIFI\" section in sassi-user-guide should be completed before proceeding further. Are these steps completed? [y/n]: "
#read answer
#if [ "$answer" != "y" ]; then
#	printf "\nCannot proceed further\n"
#	exit -1;
#fi
printf "Which mode do you want to run SASSIFI in? [inst/rf] (default is inst): "
read -r inst_rf
if [ "$inst_rf" == "inst" ] || [ "$inst_rf" == "rf" ] ; then
	printf "Okay, $inst_rf\n"
else
	inst_rf="inst"
	printf "Proceeding with $inst_rf\n"
fi
printf "\nEnter directory for your application: "
read -r app_directory
printf " Directory is $app_directory"
#printf "\nEnter application name: "
#read app
#printf "App is: $app"
set -x
################################################
# Step 1: Set environment variables
################################################
printf "\nStep 1: Setting environment variables"
if [ "$(hostname -s)" == "kepler1" ]; then
	export SASSIFI_HOME=/home/previlon/nusassifi/
	export SASSI_SRC=/home/previlon/SASSI/
	export INST_LIB_DIR=$SASSI_SRC/instlibs/lib/
	export CCDIR=/usr/bin/
	export CUDA_BASE_DIR=/home/previlon/sassi7/
	export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CUDA_BASE_DIR/lib64/:$CUDA_BASE_DIR/extras/CUPTI/lib64/
else
	printf "\nAre SASSI_SRC, INST_LIB_DIR, CCDIR, CUDA_BASE_DIR, and LD_LIBRARY_PATH environment variables set?\n"
	read -r answer
	if [ "$answer" != "y" ]; then
		printf "\nCannot proceed further\n"
		exit -1;
	fi
fi
################################################
# Step 4.a: Build the app without instrumentation.
# Collect golden stdout and stderr files.
################################################
printf "\nStep 4.1: Collect golden stdout.txt and stderr.txt files"
# Quoted so paths containing spaces work.
cd "$app_directory"
if [ $? -ne 0 ]; then
	echo "Problem with app directory"
	exit -1
fi
make 2> stderr.txt
make golden
# Bug fix: '$?' inside the echo below used to be the exit status of the
# '[' test (always 0), not of make. Capture the status first.
rc=$?
if [ $rc -ne 0 ]; then
	echo "Return code was not zero: $rc"
	exit -1;
fi
# process the stderr.txt file created during compilation to extract number of
# registers allocated per kernel
#python $SASSIFI_HOME/scripts/process_kernel_regcount.py $app sm_35 stderr.txt
################################################
# Step 5: Build the app for profiling and
# collect the instruction profile
################################################
printf "\nStep 5: Profile the application"
make OPTION=profiler
make test
rc=$?
if [ $rc -ne 0 ]; then
	echo "Return code was not zero: $rc"
	exit -1;
fi
################################################
# Step 6: Build the app for error injectors
################################################
printf "\nStep 6: Prepare application for error injection"
make OPTION=inst_injector
#make OPTION=rf_injector
################################################
# Step 7.b: Generate injection list for the
# selected error injection model
################################################
printf "\nStep 7.2: Generate injection list for instruction-level error injections"
# 'cd -' returns to the directory the script was launched from, where the
# scripts/ subdirectory is expected to live.
cd -
cd scripts/
python generate_injection_list.py $inst_rf
rc=$?
if [ $rc -ne 0 ]; then
	echo "Return code was not zero: $rc"
	exit -1;
fi
################################################
# Step 8: Run the error injection campaign
################################################
printf "\nStep 8: Run the error injection campaign"
python run_injections.py $inst_rf standalone # to run the injection campaign on a single machine with single gpu
#python run_injections.py $inst_rf multigpu # to run the injection campaign on a single machine with multiple gpus.
################################################
# Step 9: Parse the results
################################################
printf "\nStep 9: Parse results"
python parse_results.py $inst_rf
| true |
ee35aefec451cebd96f933ebe2f1b5d45483728c | Shell | silklabs/silk | /bsp-gonk/vendor/silk/init/silk-setup-dev | UTF-8 | 699 | 3.421875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/system/bin/sh
# Put an Android/gonk device into a developer-friendly state: adbd as root,
# dm-verity disabled, /system remounted read-write.
# Only meaningful on debuggable (eng/userdebug) builds.
if [ "$(getprop ro.debuggable)" != "1" ]; then
  echo Device is not debuggable
  exit 1
fi
# Restart adbd with root privileges if it is not already running as root.
if [ "$(getprop service.adb.root)" != "1" ]; then
  echo Restarting adb as root
  (
    set -x
    setprop service.adb.root 1
    stop adbd
    start adbd
  )
fi
# If dm-verity is active on /system, try to disable it; this requires an
# on-device adb binary and a reboot for the change to take effect.
if [ "$(getprop partition.system.verified)" != "" ]; then
  if [ ! -x /system/bin/adb ]; then
    # M+ doesn't include adb on device, so currently there's no way to disable
    # verity here.
    echo "/system/bin/adb not found. Unable to disable verity."
  else
    set -x
    echo Disabling verity
    adb wait-for-device
    adb disable-verity
    # disable-verity only applies after a reboot; the script re-runs from a
    # clean state afterwards.
    reboot
  fi
fi
echo Remounting /system as rw
mount -o remount,rw /system
| true |
3da98e9e288f9532ea3e582390deaf56fb3baea7 | Shell | annusingh100995/Lhx2_Data_Analysis | /lhx2.sh | UTF-8 | 6,259 | 2.546875 | 3 | [] | no_license |
#!/bin/bash
#############TOPHAT############
myarr2=($(ls -l|cut -c 46-))
for j in "${myarr2[@]}"
do
echo ./$j
cd ./$j
ls
myarr=($(find ./ -name "*.fq"|awk 'BEGIN {FS ="/"}{print$NF}'| cut -c 1-))
echo ${myarr[1]} ${myarr[2]}
printf "tophat2 -p 4 /home/darwin/Lhx2_RNASeq/gene/gencode.vM14.chr_patch_hapl_scaff.annotation.gtf -o /home/darwin/Lhx2_RNASeq/E12.5/E12.5_cortex/E12.5_Lhx2_mut_ctx_rep1_ACAGTG_thout /home/darwin/Lhx2_RNASeq/genome/mm10_indexed_file ./${myarr[1]} ./${myarr[2]}"
tophat -p 4 -G /home/darwin/Lhx2_RNASeq/gene/gencode.vM14.chr_patch_hapl_scaff.annotation.gtf -o /home/darwin/Lhx2_RNASeq/E12.5/E12.5_cortex/thout/$i /home/darwin/Lhx2_RNASeq/genome/mm10_indexed_file /home/darwin/
cd ../
###############CUFFLINKS##################3
myarr2=($(ls -l|cut -c 46-))
for j in "${myarr2[@]}"
do
echo ./$j
cd ./$j
ls
myarr=($(find ./ -name "*.bam"|awk 'BEGIN {FS ="/"}{print$NF}'| cut -c 1-))
echo ${myarr[1]} ${myarr[2]}
printf "IN DIRECTORY $j"
cufflinks -p 4 -o clout_$j ./${myarr[2]}
cd ../
done
######################RENAME STUPID FILE NAME ERROR###########################3
myarr2=($(ls -l|cut -c 46-))
for j in "${myarr2[@]}"
do
echo ./$j
cd ./$j
prename 's/clout_thout/clout/' clout_thout*/
cd ../
done
########################ASSEMBLIES AND CUFFMERGE######################################
myarr2=($(find ./ -name "*.txt"|awk 'BEGIN {FS ="/"}{print$NF}'|rev| cut -c 5-|rev))|
for j in "${myarr2[@]}"
do
p=$j|rev| cut -c 5-|rev
cuffmerge -g /home/darwin/Lhx2_RNASeq/gene/gencode.vM14.chr_patch_hapl_scaff.annotation.gtf -s /home/darwin/Lhx2_RNASeq/genome/GRCm38.p5.genome.fa -p 4 -o ./merged_files/mergerd_$p ./$j.txt
done
##########################CUFFDIFF########################################################
cuffdiff -o diff_out_sampledata -b ../genome/GRCm38.p5.genome.fa -p 1 -L CONTROL,RAS -u ./merged_asm/merged.gtf ./CONTROL_REP1_thout/accepted_hits.bam,CONTROL_REP2_thout/accepted_hits.bam,CONTROL_REP3_thout/accepted_hits.bam ./RAS_REP1_thout/accepted_hits.bam,RAS_REP2_thout/accepted_hits.bam,RAS_REP3_thout/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E12.5_E15.5_WT_ctx -b ./genome/GRCm38.p5.genome.fa -p 4 -L E12.5_WT_ctx,E15.5_WT_ctx -u ./assemblies/merged_files/mergerd_amb_E12.5_E15.5_WT_ctx/merged.gtf ./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep1_CGATGT/accepted_hits.bam,./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep2_TTAGGC/accepted_hits.bam ./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep1/accepted_hits.bam,./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep2/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E12.5_E15.5_WT_HC -b ./genome/GRCm38.p5.genome.fa -p 4 -L E12.5_WT_HC,E15.5_WT_HC -u ./assemblies/merged_files/mergerd_amb_E12.5_E15.5_WT_HC/merged.gtf ./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_ACTTGA/accepted_hits.bam,./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_CATGATC/accepted_hits.bam ./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep1/accepted_hits.bam,./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep2/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E12.5_WT_ctx_HC -b ./genome/GRCm38.p5.genome.fa -p 4 -L E12.5_WT_ctx,12.5_WT_HC ./assemblies/merged_files/mergerd_amb_E12.5_WT_ctx_HC/merged.gtf ./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep1_CGATGT/accepted_hits.bam,./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep2_TTAGGC/accepted_hits.bam ./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_ACTTGA/accepted_hits.bam,./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_CATGATC/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E12.5_WT_Lhx2_mut_ctx -b ./genome/GRCm38.p5.genome.fa -p 4 -L E12.5_WT_ctx,E12.5_Lhx2_mut_ctx ./assemblies/merged_files/merged_amb_E12.5_WT_Lhx2_mut_ctx/merged.gtf ./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep1_CGATGT/accepted_hits.bam,./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_WT_ctx_rep2_TTAGGC/accepted_hits.bam ./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_Lhx2_mut_ctx_rep1_ACAGTG/accepted_hits.bam,./E12.5/E12.5_cortex/thout_E12.5_ctx/thout_E12.5_Lhx2_mut_ctx_rep2_GCCAAT/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E12.5_WT_lhx2_mut_HC -b ./genome/GRCm38.p5.genome.fa -p 4 -L E12.5_WT_HC,E12.5_Lhx2_mut_HC ./assemblies/merged_files/mergerd_amb_E12.5_WT_Lhx2_mut_HC/merged.gtf ./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_ACTTGA/accepted_hits.bam,./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_WT_HC_rep1_CATGATC/accepted_hits.bam ./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_Lhx2_mut_HC_rep1_GGCTAC/accepted_hits.bam,./E12.5/E12.5_HC/thout_E12.5_HC/thout_E12.5_Lhx2_mut_HC_rep2_CTTGTA/accepted_hits.bam
cuffdiff -o ./diff_out/diff_out_E15.5_WT_ctx_HC -b ./genome/GRCm38.p5.genome.fa -p 4 -L E15.5_WT_ctx,E15.5_WT_HC ./assemblies/merged_files/mergerd_amb_E15.5_WT_ctx_HC/merged.gtf ./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep1/accepted_hits.bam,./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep2/accepted_hits.bam ./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep1/accepted_hits.bam,./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep2/accepted_hits.bam
cuffdiff -o diff_out/diff_out_E15.5_WT_Lhx2_mut_ctx -b ./genome/GRCm38.p5.genome.fa -p 4 -L E15.5_WT_ctx,E15.5_Lhx2_mut_ctx ./assemblies/merged_files/mergerd_amb_E15.5_WT_Lhx2_mut_ctx/merged.gtf ./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep1/accepted_hits.bam,./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_WT_ctx_rep2/accepted_hits.bam ./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_Lhx2_mut_ctx_rep1/accepted_hits.bam,./E15.5/E15.5_cortex/thout_E15.5_ctx/thout_E15.5_Lhx2_mut_ctx_rep2/accepted_hits.bam
cuffdiff -o diff_out/diff_out_E15.5_WT_Lhx2_mut_HC -b ./genome/GRCm38.p5.genome.fa -p 4 -L E15.5_WT_HC,E15.5_Lhx2_mut_HC ./assemblies/merged_files/mergerd_amb_E15.5_WT_Lhx2_mut_HC/merged.gtf ./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep1/accepted_hits.bam,./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_WT_HC_rep2/accepted_hits.bam ./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_Lhx2_mut_HC_rep1/accepted_hits.bam,./E15.5/E15.5_HC/thout_E15.5_HC/thout_E15.5_Lhx2_mut_HC_rep2/accepted_hits.bam
| true |
03f099d8612717efea16cca581cb639fa4a53ffa | Shell | bridgecrew-perf7/devops-automate-init-deployment | /automate.sh | UTF-8 | 1,285 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# permissions
if [ "$(whoami)" != "root" ]; then
	echo "Root privileges are required to run this, try running with sudo..."
	exit 2
fi
echo "> set variables"
# -r on every read so backslashes in the input are kept literal.
echo "Enter repo url"
read -r REPO
#REPO=git@gitlab.com:bigio/mipi/web.git
echo "Enter repo branch"
read -r REPO_BRANCH
#REPO_BRANCH=dev
echo "Enter domain name"
read -r DOMAIN_NAME
echo "Enter directory path"
read -r DIRECTORY
echo "Enter directory "
#DIRECTORY=/home/gakeslab/prod/cms/public_html/
read -r DIRECTORY_HOST
echo "Enter Apache Config file name"
read -r APACHE_CONF_NAME
echo "Enter Log Error file name"
read -r LOG_ERROR_NAME
echo "Enter Log Custom file name"
read -r LOG_CUSTOM_NAME
echo "> make directory and enter it"
if [ ! -d "$DIRECTORY" ]; then
	mkdir -p "$DIRECTORY"
fi
echo "> setup git "
# Abort if the cd fails; otherwise git would initialise the wrong directory.
cd "$DIRECTORY" || exit 1
git init
git remote add origin "$REPO"
git pull origin
git checkout "$REPO_BRANCH"
chown -R www-data:www-data "$DIRECTORY"
APACHE_CONF_PATH=/etc/apache2/sites-available/$APACHE_CONF_NAME.conf
cp /root/automate/apache-vhost.conf "$APACHE_CONF_PATH"
# Bug fix: the sed scripts were single-quoted, so the literal strings
# '$DOMAIN_NAME' etc. were written into the vhost instead of the values the
# user entered. Double quotes expand the variables, and '|' is used as the
# sed delimiter because DIRECTORY_HOST is a path containing '/'.
sed -i "s|DOMAIN_NAME|$DOMAIN_NAME|g" "$APACHE_CONF_PATH"
sed -i "s|DIRECTORY_HOST|$DIRECTORY_HOST|g" "$APACHE_CONF_PATH"
sed -i "s|LOG_ERROR_NAME|$LOG_ERROR_NAME|g" "$APACHE_CONF_PATH"
sed -i "s|LOG_CUSTOM_NAME|$LOG_CUSTOM_NAME|g" "$APACHE_CONF_PATH"
| true |
7ad57dd37fb5183ef1b82ed0cdd2c13391254c14 | Shell | JinsYin/ops | /shell/kubernetes/install-kubectl.sh | UTF-8 | 1,692 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Author: JinsYin <github.com/jinsyin>
# Installs kubectl/kubefed from the official release tarball (yum-based
# systems) and wires up bash completion. Must be run as root.
set -e
# Default client version; fn::install_k8s_client accepts an override as $1.
K8S_VERSION="1.8.2"
fn::check_permission()
{
  # Abort unless the effective user is root (uid 0).
  [ "$(id -u)" -eq 0 ] || {
    echo "You must run as root user or through the sudo command."
    exit 1
  }
}
fn::command_exists()
{
  # True if every given name resolves to a command in the current PATH.
  # "$@" is quoted so names containing spaces cannot be word-split.
  command -v "$@" > /dev/null 2>&1
}
fn::package_exists()
{
  # True if every given rpm package is installed ("$@" quoted against
  # word-splitting; output and errors are silenced).
  rpm -q "$@" > /dev/null 2>&1
}
# Usage: fn::install_package wget net-tools
fn::install_package()
{
  # Install each named package via yum unless it is already present.
  for package in "$@"; do
    if ! fn::package_exists "$package"; then
      yum install -y "$package"
    fi
  done
}
# kubectl kubefed
fn::install_k8s_client()
{
  # Download the Kubernetes client tarball and install kubectl/kubefed into
  # /usr/bin, skipping components that are already on PATH.
  # $1 - optional version override (default: $K8S_VERSION).
  local version=v${1:-$K8S_VERSION}
  local components=(kubectl kubefed)
  fn::install_package wget
  for component in "${components[@]}"; do
    if ! fn::command_exists "${component}"; then
      rm -rf /tmp/k8s-client* && mkdir -p /tmp/k8s-client
      wget -O /tmp/k8s-client.tar.gz https://dl.k8s.io/${version}/kubernetes-client-linux-amd64.tar.gz
      tar -xzf /tmp/k8s-client.tar.gz -C /tmp/k8s-client --strip-components=1
      # Bug fix: 'chmod a+x /usr/bin/' made the *directory* executable
      # instead of the freshly installed binaries.
      mv /tmp/k8s-client/client/bin/{kubectl,kubefed} /usr/bin/ && chmod a+x /usr/bin/kubectl /usr/bin/kubefed
      rm -rf /tmp/k8s-client*
    fi
  done
}
fn::enable_autocompletion()
{
  # Install a kubectl completion snippet and make sure the completion chain
  # is sourced: kubectl.bash <- /etc/bash_completion <- ~/.bashrc.
  mkdir -p /etc/bash_completion.d
  echo "source <(kubectl completion bash)" > /etc/bash_completion.d/kubectl.bash
  # 2>/dev/null: /etc/bash_completion may not exist yet on a fresh system.
  if [ -z "$(grep '^. /etc/bash_completion.d/kubectl.bash' /etc/bash_completion 2>/dev/null)" ]; then
    echo ". /etc/bash_completion.d/kubectl.bash" >> /etc/bash_completion
  fi
  if [ -z "$(grep '^. /etc/bash_completion' ~/.bashrc 2>/dev/null)" ]; then
    # Bug fix: this line used to be appended to /etc/bash_completion (the
    # very file it sources, causing infinite self-sourcing) instead of
    # ~/.bashrc, which the preceding grep actually checks.
    echo ". /etc/bash_completion" >> ~/.bashrc
    source ~/.bashrc
  fi
}
main()
{
  # Orchestrate the install: root check first, then the client binaries,
  # then bash completion (which requires kubectl to exist).
  fn::check_permission
  fn::install_k8s_client
  fn::enable_autocompletion
}
main $@ | true |
32f96ba931b92481d2f63ee225e330e1fa028381 | Shell | dbkreling/commonFiles | /bash_aliases | UTF-8 | 4,082 | 3.140625 | 3 | [] | no_license | alias c='clear'
# Single-letter shortcuts for everyday commands.
alias g='grep'
alias l='ls'
alias p='ping'
alias x='exit'
# SSH shortcuts for frequently used lab machines.
alias hmc-eio='ssh hscroot@hmc-eio.austin.ibm.com'
alias jupiter='ssh root@9.3.190.16'
alias katyFVT='ssh root@9.3.189.58'
function compile ()
{
    # Compile a Java source file with the IBM SDK's javac.
    # $1 - path to the .java file (resolved from the javac directory below,
    #      so relative paths must be relative to it or absolute).
    cd /opt/ibm/ibm-sdk-lop/ibm-java-60/bin/ || return 1;  # bail out if the SDK is missing
    ./javac "$1"; # quoted: the path may contain spaces
    cd -;
    ls;
}
function pomodoro ()
{
    # Pomodoro timer: desktop notification at start, sleep for the given
    # number of minutes (default 25), notification when time is up.
    minutes=${1:-25}
    notify-send --icon=appointment 'Pomodoro' "Start! \n($minutes minutes)"
    sleep $(( minutes * 60 ))
    notify-send --icon=appointment-missed 'Pomodoro' "Stop. \n($minutes minutes)"
}
function rm-gsa ()
{
    # Delete a file or directory on a GSA cell by rsyncing an empty local
    # directory over its parent with --delete.
    # Usage: rm-gsa [-f] <http(s)-url | path> [cell]   (-f skips confirmation)
    # Returns rsync's exit status.
    local force='no';
    if [ "$1" = '-f' ]; then
        force='yes';
        shift;
    fi;
    local gsa_url="$1";
    if [ -z "$gsa_url" ]; then
        echo "[ Error ] What is the file/dir's URL?";
        return 1;
    fi;
    local gsa_cell='';
    local gsa_path='';
    # Accept either a full http(s) URL or a bare path plus optional cell.
    # -q added: the matching line itself used to be printed to the terminal.
    if echo "$gsa_url" | grep -q -E 'https?://'; then
        gsa_cell=$(echo "$gsa_url" | sed 's/^https\?:\/\/\([^/]\+\).\+/\1/');
        gsa_path=$(echo "$gsa_url" | sed 's/^https\?:\/\/[^/]\+\(.\+\)/\1/');
    else
        gsa_cell=${2:-'ausgsa.ibm.com'};
        gsa_path=$1;
    fi;
    gsa_url="$gsa_cell:$gsa_path";
    echo "Removing '$gsa_url'";
    if [ "$force" != 'yes' ]; then
        local confirm;
        read -p "Go ahead? [y/N] " confirm;
        if [ "$confirm" != "y" ]; then
            echo "Aborted.";
            return 2;
        fi;
    fi;
    local empty_dir rc;
    empty_dir=$(mktemp --directory --tmpdir gsa.empty_dir.XXX);
    # Sync only the target name out of an empty dir => remote gets deleted.
    rsync -rvh --progress --delete --include="$(basename "$gsa_url")**" --exclude='**' "$empty_dir"/ "$(dirname "$gsa_url")";
    rc=$?;
    # Bug fix: this used to be 'rm -rf $empty_folder' (undefined variable),
    # so the temp directory leaked on every invocation.
    rm -rf "$empty_dir";
    return $rc
}
function s ()
{
    # SSH helper: 's host [ssh args...]' - prepends root@ when the host has
    # no explicit user part, echoes the command, then runs it.
    local host="$1";
    shift;
    local args="$@";
    local user='';
    # Bug fix: the original '2>&1 > /dev/null' sent grep's stderr to the
    # terminal; '> /dev/null 2>&1' silences both streams.
    echo "$host" | grep '@' > /dev/null 2>&1 || user='root@';
    local command="ssh ${user}${host} $args";
    echo "Command: $command";
    eval $command
}
function gsa ()
{
    # Open an sftp session to a GSA cell.
    # $1 - remote path (default /projects/p/perfwklds/)
    # $2 - cell host  (default ausgsa.ibm.com)
    local target_path target_host cmd
    target_path=${1:-'/projects/p/perfwklds/'}
    target_host=${2:-'ausgsa.ibm.com'}
    cmd="sftp $target_host:$target_path"
    echo "Command: $cmd"
    eval $cmd
}
function cl ()
{
    # Change directory (default: ~) and immediately list it.
    # $2, if given, is passed through to ls. eval is kept so tilde and
    # glob characters inside the arguments still expand.
    local dest listing
    dest=${1:-'~'}
    listing=${2:-''}
    eval "cd $dest"
    eval "ls $listing"
}
function bso-auth ()
{
    # Authenticate against the BSO firewall by POSTing intranet credentials,
    # then print the first lines of the (HTML-stripped) response.
    #local host="${1:-9.5.8.73}"; # begnap1.rch.stglabs.ibm.com - SDK machine
    local host="${1:-9.3.191.8}"; #hmc-eio.austin.ibm.com
    local ibm_mail="dbkreling@br.ibm.com";
    local ibm_passwd="";
    read -s -p "[BSO] IBM Intranet Password: " ibm_passwd;
    echo;
    # Security fix: pass the credentials via a 0600 temp file (--post-file)
    # instead of --post-data, so the password is not visible to every user
    # on the machine in the process list (ps/argv).
    local post_file rc;
    post_file=$(mktemp) || return 1;
    chmod 600 "$post_file";
    printf 'au_pxytimetag=1396696820&uname=%s&pwd=%s&ok=OK' "$ibm_mail" "$ibm_passwd" > "$post_file";
    # NOTE(review): --no-check-certificate disables TLS verification; kept
    # because the BSO endpoint apparently uses an internal certificate.
    wget --no-check-certificate https://"$host":443/ --post-file="$post_file" -O - 2> /dev/null | sed -e 's:.*<H1>::g' -e 's:</H1>.*::g' -e 's:<[^>]*>:\n:g' | head -n 3;
    rc=$?;
    rm -f "$post_file";
    return $rc
}
function jbb2012_scores() {
    # Fetch a SPECjbb2012 result page from GSA and print "maxIR<TAB>criticalIR<TAB>url".
    # $1 - http(s) URL of the results directory containing index.html.
    local gsa_url=$1
    if [ -z "$gsa_url" ]; then
        echo 'Error. No URL.'
        return 1
    fi
    # Split the URL into cell host and remote path for scp.
    local gsa_cell=$(echo $gsa_url | sed 's/^https\?:\/\/\([^/]\+\).\+/\1/');
    local gsa_path=$(echo $gsa_url | sed 's/^https\?:\/\/[^/]\+\(.\+\)/\1/');
    local temp_file=$(mktemp --tmpdir jbb2012.scores.XXX)
    scp $gsa_cell:$gsa_path/index.html $temp_file
    # Rewrite the metric table cell into a greppable marker line carrying the
    # two IR numbers, then keep only those marker lines.
    # NOTE(review): the sed pattern assumes one leading space before <td> in
    # the report HTML - confirm against current report formatting.
    local scores=$(sed 's|^ <td class="metricCell">max IR = \([0-9]\+\) ops/sec; critical IR = \([0-9]\+\) ops/sec</td>|SPECjbb2012_SCORES \1\t\2|' $temp_file | grep SPECjbb2012_SCORES)
    # Strip the marker word, append the source URL.
    echo -e "${scores#* }\t$gsa_url"
}
function blinkWeechat(){
    # Make the ThinkPad keyboard light world-writable so weechat can blink
    # it on incoming messages.
    sudo chmod 666 /sys/class/leds/tpacpi\:\:thinklight/brightness;
    # Bug fix: '[ $?=="0" ]' tested one non-empty string ("N==0") and was
    # therefore always true, so the failure branch could never run.
    if [ $? -eq 0 ]; then
        echo;
        echo 'Success! Light will now blink on all incoming weechat messages.'
    else
        echo;
        echo 'Invalid Password. Try again.'
    fi
}
function monitor(){
    # Interactively toggle the external VGA monitor via xrandr.
    echo "Turn on: 1 | Turn off: 2"
    read -r option
    # Quoted: with the original unquoted test, pressing Enter on an empty
    # line expanded to '[ = 1 ]' and made the test fail with a syntax error.
    if [ "$option" = 1 ];
        then xrandr --output VGA1 --auto --left-of LVDS1
    elif [ "$option" = 2 ];
        then xrandr --output VGA1 --off
    fi
}
function call(){
    # Case-insensitive lookup in the conference-call passcode list.
    # '--' ends option parsing so a search term starting with '-' works;
    # quoting keeps multi-word terms as one pattern.
    grep -i -- "$1" /home/dbkreling/Documents/conferenceCallsPasscodes
}
function mi (){
    # Look up a machine in the inventory file with surrounding context.
    # NOTE(review): GNU grep parses '-a10' as --text plus 10 lines of
    # context on both sides; '-A10' (after-context only) may have been the
    # intent - confirm.
    grep -i -a10 -- "$1" /home/dbkreling/Documents/Power_IO/Info/machines.info
}
function rm-host () {
    # Drop a host's entry from known_hosts (e.g. after a reinstall).
    # Generalized: $HOME instead of the hard-coded /home/dbkreling path, and
    # "$1" quoted so hostnames are passed through intact.
    ssh-keygen -f "$HOME/.ssh/known_hosts" -R "$1"
}
| true |
e8ccd84330a6701b10b692bfa0f8b007ceacad1d | Shell | hxlhxl/koa2-react-redux-webpack-boilerplate | /scripts/release.sh | UTF-8 | 237 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Production release build; pass "skip_npm" as $1 to skip 'npm install'.
appPath=$(pwd)                      # $() instead of legacy backticks
scriptPath="${appPath}/scripts"     # NOTE(review): currently unused - confirm before removing
skip_npm=$1
if [ "$skip_npm" != "skip_npm" ];then
	npm install
fi
cross-env NODE_ENV=production webpack --colors --display-error-details --config ${appPath}/webpack/prod.js | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.