blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b0ad39f01fdf2debb8385bc8df64aabe0e6f5714 | Shell | cuijialang/vtr | /vtr_flow/tasks/run_all_modified.sh | UTF-8 | 281 | 3.015625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"MIT-Modern-Variant"
] | permissive | # find all tasks with a config
tasks=$(find . -name "regression_*" -prune -o -name "config" -print)
for task in $tasks
do
striped="${task%/config}"
striped="${striped#./}"
echo "${striped}"
~/vtr-verilog-to-routing/vtr_flow/scripts/run_vtr_task.pl $striped
done
| true |
f1e5a9e1ad2d96777d33e106a0b89345e09a689b | Shell | msm1723/playground | /bash/445_posit-params3.sh | UTF-8 | 646 | 4 | 4 | [] | no_license | #!/usr/bin/env bash
## 445_posit-params3.sh
## posit-params3: script to demonstrate $* and $@
print_params () {
echo "\$1 = $1"; echo "\$2 = $2" # charecter ; helps to write two commands in one line
echo "\$3 = $3"
echo "\$4 = $4"
}
pass_params () {
echo -e "\n" '$* :'; print_params $*
echo -e "\n" '"$*" :'; print_params "$*"
echo -e "\n" '$@ :' # -e tels echo to interprit \ as special simble so new line appears before printing
print_params $@
echo -e "\n" '"$@" :'
print_params "$@" # "$@" most usefull because keeps parameter integrity
}
echo "pass_params \"word\" \"words with spaces\""
pass_params "word" "words with spaces"
| true |
513b3d0831f1d086938686e655683ad52904e8c0 | Shell | dynamy/ocp18 | /environment/new.sh | UTF-8 | 2,632 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# move to the ec2-user directory and update yum
cd /home/ec2-user
sudo yum update -y
sudo yum install curl-devel expat-devel gettext-devel openssl-devel zlib-devel socat wget -y
# This downloads the OneAgent installer from your tenant
wget --no-check-certificate -O Dynatrace-OneAgent-Linux.sh "https://[YourDynatraceTenant]/api/v1/deployment/installer/agent/unix/default/latest?Api-Token=[YourToken]&arch=x86&flavor=default"
# Installs One Agent
sudo /bin/sh Dynatrace-OneAgent-Linux.sh APP_LOG_CONTENT_ACCESS=1
#Install and start docker
sudo tee /etc/yum.repos.d/docker.repo <<-EOF
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum -y install docker-engine
sudo service docker start
# Install docker: prepare for OpenShift
sudo sed -i 's/ExecStart=\(.*\)/ExecStart=\1 --insecure-registry 172.30.0.0\/16/' /lib/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl restart docker
# Install oc CLI for OpenShift Origin
wget -q -O oc-linux.tar.gz https://github.com/openshift/origin/releases/download/v3.7.2/openshift-origin-client-tools-v3.7.2-282e43f-linux-64bit.tar.gz
tar xvzf oc-linux.tar.gz
mv openshift-origin-client-tools-v3.7.2-282e43f-linux-64bit/oc .
sudo chown root:root oc
sudo mv oc /usr/bin
sudo gpasswd -a ec2-user docker
#Clone + Enter repo
git clone https://github.com/dynamy/ocp18.git
mv /home/ec2-user/ocp18/start.sh /home/ec2-user/
chmod 755 /home/ec2-user/start.sh
export OS_PUBLIC_IP=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
export OS_PUBLIC_HOSTNAME=`curl http://169.254.169.254/latest/meta-data/public-hostname`
export OS_PULL_DOCKER_IMAGES="true"
# SET env var
OS_PUBLIC_HOSTNAME="${OS_PUBLIC_HOSTNAME:-$OS_PUBLIC_IP}"
# Run OpenShift
cd /home/ec2-user
oc cluster up --public-hostname="${OS_PUBLIC_HOSTNAME}" --routing-suffix="${OS_PUBLIC_IP}.nip.io"
sleep 3
sudo cp /var/lib/origin/openshift.local.config/master/admin.kubeconfig /home/ec2-user/.kube/config
sudo chown "ec2-user:ec2-user" /home/ec2-user/.kube/config
# Add cluster-admin role to user admin
#oc login https://ec2-52-221-223-60.ap-southeast-1.compute.amazonaws.com:8443 -u system:admin
export loginserver=`echo "https://${OS_PUBLIC_HOSTNAME}:8443"`
oc login "${loginserver}" -u system:admin
oc adm policy add-cluster-role-to-user cluster-admin admin
# Add dynatrace as privileged user to the openshift-infra project
oc project openshift-infra
oc create serviceaccount dynatrace
oc adm policy add-scc-to-user privileged -z dynatrace
su - ec2-user -c "./start.sh"
| true |
62330360abb59013ca962237f9a491d1ff234e69 | Shell | 5h4thru/bash_programming | /getopts_shopt_fgkill.sh | UTF-8 | 1,923 | 3.890625 | 4 | [] | no_license | #!/bin/bash
#########################
# Getopts Command
#########################
echo -e "\n##### Getopts Command #####"
# getopts is a part of internal_commands.sh script
while getopts :dm option
do
case $option in
d) d_option=1;;
m) m_option=1;;
*) echo "Usage: -dm"
esac
done
day=`date | awk '{print $1 " " $3}'`
if [ ! -z $d_option ]
then
echo "Date is $day"
fi
month=`date | awk '{print $2}'`
if [ ! -z $m_option ]
then
echo "Month is $month"
fi
shift $(($OPTIND - 1)) # for some reason even if I comment this line the script works
#########################
# Shopt Type Jobs Disown
#########################
echo -e "\n##### Shopt Type Jobs Disown #####"
echo "'shopt' is used to set/unset show options"
echo "shopts if set can be used to consider typos too"
echo "'shopt -s cdspell' will consider typos in cd command"
echo -e "\n'type' command identifies a command and test whether it exists or not\nExample: type tar, type cd, etc..."
echo -e "\n'jobs' command belongs to group of commands that control a job"
echo -e "Example:\nsleep 5 &\njobs"
echo -e "\n'disown' is a shell builtin"
echo "disown command removes jobs from shell's active jobs table"
#########################
# Fg & Kill
#########################
echo -e "\n##### Fg & Kill #####"
echo "fg command brings a process to foreground"
echo -e "Example:\nsleep 1000 &\nsleep 1001 &\njobs\nfg 1\nThis brings the process 1 to foreground"
echo -e "\nWaiting for 5 seconds"
sleep 5 &
wait
times # tells how much time it took to execute a command
echo "Done"
echo -e "\n'kill' can be used to kill a process with pid\n'ps aux' will list the processes\nkill #number"
echo -e "\nBonus:If you name a function with an inubuilt command name, you can use 'command' command to call the bash builtin"
echo -e "Example:\nIf there is a function called ls, you can call the function by invoking 'ls' and can call the inbuilt ls by invoking 'command ls'"
| true |
7590c1c88bd7fad3e82732893574897057ac09be | Shell | rafaelrojasmiliani/kamilscripts | /bin/,todoist | UTF-8 | 6,900 | 3.53125 | 4 | [
"Beerware",
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail
T_get_token() {
jq -r .token ~/.todoist.config.json
}
T_exec_nosynctoken() {
cmd=(
curl -sS
https://api.todoist.com/sync/v8/sync
-d token="$(T_get_token)"
-d "$@"
)
(
set -x
"${cmd[@]}"
)
}
T_exec() {
T_exec_nosynctoken sync_token="$(T_get_sync_token)" -d "$@"
}
g_all=""
g_cachefile="/tmp/todoist_cache.json"
T_all() {
if [[ -z "$g_all" ]]; then
if [[ ! -r "$g_cachefile" || -z "$(find "$g_cachefile" -mmin -60)" ]] || ! g_all=$(<"$g_cachefile"); then
g_all=$(T_exec_nosynctoken sync_token='*' -d 'resource_types=["all"]')
cat <<<"$g_all" >"$g_cachefile"
fi
fi
jq <<<"$g_all"
}
T_get_sync_token() {
T_all | jq -r .sync_token
}
T_get_labels() {
T_all | jq -r '.labels[] | [ .id, .name ] | @tsv'
}
T_get_projects() {
T_all | jq -r '.projects[] | [ .id, .name ] | @tsv'
}
tojs() {
echo "\"$1\":$2"
}
adduuid() {
tojs uuid "\"$(uuidgen)\""
}
T_add1() {
local args cmd content
args=$(getopt -n "T_add" -o L:P:p:h -- "$@")
eval set -- "$args"
cmd=""
while (($#)); do
case "$1" in
-L) cmd+=,$(tojs labels "[$2]"); shift; ;;
-P) cmd+=,$(tojs project_id "$2"); shift; ;;
-p) cmd+=,$(tojs priority "$2"); shift; ;;
-h) echo "no help"; exit; ;;
--) shift; break; ;;
*) L_fatal "Error parsing arguments"; ;;
esac
shift
done
content=$1
if [[ -z "$content" ]]; then
L_fatal "Content is empty"
fi
T_exec 'commands=[{"type": "item_add", "args": '"{\"content\": \"$content\" $cmd}, $(adduuid)}]"
}
notify() {
notify-send -i "$1" "todoist" "$2"
}
g_dryrun=""
if (($#)) && [[ "$1" = "-n" ]]; then
shift
g_dryrun="DRYRUN: "
run() { echo "DRYRUN:" "$@"; }
else
run() { "$@"; }
fi
looperrormsg() {
errormsg+="${errormsg:+$'\n'}$1"
again=true
}
T_dodaj_nowy_project() {
labels=$(T_get_labels)
projects=$(T_get_projects)
readonly labels projects
while "${again:-true}"; do
again=false
if (($#)) && [[ -n "$1" ]]; then
if [[ -n "${txt:-}" ]]; then exit 1; fi
txt=$1
shift
else
txt=$(
text="${errormsg:+$"Error"": $errormsg"$'\n\n'}"
text+=$"Add a new task to todoist with name?"$'\n\n'
text+=$"Available @labels"": $(cut -f2- <<<"$labels" | paste -sd' ')"$'\n\n'
text+=$"Available #projects"": $(cut -f2- <<<"$projects" | paste -sd ' ')"$'\n\n'
text=$(fmt -u -s -w 70 <<<"$text")$'\n'
zenity --entry \
--entry-text="${savetxt:-}" \
--title="todoist" \
--text="$text"
)
fi
savetxt="$txt"
errormsg=""
addlabels=""
addlabelsprint=()
addproject=""
addprojectprint=""
priority=""
if txtlabels=$(<<<"$txt" grep -Pwo "@\K[^[:space:]]*"); then
txtlabels=$(sort -u <<<"$txtlabels")
for i in $txtlabels; do
if ! tmp=$(fzf -q "$i" -1 -0 <<<"$labels"); then
looperrormsg $"The label could not be found"": $i"$'\n'$"Labels"": $(<<<"$labels" cut -f2- | paste -sd' ')"
break
fi
IFS=' ' read -r a b <<<"$tmp"
addlabelsprint+=("$b")
addlabels="${addlabels:+$addlabels,}$a"
done
fi
if txtprojects=$(<<<"$txt" grep -Pwo "#\K[^[:space:]]*"); then
for i in $txtprojects; do
if [[ -n "$addproject" ]]; then
looperrormsg $"Given more then one project"": $(paste -sd' ' <<<"$txtprojects")"
break
fi
if ! tmp=$(fzf -q "$i" -1 -0 <<<"$projects"); then
looperrormsg $"Project not found"": $i"$'\n'$"Projects"": $(<<<"$projects" cut -f2- | paste -sd' ')"
break
fi
IFS=' ' read -r addproject addprojectprint <<<"$tmp"
done
fi
if priorities=$(<<<"$txt" grep -Pwo "p\K[0-9]"); then
priority=$(tail -n1 <<<"$priorities")
if [ "$priority" -lt 1 ] || [ "$priority" -gt 4 ]; then
looperrormsg $"Invalid priority number, it has to be p1, p2, p3 or p4"": $priorities"
fi
fi
if [[ -n "$errormsg" ]]; then
notify dialog-error "$errormsg"
fi
if "$again"; then
continue
fi
for ((i=0;i<2;++i)); do
txt=$(sed 's/\(^\|[[:space:]]\)[#@][^[:space:]]*[[:space:]]*/\1/g' <<<"$txt")
done
mesg=$"task""."$'\n'
mesg+=$"With name"": $txt"
cmd=(T_add1)
if [[ -n "${addlabels:-}" ]]; then
mesg+=$'\n'$"To project"": ${addprojectprint[*]}"
cmd+=(-L "$addlabels")
fi
if [[ -n "${addproject:-}" ]]; then
mesg+=$'\n'$"With labels"": ${addlabelsprint[*]}"
cmd+=(-P "$addproject")
fi
if [[ -n "${priority:-}" ]]; then
mesg+=$'\n'$"With priority"": $priority"
cmd+=(-p "$priority")
fi
cmd+=("$txt")
if run "${cmd[@]}"; then
notify-send -i appointment-new "todoist" "${g_dryrun}"$"Added"": $mesg"
else
notify-send -i dialog-error "todoist" "${g_dryrun}"$"Problem with adding task"" $mesg"$'\n'$"Command"": ${cmd[*]}"
looperrormsg $"Problem with adding task"
fi
done
}
. ,lib_lib "T_" "$@"
exit
exit
: <<EOF
#### bash_autotranslate pl_PL
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: pl\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
#: bin/,todoist_dodaj_nowe_zadanie.sh:39
msgid "Error"
msgstr "Błąd"
#: bin/,todoist_dodaj_nowe_zadanie.sh:38
msgid "Add a new task to todoist with name?"
msgstr "Dodaj nowe zadanie o nazwie?"
#: bin/,todoist_dodaj_nowe_zadanie.sh:38
msgid "Available @labels"
msgstr "Dostępne etykiety"
#: bin/,todoist_dodaj_nowe_zadanie.sh:38
msgid "Available #projects"
msgstr "Dostępne projekty"
#: bin/,todoist_dodaj_nowe_zadanie.sh:63
msgid "The label could not be found"
msgstr "Etykieta nie znaleziona"
#: bin/,todoist_dodaj_nowe_zadanie.sh:63
msgid "Labels"
msgstr "Etykiety"
#: bin/,todoist_dodaj_nowe_zadanie.sh:76
msgid "Given more then one project"
msgstr "Podano więcej niż jeden projekt"
#: bin/,todoist_dodaj_nowe_zadanie.sh:80
msgid "Project not found"
msgstr "Nie ma takiego projektu"
#: bin/,todoist_dodaj_nowe_zadanie.sh:80
msgid "Projects"
msgstr "Projekty"
#: bin/,todoist_dodaj_nowe_zadanie.sh:90
msgid "Invalid priority number, it has to be p1, p2, p3 or p4"
msgstr "Nieprawidłowy numer priorytetu, musi być p1, p2, p3 lub p4"
#: bin/,todoist_dodaj_nowe_zadanie.sh:105
msgid "task"
msgstr "zadanie"
#: bin/,todoist_dodaj_nowe_zadanie.sh:106
msgid "With name"
msgstr "O nazwie"
#: bin/,todoist_dodaj_nowe_zadanie.sh:110
msgid "To project"
msgstr "Do projektu"
#: bin/,todoist_dodaj_nowe_zadanie.sh:114
msgid "With labels"
msgstr "Z etykietą"
#: bin/,todoist_dodaj_nowe_zadanie.sh:118
msgid "With priority"
msgstr "Z priorytetem"
#: bin/,todoist_dodaj_nowe_zadanie.sh:125
msgid "Added"
msgstr "Dodano"
#: bin/,todoist_dodaj_nowe_zadanie.sh:127
#: bin/,todoist_dodaj_nowe_zadanie.sh:128
msgid "Problem with adding task"
msgstr "Problem z dodaniej zadania"
#: bin/,todoist_dodaj_nowe_zadanie.sh:127
msgid "Command"
msgstr "Polecenie"
#### bash_autotranslate END
EOF
| true |
004ecac067ccc96ca3483c6d67f096313f4012bd | Shell | superdaigo/morning-playlist | /alert_10_minutes.sh | UTF-8 | 514 | 3.5625 | 4 | [] | no_license | #!/bin/bash
STOP_AT=$(date -j -v "+1H" "+%H:%M")
if [ ! -z $1 ] && [[ $1 =~ ^[0-9]{1,2}:[0-9]{2}$ ]] ; then
STOP_AT="${1}"
if [ ${#1} -eq 4 ] ; then
STOP_AT="0${1}"
fi
fi
echo "Will stop at ${STOP_AT}"
while :
do
MS=$(date "+%M%S")
TIME=$(date "+%H:%M")
if [ ${MS:1:3} -eq "000" ] ; then
say -v Kyoko "${TIME} になりました" # Japanese
fi
sleep 1
if [ ${TIME} = ${STOP_AT} ] ; then
say -v Kyoko "指定の時刻を過ぎたので終了します" # Japanese
exit
fi
done
| true |
83f0c256a55ee32846d153637ff72bdd410bd878 | Shell | ProbablyNotArtyom/G-DOS | /test-archs.sh | UTF-8 | 522 | 3.34375 | 3 | [] | no_license | #!/bin/bash
local ARCH_OLD
local PLATFORM_OLD
if [[ -e .config ]]; then
source .config
ARCH_OLD=$ARCH
PLATFORM_OLD=$PLATFORM
fi
function build_arch {
./config.sh ARCH=$1 PLATFORM=$2
make clean
make
if [[ $? != "0" ]]; then
echo "[!!!] PLATFORM FAILED:"
echo "[!!!] $1/$2"
exit
fi
}
make -i distclean
build_arch m68k emu68k
build_arch arm versatilepb
build_arch ppc psim
if [[ ! -z $ARCH_OLD ]] || [[ ! -z $PLATFORM_OLD ]]; then
./config.sh ARCH=$ARCH_OLD PLATFORM=$PLATFORM_OLD
else
make distclean
fi
| true |
78d1864780c440065a4fe357a6f3b6c6fd3d7973 | Shell | dino-/scripts | /mdconv | UTF-8 | 1,210 | 4.125 | 4 | [] | no_license | #! /bin/bash
basename=$(basename "$0")
function usage {
cat <<USAGE
$basename - Convert Markdown to various formats using Pandoc
usage:
$basename OUTPUT_FORMAT MARKDOWN_DOCUMENT
OUTPUT_FORMAT is one of: html, pdf
The new file will be in the same directory and named just like the
Markdown file with the appropriate extension.
Note, on Arch Linux, creating PDFs this way required the installation
of texlive-core and texlive-bin
v1.1 2022-11-17 Dino Morelli <dino@ui3.info>
USAGE
}
# arg parsing
if [ $# -lt 2 ]
then
echo "ERROR: Missing required arguments"
usage
exit 1
fi
format="$1"
infile="$2"
switches=("--standalone")
while true ; do
case "$format" in
html) break;;
pdf) switches+=("--variable=geometry:margin=1in"); break;;
*) echo "ERROR: Unknown format"; usage; exit 1; break;;
esac
done
outfile="${infile%.*}.$format"
# Special 'from' handling for Literate Haskell source code with Markdown
[[ "${infile##*.}" == "lhs" ]] && switches+=("--from=markdown+lhs")
set -x
# These checks are for whitespace, globbing and quoting. We want the whitespace
# in this expanded array.
# shellcheck disable=SC2048 disable=SC2086
pandoc ${switches[*]} --output "$outfile" "$infile"
| true |
f9c541023b6f5f790a98fa46275b688f4e81af13 | Shell | Morgareth99/gists | /wtosd | UTF-8 | 1,313 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# wtosd - Window Title On Screen Display
# Written 20140905 by Armin <netzverweigerer@github>
# This script was made for window manager setups that do not show a title bar
# for windows. It looks for the currently active window, looks up the window
# title via xprop, and then renders an OSD (on screen display) to show the
# window title of that window. It is best being used via a hotkey/keybinding.
# If you use i3, you can define one e.g. via:
# bindsym $mod+t exec /path/to/this/script
# font
# osd_font="-*-terminus-*-*-*-*-*-320-*-*-*-*-*-*"
# osd_font="-*-terminus-*-*-*-*-*-320-*-*-*-*-*-*"
osd_font="-*-terminus-bold-*-normal-*-*-320-*-*-*-*-*-*"
# window title name
title="$(xprop -id $(xprop -root _NET_ACTIVE_WINDOW | cut -d ' ' -f 5) WM_NAME | cut -d"=" -f 2- | cut -b 3- | rev | cut -b 2- | rev)"
# window class name
class="$(xprop -id $(xprop -root _NET_ACTIVE_WINDOW | cut -d ' ' -f 5) | grep -i ^wm_class | rev | cut -b 2- | cut -d"," -f 1 | rev | cut -b 3-)"
# if there is already an osd_cat running, kill it and wait 0.2s before
# displaying a new one (to avoid a flashing effect / assure visual smoothness)
pidof osd_cat && pkill -x osd_cat && sleep 0.2
# display osd_cat
echo -e "[$class]\n[$title]" | osd_cat -A center -f "$osd_font" -O 2 -u "#000" - -T "Window Title:" -c "#8f8" -o 20
| true |
d0ce9f4047c7f3f24ac9945a52abf4d5c37bcb13 | Shell | passy/ndocker-postfix-relay | /rootfs/init/run.sh | UTF-8 | 424 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env sh
set -ex
# shellcheck disable=SC2154
if test "$NOMAD_PORT_smtp"; then
export SMTP_PORT="$NOMAD_PORT_smtp"
else
export SMTP_PORT="smtp"
fi
busybox syslogd -n -O /dev/stdout &
bbchild=$!
newaliases
postmap /etc/postfix/overlay/virtual
consul-template -config /etc/ndocker/postfix.hcl &
child=$!
trap 'kill $bbchild $child' INT TERM
trap 'newaliases && postmap /etc/postfix/virtual' USR1
wait $child
| true |
8626ba0da5d9f3fdcad97dcf325cb0d2fed8bad2 | Shell | pietaridaemonna/job_dsigner_core | /scripts/deployVM.txt | UTF-8 | 4,815 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
QEMU_IMG=/usr/bin/qemu-img
VIRT_INST=/usr/bin/virt-install
NAME=$1
# Change ISO and INST_PATH variable
ISO="debian-6.0.7-amd64-CD-1.iso"
ISODIR="/var/lib/libvirt/images"
INST_PATH="/var/lib/libvirt/images/vms"
# DONT CHANGE BELOW
if [[ ${EUID} != 0 ]]; then
printf "${USER}. You must be root\n"
exit 1
fi
get_isos()
{
files=(); while IFS= read -rd '' f; do files+=("$f"); done < <(find "$ISODIR" -name '*.iso' -print0)
echo -ne "There are ${#files[@]} installation media found"
echo
for index in "${!files[@]}"; do printf '[%s] %s\n' "$((index + 1))" "${files[index]}"; done
echo -ne "Which iso would you like to use? :\t "
echo -ne "To be implemented...."
echo
}
def_deployment(){
DEFNAME=customername
STORAGESIZE=30
DISKNAME=vdisk
RAM=2048
CORES=2
ETH0=virbr0
echo "The default settings are"
echo "#########################################"
echo
echo -en "Client name:\t\t $DEFNAME\n"
echo -en "Storage GB:\t\t $STORAGESIZE\n"
echo -en "CPU cores:\t\t $CORES\n"
echo
echo "#########################################"
echo -n "Would you like to use these settings [y/n]? "
read YesOrNo
YN=$YesOrNo
if [[ "$YN" = "y" ]] && [[ ! -d $INST_PATH/$DEFNAME ]]; then
mkdir -p $INST_PATH/$DEFNAME/
echo "Client directory is being created... $INST_PATH/$NAME "
sleep 2
echo "Creating image of cqow2 format"
sleep 2
$QEMU_IMG create -f qcow2 $INST_PATH/$DEFNAME/$DISKNAME.qcow2 $STORAGESIZE"G"
wait
virt-install --connect=qemu:///system \
--name=$DEFNAME \
--ram=$RAM \
--vcpus=$VCPUS \
--arch=x86_64 \
--os-type=linux \
--hvm \
--virt-type kvm \
--cdrom=$ISODIR/$ISO \
--disk path=$INST_PATH/$DEFNAME/$DISKNAME.cqow2,size=$STORAGESIZE,bus=virtio \
--network=bridge:$ETH0,model=virtio --vnc
fi
}
manual_deployment()
{
get_isos
echo
echo -ne "Client name:\t"
read clientname
CNAME=$clientname
if [[ -e $INST_PATH/$CNAME ]]; then
echo -ne "Client $NAME exists\n"
exit 1
fi
echo -ne "Node name:\t"
read nodename
NN=$nodename
echo -ne "Virtual CPU's:\t"
read virtualcpus
VCPUS=$virtualcpus
echo $VCPUS | grep "[^0-9]" > /dev/null 2>&1
if [[ "$?" -eq "0" ]]; then
echo "Numbers Only"
echo
exit 1
fi
echo -ne "Virtual RAM in MB:\t"
read virtualram
VRAM=$virtualram
echo $VCPUS | grep "[^0-9]" > /dev/null 2>&1
if [[ "$?" -eq "0" ]]; then
echo "Numbers Only"
echo
exit 1
fi
echo -ne "Virtual Disk space in GB:\t"
read virtualdisk
VDISK=$virtualdisk
echo $VCPUS | grep "[^0-9]" > /dev/null 2>&1
if [[ "$?" -eq "0" ]]; then
echo "Numbers Only"
echo
exit 1
fi
one_br_deployment()
{
if [[ ! -d $INST_PATH/$CNAME ]]; then
mkdir -p $INST_PATH/$CNAME
echo "Client directory is being created... $INST_PATH/$CNAME "
sleep 2
echo "Creating image of cqow2 format"
sleep 2
$QEMU_IMG create -f qcow2 $INST_PATH/$CNAME/$NN.qcow2 $VDISK"G"
sleep 5
virt-install --connect=qemu:///system \
--name=$NN \
--ram=$VRAM \
--vcpus=$VCPUS \
--arch=x86_64 \
--os-type=linux \
--hvm \
--virt-type kvm \
--cdrom=$ISODIR/$ISO \
--network=bridge:$brname,model=virtio \
--disk path=$INST_PATH/$CNAME/$NN.cqow2,size=$VDISK,bus=virtio \
--vnc
fi
}
two_br_deployment()
{
if [[ ! -d $INST_PATH/$CNAME ]]; then
mkdir -p $INST_PATH/$CNAME
echo "Client directory is being created... $INST_PATH/$CNAME "
sleep 2
echo "Creating image of cqow2 format"
sleep 2
$QEMU_IMG create -f qcow2 $INST_PATH/$CNAME/$NN.qcow2 $VDISK"G"
sleep 5
virt-install --connect=qemu:///system \
--name=$NN \
--ram=$VRAM \
--vcpus=$VCPUS \
--arch=x86_64 \
--os-type=linux \
--hvm \
--virt-type kvm \
--cdrom=$ISODIR/$ISO \
--network=bridge:$br1name,model=virtio --network=bridge:$br2name,model=virtio \
--disk path=$INST_PATH/$CNAME/$NN.cqow2,size=$VDISK,bus=virtio \
--vnc
fi
}
availbridges=$(ifconfig | awk '/br/ {printf("[ %s%s ]", NR==1?"":" ", $1)} END {print ""}')
echo -ne "How many bridges to add:\t"
read bridge
BR=$bridge
echo $BR | grep "[^0-9]" > /dev/null 2>&1
if [[ $BR == "1" ]]; then
echo -ne "Available bridges are:\t $availbridges"
echo
echo -ne "Enter Bridge name:\t "
read brname
if [[ ! -z $brname ]]; then
one_br_deployment
fi
elif [[ $BR == "2" ]]; then
echo -ne "Available bridges are:\t $availbridges"
echo
echo -ne "Enter first bridg name:\t "
read br1name
echo -ne "Enter second bridge name:\t "
read br2name
if [[ ! -z $br1name ]] && [[ ! -z $br2name ]]; then
two_br_deployment
fi
elif [[ "$?" -eq "0" ]]; then
echo "Numbers and Letters only"
echo
exit 1
fi
}
# DONT CHANGE DOWN HERE
if [[ $# -lt 1 ]]; then
echo "Usage: ./`basename $0` [default | manual]"
exit 1
fi
if [[ $1 = "default" ]]; then
def_deployment
elif [[ $1 = "manual" ]]; then
manual_deployment
fi
| true |
69e7dcaa8bc476f9ff430ab6eb058029b93513ce | Shell | vinaymk009/aws-material | /arguments_ex1.sh | UTF-8 | 145 | 2.609375 | 3 | [] | no_license | #!bin/bash
#Program with arguments in functions
function arguments_ex(){
echo "Hello, $1 location at $2"
}
arguments_ex KnowledgeIT Ameerpet
| true |
37cbfc324c0454cf78c3c5bf1ebd75bf41c87d1e | Shell | nfschina/nfs-antivirus | /nfs-antivirus-0.1/src/bin/detect_usb/autorunDetectUsb | UTF-8 | 1,518 | 3.59375 | 4 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides: detect_usb
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# X-Start-Before: kdm gdm3 xdm lightdm
# X-Stop-After: kdm gdm3 xdm lightdm
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Start the Advanced Configuration and Power Interface daemon
# Description: Provide a socket for X11, hald and others to multiplex
# kernel ACPI events.
### END INIT INFO
set -e
DETECT_USB="/usr/bin/detect_usb"
# Check for daemon presence
[ -x "$DETECT_USB" ] || exit 0
OPTIONS=""
MODULES=""
# Include detect_usb defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"
# Get lsb functions
. /lib/lsb/init-functions
case "$1" in
start)
log_daemon_msg "Starting detect_usb services" "detect_usb"
start-stop-daemon --start --quiet --oknodo --exec "$DETECT_USB" -- $OPTIONS
log_end_msg $?
;;
stop)
log_daemon_msg "Stopping detect_usb services" "detect_usb"
start-stop-daemon --stop --quiet --oknodo --retry 2 --exec "$DETECT_USB"
log_end_msg $?
;;
restart)
$0 stop
sleep 1
$0 start
;;
reload|force-reload)
log_daemon_msg "Reloading detect_usb services" "usb"
start-stop-daemon --stop --signal 1 --exec "$DETECT_USB"
log_end_msg $?
;;
status)
status_of_proc "$DETECT_USB" detect_usb
;;
*)
log_success_msg "Usage: /etc/init.d/detect_usb {start|stop|restart|reload|force-reload|status}"
exit 1
esac
| true |
7fc36d2582f0a3241c1181116419d975cc6cb46f | Shell | Smithx10/nyble | /drivers/i40e/install | UTF-8 | 723 | 2.828125 | 3 | [] | no_license | #!/bin/bash
VER=2.4.6
if [ -e "/etc/debian_version" ]; then
export KV=`dpkg -l | grep linux-image- | grep -v meta | perl -lane 'print $F[1] =~ /linux-image-(.*)/'`
export KSV=`echo ${KV} | perl -lane 'printf $F[0] =~ /^(\d+\.\d+)/'`
export KP=/usr/src/linux-source-${KSV}
fi
if [ -e "/etc/redhat-release" ]; then
export KV=`rpm -qa | grep kernel | grep -v devel | grep -v headers | grep -v tools | perl -pe 's/kernel-(.*?)-1.x86_64/$1/'`
export KSV=`echo ${KV} | perl -lane 'printf $F[0] =~ /^(\d+\.\d+)/'`
export KP=/usr/src/kernels/${KV}
fi
echo KSV = ${KSV}
echo KP = ${KP}
echo KV = ${KV}
pushd .
cd /root
tar -zxvf i40e-$VER.tar.gz
cd i40e-$VER/src
make KSRC=${KP} install
cd ../..
rm -rf i40e* install
popd
| true |
9284e43a1110d068302a47964717c717d014258b | Shell | kilnamkim/settings | /scripts/vim_local_install.sh | UTF-8 | 770 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Script for installing vim on systems where you don't have root access.
# vim will be installed in $HOME/opt/vim.
# exit on error
set -e
VERSION=8.2.0430
# create our directories
mkdir -p $HOME/vim_tmp $HOME/opt
cd $HOME/vim_tmp
# download source files for vim
REL=https://github.com/vim/vim/archive
FILENAME=v${VERSION}
curl -LO ${REL}/${FILENAME}.tar.gz
# extract files, configure, and compile
tar xvzf ${FILENAME}.tar.gz
DIRNAME=vim-${VERSION}
cd ${DIRNAME}
# configure & build
make configure
./configure --prefix=$HOME/opt/$DIRNAME-bin && make all
make install
cd ..
# link
cd $HOME/opt
ln -s $DIRNAME-bin vim
# cleanup
rm -rf $HOME/vim_tmp
echo "$HOME/opt/vim/bin/vim is now available. You can optionally add $HOME/opt/vim/bin to your PATH."
| true |
2208ccd68ab2ca414d061fe1922584082c734a09 | Shell | CardiffMathematicsCodeClub/CardiffMathematicsCodeClub.github.io | /script/bump-version.sh | UTF-8 | 787 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Get the latest tags
git fetch upstream --tags
# Get the latest version in the form x.y
ver=$(git tag | tail -n 1 | sed 's/v//')
# Get the x part of the version
major_ver=$(echo $ver | sed 's/\.[0-9]*//')
# Get the y part of the version
minor_ver=$(echo $ver | sed 's/[0-9]*\.//')
# Calculate the number of commits since the last release for the patch version (z)
patch_ver=$[ $(git log $(git tag | tail -n 1)..HEAD --pretty=oneline | wc -l) + 1]
# Put it all together
version="${major_ver}.${minor_ver}.${patch_ver}"
# Tell the user
echo "Bumped to version v$version"
# Finally find and replace in the _config.yml file
sed -i -- "s/\(version: \)[0-9]*\.[0-9]*\.[0-9]*/\1${version}/" _config.yml
# One last thing, add the changes to the commit
git add _config.yml
| true |
2ae67df6a5c181ba2c8fb16b95c7718f11e6cbcc | Shell | dyno/bin.dyno | /apt-get-download-only.sh | UTF-8 | 203 | 2.921875 | 3 | [] | no_license | #!/bin/sh
#this will download the installed package deb file
#then use dbkg -i --force-<action> $1 force reinstall the package
if [ $# == 1 ]
then
echo sudo apt-get --download-only --reinstall $1
fi
| true |
f05db9ba62e1b9158981624f126cf6a02c76c862 | Shell | lisuke/repo | /archlinuxcn/git-subrepo-git/PKGBUILD | UTF-8 | 791 | 3.046875 | 3 | [] | no_license | # Maintainer: James An <james@jamesan.ca>
pkgname=git-subrepo-git
_pkgname=${pkgname%-git}
epoch=1
pkgver=0.4.6.r0.g110b9eb
pkgrel=1
pkgdesc="Git command is an improvement from git-submodule and git-subtree."
arch=('any')
url="https://github.com/ingydotnet/$_pkgname"
license=('GPL')
depends=('git')
provides=("$_pkgname=$pkgver")
conflicts=("$_pkgname")
source=("$_pkgname"::"git+$url.git")
md5sums=('SKIP')
pkgver() {
cd "$_pkgname"
(
set -o pipefail
git describe --long --tag | sed -r 's/([^-]*-g)/r\1/;s/-/./g' ||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
)
}
package() {
cd "$_pkgname"
make DESTDIR="$pkgdir" PREFIX=/usr install
install -Dm644 share/zsh-completion/_git-subrepo "$pkgdir/usr/share/zsh/site-functions/_git-subrepo"
}
| true |
23b3c27fb9ef0dfbf6e5d02d2485f12efdfb1850 | Shell | joeking11829/Ubuntu-CB5-311-TK1 | /install-cuda.sh | UTF-8 | 3,354 | 2.59375 | 3 | [] | no_license | cd && clear
echo "--------------------------------------------------------"
echo " Script to re-enable cuda capabilities "
echo " on Acer CB5-311 Chromebooks "
echo "--------------------------------------------------------"
echo
#echo "install tools"
#sudo apt-get -y install cgpt vboot-kernel-utils device-tree-compiler build-essential u-boot-tools ncurses-dev mpi-default-dev mpi-default-bin
cd $HOME/Downloads
#echo
#echo "fetch sources"
#wget http://commondatastorage.googleapis.com/chromeos-localmirror/distfiles/xhci-firmware-2014.10.10.00.00.tbz2
#wget https://chromium.googlesource.com/chromiumos/third_party/kernel/+archive/release-R41-6680.B-chromeos-3.10.tar.gz
# The CUDA repo to use when we are done:
#wget http://developer.download.nvidia.com/embedded/L4T/r21_Release_v3.0/cuda-repo-l4t-r21.3-6-5-prod_6.5-42_armhf.deb
echo
ls -l xhci-firmware*tbz2 release-R41-6680.B*tar.gz
mkdir -p $HOME/src/linux
cd $HOME/src/linux
echo
echo "copy firmware"
sudo tar xf $HOME/Downloads/xhci-firmware-2014.10.10.00.00.tbz2 -C /
echo
echo "extract kernel"
tar -xf $HOME/Downloads/release-R41-6680.B-chromeos-3.10.tar.gz
echo
ls
echo
echo "configure"
./chromeos/scripts/prepareconfig chromeos-tegra
./scripts/config --set-val CONFIG_EXTRA_FIRMWARE \"nvidia/tegra124/xusb.bin\"
./scripts/config --set-val CONFIG_EXTRA_FIRMWARE_DIR \"/lib/firmware\"
./scripts/config -d CONFIG_CC_STACKPROTECTOR
./scripts/config -d CONFIG_SECURITY_CHROMIUMOS
WIFIVERSION=-3.8 make oldnoconfig
cat ./.config|grep CONFIG_EXTRA_FIRMWARE
echo
WIFIVERSION=-3.8 make -j4 zImage
WIFIVERSION=-3.8 make -j4 modules
WIFIVERSION=-3.8 make tegra124-nyan-big.dtb
sudo WIFIVERSION=-3.8 make INSTALL_PATH=/boot INSTALL_MOD_PATH=/ firmware_install modules_install
cat << __EOF__ > arch/arm/boot/kernel.its
/dts-v1/;
/ {
description = "ChromeOS kernel image with one or more FDT-blobs.";
images {
kernel@1{
description = "kernel";
data = /incbin/("zImage");
type = "kernel_noload";
arch = "arm";
os = "linux";
compression = "none";
load = <0>;
entry = <0>;
};
fdt@1{
description = "tegra124-nyan-big.dtb";
data = /incbin/("dts/tegra124-nyan-big.dtb");
type = "flat_dt";
arch = "arm";
compression = "none";
hash@1 {
algo = "sha1";
};
};
};
configurations {
default = "conf@1";
conf@1 {
kernel = "kernel@1";
fdt = "fdt@1";
};
};
};
__EOF__
mkimage -f arch/arm/boot/kernel.its vmlinux.uimg
echo "console=tty1 debug verbose root=/dev/mmcblk0p7 rootfstype=ext4 rootwait rw lsm.module_locking=0" > kernel-config
vbutil_kernel \
--version 1 \
--arch arm \
--keyblock /usr/share/vboot/devkeys/kernel.keyblock \
--signprivate /usr/share/vboot/devkeys/kernel_data_key.vbprivk \
--vmlinuz vmlinux.uimg \
--pack chromeos-R41-6680.B.kpart \
--config kernel-config
echo
echo "--------------------------------------------------------"
echo " We are done. Install kernel now? - Then do:"
echo
echo " cd $HOME/src/linux "
echo " sudo dd if=chromeos-R41-6680.B.kpart of=/dev/mmcblk0p6"
echo " sudo cgpt add -i 6 -P 5 -T 1 /dev/mmcblk0 "
echo
echo " ... and reboot. "
| true |
4914a3c220485ccd2f4effc090759bac6f362a57 | Shell | pnnl/mass2 | /test/flow/spur-dike-1/mkgrid.sh | UTF-8 | 1,092 | 3.046875 | 3 | [] | no_license | #! /bin/sh
# -------------------------------------------------------------
# file: mkgrid.sh
#
# Build a uniform Cartesian grid with cartgrid, then convert the
# node coordinates in grid.out from meters to feet (grid.dat).
# -------------------------------------------------------------
# Battelle Memorial Institute
# Pacific Northwest Laboratory
# -------------------------------------------------------------
# Created October 8, 2003 by William A. Perkins
# -------------------------------------------------------------

# Domain dimensions in meters and node counts.
L=5.4      # longitudinal extent
W=0.9      # lateral extent
nx=217     # downstream nodes
ny=37      # cross-stream nodes

# Node spacing = extent / (nodes - 1), to 6 decimal places.
# Quoted $( ... ) replaces the original backticks with escaped ';'/'('.
dx=$(echo "scale=6; $L/($nx - 1)" | bc)
dy=$(echo "scale=6; $W/($ny - 1)" | bc)

# Generate the grid; '!' introduces an inline comment in cartgrid input.
../../../util/cart_grid/cartgrid <<EOF
$dx ! longitudinal spacing
$dy ! lateral spacing
$nx ! downstream nodes
$ny ! cross stream nodes
0.0 ! starting x coordinate
0.0 ! starting y coordinate
5.9014e-05 ! slope
0.0 ! downstream elevation
EOF

# Convert x, y, z (columns 3-5) from meters to feet on 5-field node
# records; all other lines pass through unchanged.
awk -f - grid.out > grid.dat <<EOF
NF == 5 {
  x = \$3/0.3048;
  y = \$4/0.3048;
  z = \$5/0.3048;
  printf("%5d %5d %12.6g %12.6g %12.6g\n", \$1, \$2, x, y, z);
  next;
}
{ print; }
EOF
064d7e1b4ce6d5e92d0049880e1bddf2a5d0398e | Shell | mtransitapps/commons | /shared/build.sh | UTF-8 | 5,886 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Shared helpers: setGitBranch, setIsCI, setGradleArgs, checkResult, contains.
source commons/commons.sh;
echo "================================================================================";
echo "> BUILD ALL...";
echo "--------------------------------------------------------------------------------";
# Start timestamps for the duration summary printed at the end.
BEFORE_DATE=$(date +%D-%X);
BEFORE_DATE_SEC=$(date +%s);
# AGENCY_ID = current directory name without the "-gradle" suffix.
CURRENT_PATH=$(pwd);
CURRENT_DIRECTORY=$(basename ${CURRENT_PATH});
AGENCY_ID=$(basename -s -gradle ${CURRENT_DIRECTORY});
setGitBranch
CONFIRM=false;
setIsCI;
setGradleArgs;
# Directories that are not GIT sub-repositories and must be skipped.
declare -a EXCLUDE=(".git" "test" "build" "gen" "gradle");
echo "> CLEANING FOR '$AGENCY_ID'...";
# Ensure every GIT sub-repository is checked out on $GIT_BRANCH and in
# sync with origin; abort the whole build on any outdated repo.
for d in ${PWD}/* ; do
 DIRECTORY=$(basename ${d});
 if ! [[ -d "$d" ]]; then
  echo "> Skip GIT cleaning (not a directory) '$DIRECTORY'.";
  echo "--------------------------------------------------------------------------------";
  continue;
 fi
 if contains ${DIRECTORY} ${EXCLUDE[@]}; then
  echo "> Skip GIT cleaning in excluded directory '$DIRECTORY'.";
  echo "--------------------------------------------------------------------------------";
  continue;
 fi
 if [[ -d "$d" ]]; then
  cd ${d} || exit;
  echo "> GIT cleaning in '$DIRECTORY'...";
  # Fail fast when local HEAD differs from origin/$GIT_BRANCH.
  GIT_REV_PARSE_HEAD=$(git rev-parse HEAD);
  GIT_REV_PARSE_REMOTE_BRANCH=$(git rev-parse origin/${GIT_BRANCH});
  if [[ "$GIT_REV_PARSE_HEAD" != "$GIT_REV_PARSE_REMOTE_BRANCH" ]]; then
   echo "> GIT repo outdated in '$DIRECTORY' (local:$GIT_REV_PARSE_HEAD|origin/$GIT_BRANCH:$GIT_REV_PARSE_REMOTE_BRANCH).";
   exit 1;
  else
   echo "> GIT repo up-to-date in '$DIRECTORY' (local:$GIT_REV_PARSE_HEAD|origin/$GIT_BRANCH:$GIT_REV_PARSE_REMOTE_BRANCH).";
  fi
  git checkout ${GIT_BRANCH};
  checkResult $? ${CONFIRM};
  echo "> GIT cleaning in '$DIRECTORY'... DONE";
  cd ..;
  echo "--------------------------------------------------------------------------------";
 fi
done
# Log tool versions so CI logs are self-describing.
echo "--------------------------------------------------------------------------------";
echo "GRADLE VERSION:";
./gradlew --version ${GRADLE_ARGS};
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "JAVA VERSION:";
java -version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "CURL VERSION:";
curl --version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "OPENSSL VERSION:";
openssl version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "AWK VERSION:";
awk --version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "GAWK VERSION:";
gawk --version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "MAWK VERSION:";
mawk --version;
echo "--------------------------------------------------------------------------------";
echo "--------------------------------------------------------------------------------";
echo "SPLIT VERSION:";
split --version;
echo "--------------------------------------------------------------------------------";
# Only download/parse agency data when a parser module exists and we are
# on a work branch (master builds skip the data pipeline).
if [[ -d "agency-parser" ]] && [[ $GIT_BRANCH != "master" ]]; then
 echo "> CLEANING FOR '$AGENCY_ID' (GRADLE BUILD)...";
 ./gradlew :commons-java:clean ${GRADLE_ARGS};
 checkResult $? ${CONFIRM};
 ./gradlew :parser:clean ${GRADLE_ARGS};
 checkResult $? ${CONFIRM};
 ./gradlew :agency-parser:clean ${GRADLE_ARGS};
 checkResult $? ${CONFIRM};
 echo "> CLEANING FOR '$AGENCY_ID' (GRADLE BUILD)... DONE";
 echo "> DOWNLOADING DATA FOR '$AGENCY_ID'...";
 cd agency-parser || exit; # >>
 ./download.sh;
 checkResult $? ${CONFIRM};
 # Validate the downloaded GTFS feed(s); validation result intentionally
 # not fatal (see commented checkResult calls below).
 ../commons/gtfs/gtfs-validator.sh "input/gtfs.zip" "output/current";
 # checkResult $?; # too many errors for now
 if [[ -e "$FILE_PATH/input_url_next" ]]; then
  ../commons/gtfs/gtfs-validator.sh "input/gtfs_next.zip" "output/next";
  # checkResult $?; # too many errors for now
 fi
 ./unzip_gtfs.sh;
 checkResult $? ${CONFIRM};
 echo "> DOWNLOADING DATA FOR '$AGENCY_ID'... DONE";
 echo "> BUILDING FOR '$AGENCY_ID' (GRADLE BUILD)... ";
 ../gradlew :commons-java:build ${GRADLE_ARGS}; #includes test
 checkResult $? ${CONFIRM};
 ../gradlew :parser:build ${GRADLE_ARGS}; #includes test
 checkResult $? ${CONFIRM};
 echo "> BUILDING FOR '$AGENCY_ID' (GRADLE BUILD)... DONE";
 echo "> PARSING DATA FOR '$AGENCY_ID'...";
 # CURRENT schedule...
 ../gradlew :agency-parser:build ${GRADLE_ARGS};
 checkResult $? ${CONFIRM};
 ./parse_current.sh;
 checkResult $? ${CONFIRM};
 # CURRENT... DONE
 # NEXT schedule...
 ../gradlew :agency-parser:build ${GRADLE_ARGS};
 checkResult $? ${CONFIRM};
 ./parse_next.sh;
 checkResult $? ${CONFIRM};
 # NEXT... DONE
 ./list_change.sh;
 checkResult $? ${CONFIRM};
 cd ..; # <<
 echo "> PARSING DATA FOR '$AGENCY_ID'... DONE";
else
 echo "> SKIP PARSING FOR '$AGENCY_ID' (branch:$GIT_BRANCH).";
fi
echo "> BUILDING ANDROID APP FOR '$AGENCY_ID'...";
cd app-android || exit;
./build.sh
checkResult $? ${CONFIRM};
cd ..;
echo "> BUILDING ANDROID APP FOR '$AGENCY_ID'... DONE";
echo "--------------------------------------------------------------------------------";
# Duration summary (start vs. end wall clock).
AFTER_DATE=$(date +%D-%X);
AFTER_DATE_SEC=$(date +%s);
DURATION_SEC=$(($AFTER_DATE_SEC-$BEFORE_DATE_SEC));
echo "> $DURATION_SEC secs FROM $BEFORE_DATE TO $AFTER_DATE";
echo "> BUILD ALL... DONE";
echo "================================================================================";
ed414e81311b6c56fed2857d2bce3656a18f4bad | Shell | tuian/Hybrid-Darknet-Concept | /hp-installer-active.sh | UTF-8 | 11,092 | 2.765625 | 3 | [
"BSD-2-Clause"
] | permissive | ####################################################
#### ####
#### Ubuntu 14.04LTS Honeypot Install Script ####
#### Updated for 14.04 by Zane Witherspoon & ####
#### tuna@people.ops-trust.net ####
#### ####
####################################################
###### Variables ######
INTERFACE='em1'
RABBIT_USER='amp'
RABBIT_PASSWORD='password'
RABBIT_HOST='1.1.1.1'
###### Get Things ######
apt-get update
apt-get -y install gcc build-essential bind9 dnsutils cmake make gcc g++ flex bison gcc
apt-get -y install libpcap-dev libgeoip-dev libssl-dev python-dev zlib1g-dev libmagic-dev
apt-get -y install hping3 vim ntp xinetd curl default-jre git ruby swig2.0 ruby-dev
mkdir build
cd build
###### setup hostname ######
OLDHOSTNAME=`cat /etc/hostname`
echo honey`/sbin/ifconfig $INTERFACE | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`> /etc/hostname
NEWHOSTNAME=`cat /etc/hostname`
hostname $NEWHOSTNAME
cat /etc/hosts | sed s/$OLDHOSTNAME/$NEWHOSTNAME/g > /tmp/hosts
mv -f /tmp/hosts /etc/hosts
echo -e "127.0.0.1\t$NEWHOSTNAME" >> /etc/hosts
###### Install NTP ######
mkdir build && cd build
wget http://www.eecis.udel.edu/~ntp/ntp_spool/ntp4/ntp-4.2/ntp-4.2.6p4.tar.gz
tar zxvf ntp-4.2.6p4.tar.gz
cd ntp-4.2.6p4/
./configure --enable-clockctl
make
make install
cd ..
echo "driftfile /var/lib/ntp/ntp.drift" > /etc/ntp.conf
echo "e30003fa000100000001000000000000000000000000000000000000000000000000000000000000d6a42558d961df90" > /root/ntp.bin
/usr/local/bin/ntpd -u ntp:ntp -p /var/run/ntpd.pid -g
echo '#!/usr/bin/env bash
killall ntpd
/usr/local/bin/ntpd -u ntp:ntp -p /var/run/ntpd.pid -g
killall sshpot
service sshpot restart
killall hp-ssdp
/root/build/ssdp/hp-ssdp &
killall hp-heartbeat
/root/build/heartbeat/hp-heartbeat &
killall hp-mdns
/root/build/mdns/hp-mdns &
killall hp-nat-pmp
/root/build/nat-pmp/hp-nat-pmp &
killall hp-quake
/root/build/quake_emu/hp-quake &
killall hp-ripv1
/root/build/ripv1/hp-ripv1 &
killall hp-sentinal
/root/build/sentinel/hp-sentinal &
/usr/sbin/hping3 --rand-source -c 600 --udp -p 123 --fast -n 127.0.0.1 -d 48 -E /root/ntp.bin' > /root/killntp.sh
chmod +x /root/killntp.sh
hping3 --rand-source -c 600 --udp -p 123 --fast -n 127.0.0.1 -d 48 -E /root/ntp.bin
###### Install CHARGEN ######
echo 'service chargen
{
disable = yes
type = INTERNAL
id = chargen-stream
socket_type = stream
protocol = tcp
user = root
wait = no
}
# This is the udp version.
service chargen
{
disable = no
type = INTERNAL
id = chargen-dgram
socket_type = dgram
protocol = udp
user = root
wait = yes
}' > /etc/xinetd.d/chargen
service xinetd restart
###### Install SSDP Service Emulator ######
cd /root/build/
mkdir ssdp
cd /root/build/ssdp/
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-ssdp
chmod +x hp-ssdp
###### Install heartbeat_emulator #######
if [ ! -d /root/build/heartbeat ]; then
mkdir /root/build/heartbeat
fi
cd /root/build/heartbeat
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-heartbeat
chmod +x hp-heartbeat
###### Install mdns Service Emulator #######
if [ ! -d /root/build/mdns ]; then
mkdir /root/build/mdns
fi
cd /root/build/mdns
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-mdns
chmod +x hp-mdns
##### Install nat-pmp Service Emulator #####
if [ ! -d /root/build/nat-pmp ]; then
mkdir /root/build/nat-pmp
fi
cd /root/build/nat-pmp/
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-nat-pmp
chmod +x hp-nat-pmp
##### Install quake Service Emulator *****
if [ ! -d /root/build/quake_emu ]; then
mkdir /root/build/quake_emu
fi
cd /root/build/quake_emu
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-quake
chmod +x hp-quake
##### Install ripv1 Service Emulator *****
if [ ! -d /root/build/ripv1 ]; then
mkdir /root/build/ripv1
fi
cd /root/build/ripv1
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-ripv1
chmod +x hp-ripv1
###### Install sentinel Service Emulator ######
cd /root/build/
mkdir sentinel
cd /root/build/sentinel/
wget https://github.com/kingtuna/go-emulators/releases/download/0.09/hp-sentinal
chmod +x hp-sentinal
###### Install Recursive DNS ######
apt-get install bind9 dnsutils -y
echo 'include "/etc/bind/named.conf.options";
include "/etc/bind/named.conf.local";
include "/etc/bind/rndc.key";
# make it comment
# include "/etc/bind/named.conf.default-zones";
# add
include "/etc/bind/named.conf.internal-zones";
include "/etc/bind/named.conf.external-zones";' > /etc/bind/named.conf
echo '# define for internal section
view "internal" {
match-clients {
localhost;
10.0.0.0/24;
};
zone "." {
type hint;
file "db.root";
};
# set zone for internal
zone "server.world" {
type master;
file "server.world.lan";
allow-update { none; };
};
# set zone for internal *note
zone "0.0.10.in-addr.arpa" {
type master;
file "0.0.10.db";
allow-update { none; };
};
zone "localhost" {
type master;
file "db.local";
};
zone "127.in-addr.arpa" {
type master;
file "db.127";
};
zone "0.in-addr.arpa" {
type master;
file "db.0";
};
zone "255.in-addr.arpa" {
type master;
file "db.255";
};
};' > /etc/bind/named.conf.internal-zones
echo '# define for external section
view "external" {
match-clients { any; };
# allo any query
allow-query { any; };
# prohibit recursion
recursion yes;
# set zone for external
zone "server.world" {
type master;
file "server.world.wan";
allow-update { none; };
};
# set zone for external *note
zone "80.0.16.172.in-addr.arpa" {
type master;
file "80.0.16.172.db";
allow-update { none; };
};
};' > /etc/bind/named.conf.external-zones
rndc-confgen | head -n5 > /etc/bind/rndc.key
echo 'options {
# change
directory "/etc/bind";
# query range you allow
version "eatdix";
allow-query {any;};
# the range to transfer zone files
allow-transfer {any;};
# recursion range you allow
allow-recursion {any;};
dnssec-validation auto;
auth-nxdomain no;
};' > /etc/bind/named.conf.options
service bind9 restart
###### Install Bro ######
#
# apt-get install cmake make gcc g++ flex bison libpcap-dev
# libgeoip-dev libssl-dev python-dev zlib1g-dev libmagic-dev swig2.0 -y
#
# Javier this breaks a lot if bro doesn't get installed this is probabbly why
cd /root/build/
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz
gunzip GeoLiteCity.dat.gz
gunzip GeoLiteCityv6.dat.gz
mv GeoLiteCity.dat /usr/share/GeoIP/GeoLiteCity.dat
mv GeoLiteCityv6.dat /usr/share/GeoIP/GeoLiteCityv6.dat
ln -s /usr/share/GeoIP/GeoLiteCity.dat /usr/share/GeoIP/GeoIPCity.dat
ln -s /usr/share/GeoIP/GeoLiteCityv6.dat /usr/share/GeoIP/GeoIPCityv6.dat
wget `curl https://www.bro.org/download/index.html | grep "gz" | grep "bro-" | grep -v beta | grep -v aux | grep -v asc | cut -d'"' -f2`
rm -fr *-beta.tar.gz
tar -xvzf bro-2.*.tar.gz
cd bro-2.*
./configure --prefix=/nsm/bro
make
make install
# this is an NTP script for bro to understand the protocol
printf 'bW9kdWxlIE5UUDsKCmV4cG9ydCB7CglyZWRlZiBlbnVtIExvZzo6SUQgKz0geyBMT0cgfTsKCgly
ZWRlZiBlbnVtIE5vdGljZTo6VHlwZSArPSB7CgkJTlRQX0FsYXJtLAoJCU5UUF9Nb25saXN0X1F1
ZXJpZXMsCgkJfTsKCgl0eXBlIG50cF9yZWNvcmQ6IHJlY29yZCB7CgkJdHM6IHRpbWUgJmxvZzsK
CQl1aWQ6IHN0cmluZyAmbG9nOwoJCW9yaWc6IGFkZHIgJmxvZzsKCQlyZXNwOiBhZGRyICZsb2c7
CgkJcmVmaWQ6IGNvdW50ICZkZWZhdWx0PTAgJmxvZzsKCQljb2RlOiBjb3VudCAmZGVmYXVsdD0w
ICZsb2c7CgkJc3RyYXR1bTogY291bnQgJmRlZmF1bHQ9MCAmbG9nOwoJCXBvbGw6IGNvdW50ICZk
ZWZhdWx0PTAgJmxvZzsKCQlwcmVjaXNpb246IGludCAmZGVmYXVsdD10b19pbnQoIjAiKSAmbG9n
OwoJCSNkaXN0YW5jZTogaW50ZXJ2YWw7CgkJI2Rpc3BlcnNpb246IGludGVydmFsOwoJCXJlZnRp
bWU6IHRpbWUgJmxvZzsKCQkjb3JpZzogdGltZTsKCQkjcmVjOiB0aW1lOwoJCSN4bXQ6IHRpbWU7
CgkJZXhjZXNzOiBzdHJpbmcgJmRlZmF1bHQ9Ik5VTEwiICZsb2c7CgkJfTsKCgkjIFRoZSBjb2Rl
IHZhbHVlIG1hcHMgdG8gdGhlIE5UUCBtb2RlIHR5cGUgLSBmb3Igbm93IEkgYW0gbW9zdGx5Cgkj
ICBpbnRlcmVzdGVkIGluIGNvbnRyb2wgbWVzc2FnZXMuCgkjCgkjIE1vZGUJRGVzY3JpcHRpb24K
CSMgMAlyZXNlcnZlZC4KCSMgMQlTeW1tZXRyaWMgYWN0aXZlLgoJIyAyCVN5bW1ldHJpYyBwYXNz
aXZlLgoJIyAzCUNsaWVudC4KCSMgNAlTZXJ2ZXIuCgkjIDUJQnJvYWRjYXN0LgoJIyA2CU5UUCBj
b250cm9sIG1lc3NhZ2UuCgkjIDcJcHJpdmF0ZSB1c2UuCgljb25zdCBOVFBfUkVTRVJWRUQgPSAw
OwoJY29uc3QgTlRQX1NZTV9BQ1RJVkUgPSAxOwoJY29uc3QgTlRQX1NZTV9QQVNTSVZFID0gMjsK
CWNvbnN0IE5UUF9DTElFTlQgPSAzOwoJY29uc3QgTlRQX1NFUlZFUiA9IDQ7Cgljb25zdCBOVFBf
QlJPQURDQVNUID0gNTsKCWNvbnN0IE5UUF9DT05UUk9MID0gNjsKCWNvbnN0IE5UUF9QUklWQVRF
ID0gNzsKCgljb25zdCBwb3J0cyA9IHsgMTIzL3VkcCx9OwoJcmVkZWYgbGlrZWx5X3NlcnZlcl9w
b3J0cyArPSB7IHBvcnRzIH07CgoJY29uc3QgbG9nX29ubHlfY29udHJvbDogYm9vbCA9IEYgJnJl
ZGVmOwoKCSMgU28gd2UgZG9uJ3Qgd2FybiBtb3JlIHRoYW4gb25lIHRpbWUKCWdsb2JhbCBudHBf
aG9zdDogdGFibGVbYWRkcl0gb2YgY291bnQ7CgoJfSAjIGVuZCBleHBvcnQKCgpldmVudCBudHBf
bWVzc2FnZShjOiBjb25uZWN0aW9uLCBtc2c6IG50cF9tc2csIGV4Y2Vzczogc3RyaW5nKQoJewoJ
IyB3ZSBhcmUgaGFuZGVkIGEgbnRwX21zZyB0eXBlIHdoaWNoIGlzIHNsaWdodGx5IGRpZmZlcmVu
dCB0aGFuIHRoZQoJIyAgbnRwX3JlY29yZCB1c2VkIGZvciBkZWFsaW5nIHdpdGggdGhlIHBvbGlj
eSBzaWRlIG9mIHRoaW5ncy4KCglpZiAoIGxvZ19vbmx5X2NvbnRyb2wgJiYgKChtc2ckY29kZSAh
PSBOVFBfQ09OVFJPTCkgfHwgKG1zZyRjb2RlICE9IE5UUF9QUklWQVRFKSkgKQoJCXJldHVybjsK
Cglsb2NhbCB0X3JlYzogbnRwX3JlY29yZDsKCgl0X3JlYyRvcmlnID0gYyRpZCRvcmlnX2g7Cgl0
X3JlYyRyZXNwID0gYyRpZCRyZXNwX2g7Cgl0X3JlYyR1aWQgPSBjJHVpZDsKCXRfcmVjJHRzID0g
YyRzdGFydF90aW1lOwoKCWlmICggbXNnPyRpZCApCgkJdF9yZWMkcmVmaWQgPSBtc2ckaWQ7CgoJ
aWYgKCBtc2c/JGNvZGUgKQoJCXRfcmVjJGNvZGUgPSBtc2ckY29kZTsKCglpZiAoIG1zZz8kc3Ry
YXR1bSApCgkJdF9yZWMkc3RyYXR1bSA9IG1zZyRzdHJhdHVtOwoKCWlmICggbXNnPyRwb2xsICkK
CQl0X3JlYyRwb2xsID0gbXNnJHBvbGw7CgoJaWYgKCBtc2c/JHByZWNpc2lvbiApCgkJdF9yZWMk
cHJlY2lzaW9uID0gbXNnJHByZWNpc2lvbjsKCgkjaWYgKCBtc2c/JHJlZl90ICkKCQkjdF9yZWMk
cmVmdGltZSA9IG1zZyRyZWZfdDsKCgkjdF9yZWMkZXhjZXNzID0gZXhjZXNzOwoKCWlmICgobXNn
JGNvZGUgPT0gTlRQX1BSSVZBVEUpIHx8IChtc2ckY29kZSA9PSBOVFBfQ09OVFJPTCkpIHsKCgkJ
aWYgKCBjJGlkJG9yaWdfaCAhaW4gbnRwX2hvc3QgKSB7CgoJCQlOT1RJQ0UoWyRub3RlPU5UUDo6
TlRQX01vbmxpc3RfUXVlcmllcywKCQkJCSRjb25uPWMsCgkJCQkkc3VwcHJlc3NfZm9yPTZocnMs
CgkJCQkkbXNnPWZtdCgiTlRQIG1vbmxpc3QgcXVlcmllcyIpLAoJCQkJJGlkZW50aWZpZXI9Y2F0
KGMkaWQkb3JpZ19oKV0pOwoJCQl9CgkJZWxzZQoJCQkrK250cF9ob3N0W2MkaWQkb3JpZ19oXTsK
CgkJfQoKCUxvZzo6d3JpdGUoTE9HLCB0X3JlYyk7Cgl9CgpldmVudCBicm9faW5pdCgpICZwcmlv
cml0eT01CiAgICAgICAgewogICAgICAgIExvZzo6Y3JlYXRlX3N0cmVhbShOVFA6OkxPRywgWyRj
b2x1bW5zPW50cF9yZWNvcmRdKTsKICAgICAgICBBbmFseXplcjo6cmVnaXN0ZXJfZm9yX3BvcnRz
KEFuYWx5emVyOjpBTkFMWVpFUl9OVFAsIHBvcnRzKTsKICAgICAgICB9Cg==' | base64 -d > /nsm/bro/share/bro/site/ntp.bro
echo '#add NTP suport
@load ntp' >> /nsm/bro/share/bro/site/local.bro
## Set Interface
cat /nsm/bro/etc/node.cfg | sed -e "s/interface\=eth0/interface\=$INTERFACE/g" > /tmp/node.cfg
mv -f /tmp/node.cfg /nsm/bro/etc/node.cfg
export PATH=/nsm/bro/bin:$PATH
/nsm/bro/bin/broctl install
/nsm/bro/bin/broctl start
cd /root/build/
###### logstash ######
gem install fpm
git clone https://github.com/Yuav/logstash-packaging.git --depth=1
cd logstash-packaging
./package.sh
cd ..
dpkg -i logstash_1.*.deb
/etc/init.d/logstash start
## clear out previous logstash configs
rm -f /etc/logstash/*
#http://www.appliednsm.com/parsing-bro-logs-with-logstash/
printf 'IyBDcmVhdGVkIGJ5IFRlcnJlbmNlIEdhcmVhdSAidHVuYSIgZm9yIGhvbmV5cG90IHByb2plY3QK
IyB0dW5hQHBlb3BsZS5vcHMtdHJ1c3QubmV0CgojIFVzZWQgSmFzb24gU21pdGgncyBzZXR1cCBh
cyBhIGJhc2UKIyBHcmVhdCBCbG9nIHBvc3QgaHR0cDovL3d3dy5hcHBsaWVkbnNtLmNvbS9wYXJz
aW5nLWJyby1sb2dzLXdpdGgtbG9nc3Rhc2gvCiMgaHR0cDovL2Jsb2cubHVzaXMub3JnL2Jsb2cv
MjAxMi8wMS8zMS9sb2FkLWJhbGFuY2luZy1sb2dzdGFzaC13aXRoLWFtcXAvCiMgdHVuYUBwZW9w
bGUub3BzLXRydXN0Lm5ldAojIGh0dHBzOi8vaG9tZS5yZWdpdC5vcmcvMjAxNC8wMS9hLWJpdC1v
Zi1sb2dzdGFzaC1jb29raW5nLyBnZW9pcAojIGh0dHBzOi8vaG9tZS5yZWdpdC5vcmcvdGFnL2xv
Z3N0YXNoLwojTG9ncyBiZWluZyBwYXJzZWQ6CiNjb25uLmxvZwojZG5zLmxvZwojbnRwLmxvZwoK
aW5wdXQgewoKI1Byb2R1Y3Rpb24gTG9ncyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjCiAg
ZmlsZSB7CiAgICB0eXBlID0+ICJCUk9fY29ubmxvZyIKICAgIHBhdGggPT4gIi9uc20vYnJvL2xv
Z3MvY3VycmVudC9jb25uLmxvZyIKICB9CiAgZmlsZSB7CiAgICB0eXBlID0+ICJCUk9fZG5zbG9n
IgogICAgcGF0aCA9PiAiL25zbS9icm8vbG9ncy9jdXJyZW50L2Rucy5sb2ciCiAgfQoKICBmaWxl
IHsKICAgIHR5cGUgPT4gIkJST19udHBsb2ciCiAgICBwYXRoID0+ICIvbnNtL2Jyby9sb2dzL2N1
cnJlbnQvbnRwLmxvZyIKICB9CgogIGZpbGUgewogICAgdHlwZSA9PiAiU1NIUE9UX3NzaGxvZyIK
ICAgIHBhdGggPT4gIi92YXIvbG9nL3NzaHBvdF9hdXRoLmxvZyIKICB9CgojIyMjIyMjIyMjIyMj
IyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMKfQoKZmlsdGVyIHsKICBpZiBb
bWVzc2FnZV0gPX4gL14jLyB7CiAgICBkcm9wIHsgIH0KICB9IGVsc2UgewoKIyBTU0hQT1Rfc3No
bG9nICMjIyMjIyMjIyMjIyMjIyMjIyMjIyMKICBpZiBbdHlwZV0gPT0gIlNTSFBPVF9zc2hsb2ci
IHsKICAgICAgZ3JvayB7CiAgICAgICAgbWF0Y2ggPT4gWyAibWVzc2FnZSIsICIoPzx0cz4oLio/
KSlcdCg/PGlkLm9yaWdfaD4oLio/KSlcdCg/PHVzZXI+KC4qPykpXHQoPzxwYXNzPiguKj8pKVx0
IiBdCiAgICAgIH0KICB9CgojIEJST19udHBsb2cgIyMjIyMjIyMjIyMjIyMjIyMjIyMjIwogIGlm
IFt0eXBlXSA9PSAiQlJPX250cGxvZyIgewogICAgICBncm9rIHsKICAgICAgICBtYXRjaCA9PiBb
ICJtZXNzYWdlIiwgIig/PHRzPiguKj8pKVx0KD88dWlkPiguKj8pKVx0KD88aWQub3JpZ19oPigu
Kj8pKVx0KD88aWQucmVzcF9oPiguKj8pKVx0KD88cmVmaWQ+KC4qPykpXHQoPzxjb2RlPiguKj8p
KVx0KD88c3RyYXR1bT4oLio/KSlcdCg/PHBvbGw+KC4qPykpXHQoPzxwcmVjZWlzc2lvbj4oLio/
KSlcdCg/PHJlZnRpbWU+KC4qPykpXHQoPzxleGNlc3M+KC4qPykpIiBdCiAgICAgIH0KICAgICAg
aWYgW2NvZGVdID1+IC9eNC8gewogICAgICAgIGRyb3AgeyAgfQogICAgICAgfQogIH0KCiMgQlJP
X2Ruc2xvZyAjIyMjIyMjIyMjIyMjIyMjIyMjIyMjCiAgaWYgW3R5cGVdID09ICJCUk9fZG5zbG9n
IiB7CiAgICBncm9rIHsKbWF0Y2ggPT4gWyAibWVzc2FnZSIsICIoPzx0cz4oLio/KSlcdCg/PHVp
ZD4oLio/KSlcdCg/PGlkLm9yaWdfaD4oLio/KSlcdCg/PGlkLm9yaWdfcD4oLio/KSlcdCg/PGlk
LnJlc3BfaD4oLio/KSlcdCg/PGlkLnJlc3BfcD4oLio/KSlcdCg/PHByb3RvPiguKj8pKVx0KD88
dHJhbnNfaWQ+KC4qPykpXHQoPzxxdWVyeT4oLio/KSlcdCg/PHFjbGFzcz4oLio/KSlcdCg/PHFj
bGFzc19uYW1lPiguKj8pKVx0KD88cXR5cGU+KC4qPykpXHQoPzxxdHlwZV9uYW1lPiguKj8pKVx0
KD88cmNvZGU+KC4qPykpXHQoPzxyY29kZV9uYW1lPiguKj8pKVx0KD88QUE+KC4qPykpXHQoPzxU
Qz4oLio/KSlcdCg/PFJEPiguKj8pKVx0KD88UkE+KC4qPykpXHQoPzxaPiguKj8pKVx0KD88YW5z
d2Vycz4oLio/KSlcdCg/PFRUTHM+KC4qPykpXHQoPzxyZWplY3RlZD4oLiopKSIgXQpyZW1vdmVf
ZmllbGQgPT4gWyAiYW5zd2VycyIgXQogIH0KfQoKIyBCUk9fY29ubmxvZyAjIyMjIyMjIyMjIyMj
IyMjIyMjIyMjCiAgaWYgW3R5cGVdID09ICJCUk9fY29ubmxvZyIgewogICAgZ3JvayB7Cm1hdGNo
ID0+IFsgIm1lc3NhZ2UiLCAiKD88dHM+KC4qPykpXHQoPzx1aWQ+KC4qPykpXHQoPzxpZC5vcmln
X2g+KC4qPykpXHQoPzxpZC5vcmlnX3A+KC4qPykpXHQoPzxpZC5yZXNwX2g+KC4qPykpXHQoPzxp
ZC5yZXNwX3A+KC4qPykpXHQoPzxwcm90bz4oLio/KSlcdCg/PHNlcnZpY2U+KC4qPykpXHQoPzxk
dXJhdGlvbj4oLio/KSlcdCg/PG9yaWdfYnl0ZXM+KC4qPykpXHQoPzxyZXNwX2J5dGVzPiguKj8p
KVx0KD88Y29ubl9zdGF0ZT4oLio/KSlcdCg/PGxvY2FsX29yaWc+KC4qPykpXHQoPzxtaXNzZWRf
Ynl0ZXM+KC4qPykpXHQoPzxoaXN0b3J5PiguKj8pKVx0KD88b3JpZ19wa3RzPiguKj8pKVx0KD88
b3JpZ19pcF9ieXRlcz4oLio/KSlcdCg/PHJlc3BfcGt0cz4oLio/KSlcdCg/PHJlc3BfaXBfYnl0
ZXM+KC4qPykpXHQoPzx0dW5uZWxfcGFyZW50cz4oLio/KSkiIF0KICAgIH0KICB9CiB9CiAgaWYg
W2lkLm9yaWdfaF0gIHsKICAgIGdlb2lwIHsKICAgICAgc291cmNlID0+ICJpZC5vcmlnX2giCiAg
ICAgIHRhcmdldCA9PiAiZ2VvaXAiCiAgICAgIGFkZF9maWVsZCA9PiBbICJbZ2VvaXBdW2Nvb3Jk
aW5hdGVzXSIsICIle1tnZW9pcF1bbG9uZ2l0dWRlXX0iIF0KICAgICAgYWRkX2ZpZWxkID0+IFsg
IltnZW9pcF1bY29vcmRpbmF0ZXNdIiwgIiV7W2dlb2lwXVtsYXRpdHVkZV19IiAgXQogICAgfQog
ICAgbXV0YXRlIHsKICAgICAgY29udmVydCA9PiBbICJbZ2VvaXBdW2Nvb3JkaW5hdGVzXSIsICJm
bG9hdCIgXQogICAgfQogICAgbXV0YXRlIHsKICAgICAgdXBwZXJjYXNlID0+IFsgImdlb2lwLmNv
dW50cnlfY29kZTIiIF0KICAgIH0KICB9Cn0K' | base64 -d > /etc/logstash/bro.conf
printf "output {
rabbitmq {
user => \"$RABBIT_USER\"
exchange_type => \"direct\"
password => \"$RABBIT_PASSWORD\"
exchange => \"amq.direct\"
vhost => \"/amp\"
durable => true
ssl => true
port => 5671
persistent => true
host => \"$RABBIT_HOST\"
}
}" >> /etc/logstash/bro.conf
curl https://raw.githubusercontent.com/kingtuna/logstash-ubuntu-misc/master/upstart.logstash.conf > /etc/init/logstash.conf
cd /root/build/
###### Setup SSH Honeypot ######
apt-get install libssh-dev -y
git clone https://github.com/kingtuna/sshpot.git
cd sshpot
make
mv /etc/ssh/sshd_config /etc/ssh/sshd_config.old
cat /etc/ssh/sshd_config.old | sed 's/Port 22$/Port 2222/g' > /etc/ssh/sshd_config
cp upstart.sshpot.conf /etc/init/sshpot.conf
cp sshpot /usr/local/bin/
touch cat /var/log/sshpot_auth.log
service ssh restart
service sshpot start
cd /root/build/
##
# Added in killntp.sh script
# killall sshpot
# service sshpot restart
##
###### Clean Ups and Cron ######
echo '
/usr/local/bin/ntpd -u ntp:ntp -p /var/run/ntpd.pid -g
export PATH=/nsm/bro/bin:$PATH
/nsm/bro/bin/broctl install
/nsm/bro/bin/broctl start
sleep 2
service logstash start
/root/build/ssdp/hp-ssdp &
/root/build/heartbeat/hp-heartbeat &
/root/build/mdns/hp-mdns &
/root/build/nat-pmp/hp-nat-pmp &
/root/build/quake_emu/hp-quake &
/root/build/ripv1/hp-ripv1 &
/root/build/sentinel/hp-sentinal &
hping3 --rand-source -c 600 --udp -p 123 --fast -n 127.0.0.1 -d 48 -E /root/ntp.bin
exit 0
' > /etc/rc.local
###### Install Geoupdater ######
echo '#!/usr/bin/env bash
## This script updates the maxmind database on the honeypot
cd /root/build/
wget -N http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
wget -N http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz
gunzip GeoLiteCity.dat.gz
gunzip GeoLiteCityv6.dat.gz
mv GeoLiteCity.dat /usr/share/GeoIP/GeoLiteCity.dat
mv GeoLiteCityv6.dat /usr/share/GeoIP/GeoLiteCityv6.dat
ln -s /usr/share/GeoIP/GeoLiteCity.dat /usr/share/GeoIP/GeoIPCity.dat
ln -s /usr/share/GeoIP/GeoLiteCityv6.dat /usr/share/GeoIP/GeoIPCityv6.dat' > /root/geoupdate.sh
chmod +x /root/geoupdate.sh
##### crontab to install
##start logstash and bro
printf '*/30 * * * * service bind9 restart
*/30 * * * * /etc/init.d/xinetd restart
* */2 * * * /root/killntp.sh
0-59/5 * * * * /nsm/bro/bin/broctl cron
23 2 * * * /root/geoupdate.sh
' > crontab.txt
crontab crontab.txt
echo "net.ipv4.tcp_keepalive_intvl=570" >> /etc/sysctl.conf
reboot
| true |
6019f48102f0d6df4ba2c5f2e47ced047cdaaa17 | Shell | stqism/xnu-chroot-x86_64 | /var/lib/dpkg/info/binfmt-support.postinst | UTF-8 | 1,128 | 3.296875 | 3 | [] | no_license | #! /bin/sh
set -e
if [ "$1" = configure ]; then
update-binfmts --import || true
if dpkg --compare-versions "$2" lt 2.0.0; then
rm -rf /var/cache/binfmts
fi
fi
# Automatically added by dh_systemd_enable
# This will only remove masks created by d-s-h on package removal.
deb-systemd-helper unmask binfmt-support.service >/dev/null || true
# was-enabled defaults to true, so new installations run enable.
if deb-systemd-helper --quiet was-enabled binfmt-support.service; then
# Enables the unit on first installation, creates new
# symlinks on upgrades if the unit file has changed.
deb-systemd-helper enable binfmt-support.service >/dev/null || true
else
# Update the statefile to add new symlinks (if any), which need to be
# cleaned up on purge. Also remove old symlinks.
deb-systemd-helper update-state binfmt-support.service >/dev/null || true
fi
# End automatically added section
# Automatically added by dh_installinit
if [ -x "/etc/init.d/binfmt-support" ]; then
update-rc.d binfmt-support defaults >/dev/null
invoke-rc.d binfmt-support start || true
fi
# End automatically added section
exit 0
| true |
e9ed37c3190018c26fbf28228ee35bc90eb38c3e | Shell | andrewidya/pico | /.openshift/action_hooks/deploy | UTF-8 | 861 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# This deploy hook gets executed after dependencies are resolved and the
# build hook has been run but before the application has been started back
# up again. This script gets executed directly, so it could be python, php,
# ruby, etc.
# GETTING-STARTED: change 'myproject' to your project name:
echo "Executing 'python $OPENSHIFT_REPO_DIR/wsgi/pico/manage.py syncdb --noinput'"
# GETTING-STARTED: change 'myproject' to your project name:
##python "$OPENSHIFT_REPO_DIR"wsgi/pico/manage.py syncdb --noinput
##python "$OPENSHIFT_REPO_DIR"wsgi/pico/manage.py migrate
# GETTING-STARTED: change 'myproject' to your project name:
echo "Executing 'python $OPENSHIFT_REPO_DIR/wsgi/pico/manage.py collectstatic --noinput'"
# GETTING-STARTED: change 'myproject' to your project name:
python "$OPENSHIFT_REPO_DIR"wsgi/pico/manage.py collectstatic --noinput | true |
53ddc60929c97b73ecb3ff2a674cf3418ea1887e | Shell | chendaye/Shell | /advance_shell_programming/mail/basics/mail_distribute_proxy_sh | UTF-8 | 495 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# 邮件分发代理
# MDA软件负责把消息分发个本地用户
# MDA软件集中将邮件发送给本地用户
# 邮件管理员可以为用户提供额外的功能。比如过滤垃圾邮件,邮件排序等等
#
# MDA程序收到消息时必须确保消息发送到正确位置,也就是本地用户邮箱或者用户自定义的其他位置
# 目前Linux上最常用的3种邮箱类型:
# /var/spool/mail 或 /var/mail
# $HOME/mail
# Maildir 样式的邮箱目录
| true |
da8171739425515a7e2b8cf58feeb32a5c95dae2 | Shell | cuthik/submit_cab | /respmcs_TMPL.sh | UTF-8 | 3,295 | 3.171875 | 3 | [] | no_license | #!/bin/bash
##
## @file respmcs_TMPL.sh
##
## template FOR CAB: the literal placeholders OUTNAME and RANDOMSEED
## are substituted by the submission machinery before this file is
## handed to PBS.
##
## @author cuto <Jakub.Cuth@cern.ch>
## @date 2014-02-16
#PBS -N OUTNAME
#PBS -q sam_hi@d0cabsrv2
#PBS -j oe
#PBS -k o
#PBS -l nodes=1
# Dry-run switch: with DRYRUN="echo " every guarded command is only
# printed, not executed.
# for testing the settings
DRYRUN="echo "
# !!!! uncomment line below to actualy run something !!!
DRYRUN=
# CP inherits the dry-run prefix so staging copies are also skipped
# when only testing the settings.
CP="${DRYRUN}cp "
# PMCS source directory (note: was previously mislabeled "RESBOS DIR")
PMCSDIR=/prj_root/7055/wmass2/jcuth/epmcs_analysis/wz_epmcs
PMCSSRC=$PMCSDIR/src
# RESBOS interface directory (grids, input templates, tupleMaker)
RESBOSINTFACE=/prj_root/7055/wmass2/jcuth/epmcs_analysis/resbosa_interface
# OUTPUT DIR for logs, ntuples and weight files
OUTDIR=/prj_root/7056/wmass2/jcuth/cabout_respmcs
# Job index = last dot-separated field of the (substituted) job name.
jobi=`echo OUTNAME | rev | cut -d. -f1 | rev`
#######################################
# Stage RESBOS binaries and inputs into the working directory.
# Arguments:
#   $1 - resbos.in template (placeholders: 1234567 -> RANDOMSEED, IWGT)
#   $2 - RESBOS 'main' grid file
#   $3 - RESBOS 'yk' grid file
# Globals: RESBOSINTFACE, OUTDIR, jobi, CP, DRYRUN (read)
#######################################
prepare_resbos(){
    templatein=$1
    mainout=$2
    ykout=$3
    # RESBOS ENVIRONMENT: pick 64- or 32-bit ROOT/CERNLIB and the
    # matching resbos build for this worker node.
    if [[ `uname -p` == "x86_64" ]]
    then
        echo 64
        setup root v5_26_00d -q GCC_4_5_1 -f Linux64bit+2.6-2.5
        setup cern 2004 -q x86_64
        RESBOSROOTDIR=$RESBOSINTFACE/resbos_CP_020811_64
    else
        echo 32
        setup root v5_18_00_lts3-32_py243_dzero -q gcc343:opt
        RESBOSROOTDIR=$RESBOSINTFACE/resbos_CP_020811
    fi
    # RESBOS INPUTS: weight mode -1 for the central grid; otherwise -2
    # and reuse the weights file produced by the central job for this
    # job index (saved near the end of the script).
    WGT=-2
    if [[ $mainout == *central* ]]
    then
        WGT=-1
    else
        $CP $OUTDIR/weights_$jobi.dat weights.dat
    fi
    #WGT=1
    # Instantiate the input card: substitute the seed and weight mode.
    ${DRYRUN}cat $templatein | sed "s|1234567|RANDOMSEED|g;s|IWGT|$WGT|g" > resbos.in
    $CP $mainout main.out
    $CP $ykout yk.out
    $CP $RESBOSROOTDIR/resbos resbos
    $CP $RESBOSINTFACE/tupleMaker/tupleMaker tupleMaker
    $CP $RESBOSINTFACE/tupleMaker/get_entries.C get_entries.C
}
# Run the RESBOS generator, logging wall-clock start/end timestamps.
run_resbos(){
    printf '%s' "======= starting resbos: "
    date
    $DRYRUN ./resbos
    printf '%s' "======= resbos end: "
    date
}
#######################################
# Set up the D0 PMCS environment and stage its executable/config.
# Globals: CP, PMCSSRC (read)
#######################################
prepare_pmcs(){
    # PMCS ENVIRONMENT
    setup D0RunII p21.26.00 -O SRT_QUAL=maxopt
    setup lhapdf
    # Data library paths exported for PMCS.
    export TBLibraryRootPath="/rooms/wmass/hengne/TBLibrary"
    export MBLibraryRootPath="/rooms/wmass/rclsa/DATA/MBZBLibrary"
    export ZBLibraryRootPath="/rooms/wmass/rclsa/DATA/MBZBLibrary"
    export HRLibraryRootPath="/rooms/wmass/jenny/MC/HRLibrary"
    # CP EXECUTABLE INSTEAD OF COMPILATION (stage the pre-built binary)
    $CP $PMCSSRC/run_pmcs .
    #$CP $PMCSSRC/parameters.rc.geant .
    $CP $PMCSSRC/parameters.rc .
}
#######################################
# Convert the RESBOS .hep output to a ROOT ntuple and run PMCS on it.
# Outputs: resbos.root, file.list, pmcs.log (plus PMCS result files)
#######################################
run_pmcs(){
    # Convert resbos.hep -> resbos.root for PMCS consumption.
    $DRYRUN ./tupleMaker resbos.hep resbos.root
    echo -n "======= tupleMaker end: "; date
    echo -n "======= get entries: "
    # Print the ntuple entry count as a sanity check.
    root -l -b -q resbos.root get_entries.C
    echo "===== pwd and ls"
    pwd
    ls -la
    echo "===== cat file.list"
    # PMCS reads its input files from a list file.
    ls resbos*.root > file.list
    cat file.list
    $DRYRUN ./run_pmcs -f file.list -c parameters.rc -t 1 | tee pmcs.log
}
# WORK PLACE: per-job scratch area provided by PBS.
WORKAREA=/scratch/${PBS_JOBID}
cd $WORKAREA
# Unpack the pre-packaged epmcs work tree into scratch.
tar xzvf /prj_root/7055/wmass2/jcuth/epmcs_pure/epmc_pure.tgz
cd wz_epmcs/src
# Reset any inherited UPS state and load the products environment,
# which provides the 'setup' command used above.
unset UPS_DIR UPS_SHELL SETUP_UPS SETUPS_DIR
source /usr/products/etc/setups.sh
setup setpath
setup limit_transfers
#RUN
prepare_resbos $RESBOSINTFACE/input/resbos_wp_tev2_sigma_templateBIG.in \
               $RESBOSINTFACE/grids/w/1s/MAIN \
               $RESBOSINTFACE/grids/w/scn/yk_w+_tev2_ct10nn.out
run_resbos | tee resbos.log
prepare_pmcs
run_pmcs
# SAVE OUTPUTS
# NOTE(review): OUTNAME is the template placeholder substituted at
# submission time; prepare_resbos matches lowercase '*central*' while
# this check matches '*CENTRAL*' — confirm the intended case convention.
if [[ OUTNAME == *CENTRAL* ]]
then
    cp weights.dat $OUTDIR/weights_$jobi.dat
fi
cp resbos.log $OUTDIR/OUTNAME.resbos.log
cp pmcs.log $OUTDIR/OUTNAME.pmcs.log
cp result_wen.root $OUTDIR/OUTNAME.root
exit 0
1d0ec98968d86dcf9d285c0bf206dc033e34fdc1 | Shell | cliffton/caliper | /capstone/launchOrderer.sh | UTF-8 | 1,860 | 3.375 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | #!/bin/bash
# Uncomment for command tracing while debugging.
# set -x
# Root of the Caliper fabric-v1.4 sample network used below.
export CALIPER_FABRIC=$HOME/caliper/packages/caliper-samples/network/fabric-v1.4/
function usage {
echo "./launchOrderer.sh ORDERER_ID kafka"
}
# Check if ORG_NAME passed
if [ -z $1 ];
then
usage
echo "Please provide the ORDERER_ID!!!"
exit 0
else
ORDERER_ID=$1
fi
if [ -z $2 ];
then
echo "Using default config"
CONFIG_TYPE="config"
else
CONFIG_TYPE=$2
fi
pids=`ps ax | grep -i 'orderer' | grep -v grep | awk '{print $1}'`
for pid in $(eval echo $pids)
do
if [ $pid == $$ ]
then
echo "Current PID = " $pid
else
echo "########### kill -9 $pid #############"
kill -9 $pid
fi
done
rm -rf $HOME/orderer
mkdir -p $HOME/orderer/msp/orderer
mkdir -p $HOME/orderer/ledger
mkdir -p $HOME/orderer/configtx
export FABRIC_LOGGING_SPEC="grpc=debug:debug"
export ORDERER_GENERAL_LISTENADDRESS="0.0.0.0"
export ORDERER_GENERAL_GENESISMETHOD="file"
export ORDERER_GENERAL_GENESISFILE="$HOME"/orderer/configtx/genesis.block
export ORDERER_GENERAL_LOCALMSPID="OrdererMSP"
export ORDERER_GENERAL_LOCALMSPDIR="$HOME"/orderer/msp/orderer/msp
export ORDERER_KAFKA_VERBOSE="true"
export ORDERER_FILELEDGER_LOCATION="$HOME"/orderer/ledger
export ORDERER_GENERAL_LOGLEVEL="debug"
export FABRIC_CFG_PATH="$HOME"/orderer
cd $HOME/caliper/packages/caliper-samples/network/fabric-v1.4/"$CONFIG_TYPE"
cp orderer.yaml "$HOME"/orderer/
cp -r ./config/* "$HOME"/orderer/configtx/
cp -r ./config/crypto-config/ordererOrganizations/example.com/orderers/orderer"$ORDERER_ID".example.com/* "$HOME"/orderer/msp/orderer/
cd $HOME/go/src/github.com/hyperledger/fabric/
# set +x
orderer &> $HOME/orderer/orderer.log &
pid=$!
sleep 5
# pid=`ps ax | grep -i 'orderer' | grep -v grep | awk '{print $1}'`
python3 $HOME/caliper/capstone/processMonitor.py 9001 $pid &> $HOME/orderer/monitor.log & | true |
747a0de4166ffa2c02188f243d88b84819a38ccf | Shell | mickelindahl/bgmodel | /nest/dist/install-nest-2.6.0.sh | UTF-8 | 2,403 | 4.15625 | 4 | [] | no_license | #!/bin/sh
NEST_VERSION=2.6.0
NEST_FOLDER_NAME=nest-2.6.0
NEST_TAR=nest-$NEST_VERSION.tar.gz
echo ""
if [ -d "source/$NEST_FOLDER_NAME" ];
then
echo "Source files already downloaded"
else
mkdir -p source
echo "Entering source folder"
cd source
URL="https://github.com/nest/nest-simulator/releases/download/v$NEST_VERSION/$NEST_TAR"
echo "Downloading nest from $URL"
wget $URL
echo "Unpacking "$NEST_TAR" to source folder"
tar -zxvf "$NEST_TAR"
cd ..
fi
echo "Proceeding with installation"
echo
#Start time watch
START=$(date +%s)
currDir=$(pwd)
#Get number of processors on the system
noProcs=$(grep -c 'model name' /proc/cpuinfo)
#Source directory
srcDir="$currDir/source/$NEST_FOLDER_NAME/"
#Build directory
buildDir="$currDir/build/$NEST_FOLDER_NAME/"
#Build directory
installDir="$currDir/install/$NEST_FOLDER_NAME/"
#Log directory
logDir="$currDir/log/"
#echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
#
## Directory where nest have been installed
#export NEST_INSTALL_DIR="$1"
#export LD_LIBRARY_PATH="$installDir/lib/nest"
#
#echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
#Remove old build
echo "Clear previous installation and build directories"
echo "Source dir: $srcDir"
echo "Build dir: $buildDir"
echo "Install dir: $installDir"
echo "Log dir: $logDir"
echo "Press [Enter] key to continue..."
read TMP
if [ -d "$buildDir" ];
then
echo "Removing $buildDir"
rm -r "$buildDir"
else
echo "No previous build dir to remove"
fi
echo "Creating build dir $buildDir"
mkdir -p "$buildDir"
if [ -d "$installDir" ];
then
echo "Removing $installDir"
rm -r "$installDir"
else
echo "Not previous install dir to remove"
fi
echo "Creating install dir $installDir"
mkdir -p "$installDir"
echo "Create log dir if it does not exist $logDir"
mkdir -p "$logDir"
echo "Enter build dir $buildDir"
cd "$buildDir"
echo "Press [Enter] key to continue..."
read TMP
# Need to explicilty say where to put nest with --prefix=$HOME/opt/nest
$srcDir"configure" --prefix=$installDir 2>&1 | tee "$logDir$NEST_FOLDER_NAME-configure"
make -j $noProcs 2>&1 | tee "$logDir$NEST_FOLDER_NAME-make"
make -j $noProcs install 2>&1 | tee "$logDir$NEST_FOLDER_NAME-install"
#make -j $noProcs installcheck 2>&1 | tee $logDir$1-installcheck
#Stop time watch
END=$(date +%s)
DIFF=$(( $END - $START ))
#Display script execution time
echo "It took $DIFF seconds"
| true |
d737729868c7e4dedd8610e68e9d073a46289520 | Shell | AlD/archlinux | /pkgbuilds/bareos/PKGBUILD | UTF-8 | 10,271 | 2.96875 | 3 | [] | no_license | # Maintainer: Daniel Albers <daniel@lbe.rs>
# bareos-fd.service by Christian Hesse <arch@eworm.de>
pkgbase='bareos'
pkgname=( "${pkgbase}-common" "${pkgbase}-console" "${pkgbase}-fd" "${pkgbase}-sd" "${pkgbase}-dir" "${pkgbase}-dir-sqlite3" "${pkgbase}-dir-mysql" "${pkgbase}-dir-mariadb" "${pkgbase}-dir-postgresql" )
pkgver=13.2.3
pkgrel=2
arch=(i686 x86_64)
pkgdesc="${pkgbase^} - Backup Archiving REcovery Open Sourced"
url="http://www.${pkgbase}.org"
license=('AGPL3')
optdepends=('lzo2: LZO compression for Storage Daemon'
'openssl: network encryption between daemons'
)
makedepends=('tcp_wrappers' 'sqlite' 'libmariadbclient' 'postgresql-libs' 'qt4')
source=("https://github.com/${pkgbase}/${pkgbase}/archive/Release/${pkgver}.tar.gz"
'00-qmake4.patch'
)
md5sums=('433d758fa7ea4455e96947fcfe30f84d'
'9ac88867a9ad3a4fe0486a26d0cdb542')
sha1sums=('eb3b4f220bf20b7b57a81b66bf42e906bb341b48'
'58a60e8af9b4735c564c7223c2bf0c25803927f3')
_instdir="${startdir}/install"
_workdir="/var/lib/${pkgbase}"
_pkgsrc="${srcdir}/${pkgbase}-Release-${pkgver}"
_genfiles() {
> "${srcdir}/${pkgbase}-dir.service" cat <<-EOF
#
# ${pkgbase^} Director Daemon service
#
[Unit]
Description=${pkgbase^} Director
Alias=${pkgbase}-dir
Requires=network.target
After=network.target remote-fs.target syslog.target
# Dependency about the database
# We let administrators decide if they need it (if local db instance)
#Wants=sqlite3.service
# Check if working dir exist and is a directory
ConditionPathIsDirectory=${_workdir}
[Service]
Type=forking
Restart=always
PIDFile=/run/${pkgbase}-dir.9101.pid
# EnvironmentFile=-/etc/sysconfig/${pkgbase}-dir
ExecStart=/usr/bin/${pkgbase}-dir -c /etc/${pkgbase}/${pkgbase}-dir.conf
[Install]
WantedBy=multi-user.target
EOF
> "${srcdir}/${pkgbase}-fd.service" cat <<-EOF
#
# ${pkgbase^} File Daemon service
#
[Unit]
Description=${pkgbase^} File Daemon
Requires=network.target
After=network.target remote-fs.target syslog.target
[Service]
Type=forking
Restart=always
ExecStart=/usr/bin/${pkgbase}-fd -c /etc/${pkgbase}/${pkgbase}-fd.conf
IOSchedulingClass=idle
PIDFile=/run/${pkgbase}-fd.9102.pid
[Install]
WantedBy=multi-user.target
EOF
> "${srcdir}/${pkgbase}-sd.service" cat <<-EOF
#
# ${pkgbase^} Storage Daemon service
#
[Unit]
Description=${pkgbase^} Storage Daemon
Requires=network.target
After=network.target remote-fs.target syslog.target
[Service]
Type=forking
Restart=always
PIDFile=/run/${pkgbase}-sd.9103.pid
# EnvironmentFile=-/etc/sysconfig/${pkgbase}-sd
ExecStart=/usr/bin/${pkgbase}-sd -c /etc/${pkgbase}/${pkgbase}-sd.conf
[Install]
WantedBy=multi-user.target
EOF
}
prepare() {
pushd "${srcdir}/${_pkgsrc}"
patch -Np3 -i ${srcdir}/00-qmake4.patch || true
_genfiles
popd
}
build() {
pushd "${srcdir}/${_pkgsrc}"
./configure \
--enable-smartalloc \
--prefix=/usr \
--sbindir=/usr/bin \
--sysconfdir=/etc/${pkgbase} \
--with-scriptdir=/etc/${pkgbase}/scripts \
--with-sqlite3 \
--with-mysql \
--with-postgresql \
--with-pid-dir=/run \
--with-systemd=/usr/lib/systemd/system \
--with-tcp-wrappers \
--with-logdir=/var/log/${pkgbase} \
--with-working-dir=${_workdir} \
--with-x
make DESTDIR="$_instdir" install
popd
}
package_bareos-fd() {
pkgdesc="${pkgdesc} (file daemon)"
backup=("etc/${pkgbase}/${pkgname}.conf")
depends=("${pkgbase}-common" 'tcp_wrappers')
pushd "${_instdir}"
cp --parents -a etc/${pkgbase}/${pkgname}.conf ${pkgdir}
cp --parents -a usr/bin/${pkgname} ${pkgdir}
cp --parents -a usr/lib/bpipe-fd.so ${pkgdir}
cp --parents -a usr/share/man/man8/${pkgname}.8.gz ${pkgdir}
mkdir -p ${pkgdir}/usr/lib/systemd/system/
cp -f ${srcdir}/${pkgbase}-fd.service ${pkgdir}/usr/lib/systemd/system/
popd
}
package_bareos-common() {
pkgdesc="${pkgdesc} (common files)"
pushd "${_instdir}"
cp --parents -a etc/${pkgbase}/scripts/${pkgbase}_config ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/btraceback.gdb ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/btraceback.dbx ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/btraceback.mdb ${pkgdir}
cp --parents -a usr/bin/btraceback ${pkgdir}
cp --parents -a usr/lib/libbareos-${pkgver}.so ${pkgdir}
cp --parents -a usr/lib/libbareoscfg-${pkgver}.so ${pkgdir}
cp --parents -a usr/lib/libbareosfind-${pkgver}.so ${pkgdir}
cp --parents -a usr/share/man/man8/btraceback.8.gz ${pkgdir}
#cp --parents -a usr/lib/libbareospy-${pkgver}.so ${pkgdir}
popd
}
package_bareos-console() {
pkgdesc="${pkgdesc} (management CLI)"
backup=("etc/${pkgbase}/bconsole.conf")
depends=("${pkgbase}-common")
pushd "${_instdir}"
cp --parents -a usr/bin/bconsole ${pkgdir}
cp --parents -a etc/${pkgbase}/bconsole.conf ${pkgdir}
cp --parents -a usr/share/man/man8/bconsole.8.gz ${pkgdir}
chmod 755 ${pkgdir}/etc/${pkgbase} ${pkgdir}/usr/bin/bconsole
chmod 644 ${pkgdir}/etc/${pkgbase}/bconsole.conf
popd
}
package_bareos-dir() {
pkgdesc="${pkgdesc} (Director)"
depends=("${pkgbase}-common")
optdepends=(
"${pkgname}-sqlite3: SQLite support"
"${pkgname}-mariadb: MariaDB support"
"${pkgname}-postgresql: PostgreSQL support")
backup=("etc/${pkgbase}/${pkgname}.conf")
pushd "${_instdir}"
cp --parents -a etc/${pkgbase}/${pkgname}.conf ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/query.sql ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/create_${pkgbase}_database ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/delete_catalog_backup ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/drop_${pkgbase}_database ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/drop_${pkgbase}_tables ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/grant_${pkgbase}_privileges ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/make_${pkgbase}_tables ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/make_catalog_backup.pl ${pkgdir}
cp --parents -a etc/${pkgbase}/scripts/update_${pkgbase}_tables ${pkgdir}
cp --parents -a usr/bin/${pkgname} ${pkgdir}
cp --parents -a usr/bin/bregex ${pkgdir}
cp --parents -a usr/bin/bsmtp ${pkgdir}
cp --parents -a usr/bin/bwild ${pkgdir}
cp --parents -a usr/share/man/man1/bsmtp.1.gz ${pkgdir}
cp --parents -a usr/share/man/man8/${pkgname}.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bregex.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bwild.8.gz ${pkgdir}
cp --parents -a usr/lib/libbareossql-${pkgver}.so ${pkgdir}
mkdir -p ${pkgdir}/usr/lib/systemd/system/
cp -f ${srcdir}/${pkgname}.service ${pkgdir}/usr/lib/systemd/system/
## Logwatch Support
mkdir -p ${pkgdir}/etc/logwatch/scripts/services/ ${pkgdir}/etc/logwatch/scripts/shared/
mkdir -p ${pkgdir}/etc/logwatch/conf/logfiles/ ${pkgdir}/etc/logwatch/conf/services/
cp -a ${srcdir}/${_pkgsrc}/scripts/logwatch/${pkgbase} ${pkgdir}/etc/logwatch/scripts/services/
cp -a ${srcdir}/${_pkgsrc}/scripts/logwatch/applybareosdate ${pkgdir}/etc/logwatch/scripts/shared/
cp -a ${srcdir}/${_pkgsrc}/scripts/logwatch/logfile.${pkgbase}.conf ${pkgdir}/etc/logwatch/conf/logfiles/${pkgbase}.conf
cp -a ${srcdir}/${_pkgsrc}/scripts/logwatch/services.${pkgbase}.conf ${pkgdir}/etc/logwatch/conf/services/${pkgbase}.conf
## Logrotate Support
mkdir -p ${pkgdir}/etc/logrotate.d/
cp -a ${srcdir}/${_pkgsrc}/scripts/logrotate ${pkgdir}/etc/logrotate.d/${pkgbase}
}
package_bareos-dir-mariadb() {
pkgdesc="${pkgdesc} (Director - MariaDB support)"
depends=("${pkgbase}-dir" 'libmariadbclient')
pushd "${_instdir}"
cp --parents -a usr/lib/libbareoscats-mysql-${pkgver}.so ${pkgdir}
cp --parents -a usr/lib/libbareoscats-mysql.so ${pkgdir}
cp -d usr/lib/libbareoscats-mysql.so ${pkgdir}/usr/lib/libbareoscats-7.0.3.so
for file in etc/${pkgbase}/scripts/ddl/*/*mysql*; do
cp --parents -a "$file" "${pkgdir}"
done
}
package_bareos-dir-mysql() {
pkgdesc="${pkgdesc} (transition package)"
depends=("${pkgbase}-dir-mariadb")
pushd "${_instdir}"
}
package_bareos-dir-postgresql() {
pkgdesc="${pkgdesc} (Director - PostgreSQL support)"
depends=("${pkgbase}-dir" 'postgresql-libs')
pushd "${_instdir}"
cp --parents -a usr/lib/libbareoscats-postgresql-${pkgver}.so ${pkgdir}
cp --parents -a usr/lib/libbareoscats-postgresql.so ${pkgdir}
cp -d usr/lib/libbareoscats-postgresql.so ${pkgdir}/usr/lib/libbareoscats-7.0.3.so
for file in etc/${pkgbase}/scripts/ddl/*/*postgresql*; do
cp --parents -a "$file" "${pkgdir}"
done
}
package_bareos-dir-sqlite3() {
pkgdesc="${pkgdesc} (Director - SQLite3 support)"
depends=("${pkgbase}-dir" 'sqlite')
pushd "${_instdir}"
cp --parents -a usr/lib/libbareoscats-sqlite3-${pkgver}.so ${pkgdir}
cp --parents -a usr/lib/libbareoscats-sqlite3.so ${pkgdir}
cp -d usr/lib/libbareoscats-sqlite3.so ${pkgdir}/usr/lib/libbareoscats-7.0.3.so
for file in etc/${pkgbase}/scripts/ddl/*/*sqlite3*; do
cp --parents -a "$file" "${pkgdir}"
done
}
package_bareos-sd() {
pkgdesc="${pkgdesc} (Storage Daemon)"
backup=("etc/${pkgbase}/${pkgname}.conf")
depends=("${pkgbase}-common")
pushd "${_instdir}"
cp --parents -a etc/${pkgbase}/${pkgname}.conf ${pkgdir}
cp --parents -a usr/bin/${pkgname} ${pkgdir}
cp --parents -a usr/bin/bextract ${pkgdir}
cp --parents -a usr/bin/bls ${pkgdir}
cp --parents -a usr/bin/bcopy ${pkgdir}
cp --parents -a usr/bin/bscan ${pkgdir}
cp --parents -a usr/bin/btape ${pkgdir}
cp --parents -a usr/share/man/man8/${pkgname}.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bextract.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bls.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bcopy.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/bscan.8.gz ${pkgdir}
cp --parents -a usr/share/man/man8/btape.8.gz ${pkgdir}
mkdir -p ${pkgdir}/usr/lib/systemd/system/
cp -f ${srcdir}/${pkgname}.service ${pkgdir}/usr/lib/systemd/system/
}
| true |
83b79c62ae39abb4c93657cb244f8646aa762814 | Shell | urbanski/flow-scripts | /port-recon/port-recon | UTF-8 | 675 | 3.3125 | 3 | [] | no_license | #!/bin/bash
RA="/usr/local/bin/ra"
NETWORKS_FILE="networks.lst"
PORTS_FILE="ports.lst"
IFILE=$1
NETWORKS=""
PORTS=""
RACMD="$RA -r $IFILE -n -s dport daddr:64 "
for netw in `cat $NETWORKS_FILE`; do
if [ "$NETWORKS" != "" ]; then
NETWORKS="$NETWORKS or "
fi
NETWORKS="$NETWORKS dst net $netw"
done
for port in `cat $PORTS_FILE`; do
if [ "$PORTS" != "" ]; then
PORTS="$PORTS or "
fi
PORTS="$PORTS dst port $port"
done
$RA -r $IFILE -n -s dport daddr:64 - tcp and \(pkts gt 128\) and \(dst port lte 1024 or \($PORTS\)\) and \($NETWORKS\) | sort | uniq
#$RACMD - tcp and \(pkts gt 8\) and \(dst port lte 1024\) and \($NETWORKS\) and fin and not rst | sort | uniq
| true |
08faf51239338cee4546d674767ee79351f355ad | Shell | jjwill/environment | /.bash/.functions_git.bash | UTF-8 | 5,634 | 3.765625 | 4 | [] | no_license | # git related functions, sourced in ~/.bashrc
# Get the common functions loaded
. ~/.bash/.bash_functions
# Global git branches we don't want to touch, blacklisted.
# (not sure if origin is needed actually)
export __GIT_YOUR_HANDS_OFF=("HEAD" "staging" "master" "development" "origin" "SEURAT_*")
# You can also have the prompt show the 'dirty' status of your repo, i.e. if you
# have uncommited changes, and whether your branch differs from upstream HEAD:
#
# * = unstaged changes
# + = staged changes
# $ = stashed changes
# % = untracked files
# u-1 = behind upstream by 1 commit
# u+2 = ahead of upstream by 2 commits
# u= = same as upstream
gitprompt () {
echo -e "\n$(tput smul)Normal Settings$(tput rmul)\n"
echo " $(tput setaf 5)*$(tput setaf 9) unstaged changes"
echo " $(tput setaf 5)+$(tput setaf 9) staged changes"
echo " $(tput setaf 5)\$$(tput setaf 9) stashed changes"
echo " $(tput setaf 5)%$(tput setaf 9) untracked files"
echo " $(tput setaf 5)u-1$(tput setaf 9) behind upstream by 1 commit"
echo " $(tput setaf 5)u+2$(tput setaf 9) ahead of upstream by 2 commits"
echo " $(tput setaf 5)u+1-2$(tput setaf 9) diverged from upstream"
echo " $(tput setaf 5)u=$(tput setaf 9) equal to upstream"
echo -e "\n$(tput smul)Verbose Settings$(tput rmul)\n"
echo " $(tput setaf 5)=$(tput setaf 9) equal to upstream"
echo " $(tput setaf 5)>$(tput setaf 9) ahead of upstream"
echo " $(tput setaf 5)<$(tput setaf 9) behind upstream"
echo " $(tput setaf 5)<>$(tput setaf 9) diverged form upstream"
}
git_branch_is_blacklisted() {
contains_element "$1" "${__GIT_YOUR_HANDS_OFF[@]}"
}
# Prints out all your remote branches
git_my_branches() {
local blue=$(tput setaf 6; tput bold)
local nc=$(tput sgr0)
# Check the "owner" name for the remote branch. This may not always be correct
# since users might change their commit name in the config.
local GIT_UNAME=`git config user.name`
local BRANCHES=(`git for-each-ref --format='%(authorname) %09 %(refname)' | grep "$GIT_UNAME" | awk -F "refs/remotes/origin/" 'NF > 1 {print $2}'`)
echo -e "\nRemote branches for $GIT_UNAME:\n"
printf " $blue%s$nc\n" "${BRANCHES[@]}"
}
# Show the branches not merged in to the current branch you are on
git_show_missing() {
local branch=${1:?"A branch is required"}
local current=${2:-`git rev-parse --abbrev-ref HEAD`}
git log ${branch} ^${current} --first-parent --pretty=format:"%h - %an %s"
}
# Deletes a list git branches locally, and then it will attempt deleting it on
# the remote repo. If you do not own the remote branch it will not delete it.
delete_branches() {
for branch in "$@"; do
__delete_branch $branch
done
}
git_intersection() {
# Check the "owner" name for the remote branch. This may not always be correct
# since users might change their commit name in the config.
local GIT_UNAME=`git config user.name`
local list1=(`git for-each-ref --format='%(authorname) %09 %(refname)' | grep "$GIT_UNAME" | awk -F "refs/remotes/origin/" 'NF > 1 {print $2}'`)
local list2=()
# Get the local branches, exluding the asterisk
for branch in $(git branch | tr -d " *"); do
git_branch_is_blacklisted "$branch"
if [[ "$?" == 1 ]]; then
list2+=($branch)
fi
done
# 1. Intersection
C=($(comm -12 <(printf '%s\n' "${list1[@]}" | LC_ALL=C sort) <(printf '%s\n' "${list2[@]}" | LC_ALL=C sort)))
# # 2. B - A
D=($(comm -13 <(printf '%s\n' "${list1[@]}" | LC_ALL=C sort) <(printf '%s\n' "${list2[@]}" | LC_ALL=C sort)))
local blue=$(tput setaf 6; tput bold)
local nc=$(tput sgr0)
printf " $blue%s$nc\n" "${C[@]}"
echo
echo
printf " $blue%s$nc\n" "${D[@]}"
}
git_history_merged() {
local branches=()
# Get the local branches, exluding the asterisk
for branch in $(git branch | tr -d " *"); do
git_branch_is_blacklisted "$branch"
if [[ "$?" == 1 ]]; then
history | grep 'git merge' | grep -q $branch
if [[ "$?" == 0 ]]; then
branches+=($branch)
fi
fi
done
local blue=$(tput setaf 6; tput bold)
local nc=$(tput sgr0)
printf " $blue%s$nc\n" "${branches[@]}"
}
# Deletes a git branch locally, and then it will attempt deleting it on the
# remote repo. If you do not own the remote branch it will not delete it.
__delete_branch() {
local BRANCH=${1:?"A branch is required"}
git_branch_is_blacklisted "$BRANCH"
if [[ "$?" == 0 ]]; then
echo -e "\nInvalid branch: $(tput setaf 6; tput bold)$BRANCH$(tput sgr0)"
echo -e "\nYou cannot delete any of these branches:"
echo
printf ' %s\n' "${__GIT_YOUR_HANDS_OFF[@]}"
echo
return 1
fi
# Delete it locally if it exists
if [[ `git branch | grep "$BRANCH"` ]]; then
git branch -D "$BRANCH"
else
echo "Branch does not exist locally, looking for remote to delete..."
fi
# Check the "owner" name for the remote branch and make sure it's the same as
# the one that is trying to delete it. This may not always be correct since
# users might change their commit name in the config.
local GIT_UNAME=`git config user.name`
local BRANCH_OWNER=`git for-each-ref --format='%(authorname) %09 %(refname)' | grep "origin/${BRANCH}" | awk '{print $1, $2}'`
if [[ "$GIT_UNAME" == "$BRANCH_OWNER" ]]; then
git push origin --delete "$BRANCH"
else
echo "Skipping remote delete since you do not own it"
fi
}
# Attaches git tab completion to a function
__git_completer() {
local track=1
__gitcomp_nl "$(__git_refs '' $track)"
}
__git_complete delete_branches __git_completer
__git_complete git_show_missing __git_completer
| true |
058c3d904dda2ceacce28e33a03a9a52a98c19a5 | Shell | shlomis1/vsa | /src/vscli | UTF-8 | 279 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $DIR/scripts/common.sh
fail_if_not_root
base=`basename $0`
user=""
if [ "$base" == "vscliuser" ]; then
user="--user"
fi
export PYTHONPATH="$DIR"
python $DIR/vsa/client/cli/vsacli.pyc $user $*
| true |
f3527b9dae92e5fb0d429aa9e8028ba58110459e | Shell | boredom101/nix-tip | /nix-tip | UTF-8 | 2,191 | 4.25 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
RED='\033[1;31m'
GREEN='\033[1;32m'
NC='\033[0m'
type=
config=
tip=
extraFlags=
attr=" "
colored=false
showSyntax() {
echo "Usage: $0 --tip <tips.nix> --type <TYPE> --config <config.nix>"
echo
echo "Options"
echo
echo " -A, --attr ATTRIBUTE Optional attribute that selects a configuration"
echo " expression in the config file"
echo " --color Color code the output"
echo " -C, --config FILE Config files to generate recommendations on"
echo " -H, --help Print this help."
echo " --show-trace Sent to the call to nix-instantiate, useful for"
echo " debugging"
echo " -T, --tip FILE File containing tips to generate recommendations with"
echo " --type TYPE The type of config file that is being used"
exit 0
}
while [ "$#" -gt 0 ]; do
i="$1"; shift 1
case "$i" in
--help|-h|-H)
showSyntax
;;
--tip|-T)
tip="$1"; shift 1
;;
--show-trace)
extraFlags+="--show-trace"
;;
--attr|-A)
attr="$1"; shift 1
;;
--config|-C)
config="$1"; shift 1
;;
--type)
type="$1"; shift 1
case $type in
nixos|home);;
*)
echo "unknown type: '$type'"
exit 1 ;;
esac
;;
--color)
colored="true"
;;
*)
echo "unknown option: '$i'"
exit 1
;;
esac
done
TEXT=$(nix-instantiate $extraFlags --eval nix-tip.nix --argstr "tipsPath" $(realpath $tip) --argstr "confPath" $(realpath $config) --argstr "type" $type --argstr "confAttr" "$attr")
TEXT=${TEXT#\"}
TEXT=${TEXT%\"}
printf "$TEXT" | while read line ; do
if [ $colored == "true" ]; then
if [[ "$line" == "Recommended:"* ]];
then
line=${GREEN}${line}
else
line=${RED}${line}
fi
line+=$NC
fi
printf "$line\n"
done
| true |
859c838d0b65664c728d6df5129dfe1560e9bfc9 | Shell | keonjeo/erlvm | /scripts/functions/erlvm_printers | UTF-8 | 927 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
erlvm_fatal() {
tput sgr0
tput setaf 1 # Red color
echo "[ERROR] $1" >&2
tput sgr0
exit 1
}
erlvm_error() {
tput sgr0
tput setaf 1 # Red color
echo "[ERROR] $1" >&2
tput sgr0
}
erlvm_warn() {
tput sgr0
tput setaf 3 # Yellow color
echo "[WARN] $1" >&1
tput sgr0
}
erlvm_info() {
tput sgr0
tput setaf 2 # Green color
echo "[INFO] $1" >&1
tput sgr0
}
erlvm_dev_info() {
tput sgr0
tput setaf 2 # Green color
if [[ -n $ERLVM_DEV ]]; then
echo "[INFO] $(date +'%Y-%m-%dT%H:%M:%S%z'): $@" >&1
else
echo "[INFO] $(date +'%Y-%m-%dT%H:%M:%S%z'): $@" > /dev/null
fi
tput sgr0
}
erlvm_echo() {
tput sgr0
tput setaf 4 # Blue color
echo "$1" >&1
tput sgr0
}
erlvm_success_echo() {
tput sgr0
tput setaf 5
echo "$1" >&1
tput sgr0
}
erlvm_message() {
tput sgr0
tput setaf 2 # Green color
echo "$1" >&1
tput sgr0
}
| true |
64addb14b4115bda614611eda10b878b453e83fb | Shell | openstack/openstack-helm | /tools/deployment/common/test-networkpolicy.sh | UTF-8 | 6,619 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
# test_netpol(namespace, application, component, target_host, expected_result{fail,success})
function test_netpol {
NS=$1
APP=$2
COMPONENT=$3
HOST=$4
STATUS=$5
echo Testing connection from $APP - $COMPONENT to host $HOST with namespace $NS
POD=$(kubectl -n $NS get pod -l application=$APP,component=$COMPONENT | grep Running | cut -f 1 -d " " | head -n 1)
PID=$(sudo docker inspect --format '{{ .State.Pid }}' $(kubectl get pods --namespace $NS $POD -o jsonpath='{.status.containerStatuses[0].containerID}' | cut -c 10-21))
if [ "x${STATUS}" == "xfail" ]; then
if ! sudo nsenter -t $PID -n wget --spider --timeout=5 --tries=1 $HOST ; then
echo "Connection timed out; as expected by policy."
else
exit 1
fi
else
sudo nsenter -t $PID -n wget --spider --timeout=5 --tries=1 $HOST
fi
}
#NOTE(gagehugo): Enable the negative tests once the services policy is defined
# General Netpol Tests
# Doing negative tests
#test_netpol openstack mariadb server rabbitmq.openstack.svc.cluster.local:5672 fail
#test_netpol openstack rabbitmq-rabbitmq server memcached.openstack.svc.cluster.local:11211 fail
# Negative Keystone tests
test_netpol openstack mariadb server keystone-api.openstack.svc.cluster.local:5000 fail
test_netpol openstack mariadb ingress keystone-api.openstack.svc.cluster.local:5000 fail
test_netpol openstack memcached server keystone-api.openstack.svc.cluster.local:5000 fail
test_netpol openstack rabbitmq server keystone-api.openstack.svc.cluster.local:5000 fail
# Negative Mariadb tests
test_netpol openstack memcached server mariadb.openstack.svc.cluster.local:3306 fail
test_netpol openstack ingress server mariadb-server.openstack.svc.cluster.local:3306 fail
# Doing positive tests
# Positive Mariadb tests
test_netpol openstack keystone api mariadb.openstack.svc.cluster.local:3306 success
test_netpol openstack keystone api mariadb-server.openstack.svc.cluster.local:3306 success
test_netpol openstack mariadb ingress mariadb-server.openstack.svc.cluster.local:3306 success
test_netpol openstack keystone api rabbitmq.openstack.svc.cluster.local:5672 success
test_netpol openstack ingress server keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack prometheus-openstack-exporter exporter keystone-api.openstack.svc.cluster.local:5000 success
if kubectl -n openstack get pod -l application=horizon | grep Running ; then
test_netpol openstack keystone api horizon.openstack.svc.cluster.local:80 fail
fi
if kubectl -n openstack get pod -l application=cinder | grep Running ; then
# Negative Cinder Tests
#test_netpol openstack keystone api cinder-api.openstack.svc.cluster.local fail
test_netpol openstack cinder api horizon.openstack.svc.cluster.local:80 fail
# Positive Cinder Tests
test_netpol openstack cinder api rabbitmq.openstack.svc.cluster.local:5672 success
# Positive Keystone test
test_netpol openstack cinder api keystone-api.openstack.svc.cluster.local:5000 success
# Positive Mariadb tests
test_netpol openstack cinder api mariadb.openstack.svc.cluster.local:3306 success
test_netpol openstack cinder api mariadb-server.openstack.svc.cluster.local:3306 success
else
# Negative Compute-Kit Tests
#test_netpol openstack keystone api heat-api.openstack.svc.cluster.local fail
#test_netpol openstack keystone api glance-api.openstack.svc.cluster.local fail
test_netpol openstack mariadb server glance-api.openstack.svc.cluster.local:9292 fail
test_netpol openstack memcached server glance-api.openstack.svc.cluster.local:9292 fail
test_netpol openstack keystone api glance-api.openstack.svc.cluster.local:9292 fail
# Memcached Negative Tests
test_netpol openstack mariadb server memcached.openstack.svc.cluster.local:11211 fail
test_netpol openstack rabbitmq server memcached.openstack.svc.cluster.local:11211 fail
test_netpol openstack openvswitch openvswitch-vswitchd memcached.openstack.svc.cluster.local:11211 fail
test_netpol openstack libvirt libvirt memcached.openstack.svc.cluster.local:11211 fail
# Heat Negative Tests
test_netpol openstack keystone api heat-api.openstack.svc.cluster.local:8004 fail
test_netpol openstack nova os-api heat-api.openstack.svc.cluster.local:8004 fail
test_netpol openstack neutron server heat-api.openstack.svc.cluster.local:8004 fail
test_netpol openstack glance api heat-api.openstack.svc.cluster.local:8004 fail
# Positive Compute-Kit Tests
# Positive Mariadb tests
test_netpol openstack heat api mariadb.openstack.svc.cluster.local:3306 success
test_netpol openstack glance api mariadb.openstack.svc.cluster.local:3306 success
test_netpol openstack glance api mariadb-server.openstack.svc.cluster.local:3306 success
# Positive Keystone tests
test_netpol openstack heat api keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack glance api keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack horizon server keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack nova os-api keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack nova compute keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack neutron l3-agent keystone-api.openstack.svc.cluster.local:5000 success
test_netpol openstack ingress server glance-api.openstack.svc.cluster.local:9292 success
test_netpol openstack nova os-api glance-api.openstack.svc.cluster.local:9292 success
test_netpol openstack nova compute glance-api.openstack.svc.cluster.local:9292 success
test_netpol openstack heat api glance-api.openstack.svc.cluster.local:9292 success
test_netpol openstack horizon server glance-api.openstack.svc.cluster.local:9292 success
test_netpol openstack horizon server heat-api.openstack.svc.cluster.local:8004 success
test_netpol openstack horizon server heat-cfn.openstack.svc.cluster.local:8000 success
test_netpol openstack heat api heat-api.openstack.svc.cluster.local:8004 success
fi
echo Test Success
| true |
bcfbaaa694192e3397895a15dcd79fa212649303 | Shell | xgi/houdoku-plugins | /.circleci/deploy.sh | UTF-8 | 1,127 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# download/authorize google cloud sdk
curl -o gcloud-sdk.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz
tar zxvf gcloud-sdk.tar.gz google-cloud-sdk
mv google-cloud-sdk gcloud-sdk
echo $GCLOUD_SERVICE_KEY | ./gcloud-sdk/bin/gcloud auth activate-service-account --key-file=-
./gcloud-sdk/bin/gcloud --quiet config set project ${GOOGLE_PROJECT_ID}
./gcloud-sdk/bin/gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE}
# upload plugin classfiles to bucket
PLUGINS_DIR=build/classes/java/main/com/faltro/houdoku/plugins
./gcloud-sdk/bin/gsutil cp -r $PLUGINS_DIR/content gs://houdoku-plugins
# build/upload index
SOURCE_DIR=src/main/java/com/faltro/houdoku/plugins
for dir in $SOURCE_DIR/*/*; do
filename=`basename $dir/*.java`
classname=`basename $(dirname $dir)`/${filename%.*}.class
md5sum=`md5sum $PLUGINS_DIR/$classname | awk '{ print $1 }'`
cat ${dir%*/}/metadata.json | jq -r --arg md5sum $md5sum '. + {md5sum: $md5sum}'
done | jq -sr '[.[]]' > index.json
./gcloud-sdk/bin/gsutil cp -r index.json gs://houdoku-plugins | true |
865f646e1c5fb45b1fb5fae97f282db8e9ea2fef | Shell | cloux/runit-base | /etc/runit/bootup/00-pseudofs.sh | UTF-8 | 2,143 | 3.25 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"WTFPL"
] | permissive | # *-*- Shell Script -*-*
# from VOID Linux (https://www.voidlinux.org)
printf '=> Mounting pseudo-filesystems ...\n'
mountpoint -q /proc || mount -o nosuid,noexec,nodev -t proc proc /proc
mountpoint -q /proc/sys/fs/binfmt_misc || ( [ -d /proc/sys/fs/binfmt_misc ] && \
mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc )
mountpoint -q /sys || mount -o nosuid,noexec,nodev -t sysfs sys /sys
mountpoint -q /sys/fs/pstore || mount -o nosuid,noexec,nodev -t pstore pstore /sys/fs/pstore
mountpoint -q /sys/kernel/config || mount -t configfs configfs /sys/kernel/config 2>/dev/null
mountpoint -q /sys/kernel/security || mount -t securityfs securityfs /sys/kernel/security 2>/dev/null
mountpoint -q /sys/kernel/debug || mount -t debugfs none_debugs /sys/kernel/debug 2>/dev/null
mountpoint -q /dev || mount -o mode=0755,nosuid -t devtmpfs dev /dev
mkdir -p -m0755 /dev/pts
mountpoint -q /dev/pts || mount -o mode=0620,gid=5,nosuid,noexec -n -t devpts devpts /dev/pts
mkdir -p -m0755 /dev/mqueue
mountpoint -q /dev/mqueue || mount -t mqueue mqueue /dev/mqueue 2>/dev/null
mountpoint -q /run || mount -o mode=0755,nosuid,nodev -t tmpfs run /run
mkdir -p -m0755 /run/shm /run/lvm /run/user /run/lock /run/log /run/rpc_pipefs
mountpoint -q /run/shm || mount -o mode=1777,nosuid,nodev -n -t tmpfs shm /run/shm
mountpoint -q /run/rpc_pipefs || mount -o nosuid,noexec,nodev -t rpc_pipefs rpc_pipefs /run/rpc_pipefs
# compatibility symlink
ln -sf /run/shm /dev/shm
# create path for runit supervise files
[ -d /run/runit/supervise ] || mkdir -p /run/runit/supervise
# Detect LXC virtualization containers
grep -q lxc /proc/self/environ >/dev/null && export VIRTUALIZATION=1
if [ -z "$VIRTUALIZATION" ]; then
if ! mountpoint -q /sys/fs/cgroup; then
mkdir -p -m0755 /sys/fs/cgroup
# try to mount cgroup2 single hierarchy, fallback to cgroup v1
mount -t cgroup2 cgroup2 /sys/fs/cgroup || mount -o mode=0755 -t tmpfs cgroup /sys/fs/cgroup
# add cgroup v1 hierarchy
for cg in $(grep '1$' /proc/cgroups 2>/dev/null | cut -f 1); do
mkdir /sys/fs/cgroup/$cg && mount -t cgroup -o $cg cgroup /sys/fs/cgroup/$cg
done
fi
fi
| true |
b730b482b9ffebd4321e100bda4d08162f19a4ce | Shell | jeromeyoon/LFSR | /run_test | UTF-8 | 411 | 2.75 | 3 | [] | no_license | #!/bin/sh
echo -n "insert dataset name(EX:horizontal,vertical,views):"
read dataset
echo -n "insert GPU number:"
read gpu
echo -n "insert GPU factor:"
read gpu_factor
if [ -z "$gpu" ]||[ -z "$gpu_factor" ]
then
echo "Please insert GPU number and GPU factor \n"
exit
else
CUDA_VISIBLE_DEVICES=$gpu python main_test.py --gpu $gpu_factor --dataset $dataset --batch_size 1 --image_wid 768 --image_hei 768
fi
| true |
49687b91e6888fe4699df2413d0b157c4ff88616 | Shell | Vaa3D/vaa3d_tools | /hackathon/PengXie/NeuronStructNavigator/cmake-3.6.2/Utilities/Scripts/update-curl.bash | UTF-8 | 689 | 3.046875 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
set -x
shopt -s dotglob
readonly name="curl"
readonly ownership="Curl Upstream <curl-library@cool.haxx.se>"
readonly subtree="Utilities/cmcurl"
readonly repo="https://github.com/bagder/curl.git"
readonly tag="curl-7_50_1"
readonly shortlog=false
readonly paths="
CMake/*
CMakeLists.txt
COPYING
include/curl/*.h
include/curl/curlbuild.h.cmake
lib/*.c
lib/*.h
lib/CMakeLists.txt
lib/Makefile.inc
lib/curl_config.h.cmake
lib/libcurl.rc
lib/vauth/*.c
lib/vauth/*.h
lib/vtls/*.c
lib/vtls/*.h
"
extract_source () {
git_archive
pushd "${extractdir}/${name}-reduced"
rm lib/config-*.h
popd
}
. "${BASH_SOURCE%/*}/update-third-party.bash"
| true |
4ce3e47a7b3110cccbfd3a03f215adf8f8b52362 | Shell | dmitry-solomadin/skwibl | /bin/config.sh | UTF-8 | 1,842 | 2.90625 | 3 | [] | no_license |
## dynamic server
DYN='app'
## socket server
SOC='server'
## coffee executable
COFFEE='bin/coffee'
## uglifyjs executable
UGLIFYJS='bin/uglifyjs'
## cleancss executable
CLEANCSS='bin/cleancss'
## application script name
JS_NAME='./assets/js/skwibl'
## application style name
CSS_NAME='./assets/css/skwibl'
## socket.io script
SOCKETIO='./node_modules/socket.io/node_modules/socket.io-client/dist/socket.io.js'
## coffee dir
COFFEE_DIR='client'
## coffee output
JS_OUTPUT_DIR='assets/js/client'
## uploads dir
UPLOADS='uploads'
## uploads temp dir
UPLOADS_TMP='uploads/tmp'
## patches dir
PATCHES_DIR='patches'
## node modules dir
MODULES_DIR='node_modules'
## external server configurations dir
EXTERNAL='external'
## hetzner IP
IP='88.198.192.88'
## hertzner password
PASSWORD='fuThoh5eipe8'
## node execution command
NODE_CMD='node --nouse-idle-notification --expose-gc'
## coffee execution command
COFFEE_OPT='--nodejs "--nouse-idle-notification" --nodejs "--expose-gc"'
## redis flush command
FLUSH_REDIS_CMD='redis-cli flushall'
## tools file (to uncomment gc)
TOOLS_FILE='tools/tools.iced'
## application script file (for timeshtamp)
APPSCRIPT_FILE='views/shared/application_scripts.ect'
## usage string
USAGE="Usage: $0 -a -b -c -d -i -p -r -s -u -h \n\n
-a --dynamic Start dynamic server \n
-b --build Build the project \n
-c --clean Clean the project auxiliary files \n
-d --deploy Deploy the project \n
-i --init Init the project \n
-o --connect Connect to the production skwibl service through ssh \n
-p --product Run skwibl in production mode \n
-r --reset Reset the project \n
-s --socket Start socket server \n
-u --update Update project dependencies \n
-w --dynamicWatch Watch client files for changes \n\n
-h --help This prompt \n"
| true |
8f55b10d231d3ffd56c13aa24383ea142460b9bf | Shell | selvaje/YaleRep | /SRTM/sc09_renameTif.sh | UTF-8 | 3,140 | 2.84375 | 3 | [] | no_license | # qsub /lustre/home/client/fas/sbsc/ga254/scripts/SRTM/sc9_renameTif.sh
#PBS -S /bin/bash
#PBS -q fas_normal
#PBS -l walltime=10:00:00
#PBS -l nodes=1:ppn=8
#PBS -V
#PBS -o /scratch/fas/sbsc/ga254/stdout
#PBS -e /scratch/fas/sbsc/ga254/stderr
export INDIR=/lustre/scratch/client/fas/sbsc/ga254/dataproces/SRTM
export OUTDIR=/lustre/scratch/client/fas/sbsc/ga254/dataproces/SRTM/final
echo 1 5 10 50 100 | xargs -n 1 -P 8 bash -c $'
km=$1
if [ $km -eq 1 ] ; then res="30 arc-seconds" ; fi
if [ $km -eq 5 ] ; then res="2.5 arc-minute " ; fi
if [ $km -eq 10 ] ; then res="5 arc-minute" ; fi
if [ $km -eq 50 ] ; then res="25 arc-minute" ; fi
if [ $km -eq 100 ] ; then res="50 arc-minute" ; fi
for DIR in altitude roughness slope tpi tri vrm ; do
if [ $DIR = altitude ] ; then DIR2="elevation" ; fi
if [ $DIR = roughness ] ; then DIR2="roughness" ; fi
if [ $DIR = slope ] ; then DIR2="slope" ; fi
if [ $DIR = tri ] ; then DIR2="tri" ; fi
if [ $DIR = tpi ] ; then DIR2="tpi" ; fi
if [ $DIR = vrm ] ; then DIR2="vrm" ; fi
for dir in max mean median min stdev ; do
if [ $dir = min ] ; then dir2="mi" ; fi
if [ $dir = max ] ; then dir2="ma" ; fi
if [ $dir = mean ] ; then dir2="mn" ; fi
if [ $dir = median ] ; then dir2="md" ; fi
if [ $dir = stdev ] ; then dir2="sd" ; fi
gdal_translate -projwin -180 +60 +180 -60 -co COMPRESS=DEFLATE -co ZLEVEL=9 $INDIR/$DIR/$dir/${DIR}_${dir}_km${km}.tif $OUTDIR/$DIR/${DIR2}_${km}KM${dir2}_SRTM.tif
echo gdal_edit $OUTDIR/$DIR/${DIR2}_${km}KM${dir2}_SRTM.tif
gdal_edit.py \
-mo "TIFFTAG_ARTIST=Giuseppe Amatulli (giuseppe.amatulli@yale.edu , giuseppe.amatulli@gmail.com)" \
-mo "TIFFTAG_DATETIME=2016" \
-mo "TIFFTAG_DOCUMENTNAME= ${res} ${dir} ${DIR2}" \
-mo "TIFFTAG_IMAGEDESCRIPTION= $res $DIR2 ${dir} derived from SRTM4.1dev" \
-mo "TIFFTAG_SOFTWARE=gdal 1.10.0 & pktools 2.6.4 & GRASS7" \
-a_ullr -180 +60 +180 -60 \
-a_nodata -9999 $OUTDIR/$DIR/${DIR2}_${km}KM${dir2}_SRTM.tif
done
done
echo aspect
for dir in max mean median min stdev ; do
if [ $dir = min ] ; then dir2="mi" ; fi
if [ $dir = max ] ; then dir2="ma" ; fi
if [ $dir = mean ] ; then dir2="mn" ; fi
if [ $dir = median ] ; then dir2="md" ; fi
if [ $dir = stdev ] ; then dir2="sd" ; fi
for var in cos sin Ew Nw ; do
if [ $var = cos ] ; then var2="aspectcosine" ; fi
if [ $var = sin ] ; then var2="aspectsine" ; fi
if [ $var = Ew ] ; then var2="eastness" ; fi
if [ $var = Nw ] ; then var2="northness" ; fi
gdal_translate -projwin -180 +60 +180 -60 -co COMPRESS=DEFLATE -co ZLEVEL=9 $INDIR/aspect/$dir/aspect_${dir}_${var}_km${km}.tif $OUTDIR/aspect/${var2}_${km}KM${dir2}_SRTM.tif
echo gdal_edit $OUTDIR/aspect/${var2}_${km}KM${dir2}_SRTM.tif
gdal_edit.py \
-mo "TIFFTAG_ARTIST=Giuseppe Amatulli (giuseppe.amatulli@yale.edu , giuseppe.amatulli@gmail.com)" \
-mo "TIFFTAG_DATETIME=2016" \
-mo "TIFFTAG_DOCUMENTNAME= ${res} ${dir} ${var2}" \
-mo "TIFFTAG_IMAGEDESCRIPTION ${res} ${dir} ${var2} derived from SRTM4.1dev" \
-mo "TIFFTAG_SOFTWARE=gdal 1.10.0 & pktools 2.6.4 & GRASS7" \
-a_ullr -180 +60 +180 -60 \
-a_nodata -9999 $OUTDIR/aspect/${var2}_${km}KM${dir2}_SRTM.tif
done
done
' _
exit
| true |
f163b1c2abc348bf724fe4c9b3c805b5e6f5ea49 | Shell | evanslabSJCRH/Polygenomic-Analysis | /run02.sh | UTF-8 | 26,378 | 2.9375 | 3 | [] | no_license | #!/bin/bash
#
#Runs the drworkflow pipeline.
################################################################################
set -e
source config.rc
timestamp=`date +%Y%m%d%H%M%S%N`
currdate=`date +%Y%m%d_%H%M%S`
echo $wd
echo $timestamp
# Prestage the files
datadir="/drworkflow_data"
ln -sf ${datadir}/20111103_SOM_GERM_SNP6_TOTXVI.csv \
./bin/20111103_SOM_GERM_SNP6_TOTXVI.csv
ln -sf ${datadir}/all.CNState.RData ./bin/all.CNState.RData
ln -sf ${datadir}/all.CNState.smooth.RData ./bin/all.CNState.smooth.RData
ln -sf ${datadir}/CNState.RData ./bin/CNState.RData
ln -sf ${datadir}/dutch.RData ./bin/dutch.RData
ln -sf ${datadir}/human_predictions_S_0_aug2010.txt \
./bin/human_predictions_S_0_aug2010.txt
ln -sf ${datadir}/human_predictions_S_C_aug2010.txt \
./bin/human_predictions_S_C_aug2010.txt
ln -sf ${datadir}/SmoothSignal.RData ./bin/SmoothSignal.RData
ln -sf ${datadir}/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData ./bin/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData
for drug in "${drugs[@]}"
do
echo $drug
mkdir -p "$wd/$drug"
mkdir -p "$wd/$drug/log"
cd "$wd/$drug"
################################################################################
# Stage the files in each drug output directory
mkdir -p {totxv_snp_lc50,totxvi_snp_lc50,all_snp_lc50,snp_lc50_meta}
mkdir -p {totxv_cn_lc50,totxvi_cn_lc50,all_cn_lc50,cn_lc50_meta}
cp ${execbin}/cnlc50.R ${wd}/${drug}/cnlc50.R
cp ${execbin}/genecnlc50.R ${wd}/${drug}/genecnlc50.R
cp ${execbin}/genecncutoff.R ${wd}/${drug}/genecncutoff.R
cp ${execbin}/drugcatadjust.R ${wd}/${drug}/drugcatadjust.R
cp ${execbin}/U133_gene_pos.txt ${wd}/${drug}/U133_gene_pos.txt
cp ${execbin}/mir_all_small.csv ${wd}/${drug}/mir_all_small.csv
ln -sf ${execbin}/SmoothSignal.RData ${wd}/${drug}/SmoothSignal.RData
ln -sf ${execbin}/all.CNState.RData ${wd}/${drug}/all.CNState.RData
ln -sf ${execbin}/CNState.RData ${wd}/${drug}/CNState.RData
ln -sf ${execbin}/all.CNState.smooth.RData \
${wd}/${drug}/all.CNState.smooth.RData
cp -u ${execbin}/20111103_SOM_GERM_SNP6_TOTXVI.csv \
$wd/$drug/20111103_SOM_GERM_SNP6_TOTXVI.csv
cp ${execbin}/cnlc50merge.R $wd/$drug/cnlc50merge.R
cp ${execbin}/cnlc50meta.R $wd/$drug/cnlc50meta.R
cp ${execbin}/Ip_Select-Optimal-Pval-Threshold_7-22-12.R \
${wd}/${drug}/Ip_Select-Optimal-Pval-Threshold_7-22-12.R
ln -sf ${execbin}/human_predictions_S_C_aug2010.txt \
${wd}/${drug}/human_predictions_S_C_aug2010.txt
ln -sf ${execbin}/human_predictions_S_0_aug2010.txt \
${wd}/${drug}/human_predictions_S_0_aug2010.txt
ln -sf ${execbin}/miranno.csv $wd/$drug/miranno.csv
ln -sf ${execbin}/miranno_new.csv $wd/$drug/miranno_new.csv
ln -sf ${execbin}/mir_all.csv $wd/$drug/mir_all.csv
cp -u ${execbin}/Sweave.sty ${wd}/${drug}/Sweave.sty
ln -sf ${execbin}/dutch.RData $wd/$drug/dutch.RData
ln -sf ${execbin}/nl_profile051405.txt $wd/$drug/nl_profile051405.txt
create_sweave_dispatch()
{
cp ${execbin}/${1}.Rnw $wd/$drug/${1}.Rnw
echo -e "r.lib <- '/home/rautry/drworkflow_Rlib'" > $wd/$drug/${1}.R
echo -e "require (filehash, lib.loc=r.lib)" >> $wd/$drug/${1}.R
echo -e "require (digest, lib.loc=r.lib)" >> $wd/$drug/${1}.R
echo -e "require (stashR, lib.loc=r.lib)" >> $wd/$drug/${1}.R
echo -e "require(cacheSweave, lib.loc=r.lib)" >> $wd/$drug/${1}.R
echo -e "setCacheDir('./cache${1}/')" >> $wd/$drug/${1}.R
echo -e "Sweave ('${1}.Rnw', driver=cacheSweaveDriver)" >> \
$wd/$drug/${1}.R
}
create_sweave_dispatch gelc50
create_sweave_dispatch mirlc50
create_sweave_dispatch methlc50
#################################################################################
if [ "${drug}" == "6TG" ]; then
cp ${execbin}/6TG_LC50_AllCohorts_022414.csv "${wd}/${drug}/6TG_LC50_AllCohorts_022414.csv"
ln -sf ${execbin}/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData ${wd}/${drug}/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData
fi
#################################################################################
if [ "${drug}" == "6MP" ]; then
cp ${execbin}/6MP_LC50_AllCohorts_022414.csv "${wd}/${drug}/6MP_LC50_AllCohorts_022414.csv"
ln -sf ${execbin}/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData ${wd}/${drug}/2013-11-06.dutch.dxbm.ii.mas5.probe.log2.RData
fi
#################################################################################
if [ "${runsnp}" == "TRUE" ]; then
cp ${execbin}/snpcutoff.R "${wd}/${drug}/${drug}_snpcutoff.R"
cp ${execbin}/snplc50.R "$wd/$drug/snplc50.R"
cp ${execbin}/snplc50merge.R "$wd/$drug/snplc50merge.R"
cp ${execbin}/snplc50meta.R "$wd/$drug/snplc50meta.R"
mkdir -p {imputedxv_snp_lc50,imputedxvi_snp_lc50}
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_01[1-200] \
-oo "log/${timestamp}_${drug}_01_01_01_%I.o" -R "rusage[mem=30000]" -M 30000 \
-app R-2.14.0 R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug \
--TOTXV snplc50.R log/${timestamp}_${drug}_01_01_01_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_02[1] \
-oo "log/${timestamp}_${drug}_01_01_02.o" -R "rusage[mem=30000]" -M 30000 \
-w "ended(${timestamp}_${drug}_01_01_01)" $execbin/checkconcat.sh \
$wd/$drug/totxv_snp_lc50 $wd/$drug/totxv_snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_03[1-200] \
-oo "log/${timestamp}_${drug}_01_01_03_%I.o" -R "rusage[mem=30000]" -M 30000 \
-app R-2.14.0 R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug \
--TOTXVI snplc50.R log/${timestamp}_${drug}_01_01_03_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_04[1] \
-oo "log/${timestamp}_${drug}_01_01_04.o" \
-w "ended(${timestamp}_${drug}_01_01_03)" -R "rusage[mem=30000]" -M 30000 \
$execbin/checkconcat.sh $wd/$drug/totxvi_snp_lc50 $wd/$drug/totxvi_snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_05[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_01_05_%I.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args --\$LSB_JOBINDEX \
--$drug --ALL snplc50.R log/${timestamp}_${drug}_01_01_05_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_06[1] \
-oo "log/${timestamp}_${drug}_01_01_06.o" \
-w "ended(${timestamp}_${drug}_01_01_05)" -R "rusage[mem=30000]" -M 30000 \
$execbin/checkconcat.sh $wd/$drug/all_snp_lc50 $wd/$drug/all_snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_07[1-285,288,289] \
-oo "log/${timestamp}_${drug}_01_01_07_%I.o" -R "rusage[mem=30000]" -M 30000 \
-app R-2.14.0 R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug \
--imputedxv snplc50.R log/${timestamp}_${drug}_01_01_07_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_08[1] \
-oo "log/${timestamp}_${drug}_01_01_08.o" -R "rusage[mem=30000]" -M 30000 \
-w "ended(${timestamp}_${drug}_01_01_07)" $execbin/checkconcat.sh \
$wd/$drug/imputedxv_snp_lc50 $wd/$drug/imputedxv_snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_09[1-289] \
-oo "log/${timestamp}_${drug}_01_01_09_%I.o" -R "rusage[mem=30000]" -M 30000 \
-app R-2.14.0 R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug \
--imputedxvi snplc50.R log/${timestamp}_${drug}_01_01_09_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_10[1] \
-oo "log/${timestamp}_${drug}_01_01_10.o" -R "rusage[mem=30000]" -M 30000 \
-w "ended(${timestamp}_${drug}_01_01_09)" $execbin/checkconcat.sh \
$wd/$drug/imputedxvi_snp_lc50 $wd/$drug/imputedxvi_snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_11[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_01_11.o" \
-w "ended(${timestamp}_${drug}_01_01_02)&&ended(${timestamp}_${drug}_01_01_04)&&ended(${timestamp}_${drug}_01_01_06)" \
-R "rusage[mem=2000]" -M 2000 R CMD BATCH --no-save --args --$drug snplc50merge.R \
log/${timestamp}_${drug}_01_01_11_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_12[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_01_12_%I.o" \
-w "ended(${timestamp}_${drug}_01_01_11)" -R "rusage[mem=2000]" -M 2000 \
R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug snplc50meta.R \
log/${timestamp}_${drug}_01_01_12_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_13[1] \
-oo "log/${timestamp}_${drug}_01_01_13.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_01_12)" \
$execbin/checkconcat.sh $wd/$drug/snp_lc50_meta $wd/$drug/snp_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_01_14[1] \
-oo "log/${timestamp}_${drug}_01_01_14.o" \
-app R-2.14.0 -R "rusage[mem=30000]" -M 30000 \
-w "ended(${timestamp}_${drug}_01_01_13)" \
R CMD BATCH --no-save --args ${drug}_snpcutoff.R \
log/${timestamp}_${drug}_01_01_14.Rout
fi
#################################################################################
#################################################################################
if [[ "${runcn}" == "TRUE" ]]; then
# cp ${execbin}/cncutoff.R ${wd}/${drug}/${drug}_cncutoff.R
cp ${execbin}/genecncutoff.R ${wd}/${drug}/${drug}_cncutoff.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_01[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_02_01_%I.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args --\$LSB_JOBINDEX \
--$drug --TOTXV genecnlc50.R log/${timestamp}_${drug}_01_02_01_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_02[1] \
-oo "log/${timestamp}_${drug}_01_02_02.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_02_01)" \
$execbin/checkconcat.sh $wd/$drug/totxv_cn_lc50 $wd/$drug/totxv_cn_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_03[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_02_03_%I.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args --\$LSB_JOBINDEX \
--$drug --TOTXVI genecnlc50.R log/${timestamp}_${drug}_01_02_03_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_04[1] \
-oo "log/${timestamp}_${drug}_01_02_04.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_02_03)" \
$execbin/checkconcat.sh $wd/$drug/totxvi_cn_lc50 $wd/$drug/totxvi_cn_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_05[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_02_05_%I.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args --\$LSB_JOBINDEX \
--$drug --ALL genecnlc50.R log/${timestamp}_${drug}_01_02_05_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_06[1] \
-oo "log/${timestamp}_${drug}_01_02_06.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_02_05)" \
$execbin/checkconcat.sh $wd/$drug/all_cn_lc50 $wd/$drug/all_cn_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_07[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_02_07.o" \
-w "ended(${timestamp}_${drug}_01_02_02)&&ended(${timestamp}_${drug}_01_02_04)&&ended(${timestamp}_${drug}_01_02_06)" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args --$drug \
cnlc50merge.R log/${timestamp}_${drug}_01_02_07.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_08[1-200] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_02_08_%I.o" \
-w "ended(${timestamp}_${drug}_01_02_07)" -R "rusage[mem=30000]" -M 30000 \
R CMD BATCH --no-save --args --\$LSB_JOBINDEX --$drug \
cnlc50meta.R log/${timestamp}_${drug}_01_02_08_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_09[1] \
-oo "log/${timestamp}_${drug}_01_02_09.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_02_08)" \
$execbin/checkconcat.sh $wd/$drug/cn_lc50_meta $wd/$drug/cn_lc50.tsv
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_02_10[1] \
-oo "log/${timestamp}_${drug}_01_02_10.o" -app R-2.14.0 \
-R "rusage[mem=30000]" -M 30000 \
-w "ended(${timestamp}_${drug}_01_02_09)" \
R CMD BATCH --no-save --args ${drug}_cncutoff.R \
log/${timestamp}_${drug}_01_02_10.Rout
fi
#################################################################################
#TODO(spaugh):Reorder modules so shorter modules run first
#################################################################################
if [[ "${runge}" == "TRUE" ]]; then
cp ${execbin}/gecutoff.R ${wd}/${drug}/${drug}_gecutoff.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_03_01[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_03_01.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args \
gelc50.R log/${timestamp}_${drug}_01_03_01.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_03_02[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_03_02.o" \
-R "rusage[mem=2000]" -M 2000 -w "ended(${timestamp}_${drug}_01_03_01)" \
R CMD BATCH --no-save --args ${drug}_gecutoff.R \
log/${timestamp}_${drug}_01_03_02.Rout
fi
#################################################################################
#################################################################################
if [[ "${runmir}" == "TRUE" ]]; then
cp ${execbin}/mircutoff.R ${wd}/${drug}/${drug}_mircutoff.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_04_01[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_04_01.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args \
mirlc50.R log/${timestamp}_${drug}_01_04_01.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_04_02[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_04_02.o" \
-R "rusage[mem=2000]" -M 2000 -w "ended(${timestamp}_${drug}_01_04_01)" \
R CMD BATCH --no-save --args ${drug}_mircutoff.R \
log/${drug}_mircutoff\$LSB_JOBINDEX.Rout
fi
#################################################################################
#################################################################################
if [[ "${runmeth}" == "TRUE" ]]; then
cp ${execbin}/methcutoff.R ${wd}/${drug}/${drug}_methcutoff.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_05_01[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_05_01.o" \
-R "rusage[mem=30000]" -M 30000 R CMD BATCH --no-save --args \
methlc50.R log/${timestamp}_${drug}_01_05_01.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_05_02[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_01_05_02.o" \
-R "rusage[mem=30000]" -M 30000 -w "ended(${timestamp}_${drug}_01_05_01)" \
R CMD BATCH --no-save --args ${drug}_methcutoff.R \
log/${timestamp}_${drug}_01_05_02.Rout
fi
#################################################################################
#################################################################################
if [[ "${rungemir}" == "TRUE" ]]; then
mkdir -p totxv_ge_mir
mkdir -p totxvi_ge_mir
mkdir -p all_ge_mir
mkdir -p ge_mir_meta
cp ${execbin}/gemir.R $wd/$drug/gemir.R
cp ${execbin}/gemirmeta.R $wd/$drug/gemirmeta.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_01_06_01[1] \
-app R-2.14.0 -R "rusage[mem=30000]" -M 30000 \
-oo "log/${timestamp}_${drug}_01_06_01.o" \
-w "ended(${timestamp}_${drug}_01_03_02)&&ended(${timestamp}_${drug}_01_04_02)" \
${execbin}/submirge.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${priority}
fi
#################################################################################
#################################################################################
if [[ "${rungemeth}" == "TRUE" ]]; then
mkdir -p totxv_ge_meth
mkdir -p totxvi_ge_meth
mkdir -p all_ge_meth
mkdir -p ge_meth_meta
mkdir -p ge_meth_meta_sub
cp ${execbin}/methgeprep.R ${wd}/${drug}/methgeprep.R
cp ${execbin}/gemeth.R $wd/$drug/gemeth.R
cp ${execbin}/gemethmeta.R $wd/$drug/gemethmeta.R
cp ${execbin}/gemethmetasub.R $wd/$drug/gemethmetasub.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_7_1[1] \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_meth_ge_prep.o" \
-R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_01_03_02)&&ended(${timestamp}_${drug}_01_05_01)" \
R CMD BATCH --no-save --args methgeprep.R \
log/methgeprep_${drug}_\$LSB_JOBINDEX.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_1[1-200] \
-app R-2.14.0 -R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_7_1)" \
-oo "log/${timestamp}_${drug}_totxv_meth_ge_%I.o" \
R CMD BATCH --no-save --args --\${LSB_JOBINDEX} --TOTXV \
gemeth.R log/totxv_meth_ge_\${LSB_JOBINDEX}.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_2[1-200] \
-app R-2.14.0 -R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_7_1)" \
-oo "log/${timestamp}_${drug}_totxvi_meth_ge_%I.o" \
R CMD BATCH --no-save --args --\${LSB_JOBINDEX} --TOTXVI \
gemeth.R log/totxvi_meth_ge_\${LSB_JOBINDEX}.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_3[1-200] \
-app R-2.14.0 -R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_7_1)" \
-oo "log/${timestamp}_${drug}_all_meth_ge_%I.o" \
R CMD BATCH --no-save --args --\${LSB_JOBINDEX} --ALL \
gemeth.R log/all_meth_ge_\${LSB_JOBINDEX}.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_4[1-200] \
-app R-2.14.0 -R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_D_1)&&ended(${timestamp}_${drug}_D_2)&&ended(${timestamp}_${drug}_D_3)" \
-oo "log/${timestamp}_${drug}_meth_ge_meta_%I.o" \
R CMD BATCH --no-save --args --\${LSB_JOBINDEX} gemethmeta.R \
log/meth_ge_meta_\${LSB_JOBINDEX}.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_5[1-200] \
-app R-2.14.0 -R "rusage[mem=4000]" -w "ended(${timestamp}_${drug}_D_4)" \
-oo "log/${timestamp}_${drug}_meth_ge_meta_sub_%I.o" \
R CMD BATCH --no-save --args --\${LSB_JOBINDEX} gemethmetasub.R \
log/meth_ge_meta_sub_${drug}_\${LSB_JOBINDEX}.Rout
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_D_6[1] \
-w "ended(${timestamp}_${drug}_D_5)" -R "rusage[mem=30000]" \
-oo "log/${timestamp}_${drug}_meth_ge_meta_sub_result_cat_%I.o" \
$execbin/checkconcat.sh ${wd}/${drug}/ge_meth_meta_sub ${wd}/${drug}/ge_meth_meta_sub.tsv
fi
#################################################################################
#################################################################################
if [[ "${rungesnp}" == "TRUE" ]]; then
cp ${execbin}/snpgeprep.R $wd/$drug/snpgeprep.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_4_1[1] \
-app R-2.14.0 -o "log/${timestamp}_${drug}_snp_ge_prep_%I.o" \
-w "ended(${timestamp}_${drug}_01_03_02)&&ended(${timestamp}_${drug}_01_01_14)" \
-R "rusage[mem=30000]" R CMD BATCH --no-save --args snpgeprep.R \
log/snpgeprep_${drug}_\$LSB_JOBINDEX.Rout
mkdir -p totxv_ge_snp
mkdir -p totxvi_ge_snp
mkdir -p all_ge_snp
mkdir -p ge_snp_meta
mkdir -p ge_snp_meta_sub
cp ${execbin}/gesnp.R $wd/$drug/gesnp.R
cp ${execbin}/gesnpmeta.R $wd/$drug/gesnpmeta.R
cp ${execbin}/gesnpmetasub.R $wd/$drug/gesnpmetasub.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_4_2[1] \
-app R-2.14.0 -R "rusage[mem=30000]" -w "ended(${timestamp}_${drug}_4_1)" \
${execbin}/subsnpge.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${queue} ${priority}
fi
#################################################################################
#################################################################################
if [[ "${rungecn}" == "TRUE" ]]; then
cp ${execbin}/cngeprep.R $wd/$drug/cngeprep.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_5_1[1] \
-w "ended(${timestamp}_${drug}_01_03_02)&&ended(${timestamp}_${drug}_01_02_10)" \
-app R-2.14.0 -oo "log/${timestamp}_${drug}_cn_ge_prep_%I.o" \
-R "rusage[mem=30000]" R CMD BATCH --no-save --args \
cngeprep.R log/cngeprep_${drug}_\$LSB_JOBINDEX.Rout
mkdir -p totxv_ge_cn
mkdir -p totxvi_ge_cn
mkdir -p all_ge_cn
mkdir -p ge_cn_meta
mkdir -p ge_cn_meta_sub
cp ${execbin}/genegecn.R $wd/$drug/gecn.R
cp ${execbin}/gecnmeta.R $wd/$drug/gecnmeta.R
cp ${execbin}/gecnmetasub.R $wd/$drug/gecnmetasub.R
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_5_2[1] \
-app R-2.14.0 -R "rusage[mem=10000]" \
-w "ended(${timestamp}_${drug}_5_1)" \
${execbin}/subcnge.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${queue}
fi
#################################################################################
#################################################################################
if [[ "${runmirsnp}" == "TRUE" ]]; then
cp ${execbin}/snpmirprep.R $wd/$drug/snpmirprep.R
# bsub -P $project -q ${queue} -J ${timestamp}_${drug}_8_1[1] -app R-2.14.0 -o "log/${timestamp}_${drug}_snp_mir_prep_%I.o" -R "rusage[mem=30000]" R CMD BATCH --no-save --args snpmirprep.R log/snpmirprep_${drug}_\$LSB_JOBINDEX.Rout
mkdir -p totxv_snp_mir
mkdir -p totxvi_snp_mir
mkdir -p snp_mir_meta
mkdir -p snp_mir_meta_sub
cp ${execbin}/snpmir.R $wd/$drug/snpmir.R
cp ${execbin}/snpmirmeta.R $wd/$drug/snpmirmeta.R
cp ${execbin}/snpmirmetasub.R $wd/$drug/snpmirmetasub.R
# bsub -P $project -q ${queue} -J ${timestamp}_${drug}_8_2[1] -app R-2.14.0 -R "rusage[mem=30000]" -w "ended(${timestamp}_${drug}_8_1)" ${execbin}/subsnpmir.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${queue}
fi
#################################################################################
#################################################################################
if [[ "${runfigure}" == "TRUE" ]]; then
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_7_1[1] \
-app R-2.14.0 -R "rusage[mem=1000]" -M 1000 \
-oo "log/${timestamp}_${drug}_01_07_01.o" \
-w "done(${timestamp}_${drug}_01_01_14)&&done(${timestamp}_${drug}_01_02_10)&&done(${timestamp}_${drug}_01_03_02)&&done(${timestamp}_${drug}_01_04_02)&&done(${timestamp}_${drug}_01_05_02)&&done(${timestamp}_${drug}_01_06_01)" \
${execbin}/subfigure.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${priority}
fi
#################################################################################
#################################################################################
if [[ "${runonlyfigure}" == "TRUE" ]]; then
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_7_1[1] \
-app R-2.14.0 -R "rusage[mem=1000]" -M 1000 \
-oo "log/${timestamp}_${drug}_01_07_01.o" \
${execbin}/subfigure.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${priority}
fi
#################################################################################
#################################################################################
if [[ "${runbabyfigure}" == "TRUE" ]]; then
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_8_1[1] \
-app R-2.14.0 -R "rusage[mem=1000]" -M 1000 \
-oo "log/${timestamp}_${drug}_01_08_01.o" \
-w "ended(${timestamp}_${drug}_01_01_14)&&ended(${timestamp}_${drug}_01_02_10)&&ended(${timestamp}_${drug}_01_03_02)&&ended(${timestamp}_${drug}_01_04_02)&&ended(${timestamp}_${drug}_01_05_02)&&ended(${timestamp}_${drug}_01_06_01)&&ended(${timestamp}_${drug}_01_07_01)" \
${execbin}/subbabyfigure.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${priority}
fi
#################################################################################
#################################################################################
if [[ "${runonlybabyfigure}" == "TRUE" ]]; then
bsub -P $project -q ${queue} -sp ${priority} -J ${timestamp}_${drug}_8_1[1] \
-app R-2.14.0 -R "rusage[mem=1000]" -M 1000 \
-oo "log/${timestamp}_${drug}_01_08_01.o" \
${execbin}/subbabyfigure.sh ${project} ${timestamp} ${wd} ${drug} ${execbin} ${priority}
fi
################################################################################
#################################################################################
if [[ "${runpackageresult}" == "TRUE" ]]; then
echo ""
#bsub -P $project -q ${queue} -J ${timestamp}_${drug}_resultprep_1 -oo "log/${timestamp}_${drug}_resultprep_1.o" -R "rusage[mem=2000]" ${execbin}/result_prep.sh ${wd} ${drug} ${timestamp}
fi
#################################################################################
done
| true |
050d0629008a51a45abf9be3285240b230948fed | Shell | jjourdai/ft_ssl_md5 | /diff_des.bash | UTF-8 | 4,098 | 2.984375 | 3 | [] | no_license | #/bin/bash
# Differential fuzz harness: for each round, generate random plaintext,
# key, salt and password, then check that ft_ssl's DES/DES3 output is
# interoperable with OpenSSL's in every supported mode.
make

nb=1000

# assert_same FILE1 FILE2
# diff the two files (the diff output is kept in diff.txt for inspection)
# and abort the whole fuzz run on the first mismatch.
# BUG FIX: the original inline checks ran a bare `exit` right after a
# successful `[ ... ]` test, so a mismatch terminated with status 0;
# exiting 1 makes failures visible to callers/CI.
assert_same () {
	if ! diff "$1" "$2" > diff.txt; then
		echo "MISMATCH: $1 vs $2 (see diff.txt)" >&2
		exit 1
	fi
}

for i in $(seq 0 "$nb"); do
	echo "$i"

	# Fresh fuzz inputs every round: a random plaintext of up to 32 KiB,
	# a random password, and 16 hex digits used as key/salt/IV.
	# (md5 -q is the macOS md5 tool; use md5sum on Linux.)
	head -c "$RANDOM" /dev/urandom > random.txt
	pass=$RANDOM
	K=$(head -c 100 /dev/urandom | md5 -q | head -c 16)

	# --- base64 mode, explicit key: each tool decrypts its own output ---
	./ft_ssl des -e -a -k "$K" -i random.txt > test1
	openssl des-ecb -e -a -K "$K" -in random.txt > test2
	./ft_ssl des -d -a -k "$K" -i test1 > test3
	openssl des-ecb -d -a -K "$K" -in test2 > test4
	assert_same test3 test4

	./ft_ssl des-cbc -e -a -k "$K" -v "$K" -i random.txt > test1
	openssl des-cbc -e -a -K "$K" -iv "$K" -in random.txt > test2
	./ft_ssl des-cbc -d -a -k "$K" -v "$K" -i test1 > test3
	openssl des-cbc -d -a -K "$K" -iv "$K" -in test2 > test4
	assert_same test3 test4

	./ft_ssl des3 -e -a -k "$K" -v "$K" -i random.txt > test1
	openssl des3 -e -a -K "$K" -iv "$K" -in random.txt > test2
	./ft_ssl des3 -d -a -k "$K" -v "$K" -i test1 > test3
	openssl des3 -d -a -K "$K" -iv "$K" -in test2 > test4
	assert_same test3 test4

	# --- binary mode, explicit key: each tool decrypts the OTHER tool's
	# ciphertext; xxd turns the binary plaintexts into diff-able text ---
	./ft_ssl des -e -k "$K" -i random.txt > test1
	openssl des-ecb -e -K "$K" -in random.txt > test2
	./ft_ssl des -d -k "$K" -i test2 | xxd > test3
	openssl des-ecb -d -K "$K" -in test1 | xxd > test4
	assert_same test3 test4

	./ft_ssl des-ecb -e -k "$K" -i random.txt > test1
	openssl des-ecb -e -K "$K" -in random.txt > test2
	./ft_ssl des-ecb -d -k "$K" -i test2 | xxd > test3
	openssl des-ecb -d -K "$K" -in test1 | xxd > test4
	assert_same test3 test4

	./ft_ssl des-cbc -e -k "$K" -v "$K" -i random.txt > test1
	openssl des-cbc -e -K "$K" -iv "$K" -in random.txt > test2
	./ft_ssl des-cbc -d -k "$K" -v "$K" -i test2 | xxd > test3
	openssl des-cbc -d -K "$K" -iv "$K" -in test1 | xxd > test4
	assert_same test3 test4

	./ft_ssl des3 -e -k "$K" -v "$K" -i random.txt > test1
	openssl des3 -e -K "$K" -iv "$K" -in random.txt > test2
	./ft_ssl des3 -d -k "$K" -v "$K" -i test2 | xxd > test3
	openssl des3 -d -K "$K" -iv "$K" -in test1 | xxd > test4
	assert_same test3 test4

	# --- password + salt (key derivation paths) ---
	echo "./ft_ssl des-ecb -e -p $pass -s $K -i random.txt > test1"
	./ft_ssl des-ecb -e -p "$pass" -s "$K" -i random.txt > test1
	openssl des-ecb -e -pass "pass:$pass" -S "$K" -in random.txt > test2
	echo "./ft_ssl des-ecb -d -p $pass -i test1 | xxd > test3"
	./ft_ssl des-ecb -d -p "$pass" -i test1 | xxd > test3
	openssl des-ecb -d -pass "pass:$pass" -in test2 | xxd > test4
	assert_same test3 test4

	echo "./ft_ssl des-cbc -e -p $pass -s $K -v $K -i random.txt > test1"
	./ft_ssl des-cbc -e -p "$pass" -s "$K" -v "$K" -i random.txt > test1
	openssl des-cbc -e -pass "pass:$pass" -S "$K" -iv "$K" -in random.txt > test2
	echo "./ft_ssl des-cbc -d -p $pass -i test2 -v $K | xxd > test3"
	./ft_ssl des-cbc -d -p "$pass" -i test2 -v "$K" | xxd > test3
	openssl des-cbc -d -pass "pass:$pass" -iv "$K" -in test1 | xxd > test4
	assert_same test3 test4

	echo "./ft_ssl des3 -e -p $pass -v $K -s $K -i random.txt > test1"
	./ft_ssl des3 -e -p "$pass" -v "$K" -s "$K" -i random.txt > test1
	openssl des3 -e -pass "pass:$pass" -iv "$K" -S "$K" -in random.txt > test2
	echo "./ft_ssl des3 -d -p $pass -v $K -i test1 | xxd > test3"
	./ft_ssl des3 -d -p "$pass" -v "$K" -i test1 | xxd > test3
	openssl des3 -d -pass "pass:$pass" -iv "$K" -in test2 | xxd > test4
	assert_same test3 test4

	# Disabled variants kept from the original for reference:
	# ./ft_ssl des3 -e -p $pass -s $K -i random.txt > test1
	# openssl des3 -e -pass pass:$pass -S $K -in random.txt > test2
	# assert_same test1 test2
	# ./ft_ssl des3 -e -k $K -v $K -i random.txt | xxd > test1
	# openssl des3 -e -K $K -iv $K -in random.txt | xxd > test2
	# assert_same test1 test2
done

rm test1 test2 random.txt test3 test4
#openssl des-ecb -K 133457799BBCDFF1 -nosalt -in big_file_bro
#make && ./ft_ssl des -d -i test1
| true |
f5ff0b2531d27d8cb4cd066d1f69ac6d0b0e05ed | Shell | togawatakuya/bert_test | /run_test_jp.sh | UTF-8 | 733 | 2.6875 | 3 | [] | no_license | BATCH_SIZE=32
TEXT_LENGTH=256

# Ask mecab-config where dictionaries live and switch to the NEologd
# dictionary when it has been installed there.
MECAB_DICT_DIR=$(mecab-config --dicdir)
MECAB_OPT=""
if [ -x ${MECAB_DICT_DIR}/mecab-ipadic-neologd ]; then
	MECAB_OPT="--mecab_dict ${MECAB_DICT_DIR}/mecab-ipadic-neologd"
fi

# Uncomment to enable Japanese text normalization.
# DO_NORMALIZE='--normalize_text'

MODEL_DIR=./models/Japanese_L-12_H-768_A-12_E-30_BPE
CONF_FILE=${MODEL_DIR}/bert_config.json
VOCAB_FILE=${MODEL_DIR}/vocab.txt
TRAINED_MODEL=./results/masuda/net_trained_10000.pth
TEST_TSV=./data/masuda/test_3000.tsv

# Run the evaluation once. DO_NORMALIZE and MECAB_OPT are expanded
# unquoted on purpose: each may contribute zero, one or two words.
run_test() {
	poetry run python test.py --batch_size ${BATCH_SIZE} --text_length ${TEXT_LENGTH} ${DO_NORMALIZE} ${MECAB_OPT} ${CONF_FILE} ${TRAINED_MODEL} ${TEST_TSV} ${VOCAB_FILE}
}

run_test
| true |
c5a26c70f2dd5547b18569b6095124a6313d558a | Shell | q4Zar/cardano_token_minting_scripts | /check_balance.sh | UTF-8 | 595 | 2.671875 | 3 | [] | no_license | #!/bin/bash
set -e
###############################################################################
###############################################################################
# Wallet name; the payment address is read from <NAME>/<NAME>_base.addr.
NAME="minter"
SENDER_ADDR=$(cat ${NAME}/${NAME}_base.addr)
###############################################################################
###############################################################################
# Passive relay or Daedalus is required.
#
# Must have a live Network.Socket.connect
# get utxo
# Query mainnet for the UTxOs sitting at the minter's base address.
echo "Getting UTxO"
cardano-cli query utxo \
    --cardano-mode \
    --mainnet \
--address ${SENDER_ADDR} | true |
81ee20fd5ca0abadae1f5b0d73a5842a14a918c7 | Shell | miki725/.dotfiles | /.bin/generate_manpath.sh | UTF-8 | 1,078 | 3.6875 | 4 | [] | no_license | #!/bin/sh
# generate_manpath_for_root ROOT
# Print every man directory found under ROOT (following symlinks),
# filtering out per-section dirs (man1, man3, ...) and source/doc
# noise, and keeping only directories that actually contain at least
# one manN section. Output is sorted.
generate_manpath_for_root () {
    root=$1
    [ -d "$root" ] || return
    # BUG FIX: the search previously ran over the hard-coded
    # /usr/local/opt instead of "$root", so every caller got the same
    # results regardless of the root it asked for.
    # (A read loop replaces `for i in $(...)`, which word-split paths.)
    find -L "$root" -name '*man*' -type d \
        | grep -vE 'man[0-9]' \
        | grep -vi command \
        | grep -vi resources \
        | grep -v '/system/' \
        | grep -v '/include' \
        | grep -v '/src/' \
        | grep -v '/node_modules/' \
        | grep -v '/site-packages/' \
        | while IFS= read -r dir; do
            if ls "$dir" | grep -qE 'man[0-9]'; then
                echo "$dir"
            fi
        done \
        | sort
}
# generate_from_mandb [CONF]
# Extract candidate man paths from a man-db configuration file
# (default: /etc/man_db.conf) and print the ones that exist and hold
# at least one manN section directory.
# Generalized: the config path is now an optional argument so the
# function can be pointed at alternative/man-db test configs; the
# default preserves the original behavior.
generate_from_mandb () {
    conf=${1:-/etc/man_db.conf}
    [ -f "$conf" ] || return
    # Pull slash-delimited ".../man" tokens straight out of the file;
    # a read loop avoids the word-splitting of `for i in $(...)`.
    grep -oE '[a-zA-Z0-9/]+/man\b' "$conf" \
        | sort -u \
        | while IFS= read -r dir; do
            [ -d "$dir" ] || continue
            if ls "$dir" 2>/dev/null | grep -qE 'man[0-9]'; then
                echo "$dir"
            fi
        done
}
# generate_manpath — emit every candidate MANPATH entry: both Homebrew
# prefixes (Intel /usr/local, Apple Silicon /opt/homebrew), then the
# paths listed in man-db's configuration.
generate_manpath () {
    for prefix in /usr/local/opt /opt/homebrew/opt; do
        generate_manpath_for_root "$prefix"
    done
    generate_from_mandb
}
generate_manpath
| true |
007c3512bfc81d1f583f511f942f7c04b3860cf2 | Shell | altergui/wbm-configs | /wbm-5691/overlay/etc/wbm/protos/batadv.sh | UTF-8 | 1,261 | 3.265625 | 3 | [] | no_license | #!/bin/sh
# Positional arguments supplied by the caller (netifd proto hook):
ACTION=$1             # hook to run: clean | prepare | add
LOGICAL_INTERFACE=$2  # UCI logical interface name
REAL_INTERFACE=$3     # underlying device name (not referenced by the hooks below)
IPV4=$4               # IPv4 address in CIDR form: addr/prefix
IPV6=$5               # IPv6 address/prefix
# ipv4_addr — print the address part of $IPV4 (everything before "/").
ipv4_addr () {
	printf '%s\n' "${IPV4%%/*}"
}
# ipv4_netmask — print the CIDR prefix length of $IPV4 (after "/").
ipv4_netmask () {
	printf '%s\n' "${IPV4##*/}"
}
# clean — netifd teardown hook; this proto has nothing to undo.
clean () {
	:
}
# prepare — one-time UCI setup: create the bat0 mesh section with
# bridge loop avoidance enabled and register a static 'bat0' network
# interface skeleton (addresses are filled in later by add()).
prepare () {
	uci set batman-adv.bat0=mesh
	uci set batman-adv.bat0.bridge_loop_avoidance=1
	uci commit batman-adv
	uci set network.bat0=interface
	uci set network.bat0.ifname=bat0
	uci set network.bat0.proto=static
	uci set network.bat0.ip6addr=""
	uci set network.bat0.ipaddr=""
	uci set network.bat0.netmask=""
	# 1500 on bat0 itself; the attached carrier gets 1528 in add().
	uci set network.bat0.mtu=1500
	uci commit network
}
# add — seed the bat0 addresses on first use and attach this logical
# interface to the bat0 mesh. Existing values are never overwritten,
# so manual UCI edits survive re-runs.
add () {
	# Derive a stable locally-administered MAC: a hostname of the form
	# "wbm-XXYY" becomes 02:ba:XX:YY:00:01.
	if [ "$(uci -q get network.bat0.macaddr)" == "" ] ; then
		id="$(uci get system.@system[0].hostname | sed -e 's/wbm-\(..\)\(..\)/\1:\2/')"
		uci set network.bat0.macaddr="02:ba:$id:00:01"
	fi
	if [ "$(uci -q get network.bat0.ip6addr)" == "" ] ; then
		uci set network.bat0.ip6addr="$IPV6"
	fi
	if [ "$(uci -q get network.bat0.ipaddr)" == "" ] ; then
		uci set network.bat0.ipaddr="$(ipv4_addr)"
		# NOTE(review): this stores the CIDR prefix length in 'netmask'
		# rather than a dotted mask — confirm netifd accepts that form.
		uci set network.bat0.netmask="$(ipv4_netmask)"
	fi
	uci set network.${LOGICAL_INTERFACE}=interface
	uci set network.${LOGICAL_INTERFACE}.proto=batadv
	uci set network.${LOGICAL_INTERFACE}.mesh=bat0
	# Oversized MTU on the carrier, presumably to leave room for the
	# batman-adv encapsulation header — TODO confirm.
	uci set network.${LOGICAL_INTERFACE}.mtu=1528
	uci commit network
}
# Dispatch: run the hook named by the first CLI argument
# (clean / prepare / add), captured in ACTION at the top.
$ACTION
| true |
a345f576e878a3183da42370438d443feebfcb6f | Shell | nd1511/hub | /tensorflow_hub/pip_package/build_pip_package.sh | UTF-8 | 1,832 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This script should be run from the repo root.
# Fail fast: abort on the first error, including failures inside pipelines.
set -e
set -o pipefail
# die MESSAGE — report MESSAGE on stderr and terminate with status 1.
die() {
  printf '%s\n' "$1" >&2
  exit 1
}
# main DEST — build the universal tensorflow_hub wheel and copy the
# resulting files into DEST.
function main() {
  if [ $# -lt 1 ] ; then
    die "ERROR: no destination dir provided"
  fi

  DEST=$1
  # Scratch dir for assembling the wheel inputs.
  TMPDIR=$(mktemp -d -t --suffix _tensorflow_hub_pip_pkg)
  RUNFILES="bazel-bin/tensorflow_hub/pip_package/build_pip_package.runfiles/org_tensorflow_hub"

  echo $(date) : "=== Using tmpdir: ${TMPDIR}"

  # Produce the runfiles tree holding the python package sources.
  bazel build //tensorflow_hub/pip_package:build_pip_package

  if [ ! -d bazel-bin/tensorflow_hub ]; then
    echo `pwd`
    die "ERROR: Could not find bazel-bin. Did you run from the build root?"
  fi

  # Stage packaging metadata and the package itself into the tmpdir.
  cp "tensorflow_hub/pip_package/setup.py" "${TMPDIR}"
  cp "tensorflow_hub/pip_package/setup.cfg" "${TMPDIR}"
  cp "LICENSE" "${TMPDIR}/LICENSE.txt"
  cp -R "${RUNFILES}/tensorflow_hub" "${TMPDIR}"

  # Build the wheel inside the tmpdir, then copy it out and clean up.
  pushd ${TMPDIR}
  rm -f MANIFEST
  echo $(date) : "=== Building universal python wheel in $PWD"
  python setup.py bdist_wheel --universal >/dev/null
  mkdir -p ${DEST}
  cp dist/* ${DEST}
  popd
  rm -rf ${TMPDIR}
  echo $(date) : "=== Output wheel files are in: ${DEST}"
}
main "$@"
| true |
5122bbe4bb1e1e4ba73f03d06e60361212dc7415 | Shell | komokun/xrplkd | /install/install_xrplkd_service.sh | UTF-8 | 293 | 2.609375 | 3 | [] | no_license | #!/bin/sh
sed -e "s;%WORKING_DIR%;$1;g" -e "s;%YARN%;$2;g" -e "s;%DIR%;$3;g" service.template > xrplkd.service
cd $1
sudo $2 build
cd $1/install
sudo cp xrplkd.service /etc/systemd/system
sudo rm xrplkd.service
sudo systemctl enable xrplkd.service
sudo systemctl start xrplkd.service | true |
56e01ec4a3636cb368d81dfb84e09fa55a7cb7de | Shell | onap/aai-graphadmin | /src/main/scripts/extract-events.sh | UTF-8 | 413 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# usage — print the expected invocation on stdout.
usage() {
	printf 'Usage %s input-file output-file event-type\n' "$0"
}
# Require the three positional arguments documented in usage().
if [ "${#}" -lt 3 ]; then
	usage
	# BUG FIX: 'exit -1' is not a portable exit status (it showed up
	# as 255); use a conventional non-zero status for a usage error.
	exit 1
fi

input_file=$1
output_file=$2
event_type=$3

# Keep the lines for the requested event type, then drop InvokeReturn
# entries. (The original blanked InvokeReturn lines and then deleted
# all empty lines; since grep never emits empty lines, a straight
# delete is equivalent and does it in one pass.)
grep -- "|${event_type}|" "${input_file}" > "${output_file}.1"
sed -i -e '/InvokeReturn/d' "${output_file}.1"

# Field 29 of the pipe-delimited record is the payload we keep.
awk -F '|' '{print $29}' "${output_file}.1" > "${output_file}"
rm -- "${output_file}.1"
exit 0
| true |
4c4111b978bf94c0f6a355cc05b5d9b2ee3c9393 | Shell | aikomastboom/Prototyper | /bin/coverit | UTF-8 | 579 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
# Resolve the repo root (parent of this script's directory) robustly,
# even when the path contains spaces, and fail loudly if cd fails.
current_dir=$(cd "$(dirname "$0")" && pwd)
base_dir="${current_dir}/.."
cd "${base_dir}" || exit 1

# Build node-jscoverage on first use.
# BUG FIX: GitHub retired the unauthenticated git:// protocol, so the
# clone now goes over HTTPS.
if [ ! -x ./opt/node-jscoverage/jscoverage ]; then
    mkdir -p ./opt
    cd ./opt || exit 1
    git clone https://github.com/visionmedia/node-jscoverage.git
    cd node-jscoverage/ || exit 1
    ./configure && make
    cd "${base_dir}" || exit 1
fi

# A local mocha is needed for the html-cov reporter.
if [ ! -x ./node_modules/.bin/mocha ]; then
    npm install mocha
fi

# Instrument lib/ into lib-cov/, run the suite against the
# instrumented tree, then clean up the instrumented copy.
./opt/node-jscoverage/jscoverage lib lib-cov
PROTOTYPER_COV=1 ./node_modules/.bin/mocha -R html-cov > ./doc/coverage.html
rm -rf lib-cov
echo "coverage.html generated in doc/coverage.html"
| true |
7db7ffd761b1be5d22e712042686a17c6b8aaafe | Shell | ryo853fj/etrobo | /scripts/console | UTF-8 | 1,199 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# console pipe, input from stdin and output to both console and stdout
# console
# Author: jtFuruhata
# Copyright (c) 2020 ETロボコン実行委員会, Released under the MIT license
# See LICENSE
#
# `console` is no longer needed, use `tee`.
# see `athrill_runner`
#
if [ "$1" == "usage" ] || [ "$1" == "--help" ]; then
echo "usage: console [pipe] [echo] [<message>]"
echo
echo "Output <message> to both console and stdout"
echo "On pipe mode, input from stdin and ignore <message>"
echo "On echo mode, output to console only"
exit 0
fi
# mode selection
echo_on="echo"
stdout_on="out"
unset pipe_on
if [ "$1" == "pipe" ]; then
unset pipe_mode
pipe_on="pipe"
shift
fi
if [ "$1" == "echo" ]; then
pipe_mode="echo"
unset stdout_on
shift
fi
# pipe mode
if [ "$pipe_on" ]; then
if [ -p /dev/stdin ]; then
while read line; do
console $pipe_mode "$line"
done
exit 0
else
exit 1
fi
fi
# output for console
if [ "$echo_on" ]; then
echo "$@" > "/dev/`ps aux | grep 'ps aux' | tail -n 1 | awk '{print $7}'`"
fi
# output for stdout
if [ "$stdout_on" ]; then
echo "$@"
fi
| true |
daccc15788f2554ff8344914d7322d2c4fd0fe4a | Shell | isabella232/docker-from-scratch | /scripts/build | UTF-8 | 808 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -ex

# Work from the repo root (the parent of this script's directory);
# quoting keeps this safe for paths containing spaces.
cd "$(dirname "$0")/.."

arch="$1"

# Fresh build tree with the CA bundle and the arch-specific base files.
rm -rf build
mkdir -p build
cp /etc/ssl/certs/ca-certificates.crt ./build/
cp "./assets/base-files_${arch}.tar.gz" ./build/base-files.tar.gz

# Strip a leading "v" from DOCKER_VERSION with parameter expansion
# instead of spawning an echo|sed pipeline.
tar -xf "./assets/docker-${DOCKER_VERSION#v}_${arch}.tgz" -C ./build --strip-components=1

# 'command -v' is the portable replacement for 'which'.
strip_bin=$(command -v strip)

# Cross-compilation setup: pick the matching gcc and strip for ARM
# targets; CGO is required because of the external linkmode below.
if [ "${arch}" == "arm" ]; then
  export GOARM=6
  export CGO_ENABLED=1
  export CC=/usr/bin/arm-linux-gnueabihf-gcc
  strip_bin=/usr/bin/arm-linux-gnueabihf-strip
fi

if [ "${arch}" == "arm64" ]; then
  export GOARM=
  export CGO_ENABLED=1
  export CC=/usr/bin/aarch64-linux-gnu-gcc
  strip_bin=/usr/bin/aarch64-linux-gnu-strip
fi

# Fully static binary (external linker + -static), then strip symbols.
GOARCH=${arch} go build -ldflags "-linkmode external -extldflags -static" -o build/dockerlaunch ./main
"${strip_bin}" --strip-all build/dockerlaunch
| true |
c7c5dab79faeb391b1a3f0b44eccc6d5ce33d31c | Shell | laradevio/Shipyard | /shipyard.sh | UTF-8 | 6,863 | 4.375 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# This is the main Shipyard entrypoint for using it. The purpose
# of this file is to ask the user some questions to properly
# configurate Laravel Shipyard for using inside the OS.
# Laravel Shipyard Initialization Script
# Version 1.0.20170901A
printf "
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Welcome to Laravel Shipyard!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We are gonna ask some questions to properly setup Laravel Shipyard
in your system. It won't take more than 1 minute. Let's start!
~~~~~~~~~
"
#
# Environment File Detection
#
# Reuse an existing .env (warning the user first); otherwise seed it
# from the shipped example file.
if [ -f ./.env ]; then
    printf "
(( Caution! )))
~~~~~~~~~~~~~~~
We found a '.env' file present. We will edit this file.
If you are not sure, remove it (or backup it) and
run this script again.
~~~~~~~~~~~~~~~
(( Caution! )))
\n"
    read -rsp $'Press any key to continue...\n' -n1 key
else
    cp ./.env.example ./.env
fi
#
# Docker Detection
#
# docker-compose must be on PATH for Shipyard to be usable; warn (but
# keep going) when it is missing.
if ! command -v docker-compose >/dev/null 2>&1 ; then
    printf "
((( Caution! )))
~~~~~~~~~~~~~~~
It seems that Docker is not properly installed or present.
Please go to https://www.docker.com and install Docker
to use Laravel Shipyard after the script finishes.
~~~~~~~~~~~~~~~
((( Caution! )))
\n"
    read -rsp $'Press any key to continue...\n' -n1 key
fi
#
# OS Detection
#
# First guess the host OS from $OSTYPE, then let the user confirm or
# override the guess interactively.
os=unknown
if [[ "$OSTYPE" == "linux-gnu" ]]; then
    os='Linux'
elif [[ "$OSTYPE" == "darwin"* ]]; then
    os='MacOS'
elif [[ "$OSTYPE" == "cygwin" ]]; then
    os='Windows'
elif [[ "$OSTYPE" == "msys" ]]; then
    os='Windows'
fi
os_selected=true
printf "
~~~~~~~~~
It seems we are under '$os'. Is that correct?
"
select yn in "Yes" "No"; do
    case $yn in
        Yes ) printf "\nGood!\n" ; break;;
        No ) os_selected=false; break;;
    esac
done
# Manual choice when the automatic guess was rejected.
if [[ "$os_selected" == false ]]; then
    printf "\n~~~~~~~~~\n What OS do you plan to use Laravel Shipyard? \n"
    select wml in "Windows" "MacOS" "Linux"; do
        case $wml in
            Windows ) os='Windows'; break;;
            MacOS ) os='MacOS'; break;;
            Linux ) os='Linux'; break;;
        esac
    done
fi
#
# OS Fixes -- MacOS
#
# macOS: mount volumes with Docker's ':delegated' consistency flag.
if [[ "$os" == "MacOS" ]]; then
    printf "\n* Adding :delegated fix.\n"
    sed -i "s/^DELEGATED_MOUNT=.*/DELEGATED_MOUNT=:delegated/" .env
fi
#
# OS Fixes -- Windows
#
# Windows: keep database data on named volumes and install the
# 'winbackup' helper container.
if [[ "$os" == "Windows" ]]; then
    printf "\n* Adding volume fix for MariaDB, Beanstalkd & PostgreSQL.\n"
    sed -i "s/^DATA_VOLUME_TYPE=.*/DATA_VOLUME_TYPE=volume/" .env
    sed -i "s/^DATA_SOURCE_STRING=.*/DATA_SOURCE_STRING=data_/" .env
    printf "\n* Adding 'winbackup' container\n"
    bash ./.commands/shipyard/winbackup.sh
fi
#
# Locate the Project Path
#
# Ask where the Laravel application lives (a preset next to Shipyard,
# or a custom path), create it when missing, and record it in .env.
project_path=false
printf "
~~~~~~~~~
What is your Laravel project path? (relative to Shipyard, or absolute)
If the directory doesn't exist, we will create it for you.
"
select lp in "Next To Shipyard (../laravel)" "Other"; do
    case $lp in
        "Next To Shipyard (../laravel)" ) project_path='../laravel'; break;;
        "Other" ) project_path='custom'; break;;
    esac
done
if [[ "$project_path" == 'custom' ]]; then
    printf "\n~~~~~~~~~\n Enter the path of your Laravel project: \n"
    if [[ "$os" == "Windows" ]]; then
        printf "\n Use '/c/Users/MyUser/Laravel' under Windows to reference partitions \n"
        printf "\n Use '../MyOtherLaravelApp' to reference Shipyard relative directory \n"
    fi
    # -r keeps any backslashes the user types verbatim.
    read -r project_path
fi
printf "
Your Laravel project path will be: $project_path
"
# BUG FIX: the test and mkdir previously used the literal word
# 'project_path' (missing '$'), so a directory named "project_path"
# was created in the CWD and the chosen path never was.
if [ ! -d "$project_path" ]; then
    mkdir -p "$project_path"
fi
if [ -z "$(ls -A "$project_path")" ]; then
    # BUG FIX: '/n' was a typo for the '\n' escape.
    printf "\n Use './warehouse laravel new' to create your project."
fi
# Escape '.' and '/' so the path can be spliced into the sed pattern.
project_path_sed_temp=${project_path//\./\\\.}
project_path_sed=${project_path_sed_temp//\//\\\/}
sed -i "s/APP_PATH=.*/APP_PATH=$project_path_sed/" .env
#
# Custom Server Name
#
# Pick how the app will be reached: plain localhost, a subdomain of
# shipyard.test, or a fully custom *.test domain (which is also
# appended to the OpenSSL server-cert SAN list).
server_name=false
cert_domain=false
printf "
~~~~~~~~~
Do you want to access using a custom server name?
"
select nyy in "No, just use 'localhost'" "Yes, use '*.shipyard.test' subdomain" "Yes, use '{myservername}.test' domain"; do
    case $nyy in
        "No, just use 'localhost'" )
            server_name='localhost'
            sed -i "s/^SERVER_NAME=.*/SERVER_NAME=$server_name/" .env
            cert_domain=false
            full_server_name='https://localhost'
            break;;
        "Yes, use '*.shipyard.test' subdomain" )
            read -p "Enter your subdomain name (.shipyard.test): " server_name
            sed -i "s/^SERVER_NAME=.*/SERVER_NAME=$server_name\.shipyard\.test/" .env
            full_server_name="https://$server_name.shipyard.test"
            break;;
        "Yes, use '{myservername}.test' domain" )
            read -p "Enter your 'domain name' (.test): " server_name
            sed -i "s/^SERVER_NAME=.*/SERVER_NAME=$server_name\.test/" .env
            # Drop any previous DNS.6/DNS.7 entries, then append the
            # new domain and its wildcard to the certificate config.
            sed -i "/^DNS\.6.*/,/^/d" ./.secrets/openssl-server.conf
            sed -i "/^DNS\.7.*/,/^/d" ./.secrets/openssl-server.conf
            printf "\nDNS.6 = $server_name.test" >> .secrets/openssl-server.conf
            printf "\nDNS.7 = *.$server_name.test" >> .secrets/openssl-server.conf
            printf "\nAdded '$server_name.test' to your OpenSSL Certificate config file \n"
            full_server_name="https://$server_name.test"
            break;;
    esac
done
printf "
Your Application will be accesible at $full_server_name
"
#
# Certificates Search
#
# Generate development-only TLS material, unless certificates already
# exist and the user opts to keep them.
printf "
~~~~~~~~~
We are gonna create random and performance-lean 2048-bit
OpenSSL certificates for development under HTTPS. Use
them only for development, as they are insecure!
\n"
read -rsp $'\nPress any key to continue...\n\n' -n1 key
if [[ -f ./.secrets/ssl/certs/shipyard-ca-cert.pem || -f ./.secrets/ssl/certs/shipyard-server-cert.pem ]]; then
    printf "\n
(( Caution! )))
~~~~~~~~~~~~~~~
We found some CA Certificates. Do you want to overwrite them?
Select 'No' if you are unsure. You can use OpenSSL to check
them and execute '.commands/shipyard/newssl.sh' later to
replace them.
(( Caution! )))
~~~~~~~~~~~~~~~
\n"
    select yn in "Yes" "No"; do
        case $yn in
            Yes ) cert_create=true; break;;
            No ) cert_create=false; break;;
        esac
    done
else
    cert_create=true
fi
if [[ "$cert_create" == true ]]; then
    if [[ ! -d .secrets/ssl/ || ! -d .secrets/ssl/certs ]]; then
        mkdir -p .secrets/ssl/certs
    fi
    if [ ! -f .secrets/ssl/newssl.log ]; then
        touch .secrets/ssl/newssl.log
    fi
    # DH parameters are generated only once (-dsaparam trades strength
    # for speed, acceptable for dev certs); output goes to the log.
    if [ ! -f .secrets/ssl/dhparam.pem ]; then
        printf "##########################################\n$(date -u)\n" >> .secrets/ssl/newssl.log 2>&1
        openssl dhparam -dsaparam -out .secrets/ssl/dhparam.pem 2048 >> .secrets/ssl/newssl.log 2>&1
        printf "##########################################\n\n\n" >> .secrets/ssl/newssl.log 2>&1
    fi
    bash ./.commands/shipyard/newssl.sh
fi
#
# End
#
printf "
~~~
That's all folks!
- Install the CA Certificate '.secrets/ssl/certs/shipyard-ca-cert.pem'
- Develop your application under '$project_path'
- Access with your browser at '$full_server_name'
Go and have some fun ;)
~~~
"
exit
| true |
eab5c06c963a73c21030a7ef8c2bb1208ea82ecf | Shell | delkyd/alfheim_linux-PKGBUILDS | /quake-par/PKGBUILD | UTF-8 | 837 | 2.875 | 3 | [] | no_license | # Maintainer: Slash <demodevil5[at]yahoo[dot]com>
pkgname=quake-par
pkgver=0.03.01
pkgrel=2
pkgdesc="Quake PAK archiving utility."
url="http://ftp.vim.org/ibiblio/games/quake/"
license=('GPL')
arch=('i686' 'x86_64')
depends=('glibc')
# Tarball from the ibiblio mirror; the checksum pins the exact release.
source=("http://ftp.vim.org/ibiblio/games/quake/par-${pkgver}.tar.gz")
sha256sums=('e68a3b78586cd762b29f5d5edca8ea9f263fa100423f4d4fee36c7e0a5061122')
# build() — configure and compile inside the unpacked source tree.
build() {
	cd "$srcdir/par-$pkgver"
	./configure
	make
}
# package() — stage the build into $pkgdir. Upstream's Makefile.Linux
# hard-codes its install prefix and man path, so both are rewritten in
# place before running `make install`.
package() {
	cd "${srcdir}/par-${pkgver}"
	# Modify Prefix Directory in Makefile
	/bin/sed -i "s:/usr/local:${pkgdir}/usr:" Makefile.Linux
	# Modify man page Directory in Makefile
	/bin/sed -i "s:\${prefix}/man:${pkgdir}/usr/share/man:" Makefile.Linux
	# Create Destination Directories
	install -d "${pkgdir}"/usr/{bin,share/man/man1}
	make install
}
| true |
a10ef6b4e53b636ad4e92353921ce849532a96f2 | Shell | rafaelameijeiras/PandoraFMS | /pandorafms_community/pandorafms/sources/run_pandora.sh | UTF-8 | 5,504 | 3.734375 | 4 | [] | no_license | #!/bin/bash
#
# Prepares environment and launchs Pandora FMS
#
# Global vars
#
PANDORA_CONSOLE=/var/www/html/pandora_console        # console docroot (holds the SQL dumps)
PANDORA_SERVER_CONF=/etc/pandora/pandora_server.conf # server config patched by server_prepare
PANDORA_SERVER_BIN=/usr/bin/pandora_server
PANDORA_HA_BIN=/usr/bin/pandora_ha                   # not referenced below — TODO confirm still needed
PANDORA_TABLES_MIN=160  # table count above which the DB is considered already loaded
#
# Check database
#
# db_check — verify the MySQL engine and the target database are
# reachable, creating the database when it does not exist yet.
# Globals (env): DBUSER DBPASS DBPORT DBHOST DBNAME RETRIES SLEEP DEBUG
# Returns: 0 when $DBNAME is usable (or was just created), 1 otherwise.
# NOTE(review): RETRIES and SLEEP are expected from the environment;
# when unset, `seq` emits nothing and the retry loop is skipped — confirm.
function db_check {
	# Check DB
	echo -n ">> Checking dbengine connection: "
	# Poll the engine until `select 1` succeeds or retries run out.
	for i in `seq $RETRIES`; do
		r=`echo 'select 1' | mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A`
		if [ $? -ne 0 ]; then
			echo -n "retriying DB conection in $SLEEP seconds: "
			sleep $SLEEP
		else
			break
		fi
	done
	r=`echo 'select 1' | mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A`
	if [ $? -eq 0 ]; then
		echo "OK"
		echo -n ">> Checking database connection: "
		r=`echo 'select 1' | mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A $DBNAME`
		if [ $? -eq 0 ]; then
			echo "OK"
			return 0
		fi
		# Engine is up but the schema is missing — try to create it.
		echo -n ">> Cannot connect to $DBNAME, trying to create: "
		r=`echo "create database $DBNAME" | mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST`
		if [ $? -eq 0 ]; then
			echo "OK"
			return 0
		fi
		echo "Cannot create database $DBNAME on $DBUSER@$DBHOST:$DBPORT"
		return 1
	fi
	# Engine unreachable: optionally dump connectivity diagnostics.
	if [ "$DEBUG" == "1" ]; then
		echo "Command: [echo 'select 1' | mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A $DBNAME]"
		echo "Output: [$r]"
		traceroute $DBHOST
		nmap $DBHOST -v -v -p $DBPORT
	fi
	return 1
}
# Load database
#
# db_load — populate $DBNAME on first boot: when fewer than
# $PANDORA_TABLES_MIN tables exist, load the schema and seed data
# shipped with the console.
# Returns: 0 already loaded / loaded OK, 1 schema load failed,
#          2 seed-data load failed.
function db_load {
	# Load DB
	echo -n ">> Checking database state:"
	r=`mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A $DBNAME -s -e 'show tables'| wc -l`
	if [ "$DEBUG" == "1" ]; then
		echo "Command: [mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST -A $DBNAME -s -e 'show tables'| wc -l]"
		echo "Output: [$r]"
	fi
	if [ "$r" -ge "$PANDORA_TABLES_MIN" ]; then
		echo 'OK. Already exists, '$r' tables detected'
		return 0
	fi
	echo 'Empty database detected';
	# Needs to be loaded.
	echo -n "- Loading database schema: "
	r=`mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST $DBNAME < $PANDORA_CONSOLE/pandoradb.sql`
	if [ $? -ne 0 ]; then
		echo "mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST $DBNAME < $PANDORA_CONSOLE/pandoradb.sql"
		echo "ERROR"
		echo "$r"
		return 1;
	fi
	echo "OK"
	echo -n "- Loading database data: "
	r=`mysql -u$DBUSER -p$DBPASS -P$DBPORT -h$DBHOST $DBNAME < $PANDORA_CONSOLE/pandoradb_data.sql`
	if [ $? -ne 0 ]; then
		echo "ERROR"
		echo $r
		return 2;
	fi
	echo "OK"
	# Loaded.
	return 0
}
#
# Prepare & start Pandora FMS Console
#
# console_prepare — write the console's config.php (DB credentials are
# interpolated by the shell; the \$config names stay escaped so PHP
# sees them literally), relax php.ini limits, and start apache in the
# background.
function console_prepare {
	CONSOLE_PATH=/var/www/html/pandora_console
	echo ">> Preparing console"
	# Delete install and license files.
	rm -f $CONSOLE_PATH/install.php $CONSOLE_PATH/install.done
	# Configure console.
	cat > $CONSOLE_PATH/include/config.php << EO_CONFIG_F
<?php
\$config["dbtype"] = "mysql";
\$config["dbname"]="$DBNAME";
\$config["dbuser"]="$DBUSER";
\$config["dbpass"]="$DBPASS";
\$config["dbhost"]="$DBHOST";
\$config["homedir"]="/var/www/html/pandora_console";
\$config["homeurl"]="/pandora_console";
error_reporting(0);
\$ownDir = dirname(__FILE__) . '/';
include (\$ownDir . "config_process.php");
EO_CONFIG_F
	echo "- Fixing permissions"
	# config.php holds the DB password — keep it readable by apache only.
	chmod 600 $CONSOLE_PATH/include/config.php
	chown apache. $CONSOLE_PATH/include/config.php
	# prepare php.ini
	sed -i -e "s/^max_input_time.*/max_input_time = -1/g" /etc/php.ini
	sed -i -e "s/^max_execution_time.*/max_execution_time = 0/g" /etc/php.ini
	sed -i -e "s/^upload_max_filesize.*/upload_max_filesize = 800M/g" /etc/php.ini
	sed -i -e "s/^memory_limit.*/memory_limit = 500M/g" /etc/php.ini
	# Start httpd
	echo "- Starting apache"
	/tmp/run-httpd.sh &
}
# Prepare server configuration
# server_prepare — point pandora_server.conf at the configured DB,
# name this server instance, and register the service command.
function server_prepare {
	sed -i -e "s/^dbhost.*/dbhost $DBHOST/g" $PANDORA_SERVER_CONF
	sed -i -e "s/^dbname.*/dbname $DBNAME/g" $PANDORA_SERVER_CONF
	sed -i -e "s/^dbuser.*/dbuser $DBUSER/g" $PANDORA_SERVER_CONF
	# '|' delimiter: the password may legitimately contain '/'.
	sed -i -e "s|^dbpass.*|dbpass $DBPASS|g" $PANDORA_SERVER_CONF
	sed -i -e "s/^dbport.*/dbport $DBPORT/g" $PANDORA_SERVER_CONF
	sed -i -e "s/^#servername.*/servername $INSTANCE_NAME/g" $PANDORA_SERVER_CONF
	echo "pandora_service_cmd /etc/init.d/pandora_server" >> $PANDORA_SERVER_CONF
}
# Run Pandora server
#
function server_run {
# Tail extra logs
sleep 5 && tail -F /var/log/pandora/pandora_server.{error,log} /var/www/html/pandora_console/pandora_console.log /var/log/httpd/error_log &
# Launch pandora_server
$PANDORA_SERVER_BIN $PANDORA_SERVER_CONF
}
## MAIN
#
if [ "$DBUSER" == "" ] || [ "$DBPASS" == "" ] || [ "$DBNAME" == "" ] || [ "$DBHOST" == "" ]; then
echo "Required environemntal variables DBUSER, DBPASS, DBNAME, DBHOST"
exit 1
fi
if [ "$DBPORT" == "" ]; then
DBPORT=3306
fi
# Start tentacle
echo -n ">> Starting tentacle: "
if [ `/etc/init.d/tentacle_serverd restart | grep "is now running with PID" | wc -l` -ne 1 ]; then
echo "ERROR"
exit 1
fi
echo "OK"
# Check and prepare
db_check && db_load && console_prepare
# Enable discovery
echo ">> Enable discovery cron: "
while true ; do wget -q -O - --no-check-certificate http://localhost/pandora_console/enterprise/cron.php >> /var/www/html/pandora_console/pandora_console.log && sleep 60 ; done &
# Enable cron
echo ">> Enable pandora_db cron: "
/usr/share/pandora_server/util/pandora_db.pl /etc/pandora/pandora_server.conf
while true ; do sleep 1h && /usr/share/pandora_server/util/pandora_db.pl /etc/pandora/pandora_server.conf; done &
# Check and launch server
echo ">> Starting server: " Check and launch server
server_prepare && server_run
| true |
e9e67b5bba95ff374c2ec2c7f5491672dcf67cea | Shell | megacoder/howto | /howtos/howto-vim | UTF-8 | 1,102 | 3.15625 | 3 | [] | no_license | #!/bin/zsh
# This script gets run in the top-level directory and invokes
# my "configure" script there; the "-C src" switch makes the
# script run in the "src" subdirector.
RPMS=(
libXt-devel
)
NORPM=
for RPM in ${RPMS}; do
if rpm -q ${RPM} >/dev/null 2>&1; then
# OK
else
NORPM+=" ${RPM}"
echo "${RPM} not found." >&2
fi
done
[[ ! -z "${NORPM}" ]] && exit 1
if [[ -x /bin/pump ]]; then
eval $(/bin/pump --startup)
ZSHEXIT() {
/bin/pump --shutdown
}
fi
#
configure -C src -f -m -n vim \
-- \
--with-features=huge \
\
--disable-netbeans \
--disable-nls \
--enable-cscope \
--enable-fontset \
--enable-gui \
--enable-luainterp=yes \
--enable-python3interp=yes \
--enable-pythoninterp=yes \
--enable-tclinterp \
--enable-rubyinterp=yes \
\
"$@" \
2>&1 |
tee howto.log
exit $?
# QUARRANTEENED
--enable-perlinterp=dynamic \
--with-tlib=ncurses \
# UNUSED
--disable-nextaw-check \
--enable-gui=athena \
--enable-gui=gnome \
--enable-gui \
| true |
b860a1fb6daa3d4bc2497833c201363b99551072 | Shell | lkochniss/intent-backend | /travis/before-install.sh | UTF-8 | 201 | 2.84375 | 3 | [] | no_license | #!/bin/bash -e
pushd "$(dirname $0)" > /dev/null
cd ..
rm composer.lock
composer self-update -q
if [ -n "GITHUB" ] ; then
composer config github-oauth.github.com ${GITHUB};
else
exit 1;
fi
| true |
53332033724f6993e25711318932b52c89b8a54d | Shell | CARLA-TEAM/uwa-simulation | /scripts/run-ros.sh | UTF-8 | 1,295 | 3 | 3 | [] | no_license | #!/bin/bash
# ---------------------------------------------------------------------
# SCRIPT TO RUN CARLA WITH ROS
#
# Troubleshooting:
# Could not contact ROS master at http://localhost:XXXX
# Run the next command on the terminal: roscore
# -----------------------------------------------------------------------
cd ~
cd /home/uwarobotics/Documents/Project/CARLA-package
echo "Opening CARLA"
./CarlaUE4.sh </dev/null &>/dev/null &
# Stores the process ID
pid=$!
# Displays the process ID in the console
echo "CARLA Process ${pid}"
# Waits 10 seconds to open Carla
sleep 10
# -----------------------------------------------------------------------
# Run ROS
echo "Opening ROS"
export PYTHONPATH=$PYTHONPATH:/Simulation/Gerardo/carla/PythonAPI/carla/dist/carla-0.9.9-py2.7-linux-x86_64.egg
cd ~
source /opt/carla-ros-bridge/melodic/setup.bash
roslaunch carla_ros_bridge carla_ros_bridge_with_example_ego_vehicle.launch </dev/null &>/dev/null &
# Stores the process ID
pid=$!
# Displays the process ID in the console
echo "ROSLAUNCH Process ${pid}"
# -----------------------------------------------------------------------
# Run RVIZ
echo "Opening RVIZ"
rviz </dev/null &>/dev/null &
# Stores the process ID
pid=$!
# Displays the process ID in the console
echo "ROSLAUNCH Process ${pid}" | true |
21752367e295544ff59551803762caa78235810f | Shell | jianxiamage/Proj_TestResults | /TestResult_Project_ver0.3/GetResultsXls/Make_Result_Ini_Select.sh | UTF-8 | 2,156 | 3.8125 | 4 | [] | no_license | #!/bin/bash
#set -e
#功能:
#多线程执行程序,可以设置测试节点的每个测试用例的测试结果(标记:初值->1,成功->0)
#最终目的是便于之后为在前端展示测试用例的执行结果而做的结果文件
if [ $# -ne 2 ];then
echo "usage: $0 TestType Platform"
exit 1
fi
TestType="$1"
Platform="$2"
#----------------------------------------------------------------------------------------
#TestType="OS"
#Platform="7A_Integrated"
#----------------------------------------------------------------------------------------
ResultPath='/data'
destPath="${ResultPath}/${TestType}/${Platform}"
#----------------------------------------------------------------------------------------
outputDir=TestMark_MakeIni
outputFile="${outputDir}/TestMark_${TestType}_${Platform}.txt"
ScoreListFile="ScoreCaseList/ScoreCaseList_${TestType}.txt"
#----------------------------------------------------------------------------------------
okfile='ok_file.txt'
errfile='err_file.txt'
#----------------------------------------------------------------------------------------
if [ ! -s $ScoreListFile ]
then
echo "File $ScoreListFile is not Existed!"
exit 1
fi
mkdir $outputDir -p
rm -rf $outputFile
start_time=`date +%s` #定义脚本运行的开始时间
:> ${okfile}
:> ${errfile}
echo "Making the Results ini files Begin..."
echo "***************************************************"
rm -rf $outputFile
case_count=0
for line in `cat ${ScoreListFile}`
do
{
case_name=$line
echo "当前测试用例:$case_name" |tee -a ${outputFile}
sh make_Ini_Points_all.sh $TestType $Platform "${case_name}"
if [ $? -eq 0 ];
then
echo "TestType:[${TestType}] Platform:[${Platform}] TestCase:[${case_name}] setting result file success!" >> $okfile
else
echo "TestType:[${TestType}] Platform:[${Platform}] TestCase:[${case_name}] setting result file failed!" >> $errfile
fi
}
done
echo "***************************************************"
stop_time=`date +%s` #定义脚本运行的结束时间
echo "Exec Time:`expr $stop_time - $start_time`s"
pwd
echo "Making the Results ini files success."
| true |
d4d151c0bd58f88540a2c8d11d914440b4eb2a83 | Shell | webee/rc | /env/pyenvrun.sh | UTF-8 | 1,165 | 3.8125 | 4 | [] | no_license | #!/bin/bash
#require: virtualenv, virtualenvwrapper
#set -x
#set -e
function set_py_home() {
# if already set.
if [ "$PY_HOME" != "" ]; then
return
fi
# find from global python profile.
if [ -f /etc/profile.d/python.sh ]; then
source /etc/profile.d/python.sh
if [ "$PY_HOME" != "" ]; then
return
fi
fi
# suppose this script is together with python
if [ -x "$(dirname $0)/python" ]; then
export PY_HOME=$(dirname $0)
return
fi
# suppose current python is right.
if hash python >/dev/null 2>&1; then
if [ -f "$(dirname $(which python))/virtualenvwraper.sh" ]; then
export PY_HOME=$(dirname $(which python))
return
fi
fi
echo "can't set PY_HOME" 1>&2
exit 123
}
set_py_home
export VIRTUALENVWRAPPER_PYTHON=${PY_HOME}/python
source ${PY_HOME}/virtualenvwrapper.sh
if [ "$1" != "" ] && ! [[ "$1" == -* ]]; then
if workon $1 >/dev/null 2>&1; then
shift
fi
fi
if [ "$VIRTUAL_ENV" != "" ]; then
${VIRTUAL_ENV}/${VIRTUALENVWRAPPER_ENV_BIN_DIR}/python "$@"
else
${PY_HOME}/python "$@"
fi
| true |
0cababbbf8aaf785443a5c76cd72259902ea6abd | Shell | gvalkov/dotfiles | /zsh/install.sh | UTF-8 | 153 | 2.828125 | 3 | [] | no_license | #!/bin/sh
set -x
cd $(dirname $0)
c () { readlink -f $1 ; }
ln -snvf `c zshrc` ~/.zshrc
ln -snvf `c zshenv` ~/.zshenv
ln -snvf `c zfunc` ~/.zfunc
| true |
5449c5cd07c045bdb24645dba96a27594104538b | Shell | jasonfsmitty/utilities | /scripts/common.sh | UTF-8 | 4,965 | 4.21875 | 4 | [] | no_license | #!/bin/bash
#---------------------------------------------------------------------
# Options for common functions; can modify directly or use the
# appropriate set_*() helper.
OPT_COMMON_VERBOSE=${OPT_COMMON_VERBOSE:-false}
OPT_COMMON_DRY_RUN=${OPT_COMMON_DRY_RUN:-false}
#---------------------------------------------------------------------
# https://unix.stackexchange.com/questions/9957/how-to-check-if-bash-can-print-colors
if test -t 1
then
# https://stackoverflow.com/questions/5947742/how-to-change-the-output-color-of-echo-in-linux
# Reset
NO_COLOR='\033[0m' # Text Reset
NC=${NO_COLOR}
# Regular Colors
BLACK='\033[0;30m' # Black
RED='\033[0;31m' # Red
GREEN='\033[0;32m' # Green
YELLOW='\033[0;33m' # Yellow
BLUE='\033[0;34m' # Blue
PURPLE='\033[0;35m' # Purple
CYAN='\033[0;36m' # Cyan
WHITE='\033[0;37m' # White
# Bold
BBLACK='\033[1;30m' # Black
BRED='\033[1;31m' # Red
BGREEN='\033[1;32m' # Green
BYELLOW='\033[1;33m' # Yellow
BBLUE='\033[1;34m' # Blue
BPURPLE='\033[1;35m' # Purple
BCYAN='\033[1;36m' # Cyan
BWHITE='\033[1;37m' # White
fi
#---------------------------------------------------------------------
function parse_bool()
{
case "${1}" in
true|yes|1)
return 0
;;
false|no|0)
return 1
;;
esac
return 2
}
#---------------------------------------------------------------------
function to_bool()
{
parse_bool "${1}" && echo "true" || echo "false"
}
#---------------------------------------------------------------------
function set_verbose()
{
OPT_COMMON_VERBOSE="$(to_bool $1)"
}
#---------------------------------------------------------------------
function is_verbose()
{
${OPT_COMMON_VERBOSE}
}
#---------------------------------------------------------------------
function set_dry_run()
{
OPT_COMMON_DRY_RUN="$(to_bool $1)"
}
#---------------------------------------------------------------------
function is_dry_run()
{
${OPT_COMMON_DRY_RUN}
}
#---------------------------------------------------------------------
function run()
{
if ${OPT_COMMON_DRY_RUN}
then
info "SKIP: ${@}"
else
debug "Running '${@}'"
"${@}"
fi
}
#---------------------------------------------------------------------
function quietly()
{
local command="${1}"
shift
if is_verbose
then
run "${command}" "${@}"
else
run "${command}" "${@}" >/dev/null
fi
}
#---------------------------------------------------------------------
# Display an error, but do not exit; this is for use in cases
# where the caller wants to take some action (such as printing
# usage info) between the error message and exiting.
function error_msg()
{
echo -e "- ${RED}ERROR${NC}: ${@}" 1>&2
}
# Display a fatal error, then exit immediately
function error()
{
error_msg "${@}"
exit 1
}
#---------------------------------------------------------------------
# Display a warning, then continue on
function warn()
{
echo -e "- ${YELLOW}WARN ${NC}: ${@}" 1>&2
}
#---------------------------------------------------------------------
function info()
{
echo "+ ${@}"
}
#---------------------------------------------------------------------
function debug()
{
echo "+ ${@}" 1>&2
}
#---------------------------------------------------------------------
function banner()
{
echo "+-----------------------------------------------------------------------"
echo "+ ${@}"
echo "+-----------------------------------------------------------------------"
}
#---------------------------------------------------------------------
function maybe_banner()
{
if is_verbose
then
banner "${@}"
else
info "${@}"
fi
}
#---------------------------------------------------------------------
# Add a prefix to every line piped via stdin;
# Example:
# cat <somefile> | prefix "contents: "
function prefix()
{
while IFS= read -r line; do echo -e "${1}${line}"; done
}
#---------------------------------------------------------------------
# Convienence wrapper around prefix to indent every line 4 spaces
function indent()
{
prefix " "
}
#---------------------------------------------------------------------
# Spinning bar, use Ctrl-C to quit spinning and return to the calling code
function spinner()
{
(
trap 'exit 0' SIGINT
i=1
sp="/-\|"
echo -n ' '
while true
do
printf "\b${sp:i++%${#sp}:1}"
sleep 0.5
done
)
echo ""
return 0
}
#---------------------------------------------------------------------
function fileExists()
{
[[ -f "${1}" ]]
}
#---------------------------------------------------------------------
function fileMissing()
{
[[ ! -f "${1}" ]]
}
#---------------------------------------------------------------------
# Usage:
# join '<delim>' <args>
# Example:
# a=( 1 2 3 )
# join , ${a[@]} => 1,2,3
function join()
{
local IFS="${1}"
shift
echo "${*}"
}
#---------------------------------------------------------------------
| true |
5ef28e5ec50ef6b08d758ae21e875f63de8fa2e2 | Shell | MASILab/N-GSBSS | /scripts/examples/freesurfer/runSmooth_Freesurfer.sh | UTF-8 | 572 | 2.96875 | 3 | [] | no_license | #!/bin/bash
input=$1
basedir=$2
outdir=$3
lh_tgtSurface=lh_tgtSurface.res.vtk
rh_tgtSurface=rh_tgtSurface.res.vtk
mkdir $outdir
echo $DATE_WITH_TIME
# Geodescic distance ---
DATE_WITH_TIME=`date "+%Y-%m-%d-%H:%M:%S"`
echo "$DATE_WITH_TIME:Starting 2mm smoothing"
GeodesicSmoothing -i $lh_tgtSurface -p $basedir/lh_${input}_prj_res.prj.txt --size 2 -o $outdir/lh_${input}_odi_prj_res_2mm.txt --writevtk --saven
GeodesicSmoothing -i $rh_tgtSurface -p $basedir/rh_${input}_prj_res.prj.txt --size 2 -o $outdir/rh_${input}_odi_prj_res_2mm.txt --writevtk --saven
| true |
7d5b55fbfbb2a3d24a7a2ae73cf39ae5207e1a98 | Shell | internetwache/Internetwache-CTF-2016 | /tasks/web90/code/cleanpdfdir.sh | UTF-8 | 114 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
DIR="./pdf/"
SIZE=$(du -sm $DIR | grep -oP "\d+")
if [[ $SIZE -gt 100 ]]; then
rm -r "$DIR*.pdf"
fi
| true |
4557fa40196508bc938ca7b4b09ec7fa9f6cc9f9 | Shell | ismo-karkkainen/datalackey | /test/error/run.sh | UTF-8 | 3,122 | 3.078125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"UPL-1.0"
] | permissive | #!/bin/sh
if [ $# -ne 1 ]; then
echo "Usage: $(basename $0) datalackey-executable"
exit 100
fi
B=$(basename $0 .sh)
DL=$1
OUT="${B}_out.txt"
EXP="${B}_expected.txt"
cat > _script.sh << EOF
#!/bin/sh
while read L
do
echo \$L
done
EOF
chmod a+x _script.sh
(
echo '["1","run","in","JSON","stdin","program","./_script.sh"]'
nap
echo '["1","run","program","./_script.sh"]'
echo '["1","end-feed","1"]'
nap
echo '["2","run","invalid"]'
echo '["3","run","env"]'
echo '["4","run","env",1,1]'
echo '["5","run","output","str",0]'
echo '["6","run","env","invalid=name",0]'
echo '["7","run","env","name",0,"env","name",1]'
echo '["8","run","in","JSON","stdin","in","JSON","stdin"]'
echo '["9","run","in","invalid","stdin"]'
echo '["10","run","in","JSON","invalid"]'
echo '["11","run","out","invalid","stdin"]'
echo '["12","run","out","JSON","invalid"]'
echo '["13","run","notify","invalid"]'
echo '["14","run","notify","data","program","./_script.sh"]'
echo '["15","run","change-directory","/invld","program","./_script.sh"]'
echo '["16","run","output","data","label","output","data",null,"program","./_script.sh"]'
echo '["18","run","output","data","label","program","./_script.sh"]'
echo '["19","run","program","./invld"]'
echo '["20","run","out","JSON","stdout","out","JSON","stdout"]'
echo '["21","run","out","JSON","stderr","out","JSON","stderr"]'
) | $DL -m -i stdin JSON -o stdout JSON |
sed 's/invld",.*]$/invld","err"]/' | replace-pid > "$OUT"
cat > "$EXP" << EOF
["1","run","running","pid"]
[null,"process","started","1","pid"]
["1","run","error","identifier","in-use"]
["1","done",""]
["1","done",""]
set
["1","run","input","closed"]
["1","run","exit",0]
end
[null,"process","ended","1","pid"]
["1","run","finished"]
["1","done",""]
["2","error","unknown","2","run","invalid"]
["2","done",""]
["3","error","missing","3","run","env"]
["3","done",""]
["4","error","not-string","4","run","env",1]
["4","done",""]
["5","error","not-string-null","5","run","output","str",0]
["5","done",""]
["6","run","error","env","argument","invalid","invalid=name"]
["6","done",""]
["7","run","error","env","argument","duplicate","name"]
["7","done",""]
["8","run","error","in","multiple"]
["8","done",""]
["9","error","unknown","9","run","in","invalid"]
["9","done",""]
["10","error","unknown","10","run","in","JSON","invalid"]
["10","done",""]
["11","error","unknown","11","run","out","invalid"]
["11","done",""]
["12","error","unknown","12","run","out","JSON","invalid"]
["12","done",""]
["13","error","unknown","13","run","notify","invalid"]
["13","done",""]
["14","run","error","notify","no-input"]
["14","done",""]
["15","run","error","change-directory","/invld","err"]
["15","done",""]
["16","run","error","output","duplicate","data"]
["16","done",""]
["18","run","error","out","missing"]
["18","done",""]
subset
["19","run","error","program","./invld","err"]
["19","run","error","program","./invld"]
end
["19","done",""]
["20","run","error","out","duplicate","stdout"]
["20","done",""]
["21","run","error","out","duplicate","stderr"]
["21","done",""]
EOF
compare-output "$OUT" "$EXP" && rm -f "$OUT" "$EXP" _script.sh
| true |
e34163c3b68bab690586dfe86ef64ac23e560189 | Shell | sikachu/dotfiles | /aliases | UTF-8 | 5,003 | 2.8125 | 3 | [
"MIT"
] | permissive | # vim: set filetype=bash:
# aliases
alias ra="source ~/.aliases"
alias ea="mvim ~/.aliases"
# rcm
alias rcup="rcup -v"
# zsh
alias ezsh="mvim ~/.zshrc"
# General
alias agq='ag -Q'
e() {
if [[ -x "$(command -v mvim)" ]]; then
mvim $@
else
vim $@
fi
}
p() {
cd "$HOME/code/$1"
}
_projects() {
compadd `ls -1 "$HOME/code"`
}
compdef _projects p
alias vcat='vimcat'
# Foolproof
alias sl="ls"
alias -g pdos="pods"
alias gti="git"
# Git
alias g="git"
alias ga='git add'
alias gaa='git add -A .'
alias gb='git branch'
alias gba='git branch -a'
alias gc='git commit -v'
alias gfu='git commit --fixup'
alias gca='git commit -a'
alias gcam='git commit -a -m'
alias gcf='git commit --amend --verbose'
alias gcfh='git commit --amend -C HEAD'
alias gclean='git cleanup'
alias gcl='git cleanup'
alias gcm='git commit -m'
alias gco='git checkout'
compdef _git gco=git-checkout
alias gcb='git checkout -b'
alias gre='git restore'
compdef _git gre=git-restore
_git_base_branch() {
if git show-ref --verify --quiet refs/heads/main; then
echo 'main'
else
echo 'master'
fi
}
alias gcom='gco $(_git_base_branch)'
alias gd='git diff'
alias gf='git fetch'
alias gfo='git fetch origin'
alias gl='git log'
alias gm='git merge'
alias gp='git push'
alias gpo='git push origin'
alias gpom='gpo $(_git_base_branch)'
alias gpp='echo "=> Pulling changes from remote ..." && git pull --rebase && echo "=> Pushing changes to remote ..." && git push'
alias gpu='git pull'
alias gpuo='git pull origin'
alias gpur='git pull --rebase'
alias gr='git rebase'
alias gra='git rebase --abort'
alias grc='git rebase --continue'
alias gres='git restore'
alias grs='git restore --staged'
alias grm='git fetch origin && git rebase origin/$(_git_base_branch)'
alias grp='git remote prune'
alias grpo='git remote prune origin'
alias gs='git status'
alias gsu='git submodule init; git submodule update'
#git pull from
gpf() { git pull $1 $(_git_base_branch); }
gpfb() { git pull $1 $2; }
gcot() { gb --remotes --no-color | grep "^[[:space:]]\+origin\/$1" | head -n 1 | awk -F"/" '{ print $2 }' | xargs git checkout }
gff() { git remote add $1 "git://github.com/$1/${PWD##*/}.git" && git fetch $1 }
gpr() {
current_branch=`git rev-parse --abbrev-ref HEAD`
if [[ $1 != '' ]]; then
upstream="$1..."
else
upstream=''
fi
origin_username=`git remote -v | grep 'origin.\+fetch' | head -n 1 | sed "s/^.*github.com[\/:]\(.*\)\/\(.*\)\.git.*$/\1/"`
origin_repository=`git remote -v | grep 'origin.\+fetch' | head -n 1 | sed "s/^.*github.com[\/:]\(.*\)\/\(.*\)\.git.*$/\2/"`
if [[ `git remote | grep sikachu` != '' ]]; then
my_username='sikachu'
else
my_username=$origin_username
fi
open -a /Applications/Safari.app "https://github.com/$origin_username/$origin_repository/compare/$upstream$my_username:$current_branch?expand=1"
}
# Homebrew
alias bbedit="$EDITOR ~/.Brewfile && brew bundle --global"
alias bbup="brew update && brew upgrade"
alias bb="command brew bundle --global"
brew() {
if [[ $1 = 'install' ]]; then
echo "brew install disabled; Edit '~/.Brewfile' and use 'brew bundle' instead."
else
command brew $@
fi
}
# Ruby & Rails
alias be="bundle exec"
alias b="bundle"
gemcd() { cd $(bundle info --path $@) }
_gemcd() {
compadd `bundle list --name-only`
}
compdef _gemcd gemcd
rspec() {
if [ -f "bin/rspec" ]; then
bin/rspec $@
elif [ -f "Gemfile" ]; then
command bundle exec rspec $@
else
command rspec $@
fi
}
rake() {
if [ -f "bin/rake" ]; then
bin/rake $@
elif [ -f "Gemfile" ]; then
command bundle exec rake $@
else
command rake $@
fi
}
rails() {
if [ -f "bin/rails" ]; then
bin/rails $@
elif [ -f "Gemfile" ]; then
command bundle exec rails $@
else
command rails $@
fi
}
alias rc='git ls-files -m | xargs ls -1 2>/dev/null | grep '\.rb$' | xargs rubocop'
alias rct='ctags -R --languages=ruby --exclude=.git --exclude=log --exclude=tmp . $(bundle list --paths)'
alias rs='rails server'
alias sp="bin/rspec"
alias ss="spring stop"
alias sync='git pull && bundle && yarn && bin/rails db:migrate'
alias t='bundle exec ruby -Itest'
alias wp='bin/webpack-dev-server'
# docker-compose
du() {
if [[ $1 != '' ]]; then
tag="$1"
else
tag=$(basename `pwd`)
fi
if [[ $2 != '' ]]; then
port="$2"
else
port=$(grep EXPOSE Dockerfile | cut -f 2 -d " ")
port=$port:$port
fi
docker build -t $tag . && docker image prune -f && docker run --rm -p $port $tag
}
# k8s
alias kb="kubecolor"
alias kbon="kubeon"
alias kbdiff="kustomize build . | kubecolor diff -f -"
alias kbapply="kustomize build . | kubecolor apply -f -"
alias kbuild="kustomize build . | yq -C"
alias kbnamespace="kubens"
alias kbns="kubens"
alias kbenv="kubens"
alias kbcluster="kubectx"
alias kbcl="kubectx"
alias kbg="kubecolor get"
alias kbgp="kubecolor get pods"
alias kbdp="kubecolor describe pod"
alias kbd="kubecolor describe"
alias kbl="kubecolor logs"
# Terraform
alias tfsec='tfsec --concise-output'
| true |
fe045540aa2cb57ad85676b52631e800f15d03f0 | Shell | ak352/melanomics | /genome/variants2/annotation/mutect_strelka/runAnnovarTestvariantFile.sh | UTF-8 | 6,608 | 2.828125 | 3 | [] | no_license | #!/bin/bash
########################################################
# Version 25.03.2014
# - change dbsnp version 137 to 138
# - change cosmic verstion 67 to 68
# - add cosmic version 68 wgs
# - add clinVar version 20140211
# - change from ljb2 to ljb23 with new scores
# - add cadd scores direct from ANNOVAR (>20, 1% percentile)
# - add Complete Genomics dataset cg46
projectdir=/work/projects/melanomics/
toolsdir=$projectdir/tools
cgatools=$toolsdir/cgatools/cgatools-1.8.0.1-linux_binary-x86_64/bin/cgatools
annovardir=$toolsdir/annovar/annovar/
commonvariants=$projectdir/data/commonvariantblocks.tsv
annovar=$annovardir/annovar/annotate_variation.pl
annovartable=$annovardir/annovar/table_annovar.pl
convert2annovar=$projectdir/scripts/snps/pipeline/convertList2Annovar.pl
convertAnnovar2list=$projectdir/scripts/pipeline/convertAnnovarMultianno2CGI.pl
humandb=$annovardir/humandb
buildver=hg19
tested=$1
echo $tested
input=$tested.annovar.input
# convert to ANNOVAR format
perl $convert2annovar $tested >$input
# run various annotation databases
perl $annovardir/annovar/table_annovar.pl $input $annovardir/humandb -buildver $buildver -protocol refGene,ensGene,knownGene,ccdsGene,wgEncodeGencodeManualV4 -operation g,g,g,g,g -outfile $input.geneanno 2>annovar.geneanno.log
perl $annovardir/annovar/table_annovar.pl $input $annovardir/humandb -buildver $buildver -protocol esp6500si_all,esp6500si_ea,cg46,cg69,1000g2012apr_all,1000g2012apr_eur -operation f,f,f,f,f,f -outfile $input.maf 2>annovar.maf.log
perl $annovardir/annovar/table_annovar.pl $input $annovardir/humandb -buildver $buildver -protocol phastConsElements46way,segdup,cytoband,dgv,tfbs,gwascatalog,wgEncodeRegTfbsClustered,wgEncodeRegDnaseClustered,mirna,mirnatarget -operation r,r,r,r,r,r,r,r,r,r -outfile $input.regions 2>annovar.regions.log
perl $annovardir/annovar/table_annovar.pl $input $annovardir/humandb -buildver $buildver -protocol snp138,cosmic68,cosmic68wgs,nci60 -operation f,f,f,f -outfile $input.dbsnpcosmicanno 2>annovar.dbsnpcosmic.log
perl $annovardir/annovar/table_annovar.pl $input $annovardir/humandb -buildver $buildver -protocol ljb23_all,caddgt20,clinvar_20140211 -operation f,f,f -outfile $input.genescores 2>annovar.genescores.log
# add annotation to testvariantfile
cur=$tested
add=$input.dbsnpcosmicanno.${buildver}_multianno.txt
out=$tested.cosmic.dbsnp.nci60
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $out --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --select 'a.*,b.snp138,b.cosmic68,b.cosmic68wgs,b.nci60' --output-mode compact --always-dump
cur=$out
add=$input.maf.${buildver}_multianno.txt
out=$cur.maf
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $out --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --select 'a.*,b.esp6500si_all,b.esp6500si_ea,b.cg46,cg69,b.1000g2012apr_all,b.1000g2012apr_eur' --output-mode compact --always-dump
cur=$out
add=$input.geneanno.${buildver}_multianno.txt
out=$cur.geneanno
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $out --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --output-mode compact --always-dump --select 'a.*,b.Func.refGene,b.Gene.refGene,b.ExonicFunc.refGene,b.AAChange.refGene,b.Func.ensGene,b.Gene.ensGene,b.ExonicFunc.ensGene,b.AAChange.ensGene,b.Func.knownGene,b.Gene.knownGene,b.ExonicFunc.knownGene,b.AAChange.knownGene,b.Func.ccdsGene,b.Gene.ccdsGene,b.ExonicFunc.ccdsGene,b.AAChange.ccdsGene,b.Func.wgEncodeGencodeManualV4,b.Gene.wgEncodeGencodeManualV4,b.ExonicFunc.wgEncodeGencodeManualV4,b.AAChange.wgEncodeGencodeManualV4'
cur=$out
add=$input.genescores.${buildver}_multianno.txt
out=$cur.genescores
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $out --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --output-mode compact --always-dump --select 'a.*,b.LJB23_SIFT_score,b.LJB23_SIFT_score_converted,b.LJB23_SIFT_pred,b.LJB23_Polyphen2_HDIV_score,b.LJB23_Polyphen2_HDIV_pred,b.LJB23_Polyphen2_HVAR_score,b.LJB23_Polyphen2_HVAR_pred,b.LJB23_LRT_score,b.LJB23_LRT_score_converted,b.LJB23_LRT_pred,b.LJB23_MutationTaster_score,b.LJB23_MutationTaster_score_converted,b.LJB23_MutationTaster_pred,b.LJB23_MutationAssessor_score,b.LJB23_MutationAssessor_score_converted,b.LJB23_MutationAssessor_pred,b.LJB23_FATHMM_score,b.LJB23_FATHMM_score_converted,b.LJB23_FATHMM_pred,b.LJB23_RadialSVM_score,b.LJB23_RadialSVM_score_converted,b.LJB23_RadialSVM_pred,b.LJB23_LR_score,b.LJB23_LR_pred,b.LJB23_GERP++,b.LJB23_PhyloP,b.LJB23_SiPhy,b.caddgt20,b.clinvar_20140211'
cur=$out
add=$input.regions.${buildver}_multianno.txt
out=$cur.regions
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $out --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --output-mode compact --always-dump --select 'a.*,b.phastConsElements46way,b.segdup,b.cytoBand,b.dgv,b.tfbs,b.gwascatalog,b.wgEncodeRegTfbsClustered,b.wgEncodeRegDnaseClustered,b.mirna,b.mirnatarget'
add=/work/projects/isbsequencing/data/clinvar/clinvar_current.vcf.annovar.list
cur=$out
$cgatools join --beta --input $cur $add --output $cur.clinVar --match chromosome:chr --match begin:start --match end:stop --match alleleSeq:var --output-mode compact --always-dump --select 'a.*,b.clinVar'
#cur=$cur.clinVar
add=/work/projects/isbsequencing/tools/GWAVA/VEP_plugin/gwava_scores.tsv
$cgatools join --beta --input $cur $add --output $cur.gwava --match chromosome:chr --match begin:start --match end:stop --output-mode compact --always-dump --select 'a.*,b.gwava_reg,b.gwava_tss,b.gwava_unm'
cur=$cur.gwava
add=/work/projects/isbsequencing/data/CADD/cadd.tsv
$cgatools join --beta --input $cur $add --output $cur.cadd --match chromosome:chr --match end:pos --match alleleSeq:alt --output-mode compact --always-dump --select 'a.*,b.CADD_RawScore,b.CADD_PHRED'
cur=$cur.cadd
addGeneScores=/mnt/nfs/projects/isbsequencing/scripts/snps/addGeneScores.pl
add=$tested.genescores.txt
perl $addGeneScores $input.geneanno.${buildver}_multianno.txt >$add
perl $convertAnnovar2list $add >$add.cgi
$cgatools join --beta --input $cur $add.cgi --output $cur.genescores --match chromosome:Chr --match begin:Start --match end:End --match alleleSeq:Alt --output-mode compact --always-dump --select 'a.*,b.RVIS,b.RVISpercent,b.CMD,b.BODYMAP_BRAIN,b.PATHOSCORE,b.HAPLOINSUFF,b.CGD,b.CGD_NEURO'
| true |
715f81e6f10d877b4452716c6164085693250868 | Shell | baseboxorg/dotfiles-7 | /.bashrc | UTF-8 | 259 | 2.625 | 3 | [] | no_license | alias ll='ls -AlF'
source ~/.shell_prompt.sh
export NVM_DIR=~/.nvm
source ~/.nvm/nvm.sh
function vim() {
if test $# -gt 0; then
env vim "$@"
elif test -f Session.vim; then
env vim -S
else
env vim -c Obsession
fi
}
HISTCONTROL=ignoreboth
| true |
2e7ce1700f9a2a143f0cee4adabcea9bf138d5b3 | Shell | Indigitous/godtools-web | /.travis/invalidate_cloudfront_cache.sh | UTF-8 | 1,087 | 3.5625 | 4 | [] | no_license | ###############################################################################
### Resets CloudFront cache with boto/cfadmin utility
### Run: ./this_script
###############################################################################
#
# Travis specific part - run this script only for production
#
if [[ $TRAVIS_BRANCH == 'master' ]]; then
echo -e "\nThis is master/production branch - let's reset the CloudFront cache\n"
else
echo -e "\nReset of CloudFront cache will not be started for non-production branch - exit.\n"
exit 0
fi
#
# Install boto
#
echo -e "\nInstalling boto...\n"
git clone git://github.com/boto/boto.git
cd boto
sudo python setup.py install
cd ../
rm -rf boto
#
# Set up credentials for boto
#
echo -e "\nSet up boto credentials...\n"
echo "[Credentials]" >> ~/.boto
echo "aws_access_key_id = $1" >> ~/.boto
echo "aws_secret_access_key = $2" >> ~/.boto
echo -e "\nCloudFront Invalidating...\n"
cfadmin invalidate E29Z8FMTDDSDC7 "/*"
echo -e "\nInvalidating is in progress...\n"
#
# Clean up
#
echo -e "\nRemove boto config file\n"
rm ~/.boto
| true |
7ee29cc2cc4d89bb515fd428d4de5b93af615a5f | Shell | l-korous/hermes-testing | /run.sh | UTF-8 | 5,182 | 3.21875 | 3 | [] | no_license | cmake .
echo "This test suite will fail if you do not have Hermes, or Valgrind, etc.."
echo "CMake generation done."
echo "Run memory-leaks tests? (Long) [y/n]"
read ans
if [ "$ans" = "y" ]; then
memory_start_time=`date +%s`
echo "Processing memory leaks tests..."
cd memory-leaks
cd 01-memory-simple
echo "Building the test..."
make
echo "Test built, running..."
valgrind --leak-check=full --log-file=../01-memory-simple-ValgrindLogfile ./01-memory-simple 3 3
echo "Valgrind output '01-memory-simple-ValgrindLogfile' available in memory-leaks/"
cd ../02-memory-adapt
echo "Building the test..."
make
echo "Test built, running..."
valgrind --leak-check=full --log-file=../02-memory-adapt-ValgrindLogfile ./02-memory-adapt
echo "Valgrind output '02-memory-adapt-ValgrindLogfile' available in memory-leaks/"
cd ../03-memory-transient-adapt
echo "Building the test..."
make
echo "Test built, running..."
valgrind --leak-check=full --log-file=../03-memory-transient-adapt-ValgrindLogfile ./03-memory-transient-adapt
echo "Valgrind output '03-memory-transient-adapt-ValgrindLogfile' available in memory-leaks/"
echo Memory leaks tests runtime - $(expr `date +%s` - $memory_start_time) s
echo "Memory leaks tests - Done."
cd ../..
fi
echo "Run performance tests? (Very long) [y/n]"
read ans
if [ "$ans" = "y" ]; then
perf_start_time=`date +%s`
echo "Processing performance tests..."
echo "In the meantime:"
echo " Valgrind(memcheck): http://valgrind.org/docs/manual/mc-manual.html"
echo " Callgrind: http://valgrind.org/docs/manual/cl-manual.html"
echo " Cachegrind: http://valgrind.org/docs/manual/cg-manual.html"
echo " DHAT: http://valgrind.org/docs/manual/dh-manual.html"
echo " Massif: http://valgrind.org/docs/manual/ms-manual.html"
echo ""
cd performance
cd 01-performance-simple
echo "Building the test..."
make
echo "Test built, running..."
rm -f cachegrind.out.*
valgrind --log-file=temp --tool=cachegrind ./01-performance-simple 3 3
cg_annotate cachegrind.out.* > ../01-performance-simple-CachegrindLogfile
echo "Cachegrind output '01-performance-simple-CachegrindLogfile' available in performance/"
valgrind --log-file=../01-performance-simple-DHATLogfile --tool=exp-dhat ./01-performance-simple 3 3
echo "DHAT output '01-performance-simple-DHATLogfile' available in performance/"
rm -f massif.out.*
valgrind --log-file=temp --tool=massif ./01-performance-simple 3 3
ms_print massif.out.* > ../01-performance-simple-MassifgrindLogfile
echo "Massif output '01-performance-simple-MassifgrindLogfile' available in performance/"
cd ../02-performance-adapt
echo "Building the test..."
make
echo "Test built, running..."
rm -f cachegrind.out.*
valgrind --log-file=temp --tool=cachegrind ./02-performance-adapt
cg_annotate cachegrind.out.* > ../02-performance-adapt-CachegrindLogfile
echo "Cachegrind output '02-performance-adapt-CachegrindLogfile' available in performance/"
valgrind --log-file=../02-performance-adapt-DHATLogfile --tool=exp-dhat ./02-performance-adapt
echo "DHAT output '02-performance-adapt-DHATLogfile' available in performance/"
rm -f massif.out.*
valgrind --log-file=temp --tool=massif ./02-performance-adapt
ms_print massif.out.* > ../02-performance-adapt-MassifgrindLogfile
echo "Massif output '02-performance-adapt-MassifgrindLogfile' available in performance/"
cd ../03-performance-transient-adapt
echo "Building the test..."
make
echo "Test built, running..."
rm -f cachegrind.out.*
valgrind --log-file=temp --tool=cachegrind ./03-performance-transient-adapt
cg_annotate cachegrind.out.* > ../03-performance-transient-adapt-CachegrindLogfile
echo "Cachegrind output '03-performance-transient-adapt-CachegrindLogfile' available in performance/"
valgrind --log-file=../03-performance-transient-adapt-DHATLogfile --tool=exp-dhat ./03-performance-transient-adapt
echo "DHAT output '03-performance-transient-adapt-DHATLogfile' available in performance/"
rm -f massif.out.*
valgrind --log-file=temp --tool=massif ./03-performance-transient-adapt
ms_print massif.out.* > ../03-performance-transient-adapt-MassifgrindLogfile
echo "Massif output '03-performance-transient-adapt-MassifgrindLogfile' available in performance/"
echo Performance leaks tests runtime - $(expr `date +%s` - $memory_start_time) s
echo "Performance leaks tests - Done."
cd ../..
fi
echo "Run visualization tests? (Short) [y/n]"
read ans
if [ "$ans" = "y" ]; then
echo "Processing visualization tests..."
cd visualization/views
make
./01-all
echo "Outputs saved in visualization/views/*.bmp"
echo "Visualization tests - Done."
cd ../../..
fi
echo "Run inner-funcionality tests? (Very short) [y/n]"
read ans
if [ "$ans" = "y" ]; then
echo "Processing internal tests..."
cd inner-functionality
make -j4
ctest -j4
echo "Internal-functionality tests - Done."
cd ..
fi
echo "Run calculations tests? [y/n]"
read ans
if [ "$ans" = "y" ]; then
echo "Processing calculations tests..."
cd calculations
make -j4
ctest -j4
echo "Calculations tests - Done."
fi
echo "Quitting..."
| true |
ebfd2ba1d5828140f99d301b40d1fbe5334c7a0b | Shell | AatishLanghee/Embedded_Linux_T2 | /stats.sh | UTF-8 | 1,229 | 3.625 | 4 | [] | no_license |
######################################################################################################################################
# Author: Aatish Langhee (MIS: 121735011)
# T2: Program-3: Write a shell script to print RAM usage, disk usage and CPU usage in tabular form. Also, print name of the process that is consuming most CPU at that time instant.
# Here I am printing stats such as Memory (% of RAM being used by all process together) , Disk ( Total disk consumed by all resources) , process ( name of the process that is consuming most CPU at that time instant), CPU ( % of cpu consumed by that process at that time instant)
######################################################################################################################################
#! /bin/bash
SECONDS=1
printf "Time(24 Hours)\t\tMemory\t\tDisk\t\tProcess\t\tCPU\n"
end=$((SECONDS+3600))
while [ $SECONDS -lt $end ]
do
CURRENTDATE=`date +"%d-%b-%Y %T"`
MEMORY=$(free -m | awk 'NR==2{printf " \t%.2f%%\t\t", $3*100/$2 }')
DISK=$(df -h | awk '$NF=="/"{printf "%s\t\t", $5}')
Process=$(ps -eo comm,pcpu --sort=-%cpu | head -n 2 | egrep -v '(0.0)|(%CPU)')
echo ${CURRENTDATE} "$MEMORY$DISK$Process"
SECONDS=$((SECONDS+1))
sleep 5
done
| true |
54a7e22494183dcf6dcc1449d225d8ab968271cc | Shell | sorrowxfull/dots | /.local/bin/scripts/cron/cronbat | UTF-8 | 375 | 3.171875 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/sh
# Notify me with notify-send if my battery is below 25%.
if [ "$(cat /sys/class/power_supply/BAT0/status)" = "Charging" ]; then
[ -f '/tmp/cronbat.lock' ] && rm '/tmp/cronbat.lock'
elif [ "$(cat /sys/class/power_supply/BAT0/capacity)" -lt 25 ] && [ ! -f '/tmp/cronbat.lock' ]; then
touch '/tmp/cronbat.lock'; notify-send -u critical "Battery critically low."
fi
| true |
6aefe6dec764b7ab004bfa1eb4b73f458f08937f | Shell | zhaohe123/system_shell | /rootfs/buildroot/cfg_rootfs.sh | UTF-8 | 2,474 | 3.875 | 4 | [] | no_license | #!/bin/bash
###############################################################################
# 版 权:米联客
# 技术社区:www.osrc.cn
# 功能描述:1. 清除配置文件和编译中间结果
# 2. 重新配置根文件系统
# 3. 编译开发所需要的工具
# 版 本 号:V1.0
###############################################################################
# => Setting The Development Environment Variables
if [ ! "${ZN_CONFIG_DONE}" ];then
echo "[ERROR] 请以“source settings64.sh”的方式执行 settings64.sh 脚本。" && exit 1
fi
# => Filename of the running script.
ZN_SCRIPT_NAME="$(basename ${BASH_SOURCE})"
###############################################################################
# => The beginning
echo_info "[ $(date "+%Y/%m/%d %H:%M:%S") ] Starting ${ZN_SCRIPT_NAME}"
# => Try lsb_release, fallback with /etc/issue then uname command
distributions="(Debian|Ubuntu|RedHat|CentOS|openSUSE|SUSE)"
distribution=$( \
lsb_release -d 2>/dev/null | grep -Eo $distributions \
|| grep -Eo $distributions /etc/issue 2>/dev/null \
|| grep -Eo $distributions /etc/*-release 2>/dev/null \
|| uname -s \
)
case ${distribution} in
CentOS)
# You have PERL_MM_OPT defined because Perl local::lib
# is installed on your system. Please unset this variable
# before starting Buildroot, otherwise the compilation of
# Perl related packages will fail
unset PERL_MM_OPT
;;
*)
;;
esac
# => Make sure the source is there
if [ "`ls -A ${ZN_ROOTFS_DIR}`" = "" ]; then
error_exit "Can't find the source code of buildroot"
else
cd ${ZN_ROOTFS_DIR}
fi
# => 1. Cleaning the Sources
echo_info "To delete all build products as well as the configuration"
make distclean || error_exit "Failed to make distclean"
# => 2. To configure the sources for the intended target.
echo_info "Configure Buildroot on the ${ZN_ROOTFS_DIR}"
make ${ZN_BUILDROOT_DEFCONFIG} || error_exit "Failed to make ${ZN_BUILDROOT_DEFCONFIG}"
# => 3. Download all sources needed for offline-build
echo_info "Download all sources needed for offline-build"
make source || error_exit "Failed to make source"
# => The end
echo_info "[ $(date "+%Y/%m/%d %H:%M:%S") ] Finished ${ZN_SCRIPT_NAME}"
###############################################################################
| true |
fd64db4744173ca7d13e6d7608a083d88b04aaad | Shell | leandroohf/Public_Liberty_Mutual_Group_Property_Inspection_Prediction | /config/setup.sh | UTF-8 | 674 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | echo "preparing variable senviroments"
export PATH=/home/ubuntu/anaconda/bin:${PATH}
echo "updatting the system "
sudo apt-get update ## && sudo apt-get upgrade -y
echo "installing packages "
echo "\tinstalling git"
sudo apt-get -y install git
echo "\tinstalling make"
sudo apt-get -y install make
echo "\tinstalling htop"
sudo apt-get -y install htop
echo "\tinstalling g++"
sudo apt-get -y install g++
echo "updatting pip"
pip install --upgrade pip
echo "installing nose"
pip install nose
echo "clone xgboost"
git clone https://github.com/dmlc/xgboost.git
echo "building xgboost"
cd xgboost
./build.sh
echo "python setting up"
cd python-package
python setup.py install
| true |
539a18d8f49bf4bbdb6ef8b7b3f7017cd90d533f | Shell | jacobke/etrace | /etrace-plugins/etrace-prometheus-pushgateway/script/run.sh | UTF-8 | 2,301 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]})
# App Home
cd "$SCRIPT_DIR"
APP_HOME=${APP_HOME-$(pwd -P)}
DAEMON="pushgateway"
DESC="ETrace pushgateway"
LOG_PATH="$APP_HOME/logs"
EXE_FILE="$APP_HOME/bin/pushgateway.sh"
LOG_FILE="pushgateway.out"
PID=0
TIMEOUT=3
pushgateway_run(){
echo "Starting ${DESC} ....."
PID=$(sh ${EXE_FILE} check_pid)
if [ "${PID}" != "" ]; then
echo "WARN: ${DESC} already started! (pid=${PID})"
else
sh ${EXE_FILE} start
echo "${DESC} started!"
fi
}
pushgateway_start(){
echo "Starting ${DESC} ....."
PID=$(sh ${EXE_FILE} check_pid)
if [ "${PID}" != "" ]; then
echo "WARN: ${DESC} already started! (pid=${PID})"
else
if [ ! -d "${LOG_PATH}" ]; then
mkdir "${LOG_PATH}"
fi
# nohup sh ${EXE_FILE} start > "${LOG_PATH}/${LOG_FILE}" 2>&1 &
sh ${EXE_FILE} start
echo "${DESC} started!"
fi
}
pushgateway_stop(){
PID=$(sh ${EXE_FILE} check_pid)
if [ "${PID}" != "" ]; then
echo "Stopping ${DESC} ....."
if ! kill -9 ${PID}
then
echo "[OK]"
else
echo "[Failed]"
fi
else
echo "WARN: ${DESC} is stopped."
fi
}
pushgateway_status(){
echo "Checking ${DESC} ....."
PID=$(sh ${EXE_FILE} check_pid)
if [ "${PID}" != "" ]; then
echo "${DESC} is running! (pid=${PID})"
else
echo "${DESC} is stopped!"
fi
}
pushgateway_info(){
echo "Consumer information:"
sh ${EXE_FILE} infoconsumer.sh:
}
pushgateway_force_stop(){
PID=$(sh ${EXE_FILE} check_pid)
if [ "${PID}" != "" ]; then
echo "Stopping ${DESC} ....."
if ! kill -9 ${PID}
then
echo "[OK]"
else
echo "[Failed]"
fi
else
echo "WARN: ${DESC} is not running."
fi
}
pushgateway_restart(){
thrift_stop
[ -n "${TIMEOUT}" ] && sleep ${TIMEOUT}
thrift_start
}
case "$1" in
run)
pushgateway_run
;;
start)
pushgateway_start
;;
stop)
pushgateway_stop
;;
force-stop)
pushgateway_force_stop
;;
restart)
pushgateway_restart
;;
status)
pushgateway_status
;;
info)
pushgateway_info
;;
*)
echo "Usage: $0 {start|stop|forcestop|restart|status|info}"
exit 1;
;;
esac
exit 0
| true |
5885e9f607d1831e4e9e4e724e0daefdae42abc6 | Shell | yugosever/bash-repo | /fedora-2.sh | UTF-8 | 203 | 2.65625 | 3 | [] | no_license | #!/bin/bash
echo "This porgram finds vms ip address and after that make ssh connection"
fedora_2_ip=`virsh net-dhcp-leases default | grep fedora-2 | awk '{print $5}' | cut -d/ -f1`
#ssh vl@$fedora_2_ip
| true |
64d6e2b37dab6bd727bbaae3505a81f3d83822fe | Shell | NrgXnat/xnat_bash_utils | /src/xnat2loc | UTF-8 | 8,267 | 3.71875 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Author: Mikhail Milchenko, mmilchenko@wustl.edu
# Copyright (c) 2010-2021, Computational Imaging Lab, School of Medicine,
# Washington University in Saint Louis
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# in 1: downloaded study location
# in 2: location to save file with mappings
# in 3: predefined scans
function save_UID_map
{
local st=$1
local mf=$2
local sc=($3)
pushd $st/scans &> /dev/null
local scans=""
local uids=""
local scan xnatid suid i j found
for ((i=0; i<${#sc[*]}; i++)); do
scandir=(`ls -d ${sc[i]}-*`)
if [ -z "${scandir}" ]; then
echo "xnat2loc ERROR: cannot find scan ${sc[i]}, exiting"
exit -1
fi
if (( ${#scandir[*]} > 1 )); then
regex="${sc[i]}-MR([0-9]+).*"
found=-1
for (( j=0; j<${#scandir[*]}; j++ )); do
[[ "${scandir[j]}" =~ $regex ]]
if [ -z "${BASH_REMATCH[0]}" ]; then
if (( found != -1 )); then
echo "xnat2loc ERROR: cannot uniquely resolve scan ID: ${sc[i]}, exiting"
exit -1
else
found=$j
fi
fi
done
scandir=${scandir[found]}
if [ -z "${scandir}" -o "$found" == "-1" ]; then
echo "xnat2loc ERROR: cannot find scan ${sc[i]}, exiting"
exit -1
fi
fi
xnatid=${scan%-*}
suid=(`dcminfo -q -t 0020 000e $scandir/resources/DICOM/files`)
suid=( $suid )
suid=`echo $suid | tr -d '<>'`
# scans="$scans ${sc[i]}"
if ((i>0)); then
uids="$uids,${sc[i]},$suid"
else
uids="${sc[i]},$suid"
fi
done
popd &> /dev/null
echo "serinstuids=($uids)" | tr -d ' ' >> $mf
}
function exit_with_error
{
echo "xnat2loc ERROR: $1, exiting"
exit -1
}
#1: str
#2: field
function get_field
{
local b="$1" f="$2" c d
c=${b# * ID=\"}
d=${c%%\"*}
}
###################################################################
#
# Get scan #'s of given types.
#
# input 1: CURL connection string, i.e. curl -k -u user:pass host ....
# input 2: scan type
#
function get_scans
{
local cstr="$1"
local stypes="$2"
local line id scans
local f=`mktemp xnat2locXXXX` f1=`mktemp xnat2locXXXX`
$cstr > $f
grep "xnat:scan ID=.*type=\"$2\"" $f > $f1
while read line; do
c=${line#* ID=\"};id=${c%%\"*}
if [ -n "$scans" ]; then scans="$id,$scans"; else scans=$id; fi
done < $f1
echo $scans
}
#split tokenized string
#input 1: string
#input 2: token
function split_str
{
local str="$1"
local token="$2"
arr=`echo "$str" | tr "$token" " "`
echo $arr
}
echo "XNAT session -> local storage"
if [ -z $1 ]; then
echo "Usage: xnat2loc <session label> [options]"
echo "Options:"
echo " -sr <server> "
echo " -u <user>"
echo " -pa <password>"
echo " -js <JSESSIONID>"
echo " -pr <XNAT project>"
echo " -o <outdir> "
echo " -s <scan_id>[,<scan_id>,...] scan ids (default is ALL)"
echo " -m save XNAT ID -> DICOM Series instance UID map to scans.txt"
echo " -st <scan_type> scan type"
echo " -r <dir>[,<dir>,...] resource directory(ies) to download"
echo " -subj <subject ID>"
echo " -z Do not unzip"
echo " -uncompress uncompress incoming DICOM scans."
echo " -rm_enh remove enhanced MRI from downloaded DICOM's"
echo "NOTE: if both -s and -r are specified, specified scans with specified scan resource dir(s) will be downloaded."
exit -1
fi
umask 0007
dcmsrt=${MFREL}/dcm_sort
dcmsrt=dcm_sort
slabel=$1; shift;
for arg in "$@"; do args[i]=$arg; ((i++)); done
#for((i=1; i<((${#args[*]})); i++)); do echo ${args[i]}; done
mapfile=0; uncompress=0; rm_enh=0
#set -x
JSESSION=""
while [ -n "$1" ]; do
case "$1" in
-sr) shift; server=$1; shift ;;
-z) shift; savezip=1 ;;
-u) shift; user=$1; shift ;;
-pa) shift; pass=$1; shift ;;
-js) shift; JSESSION=$1; shift ;;
-l) shift; slabel=$1; shift ;;
-pr) shift; project=$1; shift ;;
-sr) shift; server=$1; shift ;;
-o) shift; outdir=$1; shift ;;
-m) shift; mapfile=1; shift ;;
-s) shift; scans=$1; shift; if [ -f "$scans" ]; then scans=`cat $scans`; fi ;;
-st) shift; stypes=$1; shift; if [ -f "$stypes" ]; then stypes=`cat $stypes`; fi ;;
-r) shift; rdirs=$1; shift; if [ -f "$rdirs" ]; then stypes=`cat $rdirs`; fi ;;
-rm_enh) shift; rm_enh=1 ;;
-subj) shift; subj=$1; shift ;;
-uncompress) shift; uncompress=1 ;;
*) shift ;;
esac
done
server=`echo $server | sed 's/\/$//'`
if [ -z "$project" -o -z "$subj" -o -z "$slabel" ]; then
exit_with_error "Please specify project, subject and session label."
fi
if [ -z "$JSESSION" ]; then
if [ -z "$user" -o -z "$pass" ]; then exit_with_error "User credentials required"; fi
echo curl -s -k -u $user:xxxx "$server/data/JSESSION"
JSESSION=`curl -s -k -u $user:$pass "$server/data/JSESSION"`
JSERROR=`echo $JSESSION | grep Error`
if [ -n "$JSERROR" ]; then exit_with_error "user credentials failed"; fi
fi
prm="curl -k --cookie JSESSIONID=$JSESSION"; prms="curl -k --cookie JSESSIONID=[...]";
echo "JSESSION: $JSESSION"
#scan resource dir with DICOM files.
dcmdir=DICOM
#uncomment for modified DICOM
#dcmdir=DICOM_ORIG
echo "project: $project subject: $subj session: $slabel scans: $scans"
if [ -z "$outdir" ]; then outdir=$slabel; fi
mkdir $outdir; pushd $outdir &> /dev/null
if [ -n "$stypes" ]; then
cstr="$prm $server/REST/projects/$project/subjects/$subj/experiments/$slabel?format=xml"
extra_scans=`get_scans "$cstr" "$stypes"`
if [ -z "$scans" ]; then
scans=$extra_scans
else
scans="${scans},$extra_scans"
fi
elif [ -z "$scans" ]; then
scans="ALL";
fi
#download scans if no resource dir was specified
if [ -n "$scans" -a -z "$rdirs" ]; then
echo "$prms \"${server}/REST/projects/${project}/subjects/${subj}/experiments/${slabel}/scans/${scans}/files?format=zip\""
$prm "${server}/REST/projects/${project}/subjects/${subj}/experiments/${slabel}/scans/${scans}/files?format=zip" > temp.zip
#download resource dir(s)
if [ -n "$savezip" ]; then popd &> /dev/null; exit 0; fi
unzip temp.zip &> /dev/null; rm temp.zip
mkdir DICOM
# mkdir SECONDARY
# set -x
if [ "$mapfile" == "1" -a "$scans" != "ALL" ]; then
OLDIFS=$IFS
IFS=','
sarr=( $scans )
IFS=$OLDIFS
echo save_UID_map $slabel `pwd`/scans.txt "${sarr[*]}"
save_UID_map $slabel `pwd`/scans.txt "${sarr[*]}"
fi
# set +x
mv */scans/*/*/$dcmdir/files/* DICOM/
if (( uncompress )); then
echo uncompress_dcm_dir `pwd`/DICOM
uncompress_dcm_dir `pwd`/DICOM
fi
if (( rm_enh )); then
echo dcm_rm_enh DICOM
dcm_rm_enh DICOM
fi
$dcmsrt DICOM &> /dev/null
if (( $? )); then exit_with_error "dcm_sort failed"; fi
rm -r $slabel
#mv */scans/*/*/secondary/files/* SECONDARY
#dcm_sort DICOM
else
echo $prms "${server}/REST/projects/${project}/subjects/${subj}/experiments/${slabel}/resources/${rdirs}/files?format=zip"
$prm "${server}/REST/projects/${project}/subjects/${subj}/experiments/${slabel}/resources/${rdirs}/files?format=zip" > temp.zip
unzip temp.zip &> /dev/null
if (( $? )); then exit_with_error "unzip failed"; fi
rm temp.zip
mv ${slabel}/resources/* .
lst=( `split_str $rdirs ","` )
for ((i=0; i<${#lst[*]}; i++)); do
t=${lst[i]}
mv $t/files/* $t/
rm -r $t/files
done
fi
popd &> /dev/null
| true |
45c256fe59132afa834627ce500ce7f95a586399 | Shell | facebook/flow | /tests/saved_state_init_recheck/test.sh | UTF-8 | 2,054 | 3.0625 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | #!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
printf "There should be no errors when the saved state is generated\\n"
assert_ok "$FLOW" status
assert_ok "$FLOW" save-state --root . --out ".flow.saved_state" > /dev/null
assert_ok "$FLOW" stop
cp bar.js.ignored bar.js
echo -e "$(pwd)/bar.js\\n$(pwd)/does_not_exist.php" \
> ".flow.saved_state_file_changes"
printf "\\nFull init with saved state does recheck & sees new error\\n"
start_flow . --saved-state-fetcher "local" --saved-state-no-fallback
assert_errors "$FLOW" status
assert_ok "$FLOW" stop
printf "\\nLazy init with saved state does NOT recheck & sees 0 errors...\\n"
start_flow . --lazy --saved-state-fetcher "local" --saved-state-no-fallback
assert_ok "$FLOW" status
printf "\\n...but focusing the file exposes the error\\n"
assert_ok "$FLOW" force-recheck --focus --no-auto-start bar.js
assert_errors "$FLOW" status
assert_ok "$FLOW" stop
printf "\\n--saved-state-force-recheck does recheck & sees new error\\n"
start_flow . --lazy --saved-state-fetcher "local" --saved-state-no-fallback \
--saved-state-force-recheck
assert_errors "$FLOW" status
assert_ok "$FLOW" stop
# change flowconfig in some meaningful way
printf "\\n# changed" >> "$(pwd)/.flowconfig"
echo -e "$(pwd)/bar.js\\n$(pwd)/.flowconfig" \
> ".flow.saved_state_file_changes"
printf "\\nA file incompatible with rechecks changed, so no saved state loading\\n"
# 78 just means flow start failed. The server exited with 20
assert_exit 78 start_flow_unsafe . \
--saved-state-fetcher "local" --saved-state-no-fallback
printf "\\n...so we need to fallback to non-saved-state\\n"
start_flow . --saved-state-fetcher "local"
assert_errors "$FLOW" status
assert_ok "$FLOW" stop
printf "\\nFallbacks work for lazy mode too\\n"
start_flow . --lazy --saved-state-fetcher "local"
# No errors, since we started in lazy mode so nothing is focused
assert_ok "$FLOW" status
| true |
035eecbe6551763fe52da68071637d4c9a61f5e7 | Shell | szovaty/scripts | /ygrive | UTF-8 | 220 | 3.015625 | 3 | [] | no_license | #!/bin/bash
PATH="/proj/exosite/grive"
LOG="/var/log/ygrive.log"
EXE="/usr/bin/grive"
echo "*** Starting $0 at $PATH on: `/bin/date` ***" >> $LOG
cd $PATH
test $? -eq 0 &&
$EXE 2>&1 | /usr/bin/tee -a $LOG
sleep 2
| true |
c557d63d2f84845a235a5dfd32c27ae0d59cb448 | Shell | armin-m-garcia/autocomplete | /autocomplete/build.sh | UTF-8 | 1,129 | 3.9375 | 4 | [] | no_license | #!/bin/sh
## Is the java version valid
is_version_valid() {
if [[ "$1" ]]; then
version=$("$1" -version 2>&1 | awk -F ' ' '{print $1}')
if [[ "$version" < "16" ]]; then
echo "The minimum java version required to run the application is '16.x.x'. The available version is '${version}'. Please install and/or update path or JAVA_HOME."
fi
fi
}
## Check for Maven
maven_bin=$(type -p mvn)
# Check to see if the maven binary is in the path
if [[ ! -z "$maven_bin" ]]; then
maven=mvn
# Not found
else
echo "Maven is not found. Please install maven and update PATH."
exit 1
fi
## Check for JDK >= 16
javac_bin=$(type -p javac)
# Check to see if the javac binary is in the path
if [[ ! -z "$javac_bin" ]] && [[ -z $(is_version_valid $javac_bin ) ]]; then
javac=javac
# Not found
elif [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/javac" ]] && [[ -z $(is_version_valid $JAVA_HOME/bin/javac) ]]; then
javac="$JAVA_HOME/bin/javac"
# Not found
else
echo "The JDK with version >= 16 is not found. Please install the JDK and update PATH or JAVA_HOME."
exit 1
fi
mvn clean install
| true |
7bdb88e2cac03db515912a625e6180031491c2ee | Shell | HarryKodden/JupyterHub-SAML | /saml/start.sh | UTF-8 | 445 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e
# Apache gets grumpy about PID files pre-existing
rm -f /usr/local/apache2/logs/httpd.pid
sed -i 's/ulimit -n [0-9]*/ulimit -n 100/' /usr/sbin/apachectl
cd /etc/shibboleth && ln -sf /opt/shibboleth-sp/etc/shibboleth/surfconext.pem .
cd /etc/shibboleth && ln -sf /run/sp/sp-* .
/etc/init.d/shibd restart
sed -i 's~%%SERVER_NAME%%~'"$SERVER_NAME"'~g' /etc/apache2/sites-enabled/000-default.conf
exec apachectl -DFOREGROUND
| true |
2511a3f117403a765341ec27ce3399f3dbee0da2 | Shell | galaxyproject/usegalaxy-playbook | /env/common/files/pulsar/mount_cvmfs | UTF-8 | 1,397 | 3.671875 | 4 | [
"AFL-3.0"
] | permissive | #!/bin/bash
##
## This file is maintained by Ansible - CHANGES WILL BE OVERWRITTEN
##
set -euo pipefail
# required vars:
# $CVMFS_MOUNT_ROOT
: ${UID:="$(id -u)"}
: ${CVMFS_BINARY:="cvmfs2"}
: ${CVMFS_LOCAL_ROOT:="/run/user/${UID}/cvmfs"}
: ${CVMFS_KEYS_DIR:="${HOME}/cvmfs/keys"}
: ${CVMFS_LOG_FILE:="$(pwd)/cvmfs.log"}
: ${CVMFS_ALIEN_CACHE:="$(pwd)/cvmfs_cache"}
if [ -z "{$1:-}" ]; then
echo "usage: $0 <repo> [repo...]"
exit 1
fi
cache="${CVMFS_LOCAL_ROOT}/cache"
mkdir -p "$cache"
cat > "${CVMFS_LOCAL_ROOT}/cvmfs.conf" <<EOF
CVMFS_CACHE_BASE="/run/user/${UID}/cvmfs/cache"
CVMFS_RELOAD_SOCKETS="/run/user/${UID}/cvmfs/cache"
CVMFS_QUOTA_LIMIT="-1"
CVMFS_SHARED_CACHE="no"
CVMFS_ALIEN_CACHE="${CVMFS_ALIEN_CACHE}"
CVMFS_USYSLOG="${CVMFS_LOG_FILE}"
CVMFS_CLAIM_OWNERSHIP="yes"
CVMFS_SERVER_URL="http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@"
CVMFS_KEYS_DIR="${CVMFS_KEYS_DIR}"
CVMFS_HTTP_PROXY="DIRECT"
EOF
for repo in "$@"; do
mountpoint="${CVMFS_MOUNT_ROOT}/${repo}"
# a job exiting without unmounting causes a broken mountpoint if you get that node again
fusermount -u "$mountpoint" 2>/dev/null || true
mkdir -p "$mountpoint"
${CVMFS_BINARY} -o "config=${CVMFS_LOCAL_ROOT}/cvmfs.conf" "$repo" "$mountpoint"
echo "mounted $repo on $mountpoint"
done
| true |
f41859c888db8921ec47cebaffebb9fb4c7b54bd | Shell | jensg-st/gobuilder | /build/builder/build.sh | UTF-8 | 612 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
repo=$(cat /flux-data/data.in | jq -r '.repo')
img=$(cat /flux-data/data.in | jq -r '.image')
target=$(cat /flux-data/data.in | jq -r '.target')
login=$(cat /flux-data/data.in | jq -r '.login')
token=$(cat /flux-data/data.in | jq -r '.token')
echo "building $repo"
git clone $repo 2>&1
base=$(basename $repo)
dir=`echo "$base" | cut -d'.' -f1`
cd $dir && ls -la && make 2>&1
podman tag $img $target 2>&1
podman login docker.io -u $login -p $token 2>&1
podman push --format=v2s2 $target 2>&1
podman images 2>&1
podman logout docker.io 2>&1
echo "{ \"build\": true }" > /flux-data/data.out
| true |
7ae15d4aa1d8dbd346eed93d351e808774ab72b8 | Shell | llimon/kubeam | /pre-setup.sh | UTF-8 | 453 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
##
curl -L -o kubectl.linux https://storage.googleapis.com/kubernetes-release/release/v1.9.0/bin/linux/amd64/kubectl
##
## create self signed cert. We don't want to provide real information on cert signature
if [ ! -f server.key -o ! -f server.crt ]; then
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=selfsigned.com" \
-keyout server.key -out server.crt
fi
| true |
e2e6cbe87f28fb5ba0382bdc4c6b9f507ef18d69 | Shell | D0ot/hfcexp | /pred_test_pure.sh | UTF-8 | 1,642 | 2.78125 | 3 | [] | no_license | #!/bin/bash
SIM_SCALAR=/home/doot/programs/simplescalar/simplesim-3.0
SIM_BPRED=${SIM_SCALAR}/sim-bpred
SIM_BPRED_CONFIGS=("-bpred:bimod 2048" "-bpred:2lev 1 1024 8 0" "-bpred taken" "-bpred nottaken")
SIM_BPRED_CONFIG_NAMES=(
"_bimod"
"_2lev"
"_taken"
"_nottaken"
)
OUTPUT_DIR=pred_output
mkdir -p ./${OUTPUT_DIR}
for ((i = 0; i < ${#SIM_BPRED_CONFIGS[*]}; i++))
do
full_command="${SIM_BPRED} ${SIM_BPRED_CONFIGS[$i]} ./176.gcc/gcc00.peak.ev6 ./176.gcc/data/test/input/cccp.i -o ${OUTPUT_DIR}/gcc_out${SIM_BPRED_CONFIG_NAMES[$i]}.s"
echo "${full_command} > ./${OUTPUT_DIR}/gcc${SIM_BPRED_CONFIG_NAMES[$i]}.out &>./${OUTPUT_DIR}/gcc${SIM_BPRED_CONFIG_NAMES[$i]}.err"
done
for ((i = 0; i < ${#SIM_BPRED_CONFIGS[*]}; i++))
do
full_command="${SIM_BPRED} ${SIM_BPRED_CONFIGS[$i]} ./186.crafty/crafty00.peak.ev6"
echo "${full_command} <./186.crafty/data/test/input/crafty.in > ./${OUTPUT_DIR}/crafty${SIM_BPRED_CONFIG_NAMES[$i]}.out &>./${OUTPUT_DIR}/crafty${SIM_BPRED_CONFIG_NAMES[$i]}.err"
done
for ((i = 0; i < ${#SIM_BPRED_CONFIGS[*]}; i++))
do
full_command="${SIM_BPRED} ${SIM_BPRED_CONFIGS[$i]} ./256.bzip2/bzip200.peak.ev6 ./256.bzip2/data/test/input/input.random 2"
echo "${full_command} > ./${OUTPUT_DIR}/bzip2${SIM_BPRED_CONFIG_NAMES[$i]}.out &>./${OUTPUT_DIR}/bzip2${SIM_BPRED_CONFIG_NAMES[$i]}.err"
done
for ((i = 0; i < ${#SIM_BPRED_CONFIGS[*]}; i++))
do
full_command="${SIM_BPRED} ${SIM_BPRED_CONFIGS[$i]} ./181.mcf/mcf00.peak.ev6 ./181.mcf/data/test/input/inp.in"
echo "${full_command} > ./${OUTPUT_DIR}/mcf${SIM_BPRED_CONFIG_NAMES[$i]}.out &>./${OUTPUT_DIR}/mcf${SIM_BPRED_CONFIG_NAMES[$i]}.err"
done
| true |
0827f624084d0c79897035a1e947f761aa650db9 | Shell | motoy3d/SmartJ-Server | /Shell/GMO/playerTweets.sh | UTF-8 | 306 | 2.640625 | 3 | [] | no_license | #!/bin/bash
export JAVA=/usr/bin/java
export WEBINF=/usr/tomcat6/webapps/redsmylife/WEB-INF
export LIB=$WEBINF/lib
CLASSPATH=
for name in `ls $LIB/*.jar`; do
CLASSPATH="${CLASSPATH}:$name"
done
export CLASSPATH=$CLASSPATH:$WEBINF/classes
$JAVA -cp $CLASSPATH com.urawaredsmylife.PlayerTweetsSaver $1
| true |
644a9e521a2b45cb87c84596d751a92f48c70ef0 | Shell | cscolabear/my-brew-bundle | /link-git.sh | UTF-8 | 1,518 | 3.53125 | 4 | [] | no_license | #!/bin/bash
red=`tput setaf 1`
green=`tput setaf 2`
reset=`tput sgr0`
echo
echo
echo "${green}---------------------------${reset}"
echo "${green}---      link-git        ---${reset}"
echo "${green}---------------------------${reset}"
echo
source ./.env
# fun_link SOURCE TARGET
# Create TARGET as a symlink to SOURCE unless TARGET already exists.
# Fix: the function previously IGNORED its arguments and read the caller's
# globals; it now binds $1/$2 to same-named locals so the arguments passed
# at the call sites are actually honoured (printed strings are unchanged).
function fun_link() {
  local source_file="$1"
  local target_file="$2"
  echo " - 🔍 is target exist? : \`${green}${target_file}${reset}\`"
  if [ -e "${target_file}" ]; then
    echo "⭕️${green}.gitconfig files found.${reset}"
  else
    echo " - 🔍 is source exist? : \`${green}${source_file}${reset}\`"
    if [ -e "${source_file}" ]; then
      echo "‼.gitconfig files not found! ready to create link...."
      echo "ln -s ${source_file} ${target_file}"
      ln -s "${source_file}" "${target_file}"
    else
      echo "❌${red}source not exist...${reset}"
    fi
  fi
}
source_file="$SOURCE_ROOT_PATH/System/.gitconfig"
target_file="/Users/$USER/.gitconfig"
echo "🧪 try: ln -s ${source_file} ${target_file}"
# Quote the arguments: paths under $SOURCE_ROOT_PATH may contain spaces.
fun_link "$source_file" "$target_file"
echo
echo "${green}use 'git config --list' list your config...${reset}"
echo
echo
#
source_file="$SOURCE_ROOT_PATH/System/.gitignore_global"
target_file="/Users/$USER/.gitignore_global"
echo "🧪 try: ln -s ${source_file} ${target_file}"
fun_link "$source_file" "$target_file"
echo
echo "${green}use 'git config --global core.excludesfile' your config...${reset}"
echo
echo
echo "${green}-------------${reset}"
echo "${green}--- Done. ---${reset}"
echo "${green}-------------${reset}"
echo
echo
545957c445f155f326f77a54d063b2cc7ce64827 | Shell | qengli/neouzabox | /packages/fbi/scripts/view_img | UTF-8 | 702 | 3.515625 | 4 | [] | no_license | #!/bin/sh
# view_img — build a playlist of image files for the media player.
# Reads the recognised image extensions from /etc/img_ext, writes the
# selected image path(s) to /tmp/view_img_files, then signals the player
# through /var/mp_control.  Exits 1 when nothing matched.
# Usage: view_img [-a|-r] PATH   (no flag: PATH names a single image)
# Join the extensions into a BRE alternation, e.g. "jpg\|png\|gif",
# to be used inside the escaped \(...\) groups in the greps below.
for i in `cat /etc/img_ext`; do
	if [ -z "$EXTS" ]; then
		EXTS="$i"
	else
		EXTS="$EXTS\|$i"
	fi
done
if test "$1" = "-a"; then
	# display all images in the selected folder
	# If the argument is a file rather than a directory, fall back to
	# its parent directory (strip the last path component).
	DIR=$2
	[ -d "$DIR" ] || DIR=${DIR%/[^/]*}/
	# sed prefixes each bare filename from `ls` with the directory path.
	ls -1 "$DIR" | grep -i "\.\($EXTS\)\$" | sed "s%\(.*\)%$DIR\1%" > /tmp/view_img_files
elif test "$1" = "-r"; then
	# display all images in the selected folder and its subfolders (recursive)
	DIR=$2
	[ -d "$DIR" ] || DIR=${DIR%/[^/]*}/
	find "$DIR" | grep -i "\.\($EXTS\)\$" > /tmp/view_img_files
else
	# display a single image
	echo "$1" > /tmp/view_img_files
fi
# Only signal the player when at least one image path was produced.
# NOTE(review): '165' is presumably the player's command code for showing
# the image list — confirm against the media-player control protocol.
if [ -s /tmp/view_img_files ]; then
	echo 'quit 165' > /var/mp_control
else
	exit 1
fi
| true |
d239315a196eac47b203fe19620957e8a7dac772 | Shell | oloapinivad/ece3-postproc | /easy2cmor3/scripts/call_qa-dkrz.sh | UTF-8 | 1,696 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Easy2cmor tool
# by Paolo Davini (May 2019)
# Script to call QA-dkrz
# info for installation: prepare is installed in ece2cmor3
#conda create -n qa-dkrz -c conda-forge -c h-dh qa-dkrz
#qa-dkrz install --up --force CMIP6
set -e
#Will validate all years between year1 and year2 of experiment with name expname
expname=${expname:-chis}
do_force=true
#--------config file-----
# Machine-specific configuration; expected to define EASYDIR, CONDADIR,
# INFODIR, ECE3_POSTPROC_CMORDIR and friends used below.
. ${ECE3_POSTPROC_TOPDIR}/conf/${ECE3_POSTPROC_MACHINE}/conf_easy2cmor3_${ECE3_POSTPROC_MACHINE}.sh
cd ${EASYDIR}
#####################################################################################################
# activate conda
export PATH="$CONDADIR:$PATH"
# configurator
# NOTE(review): presumably sets $mip/$model/$exptype/$realization used in
# SELECT below — confirm in config_and_create_metadata.sh.
. ${EASYDIR}/config_and_create_metadata.sh $expname
# set path, options and log files for QA-DKRZ
year="*"
# 'eval echo' expands variables nested inside ECE3_POSTPROC_CMORDIR itself.
CMORDIR=$(eval echo ${ECE3_POSTPROC_CMORDIR})
SELECT=CMIP6/${mip}/EC-Earth-Consortium/${model}/${exptype}/r${realization}i1p1f1
CHECK_MODE=TIME,DATA,CNSTY,CF,DRS,DRS_F,DRS_P
#CHECK_MODE=CV
COREDIR=/lus/snx11062/scratch/ms/it/ccpd/tmp_cmor/QA/${expname}
QA_RESULTS=${COREDIR}/results
TMPDIR=${COREDIR}/linkdata
NUM_EXEC_THREADS=${NCORESQA:-1}
# replicating folder structure
# Symlink farm (cp -s) so QA-DKRZ sees the CMOR tree without copying data.
rm -rf $TMPDIR
mkdir -p $TMPDIR
cp -nrs $(eval echo ${ECE3_POSTPROC_CMORDIR}/*) $TMPDIR
# cleaning old environment
if [[ ${do_force} == true ]] ; then
	rm -rf ${QA_RESULTS}
fi
# start the environment
source activate qa-dkrz
echo "Running QA-DKRZ"
qa-dkrz -P CMIP6 -E PROJECT_DATA=$TMPDIR -E SELECT=${SELECT} -E CHECK_MODE=${CHECK_MODE} -E QA_RESULTS=${QA_RESULTS} \
	-E NUM_EXEC_THREADS=${NUM_EXEC_THREADS} -m
echo "Done!"
# Archive the annotation summaries for this experiment.
mkdir -p $INFODIR/${expname}
cp ${QA_RESULTS}/check_logs/Annotations/*.json $INFODIR/${expname}/
conda deactivate
8a965e42af0ff92a000f9be0eddde7a29c3e4040 | Shell | flatcap/dot-config | /bin/backup/tar-encrypt-split.sh | UTF-8 | 496 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Archive a directory, encrypt it for the backup recipient, and split the
# result into fixed-size chunks named "<dir>-NN.tar.gpg".
set -o errexit  # set -e
set -o nounset  # set -u
set -o pipefail # fail if tar or gpg2 fails, not only the final 'split'
renice --priority 19 --pid $$ > /dev/null
ionice --class 3 --pid $$ > /dev/null
RCPT="Rich Russon (backup) <rich@flatcap.org>"
# Readable usage message instead of nounset's cryptic "unbound variable".
if [ $# -lt 1 ]; then
	echo "Usage: ${0##*/} DIRECTORY" >&2
	exit 1
fi
DIR="${1%/}"    # strip one trailing slash so chunk names are "<dir>-NN..."
SUFFIX=".tar.gpg"
NUM_LENGTH=2    # width of the numeric chunk suffix (01, 02, ...)
CHUNK_SIZE="1G"
[ -d "$DIR" ] || exit 1
# --sort=name gives a stable archive ordering; compression is disabled in
# gpg2 because tarred data is piped straight through to 'split'.
tar --sort=name -cvf - "$DIR" \
	| gpg2 --encrypt --hidden-recipient "$RCPT" --compress-algo none -o - \
	| split --bytes "$CHUNK_SIZE" --additional-suffix="$SUFFIX" --numeric-suffixes=1 --suffix-length="$NUM_LENGTH" - "${DIR}-"
ddba3f036e28c16adf5494ce03e0ae5a2c7aaf56 | Shell | s-p-k/qdb | /qdb | UTF-8 | 5,038 | 4.03125 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# ############################ QuickDB 0.1 ##############################
#
# This script will create a DB name, DB user and DB password.
#
# Usage:
# qdb [-n <database name> ] [-u <database user>] [-p <database password>]
#
# Notes:
# 1. Modify the DB parameters below to avoid being prompted for a
# SQL username/password
# 2. For convenience, add this script to your /usr/bin or alike with
# chmod +x permissions.
# 2. License: MIT
# 3. Author: dth at alterland dot net - Brussels, May 30, 2015
# #######################################################################
# #######################################################################
# ## Parameters (if empty, you will be prompted for a password)
# Admin credentials used by run_queries; leave empty to be prompted.
qdb_sqluser="" # optional (do not use root)
qdb_sqlpassword="" # optional
# #######################################################################
# ## Functions
# help: print the usage text and abort with status 1 (also serves as the
# handler for -h/--help).
function help {
    echo "Usage: qdb [-n <database name> ] [-u <database user>] [-p <database password>]
Mandatory Options:
-n, --name
-u, --user
-p, --password
Example: qdb -n Name -u User -p Pass"
    exit 1
}
# seeya: farewell banner printed on every normal or cancelled exit path.
seeya() {
    printf '%s\n' "Thanks for using QuickDB v1. Have a good day."
}
# testcancel: must be called IMMEDIATELY after a command — its very first
# statement inspects $? from the caller's previous command.  On failure
# (e.g. dialog Cancel/ESC) it clears the screen, reports, and exits 1.
function testcancel {
    if test $? -eq 0
    then
        return
    else
        clear
        echo "The operation was canceled"
        seeya
        exit 1
    fi
}
# op_success: clear the screen and report a successful run.
function op_success {
    clear
    echo "The operation was concluded successfully"
    seeya
}
# The create_* helpers only BUILD SQL text into the globals query1..query4;
# nothing is sent to the server until run_queries executes them.
function create_db_user_local {
    query1="GRANT ALL PRIVILEGES ON $qdb_name.* TO '$qdb_user'@'localhost' IDENTIFIED BY '$qdb_pwd';FLUSH PRIVILEGES;"
}
function create_db_user_remote { # @todo :: yet to implement
    query2="GRANT ALL PRIVILEGES ON $qdb_name.* TO '$qdb_user'@'%' IDENTIFIED BY '$qdb_pwd';FLUSH PRIVILEGES;"
}
function create_db {
    query3="DROP DATABASE IF EXISTS $qdb_name;"
    query4="CREATE DATABASE $qdb_name DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;"
}
# run_queries: concatenate the prepared statements and run them in a single
# mysql invocation.  query2 stays empty unless the (unimplemented) remote
# path ran.  NOTE(review): with an empty qdb_sqlpassword, -p"" makes mysql
# prompt interactively for the password — presumably intended; confirm.
function run_queries {
    create_db_user_local
    # @todo :: implement create_db_user_remote
    create_db
    sql_query=${query1}${query2}${query3}${query4}
    mysql -u "$qdb_sqluser" -p"$qdb_sqlpassword" -e "$sql_query"
}
# qdb: main entry point.  Requires qdb_name/qdb_user/qdb_pwd to have been
# populated (by getopts below or by the dialog GUI); prompts for the SQL
# admin username only when the parameters at the top of the file are empty.
function qdb {
    # Are all arguments there?
    if [ -z "$qdb_name" ] || [ -z "$qdb_user" ] || [ -z "$qdb_pwd" ]; then
        echo "Error: missing argument. All 3 fields are required."; help
    else
        # What about DB parameters?
        if [ -z "$qdb_sqluser" ] || [ -z "$qdb_sqlpassword" ]; then
            # DB parameters empty. Prompting mysql.
            while [ -z "$qdb_sqluser" ]; do
                clear
                echo "Now connecting to the SQL server for adding the new DB, user and password"
                echo -n "Enter username: "
                read -r qdb_sqluser
            done
            # NOTE(review): '(op_success; exit 0)' runs in a SUBSHELL, so the
            # 'exit 0' does not terminate the script — control returns to the
            # caller either way.  Same for '(exit $c)'.  Confirm intended.
            run_queries && (op_success; exit 0) || (c=$?; echo "Failed connecting to DB server"; (exit $c))
        else
            # DB parameters set. No further user input will be required.
            run_queries && (op_success; exit 0) || (c=$?; echo "Failed connecting to DB server"; (exit $c)) # @todo :: refactoring
        fi
    fi
}
# ## "GUI"
# qdbgui: dialog(1)-based front end that fills qdb_name/qdb_user/qdb_pwd.
# Each prompt loops until non-empty input; testcancel aborts on Cancel/ESC.
# The '3>&1 1>&2 2>&3 3>&-' dance swaps stdout/stderr so that the value
# dialog writes to stderr is captured by $( ) while the UI still reaches
# the terminal.
function qdbgui {
    # Intro
    if (dialog --title " QuickDB 0.1 " --yesno "
    \nThis script will create a SQL database, an associated local user \
    with GRANT ALL privileges and his password.\n\n
    If there is a DB already with the same name, it will be deleted\n\n\
    Do you want to proceed?" 13 60) # size of 60x13 characters
    then
        # Get data
        while [ -z "$qdb_name" ]; do
            qdb_name=$(\
            dialog --title " Creating a database " \
            --backtitle " QuickDB 0.1 " \
            --inputbox "Enter database name " 8 60 \
            3>&1 1>&2 2>&3 3>&- \
            )
            testcancel
        done
        while [ -z "$qdb_user" ]; do
            qdb_user=$(\
            dialog --title " Creating a local user " \
            --backtitle " QuickDB 0.1 " \
            --inputbox "Enter username " 8 60 \
            3>&1 1>&2 2>&3 3>&- \
            )
            testcancel
        done
        while [ -z "$qdb_pwd" ]; do
            qdb_pwd=$(\
            dialog --title " Choosing user password " \
            --backtitle " QuickDB 0.1 " \
            --inputbox "Enter user password " 8 60 \
            3>&1 1>&2 2>&3 3>&- \
            )
            testcancel
        done
    else
        testcancel
        exit 1
    fi
}
# ## Default if no input
# [[ $# -eq 0 ]] && help # if no input, display help
[[ $# -eq 0 ]] && qdbgui # if no input, display gui
# #######################################################################
# ## Main
# Simple long/short option parser; each value option consumes "$2".
while [[ $# -gt 0 ]]
do
    case "$1" in
        -n|--name)
            qdb_name="$2"
            shift
            ;;
        -u|--user)
            qdb_user="$2"
            shift
            ;;
        -p|--password)
            qdb_pwd="$2"
            shift
            ;;
        -h|--help)
            help
            shift
            ;;
        *)
            echo "qdb: invalid option \""$1"\""
            echo "Try 'qdb --help' for more information."; exit 1
    esac
    shift
done
# ## Where the magic happens
qdb
8166b3b5d07e8b7a76de273bc4392b392c0e8032 | Shell | judgegrubb/voltest | /creduce/showme.sh | UTF-8 | 14,453 | 3.8125 | 4 | [] | no_license | #!/bin/bash
##
## Copyright (c) 2013 The University of Utah
## All rights reserved.
##
## This is a predicate script for C-Reduce, for reducing programs that reveal
## "volatile defects" in C compilers.
##
## This script judges that the file named by its argument is interesting if:
##
## + it appears to be a valid, strictly conforming C program
## + the program compiles with both "gcc -O0" and "gcc -O2"
## + the output of the two compiled programs is the same
## + the "volatile access checksums" of the two compiled programs are
## different
##
###############################################################################
# Exit status: 0 = "interesting" (keep this reduction), non-zero = reject.
if test -z "$VOLTEST_HOME"; then
    echo $0: '$VOLTEST_HOME' is not set
    exit 1
fi
###############################################################################
## Information about the test setup: the compilers under test and the
## environment for compiling and validating test programs.
CCUT1="gcc-4.4 -O0"
CCUT2="gcc-4.4 -O2"
CPPFLAGS="-DINLINE= -DCSMITH_MINIMAL -DWRAP_VOLATILES=0 -DNOT_PRINT_CHECKSUM -I/disk2/randtest/src/csmith/runtime"
# PIN_HOME: inherit this from the environment.
TIMEOUT=5
TIMEOUT_FRAMAC=125
###############################################################################
## Environment configuration.
CLANG=/disk2/randtest/src/llvm-install/bin/clang
CMP=cmp
FRAMAC=/usr/bin/frama-c
GCC=gcc
GREP=grep
RM=rm
FIND_COMMON_VARS="$VOLTEST_HOME"/creduce/find-common-vars.pl
RUNSAFELY=RunSafely
VOL_CHECKER=/disk2/randtest/src/volatile_checker/volatile_checker
VOL_ADDR_GEN=/disk2/randtest/src/volatile_pintrace/gen_volatile_addr.pl
###############################################################################
## Options.
# If `quiet' is true, this script is silent when the mutant is found to be
# uninteresting --- i.e., uncompilable, sematically suspect, or without the
# behavior we're looking for.
quiet=0
# If `neat' is true, this script deletes the temporary files it creates.
neat=0
# If `debug' is true, this script prints debugging output.
debug=0
###############################################################################
## Parse and validate the command-line arguments.
if [ $# -ne 1 ]; then
    echo "usage: $0 filename"
    exit 1
fi
filename="$1"
if test ! -e "$filename"; then
    echo "$0: file \"$filename\" does not exist"
    exit 1
fi
if test ! -f "$filename"; then
    echo "$0: \"$filename\" is not a regular file"
    exit 1
fi
if test ! -r "$filename"; then
    echo "$0: file \"$filename\" is not readable"
    exit 1
fi
###############################################################################
## Validate the environment.
if test -z "$PIN_HOME"; then
    echo "$0: environment variable \$PIN_HOME is not set"
    exit 1
fi
if test ! -e "$PIN_HOME/pin.sh"; then
    echo "$0: \"\$PIN_HOME/pin.sh\" does not exist"
    exit 1
fi
###############################################################################
## Initialization.
# NOTE(review): this looks inverted (debug=1 SILENCES DEBUG_ECHO); harmless
# in practice because DEBUG_ECHO is never used below — confirm before reuse.
DEBUG_ECHO=echo
if [ $debug -ne 0 ]; then
    DEBUG_ECHO=:
fi
QUIET_ECHO=echo
if [ $quiet -ne 0 ]; then
    QUIET_ECHO=:
fi
NEAT_RM_OUTS="$RM -f *out*.exe *out*.txt framac-prog.c"
if [ $neat -eq 0 ]; then
    NEAT_RM_OUTS=:
fi
# ulimit -t 1
# ulimit -v 2000000
# Remove any lingering temporary files.
$NEAT_RM_OUTS
###############################################################################
## Use Clang to weed out "broken" programs.
clang_out=clang-out.txt
$CLANG -c -pedantic -Wall -O0 $CPPFLAGS \
    "$filename" \
    -o /dev/null \
    > "$clang_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: clang could not compile \"$filename\""
    $NEAT_RM_OUTS
    exit 1
fi
# grep -q exits 1 only when NOTHING matched; "$? -ne 1" therefore rejects
# the mutant both when a bad warning matched and when grep itself failed.
$GREP -q \
    -e 'incompatible redeclaration' \
    -e 'ordered comparison between pointer' \
    -e 'eliding middle term' \
    -e 'end of non-void function' \
    -e 'invalid in C99' \
    -e 'specifies type' \
    -e 'should return a value' \
    -e 'too few argument' \
    -e 'too many argument' \
    -e "return type of 'main" \
    -e 'uninitialized' \
    -e 'incompatible pointer to' \
    -e 'incompatible integer to' \
    -e 'type specifier missing' \
    -e 'discards qualifiers' \
    "$clang_out"
if [ $? -ne 1 ]; then
    $QUIET_ECHO "$0: unacceptable output warning from clang"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Use GCC to weed out "broken" programs.
gcc_out=gcc-out.txt
$GCC -c -Wall -Wextra -O1 $CPPFLAGS \
    "$filename" \
    -o /dev/null \
    > "$gcc_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: gcc could not compile \"$filename\""
    $NEAT_RM_OUTS
    exit 1
fi
$GREP -q \
    -e 'uninitialized' \
    -e 'control reaches end' \
    -e 'no semicolon at end' \
    -e 'incompatible pointer' \
    -e 'cast from pointer to integer' \
    -e 'ordered comparison of pointer with integer' \
    -e 'declaration does not declare anything' \
    -e 'expects type' \
    -e 'assumed to have one element' \
    -e 'division by zero' \
    -e 'pointer from integer' \
    -e 'incompatible implicit' \
    -e 'excess elements in struct initializer' \
    -e 'comparison between pointer and integer' \
    -e "discards '.*' qualifier" \
    "$gcc_out"
if [ $? -ne 1 ]; then
    $QUIET_ECHO "$0: unacceptable output warning from gcc"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Use our volatile_checker tool to weed out "broken" programs.
checker_out=checker-out.txt
$VOL_CHECKER --checker=volatile-reorder "$filename" > "$checker_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: \"$filename\" contains ill-ordered volatile accesses"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Use Clang's address sanitizer to weed out "broken" programs.
asan_exe=clang-asan-out.exe
asan_out=clang-asan-out.txt
$CLANG -O0 -fsanitize=address $CPPFLAGS \
    "$filename" \
    -o "$asan_exe" \
    > "$asan_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: clang -fsanitize=address could not compile \"$filename\""
    $NEAT_RM_OUTS
    exit 1
fi
# The output of the program produced by clang -fsanitize=address.
asan_exe_out=clang-asan-exe-out.txt
$RUNSAFELY $TIMEOUT 1 /dev/null "$asan_exe_out" \
    "$asan_exe" \
    > /dev/null 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: clang -fsanitize=address: program is unsanitary"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Use Frama-C to weed out "broken" programs.
framac_filename=framac-prog.c
framac_out=framac-out.txt
# For whatever reason, Frama-C does not like `main' to have arguments!
#
cp "$filename" "$framac_filename"
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: failed to copy \"$filename\" for Frama-C"
    $NEAT_RM_OUTS
    exit 1
fi
perl -pi \
    -e 's/int main \(int argc, char\* argv\[\]\)/int argc; char **argv; int main (void)/' \
    "$framac_filename"
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: failed to edit \"$framac_filename\" for Frama-C"
    $NEAT_RM_OUTS
    exit 1
fi
# XXX: `RunSafely' has some serious problems with shell quoting!
# Note the funky (basically, wrong) quoting of the -cpp-command value, which is
# needed to get it through RunSafely. Fix RunSafely.
#
$RUNSAFELY $TIMEOUT_FRAMAC 1 /dev/null "$framac_out" \
    $FRAMAC \
    -cpp-command \"gcc -C -Dvolatile= -E -I.\" \
    -val-signed-overflow-alarms -val -stop-at-first-alarm -no-val-show-progress \
    -machdep x86_64 \
    -obviously-terminates \
    -precise-unions \
    "$framac_filename" \
    > /dev/null 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: Frama-C failed to vet the program"
    $NEAT_RM_OUTS
    exit 1
fi
$GREP -q \
    -e 'user error' \
    -e 'assert' \
    "$framac_out"
if [ $? -ne 1 ]; then
    $QUIET_ECHO "$0: unacceptable output warning from Frama-C"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Extract information about the volatiles (and all variables) in the program.
all_vars=all-vars-out.txt
vol_vars=vol-vars-out.txt
$VOL_CHECKER \
    --checker=volatile-address \
    --all-vars-output="$all_vars" \
    "$filename" \
    > "$vol_vars" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: volatile-variable extractor failed"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Compile the mutant using first compiler under test.
# The outputs of the first compiler under test.
ccut1_exe=ccut1-out.exe
ccut1_out=ccut1-out.txt
$CCUT1 \
    -w $CPPFLAGS \
    "$filename" \
    -o "$ccut1_exe" \
    > "$ccut1_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: could not compile \"$filename\""
    $NEAT_RM_OUTS
    exit 1
fi
# The addresses of volatile objects (and normal objects, too) in the program
# produced by the first compiler under test.
ccut1_exe_all_addrs=ccut1-exe-all-addrs-out.txt
ccut1_exe_vol_addrs=ccut1-exe-vol-addrs-out.txt
$VOL_ADDR_GEN \
    --vars-file="$vol_vars" \
    --all-vars-file="$all_vars" \
    --all-var-addrs-output="$ccut1_exe_all_addrs" \
    "$ccut1_exe" \
    > "$ccut1_exe_vol_addrs" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: volatile-address extractor failed"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Compile the mutant using second compiler under test.
# The outputs of the second compiler under test.
ccut2_exe=ccut2-out.exe
ccut2_out=ccut2-out.txt
$CCUT2 \
    -w $CPPFLAGS \
    "$filename" \
    -o "$ccut2_exe" \
    > "$ccut2_out" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: could not compile \"$filename\""
    $NEAT_RM_OUTS
    exit 1
fi
# The addresses of volatile objects (and normal objects, too) in the program
# produced by the second compiler under test.
ccut2_exe_all_addrs=ccut2-exe-all-addrs-out.txt
ccut2_exe_vol_addrs=ccut2-exe-vol-addrs-out.txt
$VOL_ADDR_GEN \
    --vars-file="$vol_vars" \
    --all-vars-file="$all_vars" \
    --all-var-addrs-output="$ccut2_exe_all_addrs" \
    "$ccut2_exe" \
    > "$ccut2_exe_vol_addrs" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: volatile-address extractor failed"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Find the set of globals that are visible in both compiled programs.
## Pin can compute the value checksum only over the state that is visible
## in both programs.
# The addresses of "common" objects in each compiled program. A "common"
# object is one that appears in both of the compiled programs, although it
# might have different locations across the two programs.
ccut1_exe_com_addrs=ccut1-exe-com-addrs-out.txt
ccut2_exe_com_addrs=ccut2-exe-com-addrs-out.txt
$FIND_COMMON_VARS "$ccut1_exe_all_addrs" "$ccut2_exe_all_addrs" \
    > "$ccut1_exe_com_addrs" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: common-object extractor failed"
    $NEAT_RM_OUTS
    exit 1
fi
$FIND_COMMON_VARS "$ccut2_exe_all_addrs" "$ccut1_exe_all_addrs" \
    > "$ccut2_exe_com_addrs" 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: common-object extractor failed"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Run the mutant using the first compiler under test.
# The output of the program produced by the first compiler under test.
ccut1_exe_out=ccut1-exe-out.txt
$RUNSAFELY $TIMEOUT 1 /dev/null "$ccut1_exe_out" \
    "$PIN_HOME/pin.sh" \
    -injection child \
    -t "$PIN_HOME/source/tools/ManualExamples/obj-intel64/pinatrace.so" \
    -vol_input "$ccut1_exe_vol_addrs" \
    -all_vars_input "$ccut1_exe_com_addrs" \
    -output_mode checksum \
    -- "$ccut1_exe" \
    > /dev/null 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: compiled program failed to run correctly"
    $NEAT_RM_OUTS
    exit 1
fi
# The regular and volatile checksums of the program produced by the first
# compiler under test.
ccut1_exe_chk=ccut1-exe-out-chk.txt
ccut1_exe_vchk=ccut1-exe-out-vchk.txt
$GREP -e "^checksum" "$ccut1_exe_out" > "$ccut1_exe_chk"
# $? is 0 is lines were matched; 1 if no lines matched; >1 if error.
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: compiled program produced no checksum"
    $NEAT_RM_OUTS
    exit 1
fi
$GREP -e "^vol_access_checksum" "$ccut1_exe_out" > "$ccut1_exe_vchk"
# $? is 0 is lines were matched; 1 if no lines matched; >1 if error.
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT1: compiled program produced no volatile checksum"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Run the mutant using the second compiler under test.
# The output of the program produced by the second compiler under test.
ccut2_exe_out=ccut2-exe-out.txt
$RUNSAFELY $TIMEOUT 1 /dev/null "$ccut2_exe_out" \
    "$PIN_HOME/pin.sh" \
    -injection child \
    -t "$PIN_HOME/source/tools/ManualExamples/obj-intel64/pinatrace.so" \
    -vol_input "$ccut2_exe_vol_addrs" \
    -all_vars_input "$ccut2_exe_com_addrs" \
    -output_mode checksum \
    -- "$ccut2_exe" \
    > /dev/null 2>&1
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: compiled program failed to run correctly"
    $NEAT_RM_OUTS
    exit 1
fi
# The regular and volatile checksums of the program produced by the second
# compiler under test.
ccut2_exe_chk=ccut2-exe-out-chk.txt
ccut2_exe_vchk=ccut2-exe-out-vchk.txt
$GREP -e "^checksum" "$ccut2_exe_out" > "$ccut2_exe_chk"
# $? is 0 is lines were matched; 1 if no lines matched; >1 if error.
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: compiled program produced no checksum"
    $NEAT_RM_OUTS
    exit 1
fi
$GREP -e "^vol_access_checksum" "$ccut2_exe_out" > "$ccut2_exe_vchk"
# $? is 0 is lines were matched; 1 if no lines matched; >1 if error.
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: $CCUT2: compiled program produced no volatile checksum"
    $NEAT_RM_OUTS
    exit 1
fi
###############################################################################
## Compare the regular and volatile checksums of the two compiled programs.
# If the regular checksums differ, this mutant is bad.
$CMP -s "$ccut1_exe_chk" "$ccut2_exe_chk"
if [ $? -ne 0 ]; then
    $QUIET_ECHO "$0: compiled programs output different checksums"
    exit 1
fi
# If the volatile checksums are *not* different, this mutant is bad.
$CMP -s "$ccut1_exe_vchk" "$ccut2_exe_vchk"
if [ $? -eq 0 ]; then
    $QUIET_ECHO "$0: compiled programs yield the same volatile checksum"
    exit 1
fi
###############################################################################
$QUIET_ECHO "$0: \"$filename\" is good"
$NEAT_RM_OUTS
exit 0
## End of file.
| true |
8845f4c6d0ac231345cb0749873be9976c3e86e7 | Shell | awhitbeck/usercode | /lowMassAnalysis1D/makeDataCards/datacards_TEMPLATE/make_SMCLS.sh | UTF-8 | 1,365 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# make_SMCLS.sh — merge CLs toy grids and run 'combine' for each channel.
# Usage: make_SMCLS.sh mass [what]
# Fix: quote "$1" — with no argument, `test -d $1` collapsed to `test -d`,
# which is TRUE (a one-argument test just checks for a non-empty string),
# so the usage check never fired and the script ran with MASS unset.
if test -d "$1"; then MASS=$1; else echo "Usage: $0 mass [what ]"; exit 1; fi;
cd "$MASS" || exit 1
MATCH=$2;
OPTIONS="-M HybridNew --freq  --optimizeSim=1 --singlePoint 1"
# run WHAT CARDFILE
# Derives the channel name (NAM) and basename (BSP) from the card file,
# merges the crab toy grids into grid-$BSP.root, and runs the observed
# plus the 5 expected-quantile CLs computations, logging to *.log.$WHAT*.
# Honors $MATCH: when set, only the matching card/channel is processed.
function run {
    WHAT=$1; shift
    NAM=$(echo $1 | sed -e s/comb_// -e s/.root// | tr [a-z] [A-Z])
    BSP=${1/.root};
    if [[ "$MATCH" == "" || "$MATCH" == "$1" || "$MATCH" == "${NAM}" ]]; then
        if test -f "$1"; then
            # Only proceed when at least one toy-grid ROOT file exists.
            if ls crab_0_${BSP}_SM*/res/*root > /dev/null 2>&1; then
                echo "Runnining CLs for limits for $NAM at $MASS. ";
                ../hadd2 -f grid-$BSP.root crab_0_${BSP}_SM*/res/*root > /dev/null 2>&1;
                GRID="--readHybridResult --toysFile=grid-$BSP.root"
                combine $* -n ${NAM}_${WHAT} -m $MASS $GRID $OPTIONS > ${1/.root/.log.$WHAT} 2>&1
                for E in 50 16 025 84 975; do
                    combine $* -n ${NAM}_${WHAT} -m $MASS $GRID --expectedFromGrid 0.$E $OPTIONS > ${1/.root/.log.$WHAT}_E0$E 2>&1
                done;
                grep '^CLs =' ${1/.root/.log.$WHAT}* | sed 's/:/\t/';
            else
                echo "No grid ready for $NAM at $MASS";
            fi;
        fi;
    fi;
}
run SMCLS comb_hgg.root
run SMCLS comb_hww.root
run SMCLS comb_htt.root
run SMCLS comb_hzz4l.root
run SMCLS comb_hzz2l2nu.root
run SMCLS comb_hzz2l2q.root
run SMCLS comb.root
| true |
74ec6f3b563854b97afa260ba973a195d0c0fbdb | Shell | petronny/aur3-mirror | /winscp4/winscp4 | UTF-8 | 276 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Launcher for WinSCP 4 under Wine.
# On first run, creates ~/.winscp4 and links the packaged exe into it;
# every invocation then hands all arguments straight through to Wine.
pkg=winscp4
appdir="$HOME/.$pkg"
export WINEPREFIX="$appdir/wine"
export WINEDLLOVERRIDES="mscoree,mshtml="
if [ ! -d "$appdir" ]; then
    mkdir -p "$appdir" || exit 1
    ln -s "/usr/share/$pkg/$pkg.exe" "$appdir/$pkg.exe" || exit 1
fi
wine "$appdir/$pkg.exe" "$@"
accace90abe7a671c0e5ad35ecbf439ab6b2f09d | Shell | arbitrary-dev/scripts | /powermode-switch | UTF-8 | 2,421 | 4.15625 | 4 | [
"WTFPL"
] | permissive | #!/bin/sh
# powermode-switch — toggle the CPU frequency governor between powersave
# and performance (or any governor matched by the optional argument) and
# bring peripherals in line: bluetooth radio, selected PCI devices, and
# powertop auto-tuning.  Needs write access to sysfs.
TMPDIR=${TMPDIR:-/tmp}
CURR=`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor`
MODES=(`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors`)
# _try_mode FRAGMENT
# Resolve a (partial) governor name against the available governors.
# Prompts and recurses when the fragment is ambiguous; returns 1 when
# nothing matches or the match is already active.  Sets the global MODE.
function _try_mode() {
  MODE=(`grep -oE "\b[a-z]*$1[a-z]*\b" <<< ${MODES[@]}`)
  if (( ${#MODE[@]} == 0 )); then
    echo "No such mode: $1"
    return 1
  elif (( ${#MODE[@]} > 1 )); then
    echo "There are ${#MODE[@]} power modes matching '*$1*':"
    for m in ${MODE[@]}; do
      echo "- $m"
    done
    read -p "Which one ? "
    _try_mode $REPLY
  elif [ "$MODE" == "$CURR" ]; then
    echo "Power mode '$MODE' is already set."
    return 1
  fi
}
# With an argument: resolve it; without: offer the powersave/performance
# toggle, accepting y/n or a governor-name fragment as the answer.
if [ -n "$1" ]; then
  _try_mode $1 || exit 1
else
  echo "Power modes: `sed -E "s/$CURR/[\0]/" <<< ${MODES[@]}`"
  [ "$CURR" != "powersave" ] && MODE="powersave" || MODE="performance"
  read -p "Switch to $MODE? "
  if [[ "$REPLY" =~ ^(y|yes)$ ]]; then
    :
  elif [[ "$REPLY" =~ ^(n|no|)$ ]]; then
    exit 1
  else
    _try_mode $REPLY
  fi
fi
# CPU
# Write the chosen governor to every CPU's cpufreq node.
for c in `ls -d /sys/devices/system/cpu/cpu[0-9]*`; do
  echo $MODE > $c/cpufreq/scaling_governor || exit 1
done
echo "CPU's set to '$MODE' mode."
# Bluetooth
# Refuse to kill the radio while devices are actively connected.
if [ "$MODE" = "powersave" ]; then
  if pgrep bluetoothd >/dev/null && bluetoothctl info | grep -q "Connected: yes"; then
    echo
    echo "Unable to disable bluetooth, following devices connected:"
    bluetoothctl info | grep "Name:" | cut -d\ -f2-
  else
    rfkill block bluetooth || exit 1
    # TODO remove PCI?
    echo "Bluetooth disabled"
  fi
else
  rfkill unblock bluetooth || exit 1
  echo "Bluetooth enabled"
fi
# PCI devices
# _join SEP WORD... — join WORDs with SEP (used to build the sed pattern).
_join() {
  local IFS="$1"
  shift
  echo "$*"
}
if [ "$MODE" = "powersave" ]; then
  # vendor:device IDs of power-hungry devices to detach in powersave mode.
  PCI_DVCS=()
  # Nvidia VGA
  PCI_DVCS+=(10de:1c8c)
  # Nvidia audio
  PCI_DVCS+=(10de:0fb9)
  # ThinkPad ethernet
  PCI_DVCS+=(8086:15bb)
  # Samsung netbook ethernet
  PCI_DVCS+=(10ec:8136)
  # NOTE(review): this assignment-prefix form has no command to scope to,
  # so IFS is PERMANENTLY changed for the rest of the script — the later
  # unquoted expansions appear to rely on that.  Confirm before touching.
  IFS=$'\r\n' PCI_DVCS=($(lspci -nn | sed -En "s/([^ ]+) [^:]+: (.+) \[(`_join \| ${PCI_DVCS[@]}`)\].*/\1 \2/p"))
  [ -n "$PCI_DVCS" ] && echo
  for d in "${PCI_DVCS[@]}"; do
    # NOTE(review): the literal 'echo' word is never executed — it only
    # pads index 0 so d[1] is the PCI address and ${d[@]:2} the device
    # name.  Fragile index hack; verify before modifying.
    IFS=\ d=(echo $d)
    DEV=(/sys/bus/pci/devices/*${d[1]})
    NAME=${d[@]:2}
    echo 1 > $DEV/remove || exit 1
    echo "PCI device removed: $NAME"
  done
else
  # Re-attach everything that was removed above.
  echo 1 > /sys/bus/pci/rescan || exit 1
  echo "PCI devices rescanned."
fi
# Powertop
# Apply powertop's tunables once per boot/TMPDIR lifetime.
if [[ ! -f $TMPDIR/.powertop-tuned ]]; then
  echo
  powertop --auto-tune \
    && touch $TMPDIR/.powertop-tuned
fi
| true |
ce90b65dd8de42d2f3d78083217b4d7797f75738 | Shell | zhujs/codesnippets | /bash.note/autocomplete.bash | UTF-8 | 1,475 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Demo command for the mini-REPL: reports that func1 ran, with its arguments.
func1() {
    printf '%s\n' "You are in func1: $*"
}
# Demo command for the mini-REPL: reports that func2 ran, with its arguments.
func2() {
    printf '%s\n' "You are in func2: $*"
}
# Demo command for the mini-REPL: reports that myFunc1 ran, with its arguments.
myFunc1() {
    printf '%s\n' "You are in myFunc1: $*"
}
#use: autocomplete "word1 word2 ..."
# Invoked by the readline binding below: rewrites READLINE_LINE and
# READLINE_POINT in place when the last word has a unique completion in
# the word list $1; otherwise prints all candidates.
autocomplete() {
    # we only try to autocomplete the last word so
    # we keep a record of the rest of the input
    OTHER_WORDS="${READLINE_LINE% *} "
    if [[ ${#OTHER_WORDS} -ge ${#READLINE_LINE} ]]; then #if there is only 1 word...
        OTHER_WORDS=""
    fi
    #the -W flag tells compgen to read autocomplete from the 1st argument provided
    #we then evaluate the last word of the current line through compgen
    AUTOCOMPLETE=($(compgen -W $1 "${READLINE_LINE##* }"))
    if [[ ${#AUTOCOMPLETE[@]} == 1 ]]; then #if there is only 1 match, we replace...
        READLINE_LINE="$OTHER_WORDS${AUTOCOMPLETE[0]} "
        READLINE_POINT=${#READLINE_LINE} #we set the cursor at the end of our word
    else
        echo -e "cmd> $READLINE_LINE\n${AUTOCOMPLETE[@]}" #...else we print the possibilities
    fi
}
# here we list the values we want to allow autocompletion for
MYFUNC="func1 func2 myFunc1"
# we do this to enable line edition (man bash)
set -o emacs
#calls autocomplete when TAB is pressed
# \$MYFUNC is escaped so it expands when the binding FIRES, picking up
# the then-current value of MYFUNC.
bind -x '"\t":"autocomplete \$MYFUNC"'
# -e option use the 'readline' library and store the input in the REPLY variable if no name supplied
# REPL loop: read a line with completion, record it in history, eval it.
while read -ep "cmd> "; do
    history -s $REPLY #set the last history command
    eval ${REPLY}
done
| true |
9643052f95c4aa3865f0ddde30dc62ef00cd1af4 | Shell | vh21/fuego | /fuego-ro/toolchains/debian-armel-tools.sh | UTF-8 | 805 | 2.890625 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | # fuego toolchain script
# this sets up the environment needed for fuego to use a toolchain
# this includes the following variables:
# CC, CXX, CPP, CXXCPP, CONFIGURE_FLAGS, AS, LD, ARCH
# CROSS_COMPILE, PREFIX, HOST, SDKROOT
# CFLAGS and LDFLAGS are optional
#
# this script is sourced by ${FUEGO_RO}/toolchains/tools.sh
#
# Note that to use this script, you should install the
# Debian cross-compiler toolchains, using the script
# install_cross_toolchain.sh
#
# do this, inside the container, as follows:
# /fuego-ro/toolchain/install_cross_toolchain.sh armel
export ARCH=arm
export SDKROOT=/
export PREFIX=arm-linux-gnueabi
# export_tools is provided by the sourcing script (tools.sh); presumably
# it derives CC/CXX/AS/LD etc. from $PREFIX — confirm there.
export_tools
CPP="${CC} -E"
CXXCPP="${CXX} -E"
# save original path, to get to non-toolchain version of python
ORIG_PATH=$PATH
unset PYTHONHOME
# NOTE(review): 'env -u PYTHONHOME >/dev/null' spawns a child env and
# discards its output — beyond the 'unset' above it has no effect on this
# shell; presumably a sanity check that env accepts -u.  Confirm intent.
env -u PYTHONHOME >/dev/null
| true |
371ac8ee2e90a502172e5e2b791892ea56b8970b | Shell | hy57in/test-git-auto-commit | /file/userCommit.sh | UTF-8 | 407 | 2.953125 | 3 | [] | no_license | echo "UserCommit and Push"
branch="$1"
message="$2"
git checkout $branch
git merge --squash auto-commit
git commit -m "$message"
git push -u origin master
git checkout -b auto-commit-temp
git merge --no-edit auto-commit
git commit --amend -m "User makes commit and push it to user branch."
git push -f
git branch -d auto-commit
git branch -m auto-commit-temp auto-commit
git push -u origin auto-commit | true |
0550b2ec60682fa9a95192f3d778e6ede7f2b534 | Shell | olgeni/bsdkit | /cloud-init/digitalocean.sh | UTF-8 | 3,522 | 2.890625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
set -e -u -x
exec > /var/log/bsdkit-cloud-init.log 2>&1
: "${BSDKIT_BRANCH:=master}"
: "${BSDKIT_VERSION:=13.2}"
: "${BSDKIT_JAIL_NETWORK:=172.16.1.0/24}"
: "${ZFS_SWAP_SIZE:=4G}"
export IGNORE_OSVERSION=yes
cd /root
rm -f /root/.wget-hsts
# shellcheck disable=SC2016
chpass -p '$1$Kk8uqtid$UZr4tpkPw6388O6xDSFLt1' root
mv -v /boot/loader.conf.local /boot/.loader.conf
sed -i -e "/vfs\.root\.mountfrom/d;" /boot/.loader.conf
sed -i -e "/vfs\.zfs\.vdev\.cache\.size/d;" /boot/.loader.conf
sed -i -e "/vfs\.zfs\.arc_max/d;" /boot/.loader.conf
sed -i -e "/loader_logo/d;" /boot/.loader.conf
cat -s /boot/.loader.conf > /boot/loader.conf
rm -f -v /boot/.loader.conf
if kenv zfs_be_root > /dev/null 2>&1; then
_zfs_pool=$(kenv zfs_be_root | cut -d'/' -f1)
zfs create \
-o canmount=off \
"${_zfs_pool}"/usr/local
zfs create \
-o checksum=off \
-o compression=off \
-o dedup=off \
-o sync=disabled \
-o primarycache=none \
-o org.freebsd:swap=on \
-V "${ZFS_SWAP_SIZE}" \
"${_zfs_pool}"/swap
fi
zfs destroy -r "${_zfs_pool}"@base_installation || :
zfs destroy -r "${_zfs_pool}"@digitalocean_installation || :
pw userdel freebsd -r || :
mkdir -p /usr/local/etc/pkg/repos
while ! pkg install -y ports-mgmt/pkg; do :; done
# shellcheck disable=SC2016
echo 'bsdkit: { url: "https://hub.olgeni.com/FreeBSD/packages-${ABI}-default-nox11" }' > /usr/local/etc/pkg/repos/bsdkit.conf
for i in $(pkg query -g %n 'py37-*'); do pkg set -yn ${i}:py38-${i#py37-}; done
pkg update -f
while ! pkg upgrade -y; do :; done
while ! pkg install -y devel/git sysutils/pv sysutils/ansible shells/zsh; do :; done
git clone https://gitlab.com/olgeni/bsdkit.git
cd bsdkit
git checkout ${BSDKIT_BRANCH}
./bsdkit ansible_local_playbook
if route get default | grep "interface:" > /dev/null 2>&1; then
_iface=$(route get default | awk '/interface:/ { print $2 }')
echo "nat on ${_iface} from ${BSDKIT_JAIL_NETWORK} to any -> egress" > /etc/pf.conf
echo 'anchor "f2b/*"' >> /etc/pf.conf
service pf enable
service pf start
fi
sysrc -a -e > /etc/.rc.conf
cat /etc/.rc.conf > /etc/rc.conf
rm -f /etc/.rc.conf
sysrc -x cloudinit_enable || :
sysrc -x digitalocean || :
sysrc -x digitaloceanpre || :
sysrc -x ifconfig_vtnet0_ipv6 || :
sysrc -x ipv6_activate_all_interfaces || :
sysrc -x ipv6_defaultrouter || :
sysrc -x route_net0 || :
rm -f /usr/local/etc/rc.d/digitalocean
rm -f /usr/local/etc/rc.d/digitaloceanpre
rm -f /usr/local/etc/sudoers.d/90-cloud-init-users
rm -f /root/.cloud-locale-test.skip
pkg delete -y net/cloud-init python2 python27 || :
pkg delete -y -g py27\* || :
pkg autoremove -y || :
pkg clean -y -a || :
rm -r -f /usr/tests
rm -r -f /usr/lib/debug
./bsdkit-upgrade -v${BSDKIT_VERSION} -F
./bsdkit-upgrade -v${BSDKIT_VERSION} -n bsdkit
rm -r -f /usr/freebsd-dist/
cd /root
bectl mount bsdkit /mnt
cat << "EOF" > /mnt/etc/rc.d/digitalocean_boot
#!/bin/sh
# PROVIDE: digitalocean_boot
# REQUIRE: DAEMON
# KEYWORD: firstboot
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin
gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 vtbd0
zfs upgrade -a
zpool upgrade -a
bectl destroy -Fo default || :
pkg upgrade -F -y
pkg upgrade -y
touch /firstboot-reboot
rm -f /etc/rc.d/digitalocean_boot
EOF
chmod 555 /mnt/etc/rc.d/digitalocean_boot
touch /mnt/firstboot
bectl umount bsdkit
for _file in /var/log/*; do
: > ${_file}
done
newsyslog -C -v
shutdown -r now
exec > /dev/tty 2>&1
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.