blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ee39c8eecafd024018e6e56ddc660f6307bdd19c | Shell | JuniorArogie/ACIT-4640-Group2 | /assignment1/vm_setup.sh | UTF-8 | 1,550 | 3.03125 | 3 | [] | no_license | #!/bin/bash
#PART 1: Git/Node/Mongo
USER_NAME="todoapp"
# Install Git, Node.js (npm) and MongoDB, then enable and start mongod.
# NOTE(review): assumes a yum-based distro (the rest of the script uses yum).
install_services () {
  # Bug fix: "yum get update" is not a yum subcommand; "yum -y update" is.
  sudo yum -y update
  # -y answers the install prompts so the script can run unattended.
  sudo yum -y install git
  sudo yum -y install npm
  sudo yum -y install mongodb-server
  # Bug fix: the second systemctl invocation also needs sudo.
  sudo systemctl enable mongod && sudo systemctl start mongod
}
install_services
# Create the application user and switch to it.
create_user(){
  sudo useradd "$USER_NAME"
  # passwd prompts interactively for the new user's password.
  sudo passwd "$USER_NAME"
  # (Removed the stray "todoapp - Password" line: it was a note, not a
  # command, and failed with "command not found".)
  # NOTE(review): "su -" opens an interactive shell; the rest of this script
  # only continues after that shell exits -- confirm this is intended.
  su - "$USER_NAME"
  cd
}
create_user
# --- PART 1: fetch the app, configure its DB connection, start it ---
git clone https://github.com/timoguic/ACIT4640-todo-app
cd ACIT4640-todo-app/
# Bug fix: npm install must run inside the cloned repo (it reads the
# package.json there); the original ran it one directory too early.
npm install
cd config
# NOTE(review): "localURl" looks like a typo for "localUrl", but the app may
# read this exact key -- verify against the app source before renaming.
echo "module.exports = {localURl: 'mongodb://localhost/acit4640'};" >> database.js
cd ..
#Turn off the firewall
#firewall-cmd --zone=public --add-port=8080/tcp
#firewall-cmd --zone=public --add-port=80/tcp
sudo systemctl stop firewalld
# NOTE(review): "node server.js" runs in the foreground and blocks the rest
# of this script until it is stopped -- confirm this is intended.
node server.js
#PART 1 DONE
#PART 2 NGINX
sudo yum -y install nginx
sudo systemctl enable nginx
sudo systemctl start nginx
cd /etc/nginx/
# NOTE(review): after "cd /etc/nginx" this copies nginx.conf onto itself and
# fails; the source was presumably a config shipped with the repo -- verify.
sudo cp nginx.conf /etc/nginx/
sudo systemctl restart nginx
# NOTE(review): relative path resolved from /etc/nginx -- probably wrong; verify.
cd app/ACIT4640-todo-app/
node server.js
#PART 2 DONE
#PART 3
cd /etc/systemd/system
# Install the systemd unit for the app.
# Bug fix: the original used `sudo echo "..." >> todoapp.service`, but the
# redirection is performed by the *unprivileged* shell, so writing into
# /etc/systemd/system fails; `sudo tee` performs the write as root instead.
# The quoted heredoc delimiter keeps the content literal.
sudo tee /etc/systemd/system/todoapp.service >/dev/null <<'EOF'
[Unit]
Description=Todo app, ACIT4640
After=network.target
[Service]
Environment=NODE_PORT=8080
WorkingDirectory=/home/todoapp/app
Type=simple
User=todoapp
ExecStart=/usr/bin/node /home/todoapp/app/server.js
Restart=always

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable todoapp
sudo systemctl start todoapp
sudo systemctl stop firewalld
# Show the new unit's status (the original ran a bare `systemctl status`).
sudo systemctl status todoapp
| true |
b420c9c42e6e312404c618a17d20cc8b3c830059 | Shell | tsengvn/config | /setup.sh | UTF-8 | 974 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# exists CMD -- return 0 if CMD is available on PATH, otherwise print a
# diagnostic to stdout and return 1.
exists() {
  # Quote "$1" so names containing spaces or glob characters do not split.
  if hash "$1" 2>/dev/null; then
    return 0
  else
    echo "$1 doesn't exist"
    return 1
  fi
}
exists curl
has_curl=$?
exists git
has_git=$?
exists zsh
has_zsh=$?
#check required lib exists
# Bug fix: the original tested `[ $has_curl ]`, which only checks that the
# string is non-empty -- it was true even for a failing exit code of 1, so
# the guard never fired. Compare each saved status against 0 instead.
if [ "$has_curl" -eq 0 ] && [ "$has_git" -eq 0 ] && [ "$has_zsh" -eq 0 ]; then
    echo "All required libs exist...."
else
    echo "One or more of curl, git, zsh is not installed. Exiting..."
    exit 1
fi
# Install oh-my-zsh unless it is already present.
if [ -d "$HOME/.oh-my-zsh" ]; then
    echo "oh-my-zsh already installed"
else
    echo "Installing oh-my-zsh"
    sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
fi
#copy powerlevel10k zsh-theme
# Skip the clone when the theme is already present so re-running the script
# stays idempotent (git clone fails on an existing non-empty target).
if [ ! -d "$HOME/.oh-my-zsh/custom/themes/powerlevel10k" ]; then
    git clone --depth=1 "https://github.com/romkatv/powerlevel10k.git" ~/.oh-my-zsh/custom/themes/powerlevel10k
fi
#copy .zshrc
curl -LSso ~/.zshrc "https://raw.githubusercontent.com/tsengvn/config/main/.zshrc"
#copy p10k config
curl -LSso ~/.p10k.zsh "https://raw.githubusercontent.com/tsengvn/config/main/.p10k.zsh"
58cd2bcef5a6c23d9031c0ecac52fd2f0993d867 | Shell | listinvest/tempgopher | /install.sh | UTF-8 | 1,067 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Install location, binary path and runtime user for the Temp Gopher service.
INSTALLDIR=/opt/tempgopher
INSTALLBIN=$INSTALLDIR/tempgopher
INSTALLUSER=pi
# Latest build artifact straight from the project's GitLab CI pipeline.
BINURL='https://gitlab.com/shouptech/tempgopher/-/jobs/artifacts/master/raw/tempgopher?job=build'
CONFIGFILE=$INSTALLDIR/config.yml
# Load w1_therm module
# (1-Wire thermal sensor support, needed to read the temperature probes)
sudo /sbin/modprobe w1_therm
# Download binary
sudo mkdir -p $INSTALLDIR
sudo curl -L $BINURL -o $INSTALLBIN
sudo chmod +x $INSTALLBIN
# Trailing ":" sets the group to $INSTALLUSER's login group.
sudo chown -R $INSTALLUSER: $INSTALLDIR
# Generate a configuration file
# "|| true" keeps the script going if the (interactive) config step fails.
sudo -u $INSTALLUSER $INSTALLBIN -c $CONFIGFILE config || true
# Create unit file
# The heredoc delimiter is unquoted, so $INSTALLDIR/$INSTALLUSER/$INSTALLBIN
# expand here, while the escaped \$MAINPID stays literal for systemd.
sudo sh -c "cat > /etc/systemd/system/tempgopher.service" << EOM
[Unit]
Description=Temp Gopher
After=network.target
[Service]
Type=simple
WorkingDirectory=$INSTALLDIR
PermissionsStartOnly=true
User=$INSTALLUSER
Group=$INSTALLUSER
ExecStartPre=/sbin/modprobe w1_therm
ExecStart=$INSTALLBIN -c $CONFIGFILE run
ExecReload=/bin/kill -HUP \$MAINPID
[Install]
WantedBy=multi-user.target
EOM
# Enable and start the service
sudo systemctl daemon-reload
sudo systemctl enable tempgopher.service
sudo systemctl start tempgopher.service
| true |
59f188cb7ddfd361239aa2ab29660177e458d245 | Shell | ganmao/Ocs_Scripts | /Project_Scripts/daily_scripts/PorcessDailyRecurrEvent.sh | GB18030 | 4,908 | 3.0625 | 3 | [] | no_license | #!/usr/bin/ksh
#################################################################
#
# Daily recurring-event (type 3) processing for the OCS system.
# (The original header comments were GB18030-encoded and appear as
# mojibake here; intent reconstructed from the code below -- verify.)
# crontab set:
# 0 1 * * * /ztesoft/ocs/scripts/PorcessDailyRecurrEvent.sh
#
#################################################################
# Shared environment: provides gvDate_Today, gvSqlplus, gvConOra_RB,
# gfWriteLogFile, gfWriteKpiFile, gfGetCurrCycle, etc.
. /ztesoft/ocs/scripts/CommonENV.sh
cd ${HOME}/bin
#==========================================
# Log file for this run, named after the script and today's date.
v_LogFileName="PorcessDailyRecurrEvent_${gvDate_Today}.log"
# Number of parallel rating worker processes launched later.
v_ProcModNum=10
# Log level + message for the "processing started" entry.
v_LogLevel="1"
v_LogContent="ʼִȡ..."
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
# KPI bookkeeping: output file, KPI name and counters for this run.
v_KpiFileName="PorcessDailyRecurrEvent_${gvDate_Today}.kpi"
v_KpiName="PorcessDailyRecurrEvent"
v_KpiContent=""
v_KpiProcAllNum=0
v_KpiProcErrorNum=0
v_KpiProcSuccesNum=0
# Fetch current billing cycle id and state. ksh runs the trailing `read` in
# the current shell, so both variables survive the pipeline.
gfGetCurrCycle ${gvDate_Today} | read v_CurrCycleId v_CurrCycleStat
if [[ x${v_CurrCycleId} = x ]]
then
v_LogLevel="ERROR"
v_LogContent="ִȡʧ- ȡID!"
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
exit -1
fi
# The cycle must be in state "A" (active) to proceed.
if [[ ${v_CurrCycleStat} != "A" ]]
then
v_LogLevel="ERROR"
v_LogContent="ִȡʧ- ȡ״̬'A'![${v_CurrCycleStat}]"
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
exit -1
fi
#~ echo "v_CurrCycleId=${v_CurrCycleId}"
#~ echo "v_CurrCycleStat=${v_CurrCycleStat}"
v_LogLevel="4"
v_LogContent="ȡID:${v_CurrCycleId};ȡ״̬:${v_CurrCycleStat}"
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
#==========================================
# Count today's type-3 recurring events in the staging ("ori") table for the
# current cycle. Prints a single number on stdout.
fGetTodayOriNumber()
{
vSql_GetTodayOriNumber="
SELECT count(*) \
FROM event_recurring_ori_${v_CurrCycleId} \
WHERE TRUNC (created_date) = TRUNC (SYSDATE) \
AND recurring_re_type = 3;"
# -S runs sqlplus silently; the set commands strip headers, feedback and
# paging so only the count value is printed.
${gvSqlplus} -S ${gvConOra_RB} << END
set heading off
set feedback off
set pagesize 0
${vSql_GetTodayOriNumber}
exit
END
}
# Count today's type-3 recurring events already loaded into the main table
# (event_recurring_<cycle>). Prints a single number on stdout.
fGetTodayIndbNumber()
{
vSql_GetTodayIndbNumber="
SELECT count(*) \
FROM event_recurring_${v_CurrCycleId} \
WHERE TRUNC (created_date) = TRUNC (SYSDATE) \
AND recurring_re_type = 3;"
# Silent sqlplus session that emits only the count value.
${gvSqlplus} -S ${gvConOra_RB} << END
set heading off
set feedback off
set pagesize 0
${vSql_GetTodayIndbNumber}
exit
END
}
# Count today's type-3 staging events still in state 'A' (apparently "not
# yet processed"; the KPI report below treats this as the error count --
# verify the STATE semantics against the schema).
fGetTodayOriNumberError()
{
vSql_GetTodayOriNumberError="
SELECT count(*) \
FROM event_recurring_ori_${v_CurrCycleId} \
WHERE TRUNC (created_date) = TRUNC (SYSDATE) \
AND recurring_re_type = 3
AND STATE = 'A';"
# Silent sqlplus session that emits only the count value.
${gvSqlplus} -S ${gvConOra_RB} << END
set heading off
set feedback off
set pagesize 0
${vSql_GetTodayOriNumberError}
exit
END
}
#==========================================
#main
# Step 1: generate today's type-3 recurring events for eligible users.
v_LogLevel="1"
v_LogContent="ʼ¼ɽ..."
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
${HOME}/bin/RecurrEventGen -e 3 -d ${gvDate_Today} -p 2 -l 0
v_LogLevel="1"
v_LogContent="¼ɽ̽!ʼ۽..."
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
# Step 2: rate the generated events. The single-process variant is kept
# below for reference; the parallel version that follows is what runs.
#${HOME}/bin/RecurrEventRate -e 3 -c ${v_CurrCycleId} -l 0
# Launch one RecurrEventRate worker per modulo slot (v_ProcModNum ways),
# each handling the events whose key matches its -i index.
v_count=0
while [[ ${v_count} -lt ${v_ProcModNum} ]]
do
echo "nohup ${HOME}/bin/RecurrEventRate -e 3 -c ${v_CurrCycleId} -M ${v_ProcModNum} -i ${v_count} &"
nohup ${HOME}/bin/RecurrEventRate -e 3 -c ${v_CurrCycleId} -M ${v_ProcModNum} -i ${v_count} &
v_count=`expr ${v_count} + 1`
#echo "v_count="${v_count}
done
# Wait until every worker has exited.
# Bug fixes: the original condition was `= "YES"` while v_ProcEnd starts as
# "NO", so the wait loop never executed; and `ps -ef RecurrEventRate` was
# missing the pipe into grep. A short sleep avoids busy-spinning.
v_ProcEnd="NO"
while [[ ${v_ProcEnd} != "YES" ]]
do
_Num=`ps -ef | grep RecurrEventRate | grep -v grep | wc -l`
if [[ ${_Num} -eq 0 ]]
then
v_ProcEnd="YES"
else
sleep 5
fi
done
v_LogLevel="1"
v_LogContent="۽̽!"
gfWriteLogFile ${v_LogFileName} ${v_LogLevel} ${v_LogContent}
#==========================================
# Collect KPI numbers and write the KPI file / Oracle record.
# Wait 10 minutes first so that late database writes are included.
sleep 600
# ksh runs the trailing `read` of a pipeline in the current shell, so these
# assignments survive (this would not work in plain bash).
fGetTodayOriNumber | read v_KpiProcAllNum
fGetTodayIndbNumber | read v_KpiProcSuccesNum
fGetTodayOriNumberError | read v_KpiProcErrorNum
v_KpiContent="|DailyRecurrEventNum=${v_KpiProcAllNum}|DailyRecurrEventSuccesNum=${v_KpiProcSuccesNum}|DailyRecurrEventErrorNum=${v_KpiProcErrorNum}"
gfWriteKpiFile ${v_KpiFileName} ${v_KpiName} ${v_KpiContent}
gfInsertKpi2Oracle "ִн" "Ҫ¼:${v_KpiProcAllNum}" "Ѿ¼:${v_KpiProcSuccesNum}" "¼:${v_KpiProcErrorNum}"
| true |
525ac7db15db6329756725ac6f911f812787007e | Shell | nthomas-redhat/USM | /install.sh | UTF-8 | 4,055 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Abort on the first failing command.
set -e
# Package installation
# ANSI color codes used by the progress printf messages below.
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
NC='\033[0m'
USM_HOME=`pwd`
printf "${GREEN}Installing necessary packages for USM${NC}\n"
# set -x / set +x pairs below toggle command tracing around each phase so
# the user-facing printf lines are not echoed twice.
set -x
yum -y install python-django python-django-celery python-django-extensions python-django-bash-completion python-django-filter python-paramiko redis python-redis salt-master postgresql postgresql-server postgresql-devel postgresql-libs postgresql-contrib python-psycopg2 python-netaddr python-cpopen python-gevent python-pip python-devel wget tar
pip install djangorestframework psycogreen
pip install celery --upgrade
# Initialize the DB
set +x
printf "${GREEN}Initializing the DB${NC}\n"
set -x
/usr/bin/postgresql-setup initdb
# Configure PostgreSQL to accept network connection
# (switch local loopback auth from ident to password in pg_hba.conf)
set +x
printf "${GREEN}Configuring PostgreSQL to accept network connection${NC}\n"
set -x
sed -i 's/127.0.0.1\/32 *ident/127.0.0.1\/32 password/g' /var/lib/pgsql/data/pg_hba.conf
sed -i 's/::1\/128 *ident/::1\/128 password/g' /var/lib/pgsql/data/pg_hba.conf
# Enable and start the postgresSQL service
set +x
printf "${GREEN}Enable and start the postgresSQL service${NC}\n"
set -x
systemctl enable postgresql
systemctl start postgresql
# Create the Database,User and grant privileges
set +x
printf "${GREEN}Creating the Database,User and granting privileges${NC}\n"
set -x
sudo su - postgres -c "createdb usm"
sudo su - postgres -c "psql --command=\"CREATE USER usm WITH PASSWORD 'usm'\""
sudo su - postgres -c "psql --command=\"GRANT ALL PRIVILEGES ON DATABASE usm TO usm\""
# Start the salt service
set +x
printf "${GREEN}Starting salt service${NC}\n"
set -x
systemctl enable salt-master
systemctl start salt-master
# Setup the USM App
set +x
printf "${GREEN}Setting Up USM app${NC}\n"
set -x
mkdir /var/log/usm
cd $USM_HOME
python manage.py makemigrations
python manage.py migrate
set +x
printf "${GREEN}Please Enter Details for USM super-user creation${NC}\n"
set -x
python manage.py createsuperuser
# Celery setup
set +x
printf "${GREEN}Setting up Celery${NC}\n"
set -x
systemctl enable redis
systemctl start redis
# Point the packaged celeryd defaults at this checkout's directory.
sed -i 's|^CELERYD_CHDIR=.*|'CELERYD_CHDIR=\""$USM_HOME"\"'|g' $USM_HOME/celery/default/celeryd
sed -i 's|^DJANGO_PROJECT_DIR=.*|'DJANGO_PROJECT_DIR=\""$USM_HOME"\"'|g' $USM_HOME/celery/default/celeryd
# "yes |" auto-confirms any overwrite prompt from cp.
yes |cp $USM_HOME/celery/default/celeryd /etc/default
yes |cp $USM_HOME/celery/init.d/celeryd /etc/init.d
mkdir -p -m 2755 /var/log/celery
service celeryd start
# Salt setup
set +x
printf "${GREEN}Setting up salt${NC}\n"
set -x
yes |cp $USM_HOME/usm_wrappers/setup-minion.sh.template $USM_HOME
mkdir /srv/salt
yes |cp $USM_HOME/usm_wrappers/*.sls /srv/salt
set +x
# Ask whether to install the web UI; accept only a Y/y/N/n answer.
while true; do
    read -p "Do you wish to install USM-UI [Y/N]:" yn
    case $yn in
        [YyNn]* ) break;;
        * ) echo "Please answer Y or N.";;
    esac
done
# Fix: the original used `[ $yn = "Y" -o $yn = "y" ]`; the unquoted $yn and
# the deprecated -o operator are fragile, so use two quoted tests with ||.
if [ "$yn" = "Y" ] || [ "$yn" = "y" ]
then
    printf "${GREEN}Downloading and installing usm-client...${NC}\n"
    set -x
    wget http://github.com/kmkanagaraj/usm-client/releases/download/0.0.1/usm-client-0.0.1.tar.gz
    mkdir static
    tar -xzf usm-client-0.0.1.tar.gz -C static/
    set +x
    printf "${YELLOW}Please Make Suitable Firewall settings by unblocking 4505-4506 ports for communication with salt and your HTTP port used for USM....${NC}\n"
    printf "${GREEN}You Can start the USM application by running following command in $USM_HOME dir\nCommand: python manage.py runserver IPAddress:PORT${NC}\n"
    printf "${GREEN}Access the application using http://IPADDRESS:PORT/static/index.html${NC}\n"
else
    set +x
    printf "${YELLOW}Please Make Suitable Firewall settings by unblocking 4505-4506 ports for communication with salt and your HTTP port used for USM....${NC}\n"
    printf "${GREEN}You Can start the USM application by running following command in $USM_HOME dir\nCommand: python manage.py runserver IPAddress:PORT${NC}\n"
    printf "${GREEN}Access the application using http://IPADDRESS:PORT/api/v1${NC}\n"
fi
1979d8c2091dcab7fb55d0c540c67ffedc55e1e7 | Shell | banan1988/spring-demo | /demo-infra/sonarqube/up.sh | UTF-8 | 106 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Bring the compose stack up in detached mode; abort (status 1) before
# listing containers if the startup failed.
docker-compose up -d || exit 1
# Show the running containers.
docker ps
| true |
5bde20b665b6f1731530979d882263b9d60bb2f8 | Shell | rgeorgiev583/os-2016-2017 | /review/3/9.sh | UTF-8 | 76 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
file=$1
if [ -e "$file" ]
then
echo yes
else
echo no
fi
| true |
53f9f3dc228cb2d0a3cc91525145417bdf436981 | Shell | budimanjojo/dotfiles | /dot_i3/scripts/bandwidth | UTF-8 | 2,481 | 3.75 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
# Source: http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html
# BLOCK_INSTANCE is "iface;mode" (mode: both/in/out); default wlp3s0, both.
INSTANCE="${BLOCK_INSTANCE}"
if [[ "${INSTANCE}" = "" ]]; then
  INSTANCE="wlp3s0;both"
fi
# Split "iface;mode" into DISPLAY (mode) and INSTANCE (interface name).
# NOTE(review): this clobbers the X11 DISPLAY environment variable for any
# child process -- probably harmless for an i3blocks blocklet, but verify.
DISPLAY=$(echo "${INSTANCE}" | awk -F ';' '{print $2}')
INSTANCE=$(echo "${INSTANCE}" | awk -F ';' '{print $1}')
if [[ "${DISPLAY}" = "" ]]; then
  DISPLAY="both"
fi
# Size thresholds in bytes: 1KB, 1MB, 10MB, 100MB.
ONE_KB=1024
ONE_MB=$(echo "${ONE_KB}*1024" | bc -l)
TEN_MB=$(echo "${ONE_MB}*10" | bc -l)
OHD_MB=$(echo "${TEN_MB}*10" | bc -l)
URGENT_VALUE="${TEN_MB}"
# Previous counters persisted between invocations in PREV_FILE
# (line 1 = bytes in, line 2 = bytes out).
PREV_IN=0
PREV_OUT=0
PREV_FILE="/tmp/.bandwidth"
if [[ -f "${PREV_FILE}" ]]; then
  PREV_CONT=$(cat "${PREV_FILE}")
  PREV_IN=$(echo "${PREV_CONT}" | head -n 1)
  PREV_OUT=$(echo "${PREV_CONT}" | tail -n 1)
fi
# /proc/net/dev: field 1 after the colon is RX bytes, field 9 is TX bytes.
BANDWIDTH=$(grep "${INSTANCE}" /proc/net/dev | awk -F: '{print $2}' | awk '{print $1" "$9}')
if [[ "${BANDWIDTH}" = "" ]]; then
  exit
fi
BYTES_IN=$(echo "${BANDWIDTH}" | awk -F ' ' '{print $1}')
BYTES_OUT=$(echo "${BANDWIDTH}" | awk -F ' ' '{print $2}')
# FormatNumber BYTES -- print BYTES scaled to b/kb/mb, truncating exactly
# like the original bc expressions did (scale=0, 1 or 2 decimals depending
# on magnitude). Rewritten with pure bash integer arithmetic so no external
# `bc` process is spawned on every status refresh.
# Relies on the globals ONE_KB, ONE_MB, TEN_MB, OHD_MB set above.
function FormatNumber() {
  local n=$1 t
  if [[ "$n" -ge "${OHD_MB}" ]]; then
    # >= 100MB: whole megabytes (scale=0)
    echo "$(( n / ONE_MB ))mb"
  elif [[ "$n" -ge "${TEN_MB}" ]]; then
    # >= 10MB: one decimal place (scale=1)
    t=$(( n * 10 / ONE_MB ))
    echo "$(( t / 10 )).$(( t % 10 ))mb"
  elif [[ "$n" -ge "${ONE_MB}" ]]; then
    # >= 1MB: two decimal places (scale=2)
    t=$(( n * 100 / ONE_MB ))
    printf '%d.%02dmb\n' "$(( t / 100 ))" "$(( t % 100 ))"
  elif [[ "$n" -ge "${ONE_KB}" ]]; then
    echo "$(( n / ONE_KB ))kb"
  else
    echo "${n}b"
  fi
}
if [[ "${PREV_IN}" != "" ]] && [[ "${PREV_OUT}" != "" ]]; then
  # Bytes transferred since the previous invocation (the original comment
  # said "CPU usage" -- stale copy/paste from another blocklet).
  DIFF_IN=$(echo "scale=0;${BYTES_IN} - ${PREV_IN}" | bc -l)
  DIFF_OUT=$(echo "scale=0;${BYTES_OUT} - ${PREV_OUT}" | bc -l)
  DIFF_TOTAL=0
  USAGE_IN=$(FormatNumber "${DIFF_IN}")
  USAGE_OUT=$(FormatNumber "${DIFF_OUT}")
  # Three echoed lines per branch follow the i3blocks protocol, presumably:
  # full text, short text, color (left empty here) -- confirm against the
  # i3blocks config that invokes this script.
  if [[ "${DISPLAY}" = "both" ]]; then
    echo "${USAGE_IN} : ${USAGE_OUT}"
    echo "${USAGE_IN} : ${USAGE_OUT}"
    echo ""
    DIFF_TOTAL=$((DIFF_TOTAL+DIFF_IN))
    DIFF_TOTAL=$((DIFF_TOTAL+DIFF_OUT))
  elif [[ "${DISPLAY}" = "in" ]]; then
    echo "${USAGE_IN}"
    echo "${USAGE_IN}"
    echo ""
    DIFF_TOTAL=$((DIFF_TOTAL+DIFF_IN))
  elif [[ "${DISPLAY}" = "out" ]]; then
    echo "${USAGE_OUT}"
    echo "${USAGE_OUT}"
    echo ""
    DIFF_TOTAL=$((DIFF_TOTAL+DIFF_OUT))
  fi
else
  # No previous sample yet: nothing to diff against.
  # (DIFF_TOTAL stays unset here; the [[ -ge ]] test below treats the empty
  # string as 0, so the urgent check is effectively skipped.)
  echo "?"
  echo "?"
  echo ""
fi
# Persist the current byte counters for the next invocation
# (the original "CPU times" comment was stale).
echo "${BYTES_IN}" > "${PREV_FILE}"
echo "${BYTES_OUT}" >> "${PREV_FILE}"
# Exit 33 flags the block as urgent when the interval's traffic exceeds
# URGENT_VALUE -- presumably the i3blocks urgent convention; verify.
if [[ "${DIFF_TOTAL}" -ge "${URGENT_VALUE}" ]]; then
  exit 33
fi
| true |
22a2b349e6e8dc759878e02da85bf553c42d982c | Shell | ofenton/ofenton | /tools/bin/docker_build | UTF-8 | 464 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# Build a Docker image named "ofenton/<dir>" from the directory given as $1,
# then prune unreferenced containers.
if [ "$#" -ne 1 ] || ! [ -d "$1" ]; then
    echo "usage: $0 [path_to_Dockerfile]"
    exit
fi

# Build name = 'ofenton/[last_bit_of_path]'
# Take the last path component, ignoring a single trailing slash --
# equivalent to the old awk one-liner without spawning a process.
NAME=${1%/}
NAME=${NAME##*/}
# Bug fix: the original `[ ${NAME} == "" ]` expanded to `[ == "" ]` when
# NAME was empty, which is a test(1) syntax error, so the guard never fired.
if [ -z "$NAME" ]; then
    echo "usage: $0 [path_to_Dockerfile]"
    exit
fi

echo "Creating docker build for $NAME"
docker build -t "ofenton/$NAME" "$NAME"
echo "Cleaning up any unreferenced containers"
docker_clean_untagged
| true |
834d38b415d919f8bfd39c4385409c98bc38640a | Shell | JJRnumrous/Firmware | /platforms/nuttx/NuttX/nuttx/tools/testbuild.sh | UTF-8 | 9,302 | 3.5625 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-permissive",
"ISC",
"MIT"
] | permissive | #!/bin/bash
# testbuild.sh
#
# Copyright (C) 2016-2018 Gregory Nutt. All rights reserved.
# Author: Gregory Nutt <gnutt@nuttx.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Directory layout: the script is expected to run from a sibling of nuttx/.
WD=$PWD
nuttx=$WD/../nuttx
TOOLSDIR=$nuttx/tools
UNLINK=$TOOLSDIR/unlink.sh
progname=$0
# Defaults, overridable from the command line (see the option parser below).
host=linux
wenv=cygwin
sizet=uint
APPSDIR=../apps
# -i: keep going past build errors unless -x switches to fail-fast flags.
MAKE_FLAGS=-i
MAKE=make
unset testfile
# Print the usage/help text for this script and terminate with status 1.
showusage() {
  cat <<EOF

USAGE: $progname [-w|l] [-c|u|n] [-s] [-a <appsdir>] [-n <nxdir>] <testlist-file>
  $progname -h

Where:
  -w|l selects Windows (w) or Linux (l). Default: Linux
  -c|u|n selects Windows environment option: Cygwin (c), Ubuntu under
  Windows 10 (u), or Windows native (n). Default Cygwin
  -s Use C++ unsigned long size_t in new operator. Default unsigned int
  -a <appsdir> provides the relative path to the apps/ directory. Default ../apps
  -t <topdir> provides the absolute path to top nuttx/ directory. Default $PWD/../nuttx
  -d enables script debug output
  -x exit on build failures
  -h will show this help test and terminate
  <testlist-file> selects the list of configurations to test. No default

Your PATH variable must include the path to both the build tools and the
kconfig-frontends tools

EOF
  exit 1
}
# Parse command line
# Options are consumed until the first non-option word, which is taken as
# the test-list file; anything after it is rejected below.
while [ ! -z "$1" ]; do
case $1 in
-w )
host=windows
;;
-l )
host=linux
;;
-c )
host=windows
wenv=cygwin
;;
-d )
set -x
;;
-u )
host=windows
wenv=ubuntu
;;
-n )
host=windows
wenv=native
;;
-s )
# NOTE(review): per the usage text -s only selects the size_t flavor, yet
# it also forces host=windows here -- looks like a copy/paste slip; verify.
host=windows
sizet=long
;;
-x )
# Fail-fast mode: quiet make and abort the script on the first error.
MAKE_FLAGS='--silent --no-print-directory'
set -e
;;
-a )
shift
APPSDIR="$1"
;;
-t )
shift
nuttx="$1"
;;
-h )
showusage
;;
* )
testfile="$1"
shift
break;
;;
esac
shift
done
# Reject trailing arguments and validate the inputs collected above.
if [ ! -z "$1" ]; then
echo "ERROR: Garbage at the end of line"
showusage
fi
if [ -z "$testfile" ]; then
echo "ERROR: Missing test list file"
showusage
fi
if [ ! -r "$testfile" ]; then
echo "ERROR: No readable file exists at $testfile"
showusage
fi
if [ ! -d "$nuttx" ]; then
echo "ERROR: Expected to find nuttx/ at $nuttx"
showusage
fi
# Clean up after the last build: run `make distclean` in $nuttx, but only
# when a .config exists (i.e. something was configured previously).
# $MAKE/$MAKE_FLAGS are intentionally unquoted so the flags word-split.
function distclean {
  cd "$nuttx" || { echo "ERROR: failed to CD to $nuttx"; exit 1; }
  if [ -f .config ]; then
    echo "  Cleaning..."
    ${MAKE} ${MAKE_FLAGS} distclean 1>/dev/null
  fi
}
# Configure for the next build
# Runs tools/configure.sh for $config, then force-aligns the host-related
# kconfig symbols with the -w/-l/-c/-u/-n/-s/toolchain options, and finally
# refreshes the defconfig so dependent symbols settle.
function configure {
cd $nuttx/tools || { echo "ERROR: failed to CD to $nuttx/tools"; exit 1; }
echo "  Configuring..."
./configure.sh $config
cd $nuttx || { echo "ERROR: failed to CD to $nuttx"; exit 1; }
# Host OS selection: exactly one of the HOST_* symbols may be enabled.
if [ "X$host" == "Xlinux" ]; then
echo "  Select CONFIG_HOST_LINUX=y"
kconfig-tweak --file $nuttx/.config --enable CONFIG_HOST_LINUX
kconfig-tweak --file $nuttx/.config --disable CONFIG_HOST_WINDOWS
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_NATIVE
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_CYGWIN
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_MSYS
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_OTHER
kconfig-tweak --file $nuttx/.config --enable CONFIG_SIM_X8664_SYSTEMV
kconfig-tweak --file $nuttx/.config --disable CONFIG_SIM_X8664_MICROSOFT
kconfig-tweak --file $nuttx/.config --disable CONFIG_SIM_M32
else
echo "  Select CONFIG_HOST_WINDOWS=y"
kconfig-tweak --file $nuttx/.config --enable CONFIG_HOST_WINDOWS
kconfig-tweak --file $nuttx/.config --disable CONFIG_HOST_LINUX
# Windows sub-environment (cygwin / ubuntu-on-windows / native).
if [ "X$wenv" == "Xcygwin" ]; then
echo "  Select CONFIG_WINDOWS_CYGWIN=y"
kconfig-tweak --file $nuttx/.config --enable CONFIG_WINDOWS_CYGWIN
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_UBUNTU
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_NATIVE
else
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_CYGWIN
if [ "X$wenv" == "Xubuntu" ]; then
echo "  Select CONFIG_WINDOWS_UBUNTU=y"
kconfig-tweak --file $nuttx/.config --enable CONFIG_WINDOWS_UBUNTU
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_NATIVE
else
echo "  Select CONFIG_WINDOWS_NATIVE=y"
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_UBUNTU
kconfig-tweak --file $nuttx/.config --enable CONFIG_WINDOWS_NATIVE
fi
fi
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_MSYS
kconfig-tweak --file $nuttx/.config --disable CONFIG_WINDOWS_OTHER
kconfig-tweak --file $nuttx/.config --enable CONFIG_SIM_X8664_MICROSOFT
kconfig-tweak --file $nuttx/.config --disable CONFIG_SIM_X8664_SYSTEMV
kconfig-tweak --file $nuttx/.config --disable CONFIG_SIM_M32
fi
kconfig-tweak --file $nuttx/.config --disable CONFIG_HOST_MACOS
kconfig-tweak --file $nuttx/.config --disable CONFIG_HOST_OTHER
# C++ new-operator size_t flavor (-s option).
if [ "X$sizet" == "Xlong" ]; then
echo "  Select CONFIG_CXX_NEWLONG=y"
kconfig-tweak --file $nuttx/.config --enable CONFIG_CXX_NEWLONG
else
echo "  Disable CONFIG_CXX_NEWLONG"
kconfig-tweak --file $nuttx/.config --disable CONFIG_CXX_NEWLONG
fi
# Optional toolchain override: disable whichever TOOLCHAIN symbol the
# defconfig enabled (except ARCH_TOOLCHAIN_GNU) and enable the requested one.
if [ "X$toolchain" != "X" ]; then
setting=`grep TOOLCHAIN $nuttx/.config | grep -v CONFIG_ARCH_TOOLCHAIN_GNU=y | grep =y`
varname=`echo $setting | cut -d'=' -f1`
if [ ! -z "$varname" ]; then
echo "  Disabling $varname"
kconfig-tweak --file $nuttx/.config --disable $varname
fi
echo "  Enabling $toolchain"
kconfig-tweak --file $nuttx/.config --enable $toolchain
fi
echo "  Refreshing..."
cd $nuttx || { echo "ERROR: failed to CD to $nuttx"; exit 1; }
${MAKE} ${MAKE_FLAGS} olddefconfig 1>/dev/null 2>&1
}
# Perform the next build: run make inside $nuttx with build output hidden.
# $MAKE/$MAKE_FLAGS are intentionally unquoted so the flags word-split.
function build {
  cd "$nuttx" || { echo "ERROR: failed to CD to $nuttx"; exit 1; }
  echo "  Building NuttX..."
  echo "------------------------------------------------------------------------------------"
  ${MAKE} ${MAKE_FLAGS} 1>/dev/null
}
# Coordinate one build test: print a separator, then clean, configure and
# build the selected configuration in order.
dotest() {
  echo "------------------------------------------------------------------------------------"
  distclean
  configure
  build
}
# Perform the build test for each entry in the test list file
if [ ! -d $APPSDIR ]; then
  # Bug fix: the original ran `export "ERROR: ..."`, which tries to define a
  # strangely named variable instead of printing anything; echo the message.
  echo "ERROR: No directory found at $APPSDIR"
  exit 1
fi
export APPSDIR

# Shouldn't have to do this
testlist=`cat $testfile`

#while read -r line || [[ -n $line ]]; do
for line in $testlist; do
  echo "===================================================================================="
  firstch=${line:0:1}
  if [ "X$firstch" == "X#" ]; then
    echo "Skipping: $line"
  else
    echo "Configuration/Tool: $line"

    # Parse the next line: "<config>[,<toolchain>]"
    config=`echo $line | cut -d',' -f1`
    path=$nuttx/configs/$config
    if [ ! -r "$path/defconfig" ]; then
      echo "ERROR: no configuration found at $path"
      showusage
    fi

    unset toolchain;
    if [ "X$config" != "X$line" ]; then
      toolchain=`echo $line | cut -d',' -f2`
      if [ -z "$toolchain" ]; then
        echo "  Warning: no tool configuration"
      fi
    fi

    # Perform the build test
    dotest
  fi
  cd $WD || { echo "ERROR: Failed to CD to $WD"; exit 1; }
done # < $testfile
echo "===================================================================================="
| true |
59ffd77c61b335b508c7d93f3d8270a40a5fb2d2 | Shell | newjoker2/PacketSorter | /clean-build.sh | UTF-8 | 508 | 3.109375 | 3 | [] | no_license | clear
# Wipe any previous CMake build tree and reconfigure/build from scratch.
echo "Deleting build/ folder"
rm -rf build/
echo "Creating build/ folder"
mkdir build
cd build
# NOTE(review): this message prints after the cd has already happened.
echo "Entering build/ folder"
# Build flavor is chosen by the first argument:
#   "test"                  -> Debug build, run the test target
#   "test-coverage-publish" -> Debug build with coverage, tests + lcov report
#   anything else           -> Release build without tests
if [ "$1" == "test" ];
then
cmake -DCMAKE_BUILD_TYPE=Debug ..
make -j 8
make test
elif [ "$1" == "test-coverage-publish" ]
then
cmake -DCMAKE_BUILD_TYPE=Debug -DENABLE_COVERAGE=On ..
make -j 8
make test
make lcov
else
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=OFF ..
make VERBOSE=1 -j 8
fi
| true |
57407bc813dc7a05b0c2f153791c86b8c114d107 | Shell | kmova/bootstrap | /shell/empty.sh | UTF-8 | 73 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Warn when the script receives no argument. Bug fix: the original tested
# `[ -e ./$1 ]`, which checks whether a *file* exists (and is always true
# for "./" when $1 is empty) -- the message shows the intent was to detect
# a missing first argument.
if [ -z "${1:-}" ]; then
  echo "No argument passed"
fi
| true |
6ab8a9b9665de1661971f1ea8147f29861eb851d | Shell | josiahjohnston/admin-scripts | /psql_backup.sh | UTF-8 | 1,444 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #/bin/sh
# Backup each psql database into a directory format that is suitable for
# rsnapshot's file-level deduplication.
backupdir=/data1/psql_backups
# Backup global stuff like user accounts
pg_dumpall --file="$backupdir/switch_gis_globals.sql" --host=switch-db2.erg.berkeley.edu \
--username=postgres --globals-only
# Backup each DB
# List every non-template database name (-t: tuples only, no headers), then
# dump each one. Note the while loop runs in a pipeline subshell, which is
# fine here since nothing set inside is needed afterwards.
psql --host=switch-db2.erg.berkeley.edu --username=postgres -c \
'SELECT datname FROM pg_database WHERE datistemplate = false;' -t \
| while read db_name; do
# Skip blank lines so we don't delete the whole backup dir
if [ -z "$db_name" ]; then continue; fi;
save_path="$backupdir/${db_name}"
log_path="$backupdir/${db_name}.log"
err_log="$backupdir/${db_name}.errlog"
rm -rf ${save_path}
# Schema only as sql text file
pg_dump --host=switch-db2.erg.berkeley.edu --username=postgres --format=plain \
--file="$save_path".schema.sql --schema-only "$db_name" 1>"$log_path" 2>"$err_log"
# Schema + data as directory file of "custom" binary format, 1 file per table
# We have to delete the old backup before writing a new one; pg_dump insists on this.
rm -rf "$save_path"
pg_dump --host=switch-db2.erg.berkeley.edu --username=postgres --format=directory \
--file="$save_path" "$db_name" 1>>"$log_path" 2>>"$err_log"
# Print any errors that came up; these will be emailed to the admins
if [ -s "$err_log" ]; then
cat $err_log;
fi
done
| true |
d54bba31c4e69366493660d2ef6aed99be4ca46a | Shell | KevinKbyte/.dotfiles | /i3/scripts/terminal_time.sh | UTF-8 | 586 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
( echo " | "$( date '+%Y-%m-%d %H:%M:%S '$($HOME/.dotfiles/i3/scripts/get-day-of-week.sh) & echo "| Energy:" $(echo "scale=3;"$(cat /sys/class/power_supply/BAT0/energy_now)"/"$(cat /sys/class/power_supply/BAT0/energy_full) | bc))" |" ); read
##!/usr/bin/expect -f
## https://askubuntu.com/questions/5363/how-to-start-a-terminal-with-certain-text-already-input-on-the-command-line
## Get a Bash shell
#spawn -noecho bash
## Wait for a prompt
#expect "$ "
## Type something
#send "date \"+%H:%M:%S %d/%m/%y\""
## Hand over control to the user
#interact
#exit
| true |
c43c37f8a119b49a1ba194ea0ce05aa02e2912dc | Shell | maprost/nominatim | /merge_pbf_files.sh | UTF-8 | 346 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Merge every OSM extract in ./pbf_files into a single sorted .osm.pbf file.
PBF_DIR=./pbf_files
OUTPUT=data.osm.pbf

# Collect the inputs in an array so filenames survive word-splitting.
# (Removed: a misleading leftover `echo "osmium cat ..."` example, and the
# old "." / ".." filter -- a glob never produces those names.)
fileList=()
for file in "$PBF_DIR"/*; do
    fileList+=("$file")
done
echo "fileList: ${fileList[*]}"

# -f: do not error on the very first run, when no previous output exists.
rm -f "$OUTPUT"
# osmium sort reads all inputs and writes their objects, sorted, into one
# output file -- effectively merging the extracts.
osmium sort "${fileList[@]}" -o "$OUTPUT"
de8ed2110cd32f27bcadbf4e31eb029c5b14f11c | Shell | boardman/sphinx-buildpack | /bin/detect | UTF-8 | 505 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# This script serves as the
# [**Python Buildpack**](https://github.com/heroku/heroku-buildpack-python)
# detector.
#
# A [buildpack](http://devcenter.heroku.com/articles/buildpacks) is an
# adapter between a Python application and Heroku's runtime.
# ## Usage
# Compiling an app into a slug is simple:
#
#     $ bin/detect <build-dir> <cache-dir>
BUILD_DIR=$1
# Look up to 3 levels deep for a Sphinx conf.py. "$BUILD_DIR" is quoted so
# paths containing spaces or glob characters do not word-split.
SETTINGS_FILE=$(find "$BUILD_DIR"/. -maxdepth 3 -type f -name 'conf.py' | head -1)
# Per the buildpack detect contract, print the framework name on a match;
# otherwise the failed && chain leaves a non-zero exit status.
[ -n "$SETTINGS_FILE" ] && echo Sphinx
270727b9925f49edb8b19d3ccecd11f95eb3486a | Shell | cameronhr/dotfiles | /bash_mac | UTF-8 | 317 | 2.65625 | 3 | [] | no_license | # vim:set ft=sh:
# Initialize Homebrew's environment (PATH, MANPATH, ...) for this shell.
eval "$(/opt/homebrew/bin/brew shellenv)"
# Load bash completion definitions when Homebrew has installed them.
[ -f /opt/homebrew/etc/bash_completion ] && . /opt/homebrew/etc/bash_completion
# If we have a `default` python virtualenv, lets use it over anything on the system already
[ -d ~/.virtualenvs/default ] && export PATH="$HOME/.virtualenvs/default/bin:$PATH"
| true |
4da1441402f64d7dcb31b7161a3affbb934df7b4 | Shell | Peter-Metz/Behavioral-Responses | /conda.recipe/remove_local_package.sh | UTF-8 | 465 | 3.53125 | 4 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# USAGE: ./remove_local_package.sh
# ACTION: (1) uninstalls _ANY_ installed behresp package (conda uninstall)
# NOTE: for those with experience working with compiled languages,
#       removing a local conda package is analogous to a "make clean" operation

# Detect an installed behresp package: the awk program exits with status 1
# when some line's first field is exactly "behresp", so $? == 1 below means
# "package is present".
conda list behresp | awk '$1=="behresp"{rc=1}END{exit(rc)}'
if [[ $? -eq 1 ]]; then
    # Redirect-order fix: ">/dev/null 2>&1" silences stdout AND stderr; the
    # original "2>&1 > /dev/null" still let stderr reach the terminal.
    conda uninstall --yes behresp > /dev/null 2>&1
fi
exit 0
| true |
946490facfb1dd17216f2cf732a7a08584440e3a | Shell | remoteit/docs | /.gitbook/assets/get-pi-status (1).sh | UTF-8 | 3,002 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# The above line should be the shell you wish to execute this script with.
# Raspberry Pi supports bash shell
#
# remote.it Bulk Management Status Script
#
# $1 parameter is the jobID used for completion status
# $2 is API server
#
# This example script first clears all the status columns (StatusA-E) in the remote.it portal.
# Next this script grabs the following Pi system values and returns them to the remote.it portal.
#
#StatusA = os-release ID per /etc/os-release
#StatusB = Linux version info per uname -a
#StatusC = System uptime since last boot
#StatusD = counts and returns the number remote.it Services
#StatusE = connectd package version
TOOL_DIR="/usr/bin"
NOTIFIER="connectd_task_notify"
# Clear all status columns A-E in remote.it portal
ret=$(${TOOL_DIR}/$NOTIFIER a $1 $2 "")
ret=$(${TOOL_DIR}/$NOTIFIER b $1 $2 "")
ret=$(${TOOL_DIR}/$NOTIFIER c $1 $2 "")
ret=$(${TOOL_DIR}/$NOTIFIER d $1 $2 "")
ret=$(${TOOL_DIR}/$NOTIFIER e $1 $2 "")
# Update status column A (StatusA) in remote.it portal
#-------------------------------------------------
# retrieve the os ID as reported by the command “cat /etc/os-release”
os=$(cat /etc/os-release | grep -w ID | awk -F "=" '{print $2 }')
# send to status column a in remote.it portal
ret=$(${TOOL_DIR}/$NOTIFIER a $1 $2 $os)
#-------------------------------------------------
# Update status column B (StatusB) in remote.it portal
#-------------------------------------------------
# retrieve the Linux kernel version
fwversion=$(uname -a)
# send to status column b in remote.it portal
ret=$(${TOOL_DIR}/$NOTIFIER b $1 $2 "$fwversion")
#-------------------------------------------------
# Update status column C (StatusC) in remote.it portal
#-------------------------------------------------
# retrieve the system uptime
uptime=$(uptime | sed 's/^.*up *//; s/, *[0-9]* user.*$/m/; s/day[^0-9]*/d, /;s/\([hm]\).*m$/\1/;s/:/h, /;s/^//')
# send to status column c in remote.it portal
ret=$(${TOOL_DIR}/$NOTIFIER c $1 $2 "$uptime")
#-------------------------------------------------
# Update status column D (StatusD) in remote.it portal
#-------------------------------------------------
# retrieve the number of active remote.it Services
# (counts ps lines matching "connect"; grep -v grep drops the grep itself)
nsvcs=$(ps ax | grep connect | grep -v grep | wc -l)
# send to status d
ret=$(${TOOL_DIR}/$NOTIFIER d $1 $2 "$nsvcs")
#-------------------------------------------------
# Update status column E (StatusE) in remote.it portal
#-------------------------------------------------
# retrieve the installed connectd package version via dpkg
cversion=$(dpkg -s connectd | grep Version)
# send to status e
ret=$(${TOOL_DIR}/$NOTIFIER e $1 $2 "$cversion")
#-------------------------------------------------
#=======================================================================
# Lastly finalize job, no updates allowed after this
ret=$(${TOOL_DIR}/$NOTIFIER 1 $1 $2 "Job complete")
# Use this line in case of error, and add desired message
#${TOOL_DIR}/$NOTIFIER 2 $1 $2 "Job Failed"
| true |
fdaa2e88bc4a2ec246c5bfaadeb1743d7d468aaf | Shell | smkent/dotfiles | /.zshrc | UTF-8 | 924 | 3.09375 | 3 | [
"MIT",
"DOC"
] | permissive | #!/bin/zsh
# Shared shell configuration (aliases, env) common to bash/zsh.
[ -f ~/.dotfiles/shell/rc ] && source ~/.dotfiles/shell/rc
# Basic settings {{{
# History control
setopt APPEND_HISTORY
setopt HIST_IGNORE_SPACE
setopt HIST_IGNORE_DUPS
# iTerm2 keymaps
if [ "$(uname -s)" = "Darwin" ]; then
  # Emacs keymap, then escape sequences iTerm2 sends for
  # Ctrl+Arrow (word motion), Home/End and Delete.
  bindkey -e
  bindkey '^[[1;5C' forward-word
  bindkey '^[[1;5D' backward-word
  bindkey '^[[1~' beginning-of-line
  bindkey '^[[4~' end-of-line
  bindkey '^[[H' beginning-of-line
  bindkey '^[[F' end-of-line
  bindkey '^[[3~' delete-char
# Option+Backspace can be configured to delete a word in iTerm2 by sending the
# following hex sequence: 0x1b 0x18
# More info: https://stackoverflow.com/a/29403520
fi
# }}}
# Enable additional zsh completions on OS X
if [ "$(uname -s)" = "Darwin" ]; then
  if type brew &>/dev/null; then
    FPATH="$(brew --prefix)/share/zsh-completions:${FPATH}"
    autoload -Uz compinit
    # -i: silently skip insecure directories instead of prompting
    compinit -i
  fi
fi
| true |
887d8986ce5e4128f4a4af034dd21942ade77569 | Shell | sergiobaro/bash-scripts | /bash-idioms/verbose.sh | UTF-8 | 352 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# No-op by default; swapped for `echo` when -v or --verbose is passed, so
# `$VERBOSE msg...` either prints the message or silently does nothing.
VERBOSE=':'
# The leading ":" in the optstring enables silent error reporting; the
# trailing "-:" is the classic getopts trick that delivers GNU-style long
# options as option "-" with the long name in OPTARG.
while getopts ':v-:' opt_char; do
  if [ "$opt_char" = 'v' ]; then
    VERBOSE='echo'
  elif [ "$opt_char" = '-' ]; then
    if [ "$OPTARG" = 'verbose' ]; then
      VERBOSE='echo'
    else
      echo "Unknown option $OPTARG"
    fi
  else
    echo "Unknown option $OPTARG"
  fi
done
$VERBOSE "Verbose"
369c8e2ee48a56dd3d16329450abf09a479a2b0c | Shell | diogoflduarte/lfads_tf1 | /lfads_wrapper/run_posterior_samp.sh | UTF-8 | 758 | 3.296875 | 3 | [] | no_license |
#!/bin/bash
# Launch posterior-mean sampling for every run directory under $1, spreading
# the jobs across the local GPUs round-robin and throttling to at most
# $max_proc concurrent background processes.
#
# Usage: run_posterior_samp.sh <directory containing run subdirectories>
PATH_TO_PS=/snel/home/mreza/projects/PBT_HP_opt/PBT_HP_opt/pbt_opt/lfads_wrapper/run_posterior_mean_sampling.py
max_proc=20
runn=0
# GPU count is loop-invariant, so compute it once up front.
# BUGFIX: the original `tot_gpu=$((nvidia-smi ... ) || (echo 0))` is parsed
# as an *arithmetic* expansion `$(( ... ))`, not a command substitution;
# the space after `$(` below forces command substitution.  `wc -l` counts
# one CSV header line plus one line per GPU, hence the -1 afterwards.
tot_gpu=$( (nvidia-smi --query-gpu=gpu_name,gpu_bus_id --format=csv | wc -l) || echo 0 )
tot_gpu=$((tot_gpu - 1))
for dir in "$1"/*/
do
    ((runn+=1))
    # Round-robin GPU assignment; guard the modulo against division by
    # zero when nvidia-smi failed or reported no GPUs.
    if [ "$tot_gpu" -gt 0 ]; then
        device_no=$((runn % tot_gpu))
    else
        device_no=0
    fi
    tname=$(basename "$dir")
    (CUDA_VISIBLE_DEVICES=$device_no python "$PATH_TO_PS" "$dir") &
    #tmux new-session $tname "CUDA_VISIBLE_DEVICES="$device_no" python "$PATH_TO_PS" "$dir" ; tmux wait-for -S "$tname"-done" &
    #wait-for $tname-done &
    # Throttle: after every $max_proc launches, wait for the batch to finish.
    if [ "$((runn % max_proc))" -eq 0 ]
    then
        echo "Waiting for the current parallel processes to finish"
        wait
    fi
done
# Reap any remaining background jobs before exiting (the original script
# could exit while the last partial batch was still running).
wait
b488e96ef19025b867fd8624a145affe4b9b0ca3 | Shell | piger/Preferences | /bin/avvisa | UTF-8 | 500 | 3.609375 | 4 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# Run a command, then announce its completion via the OS X Notification
# Center.  (Original comments were in Italian.)
#
# Requirements: terminal-notifier
# Deliberately no "set -e": the wrapped command may exit non-zero and the
# notification must still be displayed afterwards.
TN=terminal-notifier
# Inside tmux on OS X, terminal-notifier must be re-attached to the user
# namespace to reach the Notification Center.
if [[ -x /usr/local/bin/reattach-to-user-namespace ]]; then
    TN="/usr/local/bin/reattach-to-user-namespace $TN"
fi
# BUGFIX: execute the command with "$@" (not $*) so arguments containing
# spaces survive exactly as the caller quoted them; $* is still used below
# purely for display.  Capture the exit status before any other command
# can overwrite $?.
"$@"
status=$?
$TN -message "Command \"$*\" done" -title "Command terminated (exit: $status)"
| true |
a50688173093e22537b7c8d89abc2c6be3ea81b6 | Shell | ElizabethBrooks/TranscriptomeAnalysisPipeline_DaphniaUVTolerance | /Archived/VariantCalling/variants_GATK.sh | UTF-8 | 3,957 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#$ -M ebrooks5@nd.edu
#$ -m abe
#$ -r n
#$ -N variants_GATK_jobOutput
#Script to perform variant calling with gatk:
# SplitNTrim > BQSR > HaplotypeCaller
#Bam files need to have duplicates marked using samtools
#Usage: qsub variants_GATK.sh sortedFolder
#Usage Ex: qsub variants_GATK.sh sortedCoordinate_samtoolsTophat2_run1
#Required modules for ND CRC servers
module load bio/samtools
module load bio/gatk
#Check for input arguments of folder names
if [ $# -eq 0 ]; then
echo "No folder name(s) supplied... exiting"
exit 1
fi
#Determine if the folder name was input in the correct format
if [[ "$1" == *\/* ]] || [[ "$1" == *\\* ]]; then
echo "ERROR: Please enter folder names without a trailing forward slash (/)... exiting"
exit 1
fi
#Determine if the correct analysis folder was input
if [[ "$1" != sortedCoordinate* ]]; then
echo "ERROR: The "$1" folder of aligned bam files were not found... exiting"
exit 1
fi
#Determine what analysis method was used for the input folder of data
if [[ "$1" == *"Hisat2"* ]]; then
#Set analysis method for folder naming
analysisMethod="Hisat2"
elif [[ "$1" == *"Tophat2"* ]]; then
#Set analysis method for folder naming
analysisMethod="Tophat2"
else
echo "ERROR: The sorted "$1" folder of bam files were not found... exiting"
exit 1
fi
#TODO: check input folder requirements
#Retrieve aligned reads input absolute path
inputsPath=$(grep "sorting:" ../InputData/outputPaths.txt | tr -d " " | sed "s/sorting://g")
#Retrieve genome reference absolute path for alignment
genomeFile=$(grep "genomeReference:" ../InputData/inputPaths.txt | tr -d " " | sed "s/genomeReference://g")
#Retrieve variant calling outputs absolute path
outputsPath=$(grep "variantCalling:" ../InputData/outputPaths.txt | tr -d " " | sed "s/variantCalling://g")
#Create output directory
outputFolder="$outputsPath"/"$1"_variants
mkdir "$outputFolder"
#Move to outputs directory
cd "$outputFolder"
#Prepare for analysis
dirFlag=0
runNum=1
COUNTER=0
#Loop through all reads and sort sam/bam files for input to samtools
for f1 in "$inputsPath"/"$1"/*/*.bam; do
#Name of sorted and aligned file
curAlignedSample="$f1"
#Trim file paths from current sample folder name
curSampleNoPath=$(echo $f1 | sed 's/accepted\_hits\.bam//g')
curSampleNoPath=$(basename $curSampleNoPath)
#Mark duplicates using smatools fixmate
samtools fixmate -m "$f1" "$outputFolder"/"$curSampleNoPath"_fixed.bam
#Remove duplicate reads wtih samtools markdup
samtools markdup -r "$outputFolder"/"$curSampleNoPath"_fixed.bam "$outputFolder"/"$curSampleNoPath"_marked.bam
rm "$outputFolder"/"$curSampleNoPath"_fixed.bam
#Index the marked file
samtools index "$outputFolder"/"$curSampleNoPath"_marked.bam "$outputFolder"/"$curSampleNoPath"_indexed.bam
rm "$outputFolder"/"$curSampleNoPath"_marked.bam
#Perform variant calling using gatk
echo "Sample $curSampleNoPath variant are being called..."
#Splits reads into exon segments (getting rid of Ns but maintaining grouping information)
# and hardclip any sequences overhanging into the intronic regions
gatk SplitNCigarReads -R "$genomeFile" -I "$outputFolder"/"$curSampleNoPath"_indexed.bam -O "$outputFolder"/"$curSampleNoPath"_split.bam
rm "$outputFolder"/"$curSampleNoPath"_indexed.bam
#Correct for systematic bias that affect the assignment of base quality scores by the sequencer
gatk ApplyBQSR -R "$genomeFile" -I "$outputFolder"/"$curSampleNoPath"_split.bam --bqsr-recal-file "$outputFolder"/"$curSampleNoPath"_recalibration.table -O "$outputFolder"/"$curSampleNoPath"_recal.bam
rm "$outputFolder"/"$curSampleNoPath"_split.bam
#Finally, run HaplotypeCaller in GVCF mode so multiple samples may be added in a scalable way
gatk HaplotypeCaller -R "$genomeFile" -I "$outputFolder"/"$curSampleNoPath"_recal.bam -O "$outputFolder"/variants.g.vcf -ERC GVCF
rm "$outputFolder"/"$curSampleNoPath"_recal.bam
done
#Copy previous summaries
cp "$inputsPath"/"$1"/*.txt "$outputFolder"
| true |
f6b61158ccd72b7d75a3c3f7a4a91a40aefb32ba | Shell | iabsis/backupninja-helper | /handler/sysimg | UTF-8 | 734 | 3.171875 | 3 | [] | no_license | # -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# tar backup handler script for backupninja
#
getconf backupdir /var/backups/
getconf tarexclude
getconf tarcompression gz
getconf tarname rootfs
case ${tarcompression} in
bz2)
tarargs="-cj"
;;
gz)
tarargs="-cz"
;;
xz)
tarargs="-cJ"
;;
*)
tarargs="-c"
;;
esac
for folder in $tarexclude ; do
tarargs="$(echo ${tarargs} --exclude=${folder})"
done
tar ${tarargs} \
-f ${backupdir}/${tarname}.tar.${tarcompression} \
--directory=/ \
--exclude=${backupdir#/}
--exclude=proc \
--exclude=sys \
--exclude=dev/pts \
--exclude=home \
--exclude=var/www \
.
return 0
| true |
e7631697173e2aac8defff04efad5d57de59e081 | Shell | ravenkiller1709/.dotfiles2 | /.scripts/network.sh | UTF-8 | 703 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# icons depend on nerdfonts being installed
hostname="${HOSTNAME:-${hostname:-$(hostname)}}"
network() {
wire="$(ip a | grep 'enp3s0' | grep inet | wc -l)"
wifi="$(ip a | grep wlp4s0 | grep inet | wc -l)"
if [ $wire = 1 ]; then
echo " ^c#b5bd68^$(ifconfig | grep inet | awk 'NR==1 {print $2}')"
elif [ $wifi = 1 ]; then
echo " ^c#b5bd68^$(ifconfig | grep inet | awk 'NR==3 {print $2}')"
else
echo "睊 "
fi
}
vpn() {
state="$(ip a | grep tun0 | grep inet | wc -l)"
if [ $state = 1 ]; then
echo "ﱾ"
else
echo " "
fi
}
echo "$(network) $(vpn)"
#case $BUTTON in
# 1) setsid -f "st -e pia-client"
# 2) notify-send "~/.scripts/network.sh"
#esac
| true |
9db0a5dc6b33e4090d372fd245f8d9e949f0e5fa | Shell | hoodielive/shellscripting | /bash/scripts_for_studies/cyborg-hawk/xplico | UTF-8 | 5,066 | 3.734375 | 4 | [] | no_license | #!/bin/sh
#
# /etc/init.d/xplico -- start/stop the Xplico daemon. Xplico will decode the capture you upload to its web interface (default http://localhost:9876)
#
### BEGIN INIT INFO
# Provides: Xplico
# Required-Start: $syslog $network
# Required-Stop: $syslog
# Should-Start: $local_fs
# Should-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts and stops the xplico daemon
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/xplico/bin
NAME="Xplico"
DAEMON=/opt/xplico/bin/dema
#APPLICATION=/opt/xplico/bin/dema
PIDFILE=/var/run/dema.pid
APACHEPIDFILE1=/var/run/apache2.pid
APACHEPIDFILE2=/var/run/apache2/apache2.pid
DESC=""
RUN_DAEMON=yes
PRIORITY=1 #(0..20)
#0: don't change priority
#1: better priority
#...
#20: highest priority, not recommended.
#INTERFACE=eth1
#CONFIG_FILE="/opt/xplico/cfg/xplico.cfg"
#MODE=pcap
#PATH_DECODED_FILES="/opt/xplico/bin/"
#DAEMON_ARGS="-c $CONFIG_FILE -m $MODE -i $INTERFACE"
#DAEMON_ARGS=""
unset LANG
trap "" 1 15
test -x $DAEMON || exit 0
run_xplico="1"
if [ "x$RUN_DAEMON" = "xno" ]; then
run_xplico=0
fi
# Return 0 ("running") when the dema daemon appears to be active.
# start-stop-daemon --start --test exits 0 only when a start WOULD be
# possible, i.e. when $PIDFILE does not point at a live $DAEMON process;
# its failure therefore means the daemon is already running — hence the
# deliberately inverted logic below.
is_running()
{
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
	 || return 0
	return 1
}
# Return 0 when Apache2 is running AND the pid recorded in one of its pid
# files matches a live apache2 process; otherwise print an error and
# return 1.  Xplico's web UI is served through Apache, so the daemon is
# only started when Apache is confirmed up.
is_apache2_running()
{
	PIDS=`pidof apache2` || true # Array of Apache forks.
	[ -e $APACHEPIDFILE1 ] && PIDS2=`cat $APACHEPIDFILE1` #Last Apache2 pid known.
	[ -e $APACHEPIDFILE2 ] && PIDS2=`cat $APACHEPIDFILE2` #Last Apache2 pid known.
	# if there is a pid we need to verify that belongs to apache2
	for i in $PIDS; do
		if [ "$i" = "$PIDS2" ]; then
			# in this case the pid stored in the pidfile matches one of the pidof apache
			#echo $i
			return 0
		fi
	done
	echo " Error, Apache2 not running"
	return 1
}
# Function that starts the daemon/service
# Refuses to start unless Apache2 is up; raises resource limits, launches
# dema in the background from its install directory, and optionally renices
# it according to $PRIORITY (0..20, where 0 leaves priority untouched).
do_start()
{
	# Return
	# 0 if daemon has been started
	# 1 if daemon was already running
	# 2 if daemon could not be started
	#Check Apache2 is up&running
	is_apache2_running || return 2
	is_running
	case "$?" in
		0) echo "Xplico was (and is) already running" ;;
		1) # ulimit
			# Raise fd/memory limits: decoding large captures opens many files.
			ulimit -n 200000
			ulimit -m unlimited
			#ulimit -u unlimited #this is a value for bash, not sh.
			ulimit -v unlimited
			# kill : Not necessary here, will use the function "is_running"
			#killall dema
			# start dema
			(cd /opt/xplico/bin; ./dema -d /opt/xplico -b sqlite > /dev/null) &
			#Optional: let's give more priotity and CPU to xplico, so decoding will be faster.
			if [ "$PRIORITY" -ge "0" ] && [ "$PRIORITY" -le "20" ]; then
				if [ "$PRIORITY" -ge "1" ]; then
					echo Modifying priority to -$PRIORITY
					# NOTE(review): one second may not always be enough for dema
					# to write $PIDFILE before the renice — confirm in practice.
					sleep 1 #giving time to DEMA to start and write its pid in the PIDFILE
					renice -$PRIORITY `cat $PIDFILE` >> /dev/null
					#else: "PRIORITY is 0, nothing will be done.
				fi
			else
				echo "WARNING: Xplico priority not altered: wrong priority value (check $PRIORITY, range 0..20, default:0)"
			fi
			;;
		*) echo "Error #123" ;; # Failed to start
	esac
}
# Function that stops the daemon/service
# First tries by pidfile (TERM, 10s grace, then KILL), then by executable
# name as a fallback; preserves the first attempt's status for the caller.
do_stop()
{
	# Return
	# 0 if daemon has been stopped
	# 1 if daemon was already stopped
	# 2 if daemon could not be stopped
	# other if a failure occurred
	start-stop-daemon --stop --quiet --retry=TERM/10/KILL/2 --pidfile $PIDFILE
	RETVAL="$?"
	[ "$RETVAL" = 2 ] && return 2
	# Fallback: stop any stray $DAEMON processes not covered by the pidfile.
	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
	[ "$?" = 2 ] && return 2
	# killall -9 dema
	return "$RETVAL"
}
. /lib/lsb/init-functions
case "$1" in
start)
if [ "$run_xplico" = "1" ]; then
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
fi
echo
;;
stop)
# if [ -f /var/run/xplico.pid ] && kill -0 `cat /var/run/xplico.pid` 2>/dev/null; then
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
# fi
;;
restart|force-reload)
if [ "$run_xplico" = "1" ]; then
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
fi
;;
status)
if is_running; then
log_success_msg "Xplico web interface IS RUNNING to decode traffic capture files"
exit 0
else
log_failure_msg "Xplico web interface mode is not running."
exit 1
fi
;;
*)
echo "Usage: /etc/init.d/xplico {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0
| true |
425709c93c2d35a98c215a80370a33b4eaa3a1a4 | Shell | tonydeng/note | /sh/ffmpeg/rotation.sh | UTF-8 | 3,473 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# rotation of 90 degrees. Will have to concatenate.
#ffmpeg -i <originalfile> -metadata:s:v:0 rotate=0 -vf "transpose=1" <destinationfile>
#/VLC -I dummy -vvv <originalfile> --sout='#transcode{width=1280,vcodec=mp4v,vb=16384,vfilter={canvas{width=1280,height=1280}:rotate{angle=-90}}}:std{access=file,mux=mp4,dst=<outputfile>}\' vlc://quit
#Allowing blanks in file names
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
#Bit Rate
BR=16384
#where to store fixed files
FIXED_FILES_DIR="fixedFiles"
#rm -rf $FIXED_FILES_DIR
mkdir $FIXED_FILES_DIR
# VLC
VLC_START="/Applications/VLC.app/Contents/MacOS/VLC -I dummy -vvv"
VLC_END="vlc://quit"
#############################################
# Processing of MOV in the wrong orientation
for f in `find . -regex '\./.*\.MOV'`
do
ROTATION=`exiftool "$f" |grep Rotation|cut -c 35-38`
SHORT_DIMENSION=`exiftool "$f" |grep "Image Size"|cut -c 39-43|sed 's/x//'`
BITRATE_INT=`exiftool "$f" |grep "Avg Bitrate"|cut -c 35-38|sed 's/\..*//'`
echo Short dimension [$SHORT_DIMENSION] $BITRATE_INT
if test "$ROTATION" != ""; then
DEST=$(dirname ${f})
echo "Processing $f with rotation $ROTATION in directory $DEST"
mkdir -p $FIXED_FILES_DIR/"$DEST"
if test "$ROTATION" == "0"; then
cp "$f" "$FIXED_FILES_DIR/$f"
elif test "$ROTATION" == "180"; then
# $(eval $VLC_START \"$f\" "--sout="\'"#transcode{vfilter={rotate{angle=-"$ROTATION"}},vcodec=mp4v,vb=$BR}:std{access=file,mux=mp4,dst=\""$FIXED_FILES_DIR/$f"\"}'" $VLC_END )
$(eval ffmpeg -i \"$f\" -vf hflip,vflip -r 30 -metadata:s:v:0 rotate=0 -b:v "$BITRATE_INT"M -vcodec libx264 -acodec copy \"$FIXED_FILES_DIR/$f\")
elif test "$ROTATION" == "270"; then
$(eval ffmpeg -i \"$f\" -vf "scale=$SHORT_DIMENSION:-1,transpose=2,pad=$SHORT_DIMENSION:$SHORT_DIMENSION:\(ow-iw\)/2:0" -r 30 -s "$SHORT_DIMENSION"x"$SHORT_DIMENSION" -metadata:s:v:0 rotate=0 -b:v "$BITRATE_INT"M -vcodec libx264 -acodec copy \"$FIXED_FILES_DIR/$f\" )
else
# $(eval $VLC_START \"$f\" "--sout="\'"#transcode{scale=1,width=$SHORT_DIMENSION,vcodec=mp4v,vb=$BR,vfilter={canvas{width=$SHORT_DIMENSION,height=$SHORT_DIMENSION}:rotate{angle=-"$ROTATION"}}}:std{access=file,mux=mp4,dst=\""$FIXED_FILES_DIR/$f"\"}'" $VLC_END )
echo ffmpeg -i \"$f\" -vf "scale=$SHORT_DIMENSION:-1,transpose=1,pad=$SHORT_DIMENSION:$SHORT_DIMENSION:\(ow-iw\)/2:0" -r 30 -s "$SHORT_DIMENSION"x"$SHORT_DIMENSION" -metadata:s:v:0 rotate=0 -b:v "$BITRATE_INT"M -vcodec libx264 -acodec copy \"$FIXED_FILES_DIR/$f\"
$(eval ffmpeg -i \"$f\" -vf "scale=$SHORT_DIMENSION:-1,transpose=1,pad=$SHORT_DIMENSION:$SHORT_DIMENSION:\(ow-iw\)/2:0" -r 30 -s "$SHORT_DIMENSION"x"$SHORT_DIMENSION" -metadata:s:v:0 rotate=0 -b:v "$BITRATE_INT"M -vcodec libx264 -acodec copy \"$FIXED_FILES_DIR/$f\" )
fi
fi
echo
echo ==================================================================
sleep 1
done
#############################################
# Processing of AVI files for my Panasonic TV
# Use ffmpegX + QuickBatch. Bitrate at 16384. Camera res 640x424
for f in `find . -regex '\./.*\.AVI'`
do
DEST=$(dirname ${f})
DEST_FILE=`echo "$f" | sed 's/.AVI/.MOV/'`
mkdir -p $FIXED_FILES_DIR/"$DEST"
echo "Processing $f in directory $DEST"
$(eval ffmpeg -i \"$f\" -r 20 -acodec libvo_aacenc -b:a 128k -vcodec mpeg4 -b:v 8M -flags +aic+mv4 \"$FIXED_FILES_DIR/$DEST_FILE\" )
echo
echo ==================================================================
done
IFS=$SAVEIFS | true |
11fdd9f1c34ab372b7a92e51d824f23d1076fdb1 | Shell | MaxenceGuinard/GoogleDomainUpdate | /updateDomain.sh | UTF-8 | 4,278 | 3.890625 | 4 | [] | no_license | #!/bin/bash
PWD="/home/maxence/Documents/"
DOMAINFILE="domain.txt"
IPFILE="ip.txt"
if [ ! -f $PWD$DOMAINFILE ]; then
> $PWD${DOMAINFILE}
fi
if [ ! -f $PWD$IPFILE ]; then
> $PWD${IPFILE}
fi
FILENAME=$(basename "$0")
DOMAIN[2]=""
IP=$(dig @resolver1.opendns.com A myip.opendns.com +short -4)
nbrRow=$(wc -l < ${PWD}${DOMAINFILE})
IPFILEVAL=$(cat ${PWD}${IPFILE})
PWDETC="/etc/apache2/sites-available/"
PWDVAR="/var/www/"
help()
{
echo -e "\n updateDomain update all the sub domain registered"
echo -e " updateDomain [username] [password] [domain] add domain"
echo -e " -show, display all the domain registered"
echo -e " -cron, add the cron line into crontab to update domains each hour"
echo -e " -h, --help display this help and exit\n"
exit
}
if [[ $1 == -show ]]; then
cat ${PWD}${DOMAINFILE}
exit
fi
if [[ $1 == -cron ]]; then
clear
echo -e "Please copy this line:\n\n0 * * * * bash ${PWD}${FILENAME}\n\nThen paste it at the end of the crontab file ans save"
read pause
sudo crontab -e
exit
fi
if [ $# == 3 ] || [ $# == 2 ]; then
DOMAIN[0]=$1
DOMAIN[1]=$2
DOMAIN[2]=$3
echo ${DOMAIN[0]} ${DOMAIN[1]} ${DOMAIN[2]} >> ${PWD}${DOMAINFILE}
echo -e Line \'${DOMAIN[0]} ${DOMAIN[1]} ${DOMAIN[2]}\' has been added to ${PWD}${DOMAINFILE}
read pause
clear
echo -e "Do you want to generate the virtual host associated ? [y/n]"
read answerVH
if [[ $answerVH == "y" ]] || [[ $answerVH == "Y" ]]; then
echo "Alias /${DOMAIN[2]} \"${PWDVAR}${DOMAIN[2]}\"" >> ${DOMAIN[2]}.conf
echo "<Virtualhost *:80>" >> ${DOMAIN[2]}.conf
echo " ServerName ${DOMAIN[2]}" >> ${DOMAIN[2]}.conf
echo " ServerAlias www.${DOMAIN[2]}" >> ${DOMAIN[2]}.conf
echo " DocumentRoot ${PWDVAR}${DOMAIN[2]}/" >> ${DOMAIN[2]}.conf
echo "</Virtualhost>" >> ${DOMAIN[2]}.conf
sudo mv ${DOMAIN[2]}.conf ${PWDETC}
echo -e "Virtual host successfully created in ${PWDETC}${DOMAIN[2]}.conf"
read pause
clear
echo -e "Do you want to create the virtual host generated ? [y/n]"
read answerVHA
if [[ $answerVHA == "y" ]] || [[ $answerVHA == "Y" ]]; then
sudo a2ensite ${DOMAIN[2]}.conf
echo -e "\nVirtual host successfully created"
read pause
clear
echo -e "Do you want to restart apache2 now to activate your virtual host ? [y/n]"
read answerVHA2
if [[ $answerVHA2 == "y" ]] || [[ $answerVHA2 == "Y" ]]; then
sudo systemctl reload apache2
echo -e "Apache2 successfully reloaded\nVirtual host successfully activated"
fi
fi
fi
read pause
clear
echo -e "Do you want to generate the root folder associated ? [y/n]"
read answerVAR
if [[ $answerVAR == "y" ]] || [[ $answerVAR == "Y" ]]; then
mkdir ${DOMAIN[2]}
cd ${DOMAIN[2]}
echo "<!DOCTYPE html>" >> index.html
echo "<html>" >> index.html
echo " <head>" >> index.html
echo " <title>Title</title>" >> index.html
echo " </head>" >> index.html
echo " <body><center>" >> index.html
echo " Enjoy" >> index.html
echo " </body></center>" >> index.html
echo "</html>" >> index.html
cd ..
sudo mv ${DOMAIN[2]} ${PWDVAR}
echo -e "Root folder successfully created"
read pause
clear
fi
exit
fi
# Default action (no CLI arguments): push the current public IP to every
# registered Google Domains dynamic-DNS entry listed in $DOMAINFILE.
if [[ $1 == "" ]]; then
	if [ $nbrRow == 0 ]; then
		echo -e "Nothing to do, '${DOMAINFILE}' is empty"
		exit
	fi
	# Skip all API calls when the public IP has not changed since last run.
	if [[ $IP == $IPFILEVAL ]]; then
		echo -e "Nothing to do, IP=${IP} is up do date"
		exit
	fi
	echo ${IP} > ${PWD}${IPFILE}
	# Read each "username password domain" line in a single pass instead of
	# re-scanning the file with `head -n $i | tail -n 1` for every row
	# (the original was O(n^2) in the number of domains).
	# NOTE(review): embedding credentials in the URL can leak them via
	# process listings — consider `curl -u`/--netrc.
	while read -r USERNAME PASSWORD HOSTNAME; do
		curl -s "https://${USERNAME}:${PASSWORD}@domains.google.com/nic/update?hostname=${HOSTNAME}&myip=${IP}"
		echo -e " "$HOSTNAME
	done < "${PWD}${DOMAINFILE}"
	echo -e "\nDone"
	exit
fi
help
| true |
43ce2def6b425a2428ab05cb49e65d3e693302e8 | Shell | dockhippie/invoiceninja | /rootfs/etc/s6/php/custom | UTF-8 | 2,048 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
case "${INVOICENINJA_DB_TYPE}" in
"sqlite")
if [ -z "${INVOICENINJA_DB_DATABASE}" ]
then
echo >&2 "Error: You have to provide INVOICENINJA_DB_DATABASE environment variable"
s6-svc -t /etc/s6
exit 1
fi
if [ ! -f "${INVOICENINJA_DB_DATABASE}" ]
then
/usr/bin/gosu \
caddy \
/usr/bin/sqlite3 \
${INVOICENINJA_DB_DATABASE} \
""
fi
;;
"mysql")
if [ -z "${INVOICENINJA_DB_USERNAME}" ]
then
echo >&2 "Error: You have to provide INVOICENINJA_DB_USERNAME environment variable"
s6-svc -t /etc/s6
exit 1
fi
if [ -z "${INVOICENINJA_DB_PASSWORD}" ]
then
echo >&2 "Error: You have to provide INVOICENINJA_DB_PASSWORD environment variable"
s6-svc -t /etc/s6
exit 1
fi
;;
"pgsql")
if [ -z "${INVOICENINJA_DB_USERNAME}" ]
then
echo >&2 "Error: You have to provide INVOICENINJA_DB_USERNAME environment variable"
s6-svc -t /etc/s6
exit 1
fi
if [ -z "${INVOICENINJA_DB_PASSWORD}" ]
then
echo >&2 "Error: You have to provide INVOICENINJA_DB_PASSWORD environment variable"
s6-svc -t /etc/s6
exit 1
fi
;;
esac
# Mandatory application settings: abort the whole s6 supervision tree when
# they are missing, since the app cannot boot without them.
# NOTE(review): the DB checks earlier in this file signal s6 with
# `s6-svc -t /etc/s6` while these use `/bin/s6-svscanctl -t /etc/s6` —
# confirm which of the two is intended and unify.
if [ -z "${INVOICENINJA_APP_KEY}" ]
then
  echo >&2 "Error: You have to provide INVOICENINJA_APP_KEY environment variable"
  /bin/s6-svscanctl -t /etc/s6
  exit 1
fi
if [ -z "${INVOICENINJA_APP_URL}" ]
then
  echo >&2 "Error: You have to provide INVOICENINJA_APP_URL environment variable"
  /bin/s6-svscanctl -t /etc/s6
  exit 1
fi
# Render the Laravel .env file from the template using the INVOICENINJA_*
# environment variables (templater -p sets the variable prefix).
/usr/bin/templater -d -p invoiceninja \
  -o /srv/www/.env \
  /etc/templates/env.tmpl
pushd /srv/www > /dev/null
# Laravel/artisan maintenance and migration steps, all executed as the
# unprivileged "caddy" user via gosu; -n -q = non-interactive and quiet.
/usr/bin/gosu \
  caddy \
  /usr/bin/php \
  artisan \
  clear-compiled -n -q
/usr/bin/gosu \
  caddy \
  /usr/bin/php \
  artisan \
  optimize -n -q
/usr/bin/gosu \
  caddy \
  /usr/bin/php \
  artisan \
  migrate:install -n -q
/usr/bin/gosu \
  caddy \
  /usr/bin/php \
  artisan \
  migrate -n -q --force --seed
popd > /dev/null
| true |
914f270086b9fdfa1a0b6854b03b5b7be9f99e9d | Shell | domestos/KVMScripts | /kvm_temp_pool_storage.sh | UTF-8 | 408 | 3.609375 | 4 | [] | no_license | #!/bin/bash
##########################################
# CREATE TEMPORY STORAGE PATH #
#########################################
if [ $# -ne 2 ] ; then
echo -e "Please set two params: \n - pool \n - image path \nRUN EXAMPLE: $0 <POOL_NAME> <IMAGE_PATH>"
exit 1
fi
POOL_NAME=$1
IMAGE_PATH=$2
virsh pool-create-as --name "$POOL_NAME" --type dir --target "$IMAGE_PATH"
virsh pool-list --all
| true |
5c6297a59827a28a43e7fec769ea1f0b5b166535 | Shell | dsmic/oakfoam | /ReinforcmentLearning/CNN/CNN-data.sh | UTF-8 | 1,406 | 3.734375 | 4 | [
"FSFAP",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -eu
set -o pipefail
WD="$(dirname "$0")"
#$RANDOM reduces collisions if executed parallel, not 100% sure of cause!
TEMPOUTPUT="patterns_circ_`date +%F_%T`$RANDOM$RANDOM.tmp"
TEMPOUTPUT2="patterns_circ_2_`date +%F_%T`$RANDOM$RANDOM.tmp"
OAKFOAM="$WD/../../oakfoam --nobook --log $TEMPOUTPUT"
if ! test -x $WD/../../oakfoam; then
  echo "File $WD/../../oakfoam not found" >&2
  exit 1
fi
# NOTE(review): the message says one argument, but $2 (INITGAMMAS) and
# $3 (SIZE) are also read below and `set -u` aborts if they are missing —
# confirm and update the usage text / check to `$# < 3`.
if (( $# < 1 )); then
  echo "Exactly one GAME.SGF required" >&2
  exit 1
fi
GAME=$1
INITGAMMAS=$2
SIZE=$3
echo $GAME >&2
# Extract result/komi/handicap SGF properties; fall back to sentinel
# strings so `set -o pipefail` does not kill the script on a non-match.
GAMERESULT=$(cat $GAME|grep -Eo "RE\[.+\]"|| echo "noresult")
GAMEKOMI=$(cat $GAME|grep -Eo "KM\[.+\]"|| echo "nokomi")
GAMEHANDY=$(cat $GAME|grep -Eo "HA\[[^\[]+\]"|| echo "nohandy")
GAMETIME=$(cat $GAME|grep -Eo "\+Time"|| echo "notime")
# Only even games without time annotations are harvested.
if ([ "$GAMEHANDY" == "nohandy" ] && [ "$GAMETIME" == "notime" ]) then
  CMDS="param undo_enable 0\nparam cnn_data $SIZE\nparam cnn_data_predict_future 2\nloadfeaturegammas \"$INITGAMMAS\"\nloadsgf \"$GAME\""
  # Use gogui-adapter to emulate loadsgf
  echo -e $CMDS | gogui-adapter "$OAKFOAM" > /dev/null
  # grep exits 1 when nothing matches; suspend -e so an empty harvest is
  # not treated as a fatal error.
  set +e
  HARVESTED=`cat $TEMPOUTPUT | grep -E '^[0-9]+,' | wc -l`
  set -e
  if (( ${HARVESTED:-0} > 0 )); then
    # Append game result and komi to every harvested CSV row, then emit.
    cat $TEMPOUTPUT | grep -E '^[0-9]+,' | while read line; do echo "$line,$GAMERESULT,$GAMEKOMI"; done > $TEMPOUTPUT2
    cat $TEMPOUTPUT2
  fi
else
  echo "game ignored $GAMEHANDY $GAMETIME" >&2
fi
rm -f $TEMPOUTPUT $TEMPOUTPUT2
| true |
b336e8621dc2b7c520a5841ae8c730992ab6b3ae | Shell | cpc1986/docker-freeswitch | /scripts/tcpdump.sh | UTF-8 | 239 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Capture network traffic for a fixed duration.
#
#   $1 - output file for the capture (tcpdump -w)
#   $2 - capture duration (sleep argument, e.g. "30s")
#   $3 - optional tcpdump filter expression (left unquoted on purpose so a
#        multi-word filter word-splits exactly as before)
#
# tcpdump's diagnostics are parked in a temp file; if tcpdump dies within
# the first second, the diagnostics are copied into $1 and "error" is
# printed so the caller can tell the capture failed.
#
# mktemp replaces the original predictable /tmp/<timestamp> name, avoiding
# collisions between concurrent runs and /tmp symlink attacks.
log=$(mktemp) || exit 1
/usr/sbin/tcpdump -nq -s 0 -i any -w "$1" $3 > "$log" 2>&1 &
PID=$!
# Give tcpdump a moment to start (or to fail fast on a bad filter/iface).
sleep 1s
if [ -e "/proc/$PID" ]
then
    # Still alive: capture for the requested duration, then stop it.
    sleep "$2"
    kill "$PID"
else
    # Died immediately: surface its error output through the capture file.
    cat "$log" > "$1"
    echo error
fi
# Let tcpdump flush and exit before removing its log (rm -f suffices; the
# original rm -rf was overkill for a plain file).
sleep 1s
rm -f "$log"
exit 0
| true |
f8c4b9d0a5dd35a147b91d1fe98bfc48648d2e57 | Shell | alexey-larionov/rms_vcfs | /c01_preprocessing_on_mgpc64/s02_copy_to_mgqnap.sh | UTF-8 | 822 | 3.375 | 3 | [] | no_license | #!/bin/bash
# s02_copy_to_mgqnap.sh
# Started: Alexey Larionov, 10Jun2016
# Last updated: AL, 10Jun2016
# Copy pre-processed rms vcfs to mgqnap
# Stop at any error
set -e
# Start message
echo "Copy pre-processed rms vcfs to mgqnap"
echo "Started: $(date +%d%b%Y_%H:%M:%S)"
echo ""
# Folders
source1="/media/ajme/ajme/external/rms_db_vcf/rms120_vcf"
source2="/media/ajme/ajme/external/rms_db_vcf/s03_trimmed_vcfs"
source3="/media/ajme/ajme/external/rms_db_vcf/s04_filtered_vcfs"
target="admin@mgqnap.medschl.cam.ac.uk:/share/mae/rms_dbGAP"
# Copying
# rsync flags: -a archive, -v verbose, -h human-readable sizes,
# -e "ssh -x" transport over ssh with X11 forwarding disabled.
echo "Raw data"
echo ""
rsync -avhe "ssh -x" "${source1}" "${target}/"
echo "Reshaped data"
echo ""
rsync -avhe "ssh -x" "${source2}" "${target}/"
echo "Filtered data"
echo ""
rsync -avhe "ssh -x" "${source3}" "${target}/"
# Completion message
echo ""
echo "Completed: $(date +%d%b%Y_%H:%M:%S)"
| true |
90514daa960eeb229100f363ec962719245f1a56 | Shell | ttych/ansible_role_gitlab-ce | /templates/svscan/gitlab_ce_app/run | UTF-8 | 526 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# daemontools/runit-style "run" script for the GitLab CE app service.
# Values in {{ }} are Ansible/Jinja2 template placeholders filled at deploy.
# Merge stderr into stdout so the supervisor's logger captures everything.
exec 2>&1
export PATH="/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin"
APP_USER="{{ gitlab_ce_git_user }}"
APP_USER_HOME="{{ gitlab_ce_git_home }}"
APP_ROOT="{{ gitlab_ce_app_root }}"
APP_ENV=production
APP_SOCKET="$APP_ROOT/tmp/sockets/gitlab.socket"
APP_WEB_SERVER="{{ gitlab_ce_web_server }}"
set -e
# Remove a stale unix socket from a previous run before starting.
rm -f "$APP_SOCKET"
cd "$APP_ROOT"
# setuidgid (daemontools) drops privileges to the git user; exec replaces
# this shell so the supervisor tracks the app process directly.
exec setuidgid "$APP_USER" \
    env RAILS_ENV="$APP_ENV" USE_WEB_SERVER="$APP_WEB_SERVER" HOME="$APP_USER_HOME" \
    "$APP_ROOT/bin/web" start_foreground
| true |
87a817f67a0e802bd3311b244c9b643f357a7113 | Shell | thomas-mc-work/ansible-test-role | /ansible-role.sh | UTF-8 | 4,027 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env sh
# Isolated execution of an ansible role.
#
# Further reading: Ansible Configuration - Common Options
# https://docs.ansible.com/ansible/2.4/config.html
set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
vagrant_box="debian/jessie64"
data_base_dir="/var/lib/ansible-role"
# #####
# Print the CLI usage text to stdout (heredoc expands $(basename $0) at
# call time so the message always shows the actual script name).
fn_print_usage () {
    cat <<HELP
Wrapper script for ansible-playbook to apply single role.
Usage: $(basename $0) <role-path> [ansible-playbook options]
Examples:
  $(basename $0) /path/to/my_role
  $(basename $0) /path/to/roles/apache-webserver -i 'custom_host,' -vv --check
HELP
}
# ########
fn_prepare_machine () {
mkdir -p "$data_dir"
cat > "${VAGRANT_CWD}/Vagrantfile" <<END
Vagrant.configure(2) do |config|
config.vm.box = "$vagrant_box"
config.vm.network "private_network", type: "dhcp"
config.vm.define "ansible-role"
end
END
vagrant box update
# start virtual machine
vagrant up
sc=$?
if [ $sc -ne 0 ]; then
echo "failed to bring up virtual machine: ${sc}"
exit 10
fi
if ! vagrant ssh -c exit 2> /dev/null; then
echo "failed to ssh ping to the virtual machine (vagrant ssh)"
vagrant status
exit 11
fi
machine_ip=$(vagrant ssh-config | grep -i HostName | cut -d' ' -f4)
machine_ssh_port=$(vagrant ssh-config | grep -i Port | cut -d' ' -f4)
machine_private_key=$(vagrant ssh-config | grep -i IdentityFile | cut -d' ' -f4)
# create temporary ansible inventory file
cat > "$ansible_inventory" <<END
[vagrant]
$machine_ip ansible_port=${machine_ssh_port} ansible_user=vagrant ansible_ssh_private_key_file=${machine_private_key}
END
# create temporary playbook.yml
cat > "$ansible_playbook" <<END
---
- hosts: ${machine_ip}
become: yes
roles:
- ${role_name}
END
}
# ########
# Run ansible-playbook against the Vagrant box, echoing the exact command
# line first for the user's benefit.  The command is built once with
# `set --` (POSIX sh has no arrays; positional parameters are restored
# when the function returns) so the printed line and the executed line
# can never drift apart — in the original they were maintained as two
# separate copies.
fn_execute_ansible_playbook () {
    set -- ansible-playbook \
        --ssh-extra-args "-o UserKnownHostsFile=/dev/null" \
        --ssh-extra-args "-o StrictHostKeyChecking=no" \
        -i "$ansible_inventory" "$ansible_playbook"
    echo "# running ansible-playbook:"
    echo "  $*"
    "$@"
}
# ########
# Destroy the Vagrant machine and remove the per-role data directory.
# On vagrant failure, print the manual recovery commands and exit 30
# WITHOUT deleting $data_dir, so the user can retry/destroy by hand.
fn_clean_up () {
    vagrant destroy -f
    vagrant_sc=$?
    if [ $vagrant_sc -ne 0 ]; then
        echo "failed to remove virtual machine: ${vagrant_sc}"
        echo "  $ vagrant destroy -f"
        echo "  $ rm -rfv '$data_dir'"
        exit 30
    fi
    rm -rf "$data_dir"
}
mkdir -p "$data_base_dir"
## prepare cli parameters
if [ $# -lt 1 ]; then
fn_print_usage
exit 1
fi
clean_up=0
remove_on_success=0
remove_after=0
while true; do
case $1 in
-h|-\?|--help)
fn_print_usage
exit
;;
-s|--remove-on-success)
remove_on_success=1
;;
-r|--remove_after)
remove_after=1
;;
-c|--clean-up)
clean_up=1
;;
--)
shift
role_path="$(realpath $1)"
break
;;
-*)
printf "CLI error: Unmatched argument: '%s'\n" $1
fn_print_usage
exit 1
;;
*)
role_path="$(realpath $1)"
break
esac
shift
done
# verify readability of the role
if [ ! -r "$role_path" ]; then
echo "Failed to read the role: ${role_path}"
exit 2
fi
role_name=$(basename "$role_path")
role_hash=$(printf "$role_path" | md5sum | cut -f 1 -d " ")
data_dir="${data_base_dir}/${role_hash}"
ansible_inventory="${data_dir}/inventory.ini"
ansible_playbook="${data_dir}/playbook.yml"
# define vagrant working dir
export VAGRANT_CWD=$data_dir
# define the ansible roles path
export ANSIBLE_ROLES_PATH=$(dirname "$role_path")
if [ $clean_up -eq 1 ]; then
fn_clean_up
exit
fi
echo "Trying to apply role '${role_name}' ..."
if [ ! -d "$data_dir" ]; then
fn_prepare_machine
else
vagrant up
fi
fn_execute_ansible_playbook
sc=$?
if [ $remove_on_success -eq 1 -a $sc -eq 0 ] || [ $remove_after -eq 1 ]; then
fn_clean_up
fi
exit $sc | true |
32683ab31e6dc9b44aa02d57b4bd4546b65bebc6 | Shell | isaigm/leetcode | /tenth-line/Runtime Error/5-2-2020, 11_06_35 PM/Solution.bash | UTF-8 | 239 | 3.546875 | 4 | [] | no_license | // https://leetcode.com/problems/tenth-line
# Read from the file file.txt and output the tenth line to stdout.
lines=$(cat file.txt | wc -l)
if [ $lines -gt 9 ]
then
cat file.txt | head -n 10 | tail -n 1
else
echo " "
fi
| true |
051b88917adabf9802abd3f1107eb743033ec69b | Shell | chinglinwen/k8s | /install/v1.6/others-install.sh | UTF-8 | 3,530 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# others, kube-dns etc
kubedns-svc.yaml
diff kubedns-svc.yaml.base kubedns-svc.yaml
diff kubedns-controller.yaml.base kubedns-controller.yaml
# 部署kubedns pods
# 进入install_kubernetes_yaml/kube-dns的yaml目录
kubectl create -f .
# 进入install_kubernetes_yaml/dashboard的yaml目录
# 此部分使用diff对比两个配置文件的修改部分
diff dashboard-service.yaml.orig dashboard-service.yaml
# 进入install_kubernetes_yaml/dashboard的yaml目录
kubectl create -f .
kubectl cluster-info
http://172.28.40.76:8080/api/v1/proxy/namespaces/kubesystem/
services/kubernetes-dashboard
# 部署监控
# 进入install_kubernetes_yaml目录的heapster下
# 部署grafana
# 如果后续使用 kube-apiserver 或者 kubectl proxy 访问 grafana dashboard,
# +则必须将 GF_SERVER_ROOT_URL 设置为
/api/v1/proxy/namespaces/kubesystem/services/monitoring-grafana/ ,
# +否则后续访问grafana时访问时提示找不到
# +http://10.64.3.7:8086/api/v1/proxy/namespaces/kubesystem/services/monitoringgrafana/
api/dashboards/home 页面;
# value: /api/v1/proxy/namespaces/kube-system/services
# 部署heapstr
# heapster.yaml
# 在template下的spec下添加:
# serviceAccountName: heapster
# config.toml文件需要修改
# enabled = true
# influxdb 部署
# 进入install_kubernetes_yaml目录的heapster下
# 修改influxdb.yaml文件
# 在 volumeMounts添加如下:
# volumeMounts:
# - mountPath: /etc/
# name: influxdb-config
# 在 volumes 添加如下:
# volumes:
# - name: influxdb-config
# configMap:
# name: influxdb-config
# 部署ingress
# 生成私有的d.k8s.me证书,用于dashboard
# 生成 CA 自签证书
mkdir cert && cd cert
openssl genrsa -out ca-key.pem 2048
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kubeca"
# 编辑 openssl 配置
cp /etc/pki/tls/openssl.cnf .
vim openssl.cnf
# 主要修改如下
[req]
req_extensions = v3_req # 这行默认注释关着的 把注释删掉
# 下面配置是新增的
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = d.k8s.me
#DNS.2 = kibana.mritd.me
# 生成证书
openssl genrsa -out ingress-key.pem 2048
openssl req -new -key ingress-key.pem -out ingress.csr -subj "/CN=d.k8s.me" -config
openssl.cnf
openssl x509 -req -in ingress.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out
ingress.pem -days 365 -extensions v3_req -extfile openssl.cnf
# 创建secret
kubectl create secret tls d.k8s.me-secret --namespace=kube-system --key cert/ingresskey.
pem --cert cert/ingress.pem
# 安装ingress
# 进入/root/install_kubernetes_yaml/ingress
kubectl create -f .
# 访问dashboard的ingress
https://d.k8s.me
# 安装自研的watch服务
# 下载Wukong的代码到/apps/目录
# 复制ENV27的virtualENV环境到/usr/src下
# 安装supervisor
/usr/src/ENV27/bin/pip install supervisor==3.3.1
# 配置watch的supervisird
[group:wukong]
programs=wukong_watch
[program:wukong_watch]
command=/usr/src/ENV27/bin/python2.7
/apps/WuKong/src/wk_server/celery_wk/cluster_watch_nginx.py
user=root
startretries=300
autorestart=true
logfile=/apps/soft/supervisird/log/wukong/wukong_watch.log
stdout_logfile=/apps/soft/supervisird/log/wukong/wukong_watch.log
stderr_logfile=/apps/soft/supervisird/log/wukong/wukong_watch.err
# 启动
/usr/src/ENV27/bin/python2.7 /usr/src/ENV27/bin/supervisord
# 添加自启动
echo '/usr/src/ENV27/bin/python2.7 /usr/src/ENV27/bin/supervisord' >> /etc/rc.d/rc.local
| true |
47629e69e2373bd8e37163ada579792252366007 | Shell | cyberark/secrets-provider-for-k8s | /deploy/test/test_cases/TEST_ID_27_push_to_file.sh | UTF-8 | 2,236 | 3.703125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -euxo pipefail
echo "Deploying Push to file tests"
echo "Deploying test_env without CONTAINER_MODE environment variable"
export CONTAINER_MODE_KEY_VALUE=$KEY_VALUE_NOT_EXIST
echo "Running Deployment push to file"
export SECRETS_MODE="p2f"
deploy_env
echo "Expecting secrets provider to succeed as an init container"
pod_name="$(get_pod_name "$APP_NAMESPACE_NAME" 'app=test-env')"
$cli_with_timeout "get pod $pod_name --namespace=$APP_NAMESPACE_NAME | grep -c 1/1"
FILES="group1.yaml group2.json some-dotenv.env group4.bash group5.template"
declare -A expected_content
expected_content[group1.yaml]='"url": "postgresql://test-app-backend.app-test.svc.cluster.local:5432"
"username": "some-user"
"password": "7H1SiSmYp@5Sw0rd"
"encoded": "secret-value"'
expected_content[group2.json]='{"url":"postgresql://test-app-backend.app-test.svc.cluster.local:5432","username":"some-user","password":"7H1SiSmYp@5Sw0rd","still_encoded":"c2VjcmV0LXZhbHVl"}'
expected_content[some-dotenv.env]='url="postgresql://test-app-backend.app-test.svc.cluster.local:5432"
username="some-user"
password="7H1SiSmYp@5Sw0rd"'
expected_content[group4.bash]='export url="postgresql://test-app-backend.app-test.svc.cluster.local:5432"
export username="some-user"
export password="7H1SiSmYp@5Sw0rd"'
expected_content[group5.template]='username | some-user
password | 7H1SiSmYp@5Sw0rd'
declare -A file_format
file_format[group1.yaml]="yaml"
file_format[group2.json]="json"
file_format[some-dotenv.env]="dotenv"
file_format[group4.bash]="bash"
file_format[group5.template]="template"
test_failed=false
for f in $FILES; do
format="${file_format[$f]}"
echo "Checking file $f content, file format: $format"
content="$($cli_with_timeout exec "$pod_name" -c test-app -- cat /opt/secrets/conjur/"$f")"
if [ "$content" == "${expected_content[$f]}" ]; then
echo "Push to File PASSED for $format!"
else
echo "Push to File FAILED for file format $format!"
echo "Expected content:"
echo "================="
echo "${expected_content[$f]}"
echo
echo "Actual content:"
echo "==============="
echo "$content"
echo
test_failed=true
fi
done
if "$test_failed"; then
exit 1
fi
| true |
001d9f235b49ff7eef5225a8bd1912c8948920cb | Shell | mzki/erago | /scripts/credits | UTF-8 | 2,835 | 3.84375 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -eu
outputdir="build"
while getopts o: opt; do
case "$opt" in
o)
outputdir="$OPTARG"
;;
\?)
echo "Usage: credits [-o OUTPUTDIR]"
exit 1
;;
esac
done
shift $((OPTIND - 1))
mkdir -p $outputdir
# test depedency tools are available.
which go-licenses >/dev/null
which go > /dev/null
which curl > /dev/null
rm -rf ${outputdir}/credits_desktop
rm -rf ${outputdir}/credits_mobile
# remove own repository from credits or remove repository whose LICENSE is not found.
ignore_pkg="github.com/mzki/erago,github.com/golang/freetype"
go-licenses save ./app/cmd \
--ignore $ignore_pkg \
--save_path ${outputdir}/credits_desktop \
2>${outputdir}/credits_desktop_err.log
go-licenses save ./mobile/model/v2 \
--ignore $ignore_pkg \
--save_path ${outputdir}/credits_mobile \
2>${outputdir}/credits_mobile_err.log
# Additional licenses which are not found by the tool
# for desktop build.
## github.com/ebiten/internal/devicescale
target_dir=${outputdir}/credits_desktop/github.com/ebiten/internal/devicescale
mkdir -p $target_dir
cp ./app/internal/devicescale/LICENSE $target_dir/
## github.com/golang/freetype/truetype
target_dir=${outputdir}/credits_desktop/github.com/golang/freetype/truetype
mkdir -p $target_dir
cp $(go env GOPATH)/pkg/mod/github.com/golang/freetype\@v0.0.0-20170609003504-e2365dfdc4a0/LICENSE $target_dir/
cp $(go env GOPATH)/pkg/mod/github.com/golang/freetype\@v0.0.0-20170609003504-e2365dfdc4a0/licenses/ftl.txt $target_dir/
# for all platform
target_dir=${outputdir}/credits_all/Go
mkdir -p $target_dir
echo "Get Golang license"
curl -o $target_dir/LICENSE https://go.dev/LICENSE?m=text
# finally, create CREDITS files from credits_{platform}/
output_credit () {
local license_path=$1
# assumes path like ./github.com/golang/groupcache/LICENSE, get github.com/golang/groupcache
local middle_path=$(dirname ${license_path#*/})
local repo_name=${middle_path}
(
echo $repo_name
echo "================================================================================"
cat $license_path
echo "--------------------------------------------------------------------------------"
echo ""
)
}
abspath () {
# https://qiita.com/katoy/items/c0d9ff8aff59efa8fcbb
local arg=$1
local abs=$(cd $(dirname $arg) && pwd)/$(basename $arg)
echo $abs
}
target_file=$(abspath ${outputdir}/CREDITS_list_desktop)
(
pushd $outputdir/credits_all
output_credit ./Go/LICENSE > $target_file
popd
)
(
pushd ${outputdir}/credits_desktop
for p in $(find . -type f); do output_credit $p >> $target_file; done
popd
)
target_file=$(abspath ${outputdir}/CREDITS_list_mobile)
(
pushd $outputdir/credits_all
output_credit ./Go/LICENSE > $target_file
popd
)
(
pushd ${outputdir}/credits_mobile
for p in $(find . -type f); do output_credit $p >> $target_file; done
popd
) | true |
75db44e51d3b4c1fe9c515d77c8a39e17ce828b7 | Shell | suneil/lnav | /test/test_json_format.sh | UTF-8 | 609 | 2.90625 | 3 | [
"BSD-2-Clause"
] | permissive | #! /bin/bash
lnav_test="${top_builddir}/src/lnav-test"
run_test ${lnav_test} -n \
-I ${test_dir} \
${test_dir}/logfile_json.json
check_output "json log format is not working" <<EOF
2013-09-06T20:00:49.124 INFO Starting up service
2013-09-06T22:00:49.124 INFO Shutting down service
user: steve@example.com
EOF
run_test ${lnav_test} -n \
-I ${test_dir} \
-c ":goto 1" \
-c ":pipe-line-to sed -e 's/2013//g'" \
-c ":switch-to-view text" \
${test_dir}/logfile_json.json
check_output "pipe-line-to is not working" <<EOF
-09-06T22:00:49.124 INFO Shutting down service
user: steve@example.com
EOF
| true |
5c91b872de12c3b45ee8c9006e91dbb6e2532468 | Shell | jhmwalters/depthai-docs-website | /source/_static/install_dependencies.sh | UTF-8 | 3,720 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
print_action () {
green="\e[0;32m"
reset="\e[0;0m"
printf "\n$green >>$reset $*\n"
}
print_and_exec () {
print_action $*
$*
}
if [[ $(uname) == "Darwin" ]]; then
echo "During Homebrew install, certain commands need 'sudo'. Requesting access..."
sudo true
arch_cmd=
if [[ $(uname -m) == "arm64" ]]; then
arch_cmd="arch -x86_64"
echo "Running in native arm64 mode, will prefix commands with: $arch_cmd"
# Check if able to run with x86_64 emulation
retcode=0
$arch_cmd true || retcode=$?
if [[ $retcode -ne 0 ]]; then
print_action "=== Installing Rosetta 2 - Apple binary translator"
# Prompts the user to agree to license: <A> <Enter>
# Could be automated by adding: --agree-to-license
print_and_exec softwareupdate --install-rosetta
fi
fi
homebrew_install_url="https://raw.githubusercontent.com/Homebrew/install/master/install.sh"
print_action "Installing Homebrew from $homebrew_install_url"
# CI=1 will skip some interactive prompts
CI=1 $arch_cmd /bin/bash -c "$(curl -fsSL $homebrew_install_url)"
print_and_exec $arch_cmd brew install python3 git
print_and_exec python3 -m pip install -U pip
echo
echo "=== Installed successfully! IMPORTANT: For changes to take effect,"
echo "please close and reopen the terminal window, or run: exec \$SHELL"
elif [[ ! $(uname -m) =~ ^arm* ]]; then
# shellcheck source=/etc/os-release
source /etc/os-release
case "$NAME" in
Ubuntu)
sudo apt-get update
sudo apt-get install -y python3 python3-pip udev
echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules
sudo udevadm control --reload-rules && sudo udevadm trigger
# https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
# https://stackoverflow.com/questions/55313610/importerror-libgl-so-1-cannot-open-shared-object-file-no-such-file-or-directo
sudo apt-get install -y ffmpeg libsm6 libxext6 libgl1-mesa-glx
python3 -m pip install --upgrade pip
;;
*)
echo "ERROR: Distribution not supported"
;;
esac
elif [[ $(uname -m) =~ ^arm* ]]; then
sudo apt-get update
sudo apt-get install -y python3 python3-pip udev virtualenv
echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules
sudo udevadm control --reload-rules && sudo udevadm trigger
# https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
# https://stackoverflow.com/questions/55313610/importerror-libgl-so-1-cannot-open-shared-object-file-no-such-file-or-directo
sudo apt-get install -y ffmpeg libsm6 libxext6 libgl1-mesa-glx
# https://stackoverflow.com/a/53402396/5494277
sudo apt-get install -y libhdf5-dev libhdf5-dev libatlas-base-dev libjasper-dev libqtgui4 libqt4-test
# https://github.com/EdjeElectronics/TensorFlow-Object-Detection-on-the-Raspberry-Pi/issues/18#issuecomment-433953426
sudo apt-get install -y libilmbase-dev libopenexr-dev libgstreamer1.0-dev
python3 -m pip install --upgrade pip
else
echo "ERROR: Host not supported"
fi
| true |
15605e758c58969809482dcc17ac379e8c1bed44 | Shell | dendisuhubdy/Xpedite | /demo/demo.sh | UTF-8 | 2,492 | 3.890625 | 4 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
################################################################################################
#
# Xpedite demo
#
# The script starts the demo application in the background
# and attaches a profiler for collecting profile data and genearating a report.
#
# The profiler's heartbeat interval is set to 1 second, to detect and end profiling, when
# the application terminates.
#
# Author: Manikandan Dhamodharan, Morgan Stanley
#
################################################################################################
usage() {
cat << EOM
Usage: $0 [OPTION]... -- [XPEDITE OPTION]...
Run a demo for xpedite profiling.
Mandatory arguments to long options are mandatory for short options too.
-r, --remote remote host where the app runs.
-c, --cpu pin to the given cpu core
-p, --pmc collect hardware performance counters
EOM
exit 1
}
ARGS=`getopt -o r:c:p --long remote:,cpu:,pmc -- "$@"`
if [ $? -ne 0 ]; then
usage
fi
APP_LAUNCHER='eval'
eval set -- "$ARGS"
CPU=0
while true ; do
case "$1" in
-r|--remote)
APP_HOST=$2
APP_LAUNCHER="ssh $APP_HOST" ; shift 2 ;;
-c|--cpu)
CPU=$2 ; shift 2 ;;
-p|--pmc)
PMC=true; shift 1 ;;
--) shift ; break ;;
*) usage ;;
esac
done
DEMO_DIR=`dirname $0`
DEMO_DIR=`readlink -f ${DEMO_DIR}`
PROFILER=${DEMO_DIR}/../scripts/bin/xpedite
if [ ! -x ${PROFILER} ]; then
echo 'Fatal error - failed to locate xpedite profiler.' > /dev/stderr
exit 1
fi
DEMO_APP=${DEMO_DIR}/../install/bin/xpediteDemo
if [ ! -x ${DEMO_APP} ]; then
echo 'Failed to locate demo binary. Have you built xpedite ?' > /dev/stderr
exit 1
fi
if [ -d ${DEMO_DIR}/../install/runtime/bin ]; then
echo detected virtual environment. resolving python dependencies from ${DEMO_DIR}/../install/runtime
export PATH=${DEMO_DIR}/../install/runtime/bin:${PATH}
fi
LOG_DIR=`${APP_LAUNCHER} "mktemp -d"`
echo Xpedite demo log dir - $LOG_DIR
if [ ! -z ${APP_HOST} ]; then
rsync -a ${DEMO_APP} ${APP_HOST}:${LOG_DIR}/
else
ln -s ${DEMO_APP} ${LOG_DIR}/
fi
APP_NAME=`basename ${DEMO_APP}`
DEMO_APP_PID=`${APP_LAUNCHER} "cd $LOG_DIR; ./${APP_NAME} -c ${CPU} >$LOG_DIR/app.log 2>&1& echo \\\$!"`
XPEDITE_DEMO_APP_HOST=${APP_HOST:-localhost} XPEDITE_DEMO_LOG_DIR=${LOG_DIR} XPEDITE_DEMO_PMC=${PMC} XPEDITE_DEMO_CPU_SET=${CPU} ${PROFILER} record -p ${DEMO_DIR}/profileInfo.py -H 1 "$@"
if [ ! -z ${DEMO_APP_PID} ]; then
${APP_LAUNCHER} "kill ${DEMO_APP_PID}"
fi
wait
| true |
654c23d965d4667c1946ed5afaf5ec27aaec58ec | Shell | khannz/crispy-palm-tree | /t1-tunnel/bundle/rpm.preremove.sh | UTF-8 | 163 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | if [ "$1" -ge 1 ]; then
systemctl stop lbost1at.service
fi
if [ "$1" = 0 ]; then
systemctl stop lbost1at.service
systemctl disable lbost1at.service
fi
exit 0 | true |
c593a34cbf4ee69a55653ef50986858530ae00b6 | Shell | fransixles/admin-scripts | /net/bus-speed.sh | UTF-8 | 852 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Test available memory bus bandwidth using netperf
hash netperf 2>/dev/null || { echo >&2 "You need to install netperf. Aborting."; exit 1; }
#if ! source "$(dirname "$0")/../helpers/fs.sh"; then
# echo >&2 "ERROR: Failed to load $(dirname "$0")/../helpers/fs.sh"
# exit 1
#fi
if ! OUTPUT=$(netperf -T0,0 -C -c -P1); then
exit 1
fi
RESULT=$(echo "${OUTPUT}" | tail -n1)
SPEED=$(echo "${RESULT}" | awk '{ print $5 }')
echo "${OUTPUT}"
echo
# Total bandwidth - 10^6bits/s (or 1 Mb/s)
# MBPS=$(awk "BEGIN {print (${SPEED}/8)}")
# echo "Total bandwidth: ${MBPS} MB/s"
# Convert 10^6bits/s -> bps
BPS=$(awk "BEGIN {print (${SPEED}*125000)}")
# echo "Total bandwidth: $(getSizeString "${BPS}")/s"
# Gb/s = n/125000000
# GB/s = n/1250000000
GBPS=$(awk "BEGIN {print (${BPS}/125000000)}")
echo "Total bandwidth: ${GBPS} Gb/s"
exit 0
| true |
85ed7c15a9ff23ff7a094ff2dbc96c619f40569d | Shell | spyrioan/opsys2018-assignment1-2805 | /script2.sh | UTF-8 | 1,840 | 4.1875 | 4 | [] | no_license | #!/bin/bash
if [ ! -r $1 ]; then #check existance of file
exit 1;
fi
registers=$( tar xvf $1 )
IFS='
'
#Creation of dir
if [ ! -d assignments ]; then
mkdir assignments
fi
for filename in $registers
do
#Check if file .txt and readable
if [ ${filename: -4} == ".txt" -a -r $filename ]; then
content=$( cat $filename )
for line in $content
do
#Check for correct links
if [[ $line == "https"* ]]; then
#Keeps only the rep
dir_name=$( echo "$line" | cut -d'/' -f 5 | cut -d'.' -f 1 )
git clone $line "assignments/$dir_name" > /dev/null 2> /dev/null
#Checks cloning
if [ $? -eq 0 ]; then
echo "$line: Cloning OK"
else
>&2 echo "$line: Cloning FAILED"
fi
break
fi
done
fi
done
for file in assignments/*
do
registers=$(find $file -name ".git" -prune -o -print)
directories=-1
txtfiles=0
otherfiles=0
for line in $registers
do
if [ -d $line ]; then
((directories++))
elif [[ ${line: -4} == ".txt" ]]; then
((txtfiles++))
else
((otherfiles++))
fi
done
reponame=$( echo $file | cut -d'/' -f 2 )
echo "$reponame:"
echo "Number of directories: $directories"
echo "Number of txt files: $txtfiles"
echo "Number of other files: $otherfiles"
#Checks dir structure
if [ -f $file/dataA.txt -a -d $file/more -a -f $file/more/dataB.txt -a -f $file/more/dataC.txt -a $txtfiles -eq 3 -a $otherfiles -eq 0 -a $directories -eq 1 ]; then
echo "Directory structure is OK"
else
echo "Directory structure is not OK"
fi
done
rm -rf assignments
exit 0
| true |
ed5be7da300ac0aafee3a0bcefb56b31392174db | Shell | jancajthaml-openbank/ledger | /packaging/debian/ledger.postinst | UTF-8 | 1,210 | 3.40625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
if [ "$1" = configure ] && [ -f /etc/ledger/conf.d/init.conf ] ; then
ledger_server_key=$(sed -n -e 's/^.*LEDGER_SERVER_KEY=//p' /etc/ledger/conf.d/init.conf 2>/dev/null | awk '{gsub(/^ +| +$/,"")} {print $0}')
if [ -z "${ledger_server_key}" ] ; then
(>&2 echo "LEDGER_SERVER_KEY is not defined at /etc/ledger/conf.d/init.conf")
exit 1
fi
ledger_server_cert=$(sed -n -e 's/^.*LEDGER_SERVER_CERT=//p' /etc/ledger/conf.d/init.conf 2>/dev/null | awk '{gsub(/^ +| +$/,"")} {print $0}')
if [ -z "${ledger_server_cert}" ] ; then
(>&2 echo "LEDGER_SERVER_CERT is not defined at /etc/ledger/conf.d/init.conf")
exit 1
fi
if [ ! -f "${ledger_server_cert}" -o ! -f "${ledger_server_key}" ] ; then
mkdir -p $(dirname "${ledger_server_cert}") $(dirname "${ledger_server_key}")
openssl req \
-x509 \
-nodes \
-newkey rsa:2048 \
-keyout "${ledger_server_key}" \
-out "${ledger_server_cert}" \
-days 1 \
-subj "/C=CZ/ST=Czechia/L=Prague/O=OpenBanking/OU=IT/CN=localhost/emailAddress=jan.cajthaml@gmail.com"
echo "generated temporary x509 key pair ${ledger_server_key} and ${ledger_server_cert} valid for 24 hours"
fi
fi
#DEBHELPER#
| true |
c3d8d1f32fd706c14bbfb1e97aa9e46be6ed890b | Shell | nickilchen/charts | /scripts/clean-crds | UTF-8 | 1,164 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
# Reverts changes done to a chart from the prepare-crd script to go back to using one chart
if [[ -z $1 ]]; then
echo "No directory provided to revert charts-crd changes within"
exit 1
fi
f=$1
if ! [[ -d ${f}/charts-crd ]]; then
echo "Chart does not have a charts-crd/ directory to revert changes from"
exit 1
fi
# Move CRDs back into ${f}/charts/crd/ and remove ${f}/charts-crd
mkdir -p ${f}/charts/crds/
if [[ -d ${f}/charts-crd/crd-manifest ]]; then
mv ${f}/charts-crd/crd-manifest/* ${f}/charts/crds/
else
mv ${f}/charts-crd/templates/* ${f}/charts/crds/
fi
rm -rf ${f}/charts-crd
if ! [[ -d ${f}/charts ]]; then
# Main chart has already been deleted; no need to modify contents
exit 0
fi
# Remove the validate-install-${name}-crd.yaml
name=$(cat ${f}/charts/Chart.yaml | yq r - 'name')
rm ${f}/charts/templates/validate-install-crd.yaml
# Remove additional annotations added to original chart if added
if [[ "$(yq r ${f}/charts/Chart.yaml 'annotations[catalog.cattle.io/auto-install]')" == "${name}-crd=match" ]]; then
yq d -i ${f}/charts/Chart.yaml 'annotations[catalog.cattle.io/auto-install]'
fi
| true |
2bdd0c530c61beea30d37ea3edd96229d73d9128 | Shell | NOAA-SWPC/parallel-workflow | /workflow/jobs/eupd.sh | UTF-8 | 5,830 | 2.765625 | 3 | [] | no_license | #!/bin/ksh
################################################################################
# This script runs the enkf update (analysis)
# Usage: eupd.sh
# Imported variables:
# CONFIG
# CDATE
# CDUMP
# CSTEP
# Configuration variables:
# INFOLEVELANAL
# PMDLOGANAL
# FNTSFATMP
# SMIPCPTMP
# TMIPCPTMP
# DATATMP
# COMIN
# COMRS
# COMROT
# NCP
# NDATE
# ENKFUPDSH
# PBEG
# PERR
# PEND
################################################################################
set -ux
################################################################################
# Go configure
set -a;. $CONFIG;set +a
export CKSH=$(echo $CSTEP|cut -c-4)
export CKND=$(echo $CSTEP|cut -c5-)
export machine=${machine:-WCOSS}
machine=$(echo $machine|tr '[a-z]' '[A-Z]')
eval export DATA=$DATATMP
cd;rm -rf $DATA||exit 1;mkdir -p $DATA||exit 1;cd $DATA||exit 1
#chgrp ${group_name:-rstprod} $DATA
chmod ${permission:-755} $DATA
#
export BASEDIR=${BASEDIR:-/nwprod}
export EXECDIR=${EXECDIR:-$BASEDIR/exec}
export FIXDIR=${FIXDIR:-$BASEDIR/fix/fix_am}
export FIXgsm=${FIXgsm:-$FIXDIR}
export SCRDIR=${SCRDIR:-$BASEDIR/scripts}
export SHDIR=${SHDIR:-$BASEDIR/bin}
export NWPROD=${NWPROD:-$BASEDIR}
#
export PBEG=${PBEG:-$SHDIR/pbeg}
export PEND=${PEND:-$SHDIR/pend}
export PERR=${PERR:-$SHDIR/perr}
$PBEG
################################################################################
# Set other variables
export PCOP=${PCOP:-$SHDIR/pcop}
export NDATE=${NDATE:-${NWPROD}/util/exec/ndate}
export ENKFUPDSH=${ENKFUPDSH:-$SCRDIR/exglobal_enkf_update.sh.sms}
export CONVINFO=${CONVINFO:-${FIXgsm}/global_convinfo.txt}
export OZINFO=${OZINFO:-${FIXgsm}/global_ozinfo.txt}
export PCPINFO=${PCPINFO:-${FIXgsm}/global_pcpinfo.txt}
export HYBENSINFO=${HYBENSINFO:-${FIXgsm}/global_hybens_locinfo.txt}
if [ $machine = IBMP6 ] ; then
export MP_INFOLEVEL=${INFOLEVELUPD:-2}
export MP_PMDLOG=${PMDLOGANAL:-no}
export MP_SHARED_MEMORY=${MP_SHARED_MEMORY:-yes}
export MEMORY_AFFINITY=${MEMORY_AFFINITY:-MCM}
export MP_LABELIO=${MP_LABELIO:-yes}
export MP_COREFILE_FORMAT=lite
# Recommended MPI environment variable setttings from IBM
# (Appendix E, HPC Clusters Using InfiniBand on IBM Power Systems Servers)
export LAPI_DEBUG_ENABLE_AFFINITY=YES
#export LAPI_DEBUG_MTU_4K=YES
export MP_FIFO_MTU=4K
export MP_SYNC_QP=YES
export MP_SHM_ATTACH_THRESH=500000
export MP_EUIDEVELOP=min
export MP_USE_BULK_XFER=yes
export MP_BULK_MIN_MSG_SIZE=64k
export MP_RC_MAX_QP=8192
export LAPI_DEBUG_RC_DREG_THRESHOLD=1000000
export LAPI_DEBUG_QP_NOTIFICATION=no
export LAPI_DEBUG_RC_INIT_SETUP=yes
elif [ $machine = WCOSS ] ; then
export MP_EAGER_LIMIT=${MP_EAGER_LIMIT:-65536}
export MP_COREFILE_FORMAT=${MP_COREFILE_FORMAT:-lite}
# export MP_EUIDEVICE=${MP_EUIDEVICE:-sn_all}
# export MP_EUILIB=${MP_EUILIB:-us}
export MP_MPILIB=${MP_MPILIB:-mpich2}
export MP_LABELIO=${MP_LABELIO:-yes}
export MP_USE_BULK_XFER=${MP_USE_BULK_XFER:-yes}
export MPICH_ALLTOALL_THROTTLE=${MPICH_ALLTOALL_THROTTLE:-0}
export MP_COLLECTIVE_OFFLOAD=${MP_COLLECTIVE_OFFLOAD:-no}
export MP_SINGLE_THREAD=${MP_SINGLE_THREAD:-yes}
export MP_SHARED_MEMORY=${MP_SHARED_MEMORY:-yes}
export KMP_STACKSIZE=${KMP_STACKSIZE:-2048m}
export NTHREADS_ENKF=${NTHREADS_ENKF:-2}
else
export MPI_BUFS_PER_PROC=${MPI_BUFS_PER_PROC:-256}
export MPI_BUFS_PER_HOST=${MPI_BUFS_PER_HOST:-256}
export MPI_GROUP_MAX=${MPI_GROUP_MAX:-256}
export MPI_MEMMAP_OFF=${MPI_MEMMAP_OFF:-1}
fi
export PREINP=gdas1.t$(echo $CDATE|cut -c9-10)z.
export FILESTYLE=${FILESTYLEEUPD:-'C'}
export VERBOSE=YES
export CYINC=${CYINC:-06}
export GDATE=$($NDATE -$CYINC $CDATE)
export CDFNL=${CDFNL:-gdas}
export GDUMP=${GDUMP:-$CDFNL}
export COMIN=${COMIN:-$COMROT}
export COMOUT=${COMOUT:-$COMROT}
export SMOOTH_ENKF=${SMOOTH_ENKF:-"NO"}
################################################################################
# Define variables for input files
export GBIAS=${GBIAS:-${COMIN}/biascr.$GDUMP.$GDATE}
export GBIASE=${GBIASE:-$COMIN/biascr_int_${CDATE}_ensmean}
export GBIASe=${GBIASe:-$GBIASE}
export GSATANG=${GSATANG:-${COMIN}/satang.$GDUMP.$GDATE}
export SIGGES=${SIGGES:-${COMIN}/sfg_${GDATE}_fhr06_ensmean}
export SFCGES=${SFCGES:-${COMIN}/bfg_${GDATE}_fhr06_ensmean}
ENKF_SUFFIX=""
if [[ "$SMOOTH_ENKF" = "YES" ]]; then
ENKF_SUFFIX="s"
fi
export SIGGESENS=${SIGGESENS:-${COMIN}/sfg_${GDATE}_fhr06${ENKF_SUFFIX}}
export CNVSTAT=${CNVSTAT:-${COMIN}/cnvstat_$CDATE}
export OZNSTAT=${OZNSTAT:-${COMIN}/oznstat_$CDATE}
export RADSTAT=${RADSTAT:-${COMIN}/radstat_$CDATE}
################################################################################
# Make use of updated angle dependent bias file, if it exists.
if [[ -s $GSATANG ]]; then
export SATANGL=$GSATANG
fi
################################################################################
# Set output data
export ENKFSTAT=${ENKFSTAT:-${COMOUT}/enkfstat_$CDATE}
export SIGANLENS=${SIGANLENS:-${COMOUT}/sanl_$CDATE}
################################################################################
# Run enkf update
export JCAP=${JCAP_ENKF:-254}
export LEVS=${LEVS_ENKF:-64}
export LONB=${LONB_ENKF:-768}
export LATB=${LATB_ENKF:-384}
export LONA=${LONA_ENKF:-512}
export LATA=${LATA_ENKF:-256}
export PGMOUT=stdout
export PGMERR=stderr
$ENKFUPDSH
rc=$?
if [[ $rc -ne 0 ]];then $PERR;exit 1;fi
cat $PGMOUT
cat $PGMERR
################################################################################
# Copy out restart and output files
##$PCOP $CDATE/$CDUMP/$CSTEP/ROTO $DATA $COMROT <$RLIST
##rc=$?
##$PCOP $CDATE/$CDUMP/$CSTEP/OPTO $DATA $COMROT <$RLIST
################################################################################
# Exit gracefully
if [[ $rc -ne 0 ]];then $PERR;exit 1;fi
if [ ${KEEPDATA:-NO} != YES ] ; then rm -rf $DATA ; fi
$PEND
| true |
2dc660dc5197d00ece4b41977ef2153d0f38216b | Shell | atweiden/voidpkgs | /srcpkgs/pkgconf/template | UTF-8 | 1,209 | 2.609375 | 3 | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | maintainer="nox"
pkgname="pkgconf"
version=1.9.3
revision=1
short_desc="Provides compiler and linker configuration"
checkdepends="kyua"
homepage="http://pkgconf.org/"
license="MIT"
changelog="https://github.com/pkgconf/pkgconf/raw/master/NEWS"
distfiles="https://distfiles.ariadne.space/pkgconf/$pkgname-$version.tar.xz"
checksum="5fb355b487d54fb6d341e4f18d4e2f7e813a6622cf03a9e87affa6a40565699d"
# cmake and meson also available
build_style="gnu-configure"
bootstrap="yes"
alternatives=" pkg-config:pkg-config:/usr/bin/pkgconf"
alternatives=" pkg-config:pkg-config.1:/usr/share/man/man1/pkgconf.1"
alternatives=" pkg-config:pkg.m4:/usr/share/aclocal/pkg.m4.pkgconf"
post_install() {
vlicense COPYING
# suffix file that conflicts with pkg-config
mv "$DESTDIR/usr/share/aclocal"/pkg.m4{,.pkgconf}
}
libpkgconf_package() {
short_desc+=" - runtime library"
pkg_install() {
vmove "usr/lib/*.so.*"
}
}
pkgconf-devel_package() {
depends="libpkgconf>=${version}_$revision"
short_desc+=" - development files"
pkg_install() {
vmove usr/include
vmove usr/lib/pkgconfig
vmove "usr/lib/*.a"
vmove "usr/lib/*.so"
}
}
# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true |
5abaf2f162b7cadf63bdc2c3179d968d549c846c | Shell | delkyd/alfheim_linux-PKGBUILDS | /rapidlauncher-git/PKGBUILD | UTF-8 | 866 | 2.765625 | 3 | [] | no_license | # Maintainer: Black_Codec <orso.f.regna@gmail.com>
pkgname=rapidlauncher-git
_pkgname=RapidLauncher
pkgrel=3
pkgver=r95.44bc2f9
pkgdesc="Rapid Launcher is an application launcher that is supposed to be fast and relatively lightweight."
arch=('i686' 'x86_64')
url="https://github.com/echo-devim/RapidLauncher"
license=('GPL')
depends=('gtk3' 'vala')
provides=('rapidlauncher')
conflicts=('rapidlauncher')
source=("git+https://github.com/echo-devim/$_pkgname.git")
md5sums=('SKIP')
pkgver() {
cd "$srcdir/$_pkgname"
( set -o pipefail
git describe --long 2>/dev/null | sed 's/v//;s/\([^-]*-g\)/r\1/;s/-/./g' ||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
)
}
build() {
cd "$srcdir/$_pkgname"
sed -i 's/\"\/usr/\"\$\(DESTDIR\)\/usr/g' misc/Makefile
make
}
package() {
cd "$srcdir/$_pkgname"
make DESTDIR="$pkgdir" install
}
| true |
22119aac12e90e375efb525dfea858cbd31de4c1 | Shell | bubble0601/dotfiles | /setup.sh | UTF-8 | 118 | 2.578125 | 3 | [] | no_license | TARGETS=(.vimrc .vim .zshrc .latexmkrc)
for tgt in ${TARGETS[@]}
do
ln -fnsv $HOME/dotfiles/$tgt $HOME/$tgt
done
| true |
2e1b439645b7a72fb6748d73a58983ae3767e7ed | Shell | jan-kaspar/analysis_elastic.6500GeV.beta90_2018.4sigma | /input/make_input_files | UTF-8 | 693 | 3.15625 | 3 | [] | no_license | #!/bin/bash
function GetOne()
{
	# Generate <output_dir>/input_files.h: a C++ header declaring
	# input_files plus an InitInputFiles() that push_backs one EOS URL
	# per matching reconstruction file.
	#   $1 base EOS directory, $2 dataset name, $3 trigger bit,
	#   $4 output directory (created if missing)
	local base_dir="$1"
	local ds="$2"
	local bit="$3"
	local output_dir="$4"

	local output_file="$output_dir/input_files.h"

	mkdir -p "$output_dir"
	# The output is a plain file: -f is sufficient (was rm -rf).
	rm -f "$output_file"

	(
		echo "#include <string>"
		echo "#include <vector>"
		echo ""
		echo "std::vector<std::string> input_files;"
		echo ""
		echo "void InitInputFiles()"
		echo "{"
		echo "	input_files.clear();"
		echo ""

		# $(...) instead of backticks; quote the expansion targets.
		for f in $(eos ls "$base_dir" | grep "_filter_${ds}_bit${bit}.root")
		do
			echo "	input_files.push_back(\"root://eostotem.cern.ch/$base_dir/$f\");"
		done

		echo "}"
	) > "$output_file"
}
GetOne "/eos/totem/user/j/jkaspar/reco/2018_90m/version9" "TOTEM1" "347" "../DS-TOTEM1-347"
| true |
510bae39b376d32b0f6fe5b792ef527b30fa0202 | Shell | micodls/CodeCleaner | /wei_martin_may_sedTest.sh | UTF-8 | 1,461 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# In-place C++ source clean-up: every sed below edits the file given as
# $1 directly (-i).
# NOTE(review): the `| echo <message>` tails do NOT consume sed output
# (sed -i writes to the file, printing nothing); the message is emitted
# unconditionally and the pipeline hides sed's exit status, so a failing
# step still reports success.
sed -i -e 's/\r//g' $1 | echo Changed line endings
sed -i -e 's/include "\(.*\)"/include <\1>/g' $1 | echo Corrected headers
sed -i -e '/^$/N;/^\n$/D' $1 | echo Removed extra blank lines
sed -i -e 's/[\t ]\{1,10\}$//g' $1 | echo Removed trailing tabs and spaces
sed -i -e '/(/{N;N;s/(\(.*\)\n\{2,3\}\t\{1,5\});/( \1 );/g}' $1 | echo Removed extra new lines inside parenthesis
sed -i -e 's/\(.\)[ \t]\{0,10\}([ \t]\{0,10\}\([^)n].*\))[ \t]\{0,10\}\([;$]\{0,1\}\)/\1(\2)\3/g' -e 's/[ \t]\{1,10\})/)/g' $1 | echo Fixed spacing inside parenthesis
# The `12,$` addresses skip the first 11 lines (presumably the file's
# header/include block) so those lines are left untouched.
sed -i -e '12,$s/string\([^_".]\)/std::string\1/g' -e '12,$s/\t\([mlc][aion].*[pt]\)/\tstd::\1/g' -e '12,$s/shared_ptr/std::shared_ptr/g' -e 's/endl/std::endl/g' $1 | echo Appended std to standard types
sed -i -e 's/\([;,]\)\([^ ]\)/\1 \2/g' $1 | echo Added space between commas and characters
sed -i -e '12,$s/\([^ ]\)\([=]\{1,2\}\)\([^ ]\)/\1 \2 \3/' $1 | echo Added space before and after =
sed -i -e '12,$s/[ ]</</g' -e '12,$s/>>/> >/g' $1 | echo Fixed spaces around brackets
sed -i -e 's/\(.\)\([+-]\{2\}\)/\2\1/g' $1 | echo Postfix to prefix
sed -i -e 's/[ ]\{0,2\}\([<=<]\{2\}\)[ ]\{0,2\}/ \1 /g' $1 | echo Fixedspacing around comparator operators
sed -i -e 's/\([\t ]\)\([fi][of].*\)[(]/\1\2 (/g' $1 | echo Fixed space after for and if
sed -i -e '/}$/{N;s/}\n$/}/}' -e '/^$/{N;s/\n\(\t\{1,5\}\)}/\1}/}' $1 | echo Fixed spacing between { and }
sed -i -e '/);$/{N;N;s/);\n\n\([\t][^/]\)/);\n\1/}' $1
856d2705e5933830ee58dda50bd7ac958baff500 | Shell | sowmiyamuthuraman/nexus-operator | /hack/install.sh | UTF-8 | 1,557 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2020 Nexus Operator and/or its authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The git command will fetch the most recent tag across all branches
# (i.e. the latest release tag, which the checkout below switches to).
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
NAMESPACE=nexus
echo "[INFO] The repository will be checked out at the latest release"
echo "....... Checkout code at ${LATEST_TAG} ......"
git checkout tags/${LATEST_TAG}
# Install order: namespace, CRD, RBAC (role, binding, service account),
# then the operator deployment itself.
echo "....... Creating namespace ......."
kubectl create namespace ${NAMESPACE}
echo "....... Applying CRDS ......."
kubectl apply -f deploy/crds/apps.m88i.io_nexus_crd.yaml
echo "....... Applying Rules and Service Account ......."
kubectl apply -f deploy/role.yaml -n ${NAMESPACE}
kubectl apply -f deploy/role_binding.yaml -n ${NAMESPACE}
kubectl apply -f deploy/service_account.yaml -n ${NAMESPACE}
echo "....... Applying Nexus Operator ......."
kubectl apply -f deploy/operator.yaml -n ${NAMESPACE}
# Finally create an example Nexus 3.x instance for the operator to manage.
echo "....... Creating the Nexus 3.x Server ......."
kubectl apply -f examples/nexus3-centos-no-volume.yaml -n ${NAMESPACE}
| true |
459d5add396934f05cd54ddaed1a8506f01633bb | Shell | MoonLory/monitoring | /day5/install_server.sh | UTF-8 | 1,934 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Provision a monitoring host: Prometheus server, node_exporter,
# Grafana and blackbox_exporter. systemd unit files and the grafana
# yum repo are expected under /tmp/config.

# --- Prometheus server ---
cd /opt
sudo wget https://github.com/prometheus/prometheus/releases/download/v2.26.0/prometheus-2.26.0.linux-amd64.tar.gz
sudo tar -xzf prometheus-2.26.0.linux-amd64.tar.gz
# Dedicated no-login service accounts.
sudo useradd --no-create-home --shell /usr/sbin/nologin prometheus
sudo useradd --no-create-home --shell /bin/false node_exporter
sudo chmod 755 prometheus-2.26.0.linux-amd64 -R
sudo chown prometheus:prometheus prometheus-2.26.0.linux-amd64 -R
sudo cp /tmp/config/prometheus.service /etc/systemd/system/prometheus.service

# --- node_exporter ---
wget https://github.com/prometheus/node_exporter/releases/download/v0.16.0/node_exporter-0.16.0.linux-amd64.tar.gz
tar xvf node_exporter-0.16.0.linux-amd64.tar.gz
sudo cp node_exporter-0.16.0.linux-amd64/node_exporter /usr/local/bin
sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
rm -rf node_exporter-0.16.0.linux-amd64.tar.gz node_exporter-0.16.0.linux-amd64
sudo cp /tmp/config/node_exporter.service /etc/systemd/system/node_exporter.service
sudo systemctl daemon-reload
sudo systemctl start node_exporter
sudo systemctl daemon-reload
sudo systemctl start prometheus.service
sudo systemctl enable prometheus.service

# --- Grafana ---
sudo cp /tmp/config/grafana.repo /etc/yum.repos.d/grafana.repo
sudo yum install grafana -y
sudo systemctl start grafana-server

# --- blackbox_exporter ---
cd /opt
sudo wget https://github.com/prometheus/blackbox_exporter/releases/download/v0.18.0/blackbox_exporter-0.18.0.linux-amd64.tar.gz
sudo tar -xzf blackbox_exporter-0.18.0.linux-amd64.tar.gz
# NOTE(review): this chowns the tarball, not the extracted directory --
# confirm whether the directory was the intended target.
sudo chown -R prometheus:prometheus blackbox_exporter-0.18.0.linux-amd64.tar.gz
sudo cp /tmp/config/blackbox.service /etc/systemd/system/blackbox.service
sudo systemctl daemon-reload
sudo systemctl start blackbox.service

# Register the blackbox scrape job in the Prometheus config.
# BUG FIX: `sudo cat << EOF >> file` performed the append in the
# invoking (unprivileged) shell, so it failed on the root-owned config
# file extracted above; `sudo tee -a` does the write with privileges.
cat << EOF | sudo tee -a /opt/prometheus-2.26.0.linux-amd64/prometheus.yml > /dev/null
  - job_name: 'blackbox'
    static_configs:
    - targets: ['localhost:9115'] # The blackbox exporter's real hostname:port.
EOF
sudo systemctl restart prometheus.service | true |
e4d8aa9a6039f07e8e663feefa855384bed6e119 | Shell | PGM-Lab/pythoncourse-I | /scripts/start.bash | UTF-8 | 402 | 3.140625 | 3 | [] | no_license | #!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
cd ..
if [[ $(python3.6 --version) ]];then
pbin=$(which python3.6)
elif [[ $(python3 --version) ]];then
pbin=$(which python3)
else
pbin=$(which python)
fi
if [[ ! -d venv ]]; then
$pbin -m venv venv
fi
source venv/bin/activate
python -m pip install -r scripts/requirements.txt
python -m jupyter notebook
| true |
c9ce1336ddd0599f4f457aa1df7d94407ea2e884 | Shell | Ednar/hannah | /start-hannah.sh | UTF-8 | 470 | 3.265625 | 3 | [] | no_license | #!/bin/sh
if [[ "$#" != 1 ]]; then
echo "Du måste ange IP adress till main container" >&2
exit 1
fi
HOST=$1
AGENT="hannah"
AGENT_CLASS="hannah.ConcurrentMoodHannah"
printf "Machine %s" "$AGENT" | figlet
echo "Attempting to start agent with settings:"
printf "Host: \t%s\nAgent: \t%s\nPath: \t%s\n" "$HOST" "$AGENT" "$AGENT_CLASS"
mvn compile exec:java \
-Dexec.mainClass=jade.Boot \
-Dexec.args="-container -host $HOST -agents $AGENT:$AGENT_CLASS"
| true |
d1074360842621eb347fdfc63c1a0b9e01205b4c | Shell | kntate/openshift-hw | /scripts/steps/setup-repos.sh | UTF-8 | 1,467 | 2.90625 | 3 | [] | no_license | source properties.sh
# Add repos
# Write a yum .repo file pointing every RHEL 7 channel at the shared
# mirror. NOTE: gpgcheck=0 -- packages are not signature-verified.
export OWN_REPO_PATH=https://admin.shared.example.opentlc.com/repos/ocp/3.5
cat << EOF > /etc/yum.repos.d/open.repo
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl=${OWN_REPO_PATH}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl=${OWN_REPO_PATH}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl=${OWN_REPO_PATH}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl=${OWN_REPO_PATH}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux 7 Fast Datapath
baseurl=${OWN_REPO_PATH}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
EOF
# Add the OpenShift Container Platform repository mirror
cat << EOF >> /etc/yum.repos.d/open.repo
[rhel-7-server-ose-3.5-rpms]
name=Red Hat Enterprise Linux 7 OSE 3.5
baseurl=${OWN_REPO_PATH}/rhel-7-server-ose-3.5-rpms
enabled=1
gpgcheck=0
EOF
# Clean up repos
yum clean all ; yum repolist
# loop through all nodes
# all_nodes is expected to be defined by properties.sh (sourced at the
# top of this script): push the repo file to every node and disable the
# stock redhat.repo there.
for node in "${all_nodes[@]}"
do
  echo Copying open repos to $node
  scp /etc/yum.repos.d/open.repo ${node}:/etc/yum.repos.d/open.repo
  ssh ${node} 'mv /etc/yum.repos.d/redhat.{repo,disabled}'
  ssh ${node} yum clean all
  ssh ${node} yum repolist
done
| true |
07f674bb8db29f504417222ef406ccffc1210de2 | Shell | srirambtechit/linux | /shell-scripting-basics/variables/special-variables.sh | UTF-8 | 1,090 | 3.65625 | 4 | [] | no_license | #!/bin/sh
# To run this script
# ./special-variables.sh arg1 arg2 arg3 arg4 arg5 arg6 arg7 arg8 arg9
# Following variables are special purpose variables
echo "Script file name : " $0 # returns current running script file name
echo "Running process ID : " $$ # returns currently running script's process id
echo "String of Cmd args : " $* # returns full command line arguments as a string
echo # for new line
# Command line argument
echo "First argument : " $1 # 1st input parameter
echo "Second argument : " $2 # 2nd input parameter
echo "Third argument : " $3 # 3rd input parameter
echo "Fourth argument : " $4 # 4th input parameter
echo "Fifth argument : " $5 # 5th input parameter
echo "Sixth argument : " $6 # 6th input parameter
echo "Seventh argument : " $7 # 7th input parameter
echo "Eighth argument : " $8 # 8th input parameter
echo "Ninth argument : " $9 # 9th input parameter
echo "Number of arg : " $# # Number of input parameter
echo "Full param string: " $@ # Returns full argument as a single string
| true |
f1a12c37df46bd352ac78de4134af9d4661f1e10 | Shell | comepradz/sandbox | /sh/md5scanner.sh | UTF-8 | 178 | 2.6875 | 3 | [] | no_license | #/bin/bash
$MD5SUM = "25ce9982b3fc6e957bcfcebc66aa8605"
find . -type f -exec md5sum {} + | grep $MD5SUM > md5scanner.tmp
rm -rf `cat md5scanner.tmp | gawk -F" " '{ print $2 }'`
| true |
07c39fd44b17c48b8e62470dbcd2a4b2ff4a96c0 | Shell | KoShimizuV/learnings | /java/generics/format.sh | UTF-8 | 76 | 2.671875 | 3 | [] | no_license | #!/bin/sh
# Print each Java source file's name followed by its contents with every
# line indented (POSIX sh).
# Iterate the glob directly instead of parsing `ls` output, which broke
# on filenames containing whitespace.
for f in *.java
do
	# No matches: the glob stays literal, so skip it (the original
	# `ls *.java` printed an error in that case).
	[ -e "$f" ] || continue
	echo "$f"
	# sed reads the file itself; the `cat |` stage was redundant.
	sed "s/^/    /g" "$f"
done
| true |
fc03d02f7bca10fc0078434ec7631135a8dcd2c4 | Shell | pyhero/nagiosauto | /init/conf/libexec-panda/check_cert.sh | UTF-8 | 722 | 3.796875 | 4 | [] | no_license | #!/bin/bash
#
dohelp () {
	# Print the usage line (the "Useage" spelling is kept verbatim --
	# it is the script's user-visible output) and leave with status 3,
	# consistent with the plugin's 0/2 exit codes elsewhere.
	printf '%s\n' "Useage:$0 -H HOSTNAME -c critical"
	exit 3
}
# Need at least two arguments (e.g. "-H host"); otherwise show usage.
[ $# -lt 2 ] && dohelp
chk_cer () {
	# Query the TLS certificate served on $sername:443 and report how
	# many days it has left. Exits 2 (critical) when at or below $cri
	# days, 0 otherwise. Reads globals: sername (host), cri (days).
	# Extract the PEM block from the handshake and pull its expiry.
	end_day=$(openssl s_client -servername "$sername" -connect "$sername:443" </dev/null 2>/dev/null | \
	sed -n '/-BEGIN CERTIFICATE/,/END CER/p' | \
	openssl x509 -text 2>/dev/null | \
	sed -n 's/ *Not After : *//p')
	today=$(date "+%s")
	# NOTE: date -d is GNU-specific.
	endday=$(date "+%s" -d "$end_day")
	# Days remaining; $(( )) replaces the obsolete $[ ] arithmetic form.
	last=$(( (endday - today) / 3600 / 24 ))
	msg="$sername cer last $last days."
	if [ "$last" -le "$cri" ]; then
		echo "$msg"
		exit 2
	else
		echo "$msg"
		exit 0
	fi
}
# Parse -H <hostname> and -c <critical-days>, then run the check.
# NOTE(review): "H::"/"c::" looks like GNU optional-argument syntax,
# which bash getopts does not implement; in practice this behaves like
# "H:c:h" (both options require an argument) -- confirm before changing.
while getopts H::c::h arg; do
	case $arg in
	H)
		sername=$OPTARG
		;;
	c)
		cri=$OPTARG
		;;
	h)
		dohelp
		;;
	*)
		dohelp
	esac
done

# Default critical threshold: 30 days ([ -z ] replaces the fragile
# unquoted `[ ! $cri ]`).
[ -z "$cri" ] && cri="30"
chk_cer
| true |
d9d2e120db11213362f40aa988cf74481f4f4648 | Shell | 8bitjoey/NestDFUAttack | /Dev/root/etc/init.d/umountfs | UTF-8 | 2,534 | 4.0625 | 4 | [] | no_license | #!/bin/sh
#
# Copyright (c) 2010 Nest Labs, Inc.
# All rights reserved.
#
# Description:
# This file implements a script to handles unmounting of all
# non-networked and non-root filesystems on system shutdown or
# restart.
#
# Source common library
. /etc/init.d/functions
unmount_fs () {
	# $1   -> description used in the progress message
	# $2   -> extra option string for the unmount command
	# $3.. -> the mount points themselves (list may be empty)
	# ${ECHO}/${UNMOUNT} are presumably provided by
	# /etc/init.d/functions, sourced at the top of this script.
	local what="${1}"
	local opts="${2}"
	shift 2
	local targets=${*}

	# Nothing collected for this group: silently do nothing.
	[ -n "${targets}" ] || return 0

	${ECHO} "Unmounting ${what} filesystems:\c"
	# Deliberately unquoted: both the option string and the
	# space-separated mount point list must word-split into
	# individual arguments here.
	${UNMOUNT} ${opts} ${targets}
	${ECHO} " done."
}
stop_fs () {
    # Partition every currently mounted filesystem into three lists
    # (tmpfs, mounts backed by a protected device, other local mounts)
    # and unmount each group with progressively stronger options.
    # Save standard input and redirect it from the current list of
    # mounted file systems. (fd 9 preserves the original stdin, and the
    # while loop below runs in this shell -- not a pipeline subshell --
    # so its variable assignments persist.)
    exec 9<&0 < ${ROOTDIR}proc/mounts
    # Everything up to and including the root (/) mount entry; these
    # entries are never unmounted here.
    PROTECTED_MOUNTS="$(${SED} -n '0,/^\/[^ ]* \/ /p' ${ROOTDIR}proc/mounts)"
    NONCORE_MOUNTPOINTS=""
    LOCAL_MOUNTPOINTS=""
    TMPFS_MOUNTPOINTS=""
    # Split them by device, mount point, type and the remainder and
    # filter them, mount-by-mount.
    while read -r DEVICE MOUNTPOINT TYPE REMAINDER
    do
        # Filter out the root file system mount point, that's handled
        # elsewhere.
        echo "${PROTECTED_MOUNTS}" | ${GREP} -qs "^${DEVICE} ${MOUNTPOINT} " && continue
        # Filter out filesystems by mount point
        # (patterns deliberately unquoted: ${ROOTDIR}proc/* must glob).
        case "${MOUNTPOINT}" in
            ${ROOTDIR}|${ROOTDIR}proc|${ROOTDIR}dev|${ROOTDIR}.dev|${ROOTDIR}dev/pts|${ROOTDIR}dev/shm|${ROOTDIR}dev/.static/dev|${ROOTDIR}proc/*|${ROOTDIR}sys|${LIBDIR}/init/rw)
                continue
                ;;
            ${VARDIR}/run)
                continue
                ;;
            ${VARDIR}/lock)
                continue
                ;;
        esac
        # Filter out filesystems by type
        case "${TYPE}" in
            proc|procfs|linprocfs|devfs|devtmpfs|sysfs|usbfs|usbdevfs|devpts)
                # Kernel pseudo-filesystems: nothing to unmount.
                continue
                ;;
            tmpfs)
                TMPFS_MOUNTPOINTS="${MOUNTPOINT} ${TMPFS_MOUNTPOINTS}"
                ;;
            *)
                # Mounts whose device also backs a protected mount are
                # "non-core"; everything else is a plain local mount.
                if echo "$PROTECTED_MOUNTS" | ${GREP} -qs "^${DEVICE} "; then
                    NONCORE_MOUNTPOINTS="${MOUNTPOINT} ${NONCORE_MOUNTPOINTS}"
                else
                    LOCAL_MOUNTPOINTS="${MOUNTPOINT} ${LOCAL_MOUNTPOINTS}"
                fi
                ;;
        esac
    done
    # Restore standard input
    exec 0<&9 9<&-
    # Unmount non-essential temporary (RAM-based) file systems
    unmount_fs "temporary" "" ${TMPFS_MOUNTPOINTS}
    # Unmount non-essential other file systems (debufs, etc.)
    unmount_fs "non-essential" "-r -d" ${NONCORE_MOUNTPOINTS}
    # Unmount local file systems
    unmount_fs "non-root local" "-f -r -d" ${LOCAL_MOUNTPOINTS}
}
case "${1}" in
'start')
;;
'stop')
stop_fs
;;
*)
echo "Usage: ${0} <start | stop>"
exit 1
;;
esac
| true |
98e142fd83f9485665c7e93ab9bcad6c8f632731 | Shell | Buzzglo/Easy-Release-Notes-V1 | /releasenotes.sh | UTF-8 | 669 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Written by: Bilal Jooma
# https://git-scm.com/docs/git-log
# For every repository checked out under the current directory, append
# its commits since $1 (a git-log --since value) to a timestamped file.
# BUG FIX: FILENAME contains a space ("%F %T"), so every use must be
# quoted -- the original's unquoted uses made `touch` create two files
# and turned each `>> $FILENAME` into an "ambiguous redirect" error.
FILENAME='test_'$(date +"%F %T")
echo "$FILENAME"
touch "$FILENAME"

for d in */ ; do
	cd "$d" || continue
	# Current branch = the line `git branch` marks with '*'.
	BRANCH=$( git branch | grep \* | cut -d ' ' -f2 )
	# The notes file lives in the parent directory (we cd'd into $d).
	echo "##== Commits for : $d ==##" >> "../$FILENAME"
	echo >> ../releaseNotesAll.txt
	if [ "$BRANCH" != "master" ]
	then
		echo "Only supported for Master Branch: $d" >> "../$FILENAME"
	else
		git log --pretty=format:"%h - %an, %ar : %s" --since="${1}" >> "../$FILENAME"
	#	git log --pretty='{ hash: "%h", commit: "%s", author: "%an", date: "%aD"}' --since${1} >> ../releaseNotesAll.txt
		echo >> "../$FILENAME"
	fi
	cd ..
done
5f575d608291cac4f953cbb6420f9346183afba8 | Shell | Travmatth/nm-otool | /test/organize.sh | UTF-8 | 229 | 3.078125 | 3 | [] | no_license | #!/bin/bash
for f in $(find artifacts/archive)
do
string=$(file $f);
if [[ $string =~ "x86_64" ]]
then
echo $f >> x86_64.txt
elif [[ $string =~ "i386" ]]
then
echo $f >> i386.txt
else
echo $f >> unknown.txt
fi
done
| true |
3c6bf1214cb02c8342390e797624f18ca9359609 | Shell | zz7a5pe4/xeven | /startup_2.sh | UTF-8 | 8,216 | 3.3125 | 3 | [] | no_license | #!/bin/bash -x
#set -e
function trackme
{
#$CURWD/notify_status.py "log" "$@"
$@
local EXIT_CODE=$?
return $EXIT_CODE
}
if [ $# == 0 ]; then
echo "you must tell me which nic you use: $0 eth0 or $0 wlan0"
exit -1
fi
export INTERFACE=$1
source ./addrc
echo ${HOSTADDR:?"empty host addr"}
echo ${MASKADDR:?"empty mask addr"}
echo ${GATEWAY:?"empty gateway"}
echo ${NETWORK:?"empty network"}
echo ${BRDADDR:?"empty broad addr"}
export X7WORKDIR=`pwd`
CONFDIR=$X7WORKDIR/conf
CURWD=$X7WORKDIR
MYID=`whoami`
# ssh id/user setup
if [ ! -f /home/$MYID/.ssh/id_rsa ]; then
ssh-keygen -b 1024 -t rsa -P "" -f /home/$MYID/.ssh/id_rsa
fi
cat /home/$MYID/.ssh/id_rsa.pub >> /home/$MYID/.ssh/authorized_keys
cd /home/$MYID
tar czf $CURWD/ssh.tar.gz .ssh/
# ntp server
#trackme sudo apt-get install -y --force-yes ntp
grep "server 127.127.1.0" /etc/ntp.conf > /dev/null && true
if [ "$?" -ne "0" ]; then
#trackme echo "configure ntp"
sudo sed -i 's/server ntp.ubuntu.com/serverntp.ubuntu.com\nserver 127.127.1.0\nfudge 127.127.1.0 stratum 10/g' /etc/ntp.conf
sudo service ntp restart
fi
# prepare for pxe installation
#trackme sudo apt-get install -y --force-yes fai-quickstart
# dhcp config
cp -f $CONFDIR/etc/dhcp/dhcpd.conf.template $CONFDIR/etc/dhcp/dhcpd.conf
sed -i "s|%NETADDR%|$NETWORK|g" $CONFDIR/etc/dhcp/dhcpd.conf
sed -i "s|%MASKADDR%|$MASKADDR|g" $CONFDIR/etc/dhcp/dhcpd.conf
sed -i "s|%GATEWAY%|$GATEWAY|g" $CONFDIR/etc/dhcp/dhcpd.conf
sed -i "s|%HOSTADDR%|$HOSTADDR|g" $CONFDIR/etc/dhcp/dhcpd.conf
sudo cp -f $CONFDIR/etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd.conf
trackme sudo /etc/init.d/isc-dhcp-server restart
# tftp config
cp -f $CONFDIR/etc/default/tftpd-hpa.template $CONFDIR/etc/default/tftpd-hpa
trackme sudo cp -f $CONFDIR/etc/default/tftpd-hpa /etc/default/tftpd-hpa
trackme sudo /etc/init.d/tftpd-hpa restart
# vai ubuntu auto-installation setup
cp -f $CONFDIR/srv/tftp/vai/ubuntu-installer/amd64/boot-screens/txt.cfg.template $CONFDIR/srv/tftp/vai/ubuntu-installer/amd64/boot-screens/txt.cfg
sed -i "s|%HOSTADDR%|$HOSTADDR|g" $CONFDIR/srv/tftp/vai/ubuntu-installer/amd64/boot-screens/txt.cfg
sudo mkdir -p /srv/tftp
sudo rm -rf /srv/tftp/*
sudo cp -rf $CONFDIR/srv/tftp/vai /srv/tftp/
# preseed
cp -f $CURWD/www/preseed.cfg.template $CURWD/www/preseed.cfg
sed -i "s|%HOSTADDR%|$HOSTADDR|g" $CURWD/www/preseed.cfg
cp -f $CURWD/www/preseed.cfg $CURWD/ubuntu_repo
# dns setup/config
##TODO##
# hosts config
cp $CONFDIR/etc/hosts.template $CONFDIR/etc/hosts
sed -i "s|%HOSTADDR%|$HOSTADDR|g" $CONFDIR/etc/hosts
sed -i "s|%HOSTNAME%|$HOSTNAME|g" $CONFDIR/etc/hosts
sudo cp -rf $CONFDIR/etc/hosts /etc/hosts
# nfs path prepare
sudo mkdir -p /srv/instances
sudo chmod 777 /srv/instances
if [ -n "$SSDDEV" ];then
#(a && b) or c
[ -b $SSDDEV ] && sudo mount $SSDDEV /srv/instances || echo "$SSD isn't mounted"
fi
grep "/srv/instances $HOSTADDR/24" /etc/exports > /dev/null && true
if [ "$?" -ne "0" ]; then
echo "/srv/instances $HOSTADDR/24(async,rw,fsid=0,no_subtree_check,no_root_squash)" | sudo tee -a /etc/exports > /dev/null
echo "/srv/instances 127.0.0.1(async,rw,fsid=0,no_subtree_check,no_root_squash)" | sudo tee -a /etc/exports > /dev/null
sudo /etc/init.d/nfs-kernel-server restart
sudo /etc/init.d/idmapd restart
fi
cp -f $CONFDIR/etc/apt/sources.list.template $CONFDIR/sources.list.client
sed -i "s|%HOSTADDR%|$HOSTADDR|g" $CONFDIR/sources.list.client
#devstack & openstack packages
#mkdir -p $CURWD/cache
#if [ ! -f $CURWD/cache/devstack.tar.gz ]; then
# trackme wget https://github.com/downloads/zz7a5pe4/x7_start/devstack.tar.gz -O $CURWD/cache/devstack.tar.gz
#fi
#rm -rf $CURWD/devstack
#tar xzf $CURWD/cache/devstack.tar.gz -C $CURWD/
( cd $CURWD/ && tar czf $CURWD/cache/devstack.tar.gz $CURWD/devstack )
if [ ! -f $CURWD/cache/cirros-0.3.0-x86_64-uec.tar.gz ]; then
wget https://github.com/downloads/zz7a5pe4/x7_start/cirros-0.3.0-x86_64-uec.tar.gz -O $CURWD/cache/cirros-0.3.0-x86_64-uec.tar.gz
fi
cp -f $CURWD/cache/cirros-0.3.0-x86_64-uec.tar.gz $CURWD/devstack/files/cirros-0.3.0-x86_64-uec.tar.gz
if [ ! -f $CURWD/cache/stack.zip ]; then
trackme wget https://nodeload.github.com/zz7a5pe4/x7_dep/zipball/master -O $CURWD/cache/stack.zip
fi
rm -rf $CURWD/stack $CURWD/zz7a5pe4-x7_dep*
unzip $CURWD/cache/stack.zip -d $CURWD/
mv $CURWD/zz7a5pe4-x7_dep* $CURWD/stack
sudo umount /opt/stack/nova/instances/
sudo rm -rf /opt/stack
sudo mv -f $CURWD/stack /opt
sudo chown -R $MYID:$MYID /opt/stack
mkdir -p $CURWD/cache/apt
mkdir -p $CURWD/cache/img
mkdir -p $CURWD/cache/pip
mkdir -p $CURWD/cache/piptmp
sudo rm -rf $CURWD/cache/piptmp/*
#trackme sudo apt-get install --force-yes -y $PYDEP
# image and pip
if [ -d /media/x7_usb/ ]; then
cp /media/x7_usb/x7_cache/cirros-0.3.0-x86_64-uec.tar.gz $CURWD/cache/img/cirros-0.3.0-x86_64-uec.tar.gz
tar xzf /media/x7_usb/x7_cache/pika-0.9.5.tar.gz -C $CURWD/cache/pip/
tar xzf /media/x7_usb/x7_cache/passlib-1.5.3.tar.gz -C $CURWD/cache/pip/
tar xzf /media/x7_usb/x7_cache/django-nose-selenium-0.7.3.tar.gz -C $CURWD/cache/pip/
else
#[ -f $CURWD/cache/img/cirros-0.3.0-x86_64-uec.tar.gz ] || wget http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz -O $CURWD/cache/img/cirros-0.3.0-x86_64-uec.tar.gz
[ -f $CURWD/cache/pip/pika-0.9.5.tar.gz ] || wget https://github.com/downloads/jkerng/x7/pika-0.9.5.tar.gz -O $CURWD/cache/pip/pika-0.9.5.tar.gz
[ -f $CURWD/cache/pip/passlib-1.5.3.tar.gz ] || wget https://github.com/downloads/jkerng/x7/passlib-1.5.3.tar.gz -O $CURWD/cache/pip/passlib-1.5.3.tar.gz
[ -f $CURWD/cache/pip/django-nose-selenium-0.7.3.tar.gz ] || wget https://github.com/downloads/jkerng/x7/django-nose-selenium-0.7.3.tar.gz -O $CURWD/cache/pip/django-nose-selenium-0.7.3.tar.gz
[ -f $CURWD/cache/pip/pam-0.1.4.tar.gz ] || wget https://github.com/downloads/jkerng/x7/pam-0.1.4.tar.gz -O $CURWD/cache/pip/pam-0.1.4.tar.gz
[ -f $CURWD/cache/pip/pycrypto-2.3.tar.gz ] || wget https://github.com/downloads/jkerng/x7/pycrypto-2.3.tar.gz -O $CURWD/cache/pip/pycrypto-2.3.tar.gz
fi
tar xzf $CURWD/cache/pip/pika-0.9.5.tar.gz -C $CURWD/cache/piptmp/
tar xzf $CURWD/cache/pip/passlib-1.5.3.tar.gz -C $CURWD/cache/piptmp/
tar xzf $CURWD/cache/pip/django-nose-selenium-0.7.3.tar.gz -C $CURWD/cache/piptmp/
tar xzf $CURWD/cache/pip/pam-0.1.4.tar.gz -C $CURWD/cache/piptmp/
tar xzf $CURWD/cache/pip/pycrypto-2.3.tar.gz -C $CURWD/cache/piptmp/
[ -f $CURWD/cache/pip/WebOb-1.0.8.tar.gz ] && tar xzf $CURWD/cache/pip/WebOb-1.0.8.tar.gz -C $CURWD/cache/piptmp/
chmod -R +r $CURWD/cache/piptmp || true
if [ -d $CURWD/cache/piptmp ];then
pippackages=`ls $CURWD/cache/piptmp`
for package in ${pippackages}; do
cd $CURWD/cache/piptmp/$package && sudo python setup.py install
echo "$CURWD/cache/piptmp/$package"
done
fi
cd $CURWD/cache
tar czf $CURWD/cache/pip.tar.gz --exclude "*.tar.gz" piptmp
mkdir -p $CURWD/log/
rm -rf $CURWD/log/*
cp -f $CURWD/localrc_server_template $CURWD/devstack/localrc
cd $CURWD/devstack
sed -i "s|%HOSTADDR%|$HOSTADDR|g" localrc
sed -i "s|%INTERFACE%|$INTERFACE|g" localrc
sed -i "s|%BRDADDR%|$BRDADDR|g" localrc
sudo mkdir /opt/log
sudo chmod 777 /opt/log
grep "add_nova_opt \"logdir=" stack.sh > /dev/null && true
# "0" => found
if [ "$?" -ne "0" ]; then
sed -i "s,add_nova_opt \"verbose=True\",add_nova_opt \"verbose=True\"\nadd_nova_opt \"logdir=/opt/log\",g" stack.sh
fi
trackme ./stack.sh
sudo mkdir -p /opt/stack/nova/instances
cp -f $CURWD/localrc_compute_template $CURWD/localrc_compute
sed -i "s|%SERVERADDR%|$HOSTADDR|g" $CURWD/localrc_compute
sed -i "s|%BRDADDR%|$BRDADDR|g" $CURWD/localrc_compute
#echo "change libvirt config for migrate"
#sudo sed -i /etc/libvirt/libvirtd.conf -e "
# s,#listen_tls = 0,listen_tls = 0,g;
# s,#listen_tcp = 1,listen_tcp = 1,g;
# s,#auth_tcp = \"sasl\",auth_tcp = \"none\",g;
#"
#sudo sed -i /etc/default/libvirt-bin -e "s,libvirtd_opts=\"-d\",libvirtd_opts=\" -d -l\",g"
#sudo /etc/init.d/libvirt-bin restart
cd $CURWD/devstack
source ./openrc admin admin
#python $CURWD/migrate_monitor.py &
cd $CURWD/
tar czf cache/devstack.tar.gz devstack/
#$CURWD/notify_status.py cmd complete
exit 0
| true |
3eb0e25e906133d96896cd156ba16c2781905cbe | Shell | banzera/heroku-buildpack-php | /bin/util/plexcel.sh | UTF-8 | 509 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build Path: /app/.heroku/php/
# Fetch the Plexcel PHP extension from ioplex and copy its libraries
# into the buildpack's PHP tree. `status`, `notice` and
# `curl_retry_on_18` are buildpack helpers -- presumably provided by
# the sourcing/calling script.
dep_url=http://www.ioplex.com/d/plexcel-2.7.32.x86_64.tar.gz
status "Setting up Plexecl..."
notice "Downloading Plexecl"
curl_retry_on_18 $dep_url -O
notice "Unpacking Plexcel"
tar -xzf plexcel-2.7.32.x86_64.tar.gz
notice "Copying libs for Plexcel"
cp plexcel-2.7.32.x86_64/x86_64/linux/* "$build_dir/.heroku/php"
# NOTE(review): the echo on the next line interpolates ${php-version},
# which is "expand $php, defaulting to 'version'" -- not a variable
# named php-version. Almost certainly a typo; confirm intended variable.
notice "Import extension phalcon into php.ini"
echo "extension=/app/user/lib/plexcel.so.php-${php-version}.so" >> /app/.heroku/php/etc/php/php.ini | true |
543b149f2dae995850c82546b0ff61c557d9c90f | Shell | rtrouton/jp400-09-2019 | /log_airdrop_pdfs/resources/extension_attribute/List_AirDropped_PDFs_extension_attribute.sh | UTF-8 | 217 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# This extension attribute uses cat to
# check the current contents of the
# specified output file.
# Log file written by the companion AirDrop-logging script; read its
# whole contents (cat prints an error to stderr and result stays empty
# if the log has not been created yet).
output_file=/var/log/airdropped_pdfs.log
result=$(cat "$output_file")
echo "<result>$result</result>" | true |
cc457afcc3c9b663d00087d19bd60b8f1037f018 | Shell | tomklino/scripts | /unlimited_area_poller.sh | UTF-8 | 582 | 3.265625 | 3 | [] | no_license | #!/bin/bash
base_url="https://unlimited.net.il";
index_page="/page/deployment_telaviv";
persistent_sum_file="${HOME}/.unlimited.sums"
tmp_sum_file="/tmp/unlimited.sums"
rm ${tmp_sum_file} > /dev/null
curl ${base_url}${index_page}\
| grep -Eo "src=\"/contentfiles[^\.]+\.png\"" | sed -r "s/src=\"(\/contentfiles[^\.]+\.png)\"/\1/"\
| while read img; do
curl ${base_url}${img} | sha1sum >> ${tmp_sum_file};
done
if [ -f ${persistent_sum_file} ]; then
diff -q ${persistent_sum_file} ${tmp_sum_file}
fi
cp ${tmp_sum_file} ${persistent_sum_file}
| true |
8d2f4ba226c77021a0cff7b87c6a45b3deaec261 | Shell | petronny/aur3-mirror | /catalyst-old/PKGBUILD | UTF-8 | 1,865 | 2.765625 | 3 | [] | no_license | _kernver=2.6.29-ARCH
# Legacy package: proprietary ATI Catalyst 9.3 kernel module, built
# against the 2.6.29 kernel series only (see the kernel26 bounds below).
pkgname=catalyst-old
pkgver=9.3
pkgrel=2
pkgdesc="Old proprietary AMD/ATI kernel drivers for Radeon brand cards. Support for R300/R700 chipsets."
arch=('i686' 'x86_64')
url="http://www.ati.amd.com"
license=('custom') # ATI's own license text is installed during build()
depends=("catalyst-utils-old>=${pkgver}" "kernel26>=2.6.29" "kernel26<2.6.30")
replaces=('ati-fglrx' 'fglrx') # Yay rebranding
# Drop-in replacement for the regular catalyst package; the two conflict.
conflicts=('catalyst')
provides=('catalyst')
install=catalyst.install
source=(http://www2.ati.com/drivers/linux/ati-driver-installer-${pkgver/./-}-x86.x86_64.run \
flush_tlb_page.patch 2.6.29.diff)
md5sums=('3875441c1e6f9bd7e0c2d006eb0708b3'
'e743aab05f6cb815b2ebc5cf8ea2886a'
'3a5896c62b6a37f0f1f752017d95762d')
build() {
/bin/sh ./ati-driver-installer-${pkgver/./-}-x86.x86_64.run --extract archive_files
# Fix flush_tlb_page
patch -Np0 -i ${srcdir}/flush_tlb_page.patch || return 1
# Fix 2.6.29 kernel
cd ${srcdir}/archive_files/common
patch -p0 <${srcdir}/2.6.29.diff || return 1
if [ "${CARCH}" = "x86_64" ]; then
BUILDARCH=x86_64
_archdir=x86_64
fi
if [ "${CARCH}" = "i686" ]; then
BUILDARCH=i386
_archdir=x86
fi
cd "${srcdir}/archive_files/common/lib/modules/fglrx/build_mod"
cp "${srcdir}/archive_files/arch/${_archdir}/lib/modules/fglrx/build_mod/libfglrx_ip.a.GCC4" . || return 1
cp 2.6.x/Makefile . || return 1
make -C /lib/modules/${_kernver}/build SUBDIRS="`pwd`" ARCH=${BUILDARCH} modules || return 1
install -m755 -d "${pkgdir}/lib/modules/${_kernver}/video/"
install -m644 fglrx.ko "${pkgdir}/lib/modules/${_kernver}/video/" || return 1
install -m755 -d "${pkgdir}/usr/share/licenses/${pkgname}"
# License
install -m644 "${srcdir}/archive_files/ATI_LICENSE.TXT" "${pkgdir}/usr/share/licenses/${pkgname}/" || return 1
sed -i -e "s/KERNEL_VERSION=.*/KERNEL_VERSION=${_kernver}/" $startdir/*.install
} | true |
fd17c6c5b520e2bb971f9a83e5d1a93f6586b53e | Shell | Verumex/clamav-microservice | /entrypoint.sh | UTF-8 | 519 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# TODO: Re-enable this 'break on first error' line. The virus signature mirrors
# sometimes return non-existent signature files due to... well.. it's free and
# probably misconfigured somehow.
# set -e
echo "Starting virus DB auto updater..."
freshclam -d
echo "Starting ClamAV daemon..."
clamd
echo "Loading unofficial ClamAV community signatures..."
clamav-unofficial-sigs.sh --upgrade
clamav-unofficial-sigs.sh --force
echo "Starting API server..."
bundle exec rackup -E production -o 0.0.0.0 -p 3000
| true |
d0811034f83aab0ceefed95399b6beac3139dac2 | Shell | eBLDR/MasterNotes_BASH | /Loops/until.sh | UTF-8 | 139 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Demonstrates the `until` loop: the body keeps running while the test
# FAILS, i.e. we loop until [ num -eq 0 ] finally succeeds.
num=10
until [ "${num}" -eq 0 ]; do
    printf '%s\n' "${num}"   # counts down 10, 9, ..., 1
    num=$((num - 1))
done
| true |
bb8d2fd9074b4350cc1e09d23739ca7e19d51065 | Shell | contd/bash-utils | /libs/check_uninstalled.sh | UTF-8 | 1,214 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# check_uninstalled -- report which required command-line tools and
# global npm packages are missing, printing colored install hints via
# the external `ansi` helper.
#
function check_uninstalled() {
  # Globals (deliberately not local): the probe lists.
  __BINS_TO_CHECK=(awk colordiff convert curl docker ffmpeg gifsicle gpg2 HandBrakeCLI jq nmap openssl pandoc pdftohtml rename rsync unzip wget xsel youtube-dl)
  __NPMS_TO_CHECK=(tidy-markdown thumbsup webshot)
  echo "Checking for bins needed..."
  for B in ${__BINS_TO_CHECK[@]}; do
    #echo $B
    # Empty path => not on PATH. NOTE(review): `command -v` is the
    # portable replacement for `which`.
    __BIN_PATH=$(which $B 2>/dev/null)
    if [ "${__BIN_PATH}" == "" ]; then
      ansi --red --no-newline "MISSING: "
      ansi --yellow "$B"
      ansi --blue --no-newline " (To install, run: "
      ansi --green --no-newline "sudo apt install $B"
      ansi --blue " )"
    else
      ansi --green --no-newline "$B"
      ansi --blue " - is installed!"
    fi
  done
  echo ""
  echo "Checking for global npms needed..."
  # Same PATH probe for globally-installed npm package executables.
  for N in ${__NPMS_TO_CHECK[@]}; do
    #echo $N
    __NPM_PATH=$(which $N 2>/dev/null)
    if [ "${__NPM_PATH}" == "" ]; then
      ansi --red --no-newline "MISSING: "
      ansi --yellow "$N"
      ansi --blue --no-newline " (To install, run: "
      ansi --green --no-newline "sudo npm install -g $N"
      ansi --blue " )"
    else
      ansi --green --no-newline "npm: $N"
      ansi --blue " - is installed!"
    fi
  done
}
if [[ $_ == $0 ]]; then
check_uninstalled
fi | true |
ce6462e29756b9ac1a38ba3dfdfddf608fd69ca3 | Shell | random220/gcode | /bin/q0 | UTF-8 | 363 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# q0: append a note (read from standard input) to tw.txt in the
# om-mandal/work checkout, newest entry first, then commit and push.
d=~/sb/omwork
if [[ ! -d $d ]]; then
	# First run: create the sandbox and clone the repository.
	mkdir -p ~/sb
	cd ~/sb || exit 1
	git clone git@github.com:om-mandal/work.git omwork
fi
mkdir -p ~/sb/omwork/twits
# Guarded cd: without it a failed cd let the git commands below run in
# whatever directory we happened to be in.
cd ~/sb/omwork/twits || exit 1
git pull
# Stage the new entry: blank line, timestamp, then the indented note.
echo >>.twt.txt
date >>.twt.txt
# sed reads stdin itself; the original's `cat - |` stage was redundant.
sed 's/^/ /' >>.twt.txt
# Prepend the staged entry to the running log.
cat .twt.txt tw.txt >.twtt.txt
rm -f .twt.txt
mv .twtt.txt tw.txt
git add tw.txt
git commit -m "$(date)"
git push
| true |
18ab15f6c22d68b01983b71de03305e56e0ea4ad | Shell | FabianWe/mlmmj-docker | /docker_entrypoint.sh | UTF-8 | 1,364 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# ignores empty results
shopt -s nullglob
# we want to use the virtual and transport files, by default they should be
# in /var/spool/mlmmj
# But we also want to share the virtual and transport files with postfix
# and therefore store them in another directory /mlmmj_conf
# to do so we create a symlink
# if the files don't exist we create them
# TODO we should call newaliases in this case: postfix will send out warnings otherwise
# but since they're empty it's not too bad...
# if they exist the .db file should already be there
TRANSPORT="/mlmmj_conf/transport"
VIRTUAL="/mlmmj_conf/virtual"
if [ ! -f "$TRANSPORT" ]; then
touch "$TRANSPORT"
fi
if [ ! -f "$VIRTUAL" ]; then
touch "$VIRTUAL"
fi
if [ -z "$POSTFIX_HOST" ]; then
POSTFIX_HOST="$(/sbin/ip route|awk '/default/ { print $3 }')"
printf "POSTFIX_HOST not specified, assuming that postfix is reachable on %s\n" "$POSTFIX_HOST"
fi
ln -sf /mlmmj_conf/virtual /var/spool/mlmmj/virtual && \
ln -sf /mlmmj_conf/transport /var/spool/mlmmj/transport
# we have to change the relayhost in each mailinglist s.t. mlmmj connects to
# the postfix in the postfix container
# Point every list's control/relayhost file at the postfix container.
# IFS is set to a newline inside a subshell so find's output splits on
# lines only (paths with spaces survive) and the IFS change stays local.
# '%s\n' keeps the address from being interpreted as a printf format
# string -- the original passed "$POSTFIX_HOST" as the format itself.
(IFS='
'
for listdir in $(find /var/spool/mlmmj/ -name control -type d); do
	printf '%s\n' "$POSTFIX_HOST" > "$listdir/relayhost"
done)
# start cron
cron
# start the server and wait for incoming mails...
exec "$@"
| true |
8fecc16497a57d292e3ff8de776e4733d7df6e6d | Shell | HumanCellAtlas/upload-service | /scripts/authorize_aws_deploy.sh | UTF-8 | 1,368 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Grant a CI/CD IAM principal the permissions needed to test and deploy the
# application. Must be run with credentials that have IAM write access.
source "$(dirname "$0")/../config/environment"
set -euo pipefail

if [[ $# != 2 ]]; then
    echo "Given an IAM principal intended to be used by a test/CI/CD pipeline,"
    echo "this script grants the principal the AWS IAM permissions necessary to"
    echo "test and deploy the application. Run this script using privileged"
    echo "(IAM write access) IAM credentials."
    echo "Usage: $(basename "$0") iam-principal-type iam-principal-name"
    echo "Example: $(basename "$0") user hca-test"
    exit 1
fi

export iam_principal_type=$1 iam_principal_name=$2
# Split declaration from command substitution: `export v=$(cmd)` returns the
# status of `export`, so a failing aws call would have been masked under -e.
region_name=$(aws configure get region)
account_id=$(aws sts get-caller-identity | jq -r .Account)
export region_name account_id
# Only these variables get substituted in the templates; anything else stays literal.
envsubst_vars='$BUCKET_NAME_PREFIX $TERRAFORM_STATE_BUCKET $region_name $account_id'

for policy_json in "$(dirname "$0")"/../config/iam-policy-templates/ci-cd-*.json ; do
    # basename's suffix argument strips a literal ".json" (the old
    # `sed 's/.json//'` pattern was unanchored and "." matched any character).
    policy_name=dcp-upload-$(basename "${policy_json}" .json)
    echo "Applying policy ${policy_name} to ${iam_principal_type} ${iam_principal_name}..."
    aws iam put-${iam_principal_type}-policy \
        --${iam_principal_type}-name ${iam_principal_name} \
        --policy-name "${policy_name}" \
        --policy-document file://<(envsubst "$envsubst_vars" < "$policy_json" | \
                                   jq -c 'del(.Statement[].Sid)')
done
| true |
199d0c877c48baf1f9d658a456813e8d851629c7 | Shell | bazelbuild/rules_go | /tests/bcr/go_version_test.sh | UTF-8 | 905 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# --- begin runfiles.bash initialization v3 ---
# Copy-pasted from the Bazel Bash runfiles library v3.
set -uo pipefail; set +e; f=bazel_tools/tools/bash/runfiles/runfiles.bash
source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
  source "$0.runfiles/$f" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
  { echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
# --- end runfiles.bash initialization v3 ---

# Resolve the go binary's runfiles path (rlocation is provided by the
# runfiles library sourced above; GO_TOOL_RLOCATION comes from the test env).
GO_TOOL=$(rlocation "$GO_TOOL_RLOCATION")

# Set runfiles variables for subprocess.
runfiles_export_envvars

# Simulate a bazel run environment.
export BUILD_WORKING_DIRECTORY=$(pwd)

# The test passes iff `go version` output starts with "go"; a non-matching
# =~ makes this last command (and thus the script) exit non-zero.
[[ "$("$GO_TOOL" version)" =~ ^go ]]
| true |
c259b7b773fd732ce6963489c74d7d5b04c39b89 | Shell | kikitux/jessie-go16 | /scripts/provision.sh | UTF-8 | 772 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Install the build prerequisites once: apt-get only runs if any tool is missing.
for tool in curl git bzr; do
  command -v "$tool" >/dev/null 2>&1 && continue
  sudo apt-get update
  sudo apt-get install -y curl git bzr
  break
done

# Download the Go 1.6 tarball once and cache it on the shared /vagrant mount.
[ -f /vagrant/tmp/go1.6.linux-amd64.tar.gz ] || {
  mkdir -p /vagrant/tmp
  pushd /vagrant/tmp
  curl -o go1.6.linux-amd64.tar.gz https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
  popd
}

# Unpack into /usr/local/go only if Go isn't installed yet.
[ -d /usr/local/go ] || sudo tar -C /usr/local -xzf /vagrant/tmp/go1.6.linux-amd64.tar.gz

# Append the Go environment to ~/.bash_profile exactly once (grep -q keeps the
# idempotency checks silent).
grep -q 'export GOROOT=' .bash_profile || echo 'export GOROOT=/usr/local/go' | tee -a .bash_profile
grep -q 'export GOPATH=' .bash_profile || echo 'export GOPATH=/vagrant/go' | tee -a .bash_profile
source .bash_profile
# BUGFIX: write the PATH line literally (single quotes). The original expanded
# $PATH/$GOROOT/$GOPATH at provision time, baking a stale PATH value into the
# profile; GOROOT and GOPATH are defined by the lines appended above, so the
# literal form expands correctly at every login.
grep -q 'export PATH=' .bash_profile || echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' | tee -a .bash_profile
source .bash_profile
mkdir -p "$GOPATH"
932d78fc9a60792aa30c0d9eb43381887b019b28 | Shell | woggioni/x-toolchain | /packages/curl/PKGBUILD | UTF-8 | 2,305 | 2.875 | 3 | [] | no_license | # Contributor: ant32 <antreimer@gmail.com>
pkgname="${_target}-curl"
_basename="${pkgname#${_target}-}"
pkgver=7.78.0
pkgrel=1
pkgdesc="An URL retrival utility and library (${_target})"
arch=('any')
url="http://curl.haxx.se"
license=("MIT")
if [[ ${_os} == *"mingw"* ]]
then
depends=("${_target}-gcc"
"${_target}-libidn2"
"${_target}-libssh2"
"${_target}-openssl"
"${_target}-zlib")
_idn_configure_flag="--with-libidn2"
else
depends=("${_target}-gcc"
"${_target}-libssh2"
"${_target}-openssl"
"${_target}-zlib")
_idn_configure_flag=""
fi
makedepends=("${_target}-configure")
options=('staticlibs' '!buildflags')
provides=("${_target}-libcurl.so")
source=("https://curl.haxx.se/download/$_basename-$pkgver.tar.gz"{,.asc})
sha512sums=('3d74343e06dd3d699f4c883758775554956f7f27de470b71f4af0d7f56ff5e4d7c534ab958c8926fae69cd0ded90c173ac3d5a6ad5518d88c2f39f31f8bba2f3'
'SKIP')
validpgpkeys=('27EDEAF22F3ABCEB50DB9A125CC908FDB71E12C2') # Daniel Stenberg
# Configure and compile curl twice: a static-only build and a shared-only
# build, in separate out-of-tree build directories.
build() {
    cd "${srcdir}/${pkgname#${_target}-}-${pkgver}"

    # Common ./configure options for both variants, held in an array so no
    # flag relies on unquoted word splitting of one big string.
    local configure_args=(
        --with-openssl
        --disable-ldap
        --disable-ldaps
        --disable-manual
        --enable-ipv6
        --enable-threaded-resolver
        --with-libssh2
        --enable-versioned-symbols
        --with-gssapi
        --without-ca-bundle
        --without-random
    )
    # The IDN flag is only set for mingw targets and may be empty; append it
    # conditionally so an empty value never becomes a stray empty argument.
    if [[ -n ${_idn_configure_flag} ]]; then
        configure_args+=("${_idn_configure_flag}")
    fi

    # Static build.
    mkdir -p "build-${_target}-static" && pushd "build-${_target}-static"
    "${_target}-configure" "${configure_args[@]}" \
        --enable-static --disable-shared ..
    make
    popd

    # Shared build.
    mkdir -p "build-${_target}" && pushd "build-${_target}"
    "${_target}-configure" "${configure_args[@]}" \
        --disable-static --enable-shared ..
    make
    popd
}
# Install both build variants into $pkgdir (static first, then shared) and
# patch libcurl.pc so static linking pulls in libunistring alongside libidn2.
package() {
	cd "${srcdir}/${pkgname#${_target}-}-${pkgver}/build-${_target}-static"
	make DESTDIR="${pkgdir}" install
	cd "${srcdir}/${pkgname#${_target}-}-${pkgver}/build-${_target}"
	make DESTDIR="${pkgdir}" install
	# libidn2 itself depends on libunistring; add it to the pkg-config libs.
	sed -i "s/-lidn2/-lidn2 -lunistring/g" "$pkgdir/${_prefix}/${_target}/sysroot/usr/lib/pkgconfig/libcurl.pc"
}
# Override makepkg's `strip` with the cross toolchain's strip so target-arch
# binaries are stripped by the correct tool; exported for subshells.
strip() {
    # BUGFIX: quote "$@" so file names containing spaces survive intact.
    ${_target}-strip "$@"
}
export -f strip
6c46fd8d22b4f9f0390fa0ae7621fadaff63321e | Shell | deanflyer/vpc | /vpc.sh | UTF-8 | 15,643 | 3.734375 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | # Shell script for creating AWS resources
# Resources will be created in default VPC (run AWS configure list to see what your default region is set to) unless specififed otherwise
# Dependencies - requires jq JSON parsing utility to be installed and AWS CLI
# General variables
# Defaults offered at each interactive prompt below; pressing Enter accepts them.
DEFAULT_VPC="10.0.0.0/16"
DEFAULT_VPC_TAG="test-VPC"
# Filled in later from `aws ec2 describe-availability-zones` for the chosen region.
DEFAULT_AVAILABILITY_ZONE_1=""
DEFAULT_AVAILABILITY_ZONE_2=""
DEFAULT_SUBNET1="10.0.1.0/24"
DEFAULT_SUBNET1_TAG="public-subnet"
DEFAULT_SUBNET2="10.0.2.0/24"
DEFAULT_SUBNET2_TAG="private-subnet"
DEFAULT_IGW="IGW1"
DEFAULT_PUBLIC_RT="public-route-table"
DEFAULT_PRIVATE_RT="private-route-table"
DEFAULT_KEYPAIR="test-keypair"
DEFAULT_SECURITY_GROUP="TestSecurityGroup"
DEFAULT_SECURITY_GROUP_DESCRIPTION="Test Security Group (autocreated)"
DEFAULT_SECURITY_GROUP_TAG_NAME="Test"
DEFAULT_INBOUND_RULE_PROTOCOL="tcp"
DEFAULT_INBOUND_RULE_PORT="22"
DEFAULT_INBOUND_RULE_CIDR="0.0.0.0/0"
DEFAULT_INSTANCE_TYPE="t2.micro"
# Populated after the EC2 instance is launched.
INSTANCE_ID=""
EC2_PUBLIC_IP_ADDRESS=""
# Default output filenames
# Raw JSON responses from the AWS CLI are saved here and parsed with jq.
VPCOUTPUT="vpc.json"
SN1OUTPUT="subnet1.json"
SN2OUTPUT="subnet2.json"
IGWOUTPUT="igw.json"
PUBLIC_RT_OUTPUT="public-rt.json"
PRIVATE_RT_OUTPUT="private-rt.json"
INSTANCEOUTPUT="instances.json"
echo "AWS CLI Utility"
# Select AWS region (defaults to the CLI's configured region).
CURRENT_REGION=$(aws configure get region)
echo -n "Enter AWS region, or press enter for default region["$CURRENT_REGION"]: "
read SELECTEDREGION
if [ -n "$SELECTEDREGION" ]
then
    CURRENT_REGION=$SELECTEDREGION
    echo "AWS region has been changed to: $CURRENT_REGION"
fi

# Create a VPC
# When you create a VPC, it automatically has a main route table.
# We can use this or just create our own route tables and associate them with our subnets later on
echo -n "Enter CIDR-block you want to create ["$DEFAULT_VPC"]: "
read CIDRBLOCK
if [ -z "$CIDRBLOCK" ]
then
    CIDRBLOCK=$DEFAULT_VPC
fi
echo -n "Enter name of the VPC ["$DEFAULT_VPC_TAG"]: "
read VPCTAGNAME
if [ -z "$VPCTAGNAME" ]
then
    VPCTAGNAME=$DEFAULT_VPC_TAG
fi
# BUGFIX: the create-vpc call used to sit inside the "default name" branch
# above, so entering a custom VPC name silently skipped VPC creation.
# It now runs unconditionally and its exit status is captured immediately.
aws ec2 create-vpc --region "$CURRENT_REGION" --cidr-block "$CIDRBLOCK" --tag-specifications 'ResourceType=vpc, Tags=[{Key=Name,Value='$VPCTAGNAME'}]' --output json > "$VPCOUTPUT"
RC=$?
if [ $RC -eq 0 ]
then
    VPCID=$(jq -r ".Vpc.VpcId" "$VPCOUTPUT")
    echo "VPC creation successful. [SUCCESS]"
    echo "VPC ID: $VPCID"
else
    echo "VPC creation failed. AWS CLI return code is:" $RC "[FAIL]"
    exit
fi
# Create 2 subnets within our VPC in separate AZs
# Retrieve Region AZs and associate first 2 available AZs with our 2 subnets respectively (unless user specifies otherwise)
echo
echo "CREATE SUBNETS"
echo "AZs in "$CURRENT_REGION" are: "
aws ec2 describe-availability-zones --region $CURRENT_REGION | jq -r .AvailabilityZones[].ZoneName
# Pick the first two AZs returned for the region as prompt defaults.
DEFAULT_AVAILABILITY_ZONE_1=$(aws ec2 describe-availability-zones --region $CURRENT_REGION | jq -r .AvailabilityZones[0].ZoneName)
DEFAULT_AVAILABILITY_ZONE_2=$(aws ec2 describe-availability-zones --region $CURRENT_REGION | jq -r .AvailabilityZones[1].ZoneName)
# Prompt for CIDR, name tag, and AZ of each subnet; empty input keeps the default.
echo -n "First subnet IP address ["$DEFAULT_SUBNET1":]"
read SUBNET1
if [ -z "$SUBNET1" ]
then
SUBNET1=$DEFAULT_SUBNET1
fi
echo -n "First subnet name ["$DEFAULT_SUBNET1_TAG"]: "
read SN1TAGNAME
if [ -z "$SN1TAGNAME" ]
then
SN1TAGNAME=$DEFAULT_SUBNET1_TAG
fi
echo -n "First subnet Availability Zone ["$DEFAULT_AVAILABILITY_ZONE_1"]: "
read SN1AZ
if [ -z "$SN1AZ" ]
then
SN1AZ=$DEFAULT_AVAILABILITY_ZONE_1
fi
echo -n "Second subnet IP address ["$DEFAULT_SUBNET2"]: "
read SUBNET2
if [ -z "$SUBNET2" ]
then
SUBNET2=$DEFAULT_SUBNET2
fi
echo -n "Second subnet name ["$DEFAULT_SUBNET2_TAG"]: "
read SN2TAGNAME
if [ -z "$SN2TAGNAME" ]
then
SN2TAGNAME=$DEFAULT_SUBNET2_TAG
fi
echo -n "Second subnet Availability Zone ["$DEFAULT_AVAILABILITY_ZONE_2"]: "
read SN2AZ
if [ -z "$SN2AZ" ]
then
SN2AZ=$DEFAULT_AVAILABILITY_ZONE_2
fi
# Create subnet 1 (public) and capture the response JSON for jq.
aws ec2 create-subnet --region $CURRENT_REGION --availability-zone $SN1AZ --vpc-id $VPCID --cidr-block $SUBNET1 --tag-specifications 'ResourceType=subnet, Tags=[{Key=Name,Value='$SN1TAGNAME'}]' > $SN1OUTPUT
if [ $? -eq 0 ]
then
SUBNET1ID=$(cat $SN1OUTPUT | jq -r ".Subnet.SubnetId")
echo "Subnet 1 created. Subnet ID: "$SUBNET1ID "[SUCCESS]"
else
echo "VPC creation failed. See" $SN1OUTPUT "for further details. AWS CLI return code is:" $? "[FAIL]"
exit
fi
# Create subnet 2 (private).
aws ec2 create-subnet --region $CURRENT_REGION --availability-zone $SN2AZ --vpc-id $VPCID --cidr-block $SUBNET2 --tag-specifications 'ResourceType=subnet,Tags=[{Key=Name,Value='$SN2TAGNAME'}]' --output json > $SN2OUTPUT
if [ $? -eq 0 ]
then
SUBNET2ID=$(cat $SN2OUTPUT | jq -r ".Subnet.SubnetId")
echo "Subnet 2 created. Subnet ID: "$SUBNET2ID "[SUCCESS]"
else
echo "VPC creation failed. See "$SN2OUTPUT" for further details. AWS CLI return code is:" $? "[FAIL]"
exit
fi
# Create Internet Gateway
echo
echo "CREATE INTERNET GATEWAY"
echo -n "Internet Gateway name ["$DEFAULT_IGW"]: "
read IGWNAME
if [ -z "$IGWNAME" ]
then
IGWNAME=$DEFAULT_IGW
fi
# Create the IGW and save the response for jq to extract the ID.
aws ec2 create-internet-gateway --region $CURRENT_REGION --tag-specifications 'ResourceType=internet-gateway, Tags=[{Key=Name,Value='$IGWNAME'}]' --output json > $IGWOUTPUT
if [ $? -eq 0 ]
then
IGWID=$(cat $IGWOUTPUT | jq -r ".InternetGateway.InternetGatewayId")
echo "Internet Gateway created [SUCCESS]"
echo "Internet gateway ID: "$IGWID
else
echo "Internet Gateway creation failed. See" $IGWOUTPUT "for further details. AWS CLI return code is:" $? "[FAIL]"
exit
fi
# Attach Internet Gateway to VPC
echo
echo "ATTACH INTERNET GATEWAY TO VPC"
aws ec2 attach-internet-gateway --region $CURRENT_REGION --vpc-id $VPCID --internet-gateway-id $IGWID
if [ $? -eq 0 ]
then
echo "Internet gateway attached successfully. [SUCCESS]"
else
echo "Internet gateway not attached AWS CLI return code is:" $? "[FAIL]"
exit
fi
# Create public and private routing table for our subnets
# Note - As we are explicitly associating our subnets with our created rout tables the main VPC route table is redundant.
echo
echo "CREATE PUBLIC SUBNET ROUTE TABLE"
aws ec2 create-route-table --region $CURRENT_REGION --vpc-id $VPCID --tag-specifications 'ResourceType=route-table,Tags=[{Key=Name,Value='$DEFAULT_PUBLIC_RT'}]' > $PUBLIC_RT_OUTPUT
if [ $? -eq 0 ]
then
echo "Public subnet route table crated successfully. [SUCCESS]"
# NOTE(review): the filename is hard-coded here instead of using
# $PUBLIC_RT_OUTPUT; they currently point at the same file ("public-rt.json").
PUBLIC_SUBNET_RT=$(jq -r .[].RouteTableId public-rt.json)
echo "Public subnet route table ID: "$PUBLIC_SUBNET_RT
else
echo "Public subnet route table creation failed. AWS return code is: "$? "[FAIL]"
exit
fi
# Create private routing table
echo
echo "CREATE PRIVATE SUBNET ROUTE TABLE"
aws ec2 create-route-table --region $CURRENT_REGION --vpc-id $VPCID --tag-specifications 'ResourceType=route-table,Tags=[{Key=Name,Value='$DEFAULT_PRIVATE_RT'}]' > $PRIVATE_RT_OUTPUT
if [ $? -eq 0 ]
then
echo "Private subnet route table crated successfully. [SUCCESS]"
# NOTE(review): hard-coded filename again (matches $PRIVATE_RT_OUTPUT today).
PRIVATE_SUBNET_RT=$(jq -r .[].RouteTableId private-rt.json)
echo "Private subnet route table ID: "$PRIVATE_SUBNET_RT
else
echo "Private subnet route table creation failed. AWS return code is: "$? "[FAIL]"
exit
fi
# Attach internet gateway to public subnet route table
echo
echo "ADD INTERNET GATEWAY TO PUBLIC SUBNET ROUTE TABLE"
# Default route 0.0.0.0/0 via the IGW makes the first route table "public".
aws ec2 create-route --region $CURRENT_REGION --route-table-id $PUBLIC_SUBNET_RT --destination-cidr-block 0.0.0.0/0 --gateway-id $IGWID > /dev/null
if [ $? -eq 0 ]
then
echo "Dest:0.0.0.0/0 -" $IGWID "associated with main Route table" $MAIN_RT_TABLE "[SUCCESS]"
else
echo "Route table could not be associated with main Route table [FAIL]"
echo "AWS CLI return code is:" $?
exit
fi
# Associate our public and private subnets explicitly with our public and private route tables
echo
echo "ASSOCIATE PUBLIC SUBNET WITH PUBLIC ROUTE TABLE"
aws ec2 associate-route-table --region $CURRENT_REGION --subnet-id $SUBNET1ID --route-table-id $PUBLIC_SUBNET_RT > /dev/null
if [ $? -eq 0 ]
then
echo $SUBNET1ID "explicitly associated with "$PUBLIC_SUBNET_RT "[SUCCESS]"
else
echo $SUBNET1ID "could not be associated with "$PUBLIC_SUBNET_RT "[FAIL]"
echo "AWS CLI return code is:" $?
exit
fi
echo
echo "ASSOCIATE PRIVATE SUBNET WITH PRIVATE ROUTE TABLE"
aws ec2 associate-route-table --region $CURRENT_REGION --subnet-id $SUBNET2ID --route-table-id $PRIVATE_SUBNET_RT > /dev/null
if [ $? -eq 0 ]
then
echo $SUBNET2ID "explicitly associated with "$PRIVATE_SUBNET_RT "[SUCCESS]"
else
echo $SUBNET2ID "could not be associated with "$PRIVATE_SUBNET_RT "[FAIL]"
echo "AWS CLI return code is:" $?
exit
fi
# Make subnet1 issue public-IP addresses to EC2 instances on launch
echo
echo "AUTO-ASSIGN PUBLIC IPV4"
aws ec2 modify-subnet-attribute --region $CURRENT_REGION --subnet-id $SUBNET1ID --map-public-ip-on-launch
if [ $? -eq 0 ]
then
echo "Enabled auto-assign public IPv4 address on launch on "$SUBNET1ID "[SUCCESS]"
else
echo "auto-assign public IPv4 address on launch on "$SUBNET1ID "[FAILED]"
echo "AWS CLI return code is:" $?
exit
fi
# Create keypair
echo
echo "CREATE KEY-PAIR"
echo -n "Enter key-pair name ["$DEFAULT_KEYPAIR"]: "
read KEYPAIR
if [ -z "$KEYPAIR" ]
then
    KEYPAIR=$DEFAULT_KEYPAIR
fi
# Save the private key locally and restrict its permissions for ssh.
# BUGFIX: capture the AWS CLI status immediately — the original tested $?
# only after chmod, so a failed create-key-pair was reported as success.
aws ec2 create-key-pair --region "$CURRENT_REGION" --key-name "$KEYPAIR" --query 'KeyMaterial' --output text > "./$KEYPAIR.pem"
RC=$?
chmod 600 "$KEYPAIR.pem"
if [ $RC -eq 0 ]
then
    echo "keypair generated. File saved locally as "$KEYPAIR".pem [SUCCESS]"
    echo -n "KeyFingerprint: "
    aws ec2 describe-key-pairs --region "$CURRENT_REGION" --key-name "$KEYPAIR" | jq -r ".KeyPairs[].KeyFingerprint"
else
    echo "keypair generation [FAIL]"
    echo "AWS CLI return code is:" $RC
    exit
fi
# Create Security Group
echo
echo "CREATE SECURITY GROUP"
echo -n "Enter security group name ["$DEFAULT_SECURITY_GROUP"]: "
read SECURITY_GROUP
if [ -z "$SECURITY_GROUP" ]
then
SECURITY_GROUP=$DEFAULT_SECURITY_GROUP
fi
echo -n "Enter security group description ["$DEFAULT_SECURITY_GROUP_DESCRIPTION"]: "
read SECURITY_GROUP_DESCRIPTION
if [ -z "$SECURITY_GROUP_DESCRIPTION" ]
then
SECURITY_GROUP_DESCRIPTION=$DEFAULT_SECURITY_GROUP_DESCRIPTION
fi
echo -n "Enter security group tag name ["$DEFAULT_SECURITY_GROUP_TAG_NAME"]: "
read SECURITY_GROUP_TAG_NAME
if [ -z "$SECURITY_GROUP_TAG_NAME" ]
then
SECURITY_GROUP_TAG_NAME=$DEFAULT_SECURITY_GROUP_TAG_NAME
fi
# Create the group inside our VPC; output is discarded, the ID is looked up
# afterwards by filtering on the group name.
aws ec2 create-security-group --region $CURRENT_REGION --description "$SECURITY_GROUP_DESCRIPTION" --group-name $SECURITY_GROUP --vpc-id $VPCID --tag-specifications 'ResourceType=security-group,Tags=[{Key=Name,Value='$SECURITY_GROUP_TAG_NAME'}]' > /dev/null
if [ $? -eq 0 ]
then
SECURITY_GROUP_ID=$(aws ec2 describe-security-groups --region $CURRENT_REGION --filters Name=group-name,Values=$SECURITY_GROUP | jq -r .SecurityGroups[].GroupId)
echo "Security group created [SUCCESS]"
echo -n "Security group ID: "
echo $SECURITY_GROUP_ID
else
echo "Security group creation [FAIL]"
echo "AWS CLI return code is:" $?
exit
fi
# do while loop for adding multiple security rules. Will run at least once and test for exit at bottom of loop.
while true; do
    echo
    echo "ADD AN INBOUND RULE TO SECURITY GROUP"
    echo -n "Protocol (tcp,udp,icmp) ["$DEFAULT_INBOUND_RULE_PROTOCOL"]: "
    read INBOUND_RULE_PROTOCOL
    if [ -z "$INBOUND_RULE_PROTOCOL" ]
    then
        INBOUND_RULE_PROTOCOL=$DEFAULT_INBOUND_RULE_PROTOCOL
    fi
    echo -n "Port no. ["$DEFAULT_INBOUND_RULE_PORT"]: "
    read INBOUND_RULE_PORT
    if [ -z "$INBOUND_RULE_PORT" ]
    then
        INBOUND_RULE_PORT=$DEFAULT_INBOUND_RULE_PORT
    fi
    echo -n "CIDR block ["$DEFAULT_INBOUND_RULE_CIDR"]: "
    read INBOUND_RULE_CIDR_BLOCK
    # BUGFIX: the test was `[ -z "INBOUND_RULE_CIDR_BLOCK" ]` (missing $), a
    # literal string that is never empty, so the default was never applied.
    if [ -z "$INBOUND_RULE_CIDR_BLOCK" ]
    then
        INBOUND_RULE_CIDR_BLOCK=$DEFAULT_INBOUND_RULE_CIDR
    fi
    # BUGFIX: --cidr used to pass $DEFAULT_INBOUND_RULE_CIDR, silently
    # ignoring whatever CIDR the user typed in.
    aws ec2 authorize-security-group-ingress --region "$CURRENT_REGION" --group-id "$SECURITY_GROUP_ID" --protocol "$INBOUND_RULE_PROTOCOL" --port "$INBOUND_RULE_PORT" --cidr "$INBOUND_RULE_CIDR_BLOCK"
    if [ $? -eq 0 ]
    then
        # BUGFIX: the success message referenced the undefined
        # $INBOUND_RULE_CIDR; report the CIDR that was actually used.
        echo "Inbound rule" $INBOUND_RULE_CIDR_BLOCK">" $INBOUND_RULE_PORT "added to security group: "$SECURITY_GROUP_ID" [SUCCESS]"
    else
        echo "Inbound rule addition [FAIL]"
        echo "AWS CLI return code is:" $?
        exit
    fi
    read -p "'q' to quit security rule creation or press enter to add another security rule:" QUIT_TEST
    [[ $QUIT_TEST != "q" ]] || break
done
# Get current (latest) version of Amazon 2 Linux AMI for x86_64 (query by wilcard name, sort by reverse date and display latest version only) and launch EC2 instance/s
# For arm64 AMI use "amzn2-ami-hvm-2.0.????????.?-arm64-gp2"
echo
echo "RETRIEVE LATEST x86 AMAZON LINUX 2 AMI"
LATEST_AL2_AMI=$(aws ec2 describe-images --region $CURRENT_REGION --owners amazon --filters "Name=name,Values=amzn2-ami-hvm-2.0.????????.?-x86_64-gp2" "Name=state,Values=available" --query "reverse(sort_by(Images, &CreationDate))[:1].ImageId" --output text)
echo "Latest version of AMI is: "$LATEST_AL2_AMI
echo -n "Enter EC2 Instance Type required ["$DEFAULT_INSTANCE_TYPE"]: "
read INSTANCE_TYPE
if [ -z "$INSTANCE_TYPE" ]
then
INSTANCE_TYPE=$DEFAULT_INSTANCE_TYPE
fi
echo "Launching "$INSTANCE_TYPE" EC2 INSTANCE IN PUBLIC SUBNET: "$SUBNET1ID
# Launch EC2 instance and send output to $INSTANCEOUTPUT filename
aws ec2 run-instances --region $CURRENT_REGION --image-id $LATEST_AL2_AMI --count 1 --instance-type $INSTANCE_TYPE --key-name $KEYPAIR --security-group-ids $SECURITY_GROUP_ID --subnet-id $SUBNET1ID --output json > $INSTANCEOUTPUT
INSTANCE_ID=$(jq -r '.Instances[].InstanceId' $INSTANCEOUTPUT)
# NOTE(review): $? here reflects the jq assignment above, not the
# run-instances call — a launch failure may still be reported as SUCCESS.
if [ $? -eq 0 ]
then
echo "EC2 Instance launched [SUCCESS]"
echo "Instance ID: "$INSTANCE_ID
else
echo "Instance(s) not launched [FAIL]"
echo "AWS CLI return code is:" $?
exit
fi
# Query EC2 instance with current $INSTANCE_ID and retrieve Public IP address
EC2_PUBLIC_IP_ADDRESS=$(aws ec2 describe-instances --region $CURRENT_REGION --filters Name=instance-id,Values=$INSTANCE_ID --query Reservations[*].Instances[*].PublicIpAddress --output text)
echo "EC2 Public IP address: "$EC2_PUBLIC_IP_ADDRESS
echo
# Clean up and delete created AWS resources
echo "Terminate created AWS resources?
Do not proceed unless you wish to terminate the AWS resources you just created."
echo "Note - This script will fail if you run it immediately after resource creation due to propagation delays"
# BUGFIX: the options were fused as `read -eiN`; spell them out so readline
# pre-fills the default answer "N". The case below also tolerates an empty
# reply (the old unquoted `[ $YN = "Y" ]` crashed when YN was empty).
read -e -i "N" -p "Terminate all AWS created resources? [y/N]" YN
case "${YN:-N}" in
    [Yy])
        echo "Terminating EC2 instance: $INSTANCE_ID"
        aws ec2 terminate-instances --region "$CURRENT_REGION" --instance-ids "$INSTANCE_ID" > /dev/null
        # Dependent resources can't be deleted until the instance is gone.
        echo "Pausing for 90 seconds for propagation delays."
        sleep 90
        # Deletion order matters: SG and subnets first, then detach/delete the
        # IGW and route tables, and the VPC last.
        echo "Deleting security group: $SECURITY_GROUP_ID"
        aws ec2 delete-security-group --region "$CURRENT_REGION" --group-id "$SECURITY_GROUP_ID"
        echo "Deleting subnet: $SUBNET1ID"
        aws ec2 delete-subnet --region "$CURRENT_REGION" --subnet-id "$SUBNET1ID"
        echo "Deleting subnet: $SUBNET2ID"
        aws ec2 delete-subnet --region "$CURRENT_REGION" --subnet-id "$SUBNET2ID"
        echo "Detaching internet gateway: $IGWID"
        aws ec2 detach-internet-gateway --region "$CURRENT_REGION" --internet-gateway-id "$IGWID" --vpc-id "$VPCID"
        echo "Deleting internet gateway: $IGWID"
        aws ec2 delete-internet-gateway --region "$CURRENT_REGION" --internet-gateway-id "$IGWID"
        echo "Deleting public route table: $PUBLIC_SUBNET_RT"
        aws ec2 delete-route-table --region "$CURRENT_REGION" --route-table-id "$PUBLIC_SUBNET_RT"
        echo "Deleting private route table: $PRIVATE_SUBNET_RT"
        aws ec2 delete-route-table --region "$CURRENT_REGION" --route-table-id "$PRIVATE_SUBNET_RT"
        echo "Deleting VPC: $VPCID"
        aws ec2 delete-vpc --region "$CURRENT_REGION" --vpc-id "$VPCID"
        echo "All AWS resources deleted"
        ;;
    *)
        echo "Script complete. AWS resources retained."
        ;;
esac
2f5331e2506a689356a93b89ead371ead82876b0 | Shell | devinzou/network-feeds | /shadowsocks-tools/files/etc/init.d/ss-redir.sh | UTF-8 | 7,786 | 3.046875 | 3 | [] | no_license | #!/bin/sh /etc/rc.common
#
# Copyright (C) 2014 Justin Liu <rssnsj@gmail.com>
# https://github.com/rssnsj/network-feeds
#
# Init-script start priority (/etc/rc.common).
START=96
#
# Data source of /etc/gfwlist/china-banned:
# https://github.com/zhiyi7/ddwrt/blob/master/jffs/vpn/dnsmasq-gfw.txt
# http://code.google.com/p/autoproxy-gfwlist/
#
# Local ports and pid files used by the redirector, the private dnsmasq
# instance, and the local pdnsd TCP resolver.
SS_REDIR_PORT=7070
SS_REDIR_PIDFILE=/var/run/ss-redir-go.pid
DNSMASQ_PORT=7053
DNSMASQ_PIDFILE=/var/run/dnsmasq-go.pid
PDNSD_LOCAL_PORT=7453
# Read the UCI shadowsocks config, start ss-redir, install the iptables NAT
# rules that redirect matching TCP traffic through it, and set up a private
# dnsmasq (plus optional pdnsd) for anti-pollution DNS.
start()
{
local ss_enabled=`uci get shadowsocks.@shadowsocks[0].enabled 2>/dev/null`
local ss_server_addr=`uci get shadowsocks.@shadowsocks[0].server`
local ss_server_port=`uci get shadowsocks.@shadowsocks[0].server_port`
local ss_password=`uci get shadowsocks.@shadowsocks[0].password 2>/dev/null`
local ss_method=`uci get shadowsocks.@shadowsocks[0].method`
local ss_timeout=`uci get shadowsocks.@shadowsocks[0].timeout 2>/dev/null`
local ss_safe_dns=`uci get shadowsocks.@shadowsocks[0].safe_dns 2>/dev/null`
local ss_safe_dns_port=`uci get shadowsocks.@shadowsocks[0].safe_dns_port 2>/dev/null`
local ss_safe_dns_tcp=`uci get shadowsocks.@shadowsocks[0].safe_dns_tcp 2>/dev/null`
local ss_proxy_mode=`uci get shadowsocks.@shadowsocks[0].proxy_mode`
# $covered_subnets, $local_addresses are not required
local covered_subnets=`uci get shadowsocks.@shadowsocks[0].covered_subnets 2>/dev/null`
local local_addresses=`uci get shadowsocks.@shadowsocks[0].local_addresses 2>/dev/null`
# This service replaces the system pdnsd; keep it from starting on boot.
/etc/init.d/pdnsd disable 2>/dev/null
# -----------------------------------------------------------------
if [ "$ss_enabled" = 0 ]; then
echo "WARNING: Shadowsocks is disabled."
return 1
fi
if [ -z "$ss_server_addr" -o -z "$ss_server_port" ]; then
echo "WARNING: Shadowsocks not fully configured, not starting."
return 1
fi
# Fallback defaults for optional settings.
[ -z "$ss_proxy_mode" ] && ss_proxy_mode=S
[ -z "$ss_method" ] && ss_method=table
[ -z "$ss_timeout" ] && ss_timeout=60
[ -z "$ss_safe_dns_port" ] && ss_safe_dns_port=53
# Get LAN settings as default parameters
[ -f /lib/functions/network.sh ] && . /lib/functions/network.sh
[ -z "$covered_subnets" ] && network_get_subnet covered_subnets lan
[ -z "$local_addresses" ] && network_get_ipaddr local_addresses lan
local ss_gfwlist="china-banned"
ss_np_ipset="china" # Must be global variable
# -----------------------------------------------------------------
###### shadowsocks ######
ss-redir -b:: -l$SS_REDIR_PORT -s$ss_server_addr -p$ss_server_port \
-k"$ss_password" -m$ss_method -t$ss_timeout -f $SS_REDIR_PIDFILE || return 1
# IPv4 firewall rules
iptables -t nat -N shadowsocks_pre
iptables -t nat -F shadowsocks_pre
# Exempt local/reserved destinations; fall back to explicit RFC1918 /
# loopback / multicast rules when the "local" ipset does not exist.
iptables -t nat -A shadowsocks_pre -m set --match-set local dst -j RETURN || {
iptables -t nat -A shadowsocks_pre -d 10.0.0.0/8 -j RETURN
iptables -t nat -A shadowsocks_pre -d 127.0.0.0/8 -j RETURN
iptables -t nat -A shadowsocks_pre -d 172.16.0.0/12 -j RETURN
iptables -t nat -A shadowsocks_pre -d 192.168.0.0/16 -j RETURN
iptables -t nat -A shadowsocks_pre -d 127.0.0.0/8 -j RETURN
iptables -t nat -A shadowsocks_pre -d 224.0.0.0/3 -j RETURN
}
# Never redirect traffic to the Shadowsocks server itself (avoid loops).
iptables -t nat -A shadowsocks_pre -d $ss_server_addr -j RETURN
# Proxy-mode selection: G=global, S=skip china ipset, M=gfwlist+china,
# V=unblock-youku gfwlist only.
case "$ss_proxy_mode" in
G) : ;;
S)
iptables -t nat -A shadowsocks_pre -m set --match-set $ss_np_ipset dst -j RETURN
;;
M)
ipset create gfwlist hash:ip maxelem 65536
iptables -t nat -A shadowsocks_pre -m set ! --match-set gfwlist dst -j RETURN
iptables -t nat -A shadowsocks_pre -m set --match-set $ss_np_ipset dst -j RETURN
;;
V)
ss_np_ipset=""
ss_gfwlist="unblock-youku"
ipset create gfwlist hash:ip maxelem 65536
iptables -t nat -A shadowsocks_pre -m set ! --match-set gfwlist dst -j RETURN
;;
esac
# Redirect remaining TCP from each covered LAN subnet into ss-redir.
local subnet
for subnet in $covered_subnets; do
iptables -t nat -A shadowsocks_pre -s $subnet -p tcp -j REDIRECT --to $SS_REDIR_PORT
done
iptables -t nat -I PREROUTING -p tcp -j shadowsocks_pre
# -----------------------------------------------------------------
###### dnsmasq main configuration ######
mkdir -p /var/etc/dnsmasq-go.d
cat > /var/etc/dnsmasq-go.conf <<EOF
conf-dir=/var/etc/dnsmasq-go.d
EOF
[ -f /tmp/resolv.conf.auto ] && echo "resolv-file=/tmp/resolv.conf.auto" >> /var/etc/dnsmasq-go.conf
# -----------------------------------------------------------------
###### Anti-pollution configuration ######
# Route gfwlist domains to the safe DNS — via local pdnsd (TCP-only) when
# safe_dns_tcp is enabled, otherwise directly over UDP.
if [ -n "$ss_safe_dns" ]; then
if [ "$ss_safe_dns_tcp" = 1 ]; then
start_pdnsd "$ss_safe_dns"
awk -vs="127.0.0.1#$PDNSD_LOCAL_PORT" '!/^$/&&!/^#/{printf("server=/%s/%s\n",$0,s)}' \
/etc/gfwlist/$ss_gfwlist > /var/etc/dnsmasq-go.d/01-pollution.conf
else
awk -vs="$ss_safe_dns#$ss_safe_dns_port" '!/^$/&&!/^#/{printf("server=/%s/%s\n",$0,s)}' \
/etc/gfwlist/$ss_gfwlist > /var/etc/dnsmasq-go.d/01-pollution.conf
fi
else
echo "WARNING: Not using secure DNS, DNS resolution might be polluted if you are in China."
fi
# -----------------------------------------------------------------
###### dnsmasq-to-ipset configuration ######
# In M/V modes dnsmasq populates the gfwlist ipset from resolved addresses.
case "$ss_proxy_mode" in
M|V)
awk '!/^$/&&!/^#/{printf("ipset=/%s/gfwlist\n",$0)}' \
/etc/gfwlist/$ss_gfwlist > /var/etc/dnsmasq-go.d/02-ipset.conf
;;
esac
# -----------------------------------------------------------------
###### Start dnsmasq service ######
if ls /var/etc/dnsmasq-go.d/* >/dev/null 2>&1; then
dnsmasq -C /var/etc/dnsmasq-go.conf -p $DNSMASQ_PORT -x $DNSMASQ_PIDFILE || return 1
# IPv4 firewall rules
# Redirect LAN DNS queries (udp/53 to our addresses) into the private dnsmasq.
iptables -t nat -N dnsmasq_go_pre
iptables -t nat -F dnsmasq_go_pre
iptables -t nat -A dnsmasq_go_pre -p udp ! --dport 53 -j RETURN
local loc_addr
for loc_addr in $local_addresses; do
iptables -t nat -A dnsmasq_go_pre -d $loc_addr -p udp -j REDIRECT --to $DNSMASQ_PORT
done
iptables -t nat -I PREROUTING -p udp -j dnsmasq_go_pre
fi
}
# Tear down everything start() created: iptables chains, dnsmasq + pdnsd
# instances, generated config files, the gfwlist ipset, and ss-redir itself.
stop()
{
# Flush succeeds only if the chain exists; then unhook it from PREROUTING
# (every occurrence) and delete it.
if iptables -t nat -F dnsmasq_go_pre 2>/dev/null; then
while iptables -t nat -D PREROUTING -p udp -j dnsmasq_go_pre 2>/dev/null; do :; done
iptables -t nat -X dnsmasq_go_pre
fi
if [ -f $DNSMASQ_PIDFILE ]; then
kill -9 `cat $DNSMASQ_PIDFILE`
rm -f $DNSMASQ_PIDFILE
fi
rm -f /var/etc/dnsmasq-go.conf
rm -rf /var/etc/dnsmasq-go.d
stop_pdnsd
# Same pattern for the shadowsocks redirect chain.
if iptables -t nat -F shadowsocks_pre 2>/dev/null; then
while iptables -t nat -D PREROUTING -p tcp -j shadowsocks_pre 2>/dev/null; do :; done
iptables -t nat -X shadowsocks_pre 2>/dev/null
fi
ipset destroy gfwlist 2>/dev/null
if [ -f $SS_REDIR_PIDFILE ]; then
kill -9 `cat $SS_REDIR_PIDFILE`
rm -f $SS_REDIR_PIDFILE
fi
}
# Start a local pdnsd instance that resolves over TCP only, and redirect its
# outgoing TCP/53 traffic through the Shadowsocks tunnel.
# $1: upstream DNS server
start_pdnsd()
{
local safe_dns="$1"
# Google public DNS as fallback upstreams; the user-supplied server (if any)
# is tried first.
local tcp_dns_list="8.8.8.8,8.8.4.4"
[ -n "$safe_dns" ] && tcp_dns_list="$safe_dns,$tcp_dns_list"
killall -9 pdnsd 2>/dev/null && sleep 1
mkdir -p /var/etc /var/pdnsd
cat > /var/etc/pdnsd.conf <<EOF
global {
perm_cache=256;
cache_dir="/var/pdnsd";
pid_file = /var/run/pdnsd.pid;
run_as="nobody";
server_ip = 127.0.0.1;
server_port = $PDNSD_LOCAL_PORT;
status_ctl = on;
query_method = tcp_only;
min_ttl=15m;
max_ttl=1w;
timeout=10;
neg_domain_pol=on;
proc_limit=2;
procq_limit=8;
}
server {
label= "fwxxx";
ip = $tcp_dns_list;
timeout=6;
uptest=none;
interval=10m;
purge_cache=off;
}
EOF
/usr/sbin/pdnsd -c /var/etc/pdnsd.conf -d
# Access TCP DNS server through Shadowsocks tunnel
# (-N fails harmlessly if the chain already exists, skipping the rule setup).
if iptables -t nat -N pdnsd_output; then
iptables -t nat -A pdnsd_output -m set --match-set $ss_np_ipset dst -j RETURN
iptables -t nat -A pdnsd_output -p tcp -j REDIRECT --to $SS_REDIR_PORT
fi
iptables -t nat -I OUTPUT -p tcp --dport 53 -j pdnsd_output
}
# Remove the pdnsd OUTPUT redirect chain, kill pdnsd, and clean its files.
stop_pdnsd()
{
# Flush succeeds only if the chain exists; then unhook every OUTPUT
# reference before deleting the chain.
if iptables -t nat -F pdnsd_output 2>/dev/null; then
while iptables -t nat -D OUTPUT -p tcp --dport 53 -j pdnsd_output 2>/dev/null; do :; done
iptables -t nat -X pdnsd_output
fi
killall -9 pdnsd 2>/dev/null
rm -rf /var/pdnsd
rm -f /var/etc/pdnsd.conf
}
| true |
98f871a485f7a0b8c570c69339e0a083f2faf642 | Shell | bentastic27/homelab-ansible | /control.sh | UTF-8 | 804 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Dispatcher for the homelab Ansible playbooks.
# Always operate from the directory containing this script.
cd "$(dirname "$0")"

case "$1" in
  init)
    echo "Add password like this: (good chance to copy/paste)"
    echo "user_pass: somepassword"
    echo "sudo_pass: somepassword"
    sleep 7
    ansible-vault create passwd.yaml
    ;;
  reset)
    ansible-playbook -i hosts --ask-vault-pass --extra-vars '@passwd.yaml' reset_homelab.yaml
    ;;
  kubeadm_cluster)
    ansible-playbook -i hosts --ask-vault-pass --extra-vars '@passwd.yaml' reset_homelab.yaml install_kubeadm_cluster.yaml
    ;;
  reset_rancher)
    ansible-playbook -i hosts --ask-vault-pass --extra-vars '@passwd.yaml' reset_rancher.yaml
    ;;
  *)
    # Unknown or missing subcommand: print the available commands.
    echo "available commands:"
    echo
    echo "init"
    echo "reset"
    echo "kubeadm_cluster"
    echo "reset_rancher"
    echo
    ;;
esac
| true |
44dc9fda9ded047d51293e856c2138e72eecdea5 | Shell | inkarkat/vim-writebackup | /tests/setup | UTF-8 | 596 | 3.328125 | 3 | [
"Vim"
] | permissive | #!/bin/sh
# Resolve the absolute directory of this script: for "." fall back to the
# PATH lookup of $0, otherwise canonicalise via cd + $PWD.
scriptDir=$(dirname -- "$0")
[ "$scriptDir" = "." ] && readonly scriptDir=$(dirname -- $(which -- "$0")) || readonly scriptDir=$(cd "$scriptDir" && echo "${PWD}") || { echo >&2 "ERROR: Cannot determine absolute script dirspec!"; exit 1; }
[ -d "$scriptDir" ] || { echo >&2 "ERROR: Cannot determine script directory!"; exit 1; }

# Recreate a clean test sandbox under $TMPDIR (or /tmp).
testdir="${TMPDIR:-/tmp}/WriteBackupTest"
[ -d "$testdir" ] && { rm -rf "$testdir" || exit 1; }

# Note: The zip archive automatically preserves file timestamps for correct
# :WriteBackupListVersions output.
unzip "${scriptDir}/testdata.zip" -d "$testdir"
| true |
c3e747999b3d56829eb9d837f9ee1eb0d3f137d3 | Shell | wdomitrz/gfootball_zpp | /gcp/run.sh | UTF-8 | 1,738 | 3.46875 | 3 | [] | no_license | #!/bin/bash
set -e

# Require 4 positional args; the 5th (region) is optional.
if [ "$#" -ne "4" ] && [ "$#" -ne "5" ] ; then
  echo "JOB_NAME_PREFIX BUCKET_NAME AI_PLATFORM_CONFIG_FILE ENV_CONFIG_FILE REGION(default=us-central1)"
  exit 1
fi
export JOB_NAME_PREFIX="$1"
BUCKET_NAME="$2"
AI_PLATFORM_CONFIG_FILE="$3"
ENV_CONFIG_FILE="$4"
if [ "$#" -ne "5" ] ; then
  REGION="us-central1"
else
  REGION="$5"
fi

# Absolute directory of this script; the rendered job config goes to /tmp.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CONFIG_OUT_FILE="/tmp/${JOB_NAME_PREFIX}.yaml"

# Training job parameters consumed by start_training and the config template.
export CONFIG=football
export ENVIRONMENT=football
export AGENT=vtrace
export WORKERS=25
export ACTORS_PER_WORKER=8

PROJECT_ID=$(gcloud config get-value project)
export IMAGE_URI=gcr.io/$PROJECT_ID/seed
# Build and push the docker image, ensure the GCS bucket exists, then submit
# the training job to AI Platform and stream its logs.
start_training () {
  DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
  $DIR/../docker/build.sh
  $DIR/../docker/push.sh
  # Create bucket if doesn't exist.
  gsutil ls "gs://${BUCKET_NAME}" || gsutil mb "gs://${BUCKET_NAME}"
  JOB_NAME="${JOB_NAME_PREFIX}"
  # Start training on AI platform.
  # Arguments after the bare `--` are forwarded to the training program.
  gcloud beta ai-platform jobs submit training ${JOB_NAME} \
    --project=${PROJECT_ID} \
    --job-dir "gs://${BUCKET_NAME}/${JOB_NAME}" \
    --region "$REGION" \
    --config "$CONFIG_OUT_FILE" \
    --stream-logs -- --environment=${ENVIRONMENT} --agent=${AGENT} \
    --actors_per_worker=${ACTORS_PER_WORKER} --workers=${WORKERS} --
}
# Encode the environment config and splice it, plus the exported variables,
# into the AI Platform config template before submitting the job.
ENV_CONFIG=$(python3 $DIR/../gfootball_zpp/utils/config_encoder.py --f "$ENV_CONFIG_FILE")
sed "s|ENV_CONFIG_HERE|${ENV_CONFIG}|g" "$AI_PLATFORM_CONFIG_FILE" > "$CONFIG_OUT_FILE"
# NOTE(review): `-i=""` looks like a mistaken BSD/macOS portability hack —
# on GNU sed it is parsed as an in-place suffix of "=", leaving backup files
# named "<file>="; confirm which sed this is meant to run under.
sed -i="" "s|\${IMAGE_URI}|${IMAGE_URI}|g" "$CONFIG_OUT_FILE"
sed -i="" "s|\${CONFIG}|${CONFIG}_${JOB_NAME_PREFIX}|g" "$CONFIG_OUT_FILE"
sed -i="" "s|\${WORKERS}|${WORKERS}|g" "$CONFIG_OUT_FILE"
start_training
| true |
a2c049166022b46ed799f0aa85dd406b77689b47 | Shell | gurza/cli-toolbox | /firebird/unarchive_firebird.sh | UTF-8 | 1,225 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# This script extracts database file from Firebird archive (.pka
# or .pka, .p01, .p02, ...), using zip, unzip utilities.
# The input parameter is the directory where the database archive is located.
#

# Print usage and abort; used when no data directory argument is given.
function help {
    echo "Usage: $(basename $0) <data-dir>"
    exit 1
}

# Directory holding the Firebird archive files (required first argument).
data_dir=${1}
[ -z "${data_dir}" ] && help
for file in "$data_dir"/*.p[0-9]*
do
if [[ -f $file ]] ; then
newfile=$(echo $file | sed -e 's/\.p\([0-9]*\)$/.z\1/')
mv -v "$file" "$newfile"
fi
done
for file in "$data_dir"/*.pka
do
if [[ -f $file ]] ; then
mv -v -- "$file" "${file%.pka}.zip"
fi
done
is_splitted_archive=false
for file in "$data_dir"/*.z[0-9]*
do
if [[ -f $file ]] ; then
is_splitted_archive=true
break
fi
done
for file in "$data_dir"/*.zip
do
if [[ ! -f $file ]] ; then
continue
fi
archive_file_name=$(basename "$file")
if [ "$is_splitted_archive" = true ] ; then
archive_file_name=$(basename "${file%.zip}_full.zip")
zip -FF "$file" --out "$data_dir/$archive_file_name"
fi
zip -T "$data_dir/$archive_file_name"
unzip "$data_dir/$archive_file_name" -d "$data_dir"
break
done
exit 0
| true |
e00ed30ecdbcb7acc479d8c47b0bd96be49a7294 | Shell | denniscaudell/dot-files | /Scripts/colortable.sh | UTF-8 | 399 | 3.421875 | 3 | [] | no_license | #!/bin/bash
#
# by painless - https://bbs.archlinux.org/viewtopic.php?pid=1509183#p1509183
#
for i in $(seq -f %03g 0 255)
do
j=$(echo $i | sed 's/^0*//')
printf "\e[48;5;${i}m\e[38;5;15m ${i} "
printf "\e[48;5;0m\e[38;5;${i}m ${i} "
case "$i" in
"007") printf "\n" ;;
"015") printf "\n" ;;
esac
if [[ $j -ge 21 && $(( (${j}-15) % 6 )) -eq 0 ]]
then
printf "\n"
fi
done
| true |
0a75b3997f36f32ae7bc416e50d8fd6beb39686f | Shell | Windyo/GhostScripts | /masspublish.sh | UTF-8 | 3,405 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#declare vars
BlogAdress=""
GhostLogin=""
GhostPassword=""
ClientSecret=""
BearerToken=""
Author=""
CustomFilters=""
AutomaticMode="0"
###FUNCTIONS
function masspublish
{
if [ "$AutomaticMode" = "0" ]; then
#prompt for user token
echo "Hi there !"
echo "This script will publish all the drafts corresponding to the filter in Ghost."
echo "You need to login for this to work. No data is stored."
echo "Please type in your blog adress (http://blog.mysite.com)"
read BlogAdress
echo "Please type in your username"
read GhostLogin
echo "and now your password:"
read GhostPassword
echo "By default, the filter is set to publish all posts made by the logged in user, in status Draft."
echo "If you want, you may specify override the filters here."
echo "Write the filters in GQL. For example, the default filter used here is status:draft"
echo "More information is available on https://api.ghost.org/docs/filter"
read CustomFilters
else
echo "AutomaticMode is on, assuming variables are already set..."
fi
#Login and get the BearerToken
ClientSecret=`curl $BlogAdress/ghost/signin/ | grep -o -P '(?<=env-clientSecret" content=").*(?=" data-type="string)'`
BearerToken=`curl --data grant_type=password --data username=$GhostLogin --data password=$GhostPassword --data client_id=ghost-admin --data client_secret=$ClientSecret $BlogAdress/ghost/api/v0.1/authentication/token | grep -o -P '(?<="access_token":").*(?=","refresh_token")'`
Author=`curl --header "Authorization: Bearer $BearerToken" $BlogAdress/ghost/api/v0.1/users/me/ | grep -o -P '(?<="id":).*(?=,"uuid")'`
# read -n1 -r -p "Logged in, got the bearer token ! Your Author id is $Author" key
#Check if custom filters were set and use them if so to query the list of posts to publish
if [ -z "$CustomFilters" ]
then
echo "Using default filters"
declare -a PostsToPublish=`curl --header "Authorization: Bearer $BearerToken" $BlogAdress/ghost/api/v0.1/posts/?filter=\(status:draft\) | jq '.posts[] | .id'`
else
echo "Using custom filters"
declare -a PostsToPublish=`curl --header "Authorization: Bearer $BearerToken" $BlogAdress/ghost/api/v0.1/posts/?filter=\($CustomFilters\) | jq '.posts[] | .id'`
fi
echo "Posts to Publish ids: $PostsToPublish"
#Loop over the posts and publish them
for ThisPostId in $PostsToPublish
do
curl --header "Authorization: Bearer $BearerToken" -H "Content-Type: application/json" -X PUT -d '{"posts":[{"status":"published"}]}' $BlogAdress/ghost/api/v0.1/posts/$ThisPostId
done
echo "Published all drafts !"
}
function usage
{
echo "usage: masspublish.sh [-a (http://blog.mysite.eu ghostlogin ghostpassword customfilters)] | [-h]]"
}
###ARGUMENTS
while [ "$1" != "" ]; do
case $1 in
-a | --automatic ) shift
AutomaticMode=1
BlogAdress=$1
GhostLogin=$2
GhostPassword=$3
CustomFilters=$4
masspublish
;;
-h | --help ) usage
exit
;;
* ) exit 1
esac
shift
done
###MAIN
masspublish | true |
4940b4c52082ce7bb16ef83884971b09684f8363 | Shell | anthonydillon/docker-django | /entrypoint | UTF-8 | 1,346 | 4.0625 | 4 | [] | no_license | #! /bin/bash
set -euo pipefail
# If no default port is set for this project, set it to 8000
if [ -z "${PORT:-}" ]; then
PORT=8000
fi
run_command=$@
# Update any python requirements
if [ -f $REQUIREMENTS_PATH ]; then
if [[ ! -f "${REQUIREMENTS_HASH}" ]] || [[ "$(cat ${REQUIREMENTS_HASH})" != $(md5sum $REQUIREMENTS_PATH) ]]; then
echo "Installing python modules." >> /dev/stderr
pip install --requirement $REQUIREMENTS_PATH # Install new dependencies
md5sum $REQUIREMENTS_PATH > $REQUIREMENTS_HASH
fi
else
echo "$REQUIREMENTS_PATH not found - not installing requirements" >> /dev/stderr
fi
# Check for Django
if ! python -c "import django"; then
echo "Error: Django not found." >> /dev/stderr
exit 1
fi
# Run manage.py commands
if [ -n "$run_command" ]; then
# If we have arguments, pass them to manage.py
python manage.py $run_command
else
# By default, prepare database and run server
if echo "execfile('/db-check')" | ./manage.py shell -i bpython 2> /tmp/dberror; then
# Wait for database to come up before running migrate
python manage.py migrate --noinput
else
# If we encountered an unrecognised error, print it and exit
if [ $? -ne 9 ]; then cat /tmp/dberror && exit 1; fi
fi
python manage.py runserver 0.0.0.0:${PORT}
fi
| true |
33ea4e14e6f2c6225c0135001ade58ff859cc4c8 | Shell | worldcryptoforum/worldcryptoforum | /contrib/init/worldcryptoforumd.init | UTF-8 | 1,418 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# worldcryptoforumd The WCF core server.
#
#
# chkconfig: 345 80 20
# description: worldcryptoforumd
# processname: worldcryptoforumd
#
# Source function library.
. /etc/init.d/functions
# you can override defaults in /etc/sysconfig/worldcryptoforumd, see below
if [ -f /etc/sysconfig/worldcryptoforumd ]; then
. /etc/sysconfig/worldcryptoforumd
fi
RETVAL=0
prog=worldcryptoforumd
# you can override the lockfile via BITCOIND_LOCKFILE in /etc/sysconfig/worldcryptoforumd
lockfile=${BITCOIND_LOCKFILE-/var/lock/subsys/worldcryptoforumd}
# worldcryptoforumd defaults to /usr/bin/worldcryptoforumd, override with BITCOIND_BIN
bitcoind=${BITCOIND_BIN-/usr/bin/worldcryptoforumd}
# worldcryptoforumd opts default to -disablewallet, override with BITCOIND_OPTS
bitcoind_opts=${BITCOIND_OPTS}
start() {
echo -n $"Starting $prog: "
daemon $DAEMONOPTS $bitcoind $bitcoind_opts
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch $lockfile
return $RETVAL
}
stop() {
echo -n $"Stopping $prog: "
killproc $prog
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && rm -f $lockfile
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status $prog
;;
restart)
stop
start
;;
*)
echo "Usage: service $prog {start|stop|status|restart}"
exit 1
;;
esac
| true |
07ccd4b844886da2c1b04a19e3feea08ab77582b | Shell | cenk1cenk2/docker-samba | /init-env.sh | UTF-8 | 1,603 | 3.765625 | 4 | [] | no_license | #!/bin/bash
echo "init-env.sh@sh: v2.1, 20190321"
## Variables
# Write down the data required in .env file here for initiation.
ENVFILENAME=.env
ENVFILECONTENTS=(
"TZ="
"SRVNAME="
"# USERS: required arg: \"<username>;<passwd>\""
"# <username> for user"
"# <password> for user"
"# [ID] for user"
"# [group] for user"
"# multiple user format: example1;badpass:example2;badpass"
"USERS="
"# MOUNTS: Configure a share"
"# required arg: \"<name>;</path>\""
"# <name> is how it's called for clients"
"# <path> path to share"
"# NOTE: for the default value, just leave blank"
"# [browsable] default:'yes' or 'no'"
"# [readonly] default:'yes' or 'no'"
"# [guest] allowed default:'yes' or 'no'"
"# [users] allowed default:'all' or list of allowed users"
"# [admins] allowed default:'none' or list of admin users"
"# [writelist] list of users that can write to a RO share"
"# [comment] description of share"
"# multiple mount format: example1 private share;/example1;no;no;no;example1:example2 private share;/example2;no;no;no;example2"
"MOUNTS="
"WORKGROUPNAME=WORKGROUP"
)
## Script
echo "Initiating ${ENVFILENAME} file."; if [[ ! -f ${ENVFILENAME} ]] || ( echo -n ".env file already initiated. You want to override? [ y/N ]: " && read -r OVERRIDE && echo ${OVERRIDE::1} | grep -iqF "y" ); then echo "Will rewrite the .env file with the default one."; > ${ENVFILENAME} && for i in "${ENVFILECONTENTS[@]}"; do echo $i >> ${ENVFILENAME}; done; echo "All done."; else echo "File already exists with no overwrite permission given."; echo "Not doing anything."; fi | true |
e64dfc76e9faf9f265539948ddf710e904ca2424 | Shell | kalyansarwa/kubectl | /setkube.sh | UTF-8 | 6,590 | 3.203125 | 3 | [] | no_license | #SERVER=localhost
PORT=6443
USER=root
PASSWORD=B0h1caS4all
NS=com-att-ocnp-mgmt
GL=name
APPNS=$NS
DS=glusterfs
TOKEN="ks4080@csp.att.com:enc:JwJUQU54q5Gxrmdo9gt7OKwJYkAAczD"
USETOKEN="false"
export http_proxy=http://one.proxy.att.com:8080
export https_proxy=$http_proxy
export all_proxy=$http_proxy
function usage() {
echo -e "USAGE: ${0} [-h] [-s SERVER] [-p PORT]
[-u USER] [-P PASSWORD] [-n NAMESPACE] [-g GLUSTERLABEL] [-t TOKEN] [-T (if token is required to be used)]\n"
}
while getopts ':s:p:u:P:n:g:t:hT' opt ; do
case "$opt" in
h) usage; return 0 ;;
s) SERVER="$OPTARG" ;;
p) PORT="$OPTARG" ;;
u) USER="$OPTARG" ;;
P) PASSWORD="$OPTARG" ;;
n) NS="$OPTARG" ;;
g) GL="$OPTARG" ;;
t) TOKEN="$OPTARG" ;;
T) USETOKEN="true" ;;
\?) echo "Invalid option: -$OPTARG" >&2 ;;
esac
done
shift $((OPTIND-1))
# The following commands can be kept as an alias or in a script file.
# Execute these to set the context for the kubectl commands
echo "Using SERVER:PORT as $SERVER:$PORT"
kubectl config set-cluster c1 --server=https://$SERVER:$PORT --insecure-skip-tls-verify=true
if [[ "$USETOKEN" == "true" ]]; then
kubectl config set-credentials rc --token=$TOKEN
else
kubectl config set-credentials rc --username=$USER --password=$PASSWORD
fi
kubectl config set-context c1-context --cluster=c1 --user=rc --namespace=$NS
kubectl config use-context c1-context
# Execute the following to view your config
kubectl config view
function pod() {
n=$1
POD=$(kubectl -n com-att-ocnp-mgmt get pods -l $GL=glusterfs -o wide | grep -i running | head -$n | tail -1 | awk '{print $1}')
echo $POD
}
function gstats() {
kubectl -n com-att-ocnp-mgmt get pods -l name=glusterfs -o wide --no-headers > tmp.txt;
echo "Number of Nodes in the cluster: $(kubectl get nodes --no-headers | wc -l)"
while read i; do
node=$(echo $i | awk '{print $7}'); ip=$(echo $i | awk '{print $6}'); pod=$(echo $i | awk '{print $1}');
echo -n "$node : $ip : $pod : "
peers=$(kubectl -n com-att-ocnp-mgmt exec $pod -- gluster peer status | grep -i peers | awk '{print $4}');
con=$(kubectl -n com-att-ocnp-mgmt exec $pod -- gluster peer status | grep "Peer in Cluster (Connected)" | wc -l);
vols=$(kubectl -n com-att-ocnp-mgmt exec $pod -- gluster volume list 2>/dev/null | grep -v 'No' | wc -l);
echo "Peers=$peers : Connected=$con : Volumes=$vols";
done < tmp.txt;
rm -fr tmp.txt;
}
function gfsleader() {
leader=$(kubectl exec $(kubectl get pods -l name=gfshelper --no-headers | head -1 | awk '{print $1}') -c gfshelper -- bash -c "curl --silent localhost:4040 | jq -rc '.name'" | cut -d'.' -f1)
GFSPOD=$(kubectl get pods -l name=gfshelper --no-headers -o wide | grep $leader | awk '{print $1}')
}
pod 1
alias start-mongo='mongod --auth --bind_ip 127.0.0.1 --dbpath /Users/skalyan/Tools/mongo-data/data/db/'
alias allpods='kubectl get pods --all-namespaces'
alias apiserverlogs='kubectl -n kube-system logs $(kubectl -n kube-system get pods -l k8s-app=kube-apiserver -o name | head -1)'
alias ns='kubectl get ns'
alias nodes='kubectl get nodes -o wide --no-headers'
alias ds='kubectl get ds --all-namespaces'
alias pvs='kubectl get pv --show-labels'
alias pods='kubectl -n com-att-ocnp-mgmt get pods -o wide -l glusterfs'
alias hpods='kubectl -n com-att-ocnp-mgmt get pods -o wide -l heketi'
alias shglu='kubectl -n com-att-ocnp-mgmt exec -it $POD -- bash'
alias pc='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster peer status | grep -i peers'
alias gvs='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume list'
alias mps='kubectl -n com-att-ocnp-mgmt get po $POD -o yaml | grep "mountPath:" | grep -i brick'
alias ubs='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume info | grep -i brick | grep -iv bricks | cut -d" " -f2 | sort'
alias glogs='kubectl -n com-att-ocnp-mgmt logs $POD'
alias peers='for i in $(kubectl -n com-att-ocnp-mgmt get pods -l $GL=glusterfs -o name | cut -d"/" -f2); do echo -n "$i : "; kubectl -n com-att-ocnp-mgmt exec -it $i -- gluster peer status | grep -i peers; done'
alias detach-peers='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster peer detach'
alias volinfo='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume info'
alias brickinfo='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume info | egrep -i "volume name:|brick" | grep -v Bricks'
alias volstatus='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume status'
alias volstart='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume start'
alias volstop='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster --mode=script volume stop'
alias voldelete='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster --mode=script volume delete'
alias volcreate='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume create'
alias volremove-brick='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster --mode=script volume remove-brick'
alias offbricks='volstatus | grep -i brick | grep N | cut -d" " -f2'
alias volquota='kubectl -n com-att-ocnp-mgmt exec -it $POD -- gluster volume quota'
alias apods='kubectl -n $APPNS get pods -o wide'
alias apodevents='kubectl -n $APPNS describe po $APOD | grep -i events -A 20'
alias eps='kubectl -n $APPNS get ep,svc | grep gluster'
alias shapp='kubectl -n $APPNS exec -it $APOD bash'
alias pvcs='kubectl -n $APPNS get pvc'
alias desapod='kubectl -n $APPNS describe pods $APOD'
alias apodlogs='kubectl -n $APPNS logs $APOD'
alias pvfails='kubectl -n $APPNS describe pods | grep FailedMount'
alias hcli='kubectl -n com-att-ocnp-mgmt exec -it $(kubectl -n com-att-ocnp-mgmt get pods -l heketi -o name | cut -d"/" -f2) -- heketi-cli -s http://localhost:8080 --user admin --secret $(kubectl -n com-att-ocnp-mgmt exec -it $(kubectl -n com-att-ocnp-mgmt get pods -l heketi -o name | cut -d"/" -f2) -- cat /secret/key)'
alias hdel='kubectl -n com-att-ocnp-mgmt delete pods $(kubectl -n com-att-ocnp-mgmt get pods -l heketi -o name | cut -d"/" -f2)'
alias hdelall='kubectl -n com-att-ocnp-mgmt delete all,secret,cm -l heketi; kubectl -n com-att-ocnp-mgmt delete secret heketi-db-backup'
alias hall='kubectl -n com-att-ocnp-mgmt get all,secret,cm -l heketi; kubectl -n com-att-ocnp-mgmt get secret heketi-db-backup'
alias hsh='kubectl -n com-att-ocnp-mgmt exec -it $(kubectl -n com-att-ocnp-mgmt get pods -l heketi -o name | cut -d"/" -f2) -- bash'
alias hlogs='kubectl -n com-att-ocnp-mgmt logs $(kubectl -n com-att-ocnp-mgmt get pods -l heketi -o name | cut -d"/" -f2)'
alias gfslogs='gfsleader; echo "Leader: $leader; Leader-GfsHelper: $GFSPOD"; kubectl logs -c gfshelper $GFSPOD'
| true |
3e16da3fccbca600b5642e38a72a00b227ebfd32 | Shell | sidaf/scripts | /hashcat_rules_attack.sh | UTF-8 | 2,965 | 3.78125 | 4 | [] | no_license | #!/bin/bash
set -o errexit -o nounset -o pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
DEFAULT_WORDLISTS=("$DIR/wordlists/passwords/rockyou.txt.gz")
DEFAULT_RULES=("$DIR/rules/hob064_2.rule" "$DIR/rules/best64.rule" "$DIR/rules/InsidePro-PasswordsPro.rule" "$DIR/rules/T0XlC.rule" "$DIR/rules/d3ad0ne.rule" "$DIR/rules/OneRuleToRuleThemAll.rule" "$DIR/rules/d3adhob0_2.rule")
#HASHCAT="$DIR/hashcat-5.1.0/hashcat"
HASHCAT="hashcat"
usage() {
echo "================================================================================"
echo "usage: $(basename "$0") <hash_file> <hash_type> [dictionary1 [dictionary2]]"
echo
echo "Perform a rules attack against a list of hashes using the hashcat tool."
echo
echo "Required arguments:"
echo "hash_file - a file containing a list of hashes"
echo "hash_type - the hash type, see 'hashcat -h' for a list of possible values"
echo
echo "Optional arguments:"
echo "dictionary - use a supplied dictionary (or multiple), otherwise defaults to"
echo
printf ' %s\n' "${DEFAULT_WORDLISTS[@]}"
echo
echo "HASHCAT_RULES - Environment variable to set rules, otherwise defaults to"
echo
printf ' %s\n' "${DEFAULT_RULES[@]}"
echo
echo "================================================================================"
}
args() {
if [ $# -lt 1 ]; then
usage
exit 1
fi
case $1 in
"" | "-h" | "--help")
usage
exit 0
esac
HASH_FILE=${1}
CRED_FILE="$HASH_FILE.cracked"
if [[ ! -f "$HASH_FILE" ]]; then
echo "$HASH_FILE not found!"
exit 1
fi
HASH_TYPE=${2:-}
if [[ ! -n $HASH_TYPE ]]; then
usage
exit 1
else
if ! [[ "$HASH_TYPE" =~ ^[0-9]+$ ]]; then
echo "The hash type needs to be an integer value!"
exit 1
fi
fi
shift 2
WORDLISTS=("$@")
if [[ ${#WORDLISTS[@]} -eq 0 ]]; then
WORDLISTS=("${DEFAULT_WORDLISTS[@]}")
fi
for WORDLIST in "${WORDLISTS[@]}"; do
if [[ ! -f "$WORDLIST" ]]; then
echo "$WORDLIST not found!"
exit 1
fi
done
if [ -z ${HASHCAT_DEVICE_ID+x} ]; then
HASHCAT_DEVICE_ID=3
fi
if [ -z ${HASHCAT_RULES+x} ]; then
HASHCAT_RULES=("${DEFAULT_RULES[@]}")
fi
}
# main
args "$@"
CRACK_START=$(date +%s)
for RULE in "${HASHCAT_RULES[@]}"; do
for WORDLIST in "${WORDLISTS[@]}"; do
echo "$ hashcat -d $HASHCAT_DEVICE_ID -O -a 0 -m $HASH_TYPE --outfile $CRED_FILE $HASH_FILE $WORDLIST -r $RULE"
set +e
$HASHCAT -d "$HASHCAT_DEVICE_ID" -O -a 0 -m "$HASH_TYPE" --outfile "$CRED_FILE" "$HASH_FILE" "$WORDLIST" -r "$RULE"
set -e
echo
if [[ $? -lt 0 ]] || [[ $? -gt 1 ]]; then
exit $?
fi
done
done
CRACK_STOP=$(date +%s)
CRACK_RUNTIME=$(( $CRACK_STOP - $CRACK_START ))
echo "================================================================================"
echo "Runtime: $CRACK_RUNTIME seconds"
echo "================================================================================"
| true |
bb13449b8bb2b7b1039173392091605fc528b882 | Shell | SouvikDey9510/GP_pyspark-dataproc-gcs-to-bigquery | /dataproc_cluster_scripts/create.sh | UTF-8 | 940 | 3.25 | 3 | [] | no_license | #!/bin/bash
while [[ "$#" -gt 0 ]]; do
case $1 in
-h|--help) echo "Please assign --cluster_name <cluster_name> --region <region> --gcs_uri <initialization_action_location>"; exit 1;;
-c|--cluster_name) cluster_name="$2"; shift ;;
-r|--region) region="$2"; shift ;;
-g|--gcs_uri) gcs_uri="$2"; shift ;;
*) echo "Unknown parameter passed: $1"; exit 1 ;;
esac
shift
done
[ -z $cluster_name ] && echo "--cluster_name is missing"
[ -z $region ] && echo "--region is missing"
[ -z $gcs_uri ] && echo "--gcs_uri is missing"
gcloud dataproc clusters $cluster_name \
--image-version=1.5 \
--master-machine-type n1-standard-2 \
--master-boot-disk-size 20GB \
--region=$region \
--num-workers 2 \
--worker-boot-disk-size 20GB \
--worker-machine-type n1-standard-2 \
--metadata='PIP_PACKAGES=pandas==1.2.2 pandas-gbq==0.14.1' \
--initialization-actions=$gcs_uri | true |
49ff60b1187223d65f7c38d2cd44ba543c03253c | Shell | sgleizes/xsh-modules | /zsh/zinit/@interactive.zsh | UTF-8 | 1,730 | 3.1875 | 3 | [
"Unlicense"
] | permissive | #
# Zinit module for zsh.
# https://github.com/zdharma/zinit
#
# Paths for the zinit installation.
typeset -gAH ZINIT
ZINIT[HOME_DIR]="${XDG_CONFIG_HOME:-$HOME/.config}/zinit"
ZINIT[ZCOMPDUMP_PATH]="${XDG_CACHE_HOME:-$HOME/.cache}/zsh/zcompdump"
ZPFX="$ZINIT[HOME_DIR]/prefix"
# Install zinit if necessary.
if [[ ! -f $ZINIT[HOME_DIR]/bin/zinit.zsh ]]; then
print -P "%F{33}:: Installing zdharma/zinit...%f"
command mkdir -p $ZINIT[HOME_DIR]
command git clone 'https://github.com/zdharma/zinit' $ZINIT[HOME_DIR]/bin \
&& print -P "%F{34}:: Installation successful%f%b" \
|| { print -P "%F{160}:: Installation failed%f%b" && return 1 }
# Patch scope: apply available patches to zinit.
patch_dir="${0:h}/patch"
function {
# Apply patch to git-process-output.
local patch="$patch_dir/zinit-git-process-output.patch"
patch -d $ZINIT[HOME_DIR]/bin -p1 -r- -suN <$patch |& grep -q 'FAILED' \
&& print -P ":: %F{red}ERROR%f: failed to apply $patch"
}
unset patch_dir
fi
# Automatically compile sourced scripts.
# To enable, run `zinit module build`.
# To see the list of sourced files and their loading time, run `zpmod source-study -l`.
# module_path+=($ZINIT[HOME_DIR]/bin/zmodules/Src)
# zmodload -s zdharma/zplugin
# Source zinit.
source $ZINIT[HOME_DIR]/bin/zinit.zsh
# Remove redundant zinit aliases and functions.
unalias zini zpl zplg
unfunction zpcdclear zpcdreplay zpcompdef zpcompinit zplugin
# Install zinit annexes.
zinit light-mode depth=1 for \
zinit-zsh/z-a-bin-gem-node
# Install zinit consolette for plugin management.
zinit light-mode lucid depth=1 trackbinds for \
blockf bindmap'^O^Z -> hold' zdharma/zui \
bindmap'^O^J -> ^O^Z' zinit-zsh/zinit-console
| true |
6f6431bea6982583621910fc84e059f618067388 | Shell | g2s3-2018/hippylib-hub | /update_lab.sh | UTF-8 | 284 | 3.140625 | 3 | [] | no_license | #!/bin/bash
set -u
echo "Update" "$1"
cd /home/fenics/Installations/labs && git pull
for ((i = 0; i < ${NUMBER_OF_USERS}; i++));
do
cp -rf /home/fenics/Installations/labs/Labs/$1 /home/user${i}/Labs/
chown -R user${i} /home/user${i}/Labs/$1
chmod -R u+rwX /home/user${i}/Labs/$1
done
| true |
18ed0b7a9dcea736cfde6ce3f76fc5e12ac97f5c | Shell | rajatmshukla/DevOpsTraining | /bashscripting/functiondemo.sh | UTF-8 | 277 | 2.921875 | 3 | [] | no_license | function Hello()
{
echo "My First Function"
}
Hello
Hello
: 'MyFunc2()
{
result=$(($1+$2))
echo "Result is $result"
}
MyFunc2 10 30'
function MyFunc2()
{
result=$(($1+$2))
echo $result
#return result
}
value=$(MyFunc2 10 30)
echo "addition $value" | true |
59ba71a1e6942156168b670cc1cae104ab40b10f | Shell | Cloudxtreme/xztech | /work/peer1/kickstart/install/var/www/kickstart/taskfiles.new/devices/1000/0079/burnin.sh | UTF-8 | 692 | 2.765625 | 3 | [] | no_license | burnin() {
# Okay, this is not really doing anything right now, but it is a step in the right
# direction. The idea is to modularize the RAID burnin (and other add-on products
# for that matter. We want any burnin specific stuff into a function that can be called
# so that we don't need to hack up this taskfile everytime there is a new product
postlog "INFO" "Detected LSI MegaRaid SAS 8308ELP"
tar -xzvf /tmp/cli.tgz -C /tmp/
if [[ -x /tmp/MegaCli ]] ; then
postlog "INFO" "Got MegaRaid cli from the tarball"
else
postlog "ERR" "Failed to get MegaRaid cli from the tarball"
fi
}
| true |
7ef3ca4dc869f82c562cd6a186fb27e8c08f5a42 | Shell | ChinaFengliang/ci-scripts | /boot-app-scripts/jenkins_boot_start.sh | UTF-8 | 15,550 | 3.34375 | 3 | [] | no_license | #!/bin/bash -ex
function init_build_option() {
SKIP_LAVA_RUN=${SKIP_LAVA_RUN:-"false"}
}
function init_workspace() {
WORKSPACE=${WORKSPACE:-/home/ts/jenkins/workspace/estuary-ci}
mkdir -p ${WORKSPACE}
}
function init_input_params() {
TREE_NAME=${TREE_NAME:-"open-estuary"}
VERSION=${VERSION:-""}
GIT_DESCRIBE=${GIT_DESCRIBE:-""}
JENKINS_JOB_INFO=$(expr "${BUILD_URL}" : '^http.*/job/\(.*\)/$' | sed "s#/#-#g")
}
function parse_params() {
pushd ${CI_SCRIPTS_DIR}
: ${SHELL_PLATFORM:=`python configs/parameter_parser.py -f config.yaml -s Build -k Platform`}
: ${SHELL_DISTRO:=`python configs/parameter_parser.py -f config.yaml -s Build -k Distro`}
: ${BOOT_PLAN:=`python configs/parameter_parser.py -f config.yaml -s Jenkins -k Boot`}
: ${TEST_PLAN:=`python configs/parameter_parser.py -f config.yaml -s Test -k Plan`}
: ${TEST_REPO:=`python configs/parameter_parser.py -f config.yaml -s Test -k Repo`}
: ${TEST_LEVEL:=`python configs/parameter_parser.py -f config.yaml -s Test -k Level`}
: ${LAVA_SERVER:=`python configs/parameter_parser.py -f config.yaml -s LAVA -k lavaserver`}
: ${LAVA_USER:=`python configs/parameter_parser.py -f config.yaml -s LAVA -k lavauser`}
: ${LAVA_STREAM:=`python configs/parameter_parser.py -f config.yaml -s LAVA -k lavastream`}
: ${LAVA_TOKEN:=`python configs/parameter_parser.py -f config.yaml -s LAVA -k TOKEN`}
: ${FTP_SERVER:=`python configs/parameter_parser.py -f config.yaml -s Ftpinfo -k ftpserver`}
: ${FTP_DIR:=`python configs/parameter_parser.py -f config.yaml -s Ftpinfo -k FTP_DIR`}
: ${ARCH_MAP:=`python configs/parameter_parser.py -f config.yaml -s Arch`}
: ${SUCCESS_MAIL_LIST:=`python configs/parameter_parser.py -f config.yaml -s Mail -k SUCCESS_LIST`}
: ${FAILED_MAIL_LIST:=`python configs/parameter_parser.py -f config.yaml -s Mail -k FAILED_LIST`}
popd # restore current work directory
}
function save_to_properties() {
cat << EOF > ${WORKSPACE}/env.properties
TREE_NAME="${TREE_NAME}"
GIT_DESCRIBE="${GIT_DESCRIBE}"
SHELL_PLATFORM="${SHELL_PLATFORM}"
SHELL_DISTRO="${SHELL_DISTRO}"
BOOT_PLAN="${BOOT_PLAN}"
TEST_REPO="${TEST_REPO}"
TEST_PLAN="${TEST_PLAN}"
TEST_LEVEL="${TEST_LEVEL}"
ARCH_MAP="${ARCH_MAP}"
EOF
# EXECUTE_STATUS="Failure"x
cat ${WORKSPACE}/env.properties
}
function prepare_tools() {
dev_tools="python-yaml python-keyring expect"
if ! (dpkg-query -l $dev_tools >/dev/null 2>&1); then
sudo apt-get update
if ! (sudo apt-get install -y --force-yes $dev_tools); then
echo "ERROR: can't install tools: ${dev_tools}"
exit 1
fi
fi
}
function init_boot_env() {
JOBS_DIR=jobs
RESULTS_DIR=results
WHOLE_SUM='whole_summary.txt'
DETAILS_SUM='details_summary.txt'
PDF_FILE='resultfile.pdf'
}
function generate_jobs() {
test_name=$1
distro=$2
pwd
for PLAT in $SHELL_PLATFORM; do
board_arch=${dict[$PLAT]}
if [ x"$distro" != x"" ]; then
python estuary-ci-job-creator.py "$FTP_SERVER/${TREE_NAME}/${GIT_DESCRIBE}/${PLAT}-${board_arch}/" \
--tree "${TREE_NAME}" --plans "$test_name" --distro "$distro" --arch "${board_arch}" \
--testUrl "${TEST_REPO}" --testDir "${TEST_CASE_DIR}" --scope "${TEST_PLAN}" --level "${TEST_LEVEL}" \
--jenkinsJob "${JENKINS_JOB_INFO}"
else
python estuary-ci-job-creator.py "$FTP_SERVER/${TREE_NAME}/${GIT_DESCRIBE}/${PLAT}-${board_arch}/" \
--tree "${TREE_NAME}" --plans "$test_name" --arch "${board_arch}" \
--testUrl "${TEST_REPO}" --testDir "${TEST_CASE_DIR}" --scope "${TEST_PLAN}" --level "${TEST_LEVEL}" \
--jekinsJob "${JENKINS_JOB_INFO}"
fi
done
}
function run_and_report_jobs() {
if [ x"$SKIP_LAVA_RUN" = x"false" ];then
pushd ${JOBS_DIR}
python ../estuary-job-runner.py --username $LAVA_USER --token $LAVA_TOKEN --server $LAVA_SERVER --stream $LAVA_STREAM --poll POLL
popd
if [ ! -f ${JOBS_DIR}/${RESULTS_DIR}/POLL ]; then
echo "Running jobs error! Aborting"
return -1
else
echo "POLL Result:"
cat ${JOBS_DIR}/${RESULTS_DIR}/POLL
fi
python estuary-report.py --boot ${JOBS_DIR}/${RESULTS_DIR}/POLL --lab $LAVA_USER
if [ ! -d ${RESULTS_DIR} ]; then
echo "running jobs error! Aborting"
return -1
fi
else
echo "skip lava run and report"
fi
}
function judge_pass_or_not() {
FAIL_FLAG=$(grep -R 'FAIL' ./${JOBS_DIR}/${RESULTS_DIR}/POLL || true)
if [ "$FAIL_FLAG"x != ""x ]; then
echo "jobs fail"
return -1
fi
PASS_FLAG=$(grep -R 'PASS' ./${JOBS_DIR}/${RESULTS_DIR}/POLL || true)
if [ "$PASS_FLAG"x = ""x ]; then
echo "jobs fail"
return -1
fi
return 0
}
function run_and_move_result() {
test_name=$1
dest_dir=$2
ret_val=0
if ! run_and_report_jobs ;then
ret_val=-1
fi
if ! judge_pass_or_not ; then
ret_val=-1
fi
[ ! -d ${dest_dir} ] && mkdir -p ${dest_dir}
[ -e ${WHOLE_SUM} ] && mv ${WHOLE_SUM} ${dest_dir}/
[ -e ${DETAILS_SUM} ] && mv ${DETAILS_SUM} ${dest_dir}/
[ -e ${PDF_FILE} ] && mv ${PDF_FILE} ${dest_dir}/
[ -d ${JOBS_DIR} ] && mv ${JOBS_DIR} ${dest_dir}/${JOBS_DIR}_${test_name}
[ -d ${RESULTS_DIR} ] && mv ${RESULTS_DIR} ${dest_dir}/${RESULTS_DIR}_${test_name}
if [ "$ret_val" -ne 0 ]; then
return -1
else
return 0
fi
}
function print_time() {
echo -e "@@@@@@"$@ `date "+%Y-%m-%d %H:%M:%S"` "\n" >> $timefile
#echo -e "\n" >> $timefile
}
export
####### Begining the tests ######
function init_timefile() {
timefile=${WORKSPACE}/timestamp_boot.txt
if [ -f ${timefile} ]; then
rm -fr ${timefile}
else
touch ${timefile}
fi
}
function init_summaryfile() {
if [ -f ${WORKSPACE}/whole_summary.txt ]; then
rm -rf ${WORKSPACE}/whole_summary.txt
else
touch ${WORKSPACE}/whole_summary.txt
fi
}
function parse_arch_map() {
read -a arch <<< $(echo $ARCH_MAP)
declare -A -g dict
for((i=0; i<${#arch[@]}; i++)); do
if ((i%2==0)); then
j=`expr $i+1`
dict[${arch[$i]}]=${arch[$j]}
fi
done
for key in "${!dict[@]}"; do echo "$key - ${dict[$key]}"; done
}
function clean_workspace() {
##### remove all file from the workspace #####
rm -rf ${CI_SCRIPTS_DIR}/uef* test_result.tar.gz||true
rm -rf ${WORKSPACE}/*.txt||true
### reset CI scripts ####
cd ${CI_SCRIPTS_DIR}/; git clean -fdx; cd -
}
function trigger_lava_build() {
pushd ${WORKSPACE}/local/ci-scripts/boot-app-scripts
mkdir -p ${GIT_DESCRIBE}/${RESULTS_DIR}
for DISTRO in $SHELL_DISTRO; do
if [ -d $DISTRO ];then
rm -fr $DISTRO
fi
for boot_plan in $BOOT_PLAN; do
rm -fr ${JOBS_DIR} ${RESULTS_DIR}
# generate the boot jobs for all the targets
if [ "$boot_plan" = "BOOT_ISO" ]; then
# TODO : need rewrite the logic by lava2 way to boot from STAT or SAS.
:
elif [ "$boot_plan" = "BOOT_PXE" ]; then
# pxe install in previous step.use ssh to do the pxe test.
# BOOT_NFS
# boot from NFS
print_time "the start time of $boot_plan is "
generate_jobs $boot_plan $DISTRO
if [ -d ${JOBS_DIR} ]; then
if ! run_and_move_result $boot_plan $DISTRO ;then
if [ ! -d ${GIT_DESCRIBE}/${RESULTS_DIR}/${DISTRO} ];then
mv ${DISTRO} ${GIT_DESCRIBE}/${RESULTS_DIR}
continue
else
cp -fr ${DISTRO}/* ${GIT_DESCRIBE}/${RESULTS_DIR}/${DISTRO}/
continue
fi
fi
fi
print_time "the end time of $boot_plan is "
else
# BOOT_NFS
# boot from NFS
print_time "the start time of $boot_plan is "
generate_jobs $boot_plan $DISTRO
if [ -d ${JOBS_DIR} ]; then
if ! run_and_move_result $boot_plan $DISTRO ;then
if [ ! -d ${GIT_DESCRIBE}/${RESULTS_DIR}/${DISTRO} ];then
mv ${DISTRO} ${GIT_DESCRIBE}/${RESULTS_DIR}
continue
else
cp -fr ${DISTRO}/* ${GIT_DESCRIBE}/${RESULTS_DIR}/${DISTRO}/
continue
fi
fi
fi
print_time "the end time of $boot_plan is "
fi
done
if [ ! -d $GIT_DESCRIBE/${RESULTS_DIR}/${DISTRO} ];then
mv ${DISTRO} $GIT_DESCRIBE/${RESULTS_DIR} && continue
else
cp -fr ${DISTRO}/* $GIT_DESCRIBE/${RESULTS_DIR}/${DISTRO}/ && continue
fi
done
popd
}
# Collect the per-distro boot/test results, merge them into the whole/details
# summaries, archive everything and publish it to the FTP results area.
# Globals read: WORKSPACE FTP_DIR TREE_NAME GIT_DESCRIBE RESULTS_DIR
#               WHOLE_SUM DETAILS_SUM PDF_FILE CI_SCRIPTS_DIR timefile
# Files written: test_result.tar.gz, merged ${WHOLE_SUM}/${DETAILS_SUM}
#                in ${WORKSPACE} and in ${DES_DIR} on the FTP share.
function collect_result() {
    # push the binary files to the ftpserver
    pushd ${WORKSPACE}/local/ci-scripts/boot-app-scripts
    DES_DIR=${FTP_DIR}/${TREE_NAME}/${GIT_DESCRIBE}/
    # The FTP destination must already exist (created by the build stage);
    # bail out hard otherwise.
    [ ! -d $DES_DIR ] && echo "Don't have the images and dtbs" && exit -1
    # Archive the raw result tree and keep a copy in the Jenkins workspace.
    tar czf test_result.tar.gz ${GIT_DESCRIBE}/*
    cp test_result.tar.gz ${WORKSPACE}
    # Remove stale summary/report files from a previous run of this job.
    if [ -e ${WORKSPACE}/${WHOLE_SUM} ]; then
        rm -rf ${WORKSPACE}/${WHOLE_SUM}
    fi
    if [ -e ${WORKSPACE}/${DETAILS_SUM} ]; then
        rm -rf ${WORKSPACE}/${DETAILS_SUM}
    fi
    if [ -e ${WORKSPACE}/${PDF_FILE} ]; then
        rm -rf ${WORKSPACE}/${PDF_FILE}
    fi
    if [ -e ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM} ]; then
        rm -rf ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM}
    fi
    # Seed both merged summary files with an empty line.
    echo '' | tee ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM} | tee ${GIT_DESCRIBE}/${RESULTS_DIR}/${DETAILS_SUM}
    # Enumerate the per-distro result subdirectories (one per tested distro).
    cd ${GIT_DESCRIBE}/${RESULTS_DIR}
    distro_dirs=$(ls -d */ | cut -f1 -d'/')
    cd -
    # Concatenate each distro's summaries into the merged files.
    for distro_name in ${distro_dirs};do
        echo "##### distro : ${distro_name} ######" | tee -a ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM} | tee -a ${GIT_DESCRIBE}/${RESULTS_DIR}/${DETAILS_SUM}
        cat ${CI_SCRIPTS_DIR}/boot-app-scripts/${GIT_DESCRIBE}/${RESULTS_DIR}/${distro_name}/${WHOLE_SUM} >> ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM}
        cat ${CI_SCRIPTS_DIR}/boot-app-scripts/${GIT_DESCRIBE}/${RESULTS_DIR}/${distro_name}/${DETAILS_SUM} >> ${GIT_DESCRIBE}/${RESULTS_DIR}/${DETAILS_SUM}
        # cp -f ${CI_SCRIPTS_DIR}/boot-app-scripts/${GIT_DESCRIBE}/${RESULTS_DIR}/${distro_name}/${PDF_FILE} ${GIT_DESCRIBE}/${RESULTS_DIR}/${PDF_FILE}
    done
    # apt-get install pdftk
    # pdftk file1.pdf file2.pdf cat output output.pdf
    # Expose the merged summaries (and timing file) in the Jenkins workspace.
    cp ${GIT_DESCRIBE}/${RESULTS_DIR}/${WHOLE_SUM} ${WORKSPACE}/${WHOLE_SUM}
    cp ${GIT_DESCRIBE}/${RESULTS_DIR}/${DETAILS_SUM} ${WORKSPACE}/${DETAILS_SUM}
    #cp ${GIT_DESCRIBE}/${RESULTS_DIR}/${PDF_FILE} ${WORKSPACE}/${PDF_FILE}
    cp -rf ${timefile} ${WORKSPACE} || true
    #zip -r ${{GIT_DESCRIBE}}_results.zip ${GIT_DESCRIBE}/*
    cp -f ${timefile} ${GIT_DESCRIBE} || true
    # Replace any previously published results on the FTP share, then publish.
    if [ -d $DES_DIR/${GIT_DESCRIBE}/results ];then
        sudo rm -fr $DES_DIR/${GIT_DESCRIBE}/results
        sudo rm -fr $DES_DIR/${GIT_DESCRIBE}/${timefile}
    fi
    sudo cp -rf ${GIT_DESCRIBE}/* $DES_DIR
    popd # restore current work directory
    # Print the timing and overall summary into the console log.
    cat ${timefile}
    cat ${WORKSPACE}/${WHOLE_SUM}
}
# Derive the CI script/test-case directories from the Jenkins workspace.
# Globals read: WORKSPACE
# Globals written: CI_SCRIPTS_DIR TEST_CASE_DIR
function init_env() {
    local base="${WORKSPACE}/local"
    CI_SCRIPTS_DIR="${base}/ci-scripts"
    TEST_CASE_DIR="${base}/ci-test-cases"
}
# Print a short usage message for this script.
# Fixes the empty stub: the -h option previously produced no output at all.
# Outputs: usage text on stdout
function show_help(){
    cat <<EOF
Usage: ${0##*/} [-h] [-p properties_file]
  -h    show this help message and exit
  -p    properties file to source (used to load parameters in pipeline jobs)
EOF
}
# Parse the command line options of this script.
# Options:
#   -h / -?   print help and exit 0
#   -p FILE   properties file to be sourced later by source_properties_file
# Globals written: properties_file OPTIND
function parse_input() {
    # A POSIX variable
    OPTIND=1 # Reset in case getopts has been used previously in the shell.
    # Initialize our own variables:
    properties_file=""
    while getopts "h?p:" opt; do
        case "$opt" in
        h|\?)
            show_help
            exit 0
            ;;
        p) properties_file=$OPTARG
            ;;
        esac
    done
    shift $((OPTIND-1))
    # ${1:-}: guard against an unset $1 (no operands left) under "set -u".
    [ "${1:-}" = "--" ] && shift
    # $* (not $@) is correct inside a double-quoted string context.
    echo "properties_file='$properties_file', Leftovers: $*"
}
# Used to load parameters in pipeline jobs.
# Source the properties file selected via -p, if one was given and it exists.
# Globals read: properties_file
function source_properties_file() {
    if [ -n "${properties_file}" ] && [ -e "${properties_file}" ];then
        source "${properties_file}"
    fi
}
# Compose the "boot/test failed" notification mail fragments in ${WORKSPACE}.
# Globals read: WORKSPACE FAILED_MAIL_LIST GIT_DESCRIBE TREE_NAME BUILD_URL FTP_SERVER
# Files written: MAIL_LIST.txt MAIL_SUBJECT.txt MAIL_CONTENT.txt
function generate_failed_mail(){
    # Quote and check the cd: otherwise a missing/odd WORKSPACE silently makes
    # the mail files land in whatever the current directory happens to be.
    cd "${WORKSPACE}" || return 1
    echo "${FAILED_MAIL_LIST}" > MAIL_LIST.txt
    echo "Estuary CI - ${GIT_DESCRIBE} - Failed" > MAIL_SUBJECT.txt
    cat > MAIL_CONTENT.txt <<EOF
( This mail is send by Jenkins automatically, don't reply )
Project Name: ${TREE_NAME}
Version: ${GIT_DESCRIBE}
Build Status: success
Boot and Test Status: failed
Build Log Address: ${BUILD_URL}console
Build Project Address: $BUILD_URL
Build and Generated Binaries Address:${FTP_SERVER}/open-estuary/${GIT_DESCRIBE}
The Test Cases Definition Address: https://github.com/qinshulei/ci-test-cases
The boot and test is failed unexpectly. Please check the log and fix it.
EOF
}
# Compose the "boot/test succeeded" notification mail fragments in ${WORKSPACE},
# appending the whole/details test summaries to the mail body.
# Globals read: WORKSPACE DEBUG FAILED_MAIL_LIST SUCCESS_MAIL_LIST GIT_DESCRIBE
#               TREE_NAME BUILD_URL FTP_SERVER RESULTS_DIR
# Files written: MAIL_LIST.txt MAIL_SUBJECT.txt MAIL_CONTENT.txt
function generate_success_mail(){
    cd "${WORKSPACE}" || return 1
    # In debug mode route the mail to the failure list instead of broadcasting.
    if [ "${DEBUG}" = "true" ];then
        echo "${FAILED_MAIL_LIST}" > "${WORKSPACE}/MAIL_LIST.txt"
    else
        echo "${SUCCESS_MAIL_LIST}" > "${WORKSPACE}/MAIL_LIST.txt"
    fi
    echo "Estuary CI - ${GIT_DESCRIBE} - Result" > "${WORKSPACE}/MAIL_SUBJECT.txt"
    cat > "${WORKSPACE}/MAIL_CONTENT.txt" <<EOF
( This mail is send by Jenkins automatically, don't reply )
Project Name: ${TREE_NAME}
Version: ${GIT_DESCRIBE}
Boot and Test Status: Success
Build Log Address: ${BUILD_URL}console
Build Project Address: $BUILD_URL
Build and Generated Binaries Address:${FTP_SERVER}/open-estuary/${GIT_DESCRIBE}
The Test Cases Definition Address: https://github.com/qinshulei/ci-test-cases
EOF
    cd "${WORKSPACE}/local/ci-scripts/boot-app-scripts/${GIT_DESCRIBE}/${RESULTS_DIR}" || return 1
    # Bug fix: the blank separator lines were echoed to stdout instead of being
    # appended to the mail body; redirect them into MAIL_CONTENT.txt.
    echo "" >> "${WORKSPACE}/MAIL_CONTENT.txt"
    echo "Test summary is below:" >> "${WORKSPACE}/MAIL_CONTENT.txt"
    cat whole_summary.txt >> "${WORKSPACE}/MAIL_CONTENT.txt"
    echo "" >> "${WORKSPACE}/MAIL_CONTENT.txt"
    echo "The Test Case details is below:" >> "${WORKSPACE}/MAIL_CONTENT.txt"
    cat details_summary.txt >> "${WORKSPACE}/MAIL_CONTENT.txt"
    cd -
}
# Stash the per-environment devices.yaml into /tmp so a later step can
# restore it (see workaround_pop_devices_config).
# Globals read/written: CI_ENV (defaults to "dev" when empty)
function workaround_stash_devices_config() {
    [ -n "${CI_ENV}" ] || CI_ENV=dev
    local config="configs/${CI_ENV}/devices.yaml"
    if [ -e "${config}" ];then
        cp -f "${config}" /tmp/devices.yaml
    fi
}
# Restore the devices.yaml previously stashed in /tmp back into the
# per-environment config directory (counterpart of the stash workaround).
# Globals read/written: CI_ENV (defaults to "dev" when empty)
function workaround_pop_devices_config() {
    [ -n "${CI_ENV}" ] || CI_ENV=dev
    local stashed=/tmp/devices.yaml
    if [ -e "${stashed}" ];then
        cp -f "${stashed}" configs/"${CI_ENV}"/devices.yaml
    fi
}
# Top-level driver: parse options, prepare the workspace/boot environment,
# run the LAVA boot/test jobs, then collect results and compose the
# notification mail. Most helpers are defined elsewhere in this file.
function main() {
    # Option parsing and optional properties-file loading.
    parse_input "$@"
    source_properties_file
    # Environment / parameter setup.
    init_workspace
    init_build_option
    init_env
    init_boot_env
    init_input_params
    parse_params
    # Prepare the failure mail up front so it exists even if a later step dies.
    generate_failed_mail
    prepare_tools
    init_timefile
    print_time "the begin time of boot test is "
    init_summaryfile
    ##### copy some files to the lava-server machine to support the boot process #####
    parse_arch_map
    clean_workspace
    print_time "the time of preparing all envireonment is "
    # Preserve devices.yaml across the build (restored by the pop workaround).
    workaround_stash_devices_config
    # Run the boot/test jobs and publish the results.
    trigger_lava_build
    collect_result
    # Reaching this point means success: overwrite the mail with the success one.
    generate_success_mail
    save_to_properties
}
main "$@"
| true |
2ca22e266e243b2ac23ae69c69898e5a5e6f64d6 | Shell | L-maple/k8s-autoscaling | /hdfs/hdfs-test.sh | UTF-8 | 594 | 2.6875 | 3 | [] | no_license | #!/bin/bash
echo "Step1: 删除之前不必要的yaml资源"
kubectl delete -f manifests-latest/
sleep 10
echo "Step2: 删除所有的pvc"
kubectl delete -n monitoring pvc --all
echo "Step3: 部署新的yaml资源"
kubectl apply -f manifests-latest/
echo "Step4: 记得删除worker节点上的无用pv\n"
echo "Step5: 等待,不然hpa可能会做出错误的扩容决定"
sleep 30
# 将benchmark相关文件复制到hdfs-client容器内部
kubectl -n monitoring cp ./benchmark/ hdfs-client:/benchmark/
# 登录容器查看下
kubectl exec -it -n monitoring hdfs-client -- /bin/bash
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.