blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2eede88312a6661394b03d510fd66c536b2d4ace
|
Shell
|
Roboy/ravestate
|
/deploy.sh
|
UTF-8
| 174
| 2.65625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Build a source + wheel distribution and upload it to PyPI with twine.
# Usage: deploy.sh [setup-file]   (defaults to setup.py)

# Exported, as in the original, so child processes can see it.
# ${1:-setup.py} collapses the original if/fallback into one expansion.
export setupfile=${1:-setup.py}

rm -rf dist
# Quote the path so a setup file with spaces still works.
python3 "$setupfile" sdist bdist_wheel
twine upload dist/*
| true
|
381780bc987d5855281fd033adbb19436d1f2dfe
|
Shell
|
nfvri/dputils
|
/vnfs/packer/ubuntu_1604_server/provision/provision.sh
|
UTF-8
| 1,014
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu 16.04 guest: create the guest account with
# passwordless sudo, propagate proxy settings, update packages and
# install the user-supplied startup script.
# Expects $guest_user and $guest_password in the environment.

# FIX: the original swapped the two variables — it hashed $guest_user as
# the password and created an account named $guest_password.
useradd -m -s /bin/bash -p "$(openssl passwd -crypt "$guest_password")" -U "$guest_user"
echo "$guest_user ALL=(ALL) NOPASSWD:ALL" | tee -a /etc/sudoers
# Propagate proxy settings when http_proxy is set (non-empty); the old
# comment said "if empty", which contradicted the test below.
if [ ! -z "$http_proxy" ]
then
echo "http_proxy=$http_proxy" | sudo tee -a /etc/environment
echo "https_proxy=$https_proxy" | sudo tee -a /etc/environment
echo "HTTP_PROXY=$http_proxy" | sudo tee -a /etc/environment
echo "HTTPS_PROXY=$https_proxy" | sudo tee -a /etc/environment
if [ ! -f /etc/apt/apt.conf ]; then
sudo touch /etc/apt/apt.conf
fi
# Double-quoted so the proxy URL is not word-split before reaching tee.
echo "Acquire::http::Proxy \"$http_proxy\";" | sudo tee -a /etc/apt/apt.conf
echo "Acquire::https::Proxy \"$https_proxy\";" | sudo tee -a /etc/apt/apt.conf
fi
sudo apt-get -y update
sudo apt-get -y upgrade
# assumes the user-defined startup.sh script has already been copied under /etc/init.d
sudo mv /tmp/startup.sh /etc/init.d/startup.sh
sudo chmod ugo+x /etc/init.d/startup.sh
sudo update-rc.d startup.sh defaults
| true
|
ff264b31a0b4fd1f1edc12b1cf25c3b843986ee1
|
Shell
|
grlf/vagrant-lando
|
/dist/provision.sh
|
UTF-8
| 1,039
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail

# Provision the Vagrant guest with Docker and Lando (idempotent: each
# tool is only installed when its binary is not already on PATH).
LANDO_VERSION=${LANDO_VERSION:-v3.4.2}

# Install Docker.
if ! hash docker > /dev/null 2>&1; then
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
sudo apt-get -qq install -y docker-ce docker-ce-cli containerd.io
sudo usermod -a -G docker vagrant
# Up max count for elasticsearch.
# FIX: append (>>) instead of truncating (>) /etc/sysctl.conf, which
# silently discarded every other sysctl setting on the box.
sudo sh -c 'echo vm.max_map_count=262144 >> /etc/sysctl.conf'
sudo sysctl --system
fi

# Install Lando.
if ! hash lando > /dev/null 2>&1; then
printf "Downloading Lando version %s...\n" "${LANDO_VERSION}"
wget -O /tmp/lando.deb https://github.com/lando/lando/releases/download/${LANDO_VERSION}/lando-x64-${LANDO_VERSION}.deb
sudo dpkg -i /tmp/lando.deb
rm /tmp/lando.deb
sudo mkdir -p /home/vagrant/.lando/keys
sudo chown vagrant:vagrant /home/vagrant/.lando
sudo chown vagrant:vagrant /home/vagrant/.lando/keys
fi
| true
|
14a0610b61a2b246939937e9780671b1c07f74f1
|
Shell
|
surajkawade/shell-scripting
|
/echo.sh
|
UTF-8
| 165
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# This is my first bash script
# Demonstrates how quoting affects variable expansion in bash.
greeting="hello"
# Unquoted: $greeting expands; the parentheses must be backslash-escaped.
echo $greeting, world \(planet\)!
# Single quotes: no expansion — prints the literal text "$greeting, world (planet)".
echo '$greeting, world (planet)'
# Double quotes: $greeting expands and parentheses need no escaping.
echo "$greeting, world (planet)!"
| true
|
83147fc619cf42c9897f5909f9ee936ec6721172
|
Shell
|
stemkit-collection/stemkit-util
|
/scripts/lprint
|
UTF-8
| 1,017
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/ksh
#
# Script to perform local print. Usage:
# lprint [<file> ...]
# where <file> - The name of a file to print. Multiple file
# may be printed on a single invocation. If
# <file> is "-" or absent than standard input
# is read.
# Turn the terminal's attached printer ON: if a code set is configured,
# disable csmmap first, then emit the terminfo "printer on" sequence.
# "\c" suppresses the trailing newline (ksh echo).
Printer_ON () {
[ "${CodeSet:+set}" = set ] && csmmap off 0<&1 2>/dev/null
echo "$OnCmd\c"
}
# Turn the printer OFF and restore csmmap if a code set is configured.
Printer_OFF () {
[ "${CodeSet:+set}" = set ] && csmmap on 0<&1 2>/dev/null
echo "$OffCmd\c"
}
ScriptName=`basename $0`
# mc5/mc4 are the terminfo "printer on" / "printer off" capabilities.
OnCmd=`tput mc5 2>/dev/null`; OffCmd=`tput mc4 2>/dev/null`
[ "${OnCmd:+set}" = set -a "${OffCmd:+set}" = set ] || {
echo "$ScriptName: terminal does not support local print" 1>&2
exit 1
}
# Make sure the printer is switched off on normal exit and on signals.
trap 'Printer_OFF' 0 1 2 3 8 15 16 17 23 24 25
# Save original stdin on fd 3 so "-" can restore it inside the loop.
exec 3<&0; Printer_ON
# ${*:--} - all arguments, or a single "-" (stdin) when none were given.
for file in ${*:--}
do
case "$file" in
-) exec 0<&3 ;;
*) [ -f "$file" -a -r "$file" ] || {
Printer_OFF
echo "$ScriptName: cannot access file \"$file\"" 1>&2
exit 2
}
exec 0<"$file" ;;
esac
cat || exit 3
done
exit 0
| true
|
ec55dbb1f0021488a48d1daa08b96f50db41b980
|
Shell
|
Jacob-Spiegel/SMILESClickChem
|
/docker/examples/example.bash
|
UTF-8
| 627
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is an example for running SMILESClickChem within a docker.
# To modify the protein, pocket dimensions, and GA parameters... please
# create a JSON file with the desired user variables.
# An example JSON is provided at: /SMILESClickChem/docker/examples/sample_submit_SMILESClickChem_docker.json
# Make sure we are in the /SMILESClickChem/docker/ directory
# sudo should only be run in Linux or MacOS
# If Windows please instead just open the terminal with admin privileges
# and omit the 'sudo'
# Runs the docker wrapper with the example submission JSON (-j).
sudo python ./smilesclickchem_in_docker.py -j ./examples/sample_submit_SMILESClickChem_docker.json
| true
|
b447380e0ea5bb545b855c9a3d74e51ceb4bfd36
|
Shell
|
ehwest/control_scripts
|
/poller.sh
|
UTF-8
| 2,244
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# poller.sh — reads the meter model and glucose data from a OneTouch
# device via insulaudit, base64-encodes the captures, syncs the clock,
# and POSTs the data to a web service. Progress is appended to
# /home/root/logfile.txt, raw captures to files under /tmp.
#/usr/bin/ntpdate -b -s -u pool.ntp.org
date >> /home/root/logfile.txt
echo "Starting poller.sh script." >> /home/root/logfile.txt
export SHELL=/bin/sh
export TERM=vt100
export USER=root
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin
export PWD=/home/root
export HOME=/home/root
export SHLVL=2
# FIX: was 'exportLOGNAME=root' (missing space), which tried to run a
# nonexistent command instead of exporting LOGNAME.
export LOGNAME=root
date >> /home/root/logfile.txt
echo "Starting usb_discover.sh" >> /home/root/logfile.txt
/home/root/usb_discover.sh >> /tmp/onetouch_content.txt
# Serial device nodes discovered by usb_discover.sh.
export tty_bcm=/dev/`cat /etc/tty_bcm`
export tty_sierra=/dev/`cat /etc/tty_sierra`
echo tty_bcm=$tty_bcm >> /tmp/onetouch_content.txt
echo tty_sierra=$tty_sierra >> /tmp/onetouch_content.txt
date >> /home/root/logfile.txt
echo "Starting insulaudit python script." >> /home/root/logfile.txt
epochtime=`date +%s`
filename1=model_read-${epochtime}
/usr/bin/insulaudit onetouch --port ${tty_bcm} hello >> /tmp/${filename1}
cat /tmp/${filename1} >> /tmp/onetouch_content.txt
filename2=data_read-${epochtime}
/usr/bin/insulaudit onetouch --port ${tty_bcm} sugars >> /tmp/${filename2}
cat /tmp/${filename2} >> /tmp/onetouch_content.txt
# base64 with no wrapping so each capture is a single URL-encodable blob.
message1=`/bin/base64 --wrap=0 /tmp/${filename1}`
message2=`/bin/base64 --wrap=0 /tmp/${filename2}`
echo $message1 >> /tmp/onetouch_content.txt
echo $message2 >> /tmp/onetouch_content.txt
date >> /home/root/logfile.txt
echo "Joining the network with join_network.sh script." >> /home/root/logfile.txt
/home/root/join_network.sh >> /home/root/logfile.txt
date >> /home/root/logfile.txt
echo "Getting current time/date from NTP server." >> /home/root/logfile.txt
/usr/bin/ntpdate -b -s -u pool.ntp.org
date >> /home/root/logfile.txt
date >> /home/root/logfile.txt
echo "Starting curl to post the data." >> /home/root/logfile.txt
# send POST requests with the data (the old comment said PUT, but the
# requests below explicitly use --request POST)
curl --request POST 'http://transactionalweb.com/mconnect.php' --data-urlencode 'postedcontent='${message1}
curl --request POST 'http://transactionalweb.com/mconnect.php' --data-urlencode 'postedcontent='${message2}
date >> /home/root/logfile.txt
echo "Starting quit_network.sh script. " >> /home/root/logfile.txt
/home/root/quit_network.sh
date >> /home/root/logfile.txt
echo "Done running poller.sh script. " >> /home/root/logfile.txt
| true
|
c83a7445292c6f46516be1bec07cca592e7b184c
|
Shell
|
stfnwong/smips
|
/test/run_tests.sh
|
UTF-8
| 131
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run every unit-test binary under bin/test/, stopping at the first
# failure and propagating its exit code.
main() {
    local t
    local -i rc=0
    for t in bin/test/*; do
        # Guard against an empty bin/test/ — an unmatched glob stays
        # literal and the original tried to execute "./bin/test/*".
        [[ -e "$t" ]] || continue
        "./$t" || rc=$?
        if (( rc != 0 )); then
            exit "$rc"
        fi
    done
}
main "$@"
| true
|
4efa10c5c95d2035650c8c93c6e8a95909663275
|
Shell
|
wilsjame/dotfiles
|
/.bashrc
|
UTF-8
| 846
| 2.9375
| 3
|
[] |
no_license
|
#alias ls='ls -aGFhp'
# Aliases: colourised ls, long listing, one-line git log, and docker ps
# table views (state / image id / ports / mounts).
alias ls='ls --color'
alias ll='ls -alF'
alias lg='git log --pretty=oneline'
alias ld='docker ps --format "table {{.Names}}\t{{.Image}}\t{{.State}}"'
alias ldi='docker ps --format "table {{.Names}}\t{{.Image}}\t{{.ID}}"'
alias ldp='docker ps --format "table {{.Names}}\t{{.Image}}\t{{.Ports}}"'
alias ldm='docker ps --format "table {{.Names}}\t{{.Image}}\t{{.Mounts}}"'
# NOTE(review): PROMPT uses zsh %-escapes while PS1 below is bash syntax;
# presumably this rc is shared between both shells — confirm.
export PROMPT='%F{green}%~%f > '
export EDITOR='vim'
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Print the current git branch as "(branch)", or nothing outside a repo.
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}
# Prompt: user@host cwd (branch)$ — cwd in green, branch in light red.
export PS1="\u@\h \[\e[32m\]\w \[\e[91m\]\$(parse_git_branch)\[\e[00m\]$ "
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
| true
|
2367f9003b4d49a38e53120599cb0d02b2deed05
|
Shell
|
danielflira/docker-vscode
|
/dev.sh
|
UTF-8
| 511
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch a privileged development container with X11 forwarding, the
# host docker socket, and the current directory mounted at /data.
# Usage: dev.sh IMAGE_NAME
if [[ -z "${1}" ]]; then
echo "you must inform image name"
exit 1
fi
IMAGE_NAME="${1}"
# X display number extracted from $DISPLAY (e.g. ":0.0" -> "0").
# $(...) replaces the original backticks; quoting protects odd values.
X11_SOCKET=$(echo "$DISPLAY" | sed -e 's/.*://' -e 's/\..*$//')
mkdir -p .home
# All host paths are quoted so directories with spaces do not break the
# -v mount specifications (the original left them unquoted).
docker run --rm -ti --privileged \
-v /etc/passwd:/etc/passwd \
-v /etc/shadow:/etc/shadow \
-v "/tmp/.X11-unix/X${X11_SOCKET}:/tmp/.X11-unix/X${X11_SOCKET}" \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "$PWD/.home:$HOME" \
-v "$PWD:/data" \
-e DISPLAY="$DISPLAY" \
-u "$(id -u):$(id -g)" \
--workdir /data \
"${IMAGE_NAME}" bash
| true
|
caf522fbeda64135bf49cb10116c5a988ae0dd92
|
Shell
|
Kaladiya-Ilaf/shell-programming
|
/loopAndSelection/MagicNumber.sh
|
UTF-8
| 806
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive binary-search guessing game: the user thinks of a number
# in 1-100 and the script halves the interval until it hits it.
echo "Think a number in the range 1-100 : "
low=0
high=100
while true; do
    # Rounded midpoint; awk's %.f reproduces the original rounding
    # (integer shell division would truncate instead).
    mid=$(awk -v a="$low" -v b="$high" 'BEGIN { printf "%.f", (a + b) / 2 }')
    echo "whether your thinking value is $mid"
    read -p "press 1 for yes ,0 for no : " answer
    if [ $answer -eq 1 ]; then
        echo "Found it!!!"
        break
    fi
    # Not found: ask which half to keep and shrink the interval.
    echo "Whether the guessed value is lesser than $mid"
    read -p "press 1 if it is greater press 2 : " half
    if [ $half -eq 1 ]; then
        high=$mid
    else
        low=$mid
    fi
    echo "Lower Limit : $low"
    echo "Higher Limit : $high"
done
| true
|
6300fadb55bf6df5d7551c94e024b25e6570d045
|
Shell
|
salvogs/progettoSOL
|
/test/test2.sh
|
UTF-8
| 1,421
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Integration test 2: starts the file-storage server, runs five clients
# that exercise eviction (FIFO/LRU/LFU), then shuts the server down.
# RED='\033[0;31m'
LBLUE='\033[1;34m'
LGREEN='\033[1;32m'
NC='\033[0m' # No Color
# start the server in the background
echo -e "${LGREEN}==AVVIO SERVER IN BACKGROUND==${NC}"
./bin/server test/config2.txt &
SPID=$!
# short wait so the server is ready to accept connections
sleep 2s
socket=socket.sk
# src contains exactly 10 files;
# -W src/api.c,src/clientParser.c update the files' last access for LRU/LFU;
# -W of bigFile.bin triggers eviction of api.c (file-count capacity exceeded) if evictionPolicy = 0 (FIFO)
# otherwise, with LRU(1)/LFU(2), client.c will be evicted
echo -e "${LBLUE}==CLIENT 1==${NC}"
./bin/client -p -f $socket -w src -W src/api.c,src/clientParser.c,testfile/bigFile.bin -D test/ejectedDir
# triggers eviction of all files
# only the modified files are sent back
echo -e "${LBLUE}==CLIENT 2==${NC}"
./bin/client -p -f $socket -W testfile/bigFile1.bin,testfile/img.jpg -D test/ejectedDir
# modify img.jpg (does not trigger an eviction)
echo -e "${LBLUE}==CLIENT 3==${NC}"
./bin/client -p -f $socket -W testfile/img.jpg
# file too large
echo -e "${LBLUE}==CLIENT 4==${NC}"
./bin/client -p -f $socket -W testfile/verybigFile.bin
# writing 1Mb.bin (which triggers an eviction), we receive img.jpg which was modified
echo -e "${LBLUE}==CLIENT 5==${NC}"
./bin/client -p -f $socket -W testfile/1Mb.bin -D test/ejectedDir
# graceful shutdown: SIGHUP, then wait for the server to exit
kill -s SIGHUP $SPID
wait $SPID
exit 0
| true
|
3904858efae6e419e6eaa8f32f6342c6744416a9
|
Shell
|
tpm2-software/tpm2-tools
|
/test/integration/tests/abrmd_policycountertimer.sh
|
UTF-8
| 3,381
| 3.109375
| 3
|
[] |
no_license
|
# SPDX-License-Identifier: BSD-3-Clause
source helpers.sh
cleanup() {
    # Remove every context/policy artifact the test created; shut the
    # TPM simulator down unless the caller passed "no-shut-down".
    rm -f session.ctx prim.ctx key.pub key.priv key.ctx policy.countertimer.minute

    case "$1" in
        no-shut-down) ;;
        *) shut_down ;;
    esac
}
call_policy_countertimer () {
# Wrapper around "tpm2 policycountertimer" that tolerates a known TPM
# errata: on error code 0126 (signed-comparison bug) the whole test is
# skipped; on any other failure the session is flushed and we exit 1.
# The ERR trap is suspended so the failure is handled here, not by onerror.
trap - ERR
output=$(tpm2 policycountertimer $@ 2>&1)
result=$?
if [ $result != 0 ] && echo $output | grep "ErrorCode.*0126" > /dev/null
then
echo "This test failed due to a TPM bug regarding signed comparison as described"
echo "in TCG's Errata for TCG Trusted Platform Module Library Revision 1.59 Version 1.4,"
echo "Section 2.5 TPM_EO – two’s complement"
tpm2 flushcontext session.ctx
skip_test
else
if [ $result != 0 ]; then
tpm2 flushcontext session.ctx
exit 1
fi
fi
# Restore the suite-wide error trap (onerror comes from helpers.sh).
trap onerror ERR
}
trap cleanup EXIT
start_up
cleanup "no-shut-down"
## Check cpHash output for TPM2_PolicyCounterTimer
tpm2 startauthsession -S session.ctx
call_policy_countertimer -S session.ctx -L policy.countertimer.minute --ult 60000 --cphash cp.hash
# Recompute the expected cpHash by hand: command code || session handle
# || operandB (size 8 + 60000 ms as 64-bit) || offset || operation (ULT).
TPM2_CC_PolicyCounterTimer="0000016d"
operandB="0008000000000000ea60"
offset="0000"
operation="0005"
policySession=$(tpm2 sessionconfig session.ctx | grep Session-Handle | \
awk -F ' 0x' '{print $2}')
echo -ne $TPM2_CC_PolicyCounterTimer$policySession$operandB$offset$operation \
| xxd -r -p | openssl dgst -sha256 -binary -out test.bin
xxd cp.hash
xxd test.bin
# Trailing 2 is cmp's SKIP1 operand — presumably skips a 2-byte size
# header at the front of cp.hash; confirm against the tool's output format.
cmp cp.hash test.bin 2
tpm2 flushcontext session.ctx
tpm2 clear
#
# Create a sealing object with a policy that evaluates for first minute after
# TPM restart. NOTE the time is 60000 milliseconds.
#
tpm2 startauthsession -S session.ctx
call_policy_countertimer -S session.ctx -L policy.countertimer.minute --ult 60000
tpm2 flushcontext session.ctx
tpm2 createprimary -C o -c prim.ctx -Q
echo "SUPERSECRET" | \
tpm2 create -Q -u key.pub -r key.priv -i- -C prim.ctx \
-L policy.countertimer.minute -a "fixedtpm|fixedparent" -c key.ctx
#
# ASSUMING 1 minute hasn't elapsed since clear, Try unseal in the first minute
# -- Should pass
#
tpm2 startauthsession -S session.ctx --policy-session
call_policy_countertimer -S session.ctx -L policy.countertimer.minute --ult 60000
tpm2 unseal -c key.ctx -p session:session.ctx
tpm2 flushcontext session.ctx
#
# Test if a policycountertimer evaluates with the clock
#
tpm2 clear
tpm2 startauthsession -S session.ctx --policy-session
call_policy_countertimer -S session.ctx --ult clock=60000
tpm2 flushcontext session.ctx
#
# Test if a policycountertimer evaluates with the TPM clocks safe flag
# Assuming the safe flag is set since with just started and cleared the TPM
#
tpm2 clear
tpm2 startauthsession -S session.ctx --policy-session
call_policy_countertimer -S session.ctx safe
tpm2 flushcontext session.ctx
#
# Test if a policycountertimer evaluates with the TPM reset count
# Assuming the value is zero since we just cleared the TPM
#
tpm2 clear
tpm2 startauthsession -S session.ctx --policy-session
call_policy_countertimer -S session.ctx resets=0
tpm2 flushcontext session.ctx
#
# Test if a policycountertimer evaluates with the TPM restart count
# Assuming the value is zero since we just cleared the TPM
#
tpm2 clear
tpm2 startauthsession -S session.ctx --policy-session
call_policy_countertimer -S session.ctx restarts=0
tpm2 flushcontext session.ctx
exit 0
| true
|
7d5cbd9c2e84f202af4d2cea1ff58aabc02ffbb7
|
Shell
|
jayko1552/Jay-gus
|
/intul.sh
|
UTF-8
| 735
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#Author: Fenix
#Description: Detects the hosts connected to our network
#Requires: nmap
#
#Note: the only thing that needs configuring is the interface to listen on
INTER='wlan0'
# Convert the interface's dotted-decimal netmask (4th ifconfig field) to
# a CIDR bit count by summing the 1-bits of each octet.
# NOTE(review): parsing ifconfig output is distro/format dependent —
# confirm field 4 really is the netmask on the target system.
function mask2bits {
m=`ifconfig $INTER | awk '{print $4}' | grep -o -E '([0-9]{1,3}\.){3}[0-9]+' `
nbits=0
for i in {1..4}
do
v=`echo $m | cut -d'.' -f$i`
while [ $v -ge 1 ]; do
let nbits=nbits+$v%2
let v=v/2
done
done
echo $nbits
}
#
MASK=`mask2bits`
# First IPv4-looking address ifconfig reports for the interface.
IP=`ifconfig $INTER | grep -o -E '([0-9]{1,3}\.){3}[1-9]+' | head -1`
# Ping-scan the subnet and print only the responding IP addresses.
nmap -sP $IP/$MASK | grep -o -E '([0-9]{1,3}\.){3}[0-9]+'
| true
|
f9a86864e51a1b66a956316e1c0b27c9d37c8e6f
|
Shell
|
psauliere/netatmo
|
/launcher.sh
|
UTF-8
| 501
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# launcher: starts netatmo.py in a new tmux session.
# To launch at startup as pi, add this to /etc/rc.local:
#   su -c /home/pi/netatmo/launcher.sh -l pi
readonly session="NETATMO"

echo "Launching tmux"
# allow re-launch: kill any leftover session with the same name first
if tmux has-session -t "$session" 2>/dev/null; then
    tmux kill-session -t "$session"
fi
tmux new-session -d -s "$session"

echo "Pause"
sleep 5

echo "Launching netatmo.py"
tmux send-keys -t "$session".0 "cd ~/netatmo" C-m

echo "Pause"
sleep 5

tmux send-keys -t "$session".0 "./netatmo.py" C-m
| true
|
6a837a3928d9c28457442c6acae9a13a815a9611
|
Shell
|
Lampei/cfml-ci
|
/tests/ci/scripts/ci-helper.sh
|
UTF-8
| 1,978
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI helper: installs a Railo + MXUnit test environment into WORK_DIR
# ("install PROJECTNAME"), and starts/stops the Railo server.
# WORK_DIR and BUILD_DIR must be set!
if [ ! -n "$WORK_DIR" ]; then
echo "WORK_DIR must be set!"
exit 1
fi
if [ ! -n "$BUILD_DIR" ]; then
BUILD_DIR=`pwd`
fi
echo "Working directory: $WORK_DIR, Build directory: $BUILD_DIR"
# Every command except "install" requires an existing WORK_DIR.
if [ ! "$1" == "install" ]; then
if [ ! -d $WORK_DIR ]; then
echo "Working directory doesn't exist and this isn't an install!"
exit 1
else
cd $WORK_DIR
fi
else
if [ ! -n "$2" ]; then
echo "usage: $0 install PROJECTNAME";
exit 1
fi
fi
case $1 in
install)
# Fresh install: recreate WORK_DIR, fetch Railo + MXUnit, link project.
if [ -d $WORK_DIR ]; then
rm -rf $WORK_DIR
fi
mkdir -p $WORK_DIR
cd $WORK_DIR
if [ ! -n "$RAILO_URL" ]; then
RAILO_URL="http://getrailo.com/railo/remote/download/4.1.1.009/railix/linux/railo-express-4.1.1.009-nojre.tar.gz"
fi
if [ ! -n "$MXUNIT_URL" ]; then
MXUNIT_URL="https://github.com/marcins/mxunit/archive/fix-railo-nulls.zip"
fi
WGET_OPTS="-nv"
# Fetch (or copy, for absolute local paths) an archive and unpack it
# in the current directory; zip vs tar.gz chosen by filename suffix.
function download_and_extract {
FILENAME=`echo $1|awk '{split($0,a,"/"); print a[length(a)]}'`
if [[ "$1" == /* ]]; then
echo "Copying $1 to $FILENAME"
cp $1 $FILENAME
else
echo "Downloading $1 to $FILENAME"
wget $WGET_OPTS $1 -O $FILENAME
fi
if [[ "$FILENAME" == *zip ]]; then
unzip -q $FILENAME
else
tar -zxf $FILENAME
fi
rm $FILENAME
result=$FILENAME
}
download_and_extract $RAILO_URL
download_and_extract $MXUNIT_URL
mv railo-express* railo
mv mxunit* railo/webapps/www/mxunit
# Expose the project build under the Railo webroot as $2.
ln -s $BUILD_DIR railo/webapps/www/$2
;;
start)
if [ ! -f railo/start ]; then
echo "Railo start script does not exist!"
exit 1
fi
echo "Starting Railo..."
sh railo/start>/dev/null &
# Poll until the server answers on port 8888.
until curl -s http://localhost:8888>/dev/null
do
echo "Waiting for Railo..."
sleep 1
done
;;
stop)
echo "Stopping Railo..."
sh railo/stop
# Poll until the server stops answering.
while curl -s http://localhost:8888>/dev/null
do
echo "Waiting for Railo..."
sleep 1
done
;;
*)
echo "Usage: $0 {install|start|stop}"
exit 1
;;
esac
exit 0
| true
|
3438020f2e723fd1c6393143b2aea9938d4f4e3d
|
Shell
|
djbingham/php-celestial
|
/Example/ToDoList/Script/composer.sh
|
UTF-8
| 267
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run a Composer command against the App/ directory via the
# composer/composer docker image, from wherever this script is invoked.
composer_cmd=$1 # Composer command to execute. e.g. "install"

# Work relative to the directory above this script, restoring the
# caller's cwd afterwards.
script_dir=$( dirname "${BASH_SOURCE[0]}" )
pushd "$script_dir/.." > /dev/null

docker run \
    --rm \
    -v "$(pwd)/App:/app" \
    --workdir /app \
    composer/composer \
    ${composer_cmd} --no-interaction

popd > /dev/null
| true
|
ea426c20b09b125eff36078be2327e1a3024f6f0
|
Shell
|
higher-security/rtl-cmd
|
/rtl_power-fm-waterfall
|
UTF-8
| 204
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Sweep the FM broadcast band (88-108 MHz, 125 kHz bins) with rtl_power,
# render the CSV as a heatmap and open it — all detached in the background.
DURATION=5m
# Output CSV path: the script's arguments, or a timestamped /tmp default.
# NOTE(review): "$@" in a scalar assignment joins all args with spaces —
# presumably ${1:-...} was intended; confirm before changing.
FILE=${@:-/tmp/rtl_power-fm-$(date +%d-%m-%Y_%H-%M-%S).csv}
# Scan, render (heatmap.py), view (eog); silence all output and detach.
(rtl_power -i $DURATION -f 88M:108M:125k $FILE ; python heatmap.py $FILE $FILE.jpg; eog $FILE.jpg ) >/dev/null 2>&1 &
| true
|
3c709465f585dba8552c78b3a6cffaea05e441ca
|
Shell
|
prietod/BeadArray
|
/job/generate-phenotype-details
|
UTF-8
| 854
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build BeadArray phenotype-detail tables: one master table, then one
# per-dataset table restricted to the chips present in that dataset's
# filtered expression data.
# Expects BASE, WORK_DIR and DATASETS via lib/env.sh / the environment.
set -x
source ${BASE}/lib/env.sh
source ${BASE}/lib/util.sh
# Master phenotype table from the annotation TSV plus the chip map.
${BASE}/bin/util/generate-phenotype-details \
/hiidata/projects/BeadArray/cols_added.tsv \
/hiidata/teddy/data/jinfiniti/gene_expression/map_info.txt \
> ${WORK_DIR}/common/BeadArray_phenotype_details.txt
for dataset in ${DATASETS}; do
# Chips present in this dataset's filtered data.
${BASE}/bin/util/find-chip-arrays ${BASE}/tmp/data/filtered/${dataset} \
> ${WORK_DIR}/common/chip-list-filtered-${dataset}.txt
# Header row first, then only the rows whose chip appears in the list.
head -1 ${WORK_DIR}/common/BeadArray_phenotype_details.txt \
> ${WORK_DIR}/common/BeadArray_phenotype_details-${dataset}.txt
grep -f ${WORK_DIR}/common/chip-list-filtered-${dataset}.txt ${WORK_DIR}/common/BeadArray_phenotype_details.txt \
>> ${WORK_DIR}/common/BeadArray_phenotype_details-${dataset}.txt
wc -l ${WORK_DIR}/common/BeadArray_phenotype_details-${dataset}.txt
done
| true
|
c23ff0802374c33964921cd79a4e98ae4c26cbdf
|
Shell
|
ChrisJ60/nweventwatcher
|
/getNwEnv
|
UTF-8
| 16,488
| 2.890625
| 3
|
[
"UPL-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/zsh
#
# Copyright (c) Chris Jenkins 2019, 2020
#
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl
#
############################################################
# reset all shell options to default
emulate -LR zsh
# set required options
setopt nullglob
setopt shwordsplit
setopt ksharrays
############################################################
# reset PATH to system default
eval "$(/usr/libexec/path_helper)"
#
# Some constants
#
readonly tmpTemplate="/tmp/gne.XXXXXX"
readonly stNone="N"
readonly stEthernet="E"
readonly stWiFi="W"
readonly stUSB="U"
readonly stBluetooth="B"
readonly stVPN="V"
#
# The supported environments
#
readonly integer numEnvs=13
declare -a envName=( "" )
declare -a envSrvType=( "" )
declare -a envDesc=( "" )
#
# Environment names
#
envName[0]="None"
envName[1]="Home-LAN"
envName[2]="Home-WiFi"
envName[3]="Work-LAN"
envName[4]="Work-WiFi-Internal"
envName[5]="Work-WiFi-Public"
envName[6]="Home-Guest"
envName[7]="iPhone-WiFi"
envName[8]="iPad-WiFi"
envName[9]="iPhone-USB_BT"
envName[10]="Home-VPN"
envName[11]="Work-VPN"
envName[12]="Nord-VPN"
#
# Environment descriptions
#
envDesc[0]="No active network connections"
envDesc[1]="Wired connection to the Home network"
envDesc[2]="Connection to the Home WiFi network"
envDesc[3]="Wired connection to the Work network"
envDesc[4]="Connection to the Work Corporate WiFi network"
envDesc[5]="Connection to the Work Public WiFi network"
envDesc[6]="Connection to the Home Guest WiFi network"
envDesc[7]="Tethered to iPhone via WiFi"
envDesc[8]="Tethered to iPad via WiFi"
envDesc[9]="Tethered to iPhone or iPad via USB or Bluetooth"
envDesc[10]="Connection to Home VPN"
envDesc[11]="Connection to Work VPN"
envDesc[12]="Connection to Nord VPN"
#
# Environment types
#
envSrvType[0]=${stNone}
envSrvType[1]=${stEthernet}
envSrvType[2]=${stWiFi}
envSrvType[3]=${stEthernet}
envSrvType[4]=${stWiFi}
envSrvType[5]=${stWiFi}
envSrvType[6]=${stWiFi}
envSrvType[7]=${stWiFi}
envSrvType[8]=${stWiFi}
envSrvType[9]=${stUSB}
envSrvType[10]=${stVPN}
envSrvType[11]=${stVPN}
envSrvType[12]=${stVPN}
#
# macOS network service info
#
readonly integer numMatchEntries=9
declare -a matchString=( "" )
declare -a matchType=( "" )
declare -a matchIface=( "" )
matchString[0]="Ethernet"
matchType[0]="${stEthernet}"
matchIface[0]=""
matchString[1]="USB-C LAN"
matchType[1]="${stEthernet}"
matchIface[1]=""
matchString[2]="Wi-Fi"
matchType[2]="${stWiFi}"
matchIface[2]=""
matchString[3]="iPhone USB"
matchType[3]="${stUSB}"
matchIface[3]=""
matchString[4]="iPad USB"
matchType[4]="${stUSB}"
matchIface[4]=""
matchString[5]="Bluetooth PAN"
matchType[5]="${stBluetooth}"
matchIface[5]=""
matchString[6]="L2TP"
matchType[6]="${stVPN}"
matchIface[6]="ppp0"
matchString[7]="Cisco VPN"
matchType[7]="${stVPN}"
matchIface[7]="utun0"
matchString[8]="Nord VPN"
matchType[8]="${stVPN}"
matchIface[8]="ipsec0"
# Service data for Cisco AnyConnect VPN / OpenConnect VPN
readonly pathAnyConnect="/Applications/Cisco/Cisco AnyConnect Secure Mobility Client.app"
readonly pathOpenConnect="/Applications/OpenConnect VPN.app"
readonly infoAnyConnect="(1) AnyConnect VPN
(Hardware Port: Cisco VPN, Device: utun0)"
# Service data for Nord VPN
readonly pathNordVPN="/Applications/NordVPN.app"
readonly infoNordVPN="(1) Nord VPN
(Hardware Port: Nord VPN, Device: utun0)"
# Service data for Nord VPN IKE
readonly pathNordVPNIKE="/Applications/NordVPN IKE.app"
readonly infoNordVPNIKE="(1) Nord VPN IKE
(Hardware Port: Nord VPN IKE, Device: ipsec0)"
# Service data for Shimo VPN using OpenVPN
readonly pathShimo="/Applications/Shimo.app"
readonly infoShimo="(1) Shimo VPN
(Hardware Port: OpenVPN, Device: utun0)"
readonly localv6pfx="fd00::"
readonly integer minService=1
readonly integer maxService=9
integer numServices=0
declare -a srvType=( "" )
declare -a srvName=( "" )
declare -a srvIface=( "" )
declare -a srvActive=( 0 )
integer vpnConfigured=1
#
# IP matching environments
#
declare -a getIPcmd=( "" )
declare -a ipCheckPrefix=( "" )
declare -a ipEnvInd=( 0 )
readonly integer numIpItems=7
#
# IP Prefixes (v4 or v6)
#
# WorkVPN, v6 only
ipCheckPrefix[0]="2606:b400:8f0:"
# HomeLAN, v6 only
ipCheckPrefix[1]="2001:470:1f09:2df:"
# WorkLAN, v6 only
ipCheckPrefix[2]="2606:b400:"
# iPhone USB or BT, v4 only
ipCheckPrefix[3]="172.20.10."
# HomeVPN, v4 only
ipCheckPrefix[4]="10.10.200."
ipCheckPrefix[5]="10.50.200."
# NordVPN, v4 only
ipCheckPrefix[6]="10."
#
# Associated 'getmyip' commands
#
# WorkVPN, v6 only
getIPcmd[0]="getmyip vpn -6 -g"
# HomeLAN, v6 only
getIPcmd[1]="getmyip wired -6 -g"
# WorkLAN, v6 only
getIPcmd[2]="getmyip wired -6 -g"
# iPhone USB or BT, v4 only
getIPcmd[3]="getmyip wired -4"
# HomeVPN, v4 only
getIPcmd[4]="getmyip vpn -4"
getIPcmd[5]="getmyip vpn -4"
# NordVPN, v4 only
getIPcmd[6]="getmyip vpn -4"
#
# Map matched environments to defined environments
#
# WorkVPN, v6 only
ipEnvInd[0]=11
# HomeLAN, v6 only
ipEnvInd[1]=1
# WorkLAN, v6 only
ipEnvInd[2]=3
# iPhone USB or BT, v4 only
ipEnvInd[3]=9
# HomeVPN, v4 only
ipEnvInd[4]=10
ipEnvInd[5]=10
# NordVPN, v4 only
ipEnvInd[6]=12
#
# WiFi matched environments
#
declare -a getWIFIcmd
declare -a wifiNwName
declare -ai wifiEnvInd
declare -ir numWiFiItems=6
#
# Command to determine SSID
#
getWIFIcmd[0]="getwifinetworks"
getWIFIcmd[1]="getwifinetworks"
getWIFIcmd[2]="getwifinetworks"
getWIFIcmd[3]="getwifinetworks"
getWIFIcmd[4]="getwifinetworks"
getWIFIcmd[5]="getwifinetworks"
#
# Network name SSID
#
wifiNwName[0]="Home WiFi"
wifiNwName[1]="Work WiFi Corporate"
wifiNwName[2]="Work WiFi Public"
wifiNwName[3]="My iPhone"
wifiNwName[4]="My iPad"
wifiNwName[5]="Guest WiFi"
#
# Map SSID to environment
#
wifiEnvInd[0]=2
wifiEnvInd[1]=4
wifiEnvInd[2]=5
wifiEnvInd[3]=7
wifiEnvInd[4]=8
wifiEnvInd[5]=6
declare cmd=""
declare -i ret=1
declare -i doAll=1
declare -i foundMatch=1
usage()
{
# Print brief usage; with any argument also print the catalogue of
# recognised environments (used by "help"). Always exits 2.
echo
echo "Usage:"
echo
echo " getNwEnv help"
echo " getNwEnv show [-all]"
echo " getNwEnv list [-verbose]"
echo
echo "Displays the name(s) of the currently active recognised network environment(s)."
echo "If '-all' is specified, lists all active environments otherwise lists just"
echo "the primary one."
echo
if [[ $# -gt 0 ]]
then
echo "Recognised environments and their names are:"
echo
echo " None - No active network connections"
echo
echo " Home-VPN - Connected to Home VPN."
echo
echo " Work-VPN - Connected to Work VPN."
echo
echo " Nord-VPN - Connected to Nord VPN."
echo
echo " Home-LAN - A wired connection to the Home network."
echo
echo " Home-WiFi - A WiFi connection to the Home network."
echo
echo " Home-Guest - A WiFi connection to the Home guest network."
echo
echo " iPhone-USB_BT - Tethered to iPhone/iPad via USB or Bluetooth."
echo
echo " iPhone-WiFi - Tethered to iPhone via WiFi."
echo
echo " iPad-WiFi - Tethered to iPad via WiFi."
echo
echo " Work-LAN - A wired connection to the Oracle network."
echo
echo " Work-WiFi-Internal"
echo " - Connected to work corporate WiFi network"
echo
echo " Work-WiFi-Public"
echo " - Connected to work public WiFi network."
echo
echo "If at least one recognised environment is listed the exit code is 0 otherwise"
echo "it is > 0."
echo
fi
exit 2
}
listEnvs()
{
    # Print the known environment names (skipping consecutive duplicates
    # in the envName table). With any argument, append ":description".
    local previous=""
    local -i idx
    local -i with_desc=1    # 0 (true) when an argument was supplied

    [[ $# -gt 0 ]] && with_desc=0

    for (( idx = 0; idx < numEnvs; idx++ ))
    do
        if [[ "${envName[${idx}]}" != "${previous}" ]]
        then
            if [[ ${with_desc} -eq 0 ]]
            then
                echo "${envName[${idx}]}:${envDesc[${idx}]}"
            else
                echo "${envName[${idx}]}"
            fi
        fi
        previous="${envName[${idx}]}"
    done
    return 0
}
getV4Address()
{
    # Print the IPv4 address of interface $1 (stripping netmask and
    # point-to-point peer fields); return 1 when none is found.
    local -i rc=0
    local ip4=""

    if [[ $# -eq 1 ]]
    then
        ip4=$(ifconfig "$1" 2>/dev/null | grep "inet " | sed -e 's/.*inet //' -e 's/ netmask.*$//' -e 's/ --> .*//')
    fi

    if [[ "${ip4}" == "" ]]
    then
        rc=1
    else
        echo ${ip4}
    fi
    return ${rc}
}
getV6Address()
{
    # Print the global IPv6 address of interface $1, excluding link-local
    # (scopeid), temporary, and ULA (${localv6pfx}) addresses; return 1
    # when none is found.
    local -i rc=0
    local ip6=""

    if [[ $# -eq 1 ]]
    then
        ip6=$(ifconfig "$1" 2>/dev/null | grep "inet6 " | grep -v " scopeid " | grep -v "temporary" | grep -v "${localv6pfx}" | sed -e 's/.*inet6 //' -e 's/ prefixlen.*$//')
    fi

    if [[ "${ip6}" == "" ]]
    then
        rc=1
    else
        echo ${ip6}
    fi
    return ${rc}
}
addService()
{
# Register a network service in the srv* parallel arrays.
#   $1 - index into the match* tables; $2 - networksetup service record.
# Skips duplicates (same device), flags VPN configuration, and marks the
# service active when its interface holds an IPv6 or IPv4 address.
local sType
local sName
local sIface
local -i n=0
sType="${matchType[$1]}"
# First record line looks like "(3) Wi-Fi" — strip the "(n) " ordinal.
sName=`echo "$2" | head -n 1 | sed -e 's/^([0-9]*) //'`
if [[ "${matchIface[$1]}" != "" ]]
then
sIface="${matchIface[$1]}"
else
# Last record line carries "... Device: en0)" — extract the device.
sIface=`echo "$2" | tail -n 1 | sed -e 's/^.* Device: //' -e 's/).*$//'`
fi
# Already registered for this interface? Keep the first entry.
while [[ ${n} -lt ${numServices} ]]
do
if [[ "${sIface}" == "${srvIface[${n}]}" ]]
then
return 0
fi
n=${n}+1
done
srvType[${numServices}]="${sType}"
if [[ "${sType}" == "${stVPN}" ]]
then
vpnConfigured=0
fi
srvName[${numServices}]="${sName}"
srvIface[${numServices}]="${sIface}"
srvActive[${numServices}]=1
# Active (0) when the interface has any usable v6 or v4 address.
getV6Address "${srvIface[${numServices}]}" >& /dev/null
if [[ $? -eq 0 ]]
then
srvActive[${numServices}]=0
else
getV4Address "${srvIface[${numServices}]}" >& /dev/null
if [[ $? -eq 0 ]]
then
srvActive[${numServices}]=0
fi
fi
numServices=${numServices}+1
return 0
}
getServices()
{
# Populate the srv* tables from "networksetup -listnetworkserviceorder",
# then append known VPN clients that are detected by the presence of
# their application bundle rather than by service order.
local tmpf=""
local srvinfo
local -i n=${minService}
local -i i=0
tmpf=`mktemp -q "${tmpTemplate}"`
if [[ "${tmpf}" = "" ]]
then
return 1
fi
# Keep only the "(n) Name" / "(Hardware Port: ...)" record lines.
networksetup -listnetworkserviceorder 2>/dev/null | grep '^(' > "${tmpf}"
while [[ ${n} -le ${maxService} ]]
do
# Extract the two-line record for service number n.
srvinfo=`sed -e "/^(${n})/,/^(H/!d" < "${tmpf}"`
if [[ "${srvinfo}" != "" ]]
then
i=0
while [[ ${i} -lt ${numMatchEntries} ]]
do
echo "${srvinfo}" | grep -q "Hardware Port: .*${matchString[${i}]}"
if [[ $? -eq 0 ]]
then
addService ${i} "${srvinfo}"
i=${numMatchEntries}
else
i=${i}+1
fi
done
fi
n=${n}+1
done
# VPN apps: register synthetic service records when installed.
if [[ -d "${pathAnyConnect}" ]] || \
[[ -d "${pathOpenConnect}" ]]
then
i=${numMatchEntries}-2
srvinfo="${infoAnyConnect}"
addService ${i} "${srvinfo}"
fi
if [[ -d "${pathNordVPN}" ]]
then
i=${numMatchEntries}-2
srvinfo="${infoNordVPN}"
addService ${i} "${srvinfo}"
fi
if [[ -d "${pathNordVPNIKE}" ]]
then
i=${numMatchEntries}-1
srvinfo="${infoNordVPNIKE}"
addService ${i} "${srvinfo}"
fi
if [[ -d "${pathShimo}" ]]
then
i=${numMatchEntries}-3
srvinfo="${infoShimo}"
addService ${i} "${srvinfo}"
fi
rm -f "${tmpf}" >/dev/null 2>&1
return 0
}
prefixMatches()
{
    # Return 0 iff address $1 begins with prefix $2 (anchored grep match;
    # the prefix is treated as a regular expression, as in the tables).
    local address="$1"
    local prefix="$2"

    echo "${address}" | grep -q "^${prefix}" >/dev/null 2>&1
}
checkIPmatches()
{
# Match an IP address against the ipCheckPrefix table and print the
# mapped environment name. $1 (optional) is the address to test — when
# absent, each entry's getIPcmd is run instead. $2 selects VPN entries
# (0) or non-VPN entries (1). On a hit, sets the global foundMatch=0.
local ipaddr
local -i ret=1
local -i vpn=$2
local -i n=0
while [[ ${n} -lt ${numIpItems} ]]
do
if [[ $# -gt 0 ]]
then
ipaddr="$1"
else
ipaddr=`${getIPcmd[${n}]}`
fi
if [[ $? -eq 0 ]]
then
if prefixMatches "${ipaddr}" "${ipCheckPrefix[${n}]}"
then
# Only report entries of the requested kind (VPN vs non-VPN).
if [[ ${vpn} -eq 0 ]] && \
[[ ${envSrvType[${ipEnvInd[${n}]}]} == ${stVPN} ]]
then
foundMatch=0
ret=0
echo "${envName[${ipEnvInd[${n}]}]}"
n=${numIpItems}
elif [[ ${vpn} -eq 1 ]] && \
[[ ${envSrvType[${ipEnvInd[${n}]}]} != ${stVPN} ]]
then
foundMatch=0
ret=0
echo "${envName[${ipEnvInd[${n}]}]}"
n=${numIpItems}
fi
fi
fi
n=${n}+1
done
return ${ret}
}
checkWiFiMatches()
{
    # Match the supplied SSID ($1; when absent, each entry's getWIFIcmd
    # is run) against the wifiNwName table and print the mapped
    # environment name. On a hit, sets the global foundMatch=0.
    local ssid
    local -i rc=1
    local -i idx

    for (( idx = 0; idx < numWiFiItems; idx++ ))
    do
        if [[ $# -gt 0 ]]
        then
            ssid="$1"
        else
            ssid=$(${getWIFIcmd[${idx}]})
        fi
        if [[ $? -eq 0 ]]
        then
            if [[ "${ssid}" = "${wifiNwName[${idx}]}" ]]
            then
                foundMatch=0
                rc=0
                echo "${envName[${wifiEnvInd[${idx}]}]}"
                break
            fi
        fi
    done
    return ${rc}
}
checkVPNActive()
{
# For each active VPN interface (per getnwinterfaces), look up its
# address (v6 preferred) and report matching VPN environments via
# checkIPmatches. Stops after the first match unless -all (doAll=0).
local -i n=0
local ipaddr=""
local vpnif=""
for vpnif in `getnwinterfaces vpn -active`
do
ipaddr=`getV6Address "${vpnif}"`
if [[ "${ipaddr}" == "" ]]
then
ipaddr=`getV4Address "${vpnif}"`
fi
if [[ "${ipaddr}" != "" ]]
then
checkIPmatches "${ipaddr}" 0
fi
# doAll=1 means "primary only": return as soon as something matched.
if [[ ${foundMatch} -eq 0 ]] && [[ ${doAll} -eq 1 ]]
then
return 0
fi
done
return 0
}
checkActive()
{
# Walk the active non-VPN services and report matching environments:
# wired/USB/Bluetooth services match by IP prefix, WiFi by SSID.
# Honours doAll (0 = report all matches, 1 = stop after the first).
local -i n=0
local -i done
local ipaddr=""
local nwname=""
# done != 0 keeps the loop running; recomputed after each service.
if [[ ${foundMatch} -eq 1 ]] || [[ ${doAll} -eq 0 ]]
then
done=1
else
done=0
fi
while [[ ${n} -lt ${numServices} ]] && [[ ${done} -ne 0 ]]
do
if [[ ${srvActive[${n}]} -eq 0 ]]
then
case "${srvType[${n}]}" in
${stEthernet}|${stUSB}|${stBluetooth})
# Prefer the global IPv6 address, fall back to IPv4.
ipaddr=`getV6Address "${srvIface[${n}]}"`
if [[ "${ipaddr}" == "" ]]
then
ipaddr=`getV4Address "${srvIface[${n}]}"`
fi
if [[ "${ipaddr}" != "" ]]
then
checkIPmatches "${ipaddr}" 1
fi
;;
${stWiFi})
# Current SSID as reported by networksetup.
nwname=`networksetup -getairportnetwork "${srvIface[${n}]}" 2>/dev/null | grep '^Current Wi-Fi Network:' | sed -e 's/^Current Wi-Fi Network: //'`
if [[ "${nwname}" != "" ]]
then
checkWiFiMatches "${nwname}"
fi
;;
esac
fi
n=${n}+1
if [[ ${foundMatch} -eq 1 ]] || [[ ${doAll} -eq 0 ]]
then
done=1
else
done=0
fi
done
return 0
}
doShow()
{
# 'show' subcommand: print the active environment(s); -all prints every
# match instead of only the primary one. Returns 0 when anything (or
# "None", with no active interfaces at all) was printed.
# NOTE: doAll/ret shadow the globals; zsh locals are dynamically scoped,
# so callees (checkVPNActive/checkActive) see these values.
local -i doAll=1
local -i ret=1
if [[ $# -gt 1 ]]
then
usage
elif [[ $# -eq 1 ]]
then
if [[ "$1" == "-all" ]]
then
doAll=0
else
usage
fi
fi
getServices
# VPNs take precedence: an active VPN defines the environment.
if [[ ${vpnConfigured} -eq 0 ]]
then
checkVPNActive
fi
if [[ ${foundMatch} -eq 1 ]] || [[ ${doAll} -eq 0 ]]
then
checkActive
fi
if [[ ${foundMatch} -eq 0 ]]
then
ret=0
fi
if [[ ${ret} -eq 1 ]]
then
# Nothing recognised: report "None" when no interface is active at all.
if ! getnwinterfaces all -active >& /dev/null
then
echo "${envName[0]}"
ret=0
fi
fi
return ${ret}
}
doList()
{
    # 'list' subcommand: print the known environment names, with their
    # descriptions when -verbose is given; anything else shows usage.
    local -i want_verbose=1

    case $# in
        0)
            ;;
        1)
            if [[ "$1" == "-verbose" ]]
            then
                want_verbose=0
            else
                usage
            fi
            ;;
        *)
            usage
            ;;
    esac

    if [[ ${want_verbose} -eq 0 ]]
    then
        listEnvs Y
    else
        listEnvs
    fi
    return 0
}
# Entry point: require a subcommand, then dispatch to show/list/help.
# "help" and unknown commands print usage and exit 2 from usage itself.
if [[ $# -eq 0 ]]
then
usage
fi
cmd="$1"
shift
case "${cmd}" in
"show")
doShow "$@"
ret=$?
;;
"list")
doList "$@"
ret=$?
;;
"help")
usage Y
;;
*)
usage
;;
esac
exit ${ret}
| true
|
d44af66145042e3db9d2c68f6d44e7e8e4dfc1c8
|
Shell
|
jackyko1991/gadgetron
|
/docker/build_base_images.sh
|
UTF-8
| 1,255
| 2.53125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build every gadgetron base Docker image, one per base/<variant> dir.
# FIX: stop on the first failure — previously a failed 'cd' let the
# following 'docker build' run in the wrong directory.
set -e

base_dir=$(pwd)

# build_image <subdir under base/> <image tag suffix>
build_image() {
    cd "${base_dir}/base/$1"
    docker build --network=host -t "gadgetron/$2" .
}

build_image ubuntu_1604 ubuntu_1604_base
build_image ubuntu_1604_cuda55 ubuntu_1604_cuda55_base
build_image ubuntu_1604_cuda75 ubuntu_1604_cuda75_base
build_image ubuntu_1604_cuda80 ubuntu_1604_cuda80_base
build_image ubuntu_1604_cuda80_cudnn6 ubuntu_1604_cuda80_cudnn6_base
build_image ubuntu_1604_cuda80_cudnn7 ubuntu_1604_cuda80_cudnn7_base
build_image ubuntu_1604_cuda90_cudnn7 ubuntu_1604_cuda90_cudnn7_base
build_image ubuntu_1604_cuda92_cudnn7 ubuntu_1604_cuda92_cudnn7_base
build_image ubuntu_1804 ubuntu_1804_base
# NOTE: directory ubuntu_1804_cuda90 maps to the ..._cuda90_cudnn7_base
# tag, exactly as in the original script.
build_image ubuntu_1804_cuda90 ubuntu_1804_cuda90_cudnn7_base
build_image ubuntu_1804_cuda92_cudnn7 ubuntu_1804_cuda92_cudnn7_base
| true
|
4294d63798dda45e0228f1aaa8a405fd5fde190d
|
Shell
|
didekin/comunidad_client_libs
|
/didekinlib_utilities/buildtest.sh
|
UTF-8
| 1,139
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# It must be executed after 'cdlibs' with './didekinlib_utilities/buildtest.sh environment version'
# Environments: local, master
# FIX: the shebang was on line 3, after the comments, where the kernel
# ignores it — it must be the very first line of the file.
export FUNCTIONS_FILE=terminal/envfunctions
if ! [ -f ${FUNCTIONS_FILE} ]
then
echo "No environment functions file: $FUNCTIONS_FILE" 1>&2; exit 1
else
. ${FUNCTIONS_FILE}
fi
[ $# -ne 2 ] && { echo "args count should be 2" 1>&2; exit 1;}
# setArgsLibs (from envfunctions) derives ENV, VERSION, VERSION_SUFFIX...
setArgsLibs "$1" "$2"
./gradlew didekinlib_utilities:clean
git checkout ${ENV}
if [ ${ENV} = "$LOCAL_ENV" ] ; then
git add .
git commit -m "version $VERSION"
fi
if [ ${ENV} = "$PRO_ENV" ] ; then
git merge "$LOCAL_ENV" -m "version $VERSION"
fi
# Local builds get an "-${ENV}" suffix in the jar version.
if [ ${ENV} = "$LOCAL_ENV" ] ; then
./gradlew -Pversionjar=${VERSION_SUFFIX}-${ENV} didekinlib_utilities:build
else
./gradlew -Pversionjar=${VERSION_SUFFIX} didekinlib_utilities:build
fi
echo "================= ¡¡¡ gradle didekinlib_utilities build exit code = $?"
/usr/bin/ssh-add -K
git push ${GITREMOTE} ${ENV}
# Publish the fresh jar into the per-environment releases directory.
rm didekinlib_utilities/releases/${ENV}/*
mv didekinlib_utilities/build/libs/*.jar didekinlib_utilities/releases/${ENV}/
git checkout local
| true
|
6f8f08fc75bf6e72db03457c2c11740c9c78dea2
|
Shell
|
aidmax/terraform-gcp-k8s
|
/scripts/get_config.sh
|
UTF-8
| 394
| 2.984375
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -e
declare TF_VAR_ssh_user
# Loading environment variables
source .envrc
mkdir -p "$HOME"/.kube_gcp
scp "$TF_VAR_ssh_user"@"$(terraform output k8s-master-ip)":.kube/config "$HOME"/.kube_gcp/config
KUBECONFIG="$HOME"/.kube_gcp/config kubectl config set-cluster kubernetes --server=https://"$(terraform output k8s-master-ip)":6443
export KUBECONFIG="$HOME"/.kube_gcp/config
| true
|
73eaa5111a93304884ce4e2ba264178b2f157d99
|
Shell
|
petronny/aur3-mirror
|
/ooc-gtksourceview-git/PKGBUILD
|
UTF-8
| 931
| 3.046875
| 3
|
[] |
no_license
|
# Contributor: Scott Olson <scott@scott-olson.org>
pkgname=ooc-gtksourceview-git
pkgdesc="ooc lang files for gtksourceview"
url="http://github.com/nddrylliog/ooc-gtksourceview"
license=('LGPL')
arch=('i686' 'x86_64')
pkgver=20100201
pkgrel=1
depends=('gtksourceview2' 'shared-mime-info')
makedepends=('git')
_gitroot='git://github.com/nddrylliog/ooc-gtksourceview.git'
_gitname='ooc-gtksourceview'
_gitbranch='master'
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [ -d ${srcdir}/${_gitname} ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout. Preparing sources..."
rm -rf ${srcdir}/${_gitname}-build
cp -r ${srcdir}/${_gitname} ${srcdir}/${_gitname}-build
msg "Starting make..."
cd ${srcdir}/${_gitname}-build
install -Dm755 ooc.lang $pkgdir/usr/share/gtksourceview-2.0/language-specs/ooc.lang
}
| true
|
7696c196d7a1641cada778880d465dbc27125ef1
|
Shell
|
mulriple/devdb
|
/dev/e2dynamo/scripts/start-node
|
UTF-8
| 594
| 3.25
| 3
|
[] |
no_license
|
#! /bin/bash
# Program:
# Start the e2d_node
if [ $# -lt 1 ]
then
echo -e "Please specify the config file path:\n" \
"Usage: startnode config_file"
exit 0
fi
base="`dirname $(which "$0")`/.."
ROOTDIR=`(cd "$base"; echo $PWD)`
MnesiaDir="var/e2dynamo/mnesia"
export E2D_ROOT=$ROOTDIR
export E2D_CONF_PATH=$ROOTDIR/conf
#echo $E2D_CONF_PATH
# mk mnesia dir
if ! mkdir -p "$MnesiaDir" ; then
echo "make mnesia dir error\n"
exit 1
fi
erl +K true \
+A 32 \
+W w \
-pa ./ebin ./conf \
-config $1 \
-mnesia dir '"var/e2dynamo/mnesia"' \
-s e2d
| true
|
427e2d9a617012c90438f8d28997cb77f95ea690
|
Shell
|
sebuaa2020/Team108
|
/scripts/setup.sh
|
UTF-8
| 610
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
if ! [ -d "/opt/ros/kinetic" ]; then
echo "install ros-kinetic"
# install ros kinetic
sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
sudo apt-get update
sudo apt-get install ros-kinetic-desktop-full
# environment setup
echo "source /opt/ros/kinetic/setup.bash" >> ~/.bashrc
# install dependencies
sudo apt-get install python-rosdep python-rosinstall python-rosinstall-generator python-wstool build-essential
sudo apt-get install python-rosdep
sudo rosdep init
rosdep update
fi
echo "ros setup finished!"
| true
|
9970087d64d724b0eba5d5a0d9093770f86dda6a
|
Shell
|
fellowlei/myshell
|
/shell/mysql/gen-mysqltest.sh
|
UTF-8
| 767
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
#show databases;
#show tables;
#select * from moneybox;
#create table tb2 as select dt,id,sum(money) from moneybox group by id,dt;
#file name
sqlfile=gen_insert.sql
# gen insert sql
function gen_insert(){
for ((i=1; i<=5; i++))
do
echo "insert into moneybox values('2017-01-0$1 01:00:00','user$1',$100);" >> $sqlfile;
done
}
#clear sqlfile
echo > $sqlfile
#gen create table
echo "use test;" >> $sqlfile
echo "create table moneybox(dt date,id varchar(20),money int);" >> $sqlfile
#gen insert sql
for ((j=1; j<6; j++))
do
gen_insert $j
done
echo "gen $sqlfile success"
mysql -uroot -p -e "source $sqlfile"
#mysql -uroot -p -e "source test.sql"
#mysql -uroot -p < test.sql
echo "import $sqlfile to mysql success"
| true
|
e0c5a7a41cce47cbbba368277e3e625034483e1d
|
Shell
|
luckyplusten/graphanalytics
|
/scripts/xilinx-tigergraph-install/install.sh
|
UTF-8
| 6,406
| 3.5625
| 4
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright 2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright 2020-2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPT=$(readlink -f $0)
SCRIPTPATH=`dirname $SCRIPT`
function usage() {
echo "Usage: $0 -p product-name [options]"
echo "Required options:"
echo " -p product-name : Product to install: cosinesim, louvainmod. "
echo "Optional options:"
echo " -h : Print this help message"
}
product="none"
while getopts ":p:h" opt
do
case $opt in
p) product=$OPTARG;;
h) usage; exit 1;;
?) echo "ERROR: Unknown option: -$OPTARG"; usage; exit 1;;
esac
done
OSDIST=`lsb_release -i |awk -F: '{print tolower($2)}' | tr -d ' \t'`
OSREL=`lsb_release -r |awk -F: '{print tolower($2)}' |tr -d ' \t' | awk -F. '{print $1*100+$2}'`
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
if [[ $OSDIST == "ubuntu" ]]; then
if (( $OSREL == 1804 )); then
pkg_dir="./ubuntu-18.04"
elif (( $OSREL == 2004 )); then
pkg_dir="./ubuntu-20.04"
else
echo "ERROR: Ubuntu release version must be 18.04 or 20.04."
return 1
fi
elif [[ $OSDIST == "centos" ]]; then
pkg_dir="./centos-7.8"
else
echo "ERROR: only Ubuntu and Centos are supported."
return 1
fi
if [[ $product == "cosinesim" || $product == "louvainmod" ]] ; then
echo "INFO: Installing Xilinx $product product and its dependencies on $OSDIST $OSREL..."
else
echo "ERROR: product-name must be set to cosinesim or louvainmod."
usage
exit 2
fi
if [[ $OSDIST == "ubuntu" ]]; then
# install XRT/XRM/Deployment shell
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install XRT. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/xrt/xrt*.deb
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install XRM. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/xrm/xrm*.deb
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install deployment shell. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install $pkg_dir/../deployment-shell/xilinx*.deb
# install required package
sudo apt install jq opencl-headers -y
if [[ $product == "cosinesim" ]]; then
# install required package
sudo apt install jq opencl-headers -y
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install Xilinx CosineSim. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/cosinesim/xilinx-cosinesim*.deb
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install Xilinx Recommend Engine. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/cosinesim/xilinx-recomengine*.deb
elif [[ $product == "louvainmod" ]]; then
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install Xilinx LouvainMod. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/louvainmod/xilinx-louvainmod*.deb
printf "\n-------------------------------------------------------------\n"
printf "INFO: Install Xilinx ComDetect. Enter sudo password if asked."
printf "\n-------------------------------------------------------------\n"
sudo apt install --reinstall $pkg_dir/louvainmod/xilinx-comdetect*.deb
fi
fi
if [[ $OSDIST == "centos" ]]; then
# install XRT/XRM/Deployment shell
printf "\nINFO: Install XRT. \n"
sudo yum install $pkg_dir/xrt/xrt*.rpm
printf "\nINFO: Install XRM. Enter sudo password if asked.\n"
sudo yum install $pkg_dir/xrm/xrm*.rpm
printf "\nINFO: Install deployment shell\n"
sudo yum install $pkg_dir/deployment-shell/xilinx*.rpm
if [[ $product == "cosinesim" ]]; then
# install required package
sudo yum install jq opencl-headers -y
printf "\nINFO: Install Xilinx CosineSim package\n"
sudo yum install $pkg_dir/cosinesim/xilinx-cosinesim*.rpm
printf "\nINFO: Install Xilinx Recommend Engine package\n"
sudo yum install $pkg_dir/recomengine/xilinx-recomengine*.rpm
fi
# only need to run this on CentOS
#copy the standard libstdc++ to $HOME/libstd
mkdir -p $HOME/libstd
cp /usr/lib64/libstdc++.so.6* $HOME/libstd
fi
printf "\nINFO: All packages have been installed. Please run the command below to flash your Alveo card if needed. \n"
printf "Xilinx Alveo U50 card\n"
printf "${YELLOW}sudo /opt/xilinx/xrt/bin/xbmgmt flash --update --shell xilinx_u50_gen3x16_xdma_201920_3${NC}\n"
printf "\nXilinx Alveo U55C card\n"
printf "${YELLOW}sudo /opt/xilinx/xrt/bin/xbmgmt flash --update --shell xilinx_u55c_gen3x16_xdma_base_2${NC}\n"
| true
|
f6a332b5b8531a29cfc13ebfd0c6ac942d3189e0
|
Shell
|
LeesinYii/lnmp-shell
|
/lnmp.sh
|
UTF-8
| 1,102
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
# lnmp 一键安装脚本
php_prefix="cn2.php.net/get/"
php_suffix=".tar.gz/from/this/mirror"
nginx_prefix="https://nginx.org/download/"
nginx_suffix=".tar.gz"
mysql_prefix=""
mysql_suffix=""
# php 版本
php_version=(
"php-5.6.35"
"php-7.0.12"
"php-7.1.13"
"php-7.1.15"
"php-7.2.1"
"php-7.2.5"
"php-7.2.7"
)
# nginx 版本
nginx_version=(
"nginx-1.10.3"
"nginx-1.12.2"
"nginx-1.14.0"
)
# mysql 版本
mysql_version=(
)
# 需要安装的软件
soft=('php' 'nginx' 'mysql')
message=(
"====== 请选择 php 版本 ======"
"====== 请选择 nginx 版本 ======"
"====== 请选择 mysql 版本 ======"
)
# 选择 软件版本
for i in ${soft[*]}; do
version=$soft[$i]"_version"
echo $message[$i]
count=${#version[*]}
for (( i = 0; i < $count; i++ )); do
echo $i"."${version[$i]}
done
read n
url=${soft[$i]}"_prefix"${version[$i]}${soft[$i]}"_suffix"
# 将获取到的 url 存入数组
url_set[$i]=$url
echo "您所选择的版本url:"url"稍后选择下一个软件的版本"
sleep 5s
done
| true
|
f6a7a3757039e86c2db59c2bf2f6e9fe7b458256
|
Shell
|
jumpinchat/jumpinchat-homepage
|
/build/publish-artifact.sh
|
UTF-8
| 875
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
set -e
if [ -z $AWS_ACCESS_KEY_ID ]; then
echo "AWS_ACCESS_KEY_ID not defined" >&2
exit 1
fi
if [ -z $AWS_SECRET_ACCESS_KEY ]; then
echo "AWS_SECRET_ACCESS_KEY not defined" >&2
exit 1
fi
if [ -z $AWS_BUCKET_NAME ]; then
echo "AWS_BUCKET_NAME not defined" >&2
exit 1
fi
if [ -z $REGION ]; then
echo "REGION not defined" >&2
exit 1
fi
if [ ! -d "./jic-homepage" ]; then
mkdir jic-homepage
fi
rsync -av --progress . jic-homepage \
--exclude jic-homepage \
--exclude node_modules \
--exclude src \
--exclude .git
cd jic-homepage
yarn --frozen-lockfile --production
cd ..
tar -zcvf "jic-homepage.tar.gz" jic-homepage
rm -rf jic-homepage
# upload to s3
virtualenv env
. env/bin/activate
pip install boto gevent
python build/lib/upload_to_s3.py --bucket ${AWS_BUCKET_NAME} --region ${REGION} "jic-homepage.tar.gz" "jic-homepage.tar.gz"
| true
|
dc097fd4e4ec6fb10182b960be174b3d20532056
|
Shell
|
brosander/dev-dockerfiles
|
/nifi/ubuntu/start.sh
|
UTF-8
| 1,890
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
ZIP_FILE="$(find /opt/nifi-archive/ -maxdepth 1 -name 'nifi*.zip' | head -n 1)"
if [ -e "/home/nifi/started_once" ]; then
echo "$0 skipping init logic as it has been run before"
else
if [ -e "/opt/nifi/bin/nifi.sh" ]; then
echo "Using nifi installation mounted at /opt/nifi"
elif [ -n "$ZIP_FILE" ]; then
echo "Using nifi archive $ZIP_FILE"
unzip -d ~/ "$ZIP_FILE"
SCRIPT="`find ~/ -name nifi.sh`"
NIFI_DIR="`dirname \"$SCRIPT\"`/.."
cd "$NIFI_DIR" && mv * /opt/nifi/
else
echo "Must mount nifi installation at /opt/nifi"
exit 1
fi
if [ -e "/opt/nifi-conf" ]; then
cd /opt/nifi-conf
cp -r * /opt/nifi/conf/
fi
if [ -n "$1" ]; then
echo "DEPRECATED: use nifi-conf directory to set up zookeeper props, myid file"
if [ -z "$2" ]; then
echo "\$2 should be number of NiFi nodes if \$1 is set"
exit
fi
cd /opt/nifi
mkdir -p state/zookeeper
echo "$1" > state/zookeeper/myid
sed -i 's/^\(server\.[0-9]\+\)/#\1/g' /opt/nifi/conf/zookeeper.properties
echo "" >> /opt/nifi/conf/zookeeper.properties
ZK_STRING="nifi1.nifi:2181"
echo "server.1=nifi1.nifi:2888:3888" >> /opt/nifi/conf/zookeeper.properties
for (( NUM=2; NUM<=$2; NUM++ ))
do
ZK_STRING="$ZK_STRING,nifi$NUM.nifi:2181"
echo "server.$NUM=nifi$NUM.nifi:2888:3888" >> /opt/nifi/conf/zookeeper.properties
done
sed -i "s/^nifi.zookeeper.connect.string=.*$/nifi.zookeeper.connect.string=$ZK_STRING/g" /opt/nifi/conf/nifi.properties
sed -i 's/^nifi.state.management.embedded.zookeeper.start=.*$/nifi.state.management.embedded.zookeeper.start=true/g' /opt/nifi/conf/nifi.properties
sed -i 's/^nifi.cluster.is.node=.*$/nifi.cluster.is.node=true/g' /opt/nifi/conf/nifi.properties
fi
touch "/home/nifi/started_once"
fi
cd /opt/nifi
/opt/nifi/bin/nifi.sh start
tail -f /opt/nifi/logs/*
| true
|
7ccd74a837569586c846993143954722e3030142
|
Shell
|
zzzapzzz/pentest
|
/android/mystrace.sh
|
UTF-8
| 1,458
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# devnull@libcrack.so
#
# Android strace wrapper
#
test -z "$1" && {
echo -e "\n\tUsage: $0 <app name>\n"
exit 1
}
trap captura INT
captura(){
echo " "
echo "[*] Pulling ${strace_logfile}"
adb shell pull "${strace_logfile}" .
}
app="$1"
pid=
pid_strace=
num=$RANDOM
logdir=.
strace_logfile="/sdcard/strace_${app}.$$.${num}.log"
logcat_logfile="${logdir}/logcat_${app}.$$.${num}.log"
syscalls="open,access,read,write,socket,poll,select,connect,recvfrom,sendto"
ignore="SFPerfTracer|TraceEventNetworkController|dalvikvm|WifiStateMachine\
|WifiP2pService|wpa_supplicant|qdhwcomposer|CydiaSubstrate|AlarmManager|MDMCTBK\
|ConnectivityService|Nat464Xlat|ModemStatsDSDetect|audio_hw_primary|audio_hw_extn\
|msm8974_platfor"
test -d ${logdir} || mkdir -p ${logdir}
pid_strace=$(adb shell ps | grep strace | awk '{print $2}')
test -z "$pid_strace" || {
echo "[*] Killing previous strace process pid=$pid_strace"
adb shell su -c kill -9 "$pid_strace"
}
pid=$(adb shell ps | grep $app | awk '{print $2}')
test -z "$pid" && {
echo "ERROR: cannot get pid of $app"
exit 2
}
echo "[*] Tracing app=${app} pid=${pid}"
echo "[*] logcat log=${logcat_logfile}"
echo "[*] strace log=${strace_logfile}"
adb shell su -c "strace -e $syscalls -p $pid -o $strace_logfile" &
sleep 3
echo "[*] Now execute:"
echo 'adb logcat >> ${logcat_logfile} 2>&1 &'
echo 'tail -f '${logcat_logfile}' | egrep -v "'${ignore}'" | logcat-colorize'
echo
| true
|
1be52027d10efe90bb17bdc619387192f42a5836
|
Shell
|
ericelsken/.dotfiles
|
/.config/bash/bashrc
|
UTF-8
| 1,094
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# User specific environment
if ! [[ "$PATH" =~ "${HOME}/.local/bin" ]]; then
export PATH="${HOME}/.local/bin:${PATH}"
fi
# Set XDG home directories
export XDG_DATA_HOME="${HOME}/.local/share"
export XDG_STATE_HOME="${HOME}/.local/state"
export XDG_CONFIG_HOME="${HOME}/.config"
export XDG_CACHE_HOME="${HOME}/.cache"
bash_config_dir="${XDG_CONFIG_HOME}/bash"
bashrc_dir="${bash_config_dir}/bashrc.d"
source "${bash_config_dir}/history"
source "${bash_config_dir}/prompt"
source "${bash_config_dir}/alias"
# User specific bashrc.d
if [[ -d ${bashrc_dir} ]]; then
for rc in ${bashrc_dir}/* ; do
if [[ -f "$rc" ]]; then
source "$rc"
fi
done
unset rc
fi
# Host specific bashrc.d
bashrc_dir_self="${bash_config_dir}/self.d"
if [[ -d ${bashrc_dir_self} ]]; then
for rc in ${bashrc_dir_self}/* ; do
if [[ -f "$rc" ]]; then
source "$rc"
fi
done
unset rc
fi
unset bash_config_dir
unset bashrc_dir
unset bashrc_dir_self
| true
|
d4952845e5f96e1305e47e73b70c2bec391efdc6
|
Shell
|
josefnorlin/ssn-generator
|
/bash/ssn.sh
|
UTF-8
| 1,011
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# Returns Luhn checksum for supplied sequence
josefs_luhn_checksum() {
checksum=0;
multiples=212121212;
for (( i = 0; i < ${#1}; ++i )); do
base=${1:$i:1};
multiple=${multiples:$i:1};
multiplied=$(( $base * $multiple ));
if [[ ${#multiplied} > 1 ]]
then
checksum=$(( $checksum+${multiplied:0:1}+${multiplied:1:1} ))
else
checksum=$(( $checksum+$multiplied ))
fi
done
luhn1="$(($checksum % 10))" # mod 10 the sum to get single digit checksum
luhn2=$(( 10 - $luhn1 ))
luhn3=$(( $luhn2 % 10 ))
echo "$luhn3"
}
YEAR=$(date +'%Y')
AGE=$(jot -r 1 18 99);
YYYY=$(expr $YEAR - $AGE);
M=$(jot -r 1 1 12);
MM=$(printf %02d $M);
D=$(jot -r 1 1 28);
DD=$(printf %02d $D);
XXX=$(jot -r 1 100 999);
YYYYMMDDXXX=$(printf $YYYY$MM$DD$XXX);
YYMMDDXXX=$(printf ${YYYY: -2}$MM$DD$XXX);
X=$(josefs_luhn_checksum $YYMMDDXXX);
echo $YYYY$MM$DD$XXX$X | pbcopy; echo $YYYY$MM$DD$XXX$X;
| true
|
acf3f3bad51cb10a33dc50ebdec125200f9b658b
|
Shell
|
wb8wka/ESP8266Lib
|
/examples/eyal/app_v3/utils/lcall.sh
|
UTF-8
| 816
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
echod() {
echo "`date '+%F %T'` $me: $@"
}
die() {
echod "$@"
exit 1
}
compile() {
p="$1"
# echo "compiling '$p'"
luac.cross $ccflags -o "$p".{lc,lua} || {
echod "compile of '$p' failed $?"
rc='true'
}
}
me="`basename $0 .sh`"
ccflags='-s'
#ccflags=''
rc='false'
for f in *.lua ; do
test -f "$f" || {
echo "no '$f'"
continue
}
# pgm="`basename "$f" .lua" # remove '.lua' suffix
pgm="${f:0:0-4}" # remove '.lua' suffix
case "$pgm" in
i|init|compile)
continue
;;
main)
sed "s|#VERSION#|`date '+%F %T'` `hostname -s`:`pwd`|" "$f" >"ver_$f"
# DOS format: sed "s|#VERSION#|`date '+%Y%m%d%H%M%S'` `hostname -s`:`pwd`|" "$f" >"ver_$f"
compile "ver_$pgm"
rm "ver_$f"
mv {ver_,}"$pgm.lc"
continue
;;
esac
compile "$pgm"
done
$rc && die "some compiles failed"
exit 0
| true
|
3f653c6914b063f44852b75639110e5a4fda8010
|
Shell
|
blackout314/timelessis
|
/deploy.sh
|
UTF-8
| 1,398
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Scripts to deploy application to staging server
# @todo #45:30min Continue implementing deploy scripts. Find out user
# and domain of the staging server and replace scp and ssh with correct data.
# Add scripts in cron (like the one created in #47). Verify the web
# application is running (execute couple fo curl requests).
echo "-- Run tests"
pytest
echo "-- Creating staging tag"
git tag -a staging-$tag -m "Rultor deploy staging-$tag"
echo "-- Copy application code to staging server"
scp -r . user@staging-server:/app
# add scripts in cron (like the one created in #47)
# verify the webapplication is running
ssh user@staging-server << EOF
echo "-- Creating database user: timless_user"
sudo -u postgres psql -c "CREATE USER timeless_user WITH
SUPERUSER
CREATEDB
CREATEROLE
INHERIT
LOGIN
ENCRYPTED PASSWORD 'timeless_pwd';"
echo "-- Creating database: timelessdb_dev"
sudo -u postgres psql -c "CREATE DATABASE timelessdb_dev;"
echo "-- Creating database: timelessdb_test"
sudo -u postgres psql -c "CREATE DATABASE timelessdb_test;"
echo "-- REPLACE: add scripts to cron"
cd /app
echo "-- Running database migrations"
python manage.py db upgrade
echo "-- Running web application server"
export FLASK_APP=main.py
export FLASK_ENV=development
flask run &
echo "-- REPLACE: verify web application is running ok"
EOF
| true
|
86e7856de32d58ef38ae71685f8997a05f62804b
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/supybot-git/PKGBUILD
|
UTF-8
| 1,038
| 2.5625
| 3
|
[] |
no_license
|
#Contributor: LinkMaster03 linkmaster032000 at gmail dot com
#Fixed by Cravix ( dr dot neemous at gmail dot com )
pkgname=supybot-git
_pkgname=supybot
pkgver=0.83.4.1.7089.6f925e5
pkgrel=1
pkgdesc="A cross-platform IRC bot written in Python"
arch=('i686' 'x86_64')
url="http://sourceforge.net/projects/supybot/"
license=('3-clause BSD')
depends=('python2>=2.5' 'python2-pysqlite-legacy' 'python2-twisted')
optdepends=('python2-pyopenssl: SSL server support')
conflicts=('supybot')
provides=('supybot')
makedepends=('git')
source=("git://$_pkgname.git.sourceforge.net/gitroot/$_pkgname/$_pkgname")
md5sums=('SKIP')
pkgver() {
cd "$srcdir/$_pkgname"
echo $(awk -F"['+]" '/version/ {print $2} ' src/version.py | grep -o "^[^-]*").$(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
build() {
cd "$srcdir/$_pkgname"
python2 setup.py build
}
package() {
cd "$srcdir/$_pkgname"
python2 setup.py install --root="$pkgdir"
mkdir -p "$pkgdir/usr/share/licenses/$_pkgname/"
install -m644 LICENSE "$pkgdir/usr/share/licenses/$_pkgname/" || return 1
}
| true
|
62c4385f89fa3d6f069b3e730f4bb1eec493e8ce
|
Shell
|
HuntsmanCancerInstitute/Workflows
|
/Hg38RunnerWorkflows2/Auto/Caris/carisAutoProcessing.sh
|
UTF-8
| 630
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# 18 May 2022
# David.Nix@hci.utah.edu
# Huntsman Cancer Institute
# Runs a snakemake workflow to process Caris datasets for translational genomics
# Execute this on redwood and in the /scratch/general/pe-nfs1/u0028003/Caris directory
set -e; start=$(date +'%s')
echo -e "\n---------- Starting -------- $((($(date +'%s') - $start)/60)) min"
# load snakemake
module load snakemake/6.4.1
# make a work dir and change into it
#wd="CarisRun_"$(date +'%m_%d_%Y'); mkdir $wd; cd $wd
snakemake -p --cores all --snakefile carisAutoProcessing.sm
echo -e "\n---------- Complete! -------- $((($(date +'%s') - $start)/60)) min total"
| true
|
2dcf0edee597812783831f1058ed7a2e5e544523
|
Shell
|
JScott/minimal-ci
|
/ansible-role/templates/etc/init.d/mci-worker
|
UTF-8
| 453
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
WORKER='{{ mci_worker_script }}'
WORKER_LOG='{{ mci_worker_log_path }}'
WORKER_PID='{{ mci_worker_pidfile_path }}'
start() {
sudo su {{ mci_user }} -c "ruby ${WORKER} 2>&1 >> ${WORKER_LOG}"
}
stop() {
kill -9 `cat ${WORKER_PID}`
rm ${WORKER_PID}
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stop|restart}"
exit 1
;;
esac
exit $?
| true
|
15a9cd65e068e3ad1dd4363d22bd49c7ad02fad2
|
Shell
|
zheplusplus/stekin
|
/sample-test.sh
|
UTF-8
| 518
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
verify() {
if ./stkn.sh -cm samples/$1.stkn tmp.out && ./tmp.out | diff samples/$1.expected - ;
then
echo $1 "pass."
else
echo $1 "FAILED!"
fi
}
if [ $# == 1 ];
then
verify $1
exit
fi
echo "sample-test:"
verify empty
verify write
verify latency-ref
verify fib
verify nest-func
verify return-void
verify pair
verify sqrt
verify find-root
verify fixed-point
verify vector-multi
verify big-literals
verify ifte
verify basic-list
verify return-list
verify list-pipe
| true
|
ac466ef624685e84c6cbe6f279447b940c7a5423
|
Shell
|
nulldriver/cf-cli-resource
|
/spec/services/ups_with_credentials_string_spec.sh
|
UTF-8
| 3,034
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env shellspec
set -euo pipefail
Describe 'services'
setup() {
org=$(generate_test_name_with_spaces)
space=$(generate_test_name_with_spaces)
app_name=$(generate_test_name_with_hyphens)
service_instance=$(generate_test_name_with_spaces)
credentials='{"username":"admin","password":"pa55woRD"}'
updated_credentials='{"username":"admin","password":"pa$$woRD"}'
source=$(get_source_config "$org" "$space") || error_and_exit "[ERROR] error loading source json config"
test::login
test::create_org_and_space "$org" "$space"
}
teardown() {
test::delete_org_and_space "$org" "$space"
test::logout
}
BeforeAll 'setup'
AfterAll 'teardown'
It 'can create ups with credentials string'
cups_with_credentials_string() {
local config=$(
%text:expand
#|$source
#|params:
#| command: create-user-provided-service
#| service_instance: $service_instance
#| credentials: $credentials
)
put "$config"
}
When call cups_with_credentials_string
The status should be success
The output should json '.version | keys == ["timestamp"]'
The error should include "Creating user provided service"
Assert test::service_exists "$service_instance" "$org" "$space"
End
It 'can push an app with bound service'
push_app() {
local fixture=$(load_fixture "static-app")
local config=$(
%text:expand
#|$source
#|params:
#| command: push
#| path: $fixture/dist
#| no_start: true
#| manifest:
#| applications:
#| - name: $app_name
#| memory: 64M
#| disk_quota: 64M
#| services:
#| - $service_instance
)
put "$config"
}
When call push_app
The status should be success
The output should json '.version | keys == ["timestamp"]'
The error should include "Pushing"
Assert [ "$(echo "$credentials" | jq --sort-keys)" == "$(test::get_user_provided_vcap_service "$app_name" "$service_instance" "$org" "$space" | jq --sort-keys .credentials)" ]
End
It 'can update ups with credentials string'
uups_with_credentials_string() {
local config=$(
%text:expand
#|$source
#|params:
#| command: create-user-provided-service
#| service_instance: $service_instance
#| credentials: $updated_credentials
)
put "$config"
}
When call uups_with_credentials_string
The status should be success
The output should json '.version | keys == ["timestamp"]'
The error should include "Updating user provided service"
Assert test::service_exists "$service_instance" "$org" "$space"
Assert [ "$(echo "$updated_credentials" | jq --sort-keys)" == "$(test::get_user_provided_vcap_service "$app_name" "$service_instance" "$org" "$space" | jq --sort-keys .credentials)" ]
End
End
| true
|
50bda93fe09479bdecccdad96cf0e321e0eb4a94
|
Shell
|
Azure/azure-quickstart-templates
|
/demos/application-gateway-logviewer-goaccess/scripts/setup_vm.sh
|
UTF-8
| 3,350
| 3.140625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#The MIT License (MIT)
#Copyright (c) Microsoft Corporation. All rights reserved.
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Add Microsoft Repo
wget -q https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb
sudo dpkg -i packages-microsoft-prod.deb
# Add Official GoAccess Repository
echo "deb http://deb.goaccess.io/ $(lsb_release -cs) main" | sudo tee -a /etc/apt/sources.list.d/goaccess.list
wget -O - https://deb.goaccess.io/gnugpg.key | sudo apt-key add -
apt-get -y install apt-transport-https
# Update Packages
apt-get -y update
#Install Libcurl3, unzip
apt-get -y install libcurl3 unzip
#Install .Net Core current version
apt-get -y install aspnetcore-runtime-2.1
# Install Log Processor Application
systemctl stop appgatewaylogprocessor
systemctl stop goaccess
mkdir -p /var/log/azure/Microsoft.Azure.Networking.ApplicationGateway.LogProcessor
touch /var/log/azure/Microsoft.Azure.Networking.ApplicationGateway.LogProcessor/access.log
mkdir -p /usr/share/appgatewaylogprocessor
unzip -o AppGatewayLogProcessor.zip -d /usr/share/appgatewaylogprocessor/
sh /usr/share/appgatewaylogprocessor/files/scripts/setup_application.sh
# Setup the Template Params
echo $1 >> /usr/share/appgatewaylogprocessor/blobsasuri.key
chmod 644 /usr/share/appgatewaylogprocessor/blobsasuri.key
echo $2 >> /usr/share/appgatewaylogprocessor/appgwlogsbloburlregex
chmod 644 /usr/share/appgatewaylogprocessor/appgwlogsbloburlregex
# Install the Application Gateway Log Processor & GoAccess Service
cp /usr/share/appgatewaylogprocessor/files/appgatewaylogprocessor.service /etc/systemd/system/appgatewaylogprocessor.service
cp /usr/share/appgatewaylogprocessor/files/goaccess.service /etc/systemd/system/goaccess.service
systemctl daemon-reload
sudo systemctl enable appgatewaylogprocessor.service
sudo systemctl enable goaccess.service
# Start the Application Gateway Log Processor
systemctl start appgatewaylogprocessor
# Install Apache2 and GoAccess
apt-get -y install libncursesw5-dev gcc make libgeoip-dev libtokyocabinet-dev build-essential
apt-get -y install apache2
wget -q -O goaccess-1.2.tar.gz https://tar.goaccess.io/goaccess-1.2.tar.gz
tar -xzvf goaccess-1.2.tar.gz
cd goaccess-1.2/
./configure --enable-utf8 --enable-geoip=legacy
make
make install
# restart Apache
apachectl restart
# Start GoAccess
systemctl start goaccess
| true
|
a686697f242f28f9b358393af56944b1a729dcf9
|
Shell
|
czxxjtu/wxPython-1
|
/tags/wxPy-2.8.4.2/distrib/msw/makedist.sh
|
UTF-8
| 7,650
| 3.671875
| 4
|
[] |
no_license
|
#! /bin/bash
# makedist.sh
#
# Build wxWidgets 2 for Windows distribution.
# This builds all required binaries and documents before calling
# zipdist.sh to make the archives.
#
# To use this script, you need:
#
# - CygWin installation, for bash etc.
# - VC++ 6 or higher, to compile the binaries
# - WinHelp compiler, HTML Help compiler, Tex2RTF on your path
# - WISE Install 5
# - Word 97 (not tested with higher versions)
# - Adobe Acrobat & Distiller
#
# Before running this script, you will need to:
#
# - update the readmes, change log, manual version etc.
# - update version.h
# - update distrib/msw/wisetop.txt, wisebott.txt with the correct version
# number, plus any hard-wired wxWidgets paths
# - test on a variety of compilers
#
# TODO:
#
# - generation of PDF (only PDF RTF generated so far)
# - perhaps prompt the user to read the important release docs,
# version.h, setup.h
#
# Julian Smart, October 2000
SRC=`cygpath -u $WXWIN`
DEST=$SRC/deliver
TMPDIR=`cygpath -u $TEMP`
OK=1
DOWISE=0
DOPDF=0
DOALL=1
DOCSONLY=0
WXWINONLY=0
WISEONLY=0
BINONLY=0
PDFONLY=0
# For some reason, if we pipe output to egrep, we see output, but not otherwise.
WARNINGS=": decorated name|: see reference|: see declaration|C4786|VC98\\\\INCLUDE|template<>"
# Point the environment at a default Visual Studio 6 installation:
# CygWin-style paths for PATH, Windows-style for INCLUDE/LIB.
# NOTE(review): all locations are hard-wired to a stock VC++ 6 install on
# drive C: (plus Tcl and HTML Help Workshop) -- adjust before running
# on a different machine.
setup_vars() {
VCPATH="/c/Program Files/Microsoft Visual Studio/common/msdev98/bin:/c/Program Files/Microsoft Visual Studio/VC98/bin:DevStudio/VC/bin:/c/Program Files/Microsoft Visual Studio/common/tools:/c/Program Files/HTML Help Workshop"
INCLUDE="C:\Program Files\Microsoft Visual Studio\VC98\ATL\INCLUDE;C:\Program Files\Microsoft Visual Studio\VC98\INCLUDE;C:\Program Files\Microsoft Visual Studio\VC98\MFC\INCLUDE;C:\Program Files\Tcl\include;C:\Program Files\HTML Help Workshop\include"
LIB="C:\Program Files\Microsoft Visual Studio\VC98\lib;C:\Program Files\Microsoft Visual Studio\VC98\MFC\lib;C:\Program Files\Tcl\lib;C:\Program Files\HTML Help Workshop\lib"
# NOTE(review): "VAR=value export VAR" on one line relies on assignment
# persistence for the special builtin 'export'; behaviour differs between
# POSIX mode and default bash -- confirm TCLHOME really stays set.
TCLHOME=C:/PROGRA~1/Tcl export TCLHOME
PATH="$PATH:$VCPATH" export PATH
export INCLUDE LIB
# Used by build_pdf to drive Word for RTF -> PDF conversion.
WORDEXE="/c/Program Files/Microsoft Office/Office/WINWORD.EXE"
}
check_compile() {
# Scan the captured build log for VC++ compiler errors.
#   $1 - human-readable name of the component that was just built
# Sets the global OK=0 when errors are found; leaves OK untouched otherwise.
grep -E ": error C|fatal error" $TMPDIR/buildlog.txt > $TMPDIR/errorlog.txt
if [ -s $TMPDIR/errorlog.txt ]; then
# BUG FIX: the message previously interpolated $0 (the script name)
# instead of the component name callers pass as $1.
echo "Did not build $1 successfully."
OK=0
fi
}
# Sanity-check the source tree before building.  Verifies $SRC exists,
# creates deliver/ and bin/ output directories, seeds setup.h from
# setup0.h on first run (then aborts so the user can edit it), and
# requires the DBGVIEW tool to be staged in $SRC/bin.
# Sets the global OK=0 on any failed precondition.
check_files() {
if [ ! -d "$SRC" ]; then
echo "$SRC" does not exist.
OK=0
fi
if [ ! -d "$SRC/deliver" ]; then
mkdir "$SRC/deliver"
fi
if [ ! -e $SRC/include/wx/msw/setup.h ]; then
cp "$SRC/include/wx/msw/setup0.h" "$SRC/include/wx/msw/setup.h"
echo setup0.h has been copied to setup.h.
echo You must now edit this file to restore release settings,
echo then run this script again.
OK=0
# open the freshly copied header for the required manual edit
notepad.exe "$SRC/include/wx/msw/setup.h"
fi
if [ ! -d "$SRC/bin" ]; then
mkdir "$SRC/bin"
fi
if [ ! -e "$SRC/bin/DBGVIEW.EXE" ]; then
echo Please put DBGVIEW.EXE, DBGVIEW.CNT, DBGVIEW.HLP into $SRC/bin
echo and run the script again.
OK=0
fi
}
# Generate all documentation formats (WinHelp, HTML, HTML Help, wxHTML
# help books, PDF RTF) for the core library and the Tex2RTF / OGL /
# MMedia components, via their nmake makefiles.
build_docs() {
cd "$SRC/src/msw"
echo "---------------------------------"
echo "Building wxWidgets documents"
nmake -f makefile.vc cleandocs docs
cd "$SRC/utils/tex2rtf/src"
nmake -f makefile.vc html htmlhelp htb hlp pdfrtf
cd "$SRC/contrib/src/ogl"
nmake -f makefile.vc html htmlhelp htb hlp pdfrtf
cd "$SRC/contrib/src/mmedia"
nmake -f makefile.vc html htmlhelp htb hlp pdfrtf
}
# TODO: Make PDF via Word, if Word and Adobe Acrobat are present.
# This has to be interactive at present.
# Open the generated PDF-RTF files in MS Word so the user can print them
# to Adobe Distiller by hand (interactive step); skipped when Word is
# not installed.
build_pdf() {
echo "---------------------------------"
echo "Building wxWidgets PDF documents"
if [ -e "$WORDEXE" ]; then
"$WORDEXE" "$WXWIN\\docs\\pdf\\wx.rtf"
"$WORDEXE" "$WXWIN\\docs\\pdf\\tex2rtf.rtf"
"$WORDEXE" "$WXWIN\\contrib\\docs\\pdf\\ogl.rtf"
# NOTE(review): path below opens contrib\docs\mmedia\ogl.rtf -- looks
# like it should be the mmedia document, not ogl.rtf; verify.
"$WORDEXE" "$WXWIN\\contrib\\docs\\mmedia\\ogl.rtf"
else
echo MS Word not present. Not doing PDF build.
fi
}
# Build wxWidgets
# Rebuild the wxWidgets Release library with Visual C++ (msdev).
# NOTE(review): unlike build_tex2rtf/build_life, the output here is only
# warning-filtered and never checked via check_compile, so compile
# errors in the core library do not clear OK.
build_wxwin_vc() {
echo "---------------------------------"
echo "Building wxWidgets using VC++"
cd "$SRC/src"
echo Building wxWidgets Release library in `pwd`
echo Command: msdev wxvc.dsw /useenv /make "wxvc - Win32 Release" /rebuild
msdev wxvc.dsw /useenv /make "wxvc - Win32 Release" /rebuild | egrep -v "$WARNINGS"
}
# Rebuild Tex2RTF (Release) with VC++, capturing output to the build log
# and flagging compile errors via check_compile (which may clear OK).
build_tex2rtf() {
echo "---------------------------------"
echo "Building Tex2RTF using VC++"
cd "$SRC/utils/tex2rtf/src"
msdev Tex2RTFVC.dsw /useenv /make "Tex2RTFVC - Win32 Release" /rebuild | egrep -v "$WARNINGS" | tee $TMPDIR/buildlog.txt
check_compile "Tex2RTF"
}
# Rebuild the Life! demo (Release) with VC++; errors clear OK.
build_life() {
echo "---------------------------------"
echo "Building Life! using VC++"
cd "$SRC/demos/life"
msdev LifeVC.dsw /useenv /make "LifeVC - Win32 Release" /rebuild | egrep -v "$WARNINGS" | tee $TMPDIR/buildlog.txt
check_compile "Life! Demo"
}
# Build every auxiliary executable shipped with the distribution.
build_executables() {
build_tex2rtf
build_life
}
# Stage the freshly built binaries plus their help/data files in $SRC/bin.
copy_files() {
cp "$SRC/utils/tex2rtf/src/Release/tex2rtf.exe" "$SRC/bin"
cp "$SRC/docs/winhelp/tex2rtf.hlp" "$SRC/docs/winhelp/tex2rtf.cnt" "$SRC/bin"
cp "$SRC/demos/life/Release/life.exe" "$SRC/demos/life/breeder.lif" "$SRC/bin"
}
# Process command line options.
# Flags select which build phases run; with no flags every phase runs
# (DOALL=1).  "--wise" / "--pdf" add optional phases; the "*-only"
# variants disable DOALL and enable a single phase.
for i in "$@"; do
case "$i" in
--wise) DOWISE=1 ;;
--pdf) DOPDF=1 ;;
--wise-only)
WISEONLY=1
DOWISE=1
DOALL=0
;;
--docs-only)
DOCSONLY=1
DOALL=0
;;
--bin-only)
BINONLY=1
DOALL=0
;;
--wxwin-only)
WXWINONLY=1
DOALL=0
;;
--pdf-only)
PDFONLY=1
DOPDF=1
DOALL=0
;;
*)
echo Usage: $0 "[ options ]"
echo Generates documentation and binaries for creating a distribution,
echo and optionally generates the zip/setup.exe distribution by
echo calling zipdist.sh.
echo
echo Options:
echo " --help Display this help message"
echo " --wise Additonally, build zips and setup.exe"
echo " --pdf Additionally, try to generate PDF"
echo " --wise-only Only do zip/setup phase"
echo " --wxwin-only Only do wxWin lib building phase"
echo " --docs-only Only do docs building phase"
echo " --pdf-only Only do PDF building phase"
echo " --bin-only Only do .exe building phase"
exit 1
;;
esac
done
# Ensure all documentation output directories exist.
mkdir -p $SRC/docs/pdf
mkdir -p $SRC/docs/html
mkdir -p $SRC/docs/htmlhelp
mkdir -p $SRC/docs/htb
mkdir -p $SRC/docs/winhelp
mkdir -p $SRC/contrib/docs/pdf
mkdir -p $SRC/contrib/docs/html
mkdir -p $SRC/contrib/docs/htmlhelp
mkdir -p $SRC/contrib/docs/htb
mkdir -p $SRC/contrib/docs/winhelp
setup_vars
check_files
# Run the selected phases in order; each phase is skipped as soon as a
# previous step has cleared the global OK flag.
if [ "$OK" = "1" ]; then
if [ "$DOCSONLY" = "1" ] || [ "$DOALL" = "1" ]; then
build_docs
fi
fi
if [ "$OK" = "1" ] && [ "$DOPDF" = "1" ]; then
if [ "$PDFONLY" = "1" ] || [ "$DOALL" = "1" ]; then
build_pdf
fi
fi
if [ "$OK" = "1" ]; then
if [ "$WXWINONLY" = "1" ] || [ "$DOALL" = "1" ]; then
build_wxwin_vc
fi
fi
if [ "$OK" = "1" ]; then
if [ "$BINONLY" = "1" ] || [ "$DOALL" = "1" ]; then
build_executables
fi
fi
if [ "$OK" = "1" ]; then
copy_files
fi
# Optionally hand off to zipdist.sh to build the zip / setup.exe archives.
if [ "$OK" = "1" ] && [ "$DOWISE" = "1" ]; then
if [ "$WISEONLY" = "1" ] || [ "$DOALL" = "1" ]; then
$SRC/distrib/msw/zipdist.sh --wise
fi
fi
if [ "$OK" = "1" ]; then
echo Finished successfully.
else
echo Finished unsuccessfully. There were errors.
fi
echo Press return to continue.
read dummy
| true
|
f23415513ad80543edf483ef6a530841ce660ecd
|
Shell
|
Agilysbe/git-version
|
/version.sh
|
UTF-8
| 950
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
get_next_version () {
  # Derive the next semantic version from the current tag ($1) and a
  # commit/merge description ($2, compared case-insensitively).
  # Keywords are checked in priority order:
  #   patch / :fire: / :bug: / :poop:  -> bump patch
  #   minor / :tada:                   -> bump minor, reset patch
  #   major / :boom:                   -> bump major, reset minor+patch
  # Prints the new version; prints "error" and exits 1 if no keyword matches.
  local desc current major minor patch
  current=$1
  desc=${2,,}
  major=$(echo "$current" | cut -d'.' -f 1)
  minor=$(echo "$current" | cut -d'.' -f 2)
  patch=$(echo "$current" | cut -d'.' -f 3)
  case "$desc" in
    *patch*|*:fire:*|*:bug:*|*:poop:*)
      echo "$major.$minor.$((patch + 1))"
      ;;
    *minor*|*:tada:*)
      echo "$major.$((minor + 1)).0"
      ;;
    *major*|*:boom:*)
      echo "$((major + 1)).0.0"
      ;;
    *)
      echo "error"
      exit 1
      ;;
  esac
}
"$@"
| true
|
ffdcd77aef5478e31410419b3a5c5fa1242fa94d
|
Shell
|
nlabinebouchard/B32-BashExamples
|
/01-hello_world.sh
|
UTF-8
| 453
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Tutorial script demonstrating echo options and reading user input.
echo "Hello world"
# -n suppresses the trailing newline
echo -n "Vive "
echo "Linux"
# -e enables interpretation of backslash escapes such as \n
echo -e "123\n123\n123"
# Create a variable from user input (-p displays a prompt message).
# No '$' sign when assigning a variable; use '$' to read its value.
read -p "Vous avez quel âge?" age
echo $age "ans! Vous paraissez plus jeune"
| true
|
f77df0eeb73e77c35017f8c78a7a422c486906f2
|
Shell
|
kblauer/arch-vm-install
|
/alvin
|
UTF-8
| 18,087
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# This is a script to install ArchLinux quickly on a machine, specifically in my case a VMware Workstation VM.
# Created by Kyle Blauer
# Credit to github.com/helmuthdu for scripts.
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
checklist=( 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 )
UEFI=0
[[ $1 == -v || $1 == --verbose ]] && VERBOSE_MODE=1 || VERBOSE_MODE=0 # VERBOSE MODE
AUTOMATIC_MODE=0
TRIM=0
# COLORS
Bold=$(tput bold)
Underline=$(tput sgr 0 1)
Reset=$(tput sgr0)
# Regular Colors
Red=$(tput setaf 1)
Green=$(tput setaf 2)
Yellow=$(tput setaf 3)
Blue=$(tput setaf 4)
Purple=$(tput setaf 5)
Cyan=$(tput setaf 6)
White=$(tput setaf 7)
# Bold
BRed=${Bold}$(tput setaf 1)
BGreen=${Bold}$(tput setaf 2)
BYellow=${Bold}$(tput setaf 3)
BBlue=${Bold}$(tput setaf 4)
BPurple=${Bold}$(tput setaf 5)
BCyan=${Bold}$(tput setaf 6)
BWhite=${Bold}$(tput setaf 7)
# PROMPT
prompt1="Enter your option: "
prompt2="Enter n° of options (ex: 1 2 3 or 1-3): "
# EDITOR
if [[ -f /usr/bin/vim ]]; then
EDITOR="vim"
elif [[ -z $EDITOR ]]; then
EDITOR="nano"
fi
ROOT_MOUNTPOINT="/dev/sda1"
MOUNTPOINT="/mnt"
select_keymap(){
print_title "KEYMAP - https://wiki.archlinux.org/index.php/KEYMAP"
print_info "The KEYMAP variable is specified in the /etc/rc.conf file. It defines what keymap the keyboard is in the virtual consoles. Keytable files are provided by the kbd package."
echo "US qwerty default is: us-qwerty"
read -p "Keymap: " KEYMAP
loadkeys $KEYMAP
}
select_editor(){
print_title "DEFAULT EDITOR"
editors_list=("nano" "vi" "vim");
PS3="$prompt1"
echo -e "Select editor\n"
select EDITOR in "${editors_list[@]}"; do
if contains_element "$EDITOR" "${editors_list[@]}"; then
package_install "$EDITOR"
break
else
invalid_option
fi
done
}
configure_mirrorlist(){
local countries_code=("AU" "AT" "BY" "BE" "BR" "BG" "CA" "CL" "CN" "CO" "CZ" "DK" "EE" "FI" "FR" "DE" "GR" "HU" "IN" "IE" "IL" "IT" "JP" "KZ" "KR" "LV" "LU" "MK" "NL" "NC" "NZ" "NO" "PL" "PT" "RO" "RU" "RS" "SG" "SK" "ZA" "ES" "LK" "SE" "CH" "TW" "TR" "UA" "GB" "US" "UZ" "VN")
local countries_name=("Australia" "Austria" "Belarus" "Belgium" "Brazil" "Bulgaria" "Canada" "Chile" "China" "Colombia" "Czech Republic" "Denmark" "Estonia" "Finland" "France" "Germany" "Greece" "Hungary" "India" "Ireland" "Israel" "Italy" "Japan" "Kazakhstan" "Korea" "Latvia" "Luxembourg" "Macedonia" "Netherlands" "New Caledonia" "New Zealand" "Norway" "Poland" "Portugal" "Romania" "Russian" "Serbia" "Singapore" "Slovakia" "South Africa" "Spain" "Sri Lanka" "Sweden" "Switzerland" "Taiwan" "Turkey" "Ukraine" "United Kingdom" "United States" "Uzbekistan" "Viet Nam")
country_list(){
#`reflector --list-countries | sed 's/[0-9]//g' | sed 's/^/"/g' | sed 's/,.*//g' | sed 's/ *$//g' | sed 's/$/"/g' | sed -e :a -e '$!N; s/\n/ /; ta'`
PS3="$prompt1"
echo "Select your country:"
select country_name in "${countries_name[@]}"; do
if contains_element "$country_name" "${countries_name[@]}"; then
country_code=${countries_code[$(( $REPLY - 1 ))]}
break
else
invalid_option
fi
done
}
print_title "MIRRORLIST - https://wiki.archlinux.org/index.php/Mirrors"
print_info "This option is a guide to selecting and configuring your mirrors, and a listing of current available mirrors."
OPTION=n
while [[ $OPTION != y ]]; do
country_list
read_input_text "Confirm country: $country_name"
done
url="https://www.archlinux.org/mirrorlist/?country=${country_code}"
tmpfile=$(mktemp --suffix=-mirrorlist)
# Get latest mirror list and save to tmpfile
curl -so ${tmpfile} ${url}
sed -i 's/^#Server/Server/g' ${tmpfile}
# Backup and replace current mirrorlist file (if new file is non-zero)
if [[ -s ${tmpfile} ]]; then
{ echo " Backing up the original mirrorlist..."
mv -i /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.orig; } &&
{ echo " Rotating the new list into place..."
mv -i ${tmpfile} /etc/pacman.d/mirrorlist; }
else
echo " Unable to update, could not download list."
fi
# better repo should go first
echo ""
echo "Testing mirrorlist speed - this will take a minute..."
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.tmp
rankmirrors -n 8 /etc/pacman.d/mirrorlist.tmp > /etc/pacman.d/mirrorlist
rm /etc/pacman.d/mirrorlist.tmp
# allow global read access (required for non-root yaourt execution)
chmod +r /etc/pacman.d/mirrorlist
echo "Mirrorlist Updated."
}
partition_simple() {
print_title "https://wiki.archlinux.org/index.php/Partitioning"
print_info "Partitioning a hard drive allows one to logically divide the available space into sections that can be accessed independently of one another."
print_warning "This will delete the ENTIRE disk! USE AT YOUR OWN RISK! Press Ctrl+C to quit"
pause_function
echo "Creating partition..."
echo ""
parted /dev/sda --script -- mklabel msdos
parted /dev/sda --script -- mkpart primary ext4 0% 100%
parted /dev/sda --script -- set 1 boot on
echo ""
echo "Formatting drive ext4..."
echo ""
mkfs.ext4 /dev/sda1
echo "Mounting drive..."
echo ""
mount /dev/sda1 /mnt
echo "DONE! Drive was created with one ext4 partition, then mounted to /mnt!"
pause_function
}
install_base_system(){
print_title "INSTALL BASE SYSTEM"
print_info "Using the pacstrap script we install the base system. The base-devel package group will be installed also."
echo "Installing to ${MOUNTPOINT}..."
# Some of the extra packages here causing pacstrap to fail, not sure why. Going to install later
pacstrap ${MOUNTPOINT} base base-devel # btrfs-progs f2fs-tools ntp NetworkManager wget sudo
[[ $? -ne 0 ]] && error_msg "Installing base system to ${MOUNTPOINT} failed. Check error messages above."
}
install_extra_pkg() {
arch_chroot_ext "pacman -S --noconfirm btrfs-progs f2fs-tools ntp networkmanager wget vim"
# enable NetworkManager on the install
echo "Enabling NetworkManager..."
arch_chroot_ext "systemctl enable NetworkManager.service"
echo "Base System Installed!"
pause_function
}
configure_keymap(){
#ADD KEYMAP TO THE NEW SETUP
echo "Configuring Keymap..."
echo "KEYMAP=$KEYMAP" > ${MOUNTPOINT}/etc/vconsole.conf
pause_function
}
configure_fstab(){
print_title "FSTAB - https://wiki.archlinux.org/index.php/Fstab"
print_info "The /etc/fstab file contains static filesystem information. It defines how storage devices and partitions are to be mounted and integrated into the overall system. It is read by the mount command to determine which options to use when mounting a specific partition or partition."
echo "Creating fstab..."
genfstab -U /mnt >> /mnt/etc/fstab
echo ""
echo "Done"
pause_function
}
configure_hostname(){
print_title "HOSTNAME - https://wiki.archlinux.org/index.php/HOSTNAME"
print_info "A host name is a unique name created to identify a machine on a network.Host names are restricted to alphanumeric characters.\nThe hyphen (-) can be used, but a host name cannot start or end with it. Length is restricted to 63 characters."
read -p "Hostname [ex: archlinux]: " host_name
echo "$host_name" > ${MOUNTPOINT}/etc/hostname
}
configure_timezone(){
print_title "TIMEZONE - https://wiki.archlinux.org/index.php/Timezone"
print_info "In an operating system the time (clock) is determined by four parts: Time value, Time standard, Time Zone, and DST (Daylight Saving Time if applicable)."
OPTION=n
while [[ $OPTION != y ]]; do
settimezone
read_input_text "Confirm timezone (${ZONE}/${SUBZONE})"
done
arch_chroot_ext "ln -s /usr/share/zoneinfo/${ZONE}/${SUBZONE} /etc/localtime"
echo "Done!"
pause_function
}
configure_hardwareclock(){
print_title "HARDWARE CLOCK TIME - https://wiki.archlinux.org/index.php/Internationalization"
print_info "This is set in /etc/adjtime. Set the hardware clock mode uniformly between your operating systems on the same machine. Otherwise, they will overwrite the time and cause clock shifts (which can cause time drift correction to be miscalibrated)."
hwclock_list=('UTC' 'Localtime');
PS3="$prompt1"
select OPT in "${hwclock_list[@]}"; do
case "$REPLY" in
1) arch_chroot_ext "hwclock --systohc --utc";
;;
2) arch_chroot_ext "hwclock --systohc --localtime";
;;
*) invalid_option ;;
esac
[[ -n $OPT ]] && break
done
hwclock=$OPT
echo "Done!"
pause_function
}
configure_locale(){
print_title "LOCALE - https://wiki.archlinux.org/index.php/Locale"
print_info "Locales are used in Linux to define which language the user uses. As the locales define the character sets being used as well, setting up the correct locale is especially important if the language contains non-ASCII characters."
OPTION=n
while [[ $OPTION != y ]]; do
setlocale
read_input_text "Confirm locale ($LOCALE)"
done
echo 'LANG="'$LOCALE_UTF8'"' > ${MOUNTPOINT}/etc/locale.conf
arch_chroot_ext "sed -i '/'${LOCALE_UTF8}'/s/^#//' /etc/locale.gen"
arch_chroot_ext "locale-gen"
echo "Done!"
pause_function
}
configure_mkinitcpio(){
print_title "MKINITCPIO - https://wiki.archlinux.org/index.php/Mkinitcpio"
print_info "mkinitcpio is a Bash script used to create an initial ramdisk environment."
sudo sed -i 's/MODULES=""/MODULES="vmw_balloon vmw_pvscsi vsock vmw_vsock_vmci_transport vmwgfx vmw_vmci"/g' /mnt/etc/mkinitcpio.conf
arch_chroot_ext "mkinitcpio -p linux"
echo "Done!"
pause_function
}
# Execute an arbitrary command line inside the target installation.
#   $1 - command string, run through `bash -c` in the chroot at $MOUNTPOINT
arch_chroot_ext() {
arch-chroot $MOUNTPOINT /bin/bash -c "${1}"
}
install_bootloader() {
print_title "BOOTLOADER - https://wiki.archlinux.org/index.php/Bootloader"
print_info "The boot loader is responsible for loading the kernel and initial RAM disk before initiating the boot process."
echo "Installing grub.."
arch_chroot_ext "pacman -S grub --noconfirm"
arch_chroot_ext "grub-install /dev/sda"
echo "Configuring Grub.."
arch_chroot_ext "grub-mkconfig -o /boot/grub/grub.cfg"
echo "Done!"
pause_function
}
setlocale() {
local _locale_list=(`cat /etc/locale.gen | grep UTF-8 | sed 's/\..*$//' | sed '/@/d' | awk '{print $1}' | uniq | sed 's/#//g'`);
PS3="$prompt1"
echo "Select locale:"
select LOCALE in "${_locale_list[@]}"; do
if contains_element "$LOCALE" "${_locale_list[@]}"; then
LOCALE_UTF8="${LOCALE}.UTF-8"
break
else
invalid_option
fi
done
}
settimezone() {
local _zones=(`timedatectl list-timezones | sed 's/\/.*$//' | uniq`)
PS3="$prompt1"
echo "Select zone:"
select ZONE in "${_zones[@]}"; do
if contains_element "$ZONE" "${_zones[@]}"; then
local _subzones=(`timedatectl list-timezones | grep ${ZONE} | sed 's/^.*\///'`)
PS3="$prompt1"
echo "Select subzone:"
select SUBZONE in "${_subzones[@]}"; do
if contains_element "$SUBZONE" "${_subzones[@]}"; then
break
else
invalid_option
fi
done
break
else
invalid_option
fi
done
}
update_clock() {
timedatectl set-ntp true
}
# Print a message and abort the whole script with status 1.
#   $1 - message text (echo -e escape sequences are honoured)
error_msg() {
local _msg="${1}"
echo -e "${_msg}"
exit 1
}
# Inform the user that a menu choice was invalid and wait for a key press.
invalid_option() {
print_line
echo "Invalid option. Try another one."
pause_function
}
print_line() {
printf "%$(tput cols)s\n"|tr ' ' '-'
}
print_title() {
clear
print_line
echo -e "# ${Bold}$1${Reset}"
print_line
echo ""
}
print_info() {
#Console width number
T_COLS=`tput cols`
echo -e "${Bold}$1${Reset}\n" | fold -sw $(( $T_COLS - 18 )) | sed 's/^/\t/'
}
print_warning() {
T_COLS=`tput cols`
echo -e "${BYellow}$1${Reset}\n" | fold -sw $(( $T_COLS - 1 ))
}
print_danger() {
T_COLS=`tput cols`
echo -e "${BRed}$1${Reset}\n" | fold -sw $(( $T_COLS - 1 ))
}
read_input() {
if [[ $AUTOMATIC_MODE -eq 1 ]]; then
OPTION=$1
else
read -p "$prompt1" OPTION
fi
}
read_input_text() {
if [[ $AUTOMATIC_MODE -eq 1 ]]; then
OPTION=$2
else
read -p "$1 [y/N]: " OPTION
echo ""
fi
OPTION=`echo "$OPTION" | tr '[:upper:]' '[:lower:]'`
}
# Read a menu selection and expand it into the global OPTIONS array.
# Accepts space- or comma-separated values and N-M ranges, e.g.
# "1 2 3", "1,2" or "1-3".  In automatic mode $1 is used instead of
# prompting the user.
read_input_options() {
local line
local packages
if [[ $AUTOMATIC_MODE -eq 1 ]]; then
array=("$1")
else
read -p "$prompt2" OPTION
array=("$OPTION")
fi
# ${array[@]/,/ } replaces a comma with a space; the unquoted expansion
# then word-splits the result into individual tokens.
# NOTE(review): only the FIRST comma of each word is replaced, so
# "1,2,3" does not expand fully -- verify with multi-comma input.
for line in ${array[@]/,/ }; do
# a token containing '-' is treated as an inclusive numeric range N-M
if [[ ${line/-/} != $line ]]; then
for ((i=${line%-*}; i<=${line#*-}; i++)); do
packages+=($i);
done
else
packages+=($line)
fi
done
OPTIONS=("${packages[@]}")
}
# Detect the firmware type and set the global UEFI flag (1 = UEFI,
# 0 = BIOS).  On Apple hardware the legacy efivars module is removed;
# on all other machines the modern efivarfs module is loaded, and the
# efivars filesystem is mounted if the system booted in UEFI mode.
check_boot_system() {
if [[ "$(cat /sys/class/dmi/id/sys_vendor)" == 'Apple Inc.' ]] || [[ "$(cat /sys/class/dmi/id/sys_vendor)" == 'Apple Computer, Inc.' ]]; then
modprobe -r -q efivars || true # if MAC
else
modprobe -q efivarfs # all others
fi
if [[ -d "/sys/firmware/efi/" ]]; then
## Mount efivarfs if it is not already mounted
if [[ -z $(mount | grep /sys/firmware/efi/efivars) ]]; then
mount -t efivarfs efivarfs /sys/firmware/efi/efivars
fi
UEFI=1
echo "UEFI Mode detected"
else
UEFI=0
echo "BIOS Mode detected"
fi
}
check_trim() {
# Set the global TRIM=1 when the primary disk advertises TRIM support.
# BUG FIX: the original redirected grep's output to /dev/null *inside*
# the $() command substitution, so the substitution was always empty and
# [[ -n ... ]] could never succeed -- TRIM was never set.  Test grep's
# exit status instead.
if hdparm -I /dev/sda 2>/dev/null | grep -q TRIM; then
TRIM=1
fi
}
mainmenu_item() {
# Render one main-menu row: "[checkbox] label [state]".
#   $1 - done flag (1 when the step was completed)
#   $2 - label text
#   $3 - optional state/value shown beside the label
# BUG FIX: 'state' was a global that was never cleared, so a completed
# item's state text leaked into every later row without one; make it
# local and reset it on each call.
local state=""
if [[ $1 == 1 && -n "$3" ]]; then
state="${BGreen}[${Reset}$3${BGreen}]${Reset}"
fi
echo -e "$(checkbox "$1") ${Bold}$2${Reset} ${state}"
}
elihw() {
[[ $OPT == b || $OPT == d ]] && break;
}
install_extra_options() {
if [[ -f `pwd`/qi-vm-extra ]]; then
cp ./qi-vm-extra /mnt/home
arch_chroot_ext "source /home/qi-vm-extra"
else
echo "missing file: sharedfuncs"
pause_function
return
fi
}
menu_item() {
#check if the number of arguments is less then 2
[[ $# -lt 2 ]] && _package_name="$1" || _package_name="$2";
#list of chars to remove from the package name
local _chars=("Ttf-" "-bzr" "-hg" "-svn" "-git" "-stable" "-icon-theme" "Gnome-shell-theme-" "Gnome-shell-extension-");
#remove chars from package name
for char in ${_chars[@]}; do _package_name=`echo ${_package_name^} | sed 's/'$char'//'`; done
#display checkbox and package name
echo -e "$(checkbox_package "$1") ${Bold}${_package_name}${Reset}"
}
checkbox() {
#display [X] or [ ]
[[ "$1" -eq 1 ]] && echo -e "${BBlue}[${Reset}${Bold}X${BBlue}]${Reset}" || echo -e "${BBlue}[ ${BBlue}]${Reset}";
}
contains_element() {
# Return 0 (success) when $1 equals one of the remaining arguments,
# 1 otherwise.
# BUG FIX: the old one-liner relied on the exit status of the last
# [[ ]]/break executed, which made it return success (0) when the
# candidate list was empty; use explicit returns instead.
local e
for e in "${@:2}"; do
[[ $e == "$1" ]] && return 0
done
return 1
}
pause_function() {
print_line
if [[ $AUTOMATIC_MODE -eq 0 ]]; then
read -e -sn 1 -p "Press enter to continue..."
fi
}
root_password(){
print_title "ROOT PASSWORD"
print_warning "Enter your new root password"
arch_chroot_ext "passwd"
pause_function
}
# Final step: announce completion and optionally unmount + reboot into
# the freshly installed system.
finish(){
print_title "INSTALL COMPLETED"
read_input_text "Reboot system"
if [[ $OPTION == y ]]; then
# NOTE(review): umount_partitions is not defined anywhere in this
# script -- presumably a leftover from the upstream helper library;
# confirm it exists before relying on this path.
umount_partitions
reboot
fi
exit 0
}
### MAIN
check_boot_system
check_trim
update_clock
pacman -Sy
while true
do
print_title "ArchLinux Quick Install - Designed for VMs"
echo " 1) $(mainmenu_item "${checklist[1]}" "Select Keymap" "${KEYMAP}" )"
echo " 2) $(mainmenu_item "${checklist[2]}" "Select Editor" "${EDITOR}" )"
echo " 3) $(mainmenu_item "${checklist[3]}" "Configure Mirrorlist" "${country_name} (${country_code})" )"
echo " 4) $(mainmenu_item "${checklist[4]}" "Partition Scheme")"
echo " 5) $(mainmenu_item "${checklist[5]}" "Install Base System")"
echo " 6) $(mainmenu_item "${checklist[6]}" "Configure Fstab" "${fstab}" )"
echo " 7) $(mainmenu_item "${checklist[7]}" "Configure Hostname" "${host_name}" )"
echo " 8) $(mainmenu_item "${checklist[8]}" "Configure Timezone" "${ZONE}/${SUBZONE}" )"
echo " 9) $(mainmenu_item "${checklist[9]}" "Configure Hardware Clock" "${hwclock}" )"
echo "10) $(mainmenu_item "${checklist[10]}" "Configure Locale" "${LOCALE}" )"
echo "11) $(mainmenu_item "${checklist[11]}" "Configure Mkinitcpio")"
echo "12) $(mainmenu_item "${checklist[12]}" "Install Bootloader" "${bootloader}" )"
echo "13) $(mainmenu_item "${checklist[13]}" "Root Password")"
echo ""
echo "Optional Steps:"
echo "14) $(mainmenu_item "${checklist[14]}" "Extra Options (Desktop Environments)")"
echo " d) Done"
echo ""
read_input_options
for OPT in ${OPTIONS[@]}; do
case "$OPT" in
1)
select_keymap
checklist[1]=1
;;
2)
select_editor
checklist[2]=1
;;
3)
configure_mirrorlist
checklist[3]=1
;;
4)
partition_simple
checklist[4]=1
;;
5)
install_base_system
configure_keymap
install_extra_pkg
checklist[5]=1
;;
6)
configure_fstab
checklist[6]=1
;;
7)
configure_hostname
checklist[7]=1
;;
8)
configure_timezone
checklist[8]=1
;;
9)
configure_hardwareclock
checklist[9]=1
;;
10)
configure_locale
checklist[10]=1
;;
11)
configure_mkinitcpio
checklist[11]=1
;;
12)
install_bootloader
checklist[12]=1
;;
13)
root_password
checklist[13]=1
;;
14)
install_extra_options
checklist[14]=1
;;
"d")
finish
;;
*)
invalid_option
;;
esac
done
done
| true
|
af558cb0c7a7d6c38ea76570d78cd357711e6da7
|
Shell
|
kzhdlmu/hi3518-osdrv
|
/rootfs_scripts/rootfs/sbin/fw-upgrade
|
UTF-8
| 2,806
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/sh
evt=$1
fw_dir=$2
fw_file=$3
fw_path="${fw_dir}/${fw_file}"
lock_file="${fw_dir}/firmware-lock"
warn() {
# Print a highlighted (blue) warning to stdout.
# Uses printf because this script runs under /bin/sh, where 'echo -e'
# is not portable (dash prints the literal "-e").
printf '\033[34;1mWARNING: %s\033[0m\n' "$*"
}
fatal() {
# Print a highlighted (red) fatal-error message to stdout.
printf '\033[31;1mFATAL: %s\033[0m\n' "$*"
}
## only process the event:
## w: Writable file was closed
## y: File was moved to Y
if ! [ x"${evt}" = "xw" -o x"${evt}" = "xy" ]; then
exit 1
fi
## get file extension
file_ext=${fw_file##*.}
## firmware must be .bin or .fw extension
[ x"${file_ext}" != x"${fw_file}" ] || exit 1
[ x"${file_ext}" = "xbin" -o x"${file_ext}" = "xfw" ] || exit 1
if [ -f "${lock_file}" ]; then
warn "Upgrade is processing"
exit 1
fi
## delete lock_file on signal 0/1/2/3/6/9/15
cleanup() {
rm -f ${lock_file}
rm -f ${fw_path}
rm -rf ${fw_dir}/files
}
trap "cleanup; exit 1" 0 1 2 3 6 9 15
touch $lock_file || exit 1
## extract the firmware
echo 2 > ${lock_file}
echo -e "\033[1mExtracting the firmware...\033[0m"
new_ver=$(fw_decode -d ${fw_dir}/files ${fw_path} \
| grep 'Version' \
| awk '{print $2}')
if [ $? -ne 0 ]; then
echo 0 > ${lock_file}
fatal "Extract firmware failed"
exit 1
fi
# Resolve a partition name (as quoted in /proc/mtd) to its MTD devices.
#   $1 - partition name
# On success sets the globals mtdcdev (/dev/mtdN character device) and
# mtdbdev (/dev/mtdblockN block device) and returns 0; returns 1 when
# the partition or its device nodes are missing.  Any filesystem
# currently mounted from the block device is forcibly unmounted.
# NOTE(review): 'local' is not POSIX sh, but busybox/dash accept it.
get_mtddev_from_name() {
mtdnr=$(grep -E "\"$1\"" /proc/mtd \
| grep -E -o '^mtd[0-9]{1,2}' \
| cut -b 4-)
if [ x"${mtdnr}" = "x" ]; then
# NOTE(review): the warnings below interpolate $f (the caller's loop
# variable) rather than $1 -- this only works because the sole caller
# iterates with 'f'.
warn "partition '$f' does not exists, ignore."
return 1;
fi
mtdcdev="/dev/mtd${mtdnr}"
mtdbdev="/dev/mtdblock${mtdnr}"
## check the device
if ! [ -c $mtdcdev -a -b $mtdbdev ]; then
warn "MTD device ${mtdcdev}-${mtdbdev} does not exist"
return 1;
fi
## if filesystem is in-use, umount it first
if grep "$mtdbdev" /proc/mounts; then
local ignore=
local mpoint=
# parse "device mountpoint ..." from /proc/mounts via a here-doc so the
# read runs in the current shell (no pipeline subshell)
read ignore mpoint ignore << EOF
$(grep -E "${mtdbdev}" /proc/mounts)
EOF
if [ x"$mpoint" != "x" ]; then
# kill users of the mount point, then unmount it
fuser -km $mpoint
umount $mpoint
fi
fi
return 0
}
## run pre-upgrade.sh
if [ -f ${fw_dir}/files/pre-upgrade.sh ]; then
echo 3 > ${lock_file}
cd ${fw_dir}/files
. pre-upgrade.sh
cd -
fi
## program every partition
echo 3 > ${lock_file}
for f in $(ls ${fw_dir}/files); do
if ! get_mtddev_from_name $f; then
continue
fi
if [ -c ${mtdcdev} ]; then
echo -e "\033[1mPrograming partition '${f}' to '${mtdcdev}'...\033[0m"
if grep ${mtdbdev} /proc/mounts; then
umount ${mtdbdev}
fi
flash_eraseall ${mtdcdev}
flashcp -v ${fw_dir}/files/$f ${mtdcdev}
else
warn "partition '$f' is not a valid character device"
continue
fi
done
## run post-upgrade.sh
if [ -f ${fw_dir}/files/post-upgrade.sh ]; then
echo 3 > ${lock_file}
cd ${fw_dir}/files
. post-upgrade.sh
cd -
fi
fw_version.sh $new_ver
## program OK, reboot the system.
echo 4 > ${lock_file}
echo -e "\033[32;1mUpgrade complete, now rebooting system...\033[0m"
sleep 1
reboot
| true
|
72ab625a34d613cf44a47111f2acc539bdf61f57
|
Shell
|
marty-sullivan/CodePipeline-Template
|
/build/web.sh
|
UTF-8
| 684
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CodeBuild step: package the static web assets into build/web.zip.
# Requires CODEBUILD_SRC_DIR to point at the checked-out source tree.
set -e
# API_ID=$(aws cloudformation describe-stacks \
# --stack-name "$APPLICATION-$ENVIRONMENT" \
# --query 'Stacks[0].Outputs[?OutputKey==`Api`].OutputValue' \
# --output text)
# aws apigateway get-sdk \
# --rest-api-id "$API_ID" \
# --stage-name 'api' \
# --sdk-type javascript \
# $CODEBUILD_SRC_DIR/build/sdk.zip
# unzip \
# $CODEBUILD_SRC_DIR/build/sdk.zip \
# -d $CODEBUILD_SRC_DIR/web/
# WEB_BUCKET=$(aws cloudformation describe-stacks \
# --stack-name "$APPLICATION-$ENVIRONMENT" \
# --query 'Stacks[0].Outputs[?OutputKey==`WebBucket`].OutputValue' \
# --output text)
# BUG FIX: quote the expansions so a workspace path containing spaces
# does not word-split; the glob stays outside the quotes so it still
# expands to the individual files under web/.
zip "$CODEBUILD_SRC_DIR/build/web.zip" -r "$CODEBUILD_SRC_DIR"/web/*
| true
|
d4cc3bd4956c94cd2f0b9efa6eaa1c3483c3b1ab
|
Shell
|
cdoan1/rhacm-tools
|
/hack/proxy/proxy-verify-deployments-env.sh
|
UTF-8
| 486
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Report deployments in the ACM-related namespaces whose spec does not
# mention any PROXY environment variable.  Requires a logged-in 'oc'.
echo ""
echo "Date: $(date)"
echo "Date: $(date -u)"
echo "Cluster: $(oc cluster-info | grep api)"
echo ""
for NS in open-cluster-management hive open-cluster-management-hub open-cluster-management-agent open-cluster-management-agent-addon
do
  # first column of 'oc get deployments' minus the NAME header row
  for p in $(oc get deployments -n "$NS" | awk '{print $1}' | grep -v NAME)
  do
    # grep -q reports via exit status; replaces the old
    # '> /dev/null' redirection plus no-op ':' then-branch.
    if ! oc get deployment "$p" -n "$NS" -o yaml | grep -q PROXY; then
      echo "no proxy in deployment: $p"
    fi
  done
done
| true
|
7779455cb9146bbe0787ae7d7600edc787d65c27
|
Shell
|
yongjiangbuaa/march3
|
/WebProject/Tools/publish_control
|
UTF-8
| 8,725
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Deployment controller: dispatches the requested mode over a list of
# game-server SIDs.  $1 = mode, $2 = sid list ("," or "-" separated),
# $3 = mode-specific extra argument.
if [ $# -lt 2 ]; then
echo "Usage: $0 {deploy|deploywithdb|deployTemp|start|stop|restart|syncCrossFile|syncBadwords|changeDbStruct} [ sid list. use sepreater {,|-} ]"
exit 1
fi
mode=$1
sids=$2
extra=$3
# deploywithdb: regenerate all server-side config files, then run a full
# per-server deploy (including DB) in parallel, one background job per sid.
if [ "$mode" = "deploywithdb" ]; then
php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_mybatis-cross.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_rmiClient.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_consumer.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_config.properties.php output_config_dir=/usr/local/cok/SFS2X/onlineconfig
PARSESIDS=`php /publish/util/parse_sids.php sids=$sids`
for i in $PARSESIDS
do
DBIPNAME=`php /publish/util/get_dbipname_list.php sids=$i`
ROOTIP=`php /publish/util/get_server_list_with_root.php sids=$i`
sh publish_oneserver_full.sh $ROOTIP $DBIPNAME $i &
done
# wait for all parallel per-server deploys before exiting
wait
exit 0
fi
# All remaining modes operate on the resolved list of root IPs.
ROOTIPLIST=`/home/elex/php/bin/php /publish/util/get_server_list_with_root.php sids=$sids`
case "$mode" in
'test')
for i in $ROOTIPLIST
do
echo $i
done
;;
'deploy')
php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_mybatis-cross.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_rmiClient.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_config.properties.php output_config_dir=/usr/local/cok/SFS2X/onlineconfig
for i in $ROOTIPLIST
do
#fab stopsfs:host=$i setCurrentVersion upload:host=$i startsfs:host=$i &
fab stopsfs:host=$i setCurrentVersion upload:host=$i uploadResourceXml:host=$i,xmlfiles="exchange.xml" startsfs:host=$i &
done
;;
'deployPatch')
gameversion=$extra
#php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
#php /publish/scripts/generate_config.properties.php output_config_dir=/usr/local/cok/SFS2X/onlineconfig
for i in $ROOTIPLIST
do
#fab stopsfs:host=$i uploadPatchJarFile:host=$i,ver=$gameversion &
fab stopsfs:host=$i uploadPatchJarFile:host=$i,ver=$gameversion startsfs:host=$i &
#fab stopsfs:host=$i uploadPatchJarFile:host=$i,ver=$gameversion uploadConfigProperties:host=$i startsfs:host=$i &
#fab stopsfs:host=$i uploadPatchJarFile:host=$i,ver=$gameversion uploadResourceXml:host=$i,xmlfiles="package.xml" startsfs:host=$i &
#fab stopsfs:host=$i uploadPatchJarFile:host=$i,ver=$gameversion uploadConfigProperties:host=$i uploadServerConfig:host=$i startsfs:host=$i &
done
wait
;;
'deployTemp')
#php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
#php /publish/scripts/generate_config.properties.php output_config_dir=/usr/local/cok/SFS2X/onlineconfig
for i in $ROOTIPLIST
do
#echo "nothing to do !"
fab puthadoop:host=$i &
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.12,clientVer="0|1.0.2213"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.11,clientVer="0|1.0.2183"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.10,clientVer="0|1.0.2165"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.9,clientVer="0|1.0.2140"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.8,clientVer="0|1.0.2106"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.7,clientVer="0|1.0.2096"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.6,clientVer="0|1.0.2063"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.5,clientVer="0|1.0.2042"
#fab updateGameconfigClientVersion:host=$i,appVer=1.1.4,clientVer="0|1.0.1988"
done
wait
;;
'setstatus')
for i in $ROOTIPLIST
do
fab setRedisServerStatus:host=$i,status=$extra
done
;;
'getstatus')
for i in $ROOTIPLIST
do
fab getRedisServerStatus:host=$i
done
;;
'start')
for i in $ROOTIPLIST
do
fab startsfs:host=$i &
done
echo "wait all subprocess exit"
wait
;;
'stop')
for i in $ROOTIPLIST
do
fab stopsfs:host=$i &
done
;;
'restart')
#php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
for i in $ROOTIPLIST
do
#fab stopsfs:host=$i uploadServersXml:host=$i startsfs:host=$i &
#fab stopsfs:host=$i uploadConfigProperties:host=$i startsfs:host=$i &
fab stopsfs:host=$i startsfs:host=$i &
done
wait
;;
'syncCrossFile')
php /publish/scripts/generate_servers.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_mybatis-cross.xml.php output_config_dir=/publish/update/config
php /publish/scripts/generate_rmiClient.xml.php output_config_dir=/publish/update/config
for i in $ROOTIPLIST
do
fab uploadMybatisCross:host=$i uploadServersXml:host=$i &
done
echo "Wait all subprocess exit"
wait
;;
'uploadXml')
uploadxmlfiles=$extra
for i in $ROOTIPLIST
do
fab uploadResourceXml:host=$i,xmlfiles="$uploadxmlfiles" &
done
wait
;;
'downloadXml')
uploadxmlfiles=$extra
for i in $ROOTIPLIST
do
fab downloadResourceXml2PatchDir:host=$i,xmlfiles="$uploadxmlfiles"
done
;;
'uploadXmlFromPatchDir')
uploadxmlfiles=$extra
for i in $ROOTIPLIST
do
fab uploadPatchResourceXml:host=$i,xmlfiles="$uploadxmlfiles" &
done
wait
;;
'findCrack')
for i in $ROOTIPLIST
do
fab findCrack:host=$i &
done
;;
'setRedisKeyValue')
for i in $ROOTIPLIST
do
#fab setRedisKeyValue:host=$i,key=realtime_translationTarget,val="'en|ru|de'" &
#fab setRedisKeyValue:host=$i,key=realtime_translation,val=true &
fab setRedisKeyValue:host=$i,key=realtime_ms_client_id,val='"elex-translator-1"' &
#fab setRedisKeyValue:host=$i,key=realtime_ms_client_secret,val=sEcC5WmTYYZfnXfHIECyh+a+uMenGFPACGSL5GRhVTI= &
done
;;
'syncBadwords')
for i in $ROOTIPLIST
do
fab syncBadwords:host=$i &
done
;;
'changeDbStruct')
DBIPNAMELIST=`/home/elex/php/bin/php /publish/util/get_dbipname_list.php sids=$sids`
for i in $DBIPNAMELIST
do
DBIPNAME=$i
dbipport=`echo $DBIPNAME|cut -d '/' -f 1`
dbname=`echo $DBIPNAME|cut -d '/' -f 2`
dbip=`echo $dbipport|cut -d ':' -f 1`
dbport=`echo $dbipport|cut -d ':' -f 2`
echo "mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport $dbname < currdeploy/db_struct_changes.sql"
/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport -f $dbname < currdeploy/db_struct_changes1.sql
#fab setCurrentVersion changeDbStruct:dbipname=$i
done
;;
'execSql')
DBIPNAMELIST=`/home/elex/php/bin/php /publish/util/get_dbipname_list.php sids=$sids slave=1`
echo $extra
for i in $DBIPNAMELIST
do
DBIPNAME=$i
dbipport=`echo $DBIPNAME|cut -d '/' -f 1`
dbname=`echo $DBIPNAME|cut -d '/' -f 2`
dbip=`echo $dbipport|cut -d ':' -f 1`
dbport=`echo $dbipport|cut -d ':' -f 2`
#echo $dbname
#echo $dbname >> batch_sql/ckf_repair_data2.log
/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport $dbname --skip-column-names -e "$extra"
#/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport --skip-column-names -f $dbname < batch_sql/batch.sql
#/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport -f $dbname < currdeploy/db_struct_changes.sql
#/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport -f $dbname < batch_sql/xxxbatch.sql
#/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport --skip-column-names $dbname < batch_sql/invaid_name.sql >> /publish/data/invalidname_$dbname.log
done
;;
'execSlaveDBSql')
DBIPNAMELIST=`/home/elex/php/bin/php /publish/util/get_dbipname_list.php sids=$sids slave=1`
echo $extra
for i in $DBIPNAMELIST
do
DBIPNAME=$i
dbipport=`echo $DBIPNAME|cut -d '/' -f 1`
dbname=`echo $DBIPNAME|cut -d '/' -f 2`
dbip=`echo $dbipport|cut -d ':' -f 1`
dbport=`echo $dbipport|cut -d ':' -f 2`
echo $dbname
/usr/bin/mysql -uroot -pt9qUzJh1uICZkA -h $dbip -P $dbport --skip-column-names -f $dbname < batch_sql/$extra >> batch_sql/$extra.log
done
;;
'clearGlobalDbData')
echo "mysql -uroot -pt9qUzJh1uICZkA -h10.81.92.75 cokdb_global < currdeploy/db_struct_changes_everytime.sql"
mysql -uroot -pt9qUzJh1uICZkA -h10.81.92.75 cokdb_global < currdeploy/db_struct_changes_everytime.sql
echo "done."
;;
*)
# usage
echo "Usage: $0 {deploy|deploywithdb|deployTemp|start|stop|restart|uploadMybatisCross|uploadServersXml|changeDbStruct|syncBadwords} [ sid list. use sepreater {,|-} ]"
exit 1
;;
esac
| true
|
174b8018d304fbd2aa23e8a5ca39bca1f9a15e64
|
Shell
|
DrSnowbird/jetty-fileserver
|
/bin/docker-network-bridge-create.sh
|
UTF-8
| 337
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Create a Docker bridge network if it does not already exist.
#
# Usage: docker-network-bridge-create.sh [network-name]
#   network-name defaults to "dev-network".

BRIDGE_NAME=${1:-dev-network}

# Match the network name exactly (grep -x).  The original substring grep
# wrongly reported "exists" when only e.g. "dev-network2" was present,
# and produced a multi-line result (breaking the comparison) when both
# "dev-network" and "dev-network2" existed.
found_bridge=$(sudo docker network ls | awk '{print $2}' | grep -x "${BRIDGE_NAME}")
if [ "${found_bridge}" = "${BRIDGE_NAME}" ]; then
    echo "Docker network creation for: ${BRIDGE_NAME}: EXISTS! Skip!"
    exit 0
fi

sudo docker network create --driver bridge "${BRIDGE_NAME}"
sudo docker network ls
| true
|
42488df748bf5af34f9add9ba949f60c0ab63021
|
Shell
|
infriend/infriend-homework
|
/OS theory/shell/ex02_02-1.sh
|
UTF-8
| 88
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Read file names from stdin, one per line, and print each name with its
# final extension (everything after the last '.') removed.
#
# Fixes over the original:
#  - the loop now ends at EOF ('while [ 1 -lt 2 ]' ignored read's status
#    and spun forever once stdin was exhausted);
#  - 'read -r' keeps backslashes in names literal;
#  - the expansion is quoted so names containing spaces survive intact.
while IFS= read -r filename
do
    printf '%s\n' "${filename%.*}"
done
| true
|
ab581904d8162f4a1edcd9f843b93236057c94e5
|
Shell
|
bluelabsio/sqlalchemy-vertica-python
|
/deps.sh
|
UTF-8
| 921
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e

# Prepare a pyenv-managed virtualenv for sqlalchemy-vertica-python and
# install the project's requirements in editable mode.

py_ver=3.8.5

case "$(uname)" in
  Darwin)
    # Keep Homebrew and pyenv themselves current on macOS.
    brew update && ( brew upgrade pyenv || true )
    ;;
esac

# zipimport.ZipImportError: can't decompress data; zlib not available:
# You may need `xcode-select --install` on OS X
# https://github.com/pyenv/pyenv/issues/451#issuecomment-151336786
pyenv install -s "${py_ver:?}"

case "$(uname)" in
  Darwin)
    # Python has needed this in the past when installed by 'pyenv
    # install'. The current version of 'psycopg2' seems to require it
    # now, but Python complains when it *is* set. 🤦
    CFLAGS="-I$(brew --prefix openssl)/include"
    export CFLAGS
    LDFLAGS="-L$(brew --prefix openssl)/lib"
    export LDFLAGS
    ;;
esac

pyenv virtualenv "${py_ver:?}" sqlalchemy-vertica-python-"${py_ver:?}" || true
pyenv local sqlalchemy-vertica-python-"${py_ver:?}"
pip3 install --upgrade pip
pip3 install -r requirements.txt -e .
| true
|
26db402eac47d5a46b9c7460a130a7a2b27257e7
|
Shell
|
krboswell/ceen8886
|
/lab1/doc/enc_dec_all.sh
|
UTF-8
| 701
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Encrypt and decrypt a sample PDF with AES-256 in several block-cipher
# modes, timing each openssl invocation and recording MD5 checksums so
# round-trips can be verified and ciphertexts compared across modes.

key=purplemonkeydishwasher
src_name=Lecture02.pdf
outdir=output
src=../../$src_name
# Block-cipher modes to exercise.
modes="cbc cfb ctr ecb ofb"

# Checksums of the source, every ciphertext and every decrypted copy
# accumulate in this one file (source first, via > ; the rest appended).
checksums_file=$outdir/checksums.md5sum

md5sum $src > $checksums_file

for mode in $modes ; do
    enc_tgt="$outdir/Lecture02_${mode}_encrypted.base64"
    dec_tgt="$outdir/Lecture02_${mode}_decrypted.pdf"
    # TIMEFORMAT='%3R' makes bash's 'time' print just elapsed seconds with
    # millisecond precision; '{ time ...; } 2>&1 | tee FILE' both displays
    # the timing and saves it (plus any openssl stderr) to a .time file.
    TIMEFORMAT='%3R'; { time openssl enc --in $src --out $enc_tgt --base64 --nosalt -e --aes-256-$mode -k $key; } 2>&1 | tee "$enc_tgt.time"
    md5sum $enc_tgt >> $checksums_file
    TIMEFORMAT='%3R'; { time openssl enc --in $enc_tgt --out $dec_tgt --base64 --nosalt -d --aes-256-$mode -k $key; } 2>&1 | tee "$dec_tgt.time"
    md5sum $dec_tgt >> $checksums_file
done
| true
|
0513f11ecc3856013c4d2af9d0a0eebc61a8a387
|
Shell
|
ycrumeyrolle/Base64
|
/fuzz/init.sh
|
UTF-8
| 335
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the fuzz-test project and instrument the Base64 assembly with
# SharpFuzz so it is ready for fuzzing.
set -e

printf '%s\n\n' "See https://github.com/Metalnem/sharpfuzz for setup"

assembly=gfoidl.Base64.dll

dotnet build -c Release gfoidl.Base64.FuzzTests

# Keep the instrumented copy separate from the pristine build output.
mkdir -p ./instrumented
cp "../source/gfoidl.Base64/bin/Release/netcoreapp3.0/${assembly}" "./instrumented/${assembly}"
sharpfuzz "./instrumented/${assembly}"

printf '%s\n' "${assembly} instrumented and ready to go"
| true
|
0a1872929eb18591c65b3b98075d4feba9e8e526
|
Shell
|
voidlock/aed
|
/bin/defib
|
UTF-8
| 1,275
| 3.296875
| 3
|
[] |
permissive
|
#!/usr/bin/env bash
# defib - "shock a project to life": bootstrap a project's dependencies
# using the AED framework installed under ~/.aed (override with AED_BASE).
# Expects a project-local Aedfile in the current working directory.

# Global install locations (AED_BASE may be preset by the caller).
DEF_BASE="$HOME/.aed"
AED_BASE="${AED_BASE:=$DEF_BASE}"
AED_LIB="${AED_BASE}/lib"
AED_MODULES="${AED_BASE}/modules"
AED_CONFIG="${HOME}/.aedrc"

# Per-project state lives in ./.aed next to the Aedfile.
PWD_DIR=`pwd -P`
AED_LOCAL_DIR="${PWD_DIR}/.aed"
AED_LOCAL_MODULES="${AED_LOCAL_DIR}/modules"
AED_LOG="${AED_LOCAL_DIR}/defib.log"
AED_FILE="${PWD_DIR}/Aedfile"

mkdir -p ${AED_LOCAL_DIR}
# Truncate the log left over from any previous run.
echo -n "" > ${AED_LOG}

# Banner.
echo " ______ _____"
echo " /\ | ____|| __ \\"
echo " / \ | |__ | | | |"
echo " / /\ \ | __| | | | |"
echo " / ____ \ | |____ | |__| |"
echo " /_/ \_\|______||_____/"
echo
echo
echo " Automated External "
echo " Defibrillator "
echo

# utils.sh provides the die/outp/debug helpers used below.
# NOTE(review): if utils.sh itself fails to load, 'die' is undefined and
# this line errors differently than intended -- confirm.
source "${AED_LIB}/utils.sh" || die "Could not find AED install"

if [ ! -e "$AED_FILE" ]; then
  outp "You must create a project specific '$AED_FILE' file"
  exit 1
fi

# core.sh / platform.sh set PKG_MGR, PLATFORM and define defib_module.
debug "Sourcing core and platform from '${AED_LIB}'"
source "${AED_LIB}/core.sh"
source "${AED_LIB}/platform.sh"

outp "Shocking project to life with '${PKG_MGR}' on '${PLATFORM}' ... CLEAR!!!"
outp

# Setting SKIP_UPDATE in the environment skips the package-manager refresh.
if [ -z "$SKIP_UPDATE" ]; then
  defib_module "Updating ${PKG_MGR}..." 'update_pkg_mgr' || die "Cannot to update package manager"
fi

# Finally run the project's own recipe.
debug "loading ${AED_FILE}"
source "${AED_FILE}"

outp "It lives!!"
| true
|
39e359c968766a524828db646656f8302e771740
|
Shell
|
HacksDev/PythonHW
|
/List/linux_hw1.sh
|
UTF-8
| 1,169
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision a shared FTP layout under /var/ftp:
#   - group 'ftpadmins' for superusers, group 'ftpusers' for plain users
#   - superuser 'ftpmod' plus regular users u1..u3, each owning a home
#   - homes group-owned by ftpadmins, setgid, no access for others

# Make sure only root can run script
if [[ $EUID -ne 0 ]]; then
   echo "This script must be run as root" 1>&2
   exit 1
fi;

# Create main ftp folder.
BASE_DIR=/var/ftp;
mkdir -p $BASE_DIR;

# Create group that includes superusers.
groupadd ftpadmins;
# Create group that includes ftp users.
groupadd ftpusers;

# Create superuser;
useradd -d $BASE_DIR -G ftpadmins ftpmod;

# Create the regular users in one loop instead of three copy-pasted
# blocks: each user owns their home, group-owned by ftpadmins.
for ftpuser in u1 u2 u3; do
    useradd -m -d "$BASE_DIR/$ftpuser" -G ftpusers "$ftpuser"
    chown "$ftpuser" "$BASE_DIR/$ftpuser"
    chgrp ftpadmins "$BASE_DIR/$ftpuser"
done

# Set super group as owner
chgrp ftpadmins $BASE_DIR;
chgrp ftpadmins $BASE_DIR/*;

# Set permission rules
chmod u+rwx,g+rwx,o-rwx $BASE_DIR/*;
chmod g+s $BASE_DIR/*;
# With using ACL: setfacl -R -d -m u::rwx,g::rwx,o::--- $BASE_DIR/*

# How to check it...
# 1. docker run -it --name=ubuntu ubuntu:latest
# 2. apt update
# [2.1] apt install acl nano
# 3. Write this into scriptname.sh file.
# 4. chmod 777 scriptname.sh
# 5. ./scriptname.sh
# 6. su - u1 # to change account
|
49b06c0ec8518fbe516a7e78ef003610df33bda5
|
Shell
|
hdinsight/HivePerformanceAutomation
|
/workload/tpch/prerun.sh
|
UTF-8
| 3,477
| 3.859375
| 4
|
[] |
no_license
|
#/bin/bash
# Tell the operator that the SCALE setting is missing and abort.
usageerror() {
    printf '%s\n' "please enter scale factor in the config"
    exit 1
}
# Run the command string in $1.  Unless DEBUG_SCRIPT is set to a
# non-empty value, the command's stderr is discarded to keep the console
# quiet.  $1 is deliberately unquoted so the string is word-split into a
# command and its arguments.
runcommand() {
    if [ -n "${DEBUG_SCRIPT}" ]; then
        $1
    else
        $1 2>/dev/null
    fi
}
set -x

# Pull in SCALE, RAWDATA_DIR, LOADTIMES_*, CURRENT_DIRECTORY, etc.
. ./config.sh
cd ${CURRENT_DIRECTORY}

# Build the TPC-H data generator jar first.
echo "Building TPC-H Data Generator"
(cd tpch-gen; make)
echo "TPC-H Data Generator built, you can now use tpch-setup.sh to generate data."

if [ ! -f tpch-gen/target/tpch-gen-1.0-SNAPSHOT.jar ]; then
    echo "Please build the data generator with ./tpch-build.sh first"
    exit 1
fi

# Hive CLI must be on PATH for the load phase.
which hive > /dev/null 2>&1
if [ $? -ne 0 ]; then
    echo "Script must be run where Hive is installed"
    exit 1
fi

DIR=$RAWDATA_DIR

if [ "X$DEBUG_SCRIPT" != "X" ]; then
    set -x
fi

# Sanity checking.
if [ X"$SCALE" = "X" ]; then
    usageerror
fi

if [ X"$DIR" = "X" ]; then
    DIR=/tmp/tpch-generate
fi

if [ $SCALE -lt 2 ]; then
    echo "Scale factor must be greater than 1"
    exit 1
fi

# Timing results for each phase are appended to LOADTIMES_FILE.
mkdir -p $LOADTIMES_DIR
touch $LOADTIMES_FILE
STARTTIME="`date +%s`"

# Do the actual data load.
hdfs dfs -mkdir -p ${DIR}
# Only generate if the lineitem directory is not already on HDFS.
hdfs dfs -ls ${DIR}/${SCALE}/lineitem > /dev/null
if [ $? -ne 0 ]; then
    echo "Generating data at scale factor $SCALE."
    (cd tpch-gen; hadoop jar target/*.jar -d ${DIR}/${SCALE}/ -s ${SCALE})
fi
# Re-check: generation is only considered successful if lineitem exists.
hdfs dfs -ls ${DIR}/${SCALE}/lineitem > /dev/null
if [ $? -ne 0 ]; then
    echo "Data generation failed, exiting."
    exit 1
fi
echo "TPC-H text data generation complete."
# Abort if the optimized (ORC) database already exists in the warehouse.
hdfs dfs -ls ${WAREHOUSE_DIR}/tpch_partitioned_orc_${SCALE}.db > /dev/null
if [ $? -eq 0 ]; then
    echo "Data already loaded into query tables"
    exit 1
fi
DATAGENTIME="`date +%s`"
echo "DATAGENTIME,$( expr $DATAGENTIME - $STARTTIME)" >> $LOADTIMES_FILE

BEELINE_CONNECTION_STRING=$CONNECTION_STRING/$RAWDATA_DATABASE";transportMode=http"
# Create the text/flat tables as external tables. These will be later be converted to ORCFile.
echo "Loading text data into external tables."
runcommand "beeline -u ${BEELINE_CONNECTION_STRING} -i settings/load-flat.sql -f ${CURRENT_DIRECTORY}/ddl-tpch/bin_partitioned/allexternaltables.sql --hivevar DB=${RAWDATA_DATABASE} --hivevar LOCATION=${DIR}/${SCALE}"
EXTERNALTABLELOAD="`date +%s`"
# Create the optimized tables.
echo "EXTERNALTABLELOAD,$( expr $EXTERNALTABLELOAD - $DATAGENTIME)" >> $LOADTIMES_FILE

i=1
total=8
BEELINE_CONNECTION_STRING=$CONNECTION_STRING/$QUERY_DATABASE";transportMode=http"
# Convert each table to its partitioned ORC form, timing every one.
# NOTE(review): the SOURCE line below is missing a trailing backslash,
# leaving a literal newline inside COMMAND; harmless because the string
# is word-split when runcommand executes it -- confirm it is intentional.
for t in ${TABLES}
do
    echo "Optimizing table $t ($i/$total)."
    TABLELOADSTART="`date +%s`"
    COMMAND="beeline -u ${BEELINE_CONNECTION_STRING} -i ${CURRENT_DIRECTORY}/ddl-tpch/load-partitioned.sql -f ${CURRENT_DIRECTORY}/ddl-tpch/bin_partitioned/${t}.sql \
    --hivevar DB=${QUERY_DATABASE} \
    --hivevar SOURCE=${RAWDATA_DATABASE}
    --hivevar SCALE=${SCALE} \
    --hivevar FILE=orc"
    runcommand "$COMMAND"
    TABLELOADEND="`date +%s`"
    echo "TABLELOAD_${t},$( expr $TABLELOADEND - $TABLELOADSTART)" >> $LOADTIMES_FILE
    # NOTE(review): $? here reflects the echo/append above, not
    # runcommand -- the failure check likely never fires; confirm.
    if [ $? -ne 0 ]; then
        echo "Command failed, try 'export DEBUG_SCRIPT=ON' and re-running"
        exit 1
    fi
    i=`expr $i + 1`
done
echo "Data loaded into ${QUERY_DATABASE}"

ORCLOAD="`date +%s`"
# Optionally compute table statistics.
ANALYZE_COMMAND="beeline -u ${BEELINE_CONNECTION_STRING} -i ${CURRENT_DIRECTORY}/settings/load-partitioned.sql \
    --hivevar DB=${QUERY_DATABASE} \
    -f ${CURRENT_DIRECTORY}/ddl-tpch/bin_partitioned/analyze.sql"
if $RUN_ANALYZE; then
    echo "Running analyze"
    runcommand "$ANALYZE_COMMAND"
fi
ANALYZETIME="`date +%s`"
echo "ANALYZETIME, $( expr $ANALYZETIME - $ORCLOAD)" >> $LOADTIMES_FILE
echo "Analyze completed"
| true
|
02fa98171063f0d585da66e818e46a8e5ea4fa94
|
Shell
|
nReality/SUGSA-App2018
|
/build-scripts/android-post.sh
|
UTF-8
| 263
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Post-build step for Android: sign the build, then publish it to
# HockeyApp.  Must be executed from the repository root.
set -e

scripts_root=build-scripts

# 'exit 1' inside '( ... )' only leaves the subshell (the original guard
# relied on set -e to actually stop); a { ...; } group terminates the
# script reliably, and the diagnostic goes to stderr.
test -d "$scripts_root" || { echo "script must be executed from the root of the repo" >&2; exit 1; }

echo "=== SIGNING ==="
"$scripts_root"/android-sign.sh

echo "=== PUBLISH ==="
"$scripts_root"/android-publish-hockeyapp.sh
| true
|
d0d3864bdb624203dc9ac3a5fa0f32d41f21df52
|
Shell
|
smit1678/posm-build
|
/kickstart/scripts/bridge-deploy.sh
|
UTF-8
| 925
| 3.265625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Configure POSM as a network bridge on bare-metal Ubuntu installs.
# Relies on helpers supplied by the surrounding kickstart framework:
#   expand SRC DST - render a template into place
#   deploy NAME    - dispatch to the deploy_<name>_<distro> function
# (both defined elsewhere; available when this script runs).

deploy_bridge_ubuntu() {
	local v
	# Split declaration and assignment: with 'local v="`virt-what`"' the
	# following $? tested the (always-zero) status of 'local', so a
	# missing or failing virt-what silently looked like bare metal.
	v="$(virt-what 2>/dev/null)"
	if [ $? = 0 ] && [ -z "$v" ]; then
		# Bare metal: virt-what succeeded and reported no hypervisor.

		# enable port forwarding
		expand etc/sysctl.d/99-forwarding.conf /etc/sysctl.d/99-forwarding.conf
		# load sysctl settings
		service procps start

		# configure interface hook scripts
		expand etc/enable-port-forwarding /etc/network/if-up.d/enable_port_forwarding
		expand etc/disable-port-forwarding /etc/network/if-down.d/disable_port_forwarding
		chmod +x /etc/network/if-up.d/enable_port_forwarding
		chmod +x /etc/network/if-down.d/disable_port_forwarding
		IFACE=$posm_wan_netif /etc/network/if-up.d/enable_port_forwarding

		# disable DNS wildcarding
		rm -r /etc/dnsmasq.d/99-captive.conf
		service dnsmasq restart

		# disable Nginx captive portal
		rm -f /etc/nginx/sites-enabled/captive
		service nginx restart

		posm_network_bridged=1 expand etc/posm.json /etc/posm.json
	fi
}

deploy bridge
| true
|
2c3eb71bb8d6340598598c103069b931a8b6902e
|
Shell
|
CloverOS/gingerdx_stock
|
/system/bin/modelid_cfg.sh
|
UTF-8
| 898
| 2.859375
| 3
|
[] |
no_license
|
#!/sbin/sh
# Per-model install-time cleanup.
# 1) Remove the Kineto components unless the kernel cmdline reports one
#    of the model IDs that ship with them.
# 2) On "spade" devices, relocate audio/GPS configuration for the
#    PD9812000/PD9814000 variants and drop the unused nam directory.

kineto_apk=/system/app/MS-HTCEMR-KNT20-02-A0-GB-02.apk

# Keep Kineto only on these model IDs.
if egrep -q '(PC1010000)|(PB9910000)|(PD1510000)|(PB6510000)' /proc/cmdline; then
    strip_kineto=n
else
    strip_kineto=y
fi

if [ "$strip_kineto" = "y" ] && [ -f "$kineto_apk" ]; then
    rm -f "$kineto_apk"
    rm -f /system/lib/libkineto.so
    rm -f /system/lib/libganril.so
    rm -f /system/lib/librilswitch.so
    # Point the RIL back at HTC's library and drop the switcher entry.
    sed 's/librilswitch.so/libhtc_ril.so/' /system/build.prop > /tmp/build.tmp
    sed '/rilswitch/d' /tmp/build.tmp > /system/build.prop
    chmod 644 /system/build.prop
    rm /tmp/build*
fi

if grep -q spade /proc/cmdline; then
    if egrep -q '(PD9812000)|(PD9814000)' /proc/cmdline; then
        mv /system/etc/nam/*MCLK.txt /system/etc/soundimage/
        mv /system/etc/nam/CodecDSPID.txt /system/etc
        mv /system/etc/nam/gps.conf /system/etc
    fi
    rm -R /system/etc/nam
fi
| true
|
529decac0b65167985fa987176e91302d0f9d553
|
Shell
|
kongyew/greenplum-pxf-examples
|
/usecase1/pxf/disablePXFUserImpersonation.sh
|
UTF-8
| 484
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Disable PXF user impersonation by forcing PXF_USER_IMPERSONATION=false
# in pxf-env.sh, for either a home-directory or a system-wide install.
set -e

saved_dir=$(pwd)
cd "$(dirname "$0")"
. ./setEnv.sh

# Single-quoted so the ${...} text stays literal for sed.
subst='s/export PXF_USER_IMPERSONATION=${PXF_USER_IMPERSONATION:=true}/export PXF_USER_IMPERSONATION=false/g'

if [ -d "/home/gpadmin/pxf" ]; then
    sed -i "$subst" /home/gpadmin/pxf/conf/pxf-env.sh
elif [ -d "/usr/local/greenplum-db" ]; then
    sed -i "$subst" /usr/local/greenplum-db/pxf/conf/pxf-env.sh
fi

cd "$saved_dir"
| true
|
6ddf20799c60ebb2eea1bd62dc1c7a9bbbbd210d
|
Shell
|
pmem/pmemkv
|
/utils/jenkins/scripts/removeNamespaces.sh
|
UTF-8
| 460
| 3.203125
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2021, Intel Corporation

# removeNamespaces.sh - clear all existing namespaces.

set -e

# Deliberately unquoted below so the glob unmounts every /mnt/pmem* mount.
MOUNT_POINT="/mnt/pmem*"

# Clearing all existing namespaces
sudo umount $MOUNT_POINT || true

# List namespaces as JSON and pull out each device name (e.g. namespace0.0).
namespace_names=$(ndctl list -X | jq -r '.[].dev')

# Clear media errors on every namespace before tearing them down.
for n in $namespace_names
do
	sudo ndctl clear-errors $n -v
done

# Disable first, then destroy; '|| true' keeps set -e from aborting when
# there is nothing left to disable/destroy.
sudo ndctl disable-namespace all || true
sudo ndctl destroy-namespace all || true
| true
|
34df2f14d0a068207c10b2f214871dc822ac9d0c
|
Shell
|
dingyong/smalltools
|
/ed2k.sh
|
UTF-8
| 609
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect ed2k:// links via the user's editor, URL-decode each line with
# PHP, and append the result to aMule's ED2KLinks file for pickup.

ED2K_FILE=/net/aMule/.aMule/ED2KLinks

# mktemp instead of the predictable /tmp/$$.tmp names (symlink-safe);
# the trap guarantees cleanup on every exit path.
TMP=$(mktemp) || exit 1
TMP2=$(mktemp) || exit 1
trap 'rm -f "$TMP" "$TMP2"' EXIT

# Fall back to vim when $EDITOR is unset or empty.
EDITOR=${EDITOR:-/usr/bin/vim}

# accept input data from editor
"${EDITOR}" "$TMP"

# mktemp pre-creates the file, so test for content (-s) rather than
# existence (-f) to decide whether the user actually entered links.
if [ -s "$TMP" ]; then
    # -r keeps backslashes in the links literal.
    while IFS= read -r line
    do
        php -r 'echo urldecode($argv[1]) . "\n";' -- "$line" >> "$TMP2"
    done < "$TMP"
    cat "$TMP2" >> "$ED2K_FILE"
fi

exit 0
| true
|
b7db6517805d5cd1944b6994ac8a20de1d4708e5
|
Shell
|
HNLETHZ/PlotFactory
|
/checksamples/checkdiffsamples.sh
|
UTF-8
| 877
| 3.3125
| 3
|
[] |
no_license
|
# Classify each sample listed in diffsamples.py as usable or not,
# depending on whether its event content contains displacedStandAlone
# collections, while showing an in-place progress counter on the tty.
file=diffsamples.py

# Count the sample lines first so totals can be recorded up front.
numsamp=0
while read -r line; do
    numsamp=$((numsamp+1))
done < "$file"

echo "number of samples:" $numsamp >> notuseablediff.txt
echo "number of samples:" $numsamp >> useablediff.txt
echo "number of samples:" $numsamp
echo -n "number of samples checked: "

enum=1
while read -r line; do
    signal="$line"
    checkfile=$(edmDumpEventContent $signal | grep "displacedStandAlone")
    echo -n $enum
    # One branch per outcome instead of two independent ifs.
    if [ -z "$checkfile" ]; then
        echo $signal >> notuseablediff.txt
    else
        echo $signal >> useablediff.txt
    fi
    # Erase the counter just printed (one backspace per digit) so the
    # next iteration overwrites it in place; derived from the digit
    # count, this also works beyond the original 1-3 digit range.
    backspaces=${#enum}
    while [ "$backspaces" -gt 0 ]; do
        echo -n -e "\b"
        backspaces=$((backspaces-1))
    done
    enum=$((enum+1))
done < "$file"
echo -e "\n"
| true
|
14a8f37c875b49a891c793e390c7bfdca3ce709a
|
Shell
|
thomwiggers/dotfiles
|
/bootstrap.sh
|
UTF-8
| 1,545
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
# --- Functions --- #
# printf is used instead of echo: bash's echo does not interpret \033
# escapes without -e, so the original printed literal backslash codes
# when run under bash.  printf behaves identically in sh/bash/zsh.

# Notice title (green "=> msg")
notice() { printf '\033[1;32m=> %s\033[0m\n' "$1"; }

# Error title (red "=> Error: msg")
error() { printf '\033[1;31m=> Error: %s\033[0m\n' "$1"; }

# List item (green check mark)
c_list() { printf ' \033[1;32m✔\033[0m %s\n' "$1"; }

# Error list item (red cross mark)
e_list() { printf ' \033[1;31m✖\033[0m %s\n' "$1"; }
# Check for dependency
# version_lt A B: true when dot-separated numeric version A is older
# than B.  (The original '[[ $version < $2 ]]' compared lexically and
# misordered e.g. "1.10" vs "1.8".)
version_lt() {
  local IFS=.
  local -a have=($1) want=($2)
  local i x y
  for (( i = 0; i < ${#have[@]} || i < ${#want[@]}; i++ )); do
    x=${have[i]:-0}
    y=${want[i]:-0}
    # 10# forces base-10 so leading zeros are not parsed as octal.
    (( 10#$x < 10#$y )) && return 0
    (( 10#$x > 10#$y )) && return 1
  done
  return 1
}

# dep COMMAND MIN_VERSION
# Verify COMMAND is installed at version >= MIN_VERSION; on failure,
# append a human-readable message to the global 'missing' array.
function dep {
  local installed=true
  local msg=""           # initialise so a caller's 'msg' cannot leak in
  local version

  # Check installed
  type -p "$1" &> /dev/null || installed=false

  # Check version
  if $installed; then
    version=$("$1" --version | grep -oE -m 1 "[[:digit:]]+\.[[:digit:]]+\.?[[:digit:]]?")
    version_lt "$version" "$2" && msg="$1 version installed: $version, version needed: $2"
  else
    msg="Missing $1"
  fi

  # Save if dep not met; quoted so the message stays one array element
  # (unquoted, it word-split into several bogus entries).
  if [ -n "$msg" ]; then
    missing+=("$msg")
  fi
}
# --- INIT --- #
# Remember where we were started from so we can return at the end.
current_pwd=$(pwd)
# Accumulates unmet-dependency messages (filled in by dep()).
missing=()

# --- Check deps --- #
notice "Checking dependencies"
dep "git" "1.7"
dep "ruby" "1.8"
dep "vim" "7.3"

# Report every unmet dependency, then bail out.
if [ "${#missing[@]}" -gt "0" ]; then
  error "Missing dependencies"
  for need in "${missing[@]}"; do
    e_list "$need."
  done
  exit 1
fi

# Assumes ~/.dotfiles is *ours*
if [ -d ~/.dotfiles ]; then
  # --- Update Repo --- #
  notice "Updating"
  cd ~/.dotfiles
  git pull origin master
  git submodule init
  git submodule update
  git submodule foreach -q git pull -q origin master
  # --- Install --- #
else
  # --- Clone Repo --- #
  # NOTE(review): the unauthenticated git:// protocol is no longer
  # served by GitHub -- this clone URL likely needs https://.
  notice "Downloading"
  git clone --recursive git://github.com/thomwiggers/dotfiles.git ~/.dotfiles
fi

# --- Finished --- #
cd $current_pwd
notice "Done. Now run ~/.dotfiles/rake install"
| true
|
433fd6befb38f67c27340b9e6180663cb99e71a9
|
Shell
|
adityasaraswat0035/azcli
|
/ssh_rule.sh
|
UTF-8
| 4,964
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/zsh
#################################################
# Add SSH allow/deny rules to running VMs
# It assumes that the NSG is in the same RG as the VM
# It creates the rules with prio 100
#
# Jose Moreno, March 2021
#################################################
# Function to inject a deny rule for SSH
# Reads the global $vm_list (tab-separated "name<TAB>resourceGroup"
# lines) and, for each VM, adds Deny rules for SSH (port 22, priority
# $rule_prio) and RDP (port 3389, priority $rule_prio+1) to the NSG
# attached to the VM's first NIC.  Assumes the NSG lives in the same
# resource group as the VM.  Globals read: vm_list, rule_prefix, rule_prio.
function deny_ssh () {
    while IFS= read -r vm; do
        # Split the "name<TAB>rg" record.
        ssh_vm_name=$(echo $vm | cut -f1 -d$'\t')
        ssh_rg=$(echo $vm | cut -f2 -d$'\t')
        echo "Getting NSG for VM $ssh_vm_name in RG $ssh_rg..."
        # Discover the NSG through the VM's first NIC.
        ssh_nic_id=$(az vm show -n $ssh_vm_name -g $ssh_rg --query 'networkProfile.networkInterfaces[0].id' -o tsv)
        ssh_nsg_id=$(az network nic show --ids $ssh_nic_id --query 'networkSecurityGroup.id' -o tsv)
        if [[ -z "$ssh_nsg_id" ]]
        then
            echo "No NSG could be found for NIC $ssh_nic_id"
        else
            ssh_nsg_name=$(basename $ssh_nsg_id)
            echo "Adding SSH-deny rule to NSG $ssh_nsg_name for VM $ssh_vm_name in RG $ssh_rg..."
            az network nsg rule create -n "${rule_prefix}SSH" --nsg-name $ssh_nsg_name -g $ssh_rg --priority $rule_prio --destination-port-ranges 22 --access Deny --protocol Tcp -o none
            az network nsg rule create -n "${rule_prefix}RDP" --nsg-name $ssh_nsg_name -g $ssh_rg --priority $(($rule_prio+1)) --destination-port-ranges 3389 --access Deny --protocol Tcp -o none
        fi
    done <<< "$vm_list"
}
# Function to inject an allow rule for SSH
# Mirror image of deny_ssh: for every VM in the global $vm_list, adds
# Allow rules for SSH (22) and RDP (3389) to the NSG on the VM's first
# NIC, using the same rule names/priorities so they overwrite any
# previously injected Deny rules.
function allow_ssh () {
    while IFS= read -r vm; do
        # Split the "name<TAB>rg" record.
        ssh_vm_name=$(echo $vm | cut -f1 -d$'\t')
        ssh_rg=$(echo $vm | cut -f2 -d$'\t')
        echo "Getting NSG for VM $ssh_vm_name in RG $ssh_rg..."
        ssh_nic_id=$(az vm show -n $ssh_vm_name -g $ssh_rg --query 'networkProfile.networkInterfaces[0].id' -o tsv)
        ssh_nsg_id=$(az network nic show --ids $ssh_nic_id --query 'networkSecurityGroup.id' -o tsv)
        if [[ -z "$ssh_nsg_id" ]]
        then
            echo "No NSG could be found for NIC $ssh_nic_id"
        else
            ssh_nsg_name=$(basename $ssh_nsg_id)
            echo "Adding SSH-allow rule to NSG $ssh_nsg_name for VM $ssh_vm_name in RG $ssh_rg..."
            az network nsg rule create -n "${rule_prefix}SSH" --nsg-name $ssh_nsg_name -g $ssh_rg --priority $rule_prio --destination-port-ranges 22 --access Allow --protocol Tcp -o none
            az network nsg rule create -n "${rule_prefix}RDP" --nsg-name $ssh_nsg_name -g $ssh_rg --priority $(($rule_prio+1)) --destination-port-ranges 3389 --access Allow --protocol Tcp -o none
        fi
    done <<< "$vm_list"
}
# Function to delete the injected SSH/RDP rules
# (the original header comment said "inject an allow rule" -- a copy/paste
# leftover).  For every VM in the global $vm_list, removes the
# ${rule_prefix}SSH and ${rule_prefix}RDP rules from the NSG on the VM's
# first NIC.
function delete_ssh_rule () {
    while IFS= read -r vm; do
        # Split the "name<TAB>rg" record.
        ssh_vm_name=$(echo $vm | cut -f1 -d$'\t')
        ssh_rg=$(echo $vm | cut -f2 -d$'\t')
        echo "Getting NSG for VM $ssh_vm_name in RG $ssh_rg..."
        ssh_nic_id=$(az vm show -n $ssh_vm_name -g $ssh_rg --query 'networkProfile.networkInterfaces[0].id' -o tsv)
        ssh_nsg_id=$(az network nic show --ids $ssh_nic_id --query 'networkSecurityGroup.id' -o tsv)
        if [[ -z "$ssh_nsg_id" ]]
        then
            echo "No NSG could be found for NIC $ssh_nic_id"
        else
            ssh_nsg_name=$(basename $ssh_nsg_id)
            echo "Deleting SSH-allow rule from NSG $ssh_nsg_name for VM $ssh_vm_name in RG $ssh_rg..."
            az network nsg rule delete -n "${rule_prefix}SSH" --nsg-name $ssh_nsg_name -g $ssh_rg -o none
            az network nsg rule delete -n "${rule_prefix}RDP" --nsg-name $ssh_nsg_name -g $ssh_rg -o none
        fi
    done <<< "$vm_list"
}
# Variables
# Injected rules are named "<prefix>SSH"/"<prefix>RDP" at these priorities.
rule_prefix=auto
rule_prio=100

# Get arguments (only -g=/--resource-group= and -a=/--action= are known).
scope_rg=''
action=''
for i in "$@"
do
    case $i in
        -g=*|--resource-group=*)
        scope_rg="${i#*=}"
        shift # past argument=value
        ;;
        -a=*|--action=*)
        action="${i#*=}"
        shift # past argument=value
        ;;
    esac
done
# NOTE(review): POSITIONAL is never populated above, so this restores an
# empty argument list -- leftover from a copied parsing template.
set -- "${POSITIONAL[@]}" # restore positional parameters

# Check there is an action
if [[ -z "$action" ]]
then
    echo "ERROR: You need to specify an action with -a/--action, and optionally a resource group with -g/--resource-group"
    exit 1
fi

# Create VM list: running VMs in the subscription, optionally limited to
# one resource group; output is tab-separated "name<TAB>resourceGroup".
subscription=$(az account show --query name -o tsv)
if [[ -z $scope_rg ]]
then
    echo "Getting the list of VMs powered on in subscription $subscription..."
    vm_list=$(az vm list -o tsv -d --query "[?powerState=='VM running'].[name,resourceGroup]")
else
    echo "Getting the list of VMs powered on in subscription $subscription and resource group $scope_rg..."
    vm_list=$(az vm list -g $scope_rg -o tsv -d --query "[?powerState=='VM running'].[name,resourceGroup]")
fi
echo "$(echo $vm_list | wc -l) VMs found"

# Run action (accepts several spellings per action).
case $action in
    allow|Allow|permit|Permit)
        allow_ssh
        ;;
    deny|Deny|drop|Drop)
        deny_ssh
        ;;
    delete|remove)
        delete_ssh_rule
        ;;
esac
| true
|
8abadf1cc03b6b588111acb5c8a7c7835b08e576
|
Shell
|
schmunsler/personal-scripts
|
/dolphin
|
UTF-8
| 1,269
| 3.90625
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# wrapper for dolphin to make it open new tabs instead of windows
# place somewhere earlier in your path than your current dolphin (e.g.$HOME/bin)
# usage: dolphin [URL(s)]
# source: https://github.com/schmunsler/personal-scripts

# if Dolphin is already running
# NOTE(review): the $(qdbus | grep ...) result is unquoted inside [ ];
# if more than one matching D-Bus service exists this becomes a
# multi-word test and errors -- confirm only one service can match.
if [ ! -z $(qdbus | grep "org.kde.dolphin") ]; then
	# D-Bus address of the first Dolphin main window.
	bus="qdbus org.kde.dolphin /dolphin/Dolphin_1"
	# open each argument in a new tab
	for arg in "$@"; do
		d=$(realpath "$arg") # convert relative paths to absolute
		# ignore non-directory args for now
		if [ -d "$d" ]; then
			# entering critical section, lock to make operation atomic
			# see: http://stackoverflow.com/a/169969
			set -e
			(
				# fd 200 is bound to the lock file by the redirection
				# below; wait up to 10s for an exclusive lock so
				# concurrent invocations can't interleave tab/url calls.
				flock -x -w 10 200
				# open a new tab
				$bus org.kde.KMainWindow.activateAction new_tab >/dev/null
				# change the url to the argument
				$bus org.kde.dolphin.MainWindow.changeUrl "$d" >/dev/null
			) 200>/var/lock/.dolphin.lock
		fi
	done
	# raise the window
	$bus com.trolltech.Qt.QWidget.showNormal >/dev/null
	$bus com.trolltech.Qt.QWidget.raise >/dev/null
else
	# no running instance, start normally
	/usr/bin/dolphin "$@"
fi
| true
|
8e1e4e9b38c17d420f33109a72598602f6cbabf2
|
Shell
|
aNNufriy/postgres-training
|
/runEx.sh
|
UTF-8
| 142
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every SQL file of the given exercise directory against the 'test'
# database inside the docker-compose "postgresql" service.
#
# Usage: ./runEx.sh <exercise-dir>
[ $# -eq 1 ] || { echo "No arguments supplied" >&2; exit 1; }
docker-compose exec postgresql su - postgres -c "cat /exercises/$1/*.sql | psql test"
| true
|
813373f4642d15a653c1a0dfa4f679929d37ecfe
|
Shell
|
jhenkins/hostfw
|
/host/ipset/getallip.sh
|
UTF-8
| 457
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#===============================================================================
#
#          FILE: getallip.sh
#
#         USAGE: ./getallip.sh
#
#   DESCRIPTION: Get all IP addresses, including lo
#
#===============================================================================

set -euo pipefail
IFS=$'\n\t'

# Print every address reported by 'ip addr' (CIDR suffix stripped),
# skipping the libvirt bridge.  The original looped over the same
# pipeline and piped each value through 'cut -f 2', which is a no-op for
# values without a tab; the loop is dropped and the addresses printed
# directly, yielding identical output.
ip addr | grep "inet " | egrep -v "virbr0" | awk '{print $2}' | cut -f 1 -d "/"
| true
|
203d81b8bb356e1b7633646c5773351a32a54573
|
Shell
|
rexcontrols/rex-install-rpi
|
/script-pifacedigital
|
UTF-8
| 966
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# REXYGEN installer
# HW specific operations - PiFace Digital
# Enables SPI in /boot/config.txt (required by the PiFace Digital board),
# then prompts for the reboot needed to apply it.

# Must run as root: we edit /boot/config.txt and may reboot.
if [ "$EUID" -ne 0 ]
then echo "Please run this script as root"
    exit
fi

#--- PiFace Digital specific operations ---
# Back up the boot config before touching it.
cp /boot/config.txt /boot/config.txt.rexbak
# Append dtparam=spi=on only if no such line exists yet (idempotent).
if ! grep -q '^dtparam=spi=on' /boot/config.txt
then echo 'dtparam=spi=on' >> /boot/config.txt
fi

#--- Final reboot ---
echo ' '
echo 'Runtime modules of REXYGEN for Raspberry Pi with PiFace Digital add-on board were successfully installed.'
echo ' '
echo '!!! REBOOT IS REQUIRED !!!'
echo ' '
echo 'Afterwards you will be able to program your Raspberry Pi using REXYGEN Studio.'
echo ' '

# Print the IP address
_IP=$(hostname -I) || true
if [ "$_IP" ]; then
    printf "The IP address of this device is %s\n" "$_IP"
fi
echo ' '

# Single-keypress confirmation; REPLY holds the typed character.
read -p "Is it OK to reboot now? [y/N] " -n 1 -r
echo ' '
if [[ $REPLY =~ ^[Yy]$ ]]
then
    reboot
else
    echo 'Remember to reboot your Raspberry Pi at your earliest convenience.'
fi
echo ' '
| true
|
10f31d7c1dadbc3980037c5f6bcb8461a7ee7306
|
Shell
|
nickmccurdy/danamccurdy.com
|
/bin/normalize_dates
|
UTF-8
| 317
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Renames all posts so that dates are in the YYYY-MM-DD format with consistent leading zeros.
#
# Uses BSD date(1)'s -j/-f flags and BSD grep's \d escape, so this is
# macOS-specific.

for file in "$(dirname "$0")"/../_posts/*.md; do
  old_date=$(echo "$file" | grep -Eo '\d{2,4}-\d{1,2}-\d{1,2}')
  # Skip files whose name carries no recognisable date (previously these
  # produced a date(1) parse error and a bogus mv).
  [ -n "$old_date" ] || continue
  new_date=$(date -jf %F +%F "$old_date")
  # mv errors out when source and destination are identical, so only
  # rename when normalisation actually changed the date.
  if [ "$old_date" != "$new_date" ]; then
    mv "$file" "${file/$old_date/$new_date}"
  fi
done
| true
|
b5921415a4a410be2efb0ef584bafdfa0da99eb0
|
Shell
|
freebsd/freebsd-ports
|
/textproc/jarnal/files/patch-jarnal-open-file.sh
|
UTF-8
| 892
| 2.5625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
--- jarnal-open-file.sh.orig 2014-02-20 07:10:52.000000000 +0100
+++ jarnal-open-file.sh 2014-04-28 21:58:13.000000000 +0200
@@ -1,19 +1,20 @@
-#! /bin/bash
+#! /bin/sh
-SOURCE=$0
-echo $SOURCE
-while [ -h "$SOURCE" ]; do
- DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
- SOURCE="$(readlink "$SOURCE")"
- [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
-done
-DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
-cd $DIR
+#SOURCE=$0
+#echo $SOURCE
+#while [ -h "$SOURCE" ]; do
+# DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+# SOURCE="$(readlink "$SOURCE")"
+# [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
+#done
+#DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+#cd $DIR
+cd %%JAVAJARDIR%%
if [ -d upgrade-lib ]; then
cp -fr lib old-lib
cp -fr upgrade-lib lib
- rm -fr upgrade-lib
+ rm -fr upgrade-lib
fi
if [ -f upgrade-jarnal.jar ]; then
cp -f jarnal.jar old-jarnal.jar
| true
|
7a733285450b52638532510b147199b1e995f29f
|
Shell
|
smitelli/sodasrv-client
|
/scripts/alarm.sh
|
UTF-8
| 165
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Poll the soda server's alarm page; when it reports "1", play the alarm
# sound in the background.

status=$(wget -q -O - "http://alala.smitelli.com/soda/?page=alarm")

if [ "$status" == "1" ]; then
	play -v 1.5 /root/sodasrv/alarm.wav &
fi
| true
|
5ab4c083f467addb36b69f5eb156a0a96922c9e2
|
Shell
|
jefflund/dot
|
/linkdot
|
UTF-8
| 549
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Symlink every file under <repo>/files into $HOME as a dot-file,
# mirroring the directory layout (files/foo/bar -> ~/.foo/bar).

linkdot_path=$(realpath "$0")
dotfile_root=$(dirname "$linkdot_path")/files

# create dot directories
# NUL-delimited iteration: the original $(find ...) loops word-split
# paths on whitespace and glob-expanded them.
while IFS= read -r -d '' dotdir; do
    mkdir -p "${dotdir/$dotfile_root\//$HOME/.}"
done < <(find "$dotfile_root" -mindepth 1 -type d -print0)

# symlink dot files
while IFS= read -r -d '' dotfile; do
    dest=${dotfile/$dotfile_root\//$HOME/.}
    if [ ! -f "$dest" ]; then
        echo "linking $dotfile => $dest"
        ln -sf "$dotfile" "$dest"
    elif [ "$(readlink -- "$dest")" != "$dotfile" ]; then
        echo "could not link $dotfile => $dest"
    fi
done < <(find "$dotfile_root" -mindepth 1 -type f -print0)
| true
|
813dcf261d249ee1f33e6c43b7b2ab28332a2563
|
Shell
|
danieldreier/puppet_installer
|
/files/download.sh
|
UTF-8
| 4,313
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# this hasn't been added yet; I'm just working on different ways to download
# stuff that I can later incorporate into the main script
# exists COMMAND
# Succeed (and announce "found COMMAND") when COMMAND resolves to
# anything the shell can execute; fail silently otherwise.
exists() {
  type "$1" >/dev/null 2>&1 || return 1
  echo "found $1"
}
# Abort the whole script: no download method produced a usable package.
unable_to_retrieve_package() {
  echo "Unable to retrieve a valid package!"
  exit 1
}
# download URL FILENAME
# Fetch URL into FILENAME using the first available transfer tool.
download() {
  echo "downloading $1"
  echo " to file $2"

  if [ -e "$2" ]; then
    echo "Error: File $2 already exists"
    return 1
  fi

  # Try each fetcher in order of preference.  A tool that is present but
  # fails to produce a valid file does NOT stop the chain -- the next
  # available tool gets its turn, exactly like the original if-cascade.
  local tool
  for tool in wget curl perl python ruby bash; do
    if exists "$tool"; then
      "${tool}_download" "$1" "$2" && return 0
    fi
  done

  unable_to_retrieve_package
}
# validate_download FILE
# A download is valid when FILE is a regular file with nonzero size.
validate_download() {
  [ -s "$1" ] && [ -f "$1" ]
}
# curl_download URL FILENAME
# Fetch URL into FILENAME with curl; succeed only if the result validates.
curl_download() {
  echo "trying curl..."
  curl --output "$2" "$1" || return 1
  validate_download "$2"
}
# wget_download URL FILENAME
# Fetch URL into FILENAME with wget; succeed only if the result validates.
wget_download() {
  echo "trying wget..."
  wget --output-document "$2" "$1" || return 1
  validate_download "$2"
}
# python_download URL FILENAME
# Fetch URL into FILENAME using Python's urllib2.
# NOTE(review): 'import urllib2' requires Python 2 -- confirm what
# 'python' resolves to on the target hosts.
python_download() {
  echo "trying python..."
  python -c "import sys,urllib2 ; sys.stdout.write(urllib2.urlopen(sys.argv[1]).read())" "$1" > "$2" 2>/tmp/stderr
  rc=$?

  # check for 404 (redirection order fixed: the original
  # 'grep ... 2>&1 >/dev/null' silenced only grep's stdout and left its
  # stderr on the terminal)
  grep "HTTP Error 404" /tmp/stderr >/dev/null 2>&1
  if test $? -eq 0; then
    echo "ERROR 404"
    unable_to_retrieve_package
  fi

  # check for bad return status or empty output
  if test $rc -ne 0 || test ! -s "$2"; then
    return 1
  fi

  # A stray 'return 0' used to sit here, making this validation
  # unreachable; run it, matching perl_download's structure.
  validate_download "$2" || return 1
  return 0
}
# perl_download URL FILENAME
# Fetch URL into FILENAME with LWP::Simple, distinguishing a hard 404
# (fatal via unable_to_retrieve_package) from other failures (return 1
# so the caller can try the next tool).
perl_download() {
  echo "trying perl..."
  # getprint writes the body to stdout (redirected into FILENAME) and
  # error diagnostics to stderr, captured in /tmp/stderr for inspection.
  # NOTE(review): /tmp/stderr is a fixed, predictable path -- racy if
  # two instances run at once; consider mktemp.
  perl -e 'use LWP::Simple; getprint($ARGV[0]);' "$1" > "$2" 2>/tmp/stderr
  rc=$?

  # check for 404
  # NOTE(review): '2>&1 >/dev/null' discards only grep's stdout; its
  # stderr still reaches the terminal -- likely meant '>/dev/null 2>&1'.
  grep "404 Not Found" /tmp/stderr 2>&1 >/dev/null
  if test $? -eq 0; then
    echo "ERROR 404"
    unable_to_retrieve_package
  fi

  # check for bad return status or empty output
  if test $rc -ne 0 || test ! -s "$2"; then
    return 1
  fi

  validate_download "$2" || return 1
  return 0
}
# ruby_download URL FILENAME
# Fetch URL into FILENAME with Ruby's open-uri.
ruby_download() {
  echo "trying ruby..."
  # Pass the URL and filename via ARGV instead of interpolating them
  # into the -e program source: a quote or #{ } in either value would
  # otherwise break out of (or inject into) the Ruby code.
  ruby -e "require 'open-uri'; File.open(ARGV[1], 'w') do |file| file.write(open(ARGV[0]).read) end" "$1" "$2"
  validate_download "$2" || return 1
  return 0
}
# bash_download URL FILENAME
# Last-resort fetcher using bash's /dev/tcp pseudo-device -- no external
# download tool needed.  Plain HTTP only (no TLS), and the body is taken
# as everything after the "Connection: close" header line.
bash_download() {
  # /dev/tcp is a bash feature; bail out under other shells.
  [ -n "$BASH" ] || return 1
  # pretty epic bashism, copied verbatim from
  # http://unix.stackexchange.com/questions/83926/how-to-download-a-file-using-just-bash-and-nothing-else-no-curl-wget-perl-et
  function __wget() {
    : ${DEBUG:=0}
    local URL=$1
    local tag="Connection: close"
    local mark=0

    if [ -z "${URL}" ]; then
      printf "Usage: %s \"URL\" [e.g.: %s http://www.google.com/]" \
      "${FUNCNAME[0]}" "${FUNCNAME[0]}"
      return 1;
    fi
    # Crude URL parse: replace '/' with spaces, then read the pieces.
    read proto server path <<<$(echo ${URL//// })
    DOC=/${path// //}
    HOST=${server//:*}
    PORT=${server//*:}
    # No ':' in server means HOST==PORT after the substitutions; default 80.
    [[ x"${HOST}" == x"${PORT}" ]] && PORT=80
    [[ $DEBUG -eq 1 ]] && echo "HOST=$HOST"
    [[ $DEBUG -eq 1 ]] && echo "PORT=$PORT"
    [[ $DEBUG -eq 1 ]] && echo "DOC =$DOC"

    # fd 3 becomes a bidirectional TCP socket to HOST:PORT.
    exec 3<>/dev/tcp/${HOST}/$PORT
    echo -en "GET ${DOC} HTTP/1.1\r\nHost: ${HOST}\r\n${tag}\r\n\r\n" >&3
    # Echo every line after the "Connection: close" header, i.e. the body.
    # NOTE(review): 'echo $line' is unquoted, so body whitespace is
    # collapsed -- binary/whitespace-sensitive payloads will be mangled.
    while read line; do
      [[ $mark -eq 1 ]] && echo $line
      if [[ "${line}" =~ "${tag}" ]]; then
        mark=1
      fi
    done <&3
    exec 3>&-
  }
  __wget "$1" > "$2"
  validate_download "$2" || return 1
  return 0
}
# other ideas:
# - use rsync
# - use openssl
# - use netcat
# - ksh tcp port
# - zsh tcp port http://web-tech.ga-usa.com/2014/04/zsh-simple-network-port-checker/
# on EL, download using RPM directly
# gnu gawk https://www.gnu.org/software/gawk/manual/gawkinet/html_node/TCP-Connecting.html http://www.linuxjournal.com/article/3132
# openssh "netcat mode" http://blog.rootshell.be/2010/03/08/openssh-new-feature-netcat-mode/
# openssl client?
# use rubygems directly
# fall back to trying to install curl/wget
| true
|
2eddf4147a67d34d6bacc536c82fa5e9ee8a2a00
|
Shell
|
rlugojr/kurmaos
|
/packaging/disk-vmware/build.sh
|
UTF-8
| 1,290
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
BASE_PATH=`pwd`
set -e -x
function containers_gone_wild() {
mkdir /tmp/devices-cgroup
mount -t cgroup -o devices none /tmp/devices-cgroup
echo 'a' > /tmp/devices-cgroup/instance-$(hostname)/devices.allow
# create loopback devices
for i in $(seq 64 67); do
mknod -m 0660 /dev/loop$i b 7 $i
done
}
function salt_earth() {
  # Detach the loopback devices created by containers_gone_wild; failures
  # are ignored so cleanup is safe even when a device was never attached.
  for i in 64 65 66 67; do
    losetup -d "/dev/loop${i}" > /dev/null 2>&1 || true
  done
}
if ! ls -1 /dev/loop* ; then
containers_gone_wild
trap salt_earth EXIT
fi
gunzip -k kurmaos-disk-image/kurmaos-disk.img.gz
# Mount the disk and copy in the OEM grub.cfg
cd kurmaos-source/packaging
./lib/disk_util --disk_layout=base mount $BASE_PATH/kurmaos-disk-image/kurmaos-disk.img /tmp/rootfs
cp $BASE_PATH/kurmaos-source/packaging/disk-vmware/oem-grub.cfg /tmp/rootfs/boot/oem/grub.cfg
./lib/disk_util umount /tmp/rootfs
# Convert the image
cd $BASE_PATH
qemu-img convert -f raw kurmaos-disk-image/kurmaos-disk.img -O vmdk -o adapter_type=lsilogic kurmaos.vmdk
# remove intermediate files to speed up concourse post-build ops
rm kurmaos-disk-image/kurmaos-disk.img
# Package it up
cp kurmaos-source/packaging/disk-vmware/kurmaos.vmx kurmaos.vmx
cp kurmaos-source/LICENSE LICENSE
zip kurmaos.zip LICENSE kurmaos.vmx kurmaos.vmdk
| true
|
95b51f76a6d55cdb906c08074443d80ceaf8fb4b
|
Shell
|
spikegrobstein/RetroPie-Setup
|
/scriptmodules/supplementary/xboxdrv.sh
|
UTF-8
| 676
| 2.953125
| 3
|
[] |
no_license
|
rp_module_id="xboxdrv"
rp_module_desc="Install XBox contr. 360 driver"
rp_module_menus="3+"
function install_xboxdrv() {
  # Install the xboxdrv package and register the daemon in /etc/rc.local.
  rps_checkNeededPackages xboxdrv
  # Append the daemon invocation only once (skip if already present).
  if ! grep -q "xboxdrv" /etc/rc.local; then
    sed -i -e '13,$ s|exit 0|xboxdrv --daemon --id 0 --led 2 --deadzone 4000 --silent --trigger-as-button --next-controller --id 1 --led 3 --deadzone 4000 --silent --trigger-as-button --dbus disabled --detach-kernel-driver \&\nexit 0|g' /etc/rc.local
  fi
  ensureKeyValueBootconfig "dwc_otg.speed" "1" "/boot/config.txt"
  dialog --backtitle "$__backtitle" --msgbox "Installed xboxdrv and adapted /etc/rc.local. It will be started on boot." 22 76
}
| true
|
4a68403cf95aa269f0a66939d96a6cf971e02a45
|
Shell
|
san650/bashtap
|
/test/fixtures/red.sh
|
UTF-8
| 256
| 2.65625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
PWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=bashtap.bash
source "${PWD}/../../bashtap.bash"
plan
spec "fails"
expect << EOT
echo hello world
EOT
to_output << EOT
this is not a hello world
EOT
finish
| true
|
9cc43ee670f4f200479ad4adcfea3d80cef24c3d
|
Shell
|
OttawaCloudConsulting/aws-capture-prescribed-infrastructure
|
/capture-awspbmmaccelerator-prescribed-infrastructure.sh
|
UTF-8
| 5,781
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Static variables
parameterbasename='/default/base/' # Expected name = /default/base/${resource-type}-${tag}-${id-type}
# Functions - Modular
function list-subnets-all-ids (){
aws ec2 describe-subnets \
--query 'Subnets[*].[SubnetId]' \
--output text
}
function list-subnets-all-name-tags () {
aws ec2 describe-subnets \
--query 'Subnets[*].Tags[?Key==`Name`].Value[]' \
--output text
}
function describe-subnet-by-tag () {
aws ec2 describe-subnets \
--filters Name=tag:Name,Values=$1 \
--output json
}
# Create an SSM String parameter named $1 with value $2.
# NOTE(review): every call site in this script invokes 'create-new--parameter'
# (double dash), which does not match this definition — confirm which spelling
# is intended; as written, the calls fail with "command not found".
function create-new-parameter () {
aws ssm put-parameter \
--name $1 \
--type "String" \
--value $2 \
--output text
}
function list-all-vpc-name-tags () {
aws ec2 describe-vpcs --query 'Vpcs[*].Tags[?Key==`Name`].Value[]' \
--output text
}
function list-all-cmk-key-aliases () {
aws kms list-aliases \
| jq '.Aliases[] | select(.AliasName | contains("aws") |not )'
}
function get-egress-ip () {
dig -4 +short myip.opendns.com @resolver1.opendns.com.
}
# Functions - Core
function export-subnet-resources () {
  # For every subnet Name tag, publish its key attributes to SSM Parameter Store
  # under ${parameterbasename}subnet-<zone>_<az>-*.
  SUBNETLIST=$(list-subnets-all-name-tags)
  for name in $SUBNETLIST;
  do
    # Disassemble metadata from tag (expected shape: ZONE_ENV_AZ)
    IFS='_' read -r -a subnettag <<< "$name"
    tagzone=${subnettag[0]}
    tagenv=${subnettag[1]}
    tagaz=${subnettag[2]}
    # Create store name
    prefix=$tagzone"_"$tagaz
    parameterprefix=$(echo $prefix | awk '{print tolower($0)}' )
    parametername=$parameterbasename"subnet-$parameterprefix"
    # Capture subnet info as a comma-joined list of the six fields below
    describesubnet=$(describe-subnet-by-tag $name \
    | jq -c '.Subnets[]| .AvailabilityZone,.AvailabilityZoneId,.AvailableIpAddressCount,.CidrBlock,.SubnetId,.SubnetArn' \
    | sed 's/["]//g')
    compoundvalue=$(echo $describesubnet | sed -e "s/ /,/g")
    IFS=',' read -r -a describedsubnet <<< "$compoundvalue"
    subnetaz=${describedsubnet[0]}
    subnetazid=${describedsubnet[1]}
    subnetavailableipcount=${describedsubnet[2]}
    subnetcidrblock=${describedsubnet[3]}
    subnetid=${describedsubnet[4]}
    subnetarn=${describedsubnet[5]}
    # Put Parameter to Parameter Store
    # BUG FIX: these previously called 'create-new--parameter' (double dash),
    # which is undefined; the helper defined above is 'create-new-parameter'.
    create-new-parameter $parametername-az $subnetaz
    create-new-parameter $parametername-azid $subnetazid
    create-new-parameter $parametername-availableipcount $subnetavailableipcount
    create-new-parameter $parametername-cidrblock $subnetcidrblock
    create-new-parameter $parametername-id $subnetid
    create-new-parameter $parametername-arn $subnetarn
  done
}
function export-vpc-resources () {
  # For every VPC Name tag, publish its VpcId and CIDR block to Parameter Store.
  VPCLIST=$(list-all-vpc-name-tags)
  for name in $VPCLIST
  do
    # Disassemble metadata from tag (expected shape: ENV_VPC)
    IFS='_' read -r -a vpctag <<< "$name"
    tagenv=${vpctag[0]}
    tagvpc=${vpctag[1]}
    # Create store name
    prefix=$tagvpc
    echo "Prefix: $tagvpc"
    parameterprefix=$(echo $prefix | awk '{print tolower($0)}' )
    parametername=$parameterbasename"$parameterprefix"
    # Capture VPC info
    describevpc=$(aws ec2 describe-vpcs --filters Name=tag:Name,Values=$name --query 'Vpcs[*].[VpcId,CidrBlock]' --output text)
    compoundvalue=$(echo $describevpc | sed -e "s/ /,/g")
    IFS=',' read -r -a describedvpc <<< "$compoundvalue"
    vpcid=${describedvpc[0]}
    vpccidrblock=${describedvpc[1]}
    # Put Parameter to Parameter Store
    # BUG FIX: was 'create-new--parameter' (double dash), which is undefined.
    create-new-parameter $parametername-vpcid $vpcid
    create-new-parameter $parametername-cidrblock $vpccidrblock
  done
}
function export-cmk-resources () {
  # Publish alias, ARN and key id for every customer-managed KMS key.
  KEYLIST=$(list-all-cmk-key-aliases | jq '.TargetKeyId'| sed 's/"//g')
  for keyid in $KEYLIST;
  do
    # Capture KMS CMK Key Data
    keyalias=$(aws kms list-aliases --key-id $keyid --query 'Aliases[].AliasName' --output text)
    keyarn=$(aws kms describe-key --key-id $keyid --query 'KeyMetadata.Arn' --output text)
    # Strip 'alias', slashes, and a 9-char suffix to derive a short key name.
    keyname=$(echo $keyalias | sed 's/alias//g' | sed 's|/||g' | sed 's/alias//g' | sed -r 's/.{9}$//' )
    # Create store name
    parameterprefix=$(echo $keyname | awk '{print tolower($0)}' )
    parametername=$parameterbasename"kms-$parameterprefix"
    # BUG FIX: was 'create-new--parameter' (double dash), which is undefined.
    create-new-parameter $parametername-alias $keyalias
    create-new-parameter $parametername-arn $keyarn
    create-new-parameter $parametername-keyid $keyid
  done
}
function export-loadbalancer-resources () {
  # Publish the ARN, DNS name and name of the (single expected) ALB/NLB.
  lbarn=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[*].LoadBalancerArn' --output text)
  lbdnsname=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[*].DNSName' --output text)
  lbname=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[*].LoadBalancerName' --output text)
  # Create store name
  prefix='alb'
  parameterprefix=$(echo $prefix | awk '{print tolower($0)}' )
  parametername=$parameterbasename"elb-$parameterprefix"
  # Put Parameter to Parameter Store
  # BUG FIX: was 'create-new--parameter' (double dash), which is undefined.
  create-new-parameter $parametername-arn $lbarn
  create-new-parameter $parametername-dnsname $lbdnsname
  create-new-parameter $parametername-lbname $lbname
}
function export-egress-ip () {
  # Publish this host's public egress IP (as seen by OpenDNS) to Parameter Store.
  egressip=$(get-egress-ip)
  # Create store name
  prefix='egress-ip'
  parameterprefix=$(echo $prefix | awk '{print tolower($0)}' )
  parametername=$parameterbasename"$parameterprefix"
  # Put Parameter to Parameter Store
  # BUG FIX: was 'create-new--parameter' (double dash), which is undefined.
  create-new-parameter $parametername $egressip
}
# Script Execution
export-subnet-resources
export-vpc-resources
export-cmk-resources
export-loadbalancer-resources
export-egress-ip
exit 0
| true
|
d44979b6a0ab379b454d83ee779c6ef918d7ccca
|
Shell
|
iter-yangxingya/linux
|
/assembly/cpuid/clean.sh
|
UTF-8
| 185
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
echo "begin cleanning cpuid build's object..."
list="cpuid cpuid.o"
for out in $list; do
if [ -f $out ]; then
rm -f $out
fi
done
echo "clean done!"
exit 0
| true
|
9c7c9e104726a3b1810ff2b4498e249187be5c8b
|
Shell
|
motoakama/pxe
|
/pxe.sh
|
UTF-8
| 8,996
| 3.640625
| 4
|
[] |
no_license
|
#/bin/bash
#############################
echo "##############################################"
echo "## 一键部署pxe脚本 使用注意事项 ###"
echo "## 1:确保本地yum源设置ok ###"
echo "## 2:将镜像挂载到/mnt目录下 ###"
echo "## 3:所有服务器业务网ip相通 ###"
echo "## 4:会自动使用root目录下的 ###"
echo "## anaconda-ks.cfg应答文件, ###"
echo "## 其他服务器会按照本机模板批量安装 ###"
echo "## 5:仅在bc7.6、centos7.7测试过 ###"
echo "##############################################"
echo ''
#############设置dhcp服务端
read -p '请输入本机业务网IP:' ip
if [ $(echo $ip |awk -F. '{print NF}') -ne 4 ];then
echo "输入ip错误,请重新输入"
exit 0
fi
subnet=${ip%%$(echo $ip |awk -F. '{print $4}')}"0"
range1=${ip%%$(echo $ip |awk -F. '{print $4}')}"10"
range2=${ip%%$(echo $ip |awk -F. '{print $4}')}"254"
###################### 设置pxe部署方式: bios启动 usef启动 ####已优化为自适应启动方式
<<COMMENT
n=1
cfg='pxelinux.0'
while [ $n -eq 1 ]
do
read -p "请输入pxe部署启动方式 1.uefi 2.bios 请选择1或者2:" start
if [ $start -eq 1 ];then
cfg="bootx64.efi"
n=0
elif [ $start -eq 2 ];then
cfg='pxelinux.0'
n=0
else
echo "输入错误"
fi
done
COMMENT
###########安装pxe需要的基础服务 dhcp xinetd tftp-server syslinux vsftpd tcpdump
yum install -y dhcp xinetd tftp-server syslinux vsftpd tcpdump >/dev/null
echo "-----------------检查是否安装完成----------------"
for i in {'dhcp','xinetd','tftp-server','vsftpd','syslinux','tcpdump'}
do
rpm -q $i>/dev/null
if [ $? -eq 0 ];then
echo -e " $i \033[32m 安装成功 \033[0m "
else
echo -e " $i \033[31m 未安装 \033[0m "
fi
done
echo "-------------------全部安装完成-----------------"
echo ''
#echo "-------------------开始设置dhcp服务-------------"
cat>/etc/dhcp/dhcpd.conf<<EOF
log-facility local7;
allow booting;
allow bootp;
option space pxelinux;
option pxelinux.magic code 208 = string;
option pxelinux.configfile code 209 = text;
option pxelinux.pathprefix code 210 = text;
option pxelinux.reboottime code 211 = unsigned integer 32;
option architecture-type code 93 = unsigned integer 16;
subnet $subnet netmask 255.255.255.0 {
range $range1 $range2;
option routers $ip;
next-server $ip;
if option architecture-type = 00:07 or
option architecture-type = 00:09 {
filename "bootx64.efi";
}
else {
filename "pxelinux.0";
}
}
EOF
##判断dhcp服务起来没有#############
systemctl restart dhcpd>/dev/null
systemctl enable xinetd>/dev/null
systemctl status dhcpd>/dev/null
if [ $? -eq 0 ];then
echo -e " dhcp服务 \033[32m 启动成功 \033[0m "
else
echo -e " dhcp服务 \033[31m 启动失败 \033[0m "
fi
#####设置xinetd服务#################
sed -i 's/^.*disable.*$/disable=no/g' /etc/xinetd.d/tftp
grep 'disable=no' /etc/xinetd.d/tftp>/dev/null
if [ $? -eq 0 ];then
echo -e " xintd服务 \033[32m 设置成功 \033[0m "
else
echo -e " xinetd服务 \033[31m 设置失败 \033[0m "
fi
systemctl restart xinetd>/dev/null
systemctl enable xinetd>/dev/null
systemctl status xinetd>/dev/null
if [ $? -eq 0 ];then
echo -e "xinetd服务 \033[32m 启动成功 \033[0m "
else
echo -e "xinetd服务 \033[31m 启动失败 \033[0m "
fi
###############设置syslinux服务 设置tftp,准备linux内核 初始化镜像文件###########
cp /usr/share/syslinux/pxelinux.0 /var/lib/tftpboot
cp /mnt/images/pxeboot/{vmlinuz,initrd.img} /var/lib/tftpboot
cp /mnt/isolinux/{vesamenu.c32,boot.msg} /var/lib/tftpboot
cp /mnt/EFI/BOOT/BOOTX64.EFI /var/lib/tftpboot/bootx64.efi
cp /mnt/EFI/BOOT/grub.cfg /var/lib/tftpboot/grub.cfg
cp /mnt/EFI/BOOT/grubx64.efi /var/lib/tftpboot/grubx64.efi
mkdir -p /var/lib/tftpboot/pxelinux.cfg
cp /mnt/isolinux/isolinux.cfg /var/lib/tftpboot/pxelinux.cfg/default
num=$(ls /var/lib/tftpboot |wc -l)
if [ $num -gt 6 ];then
echo -e "\033[32m 引导文件已成功复制到tftp目录下 \033[0m "
else
echo -e "\033[31m 引导文件复制到tftp目录失败 \033[0m "
fi
systemctl restart tftp>/dev/null
systemctl enable tftp>/dev/null
systemctl status tftp>/dev/null
if [ $? -eq 0 ];then
echo -e " tftp服务 \033[32m 启动成功 \033[0m "
else
echo -e " tftp服务 \033[31m 启动失败 \033[0m "
fi
#######bios启动 修改pxelinux.cfg文件 ################
sed -i '1c default linux' /var/lib/tftpboot/pxelinux.cfg/default
sed -i '2c timeout 5' /var/lib/tftpboot/pxelinux.cfg/default
sed -i "0,/^.*append.*$/s// append initrd=initrd.img inst.stage2=ftp:\/\/$ip ks=ftp:\/\/$ip\/pub\/ks.cfg quiet /" /var/lib/tftpboot/pxelinux.cfg/default
grep "$ip" /var/lib/tftpboot/pxelinux.cfg/default>/dev/null
if [ $? -eq 0 ];then
echo -e " bios引导文件 \033[32m 设置成功 \033[0m "
else
echo -e " bios引导文件 \033[31m 设置失败 \033[0m "
fi
#######uefi启动 修改grub.cfg文件
sed -i 's/default="1"/default="0"/g' /var/lib/tftpboot/grub.cfg
sed -i 's/timeout=60/timeout=5/g' /var/lib/tftpboot/grub.cfg
sed -i "0,/^.*linuxefi.*$/s// linuxefi (tftp)\/vmlinuz inst.repo=ftp:\/\/$ip ks=ftp:\/\/$ip\/pub\/ks.cfg ip=dhcp /" /var/lib/tftpboot/grub.cfg
sed -i "0,/^.*initrdefi.*$/s// initrdefi (tftp)\/initrd.img /" /var/lib/tftpboot/grub.cfg
grep '(tftp)/initrd.img ' /var/lib/tftpboot/grub.cfg>/dev/null
if [ $? -eq 0 ];then
echo -e " uefi引导文件 \033[32m 设置成功 \033[0m "
else
echo -e " uefi引导文件 \033[31m 设置失败 \033[0m "
fi
############################复制镜像到ftp目录下##############################
# Copy the mounted ISO into the FTP root (skipped when already populated).
ftp_num=$(ls /var/ftp |wc -l)
if [ $ftp_num -gt 4 ];then
echo -e "\033[32m 镜像文件已成功复制到ftp目录下 \033[0m "
else
# Copy in the background, then poll with a spinner until 'cp -r' exits.
nohup cp -r /mnt/* /var/ftp/>/dev/null 2>&1 &
sleep 3
while :
do
# BUG FIX: output was redirected to '/dev/mull' (typo), creating a stray
# file and leaking the ps output; it now goes to /dev/null.
ps -ef |grep 'cp -r' |grep -v 'grep'>/dev/null
if [ $? -eq 0 ];then
echo -ne '\r'
echo -ne '--正 \r'
#sleep 1
echo -ne '----正在 \r'
sleep 1
echo -ne '------正在复 \r'
#sleep 1
echo -ne '--------正在复制 \r'
sleep 1
echo -ne '----------正在复制镜 \r'
#sleep 1
echo -ne '------------正在复制镜像 \r'
sleep 1
echo -ne '--------------正在复制镜像到 \r'
#sleep 1
echo -ne '----------------正在复制镜像到ftp录 \r'
sleep 1
echo -ne '------------------正在复制镜像到ftp目录 \r'
sleep 1
echo -ne ' \r'
else
break
fi
done
# Re-check: more than 4 entries in /var/ftp means the copy succeeded.
ftp_num=$(ls /var/ftp |wc -l)
if [ $ftp_num -gt 4 ];then
echo -e "\033[32m 镜像文件已成功复制到ftp目录下 \033[0m "
else
echo -e "\033[31m 镜像文件复制到ftp目录失败 \033[0m "
fi
fi
systemctl restart vsftpd>/dev/null
systemctl enable vsftpd>/dev/null
systemctl status vsftpd>/dev/null
if [ $? -eq 0 ];then
echo -e " ftp服务 \033[32m 启动成功 \033[0m "
else
echo -e " ftp服务 \033[31m 启动失败 \033[0m "
fi
#################################设置应答文件###################
if [ ! -d "/var/ftp/pub" ]; then
mkdir -p /var/ftp/pub
fi
cp /root/anaconda-ks.cfg /var/ftp/pub/ks.cfg
chmod +r /var/ftp/pub/ks.cfg
sed -i "s/cdrom/url --url=ftp:\/\/$ip/g" /var/ftp/pub/ks.cfg
sed -i "s/# System timezone/reboot/g" /var/ftp/pub/ks.cfg
sed -i "s/--none/--all/g" /var/ftp/pub/ks.cfg
sed -i "s/^graphical.*$/text/g" /var/ftp/pub/ks.cfg
grep 'reboot' /var/ftp/pub/ks.cfg >/dev/null
if [ $? -eq 0 ];then
echo -e " 应答文件 \033[32m 设置成功 \033[0m "
else
echo -e " 应答文件 \033[31m 设置失败 \033[0m "
fi
####################################检查所有服务是否启动成功#############################
echo "-----------------检查所有服务是否启动----------------"
for i in {'dhcpd','xinetd','tftp','vsftpd'}
do
systemctl status $i>/dev/null
if [ $? -eq 0 ];then
echo -e " $i \033[32m 启动成功 \033[0m "
else
echo -e " $i \033[31m 启动失败 \033[0m "
fi
done
systemctl stop firewalld>/dev/null
systemctl disable firewalld>/dev/null
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
chmod -R 755 /var/ftp
chmod -R 755 /var/lib/tftpboot
echo -e " 防火墙 \033[32m 关闭成功 \033[0m "
echo "-------------------pxe服务端部署完成-----------------"
| true
|
56314859c59fd8d9b71f410258869def441422cd
|
Shell
|
zloidemon/configs
|
/HOME/shell/func/setup_scm
|
UTF-8
| 1,650
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# -*- coding: utf-8 -*-
# vim: set et sw=4 ts=4:
function _setup_scm() {
local _SCM_CONFIG_MERCURIAL=${HOME}/.hgrc
local _SCM_CONFIG_GIT=${HOME}/.gitconfig
local _X_USER_RESOURCES=${HOME}/.Xresources
if [ ${_F_SETUP_EXV} -eq 1 ]
then
local _SCM_FIRSTNAME=$(extract_xresources_value scm.firstname)
local _SCM_LASTNAME=$(extract_xresources_value scm.lastname)
local _SCM_EMAIL=$(extract_xresources_value scm.email)
if [ "x${_SCM_FIRSTNAME}" != "x" ] && \
[ "x${_SCM_LASTNAME}" != "x" ] && \
[ "x${_SCM_EMAIL}" != "x" ]
then
local _SCM_FULLNAME="${_SCM_FIRSTNAME} ${_SCM_LASTNAME} <${_SCM_EMAIL}>"
else
echo "[setup] SCM settings not defined"
return 1
fi
else
echo "[setup] exv not found"
return 1
fi
if ! [ -f ${_SCM_CONFIG_MERCURIAL} ]
then
cat <<EOF > ${_SCM_CONFIG_MERCURIAL}
[ui]
username = ${_SCM_FULLNAME}
EOF
else
echo "[setup] file ${_SCM_CONFIG_MERCURIAL} already exist"
fi
if ! [ -f ${_SCM_CONFIG_GIT} ]
then
cat <<EOF > ${_SCM_CONFIG_GIT}
[user]
name = ${_SCM_FIRSTNAME} ${_SCM_LASTNAME}
email = ${_SCM_EMAIL}
[pull]
rebase = false
[color]
ui = true
branch = auto
diff = auto
status = auto
[color "branch"]
current = yellow reverse
local = yellow
remote = green
[color "diff"]
meta = cyan
frag = magenta
old = cyan
new = yellow
[color "status"]
added = green
changed = yellow
untracked = yellow
[diff "sopsdiffer"]
textconv = sops -d
[init]
defaultBranch = master
EOF
else
echo "[setup] file ${_SCM_CONFIG_GIT} already exist"
fi
}
_F_SETUP_SCM=1
| true
|
9742a408c7944aaca98819b9367359af33627d9d
|
Shell
|
tszare/sdk
|
/dahua_sdk_x64/run.sh
|
UTF-8
| 1,405
| 3.15625
| 3
|
[] |
no_license
|
#! /bin/bash
basepath=$(cd `dirname $0`; pwd)
cd $basepath
CURR_PATH=`pwd ./`
echo $CURR_PATH
JARFILE=client_video.jar
_obf_jar() {
mvn clean compile test-compile jar:jar -Ppro -Dmaven.test.skip=true
java -jar _obf/allatori.jar _obf/_config.xml
cp ./target/obf-client_video.jar ./target/client_video.jar
}
remote_dir() {
  # Verify that path $2 exists on host $1 (ssh port $3, default 22);
  # logs the result and aborts the script when the path is missing.
  REMOTE_HOST=$1
  REMOTE_DIR=$2
  # Default the port to 22 when the third argument is empty or unset.
  REMOTE_PORT=${3:-22}
  if ssh -p $REMOTE_PORT $REMOTE_HOST test -e $REMOTE_DIR ; then
    echo "["$(date '+%Y-%m-%d %H:%M:%S')"]" $REMOTE_HOST":"$REMOTE_DIR" EXIST";
  else
    echo "["$(date '+%Y-%m-%d %H:%M:%S')"]" $REMOTE_HOST":"$REMOTE_DIR" NOT_EXIST";
    exit 1;
  fi
}
if [ "$1" = "lib" ]; then
rm -rf ./lib/*
mvn clean dependency:copy-dependencies -DoutputDirectory=lib
elif [ "$1" = "start" ]; then
java -Xmx512M -cp "./target/classes:./lib/*" regis.DahuaApplication
##<&- 1>/dev/null 2>&1 &
elif [ "$1" = "test" ]; then
mvn clean compile test-compile jar:jar -Ppro -Dmaven.test.skip=true
REMOTE_DIR="~/Test_DPSDK_Java_win64"
rsync -aPv --delete lib/ lucas@192.168.249.48:$REMOTE_DIR/lib
rsync target/dahua_sdk_x64.jar lucas@192.168.249.48:$REMOTE_DIR/
else
mvn eclipse:clean eclipse:eclipse -DdownloadSources=true
#mvn eclipse:clean eclipse:eclipse
#mvn clean compile test-compile
fi
| true
|
5f75b641438b4df349d6acf7338c32b371b9a0c6
|
Shell
|
twitter/dodo
|
/bin/gpg-init
|
UTF-8
| 1,312
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# shellcheck disable=SC1091
# shellcheck disable=SC1090
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/functions"
#######################################################################
# Script for initializing gpg with PGP_SECRET env var
#
# Does not take arguments.
#######################################################################
VERSION="22.12.0"
NAME="Dodo GPG Init"
# BEGIN: EXECUTE BUILD ----------------------------------------------------------------------------------
SECONDS=0
gpg --version
# DO NOT leak out the $PGP_SECRET in the output!
printf -- "%s" "$PGP_SECRET" | gpg --import --no-tty --batch --yes
printf "%s" "default-key $PGP_KEY" > ~/.gnupg/gpg.conf
duration="$SECONDS"
FORMATTED=$(date "+%YT%H:%M:%S%z")
log "info" "------------------------------------------------------------------------"
log "info" "GPG INIT SUCCESS"
log "info" "------------------------------------------------------------------------"
log "info" "Total time: $duration s"
log "info" "Finished at: $FORMATTED"
log "info" "Twitter $NAME version v$VERSION"
log "info" "------------------------------------------------------------------------"
# END: EXECUTE BUILD ------------------------------------------------------------------------------------
| true
|
7ae04b52973917b093bf7ac4464a3cf8f9e4cd3d
|
Shell
|
myousefa/ScriptingLanguages
|
/MidtermPractice/q2.sh
|
UTF-8
| 323
| 3.359375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
#Problem2
# Find all awk scripts under the current directory and change the #! line from /usr/local/bin/gawk
# to /usr/bin/gawk
find . -iname '*.awk' -print |
while read -r filename; do
  # BUG FIX: without -i, sed only printed the rewritten file to stdout and the
  # file on disk was never changed; -i edits in place, as the task requires.
  # Also: read -r preserves backslashes and "$filename" survives spaces.
  sed -i -e "s/#! *\/usr\/local\/bin\/gawk/#!\/usr\/bin\/gawk/g" "$filename"
done
|
55a52d7560f4b3203b5d72e358f903ad89729f02
|
Shell
|
diogoab/escamboapp
|
/deploy.sh
|
UTF-8
| 1,323
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Created By Diogo A. M. Barbosa 2018
# Update System
sudo apt-get update
# Install packages
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
software-properties-common -y
# Download Key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Insert the Key
sudo apt-key fingerprint 0EBFCD88
# Insert Repo
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# Update system
sudo apt-get update
# Install Docker
sudo apt-get install docker-ce -y
# Download Package
sudo curl -L https://github.com/docker/compose/releases/download/1.21.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
# Initial package
sudo chmod +x /usr/local/bin/docker-compose
# Build first stack
docker-compose run web rails new . --force --database=mysql
# Build new Docker image
docker-compose build
# setting permissions
sudo chown -R root:root .
# edit this file database.yml
mv config/database.yml config/database.old
sed 's/localhost/db/g' config/database.old > config/database.new
sed 's/password:/password: "root"/g' config/database.new > config/database.yml
# up stack
docker-compose up
# create database
docker-compose run web rake db:create
docker-compose up -d
| true
|
8de17795f6be4c3015769f88d41c0187fabddaf6
|
Shell
|
apatlpo/mit_equinox
|
/launch/dask.pbs
|
UTF-8
| 1,985
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -N dask_pbs
#PBS -q mpi_8
#PBS -l select=8:ncpus=28:mem=100g
#PBS -l walltime=24:00:00
# Qsub template for datarmor
# Scheduler: PBS
# This writes a scheduler.json file into your home directory
# You can then connect with the following Python code
# >>> from dask.distributed import Client
# >>> client = Client(scheduler_file='scheduler.json')
#Environment sourcing
ENV_SOURCE="source ~/.bashrc; export PATH=$HOME/.miniconda3/bin:$PATH; source activate equinox"
echo $PBS_O_WORKDIR # dir where pbs script was submitted
#SCHEDULER="$PBS_O_WORKDIR/scheduler.json"
SCHEDULER="$DATAWORK/dask/scheduler.json"
echo $SCHEDULER
rm -f $SCHEDULER
#Options
export OMP_NUM_THREADS=1
export NUMEXPR_NUM_THREADS=1
export MKL_NUM_THREADS=1
export OPENBLAS_NUM_THREADS=1
echo $NCPUS
# can't have less than 28 cpus
NCPUS=14
MEMORY_LIMIT="100e9"
INTERFACE="--interface ib0 "
# Run Dask Scheduler
echo "*** Launching Dask Scheduler ***"
pbsdsh -n 0 -- /bin/bash -c "$ENV_SOURCE; dask-scheduler $INTERFACE --scheduler-file $SCHEDULER > $PBS_O_WORKDIR/$PBS_JOBID-scheduler-$PBS_TASKNUM.log 2>&1;"&
#Number of chunks
nbNodes=`cat $PBS_NODEFILE | wc -l`
echo "*** Starting Workers on Other $nbNodes Nodes ***"
for ((i=1; i<$nbNodes; i+=1)); do
pbsdsh -n ${i} -- /bin/bash -c "$ENV_SOURCE; dask-worker $INTERFACE --scheduler-file $SCHEDULER --nthreads $NCPUS --memory-limit $MEMORY_LIMIT --local-directory $TMPDIR --name worker-${i};"&
#pbsdsh -n ${i} -- /bin/bash -c "$ENV_SOURCE; dask-worker $INTERFACE --scheduler-file $PBS_O_WORKDIR/scheduler.json --nthreads $NCPUS --memory-limit $MEMORY_LIMIT --local-directory $TMPDIR --name worker-${i};"&
done
# BUG FIX: 'SNODE = $(...)' is not a shell assignment (spaces around '=' make
# the shell try to execute a command named SNODE); also take only the first
# node, which is where the scheduler was launched above.
SNODE=$(uniq "$PBS_NODEFILE" | head -n 1)
#echo "scheduler node should be: $SNODE" > "$PBS_O_WORKDIR/$PBS_JOBID.scheduler"
cat $PBS_NODEFILE | uniq > "$PBS_O_WORKDIR/$PBS_JOBID.nodefile"
echo "*** Dask cluster is starting ***"
#Either sleep or wait if just starting a cluster
#Or launch a dask app here
sleep 86400
|
0fd24558c89815646bad8dfc03d4ec3db4d761fe
|
Shell
|
trashcatt/dots
|
/scripts/xbp.sh
|
UTF-8
| 330
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# xbps shortcuts
case "$1" in
  i)
    xbps-install "${@:2}"
    ;;
  s)
    xbps-query -Rs "${@:2}"
    ;;
  r)
    xbps-remove "${@:2}"
    ;;
  *)
    printf "you didn't provide any argument you dumbfuck.
usage: xbp [options]
options:
i (install)
s (search)
r (remove)\n"
    exit 1
    ;;
esac
| true
|
534b63f13c3911ea77912b007fae77da33616000
|
Shell
|
dejanpan/mapping-private
|
/color_feature_classification/scripts/demos_artificial/test_all.sh
|
UTF-8
| 480
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
DATA=`rospack find color_feature_classification`/demos_artificial
for rot in 0 1
do
mkdir -p $DATA/data_result_$rot
for nlevel in 0.0005 0.0010 0.0015 0.0020 0.0025 0.0030 0.0035 0.0040 0.0045 0.0050
do
for x in a #b c d e
do
bash `rospack find color_feature_classification`/scripts/demos_artificial/2.test_classify.sh $DATA/hist_data_forSVM/test_features_${nlevel}${x}_$rot > $DATA/data_result_$rot/result_${nlevel}_${x}.txt
done
done
done
| true
|
78841299a1e7371285a80a1ff123004cda30d7d1
|
Shell
|
omerel/Course_linux
|
/linux_project/perl/start
|
UTF-8
| 726
| 3.03125
| 3
|
[] |
no_license
|
#! /bin/bash
# ===================================================
# Linux project - Analysis system performnece
# Omer Elgrably 021590807 & Rami Kashi 12345678
# help from : http://canvasjs.com/ ,
# http://stackoveflow.com,
# course material
# ===================================================
echo "Hello , please wait 60 seconds to diganose your pc perfomence"
# open a new terminal (thread) and start create the html file with analysis
gnome-terminal -e ./execute_command
# count 60 seconds
for(( i = 60 ; i >= 0 ; i-- ))
do
sleep 1
echo -n "$i "
done
# open new firefox window
echo "You are moving to the analysis webpage..."
/usr/bin/firefox -new-window file:///home/omer/Downloads/linux_project/index.html
| true
|
87ae9e0116dd4aa46d1639a97bac2d9668564cb6
|
Shell
|
rcbau/openstack-ci-tools
|
/setup_volume.sh
|
UTF-8
| 428
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Install the volume-mount hook and carve /dev/xvdb into LVM volumes.
cp etc/volumes /etc/network/if-up.d/
# BUG FIX: the execute bit must go on the installed hook script itself;
# chmod'ing only the directory left the hook non-executable.
chmod ugo+rx /etc/network/if-up.d/volumes
pvcreate /dev/xvdb
vgcreate srv /dev/xvdb
lvcreate -L99G -nsrv srv
lvcreate -L100G -nmysql srv
mkfs.ext4 /dev/mapper/srv-srv
mkfs.ext4 /dev/mapper/srv-mysql
# First run may fail before /srv/mysql exists; tolerate it, then retry.
set +e
/etc/network/if-up.d/volumes
set -e
mkdir /srv/mysql
/etc/network/if-up.d/volumes
df -h
chown -R mikal.mikal /srv
mkdir /srv/git
mkdir /srv/git-checkouts
mkdir /srv/logs
| true
|
e749bbf9ff15488924304c09325fb4f9d9022489
|
Shell
|
kun0769/shell100
|
/76.sh
|
UTF-8
| 702
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
#program:
#   log in to every remote machine listed (ip,user,password) in ip-pwd.ini
#   and kill its tomcat process via an expect script
#
#history:
#2020/02/24    kun   V1.0
# BUG FIX: inside double quotes, $2 was expanded by the *local* shell to an
# empty positional parameter, so the remote awk printed whole lines instead
# of the PID column; \$2 sends a literal $2 to the remote awk.
cmd="ps aux |grep tomcat |grep -v grep |awk '{print \$2}' |xargs kill"
# Quoted "EOF": the expect script is written out literally, unexpanded.
cat > kill_tomcat.exp <<"EOF"
#!/usr/bin/expect
set host [lindex $argv 0]
set passwd [lindex $argv 1]
set cmd [lindex $argv 2]
spawn ssh root@$host
expect {
"yes/no" {send "yes\r"}
"password:" {send "$passwd\r"}
}
expect "]*"
send "$cmd\r"
expect "]*"
send "exit\r"
EOF
chmod a+x kill_tomcat.exp
cat ip-pwd.ini |while read line
do
host=`echo $line |awk -F ',' '{print $1}'`
pw=`echo $line |awk -F ',' '{print $3}'`
# BUG FIX: $cmd must be quoted — unquoted it word-splits and the expect
# script would only receive "ps" as [lindex $argv 2].
./kill_tomcat.exp $host $pw "$cmd"
done
| true
|
dbce7879e9a27e8b57d80b5c448b8efb788fc5b3
|
Shell
|
hydrz/bulk_delete_git_hub_repos
|
/run.sh
|
UTF-8
| 410
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Personal access token used to authenticate the DELETE calls.
# NOTE(review): avoid hardcoding secrets — prefer reading this from the
# environment or a credentials file.
GITHUB_SECRET="your-github-personal-access-token"
# "owner/name" repositories to delete.
REPOS=(
"username/repo1"
"username/repo2"
"username/repo3"
"username/repo4"
)
function git_repo_delete() {
  # DELETE the given "owner/repo" via the GitHub REST API, pretty-printing
  # the JSON response with jq.
  curl -vL \
    -X DELETE \
    -H "Authorization: token $GITHUB_SECRET" \
    -H "Content-Type: application/json" \
    https://api.github.com/repos/$1 |
    jq .
}
# BUG FIX: in bash, '$REPOS' expands to only the first array element;
# "${REPOS[@]}" iterates over every configured repository.
for repo in "${REPOS[@]}"; do (git_repo_delete "$repo"); done
| true
|
01797eb745d0a1e2020b462b40ff132b4fb7d277
|
Shell
|
yuzeng2333/lab3
|
/tests/convert
|
UTF-8
| 426
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the output directories on first run.
for d in dep bin dump vmh; do
  if [ ! -d "$d" ]; then
    mkdir "$d"
  fi
done
mv *.d dep/
mv riscv* bin/
# Disassemble each binary and convert the dump to a .vmh memory image.
for x in bin/*;do
  riscv32-unknown-elf-objdump -EL -sz --section=.xcpthandler --section=.text --section=.data $x > $x.dump
  python ../scripts/objdump2vmh.py $x.dump > $x.vmh;
done
mv bin/*.dump dump/
mv bin/*.vmh vmh/
| true
|
22f32b44d16cf0ec2b285b895ef55e76e3e25e60
|
Shell
|
kenamick/savagewheels
|
/savagewheels.in
|
UTF-8
| 1,476
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Launcher wrapper for savagewheels: resolves data/config directories
# (build-time placeholders are substituted by the build system), creates the
# per-user directories on first run, seeds the default key bindings, then
# replaces itself with the real binary.
export SAVAGEWHEELS_SYS_DATADIR="${SAVAGEWHEELS_SYS_DATADIR-@INSTALL_DATADIR@}"
export SAVAGEWHEELS_LIBEXECDIR="${SAVAGEWHEELS_LIBEXECDIR-@INSTALL_LIBEXECDIR@}"
export SAVAGEWHEELS_USR_CONFDIR="${SAVAGEWHEELS_USR_CONFDIR-${HOME}/.config/savagewheels}"
export SAVAGEWHEELS_USR_DATADIR="${SAVAGEWHEELS_USR_DATADIR-${HOME}/.local/share/savagewheels}"

# Ensure $1 exists as a directory; abort with a message otherwise.
# Paths are passed to printf as %s arguments (never as the format string,
# which the original did — a format-string hazard for paths containing '%').
ensure_dir() {
  if [ ! -d "$1" ]; then
    if [ -e "$1" ]; then
      printf "%s exists but is not a directory.\n" "$1" >&2
      printf " Please remove.\n" >&2
      exit 1
    fi
    mkdir -p "$1"
    if [ $? -ne 0 ]; then
      printf "Failed to create %s\n" "$1" >&2
      exit 1
    fi
  fi
}

ensure_dir "${SAVAGEWHEELS_USR_CONFDIR}"
ensure_dir "${SAVAGEWHEELS_USR_DATADIR}"

# Seed default key bindings on first run only (never overwrite user edits).
if [ ! -e "${SAVAGEWHEELS_USR_CONFDIR}"/bindings.xml ]; then
  cp "${SAVAGEWHEELS_SYS_DATADIR}"/bindings.xml "${SAVAGEWHEELS_USR_CONFDIR}"
  if [ $? -ne 0 ]; then
    printf "Failed to create %s/bindings.xml\n" "${SAVAGEWHEELS_USR_CONFDIR}" >&2
    exit 1
  fi
fi

exec "${SAVAGEWHEELS_LIBEXECDIR}"/savagewheels "$@"
| true
|
afb9222c595aad35341c5ba448265d53403475d7
|
Shell
|
emad-elsaid/live
|
/bin/deploy
|
UTF-8
| 898
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the "live" app: re-clone the given branch on the server, restore the
# environment file, then rebuild and restart the requested compose services.
# Usage: deploy <branch> <user@server-ip> services
# example: deploy master root@123.123.123.123 web
set -e

BRANCH=$1
SERVER=$2
SERVICES=${@:3}
APP=/root/projects/live
REPO=git@github.com:emad-elsaid/live.git
ENVFILE=/root/env/live/.env

# Run a command on the server from inside the app checkout.
sshin() {
  ssh -o LogLevel=QUIET -t $SERVER "cd $APP; $@"
}

# Run a command on the server (no cd); thin wrapper to keep flags in one place.
remote() {
  ssh -o LogLevel=QUIET -t $SERVER "$@"
}

printf '%s\n' "[*] Deleting old files"
remote rm -rf $APP
printf '%s\n' "[*] Clone branch"
remote git clone --depth=1 --branch $BRANCH $REPO $APP
printf '%s\n' "[*] Copy .env file"
remote cp $ENVFILE $APP/.env
printf '%s\n' "[*] Pulling new docker images"
sshin docker-compose pull
printf '%s\n' "[*] Building images"
sshin docker-compose build $SERVICES
printf '%s\n' "[*] Stop old containers"
sshin docker-compose stop $SERVICES
printf '%s\n' "[*] Bring up new containers"
sshin docker-compose up -d $SERVICES
printf '%s\n' "[*] Clean docker"
sshin docker system prune
| true
|
c9226f671d701254a847e07c7dd50a653da49e02
|
Shell
|
qubenix/qubes-tools
|
/qubes-update-all
|
UTF-8
| 11,447
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
while [[ "${#}" -gt "0" ]]; do
case "${1}" in
--all|-A)
autoclean="1"
autoremove="1"
trim="1"
upgrade_dom0="1"
shift
;;
--autoclean|-ac)
autoclean="1"
shift
;;
--autoremove|-ar)
autoremove="1"
shift
;;
--dom0|-d)
upgrade_dom0="1"
shift
;;
--help|-h)
printf "%s\n" "
Usage: "${0}" [options]
Options:
--all, -A = all options
--autoclean, -ac = autoclean
--autoremove, -ar = autoremove
--dom0, -d = upgrade dom0
--trim, -t = trim
--help, -h = help menu
"
exit 0
;;
--trim|-t)
trim="1"
shift
;;
*)
break
;;
esac
done
## Set update VM.
## If empty, the gateway of the first TemplateVM (in alphabetical order) will be used.
updatevm="gateway-update"
## Log output dir and file.
## If left empty a dir and file will be created in current dir.
logdir="/home/user/update-all-templates-log/"
logfile="/home/user/update-all-templates-log/update-all-templates.log"
## Fall back to computed defaults for anything left empty above.
if [[ -z "${updatevm}" ]]; then
  updatevm=$(qvm-ls --raw-data -O name,class,netvm | grep "TemplateVM" | grep -v \|\- | head -1 | cut -d"|" -f 3)
fi
[[ -n "${logdir}" ]] || logdir="$(pwd)/qubes-update-all-log/"
[[ -n "${logfile}" ]] || logfile="${logdir}"update-all-templates.log
[[ -e "${logdir}" ]] || mkdir -p "${logdir}"
## Rotate the previous log if it has content, then make sure the log exists.
if [[ -e "${logfile}" && -n $(head "${logfile}") ]]; then
  mv "${logfile}" "${logfile}".old
fi
touch "${logfile}"
## Print enabled options.
clear
printf "%s\n" "Updating all TemplateVMs and StandaloneVMs.
Options enabled:"
# Same output, same order as before; ${!opt} reads each flag by name.
for opt in autoremove autoclean trim upgrade_dom0; do
  case "${opt}" in
    autoremove)   label="AUTOREMOVE" ;;
    autoclean)    label="AUTOCLEAN" ;;
    trim)         label="TRIM" ;;
    upgrade_dom0) label="UPGRADE DOM0" ;;
  esac
  if [[ "${!opt}" -eq "1" ]]; then
    printf "[+] %s ENABLED\n" "${label}"
  fi
done
## Start update VM, wait for Tor.
## Brings up the update netvm, then blocks until its Tor client logs a
## "Bootstrapped 100%" line dated today (UTC), restarting Tor on stalls.
printf "\nStarting update VM and waiting for Tor to connect...\n\n"
## Remember whether the update VM was already up, so the epilogue only shuts
## down what this script itself started.
if [[ $(qvm-ls --raw-data -O state "${updatevm}" | grep -c "Running") -ne "1" ]]; then
updatevm_was_running="0"
else
updatevm_was_running="1"
fi
qvm-start -q --skip-if-running "${updatevm}"
## tor_count: seconds waited since last (re)start; tor_restart_count: forced restarts.
tor_count="0"
tor_restart_count="0"
## Poll once per second for today's bootstrap-complete entry in Tor's log.
while [[ $(qvm-run -u root -p "${updatevm}" 'grep "$(date -u +%b\ %d)" /var/log/tor/log' | grep -c -e "Bootstrapped 100%") -lt "1" ]]; do
sleep 1
tor_count=$((tor_count+1))
## If the VM was already running, kick Tor once after 30s without progress
## (its existing circuit may be stale).
if [[ "${updatevm_was_running}" -eq "1" && "${tor_count}" -eq "30" ]]; then
qvm-run -u root -p "${updatevm}" 'systemctl restart tor@default.service'
tor_count="0"
fi
## After 3 minutes with no progress, restart Tor; give up after 5 restarts.
if [[ "${tor_count}" -ge "180" ]]; then
tor_restart_count=$((tor_restart_count+1))
printf "\n[!][!] RESTARTING TOR IN GATEWAY-UPDATE. ATTEMPT: "${tor_restart_count}" / 5 [!][!]\n\n"
qvm-run -u root -p "${updatevm}" 'systemctl restart tor@default.service'
tor_count="0"
if [[ "${tor_restart_count}" -ge "5" ]]; then
printf "\n[!][!] COULD NOT RESTART TOR, CHECK NETWORK. EXITING. [!][!]\n" | tee -a "${logfile}"
exit 1
fi
fi
done
## Upgrade Debian based TemplateVM's.
## Selects debian/whonix templates whose netvm is a "gateway-*" qube. Each VM
## is started, apt update/dist-upgrade are run with up to 5 retries each
## (success is detected by parsing the "Exit code: N" marker each remote
## command appends to the log), then optional autoremove/autoclean/trim.
for vm in $(qvm-ls --fields name,netvm --raw-data --tags debian whonix-updatevm | grep "gateway-" | cut -d "|" -f 1 | sort); do
  printf "\n[+] Starting upgrade for VM "${vm}" at $(date +%x-%T).\n\n" | tee -a "${logfile}"
  ## Check if vm was running (so we can restore its prior state afterwards).
  if [[ $(qvm-ls --raw-data -O state "${vm}" | grep -c "Running") -ne "1" ]]; then
    vm_was_running="0"
  else
    vm_was_running="1"
  fi
  ## Start vm, wait for it.
  qvm-start -q --skip-if-running "${vm}"
  while [[ $(qvm-ls --fields name,state,class --raw-data --tags debian whonix-updatevm | grep "${vm}" | grep -c "Running") -ne "1" ]]; do
    sleep 1
  done
  ## Start apt update. Give 5 retries.
  aborted_update="0"
  update_count="0"
  qvm-run -q --nogui -p -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; timeout 10m apt update -o Languages=none -o Acquire::IndexTargets::deb::Contents-deb::DefaultEnabled=false; printf "Exit code: $?\n"' | tee -a "${logfile}"
  while [[ $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') -ne "0" ]] ; do
    update_count=$((update_count+1))
    printf "\n[!][!] UPDATE FAILED FOR: "${vm}". RETRY ATTEMPT $update_count / 5. [!][!]\n\n" | tee -a "${logfile}"
    sleep 10
    qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; timeout 10m sudo apt update -o Languages=none -o Acquire::IndexTargets::deb::Contents-deb::DefaultEnabled=false; printf "Exit code: $?\n"' | tee -a "${logfile}"
    if [[ "$update_count" -ge "5" ]]; then
      printf "\n[!][!] UPDATE FOR VM: "${vm}" WAS NOT SUCCESSFUL AFTER 5 RETRY ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
      aborted_update="1"
      break
    fi
  done
  ## Start apt dist-upgrade if update was successful. Give 5 retries.
  if [[ "$aborted_update" -eq "0" ]]; then
    aborted_upgrade="0"
    upgrade_count="0"
    qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; apt dist-upgrade -V -y -q -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -o Dpkg::Progress-Fancy="1"; printf "Exit code: $?\n"' | tee -a "${logfile}"
    while [[ $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') -ne "0" ]]; do
      upgrade_count=$((upgrade_count+1))
      printf "\n[!][!] UPGRADE FAILED FOR VM: "${vm}". RETRY ATTEMPT "${upgrade_count}" / 5. [!][!]\n\n" | tee -a "${logfile}"
      sleep 5
      qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; apt dist-upgrade -V -y -q -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -o Dpkg::Progress-Fancy="1"; printf "Exit code: $?\n"' | tee -a "${logfile}"
      if [[ "$upgrade_count" -ge "5" ]]; then
        printf "\n[!][!] UPGRADE FOR VM: "${vm}" WAS NOT SUCCESSFUL AFTER 5 RETRY ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
        aborted_upgrade="1"
        break
      fi
    done
  fi
  ## Start autoremove. Should only need one try, but we give 5 retries.
  if [[ "${aborted_update}" -eq "0" && "${aborted_upgrade}" -eq "0" && "${autoremove}" -eq "1" ]]; then
    autoremove_count="0"
    qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; apt autoremove -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
    while [[ $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') -ne "0" ]]; do
      autoremove_count=$((autoremove_count+1))
      sleep 10
      qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; apt autoremove -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
      if [[ "$autoremove_count" -ge "5" ]]; then
        printf "\n[!][!] AUTOREMOVE FOR VM: "${vm}" WAS NOT SUCCESSFUL AFTER RETRY 5 ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
        break
      fi
    done
  fi
  ## Start autoclean. Should only need one try, but we give 5 retries.
  if [[ "${aborted_update}" -eq "0" && "${aborted_upgrade}" -eq "0" && "${autoclean}" -eq "1" ]]; then
    autoclean_count="0"
    qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND=noninteractive TERM="vt100"; apt autoclean -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
    while [[ $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') != "0" ]]; do
      ## BUGFIX: was `autoclean_count=$((autoremove_count+1))`, which pinned
      ## the counter to a constant and could retry forever.
      autoclean_count=$((autoclean_count+1))
      sleep 10
      qvm-run --nogui -p -q -u root "${vm}" 'export DEBIAN_FRONTEND="noninteractive" TERM="vt100"; apt autoclean -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
      if [[ "${autoclean_count}" -ge "5" ]]; then
        printf "\n[!][!] AUTOCLEAN FOR VM: "${vm}" WAS NOT SUCCESSFUL AFTER RETRY 5 ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
        break
      fi
    done
  fi
  ## Trim vm.
  if [[ "${trim}" -eq "1" ]]; then
    qvm-run --nogui -p -q -u root "${vm}" "fstrim -v -a"
  fi
  ## Shutdown vm (or restart it if it had been running before this script).
  if [[ "${vm_was_running}" = "0" ]]; then
    qvm-shutdown -q "${vm}"
  else
    qvm-shutdown --wait --timeout 20 "${vm}"
    sleep 2s
    qvm-start "${vm}"
  fi
  printf "\n[-] Finished upgrade for VM "${vm}" at $(date +%x-%T).\n\n" | tee -a "${logfile}"
done
## Upgrade Fedora based TemplateVM's.
## Same flow as the Debian loop: start the VM, run dnf upgrade with up to 5
## retries, detecting success via the "Exit code: N" marker (or dnf's own
## "Complete!" line) at the tail of the log.
for vm in $(qvm-ls --fields name,netvm --raw-data --tags fedora | grep "gateway-" | cut -d "|" -f 1 | sort); do
  printf "\n[+] Starting upgrade for VM "${vm}" at $(date +%x-%T).\n\n" | tee -a "${logfile}"
  ## Check if vm was running (so we can restore its prior state afterwards).
  if [[ $(qvm-ls --raw-data -O state "${vm}" | grep -c "Running") -ne "1" ]]; then
    vm_was_running="0"
  else
    vm_was_running="1"
  fi
  ## Start vm, wait for it.
  qvm-start -q --skip-if-running "${vm}"
  while [[ $(qvm-ls --fields name,state,class --raw-data --tags fedora | grep "${vm}" | grep -c "Running") -ne "1" ]]; do
    sleep 1
  done
  upgrade_count="0"
  qvm-run --nogui -p -q -u root "${vm}" 'export TERM="vt100"; dnf upgrade --allowerasing --best --enablerepo=qubes-vm-r4.0-current-testing --refresh -v -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
  while [[ $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') != "0" && $(tail -1 "${logfile}" | sed 's|Exit\ code\:\ ||') != "Complete!" ]]; do
    upgrade_count=$((upgrade_count+1))
    printf "\n[!][!] UPGRADE FAILED FOR VM: "${vm}". RETRY ATTEMPT "${upgrade_count}" / 5. [!][!]\n\n" | tee -a "${logfile}"
    sleep 10
    ## BUGFIX: the retry command previously omitted the trailing
    ## `printf "Exit code: $?\n"`, so the loop condition parsed whatever
    ## arbitrary line dnf wrote last instead of a fresh status marker.
    qvm-run --nogui -p -q -u root "${vm}" 'export TERM="vt100"; dnf upgrade --allowerasing --best --enablerepo=qubes-vm-r4.0-current-testing --refresh -v -y; printf "Exit code: $?\n"' | tee -a "${logfile}"
    if [[ "${upgrade_count}" -ge "5" ]]; then
      printf "\n[!][!] UPGRADE FOR VM: "${vm}" WAS NOT SUCCESSFUL AFTER 5 ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
      break
    fi
  done
  ## Trim vm.
  if [[ "${trim}" -eq "1" ]]; then
    qvm-run --nogui -p -q -u root "${vm}" "fstrim -v -a"
  fi
  ## Shutdown vm (or restart it if it had been running before this script).
  if [[ "${vm_was_running}" = "0" ]]; then
    qvm-shutdown -q "${vm}"
  else
    qvm-shutdown --wait --timeout 20 "${vm}"
    sleep 2s
    qvm-start "${vm}"
  fi
  printf "\n[-] Finished upgrade for VM ${vm} at $(date +%x-%T).\n\n" | tee -a "${logfile}"
done
# Dom0 upgrade.
# Runs qubes-dom0-update with testing/community repos enabled, retrying until
# it succeeds or the log tail shows "Nothing to download".
if [[ "${upgrade_dom0}" -eq "1" ]]; then
printf "\n[+] Starting upgrade for dom0 at $(date +%x-%T).\n\n" | tee -a "${logfile}"
dom0update_count="0"
## pipefail so `qubes-dom0-update | tee` reports the updater's status, not tee's.
set -o pipefail
sudo qubes-dom0-update --clean --enablerepo=qubes-dom0-current-testing --enablerepo=qubes-templates-community --enablerepo=qubes-templates-itl-testing -v -y | tee -a "${logfile}"
## NOTE(review): "${?}" is the status of the pipeline above only on the FIRST
## evaluation; on re-evaluation it reflects the last command of the loop body
## (the trailing `if`, which normally returns 0), so in practice at most one
## retry runs. Fragile — confirm intent before refactoring.
while [[ "${?}" -ne "0" && $(tail -5 "${logfile}" | grep -c "Nothing to download") -lt "1" ]]; do
sudo qubes-dom0-update --enablerepo=qubes-dom0-current-testing --enablerepo=qubes-templates-community --enablerepo=qubes-templates-itl-testing -v -y | tee -a "${logfile}"
dom0update_count=$((dom0update_count+1))
if [[ "${dom0update_count}" -ge "5" ]]; then
printf "\n[!][!] UPGRADE FOR dom0 WAS NOT SUCCESSFUL AFTER 5 ATTEMPTS. ABORTING. [!][!]\n\n" | tee -a "${logfile}"
break
fi
done
set +o pipefail
printf "\n[-] Finished upgrade for dom0 at $(date +%x-%T).\n\n" | tee -a "${logfile}"
fi
## Trim dom0.
if [[ "${trim}" -eq "1" ]]; then
sudo fstrim -v /
fi
# Shutdown update vms.
# Only shut the update VM down if this script was the one that started it.
if (( updatevm_was_running == 0 )); then
  printf "\nShutting down update VMs...\n\n"
  qvm-shutdown -q --wait --timeout 20 "${updatevm}"
fi
exit 0
| true
|
b1cd04715c50e4d4fc19da4e06c570851f83f410
|
Shell
|
kzkohashi/playbook
|
/common_playbook/roles/init_amazon_linux/files/99-app-motd.j2
|
UTF-8
| 574
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dynamic MOTD for EC2: look up this instance's "Name" tag via the instance
# metadata service + AWS CLI, then print a login banner containing it.
# Region = availability zone with its trailing letter stripped (us-east-1a -> us-east-1).
region=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -e 's/.$//')
myInstanceId=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
# JMESPath query: the value of the Name tag for this instance only.
tagName=$(aws ec2 describe-instances --region ${region} --query "Reservations[].Instances[?InstanceId==\`${myInstanceId}\`].[Tags[?Key==\`Name\`].Value]" --output text)
# Unquoted EOF so ${tagName} expands inside the banner.
cat << EOF
_
/_\ _ __ _ __
//_\\\| '_ \\| '_ \\
/ _ | |_) | |_) |
\_/ \_| .__/| .__/
|_| |_|
******************************
This Server is "${tagName}"
******************************
EOF
| true
|
07635e95141a8e3946bf6e7a6eec8d5e3d6d78bc
|
Shell
|
seberm/aur-qubes-core-agent-linux
|
/PKGBUILD
|
UTF-8
| 6,934
| 2.625
| 3
|
[] |
no_license
|
# Maintainer: Otto Sabart <aur@seberm.com>
# Ref.: https://github.com/QubesOS/qubes-core-agent-linux/tree/master/archlinux
# Split package: builds three packages (core, networking, keyring) from the
# qubes-core-agent-linux sources.
pkgbase=qubes-core-agent-linux
pkgname=(qubes-vm-core qubes-vm-networking qubes-vm-keyring)
# NOTE(review): pkgname is an array, so this expansion operates on its FIRST
# element only ("qubes-vm-core", which has no "-git" to strip). The result is
# used as the source/checkout directory name — confirm this matches intent.
_gitname=${pkgname%-git*}
pkgver=4.0.61
pkgrel=17
pkgdesc="The Qubes core files for installation inside a Qubes VM."
arch=("x86_64")
url="https://github.com/QubesOS/qubes-core-agent-linux"
license=('GPL')
groups=()
makedepends=(gcc make pkg-config qubes-vm-utils qubes-libvchan qubes-db-vm qubes-vm-xen libx11 python python-setuptools lsb-release pandoc)
validpgpkeys=('0AF64C3B1F1214B38C8C57861FA2DBE674387CC3' # Otto Sabart
'0064428F455451B3EBE78A7F063938BA42CFA724' # Marek Marczykowski-Górecki
'427F11FD0FAA4B080123F01CDDFA1A3E36879494' # Qubes Master Signing Key
)
# First source is a signed git tag ("?signed" + validpgpkeys above); its
# checksum is therefore SKIP and the rest are sha512 sums of the local files.
source=(
"$_gitname::git+https://github.com/QubesOS/qubes-core-agent-linux.git?signed#tag=v${pkgver}"
PKGBUILD.qubes-ensure-lib-modules.service PKGBUILD.qubes-update-desktop-icons.hook
PKGBUILD-qubes-pacman-options.conf
PKGBUILD-qubes-repo-4.0.conf
PKGBUILD-keyring-keys
PKGBUILD-keyring-trusted
PKGBUILD-keyring-revoked
)
sha512sums=(
'SKIP'
'a120135245847c387e940024dff5b6a744b80d8863373ecfe646cb8eeedf1316e223f3b7bb75f153185cb3d9e5fed9bcc14a3cd81448dd1c2d35531c5f8c7195'
'1299ac686fa791436359ad33bb2de79f05a3c6059987b30e883a0c18bb7abaacf25ecc7ceeb762f2c1d5bcb9857aa88c106d36ca0977a2c1157bca6e3daee832'
'1b45b221f5482dd3fca65169664fc008b976904e14da883cd2d690fe0568086f3cc0a3ee1bc48bccb644c3a8627969be5a4b86bdfa0526e5415fcef6ca4742ed'
'3c7322fc5507e5ef8d3c8bbf55de2e23790142622be00aaf27ea8037dbd744895440dce814b7b4e86e9bc82be25a783fc858e86ff44b115e8330dc5580a608ad'
'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'
'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'
'9bb8027d893ea92cf85788a1389a52da0b7d49cbd355e437a278cc2de0c1f229d7cee871767ffd0eda57dca6ca8d5cc1cd453316983e4cad13d3fc373be11675'
)
# Ref.: https://github.com/QubesOS/qubes-builder/blob/master/example-configs/qubes-os-master.conf#L9
qubes_backend_vmm=xen
# Patch hard-coded tool paths for Arch's merged-bin layout, then build the
# subdirectories that need compilation.
build() {
  cd "${srcdir}/${_gitname}/"
  # Fix for network tools paths: replace absolute /sbin paths with bare names.
  local tool
  for tool in ifconfig route ethtool ip; do
    sed "s:/sbin/${tool}:${tool}:g" -i network/*
  done
  sed 's:/bin/grep:grep:g' -i network/*
  # Fix for archlinux sbindir
  sed 's:/usr/sbin/ntpdate:/usr/bin/ntpdate:g' -i qubes-rpc/sync-ntp-clock
  sed 's:/usr/sbin/qubes-firewall:/usr/bin/qubes-firewall:g' -i vm-systemd/qubes-firewall.service
  # Remove SELinux specific options from sudoers file
  sed 's:ROLE=unconfined_r TYPE=unconfined_t::g' -i misc/qubes.sudoers
  local dir
  for dir in qubes-rpc qrexec misc; do
    make BACKEND_VMM="${qubes_backend_vmm}" -C "$dir"
  done
}
#This package provides:
# * qrexec agent
# * qubes rpc scripts
# * core linux tools and scripts
# * core systemd services and drop-ins
# * basic network functionality (setting IP address, DNS, default gateway)
package_qubes-vm-core() {
depends=(qubes-vm-utils python-xdg ethtool ntp net-tools
gnome-packagekit imagemagick fakeroot notification-daemon dconf
zenity qubes-libvchan qubes-db-vm haveged python-gobject
python-dbus xdg-utils notification-daemon gawk sed procps-ng librsvg
socat pacman-contrib
# Block updating if there is a major python update as the python API will be in the wrong PYTHONPATH
'python<3.10'
)
optdepends=(gnome-keyring gnome-settings-daemon python-nautilus gpk-update-viewer qubes-vm-networking qubes-vm-keyring)
install=PKGBUILD.install
cd "${srcdir}/${_gitname}/"
# Note: Archlinux removed use of directory such as /sbin /bin /usr/sbin (https://mailman.archlinux.org/pipermail/arch-dev-public/2012-March/022625.html)
# shellcheck disable=SC2154
make -C qrexec install DESTDIR="$pkgdir" SBINDIR=/usr/bin LIBDIR=/usr/lib SYSLIBDIR=/usr/lib
make install-corevm DESTDIR="$pkgdir" SBINDIR=/usr/bin LIBDIR=/usr/lib SYSLIBDIR=/usr/lib SYSTEM_DROPIN_DIR=/usr/lib/systemd/system USER_DROPIN_DIR=/usr/lib/systemd/user DIST=archlinux
# Remove things non wanted in archlinux (yum/dnf config, SysV init scripts).
rm -r "$pkgdir/etc/yum"*
rm -r "$pkgdir/etc/dnf"*
rm -r "$pkgdir/etc/init.d"
# Remove fedora specific scripts
rm "$pkgdir/etc/fstab"
# Install systemd script allowing to automount /lib/modules
install -m 644 "$srcdir/PKGBUILD.qubes-ensure-lib-modules.service" "${pkgdir}/usr/lib/systemd/system/qubes-ensure-lib-modules.service"
# Install pacman hook to update desktop icons
mkdir -p "${pkgdir}/usr/share/libalpm/hooks/"
install -m 644 "$srcdir/PKGBUILD.qubes-update-desktop-icons.hook" "${pkgdir}/usr/share/libalpm/hooks/qubes-update-desktop-icons.hook"
# Install pacman.d drop-ins (at least 1 drop-in must be installed or pacman will fail)
mkdir -p "${pkgdir}/etc/pacman.d"
install -m 644 "$srcdir/PKGBUILD-qubes-pacman-options.conf" "${pkgdir}/etc/pacman.d/10-qubes-options.conf"
# Install pacman repository, shipped disabled; named after the major.minor release.
release=$(echo "$pkgver" | cut -d '.' -f 1,2)
echo "Installing repository for release ${release}"
install -m 644 "$srcdir/PKGBUILD-qubes-repo-${release}.conf" "${pkgdir}/etc/pacman.d/99-qubes-repository-${release}.conf.disabled"
# Archlinux specific: enable autologin on tty1
# (unquoted EOF is deliberate — nothing in the unit needs expansion here).
mkdir -p "$pkgdir/etc/systemd/system/getty@tty1.service.d/"
cat <<EOF > "$pkgdir/etc/systemd/system/getty@tty1.service.d/autologin.conf"
[Service]
ExecStart=
ExecStart=-/usr/bin/agetty --autologin user --noclear %I 38400 linux
EOF
# Archlinux packaging guidelines: /var/run is a symlink to a tmpfs. Don't create it
rm -r "$pkgdir/var/run"
}
#This package provides:
# * proxy service used by TemplateVMs to download updates
# * qubes-firewall service (FirewallVM)
#
#Integration of NetworkManager for Qubes VM:
# * make connections config persistent
# * adjust DNS redirections when needed
# * show/hide NetworkManager applet icon
#
package_qubes-vm-networking() {
  pkgdesc="Qubes OS tools allowing to use a Qubes VM as a NetVM/ProxyVM"
  depends=(
    qubes-vm-core qubes-vm-utils python ethtool net-tools
    qubes-db-vm networkmanager iptables tinyproxy nftables
    conntrack-tools
  )
  install=PKGBUILD-networking.install

  cd "${srcdir}/${_gitname}/"
  # shellcheck disable=SC2154
  make install-netvm DESTDIR="$pkgdir" SBINDIR=/usr/bin LIBDIR=/usr/lib SYSLIBDIR=/usr/lib SYSTEM_DROPIN_DIR=/usr/lib/systemd/system USER_DROPIN_DIR=/usr/lib/systemd/user DIST=archlinux
}
package_qubes-vm-keyring() {
  pkgdesc="Qubes OS Binary Repository Activation package and Keyring"
  install=PKGBUILD-keyring.install
  # Stage the keyring files; activation happens in the .install hook.
  install -dm755 "${pkgdir}/usr/share/pacman/keyrings/"
  local spec src dst
  for spec in keys:qubesos-vm.gpg trusted:qubesos-vm-trusted revoked:qubesos-vm-revoked; do
    src="PKGBUILD-keyring-${spec%%:*}"
    dst="${spec#*:}"
    install -m0644 "${src}" "${pkgdir}/usr/share/pacman/keyrings/${dst}"
  done
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.