blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
77f5be929789fde610e0058c7aa843fd1e3847c2
|
Shell
|
veera-samy/CodeDeploy_SampleApp_Linux
|
/scripts/test.sh
|
UTF-8
| 191
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# CodeDeploy sample lifecycle hook: simulate a short unit of work with a
# sleep, then dump the environment the deployment agent passes to hooks.
sleep_secs=3
echo "TEST: I will now sleep ${sleep_secs}s to pretend I'm doing something useful!"
sleep ${sleep_secs}
echo "Done sleeping, resuming!"
echo "This is the ENV for this script!"
env
| true
|
df6e1b505fc67766d913ef4648f4dae32f01335f
|
Shell
|
daleha/install-scripts
|
/legacyscripts/birchhome.sh
|
UTF-8
| 3,255
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# birchhome.sh - rewrite the hard-coded developer BIRCH home path
# (/home/psgendb/BIRCHDEV) inside a fixed list of config/launcher files so a
# freshly unpacked BIRCH installation points at its own home directory, then
# fix up permissions and the database admin userid.
# Assumes it is run from a sibling of ../local and ../admin - TODO confirm.
if [ -f "../local/admin/BIRCH.properties" ]
then
BIRCH=`grep BirchProps.homedir ../local/admin/BIRCH.properties |cut -f2 -d"="`
else # Deprecated
BIRCH=`cat ../local/admin/birchdir.param`
fi
# Make sure that this script doesn't run in BIRCHDEV, which
# would clobber the master copy of BIRCH.
RESULT=`echo $BIRCH |grep -c BIRCHDEV`
if [ ${RESULT} -eq "1" ]
then
echo '>>> birchhome.sh cannot be run in BIRCHDEV'
echo '>>> Doing so would clobber the master copy of BIRCH'
exit 1
else
if [ -d "$BIRCH" ]
then
cd ../admin
MINIBIRCH=`grep BirchProps.minibirch ../local/admin/BIRCH.properties |cut -f2 -d"="`
# The "minibirch" flavour rewrites a smaller file set (no Java .lax
# launchers or NCBI config).
if [ "$MINIBIRCH" = "true" ]
then
# NOTE: FILELIST is deliberately expanded unquoted below so the embedded
# newlines word-split and the *.fil glob expands.
FILELIST="cshrc.source
profile.source
add_to_cshrc
add_to_login
add_to_profile
newuser
$BIRCH/dat/fasta/fastgbs
$BIRCH/dat/fasta/*.fil
$BIRCH/install-birch/htmldir.param
$BIRCH/dat/fasta/fastgbs
$BIRCH/dat/birch/ldir.param
$BIRCH/dat/bldna/ldir.param
$BIRCH/dat/blprotein/ldir.param
$BIRCH/dat/bltree/ldir.param
$BIRCH/dat/blmarker/ldir.param
$BIRCH/dat/XLandscape/XLand
$BIRCH/admin/launchers/birch.desktop
$BIRCH/admin.uninstall/cshrc.source
$BIRCH/admin.uninstall/profile.source"
else
FILELIST="cshrc.source
profile.source
add_to_cshrc
add_to_login
add_to_profile
newuser
$BIRCH/dat/fasta/fastgbs
$BIRCH/dat/fasta/*.fil
$BIRCH/install-birch/htmldir.param
$BIRCH/dat/fasta/fastgbs
$BIRCH/dat/birch/ldir.param
$BIRCH/dat/bldna/ldir.param
$BIRCH/dat/blprotein/ldir.param
$BIRCH/dat/bltree/ldir.param
$BIRCH/dat/blmarker/ldir.param
$BIRCH/dat/XLandscape/XLand
$BIRCH/admin/launchers/birch.desktop
$BIRCH/admin.uninstall/cshrc.source
$BIRCH/admin.uninstall/profile.source
$BIRCH/java/ArrayNorm/ArrayNorm.lax
$BIRCH/java/Bluejay/Bluejay.lax
$BIRCH/java/Jalview/Jalview.lax
$BIRCH/java/genographer/genograph.cfg
$BIRCH/pkg/NCBI/.ncbirc"
fi
unset noclobber
# In-place substitution via a temp file named after this shell's PID;
# writing back with cat preserves the original file's inode/permissions.
for file in $FILELIST
do
echo Setting location of BIRCH home directory as $BIRCH in $file
cat $file | sed s%/home/psgendb/BIRCHDEV%$BIRCH%g > temp.$$
cat temp.$$ > $file
chmod a+r $file
done
chmod a+rx .
chmod a+rx newuser $BIRCH/install-birch/makelinks.sh
# Make sure that all directories in $BIRCH are world
# readable and world executable
chmod a+rx $BIRCH
cd $BIRCH
for file in `ls`
# for file in $( ls )
do
if [ -d $file ]
then
chmod a+rx $file
fi
done
# Set the userid of the birchdb database administrator
cd $BIRCH/public_html/birchdb/wspec
sed s%psgendb%`$BIRCH/script/whoami`%g < passwd.wrm > temp.$$
cat temp.$$ > passwd.wrm
chmod a+r passwd.wrm
# Set the userid of the birchdb database administrator
# (second copy of the database under local/)
cd $BIRCH/local/public_html/birchdb/wspec
sed s%psgendb%`$BIRCH/script/whoami`%g < passwd.wrm > temp.$$
cat temp.$$ > passwd.wrm
chmod a+r passwd.wrm
else
echo No such directory: $BIRCH. Exiting
exit 1
fi
fi
| true
|
0b114b0099958377e815202cc6a6534d6535594b
|
Shell
|
crcerror/change_es_systems
|
/change_es_systems.sh
|
UTF-8
| 4,008
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# change_es_systems.sh
#############
# This script lets you change the es_systems.cfg file used to display systems using your controllers.
#
# v3
# TMNTturtlguy - June 2017
#
# place script in /home/pi/RetroPie/retropiemenu/
# In order to run the script you must create the following folders:
# /opt/retropie/configs/all/emulationstation/es_systems
# Then within that folder create these 4 folders:
# All
# Consoles
# Customs
# Favorites
# Hacks
# Example of the full path: /opt/retropie/configs/all/emulationstation/es_systems/Favorites
# Within each folder you have created place a es_systems.cfg file. The file needs to be named the same in each folder, but the systems can be different.
# Restart EmulationStation and the script is located in the retropie menu
#
#
#
# script welcome screen; the `2>&1 > /dev/tty` swap keeps the dialog UI on the
# terminal while the exit status drives the `|| exit` bail-out on "No".
dialog --backtitle "W A R N I N G !" --title " WARNING! " \
--yesno "\nThis script lets you change your es_systems.cfg file used to display systems. This will not change or remove any systems or content, it will change which systems are displayed based on user created es_systems.cfg files. You will need to manually restart ES after running the script.\n\n\nDo you want to proceed?" \
15 75 2>&1 > /dev/tty \
|| exit
# dialog functions ##########################################################
# dialog functions ##########################################################
# All helpers use `2>&1 > /dev/tty`: dialog draws on the tty while its
# selection (written to stderr) is captured by the caller's $(...).
# Shows a menu titled with $1; remaining args are tag/item pairs.
function dialogMenu() {
local text="$1"
shift
dialog --no-mouse \
--backtitle "$BACKTITLE" \
--cancel-label "Back" \
--ok-label "OK" \
--menu "$text\n\nChoose an option." 17 75 10 "$@" \
2>&1 > /dev/tty
}
# Yes/no prompt; returns 0 on Yes, non-zero on No/Esc.
function dialogYesNo() {
dialog --no-mouse --backtitle "$BACKTITLE" --yesno "$@" 15 75 2>&1 > /dev/tty
}
# Modal message box, dismissed with OK.
function dialogMsg() {
dialog --no-mouse --ok-label "OK" --backtitle "$BACKTITLE" --msgbox "$@" 20 70 2>&1 > /dev/tty
}
# Non-blocking info box (no button).
# NOTE(review): $BACKTITLE is never assigned in this script - dialogs show an
# empty backtitle; confirm whether back_title was intended.
function dialogInfo {
dialog --infobox "$@" 8 50 2>&1 >/dev/tty
}
# end of dialog functions ###################################################
# Top-level menu loop: keeps prompting until the user cancels/escapes
# (any non-listed choice hits the *) arm and breaks the loop).
function main_menu() {
local choice
while true; do
choice=$(dialog --backtitle "$BACKTITLE" --title " MAIN MENU " \
--ok-label OK --cancel-label Exit \
--menu "What do you want to do?" 17 75 10 \
B "Change to es_systems ALL" \
E "change to es_systems Consoles" \
C "Change to es_systems Customs" \
R "Change to es_systems Favorites" \
U "Change to es_systems Hacks" \
2>&1 > /dev/tty)
case "$choice" in
B) change_to_all ;;
E) change_to_consoles ;;
C) change_to_customs ;;
R) change_to_favorites ;;
U) change_to_hacks ;;
*) break ;;
esac
done
}
# Replace the active es_systems.cfg with the copy stored under the given
# subfolder of .../emulationstation/es_systems.
# $1 - subfolder name (All, Consoles, Customs, Favorites or Hacks)
# The five public change_to_* functions below keep their original names and
# behavior; their previously copy-pasted bodies now share this one helper.
function switch_es_systems() {
    local es_dir="/opt/retropie/configs/all/emulationstation"
    sudo rm "${es_dir}/es_systems.cfg"
    sudo cp "${es_dir}/es_systems/$1/es_systems.cfg" "${es_dir}/"
}
function change_to_all() {
    switch_es_systems "All"
}
function change_to_consoles() {
    switch_es_systems "Consoles"
}
function change_to_customs() {
    switch_es_systems "Customs"
}
function change_to_favorites() {
    switch_es_systems "Favorites"
}
function change_to_hacks() {
    switch_es_systems "Hacks"
}
# START HERE #################################################################
main_menu
| true
|
868c98cc50467e1851529b82e495e74529799716
|
Shell
|
zhixingfeng/shell
|
/encode2var
|
UTF-8
| 350
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# encode2var - decode a whitespace-separated file of integer base codes into
# a tab-separated "var" table.
# Each integer i encodes position int(i/4) and base i%4 (0=A, 1=C, 2=G, 3=T);
# the output row is: position, base, code, then six zero columns.
# Usage: encode2var encodefile varfile
if [ "$#" -ne 2 ]; then
echo "encode2var encodefile varfile"
# NOTE(review): exits 0 on a usage error; preserved for backward
# compatibility with existing callers - confirm before changing to 1.
exit 0
fi
# Fix: quote the file arguments so paths containing spaces or glob
# characters are passed through intact.
awk '{
for (i=1;i<=NF;i++){
if ($i%4==0) cur_base = "A";
if ($i%4==1) cur_base = "C";
if ($i%4==2) cur_base = "G";
if ($i%4==3) cur_base = "T";
cur_line = int($i/4)"\t"cur_base"\t"$i"\t0\t0\t0\t0\t0\t0";
print cur_line;
}
}' "$1" > "$2"
| true
|
490f9776ec24ea75dda057bfa482d8ea92f35b58
|
Shell
|
cristiantela/LighterCodeChallenge
|
/challenges/week 2 - rock paper scissors/gustavocarneiroa.sh
|
UTF-8
| 292
| 2.609375
| 3
|
[] |
no_license
|
# DO NOT run under plain sh; use Bash (e.g. an online shell such as
# https://www.onlinegdb.com/online_bash_shell) - it relies on bashisms
# (associative arrays, ${var: -3:1}).
# 155-character code-golf entry: rock-paper-scissors judge. Reads two plays
# and prints the winning player number or a no-winner message.
read a b
# Keep the 3rd-from-last character of each word; presumably this uniquely
# maps the expected play words onto the d/p/s keys below - TODO confirm the
# exact input vocabulary against the challenge statement.
a=${a: -3:1}
b=${b: -3:1}
declare -A w
w[d]=0
w[p]=1
w[s]=2
# Modular difference of the two ranks: 0 = draw, 1 = player 1 wins,
# 2 = player 2 wins.
let v=(3+w[$a]-w[$b])%3
echo $([ "$v" != 0 ] && echo "Jogador $v" || echo "Não há vencedores")
| true
|
6dc1762d22950d97ea42052e607b8f7696c6012d
|
Shell
|
ballerina-platform/ballerina-update-tool
|
/resources/bin/bal
|
UTF-8
| 4,883
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ---------------------------------------------------------------------------
# Copyright (c) 2019, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 'bal' launcher: dispatches to the update tool jar or to the selected
# Ballerina distribution. The whole body is wrapped in one { ...; }; exit
# group, presumably so a self-update rewriting this file mid-run cannot
# execute a half-old/half-new tail - confirm.
{
# macOS lacks GNU readlink -f, so resolve the script dir differently there.
if [ "$(uname)" == "Darwin" ]
then
CURRENT_PATH=$(dirname "$0")
else
CURRENT_PATH="$(dirname "$(readlink -f "$0")")"
fi
# Prefer a bundled JRE (newest known layout first); fall back to PATH java.
JAVA_COMMAND=java
if test -d "$CURRENT_PATH/../dependencies/jdk-17.0.7+7-jre"; then
JAVA_COMMAND="$CURRENT_PATH/../dependencies/jdk-17.0.7+7-jre/bin/java"
elif test -d "$CURRENT_PATH/../dependencies/jdk-11.0.18+10-jre"; then
JAVA_COMMAND="$CURRENT_PATH/../dependencies/jdk-11.0.18+10-jre/bin/java"
elif test -d "$CURRENT_PATH/../dependencies/jdk-11.0.15+10-jre"; then
JAVA_COMMAND="$CURRENT_PATH/../dependencies/jdk-11.0.15+10-jre/bin/java"
elif test -d "$CURRENT_PATH/../dependencies/jdk-11.0.8+10-jre"; then
JAVA_COMMAND="$CURRENT_PATH/../dependencies/jdk-11.0.8+10-jre/bin/java"
elif test -d "$CURRENT_PATH/../dependencies/jdk8u265-b01-jre"; then
JAVA_COMMAND="$CURRENT_PATH/../dependencies/jdk8u265-b01-jre/bin/java"
fi
# 'bal completion bash|zsh': print a shell completion script and exit.
if [ "$1" == "completion" ]
then
if test -f "$CURRENT_PATH/../scripts/bal_completion.bash"; then
if [ "$2" == "bash" ]
then
printf "#!/usr/bin/env bash\n\n"
cat $CURRENT_PATH/../scripts/bal_completion.bash
elif [ "$2" == "zsh" ]
then
# zsh reuses the bash script via bashcompinit.
printf "#!/usr/bin/env bash\n\n"
printf "autoload -U +X bashcompinit && bashcompinit\n"
printf "autoload -U +X compinit && compinit\n\n"
cat $CURRENT_PATH/../scripts/bal_completion.bash
else
echo "ballerina: unknown command '$2'"
exit 1
fi
else
echo "Completion scripts not found"
fi
# NOTE(review): $? here is the status of the preceding echo/cat, and the
# EXIT_CODE assigned below is the status of *that* echo (always 0), so this
# branch is effectively dead - confirm intended behavior.
if [ $? -ne '0' ]; then
echo "Failed to generate the completion script"
EXIT_CODE=$?
fi
exit 0
fi
# Decide whether the update tool ('dist'/'update') and/or the distribution
# itself should handle this invocation.
RUN_COMMAND=false
RUN_BALLERINA=true
if [ "$1" == "dist" ] || [ "$2" == "dist" ] || [ "$1" == "update" ] || ( [ "$1" == "dist" ] && [ "$2" == "update" ] )
then
RUN_COMMAND=true
RUN_BALLERINA=false
fi
# 'build' runs through both the tool jar and the distribution.
if [ "$1" == "build" ]
then
RUN_COMMAND=true
fi
if [ "$RUN_COMMAND" == "true" ]
then
if [ "$1" == "build" ]
then
$JAVA_COMMAND -jar $CURRENT_PATH/../lib/ballerina-command-@version@.jar build
else
$JAVA_COMMAND -jar $CURRENT_PATH/../lib/ballerina-command-@version@.jar "$@"
EXIT_CODE=$?
fi
# 'bal update' stages a new tool under ballerina-command-tmp; finish the
# swap by running its installer, then clean up.
if [ "$1" == "update" ] && [ -d "$CURRENT_PATH/../ballerina-command-tmp" ]; then
$CURRENT_PATH/../ballerina-command-tmp/install
if [ $? -ne '0' ]; then
echo "Update failed due to errors"
rm -rf $CURRENT_PATH/../ballerina-command-tmp
# NOTE(review): captures the status of rm, not of the failed install.
EXIT_CODE=$?
fi
rm -rf $CURRENT_PATH/../ballerina-command-tmp
echo "Update successfully completed"
echo
echo "If you want to update the Ballerina distribution, use 'bal dist update'"
exit 0
fi
fi
if [ "$RUN_BALLERINA" == "true" ]
then
# Resolve the active distribution: the bundled default, overridden by the
# user's ~/.ballerina/ballerina-version when that distribution exists.
FILE=$CURRENT_PATH/../distributions/ballerina-version
if test -f "$FILE"; then
BALLERINA_VERSION=`cat $CURRENT_PATH/../distributions/ballerina-version`
fi
FILE=~/.ballerina/ballerina-version
if test -f "$FILE"; then
BALLERINA_USER_VERSION=`cat $FILE`
if test -d "$CURRENT_PATH/../distributions/$BALLERINA_USER_VERSION"; then
BALLERINA_VERSION=$BALLERINA_USER_VERSION
fi
fi
BALLERINA_HOME="$CURRENT_PATH/../distributions/$BALLERINA_VERSION"
export BALLERINA_HOME
# Newer distributions ship 'bal', older ones 'ballerina'.
if test -f "$BALLERINA_HOME/bin/./bal"; then
$BALLERINA_HOME/bin/./bal "$@"
else
if test -f "$BALLERINA_HOME/bin/./ballerina"; then
$BALLERINA_HOME/bin/./ballerina "$@"
else
echo "Distribution does not exist, use 'bal dist pull <version>'"
fi
fi
EXIT_CODE=$?
fi
# help/version also go through the update tool so it can append its own info.
if [ "$1" == "help" ] && [ "$2" == "" ] || [ "$1" == "" ] || [ "$1" == "-h" ] || [ "$1" == "--help" ] || \
[ "$1" == "version" ] || [ "$1" == "-v" ] || [ "$1" == "--version" ]
then
$JAVA_COMMAND -jar $CURRENT_PATH/../lib/ballerina-command-@version@.jar "$@"
exit $?
else
exit $EXIT_CODE
fi
}; exit
| true
|
20fecd029f890338c3d5aee724a4927865f24540
|
Shell
|
JeffeApAlves/ESP32Debug
|
/misc.sh
|
UTF-8
| 6,457
| 3.859375
| 4
|
[] |
no_license
|
#! /bin/bash
#
# misc.sh - dialog-based helper functions (file/dir pickers, message boxes,
# download/clone progress gauges) for the ESP32 debug project tooling.
#
# Directory containing this script (path up to the last '/').
BASEDIR="${0%/*}"
# def.sh presumably defines PROJECT_NAME, USER/host_gdb and friends used
# below - TODO confirm.
source $BASEDIR/def.sh
back_title="Projeto $PROJECT_NAME"
# Interactive, recursive file picker (local or over ssh).
# $1 - dialog prompt text
# $2 - starting directory ("" = current)
# $3 - required file extension
# $4 - "LOCAL" (default) or "REMOTE" (browse $USER@$host_gdb via ssh)
# On success sets the globals filename/filepath and returns 0.
function select_file() {
local title=$1
local path=$2
local ext_file=$3
local source=${4:-"LOCAL"}
local dir_content=""
local curdir=""
# Build "name size" pairs for the menu from an ls listing.
if [ $source = "REMOTE" ]; then
dir_content=$(ssh $USER@$host_gdb "if [ ! -z $path ] ;then cd $path ; fi ; ls -lhd */ *.$ext_file" 2>&1 | awk -F ' ' ' { print $9 " " $5 } ')
curdir=$(ssh $USER@$host_gdb "pwd" 2>&1)
else
if [ ! -z $path ] ; then
cd "$path"
fi
dir_content=$(ls -lhd */ *.$ext_file | awk -F ' ' ' { print $9 " " $5 } ')
curdir=$(pwd)
fi
# Offer "../" to go up, except at the filesystem root.
if [ "$curdir" != "/" ] ; then
dir_content="../ Voltar $dir_content"
fi
selection=$(dialog --stdout \
--title "Seleção de arquivo" \
--backtitle "$back_title" \
--scrollbar \
--menu "$title\nSelecione um arquivo do tipo '$ext_file'.\n$curdir" 30 100 20 \
$dir_content
)
local RET=$?
if [ $RET -eq 0 ]; then
if [[ -d "$selection" ]]; then
# Directory chosen: recurse into it.
select_file "$title" "$selection" "$ext_file" "$source"
elif [[ -f "$selection" ]]; then
if [[ $selection == *$ext_file ]]; then # check the extension
if (! dialog --title "Confirmação da seleção" --yesno "Diretório: $curdir\nArquivo : $selection" 10 100 \
--yes-button "OK" \
--no-button "Voltar"); then
filename="$selection"
filepath="$curdir"
RET=0
else
select_file "$title" "$curdir" "$ext_file" "$source"
fi
else
# Fix: show_msgbox displays its THIRD argument; the message was
# previously passed as $2 and silently dropped.
show_msgbox "ERRO!" "ERRO!" "Arquivo incompativel.\n$selection\nVoce deve selecionar um arquivo do tipo $ext_file"
select_file "$title" "$curdir" "$ext_file" "$source"
fi
else # could not read the file
show_msgbox "ERRO!" "ERRO!" "Caminho ou arquivo invalido.\nNão foi possivel acessa-lo:$selection"
select_file "$title" "$curdir" "$ext_file" "$source"
fi
fi
return $RET
}
# Interactive, recursive directory picker (local or over ssh).
# $1 - prompt title (NOTE(review): currently not shown in the menu text -
#      confirm whether that is intentional)
# $2 - starting directory ("" = current)
# $3 - "LOCAL" (default) or "REMOTE" (browse $USER@$host_gdb via ssh)
# The "Selecionar" extra button confirms the current directory; on success
# sets the global filepath and returns 0.
function select_path() {
local title=$1
local path=$2
local source=${3:-"LOCAL"}
local content_dir=""
local cur_dir=""
if [ $source = "REMOTE" ]; then
content_dir=$(ssh $USER@$host_gdb "if [ ! -z $path ] ;then cd $path ; fi ; ls -lhd */" 2>&1 | awk -F ' ' ' { print $9 " " $5 } ')
cur_dir=$(ssh $USER@$host_gdb "pwd" 2>&1)
else
if [ ! -z $path ] ; then
cd "$path"
fi
content_dir=$(ls -lhd */ | awk -F ' ' ' { print $9 " " $5 } ')
cur_dir=$(pwd)
fi
if [ "$cur_dir" != "/" ] ; then
content_dir="../ Voltar $content_dir"
fi
selection=$(dialog --stdout \
--title "Seleção de diretório" \
--backtitle "$back_title" \
--extra-button --scrollbar \
--extra-label "Selecionar" \
--menu "Selecione o diretório de destino\n$cur_dir" 30 100 20 \
$content_dir
)
local RET=$?
if [ $RET -eq 0 ]; then
# Fix: propagate $source so REMOTE browsing survives recursion (it was
# previously dropped, silently falling back to LOCAL).
select_path "$title" "$selection" "$source"
elif [ $RET -eq 3 ]; then #extra button = confirm selection
filepath="$cur_dir"
RET=0
fi
return $RET
}
# Modal message box.
# $1 - window title
# $2 - UNUSED (callers pass a duplicate title here)
# $3 - message body
# NOTE(review): $2 is never read; some call sites pass the message as $2 and
# lose it - confirm the intended 3-argument convention.
function show_msgbox() {
dialog \
--title "$1" \
--backtitle "$back_title" \
--msgbox "$3" \
0 0
}
# Transient info box: displays $1 for three seconds, then returns.
function show_info() {
    dialog --title "Informação" \
        --backtitle "$back_title" \
        --sleep 3 \
        --infobox "$1" 0 0
}
# Modal summary of a selected file.
# $1 - file name, $2 - directory it lives in.
function show_description_file() {
    dialog --title "Informações do arquivo" --backtitle "$back_title" \
        --msgbox "Arquivo selecionado\nNome : $1\nDiretorio: $2" 0 0
}
# Modal notice that a single-board computer was found.
# $1 - its IP address.
function show_description_sbc() {
    dialog --title "Informações do SBC" --backtitle "$back_title" \
        --msgbox "SBC encontrada\nIP: $1 " 0 0
}
# Download $1 to $2 with a dialog progress gauge.
# wget's dotted progress output is scraped for the percentage column
# (fixed offset 63 in its status lines) and fed, unbuffered, to --gauge.
function download_file() {
local origem=$1
local destino=$2
wget "$origem" -O "$destino" 2>&1 | \
stdbuf -o0 awk '/[.] +[0-9][0-9]?[0-9]?%/ { print substr($0,63,3) }' | \
dialog --title "Download" --gauge "Por favor espere. Download em andamento.\n\nDe :$origem\nPara:$destino" 0 0 0
}
# Install the system packages this tooling needs, streaming apt-get's
# output into a dialog tailbox while it runs in the background.
function install_dependencias() {
    local pacotes=(
        git
        make
        wget
        nmap
        flex
        bison
        gperf
        python
        python-serial
        minicom
    )
    sudo apt-get -y install "${pacotes[@]}" &> /tmp/install.log &
    dialog --title "Instalação dos pacotes" --tailbox /tmp/install.log 30 100
}
# Clone $1 (with submodules) into $2, showing a dialog progress gauge.
# git's carriage-return-delimited "Receiving objects: NN%" lines are parsed
# unbuffered and the percentage is piped to --gauge.
function clone_repositorio() {
local origem=$1
local destino=$2
git clone --recursive --progress $origem $destino 2>&1 | \
stdbuf -o0 awk 'BEGIN{RS="\r|\n|\r\n|\n\r";ORS="\n"}/Receiving/{print substr($3, 1, length($3)-1)}' | \
dialog --title "Download" --gauge "Por favor espere. Clonagem em andamento.\n\nDe :$origem\nPara:$destino" 0 0 0
}
# Bring an existing clone up to date with its upstream branch.
# $1 - repository working directory
# $2 - [optional] upstream ref (e.g. "release/v2.1"); defaults to @{u}
# Compares local HEAD, the upstream tip and their merge base to decide
# whether to report up-to-date, pull, or flag a divergence.
function update_repositorio() {
local destino=$1
local UPSTREAM=${2:-'@{u}'}
cd $destino &&
git remote update > /dev/null &&
LOCAL=$(git rev-parse @) &&
REMOTE=$(git rev-parse "$UPSTREAM") &&
BASE=$(git merge-base @ "$UPSTREAM") &&
if [ $LOCAL = $REMOTE ]; then
show_info "Atualizado !\nLocal :$LOCAL\nRemote:$REMOTE\nBase :$BASE"
elif [ $LOCAL = $BASE ]; then
# Behind upstream: pull in the background and tail the log.
# Fix: the duplicated redirect and stray "30 100" were previously
# passed as arguments to git pull itself.
git submodule update
git pull --recurse-submodules &> /tmp/git.log &
dialog \
--title "Atualização respositório-Local :$LOCAL\nRemote:$REMOTE\nBase :$BASE" \
--tailbox /tmp/git.log 30 100
elif [ $REMOTE = $BASE ]; then
# Fix: the bare "dialog" line was missing its continuation backslash,
# so its options were executed as a separate (failing) command.
git submodule update
git pull --recurse-submodules &> /tmp/git.log &
dialog \
--title "Atualização respositório-Local :$LOCAL\nRemote:$REMOTE\nBase :$BASE" \
--tailbox /tmp/git.log 30 100
else
show_info "Divergencias\n\nLocal :$LOCAL\nRemote:$REMOTE\nBase :$BASE"
fi
}
| true
|
32827fbe60a5dcbe9f42bf88546fff85f94013fc
|
Shell
|
nthomson-pivotal/homelab
|
/bosh/director/create-director.sh
|
UTF-8
| 981
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a BOSH director to vSphere, alias the environment, upload the two
# stemcells the lab uses, then apply cloud/runtime config and wire up CredHub.
# Usage: create-director.sh <vsphere-password>
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
vsphere_password=$1
if [ -z "$vsphere_password" ]; then
echo "Error: Must supply vSphere password"
exit 1
fi
# Fix: quote the password so values containing spaces or glob characters
# survive as a single argument.
"$DIR/deploy-director.sh" "$vsphere_password"
source $DIR/login.sh
bosh alias-env lab -e $(bosh int "$DIR/vars.yml" --path="/internal_ip") --ca-cert <(bosh int $STATE_DIR/creds.yml --path /director_ssl/ca)
# Xenial Stemcell
bosh -n upload-stemcell --sha1 9723f506c44f100c949b5bd7734d539168d3696e https://bosh.io/d/stemcells/bosh-vsphere-esxi-ubuntu-xenial-go_agent?v=250.23
# Trusty Stemcell
bosh -n upload-stemcell --sha1 87851d1550022eab3550e732462699a20bf3513b https://bosh.io/d/stemcells/bosh-vsphere-esxi-ubuntu-trusty-go_agent?v=3586.91
"$DIR/update-cloud-config.sh"
"$DIR/update-runtime-config.sh"
"$DIR/credhub-login.sh"
"$DIR/director-to-credhub.sh"
echo "-----------------------------------"
echo "To login: source login.sh"
echo "-----------------------------------"
| true
|
7b6f304e0902088436e861db1487a28a066794df
|
Shell
|
gwabramblehouse/dcu-2019
|
/analysis/bin/grideye-background-variation-analyse
|
UTF-8
| 1,640
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
#############################################################################
# Home Mobility Monitoring.
#
# Generate heatmaps of per-pixel statistics for fluctuations in measurements
# of a consistent background.
#############################################################################
# Fix: the original used ${HMM_HOME:$HOME/hmm} (substring expansion with an
# arithmetic offset, which errors out and leaves HMM_HOME empty); ':-' is
# the "default if unset or empty" operator intended here.
export HMM_HOME=${HMM_HOME:-$HOME/hmm}
bin_dir="${HMM_HOME}/analysis/bin"
data_dir="${HMM_HOME}/var/data"
input_dir="${HMM_HOME}/var/data/processed"
output_dir="${HMM_HOME}/var/data/reports"
mkdir -p ${output_dir}
# Reduce the raw grideye logs to per-pixel CSV statistics...
find ${data_dir}/raw/sensor_data/absent-1/centre-node/ -name "*grideye.txt" | ${bin_dir}/grideye-background-variation-process-logs
# ...then render one heatmap per statistic.
${bin_dir}/print-heatmap -i ${input_dir}/av_pixel.csv -o ${output_dir}/av_pixel.png -t "Average pixel value"
${bin_dir}/print-heatmap -i ${input_dir}/blank_pixel.csv -o ${output_dir}/blank_pixel.png -t "Pixels with blank values"
${bin_dir}/print-heatmap -i ${input_dir}/diff_pixel.csv -o ${output_dir}/diff_pixel.png -t "Range of pixel values"
${bin_dir}/print-heatmap -i ${input_dir}/inc_pixel.csv -o ${output_dir}/inc_pixel.png -t "Rise vs fall of pixel values" -l "Increases vs decreases"
${bin_dir}/print-heatmap -i ${input_dir}/max_pixel.csv -o ${output_dir}/max_pixel.png -t "Max pixel value"
${bin_dir}/print-heatmap -i ${input_dir}/min_pixel.csv -o ${output_dir}/min_pixel.png -t "Min pixel value"
${bin_dir}/print-heatmap -i ${input_dir}/sd_pixel.csv -o ${output_dir}/sd_pixel.png -t "Standard deviation of pixel values"
${bin_dir}/print-heatmap -i ${input_dir}/trend_pixel.csv -o ${output_dir}/trend_pixel.png -t "Change in pixel values over time"
| true
|
f6ef683b154e0bac1acbee8910ee73614c1f63bf
|
Shell
|
AthabascaUniversity/openstack-setup
|
/setup/openstack-nova-glance-demo
|
UTF-8
| 655
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Demo bootstrap: source the setup rc file, create a keypair, import a demo
# image into Glance and boot an instance from it.
# Usage: openstack-nova-glance-demo [rcfile]
# The rc file presumably defines TMP_DIR, NOVA_GLANCE_DEMO_IMG_NAME and
# NOVA_GLANCE_DEMO_IMG_URL - TODO confirm.
RCFILE=${1:-"openstack-setup.rc"}
if [ -e ${RCFILE} ]; then
. $RCFILE
else
echo "Missing ${RCFILE}"
exit 1
fi
# NOTE(review): DBPW is set here but never used in this script - confirm
# whether it is consumed by something sourced later or is dead code.
if [ -n "$DB_ROOT_PW" ]; then
DBPW="--rootpw $DB_ROOT_PW"
fi
# qcow2 handling relies on the network block device module being loaded.
modprobe nbd
nova keypair-add demo-key > ~/demo-key.priv
chmod 600 ~/demo-key.priv
# -C - resumes a partial download of the demo image.
curl -C - -o ${TMP_DIR}/${NOVA_GLANCE_DEMO_IMG_NAME}.qcow2 ${NOVA_GLANCE_DEMO_IMG_URL}
glance add name=${NOVA_GLANCE_DEMO_IMG_NAME} is_public=true disk_format=qcow2 container_format=bare < ${TMP_DIR}/${NOVA_GLANCE_DEMO_IMG_NAME}.qcow2
nova boot myserver --flavor 2 --key_name demo-key \
--image $(glance index | grep ${NOVA_GLANCE_DEMO_IMG_NAME} | awk '{print $1}')
| true
|
92b174b84ab9fade36b5e6bc07f38d082e2c65cf
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/cura-engine/PKGBUILD
|
UTF-8
| 2,446
| 2.984375
| 3
|
[] |
no_license
|
# Maintainer: Yunhui Fu <yhfudev@gmail.com>
# PKGBUILD metadata for the CuraEngine release tarball build.
pkgname=cura-engine
pkgver=15.06.03
pkgrel=1
pkgdesc="A C++ console application for 3D printing GCode generation. It's called by Repetier Host and/or other applications."
arch=(i686 x86_64 arm)
url="https://github.com/Ultimaker/CuraEngine.git"
license=(GPL)
depends=(protobuf3 libarcus)
makedepends=(gcc cmake)
source=(
#"${pkgname}::git+https://github.com/Ultimaker/CuraEngine.git"
"${pkgname}-${pkgver}.tar.gz::https://github.com/Ultimaker/CuraEngine/archive/${pkgver}.tar.gz"
)
sha1sums=(
'aac7db3aa1188967e66a28dd3d33f004d72523d4'
)
# Mutually exclusive with the other CuraEngine packagings.
conflicts=(curaengine curaengine-git cura-engine-git)
# Short (7-character) hash of the checked-out git commit, for git builds.
pkgver_git() {
    cd "${srcdir}/${pkgname}"
    local ver
    ver="$(git show | grep commit | awk '{print $2}' )"
    echo ${ver:0:7}
}
# First 7 characters of the svn revision, for svn checkouts.
pkgver_svn() {
    cd "${srcdir}/${pkgname}"
    local ver
    ver="$(svn info | grep Revision | awk '{print $2}' )"
    echo ${ver:0:7}
}
#pkgver() {
# pkgver_git
#}
# Build from a git checkout; stamps the binary with the short commit hash.
build4git() {
cd "${srcdir}/${pkgname}"
# Embed the version. Fix: this previously ran $(pkgver), but pkgver() is
# commented out above, so the substitution failed; use pkgver_git.
sed -i -e "s|add_definitions[ \t]*([ \t]*-DVERSION=.*||" CMakeLists.txt
echo "add_definitions( -DVERSION=\"git-$(pkgver_git)\" )" >> CMakeLists.txt
# Point the engine at the packaged default settings file.
sed -i -e "s|loadJSON[ \t]*([ \t]*\"fdmprinter.json|loadJSON(\"/usr/share/${pkgname}/fdmprinter.json|" src/main.cpp
mkdir -p build
cd build
cmake ..
make VERSION="\"git-$pkgver\""
}
# Build from the release tarball; stamps the binary with the release version.
build4release() {
cd "${srcdir}/CuraEngine-${pkgver}"
# Embed the version. Fix: this previously ran "git-$(pkgver)" - pkgver()
# is commented out (undefined), and the "git-" prefix mislabels a release
# build; use the ${pkgver} variable, matching the make line below.
sed -i -e "s|add_definitions[ \t]*([ \t]*-DVERSION=.*||" CMakeLists.txt
echo "add_definitions( -DVERSION=\"${pkgver}\" )" >> CMakeLists.txt
# Point the engine at the packaged default settings file.
sed -i -e "s|loadJSON[ \t]*([ \t]*\"fdmprinter.json|loadJSON(\"/usr/share/${pkgname}/fdmprinter.json|" src/main.cpp
mkdir -p build
cd build
cmake ..
make VERSION="\"$pkgver\""
}
# makepkg entry point: this PKGBUILD builds from the release tarball.
build() { build4release; }
# Install the git-built binary and default settings into the package root.
# Fix: quote $pkgdir/$pkgname expansions so paths with spaces are safe.
package4git() {
cd "${srcdir}/${pkgname}"
mkdir -p "${pkgdir}/usr/bin/"
cp build/CuraEngine "${pkgdir}/usr/bin/"
mkdir -p "${pkgdir}/usr/share/${pkgname}/"
cp fdmprinter.json "${pkgdir}/usr/share/${pkgname}/"
}
# Install the release-built binary and default settings into the package root.
# Fix: quote $pkgdir/$pkgname expansions so paths with spaces are safe.
package4release() {
cd "${srcdir}/CuraEngine-${pkgver}"
mkdir -p "${pkgdir}/usr/bin/"
cp build/CuraEngine "${pkgdir}/usr/bin/"
mkdir -p "${pkgdir}/usr/share/${pkgname}/"
cp fdmprinter.json "${pkgdir}/usr/share/${pkgname}/"
}
# makepkg entry point: package the release build.
package() { package4release; }
| true
|
fe9fb6e532db2cbf63e19d61de30bdc21e5ec5b7
|
Shell
|
noahfriedman/bin-misc
|
/ipv6-mac-to-addr
|
UTF-8
| 536
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# $Id: ipv6-mac-to-addr,v 1.1 2015/10/05 18:39:38 friedman Exp $
# Compute stateless autoconfig address based on interface MAC
# or vice-versa
# Dispatch on the shape of $1; every arm exec's ipv6calc, so nothing after
# the matching arm runs. Extra arguments are passed through.
case $1 in
# Looks like a MAC address (xx:xx:...): derive the EUI-64 suffix.
[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:* )
exec ipv6calc -q -A geneui64 -I mac -O eui64 "$@" ;;
# Contains '::' - treat as an IPv6 address; keep only the suffix after the
# '::' and re-anchor it on the link-local fe80:: prefix before converting.
*::* )
addr=fe80::${1#*::}
shift
exec ipv6calc -q -A ipv6tomac -O mac "$addr" "$@" ;;
# Bare 4-hex-digit group(s): assume it is an interface-id suffix and
# prepend the link-local prefix.
[0-9a-f][0-9a-f][0-9a-f][0-9a-f]:* )
addr=fe80::$1
shift
exec ipv6calc -q -A ipv6tomac -O mac "$addr" "$@" ;;
esac
# No pattern matched: fall through silently with status 0.
# eof
| true
|
9db9790591894a5dc0e9ecbb306e0753b5df7ead
|
Shell
|
ebob9/topology_proxy_container
|
/docker_build/build_docker.sh
|
UTF-8
| 242
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the topology_proxy container image, tagged with the given version
# and with :latest.
# Usage: build_docker.sh <x.y.z>
if [[ -z "$1" ]]
then
# Fix: the trailing period was outside the quotes and echoed as a
# separate word (" .").
echo "ERROR: Requires version string (x.y.z) as first argument."
exit 1
fi
# Stage the project sources next to the Dockerfile for the build context.
cp ../* .
docker build --progress=plain --no-cache -t "ebob9/topology_proxy:${1}" -t ebob9/topology_proxy:latest .
| true
|
d5628c6b80a36a43b499f336dea0a9703370242b
|
Shell
|
dokku/dokku
|
/contrib/images/digitalocean/in_parts/100-image-check
|
UTF-8
| 20,014
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# DigitalOcean Marketplace Image Validation Tool
# © 2021-2022 DigitalOcean LLC.
# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
VERSION="v. 1.8.1"
RUNDATE=$(date)
# Script should be run with SUDO
if [ "$EUID" -ne 0 ]; then
echo "[Error] - This script must be run with sudo or as the root user."
exit 1
fi
# Overall result: 0 = clean, 1 = warnings only, 2 = at least one failure.
STATUS=0
# Per-category counters incremented by the check functions below.
PASS=0
WARN=0
FAIL=0
# $1 == command to check for
# returns: 0 == true, 1 == false
# cmdExists <name> - succeed (0) if <name> resolves to a runnable command,
# fail (1) otherwise. `command -v` already yields exactly that status, so
# just propagate it.
cmdExists() {
    command -v "$1" >/dev/null 2>&1
}
# Populate the globals OS and VER with the distribution name and version,
# probing the most modern sources first and falling back progressively.
function getDistro {
if [ -f /etc/os-release ]; then
# freedesktop.org and systemd
. /etc/os-release
OS=$NAME
VER=$VERSION_ID
elif type lsb_release >/dev/null 2>&1; then
# linuxbase.org
OS=$(lsb_release -si)
VER=$(lsb_release -sr)
elif [ -f /etc/lsb-release ]; then
# For some versions of Debian/Ubuntu without lsb_release command
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
# Older Debian/Ubuntu/etc.
OS=Debian
VER=$(cat /etc/debian_version)
elif [ -f /etc/SuSe-release ]; then
# Older SuSE/etc. - deliberately left unhandled (no-op).
:
elif [ -f /etc/redhat-release ]; then
# Older Red Hat, CentOS, etc.
VER=$(cut -d" " -f3 </etc/redhat-release | cut -d "." -f1)
d=$(cut -d" " -f1 </etc/redhat-release | cut -d "." -f1)
if [[ $d == "CentOS" ]]; then
OS="CentOS Linux"
fi
else
# Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
OS=$(uname -s)
VER=$(uname -r)
fi
}
# Cache /etc/shadow (requires root) into the global SHADOW for the
# per-account password checks; $(< file) avoids spawning cat.
function loadPasswords {
    SHADOW=$(< /etc/shadow)
}
# Fail the validation if the DigitalOcean monitoring agent's install
# directory is present (Marketplace images must ship without it), and print
# distro-appropriate removal instructions.
function checkAgent {
# Check for the presence of the DO directory in the filesystem
if [ -d /opt/digitalocean ]; then
echo -en "\e[41m[FAIL]\e[0m DigitalOcean directory detected.\n"
((FAIL++))
STATUS=2
if [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
echo "To uninstall the agent: 'sudo yum remove droplet-agent'"
echo "To remove the DO directory: 'find /opt/digitalocean/ -type d -empty -delete'"
elif [[ $OS == "Ubuntu" ]] || [[ $OS == "Debian" ]]; then
echo "To uninstall the agent and remove the DO directory: 'sudo apt-get purge droplet-agent'"
fi
else
echo -en "\e[32m[PASS]\e[0m DigitalOcean Monitoring agent was not found\n"
((PASS++))
fi
}
# Warn about rotated log archives and un-cleared log files under /var/log.
# Three passes: dated archives (*-YYYYMMDD), numbered rotations (*.N), and
# live *.log files with non-trivial content. The cPanel install log is
# always exempt, and /var/log/lfd.log tolerates its own reset chatter.
function checkLogs {
cp_ignore="/var/log/cpanel-install.log"
echo -en "\nChecking for log files in /var/log\n\n"
# Check if there are log archives or log files that have not been recently cleared.
for f in /var/log/*-????????; do
[[ -e $f ]] || break
if [ "${f}" != "${cp_ignore}" ]; then
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found; Contents:\n"
cat "${f}"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
done
for f in /var/log/*.[0-9]; do
[[ -e $f ]] || break
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found; Contents:\n"
cat "${f}"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
done
for f in /var/log/*.log; do
[[ -e $f ]] || break
# lfd.log: ignore the lines lfd itself writes about log resets; anything
# beyond ~50 bytes of other content still warns.
if [[ "${f}" = '/var/log/lfd.log' && "$(grep -E -v '/var/log/messages has been reset| Watching /var/log/messages' "${f}" | wc -c)" -gt 50 ]]; then
if [ "${f}" != "${cp_ignore}" ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found; Contents:\n"
cat "${f}"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [[ "${f}" != '/var/log/lfd.log' && "$(wc -c <"${f}")" -gt 50 ]]; then
if [ "${f}" != "${cp_ignore}" ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found; Contents:\n"
cat "${f}"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
}
# Warn (return 1) if /tmp contains anything, listing the contents;
# return 0 when it is empty. Note: does not bump STATUS, only WARN.
function checkTMP {
# Check the /tmp directory to ensure it is empty. Warn on any files found.
if [[ -n "$(ls -A /tmp)" ]]; then
echo -en "\e[93m[WARN]\e[0m /tmp directory is not empty; Contents\n"
ls -A /tmp
((WARN++))
return 1
fi
echo -en "\e[32m[PASS]\e[0m /tmp directory is empty\n"
return 0
}
# Audit the root account: it must have no password set (shadow field '!',
# '!!' or '*'), no populated authorized_keys or private keys under
# /root/.ssh, and a cleared .bash_history. Reads the SHADOW global cached
# by loadPasswords; updates PASS/WARN/FAIL and STATUS accordingly.
function checkRoot {
user="root"
uhome="/root"
# Scan the cached shadow entries for root's password field.
# NOTE(review): relies on word-splitting $SHADOW into one entry per line -
# each shadow line contains no spaces, so this works.
for usr in $SHADOW; do
IFS=':' read -r -a u <<<"$usr"
if [[ "${u[0]}" == "${user}" ]]; then
if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n"
((FAIL++))
STATUS=2
fi
fi
done
if [ -d ${uhome}/ ]; then
if [ -d ${uhome}/.ssh/ ]; then
if ls ${uhome}/.ssh/* >/dev/null 2>&1; then
# Classify each file in /root/.ssh: authorized_keys and id_rsa with
# content are hard failures; other files and populated known_hosts
# are warnings.
for key in "${uhome}"/.ssh/*; do
if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
if [ "$(wc -c <"${key}")" -gt 50 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
akey=$(cat "${key}")
echo "File Contents:"
echo "$akey"
echo "--------------"
((FAIL++))
STATUS=2
fi
elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
if [ "$(wc -c <"${key}")" -gt 0 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
akey=$(cat "${key}")
echo "File Contents:"
echo "$akey"
echo "--------------"
((FAIL++))
STATUS=2
else
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory at \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
else
if [ "$(wc -c <"${key}")" -gt 50 ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
fi
# A history under 200 bytes is treated as "cleared".
if [ -f /root/.bash_history ]; then
BH_S=$(wc -c </root/.bash_history)
if [[ $BH_S -lt 200 ]]; then
echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
((FAIL++))
STATUS=2
fi
return 1
else
echo -en "\e[32m[PASS]\e[0m The Root User's Bash History is not present\n"
((PASS++))
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
fi
echo -en "\n\n"
return 1
}
function checkUsers {
  # Audit every user-created account (UID >= 1000, excluding "nobody").
  # For each such user:
  #   * confirm no password is set in the shadow data (key/system auth only)
  #   * flag populated authorized_keys, private keys, or stray files in ~/.ssh
  #   * confirm ~/.bash_history has been cleared
  # Globals read:    SHADOW (whitespace-separated records from loadPasswords)
  # Globals written: PASS, FAIL, WARN, STATUS
  #
  # Fix: the original piped awk into `while read`, so the loop body ran in a
  # subshell and every PASS/FAIL/WARN/STATUS update was silently discarded.
  # Reading from process substitution keeps the loop in the current shell.
  while IFS= read -r user; do
    # Skip some other non-user system accounts
    if [[ $user == "centos" ]] || [[ $user == "nfsnobody" ]]; then
      continue
    fi
    echo -en "\nChecking user: ${user}...\n"
    # Compare each shadow record's name field against this user and inspect
    # its password field.
    for usr in $SHADOW; do
      IFS=':' read -r -a u <<<"$usr"
      if [[ "${u[0]}" == "${user}" ]]; then
        # "!", "!!" and "*" all mean "no usable password".
        if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
          echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
          ((PASS++))
        else
          echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n"
          ((FAIL++))
          STATUS=2
        fi
      fi
    done
    uhome="/home/${user}"
    if [ -d "${uhome}/" ]; then
      if [ -d "${uhome}/.ssh/" ]; then
        # Fix: the glob must sit outside the quotes; the original
        # `ls "${uhome}/.ssh/*"` looked for a file literally named '*',
        # so the per-key checks below were effectively never reached.
        if ls "${uhome}/.ssh/"* >/dev/null 2>&1; then
          for key in "${uhome}"/.ssh/*; do
            if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
              # Anything beyond ~50 bytes is treated as a real key entry.
              if [ "$(wc -c <"${key}")" -gt 50 ]; then
                echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
                akey=$(cat "${key}")
                echo "File Contents:"
                echo "$akey"
                echo "--------------"
                ((FAIL++))
                STATUS=2
              fi
            elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
              if [ "$(wc -c <"${key}")" -gt 0 ]; then
                echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
                akey=$(cat "${key}")
                echo "File Contents:"
                echo "$akey"
                echo "--------------"
                ((FAIL++))
                STATUS=2
              else
                echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
                ((WARN++))
                if [[ $STATUS != 2 ]]; then
                  STATUS=1
                fi
              fi
            elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
              echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory named \e[93m${key}\e[0m\n"
              ((WARN++))
              if [[ $STATUS != 2 ]]; then
                STATUS=1
              fi
            else
              if [ "$(wc -c <"${key}")" -gt 50 ]; then
                echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a known_hosts file in \e[93m${key}\e[0m\n"
                ((WARN++))
                if [[ $STATUS != 2 ]]; then
                  STATUS=1
                fi
              fi
            fi
          done
        else
          echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
        fi
      else
        echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
      fi
    else
      echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
    fi
    # Check for an uncleared .bash_history for this user.  Up to ~200 bytes
    # of residue (e.g. a lone "exit") is tolerated before failing.
    if [ -f "${uhome}/.bash_history" ]; then
      BH_S=$(wc -c <"${uhome}/.bash_history")
      if [[ $BH_S -lt 200 ]]; then
        echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
        ((PASS++))
      else
        echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
        ((FAIL++))
        STATUS=2
      fi
      echo -en "\n\n"
    fi
  done < <(awk -F: '$3 >= 1000 && $1 != "nobody" {print $1}' </etc/passwd)
}
function checkFirewall {
  # Determine which firewall the distribution is expected to run and whether
  # it is active.  Sets FW_VER to a printable [PASS]/[WARN] line (the caller
  # echoes it) and increments PASS or WARN.
  # Globals read:    OS
  # Globals written: FW_VER, fw, ufwa, PASS, WARN
  #
  # Fix: the original wrapped several service probes in
  #   [[ $(cmd >/dev/null 2>&1) ]]
  # which tests whether the *suppressed output* is non-empty rather than the
  # command's exit status, so those branches were always false -- and the
  # `$( ... && echo 1 || echo 0)` variant was always true.  All probes now
  # test the exit status directly via `systemctl is-active --quiet`.
  if [[ $OS == "Ubuntu" ]]; then
    fw="ufw"
    ufwa=$(ufw status | head -1 | sed -e "s/^Status:\ //")
    if [[ $ufwa == "active" ]]; then
      FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
      ((PASS++))
    else
      FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
      ((WARN++))
    fi
  elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
    if [ -f /usr/lib/systemd/system/csf.service ]; then
      fw="csf"
      if systemctl is-active --quiet "$fw"; then
        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
        ((PASS++))
      elif cmdExists "firewall-cmd"; then
        # NOTE: ${fw} still reads "csf" in these messages, matching the
        # original behavior.
        if systemctl is-active --quiet firewalld; then
          FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
          ((PASS++))
        else
          FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
          ((WARN++))
        fi
      else
        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
        ((WARN++))
      fi
    else
      fw="firewalld"
      if systemctl is-active --quiet firewalld; then
        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
        ((PASS++))
      else
        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
        ((WARN++))
      fi
    fi
  elif [[ "$OS" =~ Debian.* ]]; then
    # user could be using a number of different services for managing their
    # firewall; we will check some of the most common
    if cmdExists 'ufw'; then
      fw="ufw"
      ufwa=$(ufw status | head -1 | sed -e "s/^Status:\ //")
      if [[ $ufwa == "active" ]]; then
        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
        ((PASS++))
      else
        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
        ((WARN++))
      fi
    elif cmdExists "firewall-cmd"; then
      fw="firewalld"
      if systemctl is-active --quiet "$fw"; then
        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
        ((PASS++))
      else
        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
        ((WARN++))
      fi
    else
      # user could be using vanilla iptables, check if kernel module is loaded
      fw="iptables"
      if lsmod | grep -q '^ip_tables' 2>/dev/null; then
        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
        ((PASS++))
      else
        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
        ((WARN++))
      fi
    fi
  fi
}
function checkUpdates {
  # Refresh the package metadata for the detected distribution and report any
  # pending security updates.  Emits [PASS]/[FAIL] lines directly and bumps
  # the PASS/FAIL/STATUS counters.
  if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
    # Ensure /tmp exists and has the proper permissions before
    # checking for security updates
    # https://github.com/digitalocean/marketplace-partners/issues/94
    [[ -d /tmp ]] || mkdir /tmp
    chmod 1777 /tmp
    echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n"
    apt-get -y update >/dev/null
    uc=$(apt-get --just-print upgrade | grep -i "security" -c)
    # Each pending update contributes two matching lines, so halve the count.
    if (( uc > 0 )); then
      update_count=$(( uc / 2 ))
    else
      update_count=0
    fi
    if (( update_count > 0 )); then
      echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
      echo -en
      echo -en "Here is a list of the security updates that are not installed:\n"
      sleep 2
      apt-get --just-print upgrade | grep -i security | awk '{print $2}' | awk '!seen[$0]++'
      echo -en
      ((FAIL++))
      STATUS=2
    else
      echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n"
      ((PASS++))
    fi
  elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
    echo -en "\nChecking for available security updates, this may take a minute...\n\n"
    update_count=$(yum check-update --security --quiet | wc -l)
    if (( update_count > 0 )); then
      echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
      ((FAIL++))
      STATUS=2
    else
      echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n"
      ((PASS++))
    fi
  else
    echo "Error encountered"
    exit 1
  fi
  return 1
}
function checkCloudInit {
  # Verify that cloud-init is installed (required for Marketplace images).
  # Sets CI to a printable [PASS]/[FAIL] line (echoed by the caller) and
  # updates the PASS/FAIL/STATUS counters.
  # Uses `command -v` (portable) instead of the bash-only `hash` builtin.
  if command -v cloud-init >/dev/null 2>&1; then
    CI="\e[32m[PASS]\e[0m Cloud-init is installed.\n"
    ((PASS++))
  else
    # Fixed typo in the user-facing message: "verison" -> "version".
    CI="\e[41m[FAIL]\e[0m No valid version of cloud-init was found.\n"
    ((FAIL++))
    STATUS=2
  fi
  return 1
}
# ---------------------------------------------------------------------------
# Main: identify the distribution, run every check, then print a summary and
# exit 0 (clean / warnings only) or 1 (failures / unsupported OS).
# ---------------------------------------------------------------------------
clear
echo "DigitalOcean Marketplace Image Validation Tool ${VERSION}"
echo "Executed on: ${RUNDATE}"
echo "Checking local system for Marketplace compatibility..."
getDistro
echo -en "\n\e[1mDistribution:\e[0m ${OS}\n"
echo -en "\e[1mVersion:\e[0m ${VER}\n\n"
# ost: 1 when the distro family is supported.
# osv: 1 supported release, 2 unsupported release of a supported distro,
#      0 undetermined.
ost=0
osv=0
if [[ $OS == "Ubuntu" ]]; then
ost=1
if [[ $VER == "22.10" ]] || [[ $VER == "22.04" ]] || [[ $VER == "20.04" ]] || [[ $VER == "18.04" ]] || [[ $VER == "16.04" ]]; then
osv=1
fi
elif [[ "$OS" =~ Debian.* ]]; then
ost=1
case "$VER" in
9)
osv=1
;;
10)
osv=1
;;
11)
osv=1
;;
*)
osv=2
;;
esac
elif [[ $OS == "CentOS Linux" ]]; then
ost=1
if [[ $VER == "8" ]]; then
osv=1
elif [[ $VER == "7" ]]; then
osv=1
elif [[ $VER == "6" ]]; then
osv=1
else
osv=2
fi
elif [[ $OS == "CentOS Stream" ]]; then
ost=1
if [[ $VER == "8" ]]; then
osv=1
else
osv=2
fi
elif [[ $OS == "Rocky Linux" ]]; then
ost=1
if [[ $VER =~ 8\. ]]; then
osv=1
else
osv=2
fi
elif [[ $OS == "AlmaLinux" ]]; then
ost=1
# NOTE(review): this branch tests $VERSION -- the tool's own version string
# echoed in the banner above -- while every sibling branch tests $VER.
# This looks like a bug; confirm and change to $VER if so.
if [[ "$VERSION" =~ 8.* ]] || [[ "$VERSION" =~ 9.* ]]; then
osv=1
else
osv=2
fi
else
ost=0
fi
# Report the OS family / release verdicts computed above.
if [[ $ost == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Operating System Detected: ${OS}\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${OS} is not a supported Operating System\n"
((FAIL++))
STATUS=2
fi
if [[ $osv == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Release Detected: ${VER}\n"
((PASS++))
elif [[ $ost == 1 ]]; then
echo -en "\e[41m[FAIL]\e[0m ${OS} ${VER} is not a supported Operating System Version\n"
((FAIL++))
STATUS=2
else
echo "Exiting..."
exit 1
fi
# Run the individual checks; each prints its own results and/or updates the
# PASS/WARN/FAIL counters and STATUS.
checkCloudInit
echo -en "${CI}"
checkFirewall
echo -en "${FW_VER}"
checkUpdates
loadPasswords
checkLogs
echo -en "\n\nChecking all user-created accounts...\n"
checkUsers
echo -en "\n\nChecking the root account...\n"
checkRoot
echo -en "\n\nChecking the /tmp directory...\n"
checkTMP
checkAgent
# Summary
echo -en "\n\n---------------------------------------------------------------------------------------------------\n"
if [[ $STATUS == 0 ]]; then
echo -en "Scan Complete.\n\e[32mAll Tests Passed!\e[0m\n"
elif [[ $STATUS == 1 ]]; then
echo -en "Scan Complete. \n\e[93mSome non-critical tests failed. Please review these items.\e[0m\e[0m\n"
else
echo -en "Scan Complete. \n\e[41mOne or more tests failed. Please review these items and re-test.\e[0m\n"
fi
echo "---------------------------------------------------------------------------------------------------"
echo -en "\e[1m${PASS} Tests PASSED\e[0m\n"
echo -en "\e[1m${WARN} WARNINGS\e[0m\n"
echo -en "\e[1m${FAIL} Tests FAILED\e[0m\n"
echo -en "---------------------------------------------------------------------------------------------------\n"
# Exit status: warnings alone still exit 0; any STATUS=2 failure exits 1.
if [[ $STATUS == 0 ]]; then
echo -en "We did not detect any issues with this image. Please be sure to manually ensure that all software installed on the base system is functional, secure and properly configured (or facilities for configuration on first-boot have been created).\n\n"
exit 0
elif [[ $STATUS == 1 ]]; then
echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n"
exit 0
else
echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n"
exit 1
fi
| true
|
c2731e86ac190be503c7b84ef6256aa87bb3a7b7
|
Shell
|
hopeseekr/BashScripts
|
/changelog-maker-lite
|
UTF-8
| 1,343
| 3.609375
| 4
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
#########################################################################
# ChangeLog Maker (lite) #
# #
# Quickly creates CHANGELOG.md entries suitable for GitHub from #
# the repo's commit log. #
# #
# Part of HopeSeekr's BashScripts Collection #
# https://github.com/hopeseekr/BashScripts/ #
# #
# Copyright © 2020 Theodore R. Smith <theodore@phpexperts.pro> #
# GPG Fingerprint: 4BF8 2613 1C34 87AC D28F 2AD8 EB24 A91D D612 5690 #
# #
# License: Creative Commons Attribution v4.0 International #
#########################################################################
# A git revision range (tag, hash, "v1.0.0..", ...) is required.
case "${1:-}" in
  '')
    echo "Error: You must provide a git hash/tag/etc"
    echo "  e.g., changelog-maker-lite v1.0.0.."
    exit 1
    ;;
esac
# @see https://stackoverflow.com/a/12900372/430062
# Reformat each shortlog entry as a Markdown bullet with its date prefix.
git log --pretty=shortlog --date=iso --reverse "$1" | awk '{print "* ["$3" "$4" CDT] - " substr($0, index($0, $9))}'
| true
|
50e652bde62208f269120ce853cfba1a5085968e
|
Shell
|
cosimop2000/Esami_SO
|
/2019/Esame0504_B/FCR.sh
|
UTF-8
| 359
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# FCR.sh <dir> <name> <outfile>
#
# Recursively walks <dir>.  Whenever the current directory's path ends in
# /<name>, every regular readable file with more than 4 lines is appended to
# <outfile> as "<absolute path> <line count>".
#
# Fixes over the original: all expansions are quoted (paths with spaces no
# longer break), `cd` failure aborts instead of scanning the wrong
# directory, and backticks are replaced with $(...).
#
# NOTE(review): the recursive call invokes FCR.sh by bare name, so the
# script must be reachable via $PATH -- behavior preserved from the
# original.
cd "$1" || exit 1
# line counter for each file
count=0
case "$1" in
  */$2)
    # $2 is deliberately left unquoted in the pattern so it may itself
    # contain glob characters, as in the original.
    for i in *
    do
      if test -f "$i" && test -r "$i"
      then
        count=$(wc -l < "$i")
        if test "$count" -gt 4
        then
          echo "$(pwd)/$i $count" >> "$3"
        fi
      fi
    done
    ;;
  *) ;;
esac
# recursive part: descend into every traversable subdirectory
for i in *
do
  if test -d "$i" && test -x "$i"
  then
    FCR.sh "$(pwd)/$i" "$2" "$3"
  fi
done
| true
|
64165faf289f852fbd93739e67789150b9034006
|
Shell
|
Mithreas/pazaak
|
/pazaakcli/versus.sh
|
UTF-8
| 1,072
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Plays PLAYER1 against PLAYER2 for COUNT games of ROUNDS rounds each and
# prints the final win tally.  All settings come from the environment, with
# the defaults assigned below when unset or empty.
#DECK1="+-2,+-2,+-2,+-6,+-3,+-3,+-3,+-4,+-4,+-5"
#DECK1="-3,-3,-3,-4,-4,-4,-2,-2,-6,-5"
: "${DECK1:=auto}"
: "${DECK2:=auto}"
: "${PLAYER1:=./testagent2/testagent2}"
: "${PLAYER2:=./testagent2/testagent2}"
: "${COUNT:=100}"
: "${ROUNDS:=3}"
# use this to rig the deck and give a certain value naturally to player 1
# this combined with "agentstand" implementations which just draw and stand
# can help you crunch natural drawing odds
: "${P1VALUE:=0}"
SCORE1=0
SCORE2=0
for ((i = 1; i <= COUNT; i++))
do
  echo Running game $i...
  # pazaakcli reads both decks on stdin; exit status 1 means player 1 won.
  ./pazaakcli --quiet -player $PLAYER1 -player $PLAYER2 --round-limit $ROUNDS -p1-force-value $P1VALUE <<EOF
$DECK1
$DECK2
EOF
  if [ $? -eq 1 ]; then
    SCORE1=$((SCORE1 + 1))
  else
    SCORE2=$((SCORE2 + 1))
  fi
done
echo "score player 1 ($PLAYER1 [$DECK1]): $SCORE1"
echo "score player 2 ($PLAYER2 [$DECK2]): $SCORE2"
| true
|
c0c49371232e002acac1f4214302048bb1050cf5
|
Shell
|
skywalka/terraform-google-splunk-enterprise
|
/startup_script.sh.tpl
|
UTF-8
| 12,688
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): this appears to be a Terraform template (*.tpl).  The
# ${ALL_CAPS} brace expressions such as ${SPLUNK_PACKAGE_URL} and
# ${SPLUNK_ADMIN_PASSWORD} are presumably substituted by templatefile()
# before the script runs, while runtime shell variables are deliberately
# written WITHOUT braces (e.g. $SPLUNK_HOME) -- confirm before editing
# any brace usage.
set -e
set -x
# log MESSAGE: echo with timestamp and mirror into the instance's
# guest-attributes so install progress is visible from outside the VM.
log() {
echo "`date`: $1";
curl -X PUT --data "$1" http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/splunk/install-status -H "Metadata-Flavor: Google"
}
export SPLUNK_USER=splunk
export SPLUNK_BIN=/opt/splunk/bin/splunk
export SPLUNK_HOME=/opt/splunk
export SPLUNK_DB_MNT_DIR=/mnt/splunk_db
# Role (e.g. IDX-Master / IDX-Peer / SHC-Deployer / SHC-Member) and local IP
# come from the GCE instance metadata server.
export SPLUNK_ROLE="$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/splunk-role -H "Metadata-Flavor: Google")"
export LOCAL_IP="$(curl http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip -H "Metadata-Flavor: Google")"
curl -X PUT --data "in-progress" http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/splunk/install -H "Metadata-Flavor: Google"
# Determine if this is first-time boot of a new VM as opposed to a VM restart (or a VM recreate from MIG auto-healer).
# In the latter cases, no additional configuration is needed, and just exit startup script.
# Note: the exception here is MIG-recreated VMs with local SSDs as data disks. Unlike re-attached preserved PD,
# the SSDs disks are recreated and need to be re-formatted and re-striped. TODO: add that logic below.
# More info: https://cloud.google.com/compute/docs/instance-groups/autohealing-instances-in-migs#autohealing_and_disks
if [[ -d "$SPLUNK_HOME" ]]; then
log "Splunk installation found. Skipping node configuration."
exit 0
fi
log "Downloading and installing Splunk..."
# Download & install Splunk Enterprise
wget -O ${SPLUNK_PACKAGE_NAME} "${SPLUNK_PACKAGE_URL}"
tar zxf ${SPLUNK_PACKAGE_NAME}
mv splunk $SPLUNK_HOME
rm ${SPLUNK_PACKAGE_NAME}
log "Creating Splunk system user..."
# Create Splunk system user, and set directory permissions
if ! id $SPLUNK_USER >/dev/null 2>&1; then
useradd -r -m -s /bin/bash -U $SPLUNK_USER
fi
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME
log "Configuring data disks (if any)..."
# All non-boot block devices (everything except /dev/sda*).
export DATA_DISKS=`ls /dev/sd* | egrep -v '^/dev/sda[0-9]*'`
declare OVERRIDE_SPLUNK_DB_LOCATION=0
# If Data PD attached, format+mount it and override SPLUNK_DB location
if [[ -h /dev/disk/by-id/google-persistent-disk-1 ]]; then
log "Mountaing data PD for Splunk DB"
DATA_DISK=$(readlink /dev/disk/by-id/google-persistent-disk-1)
DATA_DISK_ID=$(basename $DATA_DISK)
# Confirm this is first boot based on data mount point existence
if [[ ! -e $SPLUNK_DB_MNT_DIR ]]; then
mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/$DATA_DISK_ID
mkdir -p $SPLUNK_DB_MNT_DIR
mount -o discard,defaults /dev/$DATA_DISK_ID $SPLUNK_DB_MNT_DIR
OVERRIDE_SPLUNK_DB_LOCATION=1
fi
# If Local SSDs attached (in SCSI mode), format+stripe+mount them and override SPLUNK_DB location
elif [[ $DATA_DISKS != "" ]]; then
DATA_DISKS_CNT=$(echo $DATA_DISKS | tr ' ' '\n' | wc -l)
DATA_DISK_ID='md0'
# Confirm this is first boot based on data mount point existence
if [[ ! -e $SPLUNK_DB_MNT_DIR ]]; then
# Stripe local SSDs into single RAID0 array
mdadm --create /dev/$DATA_DISK_ID --level=0 --raid-devices=$DATA_DISKS_CNT $DATA_DISKS
# Format full array
mkfs.ext4 -F /dev/$DATA_DISK_ID
mkdir -p $SPLUNK_DB_MNT_DIR
mount -o discard,defaults,nobarrier /dev/$DATA_DISK_ID $SPLUNK_DB_MNT_DIR
OVERRIDE_SPLUNK_DB_LOCATION=1
fi
fi
# Set Splunk DB location
if [[ $OVERRIDE_SPLUNK_DB_LOCATION -eq 1 ]]; then
# Grant access to Splunk system user
chown $SPLUNK_USER:$SPLUNK_USER $SPLUNK_DB_MNT_DIR
# Persist mount in fstab for instance restarts
echo UUID=$(blkid -s UUID -o value /dev/$DATA_DISK_ID) $SPLUNK_DB_MNT_DIR ext4 discard,defaults,nofail 0 2 | tee -a /etc/fstab
# Point SPLUNK_DB to data disk mount directory
cp $SPLUNK_HOME/etc/splunk-launch.conf.default $SPLUNK_HOME/etc/splunk-launch.conf
sed -i "/SPLUNK_DB/c\SPLUNK_DB=$SPLUNK_DB_MNT_DIR" $SPLUNK_HOME/etc/splunk-launch.conf
chown $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/splunk-launch.conf
fi
log "Configuring Splunk installation..."
# Work around for having to pass admin pass
cd ~
mkdir .splunk
chmod 777 -R .splunk
touch .splunk/authToken_hostname_port
chmod 600 .splunk/authToken_hostname_port
cd $SPLUNK_HOME
# Set Splunk admin password and disable first-time run password prompt
cat >>$SPLUNK_HOME/etc/system/local/user-seed.conf <<end
[user_info]
USERNAME = admin
PASSWORD = ${SPLUNK_ADMIN_PASSWORD}
end
touch $SPLUNK_HOME/etc/.ui_login
# Configure systemd to start Splunk at boot
cd /opt/splunk
bin/splunk enable boot-start -user $SPLUNK_USER --accept-license -systemd-managed 0
# Configure Splunk before starting service
# Increase splunkweb connection timeout with splunkd
mkdir -p $SPLUNK_HOME/etc/apps/base-autogenerated/local
cat >>$SPLUNK_HOME/etc/apps/base-autogenerated/local/web.conf <<end
[settings]
splunkdConnectionTimeout = 300
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/apps/base-autogenerated
log "Starting Splunk Service..."
# Start Splunk service
sudo /etc/init.d/splunk start
# Allow for Splunk start-up time
sleep 10
# ---- Role-specific configuration ----
# NOTE(review): $SPLUNK_ROLE is unquoted in these [ ] comparisons; an empty
# role value from metadata would make the test fail with a syntax error --
# confirm the metadata attribute is always set.
if [ $SPLUNK_ROLE = "IDX-Master" ]; then
log "Cluster Master configuration"
# Change default to HTTPS on the web interface
# cat >>$SPLUNK_HOME/etc/system/local/web.conf <<end
# [settings]
# enableSplunkWebSSL = 1
# end
# Forward to indexer cluster using indexer discovery
cat >>$SPLUNK_HOME/etc/apps/base-autogenerated/local/outputs.conf <<end
# Turn off indexing
[indexAndForward]
index = false
[tcpout]
defaultGroup = indexer_cluster_peers
forwardedindex.filter.disable = true
indexAndForward = false
[tcpout:indexer_cluster_peers]
indexerDiscovery = cluster_master
[indexer_discovery:cluster_master]
pass4SymmKey = ${SPLUNK_INDEXER_DISCOVERY_SECRET}
master_uri = https://127.0.0.1:8089
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/apps/base-autogenerated
sudo -u $SPLUNK_USER $SPLUNK_BIN login -auth admin:'${SPLUNK_ADMIN_PASSWORD}'
sudo -u $SPLUNK_USER $SPLUNK_BIN edit cluster-config -mode master -replication_factor 3 -search_factor 2 -secret '${SPLUNK_CLUSTER_SECRET}' -cluster_label Splunk-IDX
# Configure indexer discovery - pass4SymmKey doesn't get hashed
cat >>$SPLUNK_HOME/etc/system/local/server.conf <<end
[indexer_discovery]
pass4SymmKey = ${SPLUNK_INDEXER_DISCOVERY_SECRET}
indexerWeightByDiskCapacity = true
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/system/local/server.conf
# Add base configs for peer nodes as an app under master-apps
# Peer config 1: Enable HEC input
sudo -u $SPLUNK_USER $SPLUNK_BIN http-event-collector enable -uri https://localhost:8089 -auth admin:'${SPLUNK_ADMIN_PASSWORD}'
sudo -u $SPLUNK_USER $SPLUNK_BIN http-event-collector create default-token -uri https://localhost:8089 -auth admin:'${SPLUNK_ADMIN_PASSWORD}' > /tmp/token
TOKEN=`sed -n 's/\\ttoken=//p' /tmp/token`
rm /tmp/token
log "Setting HEC Token as guest attribute"
curl -X PUT --data "$TOKEN" http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/splunk/token -H "Metadata-Flavor: Google"
mkdir -p $SPLUNK_HOME/etc/master-apps/peer-base-autogenerated/local
mv $SPLUNK_HOME/etc/apps/splunk_httpinput/local/inputs.conf $SPLUNK_HOME/etc/master-apps/peer-base-autogenerated/local
# Peer config 2: Enable splunktcp input
cat >>$SPLUNK_HOME/etc/master-apps/peer-base-autogenerated/local/inputs.conf <<end
[splunktcp://9997]
disabled=0
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/master-apps
else
# Link up with License Master
# TODO: Add following when enterprise license installed on cluster master
#sudo -u $SPLUNK_USER $SPLUNK_BIN edit licenser-localslave -master_uri https://${SPLUNK_CM_PRIVATE_IP}:8089 -auth admin:'${SPLUNK_ADMIN_PASSWORD}'
log "Skip license master link up"
fi
if [ $SPLUNK_ROLE = "SHC-Deployer" ]; then
log "Deployer configurations"
# Change default to HTTPS on the web interface
# cat >>$SPLUNK_HOME/etc/system/local/web.conf <<end
# [settings]
# enableSplunkWebSSL = 1
# end
# Configure some SHC parameters
cat >>$SPLUNK_HOME/etc/apps/base-autogenerated/local/server.conf <<end
[shclustering]
pass4SymmKey = ${SPLUNK_CLUSTER_SECRET}
shcluster_label = SplunkSHC
end
# Forward to indexer cluster using indexer discovery
cat >>$SPLUNK_HOME/etc/apps/base-autogenerated/local/outputs.conf <<end
# Turn off indexing on the search head
[indexAndForward]
index = false
[tcpout]
defaultGroup = indexer_cluster_peers
forwardedindex.filter.disable = true
indexAndForward = false
[tcpout:indexer_cluster_peers]
indexerDiscovery = cluster_master
[indexer_discovery:cluster_master]
pass4SymmKey = ${SPLUNK_INDEXER_DISCOVERY_SECRET}
master_uri = https://${SPLUNK_CM_PRIVATE_IP}:8089
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/apps/base-autogenerated
# Add base config for search head cluster members
mkdir -p $SPLUNK_HOME/etc/shcluster/apps/member-base-autogenerated/local
cat >>$SPLUNK_HOME/etc/shcluster/apps/member-base-autogenerated/local/outputs.conf <<end
# Turn off indexing on the search head
[indexAndForward]
index = false
[tcpout]
defaultGroup = indexer_cluster_peers
forwardedindex.filter.disable = true
indexAndForward = false
[tcpout:indexer_cluster_peers]
indexerDiscovery = cluster_master
[indexer_discovery:cluster_master]
pass4SymmKey = ${SPLUNK_INDEXER_DISCOVERY_SECRET}
master_uri = https://${SPLUNK_CM_PRIVATE_IP}:8089
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/shcluster/apps/member-base-autogenerated
sudo -u $SPLUNK_USER $SPLUNK_BIN apply shcluster-bundle -action stage --answer-yes -auth admin:'${SPLUNK_ADMIN_PASSWORD}'
# TODO: send bundle after SHC is initialized with captain bootstrapped
elif [ $SPLUNK_ROLE = "SHC-Member" ]; then
log "Search Head Member configurations"
# Configure some SHC parameters
cat >>$SPLUNK_HOME/etc/system/local/server.conf <<end
[shclustering]
register_replication_address = $LOCAL_IP
end
chown -R $SPLUNK_USER:$SPLUNK_USER $SPLUNK_HOME/etc/system/local
log "Setting cluster config and connecting to master"
# Sometimes the master is restarting at the same time, retry up to 5 times
command="sudo -u $SPLUNK_USER $SPLUNK_BIN login -auth admin:'${SPLUNK_ADMIN_PASSWORD}' && \
sudo -u $SPLUNK_USER $SPLUNK_BIN init shcluster-config -mgmt_uri https://$LOCAL_IP:8089 -replication_port 8090 -replication_factor 2 -conf_deploy_fetch_url https://${SPLUNK_DEPLOYER_PRIVATE_IP}:8089 -shcluster_label Splunk-SHC -secret '${SPLUNK_CLUSTER_SECRET}' && \
sudo -u $SPLUNK_USER $SPLUNK_BIN edit cluster-config -mode searchhead -master_uri https://${SPLUNK_CM_PRIVATE_IP}:8089 -secret '${SPLUNK_CLUSTER_SECRET}'"
count=1;until eval $command || (( $count >= 5 )); do sleep 10; count=$((count + 1)); done
elif [ $SPLUNK_ROLE = "IDX-Peer" ]; then
log "Setting cluster config and connecting to master"
# Sometimes the master is restarting at the same time, retry up to 5 times
command="sudo -u $SPLUNK_USER $SPLUNK_BIN login -auth admin:'${SPLUNK_ADMIN_PASSWORD}' && \
sudo -u $SPLUNK_USER $SPLUNK_BIN edit cluster-config -mode slave -master_uri https://${SPLUNK_CM_PRIVATE_IP}:8089 -replication_port 9887 -secret '${SPLUNK_CLUSTER_SECRET}'"
count=1;until eval $command || (( $count >= 5 )); do sleep 10; count=$((count + 1)); done
# Override Splunk server name of peer node by adding a random number from 0 to 999 as suffix to hostname
SUFFIX=$(cat /dev/urandom | tr -dc '0-9' | fold -w 256 | head -n 1 | sed -e 's/^0*//' | head --bytes 3)
if [ "$SUFFIX" == "" ]; then SUFFIX=0; fi
sudo -u $SPLUNK_USER $SPLUNK_BIN set servername $(hostname)-$SUFFIX
fi
# Removing temporary permissive .splunk directory
cd ~
rm -Rf .splunk
log "Final restart of services"
# Start Splunk service - changed with 8.0.0 - sometimes it gets an error connecting to it's local web server
command="/etc/init.d/splunk restart"
count=1;until eval $command || (( $count >= 5 )); do sleep 10; count=$((count + 1)); done
# Add guest attribute indicating the install process has successfully completed
curl -X PUT --data "complete" http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/splunk/install -H "Metadata-Flavor: Google"
log "Finished setup on $HOSTNAME with role $SPLUNK_ROLE"
exit 0
| true
|
44f66a1dbe081875b266c0cfc05a9957a577f96a
|
Shell
|
demhydraz/til
|
/bin/upsl
|
UTF-8
| 219
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# upsl <start> <end> <file>
#
# Copies lines <start>..<end> of <file> into a temp file named after the
# original (with the line range embedded), uploads it via `up h`, then
# removes the temp file.  Does nothing if <file> does not exist.
st=$1; shift
en=$1; shift
fl=$1; shift
ext=${fl##*.}
tmp="/tmp/$(basename -s ".$ext" "$fl").$st-$en.$ext"
if [[ -f "$fl" ]]; then
  # Fix: quote "$tmp" throughout -- the temp name is derived from the input
  # filename and may contain spaces/globs; the original left it unquoted in
  # the sed redirection, the `up` call, and `rm`.
  sed -n "$st,${en}p" "$fl" > "$tmp"
  up h "$tmp"
  # It is a single regular file, so plain -f suffices (original used -rf);
  # `--` guards against names starting with a dash.
  rm -f -- "$tmp"
fi
| true
|
64b2a0ee8a9db4b8679c6e32d4c020abcb2ceb18
|
Shell
|
octobot-dev/react-boilerplate
|
/scripts/go-script-bash/tests/modules/help.bats
|
UTF-8
| 1,598
| 3.328125
| 3
|
[
"ISC",
"LicenseRef-scancode-free-unknown",
"MIT"
] |
permissive
|
#! /usr/bin/env bats
# Bats test suite for the go-script-bash "modules -h" help subcommand.
# NOTE: must run under bats, not plain bash -- @test blocks are rewritten
# by the bats preprocessor.
load ../environment
load helpers
# Build a throwaway ./go script plus test modules before each case; tear the
# scratch tree down afterwards.
setup() {
test_filter
@go.create_test_go_script '@go "$@"'
setup_test_modules
}
teardown() {
@go.remove_test_go_rootdir
}
@test "$SUITE: no args shows module system help" {
run "$TEST_GO_SCRIPT" 'modules' '-h'
assert_output_matches '^\$_GO_USE_MODULES - '
}
# -h, -help, and --help must all produce byte-identical output.
@test "$SUITE: accept -h, -help, and --help as synonyms" {
run "$TEST_GO_SCRIPT" modules -h
assert_success
local help_output="$output"
run "$TEST_GO_SCRIPT" modules -help
assert_success "$help_output"
run "$TEST_GO_SCRIPT" modules --help
assert_success "$help_output"
}
@test "$SUITE: --help honored" {
run "$TEST_GO_SCRIPT" 'modules' '--help'
assert_output_matches '^\$_GO_USE_MODULES - '
}
@test "$SUITE: error if more than one module specified" {
run "$TEST_GO_SCRIPT" 'modules' '-h' '_foo/_plugh' '_bar/_quux'
assert_failure 'Please specify only one module name.'
}
@test "$SUITE: error if module does not exist" {
run "$TEST_GO_SCRIPT" 'modules' '-h' '_foo/_frotz'
assert_failure 'Unknown module: _foo/_frotz'
}
# Simulate an unreadable module file to exercise the parse-failure path
# (skipped where file permissions cannot be made to fail, e.g. running as
# root).
@test "$SUITE: error if parsing description fails" {
skip_if_cannot_trigger_file_permission_failure
local module_path="$TEST_GO_PLUGINS_DIR/_foo/lib/_plugh"
chmod ugo-r "$module_path"
run "$TEST_GO_SCRIPT" 'modules' '-h' '_foo/_plugh'
assert_failure
assert_output_matches "ERROR: failed to parse description from $module_path\$"
}
@test "$SUITE: print help from the module's header comment" {
run "$TEST_GO_SCRIPT" 'modules' '-h' '_foo/_plugh'
assert_success '_foo/_plugh - Summary for _foo/_plugh'
}
| true
|
bb837b1432f19f0980031fda9acf192aaa0b0e14
|
Shell
|
nolim1t/bitcoin-merchants
|
/scripts/rename-tool.sh
|
UTF-8
| 919
| 3.25
| 3
|
[
"Unlicense"
] |
permissive
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Pre-build step for Jekyll: wraps README.md and LICENSE in front matter so
# they render as site pages (run before `jekyll build`).
# Slurp the source documents (command substitution trims trailing newlines).
READMECONTENTS=$(cat README.md)
LICENSECONTENTS=$(cat LICENSE)
# Publish README.md as the default index (/)
cat >./index.md <<EOF
---
layout: default
permalink: /
---
${READMECONTENTS}
EOF
# Publish LICENSE as /license/
cat >./LICENSE.md <<EOF
---
layout: default
permalink: /license/
---
# License
${LICENSECONTENTS}
EOF
| true
|
e766705771d9717a3c39ff9448a00f29385b4eed
|
Shell
|
simta/simta-admin
|
/simqc
|
UTF-8
| 4,566
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# simqc: does queue tricks, plays the fight song
#
# -s : prints out stats on slow queue
# -f : prints out stats on fast queue
# -# : sets number to be used by head in -s and -f, default is 10
# -d : prints out queue stats for last days ( equiv to -t 24 )
# -t hours : prints out queue stats for last [hours] hours
# -n : prints out the name of the newest queue file in simta/etc
# Use a fixed, trusted PATH (includes the local rsug tools directory).
export PATH=/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin:/usr/rsug/bin
name=simqc
# Scratch directory for the per-queue stat files built by checkq.
# NOTE(review): no trap/cleanup for $stats is visible in this part of the
# script -- confirm it is removed later.
stats=$(mktemp -d /tmp/${name}.XXXXXXXX)
simtadir=/var/spool/simta
# Domain excluded from the per-IP/subnet reports in checkq.
ignore=umich.edu
# Option flags (0 = off); presumably toggled by option parsing later in the
# script (not visible here) -- the flag names mirror the usage text above.
slow_opt=0
fast_opt=0
local_opt=0
dead_opt=0
ip_opt=0
authuser_opt=0
subject_opt=0
# Count passed to `head` when listing top entries (-# option).
head_opt=10
day_opt=0
time_opt=0
newest_opt=0
# Print a one-line synopsis to stdout and abort with status 1.
usage() {
    printf 'Usage: %s [-sfldaiS] [-h num] [-D] [ -t hours ] | [-n]\n' "$name"
    exit 1
}
# Print all arguments verbatim to stderr and abort with status 1.
# BUG FIX: the original `echo $* 1>&2` left $* unquoted, so argument
# whitespace was collapsed and glob characters were expanded (SC2086);
# printf with a quoted "$*" preserves the message exactly.
error() {
    printf '%s\n' "$*" 1>&2
    exit 1
}
# checkq QUEUE - summarize one simta queue directory ($simtadir/$QUEUE).
# Lists the queue's envelope (E*) files, then prints the top recipients,
# hosts and senders (each limited to $head_opt lines). With -a, -i or -S
# it additionally reports auth users, client IPs/subnets and subjects
# pulled from the corresponding data (D*) files.
checkq(){
queue=$1
# Collect the envelope-file paths once; later steps re-read this list.
find $simtadir/$queue -maxdepth 1 -ignore_readdir_race -name E\* -type f > $stats/$queue.list
# `xargs cat` is used to avoid jq dying when a file goes away
cat $stats/$queue.list | xargs cat 2>/dev/null | jq -r .recipients[] | sort | uniq -c | sort -rn > $stats/$queue.sortedrecip
echo "Top $queue recipients"
head -n $head_opt $stats/$queue.sortedrecip
echo ""
cat $stats/$queue.list | xargs cat 2>/dev/null | jq -r .hostname | sort | uniq -c | sort -rn > $stats/$queue.sortedhosts
echo "Top $queue hosts"
head -n $head_opt $stats/$queue.sortedhosts
echo ""
cat $stats/$queue.list | xargs cat 2>/dev/null | jq -r .sender | sort | uniq -c | sort -rn > $stats/$queue.sortedfrom
echo "Top $queue senders"
head -n $head_opt $stats/$queue.sortedfrom
echo ""
if [[ $authuser_opt -eq 1 ]]; then
# Derive the data-file names from the envelope list.
# NOTE(review): `sed 's/E/D/'` replaces the FIRST 'E' anywhere in each
# path, not necessarily the filename's leading E - verify no parent
# directory component contains an 'E'.
sed 's/E/D/' $stats/$queue.list | xargs -r fgrep -h 'auth=pass smtp.auth=' | sed -e 's/.*=//' > $stats/$queue.authusers
sort -n $stats/$queue.authusers | uniq -c | sort -rn > $stats/$queue.sortedauthusers
echo "Top $queue authusers"
head -n $head_opt $stats/$queue.sortedauthusers
echo ""
fi
if [[ $ip_opt -eq 1 ]]; then
# Extract the client IP from the policy.iprev annotation, skipping the
# $ignore domain entirely.
sed 's/E/D/' $stats/$queue.list | xargs -r fgrep 'policy.iprev' | grep -v $ignore | sed -e 's/.*policy.iprev=//; s/ .*//' > $stats/$queue.ips
sort -n $stats/$queue.ips | uniq -c | sort -rn > $stats/$queue.sortedips
echo "Top $queue non-$ignore IPs"
head -n $head_opt $stats/$queue.sortedips
echo ""
# /24-ish grouping: keep the first three dotted octets.
cut -d. -f1-3 $stats/$queue.ips | sort -n | uniq -c | sort -rn > $stats/$queue.sortedsubnets
echo "Top $queue non-$ignore subnets"
head -n $head_opt $stats/$queue.sortedsubnets
echo ""
fi
if [[ $subject_opt -eq 1 ]]; then
# Pair each message's auth user (if any) with its Subject header; stop
# scanning each file at the first blank line (end of headers).
sed 's/E/D/' $stats/$queue.list | xargs -r awk '/auth=pass smtp.auth=/{ auth=$NF } /^Subject:/{ print auth, $0} /^$/{ exit }' | sort -n | uniq -c | sort -rn > $stats/$queue.subjects
echo "Top $queue subjects"
head -n $head_opt $stats/$queue.subjects
echo ""
fi
}
# qstats MINUTES - total-queue statistics over the last MINUTES minutes.
# For each queue_schedule.* file modified within the window, sums its
# second column, then prints max/min/avg/median of those per-file totals.
qstats() {
minutes=$1
if [[ -d $simtadir/etc ]]; then
for x in $(find $simtadir/etc -ignore_readdir_race -name queue_schedule.\* -type f -mmin -${minutes}); do
awk -v f=$x '{ s += $2 } END { print f,s }' $x
done | sort -n -k 2 > $stats/tmpfile
# tmpfile is sorted by total, so min is line 1, max is the last line and
# the median is the middle line.
adqavg=$(awk '{ s += $2 } END { print s/NR }' $stats/tmpfile)
adqmin=$(awk 'NR==1 { print $2 }' $stats/tmpfile)
adqmax=$(awk 'END { print $2 }' $stats/tmpfile)
admed=$(( $(awk 'END { print NR }' $stats/tmpfile) / 2 ))
adqmed=$(awk -v m=$admed 'NR==m { print $2 }' $stats/tmpfile)
echo -e "$(( $minutes / 60 )) hour total queue stats"
echo -e "max: $adqmax min: $adqmin avg: $adqavg med: $adqmed"
else
error $simtadir/etc does not exist
fi
}
# Parse command-line options; see the usage comment at the top of the file.
while getopts adDfh:ilsSt: opt; do
case $opt in
a) authuser_opt=1
;;
d) dead_opt=1
;;
D) day_opt=1
;;
f) fast_opt=1
;;
h) head_opt="$OPTARG"
;;
i) ip_opt=1
;;
l) local_opt=1
;;
s) slow_opt=1
;;
S) subject_opt=1
;;
t) time_opt="$OPTARG"
;;
*) usage
;;
esac
done
# Emit per-queue reports for whichever queues were selected. (With no
# arguments at all, none of the inner flags are set and nothing prints.)
if [[ $# -eq 0 ]] || [[ $slow_opt -eq 1 || $fast_opt -eq 1 || $local_opt -eq 1 ]] || [[ $dead_opt -eq 1 ]]; then
if [[ $slow_opt -eq 1 ]]; then
checkq slow
fi
if [[ $fast_opt -eq 1 ]]; then
checkq fast
fi
if [[ $local_opt -eq 1 ]]; then
checkq local
fi
if [[ $dead_opt -eq 1 ]]; then
checkq dead
fi
fi
# -D: stats for the last day; -t N: stats for the last N hours.
if [[ $day_opt -eq 1 ]]; then
qstats $(( 60 * 24 ))
elif [[ $time_opt -gt 0 ]]; then
qstats $(( 60 * time_opt ))
fi
# Remove the per-run scratch directory.
rm -rf $stats
| true
|
69aeb443066263952103a5acf4b9e75c979be648
|
Shell
|
danailbd/bulgarian_dictionary
|
/generate_dict.sh
|
UTF-8
| 1,065
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a Bulgarian Kindle dictionary (dict.mobi) from the chitanka.info
# word database.
#
# Arguments:
#   $2 - MySQL root password (default: empty)
#   $3 - scratch directory (default: /tmp)
#   $4 - name of the throw-away MySQL database (default: rechko)
#
# BUG FIX: the original shebang was #!/usr/bin/sh, but the script uses a
# bash here-string (<<<) at the bottom, which POSIX sh (dash) rejects -
# it must run under bash.
# TODO ADD OPTIONS
ROOTPWD=${2:-""}
# Was ${3-"/tmp"}; use :- like the other defaults so an *empty* argument
# also falls back to /tmp.
TMPPATH=${3:-"/tmp"}
TMP_DATABASE_NAME=${4:-"rechko"}
# TODO ADD REQUIREMENTS
# * pv, penelope, kindlegen
# * mysql with root
# TODO ensure dependences
# pv, penelope, kindlegen
# pip install --user penelope
# yaourt -S kindlegen
echo "Downloading file"
wget -P "$TMPPATH/" https://rechnik.chitanka.info/db.sql.gz
echo "Extracting file"
gzip -d -v "$TMPPATH/db.sql.gz"
# TODO ensure permissions for import mysql
echo "PREPARE DATABASE. This might take a while..."
# Prepend CREATE/USE statements so the dump imports into the scratch DB.
sed -i "1s/^/CREATE DATABASE IF NOT EXISTS ${TMP_DATABASE_NAME};\nuse ${TMP_DATABASE_NAME};\n/" "$TMPPATH/db.sql"
pv "$TMPPATH/db.sql" | mysql -u root --password="$ROOTPWD"
echo "EXTRACT dict.csv"
mysql -u root --password="$ROOTPWD" < ../lib/formatter.sql | sed 's/\t/,/g' > "$TMPPATH/dict.csv"
echo "GENERATE DICT"
penelope --description "Bulgarian Dictionary" -i "$TMPPATH/dict.csv" -j csv -f bg -t bg -p mobi -o dict.mobi
echo "CLEAR TEMP DATA"
rm "$TMPPATH/db.sql" "$TMPPATH/dict.csv"
mysql -u root --password="$ROOTPWD" <<<"DROP DATABASE $TMP_DATABASE_NAME;"
| true
|
db6fbe8212de010b67228e53859b7b455ba65d22
|
Shell
|
bAndie91/posix_signal_dispatcher
|
/gen_signames.sh
|
UTF-8
| 188
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# BUG FIX: pipefail was commented out because the old #!/bin/sh
# interpreter (dash) lacks it; without it a failing g++ stage went
# unnoticed and silently produced an empty signames.h. Run under bash
# and fail the pipeline when any stage fails.
set -o pipefail

# Generate signames.h: a C initializer table mapping signal names
# (without the SIG prefix) to their numeric values, extracted from
# <signal.h> via the preprocessor's #define dump (-dD).
echo "#include <signal.h>" |\
g++ -E -dD -xc++ - |\
perl -ne 'if(/^#define SIG([A-Z]+) (\d+|SIG[A-Z]+)/) { print "{ \"$1\", $2 },\n"; }' \
 > signames.h
| true
|
283c85885101179214de7cdfc62ff295f4bebdc6
|
Shell
|
obonyojimmy/meteorjs
|
/cli/plugins/meteor/alpinize.sh
|
UTF-8
| 597
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
set -e

# Repackage a Meteor server build for Alpine Linux: rebuild its native
# npm modules inside the current (Alpine) environment, re-tar the bundle
# and emit a Dockerfile that references it.
#
# Arguments:
#   $1 - build name; /meteor/<name>.tar.gz must already exist.

# Fail fast with a usage hint when the build name is missing (the
# original silently tried to untar "/meteor/.tar.gz").
METEORJS_BUILD_NAME=${1:?usage: alpinize.sh <build-name>}
METEORJS_BUILD_NAME_ALPINE="${METEORJS_BUILD_NAME}.alpine"

# Alpine build & install meteor
# -p tolerates re-runs; the original plain mkdir aborted under set -e
# when /meteorg already existed.
mkdir -p /meteorg
tar -xzf "/meteor/${METEORJS_BUILD_NAME}.tar.gz" -C /meteorg
(cd /meteorg/bundle/programs/server && npm i && npm run install)
(cd /meteorg && tar -czf "/meteor/${METEORJS_BUILD_NAME_ALPINE}" ./bundle)

# Dockerfile: substitute the build name into the template. Use '|' as
# the sed delimiter so a name containing '/' cannot break the
# expression, and read the template directly instead of piping cat.
sed "s|__BUILD__|${METEORJS_BUILD_NAME_ALPINE}|g" base > /meteor/Dockerfile

# Message
echo "Run following command to create the image:"
echo " $ docker build -t ${METEORJS_BUILD_NAME} ."
printf "\n"
echo "Successful alpine build created !!!"
| true
|
ff0390625a5b9fac6e8f141f7974b06ea98bf0b8
|
Shell
|
jerryz123/onnx-composer
|
/scripts/build-tools.sh
|
UTF-8
| 1,224
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the full RISC-V toolchain stack: rocket-tools, a RISC-V-targeted
# clang/LLVM, Halide, onnx and the onnx-halide frontend. Everything
# installs under $RISCV, which must be set by the environment.
set -e
set -o pipefail
# Build riscv tools
(cd rocket-tools && git apply ../scripts/build.patch &&
./build.sh)
# Build LLVM
# Out-of-tree build; clang only, RISCV backend only, defaulting to the
# riscv64-unknown-elf bare-metal triple with $RISCV as sysroot.
(cd llvm-project && mkdir -p build && cd build && \
cmake -DCMAKE_BUILD_TYPE="Release" \
-DLLVM_ENABLE_PROJECTS=clang \
-DBUILD_SHARED_LIBS=False \
-DLLVM_USE_SPLIT_DWARF=True \
-DCMAKE_INSTALL_PREFIX="$RISCV" \
-DLLVM_OPTIMIZED_TABLEGEN=True \
-DLLVM_BUILD_TESTS=False \
-DDEFAULT_SYSROOT="$RISCV/riscv64-unknown-elf" \
-DLLVM_DEFAULT_TARGET_TRIPLE="riscv64-unknown-elf" \
-DLLVM_TARGETS_TO_BUILD="RISCV" \
../llvm)
(cd llvm-project/build && make -j16 install)
# Build Halide
(cd Halide && make -j16 install PREFIX=$RISCV)
# Install python dependencies
python3 -m pip install numpy protobuf pytest
# Build onnx
(cd onnx && git submodule update --init --recursive && \
python3 setup.py install)
# Install onnx-halide base
python3 -m pip install -e onnx-halide
| true
|
f194d5b20baadd57ab9b5ba50935113f3aee6db7
|
Shell
|
ed00m/Mayordomo
|
/Multi-threading/crea_data.sh
|
UTF-8
| 315
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate a pipe-delimited test-data file of random records.
# Output: data.txt, one "NAME|LASTNAME|FOLIO|SEQ" record per line.
# Usage: crea_data.sh [count]   (default: 1000000 records)
#
# BUG FIX: the original shebang was "#/bin/sh -x" - missing the '!', so
# it was treated as a comment and the script ran under whatever shell
# invoked it (and the intended -x trace never took effect).
set -e
set -v
# Record count, parameterized (was hard-coded to one million).
COUNT=${1:-1000000}
OBJECT=data.txt
rm -f "${OBJECT}"
for i in $(seq "$COUNT")
do
  # Ten random alphanumeric characters for the names, ten digits for the
  # folio, all drawn from /dev/urandom.
  NAME=$(tr -dc A-Za-z0-9 < /dev/urandom|head -c 10)
  LASTNAME=$(tr -dc A-Za-z0-9 < /dev/urandom|head -c 10)
  FOLIO=$(tr -dc 0-9 < /dev/urandom|head -c 10)
  echo "${NAME}|${LASTNAME}|${FOLIO}|${i}" >> "${OBJECT}"
done
exit 0
| true
|
9531785a31cf5d37fba6192c30d38bc1ad1d1b28
|
Shell
|
jethrosun/dotfiles
|
/bin/bin/deprecated/tmux_start.sh
|
UTF-8
| 930
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# https://github.com/swaroopch/flask-boilerplate
# Bootstrap a personal tmux workspace: session "foo" with three windows,
# fish started in windows 0 and 2, then attach to it.
BASE="$HOME/dev/"
cd $BASE
# the name of your primary tmux session
SESSION=foo
# your IRC nickname
# NOTE(review): IRC_NICK is never used below - confirm it can be removed.
IRC_NICK=$USER
tmux start-server
# NOTE(review): the session/window names are hard-coded to "foo" rather
# than using $SESSION; keep them in sync if the session name changes.
tmux new-session -d -s foo
tmux new-window -t foo:1
tmux new-window -t foo:2
#tmux new-window -t foo:3
#tmux new-window -t flaskboilerplate:4 -n console
#tmux new-window -t flaskboilerplate:5 -n tests
#tmux new-window -t flaskboilerplate:6 -n git
# Launch fish in windows 0 and 2 (C-m sends Enter).
tmux send-keys -t foo:0 " fish " C-m
tmux send-keys -t foo:2 " fish " C-m
#tmux send-keys -t flaskboilerplate:2 "cd $BASE/flask_application/controllers; ls" C-m
#tmux send-keys -t flaskboilerplate:3 "cd $BASE/flask_application/templates; ls" C-m
#tmux send-keys -t flaskboilerplate:4 "bpython -i play.py" C-m
#tmux send-keys -t flaskboilerplate:5 "python tests.py" C-m
#tmux send-keys -t flaskboilerplate:6 "git status" C-m
tmux select-window -t $SESSION:0
# -2 forces 256-color support.
tmux -2 attach -t $SESSION
| true
|
800d9aeae6ee1856a5272c5f64cc692a19ffbfee
|
Shell
|
elkrejzi/lfs-pacman
|
/pkgbuild/at-spi2-atk/PKGBUILD
|
UTF-8
| 744
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# Arch-style PKGBUILD for at-spi2-atk, built out-of-tree with meson/ninja.
pkgname=at-spi2-atk
pkgver=2.38.0
pkgrel=1
pkgdesc="A GTK+ module that bridges ATK to D-Bus at-spi"
arch=('x86_64')
url="https://wiki.gnome.org/Accessibility"
license=('GPL2')
#depends=()
# ${pkgver:0:4} yields the "2.38" series directory on the GNOME mirror.
source=("https://download.gnome.org/sources/at-spi2-atk/${pkgver:0:4}/at-spi2-atk-${pkgver}.tar.xz")
# Create the out-of-tree build directory used by build() and package().
prepare() {
install -v -dm755 "${srcdir}/build"
}
# Configure with meson and compile with ninja.
build() {
cd "${srcdir}/build"
meson --prefix /usr \
--sysconfdir /etc \
--localstatedir /var \
--buildtype=plain \
"${srcdir}/at-spi2-atk-${pkgver}"
ninja ${MAKEFLAGS}
}
# Stage the compiled files into the package root.
package() {
cd "${srcdir}/build"
DESTDIR="${pkgdir}" ninja install
}
sha512sums=('2f40ecbc55b0fbaa57ade952a75583bc8fbfde234cce9248489e9ae06e0597d98c2f4c77d8279758dec29da97e06cde5708d30a1238d91bebd023b2320f38528')
| true
|
82119b21831a42622f8a0c9b39913933ad1d9450
|
Shell
|
DericM/COMP8006_ASN03
|
/login_detect.sh
|
UTF-8
| 2,251
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# NOTE(review): the script uses bash-only syntax below (`function`,
# `[[ ]]`, `{1..20}`) despite the sh shebang - it must run under bash.
#USER DEFINED SECTION
#############################################################
# Block an IP after this many failed logins within the window.
MAX_LOGIN_ATTEMPTS=3
BLOCK_DURATION_SEC=20 #-1 for forever
# Delay between scans, in a form accepted by sleep (e.g. "5s").
SCRIPT_FREQUENCY=5s
#Monitor File Format
#------------Default Fedora
#MONITOR_FILE="/var/log/secure"
#DATE_FORMAT="+%b %e %H:%M:%S"
#FAILED_LOGIN_MARKER=sshd.\*Failed
#------------Generated Testfile
# Log file scanned for failed-login lines, the date format its
# timestamps use, and the pattern marking a failed attempt.
MONITOR_FILE="/root/Desktop/COMP8006_ASN03/testlog"
DATE_FORMAT="+"
FAILED_LOGIN_MARKER=abc123
#############################################################
#Globals
SCRIPT_NAME=$(basename "$0")
#Get logs from the source file that are newer than BLOCK_DURATION_SEC.
# Emits matching lines from $MONITOR_FILE on stdout; with
# BLOCK_DURATION_SEC=-1 the whole file is scanned.
function get_logs_from_source(){
if [ $BLOCK_DURATION_SEC -eq -1 ]
then
##get all ssh log entries
logs=$(grep $FAILED_LOGIN_MARKER $MONITOR_FILE)
else
# Compute the timestamp BLOCK_DURATION_SEC ago in the log's own format,
# then keep only lines that sort lexically after it.
# NOTE(review): ${BLOCK_DURATION_SEC#0} strips a single leading zero -
# presumably to avoid octal-looking values in `date` arithmetic; confirm.
date_option_string="$(date) - ${BLOCK_DURATION_SEC#0} seconds"
time_marker=$(date -d "$date_option_string" "$DATE_FORMAT")
awk_comparison="\$0 > \"$time_marker\""
#get all ssh log entries after designated time
logs=$(grep $FAILED_LOGIN_MARKER $MONITOR_FILE | awk "$awk_comparison")
fi
echo "$logs"
}
# Reduce the given text to just the IPv4 addresses it contains, one per
# line, preserving their order of appearance.
function strip_logs_to_ip(){
    local addr_regex='[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'
    local addresses
    addresses=$(printf '%s\n' "$1" | grep -o "$addr_regex")
    echo "$addresses"
}
#Sorts and groups ips into matching sets
#Removes sets with fewer than MAX_LOGIN_ATTEMPTS entries
function sort_group_remove(){
    # uniq -cd prefixes each repeated line with its count; keep addresses
    # seen at least $MAX_LOGIN_ATTEMPTS times.
    # BUG FIX: the original compared with a strict '>', which let an IP
    # with *exactly* MAX_LOGIN_ATTEMPTS failures escape blocking,
    # contradicting the documented behavior above.
    local offenders
    offenders=$(echo "$1" | uniq -cd | awk -v limit="$MAX_LOGIN_ATTEMPTS" '$1 >= limit{print $2}')
    echo "$offenders"
}
# Replace the firewall state with DROP rules for the given IPs.
# $1 - whitespace-separated list of addresses (relies on word-splitting).
# Requires root; flushes ALL existing rules and chains first.
function iptables_block_ips(){
#clear previous rules
iptables -F
iptables -X
for ip in $1
do
iptables -A INPUT -s "$ip" -j DROP
done
}
# One scan cycle: gather recent failed-login lines, reduce them to IPs,
# keep the repeat offenders, report them and install DROP rules.
function check_log_for_failed_logins() {
    local offenders
    offenders=$(get_logs_from_source)
    offenders=$(strip_logs_to_ip "$offenders")
    offenders=$(sort_group_remove "$offenders")
    echo "Blocked IPs:"
    echo "$offenders"
    iptables_block_ips "$offenders"
}
# Repeatedly scan the log and refresh the firewall rules.
function main(){
    # Iterations per minute at the configured frequency.
    # BUG FIX: the original line was `repeats=$(60 / $SCRIPT_FREQUENCY)`:
    # a command substitution that tried to *execute* "60" as a program,
    # and fed the non-numeric "5s" string into the division. Use
    # arithmetic expansion on the numeric part of SCRIPT_FREQUENCY.
    repeats=$(( 60 / ${SCRIPT_FREQUENCY%s} ))
    # NOTE(review): `repeats` was unused in the original as well - the
    # loop below is hard-coded to 20 iterations; confirm the intended
    # run duration before wiring it in.
    for i in {1..20}
    do
        check_log_for_failed_logins
        sleep "$SCRIPT_FREQUENCY"
    done
    # Alternative kept from the original: run forever instead.
    #while true
    #do
    #    check_log_for_failed_logins
    #    sleep $SCRIPT_FREQUENCY
    #done
}
#check_log_for_failed_logins
main
| true
|
00e9a6fc26ea4be3685f3bd858255c6cb17e2ca1
|
Shell
|
PennBBL/reward2018
|
/scripts/heudiconv/dicomInfo_joy.sh
|
UTF-8
| 904
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# A script for obtaining dicom info from a directory of dicoms, structured: {subjID}/{scanID}/{scantype}/dicoms/{dicom}.nii.gz
# Modified to get dicom info for recently downloaded XNAT dicoms in /data/joy/BBL/studies/reward2018
# obtain scan and session labels
# NOTE: the glob in $scans expands at the unquoted `for` expansion below,
# one directory per subject/session pair.
scans=/data/joy/BBL/studies/reward2018/dicomsFromXnat/*/*/
for sc in $scans; do
# Session and subject IDs are the 9th and 8th '/'-separated path fields.
# NOTE(review): these field positions are tied to the absolute path above;
# re-check them if the study root ever moves.
ses=$(echo $sc|cut -d'/' -f9);
subID=$(echo $sc|cut -d'/' -f8);
# USE SINGULARITY HERE TO RUN HEUDICONV FOR DICOM INFO
# note to replace axu with your chead name instead
/share/apps/singularity/2.5.1/bin/singularity run -B /data:/home/ttapera/data /data/joy/BBL/applications/heudiconv/heudiconv-latest.simg -d /home/ttapera/data/joy/BBL/studies/reward2018/dicomsFromXnat/{subject}/{session}/*.dcm.gz -o /home/ttapera/data/joy/BBL/studies/reward2018/dicomsFromXnat/output -f convertall -s ${subID} -ss ${ses} -c none --overwrite;
done
| true
|
4671f4313ebe9e89741b378f8eee2e7196748ff2
|
Shell
|
flxndn/bin
|
/graba_video.sh
|
UTF-8
| 618
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#readonly ARGS="$@"
#-------------------------------------------------------------------------------
# Record ~1 minute of video from the default V4L2 device into ~/video,
# naming the file with an ISO-8601 timestamp.
main() {
#-------------------------------------------------------------------------------
  local time=0:1:0       # capture duration (h:m:s)
  local dev=/dev/video0  # capture device
  local format=rgb24     # pixel format handed to streamer
  local rate=10          # frames per second
  local dir=~/video      # output directory

  # Create the output directory on first use.
  if [ ! -d "$dir" ]; then
    mkdir -p "$dir"
  fi

  # FIX: quote every expansion (the originals were unquoted, SC2086; a
  # path containing spaces would have been word-split).
  local output="$dir/$(date -Iseconds).avi"
  streamer -t "$time" -c "$dev" -f "$format" -r "$rate" -o "$output"
}
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
main
| true
|
50070f5e02639eeb1388bb408a0fdde0272da253
|
Shell
|
kaiostech/B2G
|
/scripts/updates.sh
|
UTF-8
| 891
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the B2G update smoke tests through marionette. Flags (--*) are
# passed through to marionette; any other argument selects specific
# tests (a file, directory or .ini manifest).
# Determine the absolute path of our location.
B2G_DIR=$(cd `dirname $0`/..; pwd)
# setup.sh is expected to define GECKO_PATH (and MARIONETTE_* defaults).
. $B2G_DIR/setup.sh
# Run standard set of tests by default. Command line arguments can be
# specified to run specific tests (an individual test file, a directory,
# or an .ini file).
TEST_PATH=$GECKO_PATH/testing/marionette/client/marionette/tests/update-tests.ini
MARIONETTE_FLAGS+=" --homedir=$B2G_DIR --type=b2g-smoketest"
# Split arguments into pass-through flags and test selections.
while [ $# -gt 0 ]; do
case "$1" in
--*)
MARIONETTE_FLAGS+=" $1" ;;
*)
MARIONETTE_TESTS+=" $1" ;;
esac
shift
done
MARIONETTE_TESTS=${MARIONETTE_TESTS:-$TEST_PATH}
echo "Running tests: $MARIONETTE_TESTS"
SCRIPT=$GECKO_PATH/testing/marionette/client/marionette/venv_b2g_update_test.sh
PYTHON=${PYTHON:-`which python`}
# Echo the exact command before running it, for reproducibility.
echo bash $SCRIPT "$PYTHON" $MARIONETTE_FLAGS $MARIONETTE_TESTS
bash $SCRIPT "$PYTHON" $MARIONETTE_FLAGS $MARIONETTE_TESTS
| true
|
b4015b8d93b78b2b3ecaceccc48cdd7212b1ea09
|
Shell
|
confine-project/confine-dist
|
/utils/vct/vct.sh
|
UTF-8
| 69,563
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
set -u # set -o nounset
#set -o errexit
# We want to expand aliases because
# in a fedora environment it might be useful
# to alias virsh to 'sudo virsh' otherwise it would ask for
# a password every time the command is executed.
# Aliasses set before this line (outside the script), will not be expanded.
shopt -s expand_aliases
LANG=C
# Compute VCT_DIR, the absolute location of VCT source and configuration.
# Please do not use paths relative to the current directory, but make them
# relative to VCT_DIR instead.
# If $0 has no slash the script was found via PATH; recover its absolute
# location from `type`.
if echo "$0" | grep -q /; then
VCT_FILE=$0
else
VCT_FILE=$(type "$0" | sed -ne 's#.* \(/.*\)#\1#p')
fi
VCT_DIR=$(dirname "$(readlink -f "$VCT_FILE")")
# Configuration precedence: defaults+overrides > vct.conf > defaults only.
if [ -f "$VCT_DIR/vct.conf.overrides" ]; then
. "$VCT_DIR/vct.conf.default"
. "$VCT_DIR/vct.conf.overrides"
elif [ -f "$VCT_DIR/vct.conf" ]; then
. "$VCT_DIR/vct.conf"
elif [ -f "$VCT_DIR/vct.conf.default" ]; then
. "$VCT_DIR/vct.conf.default"
fi
# MAIN_PID=$BASHPID
UCI_DEFAULT_PATH=$VCT_UCI_DIR
ERR_LOG_TAG='VCT'
# Shared helper libraries (err, variable_check, uci helpers, ...).
. "$VCT_DIR/lxc.functions"
. "$VCT_DIR/confine.functions"
##########################################################################
####### some general tools for convinience
##########################################################################
# The functions below can be used to selectively disable commands in dry run
# mode. For instance:
#
# # This would change system state, `vct_do` disables in dry run
# # and reports the change.
# vct_do touch /path/to/file
# # This would then fail in dry run mode, `vct_true` succeeds.
# if vct_true [ ! -f /path/to/file ]; then
# echo "Failed to create /path/to/file." >&2
# exit 1
# fi
#
# The snippet above works as usual in normal mode. The idiom `vct_true false
# || COMMAND` avoids running the whole COMMAND in dry run mode (useful for
# very complex commands).
# In dry-run mode do nothing and report success; otherwise execute the
# given command and propagate its exit status.
vct_true() {
    if [ -n "${VCT_DRY_RUN:-}" ]; then
        return 0
    fi
    "$@"
}
# Same as `vct_true()`, run command in shell.
# $@ is joined into a single `sh -c` command string (trailing words
# become positional parameters of the inner shell).
vct_true_sh() {
vct_true sh -c "$@"
}
# Execute the given command, unless in dry-run mode: then the command
# line is reported on stderr and success is returned without running it.
vct_do() {
    if [ -n "${VCT_DRY_RUN:-}" ]; then
        echo ">>>> $@ <<<<" >&2
        return 0
    fi
    "$@"
}
# Same as `vct_do()`, run command in shell.
# The argument string is executed via `sh -c`, honoring dry-run mode.
vct_do_sh() {
vct_do sh -c "$@"
}
# Same as `vct_do()`, run command with `sudo`.
# Unless VCT_SUDO_ASK=NO, the user is prompted first and may execute (y),
# skip the command but continue (s, returns 0), or abort (anything else:
# err() is called and 1 is returned).
vct_sudo() {
if [ "${VCT_DRY_RUN:-}" ]; then
vct_do sudo $@
return $?
fi
local QUERY=
if [ "$VCT_SUDO_ASK" != "NO" ]; then
echo "" >&2
echo "$0 wants to execute (VCT_SUDO_ASK=$VCT_SUDO_ASK set to ask):" >&2
echo ">>>> sudo $@ <<<<" >&2
read -p "Pleas type: y) to execute and continue, s) to skip and continue, or anything else to abort: " QUERY >&2
if [ "$QUERY" == "y" ] ; then
sudo $@
return $?
elif [ "$QUERY" == "s" ] ; then
return 0
fi
err $FUNCNAME "sudo execution cancelled: $QUERY"
return 1
fi
# VCT_SUDO_ASK=NO: run unconditionally.
# NOTE(review): $@ is unquoted throughout, so arguments containing
# whitespace are re-split - confirm callers rely on this.
sudo $@
return $?
}
# Shell-string variant of vct_sudo: run the argument via `sudo sh -c`,
# with the same dry-run and interactive-confirmation handling.
vct_sudo_sh() {
if [ "${VCT_DRY_RUN:-}" ]; then
vct_do sudo sh -c "$@"
return $?
fi
local QUERY=
if [ "$VCT_SUDO_ASK" != "NO" ]; then
echo "" >&2
echo "$0 wants to execute (VCT_SUDO_ASK=$VCT_SUDO_ASK set to ask):" >&2
echo ">>>> sudo sh -c $@ <<<<" >&2
read -p "Pleas type: y) to execute and continue, s) to skip and continue, or anything else to abort: " QUERY >&2
if [ "$QUERY" == "y" ] ; then
sudo sh -c "$@"
return $?
elif [ "$QUERY" == "s" ] ; then
return 0
fi
err $FUNCNAME "sudo execution cancelled: $QUERY"
return 1
fi
# VCT_SUDO_ASK=NO: run unconditionally.
sudo sh -c "$@"
return $?
}
# Probe the given host with a single ping (1 s deadline/timeout);
# literal IPv6 addresses (containing ':') are probed with ping6.
# Note: PING is deliberately left as a global, as in the original.
vct_do_ping() {
    case "$1" in
        *:*) PING="ping6 -c 1 -w 1 -W 1" ;;
        *)   PING="ping -c 1 -w 1 -W 1" ;;
    esac
    $PING $1
    return $?
}
##########################################################################
#######
##########################################################################
# Validate the VCT configuration sourced from vct.conf*: assert required
# variables are set, then decompose the node/slice template URLs into
# SITE + NAME + TYPE + COMP(ression) parts used by the installers.
vct_system_config_check() {
variable_check VCT_SUDO_ASK quiet
variable_check VCT_VIRT_DIR quiet
variable_check VCT_SYS_DIR quiet
variable_check VCT_DL_DIR quiet
variable_check VCT_RPC_DIR quiet
variable_check VCT_MNT_DIR quiet
variable_check VCT_UCI_DIR quiet
variable_check VCT_DEB_PACKAGES quiet
variable_check VCT_RPM_PACKAGES quiet
variable_check VCT_USER quiet
variable_check VCT_BRIDGE_PREFIXES quiet
variable_check VCT_TOOL_TESTS quiet
variable_check VCT_INTERFACE_MODEL quiet
variable_check VCT_INTERFACE_MAC24 quiet
variable_check VCT_SSH_OPTIONS quiet
variable_check VCT_TINC_PID quiet
variable_check VCT_TINC_LOG quiet
variable_check VCT_TINC_START quiet
# Typical cases:
# VCT_NODE_TEMPLATE_URL="http://media.confine-project.eu/vct/openwrt-x86-generic-combined-ext4.img.tgz"
# VCT_NODE_TEMPLATE_URL="ssh:22:user@example.org:///confine/confine-dist/openwrt/bin/x86/openwrt-x86-generic-combined-ext4.img.gz"
# VCT_NODE_TEMPLATE_URL="file:///../../openwrt/bin/x86/openwrt-x86-generic-combined-ext4.img.gz"
variable_check VCT_NODE_TEMPLATE_URL quiet
# Compression suffix: tgz, tar.gz or plain gz (first match wins).
VCT_NODE_TEMPLATE_COMP=$( ( echo $VCT_NODE_TEMPLATE_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" ) ||\
                          ( echo $VCT_NODE_TEMPLATE_URL | grep -e "\.tar\.gz$" >/dev/null && echo "tar.gz" ) ||\
                          ( echo $VCT_NODE_TEMPLATE_URL | grep -e "\.gz$" >/dev/null && echo "gz" ) )
variable_check VCT_NODE_TEMPLATE_COMP quiet
# Filesystem image type: second-to-last dot-separated component before
# the compression suffix (e.g. "img" in ...-ext4.img.gz).
VCT_NODE_TEMPLATE_TYPE=$(echo $VCT_NODE_TEMPLATE_URL | awk -F"$VCT_NODE_TEMPLATE_COMP" '{print $1}' | awk -F'.' '{print $(NF-1)}')
variable_check VCT_NODE_TEMPLATE_TYPE quiet
VCT_NODE_TEMPLATE_NAME=$(echo $VCT_NODE_TEMPLATE_URL | awk -F'/' '{print $(NF)}' | awk -F".${VCT_NODE_TEMPLATE_TYPE}.${VCT_NODE_TEMPLATE_COMP}" '{print $1}')
variable_check VCT_NODE_TEMPLATE_NAME quiet
VCT_NODE_TEMPLATE_SITE=$(echo $VCT_NODE_TEMPLATE_URL | awk -F"${VCT_NODE_TEMPLATE_NAME}.${VCT_NODE_TEMPLATE_TYPE}.${VCT_NODE_TEMPLATE_COMP}" '{print $1}')
variable_check VCT_NODE_TEMPLATE_SITE quiet
( [ $VCT_NODE_TEMPLATE_TYPE = "vmdk" ] || [ $VCT_NODE_TEMPLATE_TYPE = "raw" ] || [ $VCT_NODE_TEMPLATE_TYPE = "img" ] ) ||\
    err $FUNCNAME "Non-supported fs template type $VCT_NODE_TEMPLATE_TYPE"
# Sanity-check that SITE+NAME+TYPE+COMP reassemble into the original URL.
[ "$VCT_NODE_TEMPLATE_URL" = "${VCT_NODE_TEMPLATE_SITE}${VCT_NODE_TEMPLATE_NAME}.${VCT_NODE_TEMPLATE_TYPE}.${VCT_NODE_TEMPLATE_COMP}" ] ||\
    err $FUNCNAME "Invalid $VCT_NODE_TEMPLATE_URL != ${VCT_NODE_TEMPLATE_SITE}${VCT_NODE_TEMPLATE_NAME}.${VCT_NODE_TEMPLATE_TYPE}.${VCT_NODE_TEMPLATE_COMP}"
# Same COMP/NAME/SITE decomposition for the OpenWrt slice template...
variable_check VCT_SLICE_OWRT_TEMPLATE_URL quiet
VCT_SLICE_OWRT_TEMPLATE_COMP=$((echo $VCT_SLICE_OWRT_TEMPLATE_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" ) ||\
                               (echo $VCT_SLICE_OWRT_TEMPLATE_URL | grep -e "\.tar\.gz$" >/dev/null && echo "tar.gz" ))
VCT_SLICE_OWRT_TEMPLATE_NAME=$(echo $VCT_SLICE_OWRT_TEMPLATE_URL | awk -F'/' '{print $(NF)}' | awk -F".${VCT_SLICE_OWRT_TEMPLATE_COMP}" '{print $1}')
VCT_SLICE_OWRT_TEMPLATE_SITE=$(echo $VCT_SLICE_OWRT_TEMPLATE_URL | awk -F"${VCT_SLICE_OWRT_TEMPLATE_NAME}.${VCT_SLICE_OWRT_TEMPLATE_COMP}" '{print $1}')
# ...its data archive...
variable_check VCT_SLICE_OWRT_DATA_URL quiet
VCT_SLICE_OWRT_DATA_COMP=$(echo $VCT_SLICE_OWRT_DATA_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" )
VCT_SLICE_OWRT_DATA_NAME=$(echo $VCT_SLICE_OWRT_DATA_URL | awk -F'/' '{print $(NF)}' | awk -F".${VCT_SLICE_OWRT_DATA_COMP}" '{print $1}')
VCT_SLICE_OWRT_DATA_SITE=$(echo $VCT_SLICE_OWRT_DATA_URL | awk -F"${VCT_SLICE_OWRT_DATA_NAME}.${VCT_SLICE_OWRT_DATA_COMP}" '{print $1}')
# ...and the Debian slice template and data archive.
variable_check VCT_SLICE_DEBIAN_TEMPLATE_URL quiet
VCT_SLICE_DEBIAN_TEMPLATE_COMP=$((echo $VCT_SLICE_DEBIAN_TEMPLATE_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" ) ||\
                                 (echo $VCT_SLICE_DEBIAN_TEMPLATE_URL | grep -e "\.tar\.gz$" >/dev/null && echo "tar.gz" ))
VCT_SLICE_DEBIAN_TEMPLATE_NAME=$(echo $VCT_SLICE_DEBIAN_TEMPLATE_URL | awk -F'/' '{print $(NF)}' | awk -F".${VCT_SLICE_DEBIAN_TEMPLATE_COMP}" '{print $1}')
VCT_SLICE_DEBIAN_TEMPLATE_SITE=$(echo $VCT_SLICE_DEBIAN_TEMPLATE_URL | awk -F"${VCT_SLICE_DEBIAN_TEMPLATE_NAME}.${VCT_SLICE_DEBIAN_TEMPLATE_COMP}" '{print $1}')
variable_check VCT_SLICE_DEBIAN_DATA_URL quiet
VCT_SLICE_DEBIAN_DATA_COMP=$(echo $VCT_SLICE_DEBIAN_DATA_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" )
VCT_SLICE_DEBIAN_DATA_NAME=$(echo $VCT_SLICE_DEBIAN_DATA_URL | awk -F'/' '{print $(NF)}' | awk -F".${VCT_SLICE_DEBIAN_DATA_COMP}" '{print $1}')
VCT_SLICE_DEBIAN_DATA_SITE=$(echo $VCT_SLICE_DEBIAN_DATA_URL | awk -F"${VCT_SLICE_DEBIAN_DATA_NAME}.${VCT_SLICE_DEBIAN_DATA_COMP}" '{print $1}')
}
# (Re)create the server-side tinc network configuration from scratch
# under $VCT_TINC_DIR/$VCT_TINC_NET: tinc.conf, the server host file,
# the RSA keys and the tinc-up/tinc-down interface scripts.
# The \\\$INTERFACE escapes below survive the vct_do_sh quoting so that
# $INTERFACE is expanded by tincd at run time, not here.
vct_tinc_setup() {
vct_do rm -rf $VCT_TINC_DIR/$VCT_TINC_NET
vct_do mkdir -p $VCT_TINC_DIR/$VCT_TINC_NET/hosts
# Main daemon configuration.
vct_do_sh "cat <<EOF > $VCT_TINC_DIR/$VCT_TINC_NET/tinc.conf
BindToAddress = 0.0.0.0
Port = $VCT_SERVER_TINC_PORT
Name = server
StrictSubnets = yes
EOF
"
# Host entry advertising the server's management-network address.
vct_do_sh "cat <<EOF > $VCT_TINC_DIR/$VCT_TINC_NET/hosts/server
Address = $VCT_SERVER_TINC_IP
Port = $VCT_SERVER_TINC_PORT
Subnet = $VCT_TESTBED_MGMT_IPV6_PREFIX48:0:0:0:0:2/128
EOF
"
#vct_do tincd -c $VCT_TINC_DIR/$VCT_TINC_NET -K
# Reuse the pre-generated key pair instead of generating a new one.
vct_do_sh "cat $VCT_KEYS_DIR/tinc/rsa_key.pub >> $VCT_TINC_DIR/$VCT_TINC_NET/hosts/server"
vct_do ln -s $VCT_KEYS_DIR/tinc/rsa_key.priv $VCT_TINC_DIR/$VCT_TINC_NET/rsa_key.priv
# Bring the management interface up/down with the testbed IPv6 prefix.
vct_do_sh "cat <<EOF > $VCT_TINC_DIR/$VCT_TINC_NET/tinc-up
#!/bin/sh
ip -6 link set \\\$INTERFACE up mtu 1400
ip -6 addr add $VCT_TESTBED_MGMT_IPV6_PREFIX48:0:0:0:0:2/48 dev \\\$INTERFACE
EOF
"
vct_do_sh "cat <<EOF > $VCT_TINC_DIR/$VCT_TINC_NET/tinc-down
#!/bin/sh
ip -6 addr del $VCT_TESTBED_MGMT_IPV6_PREFIX48:0:0:0:0:2/48 dev \\\$INTERFACE
ip -6 link set \\\$INTERFACE down
EOF
"
vct_do chmod a+rx $VCT_TINC_DIR/$VCT_TINC_NET/tinc-{up,down}
}
# Start the tinc daemon via the configured $VCT_TINC_START command
# (run under sudo); logs the invocation to stderr first.
vct_tinc_start() {
echo "$FUNCNAME $@" >&2
vct_sudo $VCT_TINC_START
}
# Stop the tinc daemon: invoke the configured stop command, wait up to
# TINC_MAX seconds for the PID to disappear from /proc, and SIGKILL it
# as a last resort.
vct_tinc_stop() {
echo "$FUNCNAME $@" >&2
local TINC_PID=$( [ -f $VCT_TINC_PID ] && cat $VCT_TINC_PID )
local TINC_CNT=0
local TINC_MAX=20
if [ "$TINC_PID" ] ; then
vct_sudo $VCT_TINC_STOP
# vct_sudo kill $TINC_PID
echo -n "waiting till tinc cleaned up" >&2
# Poll once per second until the process directory is gone.
while [ $TINC_CNT -le $TINC_MAX ]; do
sleep 1
[ -x /proc/$TINC_PID ] || break
TINC_CNT=$(( TINC_CNT + 1 ))
echo -n "." >&2
done
echo >&2
echo >&2
# Still alive after the grace period: force-kill.
[ -x /proc/$TINC_PID ] && vct_sudo kill -9 $TINC_PID && \
    echo "Killing vct tincd the hard way" >&2
fi
}
# Print the detected distribution family on stdout: "fedora" when the
# Fedora release file exists, otherwise "debian".
type_of_system() {
    local flavour="debian"
    if [ -f /etc/fedora-release ]; then
        flavour="fedora"
    fi
    echo "$flavour"
}
# Succeed (status 0) on an RPM-based distribution (fedora/redhat).
is_rpm() {
    case "$(type_of_system)" in
        "fedora" | "redhat") return 0 ;;
        *) return 1 ;;
    esac
}
# Succeed (status 0) on a Debian-based distribution (debian/ubuntu).
is_deb() {
    case "$(type_of_system)" in
        "debian" | "ubuntu") return 0 ;;
        *) return 1 ;;
    esac
}
# Verify the Debian host prerequisites: apt/dpkg present, every package
# in VCT_DEB_PACKAGES installed (installing on demand when the caller
# passed "install"), and every probe in VCT_TOOL_TESTS runnable.
# Honors the CMD_SOFT / CMD_QUICK / CMD_INSTALL flags of the caller.
check_deb() {
# check debian system, packages, tools, and kernel modules
# NOTE(review): the leading `!` negates only `apt-get --version`; the
# overall && chain looks inverted - confirm the intended condition.
! apt-get --version > /dev/null && dpkg --version > /dev/null &&\
    { err $FUNCNAME "missing debian system tool dpkg or apt-get" $CMD_SOFT || return 1 ;}
if ! [ $CMD_QUICK ]; then
local PACKAGE=
local UPDATED=
for PACKAGE in $VCT_DEB_PACKAGES; do
# dpkg -s reports "Status: ... ok installed" for installed packages.
if ! dpkg -s $PACKAGE 2>&1 |grep "Status:" |grep -v "not-installed" |grep "ok installed" > /dev/null ; then
if [ $CMD_INSTALL ] ; then
echo "Missing debian package: $PACKAGE! Trying to install all required packets..." >&2
else
err $FUNCNAME "Missing debian packages $PACKAGE !!!" $CMD_SOFT || return 1
fi
# Run `apt-get update` only once per invocation.
if [ -z $UPDATED ] ; then
vct_sudo "apt-get update" && UPDATED=1
fi
vct_sudo "apt-get --no-install-recommends install $PACKAGE" || \
    { err $FUNCNAME "Missing debian packages $PACKAGE !!!" $CMD_SOFT || return 1 ;}
fi
done
# Run each configured tool probe; any failure means the tool is missing.
local TOOL_POS=
local TOOL_CMD=
for TOOL_POS in $(seq 0 $(( ${#VCT_TOOL_TESTS[@]} - 1)) ); do
TOOL_CMD=${VCT_TOOL_TESTS[$TOOL_POS]}
$TOOL_CMD > /dev/null 2>&1 ||\
    { err $FUNCNAME "Please install linux tool: $TOOL_CMD !! " $CMD_SOFT || return 1 ;}
done
fi
}
# Verify/install the packages in VCT_RPM_PACKAGES on an RPM host.
# Uses a local cache file (.rpm-installed.cache) to skip packages that
# were already confirmed installed on a previous run.
check_rpm() {
touch .rpm-installed.cache
for PKG in $VCT_RPM_PACKAGES; do
if [ "x$(grep "$PKG" .rpm-installed.cache)" != "x" ]; then
echo "$PKG ok (cached)"
else
# `yum info` prints "No matching ..." for unknown packages and the word
# "installed" for installed ones.
if [ "x$(yum info $PKG 2>&1 | grep 'No matching')" == "x" ]; then
if [ "x$(yum info $PKG 2>/dev/null | grep installed)" == "x" ]; then
vct_sudo "yum install -y $PKG"
else
echo "$PKG ok"
echo $PKG >> .rpm-installed.cache
fi
else
echo "$PKG not available"
fi
fi
done
}
# Install or upgrade the confine-controller server: pip package, postgres
# database, celery, apache/mod_wsgi, tinc, PKI, firmware config and the
# default VCT users/fixtures. Heavily side-effecting; expects the VCT_*
# configuration variables and a Debian-ish host.
vct_system_install_server() {
# "false" sentinel when no controller is importable yet (fresh install).
local CURRENT_VERSION=$(python -c "from controller import get_version; print get_version();" || echo false)
vct_sudo apt-get update
vct_sudo apt-get install -y --force-yes python-pip
# Since controller v.0.8a32 slices files path has changed (rev ae55e13c)
vct_do mkdir -p $VCT_SERVER_DIR/{static,pki/ca}
vct_do mkdir -p $VCT_DL_DIR/{templates,exp_data,overlay}
# Don't know why /pki gets created as root.. but here a quick fix:
vct_sudo chown -R $VCT_USER $VCT_SERVER_DIR/pki
# Executes pip commands on /tmp because of garbage they generate
cd /tmp
if [[ ! $(pip freeze|grep confine-controller) ]]; then
# First time controller gets installed
vct_sudo pip install confine-controller==$VCT_SERVER_VERSION
vct_sudo controller-admin.sh install_requirements
else
# An older version is present, just go ahead and proceed with normal way
vct_sudo python "$VCT_DIR/server/manage.py" upgradecontroller --pip_only --controller_version $VCT_SERVER_VERSION
fi
# cleanup possible pip shit
# vct_sudo rm -fr {pip-*,build,src}
cd -
# We need to be sure that postgres is up:
vct_sudo service postgresql start
vct_sudo python "$VCT_DIR/server/manage.py" setuppostgres --db_name controller --db_user confine --db_password confine
if [[ $CURRENT_VERSION != false ]]; then
# Per version upgrade specific operations
( cd $VCT_DIR/server && vct_sudo python manage.py postupgradecontroller --no-restart --from $CURRENT_VERSION )
else
# Fresh install: create and migrate the schema.
vct_sudo python "$VCT_DIR/server/manage.py" syncdb --noinput
vct_sudo python "$VCT_DIR/server/manage.py" migrate --noinput
fi
vct_sudo python "$VCT_DIR/server/manage.py" setupceleryd --username $VCT_USER --processes 2 --greenlets 50
# Preserve the stock apache vhost config once, then clear it.
if [ -d /etc/apache/sites-enabled ] && ! [ -d /etc/apache/sites-enabled.orig ]; then
vct_sudo cp -ar /etc/apache/sites-enabled /etc/apache/sites-enabled.orig
vct_sudo rm -f /etc/apache/sites-enabled/*
fi
# Setup tincd
vct_sudo python "$VCT_DIR/server/manage.py" setuptincd --noinput --address="${VCT_SERVER_TINC_IP}"
python "$VCT_DIR/server/manage.py" updatetincd
# Setup https certificate for the management network
vct_do python "$VCT_DIR/server/manage.py" setuppki --org_name VCT --noinput
vct_sudo apt-get install -y apache2 libapache2-mod-wsgi
vct_sudo python "$VCT_DIR/server/manage.py" setupapache --noinput --user $VCT_USER --processes 2 --threads 25
# Move static files in a place where apache can get them
python "$VCT_DIR/server/manage.py" collectstatic --noinput
# Setup and configure firmware generation
vct_sudo python "$VCT_DIR/server/manage.py" setupfirmware
vct_do python "$VCT_DIR/server/manage.py" loaddata firmwareconfig
vct_do python "$VCT_DIR/server/manage.py" loaddata "$VCT_DIR/server/vct/fixtures/firmwareconfig.json"
vct_do python "$VCT_DIR/server/manage.py" syncfirmwareplugins
# Apply changes
vct_sudo python "$VCT_DIR/server/manage.py" startservices --no-tinc --no-celeryd --no-celerybeat --no-apache2
vct_sudo python "$VCT_DIR/server/manage.py" restartservices
vct_sudo $VCT_TINC_START
# Create a vct user, default VCT group and provide initial auth token to vct user
# WARNING the following code is sensitive to indentation !!
# NOTE you need to include an EMPTY line to clean indentation
cat <<- EOF | python "$VCT_DIR/server/manage.py" shell
from users.models import *
users = {}
if not User.objects.filter(username='vct').exists():
print 'Creating vct superuser'
User.objects.create_superuser('vct', 'vct@localhost', 'vct', name='vct')
users['vct'] = User.objects.get(username='vct')
for username in ['admin', 'researcher', 'technician', 'member']:
if not User.objects.filter(username=username).exists():
print 'Creating %s user' % username
User.objects.create_user(username, 'vct+%s@localhost' % username, username, name=username)
users[username] = User.objects.get(username=username)
group, created = Group.objects.get_or_create(name='vct', allow_slices=True, allow_nodes=True)
print '\nCreating roles ...'
Roles.objects.get_or_create(user=users['vct'], group=group, is_group_admin=True)
Roles.objects.get_or_create(user=users['admin'], group=group, is_group_admin=True)
Roles.objects.get_or_create(user=users['researcher'], group=group, is_slice_admin=True)
Roles.objects.get_or_create(user=users['technician'], group=group, is_node_admin=True)
Roles.objects.get_or_create(user=users['member'], group=group)
token_data = open('${VCT_KEYS_DIR}/id_rsa.pub', 'ro').read().strip()
for __, user in users.items():
print '\nAdding auth token to user %s' % user.username
AuthToken.objects.get_or_create(user=user, data=token_data)
# Update VCT server API URIs as plain HTTP as has no configured certificate
from nodes.models import Server
print '\nUpdating ServerAPI to use plain HTTP'
server = Server.objects.first()
if hasattr(server, 'api'): # only version > 0.11 requires this patch
for api in server.api.filter(base_uri__startswith='https'):
api.base_uri = api.base_uri.replace('https', 'http', 1)
api.save()
EOF
# Load further data into the database
vct_do python "$VCT_DIR/server/manage.py" loaddata "$VCT_DIR/server/vct/fixtures/vcttemplates.json"
vct_do python "$VCT_DIR/server/manage.py" loaddata "$VCT_DIR/server/vct/fixtures/vctslices.json"
# Enable local system monitoring via crontab
vct_do python "$VCT_DIR/server/manage.py" setuplocalmonitor
}
# Tear the controller server down: stop its services, drop the database,
# uninstall the pip package and remove $VCT_SERVER_DIR.
# Best-effort: most steps are `|| true`d so a partial install still purges.
vct_system_purge_server() {
vct_sudo python "$VCT_DIR/server/manage.py" stopservices --no-postgresql || true
# Ensure postgres itself is running so the DROP below can reach it.
ps aux | grep ^postgres > /dev/null || vct_sudo /etc/init.d/postgresql start # || true
sudo su postgres -c 'psql -c "DROP DATABASE controller;"' # || true
vct_sudo pip uninstall confine-controller -y
#grep "^confine" /etc/passwd > /dev/null && vct_sudo deluser --force --remove-home confine || true
#grep "^confine" /etc/group > /dev/null && vct_sudo delgroup confine || true
if [ -d $VCT_SERVER_DIR ]; then
vct_do rm -rf $VCT_SERVER_DIR || true
fi
}
# Check (and, with the "install" directive, set up) everything the VCT
# system needs on disk: directories, distro packages, the uci binary,
# group membership, ssh/tinc keys, tinc config, node/slice template
# downloads and the controller server installation.
#
# Arguments: $1 - comma-separated directive string; recognized substrings:
#            soft    - report errors instead of aborting
#            quick   - skip slow checks
#            install - install missing components
#            node/slice/keys/tinc/server - force re-install of that part
# Globals:   reads many VCT_* configuration variables; writes under
#            $VCT_VIRT_DIR, $VCT_KEYS_DIR, $VCT_TINC_DIR, $VCT_DL_DIR.
# Returns:   0 on success; non-zero (or exits via err) on failure.
vct_system_install_check() {
    #echo $FUNCNAME $@ >&2
    local OPT_CMD=${1:-}
    # Each CMD_/UPD_ flag is non-empty iff its keyword occurs in $OPT_CMD.
    local CMD_SOFT=$( echo "$OPT_CMD" | grep -e "soft" > /dev/null && echo "soft," )
    local CMD_QUICK=$( echo "$OPT_CMD" | grep -e "quick" > /dev/null && echo "quick," )
    local CMD_INSTALL=$( echo "$OPT_CMD" | grep -e "install" > /dev/null && echo "install," )
    local UPD_NODE=$( echo "$OPT_CMD" | grep -e "node" > /dev/null && echo "y" )
    local UPD_SLICE=$( echo "$OPT_CMD" | grep -e "slice" > /dev/null && echo "y" )
    local UPD_KEYS=$( echo "$OPT_CMD" | grep -e "keys" > /dev/null && echo "y" )
    local UPD_TINC=$( echo "$OPT_CMD" | grep -e "tinc" > /dev/null && echo "y" )
    local UPD_SERVER=$( echo "$OPT_CMD" | grep -e "server" > /dev/null && echo "y" )
    # check if correct user: must run as $VCT_USER and never as root.
    if [ $(whoami) != $VCT_USER ] || [ $(whoami) = root ] ;then
        err $FUNCNAME "command must be executed as user=$VCT_USER" $CMD_SOFT || return 1
    fi
    # Base runtime directory, owned by $VCT_USER.
    if ! [ -d $VCT_VIRT_DIR ]; then
        ( [ $CMD_INSTALL ] && vct_sudo mkdir -p $VCT_VIRT_DIR ) && vct_sudo chown $VCT_USER: $VCT_VIRT_DIR ||\
            { err $FUNCNAME "$VCT_VIRT_DIR not existing" $CMD_SOFT || return 1 ;}
    fi
    # Required sub-directories (systems, downloads, rpc, mountpoints, uci).
    for dir in "$VCT_SYS_DIR" "$VCT_DL_DIR" "$VCT_RPC_DIR" "$VCT_MNT_DIR" "$VCT_UCI_DIR"; do
        if ! [ -d $dir ]; then
            ( [ $CMD_INSTALL ] && vct_do mkdir -p $dir) ||\
                { err $FUNCNAME "$dir not existing" $CMD_SOFT || return 1 ;}
        fi
    done
    # Distro-specific package checks.
    if is_rpm; then
        check_rpm
    else
        check_deb
    fi
    # check uci binary: download a statically linked build if missing.
    local UCI_URL="http://media.confine-project.eu/vct/uci.tgz"
    local UCI_INSTALL_DIR="/usr/local/bin"
    local UCI_INSTALL_PATH="/usr/local/bin/uci"
    if ! uci help 2>/dev/null && [ "$CMD_INSTALL" -a ! -f "$UCI_INSTALL_PATH" ] ; then
        [ -f $VCT_DL_DIR/uci.tgz ] && vct_sudo "rm -f $VCT_DL_DIR/uci.tgz"
        [ -f $UCI_INSTALL_PATH ] && vct_sudo "rm -f $UCI_INSTALL_PATH"
        if ! vct_do wget -O $VCT_DL_DIR/uci.tgz $UCI_URL || \
            ! vct_sudo "tar xzf $VCT_DL_DIR/uci.tgz -C $UCI_INSTALL_DIR" || \
            ! vct_true $UCI_INSTALL_PATH help 2>/dev/null ; then
            err $FUNCNAME "Failed installing statically linked uci binary to $UCI_INSTALL_PATH "
        fi
    fi
    # Hard requirement: a working uci on PATH.
    if ! vct_true uci help 2>/dev/null; then
        cat <<EOF >&2
uci (unified configuration interface) tool is required for
this command (see: wiki.openwrt.org/doc/uci ).
Unfortunately, there is no debian package available for uci.
Please install uci manually using sources from here:
http://downloads.openwrt.org/sources/uci-0.7.5.tar.gz
Alternatively you can run
$0 install
to download and install a statically linked uci binary.
EOF
        err $FUNCNAME "uci binary not available" $CMD_SOFT
    fi
    if is_deb; then
        # check if user is in libvirt groups:
        local VCT_VIRT_GROUP=$( cat /etc/group | grep libvirt | awk -F':' '{print $1}' )
        if [ "$VCT_VIRT_GROUP" ]; then
            groups | grep "$VCT_VIRT_GROUP" > /dev/null || { \
                err $FUNCNAME "user=$VCT_USER MUST be in groups: $VCT_VIRT_GROUP \n do: sudo adduser $VCT_USER $VCT_VIRT_GROUP and ReLogin!" $CMD_SOFT || return 1 ;}
        else
            err $FUNCNAME "Failed detecting libvirt group" $CMD_SOFT || return 1
        fi
    fi
    # check ssh and tinc keys:
    # First run: seed with the shipped (insecure) default keys.
    if ! [ -d $VCT_KEYS_DIR ] && [ $CMD_INSTALL ] ; then
        echo "Copying $VCT_DIR/vct-default-keys to $VCT_KEYS_DIR. " >&2
        echo "Keys are INSECURE unless vct_system_install is called with override_keys directive !! " >&2
        vct_do cp -rv "$VCT_DIR/vct-default-keys" $VCT_KEYS_DIR
        vct_do chmod -R og-rwx $VCT_KEYS_DIR/*
    fi
    # With the "keys" directive: rotate the key dir aside (two backups kept).
    if [ -d $VCT_KEYS_DIR ] && [ $CMD_INSTALL ] && [ $UPD_KEYS ] ; then
        echo "Backing up existing keys to $VCT_KEYS_DIR.old (just in case) " >&2
        [ -d $VCT_KEYS_DIR.old.old ] && vct_do rm -rf $VCT_KEYS_DIR.old.old
        [ -d $VCT_KEYS_DIR.old ] && vct_do mv $VCT_KEYS_DIR.old $VCT_KEYS_DIR.old.old
        [ -d $VCT_KEYS_DIR ] && vct_do mv $VCT_KEYS_DIR $VCT_KEYS_DIR.old
    fi
    # Generate fresh tinc and ssh key pairs when the key dir is absent.
    if ! [ -d $VCT_KEYS_DIR ] && [ $CMD_INSTALL ] ; then
        vct_do mkdir -p $VCT_KEYS_DIR
        vct_do rm -rf $VCT_KEYS_DIR/*
        vct_do mkdir -p $VCT_KEYS_DIR/tinc
        vct_do touch $VCT_KEYS_DIR/tinc/tinc.conf
        echo "Creating new tinc keys..." >&2
        # The heredoc answers tincd -K's two interactive path prompts.
        vct_do_sh "tincd -c $VCT_KEYS_DIR/tinc -K <<EOF
$VCT_KEYS_DIR/tinc/rsa_key.priv
$VCT_KEYS_DIR/tinc/rsa_key.pub
EOF
"
        echo "Creating new ssh keys..." >&2
        vct_do ssh-keygen -f $VCT_KEYS_DIR/id_rsa
        local QUERY=
        echo "Copy new public key: $VCT_KEYS_DIR/id_rsa.pub -> $VCT_DIR/../../files/etc/dropbear/authorized_keys" >&2
        read -p "(then please recompile your node images afterwards)? [Y|n]: " QUERY >&2
        [ "$QUERY" = "y" ] || [ "$QUERY" = "" ] && vct_do mkdir -p "$VCT_DIR/../../files/etc/dropbear/" && \
            vct_do cp -v $VCT_KEYS_DIR/id_rsa.pub "$VCT_DIR/../../files/etc/dropbear/authorized_keys"
    fi
    # Assert that both key pairs exist before continuing.
    [ -f $VCT_KEYS_DIR/tinc/rsa_key.priv ] && [ -f $VCT_KEYS_DIR/tinc/rsa_key.pub ] || \
        { err $FUNCNAME "$VCT_KEYS_DIR/tinc/rsa_key.* not existing" $CMD_SOFT || return 1 ;}
    [ -f $VCT_KEYS_DIR/id_rsa ] && [ -f $VCT_KEYS_DIR/id_rsa.pub ] || \
        { err $FUNCNAME "$VCT_KEYS_DIR/id_rsa not existing" $CMD_SOFT || return 1 ;}
    # check tinc configuration:
    [ -d $VCT_TINC_DIR ] && [ $CMD_INSTALL ] && [ $UPD_TINC ] && vct_do rm -rf $VCT_TINC_DIR/$VCT_TINC_NET
    if ! [ -d $VCT_TINC_DIR/$VCT_TINC_NET ] && [ $CMD_INSTALL ] ; then
        vct_tinc_setup
    fi
    [ -f /etc/tinc/nets.boot ] || vct_sudo touch /etc/tinc/nets.boot
    [ -f $VCT_TINC_DIR/nets.boot ] || vct_sudo touch $VCT_TINC_DIR/nets.boot
    [ -f $VCT_TINC_DIR/$VCT_TINC_NET/hosts/server ] || \
        { err $FUNCNAME "$VCT_TINC_DIR/$VCT_TINC_NET/hosts/server not existing" $CMD_SOFT || return 1 ;}
    # With the "node" directive: offer to wipe existing nodes/slivers.
    if [ "$CMD_INSTALL" ] && [ "$UPD_NODE" ]; then
        echo "" >&2
        read -p "Purge existing nodes and slivers (Please type 'y' or anything else to skip): " QUERY >&2
        if [ "$QUERY" == "y" ] ; then
            vct_do vct_node_remove all
        fi
    fi
    # check for update and downloadable node-system-template file:
    [ "$UPD_NODE" ] && vct_do rm -f $VCT_DL_DIR/${VCT_NODE_TEMPLATE_NAME}.${VCT_NODE_TEMPLATE_TYPE}.${VCT_NODE_TEMPLATE_COMP}
    if ! vct_do install_url $VCT_NODE_TEMPLATE_URL $VCT_NODE_TEMPLATE_SITE $VCT_NODE_TEMPLATE_NAME.$VCT_NODE_TEMPLATE_TYPE $VCT_NODE_TEMPLATE_COMP $VCT_DL_DIR 0 "${CMD_SOFT}${CMD_INSTALL}" ; then
        err $FUNCNAME "Installing ULR=$VCT_NODE_TEMPLATE_URL failed" $CMD_SOFT || return 1
    else
        ln -fs $VCT_DL_DIR/$VCT_NODE_TEMPLATE_NAME.$VCT_NODE_TEMPLATE_TYPE.$VCT_NODE_TEMPLATE_COMP $VCT_DL_DIR/confine-node-template.img.gz
    fi
    # check for update and downloadable slice-openwrt-template file:
    [ "$UPD_SLICE" ] && vct_do rm -f $VCT_DL_DIR/${VCT_SLICE_OWRT_TEMPLATE_NAME}.${VCT_SLICE_OWRT_TEMPLATE_COMP}
    if ! vct_do install_url $VCT_SLICE_OWRT_TEMPLATE_URL $VCT_SLICE_OWRT_TEMPLATE_SITE $VCT_SLICE_OWRT_TEMPLATE_NAME $VCT_SLICE_OWRT_TEMPLATE_COMP $VCT_DL_DIR 0 "${CMD_SOFT}${CMD_INSTALL}" ; then
        err $FUNCNAME "Installing ULR=$VCT_SLICE_OWRT_TEMPLATE_URL failed" $CMD_SOFT || return 1
    else
        ln -fs $VCT_DL_DIR/$VCT_SLICE_OWRT_TEMPLATE_NAME.$VCT_SLICE_OWRT_TEMPLATE_COMP $VCT_DL_DIR/confine-slice-openwrt-template.tgz
    fi
    [ "$UPD_SLICE" ] && vct_do rm -f $VCT_DL_DIR/${VCT_SLICE_OWRT_DATA_NAME}.${VCT_SLICE_OWRT_DATA_COMP}
    if ! vct_do install_url $VCT_SLICE_OWRT_DATA_URL $VCT_SLICE_OWRT_DATA_SITE $VCT_SLICE_OWRT_DATA_NAME $VCT_SLICE_OWRT_DATA_COMP $VCT_DL_DIR 0 "${CMD_SOFT}${CMD_INSTALL}" ; then
        err $FUNCNAME "Installing ULR=$VCT_SLICE_OWRT_DATA_URL failed" $CMD_SOFT || return 1
    else
        ln -fs $VCT_DL_DIR/$VCT_SLICE_OWRT_DATA_NAME.$VCT_SLICE_OWRT_DATA_COMP $VCT_DL_DIR/confine-slice-openwrt-exp-data.tgz
    fi
    # check for update and downloadable slice-debian-template file:
    [ "$UPD_SLICE" ] && vct_do rm -f $VCT_DL_DIR/${VCT_SLICE_DEBIAN_TEMPLATE_NAME}.${VCT_SLICE_DEBIAN_TEMPLATE_COMP}
    if ! vct_do install_url $VCT_SLICE_DEBIAN_TEMPLATE_URL $VCT_SLICE_DEBIAN_TEMPLATE_SITE $VCT_SLICE_DEBIAN_TEMPLATE_NAME $VCT_SLICE_DEBIAN_TEMPLATE_COMP $VCT_DL_DIR 0 "${CMD_SOFT}${CMD_INSTALL}" ; then
        err $FUNCNAME "Installing ULR=$VCT_SLICE_DEBIAN_TEMPLATE_URL failed" $CMD_SOFT || return 1
    else
        ln -fs $VCT_DL_DIR/$VCT_SLICE_DEBIAN_TEMPLATE_NAME.$VCT_SLICE_DEBIAN_TEMPLATE_COMP $VCT_DL_DIR/confine-slice-debian-template.tgz
    fi
    [ "$UPD_SLICE" ] && vct_do rm -f $VCT_DL_DIR/${VCT_SLICE_DEBIAN_DATA_NAME}.${VCT_SLICE_DEBIAN_DATA_COMP}
    if ! vct_do install_url $VCT_SLICE_DEBIAN_DATA_URL $VCT_SLICE_DEBIAN_DATA_SITE $VCT_SLICE_DEBIAN_DATA_NAME $VCT_SLICE_DEBIAN_DATA_COMP $VCT_DL_DIR 0 "${CMD_SOFT}${CMD_INSTALL}" ; then
        err $FUNCNAME "Installing ULR=$VCT_SLICE_DEBIAN_DATA_URL failed" $CMD_SOFT || return 1
    else
        ln -fs $VCT_DL_DIR/$VCT_SLICE_DEBIAN_DATA_NAME.$VCT_SLICE_DEBIAN_DATA_COMP $VCT_DL_DIR/confine-slice-debian-exp-data.tgz
    fi
    # With the "server" directive: offer to purge and reinstall the controller.
    if [ $CMD_INSTALL ] && [ $UPD_SERVER ] ; then
        echo "" >&2
        read -p "Purge server installation (type 'y' or anything else to skip): " QUERY >&2
        if [ "$QUERY" == "y" ] ; then
            vct_system_purge_server
        fi
    fi
    if [ $CMD_INSTALL ] && ( [ $UPD_SERVER ] || ! [ -d $VCT_SERVER_DIR ] ); then
        vct_system_install_server
    fi
    if ! [ -d $VCT_SERVER_DIR ]; then
        err $FUNCNAME "Missing controller installation at $VCT_SERVER_DIR"
    fi
}
# Convenience wrapper: run the install check with the "install,"
# directive prepended, so missing components are installed instead of
# only reported.  Any extra directives ($@) are passed through.
vct_system_install() {
    vct_system_install_check "install,$@"
}
# Check (and, with the "init" directive, set up) the runtime state of the
# VCT system: kernel modules, libvirtd, bridges with their dummy devices,
# rescue/local IPv4 and IPv6 addresses, NAT, udhcpd instances, and finally
# the controller services and the tinc management network.
#
# Arguments: $1 - comma-separated directive string; recognized substrings:
#            soft  - report errors instead of aborting
#            quick - skip slow checks (e.g. NAT/iptables inspection)
#            init  - actually configure anything found missing
# Globals:   reads VCT_KERNEL_MODULES, VCT_BRIDGE_PREFIXES and the per-bridge
#            VCT_BRxx_* variables; writes network state via vct_sudo.
vct_system_init_check(){
    local OPT_CMD=${1:-}
    local CMD_SOFT=$( echo "$OPT_CMD" | grep -e "soft" > /dev/null && echo "soft," )
    local CMD_QUICK=$( echo "$OPT_CMD" | grep -e "quick" > /dev/null && echo "quick," )
    local CMD_INIT=$( echo "$OPT_CMD" | grep -e "init" > /dev/null && echo "init," )
    # Installation must be sane before runtime init.
    vct_system_install_check $CMD_SOFT$CMD_QUICK
    # check if kernel modules are loaded:
    local KMOD=
    for KMOD in $VCT_KERNEL_MODULES; do
        # lsmod shows '_' where the module name uses '-'.
        if ! lsmod | grep "$( echo $KMOD | sed s/-/_/ )" > /dev/null ; then
            ( [ $CMD_INIT ] &&\
                vct_sudo "modprobe $KMOD " ) ||\
                { err $FUNCNAME "Failed loading module $KMOD" $CMD_SOFT || return 1 ;}
        fi
    done
    # check if libvirtd is running:
    ! virsh --connect qemu:///system list --all > /dev/null &&\
        { err $FUNCNAME "libvirt-bin service not running! " $CMD_SOFT || return 1 ;}
    # check if bridges are initialized:
    local BRIDGE=
    local BR_NAME=
    for BRIDGE in $VCT_BRIDGE_PREFIXES; do
        if BR_NAME=$( variable_check ${BRIDGE}_NAME soft 2>/dev/null ); then
            # check if bridge exist:
            if ! brctl show | grep $BR_NAME >/dev/null; then
                ( [ $CMD_INIT ] &&\
                    vct_sudo "brctl addbr $BR_NAME && brctl setfd $BR_NAME 0 && brctl sethello $BR_NAME 1 && brctl stp $BR_NAME off" ) ||\
                    { err $FUNCNAME "unconfigured bridge $BR_NAME" $CMD_SOFT || return 1 ;}
            fi
            # Optional dummy device kept as the bridge's first port.
            local BR_DUMMY_DEV=$( variable_check ${BRIDGE}_DUMMY_DEV soft 2>/dev/null )
            if [ $BR_DUMMY_DEV ] ; then
                if ! ip link show dev $BR_DUMMY_DEV >/dev/null 2>&1 ; then
                    vct_sudo ip link add $BR_DUMMY_DEV type dummy || \
                        { err $FUNCNAME "Failed adding $BR_DUMMY_DEV" $CMD_SOFT || return 1 ;}
                fi
                if ! brctl show | grep $BR_NAME | grep $BR_DUMMY_DEV >/dev/null; then
                    ( [ $CMD_INIT ] && \
                        vct_sudo "brctl addif $BR_NAME $BR_DUMMY_DEV" ) || \
                        { err $FUNCNAME "bridge $BR_NAME: $BR_DUMMY_DEV NOT first dev " $CMD_SOFT || return 1 ;}
                fi
            fi
            # check if local bridge has rescue IPv4 address for local network:
            local BR_V4_RESCUE_IP=$( variable_check ${BRIDGE}_V4_RESCUE_IP soft 2>/dev/null )
            if [ $BR_V4_RESCUE_IP ] ; then
                if ! ip addr show dev $BR_NAME | grep -e "inet " |grep -e " $BR_V4_RESCUE_IP " |grep -e " $BR_NAME" >/dev/null; then
                    ( [ $CMD_INIT ] && vct_sudo ip addr add $BR_V4_RESCUE_IP dev $BR_NAME label $BR_NAME:resc) ||\
                        { err $FUNCNAME "unconfigured ipv4 rescue net: $BR_NAME $BR_V4_RESCUE_IP " $CMD_SOFT || return 1 ;}
                fi
            fi
            # check if local bridge has IPv4 address for local network:
            local BR_V4_LOCAL_IP=$( variable_check ${BRIDGE}_V4_LOCAL_IP soft 2>/dev/null )
            if [ $BR_V4_LOCAL_IP ] ; then
                if ! ip addr show dev $BR_NAME | grep -e "inet " |grep -e " $BR_V4_LOCAL_IP " |grep -e " $BR_NAME" >/dev/null; then
                    ( [ $CMD_INIT ] && vct_sudo ip addr add $BR_V4_LOCAL_IP dev $BR_NAME ) ||\
                        { err $FUNCNAME "unconfigured ipv4 rescue net: $BR_NAME $BR_V4_LOCAL_IP " $CMD_SOFT || return 1 ;}
                fi
                # check if bridge needs routed NAT:
                local BR_V4_NAT_OUT=$( variable_check ${BRIDGE}_V4_NAT_OUT_DEV soft 2>/dev/null )
                local BR_V4_NAT_SRC=$( variable_check ${BRIDGE}_V4_NAT_OUT_SRC soft 2>/dev/null )
                # "auto" resolves the outgoing device from the default route.
                if [ "$BR_V4_NAT_OUT" = "auto" ] ; then
                    BR_V4_NAT_OUT=$( ip -4 r |grep -e "^default" |awk -F'dev ' '{print $2}' |awk '{print $1}' ) && \
                        ip link show dev $BR_V4_NAT_OUT >/dev/null || \
                        err $FUNCNAME "default route dev can not be resolved"
                fi
                if [ $BR_V4_NAT_SRC ] && [ $BR_V4_NAT_OUT ] && [ -z $CMD_QUICK ]; then
                    if ! vct_sudo iptables -t nat -L POSTROUTING -nv | \
                        grep -e "MASQUERADE" |grep -e "$BR_V4_NAT_OUT" |grep -e "$BR_V4_NAT_SRC" >/dev/null; then
                        ( [ $CMD_INIT ] && vct_sudo iptables -t nat -I POSTROUTING -o $BR_V4_NAT_OUT -s $BR_V4_NAT_SRC -j MASQUERADE ) ||\
                            { err $FUNCNAME "invalid NAT from $BR_NAME" $CMD_SOFT || return 1 ;}
                    fi
                    # NAT requires kernel IPv4 forwarding.
                    if ! [ $(cat /proc/sys/net/ipv4/ip_forward) = "1" ]; then
                        [ $CMD_INIT ] && vct_sudo sysctl -w net.ipv4.ip_forward=1 > /dev/null
                    fi
                fi
                # check if bridge needs udhcpd:
                local DHCPD_IP_MIN=$( variable_check ${BRIDGE}_V4_DHCPD_IP_MIN soft 2>/dev/null )
                local DHCPD_IP_MAX=$( variable_check ${BRIDGE}_V4_DHCPD_IP_MAX soft 2>/dev/null )
                local DHCPD_DNS=$( variable_check ${BRIDGE}_V4_DHCPD_DNS soft 2>/dev/null )
                local DHCPD_MASK=$( variable_check ${BRIDGE}_V4_DHCPD_MASK soft 2>/dev/null )
                local UDHCPD_CONF_FILE=$VCT_VIRT_DIR/udhcpd-$BR_NAME.conf
                local UDHCPD_LEASE_FILE=$VCT_VIRT_DIR/udhcpd-$BR_NAME.leases
                local UDHCPD_COMMAND
                # rpm distros ship udhcpd only inside busybox.
                if is_rpm; then
                    UDHCPD_COMMAND="busybox udhcpd $UDHCPD_CONF_FILE"
                else
                    UDHCPD_COMMAND="udhcpd $UDHCPD_CONF_FILE"
                fi
                echo $UDHCPD_COMMAND;
                # Kill any previous instance before (re)configuring it.
                local UDHCPD_PID=$( ps aux | grep "$UDHCPD_COMMAND" | grep -v grep | awk '{print $2}' )
                [ $CMD_INIT ] && [ ${UDHCPD_PID:-} ] && echo "kill udhcpd" >&2 && vct_sudo kill $UDHCPD_PID && sleep 1
                if [ $DHCPD_IP_MIN ] && [ $DHCPD_IP_MAX ] && [ $DHCPD_DNS ] && [ $DHCPD_MASK ]; then
                    if [ $CMD_INIT ] ; then
                        # Write the per-bridge udhcpd config and launch the daemon.
                        vct_do_sh "cat <<EOF > $UDHCPD_CONF_FILE
start $DHCPD_IP_MIN
end $DHCPD_IP_MAX
interface $BR_NAME
lease_file $UDHCPD_LEASE_FILE
option router $( echo $BR_V4_LOCAL_IP | awk -F'/' '{print $1}' )
option dns $DHCPD_DNS
option subnet $DHCPD_MASK
EOF
"
                        vct_sudo $UDHCPD_COMMAND
                    fi
                    vct_true [ "$(ps aux | grep "$UDHCPD_COMMAND" | grep -v grep )" ] || \
                        err $FUNCNAME "NO udhcpd server running for $BR_NAME "
                fi
            fi
            # check if local bridge has IPv6 for recovery network:
            local BR_V6_RESCUE2_PREFIX64=$( variable_check ${BRIDGE}_V6_RESCUE2_PREFIX64 soft 2>/dev/null )
            if [ $BR_V6_RESCUE2_PREFIX64 ] ; then
                # local BR_V6_RESCUE2_IP=$BR_V6_RESCUE2_PREFIX64:$( vct_true eui64_from_link $BR_NAME )/64
                local BR_V6_RESCUE2_IP=$BR_V6_RESCUE2_PREFIX64::2/64
                if vct_true false || ! ip addr show dev $BR_NAME | grep -e "inet6 " | \
                    grep -ie " $( ipv6calc -I ipv6 $BR_V6_RESCUE2_IP -O ipv6 ) " >/dev/null; then
                    ( [ $CMD_INIT ] && vct_sudo ip addr add $BR_V6_RESCUE2_IP dev $BR_NAME ) ||\
                        { err $FUNCNAME "unconfigured ipv6 rescue net: $BR_NAME $BR_V6_RESCUE2_IP" $CMD_SOFT || return 1 ;}
                fi
            fi
            # disabled, currently not needed...
            # #check if local bridge has IPv6 for debug network:
            # local BR_V6_DEBUG_IP=$( variable_check ${BRIDGE}_V6_DEBUG_IP soft 2>/dev/null )
            # if [ $BR_V6_DEBUG_IP ] ; then
            # if ! ip addr show dev $BR_NAME | grep -e "inet6 " | \
            # grep -ie " $( ipv6calc -I ipv6 $BR_V6_DEBUG_IP -O ipv6 ) " >/dev/null; then
            # ( [ $CMD_INIT ] && vct_sudo ip addr add $BR_V6_DEBUG_IP dev $BR_NAME ) ||\
            # { err $FUNCNAME "unconfigured ipv6 debut net: $BR_NAME $BR_V6_DEBUG_IP" $CMD_SOFT || return 1 ;}
            # fi
            # fi
            # check if bridge is UP:
            if ! ip link show dev $BR_NAME | grep ",UP" >/dev/null; then
                ( [ $CMD_INIT ] && vct_sudo ip link set dev $BR_NAME up ) ||\
                    { err $FUNCNAME "disabled link $BR_NAME" $CMD_SOFT || return 1 ;}
            fi
        fi
    done
    # check if controller system and management network is running:
    [ $CMD_INIT ] && vct_tinc_stop
    [ $CMD_INIT ] && vct_sudo service postgresql start
    [ $CMD_INIT ] && vct_sudo python "$VCT_DIR/server/manage.py" startservices
    [ $CMD_INIT ] && vct_sudo $VCT_TINC_START
}
# Convenience wrapper: run the init check with the "init" directive,
# causing missing runtime state (modules, bridges, dhcp, services) to
# be configured instead of only checked.
vct_system_init() {
    vct_system_init_check init
}
# Tear down the runtime state set up by vct_system_init_check:
# stop (or, with "flush", remove) all nodes, undo NAT rules, kill
# udhcpd instances, bring down and delete bridges and dummy devices,
# stop tinc and the controller services.
#
# Arguments: $1 - "" to only stop nodes, "flush" to remove them.
vct_system_cleanup() {
    local FLUSH_ARG="${1:-}"
    case $FLUSH_ARG in
        "") vct_do vct_node_stop all ;;
        "flush")
            vct_do vct_node_remove all # also stops them
            ;;
        *) err $FUNCNAME "Invalid argument: $FLUSH_ARG" ;;
    esac
    local BRIDGE=
    local BR_NAME=
    for BRIDGE in $VCT_BRIDGE_PREFIXES; do
        if BR_NAME=$( variable_check ${BRIDGE}_NAME soft 2>/dev/null ); then
            # check if local bridge has IPv4 address for local network:
            local BR_V4_LOCAL_IP=$( variable_check ${BRIDGE}_V4_LOCAL_IP soft 2>/dev/null )
            if [ "$BR_V4_LOCAL_IP" ] ; then
                # check if bridge had routed NAT:
                local BR_V4_NAT_OUT=$( variable_check ${BRIDGE}_V4_NAT_OUT_DEV soft 2>/dev/null )
                local BR_V4_NAT_SRC=$( variable_check ${BRIDGE}_V4_NAT_OUT_SRC soft 2>/dev/null )
                # Mirror of the "auto" resolution done during init.
                if [ "$BR_V4_NAT_OUT" = "auto" ] ; then
                    BR_V4_NAT_OUT=$( ip -4 r |grep -e "^default" |awk -F'dev ' '{print $2}' |awk '{print $1}' ) && \
                        ip link show dev $BR_V4_NAT_OUT >/dev/null || \
                        err $FUNCNAME "default route dev can not be resolved"
                fi
                if [ $BR_V4_NAT_SRC ] && [ $BR_V4_NAT_OUT ]; then
                    if vct_sudo iptables -t nat -L POSTROUTING -nv | \
                        grep -e "MASQUERADE" |grep -e "$BR_V4_NAT_OUT" |grep -e "$BR_V4_NAT_SRC" >/dev/null; then
                        vct_sudo iptables -t nat -D POSTROUTING -o $BR_V4_NAT_OUT -s $BR_V4_NAT_SRC -j MASQUERADE
                    fi
                fi
                # check if bridge had udhcpd:
                local UDHCPD_CONF_FILE=$VCT_VIRT_DIR/udhcpd-$BR_NAME.conf
                local UDHCPD_LEASE_FILE=$VCT_VIRT_DIR/udhcpd-$BR_NAME.leases
                # NOTE(review): unlike init, this always greps for plain "udhcpd",
                # not the busybox variant used on rpm systems — confirm intended.
                local UDHCPD_COMMAND="udhcpd $UDHCPD_CONF_FILE"
                local UDHCPD_PID=$( ps aux | grep -e "$UDHCPD_COMMAND" | grep -v "grep" | awk '{print $2}' )
                [ ${UDHCPD_PID:-} ] && echo "kill udhcpd" >&2 && vct_sudo kill $UDHCPD_PID
            fi
            # check if bridge is UP:
            if ip link show dev $BR_NAME | grep -e ',UP' >/dev/null; then
                vct_sudo ip link set dev $BR_NAME down
            fi
            # check if bridge exist:
            if brctl show | grep -e "$BR_NAME" >/dev/null; then
                vct_sudo brctl delbr $BR_NAME
            fi
            # check if bridge had a dummy device:
            local BR_DUMMY_DEV=$( variable_check ${BRIDGE}_DUMMY_DEV soft 2>/dev/null )
            if [ $BR_DUMMY_DEV ] ; then
                if ip link show dev $BR_DUMMY_DEV >/dev/null 2>&1 ; then
                    # NOTE(review): $CMD_SOFT is never defined in this function
                    # (it expands empty here) — likely copied from the *_check
                    # functions; verify intended.
                    vct_sudo ip link del $BR_DUMMY_DEV || \
                        { err $FUNCNAME "Failed deleting $BR_DUMMY_DEV" $CMD_SOFT || return 1 ;}
                fi
            fi
        fi
    done
    vct_tinc_stop
    if [ $VCT_SERVER_DIR ]; then
        vct_sudo python "$VCT_DIR/server/manage.py" stopservices --no-postgresql
    fi
}
# Remove the whole local VCT system: all nodes/slivers (flush), the
# controller/server installation, and the runtime directory tree.
# Globals: VCT_VIRT_DIR - deleted recursively (guarded below).
vct_system_purge() {
    vct_system_cleanup flush
    vct_system_purge_server
    # Refuse to "rm -rf" when VCT_VIRT_DIR is unset/empty or "/"; the
    # expansion is quoted so a value with spaces cannot widen the deletion.
    [ "${VCT_VIRT_DIR:-}" ] && [ "$VCT_VIRT_DIR" != "/" ] && vct_sudo rm -rf "$VCT_VIRT_DIR"
}
##########################################################################
#######
##########################################################################
# Expand a node id range expression into individual 4-hex-digit node ids,
# printed one per line (or validated via check_rd_id).
#
# Arguments: $1 - "all", a single id, or a range "MIN-MAX" (hex)
#            $2 - optional domain state filter used with "all"
#                 (matched against the end of the virsh list line)
vcrd_ids_get() {
    local VCRD_ID_RANGE=$1
    local VCRD_ID_STATE=${2:-}
    local VCRD_ID=
    if [ "$VCRD_ID_RANGE" = "all" ] ; then
        # Parse ids out of "virsh list": take the token following the
        # configured name prefix on each matching line.
        virsh -c qemu:///system list --all 2>/dev/null | grep -e "$VCT_RD_NAME_PREFIX" | grep -e "$VCRD_ID_STATE$" | \
            awk -F" $VCT_RD_NAME_PREFIX" '{print $2}' | awk '{print $1}'
    elif echo $VCRD_ID_RANGE | grep -e "-" >/dev/null; then
        # Range form: iterate from MIN to MAX, treating both as hex.
        local VCRD_ID_MIN=$( echo $VCRD_ID_RANGE | awk -F'-' '{print $1}' )
        local VCRD_ID_MAX=$( echo $VCRD_ID_RANGE | awk -F'-' '{print $2}' )
        check_rd_id $VCRD_ID_MIN >/dev/null || err $FUNCNAME ""
        check_rd_id $VCRD_ID_MAX >/dev/null || err $FUNCNAME ""
        local DEC
        for DEC in $( seq $(( 16#${VCRD_ID_MIN} )) $(( 16#${VCRD_ID_MAX} )) ); do
            check_rd_id $( printf "%.4x " $DEC )
        done
    else
        # Single id: validate/echo via check_rd_id.
        check_rd_id $VCRD_ID_RANGE
    fi
}
# Look up a node's statically configured IP in the MAC/IP database file
# ($VCT_NODE_MAC_DB).  Each line has the form "<id> <mac>[|<ip>]".
# Prints the <ip> part on stdout, or an empty line when the database is
# missing or no IP is recorded for the given node id.
vct_node_get_ip_from_db() {
    local node_id=$1
    local found_ip=
    if [ -f $VCT_NODE_MAC_DB ]; then
        # NB: VCT_NODE_MACIP is deliberately left global (as before).
        VCT_NODE_MACIP=$(grep -e "^$node_id" $VCT_NODE_MAC_DB | awk '{print $2}')
        # Only entries carrying a "|" separator have an IP recorded.
        case "$VCT_NODE_MACIP" in
            *\|*)
                found_ip=$( grep -e "^$node_id" $VCT_NODE_MAC_DB | awk '{print $2}' | cut -d\| -f2 )
                ;;
        esac
    fi
    echo "$found_ip"
}
# Resolve the MAC address of a node: prefer the entry in the MAC/IP
# database (real hardware nodes); otherwise query the running libvirt
# domain's XML for the interface attached to $VCT_RD_LOCAL_BRIDGE.
#
# Arguments: $1 - 4-hex-digit node id
#            $2 - optional "quiet" to suppress the informational message
# Outputs:   MAC address on stdout; diagnostics on stderr.
vct_node_get_mac() {
    local VCRD_ID=$1
    local OPT_CMD=${2:-}
    local CMD_QUIET=$( echo "$OPT_CMD" | grep -e "quiet" > /dev/null && echo "quiet," )
    local MAC=
    # DB entries look like "<id> <mac>[|<ip>]"; take the part before '|'.
    [ -f $VCT_NODE_MAC_DB ] && \
        MAC="$( grep -e "^$VCRD_ID" $VCT_NODE_MAC_DB | awk '{print $2}' | cut -d\| -f1 )"
    # echo "vcrd_id=$VCRD_ID mac=$MAC db=$VCT_NODE_MAC_DB pwd=$(pwd)" >&2
    if [ $MAC ] ; then
        [ "$CMD_QUIET" ] || echo $FUNCNAME "connecting to real node=$VCRD_ID mac=$MAC" >&2
    else
        # Fall back to the virtual node's libvirt domain definition.
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        if ! virsh -c qemu:///system dominfo $VCRD_NAME | grep -e "^State:" >/dev/null; then
            err $FUNCNAME "$VCRD_NAME not running"
        fi
        # Extract "<source bridge> <mac>" pairs from the domain XML and
        # keep the mac of the interface on the local bridge.
        MAC=$( virsh -c qemu:///system dumpxml $VCRD_NAME | \
            xmlstarlet sel -T -t -m "/domain/devices/interface" \
            -v child::source/attribute::* -o " " -v child::mac/attribute::address -n | \
            grep -e "^$VCT_RD_LOCAL_BRIDGE " | awk '{print $2 }' || \
            err $FUNCNAME "Failed resolving MAC address for $VCRD_NAME $VCT_RD_LOCAL_BRIDGE" )
        [ "$CMD_QUIET" ] || echo $FUNCNAME "connecting to virtual node=$VCRD_ID mac=$MAC" >&2
    fi
    echo $MAC
}
# Print a status table for nodes: id, domain state, rescue address and
# ping RTT, management address and ping RTT.  Covers both real nodes
# (from the MAC/IP database) and virtual libvirt domains.
#
# Arguments: $1 - optional node id range; defaults to all known nodes.
vct_node_info() {
    local VCRD_ID_RANGE=${1:-}
    # virsh --connect qemu:///system list --all
    # Real nodes: first column of the DB that looks like a 4-hex-digit id.
    local REAL_IDS="$( [ -f $VCT_NODE_MAC_DB ] && cat $VCT_NODE_MAC_DB | awk '{print $1}' | grep -e "^[0-9,a-f][0-9,a-f][0-9,a-f][0-9,a-f]$" )"
    # Virtual nodes: ids parsed out of the libvirt domain names.
    local VIRT_IDS="$( virsh -c qemu:///system list --all | grep ${VCT_RD_NAME_PREFIX} | awk '{print $2}' | awk -F'-' '{print $2}' )"
    local ALL_IDS="$REAL_IDS $VIRT_IDS"
    printf "%-4s %-8s %-39s %-5s %-22s %-5s\n" node state rescue rtt management rtt
    echo "-----------------------------------------------------------------------------------------"
    local ID=
    for ID in $( [ "$VCRD_ID_RANGE" ] && vcrd_ids_get $VCRD_ID_RANGE || echo "$ALL_IDS" ); do
        local NAME="${VCT_RD_NAME_PREFIX}${ID}"
        # "EXTERN" marks real nodes that have no libvirt domain.
        local STATE=$( echo "$VIRT_IDS" | grep -e "$ID" > /dev/null && \
            ( virsh -c qemu:///system dominfo $NAME | grep -e "State:" | grep -e "running" > /dev/null && echo "running" || echo "down" ) || \
            echo "EXTERN" )
        local MAC=$( vct_node_get_mac $ID quiet )
        # Rescue address: DB IP if present, else EUI-64 derived IPv6.
        local IPV6_RESCUE="${VCT_BR00_V6_RESCUE2_PREFIX64}:$( eui64_from_mac $MAC )"
        local IP="$(vct_node_get_ip_from_db $ID)"
        IP="${IP:-$IPV6_RESCUE}"
        # Average RTT from ping output ("min/avg/max/..."); "--" when down.
        local RESCUE_DELAY="$( [ "$STATE" = "down" ] && echo "--" || vct_do_ping $IP | grep avg | awk -F' = ' '{print $2}' | awk -F'/' '{print $1}')"
        local MGMT=$VCT_TESTBED_MGMT_IPV6_PREFIX48:$ID::2
        local MGMT_DELAY="$( [ "$STATE" = "down" ] && echo "--" || vct_do_ping $MGMT | grep avg | awk -F' = ' '{print $2}' | awk -F'/' '{print $1}')"
        printf "%-4s %-8s %-39s %-5s %-22s %-5s\n" $ID $STATE $IP ${RESCUE_DELAY:---} $MGMT ${MGMT_DELAY:---}
    done
}
# Hard power-off ("virsh destroy") every running libvirt domain in the
# given node id range; domains that are not running are skipped.
vct_node_stop() {
    local id_range=$1
    local id=
    for id in $( vcrd_ids_get $id_range ); do
        local dom="${VCT_RD_NAME_PREFIX}${id}"
        # Skip domains that virsh does not report as running.
        virsh -c qemu:///system dominfo $dom 2>/dev/null | grep -e "^State:" | grep "running" >/dev/null || continue
        virsh -c qemu:///system destroy $dom ||\
            err $FUNCNAME "Failed stopping domain $dom"
    done
}
# Remove nodes in the given id range: unmount any offline mount, stop the
# libvirt domain if running, undefine it, and delete its disk image under
# $VCT_SYS_DIR.
vct_node_remove() {
    local VCRD_ID_RANGE=$1
    local VCRD_ID=
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME=
        # There may be several domains matching one id pattern.
        for VCRD_NAME in $( virsh -c qemu:///system list --all 2>/dev/null | grep ${VCRD_ID} | awk '{print $2}' ) ; do
            echo removing id=$VCRD_ID name=$VCRD_NAME
            if [ "$VCRD_NAME" ]; then
                vct_node_unmount $VCRD_ID
                # Resolve the disk image path from the domain XML; only
                # accept paths inside $VCT_SYS_DIR (soft error otherwise).
                local VCRD_PATH=$( virsh -c qemu:///system dumpxml $VCRD_NAME | \
                    xmlstarlet sel -T -t -m "/domain/devices/disk/source" -v attribute::file -n |
                    grep -e "^${VCT_SYS_DIR}" || \
                    err $FUNCNAME "Failed resolving disk path for $VCRD_NAME" soft )
                # Stop if running, then undefine once it is off.
                if virsh -c qemu:///system dominfo $VCRD_NAME 2>/dev/null | grep -e "^State:" | grep "running" >/dev/null ; then
                    virsh -c qemu:///system destroy $VCRD_NAME ||\
                        err $FUNCNAME "Failed stopping domain $VCRD_NAME"
                fi
                if virsh -c qemu:///system dominfo $VCRD_NAME 2>/dev/null | grep -e "^State:" | grep "off" >/dev/null ; then
                    virsh -c qemu:///system undefine $VCRD_NAME ||\
                        err $FUNCNAME "Failed undefining domain $VCRD_NAME"
                fi
                [ $VCRD_PATH ] && [ -f $VCRD_PATH ] && rm -f $VCRD_PATH
            else
                err $FUNCNAME "No system with rd-id=$VCRD_ID $VCRD_NAME found"
            fi
        done
    done
}
# Create virtual node(s) for the given id range: install the node
# firmware image into $VCT_SYS_DIR, optionally enlarge it, wire up the
# configured bridges with deterministic MACs, and define/import the
# libvirt domain via virt-install.
#
# Arguments: $1 - node id range (see vcrd_ids_get)
vct_node_create() {
    vct_system_init_check quick
    local VCRD_ID_RANGE=$1
    local VCRD_ID=
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        # local VCRD_PATH="${VCT_SYS_DIR}/${VCT_NODE_TEMPLATE_NAME}-rd${VCRD_ID}.${VCT_NODE_TEMPLATE_TYPE}"
        local VCRD_PATH="${VCT_SYS_DIR}/rd${VCRD_ID}.${VCT_NODE_TEMPLATE_TYPE}"
        virsh -c qemu:///system dominfo $VCRD_NAME 2>/dev/null && \
            err $FUNCNAME "Domain name=$VCRD_NAME already exists"
        [ -f $VCRD_PATH ] && \
            echo "Removing existing rootfs=$VCRD_PATH" >&2 && rm -f $VCRD_PATH
        # Firmware name template contains the literal NODE_ID placeholder,
        # replaced with the decimal value of the hex node id.
        local VCRD_FW_NAME="$( echo $VCT_SERVER_NODE_IMAGE_NAME | sed s/NODE_ID/$(( 16#${VCRD_ID} ))/ )"
        local FW_PATH="${VCT_SYS_DIR}/${VCRD_FW_NAME}"
        if ! [ -f $FW_PATH ]; then
            err $FUNCNAME "Missing firmware=$FW_PATH for rd-id=$VCRD_ID"
        fi
        # Decompose the firmware URL into site/name/type/compression parts
        # and sanity-check that they reassemble to the original URL.
        local FW_URL="file://${FW_PATH}"
        local FW_COMP=$( ( echo $FW_URL | grep -e "\.tgz$" >/dev/null && echo "tgz" ) ||\
            ( echo $FW_URL | grep -e "\.tar\.gz$" >/dev/null && echo "tar.gz" ) ||\
            ( echo $FW_URL | grep -e "\.gz$" >/dev/null && echo "gz" ) )
        local FW_TYPE=$(echo $FW_URL | awk -F"$FW_COMP" '{print $1}' | awk -F'.' '{print $(NF-1)}')
        local FW_NAME=$(echo $FW_URL | awk -F'/' '{print $(NF)}' | awk -F".${FW_TYPE}.${FW_COMP}" '{print $1}')
        local FW_SITE=$(echo $FW_URL | awk -F"${FW_NAME}.${FW_TYPE}.${FW_COMP}" '{print $1}')
        ( [ $FW_TYPE = "vmdk" ] || [ $FW_TYPE = "raw" ] || [ $FW_TYPE = "img" ] ) ||\
            err $FUNCNAME "Non-supported fs template type $FW_TYPE"
        [ "$FW_URL" = "${FW_SITE}${FW_NAME}.${FW_TYPE}.${FW_COMP}" ] ||\
            err $FUNCNAME "Invalid $FW_URL != ${FW_SITE}${FW_NAME}.${FW_TYPE}.${FW_COMP}"
        if ! install_url $FW_URL $FW_SITE $FW_NAME.$FW_TYPE $FW_COMP $VCT_SYS_DIR $VCRD_PATH install ; then
            err $FUNCNAME "Installing $VCT_NODE_TEMPLATE_URL to $VCRD_PATH failed"
        fi
        # Enlarge the node image to the configured size if smaller.
        if [ "$VCT_NODE_IMAGE_SIZE_MiB" ]; then
            # With other template types we cannot even figure out image size.
            if [ "$VCT_NODE_TEMPLATE_TYPE" != "img" ]; then
                err $FUNCNAME "Unsupported template type $VCT_NODE_TEMPLATE_TYPE while enlarging $VCRD_PATH"
            fi
            local IMAGE_SIZE_B
            IMAGE_SIZE_B=$(stat -c %s "$VCRD_PATH")
            if [ $IMAGE_SIZE_B -lt $((VCT_NODE_IMAGE_SIZE_MiB * 1024 * 1024)) ]; then
                # Sparse-extend with a zero-count dd seek (no data written).
                dd if=/dev/zero of="$VCRD_PATH" bs=1M count=0 seek=$VCT_NODE_IMAGE_SIZE_MiB 2>&1\
                    || err $FUNCNAME "Failed to enlarge $VCRD_PATH"
            fi
        fi
        # Build the --network arguments: one bridge per VCT_BRxx prefix,
        # with per-bridge model and a deterministic MAC derived from the
        # bridge number and the node id (unless configured "RANDOM").
        local VCRD_NETW=""
        local BRIDGE=
        for BRIDGE in $VCT_BRIDGE_PREFIXES; do
            local BR_NAME=
            echo $BRIDGE | grep -e "^VCT_BR[0-f][0-f]$" >/dev/null || \
                err $FUNCNAME "Invalid VCT_BRIDGE_PREFIXES naming convention: $BRIDGE"
            if BR_NAME=$( variable_check ${BRIDGE}_NAME soft 2>/dev/null ); then
                local BR_MODEL=$( variable_check ${BRIDGE}_MODEL soft 2>/dev/null || \
                    echo "${VCT_INTERFACE_MODEL}" )
                local BR_MAC48=$( variable_check ${BRIDGE}_MAC48 soft 2>/dev/null || \
                    echo "${VCT_INTERFACE_MAC24}:$( echo ${BRIDGE:6:7} ):${VCRD_ID:0:2}:${VCRD_ID:2:3}" )
                local BR_VNET="vct-rd${VCRD_ID}-br$( echo ${BRIDGE:6:7} )"
                VCRD_NETW="${VCRD_NETW} --network bridge=${BR_NAME}"
                [ "$BR_MODEL" ] && VCRD_NETW="${VCRD_NETW},model=${BR_MODEL}"
                [ "$BR_MAC48" != "RANDOM" ] && VCRD_NETW="${VCRD_NETW},mac=${BR_MAC48}"
            fi
            # ,target=${BR_VNET}"
            # this requires virsh --version 0.9.9
            # local VCRD_IFACE="bridge ${BR_NAME} --persistent --target ${BR_VNET}"
            # [ "$BR_MODEL" ] && VCRD_IFACE="$VCRD_IFACE --model ${BR_MODEL} "
            # [ "$BR_MAC48" != "RANDOM" ] && VCRD_IFACE="$VCRD_IFACE --mac ${BR_MAC48} "
            # echo "attach-interface $VCRD_IFACE"
            # if ! virsh -c qemu:///system attach-interface $VCRD_NAME $VCRD_IFACE ; then
            # vct_node_remove $VCRD_ID
            # err $FUNCNAME "Failed attaching-interface $VCRD_IFACE to $VCRD_NAME"
            # fi
        done
        # virt-install's raw format is what the file naming calls "img".
        local TEMPLATE_TYPE=$( [ "$VCT_NODE_TEMPLATE_TYPE" = "img" ] && echo "raw" || echo "$VCT_NODE_TEMPLATE_TYPE" )
        local VIRT_CMD="\
virt-install --connect qemu:///system -n $VCRD_NAME -r $VCT_RD_MEM --cpu=pentiumpro --os-type linux \
--import --disk path=$VCRD_PATH,format=$TEMPLATE_TYPE \
--vcpus=1 --noautoconsole --virt-type kvm --hvm --accelerate --noacpi --noapic --noreboot \
$VCRD_NETW"
        # --nonetworks"
        # --graphics none --cpu 486
        echo $VIRT_CMD
        if ! $VIRT_CMD; then
            # Roll back the half-created node on failure.
            vct_node_remove $VCRD_ID
            err $FUNCNAME "Failed creating domain name=$VCRD_NAME"
        fi
    done
}
# Start the libvirt domain(s) for the given node id range.  Refuses to
# start a node whose disk image is currently mounted offline (see
# vct_node_mount/vct_node_unmount).
vct_node_start() {
    local VCRD_ID_RANGE=$1
    local VCRD_ID=
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        # Resolve the domain's disk image path (must live in $VCT_SYS_DIR).
        local VCRD_PATH=$( virsh -c qemu:///system dumpxml $VCRD_NAME | \
            xmlstarlet sel -T -t -m "/domain/devices/disk/source" -v attribute::file -n |
            grep -e "^${VCT_SYS_DIR}" || \
            err $FUNCNAME "Failed resolving disk path for $VCRD_NAME" )
        local VCRD_MNTP=$VCT_MNT_DIR/$VCRD_NAME
        mount | grep "$VCRD_MNTP" >/dev/null && \
            err $FUNCNAME "node-id=$VCRD_ID already mounted offline, use vct_node_unmount"
        # Only start domains that exist, have an image and are "off".
        ( [ -f $VCRD_PATH ] &&\
            virsh -c qemu:///system dominfo $VCRD_NAME | grep -e "^State:" | grep "off" >/dev/null &&\
            virsh -c qemu:///system start $VCRD_NAME ) ||\
            err $FUNCNAME "Failed starting domain $VCRD_NAME"
    done
}
# Attach to the serial console of a running node's libvirt domain.
# If the first attempt fails (typically a permission problem on the
# pts device), make the pts world read/writable and retry once.
vct_node_console() {
    local VCRD_ID=$1; check_rd_id $VCRD_ID quiet
    local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
    if virsh -c qemu:///system dominfo $VCRD_NAME | grep -e "^State:" | grep "running" >/dev/null ; then
        virsh -c qemu:///system console $VCRD_NAME && return 0
        # First attempt failed: locate the console's pts device in the XML.
        local CONSOLE_PTS=$( virsh -c qemu:///system dumpxml $VCRD_NAME | \
            xmlstarlet sel -T -t -m "/domain/devices/console/source" -v attribute::path -n |
            grep -e "^/dev/pts/" || \
            err $FUNCNAME "Failed resolving pts path for $VCRD_NAME" )
        if ! ls -l $CONSOLE_PTS | grep -e "rw....rw." ; then
            vct_sudo chmod o+rw $CONSOLE_PTS
            virsh -c qemu:///system console $VCRD_NAME && return 0
        fi
        err $FUNCNAME "Failed connecting console to domain $VCRD_NAME"
    fi
}
# ssh into each node of the given id range, waiting for the node to
# answer pings first.  With extra arguments, run them as a remote
# command (after sourcing /etc/profile); otherwise open an interactive
# shell.
#
# Arguments: $1     - node id range
#            $2...  - optional remote command (joined by "$@" after shift)
vct_node_ssh() {
    local VCRD_ID_RANGE=$1
    # COMMAND only serves as a "was a command given?" flag; the actual
    # remote command is "$@" after the shift below.
    local COMMAND=${2:-}
    local VCRD_ID=
    shift
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        # A node is reachable if it is a known real node (DB entry) or a
        # running virtual domain.
        if ! ( [ -f $VCT_NODE_MAC_DB ] && grep -e "^$VCRD_ID" $VCT_NODE_MAC_DB >&2 || virsh -c qemu:///system dominfo $VCRD_NAME | grep -e "^State:" | grep "running" >/dev/null ); then
            err $FUNCNAME "$VCRD_NAME not running"
        fi
        local MAC=$( vct_node_get_mac $VCRD_ID )
        local IP=$( vct_node_get_ip_from_db $VCRD_ID )
        local IPV6_RESCUE=${VCT_BR00_V6_RESCUE2_PREFIX64}:$( eui64_from_mac $MAC )
        local COUNT=0
        local COUNT_MAX=60
        # Fall back to the EUI-64 rescue address when the DB has no IP.
        [ -z "$IP" ] && IP=$IPV6_RESCUE
        # NOTE(review): unlike vct_node_scp, this wait loop has no sleep —
        # pacing presumably relies on vct_do_ping's own timeout; confirm.
        while [ "$COUNT" -le $COUNT_MAX ]; do
            vct_do_ping $IP >/dev/null && break
            [ "$COUNT" = 0 ] && \
                echo -n "Waiting for $VCRD_ID to listen on $IP (first boot may take up to 40 secs)" || \
                echo -n "."
            COUNT=$(( $COUNT + 1 ))
        done
        [ "$COUNT" = 0 ] || \
            echo
        [ "$COUNT" -le $COUNT_MAX ] || \
            err $FUNCNAME "Failed connecting to node=$VCRD_ID via $IP"
        # Reset known_hosts: node keys change on every reinstall.
        echo > $VCT_KEYS_DIR/known_hosts
        if [ "$COMMAND" ]; then
            ssh $VCT_SSH_OPTIONS root@$IP ". /etc/profile > /dev/null; $@"
        else
            ssh $VCT_SSH_OPTIONS root@$IP
        fi
    done
}
# scp files to/from each node of the given id range.  The literal token
# "remote:" in the arguments is rewritten to "root@<node-ip>:" (with
# bracketed address syntax for IPv6).  Waits for ping and then for ssh
# to accept connections before copying.
#
# Arguments: $1    - node id range
#            $2... - scp arguments containing "remote:" placeholders
vct_node_scp() {
    local VCRD_ID_RANGE=$1
    local VCRD_ID=
    shift
    local WHAT="$@"
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        # Known real node (DB entry) or running virtual domain required.
        if ! ( [ -f $VCT_NODE_MAC_DB ] && grep -e "^$VCRD_ID" $VCT_NODE_MAC_DB >&2 || virsh -c qemu:///system dominfo $VCRD_NAME | grep -e "^State:" | grep "running" >/dev/null ); then
            err $FUNCNAME "$VCRD_NAME not running"
        fi
        local IP=$( vct_node_get_ip_from_db $VCRD_ID )
        local MAC=$( vct_node_get_mac $VCRD_ID )
        local IPV6_RESCUE=${VCT_BR00_V6_RESCUE2_PREFIX64}:$( eui64_from_mac $MAC )
        local COUNT_MAX=60
        local COUNT=
        # Fall back to the EUI-64 rescue address when the DB has no IP.
        [ -z "$IP" ] && IP="$IPV6_RESCUE"
        local IS_IPV6=$(echo $IP | grep -e ":" -c )
        # Phase 1: wait until the node answers pings.
        COUNT=0
        while [ "$COUNT" -le $COUNT_MAX ]; do
            vct_do_ping $IP >/dev/null && break
            [ "$COUNT" = 0 ] && echo -n "Waiting for $VCRD_ID on $IP (first boot may take up to 40 secs)" >&2 || echo -n "." >&2
            COUNT=$(( $COUNT + 1 ))
        done
        #echo >&2
        # [ "$COUNT" = 0 ] || echo >&2
        [ "$COUNT" -le $COUNT_MAX ] || err $FUNCNAME "Failed ping6 to node=$VCRD_ID via $IP"
        # Phase 2: wait until sshd accepts connections (1s between tries).
        COUNT=0
        while [ "$COUNT" -le $COUNT_MAX ]; do
            echo > $VCT_KEYS_DIR/known_hosts
            ssh $VCT_SSH_OPTIONS root@$IP "exit" 2>/dev/null && break
            sleep 1
            [ "$COUNT" = 0 ] && echo -n "Waiting for $VCRD_ID to accept ssh..." >&2 || echo -n "." >&2
            COUNT=$(( $COUNT + 1 ))
        done
        #echo >&2
        # [ "$COUNT" = 0 ] || echo >&2
        [ "$COUNT" -le $COUNT_MAX ] || err $FUNCNAME "Failed ssh to node=$VCRD_ID via $IP"
        echo > $VCT_KEYS_DIR/known_hosts
        # IPv6 literals must be bracketed in scp target syntax.
        if [ $IS_IPV6 -ne 0 ]; then
            scp $VCT_SSH_OPTIONS $( echo $WHAT | sed s/remote:/root@\[$IP\]:/ ) 2>/dev/null
        else
            scp $VCT_SSH_OPTIONS $( echo $WHAT | sed s/remote:/root@$IP:/ ) 2>/dev/null
        fi
    done
}
# Development round-trip helper: copy the confine-system / lxc /
# confine-parted files back from a running node into the local package
# source trees, so on-node edits can be committed.
# Arguments: $1 = single node id.
vct_node_scp_cns() {
    local VCRD_ID=$1; check_rd_id $VCRD_ID quiet
    local CNS_FILES_DIR="$VCT_DIR/../../packages/confine/confine-system/files"
    local LXC_FILES_DIR="$VCT_DIR/../../packages/confine/lxc/files"
    local SFS_FILES_DIR="$VCT_DIR/../../packages/confine/confine-parted/files"
    # This is automatic but slow:
    # for f in $(cd $CNS_FILES_DIR && find | grep -v "/etc/config"); do
    # echo $f
    # [ -f $CNS_FILES_DIR/$f ] && \
    # vct_node_scp $VCRD_ID remote:/$f $CNS_FILES_DIR/$f || true
    # done
    # This is manual but faster:
    vct_node_scp $VCRD_ID remote:/usr/lib/lua/confine/*.lua $CNS_FILES_DIR/usr/lib/lua/confine/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.lib $CNS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.functions $CNS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.udhcpc.test $CNS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.remote-upgrade $SFS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.disk-parted $SFS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/confine.sysupgrade $SFS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/usr/sbin/lxc.* $CNS_FILES_DIR/usr/sbin/
    vct_node_scp $VCRD_ID remote:/etc/lxc/scripts/*.sh $CNS_FILES_DIR/etc/lxc/scripts/
    # BUGFIX: was "remote:/etc/config/confine-default s" -- a stray space split
    # the filename, requesting a non-existent remote file and passing a bogus
    # extra "s" argument to scp.
    vct_node_scp $VCRD_ID remote:/etc/config/confine-defaults $CNS_FILES_DIR/etc/config/
    vct_node_scp $VCRD_ID remote:/etc/init.d/confine $CNS_FILES_DIR/etc/init.d/
    vct_node_scp $VCRD_ID remote:/etc/confine-ebtables.lst $CNS_FILES_DIR/etc/
}
# Loop-mount the root filesystem (partition 2) of one or more *stopped*
# node disk images and print each resulting mount point.
# Arguments: $1 = node-id set.
vct_node_mount() {
    local VCRD_ID_RANGE=$1
    local VCRD_ID=
    for VCRD_ID in $( vcrd_ids_get $VCRD_ID_RANGE ); do
        local VCRD_NAME="${VCT_RD_NAME_PREFIX}${VCRD_ID}"
        # Resolve the backing image file from the libvirt domain XML.
        local VCRD_PATH=$( virsh -c qemu:///system dumpxml $VCRD_NAME | \
            xmlstarlet sel -T -t -m "/domain/devices/disk/source" -v attribute::file -n |
            grep -e "^${VCT_SYS_DIR}" || \
            err $FUNCNAME "Failed resolving disk path for $VCRD_NAME" )
        local VCRD_MNTP=$VCT_MNT_DIR/$VCRD_NAME
        # Only mount when the image exists, is not already mounted, and the
        # domain is shut off (mounting a live image would corrupt it).
        if [ -f $VCRD_PATH ] && \
            ! mount | grep "$VCRD_MNTP" >/dev/null && \
            virsh -c qemu:///system list --all 2>/dev/null | grep $VCRD_NAME | grep "shut off" >/dev/null; then
            # Derive the byte offset of partition 2 from the fdisk listing.
            local IMG_UNIT_SIZE=$( fdisk -lu $VCRD_PATH 2>/dev/null | \
                grep "^Units = " | awk -F'=' '{print $(NF) }' | awk '{print $1 }' )
            local IMG_ROOTFS_START=$( fdisk -lu $VCRD_PATH 2>/dev/null | \
                grep "${VCRD_PATH}2" | awk '{print $(NF-4) }' )
            [ $IMG_UNIT_SIZE ] && [ $IMG_ROOTFS_START ] || \
                err $FUNCNAME "Failed resolving rootfs usize=$IMG_UNIT_SIZE start=$IMG_ROOTFS_START"
            mkdir -p $VCRD_MNTP
            # BUGFIX: a leftover debugging "echo" prefixed this command, so the
            # image was only printed, never mounted, and the || err branch
            # could never fire.
            vct_sudo mount -o loop,rw,offset=$(( $IMG_UNIT_SIZE * $IMG_ROOTFS_START )) $VCRD_PATH $VCRD_MNTP || \
                err $FUNCNAME "Failed mounting $VCRD_PATH"
            echo $VCRD_MNTP
        else
            err $FUNCNAME "Failed offline mounting node-id=$VCRD_ID"
        fi
    done
}
# Undo vct_node_mount: unmount and remove the mount point of each node
# image in the given node-id set that is currently mounted.
vct_node_unmount() {
    local NODE_SET=$1
    local NODE_ID=
    for NODE_ID in $( vcrd_ids_get $NODE_SET ); do
        local DOMAIN_NAME="${VCT_RD_NAME_PREFIX}${NODE_ID}"
        local MNT_POINT=$VCT_MNT_DIR/$DOMAIN_NAME
        # Skip nodes whose image is not mounted.
        mount | grep "$MNT_POINT" >/dev/null || continue
        vct_sudo umount $MNT_POINT || \
            err $FUNCNAME "Failed unmounting $MNT_POINT"
        rmdir $MNT_POINT
    done
}
# Build the node base firmware image from the build tree two levels up and
# publish it via a symlink in $VCT_DL_DIR for the controller portal.
# Extra arguments are forwarded to make; passing "V=s" (verbose) forces a
# single-job build so interleaved output stays readable.
vct_build_node_base_image() {
    local BUILD_PATH="$VCT_DIR/../.."
    local IMAGE_NAME="vct-node-base-image-build.img.gz"
    # Highest processor index from /proc/cpuinfo, used as the parallel job count.
    local CPU_COUNT=$(cat /proc/cpuinfo | grep processor | tail -1 | awk '{print $3}')
    for parameter in "$@"
    do
        if [ "$parameter" == "V=s" ]
        then
            CPU_COUNT=1
        fi
    done
    # Refresh package feeds only if an openwrt checkout is present, then build.
    ( ! [ -d $BUILD_PATH/openwrt/scripts ] || (\
    cd $BUILD_PATH/openwrt &&\
    ./scripts/feeds update -a &&\
    ./scripts/feeds install -a )) &&\
    cd $BUILD_PATH &&\
    make confclean $@ &&\
    make J=$CPU_COUNT $@ &&\
    ln -fs $BUILD_PATH/images/CONFINE-owrt-current.img.gz $VCT_DL_DIR/$IMAGE_NAME &&\
    echo &&\
    echo "The new image is available at:" &&\
    echo "$BUILD_PATH/images/CONFINE-owrt-current.img.gz" &&\
    echo "And via the controller portal at:" &&\
    echo "administration->firmware->configuration->Image as:" &&\
    echo "$IMAGE_NAME" || {
        # On any failure, drop the (possibly stale) download symlink.
        rm -f $VCT_DL_DIR/$IMAGE_NAME
        echo
        echo "Building new image failed!"
        return 1
    }
}
# Rebuild the node base image from a pristine tree: wipe previous build
# state, restore the git checkout (including submodules), run the prepare
# step, then delegate to vct_build_node_base_image with any extra args.
vct_build_node_base_image_clean() {
    local SRC_ROOT="$VCT_DIR/../.."
    rm -rf $SRC_ROOT/.prepared $SRC_ROOT/openwrt $SRC_ROOT/packages
    git reset --hard HEAD
    git submodule sync
    git submodule update --init --recursive
    make prepare
    vct_build_node_base_image $@
}
# Pack an experiment directory into a sliver-data tarball that can be
# uploaded through the controller portal.
# Arguments: $1 = path to the experiment directory; its last path component
# (trailing slash stripped) names the resulting archive.
vct_build_sliver_data() {
    local EXP_PATH=$1
    local EXP_TAIL="$(echo $EXP_PATH | sed 's/\/$//' | awk -F'/' '{print $NF}')"
    local EXP_NAME="vct-sliver-data-build-$EXP_TAIL.tgz"
    # Archive with root ownership and without editor backup files (*~).
    [ -d $EXP_PATH ] &&\
    tar -czvf $VCT_DL_DIR/$EXP_NAME --exclude=*~ --numeric-owner --group=root --owner=root -C $EXP_PATH . &&\
    echo &&\
    echo "The slice/sliver data archive is available via the controller portal at:" &&\
    echo "slices->[select slice]->sliver data as:" &&\
    echo "$EXP_NAME" || {
        # On failure remove the partially written archive.
        rm -f $VCT_DL_DIR/$EXP_NAME
        echo
        echo "Building new slice/sliver data failed!"
        return 1
    }
}
# Build a sliver template rootfs tarball for either debian or openwrt.
# Arguments: $1 = OS_TYPE[/OS_VARIANT]; defaults to "debian" (variant
#            "wheezy") or, for openwrt, variant "aa". Remaining arguments
#            are forwarded to make in the openwrt branch.
# NOTE(review): the function ends with an unconditional "return 1", so it
# reports failure even after a successful build -- confirm this is intended.
vct_build_sliver_template() {
    local OS=${1:-"debian"}
    local OS_TYPE=$(echo $OS | awk -F'/' '{print $1}')
    local OS_VARIANT=$(echo $OS | awk -F'/' '{print $2}')
    shift
    mkdir -p $VCT_VIRT_DIR/sliver-templates
    if [ "$OS_TYPE" == "debian" ]; then
        OS_VARIANT=${OS_VARIANT:-"wheezy"}
        local TMPL_DIR=$VCT_VIRT_DIR/sliver-templates/$OS_TYPE-$OS_VARIANT
        local TMPL_NAME=vct-sliver-template-build-$OS_TYPE-$OS_VARIANT
        vct_sudo rm -rf $TMPL_DIR
        mkdir -p $TMPL_DIR
        # NOTE(review): "[ "LXCDEBCONFIG_SLIVER_TEMPLATE" ]" tests a non-empty
        # string literal, which is always true; the negated condition below is
        # therefore always false and this branch is dead code -- the literals
        # look like hard-coded build-variant switches; confirm.
        if ! [ "LXCDEBCONFIG_SLIVER_TEMPLATE" ]; then
            # Documentation: https://wiki.confine-project.eu/soft:debian-template
            VCT_LXC_PACKAGES_DIR="/usr/share/lxc/packages"
            VCT_LIVEDEB_CFG=$VCT_DIR/templates/debian,wheezy,i386.cfg
            VCT_LIVEDEB_PACKAGE_URL="http://live.debian.net/files/4.x/packages/live-debconfig/4.0~a27-1/live-debconfig_4.0~a27-1_all.deb"
            VCT_LIVEDEB_PACKAGE_SHA="7a7c154634711c1299d65eb5acb059eceff7d3328b5a34030b584ed275dea1fb"
            VCT_LIVEDEB_PACKAGE_DEB="$(echo $VCT_LIVEDEB_PACKAGE_URL | awk -F'/' '{print $NF}')"
            # Re-download the live-debconfig package unless the cached copy
            # matches the pinned sha256.
            [ -f $VCT_LXC_PACKAGES_DIR/$VCT_LIVEDEB_PACKAGE_DEB ] && [ "$(sha256sum $VCT_LXC_PACKAGES_DIR/$VCT_LIVEDEB_PACKAGE_DEB |awk '{print $1}' )" = "$VCT_LIVEDEB_PACKAGE_SHA" ] || {
                vct_sudo rm -f $VCT_LXC_PACKAGES_DIR/$VCT_LIVEDEB_PACKAGE_DEB
                vct_sudo mkdir -p $VCT_LXC_PACKAGES_DIR
                vct_sudo wget -P $VCT_LXC_PACKAGES_DIR $VCT_LIVEDEB_PACKAGE_URL
            }
            vct_sudo lxc-create -t debian -n $TMPL_NAME -B --dir $TMPL_DIR -- --preseed-file=$VCT_LIVEDEB_CFG
        # NOTE(review): always-true literal test -- this debootstrap branch is
        # effectively the active one.
        elif [ "DEBOOTSTRAP_SLIVER_TEMPLATE" ]; then
            # Inspired by: http://www.wallix.org/2011/09/20/how-to-use-linux-containers-lxc-under-debian-squeeze/
            vct_sudo debootstrap --verbose --variant=minbase --arch=i386 --include $VCT_SLIVER_TEMPLATE_DEBIAN_PACKAGES $OS_VARIANT $TMPL_DIR/rootfs $VCT_SLIVER_TEMPLATE_DEBIAN_BASE_URL
            vct_sudo rm -f $TMPL_DIR/rootfs/var/cache/apt/archives/*.deb
            vct_sudo rm -f $TMPL_DIR/rootfs/dev/shm
            vct_sudo mkdir -p $TMPL_DIR/rootfs/dev/shm
            # Disable init scripts that make no sense inside a container
            # (hardware clock, filesystem checks, kernel module loading).
            vct_sudo chroot $TMPL_DIR/rootfs /usr/sbin/update-rc.d -f umountfs remove
            vct_sudo chroot $TMPL_DIR/rootfs /usr/sbin/update-rc.d -f hwclock.sh remove
            vct_sudo chroot $TMPL_DIR/rootfs /usr/sbin/update-rc.d -f hwclockfirst.sh remove
            # vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr checkroot.sh || true
            # vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr checkfs.sh || true
            # vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr mtab.sh || true
            # vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr checkroot-bootclean.sh || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr hwclockfirst.sh || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr hwclock.sh || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr kmod || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr module-init-tools || true
            # vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr mountall.sh || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr mountkernfs.sh || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr umountfs || true
            vct_sudo chroot $TMPL_DIR/rootfs /sbin/insserv -fr umountroot || true
            # Disable ssh password logins (key-based access only).
            vct_sudo_sh "cat <<EOF >> $TMPL_DIR/rootfs/etc/ssh/sshd_config
PasswordAuthentication no
EOF
"
            # Set the root password inside the template.
            vct_sudo chroot $TMPL_DIR/rootfs passwd<<EOF
$VCT_SLIVER_TEMPLATE_PASSWD
$VCT_SLIVER_TEMPLATE_PASSWD
EOF
            # Remove ssh host keys so each sliver gets unique ones -- presumably
            # regenerated on first boot; confirm against the sliver init scripts.
            vct_sudo rm -f $TMPL_DIR/rootfs/etc/ssh/ssh_host_*_key*
            # vct_sudo ssh-keygen -q -f $TMPL_DIR/rootfs/etc/ssh/ssh_host_rsa_key -N '' -t rsa
            # vct_sudo ssh-keygen -q -f $TMPL_DIR/rootfs/etc/ssh/ssh_host_dsa_key -N '' -t dsa
            # Container-friendly inittab: getty on the container ttys/console.
            vct_sudo_sh "cat <<EOF > $TMPL_DIR/rootfs/etc/inittab
id:2:initdefault:
si::sysinit:/etc/init.d/rcS
#~:S:wait:/sbin/sulogin
l0:0:wait:/etc/init.d/rc 0
l1:1:wait:/etc/init.d/rc 1
l2:2:wait:/etc/init.d/rc 2
l3:3:wait:/etc/init.d/rc 3
l4:4:wait:/etc/init.d/rc 4
l5:5:wait:/etc/init.d/rc 5
l6:6:wait:/etc/init.d/rc 6
z6:6:respawn:/sbin/sulogin
1:2345:respawn:/sbin/getty 38400 console
# new from vctc:
c1:12345:respawn:/sbin/getty 38400 tty1 linux
c2:12345:respawn:/sbin/getty 38400 tty2 linux
c3:12345:respawn:/sbin/getty 38400 tty3 linux
c4:12345:respawn:/sbin/getty 38400 tty4 linux
p0::powerfail:/sbin/init 0
p6::ctrlaltdel:/sbin/init 6
EOF
"
            vct_sudo tar -czvf $VCT_DL_DIR/$TMPL_NAME.tgz --numeric-owner --directory $TMPL_DIR/rootfs .
            echo
            echo "The slice/sliver template image can be uploaded via the controller portal at:"
            echo "Slices->Templates->[select template]->image from:"
            echo $VCT_DL_DIR/$TMPL_NAME.tgz
            ls -l $VCT_DL_DIR/$TMPL_NAME.tgz
            echo
        fi
    elif [ "$OS_TYPE" == "openwrt" ]; then
        OS_VARIANT=${OS_VARIANT:-"aa"}
        local DL_PATH="$VCT_USER_HOME/dl"
        local BUILD_DIR=$VCT_VIRT_DIR/sliver-templates/$OS_TYPE-$OS_VARIANT
        local BUILD_NAME=vct-sliver-template-build-$OS_TYPE-$OS_VARIANT
        # Pick the git URL matching the requested openwrt variant (aa or bb).
        local GIT_URL=$( ( [ "$OS_VARIANT" == "aa" ] && echo $VCT_SLIVER_TEMPLATE_OPENWRT_AA_SYSTEM_GIT_URL) || ( [ "$OS_VARIANT" == "bb" ] && echo $VCT_SLIVER_TEMPLATE_OPENWRT_BB_SYSTEM_GIT_URL) || echo "ERROR")
        local BUILD_CONFIG="$BUILD_DIR/openwrt/.config"
        mkdir -p $BUILD_DIR
        mkdir -p $DL_PATH
        # Clone or update the checkout, share the download cache, refresh
        # feeds, generate a .config from the configured options/packages,
        # build, and publish the rootfs tarball to $VCT_DL_DIR.
        (( [ -d $BUILD_DIR/openwrt ] && cd $BUILD_DIR/openwrt && git remote show origin && git pull origin && git status) || git clone $GIT_URL $BUILD_DIR/openwrt) &&\
        ln -fs $DL_PATH $BUILD_DIR/openwrt/dl &&\
        ( cd $BUILD_DIR/openwrt &&\
        scripts/feeds update -a &&\
        scripts/feeds install -a ) &&\
        echo "$VCT_SLIVER_TEMPLATE_OPENWRT_BUILD_OPTS" > $BUILD_CONFIG &&\
        ( for PACKAGE in ${VCT_SLIVER_TEMPLATE_OPENWRT_PACKAGES}; do echo "CONFIG_PACKAGE_${PACKAGE}=y" >> $BUILD_CONFIG; done ) &&\
        make -C $BUILD_DIR/openwrt defconfig > /dev/null &&\
        time make -C $BUILD_DIR/openwrt J=$(cat /proc/cpuinfo | grep processor | tail -1 | awk '{print $3}') $@ &&\
        cp $BUILD_DIR/openwrt/bin/x86/openwrt-x86-generic-rootfs.tar.gz $VCT_DL_DIR/$BUILD_NAME.tgz &&\
        true || err $0 "Failed building $OS node image!!" || return 1
        echo
        echo "The slice/sliver template image can be uploaded via the controller portal at:"
        echo "Slices->Templates->[select template]->image from:"
        echo $VCT_DL_DIR/$BUILD_NAME.tgz
        ls -l $VCT_DL_DIR/$BUILD_NAME.tgz
        echo
    fi
    return 1
}
# Print the command synopsis for the vct tool set to stdout.
vct_help() {
    cat <<EOF
usage...
vct_help
vct_system_install [OVERRIDE_DIRECTIVES] : install vct system requirements
vct_system_init : initialize vct system on host
vct_system_cleanup [flush] : revert vct_system_init
and optionally remove testbed data
vct_system_purge : purge vct installation
Node Management Functions
-------------------------
vct_node_info [NODE_SET] : summary of existing domain(s)
vct_node_create <NODE_SET> : create domain with given NODE_ID
vct_node_start <NODE_SET> : start domain with given NODE_ID
vct_node_stop <NODE_SET> : stop domain with given NODE_ID
vct_node_remove <NODE_SET> : remove domain with given NODE_ID
vct_node_console <NODE_ID> : open console to running domain
vct_node_ssh <NODE_SET> ["COMMANDS"] : ssh connect via recovery IPv6
vct_node_scp <NODE_SET> <SCP_ARGS> : copy via recovery IPv6
vct_node_mount <NODE_SET>
vct_node_unmount <NODE_SET>
Build Functions
---------------
vct_build_node_base_image : Build node image
vct_build_node_base_image_clean : Build node image from scratch
vct_build_sliver_data <EXP_DIR> : Build sliver data from dir
vct_build_sliver_template <OS_TYPE> : Build sliver template image
Argument Definitions
--------------------
OVERRIDE_DIRECTIVES:= comma seperated list of directives: node,server,keys
NODE_ID:= node id given by a 4-digit lower-case hex value (eg: 0a12)
NODE_SET:= node set as: 'all', NODE_ID, NODE_ID-NODE_ID (0001-0003)
COMMANDS:= Commands to be executed on node
SCP_ARGS:= MUST include 'remote:' which is substituted by 'root@[IPv6]:'
EXP_DIR:= a directoy name that must exist in utis/vct/experiments
OS_TYPE:= either debian or openwrt
EOF
}
# ---- script entry point ----
# Validate configuration, refuse to run as root or the wrong user, then
# dispatch: either invoked as "vct.sh <function> [args]" or via a symlink
# whose basename names the function to run.
vct_system_config_check
# check if correct user:
if [ $(whoami) != $VCT_USER ] || [ $(whoami) = root ] ;then
    err $0 "command must be executed as non-root user=$VCT_USER" || return 1
fi
# Basename of $0 selects the dispatch mode.
CMD=$( echo $0 | awk -F'/' '{print $(NF)}' )
if [ "$CMD" = "vct.sh" ]; then
    # Generic invocation: first argument is the function name.
    if [ "${1:-}" ]; then
        "$@"
    else
        vct_help
    fi
else
    # Symlink invocation: the link name itself is the function; unknown
    # names fall through to the help text.
    case "$CMD" in
        vct_help) $CMD;;
        vct_system_install_check) $CMD "$@";;
        vct_system_install) $CMD "$@";;
        vct_system_init_check) $CMD "$@";;
        vct_system_init) $CMD "$@";;
        vct_system_cleanup) $CMD "$@";;
        vct_system_purge) $CMD "$@";;
        vct_node_info) $CMD "$@";;
        vct_node_create) $CMD "$@";;
        vct_node_start) $CMD "$@";;
        vct_node_stop) $CMD "$@";;
        vct_node_remove) $CMD "$@";;
        vct_node_console) $CMD "$@";;
        vct_node_ssh) $CMD "$@";;
        vct_node_scp) $CMD "$@";;
        vct_node_scp_cns) $CMD "$@";;
        vct_node_mount) $CMD "$@";;
        vct_node_unmount) $CMD "$@";;
        vct_build_node_base_image*) $CMD "$@";;
        vct_build_sliver_data) $CMD "$@";;
        vct_build_sliver_template) $CMD "$@";;
        *) vct_help;;
    esac
fi
#echo "successfully finished $0 $*" >&2
| true
|
4c47ba39c91fc8d749ed4b5546716ff74e4632ac
|
Shell
|
jeromecoutant/mbed
|
/tools/test/travis-ci/doxy-spellchecker/spell.sh
|
UTF-8
| 5,098
| 3.671875
| 4
|
[
"SGI-B-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"MPL-2.0",
"BSD-3-Clause",
"BSD-4-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -eu
# mbed Microcontroller Library
# Copyright (c) 2018 ARM Limited
# SPDX-License-Identifier: Apache-2.0
#
# Spell-check the doxygen comments of all headers under directory $1
# (excluding paths matched by the regex list in file $2 and any *target*
# directories). Prints every misspelled word with its line numbers and
# exits non-zero if any were found. Pass "-vv" as $2 to dump the filtered
# comment text per file.
set -o pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
ERRORS=0
# Loops use here strings to allow them to run in the main shell and modify the correct version of
# the error counter global variable
while read file; do
    echo "${file}"
    # Extract /** ... */ comment bodies, then strip hex literals and digits
    # so numbers are never reported as spelling errors.
    res=$(awk '/\/\*\*/,/\*\//' "${file}" | cut -d '/' -f2 | sed 's/0x[^ ]*//' | sed 's/[0-9]*//g')
    # Select a token to begin on, then a formating option such as strip all text between the start
    # and end token, strip an entire line containing the start token, or strip a portion of a line
    # containing the start token. Select an appropiate end token. The tokens and formats are index
    # matched.
    start_tokens=( "/@code"
    "/addtogroup"
    "ingroup"
    "defgroup"
    "<"
    "()"
    )
    formats=( 'strip_between'
    'strip_between'
    'strip_line'
    'strip_line'
    'strip_between_sameline'
    'strip_token'
    )
    end_tokens=( "/@endcode"
    "/\*"
    ""
    ""
    ">"
    ""
    )
    # Stripping strings between tokens P1-P2 and P3-P4 inclusively ran into issues depending
    # on if the tokens were on the same line or not.
    #_________________________________________
    # Don't remove this P1 remove me P2
    # Keep me
    # P3
    # Remove me too please
    # P4
    # Keep me too
    # Still here P1 But this shouldn't be P2
    #_________________________________________
    #
    # Opted for having two separate formats. In particular this formatting issue came up when
    # trying to strip the code segments and template type arguments between '<, >' as the multiline
    # sed command would strip the entire line, causing the removal string to span across the entire file
    # when trying to match the next end token (above format when stripping everything between P1 and P2
    # would end up with just "Don't remove this" and the rest of the file stripped).
    for ((i=0;i<${#start_tokens[@]};++i)); do
        filter=""
        if [[ "${formats[i]}" == 'strip_between' ]]; then
            filter=$(<<< "${res}" sed "${start_tokens[i]}/,${end_tokens[i]}/d")
        elif [[ "${formats[i]}" == 'strip_between_sameline' ]]; then
            filter=$(<<< "${res}" sed -e "s/"${start_tokens[i]}".*"${end_tokens[i]}"//")
        elif [[ "${formats[i]}" == 'strip_line' ]]; then
            filter=$(<<< "${res}" sed "/"${start_tokens[i]}"/ d")
        elif [[ "${formats[i]}" == 'strip_token' ]]; then
            filter=$(<<< "${res}" sed "s/"${start_tokens[i]}"//g")
        fi
        # Keep the previous text if the filter produced an empty result.
        if [ "${filter}" != "" ]; then
            res=${filter}
        fi
    done
    if [ "${2:-}" == "-vv" ]; then
        echo "${res}"
    fi
    prev_err=("")
    # Run aspell over the filtered text and post-process its word list.
    while read err; do
        # Only report a word if every occurrence in the file came from a
        # doxygen comment (counts in filtered text and raw file match).
        if [ $(echo "${res}" | grep "${err}" | wc -l) -eq $(grep "${err}" "${file}" | wc -l) ]; then
            # Do not count all caps words as errors (RTOS, WTI, etc) or plural versions (APNs/MTD's)
            if ! [[ ${err} =~ ^[A-Z]+$ || ${err} =~ ^[A-Z]+s$ || ${err} =~ ^[A-Z]+\'s$ ]]; then
                # Disregard camelcase/underscored words. Hex was stripped at the beginning
                if ! echo "${err}" | grep --quiet -E '[a-z]{1,}[A-Z]|_'; then
                    # The grep command to fetch the line numbers will report all instances, do not
                    # list repeated error words found from aspell in each file
                    if ! [[ ${prev_err[*]} =~ "${err}" ]]; then
                        prev_err+=("${err}")
                        if [ ${#prev_err[@]} -eq 2 ]; then
                            echo "================================="
                            echo "Errors: "
                        fi
                        while read ln; do
                            echo "${ln} ${err}"
                            ERRORS=$((ERRORS + 1))
                        done <<< "$(grep -n "${err}" "${file}" | cut -d ' ' -f1)"
                    fi
                fi
            fi
        fi
    done <<< "$(echo "${res}" | aspell list -C --ignore-case -p "${DIR}"/ignore.en.pws --local-data-dir "${DIR}")"
    if [ ${#prev_err[@]} -ne 1 ]; then
        echo "_________________________________"
    fi
    # ${1}: directory to check
    # ${2}: file containing a list of paths (regex) to exclude
done < <(find "${1}" -type d -iname "*target*" -prune -o -name '*.h' -print | grep -v -f "${2}")
echo "----------------------------------------------------------------------------------"
echo "Total Errors Found: ${ERRORS}"
if [ ${ERRORS} -ne 0 ]; then
    echo "If any of the failed words should be considered valid please add them to the ignore.en.pws file"\
    "found in tools/test/travis-ci/doxy-spellchecker between the _code_ and _doxy_ tags."
    exit 1
fi
| true
|
e831399e29702476e65cc0340b184a1b9857e39b
|
Shell
|
cgreer/ResearchScripts
|
/command/mirPipe.sh
|
UTF-8
| 1,292
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# miRNA prediction pipeline driver: runs a fixed sequence of python stages
# for the dataset/sample identifier passed as $1. Each stage lives in its
# own directory under /home/chrisgre/scripts and is announced on stdout.
echo 'Defining Clusters'
cd /home/chrisgre/scripts/defineClusters
python defineClusters.py $1
echo 'Filtering Out Known MicroRNAs/snoRNAs'
cd /home/chrisgre/scripts/filterKnown
python filterOutKnown.py $1
echo 'Updating Results File/Running Initial Sort'
cd /home/chrisgre/scripts/updateFPF
python addPeriods.py $1
python updateDensity.py $1
python updateOverlaps.py $1
python updateClusterInfo.py $1
python sortResults.py $1
echo 'Splitting Results into Exons and Introns'
cd /home/chrisgre/scripts/splitExonIntron
python splitExonsIntrons.py $1
echo 'Finding Highest Read Count Per Cluster'
cd /home/chrisgre/scripts/readDensity
# Stages below run twice: E = exons, I = introns.
echo ' Exons...'
python getMaxReadDensity.py E $1
echo ' Introns...'
python getMaxReadDensity.py I $1
echo 'Collecting Noise Data'
cd /home/chrisgre/scripts/noisyRanges
echo ' Exons...'
python exonNoisy.py $1
echo ' Introns...'
python intronNoisy.py $1
echo 'Updating PVals For Prediction File'
python updateSignalNoise.py E $1
python updateSignalNoise.py I $1
echo 'Sorting (FINAL)'
cd /home/chrisgre/scripts/updateFPF
python finalSort.py E $1
python finalSort.py I $1
echo 'Locating Predictions w/ Canonical Peaks'
cd /home/chrisgre/scripts/seqPeaks
python bestSinglePeakPlus.py E $1
python bestSinglePeakPlus.py I $1
echo 'Done'
| true
|
ae63b4ddf27bf9e31b46ac477c4a1d58f4f83e10
|
Shell
|
gsc0107/BLAST-comparison
|
/run_orto2fasta.sh
|
UTF-8
| 404
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Extract FASTA sequences for every orthologous group listed in $ORT_GROUPS
# (CSV with a header row: group,<column-2 accession>,<column-3 accession>)
# by running $RUN under GNU parallel with $THR concurrent jobs, once for
# each of the two taxon columns.
ORT_GROUPS='ortho_group.csv'
THR=13
RUN='./orto2fasta.sh'
OUT_DIR='./tmp'
# Column 2: Viridiplantae accession per group.
# (Fixed: dropped the useless "cat | awk" and quoted all expansions.)
awk -F ',' '{print $1" "$2}' "$ORT_GROUPS" | tail -n +2 | parallel --verbose --colsep ' ' --jobs "$THR" "$RUN {2} > $OUT_DIR/{1}_{2}_Viridiplantae.fasta"
# Column 3: Eukaryota accession per group.
awk -F ',' '{print $1" "$3}' "$ORT_GROUPS" | tail -n +2 | parallel --verbose --colsep ' ' --jobs "$THR" "$RUN {2} > $OUT_DIR/{1}_{2}_Eukaryotal.fasta"
| true
|
7ef96a32ffbfcb5231d21447b627a90431184bf7
|
Shell
|
pranet/bhuman2009fork
|
/Install/switchActiveWirelessConfig.sh
|
UTF-8
| 2,068
| 4.0625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Switch the active wireless (wpa_supplicant) configuration on all robots
# listed in robotAddresses.sh by copying the profile selected via the
# <suffix> argument into place and restarting the wireless service.
# Optionally (-u) runs updateWirelessConfig.sh first.
set -eu
# Resolve this script's absolute path so the include dir can be located
# regardless of the caller's working directory.
export scriptPath=$(echo $0 | sed "s|^\.\./|`pwd`/../|" | sed "s|^\./|`pwd`/|")
export baseDir=$(dirname ${scriptPath})
export gtDir=$(dirname ${baseDir})
export includeDir="${baseDir}/include/"
source "${includeDir}/bhumanBase.sh"
source "${includeDir}/robotAddresses.sh"
wlanConfigSuffix=""
runUpdateBeforeActivation=""
# Copy the selected wpa_supplicant profile into place on robot $1 and
# restart its wireless service; reports an error if the copy fails.
function switchConfig()
{
    if ssh -i "${configDir}/Keys/id_rsa_nao" -o StrictHostKeyChecking=no root@${1} cp "/media/userdata/system/wpa_supplicant.d/wpa_supplicant.conf_${wlanConfigSuffix}" "/media/userdata/system/wpa_supplicant.conf" > "${stdout}" 2> "${stderr}" ; then
        message "Switching wireless configuration to ${wlanConfigSuffix} on ${1}"
        ssh -i "${configDir}/Keys/id_rsa_nao" -o StrictHostKeyChecking=no root@${1} /etc/init.d/wireless restart >"${stdout}" 2> "${stderr}"
    else
        error "Failed to switch wireless configuration to ${wlanConfigSuffix} on ${1}"
    fi
}
# Print usage help and exit with status 1.
function usage()
{
    echo "usage:"
    echo "${0} [-h]"
    echo "${0} [-u] <suffix>"
    echo ""
    echo " <suffix>: the suffix to select the wlan configuration."
    echo " The file name must be 'wpa_supplicant.conf_<suffix>'"
    echo ""
    echo " -u : run updateWirelessConfig.sh before switching to the new configuration"
    echo " -h : Display this help"
    exit 1
}
# Parse -u/-h options and the mandatory <suffix> positional argument into
# the globals runUpdateBeforeActivation and wlanConfigSuffix.
function parseOptions()
{
    while getopts :uh opt ; do
        case "$opt" in
            u)
                runUpdateBeforeActivation="true"
                ;;
            [?]|h)
                usage
                ;;
        esac
    done
    shift $(($OPTIND-1))
    if ! [ $# -eq 1 ] ; then
        error "bad number of arguments"
        usage
    fi
    wlanConfigSuffix="${1}"
    debug "runUpdateBeforeActivatione: " "${runUpdateBeforeActivation}"
    debug "wlanConfigSuffix: " "${wlanConfigSuffix}"
}
if [ $# -lt 1 ] ; then
    error "bad number of arguments"
    usage
else
    parseOptions "${@}"
fi
if [ -n "${runUpdateBeforeActivation}" ]; then
    ${baseDir}/updateWirelessConfig.sh
fi
# Switch all robots concurrently, then wait for every background job.
for ((i=0; i<numOfRobotAddresses; i++)); do
    switchConfig "${robotAddresses[${i}]}" &
done
wait
|
c82c8151b5288f33a6cc6947a2c6709b209eb7b1
|
Shell
|
lenik/uni
|
/fs/vexmount/mount-vpart.in
|
UTF-8
| 2,823
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Mount a single partition of a whole-disk image file on a loop device.
# The partition table is read with sfdisk, the requested partition's start
# sector is converted to a byte offset, and mount is invoked with
# -o loop,offset=... on the next free loop device.
: ${RCSID:=$Id: - @VERSION@ @DATE@ @TIME@ - $}
: ${PROGRAM_TITLE:=Mount a partition in a disk image file}
: ${PROGRAM_SYNTAX:=[OPTIONS] [--] file.img mount-point}
. shlib-import cliboot
option -p --partition =index "Partition other then the first to mount (1-based)"
option -o --mountopts =options "Options passed to mount -o"
option -q --quiet "Repeat to get less info"
option -v --verbose "Repeat to get more info"
option -h --help "Show this help page"
option --version "Print the version info"
partition=1
mountopts=
# Option callback invoked by the cliboot framework for each parsed option.
function setopt() {
    case "$1" in
        -p|--partition)
            partition=$2;;
        -o|--mountopts)
            mountopts="$2";;
        -h|--help)
            help $1; exit;;
        -q|--quiet)
            LOGLEVEL=$((LOGLEVEL - 1));;
        -v|--verbose)
            LOGLEVEL=$((LOGLEVEL + 1));;
        --version)
            show_version; exit;;
        *)
            quit "invalid option: $1";;
    esac
}
# Main entry: $1 = disk image file, $2 = existing mount-point directory.
function main() {
    if [ $# != 2 ]; then
        echo "both device-image and mount-point is required. "
        exit 1
    fi
    img="$1"
    mpoint="$2"
    if [ ! -f "$img" ]; then
        echo "Device image file isn't existed: $img"
        exit 1
    fi
    if [ ! -d "$mpoint" ]; then
        echo "Mount-point $mpoint/ isn't existed. "
        exit 1
    fi
    namelen=${#img}
    # Parse "sfdisk -d" dump lines of the form
    #   <image><N> : start= <sector>, ...
    # to find the start sector of the requested partition number.
    while read partnam _col str; do
        if [ "$_col" != ':' ]; then continue; fi
        start="${str##*start=}"
        if [ "$start" == "$str" ]; then continue; fi
        start="${start%%,*}"
        start_sect="${start// }"
        # Sector size assumed to be 512 bytes -- matches sfdisk's default
        # sector-based dump units.
        start_off=$((start_sect * 512))
        _log2 "found partition $partnam (+$start)"
        # Partition number = device name with the image-path prefix removed.
        partnum=${partnam:namelen}
        if [ $partnum != $partition ]; then
            continue
        fi
        _log1 "partition $partnam matched."
        break
    done < <(sfdisk -d "$img")
    if [ "$partnum" != $partition ]; then
        echo "Partition isn't existed: $partition" >&2
        exit 1
    fi
    _log2 "Get next available loop device"
    if ! loopdev=`losetup -f`; then
        echo "No available loop device. " >&2
        exit 1
    fi
    _log1 "Attach loop device $loopdev to $img (+$start_off)"
    if ! losetup -o $start_off $loopdev "$img"; then
        echo "losetup failed: $?" >&2
        exit 1
    fi
    # Build the mount command as an array so user-supplied -o options with
    # spaces survive word splitting.
    mount=(mount)
    nmount=1
    if [ -n "$mountopts" ]; then
        mount[nmount++]=-o
        mount[nmount++]="$mountopts"
    fi
    mount[nmount++]=$loopdev
    mount[nmount++]="$mpoint"
    _log2 "${mount[@]}"
    if ! "${mount[@]}"; then
        echo "mount failed: $?"
        _log2 "Detach loop device from image file: $loopdev"
        losetup -d $loopdev
    fi
}
boot "$@"
| true
|
f2fe3ee4784554f69a2061fb8f6cba7f49bf7efc
|
Shell
|
gtaylor/btmux_template_io
|
/misc/weapon_table_massager.py
|
UTF-8
| 855
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
"""
This module parses a copy/paste from the in-game ADDWEAP table, spitting out
a properly formed WEAPON_TABLE dict.
Assumes that you have a weapons.txt file in the same directory. Try to add
an invalid weapon to a unit as XCODE MECHREP and see the weapons table list::
addweap invalid lt 1
Copy/paste everything between the ----- header/footers into weapons.txt.
Re-run this, copy what you see in stdout to btmux_template_io/item_table.py's
WEAPON_TABLE.
"""
fobj = open('weapons.txt')
lines = fobj.readlines()
print "WEAPON_TABLE = {"
for line in lines:
name, heat, damage, min_range, short_range, med_range, long_range,\
vrt, crits, ammo_per_ton = line.split()
print " '{name}': {{'crits': {crits}, 'ammo_count': {ammo_per_ton}}},".format(
name=name, crits=crits, ammo_per_ton=ammo_per_ton,
)
print "}"
| true
|
1bfe3b43d04bbbf70b054b59330747bd4843aed0
|
Shell
|
rubixlinux/rubixlinux
|
/l/libtasn1/PKGBUILD
|
UTF-8
| 891
| 3
| 3
|
[] |
no_license
|
# Maintainer: Joshua Rubin <joshua@rubixlinux.org>
# Package metadata for libtasn1 (ASN.1 parsing library).
pkgname=libtasn1
pkgver=0.2.18
pkgrel=1
url="http://josefsson.org/libtasn1"
pkgdesc="libtasn1 is an asn.1 library"
depends=('glibc')
source=(http://josefsson.org/gnutls/releases/$pkgname/$pkgname-$pkgver.tar.gz)
md5sums=('e44a58746803de101c55ae8683d677a1')
## Todo:
## None
## Notes:
## None
## Changelog:
## rel1: initial rubix release
# Configure, build and install libtasn1 into the staging dir, then install
# the documentation and apply ownership/permission cleanups.
build() {
  export MAKEFLAGS=""
  cd $startdir/src/$pkgname-$pkgver
  ./configure --prefix=/usr
  make || return 1
  make DESTDIR=$startdir/pkg install
  mkdir -p $startdir/pkg/usr/doc/$pkgname-$pkgver
  cp -a \
    AUTHORS \
    COPYING* \
    ChangeLog \
    INSTALL \
    NEWS \
    README \
    THANKS \
    $startdir/pkg/usr/doc/$pkgname-$pkgver
  chmod 644 $startdir/pkg/usr/doc/$pkgname-$pkgver/*
  chown -R root.bin $startdir/pkg/usr/bin
  rm $startdir/pkg/usr/info/dir
  # BUGFIX: quote the pattern -- unquoted, the shell expanded *.la against
  # the current directory before find ever saw it, so no libtool archives
  # were removed from the package tree.
  find $startdir/pkg -name '*.la' -exec rm {} \;
}
| true
|
7f16495bc28b9c7a74aaa4902b5fcaf06eb5163b
|
Shell
|
kidlab/env_config
|
/.bash_profile
|
UTF-8
| 1,326
| 3.25
| 3
|
[] |
no_license
|
##
# Your previous /Users/manvuong/.bash_profile file was backed up as /Users/manvuong/.bash_profile.macports-saved_2013-08-14_at_14:48:24
##
# Personal macOS bash profile: PATH setup, git completion, mongo aliases,
# Go workspace, Google Cloud SDK, RVM, prompt, and an ssh wrapper.
export PATH=$HOME/Workspace/go_appengine:$HOME/.go/bin:/usr/local/sbin:/usr/local/bin:$PATH # Go App Engine SDK
if [ -f ~/.git-completion.bash ]; then
    . ~/.git-completion.bash
fi
alias mongostart="launchctl start homebrew.mxcl.mongodb"
alias mongostop="launchctl stop homebrew.mxcl.mongodb"
export GOPATH=$HOME/.go/
# Append the current directory to GOPATH on demand.
alias gopath="export GOPATH=$GOPATH:`pwd`"
# The next line updates PATH for the Google Cloud SDK.
source '/Users/manvuong/google-cloud-sdk/path.bash.inc'
# The next line enables bash completion for gcloud.
source '/Users/manvuong/google-cloud-sdk/completion.bash.inc'
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
# PS1='\u@\h:\W\$ '
# PS1='\[\e[0;32m\]\u@\h\[\e[m\]:\[\e[1;34m\]\W\[\e[m\] \[\e[1;32m\]\$\[\e[m\] \[\e[1;37m\]'
export PS1='\[\033[01;32m\]\u@\h\[\e[m\]:\[\033[01;34m\]\W \$\[\033[00m\] '
# NOTE(review): GREP_OPTIONS is deprecated by GNU grep and --color=always can
# corrupt piped output -- consider an alias instead; left unchanged here.
export GREP_OPTIONS='--color=always'
# Color SSH
# Tag the terminal (via an OSC 7 escape) before connecting so production
# hosts are visually distinguishable, then run the real ssh.
function colorssh() {
    ARGS=$@
    if [[ $ARGS =~ (production|ec2-.*compute-1) ]]; then
        printf "\033]7;file://%s/\007" "production-instance"
    else
        printf "\033]7;file://%s/\007" "$ARGS"
    fi
    ssh $*
}
alias ssh="colorssh"
| true
|
a46a92579048a94ffe696bc40b0a46ba963474f9
|
Shell
|
bobhlo/integration
|
/extra/statistics-generator
|
UTF-8
| 2,734
| 4.1875
| 4
|
[
"BSD-4-Clause",
"WTFPL",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"ISC",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Generate contributor statistics for a Mender release (or the current
# repository) by feeding "git log" output for a revision range through
# gitdm, then reformatting gitdm's table as Markdown.
set -e
# Print usage help.
usage() {
    cat <<EOF
$0 --base-dir=<DIR_WITH_REPOS> [options] <REV-RANGE> [addition git-log arguments]
Main options:
--base-dir=<DIR_WITH_REPOS>
Supplies the folder where all the Mender repositories live.
--repo Query only the repository we're in, not the default Mender release
repositories.
Extra options:
-n Instead of generating the statistics, look at the raw Git output that is
given to gitdm. You probably want to pipe this somewhere.
EOF
}
if [ -z "$1" ]; then
    usage
    exit 1
fi
# Option parsing: stops at the first non-option argument (the rev range).
while [ -n "$1" ]; do
    case "$1" in
        --base-dir=*)
            BASE_DIR="${1#--base-dir=}"
            ;;
        --base-dir)
            shift
            BASE_DIR="$1"
            ;;
        --repo)
            REPO_ONLY=1
            ;;
        -n)
            DRY_RUN=1
            ;;
        -h|--h*)
            usage
            exit 1
            ;;
        *)
            break
            ;;
    esac
    shift
done
if [ -z "$BASE_DIR" -a "$REPO_ONLY" != 1 ]; then
    echo "Need either --base-dir or --repo parameter."
    exit 1
fi
# Emit "git log" output for the requested range, either for the current
# repository (--repo) or for every repo release_tool.py lists, resolving
# each repo's revision from the integration version. vendored/generated
# paths are excluded.
collect_changes() {
    RELEASE_TOOL="$(dirname "$0")/release_tool.py"
    if [ "$REPO_ONLY" = 1 ]; then
        REPO_LIST=.
    else
        REPO_LIST="$("$RELEASE_TOOL" --list git)"
    fi
    for repo in $REPO_LIST; do
        (
            if [ "$REPO_ONLY" = 1 ]; then
                CHANGES="$1"
            else
                CHANGES="$("$RELEASE_TOOL" --version-of $repo --in-integration-version "$1")"
            fi
            shift
            # With no extra git-log args, include patches for rename/copy
            # detection (gitdm wants diffs); otherwise defer to the caller.
            if [ -z "$1" ]; then
                GIT_ARGS="-p -M -C -C"
            else
                GIT_ARGS=
            fi
            echo "Fetching changes for $repo, rev $CHANGES" 1>&2
            if [ "$REPO_ONLY" != 1 ]; then
                cd "$BASE_DIR/$repo"
            fi
            git --no-pager log --use-mailmap $GIT_ARGS "$CHANGES" "$@" -- '*' ':!vendor' ':!node_modules' ':!package-lock.json'
        )
    done
}
# Rewrite gitdm's plain-text table into a Markdown table.
post_process() {
sed -nre '
/^$/{
p
n
s/^(.*)$/| \1 | |/
p
c |---|---|
p
n
}
/%/{
s/^(.+)/| \1/
s/(.+)$/\1 |/
s/ {2,}/ | /
}
p
'
}
# Refuse to run unless the globally configured mailmap matches the one
# shipped next to this script, so author aggregation is consistent.
ACTUAL_MAILMAP_MD5=$(md5sum "$(git config --get mailmap.file)" 2>/dev/null | sed -e 's/ .*//')
DESIRED_MAILMAP_MD5=$(md5sum "$(dirname "$0")/gitdm/mailmap" | sed -e 's/ .*//')
if [ "$ACTUAL_MAILMAP_MD5" != "$DESIRED_MAILMAP_MD5" ]; then
    echo "Please execute this before running the command:"
    echo " git config --global mailmap.file" "$(realpath "$(dirname "$0")/gitdm/mailmap")"
    exit 1
fi
if [ "$DRY_RUN" = 1 ]; then
    collect_changes "$@"
else
    set -o pipefail
    collect_changes "$@" | python "$(dirname "$0")"/gitdm/gitdm/gitdm -s -b "$(dirname "$0")"/gitdm -l 10 | post_process
fi
| true
|
e2a3614b3f450a3a5d8be0260af132dfd0e49131
|
Shell
|
gan-orlab/MIPVar
|
/Scripts/Processing.step07.selectByCohorts.sh
|
UTF-8
| 1,456
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#I use variables, for readability
#####LOOPING NOT DONE YET, WORK ON IT WITH DAN
###general code; can be put in a loop if you have a file with list of genes + specify the cohorts: for gene in selectedgenes.list; do for cohort in FC NY ISR; do for DP in 15 30 50; do bash script.sh $gene $cohort $DP; done; done; done
###Khair's version: can be put in a loop if you have a file with list of genes: for gene in PARK2 PARK7 PINK1 VPS13C; do for DP in 15 30 50; do bash script.sh $gene RBD $DP; done; done
# Positional args: $1=input VCF, $2=cohort name, $3=read-depth threshold (DP),
# $4=dir holding <cohort>.samples.list, $5=CPU core count.
read vcf cohort_name DP DIR core<<< $@
if [[ ! -s $vcf ]]; then echo "ERROR: input vcf (1st arg) not specified, empty or does not exist"; exit 42; fi
# BUGFIX: this check validates DP, but the original message wrongly said
# "cohort name (3rd arg)".
if [[ -z $DP || ! $DP -gt 0 ]]; then echo "ERROR: DP (3rd arg) not specified or not a positive integer"; exit 42; fi
# BUGFIX: cohort name is the 2nd argument, not the 3rd as the original said.
if [[ -z $cohort_name ]]; then echo "ERROR: cohort name (2nd arg) not specified"; exit 42; fi
REF=~/runs/go_lab/Reference/human_g1k_v37.fasta
sample_list=$DIR/$cohort_name.samples.list
output=$cohort_name.DP$DP.all.genes.vcf
# 4 GB of JVM heap per requested core.
mem=`echo "-Xmx"$((core*4))g`
java $mem -jar /lustre03/project/6004655/COMMUN/soft/lib/java/GATK/GenomeAnalysisTK-3.8/dist/GenomeAnalysisTK.jar -T SelectVariants -R $REF -V $vcf -o $output -sf $sample_list -env --ALLOW_NONOVERLAPPING_COMMAND_LINE_SAMPLES -nt $core
#for cohort in FC NY ISR; do bash ../Scripts/Processing.step07.selectByCohorts.sh PD_familial_genes_except_GBA_AllSamples_GF25_annotated_GQ30_DP30_MISS10_filtered_cleaned.vcf $cohort 30; done
| true
|
be7a462b6981b5f3887e697b57605087dec0c880
|
Shell
|
znh1992/Harris_Kovacs_Londo
|
/Maker/gff_prep_igv.sh
|
UTF-8
| 293
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare a GFF for IGV: keep feature rows on "chr" sequences, sort them,
# split into one file per feature type (column 3), then bgzip + tabix each.
# Usage: gff_prep_igv.sh <gff-file> <accession-prefix>
file=$1
acc=$2
# Drop FASTA headers (">") and keep only "chr" lines.
grep "chr" "$file" | grep -v ">" > "${acc}_presort.gff"
sortBed -i "${acc}_presort.gff" > "${acc}_sorted.gff"
for i in $(cut -f3 "${acc}_sorted.gff" | sort | uniq); do
  # Match column 3 exactly: a plain 'grep $i' also hits substrings
  # (e.g. "gene" inside "pseudogene") and matches in other columns.
  awk -F'\t' -v t="$i" '$3 == t' "${acc}_sorted.gff" > "$i.${acc}_sorted.gff"
  bgzip "$i.${acc}_sorted.gff"
  tabix "$i.${acc}_sorted.gff.gz"
done
| true
|
e68e241f0c313c8da66d1295e6589d85c8dc3e63
|
Shell
|
jfqd/mi-qutic-lx-base
|
/copy/usr/local/bin/systemd_health_check
|
UTF-8
| 954
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Restart systemd services stuck in the "failed" state:
#   pass 1: everything systemd lists as state=failed,
#   pass 2: every *enabled* service whose `systemctl status` reports
#           "Active: failed" (catches units missed by the first listing).
PATH=/usr/sbin:/usr/bin:/sbin:/bin
# list-unit-files table: strip the header line (tail) and the trailing
# blank+summary lines (head), keep only the unit-name column.
FAILED_SERVICES=$(/usr/bin/systemctl list-unit-files --type=service --state=failed --plain | /usr/bin/tail -n +2 | /usr/bin/head -n -2 | /usr/bin/awk '{ print $1 }')
for service in $FAILED_SERVICES; do
# reset-failed first: systemd may refuse to start a unit still flagged failed.
systemctl reset-failed "$service"
systemctl start "$service"
echo "start $service after failed state (1)"
done
ENABLED_SERVICES=$(/usr/bin/systemctl list-unit-files --type=service --state=enabled --plain | /usr/bin/tail -n +2 | /usr/bin/head -n -2 | /usr/bin/awk '{ print $1 }')
for service in $ENABLED_SERVICES; do
if [[ "$service" =~ "@" ]]; then
# Template units (name@.service) need an instance name to start; skip them.
/usr/bin/echo "skip $service including an @-sign" > /dev/null
else
# Parse the second word of the "Active:" line from `systemctl status`.
if [[ `/usr/bin/systemctl status "$service" | /usr/bin/grep "Active: " | /usr/bin/awk '{print $2}'` = "failed" ]]; then
systemctl reset-failed "$service"
systemctl start "$service"
echo "start $service after failed state (2)"
fi
fi
done
exit 0
| true
|
071182986cdefde8cc6fee1dad5bf3b7037d9f55
|
Shell
|
Bahler-Lab/yogy
|
/YogiUp/download_inparanoid.sh
|
UTF-8
| 3,231
| 4.15625
| 4
|
[] |
no_license
|
source settings.sh
# Working layout under $TEMP_DIR (TEMP_DIR, inparanoid_link, DATA_DIR,
# DONE/FAILED come from settings.sh).
INPAR_DIR="$TEMP_DIR/inparanoid"
# the web location
INPAR_LOCATION="$INPAR_DIR/inparanoid.sbc.su.se/download/8.0_current/Orthologs_other_formats"
INPAR_SELECT_DIR="$TEMP_DIR/inparanoid_select_species"
# Stamp file (YYYYMM) recording when the last download happened.
DATE_FILE="inparanoid_update_date.txt"
#############################################
# check if the download is old enough (4 months)
# Arguments:
# None
# Returns:
# update_tag - 1: update required 0: update not required
#############################################
check_update()
{
  # Echo 1 if a fresh download is needed (stamp missing or >= 4 months
  # old), else 0. The stamp is a YYYYMM value in $INPAR_DIR/$DATE_FILE.
  if [ ! -d "${INPAR_DIR}" ]; then
    mkdir -p "${INPAR_DIR}"
  fi
  # check update if the update is the latest.
  if [ -e "$INPAR_DIR/$DATE_FILE" ]; then
    update_date=$(cat "$INPAR_DIR/$DATE_FILE")
    today=$(date +%Y%m)
    # Convert YYYYMM stamps to absolute month counts before subtracting:
    # plain subtraction of YYYYMM values is wrong across year boundaries
    # (e.g. 202401 - 202312 = 89, not 1 month). The 10# prefix forces
    # base 10 so months like "08"/"09" are not parsed as invalid octal.
    today_months=$(( 10#${today:0:4} * 12 + 10#${today:4:2} ))
    update_months=$(( 10#${update_date:0:4} * 12 + 10#${update_date:4:2} ))
    monthdiff=$(( today_months - update_months ))
    if [ $monthdiff -lt 4 ]; then
      update_tag=0
    else
      update_tag=1
    fi
  else
    update_tag=1
  fi
  echo $update_tag
}
#############################################
# download the inparanoid files write a $DATE_FILE
# to record the date
# Arguments:
# update_tag
# Global:
# inparanoid_link: specified setting.sh
# DATE_FILE
# INPAR_DIR
# Returns:
# None
#############################################
download_update()
{
# $1: update tag from check_update (1 = download, 0 = skip).
update_tag=$1
printf "downloading %-40s\n " "inparanoid orthologs (full download takes several hours):"
if [ $update_tag -eq 1 ]; then
cd ${INPAR_DIR} # switch to temp directory
# Recursive mirror of the ortholog directory, skipping index pages.
wget -q -r --no-parent --reject "index.htm*" $inparanoid_link
# Record the download month (YYYYMM) so check_update can age it.
echo $(date +%Y%m) > $DATE_FILE
cd ../../
else
# Already current: print the stored stamp in yellow.
printf "\e[0;33m[is the latest: %s]\e[0m\n" "$(cat $INPAR_DIR/$DATE_FILE)"
fi
}
#############################################
# Extract all files from the downloading folder
# Arguments:
# 1: inparanoid download directory
# 2: extract destination
# 3: update tag (returned by check_update())
# Returns:
# None
#############################################
extract_inparanoid ()
{
  # Extract the sqltable.* members of every *.tgz under $1 into $2.
  # $1: download dir  $2: destination dir  $3: update tag (1 = extract).
  update_tag=$3
  # Quote the pattern so the shell cannot glob-expand '*.tgz' against the
  # current directory before find sees it (the original left it unquoted).
  local files=($(find "$1" -name '*.tgz'))
  if [ ! -d "$2" ]; then
    mkdir -p "$2"
  fi
  if [ "$update_tag" -eq "1" ]; then
    echo "extracting ${#files[@]} orthologs ... "
    local ok=1
    for (( i=0; i<${#files[@]}; i++))
    do
      # --wildcards: GNU tar matches member names literally on extraction
      # unless told otherwise, so the 'sqltable.*' pattern needs it.
      # Track failures: the original loop could never reach $FAILED
      # because the counter always ended equal to the file count.
      tar --wildcards -xzf "${files[$i]}" -C "$2" 'sqltable.*' || ok=0
      printf '\r \r'$i/${#files[@]} ,
    done
    if [ $ok -eq 1 ] && [ $i == ${#files[@]} ]; then
      $DONE
    else
      $FAILED
    fi
  fi
}
#############################################
# copy the selected species to a new directory
# Global:
# inparanoid_species: array of selected species
# INPAR_SELECT_DIR
# INPAR_LOCATION
#############################################
select_species()
{
  # Copy each species directory named in $inparanoid_species from the
  # download tree ($INPAR_LOCATION) into $INPAR_SELECT_DIR.
  # Paths are quoted: the original's unquoted expansions would break on
  # any path containing spaces or glob characters.
  if [ ! -d "$INPAR_SELECT_DIR" ]; then
    mkdir -p "$INPAR_SELECT_DIR"
  fi
  for (( i=0; i<${#inparanoid_species[@]}; i++ ))
  do
    copy_file=$INPAR_LOCATION/${inparanoid_species[i]}
    if [ -d "$copy_file" ]; then
      cp -r "$copy_file" "$INPAR_SELECT_DIR/"
    else
      # Missing species directory: report failure but keep going.
      $FAILED
    fi
  done
}
# Main flow: download if the local copy is stale, then pull the sqltable
# files out of the archives.
download_update $(check_update)
#select_species
# NOTE(review): extraction reads from $INPAR_SELECT_DIR although
# select_species (which populates it) is commented out above — confirm
# whether the source directory is intended.
extract_inparanoid $INPAR_SELECT_DIR $DATA_DIR/inparanoid $(check_update)
| true
|
c7f90bd6b466b42bd333a51d80db8c8218bd26f3
|
Shell
|
ds2d/ops_script
|
/bash/clean_zookeeper_log.sh
|
UTF-8
| 304
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# clean zookeeper snapshot,log
# Retention cleanup: keep only the newest $count transaction logs and the
# newest $count snapshots; delete everything older.
#snapshot file dir
dataDir=/java/zkdata/version-2
#tran log dir
dataLogDir=/java/zkdata/version-2
#Leave 500 files
count=500
# Skip the $count newest entries (tail -n + starts at line count+1).
# POSIX $((...)) replaces the obsolete $[...] arithmetic form.
count=$((count + 1))
ls -t "$dataLogDir"/log.* | tail -n +$count | xargs rm -f
ls -t "$dataDir"/snapshot.* | tail -n +$count | xargs rm -f
| true
|
8509555cb078acdb24c05e85fb0bd012d07fcdfd
|
Shell
|
OJoklrO/OJoklrO.github.io
|
/add.sh
|
UTF-8
| 328
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively scaffold a new blog post (front matter + title) and open
# it in Typora. (The original shebang was "# !/bin/bash" — the space made
# it a plain comment, so the kernel never selected bash.)
read -r -p "name: " fname
read -r -p "title:" title
BLOGPATH="/home/$USER/blog/post/category/"
date=$(date "+%Y-%m-%d")
time=$(date "+%H-%M-%S")
# Quote the path everywhere: names with spaces would otherwise word-split.
filename=$BLOGPATH$date-$time$fname.md
touch "$filename"
# time: year-month-day
echo "---
title: $title
date: $date
time: $time
---
# $title
" > "$filename"
typora "$filename"
| true
|
2b0135f59fd5a3c1909f4f891a5725139e409680
|
Shell
|
scorelabio/lambda-packages
|
/lambda_packages/pandas/build.sh
|
UTF-8
| 438
| 2.828125
| 3
|
[] |
no_license
|
# Build pandas (and numpy) from source for AWS Lambda, strip the shared
# objects to shrink the package, archive it, then sanity-check the import.
BUILD_DIR=build
# NB: numpy has to be build along with pandas for proper linkage between libraries
pip-3.6 install --target $BUILD_DIR --no-binary :all: pandas
cd $BUILD_DIR
# Compress binaries
du -sh pandas
find pandas -name "*.so"|xargs strip
du -sh pandas
# Create archive
tar -czvf ../python3.6-pandas-0.22.0.tar.gz pandas
# Check version
python3 -c "import pandas;print(pandas.__version__)"
# Remove artifacts
cd ..
rm -r $BUILD_DIR
| true
|
471346460eeecc8f60e4577826d59c6e03fb1758
|
Shell
|
ashafaei/pdf2pptx
|
/pdf2pptx.sh
|
UTF-8
| 3,770
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Alireza Shafaei - shafaei@cs.ubc.ca - Jan 2016
# Render every page of a PDF to PNG with ImageMagick, then pack the PNGs
# into a bundled PowerPoint template (continued below the functions).
resolution=1024
density=300
#colorspace="-depth 8"
colorspace="-colorspace sRGB -background white -alpha remove"
makeWide=true
if [ $# -eq 0 ]; then
echo "No arguments supplied!"
echo "Usage: ./pdf2pptx.sh file.pdf"
echo " Generates file.pdf.pptx in widescreen format (by default)"
echo " ./pdf2pptx.sh file.pdf notwide"
echo " Generates file.pdf.pptx in 4:3 format"
exit 1
fi
if [ $# -eq 2 ]; then
if [ "$2" == "notwide" ]; then
makeWide=false
fi
fi
echo "Doing $1"
# Scratch directory for the rendered page images.
tempname="$1.temp"
if [ -d "$tempname" ]; then
echo "Removing ${tempname}"
rm -rf "$tempname"
fi
mkdir "$tempname"
# Set return code of piped command to first nonzero return code
set -o pipefail
# identify prints one line per PDF page.
n_pages=$(identify "$1" | wc -l)
returncode=$?
if [ $returncode -ne 0 ]; then
echo "Unable to count number of PDF pages, exiting"
exit $returncode
fi
if [ $n_pages -eq 0 ]; then
echo "Empty PDF (0 pages), exiting"
exit 1
fi
# Convert page by page; stop at the first page that fails.
for ((i=0; i<n_pages; i++))
do
convert -density $density $colorspace -resize "x${resolution}" "$1[$i]" "$tempname"/slide-$i.png
returncode=$?
if [ $returncode -ne 0 ]; then break; fi
done
if [ $returncode -eq 0 ]; then
echo "Extraction succ!"
else
echo "Error with extraction"
exit $returncode
fi
# Resolve this script's real path (GNU `readlink -f` is unavailable on
# macOS, so fall back through perl/python/ruby).
if (which perl > /dev/null); then
# https://stackoverflow.com/questions/1055671/how-can-i-get-the-behavior-of-gnus-readlink-f-on-a-mac#comment47931362_1115074
mypath=$(perl -MCwd=abs_path -le '$file=shift; print abs_path -l $file? readlink($file): $file;' "$0")
elif (which python > /dev/null); then
# https://stackoverflow.com/questions/1055671/how-can-i-get-the-behavior-of-gnus-readlink-f-on-a-mac#comment42284854_1115074
mypath=$(python -c 'import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))' "$0")
elif (which ruby > /dev/null); then
mypath=$(ruby -e 'puts File.realpath(ARGV[0])' "$0")
else
mypath="$0"
fi
mydir=$(dirname "$mypath")
pptname="$1.pptx.base"
fout=$(basename "$1" .pdf)".pptx"
rm -rf "$pptname"
# Start from a fresh copy of the bundled template and drop the PNGs in.
cp -r "$mydir"/template "$pptname"
mkdir "$pptname"/ppt/media
cp "$tempname"/*.png "$pptname/ppt/media/"
# In-place sed wrapper: BSD sed (macOS) requires an explicit backup
# suffix argument for -i, GNU sed must not receive one.
function call_sed {
    case "$(uname -s)" in
        Darwin) sed -i "" "$@" ;;
        *)      sed -i "$@" ;;
    esac
}
function add_slide {
# Register slide number $1 in the pptx XML (run from ppt/media/):
# relationship entry, content-type override, and slide-id list entry.
# Relationship ids start at rId9 — rId1..rId8 belong to the template.
pat='slide1\.xml\"\/>'
id=$1
id=$((id+8))
entry='<Relationship Id=\"rId'$id'\" Type=\"http:\/\/schemas\.openxmlformats\.org\/officeDocument\/2006\/relationships\/slide\" Target=\"slides\/slide-'$1'\.xml"\/>'
rep="${pat}${entry}"
call_sed "s/${pat}/${rep}/g" ../_rels/presentation.xml.rels
# Declare the new slide part's content type.
pat='slide1\.xml\" ContentType=\"application\/vnd\.openxmlformats-officedocument\.presentationml\.slide+xml\"\/>'
entry='<Override PartName=\"\/ppt\/slides\/slide-'$1'\.xml\" ContentType=\"application\/vnd\.openxmlformats-officedocument\.presentationml\.slide+xml\"\/>'
rep="${pat}${entry}"
call_sed "s/${pat}/${rep}/g" ../../\[Content_Types\].xml
# Slide ids must be >= 256 in the sldIdLst.
sid=$1
sid=$((sid+256))
pat='<p:sldIdLst>'
entry='<p:sldId id=\"'$sid'\" r:id=\"rId'$id'\"\/>'
rep="${pat}${entry}"
call_sed "s/${pat}/${rep}/g" ../presentation.xml
}
function make_slide {
# Clone the template slide + its rels file for slide $1, point the image
# relationship at this slide's PNG, then register the slide in the XML.
cp ../slides/slide1.xml ../slides/slide-$1.xml
# NOTE(review): the sed uses the global $slide rather than $1 — works
# because the caller passes $slide as $1, but worth confirming.
cat ../slides/_rels/slide1.xml.rels | sed "s/image1\.JPG/slide-${slide}.png/g" > ../slides/_rels/slide-$1.xml.rels
add_slide $1
}
# Build slides from the rendered PNGs, highest-numbered first so the XML
# insertions (each prepended at a fixed anchor) end up in ascending order.
pushd "$pptname"/ppt/media/
# count-2: `ls -ltr` output includes a "total" line, and slides are 0-based.
count=`ls -ltr | wc -l`
for (( slide=$count-2; slide>=0; slide-- ))
do
echo "Processing "$slide
make_slide $slide
done
if [ "$makeWide" = true ]; then
# Swap the template's 4:3 slide size for 16:9 (12192000 x 6858000 EMU).
pat='<p:sldSz cx=\"9144000\" cy=\"6858000\" type=\"screen4x3\"\/>'
wscreen='<p:sldSz cy=\"6858000\" cx=\"12192000\"\/>'
call_sed "s/${pat}/${wscreen}/g" ../presentation.xml
fi
popd
pushd "$pptname"
rm -rf ../"$fout"
# A .pptx is just a zip archive of this directory tree.
zip -q -r ../"$fout" .
popd
rm -rf "$pptname"
rm -rf "$tempname"
| true
|
178d7111b09e294cd87e74bb3d1e7e84eed852ea
|
Shell
|
dmfullstack/hs_d-p-o
|
/languagepacks/stackroute/angulario/cmd/stackroute/angulario/tslint
|
UTF-8
| 463
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# CI step: run the project's TSLint npm script inside $WORKSPACE.
# Expects WORKSPACE, NPM_OPTIONS, NPM_SCRIPT_CMD and NPM_SCRIPT_OPTIONS
# to be provided in the environment by the pipeline.
# in this we will run tslint rules
set -e
echo executing TS Lint in $WORKSPACE
# echo ignoring patterns $IGNORE_PATTERN
cd $WORKSPACE
echo running with NPM_OPTIONS as $NPM_OPTIONS
echo "-----------------------------------"
# NPM_SCRIPT_OPTIONS gives the tslint result in json structure '--format json'
echo passing NPM_SCRIPT_OPTIONS as $NPM_SCRIPT_OPTIONS for command $NPM_SCRIPT_CMD
# '--' separates npm's own options from those forwarded to the script.
npm run $NPM_SCRIPT_CMD $NPM_OPTIONS -- $NPM_SCRIPT_OPTIONS
| true
|
5081cc4b74cc9be8fd735ac536a467a79c3d598d
|
Shell
|
tech-otaku/macos-defaults
|
/date-time.sh
|
UTF-8
| 11,595
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# See: https://www.tech-otaku.com/mac/setting-the-date-and-time-format-for-the-macos-menu-bar-clock-using-terminal/
# Configure the macOS menu-bar clock via `defaults`. Exactly one DateFormat
# variant further down is active; the rest are kept commented for reference.
# Exit with error if installed macOS version is not macOS 10.15 Catalina or earlier
# (Big Sur and later ignore/changed these preference keys.)
if [ $(system_profiler SPSoftwareDataType | awk '/System Version/ {print $4}' | cut -d . -f 1) -ge 11 ]; then
    printf "\nERROR: * * * For use with macOS 10.15 Catalina and earlier * * * \n\n"
    exit 1
fi
# System Preferences > Date & Time > Time options
# Analogue
#defaults write com.apple.menuextra.clock IsAnalog -bool true
# Digital
defaults write com.apple.menuextra.clock IsAnalog -bool false
# System Preferences > Date & Time > Flash the time separators
# checked
#defaults write com.apple.menuextra.clock FlashDateSeparators -bool true
# unchecked
defaults write com.apple.menuextra.clock FlashDateSeparators -bool false
# Thu 18 Aug 23:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM HH:mm:ss"
# Thu 23:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE HH:mm:ss"
# 18 Aug 23:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM HH:mm:ss"
# 23:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "HH:mm:ss"
# Thu 18 Aug 11:46:18 pm
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM h:mm:ss a"
# Thu 11:46:18 pm
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE h:mm:ss a"
# 18 Aug 11:46:18 pm
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM h:mm:ss a"
# 11:46:18 pm
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "h:mm:ss a"
# Thu 18 Aug 11:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM h:mm:ss"
# Thu 11:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE h:mm:ss"
# 18 Aug 11:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM h:mm:ss"
# 11:46:18
# System Preferences > Date & Time > Display time with seconds - Checked [:ss]
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "h:mm:ss"
# Thu 18 Aug 23:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
# Active format choice for this script ("Thu 18 Aug 23:46").
defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM HH:mm"
# Thu 23:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE HH:mm"
# 18 Aug 23:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM HH:mm"
# 23:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Checked [HH:mm]
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "HH:mm"
# Thu 18 Aug 11:46 pm
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM h:mm a"
# Thu 11:46 pm
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE h:mm a"
# 18 Aug 11:46 pm
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM h:mm a"
# 11:46 pm
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Checked [a]
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "h:mm a"
# Thu 18 Aug 11:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM h:mm"
# Thu 11:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Checked [EEE]
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "EEE h:mm"
# 18 Aug 11:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Checked [d MMM]
#defaults write com.apple.menuextra.clock DateFormat -string "d MMM h:mm"
# 11:46
# System Preferences > Date & Time > Display time with seconds - Unchecked
# System Preferences > Date & Time > Use a 24-hour clock - Unchecked
# System Preferences > Date & Time > Show AM/PM - Unchecked
# System Preferences > Date & Time > Show the day of the week - Unchecked
# System Preferences > Date & Time > Show date - Unchecked
#defaults write com.apple.menuextra.clock DateFormat -string "h:mm"
# Restart the menu-bar UI so the new clock settings take effect immediately.
killall SystemUIServer
| true
|
00ed3bb0fcde21ed3db8c54a75f33b8204c957b9
|
Shell
|
bfnoack/scripts
|
/idba_assemble.sh
|
UTF-8
| 985
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash -l
#SBATCH -J ibda_ud_assembly
#SBATCH -o assembly-out-%j.txt
#SBATCH -e assembly-error-%j.txt
# SLURM job: adaptor-trim ($1 = reads fastq.gz, $2 = adaptor FASTA),
# quality-filter with sickle, then assemble with idba_ud.
module load python
#set -e IDBA segfaults a lot
set -u
software=/home/adurvasu/software
reads=$1
# Sample name = basename with every extension stripped.
temp_sample=${reads##*/}
sample_no_file_ending=${temp_sample%%.*}
trimmedreads=$sample_no_file_ending-trimmed.fastq.gz
qualreads=$sample_no_file_ending-qual.fastq
#trim adaptors with WGA adaptors
>&2 echo "Trimming adaptors with cutadapt using supplied adaptor sequences"
${software}/cutadapt -a file:$2 \
-e 0.1 -O 5 -m 15 \
-o $trimmedreads $reads
#remove low quality sequences using the defaults
>&2 echo "Removing low quality sequences with sickle"
${software}/sickle se -f $trimmedreads -t sanger -o $qualreads
mkdir contigs-$sample_no_file_ending
>&2 echo '##########################'
>&2 echo '### Assembling '$reads' ###'
>&2 echo '##########################'
# NOTE(review): mink/maxk 29-49 step 2 — presumably tuned for these reads;
# confirm before reusing on other data.
${software}/idba_ud -r $qualreads -o contigs-$sample_no_file_ending --mink 29 --maxk 49 --step 2
| true
|
56c0a16e2885715ac26d61ca471e74e7934bf891
|
Shell
|
bmwiedemann/openSUSE
|
/packages/s/systemd-generator-cron2timer/cron2timers
|
UTF-8
| 742
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: Copyright 2022-2023 SUSE LLC
# Generate a systemd timer/service pair for every script found under
# /etc/cron.{hourly,daily,weekly,monthly,yearly}, writing the units into
# the output directory given as $1 and enabling each timer via a symlink
# in timers.target.wants/.
set -e
out="${1:?}"
shopt -s nullglob
mkdir -p "$out/timers.target.wants/"
for interval in hourly daily weekly monthly yearly; do
  for script in /etc/cron."$interval"/*; do
    unit="$interval-${script##*/}"
    {
      printf '[Unit]\nDescription=Timer created from %s\n' "$script"
      printf '[Timer]\nOnCalendar=%s\nAccuracySec=1h\nPersistent=true\n' "$interval"
      printf '[Install]\nWantedBy=timers.target\n'
    } > "$out/$unit.timer"
    {
      printf '[Unit]\nDescription=Service created from %s\nConditionACPower=true\n' "$script"
      printf '[Service]\nExecStart=%s\n' "$script"
    } > "$out/$unit.service"
    ln -s "../$unit.timer" "$out/timers.target.wants/$unit.timer"
  done
done
| true
|
a7c8ac04659e1efc249605fe24e331547323736f
|
Shell
|
akora/drupal-install-configure-uninstall
|
/drupal-configure.sh
|
UTF-8
| 4,373
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#===========================================================================
#
# FILE: drupal-configure.sh
#
# USAGE: 1. chmod ugo+x drupal-configure.sh
# 2. sudo ./drupal-configure.sh [sitename]
#
# DESCRIPTION: This script takes a fresh and clean Drupal install, removes
# all unnecessary text files and some default themes, checks
# and installs Drush if not present and makes some initial
# configuration changes.
#
# OPTIONS: You have to provide the site name
# REQUIREMENTS: Debian linux, Apache, MySQL, PHP
# BUGS: no major known bugs so far
#
# NOTES: Tested on Debian GNU/Linux 6.0.2 (squeeze)
# apache2 2.2.16-6+squeeze1
# mysql-server 5.1.49-3
# php5 5.3.3-7+squeeze3
# bash 4.1-3
# using drush 7.x-4.5
#
# VERSION: v0.1
# CREATED: 2011-10-14
# REVISION: initial version
#
# AUTHOR: Andras Kora
# EMAIL: ak@akora.info
# WEBSITE: http://akora.info
#
#===========================================================================
# default configuration settings
sitename="$1"
webroot="/var/www"
drush_version="7.x-4.5"
drush_download_filename="drush-$drush_version.tar.gz"
drush_download_url="http://ftp.drupal.org/files/projects/$drush_download_filename"
# Stock Drupal files and themes a production site does not need.
text_files=( CHANGELOG.txt COPYRIGHT.txt INSTALL.mysql.txt INSTALL.pgsql.txt
             INSTALL.txt LICENSE.txt MAINTAINERS.txt UPGRADE.txt )
themes=( bluemarine chameleon pushbutton )
# Site-specific directories to create under sites/all/.
directories=( modules themes )
# managing messages
# Quoted "$param" throughout: the original's unquoted expansion would
# word-split a multi-word argument, making printf re-apply the format
# once per word.
function message_same_line () {
  # Left-justified "=== <msg>" in 50 cols, then the parameter, no newline.
  msg=$1
  param=$2
  printf "%-50s %s" "=== $msg" "$param"
}
function message_new_line () {
  # Same as message_same_line, terminated with a newline.
  msg=$1
  param=$2
  printf "%-50s %s\n" "=== $msg" "$param"
}
function message_response () {
  # Indented secondary/status line.
  msg=$1
  # printf "%-10s %s\n" "[ $msg ]"
  printf "%-10s %s\n" "   $msg"
}
# main functions
function get_site_name () {
  # Require exactly one argument (the site name); otherwise print usage
  # and stop.
  if [ $# -eq 1 ]; then
    message_new_line "Configuring the website" $1
  else
    message_new_line "Usage: $0 [sitename] (e.g. d6)"
    exit 0
  fi
}
function clean_up_files () {
  # Delete Drupal's stock top-level .txt files from the site root.
  message_new_line "Removing unnecessary files..."
  for file_to_remove in "${text_files[@]}"; do
    if [ -f "$webroot/$sitename/$file_to_remove" ]; then
      rm "$webroot/$sitename/$file_to_remove"
      # Report the removal. (The original printed this message only in the
      # else branch — i.e. when the file was already absent — so actual
      # deletions went unreported.)
      message_response "$webroot/$sitename/$file_to_remove removed..."
    fi
  done
}
function clean_up_themes () {
  # Delete the unused stock themes from /themes.
  message_new_line "Removing unnecessary themes..."
  for theme_to_remove in "${themes[@]}"; do
    if [ -d "$webroot/$sitename/themes/$theme_to_remove" ]; then
      rm -rf "$webroot/$sitename/themes/$theme_to_remove"
      # Report the removal (same misplaced-message fix as clean_up_files:
      # the original only printed when the theme was already gone).
      message_response "$webroot/$sitename/themes/$theme_to_remove removed..."
    fi
  done
}
function create_directories () {
  # Ensure sites/all/{modules,themes} exist for site-specific contrib code.
  message_new_line "Creating site specific /modules and /themes directories..."
  for dir_to_create in ${directories[@]}; do
    if [ ! -d $webroot/$sitename/sites/all/$dir_to_create ]; then
      mkdir $webroot/$sitename/sites/all/$dir_to_create
    else
      message_response "$webroot/$sitename/sites/all/$dir_to_create already present..."
    fi
  done
}
function install_drush () {
  # Install Drush $drush_version into the invoking (sudo) user's home and
  # symlink it onto the PATH, unless a ~/drush directory already exists.
  message_same_line "Checking if Drush is installed..."
  if [ -d /home/$SUDO_USER/drush ]; then
    message_response "Drush present"
  else
    message_response "installing drush..."
    cd /home/$SUDO_USER
    wget $drush_download_url
    tar -xzf $drush_download_filename
    # The script runs under sudo, so hand ownership back to the real user.
    chown -R $SUDO_USER:$SUDO_USER /home/$SUDO_USER/drush
    rm $drush_download_filename
    ln -s /home/$SUDO_USER/drush/drush /usr/local/bin/drush
  fi
}
function test_drush () {
  # Smoke-test the Drush install from inside the site's webroot.
  message_new_line "Testing Drush..."
  cd $webroot/$sitename
  drush status
}
function configure_website () {
  # Enable a baseline set of core modules, set Garland as the default and
  # admin theme, and run cron once.
  message_new_line "Configuring the website..."
  # add more Drush magic here...
  drush -y en path
  drush -y en php
  drush -y en statistics
  drush -y en syslog
  drush -y en upload
  # setting the default theme Garland
  drush vset --always-set theme_default garland 1
  drush vset --always-set admin_theme garland 1
  # run cron
  drush cron
}
# main script control flow
# Steps run in order; each assumes the previous one succeeded.
get_site_name $1
clean_up_files
clean_up_themes
create_directories
install_drush
test_drush
configure_website
# All done.
exit 0
| true
|
b8211ec71bcc1edb385e569f5f766d976ee4f089
|
Shell
|
vcamaral/dotfiles
|
/install
|
UTF-8
| 1,992
| 4.34375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
function exit_with_message() {
echo $1
exit 1
}
# First, retrieve the zip file from Github.
# It will use wget or curl (whichever is defined).
curl_available=$(command -v curl)
wget_available=$(command -v wget)
unzip_available=$(command -v unzip)
dotfiles_dir="$(dirname $0)"
dotfiles_zip=/tmp/dotfiles-master.zip
dotfiles_url=https://github.com/vcamaral/dotfiles/archive/master.zip
if [[ "$unzip_available" = "" ]]; then
exit_with_message "#=> Make sure you have the unzip command available."
fi
# Check if installation must be from local files.
# If not, download zip file and extract it to tmp directory.
if [[ ! -d "${dotfiles_dir}/files" ]]; then
dotfiles_dir=/tmp/dotfiles-master
echo "#=> Downloading $dotfiles_url to $dotfiles_zip"
if [[ "$curl_available" != "" ]]; then
curl -Ls -o $dotfiles_zip $dotfiles_url
elif [[ "$wget_available" != "" ]]; then
wget -q -O $dotfiles_zip $dotfiles_url
else
exit_with_message "#=> Please make sure curl or wget is installed."
fi
[ -f "$dotfiles_zip" ] || exit_with_message "#=> ERROR: Couldn't download ${dotfiles_url}."
# Now, unzip the directory.
rm -rf $dotfiles_dir
unzip -q $dotfiles_zip -d /tmp
fi
shell_name="zsh"
# Copy files.
cp -R ${dotfiles_dir}/files/.${shell_name} $HOME/
find ${dotfiles_dir}/files/common -maxdepth 1 -mindepth 1 -exec cp -R "{}" $HOME/.${shell_name} \;
# Source init file.
source $HOME/.zsh/exports.sh
# Set up local directory.
mkdir -p $HOME/local
[ -x "/usr/bin/chflags" ] && chflags hidden $HOME/local
# Copy binaries to ~/local/bin.
echo -n "#=> Copying binaries... "
find ${dotfiles_dir}/files/home -maxdepth 1 -mindepth 1 -exec cp -R "{}" $HOME/ \;
cp -R $dotfiles_dir/files/bin $HOME/local
find $HOME/local/bin -type f -exec chmod +x {} \;
echo "OK"
# macOS hacks.
if [[ "$(uname)" == "Darwin" ]]; then
echo -n "#=> macOS hacks... "
sh ${dotfiles_dir}/files/macOS.sh
echo "OK"
fi
echo "#=> Done! Restart your terminal!"
| true
|
7791c343a8688f9ef4e6d9d23f12dcf23e03495a
|
Shell
|
KhronosGroup/KTX-Software
|
/ci_scripts/install_linux.sh
|
UTF-8
| 3,657
| 3.625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#! /usr/bin/env bash
# Copyright 2015-2020 The Khronos Group Inc.
# SPDX-License-Identifier: Apache-2.0
# Install software in CI environment necessary to build on Linux.
# Exit if any command fails.
set -e
# Set parameters from command-line arguments, if any.
# Arguments are KEY=VALUE pairs that are eval'ed into shell variables.
for i in $@; do
  eval $i
done
ARCH=${ARCH:-$(uname -m)} # Architecture to install tools for.
FEATURE_GL_UPLOAD=${FEATURE_GL_UPLOAD:-ON}
FEATURE_VK_UPLOAD=${FEATURE_VK_UPLOAD:-ON}
if [ "$ARCH" = "x86_64" ]; then
  FEATURE_LOADTESTS=${FEATURE_LOADTESTS:-OpenGL+Vulkan}
else
  # No Vulkan SDK yet for Linux/arm64.
  FEATURE_LOADTESTS=${FEATURE_LOADTESTS:-OpenGL}
fi
VULKAN_SDK_VER=${VULKAN_SDK_VER:-1.3.243}
sudo apt-get -qq update
# Packages can be specified as 'package:architecture' pretty-much
# anywhere. Use :native to request a package for the build machine.
# See https://wiki.debian.org/Multiarch/HOWTO for information on
# multi-architecture package installs.
# Tools to run on the build host.
# LFS is not preinstalled in the arm64 image.
sudo apt-get -qq install git-lfs:native
sudo apt-get -qq install ninja-build:native
sudo apt-get -qq install doxygen:native
sudo apt-get -qq install rpm:native
if [ "$ARCH" = "$(uname -m)" ]; then
  dpkg_arch=native
  # gcc, g++ and binutils for native builds should already be installed
  # on CI platforms together with cmake.
  # sudo apt-get -qq install gcc g++ binutils make
else
  # Cross build: adjust for dpkg/apt architecture naming. How irritating that
  # it differs from what uname -m reports.
  if [ "$ARCH" = "x86_64" ]; then
    dpkg_arch=amd64
    gcc_pkg_arch=x86-64
  elif [ "$ARCH" = "aarch64" ]; then
    dpkg_arch=arm64
    gcc_pkg_arch=$ARCH
  fi
  sudo dpkg --add-architecture $dpkg_arch
  sudo apt-get update
  # Don't think this is right to install cross-compiler. apt reports
  # package not available.
  #sudo apt-get -qq install gcc:$dpkg_arch g++:$dpkg_arch binutils:$dpkg_arch
  # Try this where `arch` is x86-64 or arm64.
  sudo apt-get -qq install gcc-$gcc_pkg_arch-linux-gnu:native g++-$gcc_pkg_arch-linux-gnu:native binutils-$gcc_pkg_arch-linux-gnu:native
fi
sudo apt-get -qq install opencl-c-headers:$dpkg_arch
sudo apt-get -qq install mesa-opencl-icd:$dpkg_arch
# Graphics stacks are only needed when the matching upload/loadtest
# features are enabled.
if [[ "$FEATURE_GL_UPLOAD" = "ON" || "$FEATURE_LOADTESTS" =~ "OpenGL" ]]; then
  sudo apt-get -qq install libgl1-mesa-glx:$dpkg_arch libgl1-mesa-dev:$dpkg_arch
fi
if [[ "$FEATURE_VK_UPLOAD" = "ON" || "$FEATURE_LOADTESTS" =~ "Vulkan" ]]; then
  sudo apt-get -qq install libvulkan1 libvulkan-dev:$dpkg_arch
fi
if [[ -n "$FEATURE_LOADTESTS" && "$FEATURE_LOADTESTS" != "OFF" ]]; then
  sudo apt-get -qq install libsdl2-dev:$dpkg_arch
  sudo apt-get -qq install libassimp5 libassimp-dev:$dpkg_arch
fi
if [[ "$FEATURE_LOADTESTS" =~ "Vulkan" ]]; then
  # No Vulkan SDK for Linux/arm64 yet.
  if [[ "$dpkg_arch" = "arm64" ]]; then
    echo "No Vulkan SDK for Linux/arm64 yet. Please set FEATURE_LOADTESTS to OpenGL or OFF."
  else
    # Derive the Ubuntu codename for the LunarG package repo URL.
    os_codename=$(grep -E 'VERSION_CODENAME=[a-zA-Z]+$' /etc/os-release)
    os_codename=${os_codename#VERSION_CODENAME=}
    echo "Download Vulkan SDK"
    # tee is used (and elevated with sudo) so we can write to the destination.
    wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc > /dev/null
    sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-$VULKAN_SDK_VER-$os_codename.list https://packages.lunarg.com/vulkan/$VULKAN_SDK_VER/lunarg-vulkan-$VULKAN_SDK_VER-$os_codename.list
    echo "Install Vulkan SDK"
    sudo apt update
    sudo apt install vulkan-sdk
  fi
fi
# Fetch the LFS-stored test images needed by the test suites.
git lfs pull --include=tests/srcimages,tests/testimages
# vim:ai:ts=4:sts=2:sw=2:expandtab
| true
|
32fc0e35658448713557aff82bf4b588ac419d5b
|
Shell
|
pedz/aix-build-scripts
|
/build-scripts/build-httpd
|
UTF-8
| 1,069
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/ksh
# Download, build, package and install Apache httpd 2.2 on AIX.
# Note. I'm trying to build expact, apr, apr-util, and pcre before
# building this. If you build this first, it dies when it tries to
# package it because the needed libraries are not installed.
full_path=http://mirrors.ibiblio.org/apache/httpd/httpd-2.2.21.tar.bz2
suffix=.tar.bz2
# Derive the tarball name and the unpacked base directory from the URL.
package_name=${full_path##*/}
export basedir_bit=${package_name%${suffix}}
# The if gives us a way to terminate the script if we are debugging
# Setup by having Setup exit with a non-zero status.
# Setup is expected to provide mk_dir_structure, Unpack, Patch, Configure,
# Make, Test, Package, Install and the image_dir/build_dir/PUBLIC_BASE vars
# (they are all used below).
if ! . "${0%/*}/Setup" ; then
  exit 1
fi
mk_dir_structure
cd ${image_dir}
# Only download the tarball when it is not already cached locally.
if [[ ! -r "${package_name}" ]] ; then
  wget ${full_path}
fi
# NOTE -- next type use DEFAULT_PREFIX instead of this tap dance
# apr, apr-util, and httpd have a unique idea of prefix
ORIG_PUBLIC_BASE=${PUBLIC_BASE}
export PUBLIC_BASE="${PUBLIC_BASE}/apache2"
# Each step short-circuits the chain on failure.
Unpack &&
mkdir -p "${build_dir}" &&
cd "${build_dir}" &&
Patch &&
Configure \
    --with-apr=${PUBLIC_BASE} \
    --with-apr-util=${PUBLIC_BASE} \
    --with-pcre=${ORIG_PUBLIC_BASE} &&
Make &&
Test &&
Package &&
Install
| true
|
93c9fc1e396c094ede55f0a6ec74755e688a3b6a
|
Shell
|
danikaze/ascii-ui2
|
/scripts/build.sh
|
UTF-8
| 2,557
| 4.28125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
##
# Retrieve a field value from a json file:
# getJsonField "filename.json" "fieldName"
##
getJsonField() {
  # Fetch the value of a top-level string field from a JSON file.
  # Prints the first match with quotes, commas and whitespace stripped;
  # prints an empty result when the field is absent.
  local json_file=$1
  local key=$2
  local raw
  # First line mentioning the quoted key; keep what follows the colon.
  raw=$(grep -m1 "\"$key\"" "$json_file" | awk -F: '{ print $2 }')
  # Drop quotes/commas, then every whitespace character.
  raw=${raw//[\",]/}
  raw=$(printf '%s' "$raw" | tr -d '[[:space:]]')
  echo "$raw"
}
##
# Stops the execution of the build with an error code and error message
##
error() {
  # Print a colored failure notice, then abort the build with the status.
  local exit_status=$1
  local while_doing=$2
  # %b expands the \033 escapes carried by the C_* color variables,
  # matching the behaviour of the previous "echo -e".
  printf '%b\n' "  ${C_RED}(!)${C_DEFAULT} Build stopped because of an error while ${C_YELLOW}${while_doing}${C_DEFAULT}"
  exit "$exit_status"
}
##
# Define constants
##
C_YELLOW='\033[1;37m'
C_RED='\033[1;31m'
C_DEFAULT='\033[0m'
PWD=`pwd`
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/..
TARGET_DIR="${PROJECT_ROOT}/dist"
TSC="${PROJECT_ROOT}/node_modules/.bin/tsc --project tsconfig.build.json"
TSPM="${PROJECT_ROOT}/node_modules/.bin/ef-tspm -s -c tsconfig.build.json"
PACKAGE_JSON="${PROJECT_ROOT}/package.json"
MANIFEST_JSON="${PROJECT_ROOT}/manifest.json"
# Package identity pulled from package.json, used in log messages below.
PACKAGE_NAME=$(getJsonField "${PACKAGE_JSON}" name)
PACKAGE_VERSION=$(getJsonField "${PACKAGE_JSON}" version)
# --only sets this to 1: compile only, skipping tests and the publish prompt.
BUILD_ONLY=0
##
# Read arguments
##
while test $# -gt 0
do
  case "$1" in
    --only) BUILD_ONLY=1
      ;;
    *) error 1 "unknown argument $1"
      ;;
  esac
  shift
done
echo BUILD_ONLY $BUILD_ONLY
# Execute the tests
if [ $BUILD_ONLY -ne 1 ]; then
  echo -e "* Running the tests..."
  npm run test || error 2 "running the tests"
fi
# Generate built files in the `app` folder
echo -e "* Building ${C_YELLOW}${PACKAGE_NAME}-${PACKAGE_VERSION}${C_DEFAULT}..."
$TSC || error 3 "executing tsc"
# Copy package.json without the "security" private field
echo -e "* Copying ${C_YELLOW}package.json${C_DEFAULT} for publishing npm..."
cat "${PACKAGE_JSON}" | grep -v "private" > "${TARGET_DIR}/package.json" || error 3 "copying package.json"
# Copy other files to include in the npm
echo -e "* Copying ${C_YELLOW}README.md${C_DEFAULT} to be included within the npm..."
cp "${PROJECT_ROOT}/README.md" "${TARGET_DIR}/README.md" || error 4 "copying README.md"
# Revert the typescript aliases in the generated code
echo -e "* De-mapping typescript aliases..."
$TSPM || error 4 "executing tspm"
# Ask to publish the npm (interactive; skipped in --only mode)
if [ $BUILD_ONLY -ne 1 ]; then
  echo
  read -p "Publish npm? [y/N] " -n1 ans
  echo
  if [[ $ans =~ [yY] ]]; then
    echo -e "* Publishing ${C_YELLOW}${PACKAGE_NAME}-${PACKAGE_VERSION}${C_DEFAULT}..."
    cd "${TARGET_DIR}"
    npm publish
  fi
fi
echo
cd "${PWD}"
| true
|
929549b93e3d28493ae777bdd38bf354c6d4e31f
|
Shell
|
coord-e/urdfdev
|
/scripts/run.sh
|
UTF-8
| 1,334
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch a headless ROS/RViz stack (Xvfb + fluxbox + x11vnc + noVNC),
# build the URDF for the given model, then rebuild on source changes.
# Usage: run.sh <model_path> [watched_source_dirs...]
source "/opt/ros/$ROS_DISTRO/setup.bash"
set -euo pipefail
source "/opt/urdfdev/lib/log.sh"
status "Starting..."
model_path=$1
shift
# Remaining arguments are the directories to watch; default to the cwd.
sources=${@:-$(pwd)}
# Scratch files handed to build.sh: generated URDF and last build status.
urdf_path=$(mktemp)
run_status_path=$(mktemp)
build_cmd="/opt/urdfdev/build.sh $model_path $urdf_path $run_status_path"
display_size=${URDFDEV_DISPLAY_SIZE:-1024x768}
novnc_port=${URDFDEV_NOVNC_PORT:-6080}
export DISPLAY=:0
info "Starting X virtual framebuffer..."
exec_log Xvfb $DISPLAY -screen 0 ${display_size}x24 +extension GLX +render -noreset &
info "Starting fluxbox..."
exec_log_ fluxbox -log "$URDFDEV_LOG" &> /dev/null &
info "Starting x11vnc..."
exec_log_ x11vnc -display $DISPLAY -rfbport 5900 -noxrecord -xkb -forever -bg -o "$URDFDEV_LOG" &> /dev/null
info "Starting noVNC..."
exec_log /opt/urdfdev/noVNC/utils/launch.sh --vnc localhost:5900 --listen ${novnc_port} &
info "Starting roscore..."
exec_log roscore &
# Block until the ROS master answers on its host:port.
exec_log wait-for-it ${ROS_MASTER_URI#*//}
info "rosmaster is launched."
exec_log rosparam set use_gui true
eval $build_cmd
info "Waiting noVNC to be launched..."
wait-for-it -q localhost:$novnc_port -t 0 && status "Ready. You can now view RViz at http://localhost:6080/"
# Re-run the build command for every filesystem event under $sources.
fswatch --event Created --event Updated --event Removed --event Renamed --recursive ${URDFDEV_FSWATCH_ADDITIONAL_OPTIONS:-} $sources | xargs -n1 $build_cmd
| true
|
7373d68b5d8235b573bb071f5484b9a009302ade
|
Shell
|
itomoyasu/dotfiles
|
/.bashrc
|
UTF-8
| 738
| 2.625
| 3
|
[] |
no_license
|
# ~/.bashrc — PATH setup, history tuning and aliases (macOS/Homebrew
# oriented: uses `open -a` and GNU coreutils' `gls`).
# PATH for Homebrew
export PATH=/usr/local/bin:$PATH
# PATH for python2.7
export PATH=/usr/local/share/python:$PATH
# PATH for virtualenvwrapper
export WORKON_HOME=$HOME/.virtualenvs
source `which virtualenvwrapper.sh`
# PATH for Cordova(Phone Gap)
export PATH=$PATH:/usr/local/Cordova/bin
# PATH for jsx
export PATH=$PATH:/$HOME/work/DeNA/JSX/bin
# Display --------------
PS1='[\h]\w $ '
# 5000 commands in memory, 50000 on disk; skip duplicates and
# space-prefixed commands; require 10 consecutive Ctrl-D to exit.
HISTSIZE=5000
IGNOREEOF=10
HISTCONTROL=ignoreboth
HISTFILE=$HOME/.bash_history
HISTFILESIZE=50000
# Aliases --------------
# Interactive & verbose file operations to avoid accidental overwrites.
alias cp='cp -iv'
alias rm='rm -iv'
alias mv='mv -iv'
alias make='make -j2'
alias ls='gls -F --color'
alias la='ls -a'
alias ll='ls -lag'
alias firefox='open -a Firefox'
alias chrome='open -a Google\ Chrome'
| true
|
95ae9bd73a209b30ee5a5385f496eac868cd3f80
|
Shell
|
sgrebnov/aws
|
/cli/entrypoint.sh
|
UTF-8
| 787
| 3.265625
| 3
|
[
"LGPL-2.1-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: configures kubectl/EKS access, then executes the
# command passed as arguments.
set -e
# Respect AWS_DEFAULT_REGION if specified
[ -n "$AWS_DEFAULT_REGION" ] || export AWS_DEFAULT_REGION=us-east-2
# Respect AWS_DEFAULT_OUTPUT if specified
[ -n "$AWS_DEFAULT_OUTPUT" ] || export AWS_DEFAULT_OUTPUT=json
# Capture output
#output=$( sh -c "aws $*" )
# Preserve output for consumption by downstream actions
#echo "$output" > "${HOME}/${GITHUB_ACTION}.${AWS_DEFAULT_OUTPUT}"
# Write output to STDOUT
#echo "$output"
#echo "$KUBE_CONFIG_DATA" | base64 --decode > /tmp/config
#export KUBECONFIG=/tmp/config
# Download a pinned kubectl release into the current directory.
# NOTE(review): the binary is made executable but never moved onto PATH;
# presumably the executed command refers to ./kubectl — confirm.
k8sversion=v1.16.0
curl -LO https://storage.googleapis.com/kubernetes-release/release/$k8sversion/bin/linux/amd64/kubectl
chmod +x ./kubectl
# Write the kubeconfig for the "projects" EKS cluster.
aws eks --region "$AWS_DEFAULT_REGION" update-kubeconfig --name projects
sh -c "$*"
| true
|
532cb5b4ff2b55d7658781a111653e10f3e9a1b6
|
Shell
|
1024sparrow/traliva
|
/src/build_scripts/traliva_final.sh
|
UTF-8
| 1,596
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Final build of the whole project: generate and commit the source code
# for every target platform found under src/build_scripts/targets.
# $1 -- project root, e.g. /home/boris/da/pro/traliva
# $2 -- name of the compiled-output directory, e.g. "compiled"
. $1/src/config
compiled_dir="$1/$2/targets"
targets_dir="$1/targets"
mkdir "$compiled_dir"
mkdir "$targets_dir"
for i in $(ls -1 $1/src/build_scripts/targets)
do
	# Skip non-directories and entries whose name starts with "_".
	if [ ! -d "$1/src/build_scripts/targets/$i" ]
	then
		continue
	fi
	if [[ "$i" == _* ]]
	then
		continue
	fi
	echo "Запускается скрипт генерации исходного кода под платформу \"$i\""
	# Make sure the per-target repository exists.
	mkdir "$targets_dir"/"$i"
	pushd "$targets_dir"/"$i"
	if [ ! -d .git ]
	then
		git init
	fi
	popd
	# Clone the target repo, regenerate the skeleton on a dedicated
	# branch and commit/push it when anything changed.
	pushd "$compiled_dir"
	git clone "$1"/targets/"$i"/.git
	pushd "$compiled_dir"/"$i"
	git checkout -b skeleton
	git pull # if `git remote -v` is not empty
	rm -r * # hidden files and directories, including .git, stay in place
	$1/src/build_scripts/targets/"$i"/init.sh "$1/$2/project" "$compiled_dir"/"$i"
	if ! git diff-index --quiet HEAD --
	then
		git add .
		git commit -m"traliva: skeleton changed for target \"$i\""
		git push --set-upstream origin skeleton
	fi
	popd
	popd
	# Merge the refreshed skeleton into the target's working tree,
	# preserving local uncommitted changes via stash.
	pushd "$targets_dir"/"$i"
	git stash
	git merge skeleton
	git stash pop
	popd
done
| true
|
13574c42fd26ab9674f442c6bdd145d14ebd97d3
|
Shell
|
mitmproxy/www
|
/build.sh
|
UTF-8
| 675
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Validate src/data/publications.json (well-formed JSON, only known
# publication types, research entries must name a conference), then build
# the site with hugo.
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
cd src
# check for valid JSON (jq parses the file and emits nothing on success)
jq empty data/publications.json
# check for existing publication types
allowed_types=$(cat <<-MOT
blog post
media
research
talk
MOT
)
# Unique, sorted list of every "type" value used in the file.
types=$(jq -r '.[].type' data/publications.json | sort -u)
# Both sides quoted: an unquoted right-hand side inside [[ ]] is treated
# as a glob pattern, not as a literal string.
if [[ "${types}" != "${allowed_types}" ]]; then
    echo "Found invalid publication type!"
    exit 1
fi
# Every "research" entry must carry a non-null "conference" field.
conference_missing=$(jq '.[] | select(.type =="research") | select(.conference == null)' data/publications.json)
if [ -n "$conference_missing" ]; then
    echo "Research artifact without conference!"
    echo "$conference_missing"
    exit 1
fi
hugo
| true
|
0aaa09cfc08746f1a935e4de90912750e5636e2a
|
Shell
|
2ndcouteau/shell_script
|
/extraction_transilien.sh
|
UTF-8
| 4,646
| 3.578125
| 4
|
[] |
no_license
|
## Script Permettant de recuperer les noms et ID des lignes des du reseaux ferre
## d'ile de france disponible sur data.gouv;
## sous le nom de "gares-du-reseau-ferre-dile-de-france.csv" a l'adresse suivante :
## https://www.data.gouv.fr/fr/datasets/gares-du-reseau-ferre-d-ile-de-france-idf/
## Les donnees sont extraite et trie par lignes puis sont formate pour subir une
## transformation au format JSON;
## Note 1: une annexe au nom de _extraction_transilien.awk est necessaire au bon
## fonctionnement de ce script
## Il est a mettre dans le meme repertoire que ce script ainsi que
## du .csv traite par ce script;
## Il contient peut de ligne que je reporte ici au cas ou il ne serait
## plus disponible :
# _extraction_transilien.awk :
# $NF ~ ez {
# print $0
# }
## Note 2: Les Noms des Stations sont laisse avec la ponctuation donne dans le
## fichier initiale;
############################################################
###### INITIALISATION ########
############################################################
# Clean leftovers from a previous run so the mkdir calls below succeed.
if [ -d "./Lignes_transilien" ];then
	rm -rf Lignes_transilien;
fi
if [ -d "./TMP" ];then
	# BUG FIX: this branch used to delete Lignes_transilien a second time,
	# leaving a stale TMP directory that made "mkdir TMP" below fail.
	rm -rf TMP;
fi
## Create the directories receiving the temporary and final files
mkdir Lignes_transilien;
mkdir TMP;
## Extract the useful columns: Names - IDs - Line names
cat gares-du-reseau-ferre-dile-de-france.csv | awk -F\; '{ print $1, ";" $3, ";" $9 }' > TMP/fichier_tmp.csv;
############################################################
######            DATA EXTRACTION                   ########
############################################################
## Letters of the RER and Transilien lines
for letter in A B C D E H J K L N P R U
do
	## Catch the rows of the current line and write them to a per-line CSV.
	## NOTE the helper file _extraction_transilien.awk is needed so a regex
	## can be driven by an awk variable: it prints rows matching the letter.
	awk -v ez=$letter -f _extraction_transilien.awk TMP/fichier_tmp.csv > TMP/tmp_line$letter.csv;
	## Sort the stations alphabetically and redirect to a new file
	sort -k 1 TMP/tmp_line$letter.csv > TMP/tmp_1_line$letter.csv;
	## Remove the last column
	awk '{$NF=""; print $0}' TMP/tmp_1_line$letter.csv > TMP/tmp_2_line$letter.csv;
	## Extract the station IDs
	awk -F\; '{print $NF}' TMP/tmp_2_line$letter.csv > TMP/tmp_ID_line$letter.csv;
	## Strip leading/trailing spaces and tabs
	sed 's/^[ \t]*//;s/[ \t]*$//' TMP/tmp_ID_line$letter.csv > TMP/tmp_ID_1_line$letter.csv;
	## Extract the station names
	awk -F\; '{$NF=""; print $0}' TMP/tmp_2_line$letter.csv > TMP/tmp_NAME_line$letter.csv;
	## Strip leading/trailing spaces and tabs
	sed 's/^[ \t]*//;s/[ \t]*$//' TMP/tmp_NAME_line$letter.csv > TMP/tmp_NAME_1_line$letter.csv
	## Join the columns of the two files
	paste --delimiter=";" TMP/tmp_ID_1_line$letter.csv TMP/tmp_NAME_1_line$letter.csv > TMP/tmp_4_line$letter.csv;
	## Insert the column headers -> sed -i '1i...': "1i" targets the first
	## line of the file given to SED
	sed -i '1iid;name' TMP/tmp_4_line$letter.csv;
	############################################################
	######         CSV->JSON + minification             ########
	############################################################
	## Convert the CSV file to JSON
	csvtojson --delimiter=";" TMP/tmp_4_line$letter.csv > TMP/tmp_json_line$letter.json;
	## Remove every newline in order to minify the list.
	cat TMP/tmp_json_line$letter.json | tr "\n" " " > TMP/tmp_json_1_line$letter.json;
	## Remove the space after the comma, between two cells.
	cat TMP/tmp_json_1_line$letter.json | awk '{ gsub(", ", ",", $0); print $0 }' > Lignes_transilien/TR_$letter.json;
done
############################################################
######       ORGANISATION AND RENAMING              ########
############################################################
## Create the per-network directories
mkdir Lignes_transilien/TR;
mkdir Lignes_transilien/R;
## Move and rename the RER lines
mv Lignes_transilien/TR_A.json Lignes_transilien/R/R_A.json;
mv Lignes_transilien/TR_B.json Lignes_transilien/R/R_B.json;
mv Lignes_transilien/TR_C.json Lignes_transilien/R/R_C.json;
mv Lignes_transilien/TR_D.json Lignes_transilien/R/R_D.json;
mv Lignes_transilien/TR_E.json Lignes_transilien/R/R_E.json;
## Move the Transilien lines
mv Lignes_transilien/*.json Lignes_transilien/TR;
## Delete the temporary files
rm -rf TMP;
rm -rf *~;
| true
|
e9170a790719e5c3ed2e6231a81e3a586dd6ec07
|
Shell
|
atykhonov/mirantis
|
/ngpatch/ngpatch.sh
|
UTF-8
| 696
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Push local nailgun changes as a patch into the nailgun Docker container
# on a remote host and restart the service. $1 = remote host.
set -e
# Apply ($3 = "") or revert ($3 = "-R") patch file $1 inside the nailgun
# container running on host $2.
function ngpatch() {
    CONTAINER_ID=$(ssh root@$2 docker inspect -f '{{.Id}}' $(ssh root@$2 docker ps | grep nailgun | awk '{print $1}'))
    ROOTFS=$(ssh root@$2 locate $CONTAINER_ID | grep rootfs | head -n 1)
    SITE_PACKAGES_DIR=$ROOTFS/usr/lib/python2.6/site-packages/
    scp $1 root@$2:$SITE_PACKAGES_DIR/$1
    ssh root@$2 patch -d $SITE_PACKAGES_DIR $3 -p0 < $1
}
PATCH="ngpatch.diff"
REVERSED_PATCH="ngreversedpatch.diff"
# Undo the previously applied patch, if any, before applying a new one.
if [ -f $REVERSED_PATCH ];
then
    ngpatch $REVERSED_PATCH $1 "-R"
fi
# Produce a fresh diff of the working tree and push it to the container;
# keep it around so the next run can revert it first.
git diff -u --no-prefix --relative=nailgun > $PATCH
ngpatch $PATCH $1 ""
mv $PATCH $REVERSED_PATCH
ssh root@$1 dockerctl shell nailgun supervisorctl restart nailgun
| true
|
db7f793ca878198c517330fec6fac1ef0cb37086
|
Shell
|
devinsba/zsh-plugins
|
/rvm/init.plugin.zsh
|
UTF-8
| 233
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Adapted from: https://www.reddit.com/r/node/comments/4tg5jg/lazy_load_nvm_for_faster_shell_start/d5ib9fs
# Source RVM from a non-standard install prefix, if it is present there.
export RVM_DIR="${HOME}/.local/opt/rvm"
echo "Loading RVM..."
[[ -s "${RVM_DIR}/scripts/rvm" ]] && . "${RVM_DIR}/scripts/rvm"
| true
|
4136be458cf8123feb8d37163f30721c9953f937
|
Shell
|
samuelviveiros/fixdvi
|
/fixdvi
|
UTF-8
| 4,449
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Author: Samuel Viveiros Gomes
# Description: Script to fix the resolution of monitors that use a DVI adapter.
# Version: 0.2.0
# Date: Wednesday, April 18th 2018 -- Palmas - TO
#
# History:
#       0.2.0 Added the [-g | --gen-config] command line option
#       0.1.0 Initial release.
#
script_ver="0.2.0"
script_name=$0
# X.org configuration fragments written/removed by this script.
monitor_conf="10-monitor.conf"
device_conf="20-device.conf"
xorg_dir="/usr/share/X11/xorg.conf.d"
# Fallback 1024x768@60 modeline used by --gen-config.
basic_modeline='Modeline "1024x768_60.00" 63.50 1024 1072 1176 1328 768 771 775 798 -hsync +vsync'
# Filled in by sub_create_modeline.
new_modeline=""
sub_generate_config()
{
	# Write the 10-monitor.conf and 20-device.conf X.org fragments into
	# directory $1, embedding modeline $2. Returns 1 when either argument
	# is empty, 0 otherwise.
	if [ -z "$1" ] || [ -z "$2" ]; then
		return 1
	fi
	cat > "$1/$monitor_conf" << MONITORCONF
Section "Monitor"
    Identifier "DVI"
    $2
    Option "RightOf" "VGA"
EndSection
 
Section "Monitor"
    Identifier "VGA"
    Option "LeftOf" "DVI"
EndSection
MONITORCONF
	cat > "$1/$device_conf" << DEVICECONF
Section "Device"
    Identifier "Monitor"
    Driver "nouveau"
    Option "Monitor-DVI-I-1" "DVI"
    Option "Monitor-VGA-1" "VGA"
EndSection
 
DEVICECONF
	return 0
}
sub_create_modeline()
{
	# Compute an X.org Modeline for resolution $1 x $2 via cvt(1) and
	# store it in the global new_modeline. Returns non-zero when the
	# argument count is wrong or cvt is not installed.
	if [ ! $# -eq 2 ]; then
		return 1
	fi
	if [ ! -e "/usr/bin/cvt" ]; then
		return 1
	fi
	#new_modeline=`cvt $1 $2 | grep Modeline`
	new_modeline=`/usr/bin/cvt $1 $2 | grep Modeline`
	return $?
}
sub_fix()
{
	# Generate a modeline for $1 x $2 and install the X.org fragments into
	# $xorg_dir. Requires write access there (i.e. run via sudo).
	echo "[*] Criando a Modeline ..."
	if [ ! $# -eq 2 ]; then
		echo "[-] Não foi possível criar a Modeline, pois faltam parâmetros."
		return 1
	fi
	if [ ! -e "$xorg_dir" ] || [ ! -w "$xorg_dir" ]; then
		echo "[-] Não é possível gravar no diretório $xorg_dir. ( Executando como sudo? )."
		return 1
	fi
	# Drop previously installed fragments before regenerating them.
	/bin/rm -f $xorg_dir/$monitor_conf >/dev/null 2>&1
	/bin/rm -f $xorg_dir/$device_conf >/dev/null 2>&1
	sub_create_modeline $1 $2
	if [ ! $? -eq 0 ] || [ -z "$new_modeline" ]; then
		echo "[-] Não foi possível criar a Modeline, pois o cvt falhou"
		return 1
	fi
	sub_generate_config "$xorg_dir" "$new_modeline"
	return 0
}
sub_undo()
{
	# Remove the configuration fragments installed by sub_fix.
	echo "[+] Removendo arquivos ..."
	/bin/rm -f $xorg_dir/$monitor_conf >/dev/null 2>&1
	/bin/rm -f $xorg_dir/$device_conf >/dev/null 2>&1
	return 0
}
sub_print_about()
{
	# Version/credits banner for -v | --version.
	echo
	echo "Fixdvi $script_ver -- Copyright (c) 2018 MPETO"
	echo
	return 0
}
sub_print_usage()
{
	# Help text for -h | --help and for invalid invocations.
	echo
	echo "Uso: $script_name [OPÇÃO]"
	echo "Tenta corrigir a resolução de vídeo do monitor VGA que utiliza adaptador DVI."
	echo "  -f, --fix-dvi      Tenta corrigir a resolução."
	echo "                     Ex.: $script_name -f 1920 1080"
	echo "  -u, --undo         Desfaz as alterações."
	echo "  -g, --gen-config   Gerar os arquivos 10-monitor.conf e 20-device.conf"
	echo "                     Ex.: $script_name -g /diretorio/de/destino"
	echo "  -v, --version      Informações da versão e do autor."
	echo "  -h, --help         Exibe esta ajuda."
	echo
	return 0
}
# Dispatch on the first command-line option; $2/$3 carry the resolution
# for -f or the destination directory for -g. Returns non-zero on any
# invalid or failed invocation.
sub_parser_params()
{
	case "$1" in
		"-f"|"--fix-dvi")
			if [ -z $2 ] || [ -z $3 ]; then
				echo "$script_name: faltam parâmetros para a opção '$1'"
				echo "Tente '$script_name --help' para mais informações."
				return 1
			fi
			sub_fix $2 $3
			if [ ! $? -eq 0 ]; then
				return 1
			else
				echo "[+] TUDO OK!"
				echo "[+] Reinicie o sistema operacional para aplicar as configurações."
			fi
			;;
		"-u"|"--undo")
			sub_undo
			;;
		"-g"|"--gen-config")
			if [ -z "$2" ]; then
				echo "$script_name: faltam parâmetros para a opção '$1'"
				echo "Tente '$script_name --help' para mais informações."
				return 1
			fi
			# Destination must exist, be writable and be a directory.
			if [ ! -e "$2" ] || [ ! -w "$2" ] || [ ! -d "$2" ]; then
				echo "[-] Não foi possível gerar os arquivos."
				return 1
			fi
			sub_generate_config "$2" "$basic_modeline"
			if [ ! $? -eq 0 ]; then
				return 1
			else
				echo "[+] Feito!"
			fi
			;;
		"-v"|"--version")
			sub_print_about
			;;
		"-h"|"--help")
			sub_print_usage
			;;
		*)
			echo "$script_name: opção '$1' inválida"
			echo "Tente '$script_name --help' para mais informações."
			return 1
			;;
	esac
	return 0
}
# Entry point: require at least one option, forward up to three arguments.
if [ $# -lt 1 ]; then
	sub_print_usage
	exit 1
else
	sub_parser_params $1 $2 $3
	if [ ! $? -eq 0 ]; then
		exit 1
	fi
	exit 0
fi
| true
|
e59126e9a4c226a7f4b0a6db6eb0b8bfc8482f34
|
Shell
|
mig8447/bash_scripts
|
/.bashrc
|
UTF-8
| 1,851
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive-shell setup: OS detection, PATH assembly, modular
# ~/.bashrc.d sourcing and iTerm2 shell integration.
_os_name="$( uname -s )"
export _os_name
# True (exit 0) when running in a fully interactive terminal: $- contains
# "i" and stdin/stdout/stderr are all TTYs.
function _is_interactive_shell(){
    local exit_code=0
    [[ "$-" =~ "i" && -t 0 && -t 1 && -t 2 ]]
    exit_code="$?"
    return "$exit_code"
}
_is_interactive_shell
export _is_interactive_shell="$?"
# Enable these settings only if the terminal is interactive
if [[ "$_is_interactive_shell" -eq 0 ]]; then
    # Enable forward history search
    # NOTE: This setting should be in the .bashrc.d/*history.sh file
    # but it only works if called directly from here
    stty -ixon
fi
# Import common Bash functions
# shellcheck disable=SC1090
source "$HOME"'/lib/mig8447_commons.sh'
# Configure the PATH
append_path_to_path '/sbin'
append_path_to_path "$HOME"'/bin'
if [[ "$_os_name" == 'Darwin' ]]; then
    # sleepwatcher binary lives here
    append_path_to_path '/usr/local/sbin'
fi
# Personal Directories
export TERMINAL_TYPESCRIPTS_DIR="$HOME"'/terminal_typescripts'
# Import all of the non-executable *.sh files in $HOME/.bashrc.d
# (Linux find supports -executable so executables can be excluded there;
# the Darwin branch has no such filter and sources every *.sh file.)
while read -r file; do
    # shellcheck disable=SC1090
    source "$file"
done < <(
    case "$_os_name" in
        ( Linux )
            find -L "$HOME"'/.bashrc.d' -maxdepth 1 \
                -name '*.sh' -type f \
                -not \( -executable \) \
                | sort -n
        ;;
        ( Darwin )
            find -L "$HOME"'/.bashrc.d' -maxdepth 1 \
                -name '*.sh' -type f \
                | sort -n
        ;;
    esac
)
# iTerm2 - Shell Integrations
# iTerm2 Client Check based on https://gist.github.com/joerohde/b7a07db9ff9d1641bd3c7c2abbe2828d
# shellcheck disable=SC1090
{
    "$HOME"'/lib/isiterm2.sh' \
        && test -e "${HOME}/.iterm2_shell_integration.bash" \
        && source "${HOME}/.iterm2_shell_integration.bash";
} || ( exit 0 )
| true
|
8bf281a294c2b8ceb5474b39761d00e69c21e221
|
Shell
|
arouze/dotfiles
|
/.zshrc
|
UTF-8
| 1,115
| 2.71875
| 3
|
[] |
no_license
|
# ~/.zshrc — oh-my-zsh configuration: nvm, theme and plugin selection.
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
export NVM_DIR=~/.nvm
source ~/.nvm/nvm.sh
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="schminitz"
# Custom plugin sources
source ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh
# Documentation for each enabled plugin:
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/git/
# https://github.com/zsh-users/zsh-autosuggestions
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/symfony2
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/npm
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/aws
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/composer
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/common-aliases
# https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/wd
#
plugins=(git zsh-autosuggestions symfony2 npm aws composer common-aliases wd)
source $ZSH/oh-my-zsh.sh
alias ohmyzsh="nano ~/.oh-my-zsh"
| true
|
bffc4c744a7eead361ab9bb02091afaee96fa073
|
Shell
|
wangscript007/CatraMMS
|
/scripts/tomcat.sh
|
UTF-8
| 531
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Control the "tomcat" systemd unit.
# Usage: tomcat.sh start | stop | status
# "stop" also clears Catalina's work directory so the next start
# recompiles JSPs / redeploys cleanly.
if [ $# -ne 1 ]
then
    echo "Usage $0 start | stop | status"
    # Exit non-zero so callers can detect the usage error (was exit 0).
    exit 1
fi
command=$1
# case replaces the old chain of deprecated "-a" test operators and the
# inconsistent second usage string (it had a stray "[nodaemon]" suffix).
case "$command" in
    start)
        sudo systemctl start tomcat
        ;;
    status)
        sudo systemctl status tomcat
        ;;
    stop)
        sudo systemctl stop tomcat
        echo "rm -rf /opt/catramms/tomcat/work/Catalina"
        rm -rf /opt/catramms/tomcat/work/Catalina
        ;;
    *)
        echo "Usage $0 start | stop | status"
        exit 1
        ;;
esac
| true
|
6b47a6a8bcdf0e616586404f383d8b5614d1011e
|
Shell
|
itohanosa/deeplab-lakeice-webcams
|
/datasets/download_and_convert_lakeice.sh
|
UTF-8
| 2,310
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Convert the lake-ice dataset (images + raw segmentation masks) into
# TFRecords for DeepLab. Adapted from the PASCAL VOC 2012 script.
#
# Usage:
#   bash ./download_and_convert_lakeice.sh
#
# The folder structure is assumed to be:
#  + datasets
#     - build_data.py
#     - build_lakeice.py
#     - download_and_convert_voc2012.sh
#     - remove_gt_colormap.py
#     + pascal_voc_seg
#       + VOCdevkit
#         + VOC2012
#           + JPEGImages
#           + SegmentationClass
#
# Exit immediately if a command exits with a non-zero status.
# NOTE(review): no "set -e" actually follows the comment above — confirm
# whether it was dropped by mistake.
CURRENT_DIR=$(pwd)
WORK_DIR="/home/pf/pfshare/data/MA_Rajanie/models/research/deeplab/datasets/lake"
PQR_ROOT="${WORK_DIR}"
# Raw (non-colormapped) ground-truth masks.
SEG_FOLDER="${PQR_ROOT}/SegmentationClassRaw"
#SEG_FOLDER="/home/pf/pfshare/data/MA_Rajanie/Convert_json_to_PNG_masks/labelme/examples/semantic_segmentation/PTZ_Cam1_voc/labels_cropped_raw"
#SEMANTIC_SEG_FOLDER="${PQR_ROOT}/SegmentationClass"
# Build TFRecords of the dataset.
OUTPUT_DIR="${WORK_DIR}/nonptz_all_except_sihl_1617_tfrecord"
mkdir -p "${OUTPUT_DIR}"
#IMAGE_FOLDER="/home/pf/pfshare/data/MA_Rajanie/Convert_json_to_PNG_masks/labelme/examples/semantic_segmentation/PTZ_Cam1_voc/Images_cropped"
IMAGE_FOLDER="${PQR_ROOT}/JPEGImages"
# Folder with the train/val split list files.
LIST_FOLDER="${PQR_ROOT}/nonptz_all_except_sihl_1617"
#LIST_FOLDER="/home/pf/pfshare/data/MA_Rajanie/Convert_json_to_PNG_masks/labelme/examples/semantic_segmentation/PTZ_Cam1_voc/ptz_cam1_cropped_topbottom_325x1209"
echo "Converting lakeice dataset..."
python3 ./build_lakeice.py \
  --image_folder="${IMAGE_FOLDER}" \
  --semantic_segmentation_folder="${SEG_FOLDER}" \
  --list_folder="${LIST_FOLDER}" \
  --image_format="png" \
  --output_dir="${OUTPUT_DIR}"
| true
|
e9c9eece52debba036e16913828b847d211854d0
|
Shell
|
pfandzelter/tinyFaaS
|
/scripts/get_logs.sh
|
UTF-8
| 196
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dump the logs of every container attached to a handler-net* network.
set -e
# Outer loop: IDs of all matching networks; inner loop: all containers
# (running or not, hence -a) attached to each network.
for line in $(docker network ls --filter name=handler-net -q) ; do
    for cont in $(docker ps -a -q --filter network="$line") ; do
        docker logs "$cont"
    done
done
| true
|
ea7bd4a289cc6262ee4dd73f6af9750a08e80157
|
Shell
|
ShreckYe/Huawei-Code-Craft-2019
|
/Preliminary/bin/startup.sh
|
UTF-8
| 682
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launcher for the CodeCraft-2019 Java solution.
# Usage: startup.sh <carPath> <roadPath> <crossPath> <answerPath>
basepath=$(cd `dirname $0`; pwd)
APP_HOME=$basepath/..
JAVA=$JAVA_HOME/bin/java
# NOTE(review): these two jar variables are assigned but never added to
# JVM_OPT; presumably ./lib (java.ext.dirs below) already holds the jars
# — confirm before removing.
commons_logging_lib=$APP_HOME/code/CodeCraft-2019/lib/commons-logging-1.2.jar
log4j_lib=$APP_HOME/code/CodeCraft-2019/lib/log4j-1.2.17.jar
# 64 MB fixed heap; extension dir, native library path and classpath.
JVM_OPT="-Xms64M -Xmx64M"
JVM_OPT="$JVM_OPT -Djava.ext.dirs=./lib"
JVM_OPT="$JVM_OPT -Djava.library.path=$APP_HOME/bin"
JVM_OPT="$JVM_OPT -classpath"
JVM_OPT="$JVM_OPT $APP_HOME/bin/CodeCraft-2019-1.0.jar:$APP_HOME/bin/resources/"
carPath=$1
roadPath=$2
crossPath=$3
answerPath=$4
# Echo the full command line for debugging, then run it.
echo "$JAVA $JVM_OPT com.huawei.Main $carPath $roadPath $crossPath $answerPath 2>&1"
$JAVA $JVM_OPT com.huawei.Main $carPath $roadPath $crossPath $answerPath 2>&1
exit
| true
|
d25141ad79c9396dfdc60de4d51343cb5c067c5f
|
Shell
|
rs-services/Training-Support
|
/CAT_HelloWorldWebServer/ServerTemplates/training_helloworld_update_rightscript.sh
|
UTF-8
| 458
| 2.859375
| 3
|
[] |
no_license
|
#! /usr/bin/sudo /bin/bash
# ---
# RightScript Name: training_helloworld_update_rightscript
# Inputs:
#   WEBTEXT:
#     Category: Application
#     Description: Text to display on web page.
#     Input Type: single
#     Required: true
#     Advanced: false
#     Default: text:Hello World!
# Attachments: []
# ...
# Replace the web page body with the WEBTEXT input and restart Apache.
echo $WEBTEXT > /var/www/index.html
echo ">>>> Placed WEBTEXT, $WEBTEXT into index.html"
service apache2 restart
echo ">>>> Started apache2"
| true
|
8b2128f2af5d361808ff297b35fba69ff0cdcd32
|
Shell
|
lucasfrossard/dotfiles
|
/bin/initialize-git-svn-url.sh
|
UTF-8
| 1,544
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
# NOTE(review): the functions below use "local", which POSIX sh does not
# guarantee (works on dash/bash) — consider a bash shebang.
# Exit status returned when invoked with bad or missing options.
E_OPTERROR=85
set -u
err() {
	# Diagnostics go to stderr so they never pollute captured stdout.
	printf '%s\n' "$*" >&2
}
log() {
	# Normal progress messages go to stdout.
	printf '%s\n' "$*"
}
# Print the usage help to stderr and abort with E_OPTERROR.
usage() {
	read -d '' help <<- EOF
	USAGE:
	`basename $0` OPTIONS
	OPTIONS:
	-r svn revision (optional) default last copied revision
	-f full history flag - default false (optional)
	-d dir_path (optional)
	-l svn_url
	EOF
	err "$help"
	exit $E_OPTERROR # Exit and explain usage.
}
# Parse the options, derive defaults (checkout dir from the URL's last
# path component, revision from the branch's first/copy revision), then
# git-svn clone and wire the master branch up to the imported history.
main() {
	local parameters="fr:d:u:p:l:"
	# Show usage when no option at all was supplied.
	if ( ! getopts "${parameters}" opt); then
		usage
	fi
	local revision=""
	local full=false
	local svn_url=""
	local dir_path=""
	while getopts "${parameters}" opt; do
		case $opt in
			r)
				revision=$OPTARG
				;;
			f)
				full=true
				;;
			l)
				svn_url=$OPTARG
				;;
			d)
				dir_path=$OPTARG
				;;
			\?)
				err "Invalid option: -$OPTARG"
				usage
				;;
			:)
				err "Option -$OPTARG requires an argument."
				usage
				;;
		esac
	done
	shift $((OPTIND-1))
	local svn_command="svn log -r 1:HEAD -l 1 -q "
	# NOTE(review): the [ -z ... ] tests below use unquoted expansions —
	# safe only while the values contain no spaces; confirm.
	if [ -z $svn_url ] ; then
		usage
	fi
	# Default checkout dir = last path component of the svn URL.
	if [ -z $dir_path ] ; then
		dir_path=`echo $svn_url|sed -e 's/.*\/\(.*\)/\1/g'`
	fi
	if [ "$full" = false ]; then
		svn_command="${svn_command} --stop-on-copy"
	fi
	# Default revision = first revision reported by "svn log" (stops at
	# the last copy point unless -f requested full history).
	if [ -z $revision ] ; then
		revision=`$svn_command $svn_url | tail -n 2 | head -n 1 | awk '{print $1}'|sed -e 's/r\(.*\)/\1/g'`
	fi
	git svn clone -r $revision $svn_url $dir_path
	(
		cd $dir_path;
		git svn rebase
	)
	# Make plain "git pull"/merge on master track the git-svn remote ref.
	(
		cd $dir_path;
		git config --add branch.master.remote . ;
		git config --add branch.master.merge refs/remotes/git-svn;
	)
}
main $@
| true
|
0106bd334f47992c4225a4c904c909045b199772
|
Shell
|
petronny/aur3-mirror
|
/ut2004-mod-gunreal/PKGBUILD
|
UTF-8
| 930
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Philipp 'TamCore' B. <philipp {at} tamcore {dot} eu>
# PKGBUILD for the Gunreal mod for Unreal Tournament 2004.
pkgname=ut2004-mod-gunreal
pkgver=beta5
pkgrel=1
pkgdesc=""
arch=(i686 x86_64)
url="http://gunreal.com"
license=('unknown')
depends=(ut2004)
makedepends=(unzip)
source=(http://www.gunreal.com/updater/Gunreal%20Beta.zip ut2004-mod-gunreal.desktop gunreal.sh)
md5sums=('fde9d00220e01f7620a3886cbbbfc416'
         'c7a6aad1e9480aca28d112f1417c8608'
         'a28202b8f4209d067dcff1dd2fd5d9b9')
package() {
  # Install the mod under /opt/ut2004 plus a desktop entry and launcher.
  install -d $pkgdir/usr/share/applications
  install -d $pkgdir/opt/ut2004
  # Drop the Windows-only helpers shipped inside the zip.
  rm Gunreal/*.bat Gunreal/Updater*
  mv Gunreal $pkgdir/opt/ut2004/
  install -D -m 644 $srcdir/ut2004-mod-gunreal.desktop $pkgdir/usr/share/applications/ut2004-mod-gunreal.desktop
  install -D -m 555 $srcdir/gunreal.sh $pkgdir/opt/ut2004/Gunreal/gunreal.sh
  # On x86_64, point the launcher at the 64-bit UT2004 binary.
  if [ "$CARCH" == "x86_64" ]; then
    sed -i 's/ut2004-bin/&-linux-amd64/g' $pkgdir/opt/ut2004/Gunreal/gunreal.sh
  fi
}
| true
|
8b916dc66bfe7bd63393be03ea0f1d12f67fb6eb
|
Shell
|
devkitPro/mesa
|
/.gitlab-ci/tracie-runner-gl.sh
|
UTF-8
| 1,490
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# CI runner for tracie GL trace tests: sets up the driver/tool
# environments, optionally starts a virgl test server, sanity-checks that
# the CI-built Mesa is in use, then replays the traces from traces.yml.
set -ex
INSTALL="$(pwd)/install"
# Set up the driver environment.
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/install/lib/"
# Set environment for renderdoc libraries.
export PYTHONPATH="$PYTHONPATH:/renderdoc/build/lib"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/renderdoc/build/lib"
# Set environment for the waffle library.
export LD_LIBRARY_PATH="/waffle/build/lib:$LD_LIBRARY_PATH"
# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"
# Use the surfaceless EGL platform (no X server on the runner).
export EGL_PLATFORM="surfaceless"
export DISPLAY=
export WAFFLE_PLATFORM="surfaceless_egl"
RESULTS=`pwd`/results
mkdir -p $RESULTS
# Perform a self-test to ensure tracie is working properly.
"$INSTALL/tracie/tests/test.sh"
if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
    # tracie is to use virpipe, and virgl_test_server llvmpipe
    export GALLIUM_DRIVER="$GALLIUM_DRIVER"
    GALLIUM_DRIVER=llvmpipe \
    GALLIVM_PERF="nopt,no_filter_hacks" \
    VTEST_USE_EGL_SURFACELESS=1 \
    VTEST_USE_GLES=1 \
    virgl_test_server >$RESULTS/vtest-log.txt 2>&1 &
    # Give the background server a moment to come up before connecting.
    sleep 1
fi
# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(cat "$INSTALL/VERSION" | sed 's/\./\\./g')
wflinfo --platform surfaceless_egl --api gles2 | grep "Mesa $MESA_VERSION\(\s\|$\)"
python3 "$INSTALL/tracie/tracie.py" --file "$INSTALL/traces.yml" --device-name "$DEVICE_NAME"
| true
|
b907869c0cf844ca14cd9100b208ea2eaa85f26f
|
Shell
|
gliubaowen/automation
|
/geode/import-gemfire-data.sh
|
UTF-8
| 1,830
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
###############################################
# Filename:     import-gemfire-data.sh
# Version:      2.0
# Date:         2019-10-23
# Author:       LiuBaoWen
# Email:        bwliush@cn.ibm.com
# Description:  Import GemFire region data from snapshot (.gfd) files.
# Notes:
###############################################

workspaces=$(dirname "$0")

# Shared configuration: gfsh path/connect string, canned count queries,
# load_profile helper and ${logdir} all come from these sourced files.
. ${workspaces}/common-constants
. ${workspaces}/query-constants
. ${workspaces}/public_function.sh

if [ $# -lt 1 ]
then
    echo "USAGE:$0 USAGE|BACKUP_DIR"
    exit 1
fi

# Directory containing the snapshot-<Region>.gfd files to restore.
# NOTE(review): the paths in the heredoc concatenate ${backup_dir} directly
# with the file name, so the argument must end with "/" — confirm callers.
backup_dir=$1

# Restore every region snapshot via gfsh, printing row counts before and
# after the import so completeness can be eyeballed in the log.
import-gemfire-data(){
    load_profile
    echo "[info] 需要还原数据的目录为:${backup_dir}"
    ls -l ${backup_dir}/
    ${gfsh} << EOF
${connect}
${list_members}
${query_MemberStatusComposite_count}
${query_MemberMgtLevel_count}
${query_OrgStores_count}
${query_ProductsNRate_count}
${query_MemberTicketData_count}
${query_PosOrderOffline_count}
import data --region=MemberStatusComposite --file=${backup_dir}snapshot-MemberStatusComposite.gfd --member=${cfm_server_name}
import data --region=MemberMgtLevel --file=${backup_dir}snapshot-MemberMgtLevel.gfd --member=${cfm_server_name}
import data --region=OrgStores --file=${backup_dir}snapshot-OrgStores.gfd --member=${cfm_server_name}
import data --region=ProductsNRate --file=${backup_dir}snapshot-ProductsNRate.gfd --member=${cfm_server_name}
import data --region=MemberTicketData --file=${backup_dir}snapshot-MemberTicketData.gfd --member=${cfm_server_name}
import data --region=PosOrderOffline --file=${backup_dir}snapshot-PosOrderOffline.gfd --member=${cfm_server_name}
${query_MemberStatusComposite_count}
${query_MemberMgtLevel_count}
${query_OrgStores_count}
${query_ProductsNRate_count}
${query_MemberTicketData_count}
${query_PosOrderOffline_count}
EOF
}

# Run the import and mirror all output into a timestamped log file.
import-gemfire-data | tee ${logdir}/$(basename "$0")-$(date +%Y-%m-%d-%H:%M:%S).log
| true
|
db16136507c2dafb2e57f0f70a023d2ab194fae1
|
Shell
|
ttoocs/scripts
|
/old/clone_cd.sh.old
|
UTF-8
| 2,389
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive CD-ripping helper: reads the disc in /dev/sr0, xz-compresses
# the image and streams it over netcat to a storage host, then compares
# md5sums of disc and image. Pass "1" as $1 to run the legacy code path.

# Make sure the CD device is readable before ripping.
if [ -r /dev/sr0 ] ; then
    echo "CD device is readable"
else
    sudo chmod 664 /dev/sr0
#    ssh localhost -p 443 -l fu-fu "sudo chmod 664 /dev/sr0"
fi

if [ "$1" != "1" ] ; then
#NEW SCRIPT
    echo "Append argument 1 to run the old verson of script"
    echo "Running automatically"

    echo "add any addition things to the name here (or just hit enter, also be ready)"
    read -e info

#GET CD INFO
    # Pull block size, block count and volume id from the ISO header so dd
    # can copy exactly the data area of the disc.
    isoinfo -d -i /dev/sr0 > /tmp/iso.info
    bs=$(cat /tmp/iso.info | grep "Logical block size is" | cut -d " " -f 5)
    c=$( cat /tmp/iso.info | grep "Volume size is:" | cut -d " " -f 4)
    id1=$(cat /tmp/iso.info | grep "Volume id:" | cut -d : -f 2)
    # Strip the leading space, then replace remaining spaces with "_".
    id2=${id1:1}
    id=${id2// /_}
    # NOTE(review): storage host and port are hardcoded.
    ip="192.168.1.101"
    p=48325

#THROW IN THE DATE TO make it less lickly to overwrite
    A=`date +%d-%m-%y`

#See if the additional info had stuff, otherwise ignore.
    # NOTE(review): $info is unquoted in this test — multi-word input makes
    # [ -z ... ] a syntax error; should be [ -z "$info" ].
    if [ -z $info ] ; then
        name=$id.$A.iso.xz
    else
        name=${info// /_}.$id.$A.iso.xz
    fi

    echo "bs: $bs"
    echo "count: $c"
    echo "volume id: $id"
    echo "port: $p"
    echo "final name: $name"
    echo "Is this alright?"

#Verify the thing has enough info with human
    read -e al
    if [ "$al" == "y" ]; then
        echo "starting the clone"
    else
        echo "Enter addition info"
        read -e info
        name=${info// /_}.$id.$A.iso.xz
        echo "Final name is: $name"
    fi

#ACTUALLY CLONING
#SETUP busybox connection for disk stoage
    # Remote listener writes whatever arrives on $p to the rips directory.
    ssh 192.168.1.101 -p 443 -l share "busybox nc -l -p $p > /shared/Shared/cd-rips/$name" &
    sleep 1;
#start cloning
    dd if=/dev/sr0 conv=noerror bs=$bs count=$c | xz -z -c -5 | ncat 192.168.1.101 $p

#VERIFY DISKS
    # Re-read the disc and decompress the remote image; the two md5sums are
    # printed for a manual comparison (not checked automatically).
    (dd if=/dev/sr0 conv=noerror bs=$bs count=$c | md5sum ; echo " << DISK")
    (ssh 192.168.1.101 -p 443 -l share "cat /shared/Shared/cd-rips/$name | xz -d | md5sum" ; echo " << IMAGE" )

    echo "MANUALLY CHECK MD5SUMS"
    wait;
    echo "DONE"
    eject
    beep

else ##OLD SCRIPT STARTS HERE
    echo "Please enter name of disk (will automatically append .iso.xz)"
    read -e disk
    echo $disk.iso.xz

    BB="nc 192.168.1.101"
    P=48325

    #sudo chmod 664 /dev/sr0
    #sudo eject -x 24 /dev/sr0

    # Legacy path: whole-device dd with no block count (reads to device end).
    ssh 192.168.1.101 -p 443 -l share "busybox nc -l -p $P > /shared/Shared/cd-rips/$disk.iso.xz" &
    ( dd if=/dev/sr0 conv=noerror | xz -z -c -5 | ncat 192.168.1.101 $P )  # || ( sudo eject -x 4; dd if=/dev/sr0 bs=4k | xz -z -c -5 | busybox $BB $P )
    #sudo eject -x 24 /dev/sr0
    echo "Probably done, see terminal for errors."
    sudo eject &
    beep
fi
| true
|
66b9b22a8466dba85a9a09e0ace4753cd1df9cc9
|
Shell
|
snelis/dsmr-reader-docker
|
/src/app/run.sh
|
UTF-8
| 5,499
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#set -o errexit
#set -o pipefail
#set -o nounset
#---------------------------------------------------------------------------------------------------------------------------
# VARIABLES
#---------------------------------------------------------------------------------------------------------------------------
# Runtime knobs, all overridable via the environment:
#   DEBUG   - "true" enables xtrace at the bottom of the script
#   COMMAND - optional override command (replaces normal startup entirely)
#   TIMER   - seconds to wait for Postgres before aborting
: "${DEBUG:=false}"
: "${COMMAND:=$@}"
: "${TIMER:=60}"
#---------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS
#---------------------------------------------------------------------------------------------------------------------------
# Leveled, colorized log helpers. Each prints a carriage return, a colored
# tag and the message(s) on stdout; format strings are single-quoted so the
# escapes reach printf verbatim (identical bytes to the old double-quoted
# "\\r"/"\\033" forms).
_info  () { printf '\r[ \033[00;34mINFO\033[0m ] %s\n' "$@"; }
_warn  () { printf '\r\033[2K[ \033[0;33mWARN\033[0m ] %s\n' "$@"; }
_error () { printf '\r\033[2K[ \033[0;31mFAIL\033[0m ] %s\n' "$@"; }
_debug () { printf '\r[ \033[00;37mDBUG\033[0m ] %s\n' "$@"; }
# Sanity checks and filesystem preparation that must run before anything
# else; exits 1 when the mandatory web credentials are missing.
function _pre_reqs() {
    _info "Checking if the DSMR web credential variables have been set..."
    if [[ -z "${DSMR_USER}" ]] || [[ -z "${DSMR_EMAIL}" ]] || [[ -z "${DSMR_PASSWORD}" ]]; then
        _error "DSMR web credentials not set. Exiting..."
        exit 1
    fi

    _info "Fixing /dev/ttyUSB* security..."
    # Only chmod when ttyUSB0 exists (some deployments have no serial port).
    [[ -e '/dev/ttyUSB0' ]] && chmod 666 /dev/ttyUSB*

    _info "Removing existing PID files..."
    rm -f /var/tmp/*.pid

    _info "Creating log directory..."
    mkdir -p /var/log/supervisor/
}
# If COMMAND is set (built from "$@" at the top of the script), replace this
# process with that command instead of the normal startup sequence.
function _override_entrypoint() {
    if [[ -n "${COMMAND}" ]]; then
        _info "ENTRYPOINT: Executing override command..."
        # BUG FIX: COMMAND is one flat string, so it must be word-split back
        # into program + arguments. The previous exec "${COMMAND}" looked for
        # a binary literally named e.g. "bash -c foo" and always failed for
        # multi-word overrides.
        # shellcheck disable=SC2086
        exec ${COMMAND}
    fi
}
# Poll Postgres with pg_isready (1s connect timeout per probe) until it
# answers or TIMER seconds elapse; exits 1 on timeout.
function _check_db_availability() {
    _info "Verifying if Postgres in running..."

    cmd=$(command -v pg_isready)
    cmd="${cmd} -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} -d ${DB_NAME} -t 1"

    # ${cmd} is intentionally unquoted: it holds the program plus its
    # arguments and must word-split.
    while ! ${cmd} >/dev/null 2>&1; do
        TIMER=$((TIMER-1))
        sleep 1
        if [[ "${TIMER}" -eq 0 ]]; then
            _error "Could not connect to database server. Aborting..."
            exit 1
        fi
        # Progress dot per failed probe.
        echo -n "."
    done
}
# Inject or refresh the optional backend/datalogger sleep settings in the
# DSMR Django settings module, driven by DSMR_BACKEND_SLEEP and
# DSMR_DATALOGGER_SLEEP.
function _set_throttle() {
    if [[ -n "${DSMR_BACKEND_SLEEP}" ]] ; then
        if grep 'DSMRREADER_BACKEND_SLEEP' /dsmr/dsmrreader/settings.py; then
            _info "Setting DSMRREADER_BACKEND_SLEEP already present, replacing values..."
            # BUG FIX: the original sed -i had no target file and would sit
            # reading stdin instead of editing settings.py.
            sed -i "s/DSMRREADER_BACKEND_SLEEP=.*/DSMRREADER_BACKEND_SLEEP=${DSMR_BACKEND_SLEEP}/g" /dsmr/dsmrreader/settings.py
        else
            _info "Adding setting DSMRREADER_BACKEND_SLEEP..."
            sed -i "/# Default settings/a DSMRREADER_BACKEND_SLEEP=${DSMR_BACKEND_SLEEP}" /dsmr/dsmrreader/settings.py
        fi
    fi

    if [[ -n "${DSMR_DATALOGGER_SLEEP}" ]] ; then
        if grep 'DSMRREADER_DATALOGGER_SLEEP' /dsmr/dsmrreader/settings.py; then
            _info "Setting DSMRREADER_DATALOGGER_SLEEP already present, replacing values..."
            # BUG FIX: same missing-target-file defect as above.
            sed -i "s/DSMRREADER_DATALOGGER_SLEEP=.*/DSMRREADER_DATALOGGER_SLEEP=${DSMR_DATALOGGER_SLEEP}/g" /dsmr/dsmrreader/settings.py
        else
            _info "Adding setting DSMRREADER_DATALOGGER_SLEEP..."
            sed -i "/# Default settings/a DSMRREADER_DATALOGGER_SLEEP=${DSMR_DATALOGGER_SLEEP}" /dsmr/dsmrreader/settings.py
        fi
    fi
}
# Run Django migrations, collect static assets, and ensure the superuser
# described by DSMR_USER/DSMR_EMAIL/DSMR_PASSWORD exists (idempotent).
function _run_post_config() {
    _info "Running post configuration..."
    cmd=$(command -v python3)
    "${cmd}" manage.py migrate --noinput
    "${cmd}" manage.py collectstatic --noinput
    # NOTE(review): the credentials are interpolated straight into Python
    # source below — a single quote in any value breaks the snippet; verify
    # input or switch to environment lookups inside the heredoc.
    "${cmd}" manage.py shell -i python << PYTHON
from django.contrib.auth.models import User
if not User.objects.filter(username='${DSMR_USER}'):
    User.objects.create_superuser('${DSMR_USER}', '${DSMR_EMAIL}', '${DSMR_PASSWORD}')
    print('${DSMR_USER} created')
else:
    print('${DSMR_USER} already exists')
PYTHON
}
# When ENABLE_HTTP_AUTH=true, generate /etc/nginx/htpasswd from
# HTTP_AUTH_USERNAME/HTTP_AUTH_PASSWORD, enable auth_basic in the site
# config and validate the resulting nginx configuration.
function _generate_auth_configuration() {
    _info "Checking for HTTP AUTHENTICATION configuration..."
    if [[ ! -z "${ENABLE_HTTP_AUTH}" ]]; then
        if [[ "${ENABLE_HTTP_AUTH}" = true ]] ; then
            _info "ENABLE_HTTP_AUTH is enabled, let's secure this!"
            canWeContinue=true
            if [[ -z "${HTTP_AUTH_USERNAME}" ]]; then
                _warn "Please provide a HTTP_AUTH_USERNAME"
                canWeContinue=false
            fi
            if [[ -z "${HTTP_AUTH_PASSWORD}" ]]; then
                _warn "Please provide a HTTP_AUTH_PASSWORD"
                canWeContinue=false
            fi
            if [[ "${canWeContinue}" = false ]] ; then
                _error "Cannot generate a valid .htpasswd file, please check above warnings."
                exit 1
            fi
            _info "Generating htpasswd..."
            printf ${HTTP_AUTH_USERNAME}":$(openssl passwd -apr1 "${HTTP_AUTH_PASSWORD}")\n" > /etc/nginx/htpasswd
            _info "Done! Enabling the configuration in NGINX..."
            sed -i "s/## auth_basic/   auth_basic/" /etc/nginx/conf.d/dsmr-webinterface.conf
            # BUG FIX: the old check ran nginx's *output* as a command via a
            # nested $($(...)) substitution and then compared the result with
            # the string operator ">" inside [[ ]], so config errors were
            # never detected reliably. Test the exit status directly instead.
            if ! nginx -c /etc/nginx/nginx.conf -t >/dev/null 2>&1; then
                _error "NGINX configuration error"
                exit 1
            fi
            _info "HTTP AUTHENTICATION configured and enabled"
            return
        fi
    fi
    _info "ENABLE_HTTP_AUTH is disabled, nothing to see here."
}
# Launch supervisord in the foreground (-n) so it becomes the long-running
# container process; everything else is managed by its programs.
function _start_supervisord() {
    _info "Starting supervisord..."
    _info "Logfiles can be found at: /var/log/supervisor/*.log and /tmp/supervisord.log"
    "$(command -v supervisord)" -n
}
#---------------------------------------------------------------------------------------------------------------------------
# MAIN
#---------------------------------------------------------------------------------------------------------------------------
# Trace every command when DEBUG=true, then run the startup pipeline in
# dependency order (checks -> optional override -> DB wait -> config -> run).
[[ "${DEBUG}" == 'true' ]] && set -o xtrace

_pre_reqs
_override_entrypoint
_check_db_availability
_set_throttle
_run_post_config
_generate_auth_configuration
_start_supervisord
| true
|
42e2941c00fbfbc994e933f7ba565fd386ad4d39
|
Shell
|
bcgov/jag-shuber-frontend
|
/openshift/deploy-oc-frontend.sh
|
UTF-8
| 441
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy the frontend application template into the currently selected
# OpenShift project, after an interactive Y/n confirmation.

SCRIPT_DIR=$(dirname "$0")

echo "You should log into OpenShift and select your project before running this script."

read -p "Continue? (Y/n): " ok
# Default to "y" and normalise to lower case so "Y" and "y" both confirm.
ok=${ok:-y}
ok=$(echo "$ok" | awk '{print tolower($0)}')

# Template parameters for `oc process`. (The original assigned params=""
# first and immediately overwrote it — the dead assignment is removed.)
params="-p IMAGE_NAMESPACE=tools"

if [ "$ok" == "y" ]; then
    echo "Deploying frontend application"
    # $params is intentionally unquoted so each "-p KEY=VALUE" pair becomes
    # a separate argument to `oc process`.
    oc process -f "$SCRIPT_DIR/templates/frontend/frontend-deploy.json" $params | oc create -f -
else
    exit 0
fi
| true
|
87d15fde4243171f5ac3454db9f86eba6759044b
|
Shell
|
octaspire/dern
|
/dev/etc/build_amalgamation.sh
|
UTF-8
| 1,725
| 3.515625
| 4
|
[
"Apache-2.0",
"ISC"
] |
permissive
|
#!/usr/bin/env sh
# Build the octaspire/dern amalgamation for the current platform by
# dispatching to the matching script under release/how-to-build. An optional
# first argument enables coverage and is forwarded to the build script.

UNAME="$(uname)"

if [ "$#" -gt 0 ]; then
    echo "-- Building with coverage ($1) for $UNAME..."
else
    echo "-- Building for $UNAME..."
fi

# Run one platform build script from the release directory, silencing its
# output and printing a status line on success. $2 (the forwarded coverage
# argument) is expanded unquoted exactly like the original "$1" was, so an
# empty argument passes nothing to the child script.
run_build() {
    cd release && sh "how-to-build/$1" $2 > /dev/null && echo "OK Done."
}

case "$UNAME" in
    Linux)     run_build linux.sh "$1" ;;
    Darwin)    run_build macOS.sh "$1" ;;
    OpenBSD)   run_build OpenBSD.sh "$1" ;;
    FreeBSD)   run_build FreeBSD.sh "$1" ;;
    NetBSD)    run_build NetBSD.sh "$1" ;;
    Minix)     run_build minix3.sh "$1" ;;
    DragonFly) run_build DragonFlyBSD.sh "$1" ;;
    Haiku)
        # Haiku ships both 32- and 64-bit variants; pick by machine type.
        if uname -a | grep -q x86_64; then
            run_build haiku-x86_64.sh "$1"
        else
            run_build haiku.sh "$1"
        fi
        ;;
    AROS)      run_build AROS.sh "$1" ;;
    AmigaOS)   run_build AmigaOS41.sh "$1" ;;
    *)
        echo "!! This platform is not handled by Makefile at the moment.";
        echo "!! Please build using a script from 'release/how-to-build'.";
        ;;
esac
| true
|
c147c71549597d9badf63c6af0d29c92be698389
|
Shell
|
coplate/KindleJailbreakHotfix
|
/src/install-dispatch.sh
|
UTF-8
| 1,699
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Kindle Oasis Dispatch Installer
#
# $Id: install-dispatch.sh 13351 2016-07-11 18:01:40Z NiLuJe $
#
##

HACKNAME="jb_dispatch"

# Pull libOTAUtils for logging & progress handling
[ -f ./libotautils5 ] && source ./libotautils5

# Hack specific stuff
MKK_PERSISTENT_STORAGE="/var/local/mkk"
MKK_BACKUP_STORAGE="/mnt/us/mkk"

## Here we go :)
otautils_update_progressbar

# Install the dispatch
# Install the dispatch script where the OTA framework expects it.
logmsg "I" "install" "" "Installing the dispatch"
cp -f dispatch "/usr/bin/logThis.sh"
chmod a+x "/usr/bin/logThis.sh"

otautils_update_progressbar

# Make sure we have enough space left (>512KB) in /var/local first...
# (awk filters to the data row and prints the "available" column in KB.)
logmsg "I" "install" "" "checking amount of free storage space..."
if [ "$(df -k /var/local | awk '$3 ~ /[0-9]+/ { print $4 }')" -lt "512" ] ; then
    logmsg "C" "install" "code=1" "not enough space left in varlocal"
    # Cleanup & exit w/ prejudice
    rm -f dispatch
    # NOTE(review): top-level `return` only works when this script is
    # *sourced* by the updater framework — confirm the invocation style.
    return 1
fi

otautils_update_progressbar

# Make sure we have an up to date persistent copy of MKK...
logmsg "I" "install" "" "Creating MKK persistent storage directory"
mkdir -p "${MKK_PERSISTENT_STORAGE}"

otautils_update_progressbar

logmsg "I" "install" "" "Storing dispatch script"
cp -af "/usr/bin/logThis.sh" "${MKK_PERSISTENT_STORAGE}/dispatch.sh"

otautils_update_progressbar

logmsg "I" "install" "" "Setting up backup storage"
# NOTE: Don't wreck the job the bridge install has just done (we're guaranteed to run *after* the bridge install).
mkdir -p "${MKK_BACKUP_STORAGE}"
cp -f "${MKK_PERSISTENT_STORAGE}/dispatch.sh" "${MKK_BACKUP_STORAGE}/dispatch.sh"

otautils_update_progressbar

# Cleanup
rm -f dispatch

logmsg "I" "install" "" "done"

otautils_update_progressbar

return 0
| true
|
269073db89c90d4bd86a73ca24188fc33b70ad9b
|
Shell
|
lirsacc/dotfiles
|
/.zshrc
|
UTF-8
| 3,366
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# Personal zsh configuration: locale, pager colors, Homebrew paths and
# build flags, tool hooks (direnv/asdf/starship), history behaviour.

autoload -Uz compinit

export TERM="xterm-256color"
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export EDITOR="vim"
export VISUAL_EDITOR="code -a -w"

# Avoid issues with `gpg` as installed via Homebrew.
# https://stackoverflow.com/a/42265848/96656
export GPG_TTY=(tty)

# Highlight section titles in manual pages
# highlighting inside manpages and elsewhere
export LESS_TERMCAP_mb=\e'[01;31m'       # begin blinking
export LESS_TERMCAP_md=\e'[01;38;5;74m'  # begin bold
export LESS_TERMCAP_me=\e'[0m'           # end mode
export LESS_TERMCAP_se=\e'[0m'           # end standout-mode
export LESS_TERMCAP_so=\e'[38;5;246m'    # begin standout-mode - info box
export LESS_TERMCAP_ue=\e'[0m'           # end underline
export LESS_TERMCAP_us=\e'[04;38;5;146m' # begin underline

# Don’t clear the screen after quitting a manual page
export MANPAGER="less -X"

# Always use color output for `ls`
export LS_COLORS='no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:'

# Homebrew: quiet, reproducible, Apple-silicon prefix.
export HOMEBREW_NO_AUTO_UPDATE=1
export HOMEBREW_BUILD_FROM_SOURCE=0
export HOMEBREW_NO_EMOJI=1
export HOMEBREW_NO_ANALYTICS=1
export HOMEBREW_PREFIX="/opt/homebrew"
export HOMEBREW_CELLAR="$HOMEBREW_PREFIX/Cellar"
export HOMEBREW_REPOSITORY="/opt/homebrew"

# Prefer GNU userland tools over the BSD ones shipped with macOS.
export PATH="\
$HOMEBREW_PREFIX/bin:\
$HOMEBREW_PREFIX/sbin:\
$HOMEBREW_PREFIX/opt/coreutils/libexec/gnubin:\
$HOMEBREW_PREFIX/opt/gnu-sed/libexec/gnubin:\
$HOMEBREW_PREFIX/opt/gnu-tar/libexec/gnubin:\
$HOMEBREW_PREFIX/opt/gnu-which/libexec/gnubin:\
$HOMEBREW_PREFIX/opt/grep/libexec/gnubin:\
$HOMEBREW_PREFIX/opt/make/libexec/gnubin:\
$PATH"

export MANPATH="$HOMEBREW_PREFIX/share/man:$MANPATH"
export INFOPATH="$HOMEBREW_PREFIX/share/info:$INFOPATH"

export PKG_CONFIG_PATH="\
$HOMEBREW_PREFIX/opt/curl/lib/pkgconfig \
$PKG_CONFIG_PATH"

export LDFLAGS="\
-L$HOMEBREW_PREFIX/lib \
-L$HOMEBREW_PREFIX/opt/curl/lib \
$LDFLAGS"

# NOTE(review): this LDFLAGS export duplicates the one just above — the
# same -L flags end up in LDFLAGS twice; probably unintentional.
export LDFLAGS="\
-L$HOMEBREW_PREFIX/lib \
-L$HOMEBREW_PREFIX/opt/curl/lib \
$LDFLAGS"

_CFLAGS="\
-I$HOMEBREW_PREFIX/include \
-I$HOMEBREW_PREFIX/opt/curl/include \
"

export CFLAGS="$_CFLAGS $CFLAGS"
export CPPFLAGS="$_CFLAGS $CPPFLAGS"

export PATH="$HOME/.cargo/bin:$PATH"
export PATH="$HOME/bin:$PATH"

# Use ripgrep as the fzf source when available.
command -v rg >/dev/null && export FZF_DEFAULT_COMMAND='rg --files --hidden'

eval "$(direnv hook zsh)"
source "$HOMEBREW_PREFIX/opt/asdf/libexec/asdf.sh"
eval "$(starship init zsh)"

[[ -f "${HOME}/.config/.aliases.sh" ]] && source "${HOME}/.config/.aliases.sh"
[[ -f "${HOME}/.local.zsh" ]] && source "${HOME}/.local.zsh"

unsetopt CORRECT

HISTDUP=erase          # Erase duplicates in the history file
setopt appendhistory   # Append history to the history file (no overwriting)
setopt sharehistory    # Share history across terminals
setopt incappendhistory # Immediately append to the history file, not just when a term is killed

bindkey '^U' backward-kill-line
| true
|
ac166abdeb4987655718ce0fcf69b1ed5c536ec3
|
Shell
|
Mr-Nobody-dey/CryOceanQCV
|
/code/gen_daily_report/check_empty.sh
|
UTF-8
| 401
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# NOTE(review): this file looks like two unrelated snippets pasted together
# (a bash file-size check plus a second "#! /bin/sh" FTP mirror script);
# neither appears functional as written — confirm intent before use.

# NOTE(review): interactive ftp with credentials embedded in the URL — this
# blocks waiting for input and exposes the password; prefer ~/.netrc.
ftp ftp://cryosat353:NoHtpQvL@science-pds.cryosat.esa.int

# NOTE(review): nothing above creates text.txt, so this check only reports
# "has data" if the file happened to exist beforehand.
FILE=text.txt
if [[ -s $FILE ]] ; then
    echo "$FILE has data."
else
    echo "$FILE is empty."
fi ;

#! /bin/sh
HOST='servername'
USER='username'
PASSWD='password'
LOCAL_FILES='/local/dir'

# Mirror DIR01 down (skip files that already exist locally), then upload
# the local directory contents to DIR02.
wget ftp://$HOST/DIR01/* -nc --ftp-user=$USER --ftp-password=$PASSWD
wput --disable-tls --basename=$LOCAL_FILES/ $LOCAL_FILES/* ftp://$USER:$PASSWD@$HOST/DIR02/
| true
|
f33529046c8ac8353a6bed09f77d1d1fa8c6c47f
|
Shell
|
vitorta0506/challenge-delta
|
/k8s/painel_k8s.sh
|
UTF-8
| 1,126
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive menu for managing the "delta" project's Minikube cluster.

# Main menu: print the options, read a choice and dispatch to the matching
# action; unknown input re-displays the menu.
Principal() {
    echo "Start Cluster Minikube - Projeto Delta"
    echo
    echo "1. Iniciar o cluster projeto delta"
    echo "2. Desativar o cluster projeto delta"
    echo "3. Verificar URL do cluster"
    echo "4. Sair do menu"
    echo
    echo -n "Qual a opção desejada? "
    read opcao
    case $opcao in
        1) Iniciar ;;
        2) Desativar ;;
        3) Listaurl ;;
        4) exit ;;
        # BUG FIX: the fallback arm was missing "echo" and attempted to
        # execute the message string as a command.
        *) echo "Opção desconhecida." ; echo ; Principal ;;
    esac
}
# Deploy the delta stack: map the minikube IP to "k8sdelta" in /etc/hosts,
# create the namespace and apply secrets, database, app and ingress
# manifests, then return to the menu.
Iniciar() {
    clear
    echo Realizando Deploy
    # NOTE(review): this appends a new hosts entry on every run, so
    # duplicate lines accumulate in /etc/hosts — consider de-duplicating.
    sudo -- sh -c "echo $(minikube ip) k8sdelta >> /etc/hosts"
    kubectl create namespace delta
    kubectl apply -f secrets.yaml --namespace=delta
    kubectl apply -f dbdelta.yaml --namespace=delta
    # Give the database a head start before the application comes up.
    sleep 10
    kubectl apply -f nodedelta.yaml --namespace=delta
    kubectl apply -f nginx-ingress.yaml --namespace=delta
    Principal
}
# Tear down the delta stack by deleting its whole namespace, then return
# to the menu.
Desativar() {
    clear
    echo Desativando o cluster delta
    kubectl delete ns delta
    Principal
}
# Print the two URLs the service is reachable on (hosts alias and raw
# minikube IP), then return to the menu.
Listaurl() {
    clear
    echo ---------
    echo http://k8sdelta/packages
    echo http://"$(minikube ip)"/packages
    echo ---------
    Principal
}

# Entry point: show the menu.
Principal
| true
|
b37238def1bf7627dd2960c6d36e2786cf18cbd5
|
Shell
|
avionic-design/pbs-stage2
|
/scripts/buildjail/copy-files
|
UTF-8
| 277
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# Copy a list of files (read from stdin, one absolute path per line) into a
# target root, recreating each file's directory structure underneath it.
# As before, an empty line (or EOF) terminates the list.

if [ "x$1" = "x" ]; then
    echo "usage: $0 target"
    exit 1
fi

target=$1

# IFS= and -r preserve leading whitespace and backslashes in paths; every
# expansion is quoted so paths containing spaces are copied correctly
# (the original unquoted $line broke on such paths).
while IFS= read -r line && [ -n "$line" ]; do
    directory=$(dirname "$line")
    basename=$(basename "$line")
    mkdir -p "${target}${directory}"
    # -d keeps symlinks as links, -p preserves mode/ownership/timestamps.
    cp -dp "$line" "${target}${directory}/${basename}"
done
| true
|
bf6b738392ec6492e9becb692f5607525b6c64d6
|
Shell
|
ekunish/dotfiles
|
/bin/mac.sh
|
UTF-8
| 697
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# macOS post-install tweaks: install Rosetta, make Finder show everything,
# show battery percentage, then restart the affected apps.

# Rosetta
softwareupdate --install-rosetta --agree-to-license

# Show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool true

# Show files with all extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true

# Display the status bar
defaults write com.apple.finder ShowStatusBar -bool true

# Display the path bar
defaults write com.apple.finder ShowPathbar -bool true

# ====================
#
# SystemUIServer
#
# ====================

# Display battery level in the menu bar
defaults write com.apple.menuextra.battery ShowPercent -string "YES"

# Restart the affected apps so the new defaults take effect immediately;
# killall errors (app not running) are silenced.
for app in "Dock" \
    "Finder" \
    "SystemUIServer"; do
    killall "${app}" &> /dev/null
done
| true
|
4154f2246da9376fda7861e40459b054478e6c77
|
Shell
|
Iheve/docker-serf-reverse-proxy
|
/front-end-ssl-demo/bundle/serf-config/handlers/leave.sh
|
UTF-8
| 351
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Serf "leave" event handler: for every departed member with role "be",
# remove its generated nginx vhost and reload nginx.
# Stdin: one member per line — "name address role tags".

while read -r line; do
    # BUG FIX: the awk programs contained a stray backslash before $3/$4
    # ('{print \$3 }') inside single quotes, which is not valid awk source;
    # printf+quoting also keeps odd member lines intact.
    ROLE=$(printf '%s\n' "$line" | awk '{print $3}')
    if [ "x${ROLE}" != "xbe" ]; then
        continue
    fi
    # The service name is carried in the tags field as "servicename=...".
    SERVICE_NAME=$(printf '%s\n' "$line" | awk '{print $4}' | \
        sed -e 's/.*servicename=\([a-zA-Z0-9.\-]*\).*/\1/')
    FILE=/etc/nginx/sites-enabled/$SERVICE_NAME
    rm -f "$FILE"
done

/usr/sbin/nginx -s reload
| true
|
36f932c561014ab44f6c59a76677fa1bee28c2bc
|
Shell
|
melnikaite/app-server-arena
|
/cookbooks/thin/templates/default/thin_control.sh.erb
|
UTF-8
| 1,401
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Thin control script for Engine Yard Cloud. Install via custom chef recipes:
# https://support.cloud.engineyard.com/entries/21009867-Customize-Your-Environment-with-Chef-Recipes
# https://support.cloud.engineyard.com/entries/21406977-Custom-Chef-Recipes-Examples-Best-Practices
# Author: J. Austin Hughey <jhughey@engineyard.com>
# I stood on the shoulders of the following giants to write this script:
# - Wayne E. Seguin
# - Scott M. Likens
# - Ben Burkert
# - Glenn Davy
# - Kevin Rutten
# - Dennis Bell
#
# NOTES:
# - THIS SCRIPT IS NOT FULLY TESTED, IS MISSING FEATURES, AND IS GENERALLY NOT PRODUCTION READY.
# - Use at your own risk. No warranties, expressed or implied, yadda yadda.
# This is an ERB template: <%= @app_name %> is substituted at render time.

# Make sure we're not running as root.
if [[ $(id -u) -eq 0 ]]; then
    echo "ERROR: This script must be run as a user, not as root." 2>&1
    exit 1
fi

# Pull in the app environment ($THIN_EXEC, $RACK_ENV, $THIN_CONFIG,
# $application are expected to be defined by these files).
source /data/<%= @app_name %>/shared/config/env
source /data/<%= @app_name %>/shared/config/env.custom

# Start thin from the app's current release with the configured environment.
start_thin()
{
    cd "/data/${application}/current"
    $THIN_EXEC start -e $RACK_ENV -C $THIN_CONFIG
}

# Restart the running thin cluster.
restart_thin()
{
    cd "/data/${application}/current"
    $THIN_EXEC restart -e $RACK_ENV -C $THIN_CONFIG
}

# Stop the running thin cluster.
stop_thin()
{
    cd "/data/${application}/current"
    $THIN_EXEC stop -C $THIN_CONFIG
}

# Take appropriate action depending on the argument
case "$1" in
    restart)
        restart_thin
    ;;
    start)
        start_thin
    ;;
    stop)
        stop_thin
    ;;
    *)
        echo "Usage: $0 {start|stop|restart}"
        exit 1
    ;;
esac

exit $?
| true
|
97a339ef6bad953cad6d0f4d0f3f118c1840b6d5
|
Shell
|
CoderXXL/safedockerdown
|
/maninthemiddle.sh
|
UTF-8
| 178
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# docker-compose wrapper: intercept a bare "down" and run the safedockerdown
# safety script instead; pass every other invocation straight through.

# BUG FIX: the original flattened all arguments into one string
# (command="docker-compose ${@}") and then ran `exec $command`, which
# destroys argument boundaries (quoted arguments with spaces break).
# Compare the arguments directly and forward them with "$@".
if [ $# -eq 1 ] && [ "$1" = "down" ]; then
    /bin/bash "/home/${USER}/.safedockerdown/safedockerdown.sh"
else
    exec docker-compose "$@"
fi
| true
|
e58400ec81b7bd0fd044c4cae31ee35c3c055b51
|
Shell
|
bridgecrewio/checkov
|
/kubernetes/run_checkov.sh
|
UTF-8
| 1,238
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh

################################################################################
# Download all Kubernetes resources and run checkov against them
################################################################################

# kubectl api-resources --verbs=list --namespaced -o name | xargs -n1 -I{} bash -c "kubectl get {} --all-namespaces -oyaml && echo ---"

# Resource kinds to export; one YAML dump file per kind.
RESOURCES="clusterroles
clusterrolebindings
configmaps
cronjobs
daemonsets
deployments
endpoints
horizontalpodautoscalers
ingresses
jobs
limitranges
networkpolicies
poddisruptionbudgets
pods
podsecuritypolicies
replicasets
replicationcontrollers
resourcequotas
roles
rolebindings
serviceaccounts
services
statefulsets"

# yq strips objects that have ownerReferences (i.e. resources generated by
# controllers) so only top-level, user-managed resources are scanned.
for resource in $RESOURCES;
do
    kubectl get $resource --all-namespaces -oyaml | yq eval 'del(.items[] | select(.metadata.ownerReferences)) ' - > /data/runtime.${resource}.yaml
done

# With a Bridgecrew API key on disk, upload results under the configured
# repo id (default "runtime/unknown"); otherwise run checkov locally only.
if [ -f /etc/checkov/apikey ]; then
    apikey=$(cat /etc/checkov/apikey)
    if [ -f /etc/checkov/repoid ]; then
        repoid=$(cat /etc/checkov/repoid)
    else
        repoid="runtime/unknown"
    fi
    checkov -s -d /data --bc-api-key "$apikey" --repo-id "$repoid" --branch runtime --framework kubernetes "$@"
else
    checkov -s -d /data --framework kubernetes "$@"
fi
| true
|
e1fb0b2dad30d87c940efeb5574d77c1f263214c
|
Shell
|
msllp/MSFRAME_Doc
|
/build/api.sh
|
UTF-8
| 537
| 2.90625
| 3
|
[] |
permissive
|
#!/bin/bash
# Build the Laravel framework API documentation with Sami and publish it
# to the site's public/api directory.

base=/home/forge/laravel.com
sami=${base}/build/sami

# Remove Sami build artifacts and the temporary framework checkout
# (shared by the pre- and post-build cleanup below).
cleanup() {
    rm -rf "${sami}/build" "${sami}/cache" "${sami}/laravel"
}

# Guard the cd: without it a failed change of directory would run composer
# and the rm/cp steps from whatever directory we happened to be in.
cd "$sami" || exit 1

composer install

# Cleanup Before
cleanup

# Run API Docs
git clone https://github.com/laravel/framework.git "${sami}/laravel"
"${sami}/vendor/bin/sami.php" update "${sami}/sami.php"

# Delete old directory before copying new one
rm -rf "${base}/public/api"

# Copy new docs to public path
cp -R "${sami}/build" "${base}/public/api"

# Cleanup After
cleanup
| true
|
8ac05e6a2018a0824bff897c03ce20f59618cae3
|
Shell
|
Guan06/DADA2_pipeline
|
/step3_get_ASV_table.sh
|
UTF-8
| 1,529
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# scripts for demultiplexing raw sequencing data for 16S
# Builds ASV tables for the enabled kingdoms and converts each tab-separated
# ASV map into a FASTA file; Oomycetes additionally get RDP classification.

# exits whenever a function returns 1
set -e

# load functions
scripts_dir=$(dirname "$0")
source $scripts_dir/activate.sh
source $scripts_dir/config.sh

# Append a UTC-timestamped message to the step log.
log() {
    echo $(date -u)": "$1 >> $logfile
}

mkdir -p $working_dir
working_dir_s2=$working_dir/02.dada2
working_dir=$working_dir/03.ASV_tab
output=$working_dir/"output.txt"
logfile=$working_dir/"log.txt"

mkdir -p $working_dir
#rm -f -r $working_dir/*

# NOTE: the original wrapped each sed pipeline in backticks with `less`,
# which executed the (empty) substitution output as a command; sed now
# reads the map file directly and writes the FASTA.
if ($bacteria)
then
    mkdir -p $working_dir/Bacteria
    $scripts_dir/get_ASV_tab_bacteria.R \
        $working_dir_s2/ $working_dir/Bacteria/
    # ASV_map.txt is "ASV<TAB>sequence"; prefix ">" and split onto two lines.
    sed 's/ASV/>ASV/; s/\t/\n/' "$working_dir/Bacteria/ASV_map.txt" \
        > "$working_dir/Bacteria/ASV.fasta"
fi

if ($fungi)
then
    mkdir -p $working_dir/Fungi
    $scripts_dir/get_ASV_tab_fungi.R \
        $working_dir_s2/ $working_dir/Fungi/
    sed 's/ASV/>ASV/; s/\t/\n/' "$working_dir/Fungi/ASV_map.txt" \
        > "$working_dir/Fungi/ASV.fasta"
fi

if ($oomycetes)
then
    mkdir -p $working_dir/Oomycetes
    $scripts_dir/get_ASV_tab_oomycetes.R \
        $working_dir_s2/ $working_dir/Oomycetes/
    sed 's/ASV/>ASV/; s/\t/\n/' "$working_dir/Oomycetes/ASV_map.txt" \
        > "$working_dir/Oomycetes/ASV.fasta"

    # Taxonomic assignment of the oomycete ASVs with the RDP classifier.
    java -Xmx1g -jar \
        /biodata/dep_psl/grp_psl/thiergart/RDPTools/classifier.jar \
        -c 0.5 -t \
        $scripts_dir/tax_oomycetes/rRNAClassifier.properties \
        -o $working_dir/Oomycetes/ASV_taxonomy.txt \
        $working_dir/Oomycetes/ASV.fasta
fi
| true
|
89d315e0f0012bdf7c65d4e7ebc98905715efc57
|
Shell
|
AmadeusITGroup/Assistive-Webdriver
|
/vagrant/win10-chromium-nvda/download.sh
|
UTF-8
| 1,080
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepare the win10-chromium-nvda Vagrant box assets: pack the required
# workspace components into tarballs, download external files, unzip the box.

set -e

COMPONENTS_FOLDER="$(cd ../../components && pwd)"
COMPONENTS_TO_INCLUDE="assistive-playwright-server text-to-socket-engine tcp-web-listener"

# Verify at least one built TextToSocketEngine*.dll is present; fails fast
# with build instructions otherwise.
function checkDLL() {
    local DLL=0
    for i in "$COMPONENTS_FOLDER"/text-to-socket-engine/TextToSocketEngine*.dll ; do if [ -f "$i" ]; then DLL=$((DLL+1)); fi; done
    if [ "$DLL" == "0" ] ; then
        echo 'Missing TextToSocketEngine*.dll in node_modules/text-to-socket-engine'
        echo 'Please make sure you have built text-to-socket-engine and have run "pnpm install"'
        return 1
    fi
}

cd software

# Pack each component with pnpm once; existing tarballs are reused.
for component in $COMPONENTS_TO_INCLUDE ; do
    if ! [ -f "$component.tgz" ] ; then
        echo "Creating $component.tgz..."
        if [ "$component" == "text-to-socket-engine" ]; then
            checkDLL
        fi
        ( cd "$COMPONENTS_FOLDER/$component" && pnpm pack )
        mv "$COMPONENTS_FOLDER/$component/$component"-*.tgz "$component.tgz"
    else
        echo "OK: $component.tgz"
    fi
done

# Fetch external downloads (aria2: 5 connections, keep existing names).
aria2c -V --auto-file-renaming false -x 5 -i ../urls.txt

# Extract the Vagrant box once.
if ! [ -f "MSEdge - Win10.box" ]; then
    unzip "MSEdge.Win10.Vagrant.zip"
fi
| true
|
aeead6066feb1da886f2d4ccb5d818b7af893e0f
|
Shell
|
mishin/gorod_io
|
/etc/restore_mysql_dump.sh
|
UTF-8
| 538
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#Script restores last mysqldump for for go.mail.ru blog.
# Usage: restore_mysql_dump.sh DUMP_FILE

# FIXME: credentials should come from a protected option file (~/.my.cnf),
# not be hardcoded here.
db_user=root
db_host="localhost"
db_name="gorod_in"
db_password=""

# Validate the argument before touching the database (the original piped an
# empty/unreadable path straight into mysql).
if [ $# -lt 1 ] || [ ! -r "$1" ]; then
    echo "Usage: $0 DUMP_FILE (must exist and be readable)"
    exit 1
fi
dump_file=$1

#Trying to restore dump
#mysql -h $db_host -u $db_user -p$db_password $db_name < $dump_file
if ! mysql -h "$db_host" -u "$db_user" "$db_name" < "$dump_file"; then
    #If something wrong
    echo "[`date +%F--%H-%M`] Failed. Restoring database failed."
    exit 1
fi

#It's all ok
echo "[`date +%F--%H-%M`] Backup database [$db_name] was successfully restored from file $dump_file"
| true
|
0154eaa6d113d3c424d91327763f9e1e0968587a
|
Shell
|
archlinuxfr/afur
|
/scripts/watch.sh
|
UTF-8
| 4,965
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Watch the AFUR upload FTP directory and the published package tree, and
# keep the 'archlinuxfr' pacman repositories in sync.
SOFT_DIR="$HOME/afur"
# Where users upload package / source / signature archives.
UPLOAD_DIR="$SOFT_DIR/ftp"
# Scratch space for unpacking and validating uploads.
WORK_DIR="$HOME/tmp/afur"
# Supported architectures; 'any' packages are symlinked into each of these.
ARCH=('i686' 'x86_64' 'arm')
PKG_DIR="$SOFT_DIR/html/pkg"
PKGBUILD_DIR="$SOFT_DIR/html/pkgbuild"
REPO_NAME='archlinuxfr'
# Helper that validates a .PKGINFO against the uploading user and prints
# the package architecture on stdout (see scripts/parse_file).
PARSE_CMD="$SOFT_DIR/scripts/parse_file"
# -q: quiet mode, disables the log() helper below.
[ -n "$1" ] && [ "$1" = "-q" ] && VERBOSE=0 || VERBOSE=1
[ -d "$WORK_DIR" ] || mkdir -p "$WORK_DIR" || exit 1
# Remove a stale shutdown marker from a previous run (see safe_quit).
[ -e "$UPLOAD_DIR/exit" ] && rm "$UPLOAD_DIR/exit"
# Make sure every per-arch repository directory exists.
for arch in "${ARCH[@]}"
do
[ -d "$PKG_DIR/$arch" ] || mkdir -p "$PKG_DIR/$arch" || exit 1
done
[ -d "$PKG_DIR/any" ] || mkdir -p "$PKG_DIR/any" || exit 1
log ()
{
    # Timestamped console logging; no-op when running in quiet (-q) mode.
    [ "$VERBOSE" -eq 1 ] || return 1
    echo $(date +"%Y%m%d %H:%M:%S") "$@"
}
in_array ()
{
    # Return 0 when $1 exactly matches one of the remaining arguments.
    local needle=$1 hay
    shift
    for hay
    do
        case "$hay" in
            "$needle") return 0 ;;
        esac
    done
    return 1
}
add_repo ()
{
# Register package file $2 in the pacman database of architecture $1.
# Serializes with other writers by busy-waiting on what is presumably
# repo-add's lock file -- TODO confirm the .lck naming against repo-add.
pushd "$PKG_DIR"/"$1" &> /dev/null
log "+ en attente du verrou repo-add..."
while [ -f "$REPO_NAME".db.tar.gz.lck ]; do sleep 1; done
repo-add "$REPO_NAME".db.tar.gz "$2" &> /dev/null && log "+ ajout de $2" || return 1
popd &> /dev/null
}
link_any_repo ()
{
# Symlink an 'any'-architecture package file ($1) into every per-arch
# repository directory. When $2 is non-zero, also register the package in
# each per-arch database via add_repo.
for arch in "${ARCH[@]}"
do
# 'ln -sf' would fire a "delete" inotify event (picked up by watch_pkg),
# so use plain 'ln -s' and silently ignore already-existing links.
# TODO: clean up stale/obsolete links.
ln -s ../any/"$1" "$PKG_DIR"/"$arch" &> /dev/null
(($2)) && add_repo "$arch" "$1"
done
}
add_any_repo ()
{
# Publish an 'any'-arch package ($1): link + register it in every per-arch
# repository, then register the real file in the 'any' database.
link_any_repo "$1" 1
add_repo "any" "$1"
}
del_repo ()
{
# Unregister package $2 from the pacman database of architecture $1,
# waiting for the repo lock file first (same scheme as add_repo).
pushd "$PKG_DIR"/"$1" &> /dev/null
log "+ en attente du verrou repo-remove..."
while [ -f "$REPO_NAME".db.tar.gz.lck ]; do sleep 1; done
repo-remove "$REPO_NAME".db.tar.gz "$2" &> /dev/null && log "+ suppression de $2" || return 1
popd &> /dev/null
}
del_any_repo ()
{
# Retire an 'any'-arch package: remove the per-arch symlinks (named after
# the archive file, $1) and unregister the package from the 'any' database.
local file=$1
# BUG FIX: repo-remove (via del_repo) expects the package *name*, but the
# caller (watch_pkg) hands us the archive file name. Strip the
# "-pkgver-pkgrel-any.pkg.tar.*" suffix exactly like watch_pkg already
# does for arch-specific packages.
local pkg=${file%-*-*-*.pkg.tar.*}
for arch in "${ARCH[@]}"
do
rm "$PKG_DIR/$arch/$file"
done
del_repo "any" "$pkg"
}
pkg_archive ()
{
# Validate an uploaded binary package ($1, uploaded by user $2) and publish
# it into the repository tree. Returns non-zero on any validation failure.
local archive="$1"
local user="$2"
local arch
log "+ extraction .PKGINFO"
# Extract only the package metadata into the current (temp) directory.
bsdtar -xf "$archive" ".PKGINFO" || return 1
log "+ ajout de l'archive dans afur"
# BUG FIX: the original 'local arch=$(...)' made $? reflect the status of
# 'local' (always 0), so a failing $PARSE_CMD was never detected
# (ShellCheck SC2155). Declare first, assign separately.
arch=$($PARSE_CMD "$archive" .PKGINFO "$user")
[ $? -ne 0 ] && return 1
log "+ test $arch --"
if [ "$arch" = "any" ]; then
log "+ any"
mv "$archive" "$PKG_DIR"/any &> /dev/null
add_any_repo "$archive"
else
log "+ $arch"
# Reject packages built for an unsupported architecture.
in_array "$arch" "${ARCH[@]}" || return 1
log "+ copie dans le dépôt"
mv "$archive" "$PKG_DIR"/"$arch" &> /dev/null
add_repo "$arch" "$archive"
fi
return 0
}
sig_archive ()
{
# Place an uploaded package signature (*.pkg.tar.*.sig) next to the package
# it signs; for 'any' packages also symlink it into every per-arch dir.
local archive=$1
log "+ ajout de la signature"
# Derive the architecture from the file name: strip everything from the
# first ".pkg.tar" onward, then keep the last '-'-separated field.
# NOTE(review): 'arch' is not declared local here and leaks to the global
# scope -- harmless for this script but worth confirming.
arch=${archive%%.pkg.tar.*}
arch=${arch##*-}
if [ "$arch" = "any" ]; then
log "+ any"
mv "$archive" "$PKG_DIR"/any &> /dev/null
# 0: link only, do not touch the per-arch databases.
link_any_repo "$1" 0
else
log "+ $arch"
in_array "$arch" "${ARCH[@]}" || return 1
log "+ copie dans le dépôt"
mv "$archive" "$PKG_DIR"/"$arch" &> /dev/null
fi
return 0
}
src_archive ()
{
# Publish an uploaded source package ($1 = <pkg>-<ver>-<rel>.src.tar.*):
# replace any previously published PKGBUILD tree for that package with the
# freshly extracted archive contents.
local archive="$1"
local pkg=${archive%-*-*.src.tar.*}
log "+ copie PKGBUILD de '$pkg'"
[ -d "$PKGBUILD_DIR"/"$pkg" ] && rm -rf "$PKGBUILD_DIR"/"$pkg"
bsdtar -xf "$archive" -C "$PKGBUILD_DIR"
}
new_archive ()
{
# Process one freshly uploaded file ($1 = full path under UPLOAD_DIR):
# classify it as signature / binary package / source package, move it to a
# private temp dir, and dispatch to the matching *_archive handler.
local archive="$1"
local ret=0
[ -z "$archive" ] && return 1
# Check that the file has not been deleted in the meantime.
[ ! -e "$archive" ] && return 1
log "+ récéption de '$archive'"
# file = base name; user = name of the upload subdirectory (the uploader).
local file=${archive##*/}
local user=${archive%/*}
local user=${user##*/}
log "+ fichier '$file' de '$user'"
# Reject anything that is neither *pkg.tar.* nor *src.tar.*.
if [ "$file" = "${file%pkg.tar.*}" -a "$file" = "${file%src.tar.*}" ]
then
log "- '$file' n'est pas un format connu -> suppression"
rm "$archive"
return 1
fi
cd "$WORK_DIR"
# Isolate each upload in its own temp dir so concurrent/leftover files
# cannot interfere with extraction.
local tmp_dir=$(mktemp -d --tmpdir="$WORK_DIR")
mv "$archive" "$tmp_dir" &> /dev/null
pushd "$tmp_dir" &> /dev/null
# Dispatch on suffix: .sig first, then binary package, else source package.
if [[ "$file" != "${file%.sig}" ]]; then
sig_archive "$file"
elif [ "$file" != "${file%pkg.tar.*}" ]; then
pkg_archive "$file" "$user"
else
src_archive "$file"
fi
ret=$?
popd &> /dev/null
rm -rf $tmp_dir
return $ret
}
watch_upload ()
{
# Consume new-upload events appended to file $1 (fed by inotify-tree) and
# hand each path to new_archive. The path "$UPLOAD_DIR/exit" is the
# shutdown sentinel written by safe_quit. fd 3 keeps the loop's stdin free.
while read -u 3 archive
do
[ "$archive" = "$UPLOAD_DIR/exit" ] && break
new_archive "$archive"
done 3< <(tail --pid=$SELF_PID -f "$1")
}
watch_pkg ()
{
# Consume 'delete' events appended to file $1 (fed by inotifywait on
# PKG_DIR) and unregister the deleted package from the repo databases.
while read -u 3 archive
do
[ -z "$archive" ] && continue
# Check that the file really is gone: these are delete events, so if the
# path still exists (e.g. it was recreated), skip it.
[ ! -e "$archive" ] || continue
# file = deleted file name; arch = name of its parent directory.
local file=${archive##*/}
local arch=${archive%/*}
local arch=${arch##*/}
log "++ suppression de '$file' de '$arch'"
if [ "$file" = "${file%pkg.tar.*}" ]
then
log "- '$file' n'est pas un format connu"
continue
fi
# Strip "-pkgver-pkgrel-arch.pkg.tar.*" to get the bare package name.
pkg=${file%-*-*-*.pkg.tar.*}
if [ "$arch" = "any" ]; then
del_any_repo "$file"
else
del_repo "$arch" "$pkg"
fi
done 3< <(tail --pid=$SELF_PID -f "$1")
}
# Event spool files: the inotify watchers append paths here and the
# watch_* loops tail them.
tmp_upload=$(mktemp)
tmp_pkg=$(mktemp)
safe_quit ()
{
# Shutdown handler (trap on EXIT): unblock watch_upload via its sentinel
# file, stop both inotify watchers, and remove the spool files.
touch "$UPLOAD_DIR/exit"
kill $pit1 $pit2
wait $pit1 $pit2 2> /dev/null
rm "$tmp_upload" "$tmp_pkg"
}
trap "safe_quit" 0
SELF_PID=$$
# Watcher 1: custom helper reporting new files under the upload dir.
$HOME/bin/inotify-tree "$UPLOAD_DIR" >> "$tmp_upload" &
pit1=$!
# Watcher 2: deletions inside the published package tree; the repo
# database files themselves are excluded to avoid feedback loops.
inotifywait --exclude="$REPO_NAME.db.tar.gz" \
--exclude="$REPO_NAME.db" \
-r -q -e delete --format "%w%f" -m "$PKG_DIR" >> "$tmp_pkg" &
pit2=$!
# Run the upload consumer in the background and the delete consumer in the
# foreground; the script lives until watch_pkg's tail is torn down.
watch_upload "$tmp_upload" &
watch_pkg "$tmp_pkg"
| true
|
a5a998c4e0f8f3f2f67205fef8d31ccab0850153
|
Shell
|
atsmin/hinemos-playground
|
/web/run.sh
|
UTF-8
| 345
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: start hinemos_web (Tomcat) and then act as a
# watchdog, exiting when the hinemos process disappears so the supervisor
# can restart the container.
# Start hinemos_web
/opt/hinemos_web/bin/tomcat_start.sh -Wq
status=$?
if [ $status -ne 0 ]; then
echo "Failed to start hinemos_web: $status"
exit $status
fi
# Poll every 60s for a running hinemos process.
while sleep 60; do
ps aux |grep hinemos |grep -q -v grep
STATUS=$?
# BUG FIX: the original read '[ $STATUS -ne 0]' (no space before ']'),
# which made 'test' itself error out (status 2), so the branch never ran
# and the watchdog never noticed a dead process.
if [ $STATUS -ne 0 ]; then
echo "hinemos processes has already exited."
exit 1
fi
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.