blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2b81d83c2531644a3e71dcbe4e1b18f403909a7e
|
Shell
|
gndowns/SceneSymmetryCNNs
|
/data/toronto_split_data.sh
|
UTF-8
| 2,439
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Gabriel Downs, 2018
# Example script for splitting toronto datasets into 'train' and 'test' groups
# This script assumes the following directory structure:
# .
# |-- unsplit/
# | toronto_rgb/
# | toronto_line_drawings/
# | (etc. directories of raw, ungrouped data)
# |-- toronto/
# | rgb/
# | train/
# | test/
# | split_data.sh (copy of this script)
# | line_drawings/
# | (etc. directories for each dataset)
# A copy of this script is meant to be run from the specific dataset folder, e.g. './toronto/rgb/split_data.sh' for rgb images
# the images will be grouped into 'train/' and 'test/' directories, with sub-directories for each class, such as:
# |- rgb/
# | train/
# | beach/
# | city/
# | ...
# | test/
# | ...
# please create empty train/ and test/ directories before running
# The class names & #/samples in each class and group should be the same for all Toronto based datasets,
# however the other variables below, such as directory names, should be changed accordingly
classes=('beach' 'city' 'forest' 'highway' 'office' 'mountain')
# the images are often named with the class name capitalized, e.g. 'Beach_0_0_0.png'
img_prefixes=('Beach' 'City' 'Forest' 'Highway' 'Office' 'Mountain')
# total number of images in each class
nb_images=(80 79 80 80 76 80)
# 75% / 25% train/test split for each class
nb_train=(60 59 60 60 57 60)
nb_test=(20 20 20 20 19 20)
# CHANGE THESE FOR YOUR DATASET
# e.g. if run from toronto/rgb/, set as
# unsplit_dir='../../unsplit/toronto_rgb'
unsplit_dir='RELATIVE_PATH_TO_YOUR_UNSPLIT_DATA_FOLDER'  # (typo 'USNPLIT' fixed)

# Refuse to wipe train/test before the script has actually been configured.
if [ "$unsplit_dir" = 'RELATIVE_PATH_TO_YOUR_UNSPLIT_DATA_FOLDER' ]; then
  echo "Please edit unsplit_dir in this script before running" >&2
  exit 1
fi

# === SPLIT DATA! ====
# reset, remove old data
rm -rf test/* train/*

# iterate over each class
i=0
for c in "${classes[@]}"; do
  echo "splitting $c"
  # make train/test class directories (-p: no error if they exist)
  mkdir -p train/"$c" test/"$c"
  # NOTE: you may need to change these lines to reflect the organization and naming of your unsplit data
  # i.e. whether it's all in 1 directory, or already sorted into class directories
  img_prefix="${img_prefixes[i]}"
  # ls -v gives numerical (version) ordering, which keeps the grouping
  # consistent across all toronto datasets. Collect into an array so
  # filenames are never re-split by the shell.
  mapfile -t imgs < <(ls -v "$unsplit_dir/$img_prefix"/*.png)
  # first 75% into training, remaining 25% into testing
  cp "${imgs[@]:0:${nb_train[i]}}" train/"$c"
  cp "${imgs[@]: -${nb_test[i]}}" test/"$c"
  # increment
  i=$((i+1))
done
| true
|
f9a3ec43351b44b8295ed606b8833fd65125bdac
|
Shell
|
la4okurs/rpi
|
/repeaters.bash
|
UTF-8
| 2,366
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# A simple script to listen at the Norwegian VHF ham repeaters in South of Norway
#
# Stop rendering audio with typing Ctrl C (^C)
#
# Author: Made by Steinar/LA7XQ
#
# INFO:
# '>/dev/null 2>&1' below means: ignore any printout(s) from the statement in front
#
# NOTICE: be sure first to install alsa-utils (amixer) and vlc packages or vlc-nox (vlc and cvlc)
# first before running this script if amixer and vlc is not yet installed on your RPI:
# sudo apt-get update
# sudo apt-get install alsa-utils
# sudo apt-get install vlc # or sudo apt-get install vlc-nox
#
# (You may use other players as well)
#

# Internet connectivity check: a single ping to a well-known public IP.
ping -c 1 216.58.207.206 >/dev/null 2>&1
RET=$?
if [ $RET -ne 0 ];then
 echo "== You have no connection to Inet =="
 echo "Fix this first, then restart the program"
 exit 1   # fixed: was 'exit 0' — this is a failure path, signal it
fi

# cvlc is required (comes with vlc / vlc-nox).
[ -f /usr/bin/cvlc ] || {
 echo;echo "$(basename "$0"): ERROR: /usr/bin/cvlc not found."
 echo "Be sure to have installed the package vlc or vlc-nox first by doing:"
 echo "sudo apt-get update"
 echo "(sudo apt-get purge vlc)"
 echo "sudo apt-get autoremove vlc"
 echo "sudo apt-get install vlc"
 echo;echo "Now exit until above is done"
 exit 1
}

# amixer is required (comes with alsa-utils).
[ -f /usr/bin/amixer ] || {
 echo;echo "$(basename "$0"): ERROR: /usr/bin/amixer not found."
 echo "Be sure to have installed the package alsa-utils first by doing:"
 echo "sudo apt-get update"
 echo "sudo apt-get install alsa-utils"
 echo;echo "Now exit until above is done"
 exit 1
}

echo "Good morning OM!"
echo "Now listening (scanning) the ham VHF repeaters in the South East of Norway"
echo "Please listen at your Raspberry PC audio jack connector (headphones) now"
echo "Silence ? Forgotten to turn up the volume? OK, it is silence when nobody on the repeater(s)"
echo;echo "Best regards Steinar/LA7XQ who made this program"
echo;echo "(Type Ctrl C to stop this program)"

/usr/bin/amixer cset numid=3 1 >/dev/null 2>&1 # force RPI audio output to the 3.5 mm jack, ignore printout
# /usr/bin/amixer cset numid=3 2 # use this instead if audio output force to HDMI screen is wanted (requre HDMI speakers)

kill -9 $(pgrep -f /usr/bin/vlc) >/dev/null 2>&1 # kill all (c)vlc processes already started, ignore print out
cvlc http://51.174.165.11:8888/hls/stream.m3u8 >/dev/null 2>&1 # listen ham FM VHF repeaters
exit $? # transfer the cvlc exit value to the outer shell
| true
|
1e0dc44aa0471d731b6d99b077aec34da0cf9dc5
|
Shell
|
psifive/WebApi-Curl-Testing
|
/Templates/get_application_documentrequest.template
|
UTF-8
| 815
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Template: fetch a document request for an application and decode the first
# attached document to CCADocument.pdf. The %APPLICATIONID% and
# %DOCUMENTREQUESTID% tokens are placeholders substituted before execution.
URL=$(cat url.txt)
TOKEN=$(cat token.txt)
# Echo the exact request for debugging/audit purposes.
echo "curl -s -H \"Accept:application/json;version=1\" -H \"Content-Type: application/json\" -H \"Authorization: Bearer ${TOKEN}\" ${URL}/application/%APPLICATIONID%/documentrequest/%DOCUMENTREQUESTID%?get_file=true"
# Quote the URL so '?'/'%' are never glob-expanded or split.
JSON=$(curl -s -H "Accept:application/json;version=1" -H "Content-Type: application/json" -H "Authorization: Bearer ${TOKEN}" "${URL}/application/%APPLICATIONID%/documentrequest/%DOCUMENTREQUESTID%?get_file=true")
# Quote ${JSON}: unquoted, any '*' in the payload would glob-expand.
echo "${JSON}" | jq -r '.documentRequest.documents[0].fileContents' > CCADocument.base64
openssl enc -base64 -d -A -in CCADocument.base64 -out CCADocument.pdf
ENVIRONAME=$(uname)
# '=' (POSIX) instead of '=='; variable quoted so an empty value can't break the test.
if [ "${ENVIRONAME}" = 'Darwin' ]
then
	#open CCADocument.pdf
	echo CCADocument.pdf
else
	# NOTE(review): this *executes* CCADocument.pdf rather than opening it;
	# presumably 'xdg-open CCADocument.pdf' was intended — confirm.
	CCADocument.pdf
fi
chmod +x *
| true
|
b7b921b6616fb1479cc6ff9d83f443e2eddcde8b
|
Shell
|
tomkod/wasm-cpp
|
/emscripten_build.sh
|
UTF-8
| 1,184
| 3.140625
| 3
|
[] |
no_license
|
# Build/bootstrap the Emscripten toolchain for this project.
# Relies on emscripten_base.inc to provide: say, EMSDK_ROOT, EMSCRIPT_RELEASE,
# EMSCRIPTEN_TEMPDIR (NOTE(review): inferred from usage below — confirm in the inc file).
source emscripten_base.inc
say ''
say '****************'
say '*** Building ***'
say '****************'
cd "$EMSDK_ROOT"
# Build in release mode to not run out of memory
# https://github.com/kripken/emscripten/issues/4667
say '* Emscripten: setup'; {
./emsdk update
./emsdk install $EMSCRIPT_RELEASE --build=Release
./emsdk activate $EMSCRIPT_RELEASE
# Use incoming because of https://github.com/kripken/emscripten/pull/5239
# ./emsdk install emscripten-incoming-32bit --build=Release
# ./emsdk activate emscripten-incoming-32bit
# Needed by emcc: give node a bigger stack via the generated ~/.emscripten config
sed -i "s/NODE_JS *= *'\(.*\)'/NODE_JS=['\1','--stack_size=8192']/" ~/.emscripten
# Regenerate emsdk_set_env.sh
./emsdk construct_env ""
}
# Don't source emsdk_env directly, as it produces output that can't be logged
# without creating a subshell (which would break `source`)
source "${EMSDK_ROOT}emsdk_set_env.sh"
# emcc fails in all sorts of weird ways without this
ulimit -s unlimited
# Compile a trivial program once so emcc builds/caches its standard library.
say '* Emscripten: stdlib (very slow!)'; {
mkdir -p "$EMSCRIPTEN_TEMPDIR"
cd "$EMSCRIPTEN_TEMPDIR"
printf '#include<stdio.h>\nint main() { return 0; }\n' > minimal.c
emcc minimal.c
}
| true
|
be892dee1ad2483dc8cf0faa95938c20f2fca9d3
|
Shell
|
YSShih/centos_laravel_env_installer_bash
|
/os_repo_download.sh
|
UTF-8
| 662
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Download (without installing) the yum repo packages needed for the CentOS
# Laravel environment installer: epel-release, remi-release-7 and yum-utils.
sleep_time=3
###################################install repo###############################
yum install --downloadonly --downloaddir=./package/epel-release epel-release
DIR_remi_release="package/remi-release"
# fixed: use -d — remi-release is a *directory* (created by mkdir below),
# so the original -f test was always false and mkdir could fail on re-runs
if [ -d "$DIR_remi_release" ]; then
	echo "File $DIR_remi_release exists."
else
	echo "File $DIR_remi_release does not exists."
	mkdir -p "$DIR_remi_release"
fi
curl http://rpms.remirepo.net/enterprise/remi-release-7.rpm > ./package/remi-release/remi-release-7.rpm
yum install --downloadonly --downloaddir=./package/yum-utils yum-utils -y
echo "finished download epel-release remi-release-7 yum-utils"
sleep $sleep_time
| true
|
c60fb1ee25831aeda132e52e480a96c39494b324
|
Shell
|
jermspeaks/useful-bash-scripts
|
/zip-parts.sh
|
UTF-8
| 409
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# (fixed: shebang was '#/bin/bash', missing the '!', so it was only a comment)
# Split-compress a folder into multiple zip parts.
# Reference: https://apple.stackexchange.com/questions/12371/how-can-i-compress-a-folder-into-multiple-zip-files
zip -r -s 64 archive.zip FolderName/
# 64 is the size of the split (in this case 64Mb).
# Use -s to set the split size and create a split archive.
# The size is given as a number followed optionally by one
# of k (kB), m (MB), g (GB), or t (TB) (the default is m). [1]
| true
|
3f9633c8e4f878194f0c4e70c8e1205b421ba7fa
|
Shell
|
kensniper/triton2
|
/triton2/server/logserver/config/data/imp_logdb.sh
|
GB18030
| 938
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Import per-table SQL dump files back into the LogDB MySQL database.
# NOTE(review): the original header comments were GB18030-encoded and are
# mojibake in this copy; they appear to reference UMS_ROLE_DETAIL_XX tables
# and PART_YYYYMMDDHH24MISS.SQL dump files — confirm against the matching
# export script.
#
USERNAME=root
PASSWD=game
DBNAME=LogDB
# Usage: $0 <path-to-dump-dir> <table-name-prefix>
if [ $# != 2 ]; then
echo "Usage: $0 path table_header"
exit;
fi;
HOST_NAME=$1
TABLE_HEAD=$2
MYSQL_HOME=/usr/local/mysql/
MYSQL_BIN=$MYSQL_HOME/bin/mysql
MYSQL_DUMP=$MYSQL_HOME/bin/mysqldump
# -s silent, -vv echoes the statement and results so table names appear in output
MYSQL_EXEC="$MYSQL_BIN -u$USERNAME -p$PASSWD -s -vv -D$DBNAME -e"
TABLE_SQL="show tables like '$TABLE_HEAD%' "
# Count output lines of the -vv-decorated 'show tables'.
NUM=`$MYSQL_EXEC "$TABLE_SQL" | wc -l`
# Strip the -vv banner/footer lines so only table names remain.
# NOTE(review): the magic offsets 3 and 7 are tied to this mysql client's
# exact -vv output format — verify before reusing with another version.
HEADNUM=`expr ${NUM} - 3`
TAILNUM=`expr ${NUM} - 7`
ARR1=`$MYSQL_EXEC "$TABLE_SQL" | head -n"$HEADNUM" | tail -n "$TAILNUM"`
# Word-split the table names into an array.
ARR2=($ARR1)
DUMPOPT="-u$USERNAME -p"$PASSWD" --hex-blob --skip-opt -t"
# For each matching table, source its dump file if one exists in $HOST_NAME.
i=0
while [ "$i" -lt "${#ARR2[@]}" ]
do
TABLENAME=${ARR2[$i]}
if [ -f $HOST_NAME/$TABLENAME.SQL ]
then
$MYSQL_EXEC "source $HOST_NAME/$TABLENAME.SQL"
fi
let "i++"
done
| true
|
89e5e0f0bc17faca0b7232cb25a689a7c29bbe61
|
Shell
|
Yahayawebmaster/vpsmanager
|
/stunnel.sh
|
MacCentralEurope
| 2,148
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# (fixed: shebang was '#/bin/bash', missing the '!')
# Install and configure stunnel4 to wrap local SSH (127.0.0.1:22) in SSL on a
# user-chosen port. Tries both apt-get and yum so it works on Debian- and
# RHEL-style systems (whichever package manager is absent simply fails).
sleep 2
apt-get update -y
clear
yum update -y
apt-get install openssh-server -y
clear
apt-get install curl -y
clear
yum install openssh-server -y
clear
apt-get install openssh-client -y
clear
yum install openssh-client -y
clear
apt-get install stunnel4 -y
clear
yum install stunnel4 -y
clear
apt-get install stunnel -y
clear
yum install stunnel -y
clear
# Public IP of this host, shown to the user at the end.
ip=$(curl https://api.ipify.org/)
echo $ip
clear
echo -e "Presione Enter"
sleep 1
# Generate a self-signed certificate for stunnel (interactive openssl prompts).
openssl genrsa 2048 > stunnel.key
openssl req -new -key stunnel.key -x509 -days 1000 -out stunnel.crt
sleep 2
# Remove any previous configuration before regenerating it.
rm /etc/stunnel/stunnel.conf
clear
rm /etc/default/stunnel4
clear
cat stunnel.crt stunnel.key > stunnel.pem
mv stunnel.pem /etc/stunnel/
clear
echo -e "\033[1;31mESCRIBA EL PUERTO SSL A UTILIZAR"
read -p ": " port
clear
# Write the stunnel service definition: accept SSL on $port, forward to sshd.
echo "client = no " >> /etc/stunnel/stunnel.conf
echo "[ssh] " >> /etc/stunnel/stunnel.conf
echo "cert = /etc/stunnel/stunnel.pem " >> /etc/stunnel/stunnel.conf
echo "accept = $port " >> /etc/stunnel/stunnel.conf
echo "connect = 127.0.0.1:22" >> /etc/stunnel/stunnel.conf
sleep 1
echo "ENABLED=1 " >> /etc/default/stunnel4
# fixed: the original nested double quotes cancelled each other, leaving
# /etc/stunnel/*.conf unquoted — the shell glob-expanded it before writing.
# Single-quote so the literal FILES="/etc/stunnel/*.conf" reaches the file.
echo 'FILES="/etc/stunnel/*.conf" ' >> /etc/default/stunnel4
echo 'OPTIONS="" ' >> /etc/default/stunnel4
echo "PPP_RESTART=0" >> /etc/default/stunnel4
echo -e "\033[1;33m **********************************"
echo -e "\033[1;31mI N I C I A N D O - STUNNEL4"
echo -e "\033[1;33m **********************************"
sleep 1
# fixed: '2 /dev/null' passed two extra arguments to service; use a real
# stderr redirection
service ssh restart 1>/dev/null 2>/dev/null
service stunnel4 start 1>/dev/null 2>/dev/null
service stunnel4 restart 1>/dev/null 2>/dev/null
clear
echo -e "\033[1;33m ***********REINICIADO...*************"
clear
echo -e "\033[1;33m **********************************"
echo -e "\033[1;33mCONFIGURAO SSL CRIADA COM SUCESSO"
echo -e "\033[1;33m **********************************"
echo -e "\033[1;33m- - - - -> \033[01;34MO TEU IP HOST:\033[0m $ip"
sleep 1
echo -e "\033[1;31mReinicie a VPS (Opcional) - sudo reboot"
sleep 2
echo -e "\033[1;33m- - ->>VPS \033[01;34MBY: "
echo -e "\033[1;33m- - ->>Creditos \033[01;34MSaucing"
sleep 2
| true
|
eb61ce5c5a02f70a934951d36306befe0a8f9863
|
Shell
|
Tucker459/terraform-k8s-modules
|
/modules/aws/aws-node-termination-handler/extract.sh
|
UTF-8
| 512
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Regenerate the aws-node-termination-handler Terraform module from the
# upstream release manifest using the terraform-provider-k8s extractor.

# Run the k8s -> terraform extractor, forwarding all arguments.
function tfextract() {
	# fixed: "$@" (quoted) so arguments containing spaces are forwarded
	# intact; the original unquoted $@ word-split them
	go run /go/src/github.com/mingfang/terraform-provider-k8s/cmd/extractor/*go "$@"
}

export DIR=modules/aws/aws-node-termination-handler

mkdir -p "${DIR}"
tfextract -dir "${DIR}" -url https://github.com/aws/aws-node-termination-handler/releases/download/v1.6.1/all-resources.yaml
# Drop the Windows daemonset; this module targets Linux nodes only.
rm "${DIR}/aws_node_termination_handler_win-daemon_set.tf"
# Blank out "= false" attributes and parameterize the namespace.
sed -i -e 's|.* = false||' "${DIR}"/*.tf
sed -i -e 's|namespace *= ".*"|namespace = var.namespace|g' "${DIR}"/*.tf
terraform fmt "${DIR}"
| true
|
01bd9ed9bff99ead9e0a7bee54356151c42c7301
|
Shell
|
scarfacedeb/dotfiles
|
/zsh/zsh.d/init.zsh
|
UTF-8
| 316
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Load all zsh.d submodules, in a fixed order.
DIR="$HOME/.zsh.d/"
for module in setopt autoload bindings prompt env aliases git fzf; do
  source "$DIR/$module.zsh"
done
# Work-specific config is optional: only source it when present.
[ -f "$DIR/catawiki.zsh" ] && source "$DIR/catawiki.zsh"
| true
|
f44e0b135bff84de8db883883e7b47bc27384d6c
|
Shell
|
rekren/workshop-reproducible-research
|
/tutorials/conda/code/run_qc.sh
|
UTF-8
| 765
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Make needed directories
for dir in data/raw_internal intermediate/fastqc results/fastqc; do
  mkdir -p "$dir"
done
# Download fastq files using sra-tools and put in data/raw_internal/:
for run_id in SRR935090 SRR935091 SRR935092; do
  fastq-dump "$run_id" -X 12000 --gzip -Z > "data/raw_internal/$run_id.fastq.gz"
done
# Run fastqc, put output zip in intermediate/fastq/ and html in results/fastqc:
for run_id in SRR935090 SRR935091 SRR935092; do
  fastqc "data/raw_internal/$run_id.fastq.gz" --outdir=intermediate/fastqc/
done
mv intermediate/fastqc/*html results/fastqc/
| true
|
5169de22bb2c21aa7838feaeddd60f827716e7b5
|
Shell
|
zyberzero/secure-videoconference
|
/scripts/webStop.sh
|
UTF-8
| 344
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stop the web (node) server: read its PID from configs/node.pid, then kill
# the process together with its children and grandchildren.
APP_DIR=$(cd "$(dirname "$0")"/../; pwd)
PID=$(cat "$APP_DIR/configs/node.pid")
if [ -z "$PID" ]; then
	echo "pid not exist"
	exit 1;
fi
# Direct children of the recorded PID (newline-separated list).
SUB_PIDS=$(pgrep -P "$PID")
if [ -n "$SUB_PIDS" ]; then
	# fixed: pgrep -P takes a *comma-separated* list of parent pids; with
	# multiple children the raw list made pgrep treat the second pid as a
	# name pattern. Join with commas first.
	GRANDSON_PIDS=$(pgrep -P "$(echo "$SUB_PIDS" | paste -sd, -)")
fi
echo "kill $PID $SUB_PIDS $GRANDSON_PIDS"
# Intentionally unquoted: each pid must be a separate argument to kill.
kill $PID $SUB_PIDS $GRANDSON_PIDS
echo "stop web ok"
| true
|
a96b2bea33ea9a2bb02cf7b371bd56eec35da6c1
|
Shell
|
iharosi/dotfiles
|
/custom/functions.zsh
|
UTF-8
| 2,638
| 4.09375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
# Find every node_modules directory below the cwd and report the disk
# space each one reserves (plus a grand total).
function nmls() {
  find . -type d -name "node_modules" -prune -print | xargs du -chs
}
# Recursively delete every node_modules directory below the cwd,
# printing each one as it is removed (-prune stops descent into it).
function nmrm() {
  find . -type d -name 'node_modules' -prune -print -exec rm -rf '{}' \;
}
# Generate a random password.
# @param integer $1 = 64  number of characters
# @param integer $2 = 0   pass 0 to exclude special characters
function randpass() {
  local charset="[:alnum:]"
  # anything other than a literal 0 as $2 keeps the special characters
  if [[ "$2" != 0 ]]; then
    charset+="+-\E*@#%=?!_;./"
  fi
  # LC_ALL=C: byte-wise classes; head -c limits to the requested length
  LC_ALL=C tr -dc "${charset}" < /dev/urandom | head -c ${1:-64} | awk '{print $1}'
}
# Fast backup of one or more files/directories: copies each argument to
# <name>.YYYY.MM.DD.HH.MM.SS (trailing slashes stripped first).
function bkp() {
  local target
  for target in ${@%/}; do
    cp -R $target{,$(date +.%Y.%m.%d.%H.%M.%S)}
  done
}
# Convert latin1 srt files to utf8 and replaces accents accordingly
# NOTE(review): the first sed expression contains a raw carriage-return byte
# inside 's/<CR>$//g' (it strips Windows line endings) — the apparent line
# break below is that literal byte; do not "reformat" it. The remaining
# expressions remap o/u-tilde/circumflex to Hungarian double-acute forms.
# Uses BSD sed's `-i ''` in-place syntax (macOS).
function toutf8() {
for var in "$@"
do
echo "> $var"
# transcode Windows-1252 -> UTF-8 into a temporary .utf8 copy
iconv -f WINDOWS-1252 -t UTF-8 $var > $var.utf8
sed -i '' -e 's/
$//g' -e 's/õ/ő/g' -e 's/Õ/Ő/g' -e 's/Û/Ű/g' -e 's/û/ű/g' $var.utf8
# keep the original as .backup.srt, promote the converted file
mv $var $var.backup.srt
mv $var.utf8 $var
done
}
# Determine the size of a file, or the total size of a directory.
# With no arguments, reports everything in the current directory.
function fs() {
  local du_flags
  # GNU du understands -b (exact bytes); BSD du does not — probe once
  if du -b /dev/null > /dev/null 2>&1; then
    du_flags=-sbh
  else
    du_flags=-sh
  fi
  if [[ -n "$@" ]]; then
    du $du_flags -- "$@"
  else
    du $du_flags .[^.]* ./*
  fi
}
# Create a data: URL from a file, base64-encoding its contents.
# Text mime types get an explicit UTF-8 charset appended.
function dataurl() {
  local mime
  mime=$(file -b --mime-type "$1")
  if [[ $mime == text/* ]]; then
    mime="${mime};charset=utf-8"
  fi
  local payload
  payload=$(openssl base64 -in "$1" | tr -d '\n')
  echo "data:${mime};base64,${payload}"
}
# Start an HTTP server from a directory, optionally specifying the port
# NOTE(review): relies on Python 2's SimpleHTTPServer module — will fail on
# systems where `python` is Python 3; confirm the target environment.
function server() {
	local port="${1:-8000}";
	# open the browser after a short delay so the server is up first
	sleep 1 && open "http://localhost:${port}/" &
	# Set the default Content-Type to `text/plain` instead of `application/octet-stream`
	# And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
	python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port";
}
# `tre` is a shorthand for `tree` with hidden files and color enabled,
# ignoring the `.git` directory, listing directories first. The output gets
# piped into `less` with options to preserve color and line numbers, unless
# the output is small enough for one screen.
function tre() {
  local ignore='.git|node_modules|bower_components'
  tree -aC -I "$ignore" --dirsfirst "$@" | less -FRNX
}
| true
|
ba07ea6fa2597190297156fc803cbd131d1ad312
|
Shell
|
iAudioSWE2011/iAudio
|
/docs/deploy_iAudio.sh
|
UTF-8
| 988
| 3.046875
| 3
|
[] |
no_license
|
#
# Deployment of iAudio
#
# Re-deploys /var/www/iAudio from GitHub while preserving public/tmp
# (user-uploaded media), then restores web-server permissions.
#
base=/var/www/iAudio
backup=/var/www/tmp
application=/var/www/iAudio/application
public=/var/www/iAudio/public
tmp=/var/www/iAudio/public/tmp
echo "Start deployment.."
echo ""
#Backup tmp structure
echo -n "Backup up tmp structure..."
cp -r $tmp $backup
echo "DONE"
#delete iAudio
echo -n "Removing current version of iAudio..."
rm -r $base
echo "DONE"
#download latest version
# NOTE(review): clones into ./iAudio relative to the *current* directory —
# this only recreates $base when run from /var/www; confirm the intended cwd.
echo -n "Clone iAudio from github..."
git clone git://github.com/iAudioSWE2011/iAudio.git >> log.txt
echo "DONE"
#copy mp3 back
echo -n "Copy tmp structure back..."
cp -r $backup $tmp
echo "DONE"
#remove backup
echo -n "remove backup..."
rm -r $backup
rm log.txt
echo "DONE"
#set rights for upload
# world-writable so the web app can write; tmp handed to www-data
# (log.txt is recreated here by the >> redirections after being removed above)
echo -n "Set correct user rights..."
chmod 0777 $application
chmod 0777 $public
chmod 0777 $tmp
chown -Rv www-data $tmp >> log.txt
chgrp -Rv www-data $tmp >> log.txt
echo "DONE"
echo ""
echo "Deployment FINISHED"
exit 0
| true
|
c987e95f9c9a28c85a09955c9977993c5c36eb07
|
Shell
|
yusiwen/myConfigs
|
/change_theme.sh
|
UTF-8
| 2,971
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Interactive theme switcher: symlinks the chosen colour scheme into the
# locations each component (i3, vim, Xresources, shell base16, mutt) reads,
# then reloads Xresources.

# Theme source directories (inside the myConfigs checkout) and the
# per-component symlink targets.
I3_THEME_DIR="$HOME/myConfigs/i3/colors"
I3_THEME_FILE="$HOME/.i3/config.colors"
VIM_THEME_DIR="$HOME/myConfigs/vim/themes"
VIM_THEME_FILE="$HOME/.vim/vimrc.colortheme"
X11_THEME_DIR="$HOME/myConfigs/X11/themes/color-schemes"
X11_THEME_FILE="$HOME/.Xresources.theme"
BASE16_THEME_SHELL="$HOME/.base16rc"
MUTT_THEME_DIR="$HOME/myConfigs/mail/mutt/themes"
MUTT_THEME_FILE="$HOME/.mutt/theme.muttrc"
# quiet grep used for the base16 prefix test below
GREP="grep -q"
# Apply theme $1: link the matching vim/i3/X11 (and, on Ubuntu, mutt) theme
# files into place and reload Xresources. Linux-only.
# NOTE(review): $DISTRO is read but not set in this script — presumably
# exported by the environment or a sourced profile; confirm.
change_theme()
{
if [ "$(uname)" = 'Linux' ]; then
# fonts must be configured once before any theme can be applied
if [ ! -e "$HOME"/.Xresources.font ]; then
"$HOME"/myConfigs/change_font.sh
fi
echo "Setting theme to '$1'..."
if [ -e "$VIM_THEME_DIR"/vimrc.theme."$1" ]; then
ln -sfnv "$VIM_THEME_DIR"/vimrc.theme."$1" "$VIM_THEME_FILE"
fi
if [ -e "$I3_THEME_DIR"/_config."$1" ]; then
ln -sfnv "$I3_THEME_DIR"/_config."$1" "$I3_THEME_FILE"
# regenerate/reload the i3 config after swapping colours
i3bang
fi
# base16 themes carry a .dark.256 xresources variant plus a shell script;
# other themes use a plain .xresources file and drop the shell hook
BASE16=
if echo "$1" | $GREP "^base16"; then
ln -sfnv "$X11_THEME_DIR"/"$1".dark.256.xresources "$X11_THEME_FILE"
ln -sfnv "$X11_THEME_DIR"/"$1".dark.sh "$BASE16_THEME_SHELL"
BASE16=".dark.256"
else
ln -sfnv "$X11_THEME_DIR"/"$1".xresources "$X11_THEME_FILE"
rm -f "$BASE16_THEME_SHELL"
fi
if [ "$DISTRO" = 'Ubuntu' ]; then
# Check if mutt is installed or not
PACKAGE=$(dpkg -l | grep mutt)
if [ -n "$PACKAGE" ]; then
ln -sfnv "$MUTT_THEME_DIR"/"$1"$BASE16.muttrc "$MUTT_THEME_FILE"
fi
fi
echo "Reloading xresources..."
xrdb -load ~/.Xresources
else
echo 'Only Linux is supported.'
fi
}
# Interactive menu: show the available themes, read one character/digit and
# dispatch to change_theme. Empty input defaults to Sourcerer.
echo "[1] Gruvbox"
echo "[2] Jellybeans"
echo "[3] Sourcerer"
echo "[4] Base16-Default"
echo "[5] Base16-Atelier Seaside"
echo "[6] Base16-Atelier Sulphurpool"
echo "[7] Base16-Bespin"
echo "[8] Base16-Solarized Dark"
echo "[9] Base16-Tomorrow"
echo "[0] Base16-Twilight"
echo "[a] Dracula"
echo "[b] Manjaro-i3"
echo "[c] Solarized-Dark"
printf "Choose theme[3]: "
read -r number
if [ -z "$number" ]; then
  number='3'
fi
if echo "$number" | grep -iq "^1"; then
  change_theme gruvbox
elif echo "$number" | grep -iq "^2"; then
  change_theme jellybeans
elif echo "$number" | grep -iq "^3"; then
  change_theme sourcerer
elif echo "$number" | grep -iq "^4"; then
  change_theme base16-default
elif echo "$number" | grep -iq "^5"; then
  change_theme base16-atelierseaside
elif echo "$number" | grep -iq "^6"; then
  change_theme base16-ateliersulphurpool
elif echo "$number" | grep -iq "^7"; then
  change_theme base16-bespin
elif echo "$number" | grep -iq "^8"; then
  change_theme base16-solarized
elif echo "$number" | grep -iq "^9"; then
  change_theme base16-tomorrow
elif echo "$number" | grep -iq "^0"; then
  change_theme base16-twilight
# fixed: '[a|A]' etc. are bracket expressions, so a literal '|' also matched
# and selected a theme; use '[aA]' / '[bB]' / '[cC]'
elif echo "$number" | grep -iq "^[aA]$"; then
  change_theme dracula
elif echo "$number" | grep -iq "^[bB]$"; then
  change_theme manjaro-i3
elif echo "$number" | grep -iq "^[cC]$"; then
  change_theme solarized
else
  echo "Nahh!"
  exit
fi
echo "Done."
| true
|
04ba05b73c22b5bd36f5a6663c5f6954dbf4764a
|
Shell
|
openube/xubuntu-setup
|
/system/virtualbox
|
UTF-8
| 1,215
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash -eu
# Install VirtualBox 4.2 from Oracle's apt repository plus the matching
# Extension Pack on Ubuntu (quantal). Relies on helpers from the shared
# library script: write-file, apt-update-list, install, download.
. `dirname $0`/../library/script

repository_url=http://download.virtualbox.org/virtualbox/debian
list_path=/etc/apt/sources.list.d/virtualbox.list

if [[ ! -f $list_path ]]; then
  # add repository URL to source list
  write-file $list_path <<EOF
deb $repository_url quantal contrib non-free
EOF
  # import apt key
  wget -q $repository_url/oracle_vbox.asc -O- | sudo apt-key add -
  # update packages from added repository only
  apt-update-list $list_path
fi

# install VirtualBox
install virtualbox-4.2

if ! VBoxManage list extpacks | grep -q "Oracle VM VirtualBox Extension Pack"; then
  # download Oracle VM Extension Pack
  # SOURCE: https://github.com/xdissent/ievms/blob/master/ievms.sh
  version=`VBoxManage -v`
  major_minor_release=${version%%[-_r]*}
  filename=Oracle_VM_VirtualBox_Extension_Pack-${major_minor_release}.vbox-extpack
  # fixed: this line was corrupted to '$(unknown)'; the download URL must end
  # with the pack filename built above — TODO confirm against upstream script
  url=http://download.virtualbox.org/virtualbox/${major_minor_release}/${filename}
  md5=22518ce5e06c267287f631915681250c
  download $url $md5
  # install Oracle VM Extension Pack
  sudo VBoxManage extpack install $filename
  # add user to group
  sudo adduser $USER vboxusers
  # remove automatically created directory
  rm -rf ~/.VirtualBox
fi
| true
|
8ca3a8140c6f7c15abf8d2a623774327519a37ba
|
Shell
|
n0rad/rkt-images
|
/aci/OLD/aci-arch-prometheus-exporter/runlevels/builder/06.getHAProxyExporter.sh
|
UTF-8
| 363
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the prometheus haproxy_exporter release binary and stage it under
# the image rootfs ($ROOTFS is expected in the build environment).
set -x

EXPORTER_VERSION=0.6.0
PROGRAM_PATH="$ROOTFS/etc/prometheus/exporter"
url="https://github.com/prometheus/haproxy_exporter/releases/download/${EXPORTER_VERSION}/haproxy_exporter-${EXPORTER_VERSION}.linux-amd64.tar.gz"

mkdir -p "${PROGRAM_PATH}"
# --strip 1 drops the versioned top-level directory from the tarball
curl -Ls "${url}" | tar --strip 1 -C "${PROGRAM_PATH}" -xzvf -
chmod +x "${PROGRAM_PATH}/haproxy_exporter"
| true
|
fbaad4b2b89f5226cf101735c05c685af501a9e1
|
Shell
|
Kenzu/DockSTARTer
|
/.scripts/pm_zypper_remove_docker.sh
|
UTF-8
| 375
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'

# Remove Docker packages that would conflict with a fresh install on
# zypper-based systems.
# NOTE(review): currently a stub — the removal command is commented out and
# only an `info` log line runs. `info`/`warn` come from the sourcing
# framework, not this file.
pm_zypper_remove_docker() {
    # https://docs.docker.com/install/linux/docker-ce/fedora/
    info "Removing conflicting Docker packages."
    #dnf -y remove docker > /dev/null 2>&1 || true
}

# CI hook: intentionally skipped (would require zypper in the CI image).
test_pm_zypper_remove_docker() {
    # run_script 'pm_zypper_remove_docker'
    warn "CI does not test pm_zypper_remove_docker."
}
| true
|
e284691ff9175da64dcd66f8eb4dac6b3a181c6c
|
Shell
|
pmidford/arachadmin
|
/modules/test.sh
|
UTF-8
| 77
| 2.53125
| 3
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Run every Python module in this directory in its self-test mode.
for f in *.py
do
	echo "Testing module: ${f}"
	# quoted: a filename with spaces/globs must reach python as one argument
	python "${f}" --test
done
| true
|
e8e509a997519a1f6501d8c3b6bff40e1611a275
|
Shell
|
Mikor-mkr/robotics_setup
|
/vrep.sh
|
UTF-8
| 1,368
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download and unpack the V-REP robot simulator into ~/src, choosing the
# archive that matches the installed Ubuntu release. Idempotent: skips the
# download/unpack steps when the archive/directory already exist.
# source: https://gist.github.com/phatblat/1713458
# Save script's current directory
DIR=$(pwd)
set -e
set -u
set -x
echo "############################"
echo "# V-REP Robot Simulator #"
echo "############################"
echo ""
echo "http://www.coppeliarobotics.com/downloads.html"
echo ""
echo "TO RUN V-REP after installation:"
echo ""
echo "cd ~/src/V-REP_PRO_EDU_V3_4_0_Linux"
echo "sh vrep.sh"
echo ""
echo "@author Andrew Hundt <ATHundt@gmail.com>"
echo ""
# VREP_VERSION=V3_4_0
# V-REP_PRO_EDU_V3_4_0_Linux
# Default archive name; overridden per Ubuntu release below.
VREP_FILE="V-REP_PRO_EDU_V3_6_1_Ubuntu18_04"
. /etc/lsb-release # get ubuntu version number
if [ "$DISTRIB_RELEASE" = "16.04" ]
then
# http://coppeliarobotics.com/files/V-REP_PRO_EDU_V3_6_1_Ubuntu16_04.tar.xz
VREP_FILE="V-REP_PRO_EDU_V3_6_1_Ubuntu16_04"
fi
if [ "$DISTRIB_RELEASE" = "18.04" ]
then
# redundant with the default above; kept for explicitness
VREP_FILE="V-REP_PRO_EDU_V3_6_1_Ubuntu18_04"
fi
# V-REP_PRO_EDU_V3_4_0_Linux
mkdir -p ~/src
FILE_PATH=~/src/${VREP_FILE}.tar.xz
# Download only when the archive is not already cached.
if [ ! -f ${FILE_PATH} ]
then
echo "downloading"
cd ~/src
# wget http://coppeliarobotics.com/files/V-REP_PRO_EDU_V3_4_0_Linux.tar.gz
wget http://coppeliarobotics.com/files/${VREP_FILE}.tar.xz
fi
# Unpack only when not already extracted.
if [ ! -d ~/src/${VREP_FILE} ]
then
cd ~/src
tar -xvf $FILE_PATH
# tar -xvzf $FILE_PATH
fi
# Return to the directory the script was launched from.
cd $DIR
| true
|
15f290875229f5032d0ff2bc5d06865526702798
|
Shell
|
waelby/application-Shell-Sujet-17-
|
/temps.sh
|
UTF-8
| 1,088
| 3.3125
| 3
|
[] |
no_license
|
#! /bin/bash
# Interactive menu: list files under / by modification age using find -mtime.
while :
do
  echo "-------------------------------------------------------"
  echo "*************************menu Temps**************************"
  echo "tapez 0 pour quitter"
  echo "-day"
  echo "-week"
  echo "-before-week"
  echo "-month"
  echo "-year"
  echo "-help"
  echo "veuillez saisir votre commande"
  echo "-------------------------------------------------------"
  echo "-------------------------------------------------------"
  read choix
  case $choix in
    *-day)
      # fixed: the old messages were copy-paste leftovers claiming '~' files
      # had been deleted; describe the action actually performed
      echo "fichiers modifiés il y a 1 jour :"
      find / -mtime 1
      ;;
    *-week)
      echo "fichiers modifiés il y a 7 jours :"
      find / -mtime 7
      ;;
    *-before-week)
      echo "fichiers modifiés il y a plus de 7 jours :"
      # fixed: '+mtime 7' is not valid find syntax; '-mtime +7' means
      # "modified more than 7 days ago"
      find / -mtime +7
      ;;
    *-month)
      echo "fichiers modifiés il y a 30 jours :"
      find / -mtime 30
      ;;
    *-year)
      echo "fichiers modifiés il y a 360 jours :"
      find / -mtime 360
      ;;
    *-help)
      # NOTE(review): invokes the bash builtin 'help'; a project-specific
      # help function was probably intended — confirm.
      help
      ;;
    0)
      echo "retour au menu principale"
      exit 0
      ;;
  esac
done
| true
|
79da48e64729d67456b7e42b105d6b2c4eee6155
|
Shell
|
connext/indra
|
/ops/deploy-indra.sh
|
UTF-8
| 3,361
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy Indra: sanity-check the repo state, dry-run the staging->master
# merge to surface conflicts, bump the version, then merge/tag/push.
set -e
# repo root and project/registry names pulled from package.json
root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." >/dev/null 2>&1 && pwd )"
project="`cat $root/package.json | grep '"name":' | head -n 1 | cut -d '"' -f 4`"
registry="`cat $root/package.json | grep '"registry":' | head -n 1 | cut -d '"' -f 4`"
registry_url="https://index.docker.io/v1/repositories/${registry#*/}"
patch=".deploy-indra.patch"
########################################
## Run some sanity checks to make sure we're really ready to deploy
if [[ -n "`git status -s`" ]]
then echo "Aborting: Make sure your git repo is clean" && exit 1
fi
if [[ "`git symbolic-ref HEAD | sed 's|.*/\(.*\)|\1|'`" != "staging" ]]
then echo "Aborting: Make sure you've checked out the staging branch" && exit 1
fi
if [[ -n "`git diff origin/staging`" ]]
then echo "Aborting: Make sure your branch is up to date with origin/staging" && exit 1
fi
if [[ ! "`pwd | sed 's|.*/\(.*\)|\1|'`" =~ "$project" ]]
then echo "Aborting: Make sure you're in the $project project root" && exit 1
fi
# Create patch to check for conflicts
# Thanks to: https://stackoverflow.com/a/6339869
# temporarily handle errors manually
set +e
# dry-run the merge on master; --no-commit/--no-ff leaves it uncommitted so
# it can always be rolled back with 'git merge --abort'
git checkout master > /dev/null 2>&1
git merge --no-commit --no-ff staging
if [[ "$?" != "0" ]]
then
git merge --abort && git checkout staging > /dev/null 2>&1
echo "Merge aborted & rolled back, your repo is clean again"
echo
echo "Error: merging staging into master would result in the above merge conflicts."
echo "To deploy:"
echo " - Merge master into staging ie: git checkout staging && git merge master"
echo " - Take care of any merge conflicts & do post-merge testing if needed"
echo " - Re-run this script"
echo
exit 0
fi
# dry-run succeeded: roll it back too, the real merge happens below
git merge --abort && git checkout staging > /dev/null 2>&1
set -e
########################################
## Gather info needed for deployment
current_version="`git show origin/master:package.json | grep '"version":' | awk -F '"' '{print $4}'`"
echo "What version of Indra are we deploying? Current version: $current_version"
read -p "> " -r
echo
version="$REPLY" # get version from user input
if [[ -z "$version" || "$version" == "$current_version" ]]
then echo "Aborting: A new, unique $project version is required" && exit 1
fi
echo "Verifying..."
# the docker registry must not already have an image tagged with this version
if [[ -n "`curl -sflL "$registry_url/${project}_node/tags/$version"`" ]]
then echo "Aborting: This version already exists on docker hub" && exit 1
fi
echo "Confirm: we'll deploy the current staging branch as $project-$version (y/n)?"
read -p "> " -r
echo
if [[ ! "$REPLY" =~ ^[Yy]$ ]]
then echo "Aborting by user request" && exit 1 # abort!
fi
echo "Let's go"
git checkout master
git merge --no-ff staging -m "Deploy $project-$version"
# edit package.json to set new version number
mv package.json .package.json
cat .package.json | sed 's/"version": ".*"/"version": "'$version'"/' > package.json
rm .package.json
# same version bump for the node module's package.json
cd modules/node
mv package.json .package.json
cat .package.json | sed 's/"version": ".*"/"version": "'$version'"/' > package.json
rm .package.json
cd ../..
# Push a new commit to master (amend folds the version bump into the merge commit)
git add .
git commit --amend --no-edit
git push origin master --no-verify
# Push a new semver tag
git tag $project-$version
git push origin $project-$version --no-verify
# Bring staging up-to-date w master for a cleaner git history
git checkout staging
git merge master
git push origin staging --no-verify
| true
|
71f45bccc8066b4f5bbc03e799a1dbef7d8d5116
|
Shell
|
dalanlan/my-kube-in-docker
|
/image/release.sh
|
UTF-8
| 1,472
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a kube-in-docker release: generate cluster CA/client certs, configure
# kubectl for the target master, then build and export the hyperkube image.
set -ex
# Cluster identity used for certs and the kubectl context below.
export MASTER_IP=10.10.103.100
export CLUSTER_NAME=100-server
export USER=100-emma
export CONTEXT_NAME=100-context
#now we have v1.0.1 available
export VERSION=v1.0.3-dir
#strogly recomand to be MASTER_IP:5000
export REPO=dalanlan
#Seems that we should run as root
# Generate CA + certs covering the master IP, service IPs and cluster DNS names.
sudo ./make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:192.168.3.0,IP:127.0.0.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local
#sudo rm -f ~/.kube/config
#curl -O https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kubectl
#sudo cp kubectl /bin/
# Point kubectl at the new cluster using the generated certs
# (NOTE(review): cert paths under /srv/kubernetes are produced by
# make-ca-cert.sh — confirm on the target host).
sudo kubectl config set-cluster ${CLUSTER_NAME} --certificate-authority=/srv/kubernetes/ca.crt --embed-certs=true --server=https://${MASTER_IP}:6443
sudo kubectl config set-credentials ${USER} --client-certificate=/srv/kubernetes/kubecfg.crt --client-key=/srv/kubernetes/kubecfg.key --embed-certs=true
sudo kubectl config set-context ${CONTEXT_NAME} --cluster=${CLUSTER_NAME} --user=${USER}
sudo kubectl config use-context ${CONTEXT_NAME}
# Stash the kubeconfig with the certs and make a world-readable local copy.
sudo cp $HOME/.kube/config /srv/kubernetes
sudo cp -R /srv/kubernetes .
sudo chmod 777 -R kubernetes/
# Distribute certs & keys to all of the nodes
# sudo scp -r /srv/kubernetes <username>:<master_ip>:/srv/
#make hyperkube binary && docker build
make
# Export the built image as a tarball for distribution to nodes.
sudo docker save ${REPO}/hyperkube:${VERSION} > hyperkube-${VERSION}.tar
# sudo cp hyper.tar ../tarpackage
# scp hyperkube image to the master node
| true
|
714ae40340cd8fb5ba8884063fb8d693c763aaf1
|
Shell
|
trail-of-forks/cabotage-app
|
/docker-compose/vault/entry.sh
|
UTF-8
| 2,161
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint for a dev-mode Vault used by cabotage docker-compose.
# First run: initialize Vault, persist the unseal key, and bootstrap the
# transit + database secrets engines. Subsequent runs: start sealed and
# unseal with the persisted key.
export VAULT_ADDR=http://127.0.0.1:8200
if [ -f /vault/file/unseal ]; then
echo "starting vault!"
vault server -dev -dev-skip-init -dev-listen-address=$VAULT_DEV_LISTEN_ADDRESS -dev-root-token-id=$VAULT_DEV_ROOT_TOKEN_ID -config /etc/vault/config.hcl &
echo "unsealing!"
# Poll until the server answers. Note the redirection order: 2>&1 happens
# before >/dev/null, so stderr is duplicated onto the *original* stdout
# (still visible) while stdout is discarded.
while true; do
vault status 2>&1 >/dev/null
# Exit code 2 from `vault status` means "server up but sealed" — that is
# the state we are waiting for before unsealing.
# NOTE(review): `==` inside [ ] is a bashism; under strict POSIX sh use `=`.
if [ $? == 2 ]; then
echo "we good"
break
fi
echo "vault not up yet..."
sleep .5
done
export UNSEAL_TOKEN=`cat /vault/file/unseal`
vault operator unseal ${UNSEAL_TOKEN}
# Keep the container alive as long as the backgrounded server runs.
wait
else
echo "starting vault!"
# tee the log so the auto-generated unseal key can be scraped below.
vault server -dev -dev-listen-address=$VAULT_DEV_LISTEN_ADDRESS -dev-root-token-id=$VAULT_DEV_ROOT_TOKEN_ID -config /etc/vault/config.hcl 2>&1 | tee $HOME/logfile &
# Exit code 0 means up *and* unsealed (dev mode auto-unseals after init).
while true; do
vault status 2>&1 >/dev/null
if [ $? == 0 ]; then
echo "we good"
break
fi
echo "vault not up and initialized yet..."
sleep .5
done
# Extract the unseal key from the log, stripping ANSI color escapes, and
# persist it for future container starts.
echo -n `grep 'Unseal Key: ' $HOME/logfile | awk '{print $NF}' | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g"` > /vault/file/unseal
echo "bootstrapping our transit key"
VAULT_TOKEN=$VAULT_DEV_ROOT_TOKEN_ID vault secrets enable transit
# Restore a pre-baked transit key so ciphertexts survive container rebuilds.
VAULT_TOKEN=$VAULT_DEV_ROOT_TOKEN_ID vault write transit/restore/cabotage-app backup=`cat /etc/vault/cabotage-vault-key.backup`
echo "bootstrapping postgres stufffff"
VAULT_TOKEN=$VAULT_DEV_ROOT_TOKEN_ID vault secrets enable database
# Short TTLs (60s/120s) exercise credential rotation in development.
VAULT_TOKEN=$VAULT_DEV_ROOT_TOKEN_ID vault write database/config/cabotage plugin_name=postgresql-database-plugin allowed_roles="cabotage" connection_url="postgresql://postgres@db/cabotage_dev?sslmode=disable" verify_connection=false
VAULT_TOKEN=$VAULT_DEV_ROOT_TOKEN_ID vault write database/roles/cabotage db_name=cabotage default_ttl="60s" max_ttl="120s" creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}' IN ROLE cabotage;" revocation_statements="REASSIGN OWNED BY \"{{name}}\" TO cabotage" renew_statements="ALTER ROLE \"{{name}}\" VALID UNTIL '{{expiration}}';"
wait
fi
| true
|
d5c4df9696404c4b5417a03c3c158082a004fa9e
|
Shell
|
frankhoff/xictools
|
/xt_base/packages/util/MPbuild
|
UTF-8
| 2,795
| 3.90625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#! /bin/bash
#### Apple OS X setup script
#
# Create the gtk2 libraries and applications for building xictools
# with gtk2 support.
#
# See README at the top of the xictools source tree for info.
# Procedure:
# Go to macports.org and download the installer source file. This will
# have a name like "Macports-2.7.1.tar.gz".
# Set the numbers in the line below to match the macports source file
# that you have downloaded to the current directory.
mpbase=Macports-2.7.1
# With the downloaded file in the current directory, run this script.
# Stand by to give your password, and respond to prompts when asked.
# After answering 'y' to the long list of ports to install, you can
# take a break. The script will take a long time, maybe hours, to
# complete, as it builds from source gtk2 and all dependent programs.
# Once complete, it will have installed everything into
# /usr/local/gtk2-bundle-x11 (standard x11 version)
# /usr/local/gtk2-bundle (experimental native version)
# Whichever you have, add its bin subdirectory to your search path.
# You should then be able to build and run xictools and other programs
# that use the gtk2 user interface.
# Choose the windowing backend; X11 is the supported default.
for_x11=yes
echo "The quartz version is experimental and doesn't really work last I checked."
echo -n "Build for quartz? (y/n) [n] "
read foo
if [ x$foo = xy -o x$foo = xyes ]; then
for_x11=no
fi
if [ $for_x11 = yes ]; then
MP_PREFIX=/usr/local/gtk2-bundle-x11
else
MP_PREFIX=/usr/local/gtk2-bundle
fi
# Refuse to clobber an existing installation prefix.
if [ -d $MP_PREFIX ]; then
echo Error: $MP_PREFIX exists and is in the way, move or delete it.
exit 1
fi
echo -n "Creating $MP_PREFIX, x11: $for_x11, OK? (y/n) [y] "
read foo
if [ x$foo = x -o x$foo = xy -o x$foo = xyes ]; then
echo Continuing...
else
echo Terminating
exit 0
fi
# Start from a clean unpack; put the new prefix last on PATH so system
# tools win during the bootstrap build.
rm -rf $mpbase
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:$MP_PREFIX/bin
if [ -f $mpbase.tar.gz ]; then
tar xzf $mpbase.tar.gz
elif [ -f $mpbase.tar ]; then
tar xf $mpbase.tar
else
echo Macports source file not found
exit 1
fi
if [ ! -d $mpbase ]; then
echo Macports directory not found
exit 1
fi
# Build and install MacPorts itself into the dedicated prefix.
cd $mpbase
./configure --prefix=$MP_PREFIX --with-applications-dir=$MP_PREFIX/Applications
make
sudo make install
sudo port -v selfupdate
# Install gtk2 (plus deps) from source; this is the multi-hour step.
if [ $for_x11 = yes ]; then
sudo port install gtk2
else
sudo port install gtk2 +no_x11 +quartz
fi
sudo port install gsl
sudo port install openssl11
sudo port install python2_select
sudo port install python27
#sudo port install py27-matplotlib py27-numpy py27-scipy py27-ipython +notebook
#sudo port install opencv +python27
#sudo port install py27-pip
# Set the default widget font, otherwise too small.
echo "gtk-font-name=\"sans-serif 14\"" > gtkrc
sudo mv -f gtkrc $MP_PREFIX/etc/gtk-2.0
# Clean up
cd ..
rm -rf $mpbase
echo Done
| true
|
923bb8a5315d10a60523a57d6f19729cf3992aec
|
Shell
|
kentaxinus/source.openwrt.melmac.net
|
/nebula/files/nebula.proto
|
UTF-8
| 2,522
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Copyright 2021 Stan Grishin (stangri@melmac.net)
# shellcheck disable=SC1091,SC2039,SC2034,SC3043
# OpenWrt netifd protocol handler for Nebula overlay interfaces: launches
# the nebula binary against a YAML config, opens a firewall rule for the
# listen port, and mirrors the addresses nebula assigns onto the interface.
PKG_VERSION='dev-test'
readonly PROG=/usr/sbin/nebula
# Bail out quietly when the nebula binary is not installed.
[ -x "$PROG" ] || exit 0
# When sourced by netifd (INCLUDE_ONLY unset), pull in the helper libraries
# and register with the proto framework.
[ -n "$INCLUDE_ONLY" ] || {
. /lib/functions.sh
. /lib/functions/network.sh
. ../netifd-proto.sh
init_proto "$@"
}
version() { echo "Version: $PKG_VERSION"; }
# Flatten a YAML file into shell variable assignments ("prefix_a_b=val"),
# using \034 (FS) as an internal field separator and indent depth for nesting.
# https://gist.github.com/pkuczynski/8665367
# shellcheck disable=SC2086,SC2155
parse_yaml() {
local prefix=$2
local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034'|tr -d '\015')
sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
awk "-F$fs" '{
indent = length($1)/2;
vname[indent] = $2;
for (i in vname) {if (i > indent) {delete vname[i]}}
if (length($3) > 0) {
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
printf("%s%s%s=\"%s\"\n", "'$prefix'", vn, $2, $3);
}
}'
}
# Declare the options this proto accepts in /etc/config/network.
proto_nebula_init_config() {
available=1
no_proto_task=1
proto_config_add_string "config"
}
# Bring the interface up: start nebula and publish firewall + address data.
proto_nebula_setup() {
local interface="$1" config address addresses
local yaml_listen_host yaml_listen_port yaml_tun_dev
proto_init_update "$interface" 1
json_get_vars config
# Fall back to uci if the config path was not passed via json.
if [ -z "$config" ]; then
config="$(uci -q get "network.${interface}.config")"
fi
logger -t nebula "start $interface with config: $config"
[ -s "$config" ] || return 1
# Import listen host/port (yaml_listen_*) from the nebula YAML config.
eval "$(parse_yaml "$config" "yaml_")"
proto_export "INTERFACE=$interface"
proto_run_command "$interface" "$PROG" -config "$config"
# Attach a firewall rule accepting UDP to nebula's listen socket
# (defaults mirror nebula's own: 0.0.0.0:4242).
proto_add_data
json_add_array firewall
json_add_object ""
json_add_string type rule
json_add_string name "$interface"
json_add_string src "*"
json_add_string dest_ip "${yaml_listen_host:-0.0.0.0}"
json_add_string dest_port "${yaml_listen_port:-4242}"
json_add_string proto udp
json_add_string target ACCEPT
json_close_object
json_close_array
proto_close_data
proto_send_update "$interface"
# Re-read whatever addresses nebula configured on the device and report
# them to netifd; bare addresses get host masks (/32, /128).
addresses="$(ip -4 a list dev "$interface" | grep inet | awk '{print $2}')"
for address in ${addresses}; do
case "${address}" in
*:*/*)
proto_add_ipv6_address "${address%%/*}" "${address##*/}"
;;
*.*/*)
proto_add_ipv4_address "${address%%/*}" "${address##*/}"
;;
*:*)
proto_add_ipv6_address "${address%%/*}" "128"
;;
*.*)
proto_add_ipv4_address "${address%%/*}" "32"
;;
esac
done
proto_send_update "$interface" 1
}
# Bring the interface down by stopping the nebula process.
proto_nebula_teardown() {
local interface="$1"
proto_kill_command "$interface"
}
[ -n "$INCLUDE_ONLY" ] || add_protocol nebula
| true
|
bc8c89d15fccbe6274872bb0c9c00c69e6bf3c0a
|
Shell
|
Appdynamics/AppD-Cloud9-Lambda-Lab
|
/bashUtilities.sh
|
UTF-8
| 604
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (c) AppDynamics Inc
# All rights reserved
#
# Maintainer: David Ryder, david.ryder@appdynamics.com
#
# Requires: jq (brew install jq)
#
# Helper functions sourced by other lab scripts; nothing runs on load.
#
# Describe every Cloud9 environment visible to the current AWS credentials.
_awsCloud9ListEnvironments() {
LIST1=$(aws cloud9 list-environments --query environmentIds[*] --output text)
for ENV_ID in $LIST1; do
aws cloud9 describe-environments --environment-ids $ENV_ID
done
}
# Print a random password shaped AAAA+0000-aaaa=<8 mixed chars>, built from
# class-filtered chunks of openssl random output to satisfy complexity rules.
_randomPassword() {
RND_PWD="$(openssl rand -base64 64 | tr -dc A-Z | cut -c1-4)+$(openssl rand -base64 64 | tr -dc 0-9 | cut -c1-4)-$(openssl rand -base64 64 | tr -dc a-z | cut -c1-4)=$(openssl rand -base64 8 | tr -dc A-Z-a-z-0-9)"
echo $RND_PWD
}
| true
|
815b6f26fd7fb862ef882ba8d8e772366f7f4728
|
Shell
|
aleksandrgilfanov/rotate-image
|
/video-to-imgdir
|
UTF-8
| 225
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Extract every frame of a video into numbered PNGs with ffmpeg.
#
# Usage: video-to-imgdir <input-video> <output-images-directory>

# Validate the argument count first.
# Fixed: bare `exit` returned status 0 on a usage error; exit 1 instead.
if [ $# -ne 2 ]
then
	echo "Usage:" >&2
	echo "$0 {input-video} {output-images-directory}" >&2
	exit 1
fi

INPUT_VIDEO="$1"
OUTPUT_DIR="$2"

# Quote expansions so paths containing spaces survive word splitting.
mkdir -p "$OUTPUT_DIR"
ffmpeg -i "$INPUT_VIDEO" "$OUTPUT_DIR/frame-%04d.png"
| true
|
ae4a7b0120134c501e779f00e393e54aaaabc61f
|
Shell
|
ArthurManz/thrust-id
|
/eth-network/artifacts/entrypoint.sh
|
UTF-8
| 1,338
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Entrypoint for a private Ethereum dev network container.
# The first argument selects the role: miner | node1 | node2 | ethstats |
# ethstatsclient | help. SUBNET / NETWORKID / NETWORKPORT / MINERPORT come
# from the container environment.
# Options common to every geth role; etherbase is the fixed mining reward
# address for this private network.
OPTIONS="--nodiscover --networkid $NETWORKID --port $NETWORKPORT --rpc --rpcport $MINERPORT --etherbase 0x57e25703aba36bd2575e9027de2cb9ac187dc6da --verbosity 6"
HELP="This is a help page. \
Available modes are: miner node1 node2 ethstats ethstatsclient help."
case $1 in
miner)
# Install this role's static node key, patch the peer list with the real
# subnet, then run geth at SUBNET.1 with mining enabled.
cp /root/key.miner /root/.ethereum/nodekey
sed -i "s/__subnet__/$SUBNET/g" /root/.ethereum/static-nodes.json
./geth --rpccorsdomain "*" --rpcapi admin,debug,shh,txpool,miner,personal,db,eth,net,web3 --verbosity "6" --identity $1 --rpcaddr $SUBNET.1 --mine --autodag --minerthreads "1" $OPTIONS
;;
node1)
# Non-mining peer at SUBNET.2 with a reduced RPC API surface.
cp /root/key.node1 /root/.ethereum/nodekey
sed -i "s/__subnet__/$SUBNET/g" /root/.ethereum/static-nodes.json
./geth --rpccorsdomain "*" --rpcapi eth,net,web3,debug --verbosity "6" --identity $1 --rpcaddr $SUBNET.2 $OPTIONS
;;
node2)
# Non-mining peer at SUBNET.3.
cp /root/key.node2 /root/.ethereum/nodekey
sed -i "s/__subnet__/$SUBNET/g" /root/.ethereum/static-nodes.json
./geth --rpccorsdomain "*" --rpcapi eth,net,web3,debug --verbosity "6" --identity $1 --rpcaddr $SUBNET.3 $OPTIONS
;;
ethstats)
# Run the eth-netstats dashboard.
cd /eth-netstats ; npm start
;;
ethstatsclient)
# Run the stats reporter agent under pm2 (foreground).
sed -i "s/__subnet__/$SUBNET/g" /eth-net-intelligence-api/app.json
cd /eth-net-intelligence-api ; pm2 start --no-daemon app.json
;;
help)
echo $HELP
;;
"")
echo $HELP
esac
| true
|
e183f2bfa979e666ceb3711909ff0985009ef26c
|
Shell
|
openwfm/wrfxpy
|
/forecast.sh
|
UTF-8
| 167
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run the wrfxpy forecast driver on a JSON job description.
#
# Usage: ./forecast.sh input.json
if [ $# -eq 0 ]
then
  echo usage: ./forecast.sh input.json
  exit 1
fi
# Run from the script's own directory so the relative src/ paths resolve.
# Fixed: the dirname result and the argument are now quoted so paths with
# spaces do not get word-split.
cd "$(dirname "$0")"
export PYTHONPATH=src
python src/forecast.py "$1"
| true
|
14eef22b381bb06b90b3a7967aaaa992d2a6fe53
|
Shell
|
erikwilson/CactusCon7
|
/badge/data/genspiffs.sh
|
UTF-8
| 434
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate per-badge RSA keypairs and SPIFFS filesystem images for 542 badges.
# For each badge: a private key in fs/badgeN/, the public key in pub/N,
# the badge id, the shared API cert, and a mkspiffs image in spiffs/.

# mkdir -p makes re-runs idempotent (plain mkdir errored on existing dirs).
mkdir -p pub fs spiffs
for i in {1..542}; do
  echo "badge$i"
  mkdir -p "fs/badge$i"
  # NOTE: 768-bit RSA is intentionally small for the embedded target; it is
  # not adequate for general-purpose security.
  openssl genrsa -out "fs/badge$i/private.pem" 768
  openssl rsa -in "fs/badge$i/private.pem" -pubout > "pub/$i"
  echo "$i" > "fs/badge$i/my.id"
  cp cactuscoinapi.pem "fs/badge$i/"
  # Pack the badge directory into a SPIFFS image sized for the ESP32 partition.
  ~/Downloads/mkspiffs/mkspiffs-0.2.3-2-g4eae236-arduino-esp32-linux64/mkspiffs -c "fs/badge$i/" -b 4096 -p 256 -s 0x16F000 "spiffs/badge_$i.bin"
done
| true
|
9637e5cb1a3e4e25cef25a4f046de65bf8018a6e
|
Shell
|
kellerli/mycat_haproxy_keepalive-docker-swarm-
|
/xinetd_config/mycat_status.sh
|
UTF-8
| 233
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# xinetd health check for Mycat: prints an HTTP status line so HAProxy's
# httpchk can mark the backend up or down.
# 'mycat status' reports "... is not running" when the service is down;
# count those lines (grep -c replaces the old grep | wc -l pipeline).
mycat=$(/usr/local/mycat/bin/mycat status | grep -c 'not running')
if [ "$mycat" = "0" ];
then
# Mycat is running: 200 keeps the backend in rotation.
/bin/echo -e "HTTP/1.1 200 OK\r\n"
exit 0
else
# Mycat is down: 503 marks the backend unhealthy.
# Fixed: a stray 'a' after the closing quote was being appended to the
# response line sent to the health checker.
/bin/echo -e "HTTP/1.1 503 Service Unavailable\r\n"
exit 1
fi
| true
|
b280be9ad099215d971cd4500c3b2791d638208f
|
Shell
|
StefanHeimberg/ee7-ejb-cdi-scopemix
|
/run-weblogic.sh
|
UTF-8
| 727
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the ee7-ejb-cdi-scopemix EAR in a throwaway WebLogic 12.2.1 container,
# mounting ./deployments for autodeploy and ./logs for server logs.
set -e
# Resolve the script's own directory and work from there.
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd ${__dir}
# Rotate the previous run's log with a timestamp suffix.
if [ -f "logs/example.log" ]; then
ts=$(date "+%Y%m%d-%H%M%S")
mv -v logs/example.log logs/example-$ts.log
fi
# Remove everything from deployments/ except the EAR under test.
if [ -d "deployments/" ]; then
find deployments/ ! -name 'ee7-ejb-cdi-scopemix-ear.ear' -type f -exec rm -f {} +
fi
# Foreground, auto-removed container; 8001 is WebLogic's listen port and
# is also exposed on host port 8080. Development mode enables autodeploy.
docker run -i -t --rm \
--name ee7-ejb-cdi-scopemix-weblogic \
-p 8001:8001 \
-p 8080:8001 \
-v ${__dir}/deployments:/u01/oracle/user_projects/domains/base_domain/autodeploy \
-v ${__dir}/logs:/u01/oracle/user_projects/domains/base_domain/logs \
iatebes/weblogic-1221-generic-domain:latest \
/bin/bash -c "startWebLogic.sh -Dweblogic.ProductionModeEnabled=false"
| true
|
b7ed8b377b58998644f413affac27677f6a5f344
|
Shell
|
stevleibelt/shell_script
|
/propel/checkRelease.sh
|
UTF-8
| 457
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
####
# Check the Propel CHANGELOG for new releases by fetching the current copy
# and diffing it against the snapshot kept from the previous run.
####
# @author artodeto
# @since 2013-04-23
####
URL_FEED='https://raw.github.com/propelorm/Propel/master/CHANGELOG'
DIR_SELF=$(cd "$(dirname "$0")"; pwd);
FILE_TO_DIFF='.PROPEL_RELEASE_CHANGELOG_TO_DIFF'

# Fetch the current CHANGELOG into ./CHANGELOG.
# Fixed: every command was wrapped in $(...), which runs the command and then
# tries to execute its *output* as another command — so the diff result was
# executed instead of shown, and any output caused spurious errors.
wget -q "$URL_FEED"

if [ -f "$FILE_TO_DIFF" ]; then
    # Show what changed since last run, then rotate the snapshot.
    diff CHANGELOG "$FILE_TO_DIFF"
    rm "$FILE_TO_DIFF"
    mv CHANGELOG "$FILE_TO_DIFF"
else
    echo First run, no file exists so far.
    mv CHANGELOG "$FILE_TO_DIFF"
fi
| true
|
67fbc72017f5043ccc153701306cba331fe3d53b
|
Shell
|
nikzasel/GOES
|
/omovie.sh
|
UTF-8
| 939
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cygwin script: build timelapse movies from GOES-16/17 overlay PNGs.
# Symlinks each satellite's frames into a flat numbered sequence, then
# encodes with a Windows ffmpeg build.
# Force Cygwin to create native Windows symlinks (required so the Windows
# ffmpeg.exe can follow them).
export CYGWIN="winsymlinks:nativestrict"
for GOES in 17 16;
do
date
echo "Link GOES-${GOES} source images"
mkdir /cygdrive/S/NASA/overlay-${GOES}
# Link every source frame as img-0000.png, img-0001.png, ... so ffmpeg's
# %04d pattern can consume them in order; link errors are ignored.
x=0; for i in `find S:/NASA/GOES-${GOES}_03_geocolor/overlay -name "*.png"`; do counter=$(printf %04d $x); ln -s "$i" /cygdrive/S/NASA/overlay-${GOES}/img-"$counter".png >/dev/null 2>&1; x=$(($x+1)); done
# date
# echo
# # With x265
# C:/Users/Spear/ffmpeg-4.1-win64-static/bin/ffmpeg.exe -y -benchmark -i "S:/NASA/overlay-${GOES}/img-%04d.png" -filter:v "setpts=2*PTS" -c:v libx265 -an -x265-params crf=25 -tag:v hvc1 -pix_fmt yuv420p SFC-${GOES}_h265_1080.mp4
# echo
# With x264
date
# setpts=2*PTS halves playback speed (each frame shown twice as long).
C:/Users/Spear/ffmpeg-4.1-win64-static/bin/ffmpeg.exe -y -benchmark -i "S:/NASA/overlay-${GOES}/img-%04d.png" -filter:v "setpts=2*PTS" -c:v libx264 -pix_fmt yuv420p SFC-${GOES}_h264_1080.mp4
echo
# rm -rf /cygdrive/S/NASA/tmp-${GOES}
# echo
date
echo
done
| true
|
ad618b4836efc0d05fb5da884a36474b16503eb7
|
Shell
|
abdxxw/2i010
|
/TP3/ensemble_calculs3.sh
|
UTF-8
| 530
| 3.75
| 4
|
[] |
no_license
|
#! /bin/bash
# ensemble_calculs3.sh
#
# Usage: ensemble_calculs3.sh <file-neg> <file-pos> value...
# Runs ./calcul.sh on each value; results <= 0 are written to <file-neg>,
# positive results to <file-pos>. Refuses to run if either target name is
# an existing directory.
if [ $# -eq 0 ]
then
	echo "Il faut des parametres"
	exit 1
fi
if [ -d "$1" ]
then
	echo "Dossier $1 existant"
	exit 1
elif [ -d "$2" ]
then
	echo "Dossier $2 existant"
	exit 1
else
	s=""
	s1=""
	name=$1
	name1=$2
	shift 2
	for x in "$@"
	do
		# Fixed: calcul.sh was invoked twice per value (once for the test,
		# once to capture the result); run it once and reuse the output.
		res=$(./calcul.sh "$x")
		if [ "$res" -le 0 ]
		then
			s=$s$res'\n'
		else
			s1=$s1$res'\n'
		fi
	done
	# -e turns the accumulated literal \n markers into real newlines.
	echo -e $s > "$name"
	echo -e $s1 > "$name1"
fi
| true
|
4fb2e41320ba14333bc2a532aa94b45c7b1dbd96
|
Shell
|
jelledebock/bestii
|
/Bash opl/92a.sh
|
UTF-8
| 899
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the last N lines of a file (a tail(1) clone) using a ring buffer
# of N entries so memory stays bounded regardless of file size.
# Usage: 92a.sh <file> [count]   (count defaults to 10)
(( $# >= 1 )) || { echo Gebruik: "$0" bestandsnaam aantal >&2 ; exit 1 ; }
[[ -f "$1" ]] || { echo $1 is geen bestand >&2 ; exit 1 ; }
# Open the file on fd 3 for reading.
exec 3< "$1"
aantal=${2-10} #${parameter-value}: use parameter $2 if it exists, otherwise default to 10
tel=0
while IFS='' read -u3 lijn ; do # read the file line by line into $lijn; IFS='' keeps leading whitespace intact
tot[tel % aantal]=$lijn # store $lijn in the ring buffer 'tot' at position: tel modulo aantal
(( tel++ ))
done
exec 3<&-
(( aantal <= tel )) || aantal=$tel # if aantal > tel (the file has fewer lines than requested), clamp aantal to the line count
i=0
while (( i < aantal )) ; do # replay the buffered lines in order, starting at the oldest entry
echo "${tot[(tel + i) % aantal]}"
(( i ++ ))
done
| true
|
a336db18e02a2fbfbeb24606d41f6d200796e967
|
Shell
|
gc-ss/Raft-Membership-Service
|
/network_partition_demo.sh
|
UTF-8
| 528
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch a 5-node Raft membership demo: two normal background members, two
# members that simulate a network partition after 30s, and one foreground
# member started as leader.

# Create directory for writing logs to, if it does not already exist
mkdir -p MemberLogs;

# Fixed: the old message interpolated $NUM_SERVERS, which was never set and
# always printed empty; state the actual node count instead.
echo "Launching 5 nodes"
for i in $(seq 2 3)
do
   # Normal member: no partition delay.
   python3 "$(pwd)"/member/Member.py False 0 &
done
for i in $(seq 4 5)
do
   # Partitioned member: switches to the alternate multicast group after 30s.
   python3 "$(pwd)"/member/Member.py False 30 '224.3.29.72' &
   echo "Partition node launched"
done
# Leader runs in the foreground so the script blocks until it exits.
python3 "$(pwd)"/member/Member.py True 0
| true
|
e546f170102cc3968f93dd9aa180c61a6f0ee130
|
Shell
|
hak5/sharkjack-payloads
|
/payloads/library/util/Backup-and-Restore-scripts-with-logging-and-notification/restore.sh
|
UTF-8
| 10,611
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Title: restore.sh
# Description: Restore backed-up date and install packages on SharkJack
# Execute with: bash ./restore.sh /path/to/backup.zip (e.g. "bash ./restore.sh /tmp/1-20200101-SharkJack-backup.zip")
# Copy the backup file to the Shark Jack's /tmp directory via SCP (e.g. "scp 1-20200101-SharkJack-backup.zip root@172.16.24.1:/tmp/")
# Author: Robert Coemans
# Version: 1.0 (20-08-2020)
# Category: Util
#
# Dependencies: this payload requires you to have the following packages already installed and configured via 'opkg install' (do 'opkg update' first):
# - curl = E.g. to grab external IP address and to post notifications
# - unzip
#
# LED indications (https://docs.hak5.org/hc/en-us/articles/360010554653-LED)
# - Setting up = Magenta solid [LED SETUP]
# - Restoring = Yellow single blink [LED ATTACK]
# - Finishing up = Yellow double blink [LED STAGE2]
# - Finished = Green very fast blinking followed by solid [LED FINISH]
# ****************************************************************************************************
# Configuration
# ****************************************************************************************************
# Setup toggles
NOTIFY_PUSHOVER=true
START_CLOUD_C2_CLIENT=false
# Restore toggles
INSTALL_PACKAGES=false
RESTORE_ONLY_NEWER_FILES=false # If set to false all files from backup will be restored even older files!
# Finish toggles
EXFIL_TO_CLOUD_C2=true
EXFIL_TO_SCP=false
# Setup variables
RESTORE_DIR_ROOT="/root/restore" # Be careful, this folder and all its contents including subfolders will be deleted!
TODAY=$(date +%Y%m%d)
START_TIME=$(date)
BATTERY_STATUS=$(BATTERY)
CLOUD_C2_PROVISION="/etc/device.config"
# Restore variables
OPKG_PACKAGES_TO_INSTALL=( "unzip" "zip" "nano" "curl" "lldpd" "bind-dig" "bind-host" "libustream-openssl" )
RESTORE_DESTINATION_USER="{username}" # Generate a ssh key (ssh-keygen) on the destination host and copy it (~/.ssh/id_rsa_pub) to the SharkJack (~/.ssh/authorized/keys) in order to bypass password!
RESTORE_DESTINATION_HOST="192.168.10.1"
RESTORE_DESTINATION_DIR_ROOT="/some/destination/folder/for/log_file"
# Exfiltrate and notification variables
PUSHOVER_API_POST_URL="https://api.pushover.net/1/messages.json"
PUSHOVER_APPLICATION_TOKEN="{your-application-token}"
PUSHOVER_USER_TOKEN="{your-user-token}"
PUSHOVER_PRIORITY="1" # send as -2 to generate no notification/alert, -1 to always send as a quiet notification or 1 to display as high-priority and bypass the user's quiet hours!
PUSHOVER_DEVICE="{your-device}" # Multiple devices may be separated by a comma!
# ****************************************************************************************************
# Setup functions
# ****************************************************************************************************
function CHECK_INPUT_PARAM() {
if [ $# -lt 1 ]; then
echo "Please specify the backup.zip file to be restored (e.g. "bash $0 /tmp/1-20200101-SharkJack-backup.zip")."
exit
elif [ ! -f "$1" ]; then
echo "$1 is not an existing file, please specify a backup.zip file to be restored (e.g. "bash $0 /tmp/1-20200101-SharkJack-backup.zip")."
exit
elif [ "${1##*.}" == "zip" ]; then
BACKUP_FILENAME=$(basename $1)
BACKUP_FILENAME=${BACKUP_FILENAME%.*}
else
echo "$1 is not an zip file, please specify a backup.zip file to be restored (e.g. "bash $0 /tmp/1-20200101-SharkJack-backup.zip")."
exit
fi
return
}
function CREATE_RESTORE_FOLDER() {
if [ -d "$RESTORE_DIR_ROOT" ]; then
rm -r "$RESTORE_DIR_ROOT"
fi
mkdir -p "$RESTORE_DIR_ROOT" > /dev/null
RESTORE_DIR="$RESTORE_DIR_ROOT/$BACKUP_FILENAME"
mkdir -p "$RESTORE_DIR" > /dev/null
return
}
function INITIALIZE_LOG_FILE() {
LOG_FILE=$RESTORE_DIR_ROOT/$BACKUP_FILENAME-restore.log
touch $LOG_FILE
echo "****************************************************************************************************" >> $LOG_FILE
echo "Restore executed at: $START_TIME" >> $LOG_FILE
echo "SharkJack battery status: $BATTERY_STATUS" >> $LOG_FILE
echo "****************************************************************************************************" >> $LOG_FILE
echo >> $LOG_FILE
echo "Free diskspace before actions: $(df -h | grep overlayfs | awk {'print $4'})" >> $LOG_FILE
echo "Restore directory has been created: $RESTORE_DIR" >> $LOG_FILE
return
}
function RESTORE_STARTED_NOTIFICATION() {
if [ "$NOTIFY_PUSHOVER" = "true" ]; then
curl -s --form-string token="$PUSHOVER_APPLICATION_TOKEN" --form-string user="$PUSHOVER_USER_TOKEN" --form-string priority="$PUSHOVER_PRIORITY" --form-string device="$PUSHOVER_DEVICE" --form-string title="SharkJack restore started on date: $(date '+%d-%m-%Y'), time: $(date '+%H:%M') $(date '+%Z %z')" --form-string message="Restore identifier: $BACKUP_FILENAME" $PUSHOVER_API_POST_URL > /dev/null && echo "Restore started notification has been sent to Pushover" >> $LOG_FILE || echo "Restore started notification has NOT been sent to Pushover as something went wrong" >> $LOG_FILE
fi
return
}
function START_CLOUD_C2_CLIENT() {
if [ "$START_CLOUD_C2_CLIENT" = "true" ]; then
if [[ -f "$CLOUD_C2_PROVISION" ]]; then
C2CONNECT
while ! pgrep cc-client; do sleep 1; done
echo "Connected to Cloud C2" >> $LOG_FILE
else
echo "Cloud C2 client configuration file ($CLOUD_C2_PROVISION) does not exists" >> $LOG_FILE
fi
fi
return
}
# ****************************************************************************************************
# Restore functions
# ****************************************************************************************************
function INSTALL_PACKAGES() {
if [ "$INSTALL_PACKAGES" = "true" ]; then
echo "INSTALL_PACKAGES function to be implemented!"
# Wait until Shark Jack has an IP address
while [ -z "$IPADDR" ]; do sleep 1 && IPADDR=$(ifconfig eth0 | grep "inet addr"); done
#opkg update >> $LOG_FILE 2>&1 && echo "opkg (open package management) has been updated succesfully" >> $LOG_FILE || echo "opkg (open package management) has not been (fully) updated" >> $LOG_FILE
opkg update && echo "opkg (open package management) has been updated succesfully" >> $LOG_FILE || echo "opkg (open package management) has not been (fully) updated" >> $LOG_FILE
for OPKG_PACKAGE_TO_INSTALL in ${OPKG_PACKAGES_TO_INSTALL[@]}; do
#opkg install $OPKG_PACKAGE_TO_INSTALL >> $LOG_FILE 2>&1 && echo "Package $OPKG_PACKAGE_TO_INSTALL has been installed succesfully" >> $LOG_FILE || echo "Package $OPKG_PACKAGE_TO_INSTALL has not been installed" >> $LOG_FILE
opkg install $OPKG_PACKAGE_TO_INSTALL && echo "Package $OPKG_PACKAGE_TO_INSTALL has been installed succesfully" >> $LOG_FILE || echo "Package $OPKG_PACKAGE_TO_INSTALL has not been installed" >> $LOG_FILE
done
fi
return
}
function RESTORE_DATA() {
# Extract the backup zip ($1) into the staging dir, copy its contents over
# the root filesystem, then remove the staging dir. Each step logs its
# success or failure to $LOG_FILE.
unzip $1 -d $RESTORE_DIR && echo "Backup file $1 has been extracted" >> $LOG_FILE || echo "Backup file $1 has NOT been extracted" >> $LOG_FILE
if [ "$RESTORE_ONLY_NEWER_FILES" = "true" ]; then
# -u skips destination files that are newer than the backup copy.
cp -ru $RESTORE_DIR/* / && echo "Files from backup $BACKUP_FILENAME has been restored while skipping existing newer files" >> $LOG_FILE || echo "Something went wrong, no files have been restored" >> $LOG_FILE
else
# Fixed: the glob was quoted ("$RESTORE_DIR/*"), so it never expanded and
# cp looked for a file literally named '*' — the default (overwrite)
# restore path always failed. The glob must stay unquoted to expand.
cp -r $RESTORE_DIR/* / && echo "Files from backup $BACKUP_FILENAME has been restored while overwriting existing files" >> $LOG_FILE || echo "Something went wrong, no files have been restored" >> $LOG_FILE
fi
rm -r "$RESTORE_DIR" && echo "Extraction folder $RESTORE_DIR has been cleaned up" >> $LOG_FILE || echo "Extraction folder $RESTORE_DIR has NOT been cleaned up" >> $LOG_FILE
}
# ****************************************************************************************************
# Finish functions
# ****************************************************************************************************
function EXFIL_TO_CLOUD_C2() {
if [ "$EXFIL_TO_CLOUD_C2" = "true" ]; then
if [[ $(pgrep cc-client) ]]; then
LOG_FILE_DESC="$BACKUP_FILENAME-restore-log"
C2EXFIL STRING $LOG_FILE $LOG_FILE_DESC && echo "Exfiltration of $LOG_FILE to Cloud C2 has passed" >> $LOG_FILE || echo "Exfiltration of $LOG_FILE to Cloud C2 has failed" >> $LOG_FILE
else
echo "Exfiltration of $LOOT_FILE to Cloud C2 has failed, CC-CLIENT seems not to be running" >> $LOG_FILE
fi
fi
return
}
function EXFIL_TO_SCP() {
if [ "$EXFIL_TO_SCP" = "true" ]; then
scp "$LOG_FILE" "$RESTORE_DESTINATION_USER@$RESTORE_DESTINATION_HOST:$RESTORE_DESTINATION_DIR_ROOT" && echo "Exfiltration of $LOG_FILE to $BACKUP_DESTINATION_HOST:$BACKUP_DESTINATION_DIR_ROOT/ has passed" >> $LOG_FILE || echo "Exfiltration of $LOG_FILE to $BACKUP_DESTINATION_HOST:$BACKUP_DESTINATION_DIR_ROOT/ has failed" >> $LOG_FILE
fi
return
}
function RESTORE_COMPLETED_NOTIFICATION() {
if [ "$NOTIFY_PUSHOVER" = "true" ]; then
curl -s --form-string token="$PUSHOVER_APPLICATION_TOKEN" --form-string user="$PUSHOVER_USER_TOKEN" --form-string priority="$PUSHOVER_PRIORITY" --form-string device="$PUSHOVER_DEVICE" --form-string title="SharkJack restore completed message" --form-string message="Restore identifier: $BACKUP_FILENAME, Complete restore took $SECONDS seconds" $PUSHOVER_API_POST_URL > /dev/null && echo "Restore completed notification has been sent to Pushover" >> $LOG_FILE || echo "Restore completed notification has NOT been sent to Pushover as something went wrong" >> $LOG_FILE
fi
return
}
# ****************************************************************************************************
# Execute payload
# ****************************************************************************************************
# Setup
LED SETUP
CHECK_INPUT_PARAM $1 # Checks whether given paramerter is an existing zip file
CREATE_RESTORE_FOLDER # Checks whether restore folder exists and creates or empties if required
INITIALIZE_LOG_FILE # Initialize the log file
RESTORE_STARTED_NOTIFICATION
START_CLOUD_C2_CLIENT
# Restore
LED ATTACK
INSTALL_PACKAGES
RESTORE_DATA $1
# Finish
LED STAGE2
echo "Free diskspace after actions: $(df -h | grep overlayfs | awk {'print $4'})" >> $LOG_FILE
echo "Restore script took $SECONDS seconds" >> $LOG_FILE
EXFIL_TO_CLOUD_C2
EXFIL_TO_SCP
RESTORE_COMPLETED_NOTIFICATION
sync # Sync filesystem in order to prevent data loss
# ****************************************************************************************************
# Prevent logging after this line!
# ****************************************************************************************************
LED FINISH
echo
cat $LOG_FILE
| true
|
8cacf9e9ebd1b70fc6c193ecb5fa39aad296f2b4
|
Shell
|
bcourbon/h2gglobe
|
/Reduction/check_jobs.sh
|
UTF-8
| 554
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Scan the tail of batch-job log files for progress/error markers, colorize
# known failure strings, then list any SHA1-like hashes found in the logs.
if [[ -z $1 ]]; then
echo "usage: $0 <directory with log files>"
exit 1
fi
# Grab matching lines (with 2 lines of leading context, then re-filter to
# the matches themselves) and highlight failures in red, .root files green.
tail -n 100 $1/*.log | egrep -B 2 '=>|cmsStage|rfcp|Red|Break|Disk quota exceeded' | egrep '=>|cmsStage|rfcp|[Rr]ed|Break|Disk quota exceeded' |
#COLOR RED
sed "s:Disk quota exceeded:\x1b\[01;31m&\x1b\[00m:g" |
sed "s:CPU time limit exceeded:\x1b\[01;31m&\x1b\[00m:g" |
#COLOR GREEN
sed "s:[^/]\+\.root:\x1b\[01;32m&\x1b\[00m:g"
echo -e "[ \033[01;35mSHA1\033[00m ]"
# NOTE(review): the sed program below spans two lines and appears to embed a
# literal control character (splitting lines on it before matching 40+ hex
# chars) — preserved byte-for-byte; verify against the original file.
tail -n 100 $1/*.log | sed 's/
/\r\n/g' | grep '^[0-9a-z]\{40,\}'
| true
|
5ad24f8aba5ee9f89736f5cf35526ee8ebb97c8b
|
Shell
|
FazioNico/node-micro
|
/scripts/install.sh
|
UTF-8
| 937
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# For every changed microservice (as reported by findpackages.sh), build its
# dev Docker image and save it into ./docker-cache for later reuse.
# findpackages.sh is expected to set $packages to the changed package dirs.
source $(pwd)/scripts/findpackages.sh
echo $packages | sed 's#.*/##'
# define project rootDirectory
rootDirectory=$(pwd)
# create docker-cache workspace directory
mkdir docker-cache
# loop over each microservice updated in the packages folder
for dir in $packages; do
# go to microservice folder
cd $dir
echo $(pwd)
# service name = basename of the microservice directory
serviceName=$(pwd | sed 's#.*/##')
echo "[INSTALL] ${serviceName} microservice: packages dependencies"
# install project dependencies
docker build -f Dockerfile.dev -t ${serviceName} .
# save image to docker cache
echo "[SAVE DEV IMAGE] ${serviceName} microservice"
docker save -o $rootDirectory/docker-cache/${serviceName}.tar ${serviceName}
# return to rootDirectory project
cd $rootDirectory
done
echo '----------------------------------------------------'
echo '----------------------------------------------------'
echo '[SUCCESS] Project dependencies install with success!'
| true
|
a8ca13247aeb72e1e00a6d0400e4ae81cb6c2e2d
|
Shell
|
mikechau/xmind
|
/docker/build.sh
|
UTF-8
| 322
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build an XMind RPM inside Docker and collect the result in docker/dist.
#
# Usage: build.sh <version> <target>
set -euxo pipefail

DIST_DIR="/build/RPMs/x86_64"
VERSION=$1
TARGET=$2

docker build . -f "docker/${VERSION}/${TARGET}/Dockerfile" --rm --tag "xmind-rpm-${VERSION}-${TARGET}"

mkdir -p docker/dist

# Mount the host dist dir at the container's RPM output path and pass the
# invoking UID so the produced files are not root-owned.
# (Backticks replaced with $(...); pwd/$USER and the image tag are quoted.)
docker run --rm -v "$(pwd)/docker/dist:${DIST_DIR}" -e LOCAL_USER_ID="$(id -u "$USER")" "xmind-rpm-${VERSION}-${TARGET}"
| true
|
6af11bd5f222e70e722f8ffa617fdc7af51ebf18
|
Shell
|
VapeShoppe/vapeshoppe.github.io
|
/tester.sh
|
UTF-8
| 1,338
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Dev helper: set up the Ruby gem path for the detected Ruby version,
# install bundler and the site's gems if missing, open a browser tab,
# and serve the Jekyll site on port 4000.
#colors
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
reset=`tput sgr0`
echo "${green} _____ __ _____ _____"
echo "| | | | | | __|"
echo "| --| |__| | | | |"
echo "|_____|_____|_____|_____|${reset}"
echo "${yellow}Exporting path${reset}"
# Major.minor of the installed Ruby, e.g. "2.2" from "ruby 2.2.4p230 ...".
rversion=`ruby --version | awk {'print substr($2, 0, 3)'}`
case "$rversion" in
2.2)
echo "2.2.0 path set"
export PATH=~/.gem/ruby/2.2.0/bin:$PATH
;;
1.9)
echo "1.9 path set"
export PATH=~/.gem/ruby/1.9.0/bin:$PATH
;;
*)
# Unrecognized Ruby version: just report it, leave PATH untouched.
echo {$rversion}
;;
esac
echo "${yellow}Checking Dependencies${reset}"
# Install bundler into the user gem dir unless one of the known paths has it.
if [ -f ~/.gem/ruby/2.2.0/bin/bundler ]
then
echo "${green}Found Bundler${reset}"
elif [ -f ~/.gem/ruby/1.9.0/bin/bundler ]
then
echo "${green}Found Bundler${reset}"
else
echo "${red}Installing Bundler${reset}"
gem install bundler --user-install
fi
echo "${yellow}Checking Dependencies${reset}"
# A vendor/ directory is taken as proof the gems were already installed.
if [ -d vendor ]
then
echo "${green}Found vendor folder${reset}"
else
echo "${red}Installing Dependencies${reset}"
bundle install --path vendor
fi
echo "${green}Opening Browser Tab...${reset}"
# Open the tab before the server is up; the user must refresh once it starts.
xdg-open http://0.0.0.0:4000 &
echo "${yellow}You Will Need To Refresh Your Browser After The Server Starts${reset}"
echo "${green}Statring Jekyll${reset}"
notify-send "Starting Server"
# Blocks until the server is stopped (Ctrl-C).
bundle exec jekyll serve
echo "Bye!"
exit
| true
|
7f2333d26f3b4f45e0776441c1c4b8c1eaf01dc2
|
Shell
|
Cloudxtreme/platform-hostapd
|
/buildimg.sh
|
UTF-8
| 1,113
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Travis CI build step: build the platform service image, push it for
# non-master branches, and trigger a platform-configure rebuild for
# release branches.
set -eu
# e.g. 'platform-dokku'
REPONAME=$(echo $TRAVIS_REPO_SLUG | cut -f2 -d '/')
# e.g. 'dokku'
SERVICENAME=$(echo $REPONAME | sed 's/^platform-//')
# Tag the image with the branch name.
docker build -t experimentalplatform/$SERVICENAME:$TRAVIS_BRANCH .
if [ "${TRAVIS_BRANCH}" == "master" ]; then
echo -e "\n\nWe're not uploading master anywhere."
else
docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
docker push experimentalplatform/$SERVICENAME:$TRAVIS_BRANCH
# For any branch other than development, kick off a downstream
# platform-configure build via the Travis API (v3 request endpoint).
if [ "$TRAVIS_BRANCH" != "development" ]; then
BODY="{ \"request\": {
\"message\": \"Triggered by '$TRAVIS_REPO_SLUG'\",
\"config\": {
\"env\": {
\"SERVICE_TAG\": \"$TRAVIS_BRANCH\",
\"SERVICE_NAME\": \"$SERVICENAME\"
}}}}"
URL="https://api.travis-ci.org/repo/experimental-platform%2Fplatform-configure/requests"
echo "URL: $URL"
echo "BODY: $BODY"
curl -f -s -X POST \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Travis-API-Version: 3" \
-H "Authorization: token $TRAVIS_TOKEN" \
-d "$BODY" \
$URL
fi
fi
|
b93c56a4c670b5e345c1782a4adbadf06fb2173d
|
Shell
|
OnGle/plone
|
/conf.d/downloads
|
UTF-8
| 260
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Download the Plone unified installer tarball into /usr/local/src,
# honouring FAB_HTTP_PROXY when it is set.

# dl URL DESTDIR — fetch URL into DESTDIR with curl
# (-L follow redirects, -f fail on HTTP errors, -O keep remote filename).
# $PROXY is intentionally unquoted: it expands to two words or nothing.
dl() {
	[ "$FAB_HTTP_PROXY" ] && PROXY="--proxy $FAB_HTTP_PROXY"
	cd "$2"; curl -L -f -O $PROXY "$1"; cd -
}

VERSION="5.0/5.0.8/+download/Plone-5.0.8-UnifiedInstaller.tgz"
URL="https://launchpad.net/plone/$VERSION"
# Quote the URL argument (it contains '+', and unquoted expansions are
# fragile under word splitting/globbing).
dl "$URL" /usr/local/src
| true
|
99e55368dac94ebb61a55a22d12d05e3497a58b1
|
Shell
|
sym3tri/dotfiles
|
/bash/.bashrc
|
UTF-8
| 2,092
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Personal interactive-shell configuration: PATH setup for Homebrew/Python,
# git-aware prompt, aliases, and tool integrations (fzf, nvm, beets).
# Source global definitions
if [ -f /etc/bashrc ]; then
  source /etc/bashrc
fi
export HISTFILESIZE=3000
# Homebrew Software
PATH=/opt/homebrew/bin:$PATH
PATH=/opt/homebrew/opt/python@3.11/libexec/bin:$PATH
# Python
PYTHONPATH="$(brew --prefix)/lib/python@3.11/site-packages"
export PYTHONPATH
#export PYTHONUSERBASE=~/.local
#PATH=$PATH:$PYTHONUSERBASE/bin
#export PATH="$HOME/.pyenv/bin:$PATH"
# Go
#export GOPATH=$HOME/dev/go
#PATH=$PATH:/usr/local/go/bin
#PATH=$PATH:~/usr/local/bin
#PATH=$PATH:$GOPATH/bin
export EDITOR=nvim
# GIT: use the git-provided prompt helper when present so PS1 shows the
# current branch; otherwise fall back to a plain colored prompt.
if [ -e "/usr/share/git-core/contrib/completion/git-prompt.sh" ]; then
  source "/usr/share/git-core/contrib/completion/git-prompt.sh"
  export PS1='[\h \[\e[1;36m\]\W\[\e[m\]$(declare -F __git_ps1 &>/dev/null && __git_ps1 " \[\e[1;32m\](%s)\[\e[m\]")]\$ '
else
  export PS1='[\h \[\e[1;36m\]\W\[\e[m\]]\$ '
fi
alias c='clear'
alias vim=nvim
# allows aliases to work when using sudo
alias sudo='sudo '
alias grep='grep --color=always'
alias vimrc='vim $HOME/.config/nvim/init.vim'
alias bashrc='vim $HOME/.bashrc'
alias ..='cd ..'
alias ...='cd ../..'
alias cwd='pwd | xsel -b'
# GIT ALIASES
alias gs='git status -sb'
alias gc='git commit -m'
alias gco='git checkout'
alias gd='git diff'
alias gr='git rebase'
alias ga='git add'
alias gl='git log --graph --pretty=format:"%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)%Creset" --abbrev-commit'
alias gbd='git for-each-ref --sort=-committerdate refs/heads/ | less'
# gpr relies on git_current_origin / git_current_branch being defined
# elsewhere (not in this file).
alias gpr='open "https://github.com/$(git_current_origin)/pull/new/$(git_current_branch)"'
# GPG
# FZF
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# NVM
export NVM_DIR="/Users/ed/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && source "$NVM_DIR/nvm.sh" # This loads nvm
# beets: point at the external library when the volume is mounted.
if [ -d /Volumes/2TB/audio/beets/master ]; then
  export BEETSDIR=/Volumes/2TB/audio/beets/master
fi
# Make sure colors show up in iTerm
export CLICOLOR=1
export LSCOLORS=dxfxcxdxbxegedabagacad
export TERM=xterm-256color
# NOTE(review): this unconditionally overrides the conditional BEETSDIR set
# above — likely only one of the two is intended.
export BEETSDIR=/Volumes/extern/audio/beets/master
randpw(){ /usr/bin/openssl rand -base64 32; }
| true
|
53296bf65cd6bb7e3e337d6502c07defcb64b158
|
Shell
|
numbnet/wordpress-vm
|
/static/change_db_pass.sh
|
UTF-8
| 956
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=2034,2059
true
SCRIPT_NAME="Change Database Password"
# shellcheck source=lib.sh
source /var/scripts/fetch_lib.sh || source <(curl -sL https://raw.githubusercontent.com/techandme/wordpress-vm/master/lib.sh)
# T&M Hansson IT AB © - 2020, https://www.hanssonit.se/
# Check for errors + debug code and abort if something isn't right
# 1 = ON
# 0 = OFF
DEBUG=0
debug_mode
# Change MARIADB Password
if mysqladmin -u root password "$NEWMARIADBPASS" > /dev/null 2>&1
then
msg_box "Your new MariaDB root password is: $NEWMARIADBPASS
Please keep it somewhere safe.
To login to MariaDB, simply type 'mysql -u root' from your CLI.
Authentication happens with the UNIX socket. In other words,
no password is needed as long as you have access to the root account."
exit 0
else
print_text_in_color "$IRed" "Changing MARIADB root password failed."
print_text_in_color "$ICyan" "Your old password is: $MARIADBMYCNFPASS"
exit 1
fi
| true
|
8b7977566d44737263f241b7a4d76ffc76b24dbf
|
Shell
|
MORTAL2000/oglplu2
|
/config/tools/run_test-oglplus
|
UTF-8
| 808
| 3.59375
| 4
|
[
"BSL-1.0"
] |
permissive
|
#!/bin/bash
# Copyright Matus Chochlik.
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
kind=${1}
shift
library=${1}
shift
test_src_dir="$(<SOURCE_DIR)/test/${kind}/${library}"
test_bin_dir="$(dirname ${0})/test/${kind}/${library}"
function compile_test() {
make ${1}
return $?
}
function boost_test() {
if ! make ${1}
then return $?
fi
if ! ./${1}
then return $?
fi
}
function memcheck() {
if ! make ${1}
then return $?
fi
if ! valgrind --tool=memcheck --leak-check=full ./${1}
then return $?
fi
}
cd "${test_bin_dir}" &&
for test
do
if [[ -f "${test_src_dir}/${test}.cpp" ]]
then ${kind} ${library}-${test}-${kind}
else echo "Invalid '${library}' '${kind}' test name: '${test}'"
fi
done
| true
|
3ce88dada6113744dd85378cf81bf6a2680187c7
|
Shell
|
anviar/scripts
|
/filename_process.sh
|
UTF-8
| 2,581
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
USAGE="Usage:\n\t$0 [-e] [action] required_strings files
Here is:
-e Process considering file extensions, optional.
Actions can be:
-a|--add string add string at the end of filenames
-p|--prefix string add string at the begining of filenames
-d|--delete string delete string from filename
-r|--replace string to_string replace string to string"
if [[ -z $1 ]]
then
echo -e "${USAGE}"
exit 1
fi
while (( "$#" )); do
case $1 in
"--extension"|"-e" )
fextension=1
shift
;;
"--prefix"|"-p" )
faction=Prefix
if [[ -z $3 ]]
then
echo "Error in syntax"
echo -e "${USAGE}"
exit 1
fi
fstring=$2
shift; shift
break
;;
"--add"|"-a" )
faction=Add
if [[ -z $3 ]]
then
echo "Error in syntax"
echo -e "${USAGE}"
exit 1
fi
fstring=$2
shift; shift
break
;;
"--delete"|"-d" )
faction=Delete
if [[ -z $3 ]]
then
echo "Error in syntax"
echo -e "${USAGE}"
exit 1
fi
fstring=$2
shift; shift
break
;;
"--replace"|"-r" )
faction=Replace
if [[ -z $4 ]]
then
echo "Error in syntax"
echo -e "${USAGE}"
exit 1
fi
fstring=$2
fstringr=$3
shift; shift; shift
break
;;
* )
echo "Warrning: skipped input \"$1\""
shift
;;
esac
done
echo "Debug: action is \"$faction\", string is \"${fstring}\" and may be \"${fstringr}\""
# processing files
for file in $@
do
if [[ ! -f ${file} ]]
then
echo "Error: ${file} not found"
continue
fi
case ${faction} in
"Add" )
if [[ ${fextension} ]]
then
extension=$([[ "${file}" = *.* ]] && echo ".${file##*.}" || echo '')
mv --verbose --interactive ${file} ${file%.*}${fstring}${extension}
else
mv --verbose --interactive ${file} ${file}${fstring}
fi
;;
"Prefix" )
mv --verbose --interactive ${file} $(dirname ${file})/${fstring}$(basename ${file})
;;
"Delete"|"Replace" )
if [[ ${fextension} ]]
then
extension=$([[ "${file}" = *.* ]] && echo ".${file##*.}" || echo '')
fname=$(basename ${file%.*})
else
fname=$(basename ${file})
fi
if [[ "${fname}" == *${fstring}* ]]
then
fnamedst=$(echo ${fname}|sed s/${fstring}/${fstringr}/g)${extension}
mv --verbose --interactive ${file} $(dirname ${file})/${fnamedst}
else
echo ${file} skipped
fi
;;
* )
echo "Error: Action \"${faction}\" not implemented"
;;
esac
done
| true
|
2d41d6664acb98c3705945a1518bfef4d5dbfd7b
|
Shell
|
vnwildman/myutils
|
/src/irs
|
UTF-8
| 355
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
usage() {
echo "Image resize"
echo "Usage: $0 FILE"
exit -1
}
if [ $# -eq 0 ] ; then
usage
fi
# Name of resize file will append .jpg. It can't prepend because
# it false if filename contain dir
resizefile="$1.jpg"
if [[ -f "$resizefile" ]]; then
echo "Ready resize! Ignore."
else
convert "$1" -resize 25% -quality 70 "$resizefile"
fi
| true
|
dc0b1d907602ee7cf20c6753a23aa2e1ed5326f0
|
Shell
|
IBMCloudDevOps/bluemix-python-deploy-sample
|
/bin/downloadArtifacts.sh
|
UTF-8
| 2,157
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
GH_BASE_URL=$OCTOKIT_API_ENDPOINT
# Check if GHE url is set, if not fall back to public GH
if [ -z $OCTOKIT_API_ENDPOINT ]; then
#If we are running in travis, try using OCTOKIT_API_ENDPOINT instead
GH_BASE_URL="https://api.github.com/"
echo "No GitHub url specified, using: $GH_BASE_URL"
fi
#Get the release assets
RELEASE_URL="$GH_BASE_URL"repos"/$APP_REPO_OWNER/$APP_REPO_NAME/releases/latest"
# Get the release data
echo "curling $RELEASE_URL"
# If you are getting rate limited or using GHE, add a GITHUB_OAUTH_TOKEN to authenticate
if [ -n "$GITHUB_OAUTH_TOKEN" ]; then
echo "Using GitHub token"
HEADER="Authorization: Token $GITHUB_OAUTH_TOKEN"
RELEASE=$(curl -H "$HEADER" $RELEASE_URL)
else
echo "Not using GitHub token"
RELEASE=$(curl $RELEASE_URL)
fi
# Parse the release info
ASSET_LIST=$(echo $RELEASE | jq ".assets")
ASSET_LIST_LENGTH=$(echo $ASSET_LIST | jq ". | length")
# If we got a message (error) display it here
echo $RELEASE | jq ".message"
# Put all assets in the deploy directory
mkdir deploy
cd deploy
COUNTER=0
# For each asset in the list
while [ "$COUNTER" -le "$ASSET_LIST_LENGTH" ]; do
echo The counter is $COUNTER
# Get the asset name and URL
ASSET_NAME=$(echo $ASSET_LIST | jq ".[$COUNTER] | .name" | sed s/\'//g | sed s/\"//g)
ASSET_URL=$(echo $ASSET_LIST | jq ".[$COUNTER] | .url" | sed s/\'//g | sed s/\"//g)
let COUNTER=COUNTER+1
# If the URL is not null literal
if [ "$ASSET_URL" != "null" ]; then
echo "curl -L $ASSET_URL > $ASSET_NAME"
# Support for GHE
if [ -n "$GITHUB_OAUTH_TOKEN" ]; then
echo "Using GitHub token"
# Get the artifact
#curl -H "$HEADER" -H "Accept: application/octet-stream" -L $ASSET_URL > $ASSET_NAME
#Trying this method instead
curl -L $ASSET_URL\?access_token=$GITHUB_OAUTH_TOKEN -H 'Accept: application/octet-stream' > $ASSET_NAME
else
# Get the artifact
curl -H "Accept: application/octet-stream" -L $ASSET_URL > $ASSET_NAME
fi
# Expecting all assets to be tarballs, modify as needed
tar -xvzf $ASSET_NAME
# Clean up archive directory
rm $ASSET_NAME
fi
done
| true
|
18384af3c58cc69079a26d5205a6d385cfa59455
|
Shell
|
henrybw/dotfiles
|
/bin/devenv
|
UTF-8
| 681
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# devenv
#
# Script that sets up a specific layout with gvim, a mini ranger browser, and a
# shell. This is designed around the gvim instance being used as a singleton;
# the ranger browser is also configured with this setup in mind. You can open
# files with the singleton gvim instance by using the 'gvim1' script.
#
# Make gvim have most of the screen
i3-msg split h
i3-msg exec gvim && sleep 0.2
i3-msg resize grow left 27 px or 27 ppt && sleep 0.1
# Add a ranger browser above the current shell
i3-msg focus left
i3-msg split v
i3-msg "exec konsole --workdir `pwd` -e 'branger'" && sleep 0.1
i3-msg move up
# Focus back on the original shell
i3-msg focus down
| true
|
af42b3e1e0dbf8f91e529de472d0de25a86bdc65
|
Shell
|
babywyrm/sysadmin
|
/aws/find_unsafe_.sh
|
UTF-8
| 2,693
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/sh
# Script finds AWS Security Groups with potentially unsafe rules and lists instances that use such security groups.
#
# Unsafe security group rules are:
# 1. open to the world (0.0.0.0/0),
# 2. not restricted to another security group,
# 3. not use safe ports (22,80,443; you can set SAFE_PORTS environment variable to override).
#
# To run this script:
# 1. sudo pip install awscli
# and configure AWS region and credentials (https://github.com/aws/aws-cli#getting-started):
# export AWS_DEFAULT_REGION=us-west-2
# export AWS_ACCESS_KEY_ID=<access_key>
# export AWS_SECRET_ACCESS_KEY=<secret_key>
# 2. sudo npm install -g jsontool
# (Manual at http://trentm.com/json/)
#
# After run a set of json-files will be created, see the bottom of the script.
# Set region from env value
AWS_REGION=${AWS_DEFAULT_REGION:-"us-west-2"}
echo "Region: $AWS_REGION"
# Fetch security groups
test -f sg_all.json || aws ec2 describe-security-groups --region $AWS_REGION > sg_all.json
# Filter unsafe security groups
SAFE_PORTS=${SAFE_PORTS:-"22,80,443"}
echo "Safe ports: $SAFE_PORTS"
CODE="
IpPermissionsEgress = undefined;
FilteredIpPermissions = [];
IpPermissions.forEach(function(v){
// skip safe ports
if(v.ToPort==v.FromPort && [${SAFE_PORTS}].indexOf(v.ToPort)!=-1) return;
// skip ports opened to another sg
if(v.UserIdGroupPairs.length>0) return;
// skip permissions where address '0.0.0.0/0' is not used
if(!v.IpRanges.some(function(r){return r.CidrIp=='0.0.0.0/0';})) return;
FilteredIpPermissions.push(v);
});
IpPermissions=undefined;
"
cat sg_all.json | json SecurityGroups | json -e "$CODE" | json -c 'FilteredIpPermissions.length>0' > sg_unsafe_rules.json
# Get instances for each unsafe security group
echo "Potentially unsafe security groups:"
UNSAFE_GROUP_IDS=`cat sg_unsafe_rules.json | json -a GroupId -d,`
for i in $UNSAFE_GROUP_IDS; do
echo $i
test -f sg_instances_$i.json || aws ec2 describe-instances --filter Name=group-id,Values=$i --region $AWS_REGION | \
json Reservations -j | json -a Instances | \
json -g -a PublicDnsName LaunchTime InstanceId Tags -j \
> sg_instances_$i.json
done
echo <<EOF
See AWS Security Groups analyze reports:
- sg_all.json - all security groups
- sg_unsafe_rules.json - filtered potentially unsafe security groups' rules
- sg_instances_<security_group_id> - instance list for each security group from the previous file
Remove these files if you want data to be redownloaded next run.
EOF
#################
#################
##
##
##
EOF
| true
|
79c1a214d61fe3b74396a2221f0ee7175288222a
|
Shell
|
noseka1/ansible-base
|
/roles/openshift_openebs/files/create-openebs-lvm.sh
|
UTF-8
| 520
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Create loop device and LVM volume group
device_name=loop0
device_path=/dev/$device_name
data_path=/var/local/${device_name}-device-data.img
data_size=2048G
if [ ! -f $data_path ]; then
truncate --size $data_size $data_path
losetup --partscan --show $device_path $data_path
pvcreate $device_path
vgcreate $device_name-vg $device_path
else
losetup --partscan $device_path $data_path
fi
# Install snapshot and thin volume module for lvm
sudo modprobe dm-snapshot
sudo modprobe dm_thin_pool
| true
|
1f05cb4a46820cd48c507d52f8ca01fb9ab1583f
|
Shell
|
regro-cf-autotick-bot/cycamore-feedstock
|
/recipe/run_test.sh
|
UTF-8
| 626
| 3.046875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# setup env for tests
cd "${SRC_DIR}/tests"
export PATH="${PREFIX}/bin:${PATH}"
export CYCLUS_PATH="${PREFIX}/lib/cyclus"
if [ -z "$CYCLUS_NUC_DATA" ]; then
export CYCLUS_NUC_DATA="${PREFIX}/share/cyclus/cyclus_nuc_data.h5"
fi
UNAME="$(uname)"
if [ "${UNAME}" == "Darwin" ]; then
export DYLD_FALLBACK_LIBRARY_PATH="${PREFIX}/lib/cyclus:${PREFIX}/lib:${DYLD_FALLBACK_LIBRARY_PATH}"
else
export LD_LIBRARY_PATH="${PREFIX}/lib/cyclus:${PREFIX}/lib:${LD_LIBRARY_PATH}"
fi
# test that agents exist
${PREFIX}/bin/cyclus -l :cycamore
# run unit tests
${PREFIX}/bin/cycamore_unit_tests --gtest_filter=-MixerTests.*
| true
|
fb0d4bebf8eb870fa50def13f3ce6f5c9c3e58ee
|
Shell
|
IgorBoyarshin/dotfiles
|
/scripts/lfub
|
UTF-8
| 424
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
cleanup() {
exec 3>&-
rm "$FIFO_UEBERZUG"
}
if [ -n "SSH_CLIENT" ] || [ -n "SSH_TTY" ]; then
lf "$@"
else
[ ! -d "$HOME/.cache/lf" ] && mkdir -p "$HOME/.cache/lf"
export FIFO_UEBERZUG="$HOME/.cache/lf/ueberzug-$$"
mkfifo "$FIFO_UEBERZUG"
ueberzug layer -s <"$FIFO_UEBERZUG" -p json &
exec 3>"$FIFO_UEBERZUG"
trap cleanup HUP INT QUIT TERM PWR EXIT
lf "$@" 3>&-
fi
| true
|
0412895ec18ef196a7745f495c30398136c26fb4
|
Shell
|
BruceZhanKai/LinuxFaceDetectionCLM
|
/execute/Setup-expect.sh
|
UTF-8
| 2,640
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
###!/usr/bin/expect
set PASS = NKG
set USR = nkg-1f-6001
echo "Start install expect."
sudo apt-get update
sudo apt-get install expect
echo "Finish install expect."
echo "Start Copy File."
#cp ./flycapture2-2.11.3.121-amd64-pkg.tgz ~/Downloads
#cp ./LinuxLocalPredict.7z /home/$USER
#cd ~/Downloads
tar zxvf flycapture2-2.11.3.121-amd64-pkg.tgz
cd flycapture2-2.11.3.121-amd64
echo "Start install File."
#sudo rm /var/lib/apt/lists/lock
#sudo rm /var/cache/apt/archives/lock
#sudo rm /var/lib/dpkg/lock
expect <<EOF
spawn sudo apt-get install build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get install libraw1394-11 libgtkmm-2.4-1c2a \
libglademm-2.4-1c2a libgtkglextmm-x11-1.2-dev libgtkglextmm-x11-1.2 libusb-1.0-0
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get install libglademm-2.4-1v5:i386
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get install libgtkmm-2.4-1v5
expect {
"password for:" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get install libgtkmm-2.4-dev libgtkglextmm-x11-1.2-dev
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get -f install
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get install libglademm-2.4-1v5
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo apt-get -f install
expect {
"password for" {send "NKG\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
eof
}
EOF
expect <<EOF
spawn sudo sh install_flycapture.sh
expect {
"password for" {send "NKG\r";exp_continue}
"install all the FlyCapture2 SDK packages?" {send "y\r";exp_continue}
"Do you want to continue?" {send "y\r";exp_continue}
"(y/n)" {send "y\r";exp_continue}
"Enter the name of the user to add" {send "nkg-1f-6001\r";exp_continue}
eof
}
EOF
echo "Finish install File."
echo "Finish Copy File."
| true
|
7fd7630b05d9f38b493260149f17b9be714009f9
|
Shell
|
clemsonbds/cloud-latency
|
/benchmarks/iperf/run.sh
|
UTF-8
| 1,948
| 3.828125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
REPO=$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && git rev-parse --show-toplevel)
UTIL=${REPO}/util
resultName=none
seconds=10
hostfile="/nfs/mpi.hosts"
groupClass=none
nodeClasses=none
msgBytes=1
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--resultDir)
resultDir="$2"
shift # past argument
shift # past value
;;
--resultName)
resultName="$2"
shift # past argument
shift # past value
;;
--seconds)
seconds="$2"
shift
shift
;;
--hostfilter)
hostfilter="$2"
shift
shift
;;
--hostfile)
hostfile="$2"
shift
shift
;;
--nodeClassifier)
nodeClassifier="$2"
shift
shift
;;
--groupClass)
groupClass="$2"
shift
shift
;;
--dryrun)
dryrun="T"
shift
;;
--trash)
trash="T"
shift
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters
[ -z "${hostfilter}" ] && hostfilter=`${UTIL}/hostfileToHosts.sh ${hostfile} 2`
executable="iperf3"
if [ ! -z "${resultDir}" ]; then
[ ! -z "${nodeClassifier}" ] && nodeClasses=`${UTIL}/classifyNodes.sh ${hostfilter} ${nodeClassifier}`
timestamp="`date '+%Y-%m-%d_%H:%M:%S'`"
outFile="${resultDir}/iperf.${resultName}.${nodeClasses}.${groupClass}.${timestamp}.json"
output="> ${outFile}"
fi
server=`echo ${hostfilter} | cut -d, -f1`
serverParams="-s -1"
client=`echo ${hostfilter} | cut -d, -f2`
clientParams="-c ${server} -t ${seconds} -J"
echo Running iperf between ${server} and ${client}.
if [ -z "$dryrun" ]; then
ssh -q -f ${server} "sh -c 'nohup ${executable} ${serverParams} > /dev/null 2>&1 &'" # start in background and move on
ssh -q ${client} "${executable} ${clientParams}" ${output}
# throw away?
[ -z "$trash" ] || rm -f ${outFile}
else
echo "fix this someday"
fi
| true
|
aa47540f8b9af48a8ee70cfb8e5d1c760726c1a6
|
Shell
|
CarlosDiogo01/NUMA_Aware_Thesis
|
/c_src/__jobs_LUFACT/job_LUFACT_C_OriginalMatrix_TimeTest_ICC.sh
|
UTF-8
| 5,419
| 2.890625
| 3
|
[] |
no_license
|
#PBS -N LUFACT_C_OriginalMatrix_TimeTest_ICC
#PBS -l walltime=20:00:00
#PBS -q mei
#PBS -m abe
#PBS -M carlos.sa01@gmail.com
#PBS -lnodes=1:r641:ppn=32
# Machine USED
read -r node_info<$PBS_NODEFILE
############ Information about algorithm to run ################
alg="LUFACT_C_OriginalMatrix_ICC"
exeICC="$HOME/NUMA_Aware_Thesis/c_src/exes/lufact_sm_matrix-original_icc_16_default"
exeICCprocbindmaster="$HOME/NUMA_Aware_Thesis/c_src/exes/lufact_sm_matrix-original_icc_16_procbind_master"
exeICCprocbindspread="$HOME/NUMA_Aware_Thesis/c_src/exes/lufact_sm_matrix-original_icc_16_procbind_spread"
exeICCprocbindclose="$HOME/NUMA_Aware_Thesis/c_src/exes/lufact_sm_icc_16_procbind_close"
################################################################
########################## Tests configuration ###############################
dataset="1 2 3 4 5"
thread_bundle="1 2 4 8 10 12 16 24 32"
REP=20
index_val_to_collect=10
Project_Folder="$HOME/NUMA_Aware_Thesis/c_src/${alg}_TimeTest_$node_info"
TIMES_ALL_TESTS_PER_SIZE="TIMES_${alg}_ALL_TESTS_PER_SIZE"
##############################################################################
############################### Tests ######################################
# ICC KMP_AFFINITY tests based on "Default" execution. Just changing binding policy on KMP_AFFINITY
test7="KMP_AFFINITY_compact"
test8="KMP_AFFINITY_scatter"
# ICC omp_proc_bind tests
#test9="omp_proc_bind_master"
#test10="omp_proc_bind_spread"
#test11="omp_proc_bind_close"
############################################################################
########### Loading ICC for using intel exe tests ##################
module purge
# ICC 16
source /share/apps/intel/compilers_and_libraries_2016/linux/bin/compilervars.sh intel64
source /share/apps/intel/compilers_and_libraries_2016/linux/mkl/bin/mklvars.sh intel64
#############################################################################
# Go to Project Folder
mkdir $Project_Folder
cd $Project_Folder
# test9 -> omp_proc_bind_master
#mkdir $test9
#for size in $dataset
#do
# mkdir "$test9/Size_$size"
# for thr in $thread_bundle
# do
# csv="times.${alg}_$test9.$size.size.$thr.thr.csv"
# echo "OMP_PROC_BIND=master"
# for ((i = 0; i < $REP; i++))
# do
# $exeICCprocbindmaster -5 $size $thr >> "$test9/Size_$size/$csv"
# done
# sort -t, -nk1 -o "$test9/Size_$size/$csv" "$test9/Size_$size/$csv"
# done
# done
# test10 -> omp_proc_bind_spread
#mkdir $test10
#for size in $dataset
#do
# mkdir "$test10/Size_$size"
# for thr in $thread_bundle
# do
# csv="times.${alg}_$test10.$size.size.$thr.thr.csv"
# echo "OMP_PROC_BIND=spread"
# for ((i = 0; i < $REP; i++))
# do
# $exeICCprocbindspread -5 $size $thr >> "$test10/Size_$size/$csv"
# done
# sort -t, -nk1 -o "$test10/Size_$size/$csv" "$test10/Size_$size/$csv"
# done
# done
# test11 -> omp_proc_bind_close
#mkdir $test11
#for size in $dataset
#do
# mkdir "$test11/Size_$size"
# for thr in $thread_bundle
# do
# csv="times.${alg}_$test11.$size.size.$thr.thr.csv"
# echo "OMP_PROC_BIND=close"
# for ((i = 0; i < $REP; i++))
# do
# $exeICCprocbindclose -5 $size $thr >> "$test11/Size_$size/$csv"
# done
# sort -t, -nk1 -o "$test11/Size_$size/$csv" "$test11/Size_$size/$csv"
# done
# done
# test7 -> KMP_AFFINITY_compact
mkdir $test7
for size in $dataset
do
mkdir "$test7/Size_$size"
for thr in $thread_bundle
do
csv="times.${alg}_$test7.$size.size.$thr.thr.csv"
echo "Test7 - KMP_AFFINITY Compact"
export KMP_AFFINITY=compact
for ((i = 0; i < $REP; i++))
do
$exeICC -5 $size $thr >> "$test7/Size_$size/$csv"
done
sort -t, -nk1 -o "$test7/Size_$size/$csv" "$test7/Size_$size/$csv"
done
done
#test8 -> KMP_AFFINITY_scatter
mkdir $test8
for size in $dataset
do
mkdir "$test8/Size_$size"
for thr in $thread_bundle
do
csv="times.${alg}_$test8.$size.size.$thr.thr.csv"
echo "Test4 - KMP_AFFINITY Scatter"
export KMP_AFFINITY=scatter
for ((i = 0; i < $REP; i++))
do
$exeICC -5 $size $thr >> "$test8/Size_$size/$csv"
done
sort -t, -nk1 -o "$test8/Size_$size/$csv" "$test8/Size_$size/$csv"
done
done
#Extract Medians and merge all times for all tests per thread
mkdir $TIMES_ALL_TESTS_PER_SIZE
for size in $dataset
do
echo "Size_$size","$test7","$test8" >> "$TIMES_ALL_TESTS_PER_SIZE/TIMES_${alg}_Size_$size.csv"
for thr in $thread_bundle
do
median_test7=`cat "$test7/Size_$size/times.${alg}_$test7.$size.size.$thr.thr.csv" | awk 'FNR == '$index_val_to_collect' {print}'`
median_test8=`cat "$test8/Size_$size/times.${alg}_$test8.$size.size.$thr.thr.csv" | awk 'FNR == '$index_val_to_collect' {print}'`
echo "$thr.Threads","$median_test7","$median_test8" >> "$TIMES_ALL_TESTS_PER_SIZE/TIMES_${alg}_Size_$size.csv"
done
done
| true
|
2250d9d9d0941805bc77be153fad4980955a028a
|
Shell
|
MaxwellShih/akutils
|
/strip_primers_parallel.sh
|
UTF-8
| 3,605
| 4.1875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
set -e
# check whether user had supplied -h or --help . If yes display help
if [[ "$1" == "--help" ]] || [[ "$1" == "-h" ]]; then
echo "
Usage (order is important!):
strip_primers_parallel <fastq1> <fastq2> <rev/comp primers as fasta> <threads to use>
Resulting files will be output to a subdirectory called fastq-mcf_out.
This script parallelizes adapter stripping using the fastq-mcf utility from ea-utils.
For this to work, you must have installed (and in your path) ea-utils and NGSutils.
This script is intended for Ubuntu 14.04. I can't help you if you have problems!!
Importantly, this script trims primers from input fastqs without removing any sequence
reads which is important if you need to pass an associated index file against them next
for demultiplexing purposes (eg for QIIME processing of amplicon data).
Rev/comp primers fasta file should contain somthing like this:
>515F-1
TTACCGCGGCTGCTGGCAC
>515F-2
TTACCGCGGCGGCTGGCAC
>806R-1
ATTAGATACCCTTGTAGTCC
>806R-2
ATTAGAAACCCTTGTAGTCC
>806R-3
ATTAGATACCCCTGTAGTCC
"
exit 0
fi
# if other than four arguments supplied, display usage
if [ $# -ne 4 ]; then
echo "
Usage (order is important!):
strip_primers_parallel <fastq1> <fastq2> <rev/comp primers as fasta> <threads to use>
Resulting files will be output to the same directory.
"
exit 1
fi
#Determine number of sequences in input file
echo "
Reading input files...
"
fastqlines=$(cat $1 | wc -l)
fastqseqs=$(($fastqlines/4))
corelines=$(($fastqseqs/$4))
digits=$(grep -o \. <<<$corelines | wc -l)
## set working directory, move there, and check for existing outputs
workdir=$(pwd)
cd $workdir
if [[ ! -d $workdir/fastq-mcf_out ]]; then
mkdir $workdir/fastq-mcf_out
else
echo " Directory fastq-mcf_output exists.
Deleting contents and filtering data.
"
rm -r fastq-mcf_out/*
fi
outdir=$workdir/fastq-mcf_out
if [ "$digits" -lt "4" ]; then
echo " Your fastq input has fewer than 10,000 sequences.
Processing on a single core only.
"
#extract filename bases for output naming purposes
fastq1base=`basename "$1" | cut -d. -f1`
fastq2base=`basename "$2" | cut -d. -f1`
#fastq-mcf command (single process)
`fastq-mcf -0 -t 0.0001 $3 $1 $2 -o $outdir/$fastq1base.mcf.fq -o $outdir/$fastq2base.mcf.fq > $outdir/fastq-mcf.log`
else
echo "
Processing on $4 threads...
"
# make temp dir
mkdir $outdir/mcf-temp
#make log file to compile all logged removals into
echo > $outdir/fastq-mcf.log
#use fastqutils command (NGSutils) to split fastq files according to desired processing level
`fastqutils split $1 $outdir/mcf-temp/r1.temp $4`
`fastqutils split $2 $outdir/mcf-temp/r2.temp $4`
wait
#Parallel processing of fastq-mcf commands in background
for splitseq in $outdir/mcf-temp/r1.*.fastq; do
( splitbase=$(basename $splitseq .fastq)
splitbase2=$(echo $splitbase | sed 's/r1/r2/g')
fastq-mcf -0 -t 0.0001 $3 $outdir/mcf-temp/$splitbase.fastq $outdir/mcf-temp/$splitbase2.fastq -o $outdir/mcf-temp/$splitbase.mcf.fastq -o $outdir/mcf-temp/$splitbase2.mcf.fastq >> $outdir/fastq-mcf.log ) &
done
wait
#Cat results together
cat $outdir/mcf-temp/r1.temp.*.mcf.fastq > $outdir/r1.mcf.fastq
cat $outdir/mcf-temp/r2.temp.*.mcf.fastq > $outdir/r2.mcf.fastq
wait
#Remove temp files
rm -r $outdir/mcf-temp
fi
echo " Processing complete. Filtered data is found in the
following output files:
$outdir/r1.mcf.fastq
$outdir/r2.mcf.fastq
Details can be found in $outdir/fastq-mcf.log
"
| true
|
e17da62c13048cda3daa43beabf9fe710d85017b
|
Shell
|
hemant19cse/git-changelog-generator
|
/Scripts/genchangelog.sh
|
UTF-8
| 3,316
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#Read the config properties
. ./config.props
startTag=""
endTag=""
#outputFormat="md"
if [ $# -eq 0 ];
then
echo "USAGE: sh $0 tag1 tag2"
exit -1
elif [ -z "$1" ] || [ -z "$2" ]
then
echo "USAGE: sh $0 tag1 tag2"
exit -1
fi
startTag=$1
endTag=$2
#If the user provides the output format as command line argument
#if [ ! -z "$3" ]
#then
# if [ "$3" == "md" ] || [ "$3" == "html" ]
# then
# outputFormat=$3
# else
# echo "*** Error => Unknown output format"
# echo "USAGE: output format can only be one of md or html"
# exit -1
# fi
#fi
#File name
fileName=./"$CHANGE_LOG_FILE_NAME".md
logChangesInMarkDownFormat() {
feature="$1_title"
featureTitle=${!feature}
#Append changes for the category if there exists one. othewise header alone will be displayed
changes=""
if [ $CREATE_COMMIT_LINK == "true" ]
then
if [ $REPO_TYPE == "git" ]
then
changes=`git log $startTag...$endTag --pretty=format:"- %s [$COMMIT_LINK_MSG]($PROJECT_COMMIT_BASE_URL%H)\n" | grep "#$1"`
elif [ $REPO_TYPE == "hg" ]
then
changes=`hg log -r "$startTag:$endTag and branch('$REPO_BRANCH')" --template "- {desc} [$COMMIT_LINK_MSG]($PROJECT_COMMIT_BASE_URL{node|short})\n" | grep "#$1"`
fi
#changes=`git log $startTag...$endTag --pretty=format:"- %s [$COMMIT_LINK_MSG]($PROJECT_COMMIT_BASE_URL%H)\n" | grep "#$1"`
else
if [ $REPO_TYPE == "git" ]
then
changes=`git log $startTag...$endTag --pretty=format:'- %s \n' | grep "#$1"` >> ./$fileName
elif [ $REPO_TYPE == "hg" ]
then
changes=`hg log -r "$startTag:$endTag and branch('$REPO_BRANCH')" --template "- {desc} \n" | grep "#$1"`
fi
#changes=`git log $startTag...$endTag --pretty=format:'- %s \n' | grep "#$1"` >> ./$fileName
fi
if [ -z "$changes" ];
then
echo "There is no changes for category \"$1\""
else
#Append change category title
echo "### $featureTitle" >> $fileName
echo $changes >> $fileName
echo "Changes for category \"$1\" has been tracked."
fi
#Remove the commit category tag from the change log
sed -i '' -e "s/#$1//g" ./$fileName
#Remove unwanted space before change for the category
sed -i '' -e "s/ \-/\-/g" ./$fileName
}
main() {
if [ -f $fileName ];
then
echo "*** Error => Change log file:$fileName already exists"
exit -1
fi
echo ""
echo "*** Tracking changes ....."
echo ""
#Append the header title for the change log
echo "# Change Log: $endTag" >> $fileName
echo "" >> $fileName
#Generate change log for each of the category that need to be tracked
for i in $(echo $TRACK_CHANGE_CATEGOTRIES | tr "," "\n")
do
logChangesInMarkDownFormat $i
done
echo ""
echo "*** Finished => Change log has been generated and saved in file: $fileName"
echo "*** HTML Format => If you need the Change log as HTML, then use online tool - https://dillinger.io/ and get the underlying HTML code"
echo "*** Plain Text Format => If you need the Change log as plain text, use the above tool and copy the rendered output text "
echo ""
exit 0
}
main *
| true
|
bf4b041bdc068b1306f1d8e600962fee46ec8a97
|
Shell
|
jasonmimick/product-service
|
/app/app.curl-tester-CHNGEDB.sh
|
UTF-8
| 1,057
| 3.09375
| 3
|
[] |
no_license
|
# Ad-hoc curl driver for the product-service REST API.
# Usage: <script> METHOD DB COLLECTION [args...]
# NOTE(review): this script uses bash features (==, arrays); it already
# required bash because of '==' inside '[ ]' -- confirm it is invoked with
# bash, not dash.
set -x
USERNAME=""
APIKEY=""
BASEURL="http://localhost:5000"
#BASEURL="http://product-service"
METHOD=$1
shift;
DB=$1
shift
COLL=$1
shift
# Build the extra headers as an ARRAY so each "--header" flag and its value
# reach curl as separate, intact arguments.  The previous string form
# (HEADERS="--header \"X-...\"") was word-split on expansion, so curl
# received literal quote characters and fragments of the header value.
HEADERS=(--header "X-MongoDB-Database: ${DB}" --header "X-MongoDB-Collection: ${COLL}")
echo "testing method (with): ${METHOD} $@"
echo "Sending additional HTTP Headers: ${HEADERS[*]}"
if [ "$METHOD" == "CLEAN" ]; then
  curl -X DELETE "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} ${BASEURL}/$@
elif [ "$METHOD" == "GET" ]; then
  curl -X GET "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} ${BASEURL}/?$@
elif [ "$METHOD" == "POST" ]; then
  curl -vv -X POST "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} -d "$@" ${BASEURL}
elif [ "$METHOD" == "PUT" ]; then
  ID=$1
  shift;
  echo "PUT ID=${ID} args=$@"
  curl -X PUT "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} -d "$@" ${BASEURL}/${ID}
elif [ "$METHOD" == "DELETE" ]; then
  curl -X DELETE "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} ${BASEURL}/$@
elif [ "$METHOD" == "DELETE+BODY" ]; then
  curl -vv -X DELETE "${HEADERS[@]}" -u ${USERNAME}:${APIKEY} -d "$@" ${BASEURL}
else
  echo "unknown method: ${METHOD}"
fi
| true
|
e6057b6d86ebb4796ea1b867f9cd7e819553d06c
|
Shell
|
seshendranath/Grpn
|
/NewCerebro_Replicator/opswise_run_script.sh
|
UTF-8
| 11,175
| 3.796875
| 4
|
[] |
no_license
|
###############Variable Description################
### Copies one Teradata table (optionally one partition per day over an
### offset window) into the Cerebro Hadoop cluster via a remote
### zombie_runner job, validates row counts, and records the result in the
### Cerebro metadata store.
### hdfs -> namenode of the hadoop cluster where the copy should happen
### script_host -> The Util Box where the Cerebro Copy script is stored
### schema -> Database of the table being copied
### table -> Table being copied
### inc_key -> partition key of the table, if the table is not partitioned, no argument should be passed
### inc_val -> Date for which copy is supposed to happen, for increment tables, this value could either be a key (integer yyyymmdd ) or a date (yyyy-mm-dd), for full refreshes, it assumes to be the current date
### offset_days -> number of consecutive days (inc_val, inc_val-1, ...) to copy; defaults to 1
hdfs="hdfs://cerebro-namenode.snc1"
script_host="pit-share-hdputil6.snc1"
schema=$1
table=$2
inc_key=$3
inc_val=$4
offset_days=$5
# Teradata DSN; 'cerebro_dsn' is expected in the calling environment
# (presumably set by the Opswise job definition) -- TODO confirm.
dsn=${cerebro_dsn}
# Start of Comment:- If the script is run without any arguments, the script will fail
if [[ -z "$schema" ]]; then
echo " No Schema argument was passed, terminating"
exit 1
else
echo "Schema Name :- ${schema}"
fi
if [ -z "$table" ]; then
echo " No table argument was passed, terminating"
exit 1
else
echo "Table Name :- ${table}"
fi
# End of Comment
# An increment key should always have an increment value with it, else fail the process
if [ ! -z "$inc_key" ] && [ -z "$inc_val" ]; then
echo "Provide an increment value for key: $inc_key"
echo "NOTE: When an increment key is specified, a value should be provided"
exit 1
fi
# End of Comment
#Start of Comment:- If inc_val is not passed as an argument, the script assumes that a full refresh is happening, and the inc_val for directory naming is defaulted to the current date
if [ -z "$inc_val" ]; then
echo "No inc_val and inc_key was passed, Considering this table as a full refresh and defaulting to today's date "
inc_val=`date +%Y%m%d`
full_load=1
else
# 'date -d' doubles as a validity check for the supplied date string.
date -d $inc_val
rc=$?
if [ $rc -ne 0 ]; then
echo "Invalid date provided for increment value !!"
echo "Inc value: $inc_val"
exit 1
fi
echo " Picking the inc_val as ${inc_val} "
full_load=0
fi
#End of Comment
# The offset defines the no. of of days data that should be loaded into Cerebro. For Eg if 7, then we copy over date, date-1, ... date-6
if [ -z "$offset_days" ]; then
echo "No offset_days specified, loading just for a single date"
offset_days=1
elif [ -z "$inc_val" ] && [ -z "$inc_key" ]; then
echo "A full refresh cannot have an offset value!!"
exit 1
else
echo "No. of offset_days specified = $offset_days"
fi
# Remember the user-supplied value: the loop below derives each day's
# inc_val from this original, not from the previous iteration's value.
org_inc_val=$inc_val
# Check if the inc_value provided is a date_key or a date
# date_key=Y -> integer yyyymmdd; date_key=N -> hyphenated yyyy-mm-dd.
if echo $inc_val | egrep -q '^[0-9]+$'; then
echo "We don't have hiphens indate"
date_key="Y"
length=`expr length "$inc_val"`
#Inc_val should be in format yyyyddmm
if [ "$length" -ne 8 ]; then
echo "Invalid/Incorrect increment value for creating partition, inc_val = $inc_val"
exit 1
fi
else
echo "There are hiphens"
date_key="N"
length=`expr length "$inc_val"`
#Inc_val should be in format yyyy-dd-mm
if [ "$length" -ne 10 ]; then
echo "Invalid/Incorrect increment value for creating partition, inc_val = $inc_val"
exit 1
fi
fi
# Main loop: one iteration per day in the offset window.
for i in `seq 1 $offset_days`
do
echo "###########################################################"
echo "Processing for date: $inc_val"
echo "###########################################################"
# Start of Comment:- Check if the file exists for the date of copy that we are trying to do, and a warning message would be printed to alert, but the script will not fail
inc_val_stripped=`echo "${inc_val//-/}"`
echo "hadoop fs -ls ${hdfs}/td_backup/${schema}.${table}/${inc_val_stripped}"
hadoop fs -ls ${hdfs}/td_backup/${schema}.${table}/${inc_val_stripped}
ret_code=$?
if [ "$ret_code" -eq 0 ]; then
echo " File Already Exist for the date ${inc_val} , Did you mean to re-load it?"
echo " Assuming that a backfill is happening, and continuing, if this is not the intention, then contact swason@"
#job_status="failed"
fi
#else
#End of Comment
start_dt=`date "+20%y-%m-%d %T"`
#Start of Comment:- Starting the import of the table, different zombie script will run depending on the format(yyyymmdd or yyyy-mm-dd) of the increment key being passed
# The copy itself runs remotely on script_host via zombie_runner; the \x22
# sequences in the echoed command are literal escaped quotes for log
# readability only.
echo "Starting Import of ${schema}.${table}"
if [ $full_load -eq 0 ]; then
echo " Picking the increment value as ${inc_val} "
echo "$inc_val" | grep -i "-"
if [[ $? -eq 0 ]] ; then
echo "ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test_inc_date/ --context=schema:${schema},base_table:${table},inc_key:\x22${inc_key}\x22,inc_date:${inc_val}"
output=` ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test_inc_date/ --context=schema:${schema},base_table:${table},inc_key:\"${inc_key}\",inc_date:${inc_val}`
else
echo "ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test/ --context=schema:${schema},base_table:${table},inc_key:\x22${inc_key}\x22,inc_date:${inc_val}"
output=` ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test/ --context=schema:${schema},base_table:${table},inc_key:\"${inc_key}\",inc_date:${inc_val}`
fi
ret_code1=$?
echo "$output"|fold -250
# Scan the remote log for real errors while ignoring known-benign ERROR
# lines emitted by the fast-export serde and block-recovery retries.
echo "$output" | grep -v "ERROR serde.TDFastExportInputFormat: found end"|grep -v "ERROR [root:131] - File not found"|grep -v "Error walking path:"| grep -v "ERROR for block" | grep -v "Error Recovery" | grep -i "error "
if [[ $? -eq 0 ]] ; then
echo " Found Error!!"
job_status="failed"
fi
if [ "$ret_code1" -ne 0 ]; then
job_status="failed"
fi
else
echo "No inc_val and inc_key was passed, Considering this table as a full refresh and defauling to today's date "
inc_val=`date +%Y%m%d`
echo "ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test/ --context=schema:${schema},base_table:${table}"
output=` ssh ${script_host} PYTHONPATH=$PYTHONPATH:/home/etl_adhoc/Replicator/ZombieRunner PATH=/usr/local/bin/unixodbc:/opt/teradata/client/Current/tbuild/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/home/etl_adhoc/bin zombie_runner run /home/etl_adhoc/Replicator/scripts/backup_to_cerebro_test/ --context=schema:${schema},base_table:${table} `
ret_code2=$?
echo "$output"|fold -250
echo "$output" |grep -v "ERROR serde.TDFastExportInputFormat: found end"| grep -i "error "
if [[ $? -eq 0 ]] ; then
echo " Found error!!"
job_status="failed"
fi
if [ "$ret_code2" -ne 0 ]; then
job_status="failed"
fi
fi
#End of the Comment
end_dt=`date "+20%y-%m-%d %T"`
# Parsing the logs to extract the source and destination row count
# NOTE(review): this assumes the remote log contains a "Manifest" marker
# followed by "(td_count) ... (hive_count)" separated by ")-" or "|";
# confirm against the zombie_runner log format before changing anything
# in this section.
pattern_to_search="Manifest"
new_output=` echo ${output/*$pattern_to_search/$pattern_to_search} `
echo "$new_output" | grep ")-"
if [[ $? -eq 0 ]] ; then
new_pattern_to_search=")-"
else
new_pattern_to_search="|"
fi
final_output=`echo ${new_output/$new_pattern_to_search*/$new_pattern_to_search} `
td_count=` echo $final_output | cut -d" " -f 2 | sed 's/(//g' | sed 's/)//g' | sed 's/|//g' `
hive_count=` echo $final_output | cut -d" " -f 5 | sed 's/(//g' | sed 's/)//g' | sed 's/|//g' `
hive_count_trimmed=` echo $hive_count| cut -d '-' -f 1 |sed 's/[a-z]//g'|sed 's/[A-Z]//g'|sed 's/://g' `
# The hive count is truncated to the same number of digits as the TD count
# before comparing.
len=`expr length "$td_count"`
echo " Length=${len}"
hive_count_trimmed=`echo "${hive_count_trimmed}"|cut -c1-"${len}"`
echo " Teradata Count : ${td_count}"
echo " Hive Count : ${hive_count_trimmed}"
if [ "${td_count}" -eq "${hive_count_trimmed}" ]; then
echo "Counts have matched, Success!!"
if [ "${job_status}" != "failed" ]; then
job_status="complete"
fi
else
echo " Counts have not matched, exiting, and the script will fail "
job_status="failed"
fi
#Check if the counts retreived is not junk value
if echo $td_count | egrep -q '^[0-9]+$'; then
echo ""
else
echo "Some junk value retreived for count, which is incorrect"
td_count=0
hive_count_trimmed=0
job_status="failed"
fi
#For mv_fact_collections_master we need to do the dynamic partitioning after successful copy
if [ "${table}" == "mv_fact_collections_master" ] && [ "${job_status}" == "complete" ]; then
echo "sh /var/groupon/etl/deploy/etl/cerebro/run_partition.sh"
output=`sh /var/groupon/etl/deploy/etl/cerebro/run_partition.sh`
rcode=$?
if [ "$rcode" -ne 0 ]; then
job_status="failed"
fi
fi
#Update the cerebro metadata with the relevant info about the copy
echo "/usr/local/bin/python /var/groupon/etl/deploy/etl/cerebro/connect_metadata.py --dsn ${dsn} --table ${table} --start "${start_dt}" --end "${end_dt}" --td_count ${td_count} --hv_count ${hive_count_trimmed} --job_status ${job_status} --inc_val ${org_inc_val} --schema ${schema}"
output=` /usr/local/bin/python /var/groupon/etl/deploy/etl/cerebro/connect_metadata.py --dsn ${dsn} --table ${table} --start "${start_dt}" --end "${end_dt}" --td_count ${td_count} --hv_count ${hive_count_trimmed} --job_status ${job_status} --inc_val ${org_inc_val} --schema ${schema}`
rc=$?
echo $output|sed 's/\*\*\*/\n/g'
if [ $rc -ne 0 ]; then
echo ""
echo "Something went wrong while updating metadata info!!"
#exit 1
fi
if [ "${job_status}" == "failed" ]; then
echo "Processing failed for date : $inc_val"
exit 1
fi
# Derive the next date to process (inc_val - i days), preserving the
# original key/date format.
if [ $date_key == "Y" ]; then
inc_val=`date "--date=$org_inc_val-$i day" +%Y%m%d`
else
inc_val=`date "--date=$org_inc_val-$i day" +%Y-%m-%d`
fi
echo "###########################################################"
echo ""
done
| true
|
dc49e5ff0fd531a5a276828e2e1bc329d0820c63
|
Shell
|
bricewge/dotfiles
|
/bin/.local/bin/monitor
|
UTF-8
| 407
| 3.015625
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Toggle between input sources for BenQ GW2765HT monitor
# TODO rename file
#
# The shebang was '/usr/bin/env sh', but the script uses '[[ ]]', a bash
# construct that plain sh (e.g. dash) rejects, so bash is declared.
model="BenQ GW2765"
dp="x0f"
hdmi="x11"
# Current input source token (e.g. "x0f") from ddcutil's terse VCP output.
current_source=$(sudo ddcutil getvcp 0x60 --model "${model}" --terse | cut -d' ' -f4)
if [[ "$current_source" == "$dp" ]]; then
    new_source=$hdmi
elif [[ "$current_source" == "$hdmi" ]]; then
    new_source=$dp
else
    # Unknown/unreadable source: bail out instead of writing a bare "0"
    # to VCP 60, which is what the unguarded "0$new_source" expansion did
    # when new_source was never assigned.
    echo "unrecognized input source: '${current_source}'" >&2
    exit 1
fi
sudo ddcutil setvcp 0x60 0$new_source --model "${model}"
| true
|
adc7a97ea140d02d0fc70a748aa69274de526666
|
Shell
|
CyberHolmes/CSCI5607
|
/Project2/shellScripts/crop.sh
|
UTF-8
| 568
| 3.09375
| 3
|
[] |
no_license
|
#! /usr/bin/bash
#-----------------------------------------------------------------------------
# # Crop
# Runs the "crop" operation of ./image over a fixed set of test rectangles.
# Expects OUTROOT, NAME, INNAME and EXT to be provided by the calling
# harness (this file reads but never defines them) -- TODO confirm.
CurProc=crop
echo "Processing ${CurProc}..."
OUTDIR="$OUTROOT/$NAME/${CurProc}"
# mkdir -p is a no-op when the directory already exists, so the explicit
# existence test the original used is unnecessary; quoting protects paths
# that contain spaces.
mkdir -p "$OUTDIR"
./image -input "${INNAME}" -${CurProc} -1 -1 150 150 -output "${OUTDIR}/${NAME}_${CurProc}_-1_-1_150_150.${EXT}"
./image -input "${INNAME}" -${CurProc} 200 150 1000 1000 -output "${OUTDIR}/${NAME}_${CurProc}_200_150_1000_1000.${EXT}"
./image -input "${INNAME}" -${CurProc} 100 150 250 350 -output "${OUTDIR}/${NAME}_${CurProc}_100_150_250_350.${EXT}"
| true
|
61b6fd8978b1833e8d99a7665958165058633236
|
Shell
|
mauriceliddy/junk
|
/jankgradle
|
UTF-8
| 398
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Minimal build driver for Servertest.
# Usage: jankgradle [clean|build|package|run]
# With no (or an unknown) argument the full clean/build/package/run
# pipeline is executed.
case $1 in
clean)
	# -f: do not fail (or print errors) when the artifacts are already absent
	rm -f Servertest.jar Servertest.class
	;;
build)
	javac Servertest.java
	;;
package)
	# -c create, -f file name, -e entry point (main class)
	jar -cfe Servertest.jar Servertest Servertest.class
	;;
run)
	java Servertest
	;;
*)
	rm -f Servertest.jar Servertest.class
	javac Servertest.java
	jar -cfe Servertest.jar Servertest Servertest.class
	java Servertest
	echo All set. Script finished executing and server connection closed!
	;;
esac
| true
|
b8bc9230c4e2d10ec9478c30c33d3e16b7994d1e
|
Shell
|
TOLDOTECHNIK/buildroot-webkit
|
/board/toldotechnik_rpi/rootfs-overlay/etc/init.d/S22resize_rootpart
|
UTF-8
| 959
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/sh
#
# S22resize_rootpart -- one-shot init script.  Grows the root partition
# (/dev/mmcblk0p2) to fill the SD card, installs a follow-up init script
# (S23resize_rootfs) that resizes the ext filesystem on the *next* boot,
# deletes itself so the partition is only grown once, and reboots.
case "$1" in
start)
echo "Expanding the root partition..."
# Show a message on the boot splash, if psplash is available.
if [ -x "$(command -v psplash-write)" ]; then
/usr/bin/psplash-write "MSG RESIZING PARTITION - REBOOT"
fi
# Start CHS address of partition 2 and total disk size, parsed from
# parted's machine-readable (-ms) output in "chs" units.
PART_START=$(parted /dev/mmcblk0 -ms unit chs p | grep ^2: | cut -f 2 -d:)
DISK_SIZE=$(parted /dev/mmcblk0 -ms unit chs p | grep /dev/mmcblk0: | cut -f 2 -d:)
# Recreate partition 2 from its old start to the end of the disk; the 'i'
# answers acknowledge parted's interactive prompts.
# NOTE(review): '&>/dev/null' is a bash-ism; under a strict POSIX sh it
# backgrounds the command instead of redirecting both streams -- confirm
# the target /bin/sh (busybox ash?) accepts it.
parted /dev/mmcblk0 &>/dev/null <<EOF
unit chs
rm 2
i
mkpart primary $PART_START $DISK_SIZE
i
i
q
EOF
# Write the next-boot script verbatim; the quoted delimiter (\EOF) keeps
# $1 / $0 inside it from being expanded now.  The trailing '&&' chains
# the chmod below onto a successful write.
cat <<\EOF > /etc/init.d/S23resize_rootfs &&
#!/bin/sh
case "$1" in
start)
echo "Expanding the root filesystem..."
resize2fs /dev/mmcblk0p2 &>/dev/null
rm /etc/init.d/S23resize_rootfs
echo "Done."
;;
*)
echo "Usage: $0 {start}" >&2
exit 1
;;
esac
EOF
chmod +x /etc/init.d/S23resize_rootfs
# Self-delete so the partition table is only rewritten on the first boot.
rm /etc/init.d/S22resize_rootpart
echo "Reboot now."
sleep 2
reboot -f
;;
*)
echo "Usage: $0 {start}" >&2
exit 1
;;
esac
| true
|
5b186673455a7de0fca3394aca1524e2268b3bca
|
Shell
|
meldafrawi/longhorn-tests
|
/test_framework/scripts/longhorn-setup.sh
|
UTF-8
| 14,855
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Longhorn CI setup: installs Longhorn (master build, or stable followed by
# an upgrade) on a pre-provisioned cluster and runs the integration tests.
set -x
# create and clean tmpdir
# NOTE(review): 'rm -rf "${TMPDIR}/"' removes the directory itself right
# after mkdir; the later 'git clone' calls recreate the paths they need --
# confirm this ordering is intentional.
TMPDIR="/tmp/longhorn"
mkdir -p ${TMPDIR}
rm -rf "${TMPDIR}/"
# Namespace every Longhorn component (and the wait loops) operate on.
LONGHORN_NAMESPACE="longhorn-system"
# Longhorn version tag (e.g v1.1.0), use "master" for latest stable
# we will use this version as the base for upgrade
LONGHORN_STABLE_VERSION=${LONGHORN_STABLE_VERSION:-master}
LONGHORN_STABLE_MANIFEST_URL="https://raw.githubusercontent.com/longhorn/longhorn/${LONGHORN_STABLE_VERSION}/deploy/longhorn.yaml"
# Export KUBECONFIG pointing at the kubeconfig written by provisioning.
# $1 - target architecture ("amd64" or "arm64")
# $2 - base directory of the terraform workspace
# Also reads TF_VAR_k8s_distro_name and DISTRO from the environment.
# (ARCH/BASEDIR intentionally stay global, matching the original contract.)
set_kubeconfig_envvar(){
  ARCH=${1}
  BASEDIR=${2}
  # All k3s-based clusters share one kubeconfig path; RKE on amd64 is the
  # only special case.
  local k3s_config="${BASEDIR}/terraform/aws/${DISTRO}/k3s.yaml"
  case "${ARCH}" in
    amd64)
      if [[ ${TF_VAR_k8s_distro_name} =~ [rR][kK][eE] ]]; then
        export KUBECONFIG="${BASEDIR}/kube_config_rke.yml"
      else
        export KUBECONFIG="${k3s_config}"
      fi
      ;;
    arm64)
      export KUBECONFIG="${k3s_config}"
      ;;
  esac
}
# Install the kubernetes-csi external-snapshotter CRDs plus the
# snapshot-controller deployment straight from the upstream repository.
install_csi_snapshotter_crds(){
  local repo_url="https://github.com/kubernetes-csi/external-snapshotter.git"
  local repo_branch="master"
  local repo_dir="${TMPDIR}/k8s-csi-external-snapshotter"
  git clone --single-branch --branch "${repo_branch}" "${repo_url}" "${repo_dir}"
  kubectl apply \
    -f "${repo_dir}/client/config/crd" \
    -f "${repo_dir}/deploy/kubernetes/snapshot-controller"
}
# Poll the Longhorn namespace once a minute until every pod is Running;
# abort the whole script (exit 1) after 10 unsuccessful checks.
wait_longhorn_status_running(){
  local max_checks=10 # in minutes
  local poll_interval="1m"
  local attempt=0
  while [[ -n `kubectl get pods -n ${LONGHORN_NAMESPACE} --no-headers | awk '{print $3}' | grep -v Running` ]]; do
    echo "Longhorn is still installing ... re-checking in 1m"
    sleep "${poll_interval}"
    attempt=$((attempt + 1))
    if [[ ${attempt} -eq ${max_checks} ]]; then
      echo "Error: longhorn installation timeout"
      exit 1
    fi
  done
}
# Build <basedir>/longhorn.yaml by concatenating the install manifests from
# the longhorn-manager repo, then substituting custom component images.
# $1 - directory where longhorn.yaml is written.
# Side effects: the CUSTOM_LONGHORN_*_IMAGE and LONGHORN_MANAGER_* variables
# set here are globals consumed later by run_longhorn_upgrade_test(), so
# they must NOT be made local.
generate_longhorn_yaml_manifest() {
MANIFEST_BASEDIR="${1}"
LONGHORN_MANAGER_REPO_URI=${LONGHORN_MANAGER_REPO_URI:-"https://github.com/longhorn/longhorn-manager.git"}
LONGHORN_MANAGER_BRANCH=${LONGHORN_MANAGER_BRANCH:-"master"}
LONGHORN_MANAGER_REPO_DIR="${TMPDIR}/longhorn-manager"
CUSTOM_LONGHORN_MANAGER_IMAGE=${CUSTOM_LONGHORN_MANAGER_IMAGE:-"longhornio/longhorn-manager:master-head"}
CUSTOM_LONGHORN_ENGINE_IMAGE=${CUSTOM_LONGHORN_ENGINE_IMAGE:-"longhornio/longhorn-engine:master-head"}
CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE=${CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE:-""}
CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE=${CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE:-""}
CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE=${CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE:-""}
git clone --single-branch \
--branch ${LONGHORN_MANAGER_BRANCH} \
${LONGHORN_MANAGER_REPO_URI} \
${LONGHORN_MANAGER_REPO_DIR}
# Concatenate every install yaml (sorted for a deterministic order) into a
# single multi-document manifest.
for FILE in `find "${LONGHORN_MANAGER_REPO_DIR}/deploy/install" -type f -name "*\.yaml" | sort`; do
cat ${FILE} >> "${MANIFEST_BASEDIR}/longhorn.yaml"
echo "---" >> "${MANIFEST_BASEDIR}/longhorn.yaml"
done
# get longhorn default images from yaml manifest
LONGHORN_MANAGER_IMAGE=`grep -io "longhornio\/longhorn-manager:.*$" "${MANIFEST_BASEDIR}/longhorn.yaml"| head -1`
LONGHORN_ENGINE_IMAGE=`grep -io "longhornio\/longhorn-engine:.*$" "${MANIFEST_BASEDIR}/longhorn.yaml"| head -1`
LONGHORN_INSTANCE_MANAGER_IMAGE=`grep -io "longhornio\/longhorn-instance-manager:.*$" "${MANIFEST_BASEDIR}/longhorn.yaml"| head -1`
LONGHORN_SHARE_MANAGER_IMAGE=`grep -io "longhornio\/longhorn-share-manager:.*$" "${MANIFEST_BASEDIR}/longhorn.yaml"| head -1`
LONGHORN_BACKING_IMAGE_MANAGER_IMAGE=`grep -io "longhornio\/backing-image-manager:.*$" "${MANIFEST_BASEDIR}/longhorn.yaml"| head -1`
# replace longhorn images with custom images
sed -i 's#'${LONGHORN_MANAGER_IMAGE}'#'${CUSTOM_LONGHORN_MANAGER_IMAGE}'#' "${MANIFEST_BASEDIR}/longhorn.yaml"
sed -i 's#'${LONGHORN_ENGINE_IMAGE}'#'${CUSTOM_LONGHORN_ENGINE_IMAGE}'#' "${MANIFEST_BASEDIR}/longhorn.yaml"
# replace images if custom image is specified.
if [[ ! -z ${CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE} ]]; then
sed -i 's#'${LONGHORN_INSTANCE_MANAGER_IMAGE}'#'${CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE}'#' "${MANIFEST_BASEDIR}/longhorn.yaml"
else
# use instance-manager image specified in yaml file if custom image is not specified
CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE=${LONGHORN_INSTANCE_MANAGER_IMAGE}
fi
if [[ ! -z ${CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE} ]]; then
sed -i 's#'${LONGHORN_SHARE_MANAGER_IMAGE}'#'${CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE}'#' "${MANIFEST_BASEDIR}/longhorn.yaml"
else
# use share-manager image specified in yaml file if custom image is not specified
CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE=${LONGHORN_SHARE_MANAGER_IMAGE}
fi
if [[ ! -z ${CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE} ]]; then
sed -i 's#'${LONGHORN_BACKING_IMAGE_MANAGER_IMAGE}'#'${CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE}'#' "${MANIFEST_BASEDIR}/longhorn.yaml"
else
# use backing-image-manager image specified in yaml file if custom image is not specified
CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE=${LONGHORN_BACKING_IMAGE_MANAGER_IMAGE}
fi
}
# Install the pinned stable Longhorn release from its published manifest
# and wait for all pods to come up; used as the upgrade-test baseline.
install_longhorn_stable(){
  kubectl apply -f "${LONGHORN_STABLE_MANIFEST_URL}"
  wait_longhorn_status_running
}
# Install Longhorn from a locally generated manifest and block until all
# pods in the Longhorn namespace report Running.
# $1 - path to the generated longhorn.yaml
install_longhorn_master(){
  local manifest_path="${1}"
  kubectl apply -f "${manifest_path}"
  wait_longhorn_status_running
}
# Create the namespace Longhorn is deployed into ("ns" spelled out for
# readability; it is the same kubectl resource alias).
create_longhorn_namespace(){
  kubectl create namespace "${LONGHORN_NAMESPACE}"
}
# Deploy the minio (S3) and NFS backupstore fixtures used by the
# integration tests, from the longhorn-tests master branch.
install_backupstores(){
  local base_url="https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores"
  kubectl create \
    -f "${base_url}/minio-backupstore.yaml" \
    -f "${base_url}/nfs-backupstore.yaml"
}
# Render AWS credentials (from TF_VAR_* env vars) base64-encoded into the
# secret template and apply it.  The caller disables xtrace around this
# function so the raw values never reach the build log.
create_aws_secret(){
  local template="${TF_VAR_tf_workspace}/templates/aws_cred_secrets.yml"
  local key_id secret_key region
  # printf '%s' == echo -n: encode without a trailing newline.
  key_id=$(printf '%s' "${TF_VAR_lh_aws_access_key}" | base64)
  secret_key=$(printf '%s' "${TF_VAR_lh_aws_secret_key}" | base64)
  region=$(printf '%s' "${TF_VAR_aws_region}" | base64)
  yq e -i '.data.AWS_ACCESS_KEY_ID |= "'${key_id}'"' "${template}"
  yq e -i '.data.AWS_SECRET_ACCESS_KEY |= "'${secret_key}'"' "${template}"
  yq e -i '.data.AWS_DEFAULT_REGION |= "'${region}'"' "${template}"
  kubectl apply -f "${template}"
}
# Generate and run the upgrade-test pod: a copy of the standard test
# manifest rewritten (via yq) to run only test_upgrade with the custom
# image/branch flags, then wait for it and collect its junit report.
# $1 - base directory of the longhorn-tests repo checkout.
# Reads the CUSTOM_LONGHORN_*_IMAGE / LONGHORN_MANAGER_* globals set by
# generate_longhorn_yaml_manifest().
run_longhorn_upgrade_test(){
LONGHORH_TESTS_REPO_BASEDIR=${1}
LONGHORN_UPGRADE_TEST_POD_NAME="longhorn-test-upgrade"
LONGHORN_TESTS_CUSTOM_IMAGE=${LONGHORN_TESTS_CUSTOM_IMAGE:-"longhornio/longhorn-manager-test:master-head"}
LONGHORN_TESTS_MANIFEST_FILE_PATH="${LONGHORH_TESTS_REPO_BASEDIR}/manager/integration/deploy/test.yaml"
LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH="${LONGHORH_TESTS_REPO_BASEDIR}/manager/integration/deploy/upgrade_test.yaml"
LONGHORN_JUNIT_REPORT_PATH=`yq e '.spec.containers[0].env[] | select(.name == "LONGHORN_JUNIT_REPORT_PATH").value' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"`
local PYTEST_COMMAND_ARGS='''"-s",
                            "--junitxml='${LONGHORN_JUNIT_REPORT_PATH}'",
                            "--include-upgrade-test",
                            "-k", "test_upgrade",
                            "--upgrade-lh-manager-repo-url", "'${LONGHORN_MANAGER_REPO_URI}'",
                            "--upgrade-lh-manager-repo-branch", "'${LONGHORN_MANAGER_BRANCH}'",
                            "--upgrade-lh-manager-image", "'${CUSTOM_LONGHORN_MANAGER_IMAGE}'",
                            "--upgrade-lh-engine-image", "'${CUSTOM_LONGHORN_ENGINE_IMAGE}'",
                            "--upgrade-lh-instance-manager-image", "'${CUSTOM_LONGHORN_INSTANCE_MANAGER_IMAGE}'",
                            "--upgrade-lh-share-manager-image", "'${CUSTOM_LONGHORN_SHARE_MANAGER_IMAGE}'",
                            "--upgrade-lh-backing-image-manager-image", "'${CUSTOM_LONGHORN_BACKING_IMAGE_MANAGER_IMAGE}'"
                         '''
## generate upgrade_test pod manifest
yq e 'select(.spec.containers[0] != null).spec.containers[0].args=['"${PYTEST_COMMAND_ARGS}"']' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}" > ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].image="'${LONGHORN_TESTS_CUSTOM_IMAGE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
yq e -i 'select(.spec.containers[0] != null).metadata.name="'${LONGHORN_UPGRADE_TEST_POD_NAME}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
# env[1] holds a comma-separated list of backupstore URLs (s3 first, nfs
# second); keep only the one selected by BACKUP_STORE_TYPE.
if [[ $BACKUP_STORE_TYPE = "s3" ]]; then
BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $1}' | sed 's/ *//'`
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then
BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'`
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
fi
kubectl apply -f ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}
# wait upgrade test pod to start running
# NOTE(review): unlike run_longhorn_tests, these wait loops have no retry
# cap, so a pod stuck in Pending hangs the job until the CI timeout.
while [[ -n "`kubectl get pods longhorn-test-upgrade --no-headers=true | awk '{print $3}' | grep -v \"Running\|Completed\"`" ]]; do
echo "waiting upgrade test pod to be in running state ... rechecking in 10s"
sleep 10s
done
# wait upgrade test to complete
while [[ -z "`kubectl get pods longhorn-test-upgrade --no-headers=true | awk '{print $3}' | grep -v Running`" ]]; do
echo "upgrade test still running ... rechecking in 30s"
sleep 30s
done
# get upgrade test junit xml report
kubectl logs ${LONGHORN_UPGRADE_TEST_POD_NAME} >> "${TF_VAR_tf_workspace}/longhorn-test-upgrade-junit-report.xml"
}
# Run the full integration test suite: rewrite the test pod manifest in
# place (pytest args, custom image, selected backupstore, AWS credential
# env vars), apply it, wait for completion, and save the junit report.
# $1 - base directory of the longhorn-tests repo checkout.
run_longhorn_tests(){
LONGHORH_TESTS_REPO_BASEDIR=${1}
LONGHORN_TESTS_CUSTOM_IMAGE=${LONGHORN_TESTS_CUSTOM_IMAGE:-"longhornio/longhorn-manager-test:master-head"}
LONGHORN_TESTS_MANIFEST_FILE_PATH="${LONGHORH_TESTS_REPO_BASEDIR}/manager/integration/deploy/test.yaml"
LONGHORN_JUNIT_REPORT_PATH=`yq e '.spec.containers[0].env[] | select(.name == "LONGHORN_JUNIT_REPORT_PATH").value' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"`
local PYTEST_COMMAND_ARGS='"-s", "--junitxml='${LONGHORN_JUNIT_REPORT_PATH}'"'
# Append any extra whitespace-separated pytest flags from the environment.
if [[ -n ${PYTEST_CUSTOM_OPTIONS} ]]; then
PYTEST_CUSTOM_OPTIONS=(${PYTEST_CUSTOM_OPTIONS})
for OPT in "${PYTEST_CUSTOM_OPTIONS[@]}"; do
PYTEST_COMMAND_ARGS=${PYTEST_COMMAND_ARGS}', "'${OPT}'"'
done
fi
## generate test pod manifest
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].args=['"${PYTEST_COMMAND_ARGS}"']' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].image="'${LONGHORN_TESTS_CUSTOM_IMAGE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH}
# env[1] holds a comma-separated list of backupstore URLs (s3 first, nfs
# second); keep only the one selected by BACKUP_STORE_TYPE.
if [[ $BACKUP_STORE_TYPE = "s3" ]]; then
BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $1}' | sed 's/ *//'`
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH}
elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then
BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'`
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH}
fi
# xtrace is suspended so the credential wiring below is not logged.
set +x
## inject aws cloudprovider and credentials env variables from created secret
yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "CLOUDPROVIDER", "value": "aws"}' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "AWS_ACCESS_KEY_ID", "valueFrom": {"secretKeyRef": {"name": "aws-cred-secret", "key": "AWS_ACCESS_KEY_ID"}}}' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "AWS_SECRET_ACCESS_KEY", "valueFrom": {"secretKeyRef": {"name": "aws-cred-secret", "key": "AWS_SECRET_ACCESS_KEY"}}}' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "AWS_DEFAULT_REGION", "valueFrom": {"secretKeyRef": {"name": "aws-cred-secret", "key": "AWS_DEFAULT_REGION"}}}' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
set -x
LONGHORN_TEST_POD_NAME=`yq e 'select(.spec.containers[0] != null).metadata.name' ${LONGHORN_TESTS_MANIFEST_FILE_PATH}`
kubectl apply -f ${LONGHORN_TESTS_MANIFEST_FILE_PATH}
local RETRY_COUNTS=60
local RETRIES=0
# wait longhorn tests pod to start running (10s poll, capped at 60 tries)
while [[ -n "`kubectl get pods "${LONGHORN_TEST_POD_NAME}" --no-headers=true | awk '{print $3}' | grep -v \"Running\|Completed\"`" ]]; do
echo "waiting longhorn test pod to be in running state ... rechecking in 10s"
sleep 10s
RETRIES=$((RETRIES+1))
if [[ ${RETRIES} -eq ${RETRY_COUNTS} ]]; then echo "Error: longhorn test pod start timeout"; exit 1 ; fi
done
# wait longhorn tests to complete (no cap: the suite runs for hours)
while [[ -z "`kubectl get pods ${LONGHORN_TEST_POD_NAME} --no-headers=true | awk '{print $3}' | grep -v Running`" ]]; do
echo "Longhorn tests still running ... rechecking in 5m"
sleep 5m
done
kubectl logs ${LONGHORN_TEST_POD_NAME} >> "${TF_VAR_tf_workspace}/longhorn-test-junit-report.xml"
}
# Entry point: provision namespace/backupstores/secrets, generate the
# Longhorn manifest, then either exercise the stable->custom upgrade path
# or do a plain master install; both paths finish with the full test run.
main(){
  set_kubeconfig_envvar "${TF_VAR_arch}" "${TF_VAR_tf_workspace}"
  create_longhorn_namespace
  install_backupstores
  # set debugging mode off to avoid leaking aws secrets to the logs.
  # DON'T REMOVE!
  set +x
  create_aws_secret
  set -x
  install_csi_snapshotter_crds
  generate_longhorn_yaml_manifest "${TF_VAR_tf_workspace}"
  case "${LONGHORN_UPGRADE_TEST}" in
    true|True)
      install_longhorn_stable
      run_longhorn_upgrade_test "${WORKSPACE}"
      ;;
    *)
      install_longhorn_master "${TF_VAR_tf_workspace}/longhorn.yaml"
      ;;
  esac
  # Both branches end with the same full test run, so it is hoisted here.
  run_longhorn_tests "${WORKSPACE}"
}
main
| true
|
01cea7436d7cdeba075d3256115d658d3d082461
|
Shell
|
Frogdong/kp1
|
/star/star_alignment_kp1.sh
|
UTF-8
| 771
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#LF
#STAR_2.4.0j
#
# Paired-end STAR alignment for the kp1 project.
# Usage: star_alignment_kp1.sh <read1.fq.gz> <read2.fq.gz> <out-prefix> <star-index-dir>
set -o nounset -o pipefail
# read command line arguments:
read1=$1
read2=$2
prefix=$3
INDEX_DIRECTORY=$4 #~/projects/mapping/star/star_index/Mus_musculus.GRCm38.92.vM17.overhang74.index
# Build the command as an ARRAY rather than a backslash-continued string:
# array elements survive paths that contain spaces and cannot be silently
# glued together by a missing space before a continuation backslash (the
# string form had several "X\" continuations relying on the next line's
# leading space).
cmd=(
  STAR
  --genomeDir "$INDEX_DIRECTORY"
  --readFilesIn "$read1" "$read2"
  --outFileNamePrefix "${prefix}."
  --readFilesCommand zcat
  --outSAMattributes NH HI AS nM NM MD jM jI
  --outFilterType BySJout
  --runThreadN 8
  --outBAMsortingThreadN 8
  --outSAMtype BAM SortedByCoordinate
  --outSAMunmapped Within
  --quantMode TranscriptomeSAM
  --genomeLoad LoadAndKeep
  --limitBAMsortRAM 15000000000
  --chimSegmentMin 20
  # NOTE(review): SM is built from the raw read paths ($1_$2), as in the
  # original -- confirm that is the intended sample name.
  --outSAMattrRGline "ID:${prefix}" "SM:${1}_${2}" CN:kevin_smith LB:kp1_axelrod
)
date
echo "${cmd[@]}"
"${cmd[@]}"
date
| true
|
ee5fcaa250a69744b20bc07d6915f7336e5403b4
|
Shell
|
goodnewday/UsefulTools
|
/MemAndCPUMonitor/format_data
|
UTF-8
| 452
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# format_data <raw-monitor-log> <output-file>
# Extracts the VmSize, VmRSS and Cpu sample series from a monitor log and
# writes them side by side (tab separated) into the output file.
input_file_name=$1
output_file_name=$2
grep "VmSize" "$input_file_name" > VmSize.txt
grep "VmRSS" "$input_file_name" > VmRSS.txt
grep "Cpu" "$input_file_name" > Cpu.txt
# First file (NR==FNR) seeds a[row]; the other two append their 2nd field.
# The END loop now runs i <= len: the original "i < len" silently dropped
# the LAST sample row (off-by-one) -- NOTE(review): confirm skipping the
# final, possibly in-progress, sample was not intentional.
awk '{
if(NR==FNR)
    {a[FNR]=$2;}
else
    {a[FNR]=a[FNR]"\t"$2;}
}
END {print "" >"'"$output_file_name"'"; len=length(a); for (i = 1; i <= len; i++) { print a[i]>>"'"$output_file_name"'"}}' VmSize.txt VmRSS.txt Cpu.txt
rm -f VmSize.txt VmRSS.txt Cpu.txt
| true
|
bb33e73f2afb9fe7e97ee0b7883d5fc3de6cc0ac
|
Shell
|
fabriziogaliano/docker-ansible-server
|
/ansible_conf/aws/ec2_script_boot
|
UTF-8
| 589
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data boot script: creates a passwordless "ansible" user with
# sudo rights and an authorized SSH key, then installs base tooling.
# NOTE(review): user-data normally runs as root, making 'sudo' redundant
# but harmless; it is kept so the script also works when run manually.

#Creation of Ansible User
sudo adduser --quiet --home /home/ansible --shell /bin/bash --disabled-password ansible
sudo mkdir -p /home/ansible/.ssh
sudo chmod 700 /home/ansible/.ssh

#Make Ansible User Sudoers
sudo usermod -aG sudo ansible
# Use 'tee' so the privileged redirection actually happens as root:
# 'sudo echo ... > file' runs the redirection in the *unprivileged* shell
# and fails whenever the invoking user cannot write /etc/sudoers.d.
echo "ansible ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/000-ansible > /dev/null
# sudoers drop-ins must not be world/group writable; 440 is the convention.
sudo chmod 440 /etc/sudoers.d/000-ansible

#RSA Ansible key
echo "YOUR RSA PUB KEY HERE" | sudo tee -a /home/ansible/.ssh/authorized_keys > /dev/null
sudo chmod 600 /home/ansible/.ssh/authorized_keys
# 'user:group' is the portable chown separator (the old 'user.group' form
# is deprecated).
sudo chown -R ansible:ansible /home/ansible/

#Install prerequisites tools
# The original ran the install half without sudo ('sudo apt update && apt
# install ...'), which fails for non-root callers.
sudo apt update && sudo apt install python awscli -y
| true
|
4fd5caa9570eb531298065ad4f2cc824b3f85786
|
Shell
|
jvandyke/yui-assetcompressor
|
/scripts/assetcompressor.sh
|
UTF-8
| 416
| 3.140625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Minify JS/CSS assets with the YUI compressor, forwarding all CLI arguments.
# FIX: `$_` is unreliable under /bin/sh (it holds the last argument of the
# *previous* command, not this script's path); use $0, and quote every path
# expansion so directories containing spaces work.
baseDir=$(dirname "$0")
baseDir="${baseDir}/.."
buildDir="${baseDir}/build"   # NOTE(review): unused in this script; kept for compatibility
yuiBootClassPath="${baseDir}/lib/yuicompressor-2.4.2.jar"
# Options
minSuffix="-min"
# Command used on both JS and CSS files to minify content.
# FIX: pass "$@" directly instead of flattening the args through $*/an
# unquoted string, which re-split arguments containing whitespace.
java -Xbootclasspath/p:"${yuiBootClassPath}" -jar "${baseDir}/assetcompressor.jar" --suffix "${minSuffix}" "$@"
| true
|
fbda33ac52846bb396c94f9c11460f85ec6278d5
|
Shell
|
JagwarWest/lab-ebpf-based-tracing-for-packer-analysis-public
|
/misc/install_open_gapps_pico.sh
|
UTF-8
| 3,993
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Push the Open GApps "pico" package (Android 7.1, x86_64) onto a running
# emulator/device over adb, restoring root ownership/permissions as the
# stock installer would.  Requires adb root and a remountable /system.
adb root
sleep 1
adb shell mount -o rw,remount /
mkdir tmp/
# Download the gapps zip once; reuse the cached copy on later runs.
if [ -f "open_gapps-x86_64-7.1-pico-20201218.zip" ]
then
echo "File open_gapps-x86_64-7.1-pico-20201218.zip already downloaded!"
else
echo "Downloading houdini64.sfs"
curl "https://uni-bonn.sciebo.de/s/s2z65Xs8eN4DHQc/download?path=%2F&files=open_gapps-x86_64-7.1-pico-20201218.zip" -o open_gapps-x86_64-7.1-pico-20201218.zip
fi
# Unpack only the Core/ payloads; each .tar.lz holds one component.
cp open_gapps-x86_64-7.1-pico-20201218.zip tmp/
unzip -qq open_gapps-x86_64-7.1-pico-20201218.zip "Core/*" -d tmp/
# etc configs: default permissions / preferred apps / sysconfig.
tar --lzip -xf tmp/Core/defaultetc-common.tar.lz -p --directory tmp/
for dir in default-permissions preferred-apps sysconfig;
do
adb push tmp/defaultetc-common/common/etc/$dir /system/etc/
adb shell chown -R root /system/etc/$dir/
adb shell chgrp -R root /system/etc/$dir/
adb shell chmod og-x /system/etc/$dir/*
done
# Framework permission XMLs.
tar --lzip -xf tmp/Core/defaultframework-common.tar.lz -p --directory tmp/
for dir in com.google.android.maps.xml com.google.android.media.effects.xml;
do
adb push tmp/defaultframework-common/common/etc/permissions/$dir /system/etc/permissions/
adb shell chown -R root /system/etc/permissions/$dir
adb shell chgrp -R root /system/etc/permissions/$dir
adb shell chmod og-x /system/etc/permissions/$dir
done
# Framework jars matching the permission XMLs above.
for dir in com.google.android.maps.jar com.google.android.media.effects.jar;
do
adb push tmp/defaultframework-common/common/framework/$dir /system/framework/
adb shell chown -R root /system/framework/$dir
adb shell chgrp -R root /system/framework/$dir
adb shell chmod og-x /system/framework/$dir
done
# Pixel/Nexus sysconfig tweaks.
tar --lzip -xf tmp/Core/googlepixelconfig-common.tar.lz -p --directory tmp/
adb push tmp/googlepixelconfig-common/common/etc/sysconfig/nexus.xml /system/etc/sysconfig/
adb shell chown -R root /system/etc/sysconfig/nexus.xml
adb shell chgrp -R root /system/etc/sysconfig/nexus.xml
adb shell chmod og-x /system/etc/sysconfig/nexus.xml
# Unpack all APK component archives in one pass.
for dir in gsfcore-all.tar.lz gmscore-x86_64.tar.lz vending-x86_64.tar.lz extservicesgoogle-all.tar.lz configupdater-all.tar.lz extsharedgoogle-all.tar.lz googlebackuptransport-all.tar.lz googlecontactssync-all.tar.lz googlefeedback-all.tar.lz googleonetimeinitializer-all.tar.lz googlepartnersetup-all.tar.lz gsflogin-all.tar.lz setupwizarddefault-all.tar.lz
do
tar --lzip -xf tmp/Core/$dir -p --directory tmp/
done
# Mandatory pieces: Services Framework, GMS core, Play Store.
adb push tmp/gsfcore-all/nodpi/priv-app/GoogleServicesFramework /system/priv-app/
adb push tmp/gmscore-x86_64/nodpi/priv-app/PrebuiltGmsCore /system/priv-app/
adb push tmp/vending-x86_64/nodpi/priv-app/Phonesky /system/priv-app/
# those services might not be strictly needed
# comment them out if you don't want them
adb push tmp/extservicesgoogle-all/nodpi/priv-app/GoogleExtServices /system/priv-app/
adb push tmp/configupdater-all/nodpi/priv-app/ConfigUpdater /system/priv-app/
adb push tmp/extsharedgoogle-all/nodpi/app/GoogleExtShared /system/priv-app/
adb push tmp/googlebackuptransport-all/nodpi/priv-app/GoogleBackupTransport /system/priv-app/
adb push tmp/googlecontactssync-all/nodpi/app/GoogleContactsSyncAdapter /system/priv-app/
adb push tmp/googlefeedback-all/nodpi/priv-app/GoogleFeedback /system/priv-app/
adb push tmp/googleonetimeinitializer-all/nodpi/priv-app/GoogleOneTimeInitializer /system/priv-app/
adb push tmp/googlepartnersetup-all/nodpi/priv-app/GooglePartnerSetup /system/priv-app/
adb push tmp/gsflogin-all/nodpi/priv-app/GoogleLoginService /system/priv-app/
adb push tmp/setupwizarddefault-all/nodpi/priv-app/SetupWizard /system/priv-app/
# Restore root ownership and strip group/other execute on everything pushed.
for dir in GoogleServicesFramework PrebuiltGmsCore Phonesky GoogleExtServices ConfigUpdater GoogleLoginService GooglePartnerSetup GoogleOneTimeInitializer GoogleFeedback GoogleExtShared GoogleBackupTransport GoogleContactsSyncAdapter SetupWizard
do
adb shell chown -R root /system/priv-app/$dir/
adb shell chgrp -R root /system/priv-app/$dir/
adb shell chmod og-x /system/priv-app/$dir/*
done
# Remount /system read-only and clean up.
adb shell mount -o ro,remount /
rm -r tmp/
| true
|
358b843597ec2f94e0ab8abe4586509bf482faf9
|
Shell
|
NASA-PDS/roundup-action
|
/support/run-roundup.sh
|
UTF-8
| 4,488
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
#
# Convenience script to run the Roundup outside of Docker or GitHub Action.
# This assumes that the `roundup` executable is on your PATH. Run this inside of a
# checked-out project directory, whether it's Python or Java.
#
# Typical usage:
#
#     cd roundup-action
#     python3 -m venv venv
#     venv/bin/pip install --quiet --upgrade pip setuptools wheel
#     venv/bin/pip install --editable .
#     export PATH=${PWD}/venv/bin:${PWD}/support:${PATH}
#     cd ../some-package-i-want-to-roundup
#     run-roundup.sh unstable preparation,unitTest
#
# If you don't give any steps (like `preparation,unitTest`) you get a default
# set. HOWEVER, only skip steps if you know what you're doing! For example, in
# Python repositories, the `preparation`` step sets the PATH to include the
# venv against which the project's being roudned up. Skip that and later steps
# which rely on that environment may fail.
#
# Note: you'll need a ~/.secrets/github-roundup.token containing a GitHub API
# token with the scopes repo:status, repo_deployment, and public_repo. You'll
# also need a ~/.secrets/roundup.sh file with Bourne shell statements that
# export your PyPI and Sonatype OSSRTH usernames and passwords, such as:
#
#     export pypi_username=USERNAME
#     export pypi_password=PASSWORD
#     export ossrh_username=USERNAME
#     export ossrh_password=PASSWORD
#
# You'll also need these on your PATH:
#
#     gem install --silent github_changelog_generator --version 1.16.4
#     pip install --quiet sphinx==3.2.1 sphinx-argparse==0.2.5 sphinx-rtd-theme==0.5.0 twine==3.4.2
#
# i.e., the executables `github_changelog_generator` and `sphinx-build`
# with `sphinx_rtd_theme` enabled.
#
# You'll also need the `deploy.sh` script:
#
#     curl --location 'https://github.com/X1011/git-directory-deploy/raw/master/deploy.sh' > $HOME/bin/deploy.sh
#     chmod 755 $HOME/bin/deploy.sh
#
# Then add `~/bin` to your PATH.
# Constantly
defaultSteps="preparation,unitTest,integrationTest,changeLog,requirements,docs,versionBump,build,githubRelease,artifactPublication,docPublication,cleanup"
# Check args
# FIX: use `[ ] || [ ]` / `&&` instead of the deprecated, ambiguous test
# operators -o and -a (this script runs under /bin/sh).
if [ "$#" -lt 2 ] || [ "$#" -gt 3 ]; then
    echo "Usage `basename $0` {stable|unstable} OWNER/REPO [step,step,…]" 1>&2
    echo "Where OWNER = GitHub owner or organization and REPO = repository name" 1>&2
    echo "Default steps are: $defaultSteps" 1>&2
    exit 1
fi
if [ "$1" != "stable" ] && [ "$1" != "unstable" ]; then
    echo "First argument must be 'stable' or 'unstable'" 1>&2
    exit 1
fi
# Check for required files
if [ ! -f "${HOME}/.secrets/github-roundup.token" ]; then
    echo "No github-roundup.token file found; aborting" 1>&2
    exit 1
fi
if [ ! -f "${HOME}/.secrets/roundup.sh" ]; then
    echo "No roundup.sh found; aborting" 1>&2
    exit 1
fi
. ${HOME}/.secrets/roundup.sh
# Check required env vars
# FIX: the first test read "${pypi_username} " (trailing space), which is
# never the empty string, so a missing pypi_username was never detected.
if [ -z "${pypi_username}" ] || [ -z "${pypi_password}" ]; then
    echo "The pypi_username and pypi_password must be set in ~/.secrets/roundup.sh, always" 1>&2
    exit 1
fi
# FIX: this branch printed its error but fell through; exit like the others.
if [ -z "${ossrh_username}" ] || [ -z "${ossrh_password}" ]; then
    echo "The ossrh_username and ossrh_password must be set in ~/.secrets/roundup.sh, always" 1>&2
    exit 1
fi
# Set additional env vars
# FIX: `==` is a bashism inside [ ] under /bin/sh; POSIX uses `=`.
[ "$1" = "stable" ] && stable=true || stable=false
export ADMIN_GITHUB_TOKEN=`cat ${HOME}/.secrets/github-roundup.token`
export GITHUB_TOKEN=$ADMIN_GITHUB_TOKEN
export ROUNDUP_STABLE="$stable"
export ROUNDUP_STEPS=${3:-$defaultSteps}
export GITHUB_REPOSITORY=$2
export GITHUB_WORKSPACE=${PWD}
# Yes GITHUB_ACTIONS should be true to fully simulate things, but we specifically want to check if we need
# to update credentials in /root (GitHub Actions) or $HOME (everywhere else)
export GITHUB_ACTIONS=false
export CI=true
# These are set by GitHub Actions but are otherwise not needed by Roundup…YET.
export ACTIONS_CACHE_URL=''
export ACTIONS_RUNTIME_TOKEN=''
export ACTIONS_RUNTIME_URL=''
export GITHUB_ACTION=''
export GITHUB_ACTOR=''
export GITHUB_API_URL=''
export GITHUB_BASE_REF=''
export GITHUB_ENV=''
export GITHUB_EVENT_NAME=''
export GITHUB_EVENT_PATH=''
export GITHUB_GRAPHQL_URL=''
export GITHUB_HEAD_REF=''
export GITHUB_JOB=''
export GITHUB_PATH=''
export GITHUB_REF=''
export GITHUB_REPOSITORY_OWNER=''
export GITHUB_RUN_ID=''
export GITHUB_RUN_NUMBER=''
export GITHUB_SERVER_URL=''
export GITHUB_SHA=''
export GITHUB_WORKFLOW=''
export INPUT_MODE=''
export RUNNER_OS=''
export RUNNER_TEMP=''
export RUNNER_TOOL_CACHE=''
export RUNNER_WORKSPACE=''
# Do it (assuming "roundup" is on the PATH)
exec roundup --debug --assembly env
| true
|
697fb53f70b05ae070bf142864d98ef7a4fb7767
|
Shell
|
hooray1998/dotfiles
|
/shell/prompt.sh
|
UTF-8
| 407
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Shared prompt setup for bash and zsh.
# Right-hand zsh prompt: show the last exit status in red when non-zero.
export RPROMPT="%F{red}%(?..%?)%f"
if [ -n "$BASH_VERSION" ]; then
	# bash: purple greeting + user, green working directory.
	export PS1='\[\e[38;5;135m\]加油呀\u\[\e[0m\] \[\e[38;5;118m\]\w\[\e[0m\] > '
elif [ "$UID" -eq 0 ]; then
	# zsh as root: user@host plus full path and the %# marker.
	export PROMPT="%F{135}%n%f@%F{166}%m%f %F{118}%~%f %# "
else
	# zsh as a regular user: greeting + current directory name.
	export PROMPT="%F{045}加油呀%n%f %F{118}%c%f > "
	#export PROMPT="%F{135}%n%f@%F{166}%m%f %F{118}%~%f \$ "
fi
| true
|
6dd8f48e7a8a03e48f1fbf6ee0fdb844ef5db1f7
|
Shell
|
vincent-zurczak/roboconf-docker-compliant-lamp
|
/src/main/model/graph/Tomcat/scripts/deploy.sh
|
UTF-8
| 1,041
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Roboconf deploy script for the Tomcat component: install Java + Tomcat,
# template the connector ports into server.xml, and replace the welcome page.
# Assumes $ROBOCONF_FILES_DIR, $ajpPort, $serverPort, $httpPort and $ip are
# exported by the Roboconf agent — TODO confirm against the agent docs.
# Install required packages
apt-get update
apt-get -y install wget
apt-get -y install unzip
apt-get -y install default-jre-headless
# Download and unzip Tomcat
cd ~
wget http://central.maven.org/maven2/org/apache/tomcat/tomcat/7.0.64/tomcat-7.0.64.zip
unzip tomcat-7.0.64.zip
mv apache-tomcat-7.0.64 tomcat
chmod 775 tomcat/bin/*.sh
# Update the configuration files: substitute the _port_ placeholders in the
# shipped server.xml with the values provided by Roboconf.
cp $ROBOCONF_FILES_DIR/server.xml ~/tomcat/conf/server.xml
sed -i "s/_ajpPort_/$ajpPort/g" ~/tomcat/conf/server.xml
sed -i "s/_serverPort_/$serverPort/g" ~/tomcat/conf/server.xml
sed -i "s/_httpPort_/$httpPort/g" ~/tomcat/conf/server.xml
# Update the welcome page (unquoted EOF: $ip is expanded when writing).
cat << EOF > ~/tomcat/webapps/ROOT/index.html
<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>Apache Tomcat</title>
</head>
<body>
<h1>It works !</h1>
<p>From Tomcat at: $ip</p>
</body>
</html>
EOF
| true
|
4c50d170fa1aaf13bf5516edb9750877c19a1b76
|
Shell
|
johnycao988/cloud-deployment
|
/01-env-setup/01-git.install.sh
|
UTF-8
| 462
| 2.75
| 3
|
[] |
no_license
|
## Master server ,first need to run "chmod +x *.sh" in command line
#!/bin/sh
# NOTE(review): the shebang above is on the second line, so it is inert; the
# script runs under whatever shell invokes it.
### step1: install git
echo "Installing git..."
# FIX: pass -y so yum does not block on a confirmation prompt when the
# script runs non-interactively.
yum install -y git
### step2: config
echo "Configing git..."
git config --global user.name "Johnny Cao"
git config --global user.email "johnnycao@chinasystems.com"
## No password for git commit
git config --global credential.helper store
## Not commit after change file access.
# FIX: without --global this fails ("not in a git directory") because the
# setup script is not run inside a repository.
git config --global core.fileMode false
echo "Successfully install and config git."
| true
|
e846e60373966edfdb8d8fa2ca5d6198aafb24cf
|
Shell
|
HellenicMilsim/serverconfig
|
/jobs/server-restart-job.sh
|
UTF-8
| 793
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# This script force-restarts the server and moves old missions out of the way.
# Expects job-config.sh to define TS, MAINTENANCE_FILE, MISSIONS_FOLDER,
# MISSION_KEEP_REGEX, MISSION_MAX_AGE_DAYS, OLD_MISSIONS_FOLDER and the
# server_stop function — TODO confirm against job-config.sh.
echo "${TS} - Starting job: server-restart"
source "job-config.sh"
# FIX: `cd "~"` looked for a directory literally named "~"; tilde only
# expands unquoted (or use $HOME).
cd "$HOME" || exit 1
# Raise maintenance flag and kill the server
touch "$MAINTENANCE_FILE"
# FIX: shell functions are invoked like commands — `server_stop($1)` was a
# syntax error.
server_stop "$1"
# FIX: single quotes suppressed the $MISSION_MAX_AGE_DAYS expansion, so the
# threshold was computed from the literal string.
age_threshold=$(date -d "now - $MISSION_MAX_AGE_DAYS days" +%s)
counter=0
# FIX: iterate the glob directly instead of parsing `ls` output (which also
# caused the folder prefix to be duplicated in the mv below).
for file in $MISSIONS_FOLDER/$MISSION_KEEP_REGEX
do
	[ -e "$file" ] || continue
	file_age=$(date -r "$file" +%s)
	# Skip files newer than specified threshold
	# FIX: `[ (( ... )) ]; &&` was invalid syntax; use a plain arithmetic test.
	(( file_age > age_threshold )) && continue
	echo "${TS} - Moving mission file: $file"
	mv "$file" "$OLD_MISSIONS_FOLDER"
	# FIX: a bare `$((counter+1))` tries to *execute* the result; assign it.
	counter=$((counter + 1))
done
echo "${TS} - Moved files: $counter"
echo "${TS} - End job: server-restart"
# Do NOT start server at the end. The keepalive job should kick in shortly
| true
|
e7064484340496c990e166ca1700853fdfff417f
|
Shell
|
sc2xos/letkf
|
/roms/letkf/run/init.sh
|
UTF-8
| 1,872
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
#=======================================================================
# init.sh
#   This script prepares for new LETKF cycle-run experiment
#=======================================================================
set -e
#-----------------------------------------------------------------------
# Modify below according to your environment
#-----------------------------------------------------------------------
MEMBER=20
### directory settings
CDIR=`pwd`
cd ../..
ROMS=`pwd`
OBS=5x20_new
EXP=LETKF20H50V050
NATURE=$ROMS/DATA/nature       # nature run
SPINUP=$ROMS/DATA/spinup       # spin-up run
OUTPUT=$ROMS/DATA/$OBS/$EXP    # directory for new experiment
### initial time setting
IT=2004010100
#-----------------------------------------------------------------------
# Usually do not modify below
#-----------------------------------------------------------------------
cd $CDIR
### clean
rm -rf $OUTPUT
### mkdir
# Create per-member analysis/guess directories; member IDs are zero-padded
# to three digits (the two `-lt` tests below pad 5 -> 05 -> 005, 20 -> 020).
mkdir -p $OUTPUT/log
MEM=1
while test $MEM -le $MEMBER
do
if test $MEM -lt 100
then
MEM=0$MEM
fi
if test $MEM -lt 10
then
MEM=0$MEM
fi
mkdir -p $OUTPUT/anal/$MEM
mkdir -p $OUTPUT/gues/$MEM
MEM=`expr $MEM + 1`
done
# Ensemble mean / spread directories.
for MEM in mean sprd
do
mkdir -p $OUTPUT/anal/$MEM
mkdir -p $OUTPUT/gues/$MEM
done
mkdir -p $OUTPUT/infl_mul
### copy initial conditions
# Assign spin-up restart files rst0Y.I.nc to members in order; year 0 starts
# at I=4, later years at I=1, six files per year, until MEMBER is reached.
Y=0
MEM=0
while test $Y -le 3
do
I=1
if test $Y -eq 0
then
I=4
fi
while test $I -le 6
do
MEM=`expr $MEM + 1`
if test $MEM -gt $MEMBER
then
break
fi
if test $MEM -lt 100
then
MEM=0$MEM
fi
if test $MEM -lt 10
then
MEM=0$MEM
fi
echo "copying.. rst0$Y.$I.nc --> member $MEM"
cp $SPINUP/rst0$Y.$I.nc $OUTPUT/gues/$MEM/${IT}_rst.nc
# choceantime reads in.nc (nature restart) and stamps its ocean time onto
# out.nc (this member's first-guess restart) — see ncio/choceantime.
ln -s $NATURE/${IT}_rst.nc in.nc
ln -s $OUTPUT/gues/$MEM/${IT}_rst.nc out.nc
$ROMS/ncio/choceantime
rm in.nc
rm out.nc
I=`expr $I + 1`
done
if test $MEM -gt $MEMBER
then
break
fi
Y=`expr $Y + 1`
done
# Seed the multiplicative inflation field for the first cycle.
cp $NATURE/../infl_mul.nc $OUTPUT/infl_mul/${IT}_rst.nc
echo "NORMAL END"
| true
|
f3c6e852925d6dab5aefa744df918d3434087f0d
|
Shell
|
epfl-vlsc/persona-system
|
/compile.sh
|
UTF-8
| 1,277
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the TensorFlow pip-package target with bazel, selecting compiler
# flags by build type: opt (default), vtune (opt + debug info) or
# perf (dbg + gprof instrumentation).
# unofficial "bash strict mode"
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
# Commented out 'u' because the `activate` script has some unassignment issues
set -u
set -eo pipefail
IFS=$'\n\t'
build_type="opt"
DIR=$(dirname "$(realpath "$0")")
# Accept zero or one positional argument (the build type).
process_args() {
  case "$#" in
    0)
      ;;
    1)
      build_type="$1"
      ;;
    *)
      echo "Script only accepts 0 or 1 argument!"
      exit 2
      ;;
  esac
}
process_args "$@"
extra_opts=""
if [ "$build_type" = "opt" ]; then
    extra_opts="--copt -O3 --copt=-msse4.1 --copt=-msse4.2"
elif [ "$build_type" = "vtune" ]; then
    build_type="opt"
    extra_opts="--copt -g --copt -O3 --copt=-msse4.1 --copt=-msse4.2"
elif [ "$build_type" = "perf" ]; then
    build_type="dbg"
    # FIX: this was assigned to "extra_copts" (typo), so perf builds silently
    # dropped the -pg/SSE flags.
    extra_opts="--copt -pg --copt=-msse4.1 --copt=-msse4.2"
fi
# this is the option to build with the old ABI
# don't use because it'll cause linking issues with old libraries :/
# extra_opts="${extra_opts} --cxxopt -D_GLIBCXX_USE_CXX11_ABI=0"
echo "Building configuration $build_type"
#max_build_threads=$(bc <<< "scale=0; ($(nproc) * 1.1) / 1" )
max_build_threads=$(nproc)
set +u
eval "bazel build $extra_opts -j $max_build_threads -c $build_type //tensorflow/tools/pip_package:build_pip_package"
| true
|
bff5809b6cb167b0d4f611db4833bd9ae9d44f1d
|
Shell
|
petronny/aur3-mirror
|
/craft-fogleman-git/PKGBUILD
|
UTF-8
| 1,590
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: Martin C. Doege <mdoege at compuserve dot com>
# Arch Linux PKGBUILD for a git build of fogleman's Craft (Minecraft clone).
pkgname=craft-fogleman-git
_gitname=Craft
pkgver=736.fe3e7b3
pkgrel=6
pkgdesc="A Minecraft clone in C and Python with multiplayer support"
arch=('i686' 'x86_64')
url="http://www.michaelfogleman.com/craft/"
license=('MIT')
depends=('glew' 'curl' 'python2-requests')
makedepends=('git' 'cmake')
source=('git://github.com/fogleman/Craft.git'
        'craft.desktop'
        'craft_icon.png'
        'craft'
        'craft-server'
	)
md5sums=('SKIP'
         '3f9ad8e597f9654d9db3beeb53051ebd'
         '3b070adce3e98082a87e4c2bc50515d5'
         '3d89922150d69586aa8d6e021c9ab154'
         '4682aa6fe1d5592b500236f35fe9186c')
# Version = <commit count>.<short hash>, e.g. 736.fe3e7b3.
pkgver() {
	cd $_gitname
	echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
build() {
	cd $_gitname
	cmake .
	make
	gcc -std=c99 -O3 -shared -o world -I src -fPIC -I deps/noise deps/noise/noise.c src/world.c # needed for Python server
}
package() {
	cd $_gitname
	# Binary is installed as craft-bin; the wrapper script "craft" fronts it.
	install -Dm755 craft "$pkgdir/usr/bin/craft-bin"
	install -Dm755 $srcdir/craft "$pkgdir/usr/bin/craft"
	install -Dm755 $srcdir/craft-server "$pkgdir/usr/bin/craft-server"
	install -Dm644 LICENSE.md "$pkgdir/usr/share/licenses/craft/LICENSE"
	install -Dm644 README.md "$pkgdir/usr/share/doc/craft/README"
	# Shared data: shaders, textures, the world library and server scripts.
	mkdir -p "$pkgdir/usr/share/craft"
	cp -pr shaders textures "$pkgdir/usr/share/craft/"
	cp -p world *.py "$pkgdir/usr/share/craft/"
	install -Dm644 $srcdir/craft_icon.png $pkgdir/usr/share/icons/craft_icon.png
	install -Dm644 $srcdir/craft.desktop $pkgdir/usr/share/applications/craft.desktop
}
| true
|
88134cc26e7d6b133fb59fdb53e5d988fc1d1ab3
|
Shell
|
devopstoday11/consensas-ansible
|
/k8s/Kubernetes-KubeConfig.sh
|
UTF-8
| 684
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#
#   ansible/Kubernetes-KubeConfig.sh
#
#   David Janes
#   Consensas
#   2018-12-23
#
# Fetch the cluster admin kubeconfig via the Ansible playbook, rewrite its
# server address, and sanity-check it with `kubectl get nodes`.
FOLDER=$(dirname $0)
# Target host group; defaults to "master" when no argument is given.
ANSIBLE_HOSTS=$1
ANSIBLE_HOSTS=${ANSIBLE_HOSTS:=master}
ANSIBLE_INVENTORY=$FOLDER/../inventory.yaml
export ANSIBLE_HOST_KEY_CHECKING=False
set -x
ansible-playbook \
    --inventory "${ANSIBLE_INVENTORY}" \
    --verbose \
    --extra-vars "in_hosts=${ANSIBLE_HOSTS}" \
    $FOLDER/Kubernetes-KubeConfig.yaml
## admin.conf wants to give you an internal AWS IP
# Rewrite the API server URL to the externally reachable name (write to a
# temp file first so a failed sed does not truncate admin.conf).
sed -e '1,$ s|server:.*6443|server: https://aws-0001:6443|' < $FOLDER/admin.conf > $FOLDER/admin.conf.tmp &&
    mv $FOLDER/admin.conf.tmp $FOLDER/admin.conf
echo "---"
kubectl --kubeconfig $FOLDER/admin.conf get nodes
| true
|
e235487b0a1e54803724029ec35deae31ba13729
|
Shell
|
rbmarliere/stakemine.contracts
|
/build.sh
|
UTF-8
| 133
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Out-of-tree CMake build using every available CPU core.
CORES=$(getconf _NPROCESSORS_ONLN)
mkdir -p build
pushd build &> /dev/null
cmake ../
make -j"${CORES}"
popd &> /dev/null
| true
|
890f0cd949b0f621979c005820b9292f42bf5c67
|
Shell
|
ksanjeev/Scripts
|
/Kamal_Sunquest_Backups/shell scripting/sources/app_a/exercise_05_02
|
UTF-8
| 547
| 4.1875
| 4
|
[] |
no_license
|
#! /bin/sh
# "lockdown": keep each file's existing user permissions but strip every
# group/other permission.
#
# FIX: tutorial prose and a duplicate shebang had been pasted into the file
# verbatim; those bare lines were executed (and failed) at run time.  They
# are preserved below as comments:
#   Then, mark the script with execute permissions.
#     $ chmod a+x lockdown
#   The full script then appears as follows:
# Locks down file permissions.
for filename in *
do
	# Initialize all permissions.
	r=""
	w=""
	x=""
	# Check to preserve existing permissions.
	# FIX: quote $filename throughout so names with spaces don't break `[`
	# and chmod.
	if [ -r "$filename" ]
	then
		r="r"
	fi
	if [ -w "$filename" ]
	then
		w="w"
	fi
	if [ -x "$filename" ]
	then
		x="x"
	fi
	# Lock down the file permissions.
	chmod u+$r$w$x,g-rwx,o-rwx "$filename"
done
| true
|
5a89c8d0c54cf4f5b8a7e242d5ccbc546a237965
|
Shell
|
ktsitsikas-signal/SignalSDK
|
/release.sh
|
UTF-8
| 860
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Upload the built distribution to a package index with twine.
# NOTE(review): the -e in the shebang is lost if invoked as `bash release.sh`.
# Parse long options; unrecognized arguments are silently dropped.
while [[ "$#" -gt 0 ]]
do
case $1 in
    --repository-url)
        repository_url="$2"
        shift 2
        ;;
    --username)
        username="$2"
        shift 2
        ;;
    --password)
        password="$2"
        shift 2
        ;;
    *)
        shift
        ;;
esac
done
# All three options are mandatory.
if [[ ! $repository_url ]]
then
    echo 'Repository URL was not provided.'
    exit 1
fi
if [[ ! $username ]]
then
    echo 'Username was not provided.'
    exit 1
fi
if [[ ! $password ]]
then
    echo 'Password was not provided.'
    exit 1
fi
# The venv is created by build.sh; refuse to run without it.
if [[ ! -f ./env/bin/activate ]]
then
    echo 'Virtual environment does not exist. Run build.sh to create it.'
    exit 1
fi
source ./env/bin/activate
twine upload --repository-url "$repository_url" -u "$username" -p "$password" ./dist/*
| true
|
3b12047601db69aa87fd48f9aa9883d7f6c40275
|
Shell
|
bayvictor/distributed-polling-system
|
/bin/kill_ps_has_keyword.sh
|
UTF-8
| 494
| 2.984375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Interactively kill (SIGKILL) every process whose `ps -ef` line matches the
# given keyword.  NOTE(review): no shebang — runs under the invoking shell.
echo "usage:$0 <kill_keyword_in_ps_ef>"
echo ".e.g.: $0 firefox"
echo "^C to break, anykey to continue...";read readline
echo "below ps -ef results will be killing...";read readline2
ps -ef | grep $1
echo "^C to break, anykey to continue...";read readline
#ps -ef|grep wget |tr '\b' ' '|cut -d" " -f4-9|sed 's/^ *//g'|cut -d" " -f1|sed 's/^/kill -9 /g'
# Build a throwaway run.sh of "kill -9 <pid>" lines from the matching rows.
# NOTE(review): the grep also matches its own pipeline process, and
# `chmod +x *.sh` touches every .sh file in the directory — pgrep/pkill
# would be safer; verify before relying on this.
ps -ef|grep $1|tr '\b' ' '|cut -d" " -f4-9 | sed 's/^ *//g' | cut -d" " -f1|sed 's/^/kill -9 /g' > ./run.sh
chmod +x *.sh
./run.sh
| true
|
72008c1f388520a4ea62a4e16d4028936886b3e0
|
Shell
|
AminTaheri1/uwc3x
|
/addons/amxmodx/scripting/compile.sh
|
UTF-8
| 491
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# AMX Mod X
#
# by the AMX Mod X Development Team
#	originally developed by OLO
#
# This file is part of AMX Mod X.
# new code contributed by \malex\
#
# Compile every .sma plugin in the current directory into compiled/*.amxx,
# collecting compiler output in temp.txt and paging it at the end.
test -e compiled || mkdir compiled
rm -f temp.txt
for sourcefile in *.sma
do
	# FIX: derive the output name with parameter expansion instead of an
	# `echo | sed` subshell per file, and quote the expansions so source
	# files with spaces in their names compile correctly.
	amxxfile="${sourcefile%.sma}.amxx"
	printf 'Compiling %s ...' "$sourcefile"
	./amxxpc "$sourcefile" -o"compiled/$amxxfile" >> temp.txt
	echo "done"
done
less temp.txt
rm temp.txt
| true
|
e972556b17f30e492c8ea243733f83c79dae4904
|
Shell
|
t-ubukata/dotfiles
|
/.zshrc
|
UTF-8
| 2,734
| 2.546875
| 3
|
[] |
no_license
|
# ~/.zshrc — interactive zsh configuration: PATH, prompt, completion,
# aliases and fzf integration, with a per-OS section at the bottom.
# Prepend ~/.local/bin to PATH once (guard avoids duplicates on re-source).
if [[ ! "$PATH" == *"$HOME"/.local/bin* ]]; then
  export PATH="$HOME/.local/bin${PATH:+:${PATH}}"
fi
export LESSCHARSET=utf-8
export VISUAL=vi
export EDITOR=vi
export LESSEDIT="vi %f"
export FZF_DEFAULT_COMMAND=fd
# Shared, deduplicated history across sessions.
HISTFILE=~/.zsh_history
HISTSIZE=100000
SAVEHIST=100000
bindkey -e
autoload -Uz colors
colors
autoload -Uz compinit
compinit
autoload -Uz promptinit
promptinit
prompt off
setopt auto_pushd
setopt pushd_ignore_dups
setopt nonomatch
setopt histignorealldups sharehistory
unsetopt promptcr
# Free up Ctrl-S/Ctrl-Q (terminal flow control) for other bindings.
stty stop undef
stty start undef
# Completion behaviour and colors.
zstyle :compinstall filename
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _correct _approximate
zstyle ':completion:*' format 'Completing %d'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' menu select=2
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
zstyle ':completion:*' menu select=long
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
# Aliases (git shortcuts, colored grep, etc.).
alias grep='grep --color=auto'
alias gr='grep -RIn --color=auto'
alias py='python3'
alias tm='tmux -2u'
alias gs='git status'
alias gb='git branch'
alias gco='git checkout'
alias gd='git diff'
alias ga='git add'
alias gcm='git commit'
alias gf='git fetch'
alias gpl='git pull'
alias gps='git push'
alias gl='git log'
alias gdb="gdb -q"
# OS-specific PATH and fzf keybindings/completions.
case "$OSTYPE" in
  darwin*)
    export LANG=en_US.UTF-8
    alias l='ls -ahlFG'
    # For fzf-tmux
    if [[ ! "$PATH" == */usr/local/opt/fzf/bin* ]]; then
      export PATH="${PATH:+${PATH}:}/usr/local/opt/fzf/bin"
    fi
    # Source iff interactive
    [[ $- == *i* ]] && source "/usr/local/opt/fzf/shell/completion.zsh" 2> /dev/null
    source "/usr/local/opt/fzf/shell/key-bindings.zsh"
  ;;
  linux*)
    export LANG=C.UTF-8
    export DEBIAN_FRONTEND=noninteractive
    alias l='ls -ahlF --color=auto'
    alias fd='fdfind'
    eval "$(dircolors -b)"
    # For fzf-tmux
    if [[ ! "$PATH" == *"$HOME"/project/dotfiles/.vim/pack/mypackage/start/fzf/bin* ]]; then
      export PATH="${PATH:+${PATH}:}$HOME/.vim/pack/mypackage/start/fzf/bin"
    fi
    # Source iff interactive
    [[ $- == *i* ]] && source "$HOME/.vim/pack/mypackage/start/fzf/shell/completion.zsh" 2> /dev/null
    source "$HOME/.vim/pack/mypackage/start/fzf/shell/key-bindings.zsh"
  ;;
esac
| true
|
8803f101047db9e7ce36f51bff3f905dcc7851cc
|
Shell
|
fsinfuhh/mafiasi
|
/scripts/start_db.sh
|
UTF-8
| 304
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Start a disposable PostgreSQL 15 container (podman) for the mafiasi
# dashboard, persisting data under <repo>/db-data.  Any extra arguments are
# forwarded to `podman run`.
# FIX: quote the path expansions so a checkout path containing spaces works.
D=$(realpath "$(dirname "$0")/..")
mkdir -p "$D/db-data"
# FIX: forward the extra arguments as "$@" (quoted); a bare $@ re-split any
# argument containing whitespace.
exec podman run \
	--rm \
	-e POSTGRES_DB=mafiasi-dashboard \
	-e POSTGRES_USER=mafiasi-dashboard \
	-e POSTGRES_PASSWORD=mafiasi-dashboard \
	-v "$D/db-data:/var/lib/postgresql/data" \
	-p 5432:5432 \
	"$@" \
	docker.io/postgres:15
| true
|
6ce47cff7ea19cffeba09586fbe6b919d843f7ab
|
Shell
|
count0ru/redis-cluster
|
/create_cluster.sh
|
UTF-8
| 684
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a 6-node Redis cluster (3 masters + 3 replicas) from running
# docker-compose containers named rediscluster_node-1..6.
#install redis tools (for deb based linux)
apt-get update && apt-get install ruby wget redis-tools -y
#download and install redis cluster tool
wget http://download.redis.io/redis-stable/src/redis-trib.rb -O ./redis-trib.rb
chmod +x redis-trib.rb
gem install redis
# Collect each container's IP via `docker inspect`, join them as
# "ip:6379 ip:6379 ..." (the sed collapses newlines), and feed the list to
# redis-trib with one replica per master.
./redis-trib.rb create --replicas 1 $(echo $(for i in {1..6}; do docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' rediscluster_node-$i\_1; done | sed ':a;N;$!ba;s/\n/:6379 /g'):6379)
echo -e "\n \t \e[32m \033[1m ALL DONE! \033[0m \n"
echo 'Please, run:'
echo '`redis-cli -c -h <ANY YOUR DOCKER CLUSTER IP> CLUSTER INFO`'
echo 'to read information about the assembled cluster'
| true
|
11d8b91bce4fc86aef5164d9e1c5bc9b7ba8ec62
|
Shell
|
OceanMixingGroup/chipod_gust
|
/driver/monitorMatlab.sh
|
UTF-8
| 502
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Watch running MATLAB processes every 5 seconds.  When run on a host whose
# name starts with "matlab", poll all five matlab1..matlab5 hosts over ssh;
# otherwise just watch the local process list.
if [[ $(hostname) == matlab* ]]
then
    watch -n 5 "echo 'matlab1';echo; ssh matlab1 'ps aux | grep matlab | grep -v grep';echo;echo 'matlab2';echo;ssh matlab2 'ps aux | grep matlab | grep -v grep';echo; echo 'matlab3';echo;ssh matlab3 'ps aux | grep matlab | grep -v grep';echo;echo 'matlab4';echo;ssh matlab4 'ps aux | grep matlab | grep -v grep';echo; echo 'matlab5';echo;ssh matlab5 'ps aux | grep matlab | grep -v grep ';"
else
    watch -n 5 "ps aux | grep matlab | grep -v grep"
fi
| true
|
6ce2007d8ad0d4888345226cda06986efd6a0c63
|
Shell
|
plus3it/LxEBSbackups
|
/AutoTag.sh
|
UTF-8
| 4,279
| 3.703125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC1090,SC2207
#
# This script is designed to make it easy to capture useful
# information about attached EBS volumes and save that
# information to the volumes' tag-sets
#
#################################################################
PVS=/sbin/pvs
# Fetch this instance's identity document from the EC2 metadata service and
# pull out the instance-id and region.
# NOTE(review): the inline python uses Python 2 `print` statements — this
# breaks on python3-only hosts; confirm the target AMIs still ship python2.
INSTANCEMETADOC="http://169.254.169.254/latest/dynamic/instance-identity/document/"
INSTANCEMETADAT="$( curl -sL ${INSTANCEMETADOC} )"
INSTANCEID="$( echo "${INSTANCEMETADAT}" | \
               python -c 'import json,sys;
                  obj=json.load(sys.stdin);print obj["instanceId"]'
             )"
AWS_DEFAULT_REGION="$( echo "${INSTANCEMETADAT}" | \
               python -c 'import json,sys;
                  obj=json.load(sys.stdin);print obj["region"]'
             )"
# Export critical values so sub-shells can use them
export AWS_DEFAULT_REGION
# Lets force the use of credentials from attached IAM Instance-role
# NOTE(review): ${PROGDIR} is never set in this file — presumably exported by
# the caller; verify, since an unset value makes this `source /setcred.sh`.
source "${PROGDIR}/setcred.sh"
# Check your privilege...
# NOTE(review): logIt is not defined in this file; it likely comes from
# setcred.sh — confirm.
function AmRoot {
   if [[ $(whoami) = root ]]
   then
      logIt "Running with privileges" 0
   else
      logIt "Insufficient privileges. Aborting..." 1
   fi
}
# Got LVM?  Echoes TRUE/FALSE depending on whether lvm2 is installed.
function CkHaveLVM {
   local HAVELVM
   if [[ $(rpm --quiet -q lvm2)$? -eq 0 ]] && [[ -x ${PVS} ]]
   then
      HAVELVM=TRUE
   else
      HAVELVM=FALSE
   fi
   echo ${HAVELVM}
}
# Return list of attached EBS Volume-IDs
function GetAWSvolIds {
   local VOLIDS
   VOLIDS=(
      $( aws ec2 describe-instances --instance-id="${INSTANCEID}" --query \
         "Reservations[].Instances[].BlockDeviceMappings[].Ebs[].VolumeId" \
         --out text
       )
     )
   echo "${VOLIDS[@]}"
}
# Map attached EBS Volume-ID to sdX device-node (trailing partition digits
# are stripped so the result names the whole disk).
function MapVolIdToDsk {
   local VOLID
   local DEVMAP
   VOLID="${1}"
   DEVMAP=$( aws ec2 describe-volumes --volume-id="${VOLID}" \
             --query="Volumes[].Attachments[].Device[]" --out text | \
             sed 's/[0-9]*$//'
           )
   echo "${DEVMAP}"
}
# Tack on LVM group-associations where appropriate.  Takes the names of two
# arrays ("volid:dev" entries and pvs "dev:vg" entries) and rewrites matching
# entries in the first to "volid:dev:vg".
function AddLVM2Map {
   local EBSMAPARR
   local LOOPC
   local LVMMAPARR
   local SRCHTOK
   EBSMAPARR=("${!1}")
   LVMMAPARR=("${!2}")
   LOOPC=0
   while [[ ${LOOPC} -le ${#LVMMAPARR[@]} ]]
   do
      SRCHTOK=$(echo "${LVMMAPARR[${LOOPC}]}" | cut -d ":" -f 1)
      # This bit of ugliness avoids array re-iteration...
      EBSMAPARR=("${EBSMAPARR[@]/${SRCHTOK}/${LVMMAPARR[${LOOPC}]}}")
      LOOPC=$(( LOOPC + 1 ))
   done
   echo "${EBSMAPARR[@]}"
}
# Tag-up the EBSes: for each "volid:dev[:vg]" mapping, write the owning
# instance, attachment point and LVM group into the volume's tag-set.
function TagYerIt {
   # Initialize as local
   local LOOPC
   local MAPPING
   local EBSID
   local LVMGRP
   local DEVMAP
   LOOPC=0
   MAPPING=("${!1}")
   # Iterate over mapping-list...
   while [[ ${LOOPC} -lt ${#MAPPING[@]} ]]
   do
      EBSID=$( echo "${MAPPING[${LOOPC}]}" | cut -d ":" -f 1 )
      LVMGRP=$( echo "${MAPPING[${LOOPC}]}" | cut -d ":" -f 3 )
      # Don't try to set null tags...
      if [ "${LVMGRP}" = "" ]
      then
         LVMGRP="(none)"
      fi
      # Because some some EBSes declare dev-mappings that end in numbers...
      DEVMAP=$( aws ec2 describe-volumes --volume-id="${EBSID}" \
                --query="Volumes[].Attachments[].Device[]" --out text )
      printf "Tagging EBS Volume %s... " "${EBSID}"
      aws ec2 create-tags --resources "${EBSID}" --tags \
        "Key=Owning Instance,Value=${INSTANCEID}" \
        "Key=Attachment Point,Value=${DEVMAP}" \
        "Key=LVM Group,Value=${LVMGRP}" \
        && echo "Done." || echo "Failed."
      LOOPC=$(( LOOPC + 1 ))
   done
}
#######################
## Main program flow
#######################
AmRoot
printf "Determining attached EBS volume IDs... "
EBSVOLIDS=$(GetAWSvolIds) && echo "Done." || echo "Failed."
printf "Mapping EBS volume IDs to local devices... "
LOOP=0
for EBSVOL in ${EBSVOLIDS}
do
   EBSDEV="$( MapVolIdToDsk "${EBSVOL}" )"
   EBSMAP[${LOOP}]="${EBSVOL}:${EBSDEV}"
   LOOP=$(( LOOP + 1 ))
done && echo "Done." || echo "Failed."
# Only consult pvs when LVM is actually installed.
if [ "$(CkHaveLVM)" = "TRUE" ]
then
   echo "Looking for LVM object... "
   LVOBJ=($(${PVS} --noheadings -o pv_name,vg_name --separator ':' | sed 's/[0-9]*:/:/'))
   echo "Updating EBS/device mappings"
   MAPPINGS=($(AddLVM2Map "EBSMAP[@]" "LVOBJ[@]"))
else
   export LVOBJ=()
   MAPPINGS=( "${EBSMAP[@]}" )
fi
TagYerIt "${MAPPINGS[@]}"
| true
|
5deb096df630cea6915babdffb2bed55ff957555
|
Shell
|
EugeneSqr/dotfiles
|
/setup/setup-xfce4-terminal.sh
|
UTF-8
| 165
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Link the xfce4-terminal config from the dotfiles repo into the user's
# config directory, unless a terminalrc is already present.
terminalrc="$XDG_CONFIG_HOME/xfce4/terminal/terminalrc"
if [ ! -f "$terminalrc" ]; then
    ln -s "$dotfiles_dir/xfce4-terminal/terminalrc" "$XDG_CONFIG_HOME/xfce4/terminal/"
fi
| true
|
f8c2250dae42889e06bb7a59315088cd10a175f3
|
Shell
|
skypexu/csup
|
/cpasswd.sh
|
UTF-8
| 2,971
| 4.09375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
#
# Copyright 2007. Petar Zhivkov Petrov
# pesho.petrov@gmail.com
#
# Csup authentication key generator: validates the client/server names,
# prompts for a password twice, and prints the auth lines for both sides.
# NOTE(review): `md5 -qs` is a BSD/macOS command; GNU systems ship md5sum
# instead — confirm the target platform.  `local` under /bin/sh is a common
# extension but not strictly POSIX.
usage() {
	echo "Usage: $0 clientName serverName"
	echo "       $0 -v"
}
# Set _count to the number of characters of class $2 present in string $1.
countChars() {
	_count="`echo "$1" | sed -e "s/[^$2]//g" | tr -d "\n" | wc -c`"
	return 0
}
# Prompt (echo off) until a password without ":" is entered; result in _password.
readPassword() {
	while [ true ]; do
		stty -echo
		read -p "$1" _password
		stty echo
		echo ""
		countChars "$_password" ":"
		if [ $_count != 0 ]; then
			echo "Sorry, password must not contain \":\" characters"
			echo ""
		else
			break
		fi
	done
	return 0
}
# _secret = "$md5$" + md5(lowercase(client):lowercase(server):password).
makeSecret() {
	local clientLower="`echo "$1" | tr "[:upper:]" "[:lower:]"`"
	local serverLower="`echo "$2" | tr "[:upper:]" "[:lower:]"`"
	local secret="`md5 -qs "$clientLower:$serverLower:$3"`"
	_secret="\$md5\$$secret"
}
if [ $# -eq 1 -a "X$1" = "X-v" ]; then
	echo "Csup authentication key generator"
	usage
	exit
elif [ $# -ne 2 ]; then
	usage
	exit
fi
clientName=$1
serverName=$2
#
# Client name must contain exactly one '@' and at least one '.'.
# It must not contain a ':'.
#
countChars "$clientName" "@"
aCount=$_count
countChars "$clientName" "."
dotCount=$_count
if [ $aCount -ne 1 -o $dotCount -eq 0 ]; then
	echo "Client name must have the form of an e-mail address,"
	echo "e.g., \"user@domain.com\""
	exit
fi
countChars "$clientName" ":"
colonCount=$_count
if [ $colonCount -gt 0 ]; then
	echo "Client name must not contain \":\" characters"
	exit
fi
#
# Server name must not contain '@' and must have at least one '.'.
# It also must not contain a ':'.
#
countChars "$serverName" "@"
aCount=$_count
countChars "$serverName" "."
dotCount=$_count
if [ $aCount != 0 -o $dotCount = 0 ]; then
	echo "Server name must be a fully-qualified domain name."
	echo "e.g., \"host.domain.com\""
	exit
fi
countChars "$serverName" ":"
colonCount=$_count
if [ $colonCount -gt 0 ]; then
	echo "Server name must not contain \":\" characters"
	exit
fi
#
# Ask for password and generate secret.
#
# Loop until both entries hash to the same secret (i.e. passwords match).
while [ true ]; do
	readPassword "Enter password: "
	makeSecret "$clientName" "$serverName" "$_password"
	secret=$_secret
	readPassword "Enter same password again: "
	makeSecret "$clientName" "$serverName" "$_password"
	secret2=$_secret
	if [ "X$secret" = "X$secret2" ]; then
		break
	else
		echo "Passwords did not match.  Try again."
		echo ""
	fi
done
echo ""
echo "Send this line to the server administrator at $serverName:"
echo "-------------------------------------------------------------------------------"
echo "$clientName:$secret::"
echo "-------------------------------------------------------------------------------"
echo "Be sure to send it using a secure channel!"
echo ""
echo "Add this line to your file \"$HOME/.csup/auth\", replacing \"XXX\""
echo "with the password you typed in:"
echo "-------------------------------------------------------------------------------"
echo "$serverName:$clientName:XXX:"
echo "-------------------------------------------------------------------------------"
echo "Make sure the file is readable and writable only by you!"
echo ""
| true
|
a2c84a8137759edc53140e60b9e6974b7f905c95
|
Shell
|
parallelworks/welding
|
/utils/genAnimation.sh
|
UTF-8
| 830
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Render an animated GIF of the NDTEMP result from a CalculiX run: for each
# timestep, drive cgx in batch mode (headless via xvfb-run) to dump a PNG,
# then stitch the frames together with ImageMagick.
dir=/core/
rm pngs -R > /dev/null 2>&1
mkdir -p pngs
timestep=0.1
simtime=0.5
timesteps=$(echo $simtime/$timestep | bc)
#timesteps=1
startDS=2 # specific to NDTEMP
numElements=4
maxDatasets=$(echo $numElements*$timesteps | bc)
element=1
frame=1
# Datasets are interleaved per element, so step by numElements starting at 2.
for ds in $(seq 2 $numElements $maxDatasets);do
echo $frame,$ds,$element
# Generate a one-shot cgx batch file that loads the results, sets the view,
# selects dataset $ds / entity $element, and writes a numbered PNG.
cat > tmp.fbd <<END
read solve.frd new
view bg k
view fill
view volu
view disp
view elem
frame
zoom 0.8
rot y
rot r 35
rot u 35
ds $ds e $element
hcpy png
sys mv hcpy_1.png pngs/$(printf "%04d\n" $frame).png
exit
END
xvfb-run -a --server-args="-screen 0 1024x768x24" $dir/cgx-212/cgx_2.12/src/cgx -b tmp.fbd > /dev/null # 2>&1
((frame++))
done
# Assemble the frames into a looping GIF (15/100 s per frame).
convert -delay 15 -loop 0 pngs/*.png temp.gif
#rm pngs -R
| true
|
4496fb5f290ae9a68996dcc7f28573688cdfcef9
|
Shell
|
Kron610/Bash-lab
|
/5.23.sh
|
UTF-8
| 249
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# List the contents of a directory ("catalog"): the current one if the user
# answers "Yes", otherwise a path the user names interactively.
#
# Fixes vs. the original: unquoted $ANS made '[' fail ("unary operator
# expected") on empty or multi-word input; '==' is not POSIX inside '[';
# 'read' without -r mangles backslashes; $CAT was unquoted.

echo "Do you want to see content of current catalog (Yes/No)?"
read -r ANS
if [ "$ANS" = "Yes" ]
then
    ls
elif [ "$ANS" = "No" ]
then
    echo "Enter catalog"
    read -r CAT
    # -e: the path exists (any type), matching the original check.
    if [ -e "$CAT" ]
    then
        ls -- "$CAT"
    else
        echo "Catalog doesn't exist"
    fi
fi
| true
|
5f0bf7d16d0a85980efb3f270ec1564965c37d7d
|
Shell
|
clooney2007/Dev
|
/College projects/Sistemas Operativos Avanzados (C++)/Distributed BackUp Mejorado/distributed-backup-server
|
UTF-8
| 1,941
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Distributed-Backup-Server /etc/init.d init script for Distributed-Backup-Server
#
### BEGIN INIT INFO
# Provides: Distributed-Backup-Server
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: starts and stops the daemon
# Description: Distributed-Backup-Server daemon.
### END INIT INFO
# Exit immediately if a command fails
# http://www.gnu.org/software/bash/manual/bashref.html#The-Set-Builtin
set -e

# Import LSB functions:
# start_daemon, killproc, status_of_proc, log_*, etc.
. /lib/lsb/init-functions

NAME=Distributed-Backup-Server
PIDFILE=/var/run/$NAME.pid
DAEMON=/home/rafael/SOA/SOA_mel/soa-distributed-backup-plus/build-Server-Desktop_Qt_5_8_0_GCC_64bit-Debug/$NAME
DAEMON_OPTS="--daemon"

# If the daemon binary does not exist, exit (LSB exit code 5: not installed).
test -x "$DAEMON" || exit 5

# Start the daemon via the LSB helper, recording its PID in $PIDFILE.
# $DAEMON_OPTS is intentionally unquoted so it can expand to several options.
start()
{
    log_daemon_msg "Starting the $NAME process"
    start_daemon -p "$PIDFILE" -- "$DAEMON" $DAEMON_OPTS
    log_end_msg $?
}

# Stop the daemon. LSB killproc requires the daemon pathname in addition to
# the pidfile ('killproc [-p pidfile] pathname [signal]'); the original call
# omitted it.
stop()
{
    log_daemon_msg "Stopping the $NAME process"
    killproc -p "$PIDFILE" "$DAEMON"
    log_end_msg $?
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  status)
    if [ -e "$PIDFILE" ]; then
      status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME process"
    else
      log_failure_msg "$NAME process is not running"
      log_end_msg 0
    fi
    ;;
  restart)
    stop
    start
    ;;
  reload|force-reload)
    # Ask the daemon to reload its configuration via SIGHUP.
    if [ -e "$PIDFILE" ]; then
      killproc -p "$PIDFILE" "$DAEMON" -SIGHUP
      log_success_msg "$NAME process reloaded successfully"
    else
      log_failure_msg "$NAME process is not running"
      log_end_msg 0
    fi
    ;;
  *)
    echo "Usage: $0 {start|stop|status|reload|force-reload|restart}"
    exit 2
    ;;
esac
| true
|
fed7e14dbf77756376e8df4cef9783f6b9e93ee6
|
Shell
|
zhaozhihua2008/deploy-service
|
/scripts/dservice.sh
|
UTF-8
| 1,554
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# edit by zzh
# ./
#
# JVM service launcher: builds a classpath from conf/, devConf/, libs/ and
# plugins/, assembles JVM options, starts com.boco.deploy.Application in the
# background and records its PID in ./pidfile.
APP_BIN=$(dirname "$0")
APP_HOME=$APP_BIN/..
export APP_HOME
echo "$APP_HOME"

# Classpath: configuration directories first, then every jar found below.
JARS="${APP_HOME}/conf:${APP_HOME}/devConf:"
LIB=$APP_HOME/libs
if [ -d "$LIB" ]; then
    for i in "$LIB"/*.jar; do
        JARS="$JARS:$i"
    done
fi
PLUGIN=$APP_HOME/plugins
if [ -d "$PLUGIN" ]; then
    for i in "$PLUGIN"/*.jar; do
        JARS="$JARS:$i"
    done
fi
#echo $JARS
IP=$(hostname -i)
HOST=$(hostname)   # currently unused; kept for parity with $IP
###COMMON OPTS
JAVA_OPTS="$JAVA_OPTS -Dlog.home=${APP_HOME}/log -Dfile.encoding=UTF-8"
JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
JAVA_OPTS="$JAVA_OPTS -server -XX:+DisableExplicitGC"
###CMS GC
#JAVA_OPTS="$JAVA_OPTS -XX:NewRatio=4 -XX:SurvivorRatio=2 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled"
###G1 GC
JAVA_OPTS="$JAVA_OPTS -XX:+UseG1GC -XX:MaxGCPauseMillis=1000"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails -XX:+PrintGCDateStamps "
###APP
JAVA_OPTS="$JAVA_OPTS -Xms2g -Xmx2g -XX:MetaspaceSize=128M -server -Xnoclassgc -Djava.net.preferIPv4Stack=true"
JAVA_OPTS="$JAVA_OPTS -Djava.rmi.server.hostname=${IP} -Dcom.sun.management.jmxremote.port=6395"
JAVA_OPTS="$JAVA_OPTS -Xloggc:${APP_HOME}/bin/gc-stat.vgc "
#JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8321"
JAVA_OPTS="-Dprogram.name=deploy.service $JAVA_OPTS"
#echo $JAVA_OPTS
# $JAVA_OPTS must stay unquoted (it is a word-split list of options);
# "$@" (not unquoted $*) preserves argument grouping for the application.
java $JAVA_OPTS -cp "$JARS" com.boco.deploy.Application "$@" >> /dev/null &
# Overwrite (not append): appending left stale PIDs accumulating in the file.
echo $! > pidfile
echo "start finished"
| true
|
d2c6fa883fc22c0f9ffbca64ac2ab2e58b3ebd14
|
Shell
|
Scalingo/java-war-buildpack
|
/bin/compile
|
UTF-8
| 2,694
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir> <env-dir>
#
# Buildpack compile entry point: reads the platform-provided directories and
# version overrides, loads the buildpack helpers, then exports the app's
# config vars from <env-dir> into the environment.
set -eo pipefail

# Verbose command tracing when the platform sets BUILDPACK_DEBUG.
if [[ -n "${BUILDPACK_DEBUG}" ]]; then
  set -x
fi

readonly build_dir="${1}"
readonly cache_dir="${2}"
# readonly added for consistency with the sibling directory variables.
readonly env_dir="${3}"

readonly java_version="${JAVA_VERSION:-1.8}"
readonly webapp_runner_version="${JAVA_WEBAPP_RUNNER_VERSION:-${WEBAPP_RUNNER_VERSION:-9.0.68.1}}"

# Resolve the buildpack root relative to this script's real location.
readonly base_dir="$( cd -P "$( dirname "$0" )" && pwd )"
readonly buildpack_dir="$( readlink -f "${base_dir}/.." )"

source "${buildpack_dir}/lib/common.sh"
export_env_dir "${env_dir}"
#######################################
# Installs Java and the Tomcat webapp-runner jar into the app.
#
# Downloads (and caches) the jvm-common buildpack tooling, sources it to get
# install_java_with_overlay, pins the requested JRE version through
# system.properties, then fetches (and caches) webapp-runner and copies it
# into the build directory as webapp-runner.jar.
#
# Usage: install_webapp_runner <build_dir> <cache_dir> <java_version> <webapp_runner_version>
#
install_webapp_runner() {
  local target_d="${1}"
  local cache_d="${2}"
  local java_ver="${3}"
  local runner_ver="${4}"

  local buildpacks_repository_url="https://buildpacks-repository.s3.eu-central-1.amazonaws.com"
  local jvm_common_url="${JVM_COMMON_BUILDPACK:-"${buildpacks_repository_url}/jvm-common.tar.xz"}"
  local webapp_runner_url="${buildpacks_repository_url}/webapp-runner-${runner_ver}.jar"

  echo "-----> Installing Webapp Runner ${runner_ver}..."

  # --- JVM common tooling (cached across builds) ---
  local jvm_common_archive="${cache_d}/jvm-common.tar.xz"
  if [ ! -f "${jvm_common_archive}" ]; then
    curl --location --silent --retry 6 --retry-connrefused --retry-delay 0 \
      "${jvm_common_url}" \
      --output "${jvm_common_archive}"
  fi

  # Unpack into a scratch dir, source its helpers, install Java, clean up.
  local workdir
  workdir="$( mktemp -d jvm-common-XXXXXX )" && {
    tar --extract --xz --touch --strip-components=1 \
      --file "${jvm_common_archive}" \
      --directory "${workdir}"
    # Source utilities and functions:
    source "${workdir}/bin/util"
    source "${workdir}/bin/java"
    # Pin the JRE version that install_java_with_overlay will install.
    echo "java.runtime.version=${java_ver}" \
      > "${target_d}/system.properties"
    install_java_with_overlay "${target_d}"
    rm -Rf "${workdir}"
  }

  # --- webapp-runner jar (cached across builds) ---
  local runner_jar="${cache_d}/webapp-runner-${runner_ver}.jar"
  if [ ! -f "${runner_jar}" ]; then
    echo "-----> Downloading webapp runner"
    curl --location --silent --retry 6 --retry-connrefused --retry-delay 0 \
      "${webapp_runner_url}" \
      --output "${runner_jar}" \
      || {
        echo "Unable to download webapp runner ${runner_ver}. Aborting." >&2
        exit 1
      }
  else
    echo "-----> Got webapp runner from the cache"
  fi
  cp "${runner_jar}" "${target_d}/webapp-runner.jar"
}
readonly -f install_webapp_runner

install_webapp_runner "${build_dir}" "${cache_dir}" \
  "${java_version}" "${webapp_runner_version}"
| true
|
ed3d365df7e4c4f39236f969c73d0f80248e6a10
|
Shell
|
3bars/ghostlab
|
/ghost/wait-for-it.sh
|
UTF-8
| 320
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# wait-for-it.sh <host> <command...>
# Block until the MySQL server accepts connections, then exec the command.
# Connection credentials come from the database__connection__* env vars.
set -e

# NOTE(review): $1 is captured here but the probe below hard-codes host
# "mysql" — presumably the docker-compose service name; confirm callers.
host="$1"
shift

# Probe with a trivial query. 3306 is the *port* and must be passed with -P;
# the original passed it with -p, which made it a throwaway password (the
# later -p overrode it and the port silently fell back to the default).
# -e also keeps mysql from reading stdin, which is meant for the command.
until mysql -hmysql -P"3306" -u"${database__connection__user}" -p"${database__connection__password}" -D"${database__connection__database}" -e 'SELECT 1' >/dev/null ; do
  >&2 echo "MySQL is unavailable - sleeping"
  sleep 1
done

>&2 echo "MySQL is up - executing command"
# Replace this shell with the wrapped command, preserving argument grouping.
# (The original also built an unused, word-joined cmd="$@"; removed.)
exec "$@"
# Fake Programmer
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.