blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
aa292b9d626f1a54c5684bd34aa3cd88868662c4
|
Shell
|
jsageryd/voiceme
|
/bot.sh
|
UTF-8
| 1,227
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Simple IRC "voice" bot driven through sic(1).
# Required environment: SERVER, PORT, NICK, PASS, CHANNEL.
# Channel members request voice with ".voiceme" and drop it with
# ".devoiceme" / ".unvoiceme".
if [ -z "$SERVER" ]; then
echo '$SERVER not set (set it to e.g. "irc.freenode.net")'
exit 1
fi
if [ -z "$PORT" ]; then
echo '$PORT not set (set it to e.g. "6667")'
exit 1
fi
if [ -z "$NICK" ]; then
echo '$NICK not set (set it to e.g. "iibot")'
exit 1
fi
if [ -z "$PASS" ]; then
echo '$PASS not set (set it to e.g. "hunter2")'
exit 1
fi
if [ -z "$CHANNEL" ]; then
echo '$CHANNEL not set (set it to e.g. "#mychannel")'
exit 1
fi
# Command fifo feeding sic's stdin; reuse an existing fifo instead of
# letting mkfifo fail when the bot is restarted in the same directory.
[ -p in ] || mkfifo in
tail -f - > in &
while true; do
# Shortly after (re)connecting: join the channel and clear +R on the bot.
(sleep 60 && printf ":j %s\n" "$CHANNEL" > in) &
(sleep 30 && printf ":MODE %s -R\n" "$NICK" > in) &
<in sic -h "$SERVER" -p "$PORT" -n "$NICK" -k "$PASS" |
sed -u 's/:/ /' |
while read -r chan date time nick msg; do
# POSIX test(1) uses "=", not the bash-only "==" (script is #!/bin/sh)
if [ "$msg" = ".voiceme" ]; then
echo "$chan: $date $time $nick $msg"
# strip the <> that sic puts around the nick
nick="${nick#<}"
nick="${nick%>}"
# log the command, then send it through the fifo;
# pass variables as printf arguments, never in the format string
printf ":MODE %s +v %s\n" "$CHANNEL" "$nick"
printf ":MODE %s +v %s\n" "$CHANNEL" "$nick" >in
elif [ "$msg" = ".devoiceme" ] || [ "$msg" = ".unvoiceme" ]; then
echo "$chan: $date $time $nick $msg"
nick="${nick#<}"
nick="${nick%>}"
printf ":MODE %s -v %s\n" "$CHANNEL" "$nick"
printf ":MODE %s -v %s\n" "$CHANNEL" "$nick" >in
fi
done
# back off before reconnecting
sleep 10
done
| true
|
870e77552f632363b0f27c3a07e85c70ae960233
|
Shell
|
rudi-c/computational-photography-research
|
/focusmeasure/median.sh
|
UTF-8
| 354
| 2.875
| 3
|
[] |
no_license
|
# Run the adaptive-median focus measure over every benchmark series and
# collect the results under Gray/<benchmark>/.
in="../Benchmarks"
# abort if the benchmark directory is missing — otherwise every following
# command would run in the wrong directory
cd "$in" || exit 1
# -p: do not fail when Gray/ already exists from a previous run
mkdir -p Gray
for b in \
LowLightCandle1 \
LowLightCandle2 \
LowLightCup \
LowLightPicture_1_over_30 \
LowLightPicture_1 \
LowLightPicture_2 \
LowLightPicture_4 \
LowLightPicture_8
do
echo "$b"
for f in "$b"/*.gray; do ../Src/median --adaptive-median "$f" ; done
mkdir -p "Gray/$b"
mv "$b"/*.gray.median "Gray/$b/"
done
| true
|
95e1b9543bba578b9d979523d5dcfd46c884adc2
|
Shell
|
random-builder/makaira-code
|
/buildroot/bin/custom_build.sh
|
UTF-8
| 1,748
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# perform config build
#
# Usage: custom_build.sh <job>
# Requires the environment variables $makaira_code (source checkout root)
# and $makaira_repo (artifact repository root).
set -e
# fail fast with a clear message instead of silently building into wrong
# paths when the required inputs are missing
: "${makaira_code:?environment variable makaira_code must be set}"
: "${makaira_repo:?environment variable makaira_repo must be set}"
# source repository
readonly job="${1:?usage: custom_build.sh <job>}"
readonly marlin="$makaira_code/Marlin"
readonly custom="$makaira_code/Marlin-Custom/$job"
# build parameters
readonly board="arduino:avr:mega:cpu=atmega2560"
readonly sketch="$marlin/Marlin.ino"
readonly target="/tmp/$job"
readonly binary="$target/Marlin.ino.hex"
# target repository
readonly repo="$makaira_repo"
readonly repo_path="$repo/Marlin-Custom/$job"
readonly repo_date=$(date "+%Y-%m-%d")
# job name with "/" replaced by "_" so the file name is a single component
readonly repo_file="${repo_path}/${job//\//_}_${repo_date}.hex"
readonly repo_pack="${repo_file}.zip"
# logger values
readonly color_red='\e[31m'
readonly color_yel='\e[33m'
readonly color_blu='\e[34m'
readonly color_bold='\e[1m'
readonly color_none='\e[0m'
# print an informational banner line
log() {
local text="$1"
echo -e "${color_bold}${color_blu}### $text ###${color_none}"
}
# print an error banner line (kept for callers / future use)
err() {
local text="$1"
echo -e "${color_bold}${color_red}### $text ###${color_none}"
}
log "========================================================================"
log "custom build: $job"
log "marlin master directory: $marlin"
log "marlin custom directory: $custom"
log "------------------------------------------------------------------------"
log "reset marlin repository"
git -C "$marlin" reset --hard
log "verify marlin repository"
git -C "$marlin" status
log "provision custom sources"
cp --force --recursive --verbose "$custom"/* "$marlin"
log "invoke arduino ide compiler"
mkdir -p "$target"
arduino --verify --board "$board" --pref build.path="$target" "$sketch"
ls -las "$target"
log "store build binary in repository"
mkdir -p "$repo_path"
mv -f -v "$binary" "$repo_file"
# re-create the zip from scratch so stale content is never shipped
rm -f "$repo_pack"
zip -j "$repo_pack" "$repo_file"
rm -f "$repo_file"
ls -las "$repo_path"
| true
|
fd448291ff733c2914a74859ee9c6183a8dbb8a7
|
Shell
|
birocchi/mc823-1s2010
|
/proj3/teste_conexao.sh
|
UTF-8
| 366
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Connection timing test: runs the client 500 times against the given
# server and appends each run's stderr timing output to tempos.rtt.
# $# - number of parameters
if [ $# -ne 1 ]
then
echo "uso: ./teste_conexao.sh <endereço_servidor>"
exit 1
fi
OUT=tempos.rtt
# remove old timing data; -f so a missing file is not an error
rm -f "$OUT"
for i in $(seq 500)
do
./runclient.sh "$1" <file.in 2>> "$OUT"
done
clear
echo "$OUT gerado"
echo "Lembrete da condição:"
echo "(Client.java) boolean TEST = false;"
| true
|
ca1ef9a1ebe7d90409c9178325b965e8d5a588ae
|
Shell
|
slegroux/kaldi
|
/egs/rdi/s5/local/speech_hints/make_generic_graph.sh
|
UTF-8
| 852
| 2.796875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# (c) 2019 sylvainlg@voicea.ai
# Build a generic decoding graph with an #nonterm:unk open-vocabulary slot.
# Arguments: <dict_dir> <lm_base (gzipped ARPA)> <model_dir> <output_dir>
dict_dir=$1
lm_base=$2
model_dir=$3
output_dir=$4
lang_base=$output_dir/lang_basevocab
lang_ext=$output_dir/lang_extvocab
output_dir_local=$output_dir/local
mkdir -p "$output_dir_local"
cp -r "$dict_dir" "$output_dir_local/dict_basevocab"
echo "#nonterm:unk" > "$output_dir_local/dict_basevocab/nonterminals.txt"
# Use the dict we just prepared (with nonterminals.txt) — the original
# hard-coded data/local/dict_basevocab here, ignoring $output_dir_local.
utils/prepare_lang.sh "$output_dir_local/dict_basevocab" \
"<unk>" "$output_dir_local/lang_tmp" "$lang_base"
# integer id of the #nonterm:unk symbol in the new words.txt
nonterm_unk=$(grep '#nonterm:unk' $lang_base/words.txt | awk '{print $2}')
# Build G.fst: map <unk> to the nonterminal, compile the ARPA LM and
# remove the nonterminal from the output side.
gunzip -c $lm_base | \
sed 's/<unk>/#nonterm:unk/g' | \
arpa2fst --disambig-symbol=#0 \
--read-symbol-table=$lang_base/words.txt - | \
fstrmsymbols --remove-from-output=true "echo $nonterm_unk|" - $lang_base/G.fst
utils/mkgraph.sh --self-loop-scale 1.0 $lang_base $model_dir $output_dir/extvocab_top
| true
|
ed2f35d9d6640254607d93224563812acb7e3cea
|
Shell
|
ZubairNabi/ciel
|
/scripts/local/delete_block_store_data_local.sh
|
UTF-8
| 382
| 3.59375
| 4
|
[
"LicenseRef-scancode-other-permissive",
"ISC"
] |
permissive
|
#!/bin/bash
#SCRIPT: delete_block_store_data_local.sh
#PURPOSE: Delete block store data on each local worker
# $1 - file listing one machine/container name per line
FILENAME="${1:?usage: delete_block_store_data_local.sh <machine-list-file>}"
USERNAME=root
ROOT_DIR=/mnt/ssd/ssd
# -r: keep any backslashes in machine names literal
while read -r MACHINE
do
CIEL_DIR="$ROOT_DIR/$MACHINE/ciel_data"
BS_DIR="$CIEL_DIR/block_store"
echo "Deleting block store data on container $MACHINE"
# ${BS_DIR:?} aborts rather than running "rm -Rf /..." on an empty value
rm -Rf "${BS_DIR:?}"
mkdir -p "$BS_DIR"
done < "$FILENAME"
| true
|
1a730871b3851e08dc9b0607b235309ea28676a3
|
Shell
|
bozz/cashflow
|
/lib/shell_scripts/create_db_csv.sh
|
UTF-8
| 663
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Script for formatting DB (Deutsch Bank) CSV files.
# Expects an original DB CSV file as its argument.
set -e
if [ $# -lt 1 ]
then
# report the usage error on stderr and exit non-zero so callers can
# detect misuse (the original exited 0 here)
echo "Error: missing parameters:" >&2
echo "$0 source_file" >&2
exit 1
fi
# Filter the raw DB export down to transaction rows and reformat them.
# $1 - path to the original DB CSV file
function main {
# keep only lines starting with a digit (transaction rows), drop the
# "DD.MM.YYYY - DD.MM.YYYY" period-summary line, then reformat.
# grep -Ev replaces the non-standard ack(1) dependency; the pattern is
# plain ERE. Reading the file directly avoids the useless cat.
grep "^[0-9]" -- "$1" |
grep -Ev "^[0-9]{2}.[0-9]{2}.[0-9]{4}.-.[0-9]{2}.[0-9]{2}.[0-9]{4}" |
format_columns
}
# Reformat the filtered CSV rows (read from stdin, ";"-separated) into
# "date;purpose;amount;currency" lines, preceded by a header row.
# The export keeps debit ("soll") and credit ("haben") in two separate
# columns; whichever one is non-empty becomes the amount.
function format_columns {
awk '
BEGIN {
FS = ";"
print "date;purpose;amount;currency"
}
{
# merge "haben" and "soll" into one field
value = ($4 == "") ? $5 : $4
# strip the quotes around the purpose column
gsub(/"/, "", $3)
print $2 ";" $3 ";" value ";" $6
}
'
}
main $1
| true
|
108b73d4014347bc569276e5d2b1405a5049108e
|
Shell
|
prangyapriyadas/wonder
|
/read.sh
|
UTF-8
| 95
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# count lines for a given file
echo "enter file name"
# -r: do not let read mangle backslashes in the typed path
read -r FILENAME
wc -l "$FILENAME"
| true
|
f82ac65cdb46e0139c79496dc3f555435d1df0f9
|
Shell
|
marineLM/WHInter
|
/reproduce_results/reproduce_simulations.sh
|
UTF-8
| 2,209
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
# Simulations
# -----------
# Generate the simulated data
python ./get_data_preprocessed/simulate_lasso.py
# Create the folders to hold the results
mkdir -p ./results/WHInter/
mkdir -p ./results/zetaIL/
mkdir -p ./results/SPP/
mkdir -p ./results/Blitz/
# Run every solver on each simulated (n, p) design
for np in 1000,1000 1000,3000 1000,10000 300,1000 10000,1000
do
# Split "n,p" with parameter expansion. The original set IFS="," and
# clobbered the script's positional parameters with `set --`, leaving
# IFS changed for the remainder of the script.
n=${np%,*}
p=${np#*,}
dat=./data_preprocessed/Bernoulli_n${n}_p${p}_qunif_coefnormal_rs0_nnzd.tsv
# WHInter: the three MIPS strategies with bound type 2 ...
for mips in 0 1 2
do
./../src/train_WHInter -nlambda 100 -lambdaMinRatio 0.01 -maxSelectedFeatures 150 -useBias 1 -useMyMips $mips -typeBound 2 -F 50 -pathResults ./results/WHInter/ ${dat}
done
# ... then the remaining bound types with MIPS strategy 2
for bound in 1 0
do
./../src/train_WHInter -nlambda 100 -lambdaMinRatio 0.01 -maxSelectedFeatures 150 -useBias 1 -useMyMips 2 -typeBound $bound -F 50 -pathResults ./results/WHInter/ ${dat}
done
# zetaIL with the three MIPS strategies
for mips in 0 1 2
do
./src/zetaIL/train_zetaIL -nlambda 100 -lambdaMinRatio 0.01 -maxSelectedFeatures 150 -useBias 1 -useMyMips $mips -F 50 -pathResults ./results/zetaIL/ ${dat}
done
./src/SPP/train_SPPbreadthFirst -nlambda 100 -lambdaMinRatio 0.01 -maxSelectedFeatures 150 -useBias 1 -F 50 -pathResults ./results/SPP/ ${dat}
# Blitz is skipped on the largest design
if [ "$p" -ne 10000 ]
then
python ./src/Blitz/runBLITZ.py --results_dir ./results/Blitz --data_file ${dat} --nlambda 100 --lambdaMinRatio 0.01 --maxSelectedFeatures 150 --useBias 1 --tol 1e-8
fi
done
# Plot
Rscript ./analyze_results/analyze_sim.R
Rscript ./analyze_results/analyze_support_recovery.R
| true
|
ca582d519defb46fcafbe55a6cf1f3299724f895
|
Shell
|
popacai/TQOS
|
/code/generate_clang.sh
|
UTF-8
| 207
| 3.421875
| 3
|
[
"MIT-Modern-Variant"
] |
permissive
|
# Write the path of every header below the current directory into .clang
# and copy that list into each immediate subdirectory.
dir=$(pwd)
echo "$dir"
# start .clang with a blank line (as the original did), then append the
# header paths directly — no word-splitting loop over $(find) output
echo "" > "$dir/.clang"
find "$dir" -name "*.h" >> "$dir/.clang"
# glob for subdirectories instead of parsing ls output
for folder in "$dir"/*/
do
cp "$dir/.clang" "$folder"
done
| true
|
72df571d7e3fbffd693b713549523f6782790347
|
Shell
|
ioBroker/ioBroker
|
/installer_library.sh
|
UTF-8
| 23,886
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# ------------------------------
# Increase this version number whenever you update the lib
# ------------------------------
LIBRARY_VERSION="2022-12-09" # format YYYY-MM-DD
# ------------------------------
# Supported and suggested node versions
# ------------------------------
NODE_JS_LINUX_URL="https://deb.nodesource.com/setup_18.x"
NODE_JS_BREW_URL="https://nodejs.org/dist/v18.15.0/node-v18.15.0.pkg"
# ------------------------------
# test function of the library
# ------------------------------
# Prints the library version so callers can verify that sourcing worked.
function get_lib_version() { echo "$LIBRARY_VERSION"; }
# ------------------------------
# functions for ioBroker Installer/Fixer
# ------------------------------
# Defines the color/format variables (bold, red, green, ...) used by the
# print_* helpers. Leaves them empty when stdout is not a color-capable
# terminal, so "${red}...${normal}" degrades to plain text.
enable_colored_output() {
# Enable colored output
if test -t 1; then # if terminal
ncolors=$(which tput > /dev/null && tput colors) # supports color
if test -n "$ncolors" && test $ncolors -ge 8; then
termcols=$(tput cols)
bold="$(tput bold)"
underline="$(tput smul)"
standout="$(tput smso)"
normal="$(tput sgr0)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
fi
fi
}
# Prints a framed step header: "<stepname> (<stepnr>/<steptotal>)".
print_step() {
stepname="$1"
stepnr="$2"
steptotal="$3"
echo
echo "${bold}${HLINE}${normal}"
echo "${bold}    ${stepname} ${blue}(${stepnr}/${steptotal})${normal}"
echo "${bold}${HLINE}${normal}"
echo
}
# Prints a framed block: bold title ($1) plus any additional lines ($2...).
print_bold() {
title="$1"
echo
echo "${bold}${HLINE}${normal}"
echo
echo "    ${bold}${title}${normal}"
for text in "${@:2}"; do
echo "    ${text}"
done
echo
echo "${bold}${HLINE}${normal}"
echo
}
# Prints a single message surrounded by blank lines (-e: honor escapes).
print_msg() {
text="$1"
echo
echo -e "${text}"
echo
}
HLINE="=========================================================================="
enable_colored_output
get_platform_params() {
# Test which platform this script is being run on
# When adding another supported platform, also add detection for the install command
# HOST_PLATFORM: Name of the platform
# INSTALL_CMD: comand for package installation
# INSTALL_CMD_ARGS: arguments for $INSTALL_CMD to install something
# INSTALL_CMD_UPD_ARGS: arguments for $INSTALL_CMD to update something
# IOB_DIR: Directory where iobroker should be installed
# IOB_USER: The user to run ioBroker as
INSTALL_CMD_UPD_ARGS=""
unamestr=$(uname)
case "$unamestr" in
"Linux")
HOST_PLATFORM="linux"
INSTALL_CMD="apt-get"
INSTALL_CMD_ARGS="install -yq"
# prefer yum when present (RHEL/CentOS/Fedora style systems)
if [[ $(which "yum" 2>/dev/null) == *"/yum" ]]; then
INSTALL_CMD="yum"
# The args -y and -q have to be separate
INSTALL_CMD_ARGS="install -q -y"
INSTALL_CMD_UPD_ARGS="-y"
fi
IOB_DIR="/opt/iobroker"
IOB_USER="iobroker"
;;
"Darwin")
# OSX and Linux are the same in terms of install procedure
HOST_PLATFORM="osx"
ROOT_GROUP="wheel"
INSTALL_CMD="brew"
INSTALL_CMD_ARGS="install"
IOB_DIR="/usr/local/iobroker"
IOB_USER="$USER"
;;
"FreeBSD")
HOST_PLATFORM="freebsd"
ROOT_GROUP="wheel"
INSTALL_CMD="pkg"
INSTALL_CMD_ARGS="install -yq"
IOB_DIR="/opt/iobroker"
IOB_USER="iobroker"
;;
*)
# The following should never happen, but better be safe than sorry
echo "Unsupported platform $unamestr"
exit 1
;;
esac
# NOTE(review): ROOT_GROUP is only assigned for Darwin/FreeBSD here; on
# Linux it is presumably set elsewhere before this is used — confirm.
if [ "$IS_ROOT" = true ]; then
USER_GROUP="$ROOT_GROUP"
fi
}
# Derives common paths/URLs from IOB_DIR and checks the sudo prerequisite.
# Expects get_platform_params to have run first (uses IOB_DIR, INSTALL_CMD).
function set_some_common_params() {
CONTROLLER_DIR="$IOB_DIR/node_modules/iobroker.js-controller"
INSTALLER_INFO_FILE="$IOB_DIR/INSTALLER_INFO.txt"
# Where the fixer script is located
FIXER_URL="https://iobroker.net/fix.sh"
# Where the diag script is located
DIAG_URL="https://iobroker.net/diag.sh"
# Remember the full path of bash
BASH_CMDLINE=$(which bash)
# Check if "sudo" command is available (in case we're not root)
if [ "$IS_ROOT" != true ]; then
if [[ $(which "sudo" 2>/dev/null) != *"/sudo" ]]; then
echo "${red}Cannot continue because the \"sudo\" command is not available!${normal}"
echo "Please install it first using \"$INSTALL_CMD install sudo\""
exit 1
fi
fi
# Starting with Debian 10 (Buster), we need to add the [/usr[/local]]/sbin
# directories to PATH for non-root users
if [ -d "/sbin" ]; then add_to_path "/sbin"; fi
if [ -d "/usr/sbin" ]; then add_to_path "/usr/sbin"; fi
if [ -d "/usr/local/sbin" ]; then add_to_path "/usr/local/sbin"; fi
}
# Installs a package on Linux (apt-get or yum) if it is not already present.
# $1 - package name
install_package_linux() {
package="$1"
# Test if the package is installed
dpkg -s "$package" &> /dev/null
if [ $? -ne 0 ]; then
if [ "$INSTALL_CMD" = "yum" ]; then
# Install it
errormessage=$( $SUDOX $INSTALL_CMD $INSTALL_CMD_ARGS $package > /dev/null 2>&1)
else
# Install it
errormessage=$( $SUDOX $INSTALL_CMD $INSTALL_CMD_ARGS --no-install-recommends $package > /dev/null 2>&1)
fi
# Hide "Error: Nothing to do"
if [ "$errormessage" != "Error: Nothing to do" ]; then
if [ "$errormessage" != "" ]; then
echo $errormessage
fi
echo "Installed $package"
fi
fi
}
# Installs a package on FreeBSD via pkg if it is not already present.
# $1 - package name
install_package_freebsd() {
package="$1"
# check if package is installed (pkg is nice enough to provide us with a exitcode)
if ! $INSTALL_CMD info "$1" >/dev/null 2>&1; then
# Install it
$SUDOX $INSTALL_CMD $INSTALL_CMD_ARGS "$1" > /dev/null
echo "Installed $package"
fi
}
# Installs a package on macOS via brew if it is not already present.
# $1 - package name
install_package_macos() {
package="$1"
# Test if the package is installed (Use brew to install essential tools)
$INSTALL_CMD list | grep "$package" &> /dev/null
if [ $? -ne 0 ]; then
# Install it
$INSTALL_CMD $INSTALL_CMD_ARGS $package &> /dev/null
if [ $? -eq 0 ]; then
echo "Installed $package"
else
echo "$package was not installed"
fi
fi
}
# Platform dispatcher: installs $1 using the routine for $HOST_PLATFORM.
install_package() {
case "$HOST_PLATFORM" in
"linux")
install_package_linux $1
;;
"osx")
install_package_macos $1
;;
"freebsd")
install_package_freebsd $1
;;
# The following should never happen, but better be safe than sorry
*)
echo "Unsupported platform $HOST_PLATFORM"
;;
esac
}
# Installs the baseline system packages ioBroker and common adapters need,
# per platform, then applies platform-specific post-install configuration
# (Linux: node capabilities; FreeBSD: avahi/dbus/mdns setup).
install_necessary_packages() {
# Determine the platform we operate on and select the installation routine/packages accordingly
# TODO: Which other packages do we need by default?
case "$HOST_PLATFORM" in
"linux")
declare -a packages=(
"acl" # To use setfacl
"sudo" # To use sudo (obviously)
"libcap2-bin" # To give nodejs access to protected ports
# These are used by a couple of adapters and should therefore exist:
"build-essential"
# NOTE(review): "gcc-c++" is a yum-style package name while the rest
# are apt-style; on apt systems this install presumably no-ops — confirm.
"gcc-c++"
"make"
"libavahi-compat-libdnssd-dev"
"libudev-dev"
"libpam0g-dev"
"pkg-config"
"git"
"curl"
"unzip"
# These are required for canvas
"libcairo2-dev"
"libpango1.0-dev"
"libjpeg-dev"
"libgif-dev"
"librsvg2-dev"
"libpixman-1-dev"
"net-tools" # To fix issue #277
"cmake" # https://github.com/ioBroker/ioBroker.js-controller/issues/1604
)
for pkg in "${packages[@]}"; do
install_package $pkg
done
# ==================
# Configure packages
# Give nodejs access to protected ports and raw devices like ble
cmdline="$SUDOX setcap"
if running_in_docker; then
capabilities=$(grep ^CapBnd /proc/$$/status)
if [[ $(capsh --decode=${capabilities:(-16)}) == *"cap_net_admin"* ]]; then
$cmdline 'cap_net_admin,cap_net_bind_service,cap_net_raw+eip' $(eval readlink -f `which node`)
else
$cmdline 'cap_net_bind_service,cap_net_raw+eip' $(eval readlink -f `which node`)
echo "${yellow}Docker detected!"
echo "If you have any adapters that need the CAP_NET_ADMIN capability,"
echo "you need to start the docker container with the option --cap-add=NET_ADMIN"
echo "and manually add that capability to node${normal}"
fi
else
$cmdline 'cap_net_admin,cap_net_bind_service,cap_net_raw+eip' $(eval readlink -f `which node`)
fi
;;
"freebsd")
declare -a packages=(
"sudo"
"git"
"curl"
"bash"
"unzip"
"avahi-libdns" # avahi gets installed along with this
"dbus"
"nss_mdns" # needed for the mdns host resolution
"gcc"
"python" # Required for node-gyp compilation
)
for pkg in "${packages[@]}"; do
install_package $pkg
done
# we need to do some setting up things after installing the packages
# ensure dns_sd.h is where node-gyp expect it
ln -s /usr/local/include/avahi-compat-libdns_sd/dns_sd.h /usr/include/dns_sd.h
# enable dbus in the avahi configuration
sed -i -e 's/#enable-dbus/enable-dbus/' /usr/local/etc/avahi/avahi-daemon.conf
# enable mdns usage for host resolution
sed -i -e 's/hosts: file dns/hosts: file dns mdns/' /etc/nsswitch.conf
# enable services avahi/dbus
sysrc -f /etc/rc.conf dbus_enable="YES"
sysrc -f /etc/rc.conf avahi_daemon_enable="YES"
# start services
service dbus start
service avahi-daemon start
;;
"osx")
# Test if brew is installed. If it is, install some packages that are often used.
$INSTALL_CMD -v &> /dev/null
if [ $? -eq 0 ]; then
declare -a packages=(
# These are used by a couple of adapters and should therefore exist:
"pkg-config"
"git"
"curl"
"unzip"
)
for pkg in "${packages[@]}"; do
install_package $pkg
done
else
echo "${yellow}Since brew is not installed, frequently-used dependencies could not be installed."
echo "Before installing some adapters, you might have to install some packages yourself."
echo "Please check the adapter manuals before installing them.${normal}"
fi
;;
*)
;;
esac
}
# ------------------------------------------------------------------
# .npmrc helpers
# The four public functions below all followed the same 20-line pattern;
# the shared logic now lives in update_npmrc_setting while the public
# names and their behavior are unchanged.
# ------------------------------------------------------------------
# Ensures ./.npmrc contains a given setting and is owned by the install user.
# $1 - regex that matches when the desired setting is already present
# $2 - regex of conflicting line(s) to strip before rewriting
# $3 - comment line to append above the setting
# $4 - the setting line itself (key=value)
update_npmrc_setting() {
# Make sure the npmrc file exists
$SUDOX touch .npmrc
# If .npmrc does not already contain the desired line, change it
$SUDOX grep -q -E "$1" .npmrc &> /dev/null
if [ $? -ne 0 ]; then
# Remember its contents (minus any conflicting line)
NPMRC_FILE=$($SUDOX grep -v -E "$2" .npmrc)
# And write it back
write_to_file "$NPMRC_FILE" .npmrc
# Append the replacement setting
append_to_file "$3" .npmrc
append_to_file "$4" .npmrc
fi
# Make sure that npm can access the .npmrc
if [ "$HOST_PLATFORM" = "osx" ]; then
$SUDOX chown -R $USER .npmrc
else
$SUDOX chown -R $USER:$USER_GROUP .npmrc
fi
}
# Disables "npm audit" warnings via .npmrc
disable_npm_audit() {
update_npmrc_setting "^audit=false" "^audit=true" "# disable npm audit warnings" "audit=false"
}
# Disables the npm update-notifier via .npmrc
disable_npm_updatenotifier() {
update_npmrc_setting "^update-notifier=false" "^update-notifier=true" "# disable npm update-notifier information" "update-notifier=false"
}
# This is obsolete and can maybe removed
set_npm_python() {
update_npmrc_setting "^python=" "^python=" "# change link from python3 to python2.7 (needed for gyp)" "python=/usr/local/bin/python2.7"
}
# Makes npm enforce "engines" version constraints via .npmrc
force_strict_npm_version_checks() {
update_npmrc_setting "^engine-strict=true" "^engine-strict=false" "# force strict version checks" "engine-strict=true"
}
# Prepends a directory to PATH unless it is already listed (no duplicates).
add_to_path() {
if [[ ":$PATH:" != *":$1:"* ]]; then
PATH="$1:$PATH"
fi
}
# Overwrites file $2 with text $1 (via sudo when $SUDOX is set).
function write_to_file() {
echo "$1" | $SUDOX tee "$2" &> /dev/null
}
# Appends text $1 to file $2 (via sudo when $SUDOX is set).
function append_to_file() {
echo "$1" | $SUDOX tee -a "$2" &> /dev/null
}
# Returns 0 when we appear to run inside a Docker container, 1 otherwise.
# Checks cgroup entries ("docker"/"buildkit"), /.dockerenv and a marker
# file used by some ioBroker docker images.
running_in_docker() {
# Test if we're running inside a docker container or as github actions job while building docker container image
if awk -F/ '$2 == "docker"' /proc/self/cgroup | read || awk -F/ '$2 == "buildkit"' /proc/self/cgroup | read || test -f /.dockerenv || test -f /opt/scripts/.docker_config/.thisisdocker ; then
return 0
else
return 1
fi
}
# Wraps the npm command for the CURRENT user in a bash function (persisted
# via ~/.bashrc) so that npm runs as $IOB_USER whenever the cwd is inside
# the ioBroker installation directory.
change_npm_command_user() {
# patches the npm command for the current user (if iobroker was installed as non-root),
# so that it is executed as `iobroker` when inside the iobroker directory
NPM_COMMAND_FIX_PATH=~/.iobroker/npm_command_fix
NPM_COMMAND_FIX=$(cat <<- EOF
# While inside the iobroker directory, execute npm as iobroker
function npm() {
__real_npm=\$(which npm)
if [[ \$(pwd) == "$IOB_DIR"* ]]; then
sudo -H -u $IOB_USER \$__real_npm \$*
else
eval \$__real_npm \$*
fi
}
EOF
)
BASHRC_LINES=$(cat <<- EOF
# Forces npm to run as $IOB_USER when inside the iobroker installation dir
source ~/.iobroker/npm_command_fix
EOF
)
mkdir -p ~/.iobroker
write_to_file "$NPM_COMMAND_FIX" "$NPM_COMMAND_FIX_PATH"
# Activate the change
source "$NPM_COMMAND_FIX_PATH"
# Make sure the bashrc file exists - it should, but you never know...
touch ~/.bashrc
# If .bashrc does not contain the source command, we need to add it
# NOTE(review): sudo looks unnecessary for grepping the user's own
# ~/.bashrc — confirm whether it can be dropped.
sudo grep -q -E "^source ~/\.iobroker/npm_command_fix" ~/.bashrc &> /dev/null
if [ $? -ne 0 ]; then
echo "$BASHRC_LINES" >> ~/.bashrc
fi
}
# Same as change_npm_command_user, but always for the ROOT account
# (/root/.bashrc), independent of which user installed ioBroker.
change_npm_command_root() {
# patches the npm command for the ROOT user (always! (independent of which user installed iobroker)),
# so that it is executed as `iobroker` when inside the iobroker directory
NPM_COMMAND_FIX_PATH=/root/.iobroker/npm_command_fix
NPM_COMMAND_FIX=$(cat <<- EOF
# While inside the iobroker directory, execute npm as iobroker
function npm() {
__real_npm=\$(which npm)
if [[ \$(pwd) == "$IOB_DIR"* ]]; then
sudo -H -u $IOB_USER \$__real_npm \$*
else
eval \$__real_npm \$*
fi
}
EOF
)
BASHRC_LINES=$(cat <<- EOF
# Forces npm to run as $IOB_USER when inside the iobroker installation dir
source /root/.iobroker/npm_command_fix
EOF
)
sudo mkdir -p /root/.iobroker
write_to_file "$NPM_COMMAND_FIX" "$NPM_COMMAND_FIX_PATH"
# Activate the change
if [ "$IS_ROOT" = "true" ]; then
source "$NPM_COMMAND_FIX_PATH"
fi
# Make sure the bashrc file exists - it should, but you never know...
sudo touch /root/.bashrc
# If .bashrc does not contain the source command, we need to add it
sudo grep -q -E "^source /root/\.iobroker/npm_command_fix" /root/.bashrc &> /dev/null
if [ $? -ne 0 ]; then
append_to_file "$BASHRC_LINES" /root/.bashrc
fi
}
# Installs bash tab-completion for the "iobroker"/"iob" CLI commands for
# the current user and wires it into ~/.bashrc.
enable_cli_completions() {
# Performs the necessary configuration for CLI auto completion
COMPLETIONS_PATH=~/.iobroker/iobroker_completions
COMPLETIONS=$(cat <<- 'EOF'
iobroker_yargs_completions()
{
local cur_word args type_list
cur_word="${COMP_WORDS[COMP_CWORD]}"
args=("${COMP_WORDS[@]}")
# ask yargs to generate completions.
type_list=$(iobroker --get-yargs-completions "${args[@]}")
COMPREPLY=( $(compgen -W "${type_list}" -- ${cur_word}) )
# if no match was found, fall back to filename completion
if [ ${#COMPREPLY[@]} -eq 0 ]; then
COMPREPLY=()
fi
return 0
}
complete -o default -F iobroker_yargs_completions iobroker
complete -o default -F iobroker_yargs_completions iob
EOF
)
BASHRC_LINES=$(cat <<- EOF
# Enable ioBroker command auto-completion
source ~/.iobroker/iobroker_completions
EOF
)
mkdir -p ~/.iobroker
write_to_file "$COMPLETIONS" "$COMPLETIONS_PATH"
# Activate the change
source "$COMPLETIONS_PATH"
# Make sure the bashrc file exists - it should, but you never know...
touch ~/.bashrc
# If .bashrc does not contain the source command, we need to add it
sudo grep -q -E "^source ~/\.iobroker/iobroker_completions" ~/.bashrc &> /dev/null
if [ $? -ne 0 ]; then
echo "$BASHRC_LINES" >> ~/.bashrc
fi
}
# Makes $1 owned by root:$ROOT_GROUP with mode 755.
set_root_permissions() {
file="$1"
# quote "$file" so paths containing spaces do not word-split
$SUDOX chown root:$ROOT_GROUP "$file"
$SUDOX chmod 755 "$file"
}
# Marks $1 as executable (755) without changing its owner.
make_executable() {
file="$1"
$SUDOX chmod 755 "$file"
}
# Changes ownership of $2 to user $1 (user only on macOS, user:user
# elsewhere); recurses into directories and handles symlinks with -h.
change_owner() {
user="$1"
file="$2"
if [ "$HOST_PLATFORM" == "osx" ]; then
owner="$user"
else
owner="$user:$user"
fi
# $cmdline intentionally stays unquoted below: it holds "[sudo] chown [flags]"
cmdline="$SUDOX chown"
if [ -d "$file" ]; then
# recursively chown directories
cmdline="$cmdline -R"
elif [ -L "$file" ]; then
# change ownership of symbolic links
cmdline="$cmdline -h"
fi
$cmdline "$owner" "$file"
}
# Appends NOPASSWD sudoers rules to the global SUDOERS_CONTENT variable.
# $1   - sudoers rule prefix (e.g. "user ALL=(ALL) ")
# $2.. - commands; each is resolved to its absolute path via which, and
#        commands that are not installed are silently skipped.
function add2sudoers() {
local xsudoers=$1
shift
xarry=("$@")
for cmd in "${xarry[@]}"; do
# Test each command if and where it is installed
cmd_bin=$(echo $cmd | cut -d ' ' -f1)
cmd_path=$(which $cmd_bin 2> /dev/null)
if [ $? -eq 0 ]; then
# Then add the command to SUDOERS_CONTENT
full_cmd=$(echo "$cmd" | sed -e "s|$cmd_bin|$cmd_path|")
SUDOERS_CONTENT+=$xsudoers"NOPASSWD: $full_cmd\n"
fi
done
}
# Creates the ioBroker system user on Linux (if missing), grants it the
# passwordless sudo rules it needs (validated with visudo before being
# installed), and adds it to common hardware/service groups.
# $1 - user name to create
create_user_linux() {
username="$1"
id "$username" &> /dev/null;
if [ $? -ne 0 ]; then
# User does not exist
$SUDOX useradd -m -s /usr/sbin/nologin "$username"
echo "User $username created"
fi
# Add the current non-root user to the iobroker group so he can access the iobroker dir
if [ "$username" != "$USER" ] && [ "$IS_ROOT" = false ]; then
sudo usermod -a -G $username $USER
fi
SUDOERS_CONTENT="$username ALL=(ALL) ALL\n"
# Add the user to all groups we need and give him passwordless sudo privileges
# Define which commands iobroker may execute as sudo without password
declare -a iob_commands=(
"shutdown" "halt" "poweroff" "reboot"
"systemctl start" "systemctl stop"
"mount" "umount" "systemd-run"
"apt-get" "apt" "dpkg" "make"
"ping" "fping"
"arp-scan"
"setcap"
"vcgencmd"
"cat"
"df"
"mysqldump"
"ldconfig"
)
add2sudoers "$username ALL=(ALL) " "${iob_commands[@]}"
# Additionally, define which iobroker-related commands may be executed by every user
declare -a all_user_commands=(
"systemctl start iobroker"
"systemctl stop iobroker"
"systemctl restart iobroker"
)
add2sudoers "ALL ALL=" "${all_user_commands[@]}"
# Furthermore, allow all users to execute node iobroker.js as iobroker
if [ "$IOB_USER" != "$USER" ]; then
add2sudoers "ALL ALL=($IOB_USER) " "node $CONTROLLER_DIR/iobroker.js *"
fi
SUDOERS_FILE="/etc/sudoers.d/iobroker"
$SUDOX rm -f $SUDOERS_FILE
echo -e "$SUDOERS_CONTENT" > ~/temp_sudo_file
# validate with visudo -c before installing — a broken sudoers file
# would lock sudo out entirely
$SUDOX visudo -c -q -f ~/temp_sudo_file && \
$SUDOX chown root:$ROOT_GROUP ~/temp_sudo_file &&
$SUDOX chmod 440 ~/temp_sudo_file &&
$SUDOX mv ~/temp_sudo_file $SUDOERS_FILE &&
echo "Created $SUDOERS_FILE"
# Add the user to all groups if they exist
declare -a groups=(
audio
bluetooth
dialout
gpio
i2c
redis
tty
video
)
for grp in "${groups[@]}"; do
getent group $grp &> /dev/null && $SUDOX usermod -a -G $grp $username
done
}
# FreeBSD counterpart of create_user_linux: creates the user via pw,
# installs validated passwordless sudoers rules and adds the user to
# common groups.
# $1 - user name to create
create_user_freebsd() {
username="$1"
id "$username" &> /dev/null
if [ $? -ne 0 ]; then
# User does not exist
$SUDOX pw useradd -m -s /usr/sbin/nologin -n "$username"
fi
# Add the user to all groups we need and give him passwordless sudo privileges
# Define which commands may be executed as sudo without password
SUDOERS_CONTENT="$username ALL=(ALL) ALL\n"
# Add the user to all groups we need and give him passwordless sudo privileges
# Define which commands iobroker may execute as sudo without password
declare -a iob_commands=(
"shutdown" "halt" "poweroff" "reboot"
"service iobroker start" "service iobroker stop"
"mount" "umount" "systemd-run"
"pkg" "make"
"ping" "fping"
"arp-scan"
"setcap"
"vcgencmd"
"cat"
"df"
"mysqldump"
"ldconfig"
)
add2sudoers "$username ALL=(ALL) " "${iob_commands[@]}"
# Additionally, define which iobroker-related commands may be executed by every user
declare -a all_user_commands=(
"service iobroker start"
"service iobroker stop"
"service iobroker restart"
)
add2sudoers "ALL ALL=" "${all_user_commands[@]}"
# Furthermore, allow all users to execute node iobroker.js as iobroker
if [ "$IOB_USER" != "$USER" ]; then
add2sudoers "ALL ALL=($IOB_USER) " "node $CONTROLLER_DIR/iobroker.js *"
fi
SUDOERS_FILE="/usr/local/etc/sudoers.d/iobroker"
$SUDOX rm -f $SUDOERS_FILE
echo -e "$SUDOERS_CONTENT" > ~/temp_sudo_file
# validate with visudo -c before installing the rules
$SUDOX visudo -c -q -f ~/temp_sudo_file && \
$SUDOX chown root:$ROOT_GROUP ~/temp_sudo_file &&
$SUDOX chmod 440 ~/temp_sudo_file &&
$SUDOX mv ~/temp_sudo_file $SUDOERS_FILE &&
echo "Created $SUDOERS_FILE"
# Add the user to all groups if they exist
declare -a groups=(
audio
bluetooth
dialout
gpio
i2c
redis
tty
video
)
for grp in "${groups[@]}"; do
getent group $grp && $SUDOX pw group mod $grp -m $username
done
}
# Gives $IOB_USER ownership of the installation dir (and, in fixer mode,
# its npm cache), adds the invoking user to the iobroker group and tries
# to set group-write default ACLs on the install dir.
fix_dir_permissions() {
# Give the user access to all necessary directories
# When autostart is enabled, we need to fix the permissions so that `iobroker` can access it
echo "Fixing directory permissions..."
change_owner $IOB_USER $IOB_DIR
# These commands are only for the fixer
if [ "$FIXER_VERSION" != "" ]; then
# ioBroker install dir
change_owner $IOB_USER $IOB_DIR
# and the npm cache dir
if [ -d "/home/$IOB_USER/.npm" ]; then
change_owner $IOB_USER "/home/$IOB_USER/.npm"
fi
fi
if [ "$IS_ROOT" != true ]; then
sudo usermod -a -G $IOB_USER $USER
fi
# Give the iobroker group write access to all files by setting the default ACL
$SUDOX setfacl -Rdm g:$IOB_USER:rwx $IOB_DIR &> /dev/null && $SUDOX setfacl -Rm g:$IOB_USER:rwx $IOB_DIR &> /dev/null
if [ $? -ne 0 ]; then
# We cannot rely on default permissions on this system
echo "${yellow}This system does not support setting default permissions.${normal}"
echo "${yellow}Do not use npm to manually install adapters unless you know what you are doing!${normal}"
echo "ACL enabled: false" >> $INSTALLER_INFO_FILE
else
echo "ACL enabled: true" >> $INSTALLER_INFO_FILE
fi
}
# Installs Node.js for the detected platform: nodesource setup script on
# Linux (apt or yum), pkg on FreeBSD; on macOS/brew it only points the
# user to the installer and aborts. Verifies the install afterwards.
install_nodejs() {
print_bold "Node.js not found. Installing..."
if [ "$INSTALL_CMD" = "yum" ]; then
if [ "$IS_ROOT" = true ]; then
curl -sL $NODE_JS_LINUX_URL | bash -
else
curl -sL $NODE_JS_LINUX_URL | sudo -E bash -
fi
elif [ "$INSTALL_CMD" = "pkg" ]; then
$SUDOX $INSTALL_CMD $INSTALL_CMD_ARGS node
elif [ "$INSTALL_CMD" = "brew" ]; then
echo "${red}Cannot install Node.js using brew.${normal}"
echo "Please download Node.js from $NODE_JS_BREW_URL"
echo "Then try to install ioBroker again!"
exit 1
else
if [ "$IS_ROOT" = true ]; then
curl -sL $NODE_JS_LINUX_URL | bash -
else
curl -sL $NODE_JS_LINUX_URL | sudo -E bash -
fi
fi
install_package nodejs
# Check if nodejs is now installed
if [[ $(which "node" 2>/dev/null) != *"/node" ]]; then
echo "${red}Cannot install Node.js! Please install it manually.${normal}"
exit 1
else
echo "${bold}Node.js Installed successfully!${normal}"
fi
}
# Prints the host's IPv4 address(es), preferring "ip addr show" and
# falling back to ifconfig; loopback and IPv6 entries are filtered out.
detect_ip_address() {
# Detect IP address
local IP
# was a global in the original — keep it function-local like IP
local IP_COMMAND
IP_COMMAND=$(type "ip" &> /dev/null && echo "ip addr show" || echo "ifconfig")
if [ "$HOST_PLATFORM" = "osx" ]; then
IP=$($IP_COMMAND | grep inet | grep -v inet6 | grep -v 127.0.0.1 | grep -Eo "([0-9]+\.){3}[0-9]+" | head -1)
else
IP=$($IP_COMMAND | grep inet | grep -v inet6 | grep -v 127.0.0.1 | grep -Eo "([0-9]+\.){3}[0-9]+\/[0-9]+" | cut -d "/" -f1)
fi
# intentionally unquoted: multiple matches collapse onto one line
echo $IP
}
echo "library: loaded"
| true
|
107514ce4700199b8af24d3fd522ae50dec3f8b2
|
Shell
|
jluciano/DataFAQs
|
/doc/examples/default/faqt-brick/datafaqs-source-me.sh
|
UTF-8
| 1,084
| 2.578125
| 3
|
[] |
no_license
|
# Environment setup for DataFAQs; intended to be source'd, not executed.
export DATAFAQS_HOME="/opt/DataFAQs"
export PATH=$PATH$($DATAFAQS_HOME/bin/df-situate-paths.sh)
# Project settings
export DATAFAQS_LOG_DIR="$(pwd)/log"
export DATAFAQS_BASE_URI=""
export DATAFAQS_PUBLISH_THROUGHOUT_EPOCH="true"
export DATAFAQS_PUBLISH_METADATA_GRAPH_NAME="http://www.w3.org/ns/sparql-service-description#NamedGraph"
# If using TDB:
export DATAFAQS_PUBLISH_TDB="true"
export DATAFAQS_PUBLISH_TDB_DIR="$(pwd)/tdb"
export TDBROOT="/opt/tdb/TDB-0.8.10"
# Add TDB's bin directory when tdbloader is not already on PATH.
# Fix: the original test `[ ! \`which tdbloader\` ]` only worked by
# accident (with tdbloader missing it evaluated `[ ! ]`, which tests
# the literal string "!"); `command -v` is the portable check.
if ! command -v tdbloader > /dev/null 2>&1; then
export PATH=$PATH":$TDBROOT/bin"
fi
# If using Virtuoso:
export DATAFAQS_PUBLISH_VIRTUOSO='true'
export CSV2RDF4LOD_CONVERT_DATA_ROOT="$(pwd)"
export CSV2RDF4LOD_PUBLISH_VIRTUOSO_HOME='/opt/virtuoso'
export CSV2RDF4LOD_PUBLISH_VIRTUOSO_ISQL_PATH='' # defaults to guess
export CSV2RDF4LOD_PUBLISH_VIRTUOSO_PORT=1111
export CSV2RDF4LOD_PUBLISH_VIRTUOSO_USERNAME='dba'
export CSV2RDF4LOD_PUBLISH_VIRTUOSO_PASSWORD='your-virtuoso-password'
# Software dependencies:
export CSV2RDF4LOD_HOME="/opt/csv2rdf4lod-automation"
export PATH=$PATH$($CSV2RDF4LOD_HOME/bin/util/cr-situate-paths.sh)
df-vars.sh
| true
|
bcf865944e1b4ee63873649c4cc9ac539048c96b
|
Shell
|
khalMeg/WorkedTime
|
/workedTime.sh
|
UTF-8
| 1,429
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# written by khalil meguenni, Wed, 27 Jun 2018 21:27:18
#
# Compute worked time between a start and an end clock time:
#   usage: workedTime.sh <startHour> <startMin> <endHour> <endMin>
# Optionally adds the result to a previously recorded worked time.
if [ $# -eq 4 ]
then
((startHoure = $1))
((startMin = $2))
((endHoure = $3))
((endMin = $4))
# Borrow an hour when end minutes are smaller than start minutes.
if [ $endMin -lt $startMin ]
then
((endMin += 60))
((endHoure -= 1))
fi
((workedHoures = $endHoure - $startHoure))
((workedMin = $endMin - $startMin))
# Wrap around midnight.
if [ $workedHoures -lt 0 ]
then
((workedHoures += 24))
fi
echo -e "worked Time = $workedHoures h: $workedMin min"
else
echo -e "Not enough parameters, 4 parameters (start Houre, start Minute, end Houre, end Minute) was needed"
# Fix: bail out instead of falling through to the "add" prompt with
# undefined workedHoures/workedMin (which arithmetic treated as 0).
exit 1
fi
read -r -p "Do you want to add this worked time to a previous one? [y/N] " addResponse
case "$addResponse" in [yY][eE][sS]|[yY])
read -p "Enter the worked houres followed by the worked Minutes (ex: 12 35) " addedWorkedHoures addedWorkedMin
if ! [[ "$addedWorkedHoures" =~ ^[0-9]+$ ]] || ! [[ "$addedWorkedMin" =~ ^[0-9]+$ ]]
then
echo "Enter numbers only (ex: 12 35)"
exit 1
fi
((TotalMinutes = $workedMin + $addedWorkedMin))
((houresPerMins = 0))
# Carry full hours out of the minute total.
if [[ $TotalMinutes -gt 59 ]]
then
((houresPerMins = $TotalMinutes / 60))
((TotalMinutes = $TotalMinutes - (60 * $houresPerMins)))
fi
((TotalHoures = $workedHoures + $addedWorkedHoures + $houresPerMins))
echo -e "Total worked Time = $TotalHoures h: $TotalMinutes min"
;;
*)
;;
esac
| true
|
3b2784b5ca9436ba8eb793ef6e9fb7fc0b7a5eed
|
Shell
|
neoito-hub/dotfiles
|
/move_in.sh
|
UTF-8
| 1,059
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# CREDITS: https://github.com/jcs/dotfiles/blob/master/move_in.sh
# Back up existing shell/editor config, clone (or update) the dotfiles
# repo, symlink its files into $HOME, then set up VS Code.
# Today's date (YYYYMMDD) via bash's builtin strftime — no date(1) fork.
printf -v BACKUP_DATE "%(%Y%m%d)T"
CONF_FILES=".zshrc .vimrc .tmux.conf"
echo "-> Backing up"
# Move each existing config file aside as <name>.backup.<date>.
for FILE in $HOME/.{zshrc,vimrc,tmux.conf}; {
[ -f "$FILE" ] && mv "$FILE" "$HOME/${FILE##*/}.backup.$BACKUP_DATE"
}
echo "-> Making vim dirs"
mkdir -p $HOME/.vim/files/{backup,swap,undo,info/viminfo}
touch $HOME/.zshrc.mystuff.zsh
# check if we have the repo already cloned
# do a pull if present
# do a clone if not present
echo "-> Clonning repo"
if cd ~/.dotfiles 2>&-; then
git pull --ff-only
git submodule update --init --recursive
else
git clone --recursive https://github.com/neoito-hub/dotfiles ~/.dotfiles
fi
echo "-> Installing dotfiles"
cd ~/.dotfiles
# Replace each home-dir config file with a symlink into ~/.dotfiles.
for f in $CONF_FILES; do
rm -f ~/$f
(cd ~/; ln -s .dotfiles/$f $f)
done
echo "-> Installing vscode config and plugins"
cp ~/.dotfiles/settings.json $HOME/.config/Code/User/settings.json
code --install-extension dbaeumer.vscode-eslint
code --install-extension esbenp.prettier-vscode
echo "-> All done."
| true
|
8b69da18068120c6e2aa66587fb66179252e287c
|
Shell
|
arjan/zotonic-docs
|
/modules/dispatch/.generate
|
UTF-8
| 1,108
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate reStructuredText documentation pages for every Zotonic
# dispatch file found under $ZOTONIC_SRC/modules.
# Work from the script's own directory.
pushd ${0%/*}
ZOTONIC_SRC=${ZOTONIC_SRC:=/home/kaos/zotonic}
# /path/to/zotonic/modules/<mod>/dispatchs/dispatch
for f in `find $ZOTONIC_SRC/modules -name \*dispatch\* -type f`
do
# Split the path into module name (mod_*) and dispatch file name.
read -r mod dispatch <<EOF
`echo $f | sed -e 's,.*/\(mod_[^/]*\).*/dispatch/\(.*\),\1 \2,'`
EOF
echo mod: $mod dispatch: $dispatch
# Extract the dispatch rules into a CSV; on parser failure show the
# output and abort the whole generation run.
./.parse-dispatch $f > meta-$mod-$dispatch.csv
if [ $? -ne 0 ] ; then
cat meta-$mod-$dispatch.csv
exit 1
fi
# Emit the generated .rst page referencing the CSV table.
cat <<EOF > dispatch_$mod-$dispatch.rst
.. This file is generated.
To document this dispatch, edit the doc-$mod-$dispatch.rst file, which is included in this file.
$dispatch ($mod)
${dispatch//?/=}==${mod//?/=}=
* Module: :doc:\`../$mod\`
.. csv-table:: Dispatch rules
:delim: tab
:header: Name, Path, Resource, Args
:file: meta-$mod-$dispatch.csv
.. include:: doc-$mod-$dispatch.rst
EOF
# Register this page in the module's index file.
cat <<EOF >> ../meta-$mod.rst
* Dispatch: :doc:\`dispatch/dispatch_$mod-$dispatch\`
EOF
# Create a placeholder doc file on first run so the include resolves.
if [ ! -e doc-$mod-$dispatch.rst ]; then
cat <<EOF > doc-$mod-$dispatch.rst
Not yet documented.
EOF
fi
done
popd
| true
|
808ba1e2b2dce9fb79b42660999547e632d0fc4c
|
Shell
|
keskes0203/CECS420_project_tests
|
/test_environment/run_tests.sh
|
UTF-8
| 672
| 3.359375
| 3
|
[] |
no_license
|
# NOTE(review): this file appears truncated/broken — $eachStudent is
# referenced below but never assigned, and there are two trailing
# `done` for only one visible `do`. The enclosing
# `for eachStudent in ...` loop header seems to be missing; as-is,
# bash reports a syntax error on the final `done`.
RESULTS=results.txt
EXECUTABLE=uoflinsort
make
# Student folder name = second-to-last path component of $eachStudent.
foldername=`echo $eachStudent | rev | cut -d '/' -f 2 | rev`
rm -r output/$foldername
mkdir -p output/$foldername
# Memory-check one representative run under valgrind.
valgrind ./$eachStudent$EXECUTABLE input/test1 output/$foldername/test1 > output/$foldername/valgrind.log 2>&1
echo "Results" > results/$foldername
# Run every input file through the executable and diff vs. solutions.
# NOTE(review): this runs ./$EXECUTABLE while the valgrind line ran
# ./$eachStudent$EXECUTABLE — confirm which binary is intended.
for eachTest in input/*
do
testname=`echo $eachTest | rev | cut -d '/' -f 1 | rev`
./$EXECUTABLE $eachTest output/$foldername/$testname >out.txt 2>&1
if diff output/$foldername/$testname solutions/$testname > output/$foldername/$RESULTS$testname
then
echo "PASS "$testname >> results/$foldername
else
echo "FAIL "$testname >> results/$foldername
fi
done
done
| true
|
5fa1baaa08a313d8058edfbd1dafa20f55649e22
|
Shell
|
mintak21/terraform-old
|
/scripts/setup/macOS/setup.sh
|
UTF-8
| 538
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# One-shot macOS dev-machine setup: install Brewfile packages and
# configure git-secrets for every new git repository.
# Install/upgrade all Homebrew packages listed in ./Brewfile, then
# install the terraform version pinned via tfenv.
install_packages() {
printf '\033[91m%s\033[m\n' 'installing packages...'
brew upgrade
brew bundle --file ./Brewfile
tfenv install
printf '\033[36m%s\033[m\n' 'install packages completed.'
}
# Register the AWS credential patterns with git-secrets and wire its
# hooks into the global git init template so every new repo gets them.
setup_git_secrets() {
git secrets --register-aws --global
git secrets --install ~/.git-templates/git-secrets
git config --global init.templatedir "$HOME/.git-templates/git-secrets"
printf '\033[36m%s\033[m\n' 'git-secrets config set up completed.'
}
# Run from the script's own directory so ./Brewfile resolves.
cd "$(dirname "$0")" || exit 1
install_packages
setup_git_secrets
| true
|
c51b7438bee316757fbe794d9912b4d8fbdc9f02
|
Shell
|
greyofficial/dots
|
/zsh/.zshrc
|
UTF-8
| 7,755
| 2.703125
| 3
|
[] |
no_license
|
########
# INIT #
########
#===============================================================================================
### Added by Zinit's installer
if [[ ! -f $HOME/.zinit/bin/zinit.zsh ]]; then
print -P "%F{33}▓▒░ %F{220}Installing %F{33}DHARMA%F{220} Initiative Plugin Manager (%F{33}zdharma/zinit%F{220})…%f"
command mkdir -p "$HOME/.zinit" && command chmod g-rwX "$HOME/.zinit"
command git clone https://github.com/zdharma/zinit "$HOME/.zinit/bin" && \
print -P "%F{33}▓▒░ %F{34}Installation successful.%f%b" || \
print -P "%F{160}▓▒░ The clone has failed.%f%b"
fi
source "$HOME/.zinit/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
# Load a few important annexes, without Turbo
# (this is currently required for annexes)
#zinit light-mode for \
#zinit-zsh/z-a-rust \
#zinit-zsh/z-a-as-monitor \
#zinit-zsh/z-a-patch-dl \
#zinit-zsh/z-a-bin-gem-node
### End of Zinit's installer chunk
#===============================================================================================
###########
# PLUGINS #
###########
#===============================================================================================
#zplugin load zdharma/history-search-multi-word
zplugin ice wait'1' lucid
zplugin snippet OMZ::plugins/fzf/fzf.plugin.zsh
zplugin ice wait'1' lucid
zplugin snippet OMZ::plugins/fancy-ctrl-z/fancy-ctrl-z.plugin.zsh
zplugin snippet OMZ::plugins/vi-mode/vi-mode.plugin.zsh
zplugin ice wait'1' lucid
zplugin load zdharma/fast-syntax-highlighting
zplugin ice wait'0' lucid
zplugin load 'flinner/zsh-emacs'
zplugin ice wait lucid atload'_zsh_autosuggest_start'
zplugin light zsh-users/zsh-autosuggestions
zplugin ice wait'0' lucid
zinit load agkozak/zsh-z
#zplugin ice wait'1' lucid
#zplugin load marlonrichert/zsh-autocomplete
# This one to be ran just once, in interactive session
#
autoload -Uz compinit
compinit
compdef _gnu_generic ytfzf
# insensitve completion
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
setopt HIST_IGNORE_ALL_DUPS # do not put duplicated command into history list
setopt HIST_SAVE_NO_DUPS # do not save duplicated command
setopt HIST_REDUCE_BLANKS # remove unnecessary blanks
setopt INC_APPEND_HISTORY_TIME # append command to history file immediately after execution
setopt EXTENDED_HISTORY # record command start time
#===============================================================================================
############
# KEYBINDS #
############
#===============================================================================================
bindkey -e
function zle-keymap-select zle-line-init zle-line-finish
{
case $KEYMAP in
vicmd) print -n '\033[1 q';; #line cursor
viins|main) print -n '\033[6 q';; # block cursor
esac
}
bindkey jk vi-cmd-mode
# exit on partianl command with Ctrl-D
exit_zsh() { exit }
zle -N exit_zsh
bindkey '^v' edit-command-line
bindkey '^D' exit_zsh
#===============================================================================================
#########
# OTHER #
#########
#disable url globbing (for mpv) # https://superuser.com/questions/649635/zsh-says-no-matches-found-when-trying-to-download-video-with-youtube-dl
#TODO: delete this
#autoload -Uz bracketed-paste-magic
#zle -N bracketed-paste bracketed-paste-magic
autoload -Uz url-quote-magic
zle -N self-insert url-quote-magic
# should kill upto to the slash
autoload -U select-word-style
select-word-style bash
SAVEHIST=1000
HISTSIZE=1000
HISTFILE=~/.zsh_history
setopt CORRECT
setopt CORRECT_ALL
#zle -N zle-line-init
#zle -N zle-line-finish
#zle -N zle-keymap-select
#===============================================================================================
#########################
# ALIASES AND FUNCTIONS #
#########################
#===============================================================================================
copy () { xclip -selection c "$@" }
chmodx-last () { chmod +x "$_" ; }
# get weather, example: weather New York
weather () { curl wttr.in/"$*"; }
# get public ip address
ip.me () { curl eth0.me ; curl ipv6.icanhazip.com } # or ip.me
# 0x0: upload a file to the 0x0.st pastebin and copy the resulting URL
# to the X clipboard. Uses $1 when given, otherwise picks a file
# interactively via fzf. Example: 0x0 file.sh
0x0 () {
    # Idiom fix: explicit if/else instead of the `&& ... || ...` chain,
    # and [ -n ] instead of [ ! -z ].
    if [ -n "$1" ]; then
        file=$1
    else
        file=$(find . -maxdepth 2 -type f | fzf)
    fi
    # Nothing selected (fzf cancelled): do nothing.
    [ -z "$file" ] && return
    echo "file=@$file"
    curl -F "file=@$file" 0x0.st | xclip -sel clip
}
# curl with a /tmp-backed cache: the first call for a URL fetches and
# stores the body, later calls for the same URL read the cached copy.
curl_cache(){
    local cache_path
    # Cache file name: the URL with every '/' mapped to '_', under /tmp.
    cache_path="/tmp/$(echo "$1" | sed 's|/|_|g')"
    [ -f "$cache_path" ] || curl -s "$1" -o "$cache_path"
    cat "$cache_path"
}
alias doas='sudo '
alias sudo='sudo '
alias fm='ranger'
alias fm.='. ranger'
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias sl='ls -CF'
alias ls='ls --color=auto'
alias please='sudo $(fc -ln -1)'
alias p='paru'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
alias ip='ip --color=auto'
alias ytfzfd='YTFZF_PLAYER="youtube-dl --embed-subs --write-sub --sub-lang en" ytfzf'
alias cargo-doc-server="python -m http.server -d target/doc/ -b 127.0.0.1"
alias sc="bat ~/schedule.org"
#===============================================================================================
#VARS
#===============================================================================================
#xdg specs
export XDG_CONFIG_HOME="$HOME"/.config
export XDG_CACHE_HOME="$HOME"/.cache
export XDG_DATA_HOME="$HOME"/.local/share
export CARGO_HOME="$XDG_DATA_HOME"/cargo
export GOPATH="$XDG_DATA_HOME"/go
export GOBIN="$XDG_DATA_HOME"/go
export DOOM_PATH="$HOME/.emacs.d/bin"
export PATH="$DOOM_PATH:$HOME/.local/bin:$HOME/bin:$CARGO_HOME/bin:$GOPATH:$PATH"
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# z-jumper to store symbolc links
export _Z_NO_RESOLVE_SYMLINKS=1
export _Z_DATA="$HOME/.local/share/z"
# jupyter garbage export JUPYTERLAB_DIR=$HOME/.local/share/jupyter/lab
# python path for jupyter garbage
export PYTHONPATH="$HOME/.local/bin"
# andriod studio, not that I use it
# also needed by shitlab! (matlab)
export _JAVA_AWT_WM_NONREPARENTING=1
export _JAVA_OPTIONS="-Dswing.defaultlaf=com.sun.java.swing.plaf.gtk.GTKLookAndFeel"
#support for gpg
export GPG_TTY=$(tty)
# ZSH Home
export ZSH="$HOME/.config/.oh-my-zsh"
# fish
export fish_greeting="" #disable greeting
#Preferred editor for local and remote sessions
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='nvim'
fi
export TERMINAL="alacritty"
# man colors
export LESS_TERMCAP_mb=$(printf '\e[01;31m') # enter blinking mode - red
export LESS_TERMCAP_md=$(printf '\e[01;35m') # enter double-bright mode - bold, magenta
export LESS_TERMCAP_me=$(printf '\e[0m') # turn off all appearance modes (mb, md, so, us)
export LESS_TERMCAP_se=$(printf '\e[0m') # leave standout mode
export LESS_TERMCAP_so=$(printf '\e[01;33m') # enter standout mode - yellow
export LESS_TERMCAP_ue=$(printf '\e[0m') # leave underline mode
export LESS_TERMCAP_us=$(printf '\e[04;36m') # enter underline mode - cyan
#===============================================================================================
# Load the pure theme, with zsh-async library that's bundled with it
PS1="> "
RPS1=" "
#zplugin ice wait'!0' lucid pick"async.zsh" src"pure.zsh"; zplugin light sindresorhus/pure
eval "$(starship init zsh)"
[ -f "/home/user/.ghcup/env" ] && source "/home/user/.ghcup/env" # ghcup-env
#~/bin/dennis
#cutefetch 2> /dev/null
| true
|
f3cd409d9bd307655c45c6721e75ccf62746f927
|
Shell
|
alexorfanoud/Freeze
|
/setup.sh
|
UTF-8
| 520
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
FZ_ROOT_PATH=$(pwd)/
echo 'Setting the root path variables...'
if grep -q "#define FZ_ROOT_PATH" $FZ_ROOT_PATH\Freeze/Core.hpp
then
echo 'Root path already set.'
else
echo '#define FZ_ROOT_PATH std::string("'$FZ_ROOT_PATH'")' >> $FZ_ROOT_PATH\Freeze/Core.hpp
fi
echo $'#! /bin/bash \n'$FZ_ROOT_PATH\build/Sandbox/SandboxExec > run.sh && chmod +x run.sh
echo 'Updating submodules...'
git submodule init && git submodule update
echo 'Starting compilation...'
mkdir build
cd build
cmake .. && make
| true
|
82e2c3b84f1b38e789065c222d575651e612b729
|
Shell
|
zpppy/kaldi-aslp
|
/aslp_scripts/vad/calc_auc.sh
|
UTF-8
| 344
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Created on 2016-04-26
# Author: Binbin Zhang
# Compute the area under the ROC curve for VAD by summing the last
# field of every "Thresh" line in a roc result file, scaled by the
# threshold stride.
stride=0.001
. parse_options.sh || exit 1;
if [ ! $# -eq 1 ]; then
echo "Caculation AUC for vad"
echo "Usage: $0 roc_result_file"
exit 1;
fi
log_file=$1
# Sum the final field of each Thresh line; AUC = (sum + 1) * stride.
# NOTE(review): the "+ 1" presumably accounts for the ROC endpoint at
# (1,1) — confirm against how the roc result file is produced.
grep "Thresh" $log_file | \
awk -v stride=$stride '{
sum += $NF;
}
END {
print "AUC", (sum + 1) * stride;
}'
| true
|
2a4f06edf73c518e6518af207817a7c0809b6f3a
|
Shell
|
noporpoise/biogrok
|
/sam-fragment-size
|
UTF-8
| 347
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print mean, median, min, max of abs(TLEN)
# Requires: R, samtools, awk
#
# Usage: sam-fragment-size <file.bam|file.sam>
# Samples the first 10000 records with TLEN < 10000 so huge files are
# summarised quickly.
# Fix: quote "$1" so filenames containing spaces work.
samtools view "$1" | awk '{if($9 < 10000) {print $9}}' | head -10000 | Rscript <(echo '
d<-scan("stdin", quiet=TRUE)
d<-abs(d)
cat("min:",min(d),"\n",sep="")
cat("max:",max(d),"\n",sep="")
cat("median:",median(d),"\n",sep="")
cat("mean:",mean(d),"\n",sep="")
')
| true
|
5db011a94e2d43becf3dc5989127f4e9ac6db5c6
|
Shell
|
alexarmstrongvi/AlexAnalysisPackage
|
/bash/store_output_ntuples.sh
|
UTF-8
| 2,712
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
dir=$PWD
# Check environment setup
if [ -z "$ANALYSIS_DIR" ]; then
printf "ERROR :: ANALYSIS_DIR not defined. "
printf "Make sure to setup environment with setup_env.sh.\n"
return 1
fi
function usage()
{
echo -e "Move flat ntuples from outputs into ntuples directory\n"
echo "./store_output_ntuples.sh"
echo -e "\t-h --help"
echo -e "\t-a : Add to name of output directories [mc_date -> mc_<input>_data]\n"
echo -e "\t-n : numerator samples\n"
echo -e "\t-d : denominator samples\n"
echo -e "\t-f : fake samples\n"
echo -e "\t--update : move flat ntuples into current directory instead of new directory\n"
}
update=false
name_mod=""
num=false
den=false
fakes=false
while [ "$1" != "" ]; do
PARAM=`echo $1 | awk -F= '{print $1}'`
VALUE=`echo $1 | awk -F= '{print $2}'`
echo "$PARAM -> $VALUE"
case $PARAM in
-h | --help)
usage
return 1
;;
-a)
name_mod="_${VALUE}"
;;
--update)
update=true
;;
-n)
num=true
;;
-d)
den=true
;;
-f)
fakes=true
;;
*)
echo "ERROR: unknown parameter \"$PARAM\""
usage
return 1
;;
esac
shift
done
cd $ANALYSIS_DIR/analysis_run/ntuples
if [ "$update" = false ]; then
echo "Creating data and MC directories"
for group in "data" "mc"; do
DATE=`date +%Y_%m_%d`
DIR="${group}${name_mod}_${DATE}/"
echo "$DIR"
if [ -d "$DIR" ] && [ "$(ls -A ${DIR})" ]; then
echo "$DIR is already filled"
cd $dir
return 1
fi
mkdir -p $DIR
link_name="${group}"
if [ "$num" = true ]; then
link_name="${link_name}_num"
elif [ "$den" = true ]; then
link_name="${link_name}_den"
elif [ "$fakes" = true ]; then
link_name="${link_name}_fakes"
fi
unlink $link_name
ln -s $DIR $link_name
done
echo "Moving output files"
else
echo "Updating data and MC directories"
fi
cd $ANALYSIS_DIR/analysis_run
# Order is important
if [ "$num" = true ]; then
suffix="_num"
elif [ "$den" = true ]; then
suffix="_den"
elif [ "$fakes" = true ]; then
suffix="_fakes"
else
suffix=""
fi
mv outputs/CENTRAL_physics_Main_*.root ntuples/data${suffix}/
mv outputs/CENTRAL_*.root ntuples/mc${suffix}/
rm outputs/RunCondorSF.sh outputs/submitFile.condor
if [ "$fakes" = true ]; then
rm -rf ntuples/mc${suffix}*
#unlink ../ntuples/mc_fakes/
fi
cd $dir
echo "Ready for plotting"
| true
|
da2a6b442d79f7aaf9c2a0f026f9ee647d54b045
|
Shell
|
JPalmerGithub/dotfiles
|
/.bashrc
|
UTF-8
| 1,565
| 2.890625
| 3
|
[] |
no_license
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
source ~/.bash_aliases
source ~/.bash_functions
source ~/.bash_prompt
if [ $(hostname -s) != "LDCSRUN1" ] ; then
source ~/.bash_ansible
fi
# User specific aliases and functions
if [[ $- != *i* ]] ; then #not interactive... don't need this file then
return
fi
stty -ixon # disables ^s and ^q because they suck
set +o noclobber
shopt -s checkwinsize
shopt -s histappend
shopt -s cdspell
export CVS_RSH="ssh"
export RSYNC_RSH="ssh"
export PAGER=less
#alias man="PAGER='most -s' man"
export EDITOR=vim
export SVN_EDITOR=${EDITOR}
export PATH=~/bin:${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
if [ -d /opt/Navisphere/bin/ ] ; then
export PATH=${PATH}:/opt/Navisphere/bin
fi
export CDPATH=".:~"
export TERM=xterm
# GO Stuff
export GOPATH=${HOME}/gocode
export PATH=${PATH}:${GOPATH}/bin
if [ ! -d ${GOPATH} ] ; then
mkdir -p ${GOPATH}/{bin,pkg,src}
fi
export MAN_POSIXLY_CORRECT=true
export JAVA_HOME=/usr/java/latest
export JRE_HOME=${JAVA_HOME}
if [ -d /opt/Navisphere/bin ] ; then
export PATH=${PATH}:/opt/Navisphere/bin
fi
export CFLAGS="-Wall"
# Keychain setup
if [ ${HOSTNAME} = "ldcsrun1" ] ; then
if [ -f ~/.ssh/id_rsa ] ; then
keychain --nocolor -q id_rsa
[ -f $HOME/.keychain/$HOSTNAME-sh ] && \
. $HOME/.keychain/$HOSTNAME-sh
[ -f $HOME/.keychain/$HOSTNAME-sh-gpg ] && \
. $HOME/.keychain/$HOSTNAME-sh-gpg
fi
fi
if [ -x ~/bin/vcprompt ]; then
export PS1='\u@\h:\w $(vcprompt):\$ '
fi
| true
|
9eb6495f12a5aefe9ee6d48fdb5c90ed5340d034
|
Shell
|
WIEQLI/devenv
|
/install_linux/install_ffmpeg.sh
|
UTF-8
| 681
| 3.3125
| 3
|
[] |
no_license
|
# Build and install ffmpeg from source into ${DEV_HOME}/local unless
# the expected binary is already the one on PATH.
set -e
FFMPEG=`which ffmpeg`
if [ "$FFMPEG" != "${DEV_HOME}/local/bin/ffmpeg" ]; then
echo "ffmpeg not found."
# Download and unpack the source tree once.
if [ ! -d "${DEV_HOME}/development/FFmpeg-master" ]; then
echo "Download ffmpeg"
wget -O ${DEV_HOME}/development/devenv/download/ffmpeg-master.zip\
https://github.com/FFmpeg/FFmpeg/archive/master.zip
unzip ${DEV_HOME}/development/devenv/download/ffmpeg-master.zip -d ${DEV_HOME}/development
fi
cd ${DEV_HOME}/development/FFmpeg-master
# NOTE(review): --extra-cflags=-I$CPPFLAGS assumes CPPFLAGS holds a
# bare include directory, not the usual "-I..." flags — confirm.
./configure --prefix=${DEV_HOME}/local --disable-yasm --enable-shared --enable-pic --enable-libx264 --enable-gpl --extra-cflags=-I$CPPFLAGS
make -j`nproc` && make install
fi
| true
|
b46f5ba12449bfd9b37284e7507f33dbc14a15d8
|
Shell
|
HenryHo2015/shell-scripts
|
/compare_variables.sh
|
UTF-8
| 322
| 3.796875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prompt for two numbers and report how the first compares to the second.
read -p "Enter the first number:" first
read -p "Enter the second number:" second
# One flat elif chain instead of a nested if/else.
if [ $first -lt $second ]; then
    echo "$first is less than $second."
elif [ $first -eq $second ]; then
    echo "$first equal to $second."
else
    echo "$first is greater than $second."
fi
| true
|
734744328522cf2fa96fb4024de3e8ff78cb7d85
|
Shell
|
xiph/rav1e
|
/cross/entrypoint-build-libs.sh
|
UTF-8
| 221
| 2.84375
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Container entry point wrapper: rewrite any bare "build" word in the
# arguments into a `cinstall` invocation that produces installable
# static/dynamic libraries under dist/, then exec the rewritten command.
for arg in "$@"; do
# Replace a standalone "build" (at start of string or after a space).
# The (?=\s) lookahead is satisfied by the newline echo appends.
arg=$(echo $arg | perl -pne \
's/(?:(?<=\s)|^)build(?=\s)/cinstall --library-type staticlib --library-type cdylib --prefix dist/')
# Rotate the rewritten argument to the back and drop the original from
# the front; after the loop "$@" holds the fully rewritten arg list.
set -- "$@" "$arg"
shift
done
set -ex
exec "$@"
| true
|
059c84100387c1815e57c358ce098d73a70d2730
|
Shell
|
apache/tvm
|
/tests/scripts/task_show_node_info.sh
|
UTF-8
| 1,438
| 2.734375
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -euxo pipefail
echo "===== JENKINS INFO ====="
echo "NODE_NAME=$NODE_NAME"
echo "EXECUTOR_NUMBER=$EXECUTOR_NUMBER"
echo "WORKSPACE=$WORKSPACE"
echo "BUILD_NUMBER=$BUILD_NUMBER"
echo "WORKSPACE=$WORKSPACE"
echo "===== EC2 INFO ====="
function ec2_metadata() {
# See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
curl -w '\n' -fsSL "http://169.254.169.254/latest/meta-data/$1" || echo failed
}
ec2_metadata ami-id
ec2_metadata instance-id
ec2_metadata instance-type
ec2_metadata hostname
ec2_metadata public-hostname
echo "===== RUNNER INFO ====="
df --human-readable
lscpu
free
| true
|
fea377d3cb800ac26a4b6d33d2ff253879ff1068
|
Shell
|
MrDonaldoWorking/Linux-Labs
|
/2/script.sh
|
UTF-8
| 1,699
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run with sudo is necessary!
#
# Lab exercise: partition /dev/sda, create/tune ext4 filesystems,
# mount them, persist fstab entries, attach an external journal,
# build an LVM volume and mount a CIFS share. Step numbers match the
# assignment sheet. WARNING: destructive — it repartitions the disk.
# Resolve the stable /dev/disk/by-id/ata-* symlink for /dev/sda so the
# partition paths below survive device renumbering.
sda=$(udevadm info /dev/sda --query=symlink --root | tr " " "\n" | grep "^/dev/disk/by-id/ata")
sda3="${sda}-part3"
sda4="${sda}-part4"
sda5="${sda}-part5"
sda6="${sda}-part6"
# Expands to an empty line inside the fdisk here-docs, i.e. "accept
# the default answer".
empty=''
# 1: create a 300M primary partition #3
fdisk $sda << 1_param
n
p
3
$empty
+300M
w
1_param
# 2: record the new partition's UUID
blkid $sda3 -o value > /root/sda3_UUID
# 3: ext4 with 4K blocks
mkfs.ext4 -b 4096 $sda3
# 4: show filesystem metadata
dumpe2fs -h $sda3
# 5: force fsck every 2 mounts or every 2 months
tune2fs -c 2 -i 2m $sda3
# 6: mount it
mkdir /mnt/newdisk
mount $sda3 /mnt/newdisk
# 7: symlink from root's home
ln -s /mnt/newdisk /root/newdisk
ls -l /root
# 8
mkdir /mnt/newdisk/donaldo
ls -l /mnt/newdisk
# 9: persist the mount in fstab (idempotent)
if cat /etc/fstab | grep "^/dev/sda3"
then
echo "Already there"
else
echo "/dev/sda3 /mnt/newdisk ext4 noexec,noatime 0 0" >> /etc/fstab
fi
# reboot
echo "#!/bin/bash" > /mnt/newdisk/script
echo "echo \"Hello, World!\"" >> /mnt/newdisk/script
chmod ugo+x /mnt/newdisk/script
ls -l /mnt/newdisk
# FIX: was "/mnt/newdusk/script" (typo) — the test script never ran.
/mnt/newdisk/script
# 10: grow partition 3 from 300M to 350M, then grow the filesystem
echo "doing 10..."
umount $sda3
fdisk $sda << 10_param
d
3
n
p
3
$empty
+350M
w
10_param
e2fsck -f $sda3
resize2fs $sda3
# 11: read-only consistency check
e2fsck -n $sda3
# 12: 12M partition 4 used as an external journal for sda3
fdisk $sda << 12_param
n
$empty
$empty
+12M
w
12_param
mkfs.ext4 $sda4
# NOTE(review): literal /dev/sda4 here while the rest of the script
# uses the by-id path $sda4 — same device, but inconsistent.
tune2fs -J location=/dev/sda4 $sda3
# 13: two 100M partitions for LVM
fdisk $sda << 13_param
n
$empty
$empty
+100M
n
$empty
$empty
+100M
w
13_param
# 14: volume group + logical volume spanning both partitions
vgcreate LVM $sda5 $sda6
lvcreate -l 100%FREE -n LVM LVM
mkdir /mnt/supernewdisk
mkfs.ext4 /dev/LVM/LVM
mount /dev/LVM/LVM /mnt/supernewdisk
# 15: mount a CIFS network share
mkdir /mnt/share
mount.cifs //192.168.1.1/shared /mnt/share -o username=donaldo,password=mamba123
# 16: persist the CIFS mount (idempotent)
if cat /etc/fstab | grep "^//192.168.1.1/shared"
then
echo "Already there"
else
echo "//192.168.1.1/shared /mnt/share cifs user=donaldo,password=mamba123 0 0" >> /etc/fstab
fi
| true
|
194a3c036b869305d7f4fc49a4d47127f7b10255
|
Shell
|
ajdehel/terminal-config
|
/bash/functions/capsoption.sh
|
UTF-8
| 156
| 3.421875
| 3
|
[] |
no_license
|
# Keyboard option applied to Caps Lock (default: remap to Escape).
export CAPSOPTION="escape"
# Apply $CAPSOPTION to the X keyboard map; silently a no-op on systems
# without setxkbmap (e.g. headless boxes).
function capsoption {
    # Idiom fix: `command -v` is the standard "is this installed?"
    # test; the old `[ -e "$(which setxkbmap)" ]` forked an extra
    # process and relied on which's output format.
    if command -v setxkbmap > /dev/null 2>&1; then
        setxkbmap -option caps:$CAPSOPTION &> /dev/null
    fi
}
| true
|
fb887ec221e71df6fee6b71f3f64a1f9cbaad3c0
|
Shell
|
CptnClaw/dotfiles
|
/.bashrc
|
UTF-8
| 3,826
| 3.40625
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# Run only on interactive shells
[[ $- != *i* ]] && return
# Prompt
PS1='\[\033[01;36m\]\W\
\[\033[01;34m\] ❯\
\[\033[00m\] '
# Key bindings
bind '"\C-f":"cd_with_fzf\n"'
bind '"\C-o":"open_with_fzf\n"'
# Environment Variables
export VISUAL=vim
export EDITOR="$VISUAL"
# Aliases
#shopt -s expand_aliases
alias ls="ls --color=auto --group-directories-first"
alias grep="grep --color=auto"
alias diff="diff --color=auto"
alias ll="ls -lhN"
alias cp="cp -i" # confirm before overwriting something
alias rm="rm -vI" # a bit safer rm
alias df="df -h" # human-readable sizes
alias free="free -h" # human-readable sizes
alias term="xfce4-terminal"
alias yay="yay --answerdiff All"
alias clip="xclip -selection c"
alias spelling='aspell -c -t'
alias configit='/usr/bin/git --git-dir=$HOME/dotfiles.git/ --work-tree=$HOME'
alias yt='XDG_CONFIG_HOME=/home/eyal/.youtube XDG_DATA_HOME=/home/eyal/.youtube newsboat'
# Fix Locale
unset LANG
source /etc/profile.d/locale.sh
# Autocompletion
[ -r /usr/share/bash-completion/bash_completion ] && . /usr/share/bash-completion/bash_completion
# History
export HISTFILESIZE=20000
export HISTSIZE=10000
shopt -s histappend
shopt -s cmdhist # Combine multiline commands into one in history
export HISTCONTROL=ignoredups:ignorespace # Commands with leading space do not get added to history
export HISTIGNORE="&:ls:[bf]g:exit" # Ignore ls without options and builtin commands
# Properly handle window resizing
shopt -s checkwinsize
# Colors for manpages
export LESS=-R
export LESS_TERMCAP_mb=$'\E[1;31m' # begin blink
export LESS_TERMCAP_md=$'\E[1;36m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # reset bold/blink
export LESS_TERMCAP_so=$'\E[01;44;33m' # begin reverse video
export LESS_TERMCAP_se=$'\E[0m' # reset reverse video
export LESS_TERMCAP_us=$'\E[1;32m' # begin underline
export LESS_TERMCAP_ue=$'\E[0m' # reset underline
# Fuzzy Finder functions
cd_with_fzf()
{
cd $HOME && \
cd $(fd -t d | fzf --preview="tree -L 1 {}" --bind="space:toggle-preview" --preview-window=:hidden) && \
echo "$PWD"
}
cd_with_fzfh()
{
cd $HOME && \
cd $(fd -t d -H | fzf --preview="tree -L 1 {}" --bind="space:toggle-preview" --preview-window=:hidden) && \
echo "$PWD"
}
open_with_fzf_once()
{
FZF_CHOICE=$(fd -t f -H -I | fzf)
nohup xdg-open "$FZF_CHOICE" & exit
}
open_with_fzf()
{
fd -t f -H -I | \
fzf |
xargs -ro -d "\n" xdg-open 2<&-
}
pacs()
{
sudo pacman -S $(pacman -Ssq | fzf -m --preview="pacman -Si {}" --preview-window=:hidden --bind=space:toggle-preview)
}
# Helper functions
# ex — universal archive extractor.
# Usage: ex <archive>
# Dispatches on the file extension to the matching extraction tool;
# prints a message for unknown extensions or missing files.
# Fix: quote "$1" throughout so paths containing spaces work.
ex()
{
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2) tar xjf "$1" ;;
            *.tar.gz) tar xzf "$1" ;;
            *.bz2) bunzip2 "$1" ;;
            *.rar) unrar x "$1" ;;
            *.gz) gunzip "$1" ;;
            *.tar) tar xf "$1" ;;
            *.tbz2) tar xjf "$1" ;;
            *.tgz) tar xzf "$1" ;;
            *.zip) unzip "$1" ;;
            *.Z) uncompress "$1";;
            *.7z) 7z x "$1" ;;
            *) echo "'$1' cannot be extracted via ex()" ;;
        esac
    else
        echo "'$1' is not a valid file"
    fi
}
colors()
{
local fgc bgc vals seq0
printf "Color escapes are %s\n" '\e[${value};...;${value}m'
printf "Values 30..37 are \e[33mforeground colors\e[m\n"
printf "Values 40..47 are \e[43mbackground colors\e[m\n"
printf "Value 1 gives a \e[1mbold-faced look\e[m\n\n"
# foreground colors
for fgc in {30..37}; do
# background colors
for bgc in {40..47}; do
fgc=${fgc#37} # white
bgc=${bgc#40} # black
vals="${fgc:+$fgc;}${bgc}"
vals=${vals%%;}
seq0="${vals:+\e[${vals}m}"
printf " %-9s" "${seq0:-(default)}"
printf " ${seq0}TEXT\e[m"
printf " \e[${vals:+${vals+$vals;}}1mBOLD\e[m"
done
echo; echo
done
}
| true
|
d33286f72785bf29a77f7a9de41dde716f83b0f3
|
Shell
|
matthiaswh/bit4
|
/test832-blacklistd/templates/ipfw.rules.j2
|
UTF-8
| 1,159
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
LOOP="lo*"
PIF="{{ ansible_default_ipv4.interface }}"
PING="icmptypes 8"
ipfw -q flush
alias ADD="ipfw -q add"
#
# Basic rules
#
ADD 0100 allow all from any to any via $LOOP
ADD 0999 check-state
#
# Inbound rules
#
# Block blacklisted peers from accessing any port. Do not change the rule
# number. It is specified by blacklistd (1000 + port number).
ADD 1022 deny all from "table(port22)" to any in
# Drop spoofed packets.
ADD 2000 deny all from any to any not verrevpath in
ADD 2100 allow icmp from any to me $PING in via $PIF keep-state
ADD 2100 allow tcp from any to me ssh in via $PIF keep-state setup
#
# Outbound rules
#
ADD 3000 allow icmp from me to any $PING out via $PIF keep-state
ADD 3000 allow udp from me to any domain out via $PIF keep-state
ADD 3000 allow tcp from me to any http out via $PIF keep-state setup
ADD 3000 allow tcp from me to any https out via $PIF keep-state setup
#
# Fallback rules
#
ADD 9000 unreach port log tcp from any to me via $PIF
ADD 9000 unreach port log udp from any to me via $PIF
ADD 9100 unreach host log all from any to me via $PIF
ADD 9999 deny log all from any to any
| true
|
afd7d72bf2e60b014e982441678524681a886472
|
Shell
|
ChearH/frizzer
|
/tests/simple_binary/run_tests.sh
|
UTF-8
| 2,029
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# End-to-end test: fuzz the simple_binary target with frizzer for 15 seconds
# and check that the expected new path was discovered.
# Expected bahavior:
# Find new paths at seeds: 5, 111, 135, ...
# Find crash at 1793 ( ~ 4 minutes )
# [*] [seed=1792] speed=[ 77 exec/sec (avg: 56)] coverage=[20 bblocks] corpus=[9 files] last new path: [1422] crashes: [0]
# [*] [seed=1793] 2020-08-11 12:53:53 tmpprojdir/corpus/1422_1290_111_5_3
# [!] doIteration: Got a frida error: the connection is closed
# [+] Current iteration: 2020-08-11 12:53:54 [seed=1793] [file=tmpprojdir/corpus/1422_1290_111_5_3]
# [+] Payload is written to tmpprojdir/crashes/20200811_125354_crash
# [+] stopping fuzzer loop
# [+] Detach Fuzzer ...
# [!] 'target'.detach: Could not unload frida script: script is destroyed
# [+] Done
# Average Speed: between 40 and 50
# Enable job control for shell script (so we can use 'fg', etc)
set -m
# Ctrl-C handler: reset SIGINT, kill both the target and the fuzzer, exit.
exitfn () {
trap SIGINT
echo 'Interrupted by user!'
kill $test_pid
kill $frizzer_pid
exit
}
trap "exitfn" INT # Set up SIGINT trap to call function.
# Start the fuzz target in the background and remember its pid; it is both
# killed at the end and written into the frizzer config below.
./test > /dev/null &
test_pid=$!
rm -rf tmpprojdir
# new:
frizzer init tmpprojdir
# Write the frizzer project config; $test_pid is expanded into the heredoc.
cat > tmpprojdir/config <<EOF
[fuzzer]
log_level = 3 # debug
write_logfile = true
debug_mode = false
host = "localhost"
port = 7777
ssl = false
udp = false
fuzz_in_process = false
recv_timeout = 0.1
[target]
process_pid = $test_pid
function = "handleClient"
remote_frida = false
frida_port = 27042
modules = [
"tests/simple_binary/test",
]
EOF
frizzer add -p tmpprojdir indir
# start frizzer in the background
frizzer fuzz -p tmpprojdir &
frizzer_pid=$!
# kill frizzer after 15 seconds
(sleep 15; kill -s INT $frizzer_pid)&
# get frizzer back in the foreground
# NOTE(review): relies on bash resolving the bare job name "frizzer" as a
# jobspec (%frizzer); confirm this matches the frizzer job, not the sleep/kill
# subshell started just above.
fg frizzer
echo frizzer stopped!
# check if fuzzer worked correctly:
grep 'Found new path: \[135\] tmpprojdir/corpus/111_5_3' tmpprojdir/*.log
result=$?
if [ $result -eq 0 ]; then
echo "Test succeeded!"
rm -rf tmpprojdir
else
echo "Test failed!"
fi
# cleanup
kill $test_pid
# bash treats a single signal-name argument to trap as a reset to the
# default handler (bash-specific behavior).
trap SIGINT
exit $result
| true
|
b8b9bdc2d1e665baac6aa22b2d22bd7c46615a91
|
Shell
|
zaurky/twitter_download
|
/download_images.sh
|
UTF-8
| 412
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cron wrapper for the image downloader: runs bin/download_image.py from the
# project root, using a lock file so overlapping cron runs are skipped.

BASEDIR=$(dirname "$0")
LOCK="/tmp/download_image.lock"

cd "$BASEDIR" || exit 1

# Acquire the lock atomically: with noclobber the redirection fails when the
# lock file already exists, closing the check-then-create race of the old
# [ ! -f ] / touch pair.
if ( set -o noclobber; : > "$LOCK" ) 2>/dev/null; then
    # Remove the lock on any exit path, so an interrupted or failed run can
    # no longer leave a stale lock behind that blocks every later run.
    trap 'rm -f "$LOCK"' EXIT

    source workspace/bin/activate;
    PYTHONPATH=$BASEDIR python bin/download_image.py > /tmp/download_image.log 2> /tmp/download_image.err;
    deactivate;
fi

# should add a non duplicate script, but jpg files contain tweet as exif, and that change from one to the other
| true
|
098b355133875c46e9c7bfb6801f2a631cba175a
|
Shell
|
edwardwawrzynek/arch-dotfiles
|
/bash/.bash_aliases
|
UTF-8
| 779
| 2.6875
| 3
|
[] |
no_license
|
## ls
# Listing shortcuts built on a colourised, human-readable base `ls`.
alias ls='ls -hF --color=auto'
alias lr='ls -R' # recursive ls
alias ll='ls -l'
alias la='ll -A'
alias lx='ll -BX' # sort by extension
alias lz='ll -rS' # sort by size
alias lt='ll -rt' # sort by date
alias lm='la | more'
## safety
# Prompt before overwriting/removing; refuse to recurse across filesystems
# and to operate on / for the ch* commands.
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -I --one-file-system'
alias ln='ln -i'
alias chown='chown --preserve-root'
alias chmod='chmod --preserve-root'
alias chgrp='chgrp --preserve-root'
##
# vim-style exits for the shell (leading space keeps them out of history
# when HISTCONTROL=ignorespace).
alias :q=' exit'
alias :Q=' exit'
alias :x=' exit'
alias cd..='cd ..'
## Useful
alias hd='hexdump -C'
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias mkdir='mkdir -p -v'
alias diff='colordiff'
alias df='df -h'
alias du='du -c -h'
alias more='less'
| true
|
954467bfdaad07567eb960a6b06bcf8100497299
|
Shell
|
EarlyBirdAstro/AiGO
|
/aigo-tools_dev/src/old/postinst.2018v1a3-1.1.95
|
UTF-8
| 3,192
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian-style postinst for the aigo-tools package: installs helper scripts,
# udev rules and config files, mostly into /usr/local.
# $1 is the dpkg maintainer-script argument (e.g. "configure").
type=$1
echo "postinst="$type

# Source directory shipped by the package, and install destinations.
AIGO_TOOLS_DIR=/opt/aigo

LOCAL_DIR=/usr/local
ROOT_DIR=/
UDEV_RULES_DIR=/lib/udev/rules.d

# Per-user paths for the legacy (commented-out) desktop integration below.
AIGO_USER=aigo
AIGO_HOME=/home/$AIGO_USER
LG_CONF_DIR=$AIGO_HOME/.config/GM_software
LG_CONF=lin-guider.conf
LXPANEL_CONF_DIR=$AIGO_HOME/.config/lxpanel/LXDE/panels
LXPANEL_CONF=panel
AUTOSTART_DIR=$AIGO_HOME/.config/autostart
# Install one file from $AIGO_TOOLS_DIR: remove any old copy, create the
# destination directory, copy the file, and set its owner and mode.
#   $1 - file name   $2 - destination dir   $3 - chown spec   $4 - chmod mode
# All expansions are quoted so paths containing spaces or glob characters
# cannot be word-split or expanded (the original left them unquoted).
chfile() {
	FNAME=$1
	FDIR=$2
	OWN=$3
	MOD=$4
	rm -rf "$FDIR/$FNAME"
	mkdir -p "$FDIR"
	cp -f "$AIGO_TOOLS_DIR/$FNAME" "$FDIR"
	chown "$OWN" "$FDIR/$FNAME"
	chmod "$MOD" "$FDIR/$FNAME"
}
# Remove an installed file (no-op when it does not exist).
#   $1 - file name   $2 - directory it lives in
# Quoted to survive paths with spaces/globs (the original was unquoted).
rmfile() {
	FNAME=$1
	FDIR=$2
	rm -rf "$FDIR/$FNAME"
}
# Install the current set of tools, desktop entries, udev rules and configs.
# Lines prefixed "#2017v1 -" / "#2018v1a2 -" are steps retired in that
# release, kept for reference.
chfile aigo_config $LOCAL_DIR/bin root.root 755
chfile aigo_config_wifi-options $LOCAL_DIR/bin root.root 755
chfile aigo_config_install-astronomy-softwares $LOCAL_DIR/bin root.root 755
chfile aigo_config.desktop $LOCAL_DIR/share/applications root.root 644
chfile aigo.png $LOCAL_DIR/share/pixmaps root.root 644
chfile aigo_look.sh $LOCAL_DIR/bin root.root 755
chfile do_change_ssid.sh $LOCAL_DIR/bin root.root 755
#2017v1 - chfile switch_libasicamera.sh $LOCAL_DIR/bin root.root 755
#2018v1a2 - rmfile switch_libasicamera.sh $LOCAL_DIR/bin
chfile aigo_version $ROOT_DIR/etc root.root 644
chfile 00-aigo.rules $UDEV_RULES_DIR root.root 644
chfile z99-aigo.rules $UDEV_RULES_DIR root.root 644
chfile aigo.conf $LOCAL_DIR/etc root.root 644
#2017v1 - chfile $LG_CONF $LG_CONF_DIR $AIGO_USER.$AIGO_USER 664
#2017v1 - GM_software aigo.aigo 775
#2017v1 - chown $AIGO_USER.$AIGO_USER $LG_CONF_DIR
#2017v1 - chmod 775 $LG_CONF_DIR
# Add aigo_config to LXPanel
#2017v1 - grep "aigo_config.desktop" $LXPANEL_CONF_DIR/$LXPANEL_CONF
#2017v1 - RET=$?
#2017v1 - if [ $RET -eq 1 ] ; then
#2017v1 - cp -f $LXPANEL_CONF_DIR/$LXPANEL_CONF $AIGO_HOME/$LXPANEL_CONF.postinst.bak
#2017v1 - sed -i -z 's/lxterminal.desktop/lxterminal.desktop\n }\n Button {\n id=menu:\/\/applications\/System\/aigo_config.desktop/' $LXPANEL_CONF_DIR/$LXPANEL_CONF
#2017v1 - fi
# Remove OpenSkyImager
#2018v1a2 - rm -rf /usr/local/bin/OpenSkyImager
#2018v1a2 - rm -f /usr/share/applications/OpenSkyImager.desktop
#2018v1a2 - grep "OpenSkyImager.desktop" $LXPANEL_CONF_DIR/$LXPANEL_CONF
#2018v1a2 - RET=$?
#2018v1a2 - if [ $RET -eq 0 ] ; then
#2018v1a2 - cp -f $LXPANEL_CONF_DIR/$LXPANEL_CONF $AIGO_HOME/$LXPANEL_CONF.postinst.bak
#2018v1a2 - sed -i -z 's/ Button {\n id=menu:\/\/applications\/Education\/OpenSkyImager.desktop\n }\n//' $LXPANEL_CONF_DIR/$LXPANEL_CONF
#2018v1a2 - fi
# Restart LXPanel
#2018v1a2 - sleep 0.25
#2018v1a2 - su $AIGO_USER -c 'DISPLAY=:0 lxpanelctl exit'
#2018v1a2 - sleep 0.25
#2018v1a2 - #su $AIGO_USER -c 'find ~/.cache/menus -name '*' -type f -print0 | xargs -0 rm'
#2018v1a2 - su $AIGO_USER -c 'rm -f ~/.cache/menus/*'
#2018v1a2 - sleep 0.25
#2018v1a2 - su $AIGO_USER -c 'DISPLAY=:0 lxpanel --profile LXDE &'
#2018v1a2 - sleep 0.25
#2018v1a2 - su $AIGO_USER -c 'DISPLAY=:0 lxpanelctl restart'
# Add autostart aigo_config
#2017v1 - chfile aigo_config.desktop $AUTOSTART_DIR $AIGO_USER.$AIGO_USER 644
# Add aigo_upgrade.sh
#2017v1 - chfile aigo_upgrade.sh $LOCAL_DIR/bin root.root 755
#2018v1a2 - rmfile aigo_upgrade.sh $LOCAL_DIR/bin
# Make sure everything still shipped under /opt/aigo is executable, then
# report success to dpkg.
chmod +x $AIGO_TOOLS_DIR/*.sh
exit 0
| true
|
e23ed8eeeb7fb2bd0567c4849669053c683ba9c2
|
Shell
|
carlfriess/DoritOS
|
/tools/prepare-sdcard.sh
|
UTF-8
| 563
| 3.484375
| 3
|
[
"MIT",
"LicenseRef-scancode-dco-1.1"
] |
permissive
|
#!/bin/bash
# Populate a mounted sd-card with small marker files for the tests.
# Usage: ./prepare-sdcard.sh /path/to/mounted/sdcard

SD_CARD=$1

# Write a marker file at the given path relative to $SD_CARD.
# The path is normalised with a single "/" separator, so both call styles
# used below ("/myfile.txt" and "parent/myfile.txt") land inside $SD_CARD;
# the original concatenated ${SD_CARD}${1} directly, which dropped relative
# arguments outside the mount point unless $SD_CARD ended with "/".
function writefile()
{
	echo "hello world! $1" > "${SD_CARD%/}/${1#/}"
}
# Report the target, create the two test directories, then drop marker
# files with short and long (>8.3) names at the root and inside them.
echo "Preparing sd-card: $SD_CARD"
mkdir $SD_CARD/parent
mkdir $SD_CARD/parent-directory
# NOTE(review): correct placement of the relative paths below depends on how
# writefile joins $SD_CARD and its argument — verify that both the absolute
# ("/myfile.txt") and relative ("parent/...") call styles end up inside
# $SD_CARD for the invocation used in practice.
writefile "/myfile.txt"
writefile "/mylongfilenamefile.txt"
writefile "/mylongfilenamefilesecond.txt"
writefile "parent/myfile.txt"
writefile "parent/mylongfilenamefile.txt"
writefile "parent/mylongfilenamefilesecond.txt"
writefile "parent-directory/myfile.txt"
writefile "parent-directory/mylongfilenamefile.txt"
writefile "parent-directory/mylongfilenamefilesecond.txt"
| true
|
1cd5586e284ce3b91d9545adab5e2aea87579333
|
Shell
|
fengyr/huawei_split_partition
|
/split_part.sh
|
UTF-8
| 1,211
| 2.71875
| 3
|
[] |
no_license
|
#!/sbin/sh
#===============================================================================
#
#          FILE: splite_part.sh
#
#         USAGE: /tmp/splite_part.sh
#
#   DESCRIPTION: Repartition script for the Huawei U8825D and U8950D; the
#                Data partition defaults to 512M. Must be run from recovery,
#                with busybox uploaded to /tmp/busybox beforehand.
#
#       OPTIONS: ---
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: linkscue (scue), linkscue@gmail.com
#  ORGANIZATION:
#       CREATED: 2014-05-06 18:46:56 CST
#      REVISION: ---
#===============================================================================

# Size of the data partition
data_size=512M

# Cylinders per megabyte
cylinders_of_M=122.0703125

# busybox
busybox=/tmp/busybox

# Derived values: start of partition 18 (second-to-last row of fdisk -l),
# start of partition 19 (computed from the data size in cylinders), and the
# end of the disk (last row of fdisk -l).
p18_start=$($busybox fdisk -l /dev/block/mmcblk0 |\
$busybox tail -n2 |\
$busybox head -n1 |\
awk '{print $2}')
p19_start=$($busybox awk -va=$cylinders_of_M -vb=$p18_start \
'BEGIN {print a*512+b+2}')
p19_end=$($busybox fdisk -l /dev/block/mmcblk0 |\
$busybox tail -n1 |\
$busybox awk '{print $3}')

# DANGEROUS, destructive operation: delete partitions 19 and 18, recreate
# 18 with the new data size and 19 in the remaining space, mark 19 as
# type c (FAT32 LBA), and write the table. Commands are fed to fdisk via
# the heredoc; $p18_start / $data_size / $p19_start expand inside it.
$busybox fdisk /dev/block/mmcblk0 << EOF
d
19
d
18
n
$p18_start
+$data_size
n
$p19_start
t
19
c
w
EOF
| true
|
9b65b320082ee8e19368a16c86b8bcc05b2f189e
|
Shell
|
benjixxx/ProjetGitOrganisation
|
/backup.sh
|
UTF-8
| 945
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Re-archive the git repository into /backup whenever the source tree has
# been modified since the last backup was written.

SRCDIR=/home/git/projet-1.git
DESDIR=/backup

# Newest modification time in each tree, in whole seconds since the epoch.
# find's %T@ prints fractional epoch seconds for every entry; sort and keep
# the max. (The previous version compared dd-mm-yy strings with -ge, which
# is not a valid integer comparison, and `tail -1` on unsorted output only
# sampled traversal order, not the latest date.)
t1=$(find "$DESDIR" -printf '%T@\n' 2>/dev/null | sort -n | tail -1)
t2=$(find "$SRCDIR" -printf '%T@\n' 2>/dev/null | sort -n | tail -1)
t1=${t1%.*}
t2=${t2%.*}

echo "$t1"
echo "$t2"

# Name the archive after today's date, e.g. backup-May-06-19.tar.gz
TIME=`date +%b-%d-%y`
FILENAME=backup-$TIME.tar.gz

# Back up only when the source tree is at least as new as the newest file
# under the backup directory, i.e. something changed since the last archive.
# (Defaults of 0 keep the comparison valid on the very first run.)
if [ "${t2:-0}" -ge "${t1:-0}" ]
then
    # Replace today's archive if it already exists, then recreate it.
    rm -f "$DESDIR/$FILENAME"
    tar -czf "$DESDIR/$FILENAME" "$SRCDIR"
else
    exit
fi
| true
|
35cba895211c714114f1c608f5d9b35cc76eb275
|
Shell
|
GonzaloHirsch/sds-1-neighbour-finder
|
/generator/statistics_generator.sh
|
UTF-8
| 602
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate simulation inputs and run the neighbour-finder benchmark.
#   $1 - particle count, $2 - grid subdivision count; both are written into
#   the variables file consumed by input_generator.py.

particle_radius="0.25";
interraction_radius="1";
area_length="20";

destdir=generator/variables_generator
# NOTE(review): the variables file is only (re)written when it already
# exists; if the intent is to always refresh it, drop the -f guard — confirm.
if [ -f "$destdir" ]
then
    echo "$1" > "$destdir"
    echo "$area_length" >> "$destdir"
    echo "$2" >> "$destdir"
    echo "$interraction_radius" >> "$destdir"
    echo "$particle_radius" >> "$destdir"
fi

# Four rounds: regenerate the input files, then run the simulation 10 times.
for value in {1..4}
do
	# Feed the variables file straight to the generator (no cat needed).
	python generator/input_generator.py < generator/variables_generator
	for num in {1..10}
	do
		java -jar ./target/sds-tp1-1.0-SNAPSHOT-jar-with-dependencies.jar -sf ./static.txt -df ./dynamic.txt -pb -bf
	done
done
echo All done
| true
|
7f496dd6b0cbc0cffd500d640be7b18b680e18fd
|
Shell
|
usc-isi-i2/kgtk
|
/kgtk/join/test/analyze-wikidata_edges_20200504-properties.sh
|
UTF-8
| 2,223
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Analyze which properties co-occur with ${PROPERTY}-qualified edges in the
# 2020-05-04 Wikidata dump, producing per-property counts, percentages, and
# human-readable labels under ${WORKDIR}.
#
# Fixed: three commands below were missing the trailing backslash on their
# "-o ..." line, which split each invocation into two broken commands; the
# output option is now the last argument of each pipeline stage.

PROPERTY=P580

WORKDIR=task-20200624

WIKIDATADIR=../../cache/datasets/wikidata-20200504
# WIKIDATAGZIP=.gz
WIKIDATAGZIP=

# VERBOSE=
VERBOSE=--verbose

# KGTK=echo
# KGTK="kgtk"
KGTK="python3 -m kgtk"

# Extract the qualifiers with ${PROPERTY} in the label column.
${KGTK} filter ${VERBOSE} \
     ${WIKIDATADIR}/wikidata_qualifiers_20200504.tsv${WIKIDATAGZIP} \
     -p "; ${PROPERTY} ;" \
     -o ${WORKDIR}/wikidata_qualifiers_20200504-${PROPERTY}.tsv

# Extract the edges with ID column values that match node1 column
# values from the extracted qualifiers.
${KGTK} ifexists ${VERBOSE} \
     ${WIKIDATADIR}/wikidata_edges_20200504.tsv${WIKIDATAGZIP} \
     --filter-on ${WORKDIR}/wikidata_qualifiers_20200504-${PROPERTY}.tsv \
     --input-keys id \
     --filter-keys node1 \
     -o ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}.tsv

# Count the properties in the property-qualified edge file:
${KGTK} unique ${VERBOSE} \
     ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}.tsv \
     --column label \
     --label ${PROPERTY}-count \
     -o ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts.tsv

# Merge the total count with lift.
${KGTK} lift ${VERBOSE} \
     ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts.tsv \
     --label-file ${WORKDIR}/wikidata_edges_20200504-property-counts.tsv \
     --output-file ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts-with-totals.tsv \
     --columns-to-lift node1 \
     --label-value total-count \
     --lift-suffix ';total'

# Calculate the percentages:
${KGTK} calc ${VERBOSE} \
     -i ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts-with-totals.tsv \
     -o ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts-with-percents.tsv \
     -c node2 'node1;total' \
     --do percentage \
     --into percent

# Lift the property labels:
${KGTK} lift ${VERBOSE} \
     ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts-with-percents.tsv \
     --label-file ${WIKIDATADIR}/wikidata_labels_only.tsv${WIKIDATAGZIP} \
     --output-file ${WORKDIR}/wikidata_edges_20200504-${PROPERTY}-property-counts-with-labels.tsv \
     --columns-to-lift node1 \
     --prefilter-labels
| true
|
00c51580715c8e23328f9ce4154b8c13625683bd
|
Shell
|
BlaineEXE/dev-rook-ceph
|
/scripts/cluster/minikube-down.sh
|
UTF-8
| 221
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tear down the local minikube cluster used for Rook/Ceph development.
set -eEuo pipefail

# Tool names, overridable via the environment.
: "${MINIKUBE:="minikube"}"
: "${PODMAN:="podman"}"

# # revert back to default docker context
# $PODMAN context use default
# $PODMAN context rm minikube || true

$MINIKUBE delete
| true
|
97cf061afd9285232e66f043a0e049573bb6276e
|
Shell
|
rsukkerd/Histaroach
|
/kill_j_a_v_a_processes.sh
|
UTF-8
| 745
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# 1. gets list of current java process ids that this user is running
# 2. filters out the process id supplied as a command line argument to the script ($1)
# 3. does a kill -9 on each of the non-filtered process ids
#
# Usage:
#  ./kill_java_processes.sh 32423

# Print the ps lines for this user's java processes.
# "grep -v grep" drops the grep commands of this very pipeline.
list_java_processes() {
	ps auwx | grep "$(whoami)" | grep java | grep -v grep
}

# Extract the pids from the listing, excluding the pid to ignore.
# grep -vx matches the whole line, so ignoring pid 123 no longer also drops
# pids that merely contain "123" as a substring (e.g. 1234), which the old
# unanchored "grep -v $1" did.
pids_to_kill() {
	list_java_processes | awk '{print $2}' | grep -vx "$1"
}

if [ -z "$1" ]
then
	echo "ERROR: pass a pid to ignore"
else
	echo "Process list:"
	list_java_processes
	echo "Killing these pids:"
	pids_to_kill "$1"
	# Actually kill the pids:
	pids_to_kill "$1" | xargs kill -9
fi
| true
|
3bf64e2c4f0726fcb79a567f406d0919ea09fdc0
|
Shell
|
NigelGreenway/dotfiles
|
/bash/aliases/general
|
UTF-8
| 437
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/env bash

## Directory listing
alias ll='ls --color -lh'
alias lla='ll -a'
alias lld='vdir --color'
alias llr='ll -R'

## File preview
# Syntax-highlighted cat via Pygments (guesses the lexer).
alias cat='pygmentize -g'

## Bash helpers
# Pseudo-docstring convention: assigned to the throwaway variable __.
__='
Allows a bash profile to be loaded easier without them
extra keystrokes
'
# Reload ~/.bashrc; clears the screen afterwards unless
# FRAPPER__ALIAS__CLEAR_PROMPT_ON_RELOAD is unset.
function r {
if [ -z "${FRAPPER__ALIAS__CLEAR_PROMPT_ON_RELOAD+x}" ]
then
source $HOME/.bashrc
else
source $HOME/.bashrc
clear
fi
}
| true
|
07de0aa70ef1250abb873bd7162971b15dd18bf6
|
Shell
|
anton-m-kashin/iOSAppTemplate
|
/generate.sh
|
UTF-8
| 3,500
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Scaffolds a new iOS app project from the templates/ directory.

TEMPLATES="./templates"

# Placeholder tokens substituted inside the template files.
BUNDLE_PREFIX_KEY="<BUNDLE_ID_PREFIX>"
NAME_KEY="<APP_NAME>"
TEAM_ID_KEY="<TEAM_ID>"

# Print the command-line usage summary to stdout (callers redirect to
# stderr when reporting errors).
usage() {
echo "Usage: $0 <all|makefile|project|help> <options>"
echo "  $0 all \\
--path </where/to/store/new/app> \\
--bundle-prefix <some.company> \\
--name <NewApp> \\
--team-id <XXXXXX>"
echo "  $0 makefile --name <NewApp>"
echo "  $0 project \\
--path </where/to/store/new/app> \\
--bundle-prefix <some.company> \\
--name <NewApp> \\
--team-id <XXXXXX>"
}
# Print an optional failure reason followed by the usage text, then exit 1.
#   $1 - failure message (may be empty to print usage only)
# Fixed: the blank separator line used to go to stdout while the message and
# usage went to stderr; all diagnostics now go to stderr consistently.
report_and_fail() {
	MESSAGE="$1"
	if [ -n "$MESSAGE" ]; then
		echo "$MESSAGE" >&2
		echo "" >&2
	fi
	usage >&2
	exit 1
}
# Render the Makefile template on stdout, substituting the app name for
# every occurrence of the $NAME_KEY placeholder.
makefile_out() {
	NAME=$1
	MAKEFILE_TEMPLATE="${TEMPLATES}/makefile-template"
	sed -e "s/${NAME_KEY}/${NAME}/g" < "$MAKEFILE_TEMPLATE"
}
# Render project.yml from the template, substituting all placeholders.
#   $1 - bundle id prefix, $2 - app name, $3 - development team id
project_out() {
	BUNDLE_PREFIX=$1; NAME=$2; TEAM_ID=$3
	PROJECT_TEMPLATE="${TEMPLATES}/project-template.yml"
	# One sed with three expressions replaces the old cat plus three
	# chained sed processes; the output is identical.
	sed -e "s/${BUNDLE_PREFIX_KEY}/${BUNDLE_PREFIX}/g" \
	    -e "s/${NAME_KEY}/${NAME}/g" \
	    -e "s/${TEAM_ID_KEY}/${TEAM_ID}/g" \
	    < "$PROJECT_TEMPLATE"
}
# Emit the Brewfile template verbatim on stdout.
brewfile_out() {
	BREWFILE_TEMPLATE="${TEMPLATES}/brewfile-template"
	cat < "$BREWFILE_TEMPLATE"
}
# Emit the .gitignore template verbatim on stdout.
gitignore_out() {
	GITIGNORE_TEMPLATE="${TEMPLATES}/gitignore-template"
	cat < "$GITIGNORE_TEMPLATE"
}
# Copy the static project skeleton into the new app directory:
# the contents of ./stubs plus the ./utils directory itself.
#   $1 - destination (the freshly created app directory)
copy_files() {
TO_PATH="$1"
STUBS="./stubs"
UTILS="./utils"
cp -R "$STUBS"/* "$TO_PATH"/
cp -R "$UTILS" "$TO_PATH"/
}
# Render every template into the new app directory.
#   $1 - app dir, $2 - bundle id prefix, $3 - app name, $4 - team id
generate_from_templates() {
TO_PATH=$1; BUNDLE_PREFIX=$2; NAME=$3; TEAM_ID=$4
brewfile_out > "$TO_PATH"/Brewfile
project_out "$BUNDLE_PREFIX" "$NAME" "$TEAM_ID" > "$TO_PATH"/project.yml
makefile_out "$NAME" > "$TO_PATH"/Makefile
gitignore_out > "$TO_PATH"/.gitignore
}
# Full scaffold: validate the destination, create <path>/<name>, copy the
# static files, then render all templates into it. Fails (via
# report_and_fail) when the destination is missing or already contains the
# app folder.
#   $1 - parent dir, $2 - bundle id prefix, $3 - app name, $4 - team id
all() {
TO_PATH=$1; BUNDLE_PREFIX=$2; NAME=$3; TEAM_ID=$4
test -d "$TO_PATH" || report_and_fail "${TO_PATH} is not a directory."
APP_PATH="${TO_PATH}/${NAME}"
test ! -d "$APP_PATH" \
|| report_and_fail "Path already contains folder ${NAME}."
mkdir "$APP_PATH"
copy_files "$APP_PATH"
generate_from_templates "$APP_PATH" "$BUNDLE_PREFIX" "$NAME" "$TEAM_ID"
}
# First positional argument selects the action; the rest are --key value
# option pairs collected into the variables below.
ACTION=$1
shift

TO_PATH=
BUNDLE_PREFIX=
NAME=
TEAM_ID=
while [ -n "$1" ]; do
    case "$1" in
        --path) shift
            TO_PATH="$1"
            ;;
        --bundle-prefix) shift
            BUNDLE_PREFIX="$1"
            ;;
        --name) shift
            NAME="$1"
            ;;
        --team-id) shift
            TEAM_ID="$1"
            ;;
    esac
    shift
done

# Dispatch on the action, validating its required options first.
# (The obsolescent `test -a` conjunctions are replaced with && chains.)
case "$ACTION" in
    all) test -n "$TO_PATH" \
            && test -n "$BUNDLE_PREFIX" \
            && test -n "$NAME" \
            && test -n "$TEAM_ID" \
            || report_and_fail "Check options."
        all "$TO_PATH" "$BUNDLE_PREFIX" "$NAME" "$TEAM_ID"
        ;;
    makefile) test -n "$NAME" \
            || report_and_fail "App name is not specified."
        makefile_out "$NAME"
        ;;
    project) test -n "$BUNDLE_PREFIX" && test -n "$NAME" && test -n "$TEAM_ID" \
            || report_and_fail "Check options."
        project_out "$BUNDLE_PREFIX" "$NAME" "$TEAM_ID"
        ;;
    help) usage
        ;;
    # Fixed: the positional args were already consumed by the option loop
    # above, so "$1" was always empty here; report the actual action.
    *) report_and_fail "Unknown action $ACTION."
        ;;
esac
| true
|
691c46100c4c823cb34e37fc07fd2ed7b5b158e0
|
Shell
|
mcroteau/point-cloud
|
/xyz-parser/scripts/bin/prop
|
UTF-8
| 227
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# "prop" subcommands: locate, list and edit the per-environment properties
# file. Requires SCRIPT_DIR and ENVIRONMENT to be set by the caller.

# Print the path of the active environment's properties file.
prop-file(){
    echo "${SCRIPT_DIR}/etc/properties/${ENVIRONMENT}.properties"
}

# Print the effective properties: blank lines and '#' comments removed.
# (Reads the file directly instead of the old cat | grep pipeline, and
# quotes the path so directories with spaces work.)
prop-list(){
    grep -v "^$" "$(prop-file)" | grep -v "^#"
}

# Open the properties file in an editor.
prop-edit(){
    vim "$(prop-file)"
}

CMD_PREFIX=prop
# Hand over to the shared command framework, which wires the prop-* functions
# into the CLI.
. ${CMD_SCRIPT_BASE}
| true
|
98eadd363da52a86482d3b3337c97236a8b907da
|
Shell
|
saibye/project
|
/auto/autoinfo
|
UTF-8
| 1,002
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily job: fetch stock basic data and XSG info, logging all output to
# $LOG/AUTO_INFO.out / .err.

# Environment for the project's tools and Python code.
export SAI_HOME=/home/sai3
export LD_LIBRARY_PATH=/usr/lib64:/usr/local/lib64:${SAI_HOME}/tools/lib64:.
export LIBPATH=${LD_LIBRARY_PATH}
export PATH=/home/public/anaconda3/bin:${SAI_HOME}/project/bin:${SAI_HOME}/project/sbin:/usr/local/bin:/usr/local/sbin:/usr/sbin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/usr/local/mysql/bin
export PYTHONPATH=${SAI_HOME}/project/src/common:${SAI_HOME}/project/src:/usr/local/lib64/pkgconfig:/usr/lib64/pkgconfig:/usr/local/apr/lib/pkgconfig
export PHOME=${SAI_HOME}/project
export DATA=${SAI_HOME}/project/data
export LOG=${SAI_HOME}/project/log
export LANG=zh_CN.utf8

today=$(date +%Y%m%d)
log=${LOG}/AUTO_INFO.out
err=${LOG}/AUTO_INFO.err

# Open the log files on fds 3/4, then point stdout/stderr at them for the
# remainder of the run; fds 3/4 are closed again at the end.
exec 3>${log}
exec 4>${err}
exec 1>&3
exec 2>&4

# 1. get stock-basic
cd ${PHOME}/src/dayend && time python get_stock_basic.py

# 2. get xsg info
cd ${PHOME}/src/dayend && time python get_xsg.py

exec 3>&-
exec 4>&-

# NOTE(review): this success message is printed unconditionally; the python
# steps' exit codes are not checked.
echo "shell: [${today}] run INFO succeeds."

#autoinfo
| true
|
11eeaf3816cd69e0c38e9e3c1d3186b0a005d2c3
|
Shell
|
albert4git/bTest
|
/bPot/ReBASH/TarBACKUP.sh
|
UTF-8
| 1,505
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/sh
#
# TarBACKUP.sh
# Copyright (C) 2019 red <red@red-Swift-SF113-31>
#
# Distributed under terms of the MIT license.
#
#! /bin/bash
#
# Home Directory Backup Script
#
# Incremental home backup (snapshot.file tracks state between runs).
# NOTE(review): the brace expansion {Documents,...} below is a bash feature;
# under a strict POSIX /bin/sh it is passed through literally — confirm which
# shell actually runs this (the file carries both sh and bash shebang lines).
tar --listed-incremental=/media/backup/snapshot.file -cJpf /media/backup/home-backup-`date +%d-%m-%Y`.tar.xz /home/user/{Documents,Downloads,Pictures,Music,.config,.Xresources,.xinitrc,.i3,.mozilla,.zshrc}

####################################################################################################

# What to backup.
# The patterns are quoted here (no globbing yet) and deliberately expanded
# unquoted in the tar command below so the Local*/Google* wildcards resolve.
backup_files1="/home/wyzer/.config/google-chrome/Default/Local*/kbmfpngjjgdllneeigpgjifpgocmfgmb"
backup_files2="/home/wyzer/.config/google-chrome/Default/Favicons"
backup_files3="/home/wyzer/.config/google-chrome/Default/Google*.png"
backup_files4="/home/wyzer/.config/google-chrome/Default/Favicons-journal"

# Where to backup to.
dest="/home/wyzer/Downloads/Scripts/Test_Folder"

# Create archive filename.
day=$(date +%A)
hostname=$(hostname -s)
archive_file="$hostname-$day.tgz"

# Print start status message.
echo "Backing up $backup_files1 to $dest/$archive_file"
echo "Backing up $backup_files2 to $dest/$archive_file"
echo "Backing up $backup_files3 to $dest/$archive_file"
echo "Backing up $backup_files4 to $dest/$archive_file"
date
echo

# Backup the files using tar.
tar czf $dest/$archive_file $backup_files1 $backup_files2 $backup_files3 $backup_files4

# Print end status message.
echo
echo "Backup finished"
date

# Long listing of files in $dest to check file sizes.
ls -lh $dest
| true
|
6869f24aaa053d09f58e2b5be79e00ca42baeb23
|
Shell
|
richard534/dotfiles
|
/zshrc
|
UTF-8
| 1,035
| 3.03125
| 3
|
[] |
no_license
|
## Oh-my-zsh Config
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh

ZSH_THEME="robbyrussell"
plugins=(git tmux)
source $ZSH/oh-my-zsh.sh

## PATH Config
# Base path
PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin

## Personal dotfiles bin dir
DOTFILES_ROOT="$HOME/dotfiles"
PATH="$DOTFILES_ROOT/bin:$PATH"

PATH="$HOME/.emacs.d/bin:$PATH"

export PATH

## User Config
# Set terminal colour scheme
# NOTE(review): hard-coding TERM can fight the terminal emulator's own
# setting — confirm this is still needed.
export TERM='xterm-256color'

# set default pager to less
export PAGER=less

# set less envar (less options are taken from this envar to avoid typing each time)
# -R displays ANSI color escape sequences in "raw" form. (add colour to less)
# -S disables line wrapping. Side-scroll to see long lines.
# -X leaves file contents on the screen when less exits.
# -F makes less quit if the entire output can be displayed on one screen.
export LESS="-RSXF"

# Source secrets file
[ -f ~/.secret ] && source ~/.secret

# fzf keybindings/completion, if installed.
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh

# Prefer Homebrew's OpenSSL 3 binaries.
export PATH="/usr/local/opt/openssl@3/bin:$PATH"
| true
|
703568820f0edee1fa0d5eefcb489e246b2b0d2a
|
Shell
|
lixf/rpi_server
|
/src/tests/testPi.sh
|
UTF-8
| 853
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run three crunner clients concurrently against a pre-started cluster and
# report the elapsed wall-clock time.
echo "Basic Test: Three workers, one master, three clients."

# Timer start in nanoseconds (GNU date %N).
echo "[TEST] Starting timer:"
T="$(date +%s%N)"

echo "[TEST] CLIENT (run locally):"
CLIENT_GO=$GOPATH/src/runners/crunner/crunner.go
TESTS=$GOPATH/src/tests

# Launch all three clients in the background, remembering their pids.
go run $CLIENT_GO -b=$TESTS/shortGetPost1.txt > $TESTS/logs/client1.log &
CLIENT_PID1=$!
go run $CLIENT_GO -b=$TESTS/shortGetPost2.txt > $TESTS/logs/client2.log &
CLIENT_PID2=$!
go run $CLIENT_GO -b=$TESTS/shortGetPost3.txt > $TESTS/logs/client3.log &
CLIENT_PID3=$!

# Barrier: wait for each client in turn.
echo "[TEST] Waiting for client 1 to finish"
wait $CLIENT_PID1
echo "[TEST] Waiting for client 2 to finish"
wait $CLIENT_PID2
echo "[TEST] Waiting for client 3 to finish"
wait $CLIENT_PID3

# Elapsed nanoseconds, converted to milliseconds.
T="$(($(date +%s%N)-$T))"
M="$((T/1000000))"

echo "[TEST] Time Elapsed in milliseconds: ${M}"
echo "[TEST] Correctness not yet checked"
echo "[TEST] Finished."
| true
|
1a1a550d6fb00b8c0e8162367a14bc938b76cfc9
|
Shell
|
JamesSWiggins/aws-ohdsi-rstudio-automated-deployment
|
/rstudio_ohdsi_sparklyr_emr5.sh
|
UTF-8
| 11,573
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -x -e

# AWS EMR bootstrap script
# for installing RStudio (and Shiny) with SparkR, SparklyR, etc on AWS EMR 4.x and 5.x
#
# 2014-09-24 - schmidbe@amazon.de initial version for RHadoop packages and RStudio
# 2015-07-14 - Tom Zeng tomzeng@amazon.com, modified on top of Christopher Bozeman's "--sparkr" change to add "--sparkr-pkg"
# 2015-07-29 - Tom Zeng tomzeng@amazon.com, converted to AMI 4.0.0 compatible
# 2016-01-15 - Tom Zeng tomzeng@amazon.com, converted to AMI 4.2.0 compatible and added shiny
# 2016-10-07 - Tom Zeng tomzeng@amazon.com, added Sparklyr and improved install speed by 2-3x
# 2016-11-04 - Tom Zeng tomzeng@amazon.com, added RStudio 1.0, and used function rather than separate script for child process, removed --sparkr-pkg
# 2017-05-26 - Tom Zeng tomzeng@amazon.com, fixed the Shiny install typo, thanks to David Howell for spotting it
# 2018-09-23 - TOm Zeng tomzeng@amazon.com, fixed issues with the R 3.4 upgrade, and added CloudyR

# Usage:
# --no-rstudio - don't install rstudio-server
# --rstudio-url - the url for the RStudio RPM file
# --sparklyr - install RStudio's sparklyr package
# --sparkr - install SparkR package
# --shiny - install Shiny server
# --shiny-url - the url for the Shiny RPM file
#
# --user - set user for rstudio, default "hadoop"
# --user-pw - set user-pw for user USER, default "hadoop"
# --rstudio-port - set rstudio port, default 8787
#
# --rexamples - add R examples to the user home dir, default false
# --rhdfs - install rhdfs package, default false
# --plyrmr - install plyrmr package, default false
# --no-updateR - don't update latest R version
# --latestR - install latest R version, default false (build from source - caution, may cause problem with RStudio)
# --cloudyr - install the CloudyR packages

# check for master node
# EMR writes instance metadata to this JSON; the isMaster flag tells us
# whether this bootstrap action is running on the master node.
IS_MASTER=false
if grep isMaster /mnt/var/lib/info/instance.json | grep true;
then
IS_MASTER=true
fi
# Print an error message to stderr.
error_msg ()
{
	printf 'Error: %s\n' "$1" >&2
}
# get input parameters
# Defaults for every option flag; overridden by the argument loop below.
RSTUDIO=true
SHINY=false
REXAMPLES=false
USER="hadoop"
USERPW="hadoop"
OHDSI=false
PLYRMR=false
RHDFS=false
UPDATER=true
LATEST_R=false
RSTUDIOPORT=8787
SPARKR=false
SPARKLYR=false
RSTUDIO_URL="https://download2.rstudio.org/rstudio-server-rhel-1.0.153-x86_64.rpm"
MIN_USER_ID=400 # default is 500 starting from 1.0.44, EMR hadoop user id is 498
SHINY_URL="https://download3.rstudio.org/centos5.9/x86_64/shiny-server-1.5.1.834-rh5-x86_64.rpm"
CLOUDYR=false
while [ $# -gt 0 ]; do
case "$1" in
--sparklyr)
SPARKLYR=true
;;
--rstudio)
RSTUDIO=true
;;
--rstudio-url)
shift
RSTUDIO_URL=$1
;;
--no-rstudio)
RSTUDIO=false
;;
--shiny)
SHINY=true
;;
--shiny-url)
shift
SHINY_URL=$1
;;
--rexamples)
REXAMPLES=true
;;
--plyrmr)
PLYRMR=true
;;
--rhdfs)
RHDFS=true
;;
--updateR)
UPDATER=true
;;
--no-updateR)
UPDATER=false
;;
--latestR)
LATEST_R=true
UPDATER=false
;;
--sparkr)
SPARKR=true
;;
--rstudio-port)
shift
RSTUDIOPORT=$1
;;
--user)
shift
USER=$1
echo "USER = $USER"
;;
--user-pw)
shift
USERPW=$1
echo "PW = $USERPW"
;;
--cloudyr)
CLOUDYR=true
;;
--ohdsi)
OHDSI=true
echo "OHDSI"
;;
-*)
# do not exit out, just note failure
error_msg "unrecognized option: $1"
;;
*)
break;
;;
esac
shift
done
if [ "$IS_MASTER" = true ]; then
# signal to other BAs that this BA is running
date > /tmp/rstudio_sparklyr_emr5.tmp
fi
export MAKE='make -j 8'
sudo yum install -y xorg-x11-xauth.x86_64 xorg-x11-server-utils.x86_64 xterm libXt libX11-devel libXt-devel libcurl-devel git compat-gmp4 compat-libffi5
# install latest R version from AWS Repo
if [ "$UPDATER" = true ]; then
sudo yum update R-core R-base R-core-devel R-devel -y
if [ -f /usr/lib64/R/etc/Makeconf.rpmnew ]; then
sudo cp /usr/lib64/R/etc/Makeconf.rpmnew /usr/lib64/R/etc/Makeconf
fi
if [ -f /usr/lib64/R/etc/ldpaths.rpmnew ]; then
sudo cp /usr/lib64/R/etc/ldpaths.rpmnew /usr/lib64/R/etc/ldpaths
fi
fi
# create rstudio user on all machines
# we need a unix user with home directory and password and hadoop permission
if [ "$USER" != "hadoop" ]; then
sudo adduser $USER
fi
sudo sh -c "echo '$USERPW' | passwd $USER --stdin"
mkdir /mnt/r-stuff
cd /mnt/r-stuff
# update to latest R version
if [ "$LATEST_R" = true ]; then
pushd .
mkdir R-latest
cd R-latest
wget http://cran.r-project.org/src/base/R-latest.tar.gz
tar -xzf R-latest.tar.gz
sudo yum install -y gcc gcc-c++ gcc-gfortran
sudo yum install -y readline-devel cairo-devel libpng-devel libjpeg-devel libtiff-devel
cd R-3*
./configure --with-readline=yes --enable-R-profiling=no --enable-memory-profiling=no --enable-R-shlib --with-pic --prefix=/usr --with-x --with-libpng --with-jpeglib --with-cairo --enable-R-shlib --with-recommended-packages=yes
make -j 8
sudo make install
sudo su << BASH_SCRIPT
echo '
export PATH=${PWD}/bin:$PATH
' >> /etc/profile
BASH_SCRIPT
popd
fi
sudo sed -i 's/make/make -j 8/g' /usr/lib64/R/etc/Renviron
# set unix environment variables
sudo su << BASH_SCRIPT
echo '
export HADOOP_HOME=/usr/lib/hadoop
export HADOOP_CMD=/usr/bin/hadoop
export HADOOP_STREAMING=/usr/lib/hadoop-mapreduce/hadoop-streaming.jar
export JAVA_HOME=/etc/alternatives/jre
' >> /etc/profile
BASH_SCRIPT
sudo sh -c "source /etc/profile"
# fix hadoop tmp permission
sudo chmod 777 -R /mnt/var/lib/hadoop/tmp
# fix java binding - R and packages have to be compiled with the same java version as hadoop
sudo R CMD javareconf
# install rstudio
# only run if master node
if [ "$IS_MASTER" = true -a "$RSTUDIO" = true ]; then
# install Rstudio server
# please check and update for latest RStudio version
RSTUDIO_FILE=$(basename $RSTUDIO_URL)
wget $RSTUDIO_URL
sudo yum install --nogpgcheck -y $RSTUDIO_FILE
# change port - 8787 will not work for many companies
sudo sh -c "echo 'www-port=$RSTUDIOPORT' >> /etc/rstudio/rserver.conf"
sudo sh -c "echo 'auth-minimum-user-id=$MIN_USER_ID' >> /etc/rstudio/rserver.conf"
sudo perl -p -i -e "s/= 5../= 100/g" /etc/pam.d/rstudio
sudo rstudio-server stop || true
sudo rstudio-server start
fi
# add examples to user
# only run if master node
if [ "$IS_MASTER" = true -a "$REXAMPLES" = true ]; then
# and copy R example scripts to user's home dir amd set permission
wget --no-check-certificate https://raw.githubusercontent.com/tomz/emr-bootstrap-actions/master/R/Hadoop/examples/rmr2_example.R
wget --no-check-certificate https://raw.githubusercontent.com/tomz/emr-bootstrap-actions/master/R/Hadoop/examples/biganalyses_example.R
wget --no-check-certificate https://raw.githubusercontent.com/tomz/emr-bootstrap-actions/master/R/Hadoop/examples/change_pw.R
#sudo cp -p *.R /home/$USER/.
sudo mv *.R /home/$USER/.
sudo chown $USER:$USER -Rf /home/$USER
fi
# install required packages
sudo R --no-save << R_SCRIPT
install.packages(c('RJSONIO', 'itertools', 'digest', 'Rcpp', 'functional', 'httr', 'plyr', 'stringr', 'reshape2', 'caTools', 'rJava', 'devtools', 'DBI', 'ggplot2', 'dplyr', 'R.methodsS3', 'Hmisc', 'memoise', 'rjson'),
repos="http://cran.rstudio.com")
# here you can add your required packages which should be installed on ALL nodes
# install.packages(c(''), repos="http://cran.rstudio.com", INSTALL_opts=c('--byte-compile') )
R_SCRIPT
# install rmr2 package
pushd .
rm -rf RHadoop
mkdir RHadoop
cd RHadoop
curl --insecure -L https://github.com/RevolutionAnalytics/rmr2/releases/download/3.3.1/rmr2_3.3.1.tar.gz | tar zx
sudo R CMD INSTALL --byte-compile rmr2
popd
# install rhdfs package
if [ "$RHDFS" = true ]; then
curl --insecure -L https://raw.github.com/RevolutionAnalytics/rhdfs/master/build/rhdfs_1.0.8.tar.gz | tar zx
sudo R CMD INSTALL --byte-compile --no-test-load rhdfs
fi
# install plyrmr package
if [ "$PLYRMR" = true ]; then
curl --insecure -L https://github.com/RevolutionAnalytics/plyrmr/releases/download/0.6.0/plyrmr_0.6.0.tar.gz | tar zx
sudo R CMD INSTALL --byte-compile plyrmr
fi
if [ "$CLOUDYR" = true ]; then
sudo R --no-save << R_SCRIPT
install.packages(c("base64enc","drat"),repos = "http://cran.us.r-project.org")
drat::addRepo("cloudyr", "http://cloudyr.github.io/drat")
install.packages(c("aws.signature","aws.ec2metadata","aws.efs"), repos = c(cloudyr = "http://cloudyr.github.io/drat"))
R_SCRIPT
fi
# the follow code will spawn a child process which waits for dependencies to be installed before proceed
child_process() {
if [ "$SPARKR" = true ] || [ "$SPARKLYR" = true ]; then
cat << 'EOF' > /tmp/Renvextra
JAVA_HOME="/etc/alternatives/jre"
HADOOP_HOME_WARN_SUPPRESS="true"
HADOOP_HOME="/usr/lib/hadoop"
HADOOP_PREFIX="/usr/lib/hadoop"
HADOOP_MAPRED_HOME="/usr/lib/hadoop-mapreduce"
HADOOP_YARN_HOME="/usr/lib/hadoop-yarn"
HADOOP_COMMON_HOME="/usr/lib/hadoop"
HADOOP_HDFS_HOME="/usr/lib/hadoop-hdfs"
YARN_HOME="/usr/lib/hadoop-yarn"
HADOOP_CONF_DIR="/usr/lib/hadoop/etc/hadoop/"
YARN_CONF_DIR="/usr/lib/hadoop/etc/hadoop/"
HIVE_HOME="/usr/lib/hive"
HIVE_CONF_DIR="/usr/lib/hive/conf"
HBASE_HOME="/usr/lib/hbase"
HBASE_CONF_DIR="/usr/lib/hbase/conf"
SPARK_HOME="/usr/lib/spark"
SPARK_CONF_DIR="/usr/lib/spark/conf"
PATH=${PWD}:${PATH}
EOF
cat /tmp/Renvextra | sudo tee -a /usr/lib64/R/etc/Renviron
# wait SparkR file to show up
while [ ! -f /var/run/spark/spark-history-server.pid ]
do
sleep 5
done
fi
# install SparkR or the out-dated SparkR-pkg
if [ "$SPARKR" = true ]; then
sudo mkdir /mnt/spark
sudo chmod a+rwx /mnt/spark
if [ -d /mnt1 ]; then
sudo mkdir /mnt1/spark
sudo chmod a+rwx /mnt1/spark
fi
sudo R --no-save << R_SCRIPT
library(devtools)
install('/usr/lib/spark/R/lib/SparkR')
# here you can add your required packages which should be installed on ALL nodes
# install.packages(c(''), repos="http://cran.rstudio.com", INSTALL_opts=c('--byte-compile') )
R_SCRIPT
fi
if [ "$SPARKLYR" = true ]; then
sudo R --no-save << R_SCRIPT
library(devtools)
devtools::install_github("rstudio/sparklyr")
install.packages(c('nycflights13', 'Lahman', 'data.table'),
repos="http://cran.rstudio.com" )
R_SCRIPT
fi
if [ "$CLOUDYR" = true ]; then
sudo R --no-save << R_SCRIPT
install.packages(c("aws.s3","aws.ec2"), repos = c(cloudyr = "http://cloudyr.github.io/drat"))
R_SCRIPT
fi
if [ "$IS_MASTER" = true ]; then
if [ "$SHINY" = true ]; then
# install Shiny server
SHINY_FILE=$(basename $SHINY_URL)
wget $SHINY_URL
sudo yum install --nogpgcheck -y $SHINY_FILE
sudo R --no-save <<R_SCRIPT
install.packages(c('shiny','rmarkdown'),
repos="http://cran.rstudio.com")
R_SCRIPT
fi
sudo rm -f /tmp/rstudio_sparklyr_emr5.tmp
#the following are needed only if not login in as hadoop
if [ "$USER" != "hadoop" ]; then
while [ ! -f /var/run/hadoop-hdfs/hadoop-hdfs-namenode.pid ]
do
sleep 5
done
sudo -u hdfs hdfs dfs -mkdir /user/$USER
sudo -u hdfs hdfs dfs -chown $USER:$USER /user/$USER
sudo -u hdfs hdfs dfs -chmod -R 777 /user/$USER
fi
sudo rstudio-server restart || true
fi # IS_MASTER
if [ "$OHDSI" = true ]; then
echo "OHDSI = true $OHDSI"
sudo yum install -y cairo-devel
wget https://repo.continuum.io/archive/Anaconda2-5.1.0-Linux-x86_64.sh
chmod +x Anaconda2-5.1.0-Linux-x86_64.sh
sudo ./Anaconda2-5.1.0-Linux-x86_64.sh -b -p /usr/anaconda2/
sudo yum install -y python-scipy
sudo pip install scipy
sudo pip install sklearn
fi # OHDSI
echo "rstudio server and packages installation completed"
} # end of child_process
child_process &
echo "bootstrap action completed after spwaning child process"
| true
|
75714726ec6e64b4d6cd146442971035b14caa47
|
Shell
|
eckinox/pdf-bundle
|
/DEV/hooks/pre-commit
|
UTF-8
| 1,730
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Git pre-commit hook: applies PHP-CS-Fixer auto-fixes to the staged files,
# then runs every configured CI script and blocks the commit if any fails.
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../.."

# CI scripts to run (located in DEV/cs/) and their display names, index-aligned.
CI_SCRIPTS=("php-cs-fixer.sh" "phpstan.sh" "phpmd.sh" "stylelint.sh")
CI_SCRIPT_NAMES=("PHP-CS-Fixer" "PHPStan" "PHPMD" "CSS stylelint")
scriptCount=${#CI_SCRIPTS[@]}

# Staged files (deletions excluded); re-staged after the automatic fixes below.
addedFiles=$(git diff --diff-filter=d --cached --name-only)

# verbose MESSAGE [TYPE] [ADD_TRAILING_NEWLINE]
# Prints MESSAGE styled by TYPE ("error", "success" or "info").
# Errors are written to stderr; everything else goes to stdout.
verbose()
{
  msg=${1}
  type=${2:-""}
  addTrailingNewline=${3:-0}

  if [ "${type}" = "error" ]; then
    msg="\033[1m\e[41m${msg}\e[0m"
  elif [ "${type}" = "success" ]; then
    msg="\033[1m\e[42m${msg}\e[0m"
  elif [ "${type}" = "info" ]; then
    msg="\e[44m${msg}\e[0m"
  fi

  endNewline=""
  if [ "${addTrailingNewline}" -eq 1 ]; then
    endNewline="\n"
  fi

  if [ "${type}" = "error" ]; then
    >&2 printf "${msg}${endNewline}"
  else
    printf "${msg}${endNewline}"
  fi
}

# Run PHP-CS-Fixer with automatic fixes applied in place.
vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.dist.php -q

# If there were any staged files, re-add them so the automatic fixes are part
# of the commit. Intentionally unquoted: $addedFiles is a newline-separated
# list that must word-split into individual paths for "git add".
if [ -n "$addedFiles" ]
then
  git add $addedFiles
fi

# Loop through scripts and execute them.
# If no error is thrown, mute all outputs.
# Otherwise, stop execution and output the error(s).
for ((i = 0; i < scriptCount; i++)); do
  filename=${CI_SCRIPTS[$i]}
  label=${CI_SCRIPT_NAMES[$i]}

  verbose "Running ${label}..." "info"

  # BUG FIX: the per-iteration script name had been corrupted to the literal
  # "$(unknown)"; the intended command is the CI script selected above.
  output=$("${BASEDIR}/DEV/cs/${filename}" 2>&1)

  if [ $? -ne 0 ]; then
    printf " ❌\n"
    verbose "Failed CI test ${label} (DEV/cs/${filename}). View output below." "error" 1
    printf "%s\n" "${output}"
    exit 1
  else
    echo " ✅"
  fi
done

# All systems go: tests ran without errors!
verbose "Tests passed with flying colors: all systems go!\e[0m 🚀" "success" 1
| true
|
f59a46ed46a93d4468ce658f758938d7c664cdd0
|
Shell
|
mrusme/dotfiles
|
/usr/local/bin/waybar-cpu
|
UTF-8
| 336
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# waybar-cpu get|set
#   set: pick a CPU frequency governor via bemenu and apply it with cpupower.
#   get: print the current cpufreq policy (last 3 lines of frequency-info -p).
#
# BUG FIX: the script uses bash-only [[ ]] tests but declared "#!/bin/sh";
# on systems where /bin/sh is not bash it would fail at the first test.

cmd="$1"

if [[ "$cmd" == "set" ]]
then
  # Offer the governors supported by cpu0, one per line; empty pick aborts.
  governor=$(tr ' ' '\n' < /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors | grep -v '^$' | bemenu)
  if [[ -n "$governor" ]]
  then
    sudo cpupower frequency-set -g "$governor"
  fi
elif [[ "$cmd" == "get" ]]
then
  cpupower frequency-info -p | tail -n -3
fi
| true
|
f95c697c6c6c13b6b1e879967c6b94567c6a5809
|
Shell
|
wbur/auto-tweet-videos
|
/create_srt_transcript.sh
|
UTF-8
| 1,029
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Checks the status of the AWS Transcribe job named SHOW_YYYY_MM_DD (today's
# date). If the status is COMPLETED, downloads the transcript JSON and
# converts it into an SRT subtitle file named SHOW_YYYY_MM_DD.srt.

year=$(date +'%Y')
month=$(date +'%m')
day=$(date +'%d')

job="SHOW_${year}_${month}_${day}"
temp_job_file="/tmp/${job}.json"
base="SHOW_${year}_${month}_${day}"
transcript_file="${base}.mp4.transcript"

# BUG FIX: the job description was previously captured into an unused
# "response" variable even though the output was already redirected into
# the temp file (so "response" was always empty); write the JSON straight
# to the file instead.
aws transcribe get-transcription-job --transcription-job-name "$job" | jq . > "$temp_job_file"

status=$(jq -r .TranscriptionJob.TranscriptionJobStatus "$temp_job_file")

# Quoted so the test is well-formed even when the status could not be read.
if [ "$status" = "COMPLETED" ];
then
  uri=$(jq -r .TranscriptionJob.Transcript.TranscriptFileUri "$temp_job_file")
  cat "$temp_job_file"
  # Create the per-year transcripts dir, if needed.
  mkdir -p "transcripts/$year"
  # Download the transcript JSON, then convert it to SRT.
  wget -O "$transcript_file.json" "$uri" && mv "$transcript_file.json" "/tmp/$transcript_file.json"
  python amazon-transcribe-JSON-to-SRT.py "/tmp/$transcript_file.json" > "$base.srt"
  rm "/tmp/$transcript_file.json"
fi

rm -f -- "$temp_job_file"
| true
|
7e7f0c8ce20a7349bec98035c740caba70ddfba3
|
Shell
|
LiJianYing-arch/endosperm_snRNAseq_2021
|
/scripts/merge_many_files.sh
|
UTF-8
| 6,705
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ------------------------------------------------------------------------------------
# v1.0 by Colette L. Picard
# 06/23/2018
# ------------------------------------------------------------------------------------
# -------------------------
# Version history:
# v.1.0: initial build - 06/23/2018
# -------------------------
# Description printed when "help" option specified:
# NOTE: "read -d ''" always returns non-zero at EOF; harmless here because the
# script does not run under "set -e".
read -d '' usage <<"EOF"
v.1.0 by Colette L Picard, 06/23/2018
Simple wrapper script for merge_by_column.R to allow for efficient merging of large
numbers of files, when the file being created will be very large. Normally, when
just merging using merge_by_column.R in a loop over each individual file, the
merged file becomes so big that the I/O and the merging in R become very inefficient,
which is exacerbated when this is repeated many times.
This script instead takes a list of all the files you want to merge (provided in
the first input, the inputfilelist.txt), and splits them into separate batches to
be merged. Calls itself recursively until the number of input files is less than 20.
If number of input files is ≤ 20, this script simply merges them one after the other.
Speed-up is achieved by recursive calls to this script, submitted to LSF so they
can run simultaneously.
inputfilelist.txt = list of files to merge
outfile.txt = name for output file
mergebylist = comma-separated list of columns to merge over (see merge_by_column.R, same bhav)
howtomerge = one of three values (see merge_by_column.R also):
- "all" = all values kept regardless of merge status
- "allx" = all values from FIRST file kept regardless of merge status, values from other files
that didnt merge to first are discarded
- "merged" = keep only values that merged across all files
DEFAULT: "all"
Usage:
merge_many_files.sh [options] inputfilelist.txt outfile.txt mergebylist howtomerge
example:
merge_many_files.sh [options] inputfilelist.txt outfile.txt geneID merge
------------------------------------------------------------------------------------
EOF
# ----------------------
# MAIN
# ----------------------
[[ $# -eq 0 ]] && { printf "%s\n" "$usage"; exit 0; } # if no user-supplied arguments, print usage and exit
# ----------------------
# Get user-specified arguments
# ----------------------

# Initiate environment
scriptDir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) # location of this script
workdir=$( pwd ) # working directory

# Required arguments:
# ----------------------
[ "$#" -le 2 ] && { echo "Error: at least 3 arguments required (see usage)"; exit 1; }

inputfilelist="$1"
outfile="$2"
mergebylist="$3"
# Optional 4th argument: merge method, defaulting to "all".
[ "$#" -eq 4 ] && howtomerge="$4" || howtomerge="all"
[ "$howtomerge" != "all" ] && [ "$howtomerge" != "allx" ] && [ "$howtomerge" != "merged" ] && { echo "Error: value of 4th parameter must be either \"all\", \"allx\", or \"merged\""; exit 1; }

# Location of scripts (so that merge_by_column.R can be called)
# ----------------------
path_to_scripts="$scriptDir"
[ ! -f "$path_to_scripts/merge_by_column.R" ] && { echo "Could not find script merge_by_column.R in $path_to_scripts"; exit 1; }

# Check all inputs provided
# ----------------------
# BUG FIX: these messages previously said "Two arguments required" even though
# the script needs at least three (file list, output name, merge columns).
[ -z "$inputfilelist" ] && { echo "At least 3 arguments required (see usage)"; exit 1; }
[ -z "$outfile" ] && { echo "At least 3 arguments required (see usage)"; exit 1; }
[ ! -f "$inputfilelist" ] && { echo "Could not open input file $inputfilelist"; exit 1; }
[ -f "$outfile" ] && { echo "Output file already exists; exiting"; exit 1; }

# Output basic info to user
# ----------------------
echo "Running merge_many_files.sh v1.0 (06/23/2018):"
echo "-------------------------"
echo "Working directory: $( pwd )"
echo "List of files to merge: $inputfilelist"
echo "Output file name: $outfile"
echo "Merging files by column(s) named: $mergebylist"
echo "Merging method: $howtomerge"
echo "-------------------------"

# Get all files to merge and check that they exist
# ----------------------
inputfilearray=()
while read ll; do
	[ -f "$ll" ] || { echo "Error: could not open input file $ll"; exit 1; }
	# BUG FIX: quote "$ll" so paths containing whitespace are stored as a
	# single array element instead of being word-split.
	inputfilearray+=( "$ll" )
done < "$inputfilelist"
echo "${#inputfilearray[@]} files to be merged"
# Merge files:
# ----------------------
if [ $( wc -l "$inputfilelist" | awk '{print $1}' ) -le 20 ]; then
	# base case: 20 or fewer inputs -> seed the output with the first file,
	# then fold each remaining file in one at a time with merge_by_column.R
	cat "${inputfilearray[0]}" > "$outfile"
	for ((i=1;i<${#inputfilearray[@]};++i)); do
		$path_to_scripts/merge_by_column.R "$outfile" "${inputfilearray[i]}" "$mergebylist" "$outfile" --tokeep "$howtomerge" > /dev/null
		[ $? -eq 0 ] || { echo "Error in merge_by_column.R"; echo "Failed command: $path_to_scripts/merge_by_column.R $outfile ${inputfilearray[i]} $mergebylist $outfile --tokeep $howtomerge"; exit 1; }
	done
else
	# recursive case: split the input list into subsets of ~sqrt(N) files
	# (at least 20 each), merge each subset via a recursive call submitted
	# to LSF, then merge the per-subset results back together
	baseoutfile="${outfile%.*}"
	sqrt=$(echo "sqrt ( ${#inputfilearray[@]} )" | bc -l | cut -f1 -d '.')
	[ "$sqrt" -le 20 ] && sqrt=20
	echo "Separating the ${#inputfilearray[@]} into $sqrt subsets to be merged separately, then merged back together"
	subsetlist=()
	for ((i=0;i<${#inputfilearray[@]};++i)); do
		# integer subset index (1-based); record each distinct index once
		subsetfilenum=$( echo "1+ ($i / $sqrt)" | bc -l | cut -f1 -d '.')
		[[ " ${subsetlist[*]} " == *" $subsetfilenum "* ]] || subsetlist+=( $subsetfilenum )
		echo "${inputfilearray[i]}" >> "${baseoutfile}_filelist_tmp${subsetfilenum}.txt"
	done
	# call merge_many_files on each subset, wait until finished then merge subsets
	# ("bsub -K" blocks until the LSF job completes, so each background shell's
	# exit status reflects the job's outcome; PIDs are collected for wait)
	pid=()
	for ((i=0;i<${#subsetlist[@]};++i)); do
		cmd="$scriptDir/merge_many_files.sh ${baseoutfile}_filelist_tmp${subsetlist[i]}.txt ${baseoutfile}_tmp${subsetlist[i]}.txt $mergebylist $howtomerge"
		bsub -o "${baseoutfile}_tmp${subsetlist[i]}_log.txt" -K "$cmd" & pid[i]=$!
	done
	for ((i=0;i<${#subsetlist[@]};++i)); do
		wait "${pid[i]}" || { echo "merge_many_files failed, see ${baseoutfile}_tmp${subsetlist[i]}_log.txt"; exit 1; }
	done
	# once all jobs completed, merge files back together
	echo "Done. Merging subset files back together..."
	echo "Merging in subset 1 out of ${#subsetlist[@]}"
	cat "${baseoutfile}_tmp${subsetlist[0]}.txt" > "$outfile"
	for ((i=1;i<${#subsetlist[@]};++i)); do
		echo "Merging in subset $(( $i + 1 )) out of ${#subsetlist[@]}"
		$path_to_scripts/merge_by_column.R "$outfile" "${baseoutfile}_tmp${subsetlist[i]}.txt" "$mergebylist" "$outfile" --tokeep "$howtomerge" > /dev/null
		[ $? -eq 0 ] || { echo "Error in merge_by_column.R, exiting"; exit 1; }
	done
	# remove all temp files
	for ((i=0;i<${#subsetlist[@]};++i)); do
		rm "${baseoutfile}_tmp${subsetlist[i]}_log.txt"
		rm "${baseoutfile}_tmp${subsetlist[i]}.txt"
		rm "${baseoutfile}_filelist_tmp${subsetlist[i]}.txt"
	done
fi
| true
|
f819ab1293e0fdeaf99e283f2605bc9f442fb7e4
|
Shell
|
mdamt/blankon-installer
|
/scripts/blankon-session-try-installer
|
UTF-8
| 607
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launches the BlankOn installer when running from a live-boot session and the
# TRY_B_I environment variable is set; afterwards sources any files generated
# by the installer (post-install script, locale, timezone).

# Only run when explicitly requested via $TRY_B_I.
if [ -z "$TRY_B_I" ];then
	exit
fi

# Proceed only on a live-boot system ("boot=live" on the kernel command line)
# where the installer binary is present and executable.
grep "boot=live" /proc/cmdline
if [ $? -eq 0 -a -x /usr/bin/blankon-installer ];then
	rm -f /run/locale
	sudo -E nice -n -20 blankon-installer
	# Run post-install script when created by blankon-installer
	if [ -f /tmp/post-install.sh ];then
		. /tmp/post-install.sh
	fi
	# Source locale environment variable generated by installer
	if [ -f /run/locale ];then
		. /run/locale
	fi
	# Source timezone environment variable generated by installer
	# BUG FIX: this test was written "[ -f /run/timezone]" (missing space
	# before the closing bracket), a test-syntax error at runtime.
	if [ -f /run/timezone ];then
		. /run/timezone
	fi
fi
| true
|
d149c7c9c40355ed679233dffa0d8360d89e3512
|
Shell
|
YJMOD-Dock/CentOS7_NvidiaDriver-Cuda10.1_install
|
/Nvidia_Driver_install_3.sh
|
UTF-8
| 973
| 3.53125
| 4
|
[] |
no_license
|
echo "install cuda10.1"
sub_dist=$(cat /etc/redhat-release)
arch=$(uname -m)
if [ ${sub_dist:0:6}${sub_dist:21:1} = CentOS7 ];then
dist="rhel7"
echo "Version : ${sub_dist:0:22}"
else
echo "check your OS"
fi
wget https://developer.download.nvidia.com/compute/cuda/repos/$dist/$arch/cuda-repo-$dist-10.1.105-1.$arch.rpm
sudo rpm -import https://developer.download.nvidia.com/compute/cuda/repos/$dist/$arch/7fa2af80.pub
sudo rpm -i cuda-repo-$dist-10.1.105-1.$arch.rpm
sudo yum clean all
wget http://developer.download.nvidia.com/compute/machine-learning/repos/$dist/$arch/nvidia-machine-learning-repo-$dist-1.0.0-1.$arch.rpm
sudo yum install -y ./nvidia-machine-learning-repo-$dist-1.0.0-1.$arch.rpm
sudo rm cuda-repo-$dist-10.1.105-1.$arch.rpm
sudo rm nvidia-machine-learning-repo-$dist-1.0.0-1.$arch.rpm
read -p "System reboot is required. Would you like to restart now? [y/n] " yn
case $yn in
[Yy]* ) echo "System restarting"
sudo reboot;;
[Nn]* ) exit;;
esac
| true
|
a6f45b3bb16c2d74f47031004c741c219cd6a302
|
Shell
|
itseytlin/dotfiles
|
/zshrc
|
UTF-8
| 2,364
| 2.921875
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH="/Users/itseytlin/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="dieter"
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# CASE_SENSITIVE="true"
# HYPHEN_INSENSITIVE="true"
# DISABLE_AUTO_UPDATE="true"
# DISABLE_UPDATE_PROMPT="true"
# export UPDATE_ZSH_DAYS=13
# DISABLE_MAGIC_FUNCTIONS=true
# DISABLE_LS_COLORS="true"
# DISABLE_AUTO_TITLE="true"
# ENABLE_CORRECTION="true"
# COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(copydir
colorize
colored-man-pages
osx
zsh-autosuggestions
)
# NOTE: a "command-not-found" plugin was previously in this list.
source $ZSH/oh-my-zsh.sh
# User configuration
# Preferred editor for local and remote sessions (currently disabled)
#if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
#else
# export EDITOR='mvim'
#fi
############################################################
#
# Personal Aliases
#
# .zshrc access and sourcing
alias zshconfig="vim ~/.zshrc"
alias zshsource="source ~/.zshrc"
#
# accessing project folders (local projects and iCloud Drive)
alias cdpr="cd ~/Projects"
alias cdfc="cd ~/Library/Mobile\ Documents/com~apple~CloudDocs/FileCloud"
alias cdic="cd ~/Library/Mobile\ Documents/com~apple~CloudDocs"
#
# more useful aliases (mv prompts before overwriting)
alias mv="mv -i"
alias ll="ls -lah"
alias up="cd .."
alias la="ls -a"
#
# git aliases: compact graph view of all branches
alias gl="git log --all --graph --decorate --oneline"
############################################################
#
# VIM editing mode for the command line (currently disabled)
#bindkey -v
#
# Exporting PATH environment variable to shell
PATH="/Users/itseytlin/Projects/bin:$PATH"
export PATH
############################################################
#
# Functions
#
# realpath FILE: print the absolute path of FILE.
function realpath { echo $(cd $(dirname $1); pwd)/$(basename $1); }
############################################################
# Enable fzf key bindings/completion when installed.
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
daf5995dc535834c2b1c97b9dec7b38c74ead46b
|
Shell
|
josego85/ProyectosBeta
|
/scripts/LibreOffice/instalarLibreOffice6_1_0__64bits.sh
|
UTF-8
| 1,744
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Author: josego
# Blog: proyectosbeta.net
# Created: 15 August 2018, 19:56.
# Completely removes any existing LibreOffice installation, then installs
# LibreOffice 6.1.0 (64-bit) with the Spanish language pack and the matching
# Spanish help pack from the official .deb tarballs.

# Remove any pre-existing LibreOffice packages.
# BUG FIX: the package patterns are quoted so the shell cannot glob-expand
# "libreoffice*" against files in the current directory; apt itself must
# receive the wildcard. "-y" was also missing from "apt-get remove".
apt-get purge 'libreoffice*' -y
apt-get remove 'libreoffice*' -y
apt-get autoremove -y

# Work in a fresh directory under /tmp.
# BUG FIX: each "cd" is now checked so that a failed download/extraction can
# no longer lead to "dpkg -i *.deb" running in the wrong directory.
cd /tmp/ || exit 1
mkdir -p libreoffice
cd libreoffice || exit 1

# Download the three tarballs: main packages, Spanish langpack, Spanish helppack.
wget -c http://download.documentfoundation.org/libreoffice/stable/6.1.0/deb/x86_64/LibreOffice_6.1.0_Linux_x86-64_deb.tar.gz
wget -c http://download.documentfoundation.org/libreoffice/stable/6.1.0/deb/x86_64/LibreOffice_6.1.0_Linux_x86-64_deb_langpack_es.tar.gz
wget -c http://download.documentfoundation.org/libreoffice/stable/6.1.0/deb/x86_64/LibreOffice_6.1.0_Linux_x86-64_deb_helppack_es.tar.gz

# Unpack and install the main LibreOffice packages.
tar xzvf LibreOffice_6.1.0_Linux_x86-64_deb.tar.gz
cd LibreOffice_6.1.0.3_Linux_x86-64_deb/DEBS/ || exit 1
dpkg -i *.deb

# Unpack and install the Spanish language pack.
tar xzvf /tmp/libreoffice/LibreOffice_6.1.0_Linux_x86-64_deb_langpack_es.tar.gz
cd LibreOffice_6.1.0.3_Linux_x86-64_deb_langpack_es/DEBS/ || exit 1
dpkg -i *.deb

# Unpack and install the Spanish help pack.
tar xzvf /tmp/libreoffice/LibreOffice_6.1.0_Linux_x86-64_deb_helppack_es.tar.gz
cd LibreOffice_6.1.0.3_Linux_x86-64_deb_helppack_es/DEBS/ || exit 1
dpkg -i libobasis6.1-es-help_6.1.0.3-3_amd64.deb

# Remove the download/extraction directory.
rm -R /tmp/libreoffice
| true
|
e55e271df6c031f15c4c8e8ca4feddd7e789758e
|
Shell
|
ninja971/hammer-scripts
|
/cap63-setup.sh
|
UTF-8
| 15,515
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# vim: ft=sh:sw=2:et
cat <<EOF
Capsule-6.3 Demo/Test/PoC Setup Script
========================================
This script is intended to automate a Capsule-6.3 server installation on top of a Satellite-6.3 setup for demo, test or PoC purposes.
The script helps to perform the initial steps to finish the prerequisites, it installs and configures the software,
it fills the Capsule with various types of content, it creates activation keys and content views,
it customizes some smart class parameter overrides and finally installs a couple of new hosts.
With this setup, the script is well suited as a Capsule-6 test.
In an PoC scenario, it allows to emphasize on the actual use cases and requirements from the very first moment.
This demo setup shows some of the features and benefits of Capsule-6.3:
- Capsule is configured to use the existing IPA CA (again, IPA is prerequisite, out of scope for this demo).
- Capsule is configured to register hosts automatically into the IPA REALM.
- The simple baseline host is hardened so that root login is disabled.
This demo is intended to run in a VM on a libvirt/KVM host. The Capsule VM requires least 8GB of RAM and 4 cores. 12GB RAM and 6 cores are recommended.
I recommend using a dedicated server from a hosting provider which is available for less than €50 per month.
The network setup must allow Capsule to run DHCP and TFTP on a dedicated interface.
With all features enabled, the demo setup will consume around 180GB of disk space for package content in /var/lib/pulp.
Using the immediate sync policy, the content sync alone takes more than 24 hours even with a high bandwidth internet connection.
In preparation for a Capsule-6 PoC this script can be used to perform this time consuming procedure ahead of the actual PoC engagement.
There is at least one manual intervention required directly after
satellite-install has finished and a second halt is included right before the
demo hosts are created at the end. So be prepared to be around for at least an
hour or so after starting the script to proceed after the first manual
intervention. After that, you may go home and proceed the next day...
You may want to run this script in a screen session.
The header section of this script declares a lot of variables that are used later on to customize the script.
Read through the values carefully and change where appropriate.
When finished, delete or comment the following exit command.
EOF
exit 0
set -x
set -e
longname=$(hostname | tr '.' '_')
# STAGE Level:
# 1 = preqequisite preparation
# 2 = Capsule 6 installation
export STAGE=1
# This demo setup is built with IPA integration as one important feature to show.
# While it is possible to use IPA and leave Capsule with the self signed internal CA cert,
# it is recommended to demonstrate/test this feature as well.
# The IPA_EXT_CERT switch is mainly offered for debugging purposes.
export IPA_EXT_CERT=true
# The following block of parameters needs to reflect your environment.
# Most of the parameters are used with the satellite-installer
# The purpose should be pretty much self explanatory. In doubt, look at 'satellite-installer --help'
export SAT_IP=172.24.200.3
export SAT_NAME=satellite.example.com
export CAP_IP=172.24.100.5
export ORG="ACME"
export LOC="Elsewhere"
export ADMIN=capsule
export ADMIN_PASSWORD=',4d4jynIt3KZOD'
export IPA_SERVER=ipa.example.com
export DOMAIN=example.com
export REALM=EXAMPLE.COM
export C=DE
export ST=Berlin
export L=Berlin
export OU=IT-Ops
export DNS=172.24.100.2
export DNS_REV=100.24.172.in-addr.arpa
export DHCP_RANGE="172.24.100.20 172.24.100.50"
export DHCP_GW=172.24.100.1
export DHCP_DNS=172.24.100.2
export CAP_INTERFACE=eth1
export SUBNET=172.24.100.0
export SUBNET_MASK=255.255.255.0
export SUBNET_NAME='elsenet'
export SUBNET_IPAM_BEGIN=172.24.100.100
export SUBNET_IPAM_END=172.24.100.150
# The host prefix is used to distinguish the demo hosts created at the end of this script.
export HOST_PREFIX='el-'
# This is the default password used in hostgroup declarations.
export HOST_PASSWORD='Geheim!!'
# This demo is intended to run on a simple libvirt/KVM hypervisor.
# A dedicated server hosted by an internet service provider may be a cost effective choice for this ressource.
export CONFIGURE_LIBVIRT_RESOURCE=true
export COMPUTE_RES_FQDN="kvm2.hoster.com"
export COMPUTE_RES_NAME="Else"
# This script alternatively allows to use a RHV virtualization backend using the following parameters
export CONFIGURE_RHEV_RESOURCE=false
# export COMPUTE_RES_FQDN="rhv.example.com"
# export COMPUTE_RES_NAME="RHV"
export RHV_VERSION_4=true
export RHV_RES_USER="admin@internal"
export RHV_RES_PASSWD="Geheim!!"
export RHV_RES_UUID="Default"
if [ $CONFIGURE_RHEV_RESOURCE = 'true' -a $CONFIGURE_LIBVIRT_RESOURCE = 'true' ]; then
echo "Only one of CONFIGURE_RHEV_RESOURCE and CONFIGURE_LIBVIRT_RESOURCE may be true."
exit 1
fi
# This is the end of the header section.
# Depending on the STAGE declared above, the script will start at some point and continue all the way to the end -- if everything goes well ;-)
# As mentioned before, there is a halt for manual intervention right after satellite-install and a second halt at the end before creating the demo hosts.
# BEGIN preqeq prep
if [ $STAGE -le 1 ]; then
echo "${CAP_IP} $(hostname)" >>/etc/hosts
rpm -Uvh http://$SAT_NAME/pub/katello-ca-consumer-latest.noarch.rpm || true
subscription-manager register || true
subscription-manager repos --disable "*"
subscription-manager repos --enable=rhel-7-server-rpms \
--enable=rhel-server-rhscl-7-rpms \
--enable=rhel-7-server-optional-rpms \
--enable=rhel-7-server-satellite-tools-6.3-rpms \
--enable=rhel-7-server-satellite-capsule-6.3-rpms \
--enable=rhel-7-server-satellite-capsule-6.3-puppet4-rpms
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm || true
yum-config-manager --disable epel
yum -y upgrade
yum install -y screen yum-utils vim katello-agent
yum install -y ipa-client ipa-admintools
# ipa-client-install --server=$IPA_SERVER --domain=$DOMAIN --realm=$REALM
kinit admin@${REALM}
ipa service-add HTTP/$(hostname)
if [ $IPA_EXT_CERT = 'true' ]; then
mkdir -p /root/certs
openssl req -nodes -newkey rsa:2048 -keyout /root/certs/key.pem -out /root/certs/${longname}.csr -subj "/C=${C}/ST=${ST}/L=${L}/O=${ORG}/OU=${OU}/CN=$(hostname)"
serial=$(ipa cert-request --add --principal=host/$(hostname) /root/certs/${longname}.csr|grep number:|cut -d' ' -f5)
ipa cert-show --out /root/certs/${longname}.crt $serial
fi
read -p "
Manual action required!
To proceed you need to copy /root/certs/key.pem /root/certs/${longname}.crt and /root/certs/${longname}.csr to /root/capsule-certs/
on the Satellite server and generate the capsule cert package.
capsule-certs-generate --foreman-proxy-fqdn "$(hostname)" --certs-tar "~/$(hostname)-certs.tar" --server-cert "/root/capsule-certs/${longname}.crt" --server-cert-req "/root/capsule-certs/${longname}.csr" --server-key "/root/capsule-certs/key.pem"--server-ca-cert "/etc/ipa/ca.crt"
Then you need to edit this script, insert the OAUTH keys for the capsule integration as provided by capsule-certs-generate and proceed with stage 2.
Hit Enter to exit Stage 1." answer
exit 0
fi
# END preqeq prep
export OAUTH_CONSUMER_KEY='5RDhyAovwyDkysG6bQGbUBcJWayKaTYL'
export OAUTH_CONSUMER_SEC='uYhAqHTj55Y7VQaMtECA3JjZyCSyM8SG'
export PROXY_OAUTH_SECRET='Y4xmYLy3rLQJoEp2EipK7im9vzrK3wHD'
# BEGIN installation
if [ $STAGE -le 2 ]; then
yum -y install satellite-capsule qpid-dispatch-router tfm-rubygem-hammer*
firewall-cmd --permanent --add-port="53/udp" --add-port="53/tcp" \
--add-port="67/udp" --add-port="69/udp" \
--add-port="80/tcp" --add-port="443/tcp" \
--add-port="5000/tcp" --add-port="5647/tcp" \
--add-port="8000/tcp" --add-port="8140/tcp" \
--add-port="8443/tcp" --add-port="9090/tcp"
firewall-cmd --reload
if [ ! -f /root/.hammer/cli_config.yml ]; then
mkdir -p /root/.hammer
cat > /root/.hammer/cli_config.yml <<EOF
:foreman:
:host: 'https://$SAT_NAME/'
:username: '$ADMIN'
:password: '$ADMIN_PASSWORD'
:request_timeout: -1
EOF
fi
if [ ! -f /root/freeipa.keytab ]; then
read -p "
Manual action required!
To proceed you need to manually copy the freeipa.keytab from your existing Satellite server.
The file is located in /etc/foreman-proxy/freeipa.keytab.
Make sure it is owned by foreman-proxy.foreman-proxy and has permission 0600.
Do not run foreman-prepare-realm again. This will invalidate all pre-existing freeipa.keytab files.
Hit Enter after the freeipa.keytab has been copied." answer
else
echo "Using existing keytab in /root/freeipa.keytab"
fi
cp /root/freeipa.keytab /etc/foreman-proxy
chown foreman-proxy:foreman-proxy /etc/foreman-proxy/freeipa.keytab
chmod 0600 /etc/foreman-proxy/freeipa.keytab
cp /etc/ipa/ca.crt /etc/pki/ca-trust/source/anchors/ipa.crt
update-ca-trust enable
update-ca-trust
time satellite-installer --scenario capsule -v \
--foreman-proxy-content-parent-fqdn "$SAT_NAME"\
--foreman-proxy-register-in-foreman "true"\
--foreman-proxy-foreman-base-url "https://$SAT_NAME"\
--foreman-proxy-trusted-hosts "$SAT_NAME"\
--foreman-proxy-trusted-hosts "$(hostname)"\
--foreman-proxy-oauth-consumer-key "$OAUTH_CONSUMER_KEY"\
--foreman-proxy-oauth-consumer-secret "$OAUTH_CONSUMER_SEC"\
--foreman-proxy-content-pulp-oauth-secret "$PROXY_OAUTH_SECRET"\
--foreman-proxy-content-certs-tar "/root/$(hostname)-certs.tar"\
--puppet-server-foreman-url "https://$SAT_NAME"\
--foreman-proxy-dns=true \
--foreman-proxy-dns-interface=$CAP_INTERFACE \
--foreman-proxy-dns-zone=$DOMAIN \
--foreman-proxy-dns-forwarders=$DNS \
--foreman-proxy-dns-reverse=$DNS_REV \
--foreman-proxy-dhcp=true \
--foreman-proxy-dhcp-interface=$CAP_INTERFACE \
--foreman-proxy-dhcp-range="$DHCP_RANGE" \
--foreman-proxy-dhcp-gateway=$DHCP_GW \
--foreman-proxy-dhcp-nameservers=$DHCP_DNS \
--foreman-proxy-tftp=true \
--foreman-proxy-tftp-servername=$CAP_IP \
--foreman-proxy-puppetca=true \
--foreman-proxy-realm=true \
--foreman-proxy-realm-keytab=/etc/foreman-proxy/freeipa.keytab \
--foreman-proxy-realm-principal="realm-proxy@${REALM}" \
--foreman-proxy-realm-provider=freeipa \
--enable-foreman-proxy-plugin-openscap \
--enable-foreman-proxy-plugin-discovery \
--enable-foreman-proxy-plugin-remote-execution-ssh
service foreman-proxy restart
yum install -y puppet-foreman_scap_client
yum install -y foreman-discovery-image
mkdir -p /etc/puppet/environments/production/modules
fi
# END installation
exit 0
# BEGIN environment setup
if [ $STAGE -le 3 ]; then
hammer capsule content add-lifecycle-environment --name=$(hostname) --organization=$ORG --environment=Production
hammer domain update --id 1 --organizations "$ORG" --locations "$LOC"
CAPSULE_ID=$(hammer --output='csv' capsule list --search=$(hostname) | tail -n+2 | head -n1 | cut -d',' -f1)
hammer subnet create --name $SUBNET_NAME \
--network $SUBNET \
--mask $SUBNET_MASK \
--gateway $DHCP_GW \
--dns-primary $DHCP_DNS \
--ipam 'Internal DB' \
--from $SUBNET_IPAM_BEGIN \
--to $SUBNET_IPAM_END \
--tftp-id $CAPSULE_ID \
--dhcp-id $CAPSULE_ID \
--dns-id $CAPSULE_ID \
--domain-ids 1 \
--organizations "$ORG" \
--locations "$LOC"
if [ $CONFIGURE_LIBVIRT_RESOURCE = 'true' ]; then
hammer compute-resource create --organizations "$ORG" --name "$COMPUTE_RES_NAME" --locations "$LOC" --provider Libvirt --url qemu+ssh://root@${COMPUTE_RES_FQDN}/system --set-console-password false
fi
if [ $CONFIGURE_RHEV_RESOURCE = 'true' ]; then
hammer compute-resource create --name "${COMPUTE_RES_NAME}" --provider "Ovirt" --description "RHV4 Managment Server" --url "https://${COMPUTE_RES_FQDN}/ovirt-engine/api/v3" --user "${RHV_RES_USER}" --password "${RHV_RES_PASSWD}" --locations "$LOC" --organizations "$ORG" --uuid "${RHV_RES_UUID}"
fi
LOC_IDS=''
for LOC in $(hammer --output=csv location list|tail -n+2|cut -d',' -f1); do LOC_IDS="${LOC_IDS}${LOC_IDS:+,}$LOC"; done
hammer location add-medium --name=$LOC --medium="RHEL 7.5 Kickstart"
hammer location add-hostgroup --name=$LOC --hostgroup='RHEL7_Base'
hammer location add-domain --name=$LOC --domain=$DOMAIN
hammer realm update --name=$REALM --location-ids=$LOC_IDS
hammer capsule content synchronize --organization=$ORG --name=$(hostname)
fi
# END environment setup
if [ $CONFIGURE_LIBVIRT_RESOURCE = 'true' ]; then
read -p "
Manual action required!
To proceed you need to manually add /usr/share/foreman/.ssh/id_rsa.pub to root@${COMPUTE_RES_FQDN}:.ssh/authorized_keys
Hit Enter after the key has been authorized." answer
fi
read -p "
Manual action required!
To proceed you need to manually adjust Compute Profiles.
Log into your Satellite-6.3 as admin and go to Infrastructure->Compute Profiles.
Go through all profile sizes and make sure the network interfaces are correctly selected for the Capsule subnet.
Hit Enter after all Compute Profiles are set up correctly." answer
read -p "
Manual action required!
To proceed you may need to fix realm settings.
Edit /etc/foreman-proxy/settings.d/realm_freeipa.yml
and make sure it reads
:principal: realm-proxy@${REALM}
In case you need to edit the file, you also need to restart Satellite
katello-service restart
Hit Enter after realm settings are verified to be correct." answer
# Check your kickstart-network-setup snippet and check if you need to adjust for your
# network setup. The following lines may serve as an example:
# sed -ri 's/^PEERDNS=yes/PEERDNS=no/' /etc/sysconfig/network-scripts/ifcfg-eth1
# sed -ri 's/^ONBOOT=no/ONBOOT=yes/' /etc/sysconfig/network-scripts/ifcfg-eth1
# echo "DEFROUTE=no" >>/etc/sysconfig/network-scripts/ifcfg-eth0
# systemctl restart network
hammer host create --organization="$ORG" --location="$LOC" --compute-resource="$COMPUTE_RES_NAME" --compute-profile='1-Small' --hostgroup='RHEL7_Base' --name="${HOST_PREFIX}-rhel7std01"
hammer host start --name="${HOST_PREFIX}-rhel7std01.${DOMAIN}"
hammer host create --organization="$ORG" --location="$LOC" --compute-resource="$COMPUTE_RES_NAME" --compute-profile='2-Medium' --hostgroup='inf-git-rhel7' --name="${HOST_PREFIX}-git"
hammer host start --name="${HOST_PREFIX}-git.${DOMAIN}"
hammer host create --organization="$ORG" --location="$LOC" --compute-resource="$COMPUTE_RES_NAME" --compute-profile='2-Medium' --hostgroup='inf-docker-rhel7' --name="${HOST_PREFIX}-docker01"
hammer host start --name="${HOST_PREFIX}-docker01.${DOMAIN}"
hammer host create --organization="$ORG" --location="$LOC" --compute-resource="$COMPUTE_RES_NAME" --compute-profile='3-Large' --hostgroup='inf-builder-rhel7' --name="${HOST_PREFIX}-build01"
hammer host start --name="${HOST_PREFIX}-build01.${DOMAIN}"
| true
|
5e8fe950a7e229db96bcb3a2ec3143099870cdf2
|
Shell
|
mcmtroffaes/bibliography
|
/generate-bib-per-category
|
UTF-8
| 1,282
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Build per-category BibTeX files (journals, conferences, books, ...) for one
# author, using bib2bib from the bibtex2html suite.
#
# Usage: generate-bib-per-category <file.bib> Author [--delurl]
#   --delurl  strip "url = {...}" fields from the extracted entries
#
# Outputs (in the current directory): publications.bib, journals.bib,
# conferences.bib, unpublished.bib, books.bib, edited.bib, other.bib
if [ "$#" -lt 2 ]
then
echo "Usage: generate-bib-per-category <file.bib> Author [--delurl]"
exit 1
fi
BIBFILE=$1
AUTHOR=$2
DELURL=0
shift 2
# Scan any remaining arguments for the optional --delurl flag.
while (( "$#" ))
do
if [[ "$1" == "--delurl" ]]
then
DELURL=1
fi
shift
done
# Extract everything (co-)authored by AUTHOR, plus proceedings/books the
# author edited, sorted by year then month (-r reverses the sort order).
bib2bib -s year -s month -r -ob publications.bib -c "author : \"$AUTHOR\" or (editor : \"$AUTHOR\" and (\$type = \"PROCEEDINGS\" or \$type = \"BOOK\"))" "$BIBFILE"
if [ "$DELURL" == "1" ]
then
# Slurp the whole file into sed's hold space (1h / 1!H / $ g) so the
# multi-line substitution can delete ",\n   url = {...}" fields.
cat publications.bib | sed -n '
1h
1!H
$ {
g
s/,\n[ ]*url[ ]*=[ ]*{[^}]*}//g
p
}
' > _publications.bib
mv _publications.bib publications.bib
fi
# Split the filtered publications into one file per category.
bib2bib -ob journals.bib -c "\$type = \"ARTICLE\"" publications.bib
bib2bib -ob conferences.bib -c "\$type = \"INPROCEEDINGS\"" publications.bib
bib2bib -ob unpublished.bib -c "\$type = \"UNPUBLISHED\"" publications.bib
bib2bib -ob books.bib -c "not booktitle : \"Newsletter\" and \$type = \"INCOLLECTION\"" publications.bib
bib2bib -ob edited.bib -c "editor : \"$AUTHOR\" and (\$type = \"PROCEEDINGS\" or \$type = \"BOOK\")" publications.bib
bib2bib -ob other.bib -c "author : \"$AUTHOR\" and not \$type = \"ARTICLE\" and (not \$type = \"INCOLLECTION\" or booktitle : \"Newsletter\") and not \$type = \"INPROCEEDINGS\" and not \$type = \"UNPUBLISHED\"" publications.bib
| true
|
4f27073a2c9d693e37adfd8f43064665dc054c23
|
Shell
|
analogdevicesinc/linux_image_ADI-scripts
|
/swap_to
|
UTF-8
| 1,978
| 4.46875
| 4
|
[
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# swap_to a different image on the SD Card, while keeping a backup
# usage:
# swap_to directory_name
#
# With no argument, lists the candidate image directories on the boot
# partition.  With a directory name, backs up the current BOOT.BIN /
# devicetree.dtb / uImage (unless an md5-identical copy already exists
# below the boot partition) and copies the named image set into place.
if [ `id -u` != "0" ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Mount the boot partition if it is not already mounted.
if [ "$(grep mmcblk0p1 /proc/mounts | wc -l)" -eq "0" ] ; then
mount /dev/mmcblk0p1 /media/boot
fi
# Resolve the actual mount point of the boot partition.
DIR=$(grep mmcblk0p1 /proc/mounts | awk '{print $2}')
# Detect Zynq platforms; they get the shared-uImage fallback below.
if [ $(cat /proc/cpuinfo | grep -i zynq | wc -l) -gt 0 ] ; then
ZYNQ=1
fi
# No argument: just list the available image directories.
if [ -z ${1} ] ; then
if [ "${ZYNQ}" = "1" ] ; then
ls -d ${DIR}/zynq*
else
ls -d ${DIR}/
fi
fi
# find_match <file>
# Back up <file> into ${DIR}/backup_swap_from unless an md5-identical copy
# already exists in one of the image subdirectories (depth >= 2) of the
# boot partition.
find_match () {
needle=$(basename $1)
match=$(md5sum -b $1|awk '{print $1}')
haystack_dir=$(dirname $1)
for i in $(find ${haystack_dir} -mindepth 2 -name ${needle}) ; do
temp=$(md5sum -b $i|awk '{print $1}')
if [ "${temp}" = "${match}" ] ; then
echo not backing up ${needle}, match ${i}
return
fi
done
echo making backup of $1 in ${DIR}/backup_swap_from
mkdir -p ${DIR}/backup_swap_from
cp ${1} ${DIR}/backup_swap_from/
}
# missing_file <name>
# Report a required boot file that is absent from the image set and abort.
missing_file() {
echo missing file $1
echo SD card may not be bootable
exit
}
if [ ! -z ${1} ] ; then
if [ -d ${DIR}/${1} ] ; then
# if they are not on the drive, back them up
echo "### backing up existing files ###"
find_match ${DIR}/BOOT.BIN
find_match ${DIR}/devicetree.dtb
find_match ${DIR}/uImage
echo
echo "### copying files to BOOT partion ###"
if [ -f ${DIR}/${1}/BOOT.BIN ] ; then
echo copying ${DIR}/${1}/BOOT.BIN
cp ${DIR}/${1}/BOOT.BIN ${DIR}/
else
missing_file BOOT.BIN
fi
if [ -f ${DIR}/${1}/devicetree.dtb ] ; then
echo copying ${DIR}/${1}/devicetree.dtb
cp ${DIR}/${1}/devicetree.dtb ${DIR}/
else
missing_file devicetree.dtb
fi
if [ -f ${DIR}/${1}/uImage ] ; then
echo copying ${DIR}/${1}/uImage
cp ${DIR}/${1}/uImage ${DIR}/
else
# No per-image uImage: on Zynq fall back to the shared zynq-common copy.
if [ "${ZYNQ}" = "1" ] ; then
echo copying ${DIR}/zynq-common/uImage
cp ${DIR}/zynq-common/uImage ${DIR}/
fi
fi
else
echo could not find ${DIR}/${1}
fi
fi
umount $DIR
| true
|
c67ec99840ab1f1cd8b794201c58bb9b3248afaf
|
Shell
|
pereorga/poedit
|
/deps/gettext/gettext-tools/tests/xgettext-sh-2
|
UTF-8
| 844
| 3.25
| 3
|
[
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-only",
"LGPL-2.1-or-later"
] |
permissive
|
#!/bin/sh
# Regression test for xgettext's shell reader: with --add-comments=TRANSLATORS:
# only tagged comment blocks immediately preceding a gettext call may be
# extracted, and a multi-line tagged block must be kept intact.
. "${srcdir=.}/init.sh"; path_prepend_ . ../src
# Test Shell support: --add-comments option.
cat <<EOF > xg-sh-2.sh
# This comment will not be extracted.
gettext "help"
# TRANSLATORS: This is an extracted comment.
gettext "me"
# Not extracted either.
gettext "Hey Jude"
# TRANSLATORS:
# Nickname of the Beatles
gettext "The Fabulous Four"
EOF
: ${XGETTEXT=xgettext}
${XGETTEXT} --omit-header --no-location --add-comments=TRANSLATORS: \
-d xg-sh-2.tmp xg-sh-2.sh || Exit 1
# Strip CRs so the comparison below is line-ending agnostic.
LC_ALL=C tr -d '\r' < xg-sh-2.tmp.po > xg-sh-2.po || Exit 1
cat <<EOF > xg-sh-2.ok
msgid "help"
msgstr ""
#. TRANSLATORS: This is an extracted comment.
msgid "me"
msgstr ""
msgid "Hey Jude"
msgstr ""
#. TRANSLATORS:
#. Nickname of the Beatles
msgid "The Fabulous Four"
msgstr ""
EOF
: ${DIFF=diff}
# The test's exit status is the diff status (0 = output matched expectation).
${DIFF} xg-sh-2.ok xg-sh-2.po
result=$?
exit $result
| true
|
6cf72b3c55f2ff901dc2360fc3d8a24d27f58a73
|
Shell
|
garikait/myappsample
|
/check_weekv2.sh
|
UTF-8
| 618
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Rotate a weekly backup counter stored in count_week.txt (current directory).
# Weeks 1-3: the regular backup script should run and the counter advances;
# week 4: the week-4 backup script should run and the counter resets to 1.
#
# BUGFIX: removed a stray "case $week_number in" line with no patterns and
# no "esac" -- it made the whole script a bash syntax error.
echo -e "Checking if we should run this script\n====\n"
# Read the persisted week number; default to week 1 on the first run.
if [ -f count_week.txt ]; then week_number=$(cat count_week.txt); else week_number=1; fi
echo -e "This is week number $week_number"
if [ "$week_number" -le 3 ]
then
# Advance the counter (arithmetic expansion instead of the external 'expr').
week_number=$((week_number + 1))
echo "New Week Number is $week_number"
echo -e "Run /root/scripts/vmbackup_to_risingsun.sh for week_1_to_3 Backup \n====\n"
echo "$week_number" > count_week.txt
else
echo -e "Running /root/scripts/vmbackup_to_risingsun4.sh Week_4 Script \n====\n"
echo 1 > count_week.txt
fi
| true
|
f9306aa811bac261d2737f26d88643eb8d7e63d1
|
Shell
|
zheng-at/myscripts
|
/整数连乘.sh
|
UTF-8
| 486
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Compute the product of the consecutive integers x..y, i.e. y! / (x-1)!.
# Prompts interactively; defaults are x=1, y=10.
echo -e "\033[1m计算连续整数的连乘\033[0m"
read -p "输入整数x,默认为1:" x
# Reject any input containing a non-digit (also rejects negative numbers).
echo $x | grep '[^0-9]' > /dev/null && echo "请输入整数" && exit
x=${x:-1}
read -p "输入整数y,默认为10:" y
echo $y | grep '[^0-9]' > /dev/null && echo "请输入整数" && exit
y=${y:-10}
# i accumulates y! : j runs 2..y (loop condition checks j <= y-1, then j++).
i=1;j=1
while [ $j -le $[y-1] ];do
let j++
i=$[i*j]
done
# a accumulates (x-1)! : b runs 2..x-1 (loop body never runs when x <= 2).
a=1;b=1
while [ $b -le $[x-2] ];do
let b++
a=$[a*b]
done
# y! / (x-1)!  ==  x * (x+1) * ... * y
echo -e "\033[36m整数${x}到整数${y}的连乘等于$[i/a]\033[0m"
| true
|
0fceb086cd684ddca953873f5cf684be4a83764d
|
Shell
|
ufcg-lsd/arrebol-pb-worker
|
/worker/bin/task-script-executor.sh
|
UTF-8
| 1,276
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Read the task script file and execute one command at a time, saving the
# exit codes in the <task>.ec file.  Each command executed is written to the
# <task>.cmds file.
# Use -tsf= or --task_filepath= to input the task file path (Required).
# Use the flag -d or --debug to store .out and .err from execution (Optional).
# Execution does not stop on commands that return a non-zero exit code.
set +e
WORK_DIR=/arrebol
# Parse long-style arguments of the form -key=value / flags.
for i in "$@"
do
case $i in
-tsf=*|--task_filepath=*)
__TASK_SCRIPT_FILEPATH="${i#*=}"
shift
;;
-d|--debug)
DEBUG=YES
shift
;;
*)
# unknown option
;;
esac
done
if [ ! -f "$__TASK_SCRIPT_FILEPATH" ];
then
echo "$__TASK_SCRIPT_FILEPATH is not a file"
exit 17
fi
TS_FILENAME=$(basename "$__TASK_SCRIPT_FILEPATH")
# .ec file: one exit code per executed command.
__EXIT_CODES=$WORK_DIR/$TS_FILENAME.ec
# -f so a missing artifact from a previous run does not print an error.
rm -f "$__EXIT_CODES"
touch "$__EXIT_CODES"
# .cmds file: log of every command that was executed.
__COMMANDS=$WORK_DIR/$TS_FILENAME.cmds
rm -f "$__COMMANDS"
touch "$__COMMANDS"
# In debug mode, redirect all stdout/stderr of the executed commands into
# .out/.err files alongside the other artifacts.
if [ -n "$DEBUG" ];
then
rm -f "$WORK_DIR/$TS_FILENAME.out"
exec 1> "$WORK_DIR/$TS_FILENAME.out"
rm -f "$WORK_DIR/$TS_FILENAME.err"
exec 2> "$WORK_DIR/$TS_FILENAME.err"
fi
# Run the task file line by line; '|| [ -n "$__line" ]' also processes a
# final line that lacks a trailing newline.
while IFS= read -r __line || [ -n "$__line" ]; do
set +e
eval $__line
# BUGFIX: capture the status of 'eval' immediately.  Previously the logging
# 'echo $__line' ran first, so the recorded exit code was always that of
# 'echo' (i.e. 0), never the task command's real status.
__rc=$?
echo "$__line" >> "$__COMMANDS"
echo "$__rc" >> "$__EXIT_CODES"
done < "$__TASK_SCRIPT_FILEPATH"
| true
|
12f375bafbee1379345ee0d14a8cb4ab6999f3c1
|
Shell
|
root-gg/plik
|
/testing/mssql/run.sh
|
UTF-8
| 795
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch a disposable MS SQL Server container for Plik backend testing.
# Subcommand dispatch (start/stop/status/test/...) is handled by run_cmd,
# which -- along with check_docker_connectivity, pull_docker_image and
# status -- is assumed to come from ../utils.sh (not visible here; confirm).
set -e
cd "$(dirname "$0")"
BACKEND="mssql"
CMD=$1
TEST=$2
source ../utils.sh
check_docker_connectivity
# Image tag can be overridden via the DOCKER_VERSION environment variable.
DOCKER_VERSION=${DOCKER_VERSION-2019-latest}
DOCKER_IMAGE="mcr.microsoft.com/mssql/server:$DOCKER_VERSION"
DOCKER_NAME="plik.mssql"
DOCKER_PORT=2605
PASSWORD="P@ssw0rd"
# Start the container (host port $DOCKER_PORT -> mssql 1433) unless it is
# already running; waits briefly, then re-checks that it actually came up.
function start {
if status ; then
echo "ALREADY RUNNING"
else
pull_docker_image
echo -e "\n - Starting $DOCKER_NAME\n"
docker run -d -p "$DOCKER_PORT:1433" \
-e "ACCEPT_EULA=Y" \
-e "SA_PASSWORD=$PASSWORD" \
--name "$DOCKER_NAME" "$DOCKER_IMAGE"
echo "waiting for mssql to start ..."
sleep 10
if ! status ; then
echo "IMAGE IS NOT RUNNING"
exit 1
fi
fi
}
run_cmd
| true
|
2dcc9da72fbc2ee42f0989433f986dfd20e7e53b
|
Shell
|
cfengine/buildscripts
|
/build-scripts/autogen
|
UTF-8
| 1,938
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/sh -ex
# Run each sub-project's autogen.sh, pin 7-character git revision files for
# selected directories, and verify that all sub-projects agree on the same
# x.y.z version in their CFVERSION files.
# Requires $PROJECT (community|nova) and $BASEDIR in the environment; the
# sourced "functions" file is assumed to provide shared helpers (not visible
# here -- confirm).
. `dirname "$0"`/functions
GITSHAOF="core buildscripts buildscripts/deps-packaging"
case "$PROJECT" in
community)
NOVA=no
;;
nova)
NOVA=yes
GITSHAOF="$GITSHAOF enterprise nova"
;;
*)
echo "Unknown project: $PROJECT"
exit 42;;
esac
if test "x$NOVA" = "xyes"
then
projects="core enterprise nova masterfiles"
else
projects="core masterfiles"
fi
# '|| false' makes the subshell's failure trip 'set -e' in this shell.
for p in $projects
do
(cd $BASEDIR/$p && NO_CONFIGURE=1 ./autogen.sh) || false
done
# %h (abbreviated commit hash) is not deterministic for length on different systems
# so far (up to Aug 2023) this didn't matter because one system (bootstrap-oslo-dc)
# was responsible for doing this autogen work and all other systems used the result.
# When we migrated from travis to github actions we needed things to be stable between
# bootstrap-oslo-dc and other systems so will force a length of 7 and check that
# the result is unique.
export CORE_ABBREV=7 # adjust this up if need be
git config --global --add core.abbrev $CORE_ABBREV
# Write a 'revision' file (no trailing newline) per tracked directory,
# aborting if the forced 7-char abbreviation is ambiguous.
for i in $GITSHAOF
do
if [ -d $BASEDIR/$i ] && [ ! -f $BASEDIR/$i/revision ]
then
R=$(cd $BASEDIR/$i && git log --pretty='format:%h' -1 -- .) || false
(
cd $BASEDIR/$i
if ! git show $R --oneline >/dev/null; then
echo "abbreviated commit hash of $CORE_ABBREV is not unique. Consider increasing the value in the script $0."
exit 1
fi
)
echo $R | tr -d '\n' > $BASEDIR/$i/revision
fi
done
# Collect every sub-project's CFVERSION and fail unless the x.y.z prefixes
# all agree.
detected_versions=`echo $projects \
| xargs -n1 \
| sed "s|.*|$BASEDIR/&/CFVERSION|" \
| xargs cat`
number_of_different_versions=`echo $detected_versions \
| tr ' ' '\n' \
| sed -e 's/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1/' \
| uniq | wc -l`
if [ x"$number_of_different_versions" != x1 ]
then
echo "Detected versions mismatch:" "$detected_versions" 1>&2
exit 33
fi
| true
|
df1af7f1de4084841c311f81549a565766f14a09
|
Shell
|
ivartz/opticalflow-bcond
|
/of-farneback-models-patient.sh
|
UTF-8
| 541
| 3.03125
| 3
|
[] |
no_license
|
# Run Farneback optical flow for every model directory of one patient.
# Usage: of-farneback-models-patient.sh <patientdir>
# Set run_evals=0 for a dry run (commands are built but never eval'ed).
run_evals=1
patientdir=$1
# Fixed (reference) image: the patient's T1c volume.
fixedimg="$patientdir/T1c.nii.gz"
oftype="offarneback"
# Make array of patient models, full paths
readarray -t models < <(find $patientdir -mindepth 1 -maxdepth 1 -type d | sort)
for model in ${models[*]}; do
# Per-model output directory for the flow field.
outdir="$model/$oftype"
c="mkdir -p $outdir"
if [ $run_evals == 1 ]; then
eval $c
fi
movingimg="$model/warped.nii.gz"
# Pinned to GPU 1 via CUDA_VISIBLE_DEVICES.
c="CUDA_VISIBLE_DEVICES=1 python3 of-farneback.py $fixedimg $movingimg $outdir/flow.nii.gz"
if [ $run_evals == 1 ]; then
eval $c
fi
done
| true
|
a1ffa618c3607b66c39efe33492f153c81e87972
|
Shell
|
pinfort/Kazanami
|
/Profile_auto_generator/update.sh
|
UTF-8
| 2,160
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Thank you Download This Script.
# This program can running on Github Actions.
# I checked successfull running on Github Actions.
#
# Regenerates the profile README.md from the user's five most recently
# updated repositories and commits/pushes it only when it actually changed.
#
# Require Env Parameter
#
# ${GITH_USER} -> Push Commit Username
# ${GITH_EMAIL} -> Push Commit User Email
# ${DEBUG_MODE} -> 0 for quiet mode; anything else enables xtrace
# Debug Mode Switch
if [ "$DEBUG_MODE" = 0 ];then
set -u
else
set -uex
fi
# Upload Git Settings
git config --global user.name ${GITH_USER}
git config --global user.email ${GITH_EMAIL}
COMMIT_MESSAGE="Profile: Update!"
README_TEMPLATE="${PWD}"
README_DEPLOY=$(dirname ${PWD})
REMOTE_API="https://api.github.com/users/${GITH_USER}/repos?sort=updated&per_page=5&page=1"
#function manifest_check(){
#  git log --pretty=format:%h -2
#  COMMIT_HASH=`git log --pretty=format:%h -2`
#  BEFORE=`echo ${COMMIT_HASH} | cut -d " " -f 1` >> /dev/null
#  AFTER=`echo ${COMMIT_HASH} | cut -d " " -f 2` >> /dev/null
#  git diff $BEFORE $AFTER --exit-code --name-only --relative=Profile_auto_generator
#git diff HEAD --relative=bucket --exit-code --name-only
#  echo $?
#}
function main(){
#  MAN_CHECK=$(manifest_check)
#  if [ $MAN_CHECK == 0 ];then
#    echo "No Update"
#    return 0;
#  fi
echo "Setupping ..."
(cd git_getter;yarn install;node main.js)
# BUGFIX: 'tempfile' is deprecated and absent on many modern systems;
# mktemp is the portable replacement.
TMP_FILE=$(mktemp)
echo "Getting Your Repo..."
REPO_DATA=$(cat "./repos.json")
MAX_COUNTER=$(echo ${REPO_DATA} | jq '.[].name' -r | wc -l);
cat ./Header.md >> $TMP_FILE
cat ./list/repo_list_header.md >> $TMP_FILE
# Render one templated list entry (list/body.md) per repository.
for (( count=0; count<${MAX_COUNTER}; count++));do
Repo_name="Kazanami/$(echo $REPO_DATA | jq .[$count].name -r)"
Repo_url=$(echo $REPO_DATA | jq .[$count].clone_url -r)
update_time=`date -d $(echo $REPO_DATA | jq .[$count].updated_at -r) '+%F %R'`
eval "echo \"$(eval cat ${README_TEMPLATE}/list/body.md)\"" >> ${TMP_FILE}
done
cat ./Footer.md >> $TMP_FILE
# NOTE(review): this compares the checked-in README with the deployed one,
# not the freshly generated ${TMP_FILE} -- confirm that is intentional.
diff -s ./README.md ../README.md > /dev/null 2>&1
diff_status=$?
# BUGFIX: capture diff's status once.  The original 'elif' re-read $?,
# which by then held the exit status of the preceding '[ ... ]' test, so a
# diff error (status 2, e.g. missing ../README.md) was indistinguishable
# from a genuine difference.
if [ $diff_status -eq 0 ];then
echo "No update"
return 0
elif [ $diff_status -eq 1 ];then
echo "Update README.md"
mv ${TMP_FILE} ${README_DEPLOY}/README.md
git add ${README_DEPLOY}/README.md;
git commit -m "${COMMIT_MESSAGE}"
git push
git reset
fi
}
main
| true
|
e202d625ed7d1508b02a49c9fd0002a944e5e25c
|
Shell
|
rbloomdaleIXL/my_dev_environment
|
/my_aliases.sh
|
UTF-8
| 980
| 2.828125
| 3
|
[] |
no_license
|
# Personal shell aliases / environment for the local dev setup.
# macOS Finder toggles for showing/hiding dotfiles.
alias ShowHiddenFiles="defaults write com.apple.finder AppleShowAllFiles -boolean true"
alias HideHiddenFiles="defaults write com.apple.finder AppleShowAllFiles -boolean false"
# Default git branch name; presumably read by other scripts sourcing this
# file -- TODO confirm.
export MainGitBranchName="develop"
###############
# iOS #
###############
################
# brew #
################
alias cask="brew cask"
#################
# Android #
#################
# Restart the adb server using the SDK's bundled platform-tools binary.
alias restartADB="~/Library/Android/sdk/platform-tools/adb kill-server; ~/Library/Android/sdk/platform-tools/adb start-server"
# Launch emulators routed through Charles Proxy; 'emulatorProxy' is assumed
# to be defined elsewhere (not visible here -- confirm).
alias emulatorMarshmallowCharlesProxy="emulatorProxy Nexus_10_Edited_API_23"
alias emulatorKitKatCharlesProxy="emulatorProxy Nexus_10_4.4.2"
alias emulatorNougatCharlesProxy="emulatorProxy Nexus_10_Edited_API_25"
################
# Utils #
################
#alias deleteEmptyDirs = "for folder in $(find -type d ! -path *.svn*); do if [ "`find $folder ! -path *.svn* ! -path $folder | wc -l`" -eq 0 ]; then echo $folder; rm -r $folder; fi; done"
| true
|
02f8b07f0b183a4c91877f8a05f2b439e20a4486
|
Shell
|
SaundersLab/FieldPathogenomics
|
/scripts/init_working_copy.sh
|
UTF-8
| 3,008
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Set up a development working copy of the FieldPathogenomics pipeline:
# create a virtualenv, install the package and its requirements via the
# internet-connected "software" node, localise the helper scripts, and
# create convenience symlinks.
# Arguments (key=value): --dev-dir= --scratch-dir= --base-dir= --prod-dir=
for i in "$@"
do
case $i in
--dev-dir=*)
dev_dir="${i#*=}"
shift # past argument=value
;;
--scratch-dir=*)
scratch_dir="${i#*=}"
shift # past argument=value
;;
--base-dir=*)
base_dir="${i#*=}"
shift # past argument=value
;;
--prod-dir=*)
prod_dir="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
;;
esac
done
# NOTE(review): version_file is set but not used below -- possibly consumed
# by sourced/downstream scripts; confirm before removing.
version_file=$prod_dir/production/src/fieldpathogenomics/fieldpathogenomics/version.py
# Create the 'dev' virtualenv and prepend environment setup (module sources,
# TMPDIR, LUIGI_CONFIG_PATH) to its activate script.
function venv_create {
activate_prefix="source python-3.5.1;
source git-1.8.1.2;
export TMPDIR=$scratch_dir;
export LUIGI_CONFIG_PATH=$prod_dir/luigi.cfg;
"
# Create the new virtualenv
mkdir -p $dev_dir
cd $dev_dir
source python-3.5.1;
virtualenv -p `which python3` dev
# Add the source commands and environment variable defs to the activate script
echo $activate_prefix > temp
cat dev/bin/activate >> temp
mv -f temp dev/bin/activate
}
# Install the package from GitHub master, running pip on the
# internet-connected "software" node over ssh.
function install_fieldpathogenomics {
# Pull the code from the tip of master and install
ssh -t -t software << HERE
# Use the internet connected node to install required packages
source $dev_dir/dev/bin/activate
pip install --upgrade --force-reinstall -e git+https://github.com/SaundersLab/FieldPathogenomics.git@master#egg=fieldpathogenomics
exit
HERE
echo "Installed fieldpathogenomics to $dev_dir/dev/src/fieldpathogenomics/fieldpathogenomics"
}
# Install the package's requirements, again via the "software" node.
function install_requirements {
# Use the requirements.txt to install python packages
ssh -t -t software << HERE
# Use the internet connected node to install required packages
source $dev_dir/dev/bin/activate
pip install -r $dev_dir/dev/src/fieldpathogenomics/requirements.txt
exit
HERE
echo "Installed the requirements in $dev_dir/dev/src/fieldpathogenomics/requirements.txt"
}
function install_scripts {
# Copy and localise the supporting scripts
# This makes the following vars available to all scripts:
# $dev_dir
# $src_dir
# $prod_dir
# $scratch_dir
cp -fr $dev_dir/dev/src/fieldpathogenomics/scripts $dev_dir/scripts
cd $dev_dir/scripts
vars=$(printf "#!/bin/bash -e
prod_dir=$prod_dir
dev_dir=$dev_dir
scratch_dir=$scratch_dir
base_dir=$base_dir
src_dir=$dev_dir/dev/src/fieldpathogenomics/")
printf '%b\n' "$vars" | cat - release.sh > temp && mv temp release.sh
printf '%b\n' "$vars" | cat - test.sh > temp && mv temp test.sh
printf '%b\n' "$vars" | cat - pull.sh > temp && mv temp pull.sh
#-- Pipeline scripts --#
printf '%b\n' "$vars" | cat - Callset.sh > temp && mv temp Callset.sh
printf '%b\n' "$vars" | cat - Transcripts.sh > temp && mv temp Transcripts.sh
printf '%b\n' "$vars" | cat - Tree.sh > temp && mv temp Tree.sh
}
# Symlink the installed package to the top of the dev directory.
function make_sym_links {
ln -Ts $dev_dir/dev/src/fieldpathogenomics/fieldpathogenomics $dev_dir/fieldpathogenomics
}
venv_create;
install_fieldpathogenomics;
install_requirements;
install_scripts;
make_sym_links;
| true
|
2ad01faaf9dad32621b5f4c8d87a4393758a7a01
|
Shell
|
leonardt/cgra_test
|
/scripts/pnr_setup.sh
|
UTF-8
| 253
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone (once) and build the cgra_pnr place-and-route generator into
# $PNR_DIR, then install its Python requirements.
set -xe
# clone the generator
# Fail early with a clear message instead of cloning/cd-ing into "" when
# PNR_DIR is unset; quoting protects paths containing spaces.
dest_dir=${PNR_DIR:?PNR_DIR must be set}
# NOTE(review): WD is not used below -- possibly read by callers sourcing
# this script; confirm before removing.
WD=${ROOT_DIR}
if [ ! -d "${dest_dir}" ]; then
    git clone --depth 1 \
        https://github.com/Kuree/cgra_pnr "${dest_dir}"
fi
cd "${dest_dir}"
make
pip install -r requirements.txt
| true
|
b9f4e7beca3fff2aa453ce22991167193ddd7ce4
|
Shell
|
nanite10/distributions
|
/old/centos_first_boot.sh
|
UTF-8
| 8,733
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# First-boot provisioning for CentOS 7/8:
#   * update the system and install common tooling
#   * disable SELinux and firewalld
#   * build and install ZFS 0.8.5 from source (DKMS)
#   * build and install lsyncd and a newer rsync from source
#
# BUGFIX: the original per-step checks printed the literal text "!:0"/"!:*"
# -- csh-style history designators are never expanded inside a bash script,
# so the failing command was never actually reported.  The run/die helpers
# below print the real command and remove ~60 copy-pasted checks.

# Print an error naming the failed command, then abort.
die() {
echo "ERROR: Failure on last command; run was [$*]" >&2
exit 1
}
# Run a command in the current shell (so 'run cd ...' works) and abort via
# die() if it fails.
run() {
"$@" || die "$@"
}

full=`cat /etc/centos-release | tr -dc '0-9.'`
major=$(cat /etc/centos-release | tr -dc '0-9.'|cut -d \. -f1)
minor=$(cat /etc/centos-release | tr -dc '0-9.'|cut -d \. -f2)
asynchronous=$(cat /etc/centos-release | tr -dc '0-9.'|cut -d \. -f3)
echo "CentOS Version: $full"
echo "Major Relase: $major"
echo "Minor Relase: $minor"
echo "Asynchronous Relase: $asynchronous"
# Update system and install common packages
if [[ "$major" == "7" ]]; then
run yum update -y
run yum install epel-release -y
run yum update -y
run yum install vim rsync tmux wget iptraf-ng iperf3 cifs-utils git glusterfs-client kernel-headers-$(uname -r) kernel-devel-$(uname -r) ncurses-devel flex bison openssl openssl-devel dkms elfutils-libelf-devel autoconf bzip2 automake libtool libuuid-devel libblkid-devel rpm-build libudev-devel libattr-devel libaio-devel python2-devel python-cffi python-setuptools libffi-devel cyrus-sasl-plain mailx strace mdadm lvm2 sysstat lm_sensors-libs net-tools sshpass samba ncdu python3 nfs-utils -y
run yum update -y
elif [[ "$major" == "8" ]]; then
run dnf update -y
run dnf install epel-release -y
run dnf update -y
run dnf install vim rsync tmux wget iptraf-ng iperf3 cifs-utils git glusterfs-client kernel-headers-$(uname -r) kernel-devel-$(uname -r) ncurses-devel flex bison openssl openssl-devel dkms elfutils-libelf-devel autoconf bzip2 automake libtool libuuid-devel libblkid-devel rpm-build libudev-devel libattr-devel libaio-devel libtirpc-devel python2-devel libffi-devel cyrus-sasl-plain mailx strace mdadm lvm2 sysstat lm_sensors-libs net-tools sshpass samba ncdu python3 nfs-utils -y
run dnf update -y
fi
# Disable SELinux (takes effect after reboot) and the firewall.
run sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
run systemctl disable firewalld
run service firewalld stop
# Install ZFS
run wget https://github.com/zfsonlinux/zfs/releases/download/zfs-0.8.5/zfs-0.8.5.tar.gz
run tar -xvzf zfs-0.8.5.tar.gz
run cd zfs-0.8.5
run sh autogen.sh
run ./configure
run make
run make rpm
if [[ "$major" == "7" ]]; then
run yum install zfs-dkms-0.8.5-1.el7.src.rpm zfs-dkms-0.8.5-1.el7.noarch.rpm zfs-0.8.5-1.el7.src.rpm python2-pyzfs-0.8.5-1.el7.noarch.rpm zfs-dracut-0.8.5-1.el7.noarch.rpm libnvpair1-0.8.5-1.el7.x86_64.rpm libuutil1-0.8.5-1.el7.x86_64.rpm libzfs2-0.8.5-1.el7.x86_64.rpm libzfs2-devel-0.8.5-1.el7.x86_64.rpm libzpool2-0.8.5-1.el7.x86_64.rpm zfs-0.8.5-1.el7.x86_64.rpm zfs-debuginfo-0.8.5-1.el7.x86_64.rpm -y
elif [[ "$major" == "8" ]]; then
run dnf install zfs-dkms-0.8.5-1.el7.src.rpm zfs-dkms-0.8.5-1.el7.noarch.rpm zfs-0.8.5-1.el7.src.rpm python2-pyzfs-0.8.5-1.el7.noarch.rpm zfs-dracut-0.8.5-1.el7.noarch.rpm libnvpair1-0.8.5-1.el7.x86_64.rpm libuutil1-0.8.5-1.el7.x86_64.rpm libzfs2-0.8.5-1.el7.x86_64.rpm libzfs2-devel-0.8.5-1.el7.x86_64.rpm libzpool2-0.8.5-1.el7.x86_64.rpm zfs-0.8.5-1.el7.x86_64.rpm zfs-debuginfo-0.8.5-1.el7.x86_64.rpm -y
fi
run modprobe zfs
run systemctl enable zfs-import-scan.service
# Load the zfs module on every boot.
echo "zfs" > /etc/modules-load.d/zfs.conf || die "write /etc/modules-load.d/zfs.conf"
grub_file=`find /boot -type f -name grub.cfg`
if [ -z "$grub_file" ]; then echo "ERROR: Failed to find grub.cfg in /boot"; exit 1; fi
run depmod -a
run dracut -f
run grub2-mkconfig -o "$grub_file"
# Install lsyncd
if [[ "$major" == "7" ]]; then
run yum install cmake lua lua-devel gcc-c++ -y
elif [[ "$major" == "8" ]]; then
run dnf install cmake lua lua-devel gcc-c++ -y
fi
run git clone https://github.com/axkibe/lsyncd.git
run cd lsyncd
run mkdir build
run cd build
run cmake ..
run make
run cp lsyncd /usr/local/sbin/
# Raise inotify limits for lsyncd (applied now and persisted).
echo "fs.inotify.max_user_watches = 16777216" >> /etc/sysctl.conf
echo "fs.inotify.max_queued_events = 1000000" >> /etc/sysctl.conf
sysctl fs.inotify.max_user_watches=16777216
sysctl fs.inotify.max_queued_events=1000000
# Install updated rsync
if [[ "$major" == "7" ]]; then
run yum install gcc gawk autoconf automake acl libacl-devel attr libattr-devel openssl-devel lz4 lz4-devel -y
elif [[ "$major" == "8" ]]; then
run dnf install gcc gawk autoconf automake acl libacl-devel attr libattr-devel openssl-devel lz4 lz4-devel -y
fi
run wget https://download.samba.org/pub/rsync/src/rsync-3.2.3.tar.gz
run tar -xvzf rsync-3.2.3.tar.gz
run cd rsync-3.2.3
run ./configure --disable-xxhash --disable-zstd --enable-acl-support --disable-md2man
run make
run cp rsync /usr/local/sbin/
| true
|
e0dfa9ec3291c608056f5f19a3abbc6d36e9c119
|
Shell
|
1056599071/boss-hive
|
/src/main/resources/shell/prevue/run.sh
|
UTF-8
| 939
| 3.296875
| 3
|
[] |
no_license
|
source ~/.bash_profile;
# Daily export driver: exports user play records and recommendation config
# for a given day (default: yesterday, YYYYMMDD; can be overridden via $1),
# then runs the URL / PID pipelines only when the corresponding config
# export contains data.
BASEDIR=`dirname $0`
cd $BASEDIR
yesterday=`date -d "1 days ago" +"%Y%m%d"`
if [ "$#" -eq 1 ]; then
yesterday=$1
fi
echo "export user play record....."
sh run_play_record.sh $yesterday
echo "export movie configuration....."
sh export_rec_config_fromdb.sh $yesterday
# Count non-blank lines in the exported config files.
url_filesize=`sed '/^[ \t]*$/d' config_data/rec_config_url_$yesterday.csv | wc -l`
pid_filesize=`sed '/^[ \t]*$/d' config_data/rec_config_pid_$yesterday.csv | wc -l`
if [ $url_filesize -gt 0 ]; then
# BUGFIX: was "etho", which failed with "command not found" at runtime and
# skipped this status message.
echo "exec command......export active datas......"
sh run_config_url.sh $yesterday
else
echo "not url......"
fi
#echo "export pid,uid....."
#sh run_play_uid_pid.sh $yesterday
if [ $pid_filesize -gt 0 ]; then
echo "export pid,uid....."
sh run_play_uid_pid.sh $yesterday
echo "export pid uid from hive to db"
sh run_config_pid.sh $yesterday
sh import_rec_result_todb.sh $yesterday
else
echo "not pid......"
fi
| true
|
3d0c39cbc574170465bd04d382c7d1588d2bb876
|
Shell
|
oncoapop/data_reporting
|
/beast_scripts/check-primers.sh
|
UTF-8
| 1,680
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Extract the ordered primer pairs for the CG panel, run UCSC isPcr against
# hg19 to verify the amplicons in silico, and keep the matching records as a
# supplementary table. All paths are hard-coded for the original analysis host.
primerlist="/home/dyap/Projects/PrimerDesign/Splice/primer3/hct116_htert_primer_order.txt"
# Column layout of $primerlist (comma-separated):
#$5 length
#$6 left primer
#$8 right primer
primer_order="/home/dyap/Projects/PrimerDesign/Splice/primer3/hct116_htert-filtered.AmpliconManifest"
temp="/home/dyap/dyap_temp/CG_Panel_primers.csv"
temp2="/home/dyap/dyap_temp/CG_Panel_isPCR"
rm -f $temp
outfile="/home/dyap/Projects/Takeda_T3/CG/CG_Panel_Suppl_Table"
rm -f $outfile
# For every '@' record in the amplicon manifest, take its name (field 2) and
# look up length plus left/right primer sequences in the order sheet; emit one
# tab-separated line per primer pair into $temp (the isPcr input).
for i in `cat $primer_order | grep @ | awk -F"\t" '{print $2}'`
do
length=`grep -m1 "$i" $primerlist | awk -F"," '{print $5}'`
left=`grep -m1 "$i" $primerlist | awk -F"," '{print $6}'`
right=`grep -m1 "$i" $primerlist | awk -F"," '{print $8}'`
echo $i","$left","$right","$length | tr "," "\t" >> $temp
done
# isPCR is on beast at
command="/share/data/apps/isPcr/bin/x86_64/isPcr"
# database (hg19 2bit fa ) at
database="/share/data/apps/isPcr/isPcrSrc/isPcr/data/genomes/twoBit/hg19.2bit"
#database="/home/dyap/Projects/PrimerDesign/manual/gp140.2bit"
#database="/home/dyap/Projects/PrimerDesign/manual/"$name".2bit"
# IF reversecomplement of right primer is NOT required comment this
#flip="-flipReverse"
flip=""
# output format
output=fa # fasta format (default)
#output=bed # bed format (tab-delimited; Fields: chrom/start/end/name/score/strand)
#output=psl # blat format
# Name of the input file
inputfile=$temp
# Name of the output file
outputfile=$temp2
cat $inputfile
echo $outputfile
# Run in-silico PCR, echo the exact command for the log, then keep only the
# '@'-headed hit records as the supplementary table.
$command $database $flip "-out="$output $inputfile $outputfile -maxSize=100000000
echo $command" "$database" " $flip "-out="$output" " $inputfile" " $outputfile" -maxSize=100000000"
grep "@" $outputfile > $outfile
exit
| true
|
3449c4ca57bde88c41cee9d2223add7dad16c253
|
Shell
|
CV-AO/CV-DEMO-FRBT
|
/ao-front-module/ao-front-modulepack.sh
|
UTF-8
| 829
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Module package
# auth <<CV.AO T-SHIOTSUKA>>
#
# Announce start
echo "[START][ao-front-modulepack]`date`"
MODULEDIR=/opt/cv/ao-front-module
# Fetch the dropbox file list (.out .end) and build the unprocessed-diff file
echo "--- ao-front-filelist.sh ---"
${MODULEDIR}/ao-front-filelist.sh
# Empty-file check: exit early when there is nothing to import.
filename=/opt/cv-data/work/f-dbdiff.list
# BUG FIX: the file operand was corrupted ("$(unknown)"); count the lines of
# the diff list via stdin so wc prints the bare number (no awk needed).
NUM=`wc -l < "${filename}"`
# BUG FIX: use the numeric comparison -eq instead of string "=".
if [ ${NUM} -eq 0 ]; then
# Announce end
echo "[END][ao-front-modulepack]`date`"
exit 0
fi
# Build the import SQL file
echo "--- ao-front-sql.py ---"
python ${MODULEDIR}/ao-front-sql.py
# Import the SQL file and create the processed-completion trigger file
echo "--- ao-front-insert.sh ---"
${MODULEDIR}/ao-front-insert.sh
sleep 3
# Announce end
echo "[END][ao-front-modulepack]`date`"
| true
|
6240312a6cbcb727f2ff5a125b7d74cd0012146c
|
Shell
|
pione/pione
|
/misc/endurance-test/run.sh
|
UTF-8
| 1,299
| 3.796875
| 4
|
[
"MIT",
"Ruby"
] |
permissive
|
#!/bin/sh
# Endurance test driver: run `pione-client --rehearse` N times, recording each
# run's exit status and (real/user/sys) timing as CSV, killing runs that
# exceed the time limit.
N=100
LOCATION=example/HelloWorld/
CLEAR=1
LIMIT=10
usage() {
echo "Usage: run.sh [-n NUMBER] [-l LOCATION] [-z] [--limit SEC]"
echo " -n NUMBER do the test NUMBER times"
echo " -l LOCATION location of process document"
echo " -z don't clear output location"
echo " --limit SEC timeout after SEC seconds"
exit 1
}
# Long-option parsing needs external getopt(1); eval re-installs the
# normalized arguments as positional parameters.
OPT=`getopt -o n:l:z -l limit: -- "$@"`; [ $? -ne 0 ] && usage
eval set -- "$OPT"
while true
do
case $1 in
-n)
N=$2; shift 2
;;
-l)
LOCATION="$2"; shift 2
;;
-z)
CLEAR=0; shift
;;
--limit)
LIMIT=$2; shift 2
;;
--)
shift; break
;;
*)
usage
;;
esac
done
echo "endurance-test($N times)"
echo " location: $LOCATION"
echo " limit: $LIMIT"
# NOTE(review): "faluse" below is a typo in a runtime string; left as-is here.
echo " clear output: $CLEAR (1: true, 0: faluse)"
RESULT=endurance-test-result.txt
TIME=endurance-test-time.txt
echo "number, result" > ${RESULT}
echo "number, real, user, sys" > ${TIME}
for i in `seq 1 $N`
do
echo "--- ${i} ---"
if [ $CLEAR -eq 1 ]
then
rm -rf output
fi
# SIGKILL after $LIMIT seconds; GNU time appends "i, real, user, sys" per run.
timeout -s 9 $LIMIT time --quiet --append -o ${TIME} -f "${i}, %e, %U, %S" pione-client ${LOCATION} --rehearse
echo "${i}, $?" >> ${RESULT}
sleep 1
# Reap any leftover pione processes so runs do not interfere with each other.
pkill -KILL -fe pione-
done
| true
|
0125e900de210f77fabf67e36d6d7619114a86db
|
Shell
|
tmzullinger/fedora-ansible
|
/roles/supybot/files/meetings_by_team.sh
|
UTF-8
| 302
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Publish per-team views of recent meeting logs: every log file modified
# within the last 30 days (except fedora-meeting.*) gets symlinked into a
# directory named after its team — the part of the file name before the
# first dot.
BASELOCATION=/srv/web/meetbot/teams
cd "$BASELOCATION/.." || exit
recent_logs=$(find . -type f -mtime -30 | grep -v "fedora-meeting\.")
for logfile in $recent_logs
do
    team="$(basename "$logfile" | cut -d. -f1)"
    mkdir -p "$BASELOCATION/$team"
    ln -f -s "$PWD/$logfile" "$BASELOCATION/$team/"
done
| true
|
f571f087a388db2ff9cc4dea10fb01b9b6ff68c1
|
Shell
|
sjatgutzmann/docker.centos.jackrabbit
|
/run.sh
|
UTF-8
| 612
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: dispatch on the first argument. "run" launches the
# standalone Jackrabbit jar, "bash" drops into an interactive shell.
# With no argument we simulate the Dockerfile's CMD default and use "run".
mode=$1
if [ -z ${mode} ]; then
  echo -n "setting default start arg to "
  mode="run"
  echo $mode
fi
echo "starting this container with ${mode}"
case "$mode" in
  "run")
    # JAVA_OPTS / JACKRABBIT_HOME / JACKRABBIT_VERSION come from the image env.
    echo "try to start jackrabbit with java $JAVA_OPTS -jar ${JACKRABBIT_HOME}/jackrabbit-standalone-${JACKRABBIT_VERSION}.jar" \
    && java $JAVA_OPTS -jar ${JACKRABBIT_HOME}/jackrabbit-standalone-${JACKRABBIT_VERSION}.jar
    ;;
  "bash")
    echo "entering bash mode" \
    && /bin/bash
    ;;
esac
| true
|
177a7f719ceed50e91406898f9848d15adbc9fd5
|
Shell
|
wpeterson/dotfiles
|
/scripts/research-dump
|
UTF-8
| 271
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Dump the research database to /tmp/research.dump in pg_dump custom format.
# Extra pg_dump arguments (e.g. the database name) are passed through "$@".
# Requires $RESEARCH_DB_PASS in the environment.
set -e
TARGET=/tmp/research.dump
export PGPASSWORD=$RESEARCH_DB_PASS
# BUG FIX: the command was built as one flat string and re-expanded unquoted,
# which flattened "$@" and re-split any argument containing spaces or globs.
# Building it as an array keeps every argument intact.
CMD=(pg_dump -h alden-research-public.cuhiqihjaxwi.us-east-2.rds.amazonaws.com
  -U research --no-password --no-owner --no-privileges
  -Fc -f "$TARGET" "$@")
echo "${CMD[@]}"
"${CMD[@]}"
| true
|
3e8fabfe9cc24976290abd5243d10e71a759888a
|
Shell
|
fevrin/home
|
/.shellrc.d/functions/yessh
|
UTF-8
| 6,559
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
yessh() {
# handy for ssh'ing into servers and getting some common basic environmental modifications without having to copy ~/.bashrc, ~/.bash_aliases, etc. to each server first; it's all done over one connection, if possible
# inspired by <https://superuser.com/a/503785>
# input: server name and optional arguments passed to 'ssh'
# output: connects to the specific node and sets up the environment contained in the ~/.bashrc.d/remote/* files
# examples:
# yessh reg1node22
# yessh reg1node22 'cat /proc/cmdline'
# any files or symlinks in the ~/.bashrc.d/remote/ directory will be dereferenced and passed as contents of the bashrc
# file from the local to the remote host.
# they should be directly executable by bash on the remote host
# they will not be sourced or evaluated on the local host
# to define functions available locally on the remote host, you can run a
# command like this to combine existing functions into a single file
# just replace the paths, as needed:
# cat ~/.bashrc.d/functions/* >~/.bashrc.d/remote/functions
# NOTE(review): _verify_reqs, verbose, _print_var_vals, DOMAIN_REGEX and
# IP_REGEX are expected to be provided by the surrounding shell config.
_verify_reqs <<-EOF || return 1
base64
bzip2
rsync
ssh
verbose
EOF
local hostname
local port
local username
local ssh_args
local orig_command
local command
local COPY_CONFIG
local KEEP_CONFIG
# With more than one argument, walk the argument list ssh-style: pick out
# port, user@host, our own --copy-config/--keep-config flags, pass through
# ssh options, and treat the trailing words as the remote command.
if [[ $# -gt 1 ]]; then
while [[ $# -ge 1 ]]; do
local arg="$1"
verbose 8 "arg = $arg"
verbose 8 "\$@ = '$@'"
case $arg in
-p|--port)
shift
port="$1"
;;
*@*)
if [[ "$arg" =~ $DOMAIN_REGEX$ ]]; then
username="$(echo "$arg" | cut -d@ -f1)"
hostname="$(echo "$arg" | cut -d@ -f2)"
fi
;;
--copy-config)
COPY_CONFIG=1
;;
--keep-config)
KEEP_CONFIG=1
;;
-*)
# ssh flags without an argument are passed through as-is; any other
# "-x value" option consumes the following word too.
if [[ "$arg" =~ -[46AaCfGgKkMNnqsTtVvXxYy]+ ]]; then
ssh_args="$ssh_args $arg"
else
ssh_args="$ssh_args $arg"
shift
ssh_args="$ssh_args $1"
fi
;;
*)
if [[ -z "$hostname" ]]; then
[[ "$arg" =~ $DOMAIN_REGEX$ || "$arg" =~ $IP_REGEX$ ]] && hostname="$arg"
else
# the last argument(s) should be the command, like with the ssh client
orig_command="$@"
break
fi
;;
esac
shift
_print_var_vals \
hostname \
port \
username \
ssh_args \
orig_command
verbose 8
done
else
if [[ "$1" =~ .*@.* ]]; then
username="$(echo "$1" | cut -d@ -f1)"
hostname="$(echo "$1" | cut -d@ -f2)"
else
hostname="$1"
fi
fi
port="${port:-22}"
username="${username:-$USER}"
verbose 8 "final hostname = $hostname"
verbose 8 "final port = $port"
verbose 8 "final username = $username"
verbose 8 "final ssh_args = $ssh_args"
verbose 8 "final orig_command = $orig_command"
verbose 8 "final COPY_CONFIG = '$COPY_CONFIG'"
if [[ "$hostname" ]]; then
[[ -d "$HOME/.bashrc.d" ]] ||
verbose "$HOME/.bashrc.d doesn't exist!"
# inspired by <https://superuser.com/a/1078431>
# Pack all remote rc files into one bzip2+base64 blob for transport.
local bashrc_contents="$(
ls -1 ~/.bashrc.d/remote/* 2>/dev/null |
grep -v \.md$ | # do not include markdown files
xargs cat |
egrep -v '^ +#' | # remove all commented lines
bzip2 |
base64 -w0
)"
# Small blobs (<= 20 KiB — presumably to stay well under command-line
# length limits; TODO confirm) are inlined in the ssh command itself.
if [[ $(echo "$bashrc_contents" | wc -c) -le 20480 && "${COPY_CONFIG}" -ne 1 ]]; then
read -r -d '' command <<-EOF
bash --rcfile <(
which bunzip2 &>/dev/null || {
which yum &>/dev/null &&
echo "echo \"installing bzip2...\"" &&
yum install -qy bzip2 &>/dev/null;
};
echo "$bashrc_contents" | base64 --decode | bunzip2
)
EOF
else
# ssh will complain the argument list is too long, so there's no way around just uploading the files to temporary files
verbose 1 "copying rc content as a file due to its size ($(echo "$bashrc_contents" | wc -c))"
# generate a temp file name
local tempbashrc="$(mktemp /tmp/tmp.XXXXXXXXXX)"
# workaround for use on Mac OS X, which has no option for a dry run
[[ -f "$tempbashrc" ]] && rm -f "$tempbashrc"
echo "$bashrc_contents" >"$tempbashrc"
# copy the temporary bashrc file to the remote host, then delete it locally
rsync -avzPi -e "ssh -p $port" "$tempbashrc" ${username}@${hostname}:"$tempbashrc"
[[ "${KEEP_CONFIG}" -eq 1 ]] || rm -fv "$tempbashrc"
#scp -P $port "$tempbashrc" ${username}@${hostname}:"$tempbashrc"
command="bash --rcfile <(cat '$tempbashrc' | base64 --decode | bunzip2)"
fi
# inspired by <https://superuser.com/questions/671372/running-command-in-new-bash-shell-with-rcfile-and-c/671488#671488>
# necessary mess to mark just the output of the command (not any rcfile output)
# then decode it once the last sed is given the entire output
# \o2 = \n
# \03 = start of output
# those numbers are arbitrary but not likely to occur in typical output
[[ -n "$orig_command" ]] &&
orig_command="$(printf '%q' "$orig_command")" &&
_print_var_vals orig_command &&
command="$command -ic ${orig_command}\" |
tr '\n' '\2' | # convert all newlines to octal #2 so the output is a single line
sed -re 's;^;\o3;'\" | # insert an octal #3 at the beginning of the command output's one line
sed -rne 's;.*\o3;;' -e 's;\o2;\n;gp'" # now with the command's output alongside any other output from the rcfile, delete everything before the command output marker (octal #3) and convert back all octal #2s to newlines
[[ $(echo "$bashrc_contents" | wc -c) -le 20480 && "${COPY_CONFIG}" -ne 1 ]] || command="$command; rm '$tempbashrc'"
verbose 8 ssh -qt -p $port $ssh_args ${username}@${hostname} "$command"
ssh -qt -p $port $ssh_args ${username}@${hostname} "$command"
else
echo "you must specify a host!"
fi
}
| true
|
925bc58c00c3c894088e33390206b9df23f82c3b
|
Shell
|
zanhsieh/vagrant-salt-master-minion
|
/install-salt-minion.sh
|
UTF-8
| 1,131
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a salt-minion node: open the salt ports, install and configure
# the minion (master at 192.168.40.11), and pin host names in /etc/hosts.
# Each step is idempotent: it only runs when its marker/entry is absent.
#
# BUG FIX: the original guards used `[ ! \`grep -q ...\` ]`. Since grep -q
# prints nothing, the test collapsed to `[ ! ]`, which is always true, so the
# iptables rules and hosts entries were re-applied/appended on every run.
# Test grep's exit status directly instead.
if ! grep -q '4505\|4506' /etc/sysconfig/iptables; then
echo "Open port 4505, 4506"
sed -i 's|--dport 22 -j ACCEPT|--dport 22 -j ACCEPT\n-A INPUT -p tcp -m state --state NEW -m tcp --dport 4505 -j ACCEPT\n-A INPUT -p tcp -m state --state NEW -m tcp --dport 4506 -j ACCEPT\n|' /etc/sysconfig/iptables
service iptables restart
service iptables save
fi
# Marker file keeps the install/config section one-shot.
if [ ! -f "/var/salt_minion_setup" ]; then
echo "Install salt-minion"
yum -y --enablerepo=epel install salt-minion
chkconfig salt-minion on
sed -i 's|#master: salt|#master: salt\nmaster: 192.168.40.11|' /etc/salt/minion
service salt-minion start
#sed -i 's|^other_args=$|other_args="--insecure-registry master:5000"|' /etc/sysconfig/docker
service docker restart
touch /var/salt_minion_setup
fi
echo "Check host resolution to /etc/hosts"
if ! grep -q 192.168.40.11 /etc/hosts; then
echo "192.168.40.11 master" >> /etc/hosts
fi
if ! grep -q 192.168.40.12 /etc/hosts; then
echo "192.168.40.12 minion1" >> /etc/hosts
fi
if ! grep -q 192.168.40.13 /etc/hosts; then
echo "192.168.40.13 minion2" >> /etc/hosts
fi
| true
|
4cc50d83349ddcced3c62fe67c16f4b4ab59ddb6
|
Shell
|
akhmetov/fire
|
/vagrant_provision.sh
|
UTF-8
| 760
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: LAMP stack (Apache + PHP5 + MySQL) serving the "api"
# virtual host from /var/www/public, with MySQL root password "root".
sudo apt-get update
# Pre-seed debconf so the MySQL install is non-interactive.
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
sudo apt-get install -y apache2 php5 mysql-server php5-mysql php5-mcrypt
sudo php5enmod mcrypt
# Replace the stock default site with our own vhost and enable mod_rewrite.
sudo a2dissite 000-default
sudo a2enmod rewrite
sudo sh -c 'cat > /etc/apache2/sites-available/api.conf <<EOL
<VirtualHost *:80>
DocumentRoot /var/www/public
<Directory /var/www/public>
AllowOverride All
</Directory>
</VirtualHost>
EOL'
sudo a2ensite api
# Development setting: surface PHP errors in responses.
sudo sed -i "s/display_errors = .*/display_errors = On/" /etc/php5/apache2/php.ini
sudo service apache2 restart
mysql -u root -proot -e "CREATE DATABASE api"
| true
|
5c571c77797d9fb268a16ff8f6791b30b3820fb5
|
Shell
|
youjiahe/sh
|
/clone.sh
|
UTF-8
| 692
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Clone n KVM guests from the node_new.qcow2 backing image. Guest names are
# host<i>; indexes whose disk image or domain XML already exists are skipped.
read -p "请输入虚拟机数量:" n
[ $n -le 0 ] && exit
j=1
i=1
while [ $j -le $n ]
do
    if [ ! -e /etc/libvirt/qemu/host${i}.* ] && [ ! -e /var/lib/libvirt/images/host${i}.* ]; then
        # Thin-provision the disk against the backing file, then derive the
        # domain XML from the node template.
        cd /var/lib/libvirt/images && qemu-img create -b node_new.qcow2 -f qcow2 host${i}.img 20G &>/dev/null
        cd /etc/libvirt/qemu && sed "s,node,host${i}," node.xml > host${i}.xml
        cd /etc/libvirt/qemu
        # BUG FIX: the status used to be tested after `sleep 0.3`, so $? held
        # sleep's status (always 0), not virsh's. Capture virsh's exit code
        # immediately and test that instead.
        virsh define host${i}.xml &>/dev/null
        rc=$?
        sleep 0.3
        [ $rc -eq 0 ] && echo -e "虚拟机host$i\033[32m 创建成功[OK]\033[0m" || echo -e "\033[31m虚拟机host$i 创建失败[NG]\033[0m"
        let j++
    else
        let i++
        continue
    fi
done
| true
|
61c973b5c15c7237a8b85985b31c7313a11a88a3
|
Shell
|
tglatt/emjpm
|
/.k8s/postgres/restore/configmap/restore.sh
|
UTF-8
| 234
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Restore a custom-format pg_dump archive (path passed as $1) into the emjpm
# database, then re-grant schema rights that --clean may have dropped.
pg_isready
echo "Restore with ${1}"
pg_restore \
--clean \
--if-exists \
--exit-on-error \
--format=c \
--verbose \
--dbname emjpm \
${1}
psql -d emjpm -c "GRANT ALL ON SCHEMA public TO emjpm"
# Give the server a moment before the surrounding job continues.
sleep 10s
| true
|
51f5871202f210fc36da4ab5e4a9f060e47989be
|
Shell
|
wizd3m/wadam-iso
|
/clean
|
UTF-8
| 296
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Wipe the ISO build artifacts: the 'out' and 'work' directories, if present.

# Remove one build directory when it exists, reporting either way.
# $1 = directory name, $2 = capitalized name used in the "not found" message.
scrub() {
    local dir="$1" cap="$2"
    if [ -d "$dir" ]; then
        echo ":: Removing $dir folder..."
        sudo rm -rf "$dir"
    else
        echo ":: $cap folder not found..."
    fi
}

scrub out Out
scrub work Work

echo
echo ":: Done..."
| true
|
3afb2b174206a0c653aa920751acf73ee702077f
|
Shell
|
temmuzyavuzer/MPI---Bigram-of-large-dataset
|
/ngram.sh
|
UTF-8
| 238
| 2.875
| 3
|
[] |
no_license
|
# Run the MPI bigram (n-gram size 2) extraction over the fixed data set.
# Usage: ./ngram.sh <number-of-processes>
DATA="/home/thales/Desktop/ngram/Data"
OUTPUT="/home/thales/Desktop/ngram/Output/"
NGRAM_SIZE=2
if test "$#" -eq 1; then
    mpiexec -n $1 python3 parallel.py $DATA $OUTPUT $NGRAM_SIZE $NGRAM_SIZE
else
    echo "./ngram.sh numberof"
fi
| true
|
6eee0a936b9973acbaadc046a6e248adbc380063
|
Shell
|
cpnuj/chaos
|
/tools/install_i386_tools.sh
|
UTF-8
| 2,542
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This script is used to install i386-elf-tools for cross compile
# our os.
# Run this file in build directory and sudo
#
# Reference:
# https://steemit.com/esteem/@geyu/10-cross-compiler-i386-elf-gcc
#
# Requirements: gcc g++ libx11-dev
#
# Each stage drops a *.mk marker file on success so re-running the script
# skips already-built components.
set -euxo pipefail
gmp=gmp-4.3.2
mpfr=mpfr-2.4.2
mpc=mpc-1.0.1
# build gmp
if [ ! -f gmp.mk ]; then
curl -O http://mirrors.nju.edu.cn/gnu/gmp/$gmp.tar.bz2
tar xf $gmp.tar.bz2
pushd $gmp
./configure
make
sudo make install
popd
touch gmp.mk
fi
# build mpfr
if [ ! -f mpfr.mk ]; then
curl -O http://mirrors.nju.edu.cn/gnu/mpfr/$mpfr.tar.bz2
tar xf $mpfr.tar.bz2
pushd $mpfr
./configure
make
sudo make install
popd
touch mpfr.mk
fi
# build mpc
if [ ! -f mpc.mk ]; then
curl -O http://mirrors.nju.edu.cn/gnu/mpc/$mpc.tar.gz
tar xf $mpc.tar.gz
pushd $mpc
./configure
make
sudo make install
popd
touch mpc.mk
fi
# Toolchain is installed under ./i386-elf-gcc, targeting bare-metal i386.
PREFIX=$(pwd)/i386-elf-gcc
TARGET=i386-elf
# build binutils
binutils=binutils-2.24
if [ ! -f binutils.mk ]; then
# If the link 404's, look for a more recent version
curl -O http://mirrors.nju.edu.cn/gnu/binutils/$binutils.tar.gz
tar xf $binutils.tar.gz
mkdir binutils-build
pushd binutils-build && \
../binutils-2.24/configure \
--target=$TARGET \
--enable-interwork \
--enable-multilib \
--disable-nls \
--disable-werror \
--prefix=$PREFIX 2>&1 | tee configure.log
make all install 2>&1 | tee make.log
popd
touch binutils.mk
fi
# build gcc-4.9.1
if [ ! -f gcc.mk ]; then
curl -O http://mirrors.nju.edu.cn/gnu/gcc/gcc-4.9.1/gcc-4.9.1.tar.bz2
tar xf gcc-4.9.1.tar.bz2
mkdir gcc-build
pushd gcc-build
../gcc-4.9.1/configure \
--target=$TARGET \
--prefix=$PREFIX \
--disable-nls \
--disable-libssp \
--enable-languages=c \
--without-headers
make all-gcc
# make all-target-libgcc
make install-gcc
# make install-target-libgcc
popd
touch gcc.mk
fi
# build bochs with --enable-gdb-stub
bochs_url='https://jaist.dl.sourceforge.net/project/bochs/bochs/2.6.11/bochs-2.6.11.tar.gz'
if [ ! -f bochs.mk ]; then
curl -O $bochs_url
tar -xvf bochs-2.6.11.tar.gz
mkdir bochs
pushd bochs-2.6.11
# build bochs-gdb and bochs independtly, since --enable-gdb-stub and
# --enable-debugger is mutually exclusive
./configure --enable-gdb-stub
make
cp bochs ../bochs/bochs-gdb
cp bximage ../bochs/bximage
make clean
./configure --enable-debugger
make
cp -r bios ../bochs/
cp bochs ../bochs/
popd
touch bochs.mk
fi
| true
|
f8331b5d7613530b1088916ffbbbe461c4b9fa12
|
Shell
|
ArtDu/DA
|
/labs/lab_7/test_code/wrapper.sh
|
UTF-8
| 770
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the solution (../main) and the reference implementation (./main),
# generate random test inputs, then diff the reference output against the
# solution's for every tests/*.t case.
# Usage: wrapper.sh <count of tests>
fail=false
if ! make -C ../; then
echo "ERROR: Failed to compile file."
exit 1
fi
if ! make ; then
echo "ERROR: Failed to compile file."
exit 1
fi
if [[ $# -ne 1 ]] ; then
echo "ERROR: Failed in args."
exit 1
fi
mkdir -p tests
if ! python3 test_gen.py $1 ; then
echo "ERROR: Failed to python generate tests."
exit 1
fi
# FIX: iterate via the shell glob instead of parsing `ls` output, which is
# fragile (word-splitting, special characters).
for test_file in tests/*.t; do
answer_file="${test_file%.*}"
echo "Execute ${test_file}"
# Run the solution, then the reference; skip the diff when either crashes.
if ! ../main < ${test_file} > "${answer_file}.my" ; then
echo "ERROR"
continue
fi
if ! ./main < ${test_file} > "${answer_file}.a" ; then
echo "ERROR"
continue
fi
diff "${answer_file}.a" "${answer_file}.my"
done
| true
|
10e43e218cd59dff9412c9717c00e83c8274eab8
|
Shell
|
zm-git-dev/LRSDAY
|
/pipelines/LRSDAY.10.Mitochondrial_Gene_Annotation.sh
|
UTF-8
| 3,626
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e -o pipefail
#######################################
# load environment variables for LRSDAY
source ./../../env.sh
PERL5LIB="$PERL5LIB:$pirobject_dir/lib"
# MFannot and its helpers locate their models/configs via these variables.
export RNAFINDER_CFG_PATH="$rnafinder_dir"
export MF2SQN_LIB="$mf2sqn_dir/lib"
export MFANNOT_LIB_PATH="$mfannot_data_dir/protein_collections"
export MFANNOT_EXT_CFG_PATH="$mfannot_data_dir/config"
export MFANNOT_MOD_PATH="$mfannot_data_dir/models"
export BLASTMAT="$blast_matrices_dir"
export EGC="$mfannot_data_dir/EGC"
export ERPIN_MOD_PATH="$mfannot_data_dir/models/Erpin_models"
export PIR_DATAMODEL_PATH="$pirobject_dir/PirModels"
export PATH="$flip_dir:$blast_dir:$muscle_dir:$umac_dir:$hmmer_dir:$erpin_dir:$tbl2asn_dir:$pirobject_dir:$pirmodels_dir:$hmmsearchwc_dir:$exonerate_dir:$emboss_dir:$mf2sqn_dir:$mf2sqn_dir:$grab_fasta_dir:$rnafinder_dir:$mfannot_dir:$PATH"
#######################################
# set project-specific variables
prefix="SK1" # The file name prefix for the processing sample. Default = "SK1" for the testing example.
genome="./../07.Supervised_Final_Assembly/$prefix.assembly.final.fa" # The file path of the input genome assembly.
chrMT_tag="chrMT" # The sequence name for the mitochondrial genome in the input genome assembly, if there are multiple corresponding contigs/scaffolds, use a single ';' to separate them. e.g. "chrMT_1;chrMT_2". Default = "chrMT".
genetic_code_table=3 # The NCBI genetic code table (https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi) for the annotated mitochondrial genome. Default = 3 (i.e. Yeast Mitochondria)
debug="no" # Whehter to keep intermediate files for debugging. Use "yes" if prefer to keep intermediate files, otherwise use "no". Default = "no".
######################################
# process the pipeline
# Extract the mitochondrial contig(s), annotate them with MFannot, then
# derive GFF3 / CDS / protein / PoFF-format outputs from the annotation.
echo $chrMT_tag | sed -e "s/;/\n/g" > $prefix.assembly.chrMT.list
#$LRSDAY_HOME/scripts/select_fasta_by_list.pl -i $genome -l $prefix.assembly.chrMT.list -m reverse -o $prefix.assembly.nuclear_genome.fa
$LRSDAY_HOME/scripts/select_fasta_by_list.pl -i $genome -l $prefix.assembly.chrMT.list -m normal -o $prefix.assembly.mitochondrial_genome.fa
mkdir tmp
$mfannot_dir/mfannot \
--genetic $genetic_code_table \
--outputfile $prefix.mitochondrial_genome.mfannot.out \
--logfile $prefix.mitochondrial_genome.mfannot.log \
--T $(pwd)/tmp \
$prefix.assembly.mitochondrial_genome.fa
perl $LRSDAY_HOME/scripts/mfannot2gff3.pl -i $prefix.mitochondrial_genome.mfannot.out -o $prefix.mitochondrial_genome.mfannot.gff3 -m lite
perl $LRSDAY_HOME/scripts/extract_cds_from_tidy_gff3.pl -r $prefix.assembly.mitochondrial_genome.fa -g $prefix.mitochondrial_genome.mfannot.gff3 -o $prefix.mitochondrial_genome.mfannot.cds.fa
perl $LRSDAY_HOME/scripts/cds2protein.pl -i $prefix.mitochondrial_genome.mfannot.cds.fa -t $genetic_code_table -p $prefix.mitochondrial_genome.mfannot
perl $LRSDAY_HOME/scripts/prepare_PoFFgff_simple.pl -i $prefix.mitochondrial_genome.mfannot.gff3 -o $prefix.mitochondrial_genome.mfannot.PoFF.gff
perl $LRSDAY_HOME/scripts/prepare_PoFFfaa_simple.pl -i $prefix.mitochondrial_genome.mfannot.trimmed_cds.fa -o $prefix.mitochondrial_genome.mfannot.PoFF.ffn
perl $LRSDAY_HOME/scripts/prepare_PoFFfaa_simple.pl -i $prefix.mitochondrial_genome.mfannot.pep.fa -o $prefix.mitochondrial_genome.mfannot.PoFF.faa
# clean up intermediate files
if [[ $debug == "no" ]]
then
rm -r tmp
fi
############################
# checking bash exit status
# NOTE(review): $? here reflects the preceding cleanup block, and set -e has
# already aborted the script on any earlier failure — so this is effectively
# always the success path. Left as-is to preserve behavior.
if [[ $? -eq 0 ]]
then
echo ""
echo "LRSDAY message: This bash script has been successfully processed! :)"
echo ""
echo ""
exit 0
fi
############################
| true
|
b898f3d493d86955a5e94b02a76b07827d8ab991
|
Shell
|
LiYang412/tools
|
/install-cpplint-codestyle-check-githook.sh
|
UTF-8
| 259
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the cpplint style-check script as the repository's pre-commit hook.
# Run from the repository root (the directory that contains .git).
HOOKPATH=$(find $PWD -wholename "*.git/hooks" -type d)
# Quote the expansion: an unquoted multi-word/empty result would break [ -z ].
echo "${HOOKPATH}"
if [ -z "${HOOKPATH}" ]; then
echo "please run in dir with .git"
exit 1
fi
# BUG FIX: `cd .git/hooks; ln ...` would create the symlink in the current
# directory whenever the cd failed; guard the cd instead.
cd .git/hooks || exit 1
ln -s ../../cpplint/cpplint_pre_commit_hook.sh pre-commit
| true
|
221baf9e0cbccac1dd949d750e4a9be6cc220bbc
|
Shell
|
qrohlf/dotfiles
|
/bin/screengif.sh
|
UTF-8
| 415
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Convert a screen recording to an optimized GIF.
# Usage: screengif.sh <input-video> <output.gif> [width]   (width default 650)
# Two-pass ffmpeg: first generate a palette, then map frames onto it, and
# finally shrink the result with gifsicle (written to <output.gif>.opti).
palette="/tmp/palette.png"
scale=${3:-"650"}
filters="fps=15,scale=$scale:-1:flags=lanczos"
# filters="fps=15"
# use filters="fps=15" for no rescale
ffmpeg -v warning -i "$1" -vf "palettegen" -y $palette
ffmpeg -v warning -i "$1" -i $palette -lavfi "$filters [x]; [x][1:v] paletteuse" -y "$2"
echo "unoptimized file size:"
du -sh "$2"
echo "optimizing:"
gifsicle -O3 "$2" > "$2.opti"
du -sh "$2.opti"
| true
|
8969431462a67017d039c6d3d6ae3c09cab61aea
|
Shell
|
Vienta/BlogArticle
|
/package/PackageShell/Resign-ipa/resign.sh
|
UTF-8
| 3,782
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Re-sign the "mother" iOS package for every configuration directory under
# ./module: unpack the ipa, swap in that module's provisioning profile, icons,
# bundle id/version, re-run codesign, and emit a timestamped ipa per module.
# Each module provides assets plus a resign.conf defining APP_ID, APP_NAME,
# ASSETS_PATH, BUILD_PATH, certificate and version variables.
for file2 in `ls -a ./module`
do
if [ x"$file2" != x"." -a x"$file2" != x".." -a x"$file2" != x".DS_Store" ]; then
echo $file2
#Conf file
CONF=./module/$file2/resign.conf
echo $CONF
#Datetime
NOW=$(date +"%Y%m%d_%s")
#Load config
if [ -f ${CONF} ]; then
. ${CONF}
fi
#Temp
TEMP="temp"
if [ -e ${TEMP} ]; then
echo "ERROR: temp already exists"
exit 1
fi
#Check app ID
if [ -z ${APP_ID} ]; then
echo "ERROR: missing APP_ID"
exit 1
fi
echo ${APP_ID}
#Create build dir
if [[ ! -d ${BUILD_PATH} ]]; then
mkdir ${BUILD_PATH}
fi
#Copy mother package
if [[ ! -f "../Package/ipa/QA/packageExample.ipa" ]]; then
echo "mother package not exists"
exit 1
fi
cp ../Package/ipa/QA/packageExample.ipa ./module/$file2${ASSETS_PATH}/packageExample.ipa
#Unzip the mother ipa
echo "Unzip ipa"
unzip -q ./module/$file2${ASSETS_PATH}${IPA_NAME}.ipa -d ${TEMP}
#Remove old Codesignature
echo "Remove old CodeSignature"
rm -r "${TEMP}/Payload/${APP_NAME}.app/_CodeSignature" "${TEMP}/Payload/${APP_NAME}.app/CodeResources" 2> /dev/null | true
#Replace embedded mobil provisioning profile
echo "Replace embedded mobile provisioning profile"
cp "./module/$file2${ASSETS_PATH}${PROFILE_NAME}.mobileprovision" "${TEMP}/Payload/${APP_NAME}.app/embedded.mobileprovision"
#Change icon
echo "Change icon"
cp "./module/$file2${ASSETS_PATH}/icon_120.png" "${TEMP}/Payload/${APP_NAME}.app/AppIcon60x60@2x.png"
cp "./module/$file2${ASSETS_PATH}/icon_180.png" "${TEMP}/Payload/${APP_NAME}.app/AppIcon60x60@3x.png"
#Change Bundleversion
if [[ ! -z ${APP_BUNDLE_VERSION} ]]; then
/usr/libexec/PlistBuddy -c "Set CFBundleVersion ${APP_BUNDLE_VERSION}" ${TEMP}/Payload/${APP_NAME}.app/Info.plist
fi
#Change CFBundleShortVersionString
if [[ ! -z ${APP_BUNDLE_SHORT_VERSION_STRING} ]]; then
/usr/libexec/PlistBuddy -c "Set CFBundleShortVersionString ${APP_BUNDLE_SHORT_VERSION_STRING}" ${TEMP}/Payload/${APP_NAME}.app/Info.plist
fi
#Change Bundleidentifier
/usr/libexec/PlistBuddy -c "Set CFBundleIdentifier ${APP_ID}" ${TEMP}/Payload/${APP_NAME}.app/Info.plist
#Create entitlements from template
ENTITLEMENTS=$(<./templates/entitlements.template)
ENTITLEMENTS=${ENTITLEMENTS//#APP_ID#/$APP_ID}
ENTITLEMENTS=${ENTITLEMENTS//#APP_PREFIX#/$APP_PREFIX}
echo ${ENTITLEMENTS} > ${TEMP}/entitlements.temp
#Re-sign
# Note the difference in codesign arguments here: the commented variant
# below additionally passed --resource-rules, the active one does not.
#/usr/bin/codesign -f -s "${CERTIFICATE_TYPE}: ${CERTIFICATE_NAME}" --identifier "${APP_ID}" --entitlements "${TEMP}/entitlements.temp" --resource-rules "${TEMP}/Payload/${APP_NAME}.app/ResourceRules.plist" "${TEMP}/Payload/${APP_NAME}.app"
/usr/bin/codesign -f -s "${CERTIFICATE_TYPE}: ${CERTIFICATE_NAME}" --identifier "${APP_ID}" --entitlements "${TEMP}/entitlements.temp" "${TEMP}/Payload/${APP_NAME}.app"
#Remove copyed mother package
echo "Remove mother package"
rm -rf ./module/$file2${ASSETS_PATH}packageExample.ipa
#Re-package
echo "Re-package"
cd ${TEMP}
zip -qr "${IPA_NAME}_resigned_${NOW}.ipa" Payload
mv ${IPA_NAME}_resigned_${NOW}.ipa ../${BUILD_PATH}/${IPA_NAME}_${file2}_${NOW}.ipa
#Remove temp
cd ../
rm -rf ${TEMP}
fi
done
exit 0
| true
|
3153c800dbb51abb610b26b0b3ecddf46e11c988
|
Shell
|
gridcoin-community/Gridcoin-Research
|
/src/bdb53/dist/validate/s_chk_pubdef
|
UTF-8
| 4,706
| 3.390625
| 3
|
[
"BSD-3-Clause",
"Sleepycat",
"MIT"
] |
permissive
|
#!/bin/sh -
#
# Reconcile the list of public defines with the man pages and the Java files.
#
# Cross-checks dist/pubdef.in (the master list of public DB_* defines, with
# per-entry flags: D=documented, I=in db.in/api_flags.in, J=Java API,
# N=Java native layer) against the headers and the Java sources, reporting
# any symbol present on one side but missing on the other. Exits non-zero
# when any mismatch is found.
d=../..
[ -f $d/LICENSE ] || {
echo 'FAIL: cannot find source distribution directory.'
exit 1
}
docs=$d/docs_src
p=$d/dist/pubdef.in
exitv=0
# remove m4 doc tests, m4 has been removed for 4.8
# TODO: add test for csharp const
#cat <<END_OF_TEXT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#Check that pubdef.in has everything listed in m4.links.
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#END_OF_TEXT
#f=$docs/m4/m4.links
#sed -n \
#	-e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
#	-e d < $f |
#while read name; do
#	if `egrep -w "$name" $p > /dev/null`; then
#		:
#	else
#		echo "$f: $name is missing from $p"
#		exitv=1
#	fi
#done
#cat <<END_OF_TEXT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#Check that m4.links has everything listed in pubdef.in.
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#END_OF_TEXT
#f=$docs/m4/m4.links
#sed '/^#/d' $p |
#while read name isdoc isinc isjava; do
#	if `egrep -w "^.1, $name" $f > /dev/null`; then
#		[ "X$isdoc" != "XD" ] && {
#			echo "$name should not appear in $f"
#			exitv=1
#		}
#	else
#		[ "X$isdoc" = "XD" ] && {
#			echo "$name does not appear in $f"
#			exitv=1;
#		}
#	fi
#done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that pubdef.in has everything listed in db.in plus api_flags.in.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
# Harvest DB_*/DB2_* names from the headers and require each in pubdef.in.
cat $d/src/dbinc/db.in $d/src/dbinc_auto/api_flags.in | sed -n \
-e 's/^#.*[	 ]\(DB_[A-Z_0-9][A-Z_0-9]*\).*/\1/p' \
-e 's/^#.*[	 ]\(DB2_[A-Z_0-9][A-Z_0-9]*\).*/\1/p' \
-e 's/[	 ]\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
-e d |
while read name; do
if `egrep -w "$name" $p > /dev/null`; then
:
else
echo "db.in/api_flags.in: $name is missing from $p"
exitv=1
fi
done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that api_flags.in plus db.in has everything listed in pubdef.in.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
# Reverse direction: each pubdef.in entry flagged "I" must be in the headers.
sed '/^#/d' $p |
while read name isdoc isinc isjava iscsharp; do
if `egrep -w "^#.*[	 ]$name|[	 ]$name=[0-9][0-9]*" \
$d/src/dbinc/db.in $d/src/dbinc_auto/api_flags.in > /dev/null`; then
[ "X$isinc" != "XI" ] && {
echo "$name should not appear in db.in/api_flags.in"
exitv=1
}
else
[ "X$isinc" = "XI" ] && {
echo "$name does not appear in db.in/api_flags.in"
exitv=1
}
fi
done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that pubdef.in has everything listed in DbConstants.java.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
j=$d/lang/java/src/com/sleepycat/db
f=$j/internal/DbConstants.java
sed -n -e 's/.*int[	 ]\([^ ]*\).*;/\1/p' < $f |
while read name; do
if `egrep -w "$name" $p > /dev/null`; then
:
else
echo "$f: $name is missing from $p"
exitv=1
fi
done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that DbConstants.java has everything listed in pubdef.in.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
f=$j/internal/DbConstants.java
sed '/^#/d' $p |
while read name isdoc isinc isjava iscsharp; do
if `egrep -w "int[	 ]$name =" $f > /dev/null`; then
[ "X$isjava" != "XJ" ] && {
echo "$name should not appear in $f"
exitv=1
}
else
[ "X$isjava" = "XJ" ] && {
echo "$name does not appear in $f"
exitv=1
}
fi
done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that all constants in pubdef.in are wrapped by the Java API.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
#Strip out Javadoc comments
t=__1
cat $j/*.java $j/internal/Db.java $j/internal/DbEnv.java \
$j/internal/db_javaJNI.java | sed '/\/\*\*/,/\*\// d' > $t
sed '/^#/d' $p |
while read name isdoc isinc isjava iscsharp; do
if `egrep -w "$name" $t > /dev/null`; then
[ "X$isjava" != "XJ" ] && {
echo "$name should not appear in the Java API"
exitv=1
}
else
[ "X$isjava" = "XJ" ] && {
echo "$name does not appear in the Java API"
exitv=1
}
fi
done
cat <<END_OF_TEXT
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Check that all constants in pubdef.in are wrapped by the Java native layer.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
END_OF_TEXT
sed '/^#/d' $p |
while read name isdoc isinc isjava iscsharp; do
if `egrep -w "$name" $d/lang/java/libdb_java/db_java_wrap.c > /dev/null`; then
[ "X$isjava" != "XN" ] && [ "X$isjava" != "XJ" ] && {
echo "$name should not appear in the Java native layer"
exitv=1
}
else
[ "X$isjava" = "XN" ] && {
echo "$name does not appear in the Java native layer"
exitv=1
}
fi
done
rm -f $t
exit $exitv
| true
|
465682ea1fc404c0bcd95c08c8e2574cb86980eb
|
Shell
|
stomcavage/bin
|
/update_vim_bundles.sh
|
UTF-8
| 425
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/local/bin/bash
# Update every pathogen-style Vim bundle under ~/.vim/bundle by running
# `git pull origin master` inside each bundle directory.
PATH=/usr/local/bin:/usr/local/sbin:~/bin:/usr/bin:/bin:/usr/sbin:/sbin
GIT=`which git`
START_DIR=`pwd`
VIM_DIR=${HOME}/.vim/bundle
# FIX: iterate directories with a glob instead of parsing `ls -l` columns —
# the old approach's own comment admitted the column index varies per OS.
for BUNDLE_PATH in "${VIM_DIR}"/*/
do
    BUNDLE=$(basename "$BUNDLE_PATH")
    echo "Updating ${BUNDLE}"
    # Guard the cd so a broken bundle dir doesn't pull in the wrong repo.
    cd "${VIM_DIR}/${BUNDLE}" || continue
    ${GIT} pull origin master
    echo
done
cd "$START_DIR"
| true
|
a5aa0446bb692788819af995199783bea20abe53
|
Shell
|
j-cube/alembic-builder
|
/env-build-config.sh
|
UTF-8
| 664
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build configuration for the alembic-builder scripts. This file must be
# sourced (it exports variables into the caller's shell); running it directly
# is rejected below.
if [[ $_ = $0 ]] ; then
echo "ERROR: the build config script MUST be sourced!"
exit 1
fi
if [ -z "${TOP_BUILD_DIR}" ]; then
echo "TOP_BUILD_DIR variable not defined"
exit 1
fi
# -- Edit variables here ----------------------------------------------
export TGT=/opt/jcube
export PYTHON_VERSION=2.6
# target system architecture addressing
# valid values: "yes" (64 bit), "no" (32 bit)
export TARGET_64=yes
# -- END OF USER-SETTABLE VARIABLES -----------------------------------
#export TARGET_PYTHON=${TGT}/bin/python${PYTHON_VERSION}
export TARGET_PYTHON=${TGT}/bin/python
# Marker checked by the build scripts to confirm this file was sourced.
export BUILD_CONFIG_READY=yes
echo "Build configuration setup."
| true
|
97dd128b15292b42035bbd9b8ab8675e6151d5fb
|
Shell
|
palladius/gce-spectre-meltdown-checker
|
/bin/create-vms.sh
|
UTF-8
| 1,179
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create one test VM per image so the Spectre/Meltdown startup script can be
# run against each distribution.
source "env.sh"

VERSION="$(cat VERSION)"
set -x

# PROD
#IMAGES=$( cat images.list | xargs)
# DEV - manhouse :)
IMAGES="debian-9-drawfork-v20180102 centos-7-v20180104 rhel-7-v20180104 ubuntu-1404-trusty-v20171208"

SCOPES="https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append"

for IMAGE in $IMAGES ; do
	# "debian-9-drawfork-v20180102" => "debian-9-drawfork"
	SHORT_IMAGE_NAME=$( echo "$IMAGE" | cut -f 1-3 -d- )
	gcloud beta compute --project "$PROJECT_ID" \
		instances create "test-vuln-$SHORT_IMAGE_NAME" \
		--zone "$ZONE" \
		--machine-type "f1-micro" \
		--network "default" \
		--maintenance-policy "MIGRATE" \
		--min-cpu-platform "Automatic" --tags "http-server" \
		--image "$IMAGE" \
		--metadata-from-file "startup-script=startup-scripts/spectre-check.sh" \
		--image-project "eip-images" \
		--boot-disk-size "10" \
		--boot-disk-type "pd-standard"
	# Marker file recording which image was provisioned at which version.
	# Fixed: original referenced undefined $VER instead of $VERSION.
	touch "created-${SHORT_IMAGE_NAME}-v${VERSION}.touch"
done
| true
|
d8ca547f1d647822eb85324d21e62db32553a45e
|
Shell
|
Udacity-Cloud-DevOps-George-Projects/Project2
|
/UdagramDeleteStack.sh
|
UTF-8
| 3,965
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Tear down the Udagram dev environment: empty the S3 bucket, delete the
# CloudFormation stack, remove the SSH key pair (AWS side and local file),
# delete all SSM parameters, then remove the local variables file.

#Read environment parameters from udagram-dev-variables.txt file and add them to array
# EnvValuesArr[0]=stack name, [1]=AWS region, [2]=SSH key pair name
EnvValuesArr=()
while IFS= read -r line; do
 EnvValuesArr+=("$line")
done < ./udagram-dev-variables.txt

#Empty S3 Buket
echo ""
echo -e "\e[1;32mTask1:\e[0m"
echo "Deleting S3 Bucket objects...."
#Get S3 Bucket Name from the stack resources (bucket must be empty before
#CloudFormation can delete it)
StackS3BuckName=$(aws cloudformation describe-stack-resources --stack-name "${EnvValuesArr[0]}" --logical-resource-id S3Bucket --query "StackResources[0].PhysicalResourceId" --output text)
aws s3 rm "s3://$StackS3BuckName" --recursive

#Delete CloudFormation Stack
echo ""
echo -e "\e[1;32mTask2:\e[0m"
echo "Deleting CloudFormation Stack ${EnvValuesArr[0]} from AWS Region ${EnvValuesArr[1]}...."
aws cloudformation delete-stack --stack-name "${EnvValuesArr[0]}" --region "${EnvValuesArr[1]}"
echo ""
echo "Deleting Cloud Formation Stack ${EnvValuesArr[0]} on AWS Region ${EnvValuesArr[1]} has been initiated"
echo -e "To Monitor stack deletion events open another session and run command:\e[1;34m aws cloudformation describe-stacks --stack-name ${EnvValuesArr[0]} --region ${EnvValuesArr[1]} --query \"Stacks[0].[StackName, StackStatus]\" --output text \e[0m"

#Wait until stack deletion completes
aws cloudformation wait stack-delete-complete --stack-name "${EnvValuesArr[0]}" --region "${EnvValuesArr[1]}"
echo "Stack has been deleted"

#Delete the SSH Key Pair
echo ""
echo -e "\e[1;32mTask3:\e[0m"
echo "Deleting SSH Key Pair from AWS Region ${EnvValuesArr[1]}...."
aws ec2 delete-key-pair --key-name "${EnvValuesArr[2]}" --region "${EnvValuesArr[1]}"

#Delete the SSH key from user's SSH directory
echo ""
echo -e "\e[1;32mTask4:\e[0m"
echo "Deleting SSH key ${EnvValuesArr[2]}.pem from $HOME/.ssh directory...."
rm "$HOME/.ssh/${EnvValuesArr[2]}.pem"

#Delete AWS SSM Parameters
echo ""
echo -e "\e[1;32mTask5:\e[0m"
echo "Deleting AWS SSM Parameters from AWS Region ${EnvValuesArr[1]}...."
# One loop instead of twenty copy-pasted delete commands; order preserved.
SsmParamNames=(
 EnvironmentName
 S3BucketName
 AWSManagedPolicyARNForS3
 AWSManagedPolicyARNForCF
 AWSManagedPolicyARNForSSMCore
 AWSManagedPolicyARNForCloudWatchAgent
 VpcCIDR
 PublicSubnet1CIDR
 PublicSubnet2CIDR
 PrivateSubnet1CIDR
 PrivateSubnet2CIDR
 WebAppImageID
 WebAppInstancesNumber
 LinuxSSHKey
 SSHPrivateKey
 WebAppInstanceType
 WebAppDiskSize
 BastionHostImageID
 BastionHostInstanceType
 BastionHostDiskSize
)
for ParamName in "${SsmParamNames[@]}"; do
 aws ssm delete-parameter --name "/Dev/Udagram/${ParamName}" --region "${EnvValuesArr[1]}"
done

#Delete environment parameters file
echo ""
echo -e "\e[1;32mTask6:\e[0m"
echo "Deleting environment parameters file...."
rm ./udagram-dev-variables.txt
echo ""
echo -e "\e[1;32mDone\e[0m"
| true
|
b533874c6fdba0a7f2e15205786f53de8bfee231
|
Shell
|
P8P-7/core
|
/doc/push_docs.sh
|
UTF-8
| 1,459
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Deploy generated Doxygen HTML from html/ to the gh-pages branch.
# Runs in Travis CI; expects GH_REPO_REF and GH_REPO_TOKEN in the environment.
set -e # Exit with nonzero exit code if anything fails

SOURCE_BRANCH="master"
TARGET_BRANCH="gh-pages"
OUT="html-git"

# Pull requests and commits to other branches shouldn't try to deploy.
# Separate tests instead of the deprecated/ambiguous `[ ... -o ... ]` form.
if [ "$TRAVIS_PULL_REQUEST" != "false" ] || [ "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
    echo "Skipping deploy"
    exit 0
fi

# Clone the existing gh-pages for this repo into $OUT/
git clone -b "$TARGET_BRANCH" "https://git@${GH_REPO_REF}" "$OUT"

# Clean out existing contents. `$OUT/*` removes everything visible at the top
# level while leaving the hidden .git directory intact. (The previous
# `$OUT/**/*` pattern expanded as `$OUT/*/*` without globstar, so top-level
# files such as an old index.html were never removed.) `:?` aborts if OUT is
# somehow empty, so we can never run `rm -rf /*`.
rm -rf "${OUT:?}"/* || exit 0

# Configure git
cd "$OUT"
git config --global push.default simple
git config user.name "Travis CI"
git config user.email "travis@travis-ci.org"
cd ..

if [ -d "html" ] && [ -f "html/index.html" ]; then
    cp -r html/* "$OUT"
    cd "$OUT"

    # If there are no changes (e.g. this is a README update) then just bail.
    if [ "$(git status --porcelain | wc -l)" -lt 1 ]; then
        echo "No changes to the output on this push; exiting."
        exit 0
    fi

    echo 'Uploading documentation to the gh-pages branch...'
    # Commit the "changes", i.e. the new version.
    # The delta will show diffs between new and old versions.
    git add -A .
    git commit -m "Deploy Goliath docs to GitHub Pages. Build: ${TRAVIS_BUILD_NUMBER}" -m "Commit: ${TRAVIS_COMMIT}"

    # Now that we're all set up, we can push. Output is discarded so the
    # token never appears in the CI log.
    git push --force "https://${GH_REPO_TOKEN}@${GH_REPO_REF}" > /dev/null 2>&1
else
    echo 'Warning: No documentation files have been found!' >&2
    exit 1
fi
| true
|
3583fd12be05d815bc7477659ef259bac86757c5
|
Shell
|
flawnson/scripts_n_snippits
|
/config/cleanup.sh
|
UTF-8
| 956
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# System cleanup helper: compact the local git repository (if any), clean
# conda caches (if installed), then reclaim apt package-cache space.
#
# Fixed from the original, which could not run at all: both `if` statements
# were missing their closing `fi`, and `[[$(which conda)]]` lacked the
# mandatory spaces around `[[ ... ]]`.

# If the current directory is a git repository, clean cache, unused objects, etc.
if [ -d "$PWD/.git" ]; then
    git gc --aggressive --prune
    echo ".git has been cleaned"
fi

# If conda is installed, clean conda. `command -v` is the portable way to
# test for a tool (the output of `which` is not reliable across systems).
if command -v conda > /dev/null 2>&1; then
    conda update --all && conda clean -p
    echo "conda has been cleaned"
fi

# To remove all stored archives in your cache for packages that can not be downloaded anymore (thus packages that are no longer in the repository or that have a newer version in the repository).
sudo apt-get autoclean

# To remove unnecessary packages (After uninstalling an app there could be packages you don't need anymore)
sudo apt-get autoremove

# To delete downloaded packages (.deb) already installed (and no longer needed)
sudo apt-get clean

echo "Linux system successfully cleaned"

# In case this doesn't do enough, try installing bleachbit and running it's command to clear more space
# sudo apt-get install bleachbit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.