blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
4
115
path
stringlengths
2
970
src_encoding
stringclasses
28 values
length_bytes
int64
31
5.38M
score
float64
2.52
5.28
int_score
int64
3
5
detected_licenses
listlengths
0
161
license_type
stringclasses
2 values
text
stringlengths
31
5.39M
download_success
bool
1 class
1c030827276dc18055907359982735a9b9d634fc
Shell
PaulKnoops/Experimental_Evolution_Sequence_Repo
/RunThrough_DataCleaning_Scripts/bowtie2_map.sh
UTF-8
679
3.109375
3
[]
no_license
#! /bin/bash project_name=episodic_data_bowtie project_dir1=/home/paul/episodicData project_dir=/home/paul/episodicData/bowtie index_dir=${project_dir1}/index_dir ref_genome=${index_dir}/dmel-all-chromosome-r5.57.fasta.gz ref_genome_base=${project_dir}/bowtie_indexes/dmel-all-chromosome-r5.57_2 trim_dir=${project_dir1}/trim_dir bowtie2_dir=/usr/local/bowtie2/2.2.2 sam_dir=${project_dir}/sam_dir files=(${trim_dir}/*_R1_PE.fastq.gz) for file in ${files[@]} do name=${file} base=`basename ${name} _R1_PE.fastq.gz` ${bowtie2_dir}/bowtie2 -x ${ref_genome_base} -1 ${trim_dir}/${base}_R1_PE.fastq.gz -2 ${trim_dir}/${base}_R2_PE.fastq.gz -S ${sam_dir}/${base}_bowtie_pe.sam done
true
7f9d2667500f2a97b9648091e2debf0d5db339c9
Shell
sarahDH615/song-predictions
/presentation/helpPresentation.sh
UTF-8
134
2.6875
3
[]
no_license
if [ $# != 2 ]; then echo "Usage: [input html] [output html]"; fi sed 's:reveal.js/://cdn.jsdelivr.net/reveal.js/2.6.2/:g' $1 > $2
true
e071df151a065c64645feb18b2e138ce37d0878f
Shell
sardejah/wifi_tools
/base.sh
UTF-8
1,155
2.984375
3
[]
no_license
#!/bin/bash echo "usage: base.sh net_iface_name ap_iface_name" #variables NET_IFACE=$1 ROGUE_IFACE=$2 ESSID="yami" #BSSID=00:24:D3:44:11:C0 CHANNEL=6 PASSWORD="ulysse31" #kill service dnsmasq stop pkill hostapd #configurations files rm /etc/hostapd/hostapd.conf echo "interface=$ROGUE_IFACE hw_mode=g channel=$CHANNEL wmm_enabled=0 macaddr_acl=0 auth_algs=1 ignore_broadcast_ssid=0 wpa=2 wpa_key_mgmt=WPA-PSK wpa_pairwise=TKIP rsn_pairwise=CCMP ssid=$ESSID wpa_passphrase=$PASSWORD" > /etc/hostapd/hostapd.conf rm /etc/dnsmasq.conf echo "dhcp-range=$ROGUE_IFACE,192.168.1.10,192.168.1.20,255.255.255.0,24h server=8.8.8.8 server=8.8.4.4" > /etc/dnsmasq.conf #configurations routing echo 1 > /proc/sys/net/ipv4/ip_forward iptables -F iptables -X iptables -t nat -F iptables -t nat -X iptables -t nat -A POSTROUTING -o $NET_IFACE -j MASQUERADE iptables -A FORWARD -i $NET_IFACE -j ACCEPT #RUN ifconfig $ROGUE_IFACE down #macchanger -m $BSSID $ROGUE_IFACE macchanger -r $ROGUE_IFACE ifconfig $ROGUE_IFACE up hostapd /etc/hostapd/hostapd.conf & service dnsmasq restart ifconfig $ROGUE_IFACE up ifconfig $ROGUE_IFACE 192.168.1.1 netmask 255.255.255.0
true
96bb5cc793e8a2aaccd9afad19b0dba71c8096ff
Shell
carpproject/vobla
/testsuite/blastest/summary.sh
UTF-8
2,708
2.828125
3
[ "MIT" ]
permissive
#!/bin/bash # Copyright (c) 2013-2014, ARM Limited # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE # Prints a summary of the BLAS test suite output. 
sblas1output=output/SBLAT1.SUMM sblas2output=output/SBLAT2.SUMM sblas3output=output/SBLAT3.SUMM dblas1output=output/DBLAT1.SUMM dblas2output=output/DBLAT2.SUMM dblas3output=output/DBLAT3.SUMM cblas1output=output/CBLAT1.SUMM cblas2output=output/CBLAT2.SUMM cblas3output=output/CBLAT3.SUMM zblas1output=output/ZBLAT1.SUMM zblas2output=output/ZBLAT2.SUMM zblas3output=output/ZBLAT3.SUMM echo "" echo "-----------------------" echo " Summary" echo "-----------------------" sblas1fails=`grep -c "FAIL" ${sblas1output}` sblas2fails=`grep -c "FAIL" ${sblas2output}` sblas3fails=`grep -c "FAIL" ${sblas3output}` dblas1fails=`grep -c "FAIL" ${dblas1output}` dblas2fails=`grep -c "FAIL" ${dblas2output}` dblas3fails=`grep -c "FAIL" ${dblas3output}` cblas1fails=`grep -c "FAIL" ${cblas1output}` cblas2fails=`grep -c "FAIL" ${cblas2output}` cblas3fails=`grep -c "FAIL" ${cblas3output}` zblas1fails=`grep -c "FAIL" ${zblas1output}` zblas2fails=`grep -c "FAIL" ${zblas2output}` zblas3fails=`grep -c "FAIL" ${zblas3output}` echo "" echo "SBLAS 1: $sblas1fails failure(s)" echo "SBLAS 2: $sblas2fails failure(s)" echo "SBLAS 3: $sblas3fails failure(s)" echo "" echo "DBLAS 1: $dblas1fails failure(s)" echo "DBLAS 2: $dblas2fails failure(s)" echo "DBLAS 3: $dblas3fails failure(s)" echo "" echo "CBLAS 1: $cblas1fails failure(s)" echo "CBLAS 2: $cblas2fails failure(s)" echo "CBLAS 3: $cblas3fails failure(s)" echo "" echo "ZBLAS 1: $zblas1fails failure(s)" echo "ZBLAS 2: $zblas2fails failure(s)" echo "ZBLAS 3: $zblas3fails failure(s)" echo ""
true
ead933cf261dbca9672b78b06e298889b4b51ad3
Shell
spameier/proton-bridge-docker
/entrypoint.sh
UTF-8
1,373
3.8125
4
[ "MIT" ]
permissive
#!/bin/sh # heaviliy inspired by https://github.com/sdelafond/docker-protonmail-bridge set -eux # constants BRIDGE="/proton-bridge --cli --log-level debug" BRIDGE_IMAP_PORT=1143 BRIDGE_SMTP_PORT=1025 FIFO="/fifo" # check if required variables are set die() { echo "$1"; exit 1; } [ -z "$IMAP_PORT" ] && die '$IMAP_PORT is not set' [ -z "$SMTP_PORT" ] && die '$SMTP_PORT is not set' # initialize gpg if necessary if ! [ -d /root/.gnupg ]; then gpg --generate-key --batch << 'EOF' %no-protection %echo Generating GPG key Key-Type:RSA Key-Length:4096 Name-Real:proton-bridge Expire-Date:0 %commit EOF fi # initialize pass if necessary if ! [ -d /root/.password-store ]; then pass init proton-bridge fi # login to ProtonMail if neccessary if ! [ -d /root/.cache/protonmail/bridge ]; then [ -z "$PM_USER" ] && die '$PM_USER is not set' [ -z "$PM_PASS" ] && die '$PM_PASS is not set' printf "login\n%s\n%s\n" "${PM_USER}" "${PM_PASS}" | ${BRIDGE} fi # socat will make the connection appear to come from 127.0.0.1, since # the ProtonMail Bridge expects that socat TCP-LISTEN:${SMTP_PORT},fork TCP:127.0.0.1:${BRIDGE_SMTP_PORT} & socat TCP-LISTEN:${IMAP_PORT},fork TCP:127.0.0.1:${BRIDGE_IMAP_PORT} & # display account information, then keep stdin open [ -e ${FIFO} ] || mkfifo ${FIFO} { printf "info\n"; cat ${FIFO} } | ${BRIDGE}
true
a1943cd55a7b13c493f038e33a724f49ecca4e8e
Shell
tree-sitter/tree-sitter
/script/build-wasm
UTF-8
3,806
3.9375
4
[ "MIT" ]
permissive
#!/usr/bin/env bash usage() { cat <<EOF USAGE $0 [--help] [--debug] [--docker] SUMMARY Compile the Tree-sitter WASM library. This will create two files in the \`lib/binding_web\` directory: \`tree-sitter.js\` and \`tree-sitter.wasm\`. REQUIREMENTS You must have either the \`emcc\` command or the \`docker\` command on your PATH for this to work. OPTIONS --help: Display this message. --debug: Compile the library more quickly, with fewer optimizations and more runtime assertions. --docker: Run emscripten using docker, even if \`emcc\` is installed. By default, \`emcc\` will be run directly when available. EOF } set -e web_dir=lib/binding_web emscripten_flags="-O3" minify_js=1 force_docker=0 emscripen_version=$(cat "$(dirname "$0")"/../cli/emscripten-version) while [[ $# > 0 ]]; do case "$1" in --debug) minify_js=0 emscripten_flags="-s ASSERTIONS=1 -s SAFE_HEAP=1 -O0" ;; --help) usage exit 0 ;; --docker) force_docker=1 ;; -v|--verbose) emscripten_flags="-s VERBOSE=1 -v $emscripten_flags" ;; *) usage echo "Unrecognized argument '$1'" exit 1 ;; esac shift done emcc= if which emcc > /dev/null && [[ "$force_docker" == "0" ]]; then emcc=emcc elif which docker > /dev/null; then emcc="docker run \ --rm \ -v $(pwd):/src:Z \ -u $(id -u) \ emscripten/emsdk:$emscripen_version \ emcc" else if [[ "$force_docker" == "1" ]]; then echo 'You must have `docker` on your PATH to run this script with --docker' else echo 'You must have either `docker` or `emcc` on your PATH to run this script' fi exit 1 fi mkdir -p target/scratch runtime_methods='stringToUTF16','AsciiToString' # Use emscripten to generate `tree-sitter.js` and `tree-sitter.wasm` # in the `target/scratch` directory $emcc \ -s WASM=1 \ -s INITIAL_MEMORY=33554432 \ -s ALLOW_MEMORY_GROWTH=1 \ -s MAIN_MODULE=2 \ -s FILESYSTEM=0 \ -s NODEJS_CATCH_EXIT=0 \ -s NODEJS_CATCH_REJECTION=0 \ -s EXPORTED_FUNCTIONS=@${web_dir}/exports.json \ -s EXPORTED_RUNTIME_METHODS=$runtime_methods \ $emscripten_flags \ -fno-exceptions \ -std=c99 \ -D 
'fprintf(...)=' \ -D NDEBUG= \ -I lib/src \ -I lib/include \ --js-library ${web_dir}/imports.js \ --pre-js ${web_dir}/prefix.js \ --post-js ${web_dir}/binding.js \ --post-js ${web_dir}/suffix.js \ lib/src/lib.c \ ${web_dir}/binding.c \ -o target/scratch/tree-sitter.js # Use terser to write a minified version of `tree-sitter.js` into # the `lib/binding_web` directory. if [[ "$minify_js" == "1" ]]; then if [ ! -d ${web_dir}/node_modules/terser ]; then ( cd ${web_dir} npm install ) fi ${web_dir}/node_modules/.bin/terser \ --compress \ --mangle \ --keep-classnames \ -- target/scratch/tree-sitter.js \ > $web_dir/tree-sitter.js else cp target/scratch/tree-sitter.js $web_dir/tree-sitter.js fi mv target/scratch/tree-sitter.wasm $web_dir/tree-sitter.wasm
true
303955cda2f07c30425a07eff3f21fcea6dd44c2
Shell
coneyu/zimbra-build-scripts
/01-install-build-deps.sh
UTF-8
1,995
4.03125
4
[]
no_license
#!/bin/bash # # Install build dependencies based on distro # # Supports: # Ubuntu 18.04 # Ubuntu 16.04 # CentOS 8 # CentOS 7 # # Required dependencies: lsb-release # Get DISTRIB_ID, install lsb_release package # if necessary if [ -f "/usr/bin/lsb_release" ] then DISTRIB_ID=`lsb_release -i | awk '{print $3}'` else if [ -f "/etc/redhat-release" ] then sudo yum install -y redhat-lsb else sudo apt-get install -y lsb-release fi DISTRIB_ID=`lsb_release -i | awk '{print $3}'` fi # Start installing dependencies if [ $DISTRIB_ID == "Ubuntu" ] then # Get release information DISTRIB_RELEASE=`lsb_release -r | awk '{print $2}'` # Check if running supported version and install dependencies # or inform user of unsupported version and exit if [ $DISTRIB_RELEASE == "16.04" ] || [ $DISTRIB_RELEASE == "18.04" ] then sudo apt-get install -y software-properties-common openjdk-8-jdk ant ant-optional ant-contrib ruby git maven build-essential debhelper else echo "You are running an unsupported Ubuntu release!" exit 1 fi elif [ $DISTRIB_ID == "CentOS" ] then # Get release information DISTRIB_RELEASE=`lsb_release -r | awk '{print $2}' | cut -f1 -d "."` # Check if running supported version and install dependencies # or inform user of unsupported version and exit if [ $DISTRIB_RELEASE == "7" ] then sudo yum groupinstall -y 'Development Tools' sudo yum install -y java-1.8.0-openjdk ant ant-junit ruby git maven cpan wget perl-IPC-Cmd rpm-build createrepo elif [ $DISTRIB_RELEASE == "8" ] then sudo dnf group install -y "Development Tools" sudo dnf module enable -y javapackages-tools sudo dnf install -y java-1.8.0-openjdk gcc-c++ ant-junit ruby git maven cpan wget rpm-build createrepo rsync else echo "You are running an unsupported CentOS release!" exit 1 fi else echo "Unsupported distribution!" echo "This script only supports CentOS 7/8 and Ubuntu 16.04/18.04" exit 1 fi
true
9b08cea366e1a5e4f0fb3576be02c2af44e5ac2e
Shell
Guangyu-Yang/Hadoop-deploy-scripts
/bin/hadoop-set-env.sh
UTF-8
1,017
2.59375
3
[]
no_license
#!/bin/bash #set necessary hadoop environment variables export HADOOP_COMMON_HOME=$HADOOP_HOME export HADOOP_HDFS_HOME=$HADOOP_HOME export HADOOP_MAPRED_HOME=$HADOOP_HOME export YARN_HOME=$HADOOP_HOME export HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_HOME}/lib/native export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib" #set hadoop configuration directory export HADOOP_CONF_DIR=${HADOOP_PBS_ECOSYSTEM_HOME}/config_dir/hadoop #set user sepcify direcotries export DISTRIBUTED_DIR="/local_scratch/$USER" #for yarn export YARN_LOCAL_DIR=${DISTRIBUTED_DIR}/local export YARN_LOG_DIR=${DISTRIBUTED_DIR}/logs export YARN_APP_LOG_DIR=${DISTRIBUTED_DIR}/apps export YARN_STAGING_DIR=${DISTRIBUTED_DIR}/staging #for hdfs export HDFS_NAME_DIR=${DISTRIBUTED_DIR}/hdfs/name export HDFS_DATA_DIR=${DISTRIBUTED_DIR}/hdfs/data #for hadoop export HADOOP_TMP_DIR=${DISTRIBUTED_DIR}/tmp #for zookeeper export ZOOKEEPER_DATA_DIR=${DISTRIBUTED_DIR}/zookeeper/data export ZOOKEEPER_LOG_DIR=${DISTRIBUTED_DIR}/zookeeper/logs
true
d4c7be432c91e01d7402c95b5d230f69fa8cb155
Shell
DrawZeroPoint/VIPS
/python/cluster_scripts_jmlr/ptmcmc_goodwin2.sh
UTF-8
265
2.59375
3
[ "MIT" ]
permissive
#!/bin/bash for i in 10; do for j in `seq 1 5`; do echo "Running with $i chain (Trial $j)" mpirun -n $i python -c "from experiments.PTMCMC.goodwin1_12_2 import *; sample(300000, \"ptmcmc_goodwin_300k_${i}chain_trial${j}\");"; done done
true
8feeb6e69ec09984aa8af8bc3cc4e3d944bf073b
Shell
somnam/ml
/bin/run_container
UTF-8
1,327
3.890625
4
[]
no_license
#!/bin/bash check_dependencies () { set -e [[ -x "$(which docker)" ]] || (echo -e "\e[33mDocker isn't installed\e[0m" && exit 1) set +e } build_docker_image () { echo -e "\e[36mBuilding docker image (this may take some time).\e[0m" local self_path=$(dirname "$0") local root_path=$(dirname "$self_path") local dockerfile_path=$(realpath "$root_path/etc/Dockerfile") set -e [[ -f "$dockerfile_path" ]] || (echo -e "\e[33mDockerfile doesn't exist\e[0m" && exit 1) set +e docker stop -t0 ml 1>/dev/null docker system prune -f 1>/dev/null docker build --quiet --tag ml --file $dockerfile_path . 1>/dev/null } run_container () { echo -e "\e[36mRunning container\e[0m" local self_name=$(basename "$0") local self_path=$(dirname "$0") local root_path=$(dirname "$self_path") local bind_path=$(realpath "$root_path/var/") docker run \ --detach \ --interactive \ --shm-size=256mb\ --mount type=bind,src="$bind_path",target=/home/sandbox/ml/var \ --env PATH="/home/sandbox/.local/bin:$PATH" \ --name ml \ ml:latest } run_container_shell () { echo -e "\e[36mRunning container shell\e[0m" docker exec -it ml bash -l } check_dependencies build_docker_image run_container run_container_shell
true
27778a8e44c602e3d10def2fc4211f472f147cb2
Shell
masarakki/dotfiles
/pkg/fcitx5
UTF-8
170
2.546875
3
[]
no_license
#!/bin/sh if [ ! -e /etc/apt/sources.list.d/ikuya-fruitsbasket-ubuntu-fcitx5-`lsb_release -cs`.list ]; then sudo apt-add-repository ppa:ikuya-fruitsbasket/fcitx5 fi
true
2b73b9a25d7c97c37288e4f3c8d5c968f4037ef6
Shell
iamsingularity/api-umbrella
/tasks/deps/pcre
UTF-8
843
2.859375
3
[ "MIT" ]
permissive
#!/usr/bin/env bash # PCRE 8.21+ required for OpenResty's JIT regex support. # # We can likely drop compiling our own version and use the system version once # we no longer need to support CentOS 6 (currently on PCRE 7.8). pcre_version="8.43" pcre_hash="636222e79e392c3d95dcc545f24f98c4" set -e -u -x source ./tasks/helpers.sh task_working_dir download "https://ftp.pcre.org/pub/pcre/pcre-$pcre_version.tar.bz2" "md5" "$pcre_hash" extract_download "pcre-$pcre_version.tar.bz2" cd "pcre-$pcre_version" ./configure \ --prefix="$INSTALL_PREFIX_EMBEDDED" \ --disable-cpp \ --enable-jit \ --enable-utf \ --enable-unicode-properties make -j"$NPROC" make install DESTDIR="$STAGE_DIR" chrpath -d "$STAGE_EMBEDDED_DIR/bin/pcregrep" chrpath -d "$STAGE_EMBEDDED_DIR/bin/pcretest" chrpath -d "$STAGE_EMBEDDED_DIR/lib/libpcreposix.so" stamp
true
cc32bbe7669aaa497bb32ecf42e1fc9b888fdd97
Shell
flbooo/mas
/script/lint
UTF-8
313
2.9375
3
[ "MIT" ]
permissive
#!/bin/bash -e # # script/lint # mas # # Linting checks for CI "Lint" stage. # echo "==> 🚨 Linting mas" echo echo "--> 🕊️ Swift" swiftlint lint --strict echo echo "--> 📜 Bash" shopt -s extglob # Only lint files with no extension (skipping .pl) shellcheck --shell=bash script/!(*.*) shopt -u extglob
true
577d76d33a6d72537eebad5c76171f63da649d27
Shell
sakhnik/arch-config
/20-pacman.sh
UTF-8
1,799
3.3125
3
[]
no_license
AddPackage aurutils # helper tools for the arch user repository AddPackage pacman-contrib # Contributed scripts and tools for pacman systems AddPackage pacutils # Helper tools for libalpm AddPackage vifm # A file manager with curses interface, which provides Vi[m]-like environment cat >"$(CreateFile /etc/pacman.d/hooks/paccache-remove.hook)" <<EOF [Trigger] Operation = Remove Type = Package Target = * [Action] Description = Cleaning pacman cache... When = PostTransaction Exec = /usr/bin/paccache -ruk0 EOF cat >"$(CreateFile /etc/pacman.d/hooks/paccache-upgrade.hook)" <<EOF [Trigger] Operation = Upgrade Type = Package Target = * [Action] Description = Cleaning pacman cache... When = PostTransaction Exec = /usr/bin/paccache -rk2 EOF cat >"$(CreateFile /etc/pacman.d/hooks/check-deps.hook)" <<EOF [Trigger] Operation = Upgrade Type = Package Target = * [Action] Description = Checking broken dependencies... When = PostTransaction Exec = /usr/local/bin/pacman-check-local-deps.sh EOF CopyFile /usr/local/bin/pacman-check-local-deps.sh 755 sed -i -f - "$(GetPackageOriginalFile pacman-mirrorlist /etc/pacman.d/mirrorlist)" <<'EOF' /mirror.mirohost.net/ s/^#// /mirrors.nix.org.ua/ s/^#// EOF sed -i -f - "$(GetPackageOriginalFile pacman /etc/pacman.conf)" <<EOF /^#CacheDir/ s/^#// /CacheDir/ a CacheDir = /var/cache/pacman/custom/ /CleanMethod/ s/.*/CleanMethod = KeepCurrent/ /^#Color/ s/^#// /^#CheckSpace/ s/^#// /^#VerbosePkgLists/ s/^#// /^#ParallelDownloads/ s/.*/ParallelDownloads = 3/ /ParallelDownloads/ a ILoveCandy /#\[custom\]/,/^$/ s/^#// EOF sed -i -f - "$(GetPackageOriginalFile pacman /etc/makepkg.conf)" <<EOF s/^#MAKEFLAGS=.*/MAKEFLAGS="-j5"/ s/^COMPRESSXZ=.*/COMPRESSXZ=(xz -c -z - --threads=0)/ s/-march=[^ ]*/-march=native/ s/-mtune=[^ ]*/-mtune=native/ EOF
true
e5ec4b6ad9644a32fcad896f1cfef4f73849c8c0
Shell
tsekaris/ubuntu
/ssh/add.sh
UTF-8
365
3.421875
3
[]
no_license
#!/bin/sh #Εμφάνιση των tags χωρίς το prefix. prefix=${HOME}/.ssh/ #Το -L απαραίτητο για συντομεύσεις key_public=${prefix}$(find -L ~/.ssh -name "*.pub" | awk '{ gsub("'${prefix}'","",$1); print $1 }'| fzf) key_private=${key_public%????} #Βγάλε το .pub if [ -f ${key_public} ] then ssh-add ${key_private} fi
true
3b6c2045155e33e4b754fe846c006318f7e460ef
Shell
thatmarkenglishguy/shared_scripts
/onpath/git_gen_file_script
UTF-8
1,479
3.921875
4
[ "MIT" ]
permissive
#!/usr/bin/env bash command_template='' ok_exit_code=0 for arg in "${@}" do case "${arg}" in *) if [ -z "${command_template}" ] then command_template="${arg}" else echo "Unexpected argument: '${arg}'" >&2 (( ++ok_exit_code )) fi ;; esac done if [ ${ok_exit_code} -ne 0 ] then exit ${ok_exit_code} fi while [ $(pwd) != "${HOME}" ] && [ $(pwd) != '/' ] && [ ! -d './.git' ] do cd .. done state='unset' add_newline=0 function add_newline() { if [ ${add_newline} -ne 0 ] then echo else add_newline=1 fi } while read -r do status="${REPLY:0:2}" case "${status}" in " M") if [ "${state}" != 'modified' ] then state='modified' add_newline echo '# Modified' fi ;; ??) if [ "${state}" != 'new' ] then state='new' add_newline echo '# New' fi ;; esac subpath="${REPLY:3}" if [ "${subpath}" != '.*temp/.*' ] && [[ ! "${subpath}" =~ .*fish.* ]] && [[ ! "${subpath}" =~ .*soup.* ]] then path="$(pwd)/${subpath}" if [ -n "${command_template}" ] then command="${command_template//\{\}/${path}}" if [ "${command}" == "${command_template}" ] then echo "${command_template} ${path} \"\${@}\"" else echo "${command} \"\${@}\"" fi else echo "${path} \"\${@}\"" fi fi done < <(git status --porcelain --untracked-files)
true
e355a4488747526c2e1ac440ca09c5b411684ae0
Shell
Nycander/dotfiles
/dotfiles/oh-my-zsh-custom/martins.zsh-theme
UTF-8
891
2.828125
3
[]
no_license
MARTINS_BRACKET_COLOR="%{$fg[white]%}" MARTINS_TIME_COLOR="%{$fg[blue]%}" MARTINS_RVM_COLOR="%{$fg[magenta]%}" MARTINS_DIR_COLOR="%{$fg[yellow]%}" MARTINS_GIT_BRANCH_COLOR="%{$fg[green]%}" MARTINS_GIT_CLEAN_COLOR="%{$fg[green]%}" MARTINS_GIT_DIRTY_COLOR="%{$fg[red]%}" # These Git variables are used by the oh-my-zsh git_prompt_info helper: ZSH_THEME_GIT_PROMPT_PREFIX="$MARTINS_BRACKET_COLOR:$MARTINS_GIT_BRANCH_COLOR" ZSH_THEME_GIT_PROMPT_SUFFIX="" ZSH_THEME_GIT_PROMPT_CLEAN=" $MARTINS_GIT_CLEAN_COLOR✓" ZSH_THEME_GIT_PROMPT_DIRTY=" $MARTINS_GIT_DIRTY_COLOR✗" # Core display elements: MARTINS_TIME_="$MARTINS_BRACKET_COLOR"["$MARTINS_TIME_COLOR%T$MARTINS_BRACKET_COLOR"]"%{$reset_color%} " MARTINS_DIR_="$MARTINS_DIR_COLOR%~\$(git_prompt_info) " MARTINS_PROMPT="$MARTINS_BRACKET_COLOR➭ " # Put it all together! PROMPT="$MARTINS_TIME_$MARTINS_DIR_$MARTINS_PROMPT%{$reset_color%}"
true
fe4bd8e66883a4dbbb4f53579404ad4ae7ca3bcd
Shell
bgerofi/mckernel
/test/issues/1065/CT_007.sh
UTF-8
1,455
3.34375
3
[]
no_license
#!/bin/sh TESTNAME=CT_007 exec_program="./print_maps_and_cmdline" test_program="./call_execve ${exec_program}" . ./config fail=0 echo "*** ${TESTNAME} start *******************" real_path=`realpath ${exec_program}` interp_path=`readelf -l ${exec_program} | grep "interpreter:" | sed -r 's/.*\[.*interpreter:\s(.*)\].*/\1/'` interp_real_path=`realpath ${interp_path}` echo "exec : ${test_program}" #${test_program} | tee ./${TESTNAME}.log ${MCEXEC} ${test_program} 1> ./${TESTNAME}_maps.log 2> ./${TESTNAME}_cmdline.log if [ X$? != X0 ]; then fail=1 fi cat ./${TESTNAME}_maps.log echo "" echo "** grep ${real_path} from maps" grep -a -e "${real_path}$" ./${TESTNAME}_maps.log if [ X$? = X0 ]; then echo "[OK] ${real_path} is found" else echo "[NG] ${real_path} is not found" fail=1 fi echo "" echo "** grep ${interp_real_path} from maps" grep -a -e "${interp_real_path}$" ./${TESTNAME}_maps.log if [ X$? = X0 ]; then echo "[OK] ${interp_real_path} is found" else echo "[NG] ${interp_real_path} is not found" fail=1 fi cat ./${TESTNAME}_cmdline.log echo "" echo "** grep ${exec_program} from cmdline" grep -a -e "${exec_program}" ./${TESTNAME}_cmdline.log if [ X$? = X0 ]; then echo "[OK] ${exec_program} is found" else echo "[NG] ${exec_program} is not found" fail=1 fi if [ X$fail = X0 ]; then echo "*** ${TESTNAME} PASSED" else echo "*** ${TESTNAME} FAILED" fi echo "" rm ./${TESTNAME}_maps.log rm ./${TESTNAME}_cmdline.log
true
9860fa5216d92cd5192b88c7e4c220fb6dfd2bd8
Shell
bopopescu/twemproxy-redis
/dists/redis-instances/bin/sentinel_shutdown.sh
UTF-8
689
3.515625
4
[]
no_license
#!/bin/sh cwd=`cd $(dirname $0)/..;pwd` cd $cwd function shutdown_instance(){ insname="" for arg in $@; do key=`echo $arg|awk -F= '{ print $1 }'` val=`echo $arg|awk -F= '{ print $2 }'` if [ $key = "name" ]; then insname=$val break fi done if [ -z "$insname" ]; then return 0 fi insws="$cwd/data/$insname" cd $insws pidfile="$insws/var/sentinel.pid" pid=`cat $pidfile` procinfo=`ps -ef|grep -w $pid|grep -v grep` while [ -n "$procinfo" ]; do kill $pid sleep 0.5 procinfo=`ps -ef|grep -w $pid|grep -v grep` done echo "redis sentinel process $pid shutdown" cd $cwd } while read data; do shutdown_instance $data done < config/sentinel.conf
true
3a5714941cb96a60a3207ddc04db5a47a7439487
Shell
Iristyle/lcow
/tests/cases/100_hostvol/070_socket_volume/socket_test.sh
UTF-8
219
3.640625
4
[ "Apache-2.0" ]
permissive
#!/bin/sh set -e set -x FILE=$1 nc -lU "$FILE" & pid=$! sleep 1 TYPE=$(stat -c %F "$FILE") kill "$pid" if [ "$TYPE" != "socket" ]; then echo "$FILE is not a unix domain socket: $TYPE != socket" exit 1 fi
true
997ef4a39874a9e6b833bcea854e7d7c380ea8b8
Shell
open-estuary/appbenchmark
/apps/hadoop/hadoop_test1/scripts/pb_build.sh
UTF-8
2,026
3.296875
3
[]
no_license
#!/bin/bash . ${APP_ROOT}/toolset/setup/basic_cmd.sh ###################################################################################### # Notes: # To build ProtoBuffer # ##################################################################################### BUILD_DIR="./build_protobuffer" CTAG_NAME="2.5.0" TARGET_DIR=$(tool_get_first_dirname ${BUILD_DIR}) ####################################################################################### PROTOC_VERSION="$(protoc --version)" echo "${PROTOC_VERSION}" if [[ "${PROTOC_VERSION}" =~ "2.5.0" ]] ; then echo "ProtoBuffer-2.5.0 has been built successfully" exit 0 fi #################################################################################### # Prepare for build #################################################################################### $(tool_add_sudo) rm -fr ${BUILD_DIR} mkdir ${BUILD_DIR} pushd ${BUILD_DIR} > /dev/null tar -zxvf ${APP_ROOT}/apps/hadoop/hadoop_test1/src/protobuf-2.5.0.tar.gz -C ./ TARGET_DIR=$(tool_get_first_dirname ./) cp ${APP_ROOT}/apps/hadoop/hadoop_test1/src/platform_macros.h ./${TARGET_DIR}/src/google/protobuf/stubs/platform_macros.h cp ${APP_ROOT}/apps/hadoop/hadoop_test1/src/atomicops_internals_arm64_gcc.h ./${TARGET_DIR}/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h cp ${APP_ROOT}/apps/hadoop/hadoop_test1/src/Makefile.am ./${TARGET_DIR}/src/Makefile.am cp ${APP_ROOT}/apps/hadoop/hadoop_test1/src/atomicops.h ./${TARGET_DIR}/src/google/protobuf/stubs/atomicops.h echo "Finish build preparation......" ###################################################################################### # Build OpenJDK ##################################################################################### #Build Step 1: auto generation cd ${TARGET_DIR} $(tool_add_sudo) chmod 755 ./configure ./configure --prefix=/usr make -j32 $(tool_add_sudo) make install $(tool_add_sudo) ldconfig popd > /dev/null ##########################################################################################
true
99b84d818b8bc71e84ec1fcd02e23c9a8d968d0d
Shell
imsekhar04/core-plans
/azure-cli/tests/tests.bats
UTF-8
360
3.46875
3
[ "Apache-2.0" ]
permissive
TEST_PKG_VERSION="$(echo $TEST_PKG_IDENT | cut -d/ -f 3)" @test "az exe runs" { run hab pkg exec $TEST_PKG_IDENT az [ $status -eq 0 ] } @test "az exe outputs the expected version $TEST_PKG_VERSION" { result="$(hab pkg exec $TEST_PKG_IDENT az --version | grep 'azure-cli (' | awk '{gsub(/azure-cli\ \(|\)/,"")}1')" [ "$result" = $TEST_PKG_VERSION ] }
true
6d101ec3c2886b07e57c4f1032a246fd642900ad
Shell
Parshuramsk/FedModules
/build.sh
UTF-8
542
2.5625
3
[]
no_license
#!/bin/bash export MESOS_HOME_DIR="$HOME/src/mesos" COMPILER="g++" THIRD_PARTY="$MESOS_HOME_DIR/build/3rdparty/libprocess/3rdparty" HEADER_FILES=" -I $MESOS_HOME_DIR/include -I $MESOS_HOME_DIR/src -I $MESOS_HOME_DIR/build/src -I $MESOS_HOME_DIR/3rdparty/libprocess/include -I $THIRD_PARTY/boost-1.53.0 -I$THIRD_PARTY/glog-0.3.3/src -I $THIRD_PARTY/stout/include -I $THIRD_PARTY/protobuf-2.5.0/src" $COMPILER $HEADER_FILES -lmesos -std=c++11 -fPIC -shared ./FedAllocator/src/FedAllocator.cpp ./FedComm/src/FedComm.cpp -o libFedModules.so
true
785c12f13e56f8a7535df6f2cc5e57aee824afae
Shell
TjlHope/scripts
/bin/cpu_mem_usage
UTF-8
7,384
4.09375
4
[]
no_license
#!/bin/sh # SCRIPTS_DIR/bin/cpu_mem_usage ### Outputs the current cpu and memory usage figures. # source library functions [ -h "${0}" ] && script_p="$(readlink -f "${0}")" || script_p="${0}" lib_d="${script_p%/*/*}/lib" . "${lib_d}/percent_blocks.sh" . "${lib_d}/check_type.sh" . "${lib_d}/colour.sh" . "${lib_d}/output.sh" # Use $SHM_D (if defined) for storage, fallback of /tmp/$USER shm_d="${SHM_D:-/tmp/${USER}}" { [ -d "${shm_d}" ] || { [ -w "${shm_d%/*}" ] && mkdir "${smh_d}" } } && [ -w "${shm_d}" ] || die "Cannot access shared memory directory: ${shm_d}." cpu_usage () { # Function to get current cpu usage, stores the current, and ${1:-inf} # previous values in ${SHM_D}/cpu; and outputs the current, and ${1:-0} # from the file each call. ## Variables local cpu v a t p _a _t _p fl="${shm_d}/cpu" ## Calculate current # get the total cpu line from /proc/stat: "IDLE|T1 T2 T3 IDLE T5 ... TN" cpu="$(sed -n /proc/stat -e \ 's/^cpu\s\+\(\([0-9]\+\s\+\)\{3\}\)\([0-9]\+\)\(.*\)$/\3|\1\3\4/p')" # get the previous values (defaults if necessary) [ -f "${fl}" ] && read _a _t _p < "${fl}" || echo "0 0 0" > "${fl}" # sum all the values after the bar to get the total for v in ${cpu#*|} do t=$(( ${t-0} + ${v})) # total time (cs) done a=$(( ${t} - ${cpu%%|*} )) # active time (cs) # percent use p=$(( (1000 * (${a} - ${_a:-0}) / (${t} - ${_t:-0}) + 5) / 10 )) ## Insert new value into file and rotate if necessary sed -i "${fl}" -n -e "\ 1 { h s:^.*$:${a} ${t} ${p}:p g } 1,${1:-$} p " ## Output values a=""; t=""; p="" # reset values for file read while read _a _t _p # for each line in file do [ -n "${a}" -a -n "${t}" -a -n "${p}" ] && { # output the percentage ${verbose-false} && echo "cpu: (${a} - ${_a}) / (${t} - ${_t}) = ${p}" >&2 echo "${p}" # only output ${1:-1} values [ -n "${1}" ] || break } # next set of values a="${_a}"; t="${_t}"; p="${_p}" done < "${fl}" } mem_usage () { # Function to get current memory usage, stores the current, and ${1:-inf} # previous values in 
${SHM_D}/mem; and outputs the current, and ${1:-0} # from the file each call. # If a -u flag is given, rather than using the active memory, the used # (non-free/active & inactive) memory is used. ## Variables local t='' f='' a='' u='' p='' _u='' _t='' _p='' \ fl="${shm_d}/mem" active=true { [ "$1" = -u ] && shift || [ "$2" = -u ]; } && active=false # check the previous values (defaults if necessary) [ -f "$fl" ] && read u t p < "$fl" || echo "0 0 0" > "$fl" ## Calculate current # get the total, active & free values from /proc/meminfo eval "$(sed /proc/meminfo -nEe ' s/^MemTotal:\s+([0-9]+).*$/t=\1;/p; t; s/^MemFree:\s+([0-9]+).*$/f=\1;/p; t; s/^Active:\s+([0-9]+).*$/a=\1;/p; t; ')" t="$(( t / 1024 ))" # Total memory (MB) if "$active" && [ -n "$a" ] then u="$(( a / 1024 ))" # Active memory (MB) else u="$(( t - ( f / 1024 ) ))" # Non-Free memory (MB) fi p=$(( (1000 * u / t + 5) / 10 )) # percent used ## Insert new value into file and rotate if necessary sed -i "$fl" -ne " 1 { h s:^.*$:$u $t $p:p g } 1,${1:-$} p " ## Output values u=""; t=""; p="" # reset values for file read while read _u _t _p # for each line in file do if [ -n "$u" ] && [ -n "$t" ] && [ -n "$p" ] then # output the percentage ${verbose-false} && echo "mem: $u / $t = $p" >&2 echo "$p" # only output ${1:-1} values [ -n "$1" ] || break fi # next set of values u="$_u"; t="$_t"; p="$_p" done < "$fl" } while [ -n "${1}" ] do [ -z "${1##--*}" ] && opt_patt="--*=" || opt_patt="-[a-z]" case "${1}" in -b|--blocks) blocks=true ;; -w|--words) verbose=true ;; -h*|--hist*|--history*) [ "${1#${opt_patt}?}" = "${1}" ] && shift hist="${1#${opt_patt}}" check_int ${hist} || die "-h requires an integer argument" ;; -n*|--num*|--number*) [ "${1#${opt_patt}?}" = "${1}" ] && shift number="${1#${opt_patt}}" check_int ${number} || die "-n requires an integer argument" ;; -v|--verb|--verbose) verbose=true ;; -u|--used|--used-mem) used=-u ;; -s*|--sep*|--seperator*) [ "${1#${opt_patt}?}" = "${1}" ] && shift 
sep_str="${1#${opt_patt}}" ;; --start*) [ "${1#${opt_patt}?}" = "${1}" ] && shift start_str="${1#${opt_patt}}" ;; --end*) [ "${1#${opt_patt}?}" = "${1}" ] && shift end_str="${1#${opt_patt}}" ;; -c*|--fg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_fg_colour "${1#${opt_patt}}" ;; -k*|--bg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_bg_colour "${1#${opt_patt}}" ;; -a*|--attr*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_attr "${1#${opt_patt}}" ;; --cpu-c*|--cpu-fg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_fg_colour "${1#${opt_patt}}" "cpu_" ;; --cpu-k*|--cpu-bg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_bg_colour "${1#${opt_patt}}" "cpu_" ;; --cpu-a|--cpu-attr*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_attr "${1#${opt_patt}}" "cpu_" ;; --mem-c*|--mem-fg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_fg_colour "${1#${opt_patt}}" "mem_" ;; --mem-k*|--mem-bg-colour*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_bg_colour "${1#${opt_patt}}" "mem_" ;; --mem-a*|--mem-attr*) [ "${1#${opt_patt}?}" = "${1}" ] && shift set_attr "${1#${opt_patt}}" "mem_" ;; -h|-?|--help) echo "usage: ${0} [-h N] [-n N] [-v] (-b|-w)" echo echo "Display the current cpu and memory usage figures." 
echo " -b, --blocks" echo " display using unicode block characters" echo " -w, --words" echo " display using alphanumeric characters" echo " -h N, --history=N" echo " show N historical values as well" echo " -n N, --number=N" echo " (-b only) display using blocks of width N" echo " -u, --used, --used-mem" echo " show used (non-free) memory, not just active" echo " -v, --verbose" echo " more verbose display" echo " -s STR, --seperator=STR" echo " string to seperate cpu and mem displays" echo " --(|cpu-|mem-)(c|k|a)=VAL," echo " --(|cpu-|mem-)(fg-colour|bg-colour|attr)=VAL" echo " set the (normal|cpu|mem) output formating" exit 0 esac shift done cpu="$(cpu_usage ${hist})" mem="$(mem_usage ${used} ${hist})" if ${blocks-false} then if [ ${hist-0} -gt 0 ] || [ ${number-1} -eq 1 ] then pc_block="pc_vblock" elif [ ${hist-1} -eq 1 ] && [ ${number-1} -gt 1 ] then pc_block="pc_hblock" else die "'-h ${hist}' can only be used with '-n 1'" fi cpu="$(${pc_block} ${cpu} ${number})" mem="$(${pc_block} ${mem} ${number})" sep_str="${sep_str}" # default is no speration between blocks; # assignmentneeded as use ${sep- } to add a # space otherwise fi # Output representation echo "\ ${fmt}${start_str}\ ${cpu_fmt}${cpu}\ ${fmt}${sep_str- }\ ${mem_fmt}${mem}\ ${fmt}${end_str}\ "
true
9ae1a45ad4cc970ff46419be9f9bc5bd4de08b5a
Shell
weideng1/nosqlbench
/nb5/build-bin.sh
UTF-8
2,148
3.34375
3
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash # # Copyright (c) 2022 nosqlbench # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # set -e set -x APPDIR=target/NB.AppDir JAR_NAME="nb5.jar" BIN_NAME="nb5" JAVA_VERSION="17" mkdir -p ${APPDIR} if [ ! -f target/${JAR_NAME} ] then printf "target/${JAR_NAME} does not exist" exit 2 fi rsync -av appimage/skel/ "${APPDIR}/" cp target/${JAR_NAME} "${APPDIR}/usr/bin/${JAR_NAME}" mkdir -p "${APPDIR}/usr/bin/jre" if [ "$JAVA_VERSION" == "17" ] then if [ ! -d "cache/jdk17" ] ; then printf "getting jdk17 once into cache/jdk17\n"; mkdir -p cache (cd cache && ( wget -c https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.1%2B12/OpenJDK17U-jdk_x64_linux_hotspot_17.0.1_12.tar.gz tar -xf OpenJDK17U-jdk_x64_linux_hotspot_17.0.1_12.tar.gz mv jdk-17.0.1+12 jdk17 rm OpenJDK17U-jdk_x64_linux_hotspot_17.0.1_12.tar.gz )) fi rsync -av cache/jdk17/ "${APPDIR}/usr/bin/jre/" else printf "Unknown java version indicated in $0" exit 2 fi if [ ! -f "${APPDIR}/AppRun" ] then ( cd ${APPDIR} && ( printf "Linking AppRun...\n"; ln -s usr/bin/${BIN_NAME} AppRun )) fi printf "getting appimage tool and building image...\n"; ( cd target && ( if [ ! 
-x "appimagetool-x86_64.AppImage" ] then wget -c https://github.com/AppImage/AppImageKit/releases/download/12/appimagetool-x86_64.AppImage chmod +x appimagetool-x86_64.AppImage fi ARCH=x86_64 ./appimagetool-x86_64.AppImage NB.AppDir ${BIN_NAME} # && chmod +x ${BIN_NAME} ) ) if [ -x "target/${BIN_NAME}" ] then printf "nosqlbench AppImage binary was built at target/${BIN_NAME}\n"; fi
true
22777b8772ede53e8d3857070fae555b8d6ed4ba
Shell
Winddoing/CodeWheel
/shell/color.sh
UTF-8
788
2.75
3
[]
no_license
#!/bin/bash function out_red() { echo -e "\e[01;31m$@\e[0m" } out_red "test read" function out_yellow() { echo -e "\e[33m$@\e[0m" } out_yellow "test yellow" function out_green() { echo -e "\e[32m$@\e[0m" } out_green "test green" function out_blue() { echo -e "\e[34m$@\e[0m" } out_blue "test blue" echo -e "\033[31m 红色字 \033[0m" echo -e "\033[34m 黄色字 \033[0m" echo -e "\033[41;33m 红底黄字 \033[0m" echo -e "\033[41;37m 红底白字 \033[0m" echo -e "\033[30m 黑色字 \033[0m" echo -e "\033[31m 红色字 \033[0m" echo -e "\033[32m 绿色字 \033[0m" echo -e "\033[33m 黄色字 \033[0m" echo -e "\033[34m 蓝色字 \033[0m" echo -e "\033[35m 紫色字 \033[0m" echo -e "\033[36m 天蓝字 \033[0m" echo -e "\033[37m 白色字 \033[0m"
true
91968712f39ef3a043385a9dc390a205cfca41d9
Shell
Misterblue/misterblue.github.io
/_tools/oldImagesMakeThumbs.sh
UTF-8
698
3.75
4
[ "CC0-1.0" ]
permissive
#! /bin/bash # Scan all jpg's in the old images dir and create the thumbs. OLDIMAGESDIR=/home/Robert/dev/misterblue.github.io/images/oldimages THUMBSDIR=$OLDIMAGESDIR/thumb SIZE=150 TEMPFILE=xx.jpg TEMPFILE2=xx2.ppm EXIFFILE=exif.exif SEDFILE=xxsed.sed rm -f $TEMPFILE $TEMPFILE2 $EXIFFILE $SEDFILE cd "$OLDIMAGESDIR" ls *.jpg | while read file ; do if [[ -e "$file" ]] ; then cat "$file" | jpegtopnm --exif=$EXIFFILE --quiet > $TEMPFILE2 cat $TEMPFILE2 | pamscale --xyfit $SIZE $SIZE | ppmtojpeg --exif=$EXIFFILE --comment "Copyright 2015, Robert Adams" --quality=75 > $TEMPFILE mv $TEMPFILE "$THUMBSDIR/$file" fi done rm -f $TEMPFILE $TEMPFILE2 $EXIFFILE $SEDFILE
true
192f22a1772e5de4b343f0e7c89a639fd2ab3757
Shell
AlexzxelA/beame-gatekeeper
/install/service.sh
UTF-8
529
3.921875
4
[]
no_license
#!/bin/bash set -eu err_trap_func() { echo "ERROR: Installation as service failed" } trap err_trap_func ERR if [[ $EUID -ne 0 ]]; then echo "Please run this script as root." exit 1 fi if [[ $(uname -s) == Darwin ]];then echo "+ Running Mac OS installation" SCRIPT_DIR=$(zsh -c 'echo ${0:A:h}' "$0") exec "$SCRIPT_DIR/launchd-service.zsh" "$@" else echo "+ Running systemd installation" SCRIPT_DIR="$( cd "$( dirname "$( realpath "${BASH_SOURCE[0]}" )" )" && pwd )" exec "$SCRIPT_DIR/systemd-service.sh" "$@" fi
true
0e1373407bb2f14a08e014cc3d0aad2a104dc224
Shell
slackpanos/SlackOnly-SlackBuilds
/audio/jack-keyboard/jack-keyboard.SlackBuild
UTF-8
3,032
3.796875
4
[ "MIT" ]
permissive
#!/bin/sh

# Slackware build script for jack-keyboard

# Written by B. Watson (yalhcru@gmail.com)

# Licensed under the WTFPL. See http://www.wtfpl.net/txt/copying/ for details.

# 20180628 bkw: update for v2.7.2.
# 20170218 bkw: make lash optional.
# 20170216 bkw:
# - Updated for v2.7.1. Apparently, upstream released 2.7.1 in 2012, but
#   never got around to updating their web page, which still says that
#   2.5 is the latest release. It took me this long (5 years!) to notice
#   there was a newer version on their sourceforge download page. Note
#   that 2.5 used autotools but 2.7.1 uses cmake, so this script can no
#   longer be used to build the old version.
# - Add capability stuff.

# Standard SlackBuild: unpack the source tarball found next to this script,
# build with cmake into $PKG, and produce a Slackware package in $OUTPUT.

PRGNAM=jack-keyboard
VERSION=${VERSION:-2.7.2}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}

# Auto-detect ARCH unless the caller overrides it in the environment.
if [ -z "$ARCH" ]; then
  case "$( uname -m )" in
    i?86) ARCH=i586 ;;
    arm*) ARCH=arm ;;
    *) ARCH=$( uname -m ) ;;
  esac
fi

CWD=$(pwd)
TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}

# Per-arch compiler flags and lib dir suffix (lib vs lib64).
if [ "$ARCH" = "i586" ]; then
  SLKCFLAGS="-O2 -march=i586 -mtune=i686"
  LIBDIRSUFFIX=""
elif [ "$ARCH" = "i686" ]; then
  SLKCFLAGS="-O2 -march=i686 -mtune=i686"
  LIBDIRSUFFIX=""
elif [ "$ARCH" = "x86_64" ]; then
  SLKCFLAGS="-O2 -fPIC"
  LIBDIRSUFFIX="64"
else
  SLKCFLAGS="-O2"
  LIBDIRSUFFIX=""
fi

set -e

# Fresh unpack of the source tree.
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf $PRGNAM-$VERSION
tar xvf $CWD/$PRGNAM-$VERSION.tar.gz
cd $PRGNAM-$VERSION
chown -R root:root .
# Normalize file permissions to 755/644 as required for packaging.
find -L . \
 \( -perm 777 -o -perm 775 -o -perm 750 -o -perm 711 -o -perm 555 -o -perm 511 \) \
 -exec chmod 755 {} \; -o \
 \( -perm 666 -o -perm 664 -o -perm 600 -o -perm 444 -o -perm 440 -o -perm 400 \) \
 -exec chmod 644 {} \;

# Decide whether to enable LASH support: auto-detect via pkg-config unless
# the LASH environment variable forces it on or off.
case "${LASH:-auto}" in
  auto) if pkg-config --exists lash-1.0; then
          LASHOPT="ON"
        else
          LASHOPT="OFF"
        fi
        ;;
  y*|Y*|t*|T*|on|ON|1) LASHOPT="ON" ;;
  *) LASHOPT="OFF" ;;
esac

echo "LASHOPT=\"$LASHOPT\""

# Out-of-tree cmake build.
mkdir -p build
cd build
cmake \
  -DCMAKE_C_FLAGS:STRING="$SLKCFLAGS" \
  -DCMAKE_CXX_FLAGS:STRING="$SLKCFLAGS" \
  -DCMAKE_INSTALL_PREFIX=/usr \
  -DLIB_SUFFIX=${LIBDIRSUFFIX} \
  -DMAN_INSTALL_DIR=/usr/man \
  -DCMAKE_BUILD_TYPE=Release .. \
  -DLashEnable=$LASHOPT
make
make install DESTDIR=$PKG
cd ..
# NOTE(review): make / make install are repeated here in the source tree
# root after the build/ pass above — looks like a leftover duplicate;
# confirm against the upstream SBo script.
make
make install DESTDIR=$PKG

strip $PKG/usr/bin/$PRGNAM
gzip -9 $PKG/usr/man/man1/$PRGNAM.1

# Ship the docs and a copy of this build script.
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
cp -a AUTHORS COPYING NEWS README* TODO $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild

mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
cat $CWD/slack-required > $PKG/install/slack-required
cat $CWD/doinst.sh > $PKG/install/doinst.sh

# Only add capability stuff if not disabled:
if [ "${SETCAP:-yes}" = "yes" ]; then
  cat $CWD/setcap.sh >> $PKG/install/doinst.sh
  # Only allow execution by audio group
  chown root:audio $PKG/usr/bin/$PRGNAM
  chmod 0750 $PKG/usr/bin/$PRGNAM
fi

cd $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.${PKGTYPE:-tgz}
true
c64e72eaab0bfd7f446f45b0f03e496b0cdbbb75
Shell
justinba1010/USCCodeathon-S21-Lower
/polyLeaping/genSamples.sh
UTF-8
362
2.9375
3
[]
no_license
#on windows, first use dos2unix genSamples.sh to get rid of windows characters # then run this file using Bash # (accessed through start menu -> run -> Bash) #or using cmd use bash testgen.sh for i in {0..1} do echo $i | python3 ./mkin.py > samples/input/input$i.txt python3 solutions/sol.py < samples/input/input$i.txt > samples/output/output$i.txt done
true
67f9f0081d5d2cb9c6012fef82c507e2b50eefdb
Shell
yasserfarouk/dotfiles
/quick-install.sh
UTF-8
2,809
3.3125
3
[]
no_license
#!/usr/bin/env zsh
# Post-clone bootstrap for these dotfiles: set up pyenv shims, install the
# vim-plug plugin manager and all vim/neovim plugins, switch the login shell
# to zsh, apply macOS defaults when on Darwin, and fix zsh completion
# directory permissions.  Needs network access and an interactive terminal.

echo "Updating init files with replacements"
echo "-------------------------------------"

# NOTE(review): the tilde inside double quotes is NOT expanded, so this
# prepends the literal string '~/.pyenv/bin' to PATH — probably intended
# as "$HOME/.pyenv/bin"; confirm.
export PATH="~/.pyenv/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"

# Historical (disabled) step: patch the neovim python interpreter paths
# into init.vim via placeholder replacement.
# pyenv activate neovim2
# neovim2_py=`pyenv which python` # Note the path
# echo "neovim2 in $neovim2_py"
# pyenv activate neovim3
# neovim3_py=`pyenv which python` # Note the path
# echo "neovim3 in $neovim3_py"
# function replace_tag_in_file(){
# python -c "s=open('$3', 'r').read().replace('$1','$2'); open('$3', 'w').write(s)" 2>&1 >/dev/null
# }
# function replace_tag_in_all(){
# for file_name in $(find ~/.dotfiles -type f -and ! -name '*.otf' -and ! -name '.*' -and ! -path '*tmux/plugins*' -and ! -name '*.png' -and ! -name 'Makefile' -and ! -path '*z*' );
# do
# python -c "s=open('$file_name', 'r').read().replace('$1','$2'); open('$file_name', 'w').write(s)" 2>&1 >/dev/null
# done
# for file_name in $(find ~/.ysupport -type f -and ! -name '*.otf' -and ! -name '.*' -and ! -path '*tmux/plugins*' -and ! -name 'Makefile' -and ! -name '*.png' -and ! -path '*z*');
# do
# python -c "s=open('$file_name', 'r').read().replace('$file_name', 'w').write(s)" 2>&1 >/dev/null
# done
# }
#
# replace_tag_in_file '<<nvimpy2>>' $neovim2_py "$HOME/.config/nvim/init.vim"
# replace_tag_in_file '<<nvimpy3>>' $neovim3_py "$HOME/.config/nvim/init.vim"
#
#
# cd $HOME
# echo "running RCM rcup command"
# echo "This is symlink the rc files"
#
# case "$(uname -s)" in
# Darwin)
# rcup
# ;;
# Linux)
# rcup
# export PATH="$(brew --prefix)/bin:$(brew --prefix)/sbin:$PATH"
# echo "export PATH='$(brew --prefix)/bin:$(brew --prefix)/sbin'":'"$PATH"' >>~/.bashrc
# ;;
# esac
# echo "---------------------------------------------------------"

# Fetch the vim-plug plugin manager into vim's autoload directory.
echo "Installing Plug"
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim

# Run the plugin installation headlessly in both vim and neovim.
echo "Installing vim plugins"
echo "----------------------"
vim +PlugInstall +UpdateRemotePlugins +qa
nvim +PlugInstall +UpdateRemotePlugins +CheckHealth +qa

# Make zsh the login shell (takes effect on next login).
echo "Changing to zsh"
chsh -s $(which zsh)
echo "You will need to log out for this to take effect"
echo "----------------------------------------------"

# Apply macOS defaults only when running on Darwin.
case "$(uname -s)" in
Darwin)
echo "running oxs defaults"
./osx.sh
;;
Linux)
echo 'Linux ... no osx defaults'
;;
esac

# zsh refuses to use group-writable completion dirs; strip the g+w bit.
echo "Correcting group permissions"
echo "----------------------------"
compaudit | xargs chmod g-w

./qupdate

echo "------------------------------------------------------------"
echo " All done! "
echo "Change your terminal font to <Source Code Pro for Powerline>"
echo "------------------------------------------------------------"
exit 0
true
6b7534c4f02378535893a75337746366c40393e5
Shell
GerryLon/dev-scripts
/install_basic.sh
UTF-8
14,906
3.8125
4
[ "Apache-2.0" ]
permissive
#!/bin/bash
# One-shot provisioning script for a CentOS host: configures the Aliyun yum
# mirror, then installs each tool (git, go, redis, docker, nginx, mysql,
# nodejs, mongodb, vim plugins) whose flag is set to "1" in app.properties.
# Relies on helper functions (getProperty, echoInfo, echoWarn, echoError,
# log, isCmdExist, selectCmd, lnsfFiles) sourced from public/index.sh.
# Must be run as root.

# work dir
scriptDir=$(cd `dirname $0`; pwd)

# import public scripts
. "$scriptDir/public/index.sh"

appName="${0##*[\\/]}" # xx.sh
appName=(${appName//\./ })
logFile="$scriptDir/${appName[0]}"".log" # xx.log
etcProfile='/etc/profile'
startDir=`pwd`
appConf="$scriptDir/app.properties"
softDir=`getProperty $appConf softDir`
# Major CentOS release number, parsed from /etc/redhat-release.
centosVersion=`cat /etc/redhat-release | sed -r 's/.* ([0-9]+)\..*/\1/'`
echoInfo "centos version: $centosVersion"

rm -rf $logFile
echoInfo "install log will be set at:"
echoInfo "$logFile"
echo

# write log to $logFile when install soft
function appLog() {
    log $1 $logFile
}

function failedAppLog() {
    log "failed: $1" $logFile
}

function successAppLog() {
    log "success: $1" $logFile
}

# yum and writes under /usr/local require root.
if [ $UID -ne 0 ]; then
    echoInfo 'You are not root user'
    exit 1
fi

[ ! -d $softDir ] && echoInfo "$softDir not exist, creating..." && mkdir -p "$softDir"

# install wget
if ! isCmdExist wget; then
    echoInfo 'installing wget'
    yum install -y wget
else
    echoInfo 'wget was already installed'
fi

# install aliyun yum repository
echoInfo 'checking aliyun yum repository'
if ! yum repolist | grep -q 'aliyun.com'; then
    echoInfo 'installing aliyun yum repository ...'
    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
    # NOTE(review): the quotes make the output file name and the URL a
    # single wget argument — likely should be: wget -O FILE URL. Confirm.
    wget -O "/etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-${centosVersion}.repo"
    yum makecache
else
    echoInfo 'aliyun yum repository was already installed'
fi

# install sys tools
# Reads the comma-separated "systools" list from app.properties, installs
# every package not yet present (via rpm -q), and seeds /etc/vimrc defaults.
function installSystools() {
    local systools=""
    local systoolsFromConf=`getProperty $appConf systools`
    local systoolsArr=(${systoolsFromConf//,/ }) # split by , to array
    for i in "${!systoolsArr[@]}"; do
        # if ! isCmdExist "${systoolsArr[i]}"; then
        rpm -q "${systoolsArr[i]}"
        if [ $? -ne 0 ]; then
            systools="$systools ${systoolsArr[i]}"
        else
            echoInfo "${systoolsArr[i]} was already installed"
        fi
    done
    if [ -n "$systools" ]; then
        echoInfo "installing $systools"
        # notice: $systools instead of "$systools"
        yum install -y $systools
    fi
    # Append basic vim settings once (guarded by the grep below).
    cat /etc/vimrc | grep -q 'set ts=4'
    if [ $? -ne 0 ]; then
        echo "set nu
set ts=4
set ai
set shiftwidth=4
" >> /etc/vimrc
    fi
}
installSystools

# Build git from source into gitRoot when the "git" flag is enabled and git
# is not already present; then configure global aliases.
function installGit() {
    local soft=git
    local installFlag=$(getProperty $appConf $soft)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install $soft"
        return
    fi
    local gitVersion=$(getProperty $appConf gitVersion)
    local gitRoot=$(getProperty $appConf gitRoot)
    # install git
    if ! isCmdExist git; then
        echoInfo 'installing git ...'
        # kernel dependency
        yum install -y curl-devel expat-devel gettext-devel openssl-devel zlib-devel
        # avoid "tclsh failed; using unoptimized loading"
        yum install -y tcl build-essential tk gettext
        # install from source code,will cause error, should install below
        yum install -y perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker
        local gitBall="git-$gitVersion.tar.xz"
        wget -O "$softDir/$gitBall" -c "https://mirrors.edge.kernel.org/pub/software/scm/git/$gitBall"
        cd $softDir && tar -xJf "$gitBall"
        cd "$softDir/git-$gitVersion"
        ./configure --prefix="$gitRoot"
        make all
        make install
        if [ $? -ne 0 ]; then
            echoWarn "install $soft failed!!!"
            return
        fi
        # Expose the freshly built binaries on the default PATH.
        lnsfFiles "$gitRoot/bin" "/usr/local/bin"
        git version >/dev/null 2>&1
        if [ $? -ne 0 ]; then
            failedAppLog "install git"
            echoError 'install git failed, please check!'
            return 1
        else
            echoInfo 'install git success'
        fi
        cd $startDir
    else
        echoInfo 'git was already installed'
    fi
    git version
    echo 'config git alias'
    git config --global alias.st "status"
    git config --global alias.br "branch"
    git config --global alias.co "checkout"
    git config --global alias.cm "commit -m"
    git config --global alias.df "diff"
    git config --global alias.sh "stash"
}
installGit

# Install the official golang tarball into /usr/local and export
# GOROOT/GOPATH/PATH in /etc/profile.
function installGo() {
    local installFlag=$(getProperty $appConf go)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install golang"
        return
    fi
    # install golang
    if ! isCmdExist go; then
        echoInfo 'installing golang ...'
        goroot=`getProperty $appConf goroot`
        gopath=`getProperty $appConf gopath`
        local goVersion=`getProperty $appConf goVersion`
        test ! -d $goroot && mkdir -p $goroot
        test ! -d $gopath && mkdir -p $gopath
        golangBall="go${goVersion}.linux-amd64.tar.gz"
        wget -O "$softDir/$golangBall" -c "https://studygolang.com/dl/golang/$golangBall"
        if [ $? -ne 0 ]; then
            echoError 'download golang tarball failed, please check!'
            exit 1
        fi
        tar -C /usr/local -xzf "$softDir/$golangBall"
        # Persist the Go environment for all future login shells.
        echo "# added on `date +"%Y-%m-%d %H:%M:%S"`
export GOROOT=$goroot
export GOPATH=$gopath
export GOBIN=
export PATH=\$PATH:\$GOROOT/bin:\${GOPATH//://bin:}/bin" >> $etcProfile
        source $etcProfile
        go version >/dev/null 2>&1
        if [ $? -ne 0 ]; then
            failedAppLog "install go"
            echoError 'install golang failed, please check!'
            exit 1
        else
            echoInfo 'install golang success'
        fi
    else
        echoInfo 'go was already installed'
    fi
    go version
}
installGo

# Build redis from source into redisRoot and link its binaries into
# /usr/local/bin.
function installRedis() {
    local installFlag=$(getProperty $appConf redis)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install redis"
        return
    fi
    local redisRoot=`getProperty $appConf redisRoot`
    # redisRoot=${redisRoot:-'/usr/local/redis'} # redis root default value
    if [ $? -ne 0 ]; then
        redisRoot=/usr/local/redis
        echoWarn "redisRoot is not set, using defaults: $redisRoot"
    else
        echoInfo "Config: redisRoot=$redisRoot"
    fi
    [ ! -d "$redisRoot" ] && echo "creating dir: $redisRoot" && mkdir -p "$redisRoot"
    if ! isCmdExist "$redisRoot/bin/redis-server"; then
        redisVersion=`getProperty $appConf redisVersion`
        if [ $? -ne 0 ]; then
            redisVersion=4.0.11
            echoWarn "redisVersion is not set, using defaults: $redisVersion"
        else
            echoInfo "Config: redisVersion=$redisVersion"
        fi
        redisSrcDir="redis-$redisVersion"
        redisBall="$redisSrcDir.tar.gz"
        # [ ! -f "$softDir/redis-$redisVersion.tar.gz" ] \
        echoInfo "downloading redis-$redisVersion.tar.gz"
        wget -O "$softDir/$redisBall" -c "http://download.redis.io/releases/$redisBall"
        [ $? -ne 0 ] && echo "download $redisBall failed" && failedAppLog "download $redisBall" && exit 1
        cd "$softDir"
        tar -zxf "$redisBall"
        cd "$redisSrcDir"
        make && cd src && make install PREFIX="$redisRoot"
        [ $? -ne 0 ] && echoError 'make redis failed' && failedAppLog "make redis" && exit 1
        [ ! -d "$redisRoot/conf" ] && echo "mkdir $redisRoot/conf" && mkdir "$redisRoot/conf"
        cp ../redis.conf "$redisRoot/conf"
        echo 'make soft link for redis commands in /usr/local/bin'
        for i in `ls $redisRoot/bin`; do
            ln -s "$redisRoot/bin/$i" "/usr/local/bin/$i"
        done
        echoInfo 'install redis success'
        cd $startDir
    else
        echoInfo "redis was already installed"
    fi
    "$redisRoot"/bin/redis-server -v
}
installRedis

# Install docker-io from the EPEL repository (CentOS 6 era packaging) and
# optionally enable it at boot via chkconfig.
function installDocker() {
    local installFlag=$(getProperty $appConf docker)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install docker"
        return
    fi
    if ! isCmdExist docker; then
        echoInfo 'installing docker'
        rpm -Uvh http://ftp.riken.jp/Linux/fedora/epel/6Server/x86_64/epel-release-6-8.noarch.rpm
        yum install -y docker-io
        [ $? -ne 0 ] && echoError 'install docker failed' && failedAppLog "install docker" && exit 1
        local dockerStartOnBoot=$(getProperty $appConf dockerStartOnBoot)
        [ "$dockerStartOnBoot" == "1" ] && echo "Config: dockerStartOnBoot=1" && chkconfig docker on && chkconfig --list
    else
        echoInfo 'docker was already installed, version:'
        docker version && echo
    fi
    return $?
}
installDocker

# Build nginx from source (with bundled pcre and zlib sources) into
# nginxRoot.
function installNginx() {
    local installFlag=$(getProperty $appConf nginx)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install nginx"
        return
    fi
    local nginxVersion=`getProperty $appConf nginxVersion 1.14.1`
    local nginxRoot=`getProperty $appConf nginxRoot /usr/local/nginx`
    local nginxBall="nginx-$nginxVersion.tar.gz"
    if [ -x $nginxRoot/sbin/nginx ]; then
        echoInfo 'nginx was already installed'
        $nginxRoot/sbin/nginx -v
        return 0
    fi
    echoInfo "installing nginx"
    wget -O "$softDir/$nginxBall" -c "http://nginx.org/download/$nginxBall"
    cd "$softDir"
    tar -zxf $nginxBall
    local pcreBall="pcre-8.42.tar.gz"
    wget -O "$softDir/$pcreBall" -c "https://ftp.pcre.org/pub/pcre/$pcreBall" \
        && tar -zxf "$pcreBall" \
        || { echoError "doanload $pcreBall failed" && failedAppLog "download $pcreBall"; }
    local zlibBall="zlib-1.2.11.tar.gz"
    wget -O "$softDir/$zlibBall" -c "http://zlib.net/$zlibBall" \
        && tar -zxf "$zlibBall" \
        || { echoError "doanload $zlibBall failed" && failedAppLog "download $zlibBall"; }
    cd "$softDir/nginx-$nginxVersion"
    ./configure --prefix=$nginxRoot \
        --with-pcre=$softDir/pcre-8.42 \
        --with-zlib=$softDir/zlib-1.2.11
    make && make install
    [ $? -ne 0 ] && echoError "install nginx failed" && failedAppLog "install nginx" && exit 1
    echoInfo "install nginx success"
    $nginxRoot/sbin/nginx -v
    cd $startDir
    return 0
}
installNginx

# Install the MySQL 5.5 server+client RPMs and set the root password from
# app.properties.
function installMysql() {
    local installFlag=$(getProperty $appConf mysql)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install mysql"
        return
    fi
    if isCmdExist mysql; then
        echoInfo "mysql was already installed"
        return 0
    fi
    yum remove -y mariadb* # for centos7
    ps -ef | grep mysql | grep -v grep
    if [ $? -eq 0 ]; then
        echoInfo "mysql is running..."
        return 0
    fi
    local mysqlServerBall='MySQL-server-5.5.62-1.el6.x86_64.rpm'
    local mysqlClientBall='MySQL-client-5.5.62-1.el6.x86_64.rpm'
    wget -O "$softDir/$mysqlServerBall" -c "https://dev.mysql.com/get/Downloads/MySQL-5.5/$mysqlServerBall" \
        || { echoInfo "download $mysqlServerBall failed" && failedAppLog "download $mysqlServerBall" && exit 1; }
    wget -O "$softDir/$mysqlClientBall" -c "https://dev.mysql.com/get/Downloads/MySQL-5.5/$mysqlClientBall" \
        || { echoInfo "download $mysqlClientBall failed" && failedAppLog "download $mysqlClientBall" && exit 1; }
    yum remove -y mysql*
    cd $softDir
    if rpm -qa | grep MySQL-server; then
        echo 'mysql server was already installed'
    else
        echoInfo "installing mysql server"
        yum -y localinstall $mysqlServerBall
        [ $? -ne 0 ] && echoError "install mysql server failed" \
            && failedAppLog "install mysql server" && exit 1
    fi
    if rpm -qa | grep -q MySQL-client; then
        echo 'mysql client was already installed'
    else
        echoInfo "installing mysql client"
        yum -y localinstall $mysqlClientBall
        [ $? -ne 0 ] && echoError "install mysql client failed" \
            && failedAppLog "install mysql client" && exit 1
    fi
    echoInfo "install mysql success"
    cd $startDir
    local mysqlPassword=`getProperty $appConf mysqlPassword`
    echoInfo "set mysql password"
    service mysql restart && mysqladmin -uroot password $mysqlPassword
    [ $? -eq 0 ] && echoInfo "set mysql password success" || echo "set mysql password failed!"
}
installMysql

# Unpack the official nodejs binary tarball into nodejsRoot and link the
# executables (node, npm, ...) into /usr/local/bin.
function installNodejs() {
    local installFlag=$(getProperty $appConf nodejs)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install nodejs"
        return
    fi
    local nodejsVersion=$(getProperty $appConf nodejsVersion)
    local nodejsRoot=$(getProperty $appConf nodejsRoot)
    if isCmdExist node; then
        echoInfo "nodejs was already installed, version: `node -v`"
        return 0
    fi
    local nodejsBall="node-v$nodejsVersion-linux-x64.tar.xz"
    wget -O "$softDir/$nodejsBall" -c "https://nodejs.org/dist/v$nodejsVersion/$nodejsBall" \
        || { echoError "download $nodejsBall failed" \
        && failedAppLog "download $nodejsBall" && exit 1; }
    cd $softDir && tar -xJf $nodejsBall
    test -d $nodejsRoot || mkdir $nodejsRoot
    cp -rfu "node-v$nodejsVersion-linux-x64"/* $nodejsRoot
    # mv "$softDir/node-v$nodejsVersion-linux-x64" $nodejsRoot
    # create symbol link for nodejs relative command: node npm etc.
    for i in `ls $nodejsRoot/bin`; do
        test -x "$nodejsRoot/bin/$i" && ln -sf "$nodejsRoot/bin/$i" "/usr/local/bin/$i"
    done
    echoInfo "install nodejs success"
    node -v
    cd $startDir
}
installNodejs

# Unpack the mongodb binary tarball into mongodbRoot, link its binaries,
# and start mongod as a daemon on mongodbDataDir.
function installMongodb() {
    local soft=mongodb
    local installFlag=$(getProperty $appConf $soft)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install $soft"
        return
    fi
    local mongodbVersion=$(getProperty $appConf mongodbVersion)
    local mongodbRoot=$(getProperty $appConf mongodbRoot)
    local mongodbDataDir=$(getProperty $appConf mongodbDataDir)
    local mainCmd=`selectCmd mongo "$mongodbRoot/bin/mongo"`
    if [ -n "$mainCmd" ]; then
        echo "$soft was already installed"
        return 0
    fi
    local softBall="mongodb-linux-x86_64-$mongodbVersion.tgz"
    wget -O "$softDir/$softBall" -c "https://fastdl.mongodb.org/linux/$softBall" \
        || { echo "download $softBall" \
        && failedAppLog "download $softBall" && exit 1; }
    cd $softDir && tar -xzf $softBall
    mv "mongodb-linux-x86_64-$mongodbVersion" "$mongodbRoot"
    # soft link, optional
    for i in `ls $mongodbRoot/bin`; do
        test -x "$mongodbRoot/bin/$i" && ln -sf "$mongodbRoot/bin/$i" "/usr/local/bin/$i"
    done
    if ps -ef | grep -q mongo | grep -v grep; then
        echo "$soft is running"
        return 0
    fi
    # --syslog:log will be write to /var/log/message
    # --fork daemon mode running
    mkdir -p "$mongodbDataDir"
    sh -c "$mongodbRoot/bin/mongod --dbpath=$mongodbDataDir --syslog --fork"
}
installMongodb

# Download and run the spf13-vim bootstrap script to set up vim plugins.
function installVimPlugins() {
    local soft=vimPlugins
    local installFlag=$(getProperty $appConf $soft)
    if [ "$installFlag" != "1" ]; then
        echoWarn "you do not wanna install $soft"
        return
    fi
    # sh -c "$startDir/public/vim_bootstrap.sh"
    cd $startDir
    wget -c -O "$startDir/bootstrap.sh" "https://raw.githubusercontent.com/GerryLon/spf13-vim/gerrylon_dev/bootstrap.sh" \
        || { echo "download vim_bootstrap.sh failed" \
        && failedAppLog "download vim_bootstrap.sh" && exit 1; }
    chmod u+x "$startDir/bootstrap.sh"
    $startDir/bootstrap.sh
    cd $startDir
}
installVimPlugins

# Register start_services.sh in /etc/rc.local (once) so the installed
# services come up at boot, when enabled in app.properties.
function startServicesOnBoot() {
    local startOnBoot=`getProperty $appConf startServicesOnBoot`
    if [ "$startOnBoot" != '1' ]; then
        echoWarn 'startServicesOnBoot disabled'
        return 1
    fi
    grep -q "$scriptDir/start_services.sh" /etc/rc.local
    if [ $? -ne 0 ]; then
        echo "[ -x $scriptDir/start_services.sh ] && $scriptDir/start_services.sh || echo \"start_services failed at \`date +'%Y-%m-%d %H:%M.%S'\`\" >> /var/log/start_services.log" >> /etc/rc.local
    fi
}
startServicesOnBoot
true
0267a1e1c3067288971fd4d1aba5e23bb6e1f0f4
Shell
polyvertex/git-tools
/git-backtag
UTF-8
660
3.984375
4
[]
no_license
#!/usr/bin/env bash # # git-backtag # Jean-Charles Lefebvre <polyvertex@gmail.com> # # Tag an existing commit using commit's date and time. # source "$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")/gib" TAG_NAME="$1" COMMIT="$2" MESSAGE="$3" [ -z "$MESSAGE" ] && MESSAGE="$TAG_NAME" [ -z "$COMMIT" -o -z "$TAG_NAME" ] \ && gib_die "usage: $0 {new_tag} {commit} [message]" GIT_COMMITTER_DATE="$(git log -n 1 --no-color --pretty="format:%ci" "$COMMIT")" [ $? -eq 0 ] || gib_die "Commit not found: $COMMIT" export GIT_COMMITTER_DATE git tag -a -m "$MESSAGE" "$TAG_NAME" "$COMMIT" code=$? unset GIT_COMMITTER_DATE [ $code -eq 0 ] || gib_die "Failed to create annotated tag: $TAG_NAME"
true
587f39493453759dd8f158c36b114a1c6a13ff44
Shell
codemation/ec2-health-check
/gen-service.sh
UTF-8
620
3.015625
3
[]
no_license
#!/bin/bash echo '#!/bin/bash' > /usr/local/bin/ec2-healthz echo $(which python3)' '$(pwd)"/ec2-health-check/webapp/server.py" >> /usr/local/bin/ec2-healthz chmod +x /usr/local/bin/ec2-healthz cat <<EOF | sudo tee /etc/systemd/system/ec2-healthcheck.service [Unit] Description=ec2-healthcheck Documentation=https://github.com/codemation/ec2-health-check [Service] ExecStart=/usr/local/bin/ec2-healthz Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target EOF #Start ec2-healthz service { sudo systemctl daemon-reload sudo systemctl enable ec2-healthcheck sudo systemctl start ec2-healthcheck }
true
1ba76d0cc6429c3fd36f31c45e7d50433baff456
Shell
bell07/bashscripts-switch_gentoo
/checks/version-junkie-update.sh
UTF-8
1,156
3.765625
4
[]
no_license
#!/bin/sh I=/tmp/ebuild.installed C=/tmp/ebuild.portage cleanup () { sed 's/ *#.*//g' $1 | \ grep -v $1 \ -e "^$" \ -e 'KEYWORDS=' \ -e 'HOMEPAGE=' \ -e 'LICENSE=' \ -e 'SRC_URI=' \ -e 'eerror' \ -e 'einfo' \ -e 'ewarn' \ -e 'elog' } cd /var/db/pkg/ || exit 1 find . -mindepth 3 -type f -name '*.ebuild' |\ sort |\ while read FILE do EBUILD_INSTALLED=$(basename $FILE) PACKAGE=$(echo $EBUILD_INSTALLED | sed 's/[.]ebuild//g') CATEGORIE=$(echo $FILE | cut -f2 -d'/') REPO="$(cat "$(dirname $FILE)/repository")" REPO_PATH="/var/db/repos/gentoo/$REPO" EBUILD_PORTAGE=$(ls "$REPO_PATH"/"$CATEGORIE"/$(echo $PACKAGE | cut -f1 -d'-')*/$EBUILD_INSTALLED 2>/dev/null) [[ -f $EBUILD_PORTAGE ]] || continue cleanup $FILE > $I cleanup $EBUILD_PORTAGE > $C DIFF=$(diff $I $C 2>/dev/null) if [[ $? -eq 1 ]]; then if [[ "$1" = "-q" ]]; then echo "=$CATEGORIE/$EBUILD_INSTALLED" | sed 's/\.ebuild//g' else echo -e "$CATEGORIE/$(basename $(dirname $EBUILD_PORTAGE))\t$EBUILD_INSTALLED" fi [[ "$1" = "-v" ]] && echo -e "$DIFF\n" fi rm $I $C done
true
78a14aa498c2c2fccf8c4ccf8a99de4d4fbf50ca
Shell
79laowang/Shell_scripts
/increasing-date.sh
UTF-8
587
3.578125
4
[ "MIT" ]
permissive
#!/usr/bin/env bash # -*- coding:utf-8 -*- #------------------------------------------------------------------------------- # File Name: increasing-date.sh # Purpose: # # Author: Ke Wang # # Created: 2019-11-07 # Copyright: (c) Ke Wang 2019 # Licence: <your licence> #------------------------------------------------------------------------------- main(){ first=$1 second=$2 while [ "$first" != "$second" ]; do echo $first first=$(date -d "-1 days ago ${first}" +%Y%m%d) done } #---------------- Main Program -------------- main "$@"
true
1935ba9139e08fddac768752804dcc80076d211c
Shell
notwa/rc
/sh/hex
UTF-8
640
3.46875
3
[]
no_license
#!/usr/bin/env zsh # YES_ZSH # YES_BASH # YES_DASH # YES_ASH # though technically compatible with other shells, # extra functionality is through zsh's extended arithmetic functions. hex() { ### @- ### perform arithmetic using the shell and display the result as ### an unsigned 32-bit integer in hexadecimal. ### see also [`arith`](#arith) and [`bin`](#bin). ### ### ``` ### $ hex 0x221EA8-0x212020 ### 0000FE88 ### ``` ### ### **NOTE:** there also exists a hex(1) program provided by ### the *basez* package that i don't use. printf "%08X\n" "$(($@))" } [ -n "${preload+-}" ] || hex "$@"
true
ac3b0aa05a58af038f623d1294cfcfa4e4ef59ad
Shell
adambirse/kafka-consumer
/deploy.sh
UTF-8
1,332
3.3125
3
[ "MIT" ]
permissive
#!/bin/bash set -e extract_secrets() { echo $GCLOUD_SERVICE_KEY | base64 --decode -i > ${HOME}/gcloud-service-key.json gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json } prepare_yaml() { cp deployment_template.yml deployment.yml sed -i.bak "s/_PROJECT_NAME_/$PROJECT_NAME/g" deployment.yml sed -i.bak "s/_DOCKER_IMAGE_NAME_/$DOCKER_IMAGE_NAME/g" deployment.yml sed -i.bak "s/_TRAVIS_COMMIT_/$TRAVIS_COMMIT/g" deployment.yml } prepare_cloud() { gcloud --quiet config set project $PROJECT_NAME gcloud --quiet config set container/cluster $CLUSTER_NAME gcloud --quiet config set compute/zone ${CLOUDSDK_COMPUTE_ZONE} gcloud --quiet container clusters get-credentials $CLUSTER_NAME } build_and_push() { cd $TRAVIS_BUILD_DIR/docker/build_context docker build -t gcr.io/${PROJECT_NAME}/${DOCKER_IMAGE_NAME}:$TRAVIS_COMMIT . cd $TRAVIS_BUILD_DIR cd build/ gcloud docker -- push gcr.io/${PROJECT_NAME}/${DOCKER_IMAGE_NAME} cd $TRAVIS_BUILD_DIR yes | gcloud beta container images add-tag gcr.io/${PROJECT_NAME}/${DOCKER_IMAGE_NAME}:$TRAVIS_COMMIT gcr.io/${PROJECT_NAME}/${DOCKER_IMAGE_NAME}:latest } deploy() { kubectl config view kubectl config current-context kubectl apply -f deployment.yml kubectl apply -f service.yml } extract_secrets prepare_yaml prepare_cloud build_and_push deploy
true
346c961e4383d8ad14314f4a4c8c1e6f61c64326
Shell
bu-rcs/examples
/r/examples/par_domc/rjob
UTF-8
311
2.578125
3
[]
no_license
#!/bin/bash -l #Request 8 cores #$ -pe omp 8 # #Join output and error stream #$ -j y # #Name your job #$ -N doMC # # # after we requested 8 slots with omp 8 option, # the variable NSLOTS is set to 8 and we can use it to pass the number # of cores to the R script # module load R/3.2.3 Rscript domc.R $NSLOTS
true
129fb4bab4adad7f02b997d636b3969319fc9051
Shell
blueyed/dotfiles
/usr/bin/mv-and-link-back
UTF-8
1,473
4.15625
4
[ "MIT" ]
permissive
#!/usr/bin/zsh if [[ $# -lt 2 ]]; then echo "Usage: mv-and-link-back <source>... <target>" exit 1 fi target=$argv[$#] sources=($argv[1,-2]) remove_trailing_slashes() { a=$1 while [[ $a =~ '/$' ]]; do a=${a%/} done echo $a } relpath() { python -c "import os.path; print os.path.relpath('$1','${2:-$PWD}')" } # Remove trailing slashes from target target=$(remove_trailing_slashes $target) # target has to be a directory when having multiple sources if [[ $#sources > 1 ]]; then if ! [[ -d $target ]]; then echo "Error: target is not a directory. But there are multiple sources." exit 1 fi fi for source in $sources; do if [[ -L $source ]]; then echo "Warning: source is a symbolic link already, skipping: $source" continue fi source_resolved=$(readlink -f $source) # resolve e.g. "." and ".." [[ -n $source_resolved ]] && source=$source_resolved source=$(remove_trailing_slashes $source) # this might be necessary when "readlink -f" fails if [[ -d $target ]] ; then mv_to=$target/$source:t else mv_to=$target fi if [[ -e $mv_to ]] ; then echo "Error: target already exists: $mv_to" exit 2 fi # if [[ $source -ef $mv_to ]]; then # echo "Error: source and target are the same file already" # exit 1 # fi # move: cmd=(mv $source $mv_to) echo "DEBUG: $cmd" $cmd # link: link_to=$(relpath $mv_to $source:h) cmd=(ln -s $link_to $source) echo "DEBUG: $cmd" $cmd done
true
7bc8c6906d3f8acfaccc93d6276cf3635ffa4ea3
Shell
soimort/dotfiles
/Scripts/arch
UTF-8
894
3.953125
4
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
#!/bin/sh ARCH=x86_64 exec_install() { for package_name in "$@"; do status=`curl -sI https://www.archlinux.org/packages/extra/$ARCH/$package_name/ | head -n 1 | cut -d$' ' -f2` if [ $status = 200 ]; then git_repo=packages else status=`curl -sI https://www.archlinux.org/packages/community/$ARCH/$package_name/ | head -n 1 | cut -d$' ' -f2` if [ $status = 200 ]; then git_repo=community fi fi mkdir $package_name cd $package_name git init git remote add -t packages/$package_name -f origin git://projects.archlinux.org/svntogit/$git_repo.git git checkout packages/$package_name cd trunk makepkg done } if [ $# -eq 0 ]; then echo arch exit fi case $1 in 'install' ) shift exec_install $* ;; esac
true
730070aef5f0849c323ca375c63305caa4ddf0a1
Shell
yueng-amaiwa/javascript_examples
/juan/data_types/docker_build.sh
UTF-8
1,015
3.875
4
[]
no_license
#! /bin/bash # # docker_build.sh # Copyright (C) 2021 juanwilches <juanwilches@VPN-159-23.vpn.usf.edu> # # Distributed under terms of the MIT license. # DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" #1. Create url.txt file in the home folder (host) #2. Copy url.txt to docker building context (next Dockerfile) #3. Build docker image (copying url.txt in home folder in container' filesystem) #4. echo url.txt #5 delete url.txt from home and from docker building context (next Dockerfile) echo "Begin copying file..." if [ ! -f $HOME/url.txt ] then echo http://www.amaiwa.com/ > $HOME/url.txt cp $HOME/url.txt $DIR fi echo "End copying file..." echo "Begin build image..." docker build -f Dockerfile -t ubuntu-chrome-js $DIR echo "End build image..." echo "Show contents of url.txt" if [ -f $DIR/url.txt ] then cat $DIR/url.txt fi echo "Begin deleting files..." if [ -f $HOME/url.txt ] then rm $HOME/url.txt fi if [ -f $DIR/url.txt ] then rm $DIR/url.txt fi echo "End deleting files..."
true
f56cb49df67788bcf21b035cc5d453d2c942cc6f
Shell
NoUseFreak/wait-for-it
/scripts/get.sh
UTF-8
613
3.59375
4
[ "MIT" ]
permissive
#!/usr/bin/env bash # Usage: curl https://raw.githubusercontent.com/NoUseFreak/wait-for-it/master/scripts/get.sh | bash get_latest_release() { curl --silent "https://api.github.com/repos/NoUseFreak/wait-for-it/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' } download() { curl -Ls -o /usr/local/bin/wait-for-it https://github.com/NoUseFreak/wait-for-it/releases/download/$1/`uname`_wait-for-it } echo "Looking up latest release" RELEASE=$(get_latest_release) echo "Downloading package" $(download $RELEASE) echo "Making executable" sudo chmod +x /usr/local/bin/wait-for-it
true
fd6ed8ea09665e3e709d23070d6a44a9eba3ae4f
Shell
manolonte/POP
/usr_local_bin/pop_init.sh
UTF-8
1,524
3.203125
3
[]
no_license
#!/bin/sh set -x # Kernel 4.3 # 408 - LED # 409 - S1 - posición abajo # 410 - S2 - posición arriba # 411 - B1 ROJO -> RESET/SHUTDOWN # 412 - B2 NEGRO echo 408 > /sys/class/gpio/export echo out > /sys/class/gpio/gpio408/direction echo 1 > /sys/class/gpio/gpio408/value echo 409 > /sys/class/gpio/export echo in > /sys/class/gpio/gpio409/direction echo 410 > /sys/class/gpio/export echo in > /sys/class/gpio/gpio409/direction echo 411 > /sys/class/gpio/export echo in > /sys/class/gpio/gpio409/direction echo 412 > /sys/class/gpio/export echo in > /sys/class/gpio/gpio409/direction v409=`cat /sys/class/gpio/gpio409/value` v410=`cat /sys/class/gpio/gpio410/value` echo $v409 echo $v410 # 408=1 409=0 -> Mantenimiento, switch abajo # 408=1 409=1 -> Switch enmedio # 408=0 409=1 -> Switch arriba if [ "$v409" -eq 1 ] && [ "$v410" -eq 0 ] then mode=mnt elif [ "$v409" -eq 1 ] && [ "$v410" -eq 1 ] then mode=p2 elif [ "$v409" -eq 0 ] && [ "$v410" -eq 1 ] then mode=p1 fi echo $mode if [ "$mode" != "mnt" ] then echo "iface wlan0 inet manual" > /etc/network/interfaces else :> /etc/network/interfaces fi /usr/local/bin/pop_loop.sh $mode & if [ "$mode" = "p1" ] then pop_script="pop_p1.sh" fi if [ "$mode" = "p2" ] then pop_script="pop_p2.sh" fi if [ "$mode" = "mnt" ] then pop_script="pop_mnt.sh" fi date >> /var/log/pop.log echo 1 > /sys/class/gpio/gpio408/value chmod 777 /sys/class/gpio/gpio408/value /sbin/runuser -l chip /home/chip/${pop_script} >> /var/log/pop.log 2>&1 &
true
34992b748c859bca016f2ec64033048c4a4e2750
Shell
arabine/grenier
/Bash/moulinette.sh
MacCentralEurope
886
3.578125
4
[]
no_license
#!/bin/bash root_file="/cygdrive/z/Root" search () { for dir in `echo *` # ==> `echo *` lists all the files in current working directory, #+ ==> without line breaks. # ==> Similar effect to for dir in * # ==> but "dir in `echo *`" will not handle filenames with blanks. do if [ -d "$dir" ] ; then # ==> If it is a directory (-d)... if [ "$dir" == "CVS" ] ; then cp $root_file $dir echo "$dir --> fichier copi" fi if cd "$dir" ; then # ==> If can move to subdirectory... search `expr $1 + 1` # with recursion ;-) # ==> Function calls itself. cd .. fi fi done } if [ $# != 0 ] ; then cd $1 # move to indicated directory. #else # stay in current directory fi echo "Initial directory = `pwd`" search 0 exit 0
true
e7a0151be63dfd44263dd72b99a8757c8a8223d9
Shell
dominicmcginnis/scripts
/serverCheck.sh
UTF-8
15,870
3.53125
4
[]
no_license
#!/bin/bash # Configure defaults export environment="INT" export appToTest="TPO" export baseOsbUrl="https://my.osb.url.com/v2" export siteUrl="http://my.app.url/tpo" export jenkinslink="" export credentials="{ Password: 'foobar', Realm: '1234BFG', SiteURL: 'https://my.site.com', UserName: 'my.user@user.com'}" export REALM="BE11158783" validStatus=["200","201","300","301"] serviceDown="false" sessionId="" statusOutLog="" #Setup input options while test $# -gt 0; do case "$1" in -h|--help) echo "Script for executing service checks" echo " " echo "Usage of the script looks like the following:" echo " serverCheck.sh -e <env> -a <app> -ou <osbUrl> -au <appUrl> -j <jenkinsJobUrl>" echo " " echo "options:" echo "-h, --help show brief help" echo "-e, --env=ENVIRONMENT Specify Environment running against, mainly used in reporting. (Default: INT)" echo "-a, --app=APPLICATION Specify Application running against, mainly used in reporting. (Default: TPO" echo "-ou, --osbUrl=URL Specify URL for OSB service. (Default: https://my.osb.url.com/v2)" echo "-au, --appUrl=URL Specify Application URL. (Default: http://my.app.url/tpo)" echo "-j, --jenkins=JENKINS_JOB_LINK Specify the link back to the jenkins job. 
(Default: empty)" echo "-c, --credentials Credentials used for the check{ Password: '', Realm: '', SiteURL: '', UserName: ''}" echo "-r, --realm Realm used for the Directory Services check" exit 0 ;; -e) shift if test $# -gt 0; then export environment=$1 else echo "no env specified" exit 1 fi shift ;; --env*) export environment=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -a) shift if test $# -gt 0; then export appToTest=$1 else echo "no app specified" exit 1 fi shift ;; --app*) export appToTest=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -ou) shift if test $# -gt 0; then export baseOsbUrl=$1 else echo "no osb url specified" exit 1 fi shift ;; --baseOsbUrl*) export baseOsbUrl=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -au) shift if test $# -gt 0; then export siteUrl=$1 else echo "no app URL specified" exit 1 fi shift ;; --appUrl*) export siteUrl=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -j) shift if test $# -gt 0; then export jenkinslink=$1 else echo "no jenkins link specified" exit 1 fi shift ;; --jenkins*) export jenkinslink=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -c) shift if test $# -gt 0; then export credentials=$1 else echo "no credentials specified" exit 1 fi shift ;; --credentials*) export credentials=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; -r) shift if test $# -gt 0; then export REALM=$1 else echo "no realm specified" exit 1 fi shift ;; --realm*) export REALM=`echo $1 | sed -e 's/^[^=]*=//g'` shift ;; *) break ;; esac done # Every config must have a getSessionId in order to establish a session for executing APIs # Take special note of the JSON in the data arrays, it is not a standard format given the parsing # engine is python, the way the Key/Val/{}/[] are quoted must be enforced. 
export jsonConfig=$(< <(cat <<EOF { "getSessionId" : { "name" : "Create Session", "url" : "${baseOsbUrl}/auth/temp/sessions/", "data" : "${credentials}", "headers" : [], "method" : "POST", "jsonResponsePath" : "('TPOLoginResponse' 'SecurityContext' 'SessionId')" }, "OSBLoginCheck" : { "name" : "Login Service (deprecated soon)", "url" : "${baseOsbUrl}/auth/temp/sessions/", "data" : "${credentials}", "headers" : [], "method" : "POST", "jsonResponsePath" : "" }, "TPOSiteCheck" : { "name" : "TPO Site", "url" : "${siteUrl}", "data" : "", "headers" : [], "method" : "GET", "jsonResponsePath" : "" }, "MediaSvcs_IdentitySvcsCheck" : { "name" : "Media_Identity Srvcs", "url" : "${baseOsbUrl}/mediaserver", "data" : "", "headers" : ["OperationName:SaveFile", "TokenCreator:encompass", "TokenExpiration:1484384"], "method" : "GET", "jsonResponsePath" : "" }, "getOSBTPOSessionId" : { "name" : "Create Session", "url" : "${baseOsbUrl}/auth/sessions/", "data" : "${credentials}", "headers" : [], "method" : "POST", "jsonResponsePath" : "" }, "OSBDirectoryServiceCheck" : { "name" : "Directory Service", "url" : "${baseOsbUrl}/directory/host?InstanceID=${REALM}", "data" : "", "headers" : [], "method" : "GET", "jsonResponsePath" : "" }, "PipelineServicecheck" : { "name" : "Pipeline Service", "url" : "${baseOsbUrl}/loan/pipeline/cursors", "data" : "{ }", "headers" : [], "method" : "POST", "jsonResponsePath" : "" }, "EVP_User_Setting_Check" : { "name" : "EVP User Setting API", "url" : "${baseOsbUrl}/vendor/transactions", "data" : "{ KEY: 'value', KEY2: 'value2' }", "headers" : [], "method" : "POST", "jsonResponsePath" : "" }, "EVP_Pricing_Check" : { "name" : "EVP Pricing API", "url" : "${baseOsbUrl}/vendor/transactions", "data" : "{ KEY: 'value', KEY2: [{ key: 'val', key: 'val'}], KEY3: { key: 'val' }}", "headers" : [], "method" : "POST", "jsonResponsePath" : "" } } EOF )) export chatConfig=$(< <(cat <<EOF { "INT": { "room": "ROOM_ID", "authToken": "AUTH_TOKEN" }, "PEGL": { "room": "ROOM_ID", 
"authToken": "AUTH_TOKEN" } } EOF )) log() { echo -e $1 >&2 } # Use python to parse our Json Config string based on the check and it's key parseJsonConfig() { export check=$1 export checkKey=$2 local value=$(echo ${jsonConfig} | python -c 'import os, sys, json; mycheck = os.getenv("check"); mycheckKey = os.getenv("checkKey"); print json.load(sys.stdin)[mycheck][mycheckKey]') echo "${value}" } # Use python to parse our Chat Config string based on the environment and it's key parseChatConfig() { export envName=$1 export envKey=$2 local value=$(echo ${chatConfig} | python -c 'import os, sys, json; myenvName = os.getenv("envName"); myenvKey = os.getenv("envKey"); print json.load(sys.stdin)[myenvName][myenvKey]') echo "${value}" } # Utilize an array to walk the required JSON path to get the value to return from the response # Using python for the json parsing getJsonResponseValue() { export myJsonVal=$1 declare -a getResponseValJsonPath=$2 for i in "${getResponseValJsonPath[@]}"; do export key=$i export myJsonVal=$(echo ${myJsonVal} | python -c 'import os, sys, json; myKey = os.getenv("key"); print json.dumps(json.load(sys.stdin)[myKey])') done echo ${myJsonVal} } # Execute the health check runHealthCheck() { local check=$1 # Establish all of the curl params from the check config json object local url=$(parseJsonConfig ${check} "url") local method=$(parseJsonConfig ${check} "method") local dataArgs=$(parseJsonConfig ${check} "data") local headers="$(parseJsonConfig ${check} "headers")" local getResponseValJsonPath=$(parseJsonConfig ${check} "jsonResponsePath") local getHttpCode="" local curlCommand="curl --connect-timeout 30 --max-time 60 -s -X ${method}" # Ignore the response body, just capture the HTTP CODE if [[ "${getResponseValJsonPath}" == "" ]]; then getHttpCode="-i -o /dev/null -w %{http_code}" fi if [[ "${headers}" == "[]" ]]; then headers=$(echo -e -H 'Content-Type:application/json' -H 'elli-session:'${sessionId} | perl -pe 's/"//g') else # The perl script 
here attempts to format the python "dict" value from the json object into bash friendly stirngs for CURL headers=$(echo -e -H 'Content-Type:application/json' -H 'elli-session:'${sessionId} -H ${headers} | perl -pe 's/\[u//; s/\]//; s/, u/ -H /g; s/'\''//g; s/ -H / -H /g; s/: /:/g; s/"//g') fi # execute the curl command capturing the response local response="" if [[ "${dataArgs}" == "" ]]; then curlCommand=$(echo -e ${curlCommand} ${getHttpCode} ${headers} \"${url}\") log "Executing curl command: ${curlCommand}" response=$(curl --connect-timeout 30 --max-time 60 -s -X ${method} ${getHttpCode} ${headers} "${url}") else curlCommand=$(echo -e ${curlCommand} ${getHttpCode} ${headers} \"${url}\" -d \"${dataArgs}\") log "Executing curl command: ${curlCommand}" response=$(curl --connect-timeout 30 --max-time 60 -s -X ${method} ${getHttpCode} ${headers} "${url}" -d "${dataArgs}") fi # If our response is not being ignored, then we need to parse it for our value if [[ ! "${getResponseValJsonPath}" == "" ]]; then response=$(getJsonResponseValue ${response} "${getResponseValJsonPath}") fi # echo out the response value or http code for capture echo ${response} # Return an exit code indicating check pass/fail if [[ "${getResponseValJsonPath}" == "" ]]; then local valid=0 if [[ "${validStatus[@]}" =~ "${response}" ]]; then valid=1 fi return ${valid} fi } ## Begin Execute Checks executeChecks() { # Get the checks to execute as an arry (the perl command will format the python dict json value into a bash array variable) declare -a checks=$(echo ${jsonConfig} | python -c 'import sys, json; print json.load(sys.stdin).keys()' | perl -pe 's/\[u/(/; s/\]/)/; s/, u/ /g') log "Getting SessionId..." # Get a valid session export sessionId=$(runHealthCheck "getSessionId") if [[ "${sessionId}" == "" ]]; then log "Get session failed!" 
serviceDown="true" fi # Get the count of checks, not including getSessionId to be used in the TAP results checkTotal=`expr ${#checks[@]} - 1` $(echo "1..${checkTotal}" > serviceChecks.tap) # Loop through the checks building our TAP results and Console Output strings outputLog="" count=0 for i in "${checks[@]}"; do export check=$i if [[ ! "${check}" == "getSessionId" ]]; then count=`expr ${count} + 1` log "Running check: ${check}..." local checkName=$(parseJsonConfig ${check} "name") checkReturnVal=$(runHealthCheck "${check}") if [[ $? -eq 0 ]]; then log "${check} failed!" serviceDown="true" $(echo "${tapResults}not ok ${count} - ${checkName} failed: ${checkReturnVal}" >> serviceChecks.tap) outputLog="${outputLog}${checkName} response code: ${checkReturnVal} \n" else log "${check} passed." $(echo "${tapResults}ok ${count} - ${checkName} passed: ${checkReturnVal}" >> serviceChecks.tap) fi statusOutLog="${statusOutLog}${checkName} response code: ${checkReturnVal} \n" fi done echo ${outputLog} if [[ "${serviceDown}" == "true" ]]; then return 0 else return 1 fi } # Send out email notifications sendEmailNotification() { local output=$1 mail -s "A Service for ${appToTest} on ${environment} is down" foo.bar@company.com <<< ${output} } sendHipChatNotification() { local MESSAGE=$1 local ROOM_ID=$(parseChatConfig ${environment} "room") local AUTH_TOKEN=$(parseChatConfig ${environment} "authToken") local dataArgs=$(echo -e "{\"color\": \"red\", \"message_format\": \"text\", \"message\": \"${MESSAGE}\"}") log "Sending ServiceChecks hipchat notification..." 
log 'curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d' "${dataArgs}"' \ https://api.hipchat.com/v2/room/2704310/notification?auth_token=ig5J9PFybM0C9TgRa8hxRh2Q6ASt5t1aly19JRD9 ' #Send to ServiceChecks room curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d "${dataArgs}" \ https://api.hipchat.com/v2/room/2704310/notification?auth_token=Z1z8slcRlKROWOvNxzo9xK5gHLmeKWHlbCF61Ea7 log "Sending ServiceChecks hipchat notification..." log 'curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d' "${dataArgs}"' \ https://api.hipchat.com/v2/room/${ROOM_ID}/notification?auth_token=${AUTH_TOKEN} ' #Send to AppToTest specified room curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d "${dataArgs}" \ https://api.hipchat.com/v2/room/${ROOM_ID}/notification?auth_token=${AUTH_TOKEN} } sendTestHipChatNotification() { local MESSAGE=$1 local dataArgs=$(echo -e "{\"color\": \"red\", \"message_format\": \"text\", \"message\": \"${MESSAGE}\"}") log "Sending test hipchat notification..." log 'curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d' "${dataArgs}"' \ https://api.hipchat.com/v2/room/<TEST_ROOM>/notification?auth_token=<AUTH_TOKEN> ' #Send to Test room curl --connect-timeout 30 --max-time 60 -s -H "Content-Type: application/json" \ -X POST \ -d "${dataArgs}" \ https://api.hipchat.com/v2/room/<TEST_ROOM>/notification?auth_token=<AUTH_TOKEN> } # Run the body of the script returnVal=0 output=$(executeChecks) if [[ $? -eq 0 ]]; then MESSAGE="$(echo -e "@all A Service for ${appToTest} on ${environment} is down! 
${output}" | perl -pe "s/ \n/; /g; s/u'//g; s/\r//g; s/\"//g; s/}/ /g; s/{/ /g; ")" if [[ "${buildNumber}" != "" ]]; then MESSAGE="${MESSAGE} CheckLink: ${jenkinslink}" fi echo -e "${MESSAGE}" # Disabled as box running on doesn't support "mail" #sendEmailNotification "${output}" if [[ "${environment}" == "TEST" ]]; then sendTestHipChatNotification "${MESSAGE}" else sendHipChatNotification "${MESSAGE}" fi returnVal=-1 else log "${statusOutLog}" fi exit ${returnVal}
true
1c0ea05f7eec21c009142ec899bfb3589f35774c
Shell
denischanc/soaf.sh
/src/module.sh
UTF-8
5,612
3.109375
3
[]
no_license
################################################################################ ################################################################################ readonly SOAF_MODULE_VERSION_ATTR="soaf_module_version" readonly SOAF_MODULE_STATIC_FN_ATTR="soaf_module_static_fn" readonly SOAF_MODULE_CFG_FN_ATTR="soaf_module_cfg_fn" readonly SOAF_MODULE_INIT_FN_ATTR="soaf_module_init_fn" readonly SOAF_MODULE_PREPENV_FN_ATTR="soaf_module_prepenv_fn" readonly SOAF_MODULE_PRE_ACTION_FN_ATTR="soaf_module_pre_action_fn" readonly SOAF_MODULE_POST_ACTION_FN_ATTR="soaf_module_post_action_fn" readonly SOAF_MODULE_EXIT_FN_ATTR="soaf_module_exit_fn" readonly SOAF_MODULE_DEP_LIST_ATTR="soaf_module_dep_list" readonly SOAF_MODULE_DEP_STATE_ATTR="soaf_module_dep_state" readonly SOAF_MODULE_DEP_UNKNOWN_S="UNKNOWN" readonly SOAF_MODULE_DEP_INPROG_S="INPROG" readonly SOAF_MODULE_DEP_OK_S="OK" ################################################################################ ################################################################################ soaf_create_module() { local NAME=$1 local VERSION=$2 local STATIC_FN=$3 local CFG_FN=$4 local INIT_FN=$5 local PREPENV_FN=$6 local PRE_ACTION_FN=$7 local POST_ACTION_FN=$8 local EXIT_FN=$9 local DEP_LIST=${10} local POS=${11} [ "${NAME#soaf.core.}" = "$NAME" ] && POS= soaf_pmp_list_fill "$POS" SOAF_MODULE_LIST $NAME soaf_map_extend $NAME $SOAF_MODULE_VERSION_ATTR $VERSION soaf_map_extend $NAME $SOAF_MODULE_STATIC_FN_ATTR $STATIC_FN soaf_map_extend $NAME $SOAF_MODULE_CFG_FN_ATTR $CFG_FN soaf_map_extend $NAME $SOAF_MODULE_INIT_FN_ATTR $INIT_FN soaf_map_extend $NAME $SOAF_MODULE_PREPENV_FN_ATTR $PREPENV_FN soaf_map_extend $NAME $SOAF_MODULE_PRE_ACTION_FN_ATTR $PRE_ACTION_FN soaf_map_extend $NAME $SOAF_MODULE_POST_ACTION_FN_ATTR $POST_ACTION_FN soaf_map_extend $NAME $SOAF_MODULE_EXIT_FN_ATTR $EXIT_FN soaf_map_extend $NAME $SOAF_MODULE_DEP_LIST_ATTR "$DEP_LIST" soaf_map_extend $NAME $SOAF_MODULE_DEP_STATE_ATTR \ $SOAF_MODULE_DEP_UNKNOWN_S } 
################################################################################ ################################################################################ soaf_module_resolve_dep_module_() { local MODULE=$1 local DEP_LIST_MSG=$2 local ERR_MSG= soaf_list_found "$SOAF_MODULE_LIST_" $MODULE if [ -z "$SOAF_RET_LIST" ] then local ERR_MSG="Module not found : [$MODULE]." else DEP_LIST_MSG+=" -> [$MODULE]" soaf_map_get $MODULE $SOAF_MODULE_DEP_STATE_ATTR local DEP_STATE=$SOAF_RET if [ "$DEP_STATE" = "$SOAF_MODULE_DEP_INPROG_S" ] then local ERR_MSG="Module dependance deadlock : $DEP_LIST_MSG." elif [ "$DEP_STATE" != "$SOAF_MODULE_DEP_OK_S" ] then soaf_map_extend $MODULE $SOAF_MODULE_DEP_STATE_ATTR \ $SOAF_MODULE_DEP_INPROG_S soaf_map_get $MODULE $SOAF_MODULE_DEP_LIST_ATTR local dep_module for dep_module in $SOAF_RET do soaf_module_resolve_dep_module_ $dep_module "$DEP_LIST_MSG" done SOAF_MODULE_SORT_LIST+=" $MODULE" SOAF_MODULE_SORT_R_LIST="$MODULE $SOAF_MODULE_SORT_R_LIST" soaf_map_extend $MODULE $SOAF_MODULE_DEP_STATE_ATTR \ $SOAF_MODULE_DEP_OK_S fi fi [ -n "$ERR_MSG" ] && soaf_engine_exit_dev "$ERR_MSG" } soaf_module_resolve_dep() { soaf_pmp_list_cat SOAF_MODULE_LIST SOAF_MODULE_LIST_=$SOAF_RET_LIST local module for module in $SOAF_MODULE_LIST_ do soaf_module_resolve_dep_module_ $module done } ################################################################################ ################################################################################ soaf_module_version() { local MODULE_NAME=$1 soaf_console_msg_ctl $MODULE_NAME "$SOAF_THEME_VAL_CTL_LIST" local TXT="$SOAF_CONSOLE_RET-" soaf_map_get $MODULE_NAME $SOAF_MODULE_VERSION_ATTR soaf_console_msg_ctl "$SOAF_RET" "$SOAF_THEME_VER_CTL_LIST" soaf_dis_txt "$TXT$SOAF_CONSOLE_RET" } ################################################################################ ################################################################################ soaf_module_call_fn_() { local MODULE_NAME=$1 local FN=$2 local 
VA_NATURE=$3 if [ -n "$FN" ] then if [ -n "$VA_NATURE" ] then soaf_varargs_fn_apply $VA_NATURE $FN $MODULE_NAME else $FN $MODULE_NAME fi fi } ################################################################################ ################################################################################ soaf_module_apply_fn_() { local MODULE_LIST=$1 local FN=$2 local VA_NATURE=$3 local module for module in $MODULE_LIST do soaf_module_call_fn_ $module $FN $VA_NATURE done } soaf_module_apply_all_fn() { local FN=$1 local VA_NATURE=$2 soaf_module_apply_fn_ "$SOAF_MODULE_SORT_LIST" $FN $VA_NATURE } soaf_module_apply_all_reverse_fn() { local FN=$1 local VA_NATURE=$2 soaf_module_apply_fn_ "$SOAF_MODULE_SORT_R_LIST" $FN $VA_NATURE } ################################################################################ ################################################################################ soaf_module_apply_fn_attr_() { local MODULE_LIST=$1 local FN_ATTR=$2 local VA_NATURE=$3 local module for module in $MODULE_LIST do soaf_map_get $module $FN_ATTR local FN=$SOAF_RET [ -n "$FN" ] && soaf_module_call_fn_ $module $FN $VA_NATURE done } soaf_module_apply_all_fn_attr() { local FN_ATTR=$1 local VA_NATURE=$2 soaf_module_apply_fn_attr_ "$SOAF_MODULE_SORT_LIST" $FN_ATTR $VA_NATURE } soaf_module_apply_all_reverse_fn_attr() { local FN_ATTR=$1 local VA_NATURE=$2 soaf_module_apply_fn_attr_ "$SOAF_MODULE_SORT_R_LIST" $FN_ATTR $VA_NATURE }
true
63ae209235801ff594f1228615b009eb78440f11
Shell
diagprov/retrowrite
/fuzzing/kernel/vms_files/run_qemu_cov.sh
UTF-8
571
2.609375
3
[ "MIT" ]
permissive
#!/bin/bash # this script run the syzskaller vms, no needs to start it by hands, it is ran from measure_coverage.sh (via run_cov.expect) set -euo pipefail qemu-system-x86_64 \ -kernel "$1/arch/x86/boot/bzImage" \ -append "console=ttyS0 rw debug root=/dev/sda debug earlyprintk=serial slub_debug=QUZ" \ -hda "$2" \ -snapshot \ -virtfs "local,path=$3/input,security_model=none,mount_tag=input,readonly" \ -virtfs "local,path=$3/coverage,security_model=none,mount_tag=output" \ -enable-kvm \ -nographic \ -cpu host \ -m 2G \ -smp 2 \ 2>&1 | tee cover_log.txt
true
fbdf2061bf54938f4ff901fb01edcc52eb6e0fa5
Shell
lindleyw/juno
/juno
UTF-8
6,025
3.828125
4
[ "BSD-3-Clause" ]
permissive
#!/bin/bash # Copyright (c) 2010-16, Mitchell Cooper DIR=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` showusage() { cat << EOF usage: $0 [action] start start juno IRCd forcestart attempt to start juno under any circumstances stop terminate juno IRCd debug start in NOFORK mode with printed output forever run continuously foreverd run continuously in debug mode rehash rehash the server configuration file mkpasswd runs the password generator dev various developer actions ($0 dev help) help print this information EOF } showdevusage() { cat << EOF usage: $0 dev [action] makemodule create a .module and .pm pullsubs run git pull for all submodules runlocal run in debug mode using local submodules EOF } runningq() { if [ -e "$DIR/etc/juno.pid" ]; then PID=`cat $DIR/etc/juno.pid` if kill -0 $PID; then RUNNING=true fi fi } splash() { VER=$(cat VERSION) echo " ########################################################################## # _ _ _ # ( ) (_) | | # _ _ _ _ __ ___ _ _ __ ___ __| | # | | | | | | | '_ \ / _ \ | | | '__/ / __| / _\` | # | | | |_| | | | | | | (_) | == | | | | | (__ | (_| | # | | \__,_| |_| |_| \___/ |_| |_| \___| \__,_| # _/ | # |__/ version $VER https://github.com/cooper/juno # ########################################################################## " } check_if_running() { if [ ! "$RUNNING" ]; then echo "juno is not running!" exit 1 fi } dostart() { echo "Starting juno" splash $DIR/bin/ircd $DIR } start() { if [ "$RUNNING" ]; then echo "juno is already running!" exit 1 fi dostart } forcestart() { if [ -e "$DIR/etc/juno.pid" ]; then rm -v $DIR/etc/juno.pid fi dostart } debug() { if [ "$RUNNING" ]; then echo "juno is already running!" exit 1 fi echo "Running in NOFORK mode." splash $DIR/bin/ircd $DIR NOFORK & pid=$! trap ' echo "Signaling $pid TERM"; kill -TERM $pid' INT wait $pid } forever() { if [ "$RUNNING" ]; then echo "juno is already running!" exit 1 fi splash $DIR/bin/ircd_forever $DIR & pid=$! 
trap ' echo "Signaling $pid TERM"; kill -TERM $pid' INT wait $pid } forever_nofork() { if [ "$RUNNING" ]; then echo "juno is already running!" exit 1 fi echo "Running in NOFORK mode." splash $DIR/bin/ircd_forever $DIR NOFORK & pid=$! trap ' echo "Signaling $pid TERM"; kill -TERM $pid' INT wait $pid } stop() { check_if_running echo "Signaling $PID TERM" kill -TERM $PID } main() { case "$1" in start) start ;; forever) forever ;; foreverd) forever_nofork ;; debug) debug ;; forcestart) forcestart ;; rehash) check_if_running echo "Rehashing server" echo rehash > $DIR/etc/HUP kill -HUP $PID ;; stop) stop ;; restart) stop #sleep .01 forcestart ;; reset) rm -v $DIR/etc/juno.pid ;; mkpasswd) $DIR/bin/mkpasswd ;; genssl) SSLDIR=$DIR/etc/ssl mkdir -p $SSLDIR openssl req -x509 -nodes -newkey rsa:2048 -keyout $SSLDIR/key.pem -out $SSLDIR/cert.pem echo "Use the 'sslport' key in listener configuration blocks" echo "Enjoy using SSL" ;; lines) $DIR/bin/lines ;; dev) case "$2" in pullsubs) oldpwd=$(pwd) for MOD in $(ls $DIR/lib) do echo "=> Updating $MOD" cd $DIR/lib/$MOD git pull -q origin master cd $oldpwd done echo "!!! Don't forget to update dependency versions in ircd.pm" ;; runlocal) perl -I$DIR/../evented-object/lib -I$DIR/../evented-configuration/lib \ -I$DIR/../evented-database/lib -I$DIR/../evented-api-engine/lib \ -MEvented::Object -MEvented::Configuration -MEvented::Database \ -MEvented::API::Engine \ $DIR/bin/ircd $DIR NOFORK ;; commit) # get commit message file=$DIR/commit.tmp editor=${EDITOR:-nano} $editor $file message=$(cat $file) rm $file # increment version oldversion=$(cat $DIR/VERSION) newversion=$(echo print $oldversion + 0.01 | perl) echo "VERSION: $oldversion -> $newversion" echo $newversion > $DIR/VERSION # add to changelog printf "\n$newversion: $message\n" >> $DIR/INDEV # commit git add --all . git commit -m "$newversion: $message" ;; *) showdevusage ;; esac ;; *) showusage ;; esac } if [ ! "$1" ]; then showusage exit fi runningq main $@
true
1ba76787567e4e2708e90f648c16b8ee9d46e545
Shell
qileilove/capybaraDemo-1
/dockerRun.sh
UTF-8
3,579
4.09375
4
[]
no_license
#!/bin/bash # # Runs the docker container in which cucumber executes. # See usage statement below for more details # # NOTE: To pass options to cucumber, you must set the CUCUMBER_OPTS # environment variable. For example, # $ CUCUMBER_OPTS="--name MyFeature" ./dockerRun.sh # # # Set defaults # debug=false runAsRoot=false DRIVER_NAME=selenium TIMEOUT=10 # # The features/steps in this example assumes github as the default application if [ -z "${APPLICATION_URL-}" ]; then APPLICATION_URL="http://github.com" fi while (( "$#" )); do if [ "$1" == "-u" ]; then APPLICATION_USERID="${2}" shift 2 elif [ "$1" == "-p" ]; then APPLICATION_PASSWORD="${2}" shift 2 elif [ "$1" == "-d" ]; then DRIVER_NAME="${2}" shift 2 elif [ "$1" == "-t" ]; then TIMEOUT="${2}" shift 2 elif [ "$1" == "--root" ]; then runAsRoot=true shift 1 elif [ "$1" == "-i" ]; then debug=true shift 1 else if [ "$1" != "-h" ]; then echo "ERROR: invalid argument '$1'" fi echo "USAGE: dockerRun.sh [-u userid] [-p password] [-d driverName] [-t timeout]" echo " [--root] [-i] [-h]" echo "" echo "where" echo " -u userid a valid github user id (required)" echo " -p password the password for userid (required)" echo " -d driverName identifies the Capybara driver to use" echo " (e.g. selenium, selenium_chrome or poltergeist)" echo " -t timeout identifies the Capybara timeout to use (in seconds)" echo " --root run the tests as root in the docker container" echo " -i interactive mode. Starts a bash shell with all of the same" echo " env vars but doesn't run anything" echo " -h print this usage statement and exit" exit 1 fi done if [ -z "${APPLICATION_USERID-}" ]; then echo "ERROR: userid undefined. You must either set the environment variable" echo " APPLICATION_USERID, or specify it with the -u command line arg" exit 1 fi if [ -z "${APPLICATION_PASSWORD-}" ]; then echo "ERROR: password undefined. 
You must either set the environment variable" echo " APPLICATION_PASSWORD, or specify it with the -p command line arg" exit 1 fi # # Get the current UID and GID. These are passed into the container for use in # creating a container-local user account so ownership of files created in the # container will match the user in the host OS. # CALLER_UID=`id -u` CALLER_GID=`id -g` if [ "$debug" == true ]; then INTERACTIVE_OPTION="-i" CMD="bash" elif [ "$runAsRoot" == true ]; then CMD="runCucumber.sh --root ${CUCUMBER_OPTS}" elif [ `uname -s` == "Darwin" ]; then echo "ERROR: missing required argument '--root' for Mac OS X" exit 1 else CMD="runCucumber.sh ${CUCUMBER_OPTS}" fi docker run --rm --name capybara_demo \ -v /tmp/.X11-unix:/tmp/.X11-unix:ro \ -v /etc/timezone:/etc/timezone:ro \ -v /etc/localtime:/etc/localtime:ro \ -v `pwd`/demo:/capybara:rw \ -e CALLER_UID=${CALLER_UID} \ -e CALLER_GID=${CALLER_GID} \ -e CAPYBARA_DRIVER=${DRIVER_NAME} \ -e CAPYBARA_TIMEOUT=${TIMEOUT} \ -e APPLICATION_URL=${APPLICATION_URL} \ -e APPLICATION_USERID=${APPLICATION_USERID} \ -e APPLICATION_PASSWORD=${APPLICATION_PASSWORD} \ ${INTERACTIVE_OPTION} \ -t capybara_demo \ ${CMD}
true
93fde5a286c8d21b44632acd9d8fc6611bd3e20b
Shell
majinhui04/igrow-cli
/git-log.sh
UTF-8
14,363
4
4
[ "MIT" ]
permissive
#!/bin/bash # commit 类型 键 TYPE_MAP=(feat fix refactor style docs chore build ci pref test) # commit 类型 值 TYPE_TITLE_MAP=(新增 修改 重构 样式 文档 其他 构建 持续集成 优化 测试) # 当前日期+时间 NOW=$(date "+%F %H:%M") # 起始日期 SINCE="last.Monday" # 终止日期 UNTIL=$(date +%F) # 是否覆盖文件,默认否 FOUCE=0 # 首行是否为生成日期,默认否 PRINT_TIME=0 # 输出目录 OUTPUT_DIR="log" # 是否已设置作者, 默认否 HAS_SET_AUTHOR=0 # function,去除字符串两头空格 trim() { trimmed=$1 # https://blog.csdn.net/dongwuming/article/details/50605911 # 从变量$string的结尾, 删除最长匹配$substring的子串 trimmed=${trimmed%% } # 从变量$string的开头, 删除最长匹配$substring的子串 trimmed=${trimmed## } # TODO 首部空格无法全部移除 trimmed=${trimmed## } echo $trimmed # 移除全部空格 # echo ${trimmed// /} } # function,是否应该强制生成文件 shouldFouceResolve() { # 输出文件已存在 & 不强制生成 则提示并退出 if [ -e $OUTPUT -a $FOUCE -eq 0 ]; then echo "${OUTPUT} already exists! \n Please update package.json or remove log file or add '-f' follow the command!\n " exit 1 fi } # function,首行是否应该输出生成日期 shouldPrintTime() { if [ $PRINT_TIME -eq 1 ]; then echo "> Generated on ${NOW} By [Gen-Git-Log](https://www.npmjs.com/package/gen-git-log)\n" case $MODE in branch) # 分支模式 TITLE="${TARGET}...${SOURCE}" ;; tag) # 标签模式 TITLE="${TARGET}...v$(getVersion)" ;; esac if [ ! -z $TITLE ]; then # 如果TITLE赋值则组装输出 TITLE="## [${TITLE}](${REMOTE}/compare/${TITLE})" echo $TITLE fi fi } # function,获取版本 getVersion() { # 判断是否自定义版本 if [ -z $VERSION ]; then # 未传入版本 if [ -z $REPO ]; then # 没有传repo则package文件为当前目录所有 PKG_PATH="./package.json" else PKG_PATH="${REPO}/package.json" fi # 自定义version为空时抓取版本 while read line; do # 抓取定义version行文本 if [[ ${line} == *"version"* ]]; then # 移除双引号 VERSION=${line//\"/ } # 移除键名 VERSION=${VERSION##*"version :"} # 获取版本 VERSION=${VERSION%%" ,"} break fi done <$PKG_PATH fi trim $VERSION } # function,生成输出文件路径 generateOutPutPath() { # 输出文件路径,默认“v版本.md” echo "${OUTPUT_DIR}/v$(getVersion).md" } # 生成指定SOURCE TARGET tag差异记录 genSingleTagLog() { SOU=$1 TAR=$2 if [ ! 
"$3"x = "0"x ]; then if [ $SOU = HEAD ]; then # 如果是与最新HEAD对比则将HEAD设为version TIT="## [v$(getVersion)](${REMOTE}/compare/${TAR}...v$(getVersion))" else TIT="## [${SOU}](${REMOTE}/compare/${TAR}...${SOU})" fi echo echo echo "${TIT} ($(git -C "${REPO}" log -1 --format=%ad --date=short $SOU))" fi # 直接使用分支比对查找所有匹配 log GIT_PAGER=$(git -C "${REPO}" log --no-merges --reverse --format="${LOG_FORMAT}" "$SOU...$TAR") if [ ! -z "$GIT_PAGER" ]; then # 字符串分隔符 IFS="*" # 分割字符串为数组 singleTagArr=($GIT_PAGER) # 还原分割符,否则会导致if判断失效 IFS="" # 循环处理数组 for s in ${singleTagArr[@]}; do # 去除字符串两头空格 s=$(trim $s) # 判断字符串非空 if [ ! -z $s ]; then # 替换全角冒号 s=${s/:/:} # 循环commit 类型 for type in ${TYPE_MAP[@]}; do # 组织正则 reg="${type}:" # 判断commit类型 if [[ ${s} == *"${reg}"* ]]; then # 裁剪字符串 s=${s##*${reg}} s=${s%@*} # 移除空格 s=$(trim $s) # 动态数组变量赋值 eval COMMIT_${type}='(${COMMIT_'${type}'[*]} $s)' break fi done fi done # 处理数据 typeIndex=0 for type in ${TYPE_MAP[@]}; do # 拷贝数组 eval type='(${COMMIT_'${type}'[*]})' # 判断数组是否含有元素 if [ ${#type[*]} != 0 ]; then echo "#### ${TYPE_TITLE_MAP[$typeIndex]}" for i in ${type[@]}; do echo "* ${i}" done echo fi let typeIndex++ done fi } # 生成tag genTagLog() { LOG_FORMAT="$FORMAT_DEFAULT @%ae" OUTPUT=$(generateOutPutPath) shouldFouceResolve # 获取最新标签 LASTEST_TAG=$(git -C "${REPO}" describe --tags $(git -C "${REPO}" rev-list --tags --max-count=1)) if [ -z $SOURCE ]; then SOURCE="HEAD" fi if [ -z $TARGET ]; then TARGET=$LASTEST_TAG fi # 如果传参为 copy 则将结果输出至剪切板 if [ "$1"x = "copy"x ]; then genSingleTagLog $SOURCE $TARGET 0 | pbcopy else ( shouldPrintTime genSingleTagLog $SOURCE $TARGET 0 ) >$OUTPUT fi } # 传参覆盖默认值 while getopts "m:a:s:u:S:T:r:v:ftd:h" arg; do case $arg in m) # 模式 MODE=$OPTARG ;; a) # 作者 AUTHOR=$OPTARG # 已设置作者 HAS_SET_AUTHOR=1 ;; s) # 起始日期 SINCE=$OPTARG ;; u) # 终止日期 UNTIL=$OPTARG ;; S) # 源分支/标签 SOURCE=$OPTARG ;; T) # 目标分支/标签 TARGET=$OPTARG ;; r) # Git 仓库本地路径 REPO=$OPTARG ;; v) # 自定义版本 VERSION=$OPTARG ;; f) # 强制覆盖文件 FOUCE=1 ;; t) # 首行输出生成日期 PRINT_TIME=1 ;; d) # 输出目录路径 
OUTPUT_DIR=$OPTARG ;; h) echo " Usage:\n git-log [options]\n Options:\n -m 生成模式 默认:无(周报),可选:branch(分支比对)、tag(标签比对)、changelog(汇总日志) -a 想要过滤的作者 默认:$(git config user.name) -s 起始日期 默认:上周一,格式:2018-01-01 -u 终止日期 默认:当天,格式:2018-01-01 -S 源分支/标签 默认:无,比对模式:当前分支/最近标签 -T 目标分支/标签 默认:无,比对模式:当前分支/当前HEAD -r Git 仓库本地路径 默认:当前目录 -v 版本号 默认:无,比对模式:仓库路径下 package.json 中 VERSION 字段值 -f 覆盖文件 默认:否,不需要传值 -t log 首行为生成日期 默认:否,不需要传值 -d log 输出目录 默认:仓库路径下 log 文件夹 " exit 2 ;; ?) echo "unknown argument" exit 3 ;; esac done # 判断是否需要设置默认作者 if [ $HAS_SET_AUTHOR -eq 0 ]; then # 没有设置过则使用默认作者 AUTHOR=$(git -C "${REPO}" config user.name) fi # 获取远程仓库地址 REMOTE=$(git -C "${REPO}" remote -v) REMOTE=${REMOTE#*git@} if [ ${REMOTE:0:6}x = "origin"x ]; then # 如果是origin开头认为是http(s)协议checkout下来的,否则是ssh REMOTE=${REMOTE#*origin} REMOTE=${REMOTE%%.git *} else REMOTE=${REMOTE%%.git *} REMOTE="http://${REMOTE/://}" fi REMOTE=$(trim $REMOTE) # 格式化 https://ruby-china.org/topics/939 # %H: commit hash # %h: 短commit hash # %an: 提交人名字 # %ae: 提交人邮箱 # %cr: 提交日期, 相对格式(1 day ago) # %d: ref名称 # %s: commit信息标题 # %cd: 提交日期 (--date= 制定的格式) FORMAT_DEFAULT=" * %s ([%h](${REMOTE}/commit/%H)) " # 判断是否指定仓库路径重写输出目录路径 if [ -z $REPO ]; then OUTPUT_DIR="./${OUTPUT_DIR}" else OUTPUT_DIR="${REPO}/${OUTPUT_DIR}" fi # 指定目录路径不存在则创建 if [ ! -e $OUTPUT_DIR ]; then mkdir $OUTPUT_DIR fi # 判断提交人,设定输出路径 if [ -z $AUTHOR ]; then # 提交人为空 LOG_FORMAT="$FORMAT_DEFAULT <%an>" # 输出文件路径,默认“当前日期.md” OUTPUT="${OUTPUT_DIR}/$(date +%F).md" else # 有提交人 LOG_FORMAT="$FORMAT_DEFAULT - %cr" # 输出文件路径,默认“提交人名.md” OUTPUT="${OUTPUT_DIR}/${AUTHOR}.md" fi # 默认分支为当前分支 CURRENT_BRANCH=$(git -C "${REPO}" rev-parse --abbrev-ref HEAD) case $MODE in branch) LOG_FORMAT="$FORMAT_DEFAULT @%ae" OUTPUT=$(generateOutPutPath) shouldFouceResolve if [ -z $SOURCE ]; then SOURCE=$CURRENT_BRANCH fi if [ -z $TARGET ]; then TARGET=$CURRENT_BRANCH fi # 直接使用分支比对查找所有匹配 log GIT_PAGER=$(git -C "${REPO}" log "$SOURCE...$TARGET" --no-merges --reverse --format="${LOG_FORMAT}") ( shouldPrintTime if [ ! 
-z "$GIT_PAGER" ]; then # 字符串分隔符 IFS="*" # 分割字符串为数组 arr=($GIT_PAGER) # 还原分割符,否则会导致if判断失效 IFS="" # 循环处理数组 for s in ${arr[@]}; do # 去除字符串两头空格 s=$(trim $s) # 判断字符串非空 if [ ! -z $s ]; then # 替换全角冒号 s=${s/:/:} # 循环commit 类型 for type in ${TYPE_MAP[@]}; do # 组织正则 reg="${type}:" # 判断commit类型 if [[ ${s} == *"${reg}"* ]]; then # 裁剪字符串 s=${s##*${reg}} s=${s%@*} # 移除空格 s=$(trim $s) # 动态数组变量赋值 eval COMMIT_${type}='(${COMMIT_'${type}'[*]} $s)' break fi done fi done # 处理数据 typeIndex=0 for type in ${TYPE_MAP[@]}; do # 拷贝数组 eval type='(${COMMIT_'${type}'[*]})' # 判断数组是否含有元素 if [ ${#type[*]} != 0 ]; then echo "#### ${TYPE_TITLE_MAP[$typeIndex]}" for i in ${type[@]}; do echo "* ${i}" done echo fi let typeIndex++ done else echo "${SOURCE}...${TARGET} 分支无差异" fi ) >$OUTPUT ;; tag) genTagLog ;; changelog) if [ -z $REPO ]; then OUTPUT="./CHANGELOG.md" else OUTPUT="${REPO}/CHANGELOG.md" fi LOG_FORMAT="$FORMAT_DEFAULT @%ae" shouldFouceResolve # 标签列表 TAG_LIST=$(git -C "${REPO}" tag) # 字符串分隔符 IFS="v" # 分割字符串为数组 arr=($TAG_LIST) # 还原分割符,否则会导致if判断失效 IFS="" # 第一次提交commit, 取duan hash LAST_TAG=$(git -C "${REPO}" rev-list --reverse HEAD | head -1) LAST_TAG=${LAST_TAG:0:6} FIRST_TAG="HEAD" len=$((${#arr[@]} - 1)) ( # 循环处理数组 for ((i = $len; i > 0; i--)); do s=${arr[$i]} # 去除字符串两头空格 s=v$(trim $s) # 判断字符串非空 if [ ! -z $s ]; then # 不知道为什么一定要缓存一下,否则$s值错乱 TEMP=$s echo $(genSingleTagLog $FIRST_TAG $s) FIRST_TAG=$TEMP fi done genSingleTagLog $FIRST_TAG $LAST_TAG ) >$OUTPUT ;; publish) # 发布模式 # 站在要发布分支上 # 同步代码 # 修改version # 执行该命令 # 根据当前版本号生成log文件 # 未传入版本 #genTagLog # 添加所有文件到暂存区 #git -C "${REPO}" add --all # 生成commit #git -C "${REPO}" commit -m "chore: publish version $(getVersion)" # ***********目前大部分项目不允许使用命令行操作master代码,以下代码暂时废弃************** # # 用户确认目标分支 https://ask.helplib.com/bash/post_113951 if [ -z $VERSION ]; then VERSION=`npx select-version-cli` else echo "" fi read -p "Releasing $VERSION - are you sure? 
(y/n)" -n 1 -r echo # (optional) move to a new line if [[ $REPLY =~ ^[Yy]$ ]] then LatestTag=$(git describe --tags `git rev-list --tags --max-count=1`) # 本地最新tag和即将生成的tag相同 则删除 if [ "$LatestTag" == "$VERSION" ];then git tag --delete $VERSION fi BRANCH="release-v$VERSION-$(date "+%F")" git checkout -b $BRANCH npm version $VERSION --message "docs: $VERSION" git tag --delete "v$VERSION" genTagLog # 支持mac pbcopy <$OUTPUT git -C "${REPO}" add --all git -C "${REPO}" commit -m "chore: publish version $(getVersion)" git tag -a "v$VERSION" # 先删除远程分支 # git -C "${REPO}" push origin ":refs/tags/v$VERSION" git -C "${REPO}" push origin "v$VERSION" git -C "${REPO}" push --set-upstream origin $BRANCH fi # BRANCH="develop" # read -p"Target branch: master111. (y: confirm; n: enter the target branch; c: cancel) " -n 1 -r # echo "\n" # # case $REPLY in # y) # ;; # n) # read -p"Please enter the target branch (For example, master): " -r # BRANCH=$REPLY # ;; # *) # echo "canceled" # exit 5 # ;; # esac # 切到branch #git -C "${REPO}" checkout $BRANCH # 合并分支到branch #git -C "${REPO}" merge $CURRENT_BRANCH #echo "v$(getVersion)" # 生成tag #git -C "${REPO}" tag -a "v$(getVersion)" -m "" # 推送tag # git -C "${REPO}" push origin --tags # 推送代码 #git -C "${REPO}" push origin # 复制release note到剪切板 #pbcopy <$OUTPUT # 完成 echo "Push success, Release branch is $BRANCH and version is $VERSION !" ;; copy) genTagLog "copy" echo "Log has been copied to the clipboard" exit 6 ;; *) shouldFouceResolve ( shouldPrintTime # 先根据起始及终止时间查找符合条件的log并且把日期格式化后输出 # 之后遍历所有输出的日期,在根据日期查询当天内的log进行打印 git -C "${REPO}" log --since="${SINCE}" --until="${UNTIL}" --format="%cd" --date=short | sort -u | while read DATE; do GIT_PAGER=$(git -C "${REPO}" log --no-merges --reverse --format="${LOG_FORMAT}" --since="${DATE} 00:00:00" --until="${DATE} 23:59:59" --author="${AUTHOR}") if [ ! -z "$GIT_PAGER" ]; then echo "[${DATE}]" echo "${GIT_PAGER}" echo fi done ) >$OUTPUT ;; esac echo "Log has been written to '${OUTPUT}'"
true
5ce152a6af557e939f9f188d6773419293952f21
Shell
john275/scripts
/movement
UTF-8
1,268
3.296875
3
[]
no_license
#!/bin/bash messagesent=false today=$(date +%Y%m%d) preamble="Initial Startup - $(date +%Y/%m/%d_%H:%M) - " gpio=17 gtalkuser=john275 gtalkrecipient=cherylbucke sleeptime=0.1 message="Movement at Elmstead - " /usr/local/bin/gpio -g mode ${gpio} up while $true do res=$(/usr/local/bin/gpio -g read ${gpio}) if [ $res == 1 ] then if [ "${today}" == "$(date +%Y%m%d)" ] then if ! ${messagesent} then if (( $(date +%k) > 8 )) then now=$(date +%s) while [ $res == 1 ] do res=$(/usr/local/bin/gpio -g read ${gpio}) sleep ${sleeptime} done while [ $res == 0 ] do res=$(/usr/local/bin/gpio -g read ${gpio}) sleep ${sleeptime} done if (( $(($(date +%s)-now))<20)) then messagesent=true echo ${preamble}${message}$(date +%H:%M) echo ${preamble}${message}$(date +%H:%M) | /usr/bin/sendxmpp -t -u ${gtalkuser} -o gmail.com ${gtalkrecipient} preamble="" fi fi fi else today=$(date +%Y%m%d) messagesent=false fi fi sleep ${sleeptime} done
true
55a2284e5a915b2c41792a02ee49d04884a97c43
Shell
fjudith/coreos-kubernetes
/multi-node/generic/worker-install.sh
UTF-8
10,508
3.453125
3
[ "Apache-2.0" ]
permissive
#!/bin/bash set -e # List of etcd servers (http://ip:port), comma separated export ETCD_ENDPOINTS= # Interface to be mapped export FLANNEL_IFACE= # The endpoint the worker node should use to contact controller nodes (https://ip:port) # In HA configurations this should be an external DNS record or loadbalancer in front of the control nodes. # However, it is also possible to point directly to a single control node. export CONTROLLER_ENDPOINT= # Specify the version (vX.Y.Z) of Kubernetes assets to deploy # https://kubernetes.io/docs/reference/workloads-18-19/ # https://nixaid.com/deploying-kubernetes-cluster-from-scratch/ export K8S_VER=v1.10.2_coreos.0 # Hyperkube image repository to use. export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube # CNI plugin # https://github.com/containernetworking/plugins/releases export CNI_VER=0.7.0 # The CIDR network to use for pod IPs. # Each pod launched in the cluster will be assigned an IP out of this range. # Each node will be configured such that these IPs will be routable using the flannel overlay network. export POD_NETWORK=10.2.0.0/16 # The IP address of the cluster DNS service. # This must be the same DNS_SERVICE_IP used when configuring the controller nodes. export DNS_SERVICE_IP=10.3.0.10 # Whether to use Calico for Kubernetes network policy. export USE_CALICO=false # Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'. 
export CONTAINER_RUNTIME=docker # The above settings can optionally be overridden using an environment file: ENV_FILE=/run/coreos-kubernetes/options.env # To run a self hosted Calico install it needs to be able to write to the CNI dir if [ "${USE_CALICO}" = "true" ]; then export CALICO_OPTS="--volume cni-bin,kind=host,source=/opt/cni/bin \ --mount volume=cni-bin,target=/opt/cni/bin" else export CALICO_OPTS="" fi # ------------- function init_config { local REQUIRED=( 'ADVERTISE_IP' 'ETCD_ENDPOINTS' 'CONTROLLER_ENDPOINT' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO' ) if [ -f $ENV_FILE ]; then export $(cat $ENV_FILE | xargs) fi if [ -z $ADVERTISE_IP ]; then export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment) fi for REQ in "${REQUIRED[@]}"; do if [ -z "$(eval echo \$$REQ)" ]; then echo "Missing required config value: ${REQ}" exit 1 fi done } function init_templates { local TEMPLATE=/etc/systemd/system/kubelet.service local uuid_file="/var/run/kubelet-pod.uuid" if [ ! 
-f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE [Service] Environment=PATH=/opt/bin/:/usr/bin/:/usr/sbin:${PATH} Environment=KUBELET_IMAGE_TAG=${K8S_VER} Environment=KUBELET_IMAGE_URL=${HYPERKUBE_IMAGE_REPO} Environment="RKT_RUN_ARGS=--uuid-file-save=${uuid_file} \ --volume dns,kind=host,source=/etc/resolv.conf \ --mount volume=dns,target=/etc/resolv.conf \ --volume rkt,kind=host,source=/opt/bin/host-rkt \ --mount volume=rkt,target=/usr/bin/rkt \ --volume var-lib-rkt,kind=host,source=/var/lib/rkt \ --mount volume=var-lib-rkt,target=/var/lib/rkt \ --volume stage,kind=host,source=/tmp \ --mount volume=stage,target=/tmp \ --volume var-log,kind=host,source=/var/log \ --mount volume=var-log,target=/var/log \ --volume modprobe,kind=host,source=/usr/sbin/modprobe \ --mount volume=modprobe,target=/usr/sbin/modprobe \ --volume lib-modules,kind=host,source=/lib/modules \ --mount volume=lib-modules,target=/lib/modules \ --volume etc-cni-netd,kind=host,source=/etc/cni/net.d \ --mount volume=etc-cni-netd,target=/etc/cni/net.d \ ${CALICO_OPTS}" ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests ExecStartPre=/usr/bin/mkdir -p /var/log/containers ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file} ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin ExecStartPre=/usr/bin/mkdir -p /etc/cni/net.d ExecStartPre=/usr/bin/mkdir -p /var/lib/kubelet/volumeplugins ExecStartPre=/usr/bin/mkdir -p /var/lib/rook ExecStart=/usr/lib/coreos/kubelet-wrapper \ --anonymous-auth=false \ --node-labels=kubernetes.io/role=node \ --cni-conf-dir=/etc/cni/net.d \ --cni-bin-dir=/opt/cni/bin \ --network-plugin=cni \ --container-runtime=${CONTAINER_RUNTIME} \ --rkt-path=/usr/bin/rkt \ --register-node=true \ --allow-privileged=true \ --pod-manifest-path=/etc/kubernetes/manifests \ --hostname-override=${ADVERTISE_IP} \ --cluster_dns=${DNS_SERVICE_IP} \ --cluster_domain=cluster.local \ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --tls-cert-file=/etc/kubernetes/ssl/node.pem \ --tls-private-key-file=/etc/kubernetes/ssl/node-key.pem \ --volume-plugin-dir=/etc/kubernetes/volumeplugins \ --authentication-token-webhook=true \ --authorization-mode=Webhook \ --v=2 ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file} Restart=always RestartSec=10 [Install] WantedBy=multi-user.target EOF fi local TEMPLATE=/opt/bin/host-rkt if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE #!/bin/sh # This is bind mounted into the kubelet rootfs and all rkt shell-outs go # through this rkt wrapper. It essentially enters the host mount namespace # (which it is already in) only for the purpose of breaking out of the chroot # before calling rkt. It makes things like rkt gc work and avoids bind mounting # in certain rkt filesystem dependancies into the kubelet rootfs. This can # eventually be obviated when the write-api stuff gets upstream and rkt gc is # through the api-server. Related issue: # https://github.com/coreos/rkt/issues/2878 exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@" EOF fi local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE [Unit] Description=Load rkt stage1 images Documentation=http://github.com/coreos/rkt Requires=network-online.target After=network-online.target Before=rkt-api.service [Service] Type=oneshot RemainAfterExit=yes ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image [Install] RequiredBy=rkt-api.service EOF fi local TEMPLATE=/etc/systemd/system/rkt-api.service if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! 
-f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE [Unit] Before=kubelet.service [Service] ExecStart=/usr/bin/rkt api-service Restart=always RestartSec=10 [Install] RequiredBy=kubelet.service EOF fi local TEMPLATE=/etc/kubernetes/kubelet.kubeconfig if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE apiVersion: v1 kind: Config clusters: - name: local cluster: server: ${CONTROLLER_ENDPOINT} certificate-authority: /etc/kubernetes/ssl/ca.pem users: - name: system:node:${ADVERTISE_IP} user: client-certificate: /etc/kubernetes/ssl/node.pem client-key: /etc/kubernetes/ssl/node-key.pem contexts: - context: cluster: local user: system:node:${ADVERTISE_IP} name: default current-context: default EOF fi local TEMPLATE=/etc/kubernetes/bootstrap.kubeconfig if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE apiVersion: v1 kind: Config clusters: - name: local cluster: server: ${CONTROLLER_ENDPOINT} certificate-authority: /etc/kubernetes/ssl/ca.pem users: - name: kubelet-bootstrap user: token: $(cat /etc/kubernetes/token.csv | awk -F ',' '{print $1}') contexts: - context: cluster: local user: kubelet-bootstrap name: default current-context: default EOF fi local TEMPLATE=/etc/kubernetes/kube-proxy.kubeconfig if [ ! -f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE apiVersion: v1 kind: Config clusters: - name: local cluster: server: ${CONTROLLER_ENDPOINT} certificate-authority: /etc/kubernetes/ssl/ca.pem users: - name: kube-proxy user: client-certificate: /etc/kubernetes/ssl/kube-proxy.pem client-key: /etc/kubernetes/ssl/kube-proxy-key.pem contexts: - context: cluster: local user: kube-proxy name: default current-context: default EOF fi local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml if [ ! 
-f $TEMPLATE ]; then echo "TEMPLATE: $TEMPLATE" mkdir -p $(dirname $TEMPLATE) cat << EOF > $TEMPLATE apiVersion: v1 kind: Pod metadata: name: kube-proxy namespace: kube-system annotations: rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly spec: hostNetwork: true containers: - name: kube-proxy image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER command: - /hyperkube - proxy - --master=${CONTROLLER_ENDPOINT} - --cluster-cidr=${POD_NETWORK} - --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig - --bind-address=${ADVERTISE_IP} - --logtostderr=true - --masquerade-all - --v=2 securityContext: privileged: true volumeMounts: - mountPath: /etc/ssl/certs name: "ssl-certs" - mountPath: /etc/kubernetes/kube-proxy.kubeconfig name: "kubeconfig" readOnly: true - mountPath: /etc/kubernetes/ssl name: "etc-kube-ssl" readOnly: true - mountPath: /var/run/dbus name: dbus readOnly: false volumes: - name: "ssl-certs" hostPath: path: "/usr/share/ca-certificates" - name: "kubeconfig" hostPath: path: "/etc/kubernetes/kube-proxy.kubeconfig" - name: "etc-kube-ssl" hostPath: path: "/etc/kubernetes/ssl" - hostPath: path: /var/run/dbus name: dbus EOF fi } init_config init_templates chmod +x /opt/bin/host-rkt systemctl stop update-engine; systemctl mask update-engine systemctl daemon-reload if [ $CONTAINER_RUNTIME = "rkt" ]; then systemctl enable load-rkt-stage1 systemctl enable rkt-api fi systemctl enable kubelet; systemctl start kubelet
true
54d0cf9258ad3fe24289496850a79104a84271a4
Shell
cloudflare/python-cloudflare
/examples/example_paging_thru_zones.sh
UTF-8
782
3.40625
3
[ "MIT" ]
permissive
: tmp=/tmp/$$_ trap "rm ${tmp}; exit 0" 0 1 2 15 PAGE_NUMBER=0 while true do # grab the next page PAGE_NUMBER=`expr ${PAGE_NUMBER} + 1` cli4 --raw per_page==5 page==${PAGE_NUMBER} /zones > ${tmp} domains=`jq -c '.|.result|.[]|.name' < ${tmp} | tr -d '"'` result_info=`jq -c '.|.result_info' < ${tmp}` COUNT=` echo "${result_info}" | jq .count` PAGE=` echo "${result_info}" | jq .page` PER_PAGE=` echo "${result_info}" | jq .per_page` TOTAL_COUNT=`echo "${result_info}" | jq .total_count` TOTAL_PAGES=`echo "${result_info}" | jq .total_pages` echo COUNT=${COUNT} PAGE=${PAGE} PER_PAGE=${PER_PAGE} TOTAL_COUNT=${TOTAL_COUNT} TOTAL_PAGES=${TOTAL_PAGES} -- ${domains} if [ "${PAGE_NUMBER}" == "${TOTAL_PAGES}" ] then ## last section break fi done
true
30c84f82ab9f8eefff1fd9f040f01ff53d60b66b
Shell
BigDataHub/MariaDB-Manager-GREX
/skysql/steps/configure.sh
UTF-8
3,612
3.578125
4
[]
no_license
#!/bin/bash # # This file is distributed as part of MariaDB Manager. It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright 2012-2014 SkySQL Corporation Ab # # Author: Marcos Amaral # Date: July 2013 # # # This script does the necessary configuration steps to have the node ready for # command execution. # logger -p user.info -t MariaDB-Manager-Remote "Command start: configure" trap cleanup SIGTERM cleanup() { if [[ -f /etc/my.cnf.d/skysql-galera.cnf ]]; then rm -f /etc/my.cnf.d/skysql-galera.cnf fi exit 1 } # Determining path of galera library if [[ -f /usr/lib/galera/libgalera_smm.so ]]; then galera_lib_path="/usr/lib/galera/libgalera_smm.so" elif [[ -f /usr/lib64/galera/libgalera_smm.so ]]; then galera_lib_path="/usr/lib64/galera/libgalera_smm.so" else logger -p user.error -t MariaDB-Manager-Remote "No Galera wsrep library found." set_error "Failed to find Galera wsrep library." 
exit 1 fi # Creating MariaDB configuration file hostname=$(uname -n) sed -e "s/###NODE-ADDRESS###/$privateip/" \ -e "s/###NODE-NAME###/$nodename/" \ -e "s/###REP-USERNAME###/$rep_username/" \ -e "s/###REP-PASSWORD###/$rep_password/" \ -e "s|###GALERA-LIB-PATH###|$galera_lib_path|" \ steps/conf_files/skysql-galera.cnf > /etc/my.cnf.d/skysql-galera.cnf # Setting up MariaDB users /etc/init.d/mysql start sleep 5 mysql -u root -e "DELETE FROM mysql.user WHERE user = ''; \ GRANT ALL PRIVILEGES ON *.* TO $rep_username@'%' IDENTIFIED BY '$rep_password'; \ GRANT ALL PRIVILEGES ON *.* TO $db_username@'%' IDENTIFIED BY '$db_password'; \ FLUSH PRIVILEGES;" /etc/init.d/mysql stop # Configuring datadir in my.cnf (using hardcoded dir /var/lib/mysql) my_cnf_path=$(whereis my.cnf | awk 'END { if (NF >= 2) print $2; }') if [[ my_cnf_path != "" ]]; then sed -e "s|export my_cnf_file=.*|export my_cnf_file=\"$my_cnf_path\"|" \ mysql-config.sh > /tmp/mysql-config.sh.tmp mv /tmp/mysql-config.sh.tmp mysql-config.sh else my_cnf_path=$(cat mysql-config.sh | \ awk 'BEGIN { FS="=" } { gsub("\"", "", $2); if ($1 == "export my_cnf_file") print $2 }') fi cat /etc/my.cnf | grep -q ^datadir=.* if [[ $? = 0 ]]; then sed -e "s|datadir=.*|datadir=/var/lib/mysql|" $my_cnf_path > /tmp/my.cnf.tmp mv /tmp/my.cnf.tmp $my_cnf_path else echo "[mysqld]" >> $my_cnf_path echo "datadir=/var/lib/mysql" >> $my_cnf_path fi # Disabling mysqld auto startup on boot chkconfig --del mysql # Updating node state state_json=$(api_call "PUT" "system/$system_id/node/$node_id" "state=provisioned") if [[ $? != 0 ]] ; then set_error "Failed to set the node state to provisioned" logger -p user.error -t MariaDB-Manager-Remote "Failed to set the node state to provisioned" exit 1 fi json_error "$state_json" if [[ "$json_err" != "0" ]]; then set_error "Failed to set the node state to provisioned" logger -p user.error -t MariaDB-Manager-Remote "Failed to set the node state to provisioned" exit 1 fi
true
c2f98be1d7b54c785e0dc27b2fc84f78185b951b
Shell
braiins/braiins-os-feeds
/miner_recovery/files/miner_recovery.sh
UTF-8
2,056
4
4
[]
no_license
#!/bin/sh # redirect STDOUT and STDERR to /dev/kmsg exec 1<&- 2<&- 1>/dev/kmsg 2>&1 RECOVERY_MTD=/dev/mtd6 FIMRWARE_MTD=/dev/mtd7 FACTORY_OFFSET=0x800000 FACTORY_SIZE=0xC00000 FPGA_OFFSET=0x1400000 FPGA_SIZE=0x100000 SD_DIR=/mnt SD_FACTORY_BIN_PATH=$SD_DIR/factory.bin SD_SYSTEM_BIT_PATH=$SD_DIR/system.bit FACTORY_BIN_PATH=/tmp/factory.bin SYSTEM_BIT_PATH=/tmp/system.bit mtd_write() { mtd -e "$2" write "$1" "$2" } echo "Miner is in the recovery mode!" # try to set LEDs to signal recovery mode echo timer > "/sys/class/leds/Green LED/trigger" echo nand-disk > "/sys/class/leds/Red LED/trigger" # prevent NAND corruption when U-Boot env cannot be read if [ -n "$(fw_printenv 2>&1 >/dev/null)" ]; then echo "Do not use 'fw_setenv' to prevent NAND corruption!" exit 1 fi FACTORY_RESET=$(fw_printenv -n factory_reset 2>/dev/null) SD_IMAGES=$(fw_printenv -n sd_images 2>/dev/null) # immediately exit when error occurs set -e if [ x${FACTORY_RESET} == x"yes" ]; then echo "Resetting to factory settings..." if [ x${SD_IMAGES} == x"yes" ]; then echo "recovery: using SD images for factory reset" # mount SD mount /dev/mmcblk0p1 ${SD_DIR} # copy factory image to temp cp "$SD_FACTORY_BIN_PATH" "$FACTORY_BIN_PATH" # compress bitstream for FPGA gzip -c "$SD_SYSTEM_BIT_PATH" > "$SYSTEM_BIT_PATH" umount ${SD_DIR} else # get uncompressed factory image nanddump -s ${FACTORY_OFFSET} -l ${FACTORY_SIZE} ${RECOVERY_MTD} \ | gunzip \ > "$FACTORY_BIN_PATH" # get bitstream for FPGA nanddump -s ${FPGA_OFFSET} -l ${FPGA_SIZE} ${RECOVERY_MTD} \ > "$SYSTEM_BIT_PATH" fi # write the same FPGA bitstream to both MTD partitions mtd_write "$SYSTEM_BIT_PATH" fpga1 mtd_write "$SYSTEM_BIT_PATH" fpga2 # erase all firmware partition mtd erase firmware1 mtd erase firmware2 ubiformat ${FIMRWARE_MTD} -f "$FACTORY_BIN_PATH" # remove factory reset mode from U-Boot env fw_setenv factory_reset sync echo "recovery: factory reset has been successful!" # reboot system echo "Restarting system..." reboot fi
true
8de12441d4b683b1ca5abab53345d13dfc199a22
Shell
ciax/ciax-xml
/work/successtag.sh
UTF-8
401
3.421875
3
[]
no_license
#!/bin/bash # Add git tag for the version of successful operation # You can Specify date [ "$1" = -s ] && { s=1; shift; } date=$(date ${1:+-d $1} +%Y/%m/%d) br=$(git branch --contains) tag="Success!${PROJ^^}@$HOSTNAME($date)-${br#* }" git check-ref-format "$tag" || { echo "Invalid format"; exit 1; } echo "$tag" [ "$s" ] || { echo "successtag (-s: real setting)"; exit; } git tag $tag git push --tag
true
4cac6690390db95be19227323ed0e7629d77a77a
Shell
irsols-devops/azureAcctSetups
/list_registered_apps.sh
UTF-8
493
2.59375
3
[]
no_license
#!/bin/bash # Provided by IRSOLS DevOps team # IRSOLS Inc @irsols # www.irsols.com / Your Cloud Native Edge # MIT License. Attribution required # Reference: https://docs.microsoft.com/en-us/cli/azure/ad?view=azure-cli-latest echo " Following are the list of Apps currently registered w/ default subscription ID" az ad app list --query [*].[displayName,appId] echo "Carefully review these Apps in AZ Portal and then delete unused/unwanted ones using" echo "az ad app delete --id <App_Id>"
true
45e8b61b2e12a2b484e051c44d4636aefc955851
Shell
vtadrones/rogue-scripts
/geogit-scripts/install-geogit
UTF-8
522
2.84375
3
[]
no_license
#!/bin/bash source settings; printf "\n\n\t Downloading GeoGit...\n\n"; cd $GEOGIT_INSTALL; git clone https://github.com/ROGUE-JCTD/GeoGit.git -b SprintRelease; printf "\n\n\t Building GeoGit...\n\n"; cd GeoGit/src/parent; mvn clean install -DskipTests; chown $GEOGIT_OWNER:$GEOGIT_OWNER -R $GEOGIT_HOME; echo "export PATH=$PATH:$GEOGIT_HOME/src/cli-app/target/geogit/bin" >> /etc/profile; printf "\n\n\t GeoGit installed... To start using GeoGit, open a new shell, or run\n"; printf "\t source /etc/profile\n\n";
true
71dfcc9a641a72f7b712e56a24a88417977b1cf9
Shell
sebidude/dotfiles
/kubernetes
UTF-8
3,255
3.453125
3
[]
no_license
#!/bin/bash export PATH="$PATH:$HOME/.krew/bin" KUBECONFIG_DEFAULT="$HOME/.kube/default/default.conf" kctl=$(which kubectl) if [ -n "$kctl" -a -x "$kctl" ]; then source <(kubectl completion bash) fi helmbin=$(which helm) if [ -n "$helmbin" -a -x "$helmbin" ]; then source <(helm completion bash) fi updateKubectl() { if [ -n "$1" ]; then KUBECTLVERSION="$1" else KUBECTLVERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) fi echo Updating kubectl to $KUBECTLVERSION curl -s -LO https://storage.googleapis.com/kubernetes-release/release/${KUBECTLVERSION}/bin/linux/amd64/kubectl mkdir -p $HOME/bin chmod 755 kubectl mv kubectl $HOME/bin/kubectl-$KUBECTLVERSION rm -f $HOME/bin/kubectl ln -s $HOME/bin/kubectl-$KUBECTLVERSION $HOME/bin/kubectl } debugpodon() { cat << EOF | kubectl apply -f - apiVersion: v1 kind: Pod metadata: name: $(whoami)-debug-$1 labels: owner: $(whoami) spec: nodeSelector: kubernetes.io/hostname: $1 terminationGracePeriodSeconds: 1 containers: - name: alpine image: alpine command: - sleep - infinity EOF while sleep 1 do READY=$(kubectl get pod $(whoami)-debug-$1 | grep Running ) [[ -n "$READY" ]] && { kubectl exec -i -t $(whoami)-debug-$1 -- /bin/sh kubectl delete pod $(whoami)-debug-$1 break } done } debugpod() { cat << EOF | kubectl apply -f - apiVersion: v1 kind: Pod metadata: name: $(whoami)-debug labels: owner: $(whoami) spec: terminationGracePeriodSeconds: 1 containers: - name: alpine image: alpine command: - sleep - infinity EOF while sleep 1 do READY=$(kubectl get pod $(whoami)-debug | grep Running ) [[ -n "$READY" ]] && { kubectl exec -i -t $(whoami)-debug -- /bin/sh kubectl delete pod $(whoami)-debug break } done } kubeconfig() { if [ -n "$1" ]; then KUBECONFIG_DEFAULT="$1" KUBECONFIG=$KUBECONFIG_DEFAULT elif [ -z "$KUBECONFIG" ]; then KUBECONFIG=$KUBECONFIG_DEFAULT fi cp $KUBECONFIG $HOME/.kube/default/current-$$.conf export PREVIOUS_KUBECONFIG=$KUBECONFIG; export 
KUBECONFIG=$HOME/.kube/default/current-$$.conf; } _getKubeNamespaces() { local curr_arg; local NAMESPACES; curr_arg=${COMP_WORDS[COMP_CWORD]} NAMESPACES="$(kubectl get ns -o jsonpath='{.items[*].metadata.name}')" COMPREPLY=( $(compgen -W "${NAMESPACES}" -- $curr_arg ) ); } _getKubeContexts() { local curr_arg; local CONTEXTS; curr_arg=${COMP_WORDS[COMP_CWORD]} CONTEXTS="$(kubectl config get-contexts -o name)" COMPREPLY=( $(compgen -W "${CONTEXTS}" -- $curr_arg ) ); } ktx() { if [ -n "$1" ]; then kubectl config use-context $1 return 0 fi return 1 } kubens() { if [ -n "$1" ]; then kubectl config set-context --current --namespace=$1 return 0 fi return 1 } getAdminConfig() { echo "not implemented" } cleanupSession() { rm $HOME/.kube/default/current-$$.conf } alias k=kubectl alias watch="watch " complete -F __start_kubectl k complete -F _getKubeNamespaces kubens complete -F _getKubeContexts ktx trap cleanupSession EXIT kubeconfig
true
ef310c8a96b8cec5456dc17455a5f397b58cf033
Shell
hcary/aws-devops
/src/rd-aws
UTF-8
1,164
2.703125
3
[]
no_license
#!/bin/bash # # This file is part of the aws-devops distribution (https://github.com/hcary/aws-devops). # Copyright (c) 2019 Harvey Cary. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # MYUSER=$USER MYUID=`id -u $USER` DEST=/home/$MYUSER docker run \ --name $USER.$$ \ --hostname "aws" \ -e "MYUSER=$MYUSER" \ -e "MYUID=$MYUID" \ -it \ --rm \ --mount type=bind,src="$(echo $HOME)",dst=/home/"$(echo $USER)" \ ${COMPANY}/aws \ /bin/bash #docker run --name $USER.$$ -e "MYUSER=$MYUSER" -e "MYUID=$MYUID" -t -i --rm --mount 'type=bind,src=/Users/hcary,dst=/home/hcary' rc3labs/youtube-dl /bin/bash
true
3740f18e0606c4cb270690ca3bda28abbf98193c
Shell
stockmind/gpd-pocket-ubuntu-respin
/update-kernel.sh
UTF-8
462
3.34375
3
[]
no_license
#!/bin/bash LATESTKERNEL="gpdpocket-20190225-5.0.0-rc7-kernel-files.zip" mkdir -p update-kernel cd update-kernel if [ ! -f "$LATESTKERNEL" ]; then echo "Downloading kernel files...." wget "https://bitbucket.org/simone_nunzi/gpdpocket-kernel/downloads/$LATESTKERNEL" fi echo "Extracting kernel files..." unzip -o "$LATESTKERNEL" echo "Installing kernel..." sudo dpkg -i *.deb echo "Update grub..." sudo update-grub rm *.deb cd .. rm -rfd update-kernel
true
b99ae3d74541383a764a6a0a84e22d94d453c15e
Shell
delianides/dotfiles
/install
UTF-8
3,485
3.71875
4
[ "MIT" ]
permissive
#!/bin/bash # vim: tw=0 # from https://github.com/gabebw/dotfiles/blob/main/install.sh set -eo pipefail mkdir -p ~/Code color() { local colornumber="$1" shift tput setaf "$colornumber" echo "$*" tput sgr0 } red() { color 1 "$*"; } green() { color 2 "$*"; } yellow() { color 3 "$*"; } info() { green "=== $*" } error() { red "!! $*" } stay_awake_while() { caffeinate -dims "$@" } quietly_brew_bundle() { local brewfile=$1 shift local regex='(^Using )|Homebrew Bundle complete|Skipping install of|It is not currently installed|Verifying SHA-256|==> (Downloading|Purging)|Already downloaded:|No SHA-256' stay_awake_while brew bundle --no-lock --file="$brewfile" "$@" | (grep -vE "$regex" || true) } command_does_not_exist() { ! command -v "$1" >/dev/null } info "Checking for command-line tools..." if command_does_not_exist xcodebuild; then stay_awake_while xcode-select --install fi info "Installing Homebrew (if not already installed)..." if command_does_not_exist brew; then stay_awake_while /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" fi info "Installing Homebrew packages..." brew tap homebrew/bundle brew install mas 2>/dev/null quietly_brew_bundle "$HOME/Code/dotfiles/Brewfile" quietly_brew_bundle "$HOME/Code/dotfiles/Brewfile.lang" || true quietly_brew_bundle "$HOME/Code/dotfiles/Brewfile.casks" || true app_store_id=$(mas account || true) desired_app_store_id="apple@drew.delianides.com" if [[ "$app_store_id" == "$desired_app_store_id" ]]; then quietly_brew_bundle "$HOME/Code/dotfiles/Brewfile.mas" else if mas account &>/dev/null; then error "You are signed in to the App Store as $app_store_id." error "Sign out and re-sign in as $desired_app_store_id" else error "You are not signed in to the App Store." error "Sign in as $desired_app_store_id" fi error "(This won't affect your iCloud account.)" fi if ! echo "$SHELL" | grep -Fq zsh; then info "Your shell is not Zsh. Changing it to Zsh..." 
chsh -s /bin/zsh fi info "Linking dotfiles into ~..." # Before `rcup` runs, there is no ~/.rcrc, so we must tell `rcup` where to look. # We need the rcrc because it tells `rcup` to ignore thousands of useless Vim # backup files that slow it down significantly. export RCRC=rcrc stay_awake_while rcup -d . info "Setting Pre-Commit Hook" cp "$HOME/Code/dotfiles/lib/dotfiles-precommit-hook" "$HOME/Code/dotfiles/.git/hooks/pre-commit" info "Installing MacOS system settings..." stay_awake_while sudo "$HOME"/Code/dotfiles/system/macos info "Installing ASDF language version manager..." alias install_asdf_plugin=add_or_update_asdf_plugin add_or_update_asdf_plugin() { local name="$1" local url="$2" if ! asdf plugin-list | grep -Fq "$name"; then asdf plugin-add "$name" "$url" else asdf plugin-update "$name" fi } declare -a languages=( "nodejs" "python" "ruby" "rust" "golang" ) for language in "${languages[@]}"; do add_or_update_asdf_plugin "$language" done # Installs to ~/.terminfo echo "Installing italics-capable terminfo files..." if ! [[ -r ~/.terminfo/61/alacritty ]]; then alacritty_terminfo=$(mktemp) stay_awake_while curl -o "$alacritty_terminfo" https://raw.githubusercontent.com/jwilm/alacritty/master/extra/alacritty.info tic -xe alacritty,alacritty-direct "$alacritty_terminfo" fi info "Success!" yellow "== Post-install instructions ==" yellow "1. Remap Caps Lock to CTRL in the Keyboard prefpane" yellow "2. Make sure to install TMUX plugins: prefix+I"
true
e6cb4071de60b920bc9384da767a39aa27bb1422
Shell
joecox/dotfiles
/zshrc
UTF-8
3,390
2.859375
3
[]
no_license
# Path to your oh-my-zsh installation. export ZSH=$HOME/.oh-my-zsh # Set name of the theme to load. # Look in ~/.oh-my-zsh/themes/ # Optionally, if you set this to "random", it'll load a random theme each # time that oh-my-zsh is loaded. # ZSH_THEME="cox" ZSH_THEME="robbyrussell" # Uncomment the following line to use case-sensitive completion. # CASE_SENSITIVE="true" # Uncomment the following line to use hyphen-insensitive completion. Case # sensitive completion must be off. _ and - will be interchangeable. HYPHEN_INSENSITIVE="true" # Uncomment the following line to disable bi-weekly auto-update checks. # DISABLE_AUTO_UPDATE="true" # Uncomment the following line to change how often to auto-update (in days). # export UPDATE_ZSH_DAYS=13 # Uncomment the following line to disable colors in ls. # DISABLE_LS_COLORS="true" # Uncomment the following line to disable auto-setting terminal title. # DISABLE_AUTO_TITLE="true" # Uncomment the following line to enable command auto-correction. # ENABLE_CORRECTION="true" # Uncomment the following line to display red dots whilst waiting for completion. # COMPLETION_WAITING_DOTS="true" # Uncomment the following line if you want to disable marking untracked files # under VCS as dirty. This makes repository status check for large repositories # much, much faster. # DISABLE_UNTRACKED_FILES_DIRTY="true" # Uncomment the following line if you want to change the command execution time # stamp shown in the history command output. # The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd" # HIST_STAMPS="mm/dd/yyyy" # Would you like to use another custom folder than $ZSH/custom? # ZSH_CUSTOM=/path/to/new-custom-folder # Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*) # Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/ # Example format: plugins=(rails git textmate ruby lighthouse) # Add wisely, as too many plugins slow down shell startup. 
plugins=(git) # User configuration export PATH="/usr/local/bin:$PATH" source $ZSH/oh-my-zsh.sh # You may need to manually set your language environment # export LANG=en_US.UTF-8 # Preferred editor for local and remote sessions # if [[ -n $SSH_CONNECTION ]]; then # export EDITOR='vim' # else # export EDITOR='mvim' # fi export EDITOR="code --wait --new-window" export VISUAL="$EDITOR" # Compilation flags # export ARCHFLAGS="-arch x86_64" # ssh # export SSH_KEY_PATH="~/.ssh/dsa_id" # Set personal aliases, overriding those provided by oh-my-zsh libs, # plugins, and themes. Aliases can be placed here, though oh-my-zsh # users are encouraged to define aliases within the ZSH_CUSTOM folder. # For a full list of active aliases, run `alias`. # # Example aliases # alias zshconfig="mate ~/.zshrc" # alias ohmyzsh="mate ~/.oh-my-zsh" alias re="source ~/.zshrc" alias algrep="alias | grep" alias ll="ls -lahoGF" alias eamcs="emacs" alias shrug="echo -n '¯\_(ツ)_/¯' | tee /dev/tty | pbcopy; echo" alias gbl="git branch -l" alias gbd="git branch -d" # functions synesthesia() { hex=$(echo "$@" | shasum | cut -c 1-6) echo $hex echo -n $hex | pbcopy open "https://www.google.com/search?q=%23${hex}" } server() { port=${1:-8000} python -m SimpleHTTPServer $port } # custom styles # LSCOLORS=exFxcxdxGxcdedabagacad # misc unsetopt AUTO_CD [[ -f "$HOME/.zshrc-local" ]] && source "$HOME/.zshrc-local"
true
433658528e4b25a83c57a10a1b96b6a28691f223
Shell
typetests/guava-typecheck
/build.sh
UTF-8
1,568
3.640625
4
[]
no_license
#!/bin/bash ROOT="$( cd "$(dirname "$0")"/.. ; pwd -P )" # Required argument $1 is one of: # formatter, interning, lock, nullness, regex, signature, nothing # Fail the whole script if any command fails set -e ## Build Checker Framework (cd $ROOT && git clone --depth 1 https://github.com/typetools/checker-framework.git) # This also builds annotation-tools and jsr308-langtools (cd $ROOT/checker-framework/ && ./.travis-build-without-test.sh downloadjdk) export CHECKERFRAMEWORK=$ROOT/checker-framework ## Obtain guava (cd $ROOT && git clone --depth 1 https://github.com/typetools/guava.git) if [[ "$1" == "lock" ]]; then (cd $ROOT/guava/guava && mvn compile -P checkerframework-local -Dcheckerframework.checkers=org.checkerframework.checker.lock.LockChecker) elif [[ "$1" == "nullness" ]]; then (cd $ROOT/guava/guava && mvn compile -P checkerframework-local -Dcheckerframework.checkers=org.checkerframework.checker.nullness.NullnessChecker) elif [[ "$1" == "misc" ]]; then (cd $ROOT/guava/guava && mvn compile -P checkerframework-local -Dcheckerframework.checkers=org.checkerframework.checker.regex.RegexChecker,org.checkerframework.checker.interning.InterningChecker,org.checkerframework.checker.formatter.FormatterChecker,org.checkerframework.checker.signature.SignatureChecker) elif [[ "$1" == "index" ]]; then (cd $ROOT/guava/guava && mvn compile -P checkerframework-local -Dcheckerframework.checkers=org.checkerframework.checker.index.IndexChecker) elif [[ "$1" == "nothing" ]]; then true else echo "Bad argument '$1' to build.sh" false fi
true
e69ca801ffc22751ddcbdcc561408a92b00c87e7
Shell
DLT1995/CaffeOnACL-Android
/model_download.sh
UTF-8
1,063
3.015625
3
[]
no_license
#!/bin/bash CAFFE_ON_ACL=CaffeOnACL chmod a+x $CAFFE_ON_ACL/data/ilsvrc12/get_ilsvrc_aux.sh chmod a+x $CAFFE_ON_ACL/scripts/download_model_binary.py # download data $CAFFE_ON_ACL/data/ilsvrc12/get_ilsvrc_aux.sh # download AlexNet $CAFFE_ON_ACL/scripts/download_model_binary.py $CAFFE_ON_ACL/models/bvlc_alexnet # download GoogLeNet $CAFFE_ON_ACL/scripts/download_model_binary.py $CAFFE_ON_ACL/models/bvlc_googlenet # download SqueezeNet SQUEEZE_NET_MODEL=$CAFFE_ON_ACL/models/SqueezeNet/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel if [ -s $SQUEEZE_NET_MODEL ]; then echo "SqueezeNet model already exists, not getting it." else wget -c https://github.com/DeepScale/SqueezeNet/raw/master/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel -O /tmp/squeezenet_v1.1.caffemodel.temp && mv /tmp/squeezenet_v1.1.caffemodel.temp $SQUEEZE_NET_MODEL fi # download MobileNet if [ -d $CAFFE_ON_ACL/models/MobileNet ]; then echo "MobileNet model already exists, not getting it." else git clone https://github.com/finley-/MobileNet-Caffe $CAFFE_ON_ACL/models/MobileNet fi
true
fb24d6f43596cf534a6148ec2c7f7d44802ea494
Shell
Peter5793/alx-system_engineering-devops
/0x0C-web_server/3-redirection
UTF-8
394
2.734375
3
[]
no_license
#!/usr/bin/env bash #Configure your Nginx server so that /redirect_me is #redirecting to another page sudo apt-get -y update sudo apt-get -y install nginx echo "Holberton School" > /var/www/html/inddex.nginx-debian.html variable="rewrite ^/redirect_me https://www.youtube.com/watch?v=QH2-TGUlwu4 permanent;" sed -i "19i $variable" /etc/nginx/sites-available/default sudo service nginx restart
true
0264cb58eae2e4c0d9f46fa6675b1cc339c8a95b
Shell
offensivenomad/dotfiles
/rootrc/bash.d/go
UTF-8
276
2.6875
3
[]
no_license
#!/bin/bash install-go() { curl https://dl.google.com/go/go1.13.5.linux-amd64.tar.gz -o /tmp/go1.13.5.linux-amd64.tar.gz sudo tar -C /usr/local -xzvf ./tmp/go } export GOARCH=amd64 export GOOS=linux export GOPATH="$HOME"/.go export PATH=$PATH:/usr/local/go:"$GOPATH"/bin
true
ab0db124e1de453cb49daef856ab11c96d02d827
Shell
s-yakoo/Bash
/hypo
UTF-8
1,638
2.84375
3
[]
no_license
#!/bin/bash var=$(grep "unknown" tools_all_names.clstr | grep -c "#vibrio#interprot#" grep "unknown" tools_all_names.clstr | grep -c "#firmicutes#interprot#" grep "unknown" tools_all_names.clstr | grep -c "#bacteroides#interprot#" grep "unknown" tools_all_names.clstr | grep -c "#vibrio#rast#" grep "unknown" tools_all_names.clstr | grep -c "#bacteroides#rast#" grep "unknown" tools_all_names.clstr | grep -c "#firm#rast#" grep "#hypothetical protein#" tools_all_names.clstr | grep -c "#ED#prokka" grep "#hypothetical protein#" tools_all_names.clstr | grep -c "#firmicutes#prokka" grep "#hypothetical protein#" tools_all_names.clstr | grep -c "#bacteroides#prokka" grep "hypothetical protein" tools_all_names.clstr | grep -c "#vibrio#interprot#" grep "hypothetical protein" tools_all_names.clstr | grep -c "#firmicutes#interprot#" grep "hypothetical protein" tools_all_names.clstr | grep -c "#bacteroides#interprot#" grep "hypothetical protein" tools_all_names.clstr | grep -c "#vibrio#rast#" grep "hypothetical protein" tools_all_names.clstr | grep -c "#firm#rast#" grep "hypothetical protein" tools_all_names.clstr | grep -c "#bacteroides#rast#") ### this puts the output of the command (which is only the sum value) into the variable called var echo $var | awk '{print "vibrio_interpro=",$1 += $10,"\n","firmicutes_interpro=", $2 += $11,"\n","bacteroides_interpro=",$3 += $12,"\n", "vibrio_rast=", $4 += $13,"\n","firm_rast=", $6 += $14, "\n", "bacteroides_rast=", $5 += $15, "\n", "vibrio_prokka=", $7, "\n", "firm_prokka=", $8, "\n", "bacteroides_prokka=",$9}' > hypothetical_counts.txt cat hypothetical_counts.txt
true
e7f86552f57d9ee458bcdb06104430b1731c7152
Shell
Yethal/tidal-offline-adder
/downloader.sh
UTF-8
324
2.625
3
[ "MIT" ]
permissive
#!/bin/sh source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/functions.sh" IFS=' ' #sanity checks checkDep adb checkDep xmllint checkDevice checkTidal #app launch launchTidal openMyCollection openAlbums while true; do for x in `getOptions`; do downloadAlbum $x; done scrollAlbums done
true
ecc91ee7bcf66e7894cc871792ad61c8280d8d03
Shell
chenbk85/easyScripts
/get_bad_ip.sh
UTF-8
1,179
3.515625
4
[]
no_license
#!/bin/sh [ -s /etc/53kf.cfg ] && source /etc/53kf.cfg IPTABLE="/sbin/iptables" OLD_FILE="/tmp/.old_bad_ip" NEW_FILE="/tmp/.new_bad_ip" FILE="/etc/bad_ip" cd $NGINX_LOG if [ -e $OLD_FILE ];then awk '/GET \/webCompany\.php HTTP/ {print $1}' access.log|sort|uniq > $NEW_FILE diff -q $OLD_FILE $NEW_FILE if [ $? = 1 ];then while read IP;do $IPTABLE -t nat -I PREROUTING -s $IP -j DROP done < $NEW_FILE cat $NEW_FILE >> $FILE sort $FILE|uniq > /tmp/.bad_ip_tmp mv -f /tmp/.bad_ip_tmp $FILE mv -f $NEW_FILE $OLD_FILE echo "Web was attacked at `date`" >> /var/log/53kf/web_attacked.log sed -r -i '/GET \/webCompany\.php HTTP/d' access.log fi else awk '/GET \/webCompany\.php HTTP/ {print $1}' access.log|sort|uniq > $OLD_FILE if [ -s $OLD_FILE ];then while read IP;do $IPTABLE -t nat -I PREROUTING -s $IP -j DROP done < $OLD_FILE cat $OLD_FILE >> $FILE sort $FILE|uniq > /tmp/.bad_ip_tmp mv -f /tmp/.bad_ip_tmp $FILE sed -r -i '/GET \/webCompany\.php HTTP/d' access.log fi fi
true
29443556e64eddb850cfe02d8b1532ccfb20c56a
Shell
Seagate/cortx-motr
/utils/spiel/m0_filesystem_stats
UTF-8
7,937
3.15625
3
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
#
#
# Script to calculate cluster size.
#
# Usage : ${SCRIPT_NAME}
#
# RETURN :
#  0 : SUCCESS
#  1 : System Error
#  2 : Client endpoint busy
#  3 : m0spiel command timed out
#
# On Success returns
#  free space <int>
#  available space <int>
#  total space <int
#  services total <int
#  services replied <int
#
# Author : Yeshpal Jain
# email : yeshpal.jain@seagate.com
#
#

#set -eu
#set -x
export PS4='+ ${FUNCNAME[0]:+${FUNCNAME[0]}():}line ${LINENO}: '

# Repo root: three directory levels above this script (utils/spiel/<name>).
M0_SRC_DIR=`readlink -f ${BASH_SOURCE[0]}`
M0_SRC_DIR=${M0_SRC_DIR%/*/*/*}
echo $M0_SRC_DIR

. $M0_SRC_DIR/utils/functions # sandbox_init, report_and_exit

SCRIPT_NAME=`echo $0 | awk -F "/" '{print $NF}'`
# Lock file used to serialize concurrent runs per user (see main()).
STATUS_FILE=/tmp/get_size_client$USER
SAVEIFS=$IFS
TIMEOUT=10
lnet_nids=$(m0_local_nid_get)
server_addr=""
client_addr=""
profile_addr=""
systemd=0
# Default tool locations; get_bin_path() falls back to $PATH lookup.
declare libmotr_path="/usr/lib64/libmotr.so"
declare m0spiel_path="/usr/bin/m0spiel"
declare consul_path="/usr/bin/consul"
declare python3_path="/usr/bin/python3"
consul_query_args="kv get -recurse m0conf"
consul_query_cmd="$consul_path $consul_query_args"
motr_conf_file="/etc/sysconfig/motr"
[ -d /etc/sysconfig ] || motr_conf_file="/etc/default/motr"

# exit_with_error <message> <exit-code> - print to stderr, remove the lock
# file if present, and exit.
exit_with_error() {
    msg=$1
    exit_code=$2
    >&2 echo -e $msg
    [[ -e $STATUS_FILE ]] && rm $STATUS_FILE
    exit $exit_code
}

# get_bin_path <name> - echo the path stored in the global <name>_path
# variable, falling back to `which <name>` when that path does not exist.
# Uses eval for variable indirection on the <name>_path globals.
get_bin_path() {
    bin_name="$1"
    eval path_var="${bin_name}""_path"
    eval file_path=\${${bin_name}_path}
    if [ ! -e ${file_path} ] ; then
        declare $path_var=$(which $bin_name)
        eval file_path=\${${bin_name}_path}
    fi
    # return file_path string, Don't add any other echo statement
    # in this method.
    echo "$file_path"
}

# check_bin_path <name> - abort with exit code 1 when <name>_path does not
# point at an existing file.
check_bin_path() {
    bin_name="$1"
    eval file_path=\${${bin_name}_path}
    if [ ! -e ${file_path} ] ; then
        exit_with_error "ERROR: $bin_name not found" 1
    fi
}

# check_lib_path <libfile> - like check_bin_path, keyed on the library name
# without its extension.
check_lib_path() {
    arg1=$1
    lib_name="${arg1%.*}"
    eval path_var=\${${lib_name}_path}
    if [ ! -e ${path_var} ] ; then
        exit_with_error "ERROR: $lib_name not found" 1
    fi
}

# Run an embedded m0spiel (Python) session that fetches and prints the
# filesystem statistics. timeout(1) bounds the run; exit status >= 124
# means the command timed out (mapped to this script's exit code 3).
fs_stats_fetch() {
    /usr/bin/timeout --foreground $TIMEOUT $python3_path $m0spiel_path $M0_SPIEL_OPTS <<EOF
spiel.rconf_status = 0

def handle_sigterm(signalNumber, frame):
    if spiel.rconf_status:
        spiel.rconfc_stop()
        sys.exit(1)
    return

if spiel.cmd_profile_set(str(Fid($M0_PROFILE_ID))):
    sys.exit('cannot set profile')

spiel.signal_nr = signal.SIGTERM
spiel.signal_cb = handle_sigterm
spiel.register_signal()

if spiel.rconfc_start():
    sys.exit('cannot start rconfc')
else:
    spiel.rconf_status = 1

fs_stats = FsStats()
rc = spiel.filesystem_stats_fetch(fs_stats)
if rc != 0:
    sys.exit('Error: filesystem stats fetch: \
             (status {0})'.format(rc))
spiel.rconfc_stop()
spiel.rconf_status = 0
print(" free space {0:>20}".format(fs_stats.fs_free_disk))
print(" available space {0:>20}".format(fs_stats.fs_avail_disk))
print(" total space {0:>20}".format(fs_stats.fs_total_disk))
print(" services total {0:>20}".format(fs_stats.fs_svc_total))
print(" services replied {0:>20}".format(fs_stats.fs_svc_replied))
EOF
    rc=$?
    if [ $rc -ne 0 ] ; then
        [[ $rc -ge 124 ]] && exit_with_error "m0spiel command timedout" 3
        exit_with_error "m0spiel command exit with error:$rc" $rc
    fi
}

# Prefer the salt minion id as the node name; fall back to the FQDN.
get_hostname() {
    host=" "
    if [[ -f /etc/salt/minion_id ]] ; then
        host=$(cat /etc/salt/minion_id)
    fi
    if [[ -z "$host" ]] ; then
        host=$(hostname --fqdn)
    fi
    echo $host
}

# Get endpoints of local node's processes that run service(s) of given type.
get_endpoints_of() {
    local svc_type=$1
    $consul_query_cmd | awk -F/ -v host=$(get_hostname) -v svc_type=$svc_type '
        $3 != host { next }
        $6 ~ /^endpoint:/ { sub(/^endpoint:/, "", $6); endpoints[$5] = $6 }
        $6 == "services" && match($7, "^" svc_type ":") { match_p[$5] = 1 }
        END { for (k in match_p) print endpoints[k] }
    '
}

# get_ep_status <endpoint> - extract the bracketed status for the endpoint
# from `hctl status` output.
get_ep_status() {
    client_ep=$1
    # return endpoint running status from hctl,
    # Dont add any other echo statement in this method.
    echo "$(hctl status | grep $client_ep | sed 's/.*\[\([^]]*\)\].*/\1/g' | sed 's/^[[:space:]]*//g')"
}

# Resolve all tool/library/endpoint settings not supplied on the command
# line, take the per-user lock, and fetch the stats.
main() {
    pid=$$
    python3_path=$(get_bin_path python3)
    m0spiel_path=$(get_bin_path m0spiel)
    check_bin_path python3
    check_bin_path m0spiel

    # Fall back to a development build of libmotr when the packaged
    # library is absent.
    if [ ! -s $libmotr_path ] ; then
        [[ -r $motr_conf_file ]] && source $motr_conf_file
        [[ -n "$MOTR_DEVEL_WORKDIR_PATH" ]] && \
            libmotr_path=$MOTR_DEVEL_WORKDIR_PATH/motr/.libs/libmotr.so
        [[ ! -s $libmotr_path ]] && exit_with_error "libmotr Not Found" 1
    fi

    # Discover the HA (server) endpoint from consul when not given.
    if [ -z "$server_addr" ] ; then
        consul_path=$(get_bin_path consul)
        check_bin_path consul
        server_addr=$(get_endpoints_of ha | tail -1)
    fi

    # Extract the profile fid from the consul KV dump when not given.
    if [ -z "$profile_addr" ] ; then
        profile_addr=$($consul_query_cmd | grep profiles: | sed -n -e 's/^.*\(profiles:\)/\1/p' | awk '{print $1}'| sed 's/^[^:]*://' | sed 's/:/,/')
    fi

    # Pick a spare client endpoint; under systemd (-d) use the last one.
    if [ -z "$client_addr" ] ; then
        local cmd="tail -2"
        [[ $systemd == 1 ]] && cmd="tail -1"
        client_addr=$(get_endpoints_of m0_client_other | $cmd | head -1)
    fi

    M0_SPIEL_OPTS="-s $server_addr -l $libmotr_path --client=$client_addr"
    M0_PROFILE_ID=$profile_addr

    if [[ -z "$server_addr" || -z "$libmotr_path" || -z "$client_addr" || -z "$profile_addr" ]]; then
        exit_with_error "Invalid Arguments" 1
    fi

    touch $STATUS_FILE
    exec {FD}<>$STATUS_FILE
    #Wait for $TIMEOUT to acquire the lock
    if ! flock -x -w $TIMEOUT $FD; then
        [[ -e $STATUS_FILE ]] && rm $STATUS_FILE
        exit 2
    fi
    #Lock acquired, below command would be executed exclusively to current process
    fs_stats_fetch
    [[ -e $STATUS_FILE ]] && rm $STATUS_FILE
}

function usage() {
    cat << EOF
Usage: $SCRIPT_NAME [options]
   -c, --clent           client endpoint address
   -s, --server          server endpoint address
   -p, --profile         profile endpoint address
   -l, --libmotr_path    libmotr path
   -t, --timeout         timeout in seconds
   -h, --help            this help

Example:
$SCRIPT_NAME -s ${lnet_nids}:12345:1:1 -c ${lnet_nids}:12345:1:4 -p 0x7000000000000001,0x1e -l $libmotr_path
-c, -s, -l and -p are optional arguments

RETURN :
 0 : SUCCESS
 1 : System Error
 2 : endpoint busy
 3 : m0spiel command timed out

Note: $SCRIPT_NAME can be executed only by privileged user
EOF
}

# check_arg_value <option> <value> - abort when an option is missing its
# mandatory value.
function check_arg_value() {
    [[ $# -gt 1 ]] || {
        exit_with_error "Incorrect use of the option $1\nUse --help option" 1
    }
}

# Command-line parsing; every consumed value is followed by the shared
# `shift` at the bottom of the loop.
while [[ $# -gt 0 ]]; do
    case $1 in
        -c| --client)
            check_arg_value $1 $2
            client_addr="$2"
            shift ;;
        -s| --server)
            check_arg_value $1 $2
            server_addr="$2"
            shift ;;
        -p| --profile)
            check_arg_value $1 $2
            profile_addr="$2"
            shift ;;
        -t| --timeout)
            check_arg_value $1 $2
            TIMEOUT=$2
            shift ;;
        -l| --libmotr_path)
            check_arg_value $1 $2
            libmotr_path=$2
            shift ;;
        -d| --daemon)
            systemd=1 ;;
        -h| --help)
            usage
            exit 1 ;;
    esac
    shift
done

main
exit 0
true
0d5162d014acf1314203c94ef3f99ddb6d47ca88
Shell
jbruceh88/scripts
/gitHooks/pre-commit.sample
UTF-8
1,759
3.890625
4
[]
no_license
#!/bin/bash #Redirect output to stderr. exec 1>&2 # Color codes red=`tput setaf 1` green=`tput setaf 2` blue=`tput setaf 4` reset=`tput sgr0` keywords=(print_r var_dump console\.log) #put key words together for grep keywords_for_grep=$(printf "|%s" "${keywords[@]}") keywords_for_grep=${keywords_for_grep:1} #debugging error counter debug_found=0 # Ignore the following files. exclude_dir_and_ext='\/features\/|\/contrib\/|\/devel\/|\/libraries\/|\/vendor\/|\.info$|\.png$|\.gif$|\.jpg$|\.ico$|\.patch$|\.htaccess$|\.sh$|\.ttf$|\.woff$|\.eot$|\.svg$' # Check for debugging functions # List all files added to commit excluding the exceptions files_changed=`git diff-index --diff-filter=ACMRT --cached --name-only HEAD -- | egrep -v $exclude_dir_and_ext` if [ -n "$files_changed" ] then for FILE in $files_changed ; do for keyword in "${keywords[@]}" ; do # find the pattern pattern="^\+(.*)?$keyword(.*)?" result_for_file=`git diff --cached $FILE | egrep -x "$pattern"` if [ ! -z "$result_for_file" ] then if [ $debug_found -eq 0 ] then echo "${red}" echo "# Debugging function(s):" echo "------------------------${reset}" fi debug_found=1 echo "Debugging function" $keyword git grep -n $keyword $FILE | awk '{split($0,a,":"); printf "\tfound in " a[1] " on line " a[2] "\n"; }' fi done done fi errors_found=$((debug_found)) #stop the commit if [ $errors_found -eq 0 ] then #echo "${green}" #echo "Your Code is clean from debug and has been committed" echo base64 -d <<<"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@(..&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*...#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@......%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@(.&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.......,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&,...@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@........./@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@(.....,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@...........(@@@@@@@@@@@@@@@@@@@@@@@@@@@#,.......*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.............%@@@@@@@@@@@@@@@@@@@@@@@/..........#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@..............*@@@@@@@@@@@@@@@@@@@@/............&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&................*@@@@@@@@@@@@@@@@*.............,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%..................&@@@@@@@@@@@@,...............(@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#...................(@@@@@@@@@*.................&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/.........,*(%&&@@@@@@@@@@@@,..................,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@,....,/@@@@@@@@@@@@@@@@@@@@@@@@%*............../@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.,#@@@@&/*%@@@@@@@@@@@@@@@@%**#@@@@%,..........%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@*.,/#&@@@@@@@@@@@@@@@@@@@@@@@@@@&@@@#/&@@@@%*............../&@@@@&/#@@@/........@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@*........,*#&@@@@@@@@@@@@@@@@@@@@@@@@#*......,*(#####(/*.......,(@@@@@@@(,....*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@,..................*/#&@@@@@@@@@#,.....*&@@@@@&%%%%%&@@@@@@&/,....,%@@@@@(...#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@,....................../@@@@*...../@@@@%,,,,,,,,,,,,,,,,,*%@@@&,....,&@@@&..&@@@@@@@@@@@@@@@@@@@@@*...%@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@*....................(@@@*....*&@@@#,,,,,,,,,,,,,,,,,,,,,,,,#@@@/....,%@@&.&@@@@@@@@@@@@@@@%/*,......,@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@,..................%@@@@&%&@@@%,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,#@@@@@@@@@@&.........................&@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@,................#@@,,/(#(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,***,,@@#....................../@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@.............../@@,,,,,,,,,,***,,,,,,,,,,,,,,,,,,,,,,,,,,***,,,,,,,,,,*@@*....................&@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@%.............,@@/,,,,*&@@@@@@@@@@@@@#,,,,,,,,,,,,,(@@@@@@@@@@@@@&*,,,,&@&..................*@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@(............%@&,,#@@@@&@@@        ,%@@&,,,,,,,&@@@*      #@@@@#(@@@(,*@@*................&@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@,...........@@(,@@@.,@@@@#           #@@,,,,*@@(          @@@@@(  &@%,@@%..............(@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@........../@@,@@,   /@,*.            /@@*,/@@             //      %@%(@@............,@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#.........%@&&@(                      &@&,@@*                      @@*@@*..........#@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*........@@#@@.                      %@@*@@                       @@*&@%......../@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#.......*@@*@@                       &@&/@@                       @@*#@@......*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@#,..........(@@,&@/                      @@#,@@.                     .@@,/@@....,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@&/,..............%@&,*@@                     #@&,,#@&                     &@(,*@@,...,%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@&*....................&@#,,*@@%                  &@&,,,,(@@.                  @@#,,,@@*......,#@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@#,........................@@(,,,,#@@@*             %@@/,,,,,,,&@@#             /@@@*,,,,@@/..........,#@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@#,.......................,@@*,,,,,,,(@@@@@%####&@@@&*%(,,,,,,#%,*&@@@@&%###&@@@@(,,,,,,,@@(................,(%@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@#,...................,@@*,,,,*,,,,,,,/#%%%#/,,,,(@@,,,,,,@@*,,,,,*/#%%%(*,,,,,,,,,,,@@(....................,#@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@/................*@@,,,,%@@@%/,,,,,,,,*#&,,,*@@,,,,,,@@*,,,,,,,,,,,,,&@@@@@@@@@#@@(.................,%@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@#,............*@@,,,,,,*%@@@@@@@@@@@@&,,,,@@*,,,,*@@,,,,,,,,,,,,,,,,,,,,,,/&@@@(...............*@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@/.........*@@,,,,(&@@@@@#,,,,,,,,,,,,,%@%,,,,#@&,,,,,,,,*&@@@@@@@@%,,,,,,@@(............*%@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@&.........*@@*@@@@(/***(@%,,,,,,,,,,,,,@@/,,,@@/,,,,#@@@@@&%%%%%%@@@(,,,,*@@,.........#@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@#...........*@@@&,,,,*(###(*,,,,,,,,,,,,,,@@@#@@%/&@@@@(,@@%%%%%%%%%%@@*,,,,&@(......,&@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@&/.............(@@,,,,@@@@@@@@@@@@@@@&(*,,,,*(@@@@@@@#*@@#./@@%%%%%%%%%%@@#,,,,&@(....,&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@(...............,@@,,,,@@&%%%%%%%%%@@/*(@@@@@@@@#/,%@@,.#@@@@@@%%%%%%%%%%%@@/,,,,@@*...#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@/................./@@,,,*@@%%%%%%%%%%&@@#@@@@..#@@@/,@@@@@@@%%%%%%%%%%%%%%%&@@,,,,*@@....%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@%,................(@@@@@,,,,@@&%%%%%%%%%%%&&&%&@@@@&%@@@@&%%%%%%%%%%%%%%%%%%%&@@#,,,,@@@@@(...(@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@*.............(@@%,,%@&,,,*@@&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@@&,,,,@@#,,#@@(...,@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@&/,.......@@*,,,,&@&,,,*@@@%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&@@&,,,*@@@*,,,,@@(....,%@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@%,...&@/,,,,/@@@/,,,#@@%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@@@,,,,%@@@@,,,,,#@%.......,%@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#,/@@*,,,,@@@@&,,,/@@&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&@@#,,,*@@%(@@,,,,,&@/........../@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*.#@@/,,,@@#*@@@#,,#@@@%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&%%%@@&,,,,,@@,,&@%,,,,#@@.....*#&@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.../@@@*,(@@,,,%@@&,,#@@@&%%%%%%&&%%%%%%%%%%&@@%@@@%@@@@@*,,,,,,,,,,@@*,,(@@&,%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@(.....,@@@@@@(,,,,*&/,,,,&@@@@&&@@&@@%&@@@@&@@#&@@@,,&@@*,,,,,,,,,,,#@@@@@@&,.*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.........,%@@@,,,,,,,,,,,,,,(@@@@@%/@@@(.,@@@/.*#@@@@&,,,,,,,,,,,,,,@@#/,......@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@,............&@&,,,,,,,,,,%@@&/@@*/%@@@@@@@@@@@@@%(,,,*/,,,,,,,,,,,,&@&........./@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@,.............,@@(,,,,,,,,,,,%@@@@#*@@%....(@&,,,,,*%@@@@,,,,,,,,,,,/@@*..........@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@/...............*@@/,,,,,,,,,,,,,,&@@@@@&...&@@@@@@@@&/,,,,,,,,,,,,,,@@/...........(@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%..*@@&,,,,,,,,,,,,,,,,,,%@@@@@%,,,,,,,,,,,,,,,,,,,,,,@@(,&@#*........&@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*...,@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,#@@(.,@@@@@@@@@/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&....../@@%,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@*...@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*....*%@@@@@(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,#@@@@*...&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@(%@@@@@@@@@@@@*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*@@@@@@@@@(%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%,,,,,,,,,,,,,,,,,,,,,,,,,,,,,#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#,,,,,,,,,,,,,,,,,,,,,/&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%#///////#%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" echo "${rest}" else echo "${red}" echo "degugging found - please fix before commiting" echo "${reset}" exit 1 fi
true
77575ecd89d02ba09bf034ae08ec90f2e3a42dd6
Shell
fcenobi/hypersocket-vpn
/client-network/src/main/resources/hs-socket-redirector
UTF-8
1,642
4.40625
4
[]
no_license
#!/bin/bash # # Simple front-end to iptables to redirect a destination to the client service # # By default, the script DELETES all the hypersocket added rules, use --add as the # first argument to add a new rule. # # Usage:0 # # [--add] <sourceIpOrHostname> <sourcePort> <destinationIp> <destinationPort> # The comment indicating Hypersocket temporary rules. This might save someone # accidentally saving firewall configuration while HS is running, and also # helps us find the rules to delete (cleaning up on the next connection # if the client crashes for example) COMMENT="#HYPERSOCKET-CLIENT-TEMPORARY-RULE-DO-NOT-SAVE" apply_rules() { status=0 # Apply PREROUTING rule if ! iptables -t nat -"${5}" PREROUTING -p tcp -d "${1}" --dport "${2}" -m comment --comment "${COMMENT}" -j DNAT --to-destination "${3}":"${4}" ; then echo "$0: PREROUTING rule ${1}:${2} -> ${3}:${4} failed." >&2 status=1 fi # Because we are redirecting to localhost, OUTPUT chain is also needed if ! iptables -t nat -"${5}" OUTPUT -p tcp -d "${1}" --dport "${2}" -m comment --comment "${COMMENT}" -j DNAT --to-destination "${3}":"${4}" ; then echo "$0: OUTPUT rule ${1}:${2} -> ${3}:${4} failed." >&2 status=1 fi return ${status} } # Insert rules by default option=D # Parse any options if [ "$1" == "--add" ]; then shift option=I elif [ "$1" == "--clear" ]; then shift option=C fi # Add the new rule if [ "${option}" = I ]; then apply_rules $* D >/dev/null 2>&1 apply_rules $* ${option} elif [ "${option}" = C ]; then iptables-save | grep -v "${COMMENT}" | iptables-restore elif [ "${option}" = D ]; then apply_rules $* D fi
true
697eab2ce5d10fa422932db93835016bb45d1c22
Shell
jahkeup/openstack-containers
/shared/preinstall.sh
UTF-8
1,118
2.8125
3
[]
no_license
#!/usr/bin/env bash # This script prepares a common openstack env for isolated services # This script is meant for use with Dockerfile and images. # Install and configure SSH + Common Deps apt-get install -y openssh-server sudo traceroute dnsutils less vim net-tools \ python-MySQLdb python-pip telnet # Make sshd login behave properly sed -i 's/session required pam_loginuid.so/#session required pam_loginuid.so/' /etc/pam.d/sshd mkdir /var/run/sshd # Add remote user useradd cloud -m -s /bin/bash echo 'cloud:$6$I0AFz22S$.8ffn3MMC43/2RvCABYIO5IEBscA0tO2R3JYPStYOoOC6pfIRS1ybpT0ReGrv6y1pzDY9b/lO.jjf08SrsU0f1' | chpasswd -e echo "cloud ALL=NOPASSWD: ALL" >> /etc/sudoers.d/cloud_user # Create groups groupadd -g 220 keystone groupadd -g 221 nova groupadd -g 222 neutron groupadd -g 223 cinder groupadd -g 224 glance # Crea†e users useradd -u 220 -g 220 -d /var/lib/keystone keystone useradd -u 221 -g 221 -d /var/lib/nova nova useradd -u 222 -g 222 -d /var/lib/neutron neutron useradd -u 223 -g 223 -d /var/lib/cinder cinder useradd -u 224 -g 224 -d /var/lib/glance glance
true
fcac869f9b1aa45005cc70ffb22345284b15de79
Shell
december454/Bash-Scripting
/Tutorials/learnShell-Basics/shellFunctions
UTF-8
808
4.125
4
[]
no_license
#! /bin/bash #Like other programming languages, bash allows for the implementation of functions. #These subroutines are convenient for repeated tasks. They use the following formats: #function funcName { # command #} #funcname() [ # command #} #funcName{ #command #} #Functions can be called by writing their names. A function call is equivelent to a command. #Parameters can also be passed to a function. Within the funtion, the first parameter is 41, the second is $2, and so on. function functionB { echo "Called functionB" } function functionA { echo "You called functionA with parametr $1" } function adder { echo "$(($1 + $2))" } #Function calls: functionA "Hello There" #Calling function a with a parameter. functionB #Calling functionB. adder 4 8 #Calling adder with a set of parameters.
true
39c9ad861bd73fe329349190379889204c65fbd0
Shell
acocalypso/madrom
/device/khadas/kvim2/mkern.sh
UTF-8
1,220
3.25
3
[]
no_license
#!/bin/bash -ex # Run from top of kitkat source #ROOTFS=$1 ROOTFS="out/target/product/kvim2/ramdisk.img" PREFIX_CROSS_COMPILE=/opt/toolchains/gcc-linaro-4.9.4-2017.01-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- if [ "$ROOTFS" == "" -o ! -f "$ROOTFS" ]; then echo "Usage: $0 <ramdisk.img> [m]" exit 1 fi KERNEL_OUT=out/target/product/kvim2/obj/KERNEL_OBJ #mkdir -p $KERNEL_OUT if [ ! -f $KERNEL_OUT/.config ]; then make -C common O=../$KERNEL_OUT kvim2_defconfig ARCH=arm64 CROSS_COMPILE=$PREFIX_CROSS_COMPILE fi make -C common O=../$KERNEL_OUT ARCH=arm64 -j6 CROSS_COMPILE=$PREFIX_CROSS_COMPILE modules Image.gz if [ "$2" != "m" ]; then # make -C common O=../$KERNEL_OUT kvim2.dtd ARCH=arm64 CROSS_COMPILE=$PREFIX_CROSS_COMPILE make -C common O=../$KERNEL_OUT kvim2.dtb ARCH=arm64 CROSS_COMPILE=$PREFIX_CROSS_COMPILE PARTITION_DTSI=partition_mbox.dtsi fi if [ "$2" != "m" ]; then out/host/linux-x86/bin/mkbootimg --kernel common/../$KERNEL_OUT/arch/arm64/boot/Image.gz \ --base 0x0 \ --kernel_offset 0x1080000 \ --ramdisk ${ROOTFS} \ --output ./out/target/product/kvim2/boot.img ls -l ./out/target/product/kvim2/boot.img echo "boot.img done" fi
true
bda5539cbefd73a32f30c22b3f1c4fa991213827
Shell
randstuff/vcure
/vcure.sh
UTF-8
3,117
3.8125
4
[]
no_license
#!/bin/bash RootDir="$HOME/data" ArchivedDir="archived" Trash="trash" declare -a arr=("btceur" "xrpeur" "ltceur" "etheur") #declare -a arr=("btceur" "xrpeur") shopt -s dotglob ############################################################################### Collect () { n=0 while [ 42 ]; do for i in "${arr[@]}" ; do GetData $i sleep 1 if ! (( n % 2048)); then Archive fi ((n++)) done sleep 1 done } GetData () { cDate=`date '+%y%m%d-%H%M%S'` mkdir -p $RootDir/$1/ echo "Current call : https://www.bitstamp.net/api/v2/ticker/$1/" echo "Dumped to : " $RootDir/$1/$cDate.json curl -XGET https://www.bitstamp.net/api/v2/ticker/$1/ >> $RootDir/$1/$cDate.json echo "\r" >> $RootDir/$1/$cDate.json } ############################################################################### Archive () { mkdir ${ArchivedDir} for cVirtMoney in "${arr[@]}" ; do cDir=$RootDir"/"$cVirtMoney"/" echo "Processing "$cVirtMoney " at " $cDir for d in $(ls -f $cVirtMoney | cut -d "-" -f1 | grep -v -E "(\.)|(\..)" | sort -u) ; do cDay=`echo $d | cut -d "/" -f1` cInputPath="./"$cVirtMoney"/"$d cOutputPath="./"${ArchivedDir}"/"${cVirtMoney}"/"${cDay}"/" mkdir -p $cOutputPath mv -vf $cInputPath* $cOutputPath echo " [" $cInputPath"*] files archived at " ${cOutputPath} done done } ############################################################################### CleanUp () { for i in "${tmp[@]}" ; do mv $i $Trash done } ReIndex () { mkdir -p "./"$Trash for cVirtMoney in "${arr[@]}" ; do mkdir -p "./"$cVirtMoney done for cVirtMoney in "${arr[@]}" ; do n=0 for f in $(find ./$ArchivedDir -name "*.json" ); do echo $f mv $f $RootDir/$cVirtMoney/ tmp[$n]=$f if ! 
(( n % 420)); then CleanUp $tmp sleep 180 fi ((n++)) done done } ############################################################################### die() { local _ret=$2 test -n "$_ret" || _ret=1 test "$_PRINT_HELP" = yes && print_help >&2 echo "$1" >&2 exit ${_ret} } begins_with_short_option() { local first_option all_short_options all_short_options='th' first_option="${1:0:1}" test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0 } ############################################################################### print_help () { printf "%s\n" "vcure v0.1 help : " printf 'Usage: %s [option] or [-h|--help]\n' "$0" printf "\n" printf "\t%s\n" "-a,--archive: Archive collected data" printf "\t%s\n" "-c,--collect: Collected the data" printf "\t%s\n" "-r,--reindex: Force reindex archived data" printf "\t%s\n" "-h,--help: Prints help" printf "\n" } parse_commandline () { while test $# -gt 0 do _key="$1" case "$_key" in -a|--archive) Archive exit 0 ;; -c|--collect) Collect exit 0 ;; -r|--reindex) ReIndex exit 0 ;; -h|--help) print_help exit 0 ;; *) _PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1 ;; esac shift done } parse_commandline "$@"
true
4f46d88477871ff65581be20722f5d5a4f1a8e80
Shell
rainwoodman/conda-channel-rainwoodman
/recipe-templates/ngspice/build.sh
UTF-8
299
2.75
3
[]
no_license
#!/bin/bash set -x if [[ $OSTYPE == darwin* ]]; then export CFLAGS="-headerpad_max_install_names" export LDFLAGS="${LDFLAGS} -headerpad_max_install_names" export CXXFLAGS="$CFLAGS" fi ./configure --prefix=$PREFIX --with-x --with-readline --enable-static make -j$CPU_COUNT make install
true
0e64ee6a37125f00999b96cd361d242e48120663
Shell
imclab/politicalframing
/aws.sh
UTF-8
3,331
3.25
3
[]
no_license
# Dokku Setup on EC2 Instructions # Because our AWS instance is associated with an elastic IP address, the PFURL should persist # across deleting and re-creating the instance given the elastic IP is reassociated. # Please note when doing anything that may change the instance's public IP (e.g. re-creating # or upgrading) you will recieve an error "WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!". # It will be accompanied with a line in known_hosts to remove such as: # Offending RSA key in /Users/atul/.ssh/known_hosts:18 # Simply delete this line and repeat the command. # Remember that the PEM file must have permissions 400. # chmod 400 politicalframing.pem # Attempted to create a profile.d folder with environment variables but that didn't seem to work. export PFURL=aljohri.com export PFPEM=~/Desktop/politicalframing.pem export APP=alpha alias pf="ssh -i $PFPEM ubuntu@$PFURL" function pflogs() { if [ -z "$1" ]; then echo "missing app name" echo "usage: pflogs app" return -1 fi pf "dokku logs $1 -t" } function pfcreate() { if [ -z "$1" ]; then echo "missing app name" echo "usage: pfcreate app" return -1 fi git remote add $APP "dokku@$PFURL:$1" } function pfpush() { if [ -z "$1" ]; then echo "missing app name" echo "usage: pfush app" return -1 fi if [ "$1" = "-h" ]; then echo "usage: pfush app" echo "note: if 'app' is a branch in git" echo "pfpush will push the branch named 'app' to aws" echo "otherwise, it will push the master branch" return -1 fi if ! 
git remote | grep -qi $1; then echo "$1 is not a remote of this repo" echo "check git remote -v" return -1 fi if git branch | grep -qi $1; then git push $1 $1:master else git push $1 master fi } pf "echo 'export PFURL=$PFURL' >> ~/.bashrc" pf "wget -qO- https://raw.github.com/progrium/dokku/v0.2.1/bootstrap.sh | sudo bash" pf "export PFURL=$PFURL; sudo sh -c 'echo $PFURL > /home/dokku/VHOST'" pf "sudo git clone https://github.com/statianzo/dokku-shoreman.git /var/lib/dokku/plugins/dokku-shoreman" pf "sudo git clone https://github.com/Kloadut/dokku-pg-plugin /var/lib/dokku/plugins/postgresql" pf "sudo git clone https://github.com/luxifer/dokku-redis-plugin /var/lib/dokku/plugins/redis" # pf "sudo git clone https://github.com/jeffutter/dokku-postgresql-plugin.git /var/lib/dokku/plugins/postgresql" # pf "sudo git clone https://github.com/jeffutter/dokku-mongodb-plugin.git /var/lib/dokku/plugins/mongodb" pf "sudo dokku plugins-install" cat ~/.ssh/id_rsa.pub | pf "sudo sshcommand acl-add dokku progrium" pf "dokku config:set $APP HEROKU=1" pf "dokku config:set $APP C_FORCE_ROOT=true" pf "sudo dokku postgresql:create $APP" pf "sudo dokku redis:create $APP" pf "dokku run $APP python manage.py createdb" # 1 GB swap space # pf "sudo /bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024" # pf "sudo /sbin/mkswap /var/swap.1" # pf "sudo /sbin/swapon /var/swap.1" # Information # EC2 Instance AMI: ami-ef795786 # EC2 Instance AMI url: https://console.aws.amazon.com/ec2/home?region=us-east-1#launchAmi=ami-ef795786 # EC2 Instance Finder: http://cloud-images.ubuntu.com/locator/ec2/ # Search Parameters: Ubuntu 13.04 raring amd64 ebs us-east-1 # https://github.com/progrium/dokku/wiki/Recipes#specifying-a-custom-buildpack
true
26744764889d7a08818d4ed5cfa4e8e085275c9f
Shell
dafrito/scaf
/types.d/mit
UTF-8
658
3.875
4
[ "MIT" ]
permissive
#!/bin/bash PATH=/bin:/usr/bin # Description: Generate a license file (defaults to LICENSE) die() { echo $* >&2 exit 1; } realname() { if [ -n "$OWNER" ]; then echo $OWNER else # Extract a name from the passwd file grep "^`whoami`" /etc/passwd | cut -d: -f5 fi; } if [ $# = 0 ]; then set LICENSE fi; for f in $*; do mkdir -p `dirname $f` if [ -e $f ]; then echo "$f already exists, skipping." else cp $SCAFDATA/MIT.license $f || die sed -i -re "s/@YEAR@/`date +%Y`/g" \ -e "s:@OWNER@:`realname`:g" \ $f; fi; done; # vim: set ts=4 sw=4 et :
true
68faf436d64f7e1e9800a6f27015af8b3cd91779
Shell
ParomovEvg/micro-courses
/kill.bash
UTF-8
121
2.90625
3
[]
no_license
pids=$(ps aux | grep -i nodemon | grep -v grep | awk '{print $2}') for pid in $pids do echo "$pid" kill "$pid" done
true
cfc902ffd90a3d7799c1d806e186289001918cc1
Shell
Caligone/highLowGameAlexaSkill
/deploy.sh
UTF-8
309
3.03125
3
[ "MIT" ]
permissive
#!/bin/sh die () { echo >&2 "$@" exit 1 } zip -rq1 target.zip lib/ index.js node_modules/ package.json yarn.lock [ "$#" -eq 1 ] || die "Usage: ./deploy.sh [arn]" aws lambda update-function-code \ --function-name $1 \ --zip-file fileb://target.zip \ --region eu-west-1 rm -r target.zip
true
004bd1ca58b07f5fcab1ae0fe3ddd0930a712b43
Shell
gerritschoe/TowerBuildingBaxter_EECS_206A
/start.sh
UTF-8
1,396
2.921875
3
[]
no_license
#!/bin/sh # First of all make sure you have created a link to the baxter.sh shell script: # ln -s /scratch/shared/baxter_ws/baxter.sh # Connect to the robot: # ./baxter.sh [robotname].local #./baxter.sh asimov.local #./baxter.sh archytas.local echo "-> Starting awsome game ->" echo "-> Start Camera setup done" # Activate camera echo "Closing all cameras...." rosrun baxter_tools camera_control.py -c left_hand_camera rosrun baxter_tools camera_control.py -c right_hand_camera rosrun baxter_tools camera_control.py -c head_camera echo "Turning on right hand camera..." #rosrun baxter_tools camera_control.py -o right_hand_camera rosrun baxter_tools camera_control.py -o left_hand_camera echo "<- StartCamera setup done" # Enable Baxter: #rosrun baxter_tools enable_robot.py -e # Starting jointTrajectoryServer.sh # Starting moveGroup.sh echo "Starting main.launch" roslaunch brain main.launch #echo "<- Ending awsome game <-" ############################################### # HOW TO SET UP THE SERVICES MANUALLY: # close cameras and activate left hand camera # rosrun baxter_tools enable_robot.py -e # rosrun baxter_interface joint_trajectory_action_server.py # roslaunch baxter_moveit_config move_group.launch # this one works on archytas, not on Asimov # roslaunch ar_track_alvar webcam_track.launch # roslaunch brain main.launch echo "Reached the End of the start.sh file"
true
1bc9d59b521ad92e5dd5fb6f371a4b1a6b0ba137
Shell
djkormo/ContainersSamples
/Kubernetes/labs.play-with-k8s.com/kubernetes_start.sh
UTF-8
2,252
2.9375
3
[]
no_license
#!/bin/bash -x ## Setup http://labs.play-with-k8s.com/ # # Inspired heavily by: https://gist.github.com/jjo/78f60702fbfa1cbec7dd865f67a3728a # Some dev-tools omitted, changes in parenthesis. # # Run with: # bash -x <( curl -L url-to-raw-gist ) # this shell file # https://raw.githubusercontent.com/djkormo/ContainersSamples/master/Kubernetes/labs.play-with-k8s.com/kubernetes_start.sh # bash -x <( curl -L https://raw.githubusercontent.com/djkormo/ContainersSamples/master/Kubernetes/labs.play-with-k8s.com/kubernetes_start.sh ) # Initialize cluster and FIXUP some play-with-k8s annoyances (fixed kube-dashboard shortlink, update port-number) test -d /etc/kubernetes/pki || ( # run kubeadm kubeadm init --apiserver-advertise-address $(hostname -i) | tee ~/kubeadm-init.log # apply weave cni kubectl apply -n kube-system -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 -w0)" # apply dashboard curl -L -s https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml | sed 's/targetPort: 8443/targetPort: 8443\n type: LoadBalancer/' | kubectl apply -f - # add Google's 8.8.8.8 dns kubectl get deployment --namespace=kube-system kube-dns -oyaml|sed -r 's,(.*--server)=(/ip6.arpa/.*),&\n\1=8.8.8.8,'|kubectl apply -f - # add service account to dashboard, from https://gist.github.com/figaw/17dc8ed72c8d2fe1a12682beb9c1e57e # this gives anyone with access to the dashboard the cluster-admin role.. so.. clearly this is for development. 
kubectl create -f https://raw.githubusercontent.com/djkormo/ContainersSamples/master/Kubernetes/labs.play-with-k8s.com/service-account.yaml ) # allowing to deploy to master node kubectl taint nodes --all node-role.kubernetes.io/master- # k8s comfy'ness (add emacs) cd yum -q -y install bash-completion git-core tmux vim emacs wget sudo which nano mc lynx > /dev/null kubectl completion bash > /etc/bash_completion.d/kubectl.completion source /etc/bash_completion.d/kubectl.completion # show kubeadm join ... echo "* Join nodes with:" grep -o "kubeadm.*join.*" ~/kubeadm-init.log # (master shouldn't join # kubeadm join --token $(kubeadm token list |sed -n 2p|egrep -o '^\S+') $(sed -rn s,.*server:.*//,,p /etc/kubernetes/admin.conf)
true
63858d97fd3cfed393623b1ac65be4044d5481e7
Shell
itsbcit/openshift-dovecot
/mysql/80-dovecot-cleanpid.sh
UTF-8
134
2.875
3
[ "Apache-2.0" ]
permissive
PID_DOVECOT="/run/dovecot/master.pid" if [ -f ${PID_DOVECOT} ]; then echo "removing: ${PID_DOVECOT}" rm -f ${PID_DOVECOT} fi
true
1b612da09d24e92ef592764192a3ae8040675f2d
Shell
frfuan2021asix2/asix2_fuentes_fran_m06uf2pr2
/compt_fitx.sh
UTF-8
298
2.953125
3
[]
no_license
#!/bin/bash #primero imprime en pantalla y luego captura el teclado echo "Ingresa el directorio de origen: " read dir1 #comprime el directorio tar -cvf Archivo_Comprimido.tar $dir1 if [ -s $dir1 ] then echo "Ingresa el directorio de llegada: " read dir2 fi cp -rf Archivo_Comprimido.tar $dir2
true
4f082996e30961018be363ccaf804f308f0a5a38
Shell
EvgSumina/mmp_prac_spring_2021
/hw_4/run.sh
UTF-8
5,930
2.71875
3
[]
no_license
SOURCE_BASE_PATH="/Sumina_Eugenia_hw_4" TMP_FOLDER_TASK1="/Sumina_Eugenia_hw_4/task1" TMP_FOLDER_TASK2="/Sumina_Eugenia_hw_4/task2" TMP_FOLDER_TASK3="/Sumina_Eugenia_hw_4/task3" TMP_FOLDER_TASK4="/Sumina_Eugenia_hw_4/task4" TMP_FOLDER_TASK5="/Sumina_Eugenia_hw_4/task5" TMP_FOLDER_TASK6="/Sumina_Eugenia_hw_4/task6" INPUT_HADOOP_DIR="/Sumina_Eugenia_hw_4/input" OUTPUT_HADOOP_DIR="/Sumina_Eugenia_hw_4/output" HADOOP_STREAMING_PATH="${HADOOP_HOME}/share/hadoop/tools/lib/hadoop-streaming-3.2.1.jar" hdfs dfs -test -d ${INPUT_HADOOP_DIR} if [ $? -eq 0 ]; then echo "Remove ${INPUT_HADOOP_DIR}" hdfs dfs -rm -r ${INPUT_HADOOP_DIR} fi hdfs dfs -test -d ${OUTPUT_HADOOP_DIR} if [ $? -eq 0 ]; then echo "Remove ${OUTPUT_HADOOP_DIR}" hdfs dfs -rm -r ${OUTPUT_HADOOP_DIR} fi test -d ${SOURCE_BASE_PATH}/data/output if [ $? -eq 0 ]; then echo "Remove ${SOURCE_BASE_PATH}/data/output" rm -rf ${SOURCE_BASE_PATH}/data/output fi test -d ${TMP_FOLDER_TASK1} if [ $? -eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK1}" hdfs dfs -rm -r ${TMP_FOLDER_TASK1} fi test -d ${TMP_FOLDER_TASK2} if [ $? -eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK2}" hdfs dfs -rm -r ${TMP_FOLDER_TASK2} fi test -d ${TMP_FOLDER_TASK3} if [ $? -eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK3}" hdfs dfs -rm -r ${TMP_FOLDER_TASK3} fi test -d ${TMP_FOLDER_TASK4} if [ $? -eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK4}" hdfs dfs -rm -r ${TMP_FOLDER_TASK4} fi test -d ${TMP_FOLDER_TASK5} if [ $? -eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK5}" hdfs dfs -rm -r ${TMP_FOLDER_TASK5} fi test -d ${TMP_FOLDER_TASK6} if [ $? 
-eq 0 ]; then echo "Remove ${TMP_FOLDER_TASK6}" hdfs dfs -rm -r ${TMP_FOLDER_TASK6} fi hdfs dfs -mkdir -p ${INPUT_HADOOP_DIR} hdfs dfs -copyFromLocal ${SOURCE_BASE_PATH}/data/input/ratings.csv ${INPUT_HADOOP_DIR}/ratings hdfs dfs -copyFromLocal ${SOURCE_BASE_PATH}/data/input/movies.csv ${INPUT_HADOOP_DIR}/movies chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_1.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_1.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_1.py -reducer src/reducer_1.py \ -input ${INPUT_HADOOP_DIR}/* -output ${TMP_FOLDER_TASK1} \ " hadoop jar ${HADOOP_STREAMING_PATH} ${hadoop_streaming_arguments} chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_2.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_2.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_2.py -reducer src/reducer_2.py \ -input ${TMP_FOLDER_TASK1}/* -output ${TMP_FOLDER_TASK2} \ " hadoop jar ${HADOOP_STREAMING_PATH} ${hadoop_streaming_arguments} chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_3.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_3.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_3.py -reducer src/reducer_3.py \ -input ${INPUT_HADOOP_DIR}/ratings/* ${TMP_FOLDER_TASK2}/* -output ${TMP_FOLDER_TASK3} \ " hadoop jar ${HADOOP_STREAMING_PATH} ${hadoop_streaming_arguments} chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_4.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_4.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D 
mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_4.py -reducer src/reducer_4.py \ -input ${TMP_FOLDER_TASK3}/* -output ${TMP_FOLDER_TASK4} \ " hdfs dfs -copyToLocal ${OUTPUT_HADOOP_DIR} ${SOURCE_BASE_PATH}/data chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_5.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_5.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_5.py -reducer src/reducer_5.py \ -input ${INPUT_HADOOP_DIR}/ratings/* ${TMP_FOLDER_TASK4}/* -output ${TMP_FOLDER_TASK5} \ " hdfs dfs -copyToLocal ${OUTPUT_HADOOP_DIR} ${SOURCE_BASE_PATH}/data chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_6.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_6.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D mapred.text.key.comparator.options=k1,1 \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_6.py -reducer src/reducer_6.py \ -input ${INPUT_HADOOP_DIR}/movies/* ${TMP_FOLDER_TASK5}/* -output ${TMP_FOLDER_TASK6} \ " hdfs dfs -copyToLocal ${OUTPUT_HADOOP_DIR} ${SOURCE_BASE_PATH}/data chmod 0777 ${SOURCE_BASE_PATH}/src/mapper_7.py chmod 0777 ${SOURCE_BASE_PATH}/src/reducer_7.py hadoop_streaming_arguments="\ -D mapred.reduce.tasks=8 \ -D mapreduce.map.java.opts=-Xmx4g \ -D mapreduce.reduce.java.opts=-Xmx4g \ -D stream.num.map.output.key.fields=3 \ -D mapreduce.partition.keycomparator.options='-k1,1-k3,3nr-k2,2' \ -files ${SOURCE_BASE_PATH}/src \ -mapper src/mapper_7.py -reducer src/reducer_7.py \ -input ${TMP_FOLDER_TASK6}/* -output ${OUTPUT_HADOOP_DIR} \ " hdfs dfs -copyToLocal ${OUTPUT_HADOOP_DIR} ${SOURCE_BASE_PATH}/data hdfs dfs -rm -r ${INPUT_HADOOP_DIR} hdfs dfs -rm -r ${OUTPUT_HADOOP_DIR} hdfs dfs -rm -r ${TMP_FOLDER_TASK1} hdfs dfs -rm -r 
${TMP_FOLDER_TASK2} hdfs dfs -rm -r ${TMP_FOLDER_TASK3} hdfs dfs -rm -r ${TMP_FOLDER_TASK4} hdfs dfs -rm -r ${TMP_FOLDER_TASK5} hdfs dfs -rm -r ${TMP_FOLDER_TASK6}
true
9bbf55b94eb0af76b15c49568b114a57a87ce08a
Shell
SchrodingerZhu/StanfordLib-CUHKSZ
/misc/generate-mac.sh
UTF-8
553
2.578125
3
[]
no_license
#!/usr/bin/env sh set -e mkdir -p dist/libs cp -r res dist cp -r src dist cp misc/template.cmake dist/CMakeLists.txt cp misc/template.qmake dist/my_project.pro cp misc/lib.conf dist/libs cp "build/libstanford.dylib" dist/libs # Generate Header files mkdir -p dist/includes/stanford mkdir -p dist/includes/abseil cp -r abseil-cpp/absl dist/includes/abseil cd StanfordCPPLib && rsync -R */*.h ../dist/includes/stanford && cp macro.h ../dist/includes/stanford &&\ cp images.qrc ../dist/includes/stanford && cd .. zip -9 -r "x86_64-darwin-clang.zip" dist
true
3c847ace181c7652afbf3f00d885d128c1d43bbe
Shell
phmullins/knowledge
/scripts/makepass.sh
UTF-8
400
3.0625
3
[]
no_license
#!/bin/bash # # Script Name: ~/bin/makepass.sh # Description: Super easy way of generating a random 24 character password. # Created: 2017-12-21 @ 10:28 AM | Modified 2018-11-23 @ 09:24 AM. # Author: patrick@arkmail.us # # Generates a random 24 character password. Change the number increase or decrease # the length of the password. # echo `env LC_CTYPE=C tr -dc "a-zA-Z0-9-_\$\?" < /dev/urandom | head -c 24`
true
a9a181c52cb75073d67fe69946c3805339b3af7c
Shell
prometheus/test-infra
/tools/prometheus-builder/build.sh
UTF-8
988
3.5625
4
[ "Apache-2.0" ]
permissive
#!/bin/bash DIR="/go/src/github.com/prometheus/prometheus" if [[ -z $PR_NUMBER || -z $VOLUME_DIR || -z $GITHUB_ORG || -z $GITHUB_REPO ]]; then echo "ERROR:: environment variables not set correctly" exit 1; fi echo ">> Cloning repository $GITHUB_ORG/$GITHUB_REPO" if ! git clone https://github.com/$GITHUB_ORG/$GITHUB_REPO.git $DIR; then echo "ERROR:: Cloning of repo $GITHUB_ORG/$GITHUB_REPO failed" exit 1; fi cd $DIR || exit 1 echo ">> Fetching Pull Request $GITHUB_ORG/$GITHUB_REPO/pull/$PR_NUMBER" if ! git fetch origin pull/$PR_NUMBER/head:pr-branch; then echo "ERROR:: Fetching of PR $PR_NUMBER failed" exit 1; fi git checkout pr-branch echo ">> Creating prometheus binaries" if ! make build PROMU_BINARIES="prometheus"; then echo "ERROR:: Building of binaries failed" exit 1; fi echo ">> Copy files to volume" cp prometheus $VOLUME_DIR/prometheus cp -r console_libraries/ $VOLUME_DIR cp -r consoles/ $VOLUME_DIR
true
1cc00479bc2dc606269fcc6eb800f1fcef3ce7d7
Shell
ourobouros/k8s
/test/test-infra/airflow/entrypoint.sh
UTF-8
2,395
3.953125
4
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash # # A startup script for Airflow. # # Based on # https://raw.githubusercontent.com/puckel/docker-airflow/master/script/entrypoint.sh AIRFLOW_HOME="/usr/local/airflow" CMD="airflow" TRY_LOOP="20" # TODO(jlewi): This is currently getting overwritten in the YAML file. : ${POSTGRES_HOST:="postgres"} : ${POSTGRES_PORT:="5432"} : ${POSTGRES_USER:="airflow"} : ${POSTGRES_PASSWORD:="airflow"} : ${POSTGRES_DB:="airflow"} # TODO(jlewi): Should we make the key into the Docker image rather than doing # it on startup? : ${FERNET_KEY:=$(python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)")} # Update airflow config - Fernet key sed -i "s|\$FERNET_KEY|$FERNET_KEY|" "$AIRFLOW_HOME"/airflow.cfg # Wait for Postresql # TODO(jlewi): If we are just using LocalExecutor we should always be starting # the webserver so we could maybe eliminate the if statement and simplify # this. if [ "$1" = "webserver" ] || [ "$1" = "worker" ] || [ "$1" = "scheduler" ] ; then i=0 while ! nc -z $POSTGRES_HOST $POSTGRES_PORT >/dev/null 2>&1 < /dev/null; do i=$((i+1)) if [ "$1" = "webserver" ]; then echo "$(date) - waiting for ${POSTGRES_HOST}:${POSTGRES_PORT}... $i/$TRY_LOOP" if [ $i -ge $TRY_LOOP ]; then echo "$(date) - ${POSTGRES_HOST}:${POSTGRES_PORT} still not reachable, giving up" exit 1 fi fi sleep 10 done fi if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then echo GOOGLE_APPLICATION_CREDENTIALS=${GOOGLE_APPLICATION_CREDENTIALS} # Configure gcloud to use this service account gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} fi # TODO(jlewi): Can we get get rid of the sed and just put this into our config # file? 
sed -i "s#sql_alchemy_conn = postgresql+psycopg2://airflow:airflow@postgres/airflow#sql_alchemy_conn = postgresql+psycopg2://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST:$POSTGRES_PORT/$POSTGRES_DB#" "$AIRFLOW_HOME"/airflow.cfg sed -i "s#broker_url = redis://redis:6379/1#broker_url = redis://$REDIS_PREFIX$REDIS_HOST:$REDIS_PORT/1#" "$AIRFLOW_HOME"/airflow.cfg echo "Initialize database..." $CMD initdb echo start the webserver exec $CMD webserver & # TODO(jlewi): How do we capture logs from the scheduler? Maybe we should # move it into its own container? echo start the scheduler exec $CMD scheduler
true
c0b784c08e31d9ba7e6177c6d21c7f7a4308bd7a
Shell
louismunro/configfiles
/.bashrc
UTF-8
1,070
3.203125
3
[]
no_license
# Source global definitions if [ -f /etc/bashrc ]; then . /etc/bashrc fi # set prompt if [ -n "$PS1" ]; then PS1='\[\033[1;33m\]\H \[\033[1;32m\]\u: \w\n${?##0}\$ \[\033[1;37m\]'; fi # VARIABLES export HISTCONTROL=ignoredups # ignore duplicate commands in history export EDITOR=/usr/bin/vim # set vi as default editor export PAGER=less # use less export LESS=im # set pager options (see man 1 less) # SHELL OPTIONS # Make bash check it's window size after a process completes shopt -s checkwinsize # prevent file clobbering set -o noclobber # Make Bash append rather than overwrite the history on disk shopt -s histappend # Whenever displaying the prompt, write the previous line to disk export PROMPT_COMMAND='history -a' # correct minor spelling errors in a cd command shopt -s cdspell # ksh-88 egrep-style extended pattern matching shopt -s extglob # complete ssh hostnames complete -W "$(echo `cat ~/.ssh/known_hosts | cut -f 1 -d ' ' | cut -f 1 -d, | sort -u | grep -v '\['`; )" ssh source ~/.bashfunc source ~/.bashalias source ~/.bashlocal
true
1da03a652151f84592d68843b7ea0fb75d312a59
Shell
dockcross/dockcross
/imagefiles/build-and-install-ninja.sh
UTF-8
997
4.3125
4
[ "MIT" ]
permissive
#!/usr/bin/env bash # # Configure, build and install ninja # # Usage: # # build-and-install-ninja.sh [-python /path/to/bin/python] set -e set -o pipefail PYTHON=python while [ $# -gt 0 ]; do case "$1" in -python) PYTHON=$2 shift ;; *) echo "Usage: Usage: ${0##*/} [-python /path/to/bin/python]" exit 1 ;; esac shift done if [[ -z "${NINJA_VERSION}" ]]; then echo >&2 'error: NINJA_VERSION env. variable must be set to a non-empty value' exit 1 fi # Download url="https://github.com/ninja-build/ninja/archive/v${NINJA_VERSION}.tar.gz" curl --connect-timeout 30 \ --max-time 10 \ --retry 5 \ --retry-delay 10 \ --retry-max-time 30 \ -# -o ninja.tar.gz -LO "$url" mkdir ninja tar -xzvf ./ninja.tar.gz --strip-components=1 -C ./ninja # Configure, build and install pushd ./ninja echo "Configuring ninja using [$PYTHON]" $PYTHON ./configure.py --bootstrap && ./ninja cp ./ninja /usr/bin/ popd # Clean rm -rf ./ninja*
true
8847105fca909661ab1bdaee8bc0c296b58cb926
Shell
hebda/ggAnalysis
/radion/scripts/LaunchData2012.sh
UTF-8
2,793
3.6875
4
[]
no_license
#!/bin/bash usage() { echo "`basename $0` -f filelist -c configfile -n njobs(default=1) [-i]" echo " -f filelist: filelist of input (e.g. scripts/ExampleDataDiphoton8TeVSkimEOS.list)" echo " -c configfile: config file (e.g. configFilesDir/mvaAnalysisTest.config)" echo " -i: interactive mode (by default job sent to batch system)" } mainConfigFile=notSpecified inFileList=notSpecified nJobs=1 interactive=0 if ! options=$( getopt -o hc:f:n:i -l help -- "$@" ) then # something went wrong, getopt will put out an error message for us exit 1 fi eval set -- $options while [ $# -gt 0 ]; do case "$1" in -h | --help) usage; exit 0;; -c) mainConfigFile=$2; shift;; -f) inFileList=$2; shift;; -n) nJobs=$2; shift;; -i) interactive=1;; (--) shift; break;; (-*) echo "$0: error - unrecognized option $1" 1>&2; usage >> /dev/stderr; exit 1;; (*) break;; esac shift done ###### print and check configuration echo "=================================" echo " - config file: " $mainConfigFile echo " - file list : " $inFileList echo " - njobs : " $nJobs echo " FIX ME: user has to verify that nJobs x nEvtPerJobs (in config) > nTot" if [ $mainConfigFile == "notSpecified" ]; then echo no good: $mainConfigFile usage; exit 1; elif [ ! -f $mainConfigFile ]; then echo " config file does not exist... bailout" exit 1; fi if [ $inFileList == "notSpecified" ]; then usage; exit 1; elif [ ! -f $inFileList ]; then echo " input filelist does not exist... 
bailout" exit 1; fi echo "=================================" #exit 0; optionsub="-q 1nd " dirafs=`pwd` dirscript=tmpBatchOut/ eosPref=root://eoscms//eos/cms eosPref="" config() { file=$1 script=${2} ijob=$3 config=$4 filelist="unknown" if [ $# -ge 5 ]; then filelist=${5} fi exe="runAna.C+(${ijob},\"$file\",\"${config}\")" exe="Make.C(${ijob},\"$file\",\"${config}\")" echo "$exe" cat > $script<<EOF #!/bin/bash cd $dirafs source scripts/env532.sh echo "castor: $dircastor" echo "ROOTSYS: \$ROOTSYS" gcccom="\`which gcc\`" echo "gcc:" \$gcccom echo "where am I:\`pwd\`" echo root -b -q rootlogon.C '$exe' root -b -q '$exe' EOF chmod +x $script } mkdir -p $dirscript configfile=$mainConfigFile let "nJobs=nJobs-1" for f in $( cat $inFileList ); do for ijob in $( seq 0 1 $nJobs ); do # echo " test file $f exist" # cmsLs $f # retval=$? # if [ $retval -eq 0 ]; then filename=`basename $f` script=script$$ script=${script}_${filename/root/sh}_${ijob} cd $dirscript/ config ${eosPref}${f} $script $ijob $configfile echo "-> created script: "$script if [ $interactive -eq 1 ]; then source $script else bsub $optionsub $script fi cd - # fi done done
true
b2981b89d92eb47b06e64b87da5b3b68a10ea56a
Shell
dulcet/nvidia-patch
/patch.sh
UTF-8
7,297
2.828125
3
[]
no_license
#!/bin/bash # halt on any error for safety and proper pipe handling set -euo pipefail ; # <- this semicolon and comment make options apply # even when script is corrupt by CRLF line terminators (issue #75) # empty line must follow this comment for immediate fail with CRLF newlines backup_path="/opt/nvidia/libnvidia-encode-backup" silent_flag='' rollback_flag='' print_usage() { printf ' SYNOPSIS patch.sh [OPTION]... DESCRIPTION The patch for Nvidia drivers to increase encoder sessions -s Silent mode (No output) -r Rollback to original (Restore lib from backup) -h Print this help message ' } while getopts 'rsh' flag; do case "${flag}" in r) rollback_flag='true' ;; s) silent_flag='true' ;; *) print_usage exit 1 ;; esac done if [[ $silent_flag ]]; then exec 1> /dev/null fi declare -A patch_list=( ["375.39"]='s/\x85\xC0\x89\xC5\x75\x18/\x29\xC0\x89\xC5\x90\x90/g' ["390.77"]='s/\x85\xC0\x89\xC5\x75\x18/\x29\xC0\x89\xC5\x90\x90/g' ["390.87"]='s/\x85\xC0\x89\xC5\x75\x18/\x29\xC0\x89\xC5\x90\x90/g' ["396.24"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["396.26"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["396.37"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' #added info from https://github.com/keylase/nvidia-patch/issues/6#issuecomment-406895356 # break nvenc.c:236,layout asm,step-mode,step,break *0x00007fff89f9ba45 # libnvidia-encode.so @ 0x15a45; test->sub, jne->nop-nop-nop-nop-nop-nop ["396.54"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.48"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.57"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.73"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' 
["410.78"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.79"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.93"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["410.104"]='s/\x85\xC0\x89\xC5\x0F\x85\x96\x00\x00\x00/\x29\xC0\x89\xC5\x90\x90\x90\x90\x90\x90/g' ["415.18"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["415.25"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["415.27"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.30"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.43"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.56"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.67"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x40\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.74"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["418.87.00"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["430.09"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["430.14"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["430.26"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["430.34"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["430.40"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' 
["435.17"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ["435.21"]='s/\x00\x00\x00\x84\xc0\x0f\x84\x0f\xfd\xff\xff/\x00\x00\x00\x84\xc0\x90\x90\x90\x90\x90\x90/g' ) declare -A object_list=( ["375.39"]='libnvidia-encode.so' ["390.77"]='libnvidia-encode.so' ["390.87"]='libnvidia-encode.so' ["396.24"]='libnvidia-encode.so' ["396.26"]='libnvidia-encode.so' ["396.37"]='libnvidia-encode.so' ["396.54"]='libnvidia-encode.so' ["410.48"]='libnvidia-encode.so' ["410.57"]='libnvidia-encode.so' ["410.73"]='libnvidia-encode.so' ["410.78"]='libnvidia-encode.so' ["410.79"]='libnvidia-encode.so' ["410.93"]='libnvidia-encode.so' ["410.104"]='libnvidia-encode.so' ["415.18"]='libnvcuvid.so' ["415.25"]='libnvcuvid.so' ["415.27"]='libnvcuvid.so' ["418.30"]='libnvcuvid.so' ["418.43"]='libnvcuvid.so' ["418.56"]='libnvcuvid.so' ["418.67"]='libnvcuvid.so' ["418.74"]='libnvcuvid.so' ["418.87.00"]='libnvcuvid.so' ["430.09"]='libnvcuvid.so' ["430.14"]='libnvcuvid.so' ["430.26"]='libnvcuvid.so' ["430.34"]='libnvcuvid.so' ["430.40"]='libnvcuvid.so' ["435.17"]='libnvcuvid.so' ["435.21"]='libnvcuvid.so' ) NVIDIA_SMI="$(command -v nvidia-smi)" if ! driver_version=$("$NVIDIA_SMI" --query-gpu=driver_version --format=csv,noheader,nounits | head -n 1) ; then echo 'Something went wrong. Check nvidia driver' exit 1; fi echo "Detected nvidia driver version: $driver_version" if [[ ! "${patch_list[$driver_version]+isset}" || ! "${object_list[$driver_version]+isset}" ]]; then echo "Patch for this ($driver_version) nvidia driver not found." 
1>&2 echo "Available patches for: " 1>&2 for drv in "${!patch_list[@]}"; do echo "$drv" 1>&2 done exit 1; fi patch="${patch_list[$driver_version]}" object="${object_list[$driver_version]}" declare -a driver_locations=( '/usr/lib/x86_64-linux-gnu' '/usr/lib/x86_64-linux-gnu/nvidia/current/' '/usr/lib64' "/usr/lib/nvidia-${driver_version%%.*}" ) dir_found='' for driver_dir in "${driver_locations[@]}" ; do if [[ -e "$driver_dir/$object.$driver_version" ]]; then dir_found='true' break fi done [[ "$dir_found" ]] || { echo "ERROR: cannot detect driver directory"; exit 1; } if [[ $rollback_flag ]]; then if [[ -f "$backup_path/$object.$driver_version" ]]; then cp -p "$backup_path/$object.$driver_version" \ "$driver_dir/$object.$driver_version" echo "Restore from backup $object.$driver_version" else echo "Backup not found. Try to patch first." exit 1; fi else if [[ ! -f "$backup_path/$object.$driver_version" ]]; then echo "Attention! Backup not found. Copy current $object to backup." mkdir -p "$backup_path" cp -p "$driver_dir/$object.$driver_version" \ "$backup_path/$object.$driver_version" fi sha1sum "$backup_path/$object.$driver_version" sed "$patch" "$backup_path/$object.$driver_version" > \ "${PATCH_OUTPUT_DIR-$driver_dir}/$object.$driver_version" sha1sum "${PATCH_OUTPUT_DIR-$driver_dir}/$object.$driver_version" ldconfig echo "Patched!" fi
true
ce4dee20374255e950890683ce60c43acb9f8dee
Shell
bioinfo-fcav/metagenomatic
/pipe_wgs_asm_pe.sh
UTF-8
20,980
3.140625
3
[]
no_license
#!/bin/bash # # INGLÊS/ENGLISH # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # http://www.gnu.org/copyleft/gpl.html # # # PORTUGUÊS/PORTUGUESE # Este programa é distribuído na expectativa de ser útil aos seus # usuários, porém NÃO TEM NENHUMA GARANTIA, EXPLÍCITAS OU IMPLÍCITAS, # COMERCIAIS OU DE ATENDIMENTO A UMA DETERMINADA FINALIDADE. Consulte # a Licença Pública Geral GNU para maiores detalhes. # http://www.gnu.org/copyleft/gpl.html # # Copyright (C) 2012 Universidade de São Paulo # # Universidade de São Paulo # Laboratório de Biologia do Desenvolvimento de Abelhas # Núcleo de Bioinformática (LBDA-BioInfo) # # Daniel Guariz Pinheiro # dgpinheiro@gmail.com # http://zulu.fmrp.usp.br/bioinfo # # Diretório onde está o diretório ./processed/prinseq com os arquivos .fastq processados input="$1" # Expressão regular para distinguir grupos de arquivos para a montagem re="$2" # Nome para os contigs/scaffolds montados asmname="$3" # Número de threads disponíveis para os processos threads=14 # Número de threads para o processamento dos arquivos pmap utilizando parallel pmap_threads=2 # Número de sequências dentro de cada partição partitions_n=100000 # http://khmer.readthedocs.org/en/v1.0/choosing-table-sizes.html # Memória disponível = -N (n_tables) * -x (tablesize) memlimitGB=21 memlimitMB=$((memlimitMB*1000)) # khmer parâmetro -N # n_tables = Memória disponível em GB / tablesize # Retirado da documentação: "Just use use -N 4, always, and vary the -x parameter." 
khmer_N="4" # khmer parâmetro -x # tablesize = Memória disponível em GB / n_tables * 1 bilhão khmer_byte_x="1e9" khmer_byte_graph_x="$((memlimitGB/${khmer_N}))e9" # khmer parâmetro -k khmer_k=23 # Tamanho mínimo para a avaliação das montagens, realizada pela soma dos tamanhos de todos os contigs maiors que este valor cutoff=200 # Check read length: # perl -lane 'INIT { our $size=0; my $c = 0; } if ($.%4==2) { last if ($c > 10); $size+=length($_); $c++; } END { print $size/$c; }' ./processed/prinseq/sampleA1.scythe.cutadapt5p.filtered.prinseq_1.fastq read_len=150 cutoff_perc=$(echo "scale=2; (${cutoff}/${read_len})" | bc -l) TMP_DIR="/state/partition1" # as linhas que iniciam com cerquilha são comentários if [ ! ${input} ] then echo "Missing input directory" exit else if [ ! -d ${input} ] then echo "Wrong input directory ${input}" exit fi fi if [ ! ${asmname} ] then asmname="testasm" fi biosamples=() echo "Assembling data from ${input} for ${asmname} ..." if [ ! ${re} ] then biosamples=("*") else for i in ${input}/processed/prinseq/*_1.fastq; do name=`basename ${i} .fastq | sed 's/\..*//'` if [[ $name =~ ${re} ]]; then biosamples=($(printf "%s\n" ${biosamples[@]} "${BASH_REMATCH[1]}" | sort -u )) fi done fi echo " Selected samples: ${biosamples[*]} " curdir=`pwd` cd ${input} finalasm=() for samp in "${biosamples[@]}"; do sampname="" if [ "${samp}" == "*" ]; then sampname="all" else sampname="${samp}" fi mkdir -p ./${sampname} cd ./${sampname} echo " Processing sample ${sampname}" echo " Link and interleave reads ..." 
mkdir -p ./reads cd ./reads anyse=0 # s1_pe s1_se s2_pe s2_se for i in ../../processed/prinseq/${samp}*_1.fastq; do bn=`basename ${i} _1.fastq` simplename=`echo ${bn} | sed 's/.scythe.cutadapt5p.filtered.prinseq//'` ln -f -s ../../processed/prinseq/${bn}_1.fastq s1_pe.fq ln -f -s ../../processed/prinseq/${bn}_2.fastq s2_pe.fq interleave-reads.py s?_pe.fq > ${simplename}_combined.pe.fq 2> ${simplename}_combined.pe.interleave-reads.err.txt gzip -9c ${simplename}_combined.pe.fq > ${simplename}_combined.pe.fq.gz if [ -e ../../processed/prinseq/${bn}_1_singletons.fastq ]; then ln -f -s ../../processed/prinseq/${bn}_1_singletons.fastq s1_se.fq cat s1_se.fq > ${simplename}_combined.se.fq fi if [ -e ../../processed/prinseq/${bn}_2_singletons.fastq ]; then ln -f -s ../../processed/prinseq/${bn}_2_singletons.fastq s2_se.fq cat s2_se.fq >> ${simplename}_combined.se.fq fi if [ -e ${simplename}_combined.se.fq ]; then anyse=1 gzip -9c ${simplename}_combined.se.fq > ${simplename}_combined.se.fq.gz fi rm -f *.fq done cd ../ echo " Digital normalization - \"diginorm\" ..." mkdir -p diginorm cd ./diginorm ### Normaliza tudo para uma cobertura de 20, considerando um k-mer de 20 # PE (-p) normalize-by-median.py -k ${khmer_k} -C 20 -N ${khmer_N} -x ${khmer_byte_x} -p --savetable normC20k20.kh ../reads/*.pe.fq.gz > normC20k20.pe.out.txt 2> normC20k20.pe.err.txt # SE if [ ${anyse} ]; then normalize-by-median.py -k ${khmer_k} -C 20 -N ${khmer_N} -x ${khmer_byte_x} --savetable normC20k20.kh --loadtable normC20k20.kh ../reads/*.se.fq.gz > normC20k20.se.out.txt 2> normC20k20.se.err.txt fi ### Poda leituras em k-mers que são pouco abundantes em leituras de alta cobertura. A opção -V é usada para datasets com cobertura variável. 
filter-abund.py -V normC20k20.kh *.keep > filter-abund.out.txt 2> filter-abund.err.txta ### Extração de arquivos PE (final .pe) e SE (final .se) for i in *.pe.fq.gz.keep.abundfilt; do extract-paired-reads.py ${i} > ${i}.extract-paired-reads.out.txt 2> ${i}.extract-paired-reads.err.txt done ### Após eliminar k-mers errôneos, vamos abandonar mais alguns dados de alta cobertura. # PE (-p) normalize-by-median.py -C 5 -k ${khmer_k} -N ${khmer_N} -x ${khmer_byte_x} -p --savetable normC5k20.kh *.pe.fq.gz.keep.abundfilt.pe > normC5k20.pe.out.txt 2> normC5k20.pe.err.txt # SE if [ ${anyse} ]; then normalize-by-median.py -C 5 -k ${khmer_k} -N ${khmer_N} -x ${khmer_byte_x} --savetable normC5k20.kh --loadtable normC5k20.kh *.pe.fq.gz.keep.abundfilt.se *.se.fq.gz.keep.abundfilt > normC5k20.se.out.txt 2> normC5k20.se.err.txt fi # Compactar cada amostra (gzip) em arquivos com nome base mais curto for i in `ls *.pe.fq.gz.keep.abundfilt.pe.keep`; do bn=`basename ${i} .pe.fq.gz.keep.abundfilt.pe.keep`; gzip -9c ${i} > ${bn}.pe.kak.fq.gz; done if [ ${anyse} ]; then for i in `ls *.pe.fq.gz.keep.abundfilt.se.keep`; do bn=`basename ${i} .pe.fq.gz.keep.abundfilt.se.keep`; gzip -9c ${i} > ${bn}.se.kak.fq.gz; done for i in `ls *.se.fq.gz.keep.abundfilt.keep`; do bn=`basename ${i} .se.fq.gz.keep.abundfilt.keep`; gzip -9c ${i} >> ${bn}.se.kak.fq.gz; done fi # Remover arquivos desnecessários rm -f normC20k20.kh *.keep *.abundfilt *.pe *.se readstats.py *.kak.fq.gz ../../processed/prinseq/${samp}*.fastq > diginorm.out.txt 2> diginorm.err.txt cd ../ echo " Partitioning ..." 
mkdir -p ./partitioned cd ./partitioned ### Eliminação dos k-mers altamente repetitivos que podem juntar múltiplas espécies (quimeras) e renomear de forma apropriada: filter-below-abund.py ../diginorm/normC5k20.kh ../diginorm/*.fq.gz > filter-below-abund.out.txt 2> filter-below-abund.err.txt # Renomear para .below.fq for i in *.below; do mv ${i} ${i}.fq; done ## Carrega grafo de k-mers load-graph.py -k ${khmer_k} -T ${threads} -N ${khmer_N} -x ${khmer_byte_graph_x} lump *.below.fq > load-graph.out.txt 2> load-graph.err.txt ## Encontrando k-mers altamente conectados iniciais (possíveis artefatos) make-initial-stoptags.py -k ${khmer_k} -N ${khmer_N} -x ${khmer_byte_graph_x} lump > make-initial-stoptags.out.txt 2> make-initial-stoptags.err.txt ## Particiona o grafo de acordo com a sua conectividade partition-graph.py --threads ${threads} --stoptags lump.stoptags lump > partition-graph.out.txt 2> partition-graph.err.txt pmap_count=`ls -l lump.*.pmap | wc -l` # Evitar divisão por zero if [ ${pmap_count} -lt ${pmap_threads} ]; then pmap_count=${pmap_count} fi ## Encontrando k-mers altamente conectados (possíveis artefatos) pmap_count=`ls -l lump.*.pmap | wc -l` if [ ${pmap_count} -lt ${pmap_threads} ]; then # 1 em cada thread pmap_limit=1 else # (pmap_count / pmap_thread) em cada thread) pmap_limit=$((pmap_count / pmap_threads)) fi echo " PMAP count: ${pmap_count} ${pmap_limit}" pmap_c=0 pmap_dir=0 rm -f ./run-find-knots.sh for i in lump.*.pmap; do if [ $((pmap_c % pmap_limit)) == 0 ]; then pmap_dir=$((pmap_dir + 1)) mkdir -p ./fk.${pmap_dir} ln -s $(readlink -f lump.pt) ./fk.${pmap_dir}/lump.pt ln -s $(readlink -f lump.tagset) ./fk.${pmap_dir}/lump.tagset echo "(cd ./fk.${pmap_dir} && find-knots.py lump > find-knots.out.txt 2> find-knots.err.txt)" >> ./run-find-knots.sh fi echo " Move ${i} to fk.${pmap_dir}" mv ${i} ./fk.${pmap_dir}/ done echo " Running find-knots.sh using ${pmap_threads} parallel processes ..." 
parallel --gnu -j ${pmap_threads} < ./run-find-knots.sh echo " Merging all .stoptags files to merge.stoptags ..." merge-stoptags.py -k ${khmer_k} ./fk > merge-stoptags.out.txt 2> merge-stoptags.err.txt echo " Filtering stoptags ..." ### Poda sequências nos k-mers considerados artefatos filter-stoptags.py -k ${khmer_k} merge.stoptags *.kak.fq.gz.below.fq > filter-stoptags.out.txt 2> filter-stoptags.err.txt echo " do-partitions ..." ### Particionamento, gera arquivos que contêm as anotações das partições do-partition.py -N ${khmer_N} -k ${khmer_k} -x ${khmer_byte_graph_x} -T ${threads} kak *.kak.fq.gz.below.fq.stopfilt > do-partition.out.txt 2> do-partition.err.txt echo " extract-partitions ..." ### Extraindo partições em grupos extract-partitions.py -m 0 -X ${partitions_n} kak *.part > extract-partitions.out.txt 2> extract-partitions.err.txt # Extraindo arquivos PE e SE (dn - digital normalization) for i in kak*.fq; do extract-paired-reads.py ${i} > ${i}.extract-paired-reads.out.txt 2> ${i}.extract-paired-reads.err.txt name=$(basename ${i} .fq) mv ${name}.fq.pe ${name}.dn.pe.fq mv ${name}.fq.se ${name}.dn.se.fq done mkdir -p ../input # Cópia cp *.dn.?e.fq ../input # Compressão gzip *.dn.?e.fq echo " sweep-reads ..." ### Alocar os dados processados (sem normalização) em partições - Recupera reads baseadas no compartilhamento de k-mers sweep-files.py -k ${khmer_k} -N ${khmer_N} -x ${khmer_byte_graph_x} --db kak.group*.fq --query ../reads/*.?e.fq.gz > sweep-files.out.txt 2> sweep-files.err.txt # Extraindo arquivos PE e SE (nodn - no digital normalization) for i in *kak*.sweep; do sweep=$(basename $i .fq) mv $i $sweep.fq extract-paired-reads.py ${sweep}.fq mv $sweep.fq.pe ${sweep}.nodn.pe.fq mv $sweep.fq.se ${sweep}.nodn.se.fq done # Cópia cp *.nodn.?e.fq ../input # Compressão gzip *.nodn.se.fq *.nodn.pe.fq # Removendo arquivos desnecessários #rm -f *.sh *.part *.sweep.fq *.below.fq *.stopfilt *.fq #rm -f ../diginorm/normC5k20.kh cd ../ echo " Assembling ..." 
mkdir -p ./assembled cd ./assembled groups=() infiles=`ls ../input/*.*.pe.fq`; for i in ${infiles[@]}; do name=$(basename $i .pe.fq); indir=$(dirname $i) echo " [${name}] ..." if [ ! -s ${indir}/${name}.se.fq ]; then echo " Put a fake read to single end data ..." echo -e "@SAMPLE0000000000/1\nNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\n+\nIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII\n" > ${indir}/${name}.se.fq echo -e "@SAMPLE0000000000/2\nNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\n+\nIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII\n" >> ${indir}/${name}.se.fq fi echo " Change input files' format" # minia cat ${indir}/${name}.se.fq | fastq2fasta.pl -n 100 > ${indir}/${name}.fa cat ${indir}/${name}.pe.fq | fastq2fasta.pl -n 100 >> ${indir}/${name}.fa # NEWBLER cat ${indir}/${name}.pe.fq | deinterleave_pairs -o ${indir}/${name}.pe_1.fq ${indir}/${name}.pe_2.fq # idba_ud fastq-se2ipe.pl ${indir}/${name}.se.fq | fastq2fasta.pl -n 100 > ${indir}/${name}.se.pe.fa cat ${indir}/${name}.pe.fq | fastq2fasta.pl -n 100 > ${indir}/${name}.pe.fa # ABYSS - nomes idênticos em R1 e R2 com a única diferença de possuir um /1 ou /2, respectivamente (não tenho certeza se necessário). cat ${indir}/${name}.pe_1.fq | renameSeqs.pl -p ABYSSPE | sed 's/\(^@ABYSSPE[^ ]*\).*/\1\/1/' > ${indir}/${name}.abyss.pe_1.fq cat ${indir}/${name}.pe_2.fq | renameSeqs.pl -p ABYSSPE | sed 's/\(^@ABYSSPE[^ ]*\).*/\1\/2/' > ${indir}/${name}.abyss.pe_2.fq cat ${indir}/${name}.se.fq | renameSeqs.pl -p ABYSSSE > ${indir}/${name}.abyss.se.fq echo " IDBA-UD ..." 
# scaffold.fa idba_ud --num_threads ${threads} --read ${indir}/${name}.pe.fa --read_level_2 ${indir}/${name}.se.pe.fa --min_contig ${cutoff} --maxk 51 --step 16 --mink 19 --min_pairs 0 -o ${name}.idba_ud.0.d > ${name}.idba_ud.0.log.out.txt 2> ${name}.idba_ud.0.log.err.txt echo " Newbler ..." # 454AllContigs.fna newAssembly ${name}.newbler.0.d > ${name}.newbler.0.log.out.txt 2> ${name}.newbler.0.log.err.txt addRun -lib PE ${name}.newbler.0.d ${indir}/${name}.pe_1.fq >> ${name}.newbler.0.log.out.txt 2>> ${name}.newbler.0.log.err.txt addRun -lib PE ${name}.newbler.0.d ${indir}/${name}.pe_2.fq >> ${name}.newbler.0.log.out.txt 2>> ${name}.newbler.0.log.err.txt addRun -lib SE ${name}.newbler.0.d ${indir}/${name}.se.fq >> ${name}.newbler.0.log.out.txt 2>> ${name}.newbler.0.log.err.txt runProject -mi 95 -ml 20 -cpu ${threads} ${name}.newbler.0.d >> ${name}.newbler.0.log.out.txt 2>> ${name}.newbler.0.log.err.txt echo " MEGAHIT ..." # final.contigs.fa megahit --mem-flag 2 -t ${threads} --min-contig-len ${cutoff} -o ${name}.megahit.0.d -m 0.9 --presets meta-large --12 ${indir}/${name}.pe.fq -r ${indir}/${name}.se.fq --min-count 1 --k-min 19 --k-max 51 --k-step 16 > ${name}.megahit.0.log.out.txt 2> ${name}.megahit.0.log.err.txt echo " SPAdes ..." # scaffolds.fasta spades.py --phred-offset 33 --memory ${memlimitGB} -t ${threads} --cov-cutoff auto --12 ${indir}/${name}.pe.fq -s ${indir}/${name}.se.fq -o ${name}.spades.0.d > ${name}.spades.0.log.out.txt 2> ${name}.spades.0.log.err.txt echo " metaSPAdes ..." # scaffolds.fasta metaspades.py --phred-offset 33 --memory ${memlimitGB} -t ${threads} --12 ${indir}/${name}.pe.fq -s ${indir}/${name}.se.fq -o ${name}.metaspades.0.d > ${name}.metaspades.0.log.out.txt 2> ${name}.metaspades.0.log.err.txt echo " Minia ..." # é necessário criar o diretório antes de rodar o minia (https://www.biostars.org/p/168676/) # kak.contigs.fa for k in {19..51..16}; do echo " ${k} ..." 
mkdir -p ./${name}.minia.$k.d minia -nb-cores ${threads} -kmer-size $k -max-memory ${memlimitGB} -out-dir ${name}.minia.$k.d -out ${name}.minia.$k.d/kak -in ${indir}/${name}.fa > ${name}.minia.$k.log.out.txt 2> ${name}.minia.$k.log.err.txt done echo " ABySS ..." # kak-scaffolds.fa for k in {19..51..16}; do echo " ${k} ..." mkdir -p ./${name}.abyss.$k.d rsep=`readlink -f ${indir}/${name}.abyss.se.fq` rpep1=`readlink -f ${indir}/${name}.abyss.pe_1.fq` rpep2=`readlink -f ${indir}/${name}.abyss.pe_2.fq` cd ./${name}.abyss.$k.d # não usar lib='pe' (pe é um argumento de abyss-pe) abyss-pe k=${k} name=kak j=${threads} lib='pe1' pe1="${rpep1} ${rpep2}" se="${rsep}" e=2 c=2 s=100 N=1 n=1 S=${cutoff} scaffolds > ../${name}.abyss.$k.log.out.txt 2> ../${name}.abyss.$k.log.err.txt cd ../ done echo " Velvet ..." # contigs.fa for k in {19..51..16}; do echo " ${k} ..." velveth ${name}.velvet.$k.d $k -fastq -long ${indir}/${name}.se.fq -fastq -longPaired -interleaved ${indir}/${name}.pe.fq > ${name}.velveth.$k.log.out.txt 2> ${name}.velveth.$k.log.err.txt velvetg ${name}.velvet.$k.d -exp_cov auto -cov_cutoff auto -scaffolding yes -conserveLong yes -min_contig_lgth ${cutoff} > ${name}.velvetg.$k.log.out.txt 2> ${name}.velvetg.$k.log.err.txt done echo " metaVelvet ..." # meta-velvetg.contigs.fa for k in {19..51..16}; do echo " ${k} ..." mkdir -p ${name}.metavelvet.$k.d cd ${name}.metavelvet.$k.d/ find ../${name}.velvet.$k.d/ -maxdepth 1 -type f -exec ln -f -s {} \; cd ../ meta-velvetg ${name}.metavelvet.$k.d -exp_cov auto -cov_cutoff auto -scaffolding yes -min_contig_lgth ${cutoff} > ${name}.metavelvetg.$k.log.out.txt 2> ${name}.metavelvetg.$k.log.err.txt done echo " Ray ..." # Scaffolds.fasta for k in {19..51..16}; do echo " ${k} ..." mpiexec -n ${threads} -num-cores ${threads} Ray -minimum-seed-length 50 -k ${k} -i ${indir}/${name}.pe.fq -s ${indir}/${name}.se.pq -o ${name}.ray.$k.d > ${name}.ray.$k.log.out.txt 2> ${name}.ray.$k.log.err.txt done echo " MetaPlatanus ..." 
mkdir -p ${name}.meta_platanus.0.d/ # kak_finalClusters_all.fa # The parameters -c and -C must be increased with large datasets add 1 for each 50000000 meta_platanus_n=$(wc -l ${indir}/${name}.pe_1.fq) meta_platanus_cov=$(( $(echo "scale=0; (${meta_platanus_n}/50000000)" | bc -l)+1 )) meta_platanus assemble -tmp ${TMP_DIR} -k 0.15 -K 0.5 -c ${meta_platanus_cov} -C ${meta_platanus_cov} -l ${cutoff_perc} -t ${threads} -m ${memlimitGB} -f ${indir}/${name}.pe_1.fq ${indir}/${name}.pe_2.fq ${indir}/${name}.se.fq -o ${name}.meta_platanus.0.d/kak > ${name}.meta_platanus-assemble.0.log.out.txt 2> ${name}.meta_platanus-assemble.0.log.err.txt meta_platanus scaffold -tmp ${TMP_DIR} -k ${name}.meta_platanus.0.d/kak_kmer_occ.bin -t ${threads} -c ${name}.meta_platanus.0.d/kak_contig.fa -IP1 ${indir}/${name}.pe_1.fq ${indir}/${name}.pe_2.fq -o ${name}.meta_platanus.0.d/kak > ${name}.meta_platanus-scaffold.0.log.out.txt 2> ${name}.meta_platanus-scaffold.0.log.err.txt cd ${name}.meta_platanus.0.d/ meta_platanus iterate -tmp ${TMP_DIR} -m ${memlimitGB} -t ${threads} -k kak_kmer_occ.bin -c kak_scaffold.fa -IP1 ${indir}/../${name}.pe_1.fq ${indir}/../${name}.pe_2.fq -o kak > ../${name}.meta_platanus-iterate.0.log.out.txt 2> ../${name}.meta_platanus-iterate.0.log.err.txt meta_platanus cluster_scaffold -tmp ${TMP_DIR} -t ${threads} -c out_iterativeAssembly.fa -IP1 ${indir}/../${name}.pe_1.fq ${indir}/../${name}.pe_2.fq -o kak > ../${name}.meta_platanus-cluster_scaffold.0.log.out.txt 2> ../${name}.meta_platanus-cluster_scaffold.0.log.err.txt if [ ! -e "kak_finalClusters_all.fa" ]; then if [ ! 
-e "kak_contig.fa" ]; then touch kak_contig.fa fi ln -f -s kak_contig.fa kak_finalClusters_all.fa fi cd ../ groups=($(printf "%s\n" ${groups[@]} ${name} | sort -u )) done assemstats3.py ${cutoff} *.idba_ud.*.d/scaffold.fa *.newbler.*.d/assembly/454AllContigs.fna *.megahit.*.d/final.contigs.fa *.spades.*.d/scaffolds.fasta *.metaspades.*.d/scaffolds.fasta *.minia.*.d/kak.contigs.fa *.velvet.*.d/contigs.fa *.metavelvet.*.d/meta-velvetg.contigs.fa *.ray.*.d/Scaffolds.fasta *.abyss.*.d/kak-scaffolds.fa *.meta_platanus.*.d/kak_finalClusters_all.fa > assemstats3.out.txt 2> assemstats3.err.txt for g in ${groups[@]}; do echo " Evaluating assembly of group ${g} ..." calc-best-assembly.py -C ${cutoff} -q ${g}.{idba_ud.*.d/scaffold.fa,newbler.*.d/assembly/454AllContigs.fna,megahit.*.d/final.contigs.fa,spades.*.d/scaffolds.fasta,metaspades.*.d/scaffolds.fasta,minia.*.d/kak.contigs.fa,velvet.*.d/contigs.fa,metavelvet.*.d/meta-velvetg.contigs.fa,ray.*.d/Scaffolds.fasta,abyss.*.d/kak-scaffolds.fa,*.meta_platanus.*.d/kak_finalClusters_all.fa} -o ${g}.best.fa > calc-best-assembly.out.txt 2> calc-best-assembly.err.txt done multi-rename.py ${asmname} *.best.fa > final-assembly.fa finalasm=($(printf "%s\n" ${finalasm[@]} "../${sampname}/assembled/final-assembly.fa" | sort -u )) assemblathon_stats.pl final-assembly.fa > assemblathon_stats.out.txt 2> assemblathon_stats.err.txt echo "Finish ${sampname} assembly" cd ../../ done # Fundindo montagens de amostras echo "Merging assemblies ..." mkdir -p ./assembled cd ./assembled ### MeGAMerge #MeGAMerge-1.1.pl -overlap=${cutoff} -minID=99 -cpu=${threads} -force -o=final-assembly.fa . 
${finalasm[*]} ### usearch cat ${finalasm[*]} > finalasm.tmp usearch81 -cluster_fast finalasm.tmp -id 0.99 -sort size -threads ${threads} -centroids nr.fasta -uc clusters.uc -consout final-assembly.fa > usearch.out.txt 2> usearch.err.txt rm -f finalasm.tmp assemblathon_stats.pl MergedContigs.fasta > assemblathon_stats.out.txt 2> assemblathon_stats.err.txt cd ../ echo "Finish Final Assembly (${asmname})" cd ${curdir}
true
dfc1ecc97c74a3a8ac9fbdade72649ada91b5a53
Shell
timmao78/bashScript
/ExpansionExamples.sh
UTF-8
340
3.3125
3
[]
no_license
#!/usr/local/bin/bash # Basic arithmetic using double parentheses a=$(( 4 + 5 )) echo $a a=$((3+5)) echo $a b=$(( a + 3 )) # We may include variables without the preceding $ sign. echo $b b=$(( $a + 4 )) # Variables can be included with the $ sign if you prefer. echo $b (( b++ )) echo $b (( b+= 3)) echo $b a=$(( 4 * 5 )) echo $a
true
2efb9aac0d7f336db66f93fda9d50eb79bcd5a2a
Shell
kholohan/JetPack
/src/update_upgrade/patch_ironic.sh
UTF-8
4,616
2.65625
3
[ "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
#!/bin/bash

# Copyright (c) 2016-2017 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Apply a patch to an installed Python source file and remove its stale
# byte-compiled artifacts so the interpreter recompiles from the patched
# source on next import.
# Arguments:
#   $1 - absolute path of the .py file to patch
#   $2 - patch file to apply
# Notes:
#   'patch -b' keeps a backup of the original; '-s' suppresses output.
#   A failing patch is deliberately not fatal (the original script
#   discarded the exit status the same way), so re-running this script
#   when a patch is already applied remains harmless.
apply_patch() {
  local target=$1
  local patch_file=$2
  local out
  out=$(sudo patch -b -s "$target" "$patch_file")
  sudo rm -f "${target%.py}.pyc" "${target%.py}.pyo"
}

# Configure a cleaning network so that the Bare Metal service, ironic, can use
# node cleaning.
# Arguments:
#   $1 - name of the neutron network to use as the cleaning network
configure_cleaning_network() {
  network_name="$1"
  # Look up the network's UUID from the neutron listing.
  network_uuid=$(neutron net-list | grep "${network_name}" | awk '{print $2}')

  # Point ironic at the cleaning network (sed keeps a .bak of the conf)
  # and restart the conductor so the new setting takes effect.
  sudo sed -i.bak "s/^.*cleaning_network_uuid.*$/cleaning_network_uuid\ =\ $network_uuid/" /etc/ironic/ironic.conf
  sudo systemctl restart openstack-ironic-conductor.service
}

# This hacks in a patch to fix correct querying WSMAN Enumerations that have
# more than 100 entries. We will need to remove this after the fix appears
# in OSP10. Note that this patch must be here because we use this code prior
# to deploying the director.
echo
echo "## Patching Ironic iDRAC driver WSMAN library..."
apply_patch /usr/lib/python2.7/site-packages/dracclient/wsman.py ~/update_upgrade/wsman.patch
echo "## Done."

# This hacks in a patch to work around a known issue where RAID configuration
# fails because the iDRAC is busy running an export to XML job and is not
# ready. Note that this patch must be here because we use this code prior to
# deploying the director.
echo
echo "## Patching Ironic iDRAC driver is_ready check..."
apply_patch /usr/lib/python2.7/site-packages/dracclient/resources/lifecycle_controller.py ~/update_upgrade/lifecycle_controller.patch
apply_patch /usr/lib/python2.7/site-packages/dracclient/resources/uris.py ~/update_upgrade/uris.patch
apply_patch /usr/lib/python2.7/site-packages/dracclient/client.py ~/update_upgrade/client.patch
apply_patch /usr/lib/python2.7/site-packages/ironic/drivers/modules/drac/raid.py ~/update_upgrade/raid.patch
echo "## Done."

# This hacks in a patch to work around a known issue where a RAID-10 virtual
# disk cannot be created from more than 16 backing physical disks. Note that
# this code must be here because we use this code prior to deploying the
# director.
echo
echo "## Patching Ironic iDRAC driver RAID library..."
apply_patch /usr/lib/python2.7/site-packages/dracclient/resources/raid.py ~/update_upgrade/dracclient_raid.patch
echo "## Done."

# This hacks in a patch to work around an issue where lock contention on the
# nodes in ironic can occur during RAID cleaning.
echo
echo "## Patching ironic.conf for locking..."
sudo sed -i 's/#node_locked_retry_attempts = 3/node_locked_retry_attempts = 15/' /etc/ironic/ironic.conf
echo "## Done."

echo
echo "## Restarting openstack-ironic-conductor.service..."
sudo systemctl restart openstack-ironic-conductor.service
echo "## Done."

# This hacks in a workaround to fix in-band introspection. A fix has been
# made to NetworkManager upstream, but is not currently present in OSP10.
# We will need to remove this after the fix appears in OSP10.
echo
echo "## Patching Ironic in-band introspection..."
sudo sed -i 's/initrd=agent.ramdisk /initrd=agent.ramdisk net.ifnames=0 biosdevname=0 /' /httpboot/inspector.ipxe
echo "## Done."

network="ctlplane"
echo
echo "## Configuring neutron network ${network} as a cleaning network"
configure_cleaning_network "$network"
echo "## Done."
true
9d896e1c65dcc20ab616dbf718efda2971ac433e
Shell
zobnin/android-tools
/00-cleanup.sh
UTF-8
1,184
2.828125
3
[]
no_license
#!/sbin/sh
#
# Clean up CyanogenMod installation after install
# Deleted: all sounds, but one ringtone and notify,
# TTS languages, offline dictation and few apps
#
# /system/addon.d/00-cleanup.sh
#

. /tmp/backuptool.functions

# Names (without the .ogg extension) of the single ringtone and
# notification sound to keep.
RINGTONE=Machina
NOTIFICATION=Argon

# Remove every *.ogg file in the current directory except "$1.ogg".
#
# BUG FIX: the previous pattern "[!${RINGTONE}]*.ogg" was a bracket
# expression, not a name match -- it matched on the FIRST CHARACTER
# only, so any file starting with one of the letters of the keep-name
# (e.g. "harmony.ogg" while keeping "Machina") wrongly survived the
# purge.  An explicit name comparison keeps exactly one file.
# Arguments:
#   $1 - base name (without .ogg) of the file to keep
remove_all_ogg_except() {
  keep="$1.ogg"
  for snd in *.ogg; do
    # If the glob matched nothing it stays literal; skip it.
    [ -e "$snd" ] || continue
    [ "$snd" = "$keep" ] || rm -f "$snd"
  done
}

case "$1" in
  backup)
    # Stub
  ;;
  restore)
    # Ringtone sounds: keep only the chosen ringtone.
    # Guard the cd so the purge can never run in the wrong directory.
    cd /system/media/audio/ringtones/ && remove_all_ogg_except "$RINGTONE"
    # Notification sounds: keep only the chosen notification.
    cd /system/media/audio/notifications/ && remove_all_ogg_except "$NOTIFICATION"
    # Alarm sounds (all)
    rm /system/media/audio/alarms/*
    # TTS Languages
    rm /system/tts/lang_pico/*
    # Offline dictation
    rm -rf /system/usr/srec/config/*
    # Apps
    A=/system/app/
    rm $A/Email.apk
    rm $A/Exchange2.apk
    #rm $A/LockClock.apk
    rm $A/PicoTts.apk
    rm $A/Term.apk
    #rm $A/ThemeChooser.apk
    #rm $APPS/WAPPushManager.apk
    rm $A/LiveWallpapers.apk
    #rm $A/LiveWallpapersPicker.apk
    rm $A/VisualizationWallpapers.apk
    A=/system/priv-app/
    rm $A/CMUpdater.apk
    #rm $A/ThemeManager.apk
  ;;
  pre-backup)
    # Stub
  ;;
  post-backup)
    # Stub
  ;;
  pre-restore)
    # Stub
  ;;
  post-restore)
    # Stub
  ;;
esac
true