Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Remove error redirection to /dev/null
#!/bin/bash source .env # stop machines irrespective of currently running or not, irrespective of node exists or not # since the error messages are redirected to /dev/null if [ "$#" -eq 1 ]; then export NODE_NAME="$1" fi echo "[$NODE_NAME] - stopping node..." docker-machine stop "$NODE_NAME" > /dev/null 2>&1 echo "[$NODE_NAME] - node stopped succesfully" # list the cluster machines echo "Current Docker Hosts:" docker-machine ls
#!/bin/bash source .env # stop machines irrespective of currently running or not, irrespective of node exists or not # since the error messages are redirected to /dev/null if [ "$#" -eq 1 ]; then export NODE_NAME="$1" fi echo "[$NODE_NAME] - stopping node..." docker-machine stop "$NODE_NAME" > /dev/null echo "[$NODE_NAME] - node stopped succesfully" # list the cluster machines echo "Current Docker Hosts:" docker-machine ls
Print c++ debug info too
echo "CC is: $CC" echo "CXX is: $CXX" echo "Unsetting" unset CC unset CXX echo "/usr/bin/cc is: $(readlink /usr/bin/cc)" echo "Tryna run cc" cc --version python -m pip install --no-deps --ignore-installed .
echo "CC is: $CC" echo "CXX is: $CXX" echo "Unsetting" unset CC unset CXX echo "/usr/bin/cc is: $(readlink /usr/bin/cc)" echo "Tryna run cc" cc --version echo "/usr/bin/c++ is: $(readlink /usr/bin/c++)" echo "Tryna run c++" c++ --version python -m pip install --no-deps --ignore-installed .
Set TARANTOOL_1_8_REPLICATION_SOURCE for mac tests as it should be.
#!/usr/bin/env bash set -e -v -x pushd ${BASH_SOURCE%/*}/.. dotnet test -c Release --no-build --filter "Tarantool!=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies pushd tarantool kill -KILL $(cat tarantool.pid) brew uninstall tarantool brew install tarantool --HEAD tarantool tarantool.lua popd dotnet test -c Release --no-build --filter "Tarantool=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies popd
#!/usr/bin/env bash set -e -v -x pushd ${BASH_SOURCE%/*}/.. dotnet test -c Release --no-build --filter "Tarantool!=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies pushd tarantool kill -KILL $(cat tarantool.pid) brew uninstall tarantool brew install tarantool --HEAD tarantool tarantool.lua popd TARANTOOL_1_8_REPLICATION_SOURCE="localhost:3301" dotnet test -c Release --no-build --filter "Tarantool=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies popd
Replace ls with more safe find | read, check for .git presence
#!/bin/bash #This script should pull a list of the directories in the parent folder, cd into each folder issue the Git Update command, update the files, move back to the parent directory and repeat until all files have been updated, then copy the files to the main mod directory. #Move to the parent directory so the script can self update. cd .. #Define MODS as a listing of all the subdirectories MODS=`ls` for i in ${MODS}; do echo "$i" #Prints the current directory, useful for debugging and logs, pointless right now. cd $i #Moves into the current directory, VERY IMPORTANT git pull #Runs the update call for GIT cd ..; #Moves back up to the parent directory done #Once all directories have cycled through this ends the loop #Now that all files are updated copy them to the main directory. rsync -r '' ~/.minetest/mods #Move to the minetest/mods folder this should be default on all installs. cd ~/.minetest/mods #Run an external script to find out which files to rename and do it. if [[ -f ./rename.sh ]]; then exec ./rename.sh fi
#!/bin/bash #This script should pull a list of the directories in the parent folder, cd into each folder issue the Git Update command, update the files, move back to the parent directory and repeat until all files have been updated, then copy the files to the main mod directory. # Path to the directory with mods MODS_PATH=. #Move to the parent directory so the script can self update. cd .. find $MODS_PATH -maxdepth 1 -type d -print0 | while read -d $'\0' dir do echo "$dir" #Prints the current directory, useful for debugging and logs, pointless right now. cd "$dir" #Moves into the current directory, VERY IMPORTANT if [[ -d .git ]]; then git pull #Runs the update call for GIT else printf 'Not a git repository\n' fi cd .. #Moves back up to the parent directory done #Once all directories have cycled through this ends the loop #Now that all files are updated copy them to the main directory. rsync -r '' ~/.minetest/mods #Move to the minetest/mods folder this should be default on all installs. cd ~/.minetest/mods #Run an external script to find out which files to rename and do it. if [[ -f ./rename.sh ]]; then exec ./rename.sh fi
Add 2006 to 2012 to downloaded data
#!/bin/sh for i in `seq 1979 2005` do curl -O 'http://data.gdeltproject.org/events/'$i'.zip' unzip $i'.zip' rm $i'.zip' sleep 3 done
#!/bin/sh for i in `seq 1979 2005` do curl -O 'http://data.gdeltproject.org/events/'$i'.zip' unzip $i'.zip' rm $i'.zip' gzip $i.csv sleep 3 done for year in `seq 2006 2012` do for month in `seq 1 12` do mmzero=0$month mm=${mmzero: -2} curl -O 'http://data.gdeltproject.org/events/'$year$mm'.zip' unzip $year$mm'.zip' rm $year$mm'.zip' gzip $year$mm.csv sleep 3 done done
Stop changing the secrets every time the solution is deployed, as it breaks stored encrypted passwords.
#! /bin/bash export SECRET_KEY_BASE=$(bundle exec rake secret) export DEVISE_PEPPER=$(bundle exec rake secret) export RAILS_SERVE_STATIC_FILES='true' . /usr/src/app/secrets.sh bundle exec rake db:create db:migrate bundle exec rake assets:precompile bundle exec rails s Puma -p 3000 -b '0.0.0.0'
#! /bin/bash #export SECRET_KEY_BASE=$(bundle exec rake secret) #export DEVISE_PEPPER=$(bundle exec rake secret) export RAILS_SERVE_STATIC_FILES='true' . /usr/src/app/secrets.sh bundle exec rake db:create db:migrate bundle exec rake assets:precompile bundle exec rails s Puma -p 3000 -b '0.0.0.0'
Remove cd to home dir.
cd ~/ echo "Bootstrap Ansible" curl -L https://raw.githubusercontent.com/andrewtchin/ansible-common/master/ubuntu-bootstrap.sh | sh echo "Clone ansible-common" ANSIBLE_COMMON_DIR="~/ansible-common" if [ -d "$ANSIBLE_COMMON_DIR" ]; then rm -rf $ANSIBLE_COMMON_DIR fi git clone https://github.com/andrewtchin/ansible-common.git $ANSIBLE_COMMON_DIR echo "Clone ansible-ubuntu" ANSIBLE_UBUNTU_DIR="~/ansible-ubuntu" if [ -d "$ANSIBLE_UBUNTU_DIR" ]; then rm -rf $ANSIBLE_UBUNTU_DIR fi git clone https://github.com/andrewtchin/ansible-ubuntu.git $ANSIBLE_UBUNTU_DIR cd ansible-common echo "Run ansible-common" ansible-playbook -vvv playbooks/common.yml --ask-sudo-pass -c local cd ../ansible-ubuntu echo "Run ansible-ubuntu" ansible-playbook -vvv playbooks/ubuntu.yml --ask-sudo-pass -c local --extra-vars=@vars/ubuntu.json echo "Install dotfiles" git clone https://github.com/andrewtchin/dotfiles-local.git ~/.dotfiles-local git clone https://github.com/andrewtchin/dotfiles.git ~/.dotfiles --recursive RCRC="$HOME/.dotfiles/rcrc" rcup echo "Install complete"
echo "Bootstrap Ansible" curl -L https://raw.githubusercontent.com/andrewtchin/ansible-common/master/ubuntu-bootstrap.sh | sh echo "Clone ansible-common" ANSIBLE_COMMON_DIR="~/ansible-common" if [ -d "$ANSIBLE_COMMON_DIR" ]; then rm -rf $ANSIBLE_COMMON_DIR fi git clone https://github.com/andrewtchin/ansible-common.git $ANSIBLE_COMMON_DIR echo "Clone ansible-ubuntu" ANSIBLE_UBUNTU_DIR="~/ansible-ubuntu" if [ -d "$ANSIBLE_UBUNTU_DIR" ]; then rm -rf $ANSIBLE_UBUNTU_DIR fi git clone https://github.com/andrewtchin/ansible-ubuntu.git $ANSIBLE_UBUNTU_DIR cd ansible-common echo "Run ansible-common" ansible-playbook -vvv playbooks/common.yml --ask-sudo-pass -c local cd ../ansible-ubuntu echo "Run ansible-ubuntu" ansible-playbook -vvv playbooks/ubuntu.yml --ask-sudo-pass -c local --extra-vars=@vars/ubuntu.json echo "Install dotfiles" git clone https://github.com/andrewtchin/dotfiles-local.git ~/.dotfiles-local git clone https://github.com/andrewtchin/dotfiles.git ~/.dotfiles --recursive RCRC="$HOME/.dotfiles/rcrc" rcup echo "Install complete"
Put the subl symlink in the brew bin folder
#!/bin/bash cd ~ mkdir -p bin PATH=$PATH:~/bin export PATH git config --global core.editor "subl -n -w" ln -s "/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl" ~/bin/subl
#!/bin/sh #cd ~ #mkdir -p bin #PATH=$PATH:~/bin #export PATH=$PATH:~/bin ln -svf "/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl" /usr/local/bin/subl git config --global core.editor "subl -n -w" echo "Adding subl symlink to your /usr/local/bin/subl" echo "Make sure this folder is in your PATH"
Customize fzf colors to match terminal
export FZF_DEFAULT_OPTS=$FZF_DEFAULT_OPTS' --prompt "❯ " --pointer "❯" --marker "❯"'
export FZF_DEFAULT_OPTS=$FZF_DEFAULT_OPTS' --prompt "❯ " --pointer "❯" --marker "❯" --color "fg:7,bg:-1,hl:2" --color "fg+:7,bg+:-1,hl+:2" --color "info:3,prompt:6,pointer:1" --color "marker:1,spinner:2,header:4"'
Add fall-back to localhost when no other IP can be found on which MongoDB may listen.
#!/bin/bash # # mongodb-connect.sh # # This script is mainly a wrapper around mongo command which is the so-called # MongoDB Shell (an interactive command line interface) that allows anyone # quickly connect to a database instance that does not listen on ANY but # rather binds to a specific IP and/or port on a particular system ... # NETSTAT_BINARY='/bin/netstat' MONGO_BINARY='/usr/bin/mongo' # Attempt to determine host and port on which MongoDB instance listens ... result=$(netstat -n -l -t 2> /dev/null | \ grep -i '^tcp' | \ awk '{ print $4 }' | \ grep ':27017' | \ grep -v '^127\.') # Abort is no results are present. Perhaps MongoDB is not running? [ -z "$result" ] && exit 1 # Get the host and port number ... host="${result%%:*}" port="${result##*:}" # Start the mongo shell and pass any additional command line arguments to it ... ${MONGO_BINARY} --host "$host" --port "$port" "$@"
#!/bin/bash # # mongodb-connect.sh # # This script is mainly a wrapper around mongo command which is the so-called # MongoDB Shell (an interactive command line interface) that allows anyone # quickly connect to a database instance that does not listen on ANY but # rather binds to a specific IP and/or port on a particular system ... # NETSTAT_BINARY='/bin/netstat' MONGO_BINARY='/usr/bin/mongo' # Attempt to determine host and port on which MongoDB instance listens ... result=$(netstat -n -l -t 2> /dev/null | \ grep -i '^tcp' | \ awk '{ print $4 }' | \ grep ':27017' | \ grep -v '^127\.') # Fall-back to localhost if no other results are present ... [ -z "$result" ] && result='127.0.0.1:27017' # Get the host and port number ... host="${result%%:*}" port="${result##*:}" # Start the mongo shell and pass any additional command line arguments to it ... $MONGO_BINARY --quiet --host "$host" --port "$port" "$@" if [[ ! $? == 0 ]] ; then echo "ERROR: Unable to connect any MongoDB instance ..." >&2 exit 1 fi exit 0
Update test script to include branch to test
#!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2019 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## set -eux HERE=$(dirname $0) TMPDIR=$(mktemp -d /tmp/.workingXXXXXX) BASEDIR=$HERE/.. test_repository() { ADDRESS=$1 REPODIR=$TMPDIR/$(basename $1) git clone $ADDRESS $REPODIR pushd $REPODIR git checkout aws-sdk-swift-master swift test popd } # Test latest code against test_repository https://github.com/adam-fowler/s3-filesystem-kit test_repository https://github.com/adam-fowler/aws-vapor-test test_repository https://github.com/adam-fowler/aws-cognito-authentication-kit rm -rf $TMPDIR
#!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2019 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## set -eux HERE=$(dirname $0) TMPDIR=$(mktemp -d /tmp/.workingXXXXXX) BASEDIR=$HERE/.. test_repository() { ADDRESS=$1 BRANCH=$2 REPODIR=$TMPDIR/$(basename $1) git clone $ADDRESS $REPODIR pushd $REPODIR git checkout $BRANCH swift test popd } # Test latest code against test_repository https://github.com/adam-fowler/s3-filesystem-kit aws-sdk-swift-master test_repository https://github.com/adam-fowler/aws-vapor-test master test_repository https://github.com/adam-fowler/aws-cognito-authentication-kit aws-sdk-swift-master rm -rf $TMPDIR
Use bash instead of sh in test
#!/bin/sh cd ${CURR_DIR}/issue1037-better-dependency-messages temp_file=$(mktemp $(basename $0).XXXXXX) expected_file="$CURR_DIR/expected-issue1037-output" function cleanup { rm $temp_file } trap cleanup EXIT $DUB upgrade 2>$temp_file && exit 1 # dub upgrade should fail if ! diff "$expected_file" "$temp_file"; then die 'output not containing conflict information' fi exit 0
#!/bin/bash set -e -o pipefail cd ${CURR_DIR}/issue1037-better-dependency-messages temp_file=$(mktemp $(basename $0).XXXXXX) expected_file="$CURR_DIR/expected-issue1037-output" function cleanup { rm $temp_file } trap cleanup EXIT $DUB upgrade 2>$temp_file && exit 1 # dub upgrade should fail if ! diff "$expected_file" "$temp_file"; then die 'output not containing conflict information' fi exit 0
Remove additional deb package files
#!/bin/sh rm chromium-* rm -rf chrome-deb
#!/bin/sh rm chromium-* rm ironframe_1.1.1.1-0ubuntu1.dsc rm ironframe_1.1.1.1-0ubuntu1.tar.gz rm ironframe_1.1.1.1-0ubuntu1_amd64.build rm ironframe_1.1.1.1-0ubuntu1_amd64.changes rm -rf chrome-deb
Remove email option from docker login
#!/bin/bash # Needs DOCKER_USERNAME, DOCKER_PASSWORD, and DOCKER_REPOSITORY environment variables. set -ex BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source $BIN_DIR/set_git_env_vars.sh DOCKER_USERNAME="${DOCKER_USERNAME:-mozjenkins}" docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD -e $DOCKER_USERNAME@example.com # Push to docker hub docker push $DOCKER_IMAGE_TAG if [[ "$GIT_TAG_DATE_BASED" == true ]]; then docker tag $DOCKER_IMAGE_TAG $DOCKER_REPOSITORY:$GIT_TAG docker push $DOCKER_REPOSITORY:$GIT_TAG docker tag $DOCKER_IMAGE_TAG $DOCKER_REPOSITORY:latest docker push $DOCKER_REPOSITORY:latest fi
#!/bin/bash # Needs DOCKER_USERNAME, DOCKER_PASSWORD, and DOCKER_REPOSITORY environment variables. set -ex BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source $BIN_DIR/set_git_env_vars.sh DOCKER_USERNAME="${DOCKER_USERNAME:-mozjenkins}" docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD # Push to docker hub docker push $DOCKER_IMAGE_TAG if [[ "$GIT_TAG_DATE_BASED" == true ]]; then docker tag $DOCKER_IMAGE_TAG $DOCKER_REPOSITORY:$GIT_TAG docker push $DOCKER_REPOSITORY:$GIT_TAG docker tag $DOCKER_IMAGE_TAG $DOCKER_REPOSITORY:latest docker push $DOCKER_REPOSITORY:latest fi
Add check if haskell binary is present for git prompt
GIT_PROMPT_EXECUTABLE="haskell" source $DOTFILES/vendor/zsh-git-prompt/zshrc.sh ZSH_THEME_GIT_PROMPT_PREFIX="(" ZSH_THEME_GIT_PROMPT_SUFFIX=")" ZSH_THEME_GIT_PROMPT_SEPARATOR="|" ZSH_THEME_GIT_PROMPT_BRANCH="%{$fg[yellow]%}" ZSH_THEME_GIT_PROMPT_STAGED="%{$fg[magenta]%}%{~%G%}" ZSH_THEME_GIT_PROMPT_CONFLICTS="%{$fg[red]%}%{×%G%}" ZSH_THEME_GIT_PROMPT_CHANGED="%{$fg[cyan]%}%{+%G%}" ZSH_THEME_GIT_PROMPT_BEHIND="%{↓%G%}" ZSH_THEME_GIT_PROMPT_AHEAD="%{↑%G%}" ZSH_THEME_GIT_PROMPT_UNTRACKED="%{…%G%}" ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[green]%}%{✔%G%}"
# Use the faster haskell executable if it's present if [[ -x "$DOTFILES/vendor/zsh-git-prompt/src/.bin/gitstatus" ]]; then GIT_PROMPT_EXECUTABLE="haskell" fi source $DOTFILES/vendor/zsh-git-prompt/zshrc.sh ZSH_THEME_GIT_PROMPT_CACHE=1 ZSH_THEME_GIT_PROMPT_PREFIX="(" ZSH_THEME_GIT_PROMPT_SUFFIX=")" ZSH_THEME_GIT_PROMPT_SEPARATOR="|" ZSH_THEME_GIT_PROMPT_BRANCH="%{$fg[yellow]%}" ZSH_THEME_GIT_PROMPT_STAGED="%{$fg[magenta]%}%{~%G%}" ZSH_THEME_GIT_PROMPT_CONFLICTS="%{$fg[red]%}%{×%G%}" ZSH_THEME_GIT_PROMPT_CHANGED="%{$fg[cyan]%}%{+%G%}" ZSH_THEME_GIT_PROMPT_BEHIND="%{↓%G%}" ZSH_THEME_GIT_PROMPT_AHEAD="%{↑%G%}" ZSH_THEME_GIT_PROMPT_UNTRACKED="%{…%G%}" ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[green]%}%{✔%G%}"
Exit Bash script on error
#!/bin/bash PWD="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Determine "childlogdir" cd "$PWD/../" CHILDLOGDIR=`./script/run_with_carton.sh ./script/mediawords_query_config.pl "//supervisor/childlogdir"` if [[ -z "$CHILDLOGDIR" ]]; then echo "\"childlogdir\" is undefined in the configuration." exit 1 fi CHILDLOGDIR="$(cd "$CHILDLOGDIR" && pwd )" ./script/run_with_carton.sh ./script/mediawords_generate_supervisord_conf.pl cd "supervisor/" source "$PWD/supervisor_is_up_to_date.inc.sh" validate_supervisor_version cd "$PWD/" /usr/local/bin/supervisord --childlogdir "$CHILDLOGDIR" --configuration "$PWD/supervisord.conf"
#!/bin/bash set -e PWD="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Determine "childlogdir" cd "$PWD/../" CHILDLOGDIR=`./script/run_with_carton.sh ./script/mediawords_query_config.pl "//supervisor/childlogdir"` if [[ -z "$CHILDLOGDIR" ]]; then echo "\"childlogdir\" is undefined in the configuration." exit 1 fi CHILDLOGDIR="$(cd "$CHILDLOGDIR" && pwd )" ./script/run_with_carton.sh ./script/mediawords_generate_supervisord_conf.pl cd "supervisor/" source "$PWD/supervisor_is_up_to_date.inc.sh" validate_supervisor_version cd "$PWD/" /usr/local/bin/supervisord --childlogdir "$CHILDLOGDIR" --configuration "$PWD/supervisord.conf"
Use the standard agent image
#!/bin/bash set -eu if [[ "${HYPER_SCHEDULER}" == "true" ]]; then echo "--- :hyper-sh: Starting hyper.sh job runner container" hyper \ --config /buildkite-secrets/hyper \ run \ -s "${HYPER_RUNNER_SIZE}" \ -d \ --volumes-from buildkite-data \ -e "BUILDKITE_AGENT_EXIT_AFTER_JOB=true" \ toolmantim/hyper-buildkite-agent:add-scheduler-support \ start \ --meta-data "queue=hyper-job:${BUILDKITE_JOB_ID}" \ --name "hyper-runner-%n" echo "--- :buildkite: Requeuing job to runner container" # XXX This command/endpoint doesn't exist yet buildkite-agent requeue "hyper-job:${BUILDKITE_JOB_ID}" else exec buildkite-agent bootstrap "$@" fi
#!/bin/bash set -eu if [[ "${HYPER_SCHEDULER}" == "true" ]]; then echo "--- :hyper-sh: Starting hyper.sh job runner container" hyper \ --config /buildkite-secrets/hyper \ run \ -s "${HYPER_RUNNER_SIZE}" \ -d \ --volumes-from buildkite-data \ -e "BUILDKITE_AGENT_EXIT_AFTER_JOB=true" \ toolmantim/hyper-buildkite-agent \ start \ --meta-data "queue=hyper-job:${BUILDKITE_JOB_ID}" \ --name "hyper-runner-%n" echo "--- :buildkite: Requeuing job to runner container" # XXX This command/endpoint doesn't exist yet buildkite-agent requeue "hyper-job:${BUILDKITE_JOB_ID}" else exec buildkite-agent bootstrap "$@" fi
Adjust selenium job for import change.
#!/bin/bash SCRIPT_DIR="$( cd "$(dirname "$0")" ; pwd -P )" . "${SCRIPT_DIR}/params.sh" ${ROOT_DIR}/pipeline/stage_integrationtests/trigger.sh \ ${FORWARD_OPTS[@]} \ --sourcedir "${BUILD_DIR}" \ --targetdir "${TEST_DIR}" $@ | ts
#!/bin/bash SCRIPT_DIR="$( cd "$(dirname "$0")" ; pwd -P )" . "${SCRIPT_DIR}/_import.sh" ${ROOT_DIR}/pipeline/stage_integrationtests/trigger.sh \ ${FORWARD_OPTS[@]} \ --sourcedir "${BUILD_DIR}" \ --targetdir "${TEST_DIR}" $@ | ts
Fix NVM for homebrew to the latest code
#!/usr/bin/env zsh # TODO: Slow, lazy load. export NVM_DIR="$HOME/.nvm" if which brew &> /dev/null; then NVM_DIR=$(brew --prefix nvm 2> /dev/null) fi [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"
#!/usr/bin/env zsh # TODO: Slow, lazy load. export NVM_DIR="$HOME/.nvm" # User-specific installation. [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # Homebrew installation. [ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh"
Test docker socket file with -S
#!/bin/bash set -e setup_ecr_credentials_helper if [ -f "/var/run/docker.pid" ];then rm /var/run/docker.pid fi if [ "$RUN_DOCKER_IN_DOCKER" == "1" ]; then /usr/sbin/sshd -D & /bin/dockerd \ --host=unix:///var/run/docker.sock \ --storage-driver=vfs \ "$@" else if [ -f "/var/run/docker.sock" ]; then chown 1000:1000 /var/run/docker.sock fi if [ -d "/data/jenkins-dood" ]; then chown 1000:1000 /data/jenkins-dood usermod -d /data/jenkins-dood jenkins fi /usr/sbin/sshd -D fi
#!/bin/bash set -e setup_ecr_credentials_helper if [ -f "/var/run/docker.pid" ];then rm /var/run/docker.pid fi if [ "$RUN_DOCKER_IN_DOCKER" == "1" ]; then /usr/sbin/sshd -D & /bin/dockerd \ --host=unix:///var/run/docker.sock \ --storage-driver=vfs \ "$@" else if [ -S "/var/run/docker.sock" ]; then docker_gid=$(ls -la /var/run/docker.sock | awk '{print $4}') groupadd -g $docker_gid docker usermod -a -G docker jenkins echo "Added group docker with id $docker_gid and added jenkins user to it" fi if [ -d "/data/jenkins-dood" ]; then chown 1000:1000 /data/jenkins-dood usermod -d /data/jenkins-dood jenkins fi /usr/sbin/sshd -D fi
Add Yarn Workspace command aliases
alias y="yarn" alias ya="yarn add" alias yad="yarn add --dev" alias yap="yarn add --peer" alias yb="yarn build" alias ycc="yarn cache clean" alias yga="yarn global add" alias ygls="yarn global list" alias ygrm="yarn global remove" alias ygu="yarn global upgrade" alias yh="yarn help" alias yi="yarn init" alias yin="yarn install" alias yls="yarn list" alias yout="yarn outdated" alias yp="yarn pack" alias yrm="yarn remove" alias yrun="yarn run" alias ys="yarn serve" alias yst="yarn start" alias yt="yarn test" alias yuc="yarn global upgrade && yarn cache clean" alias yui="yarn upgrade-interactive" alias yup="yarn upgrade"
alias y="yarn" alias ya="yarn add" alias yad="yarn add --dev" alias yap="yarn add --peer" alias yb="yarn build" alias ycc="yarn cache clean" alias yga="yarn global add" alias ygls="yarn global list" alias ygrm="yarn global remove" alias ygu="yarn global upgrade" alias yh="yarn help" alias yi="yarn init" alias yin="yarn install" alias yls="yarn list" alias yout="yarn outdated" alias yp="yarn pack" alias yrm="yarn remove" alias yrun="yarn run" alias ys="yarn serve" alias yst="yarn start" alias yt="yarn test" alias yuc="yarn global upgrade && yarn cache clean" alias yui="yarn upgrade-interactive" alias yup="yarn upgrade" alias yw="yarn workspace" alias yws="yarn workspaces"
Update preset to reflect recent changes.
#!/bin/bash CC=avr-gcc CXX=c++ LINK="-mmcu=atmega128 -Wl,-gc-sections" COMP="-O2 -ggdb -Wa,-mmcu=atmega128 -mmcu=atmega128 -ffunction-sections -fdata-sections" cmake -DARCH=AVR -DWORD=8 -DOPSYS=NONE -DSEED=LIBC -DSHLIB=OFF -DSTBIN=ON -DTIMER=NONE -DWITH="DV;BN;FB;EB;PB;CP;HF" -DBENCH=20 -DTESTS=20 -DCHECK=off -DVERBS=off -DSTRIP=on -DQUIET=on -DARITH=easy -DFB_METHD="INTEG;INTEG;QUICK;BASIC;ALMOS" $1
#!/bin/bash CC=avr-gcc CXX=c++ LINK="-mmcu=atmega128 -Wl,-gc-sections" COMP="-O2 -ggdb -Wa,-mmcu=atmega128 -mmcu=atmega128 -ffunction-sections -fdata-sections" cmake -DARCH=AVR -DWORD=8 -DOPSYS=NONE -DSEED=LIBC -DSHLIB=OFF -DSTBIN=ON -DTIMER=NONE -DWITH="DV;BN;FB;EB;PB;CP;HF" -DBENCH=20 -DTESTS=20 -DCHECK=off -DVERBS=off -DSTRIP=on -DQUIET=on -DARITH=easy -DFB_METHD="INTEG;INTEG;QUICK;QUICK;QUICK;EXGCD" $1
Reduce memory limit to 50 percent
#!/bin/bash /opt/envsubst < /envsubst_template.json > /conf/runtime.json exec java -XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0 -jar /git-bridge.jar /conf/runtime.json
#!/bin/bash /opt/envsubst < /envsubst_template.json > /conf/runtime.json exec java -XX:+UseContainerSupport -XX:MaxRAMPercentage=50.0 -jar /git-bridge.jar /conf/runtime.json
Test started failing b/c not enough workers
#!/bin/bash set -x THIS=$0 SCRIPT=${THIS%.sh}.tcl OUTPUT=${THIS%.sh}.out bin/turbine -l -n 2 ${SCRIPT} >& ${OUTPUT} [[ ${?} == 0 ]] || exit 1 grep -q "container size: 5" ${OUTPUT} || exit 1 exit 0
#!/bin/bash set -x THIS=$0 SCRIPT=${THIS%.sh}.tcl OUTPUT=${THIS%.sh}.out bin/turbine -l -n 3 ${SCRIPT} >& ${OUTPUT} [[ ${?} == 0 ]] || exit 1 grep -q "container size: 5" ${OUTPUT} || exit 1 exit 0
Change how ysniff is invoked
#!/bin/bash # TODO: Click the I Agree button on YaleGuest source /home/pi/.bashrc ifconfig wlan0 up iwconfig wlan0 essid YaleGuest sleep 10 iwconfig wlan0 essid YaleGuest sleep 10 curl --data "email=YaleGuest@yale.edu&cmd=cmd" http://10.160.252.249/auth/index.html/u sudo airmon-ng start wlan0 sudo tcpdump -e -i mon0 | /home/pi/ysniff-software/ysniff.py
#!/bin/bash # TODO: Click the I Agree button on YaleGuest source /home/pi/.bashrc ifconfig wlan0 up iwconfig wlan0 essid YaleGuest sleep 10 iwconfig wlan0 essid YaleGuest sleep 10 curl --data "email=YaleGuest@yale.edu&cmd=cmd" http://10.160.252.249/auth/index.html/u sudo airmon-ng start wlan0 sudo tcpdump -e -i mon0 | sudo /home/pi/ysniff-software/ysniff.py
Update after smalltalkCI's API has been refactored
#!/bin/bash set -ex # project new command tests need this git config --global user.email "you@example.com" git config --global user.name "Your Name" $GS_HOME/bin/devKitCommandLine serverDoIt $1 << EOF | testReport | testReport := SmalltalkCI runCIFor: '$BASE/tests/smalltalkCI.ston' produceXMLLog: false. System commitTransaction. ((testReport suiteFailures > 0) or: [testReport suiteErrors > 0 ]) ifTrue: [ "exit with non-zero exit status" System logout ]. EOF
#!/bin/bash set -ex # project new command tests need this git config --global user.email "you@example.com" git config --global user.name "Your Name" $GS_HOME/bin/devKitCommandLine serverDoIt $1 << EOF | testReport | testReport := SmalltalkCIGemstone loadAndTest: '$BASE/tests/smalltalkCI.ston' produceXMLLog: false. System commitTransaction. ((testReport suiteFailures > 0) or: [testReport suiteErrors > 0 ]) ifTrue: [ "exit with non-zero exit status" System logout ]. EOF
Use arithmetic expression for break request check
#!/usr/bin/env bash # -*- coding: utf-8 -*- #-------------------------- ## @Synopsis Break management module functions ## @Copyright Copyright 2009, James Pic ## @License Apache #-------------------------- ## Is your break granted? #-------------------------- function break_request() { if [[ -z $break_previous ]]; then echo "Granted, enjoy" break_do elif [[ $(( $(date +%s) - $break_previous )) < $break_interval ]]; then echo "Denied, get back to work." else echo "Granted, enjoy" break_do fi } #-------------------------- ## Updates the previous break timestamp and saves for anti-cheat security. #-------------------------- function break_do() { break_previous=$(date +%s) break_conf_save }
#!/usr/bin/env bash # -*- coding: utf-8 -*- #-------------------------- ## @Synopsis Break management module functions ## @Copyright Copyright 2009, James Pic ## @License Apache #-------------------------- ## Is your break granted? #-------------------------- function break_request() { if [[ -z $break_previous ]]; then echo "Granted, enjoy" break_do elif (( $(( $(date +%s) - $break_previous )) < $break_interval )); then echo "Denied, get back to work." else echo "Granted, enjoy" break_do fi } #-------------------------- ## Updates the previous break timestamp and saves for anti-cheat security. #-------------------------- function break_do() { break_previous=$(date +%s) break_conf_save }
Call git submodule before creating packages
#!/bin/sh -e TAG=$1 PREV_TAG=$2 git checkout refs/tags/$TAG git log --pretty=fuller --date=short refs/tags/$PREV_TAG..HEAD > ChangeLog git submodule update --init ./configure --with-mruby && \ make dist-bzip2 && make dist-gzip && make dist-xz || echo "error" make distclean
#!/bin/sh -e TAG=$1 PREV_TAG=$2 git submodule update --init git checkout refs/tags/$TAG git log --pretty=fuller --date=short refs/tags/$PREV_TAG..HEAD > ChangeLog git submodule update --init ./configure --with-mruby && \ make dist-bzip2 && make dist-gzip && make dist-xz || echo "error" make distclean
Use forked evpath until changes make it upstream
#!/usr/bin/env bash set -e set -x shopt -s dotglob readonly name="EVPath" readonly ownership="EVPath Upstream <robot@adios2>" readonly subtree="thirdparty/EVPath/EVPath" readonly repo="https://github.com/GTkorvo/EVPath.git" readonly tag="master" readonly shortlog="true" readonly paths=" " extract_source () { git_archive } . "${BASH_SOURCE%/*}/../update-common.sh"
#!/usr/bin/env bash set -e set -x shopt -s dotglob readonly name="EVPath" readonly ownership="EVPath Upstream <robot@adios2>" readonly subtree="thirdparty/EVPath/EVPath" #readonly repo="https://github.com/GTkorvo/EVPath.git" #readonly tag="master" readonly repo="https://github.com/chuckatkins/EVPath.git" readonly tag="misc-cmake-updates" readonly shortlog="true" readonly paths=" " extract_source () { git_archive } . "${BASH_SOURCE%/*}/../update-common.sh"
Add rust base src path
# Set editor for emacs export EDITOR="emacsclient -t -a ''" export VISUAL="emacsclient -t -a ''" # export path to include personal bin path=(~/bin /usr/local/bin $path) # Get terminal color settings right if [ -e /usr/share/terminfo/x/xterm-256color ] && \ [ "$TERM" = "xterm" ]; then export TERM=xterm-256color fi if [ -e /usr/share/terminfo/s/screen-256color ] && \ [ "$TERM" = "screen" ]; then export TERM=screen-256color fi
# Set editor for emacs export EDITOR="emacsclient -t -a ''" export VISUAL="emacsclient -t -a ''" # export path to include personal bin path=(~/bin /usr/local/bin $path) # Get terminal color settings right if [ -e /usr/share/terminfo/x/xterm-256color ] && \ [ "$TERM" = "xterm" ]; then export TERM=xterm-256color fi if [ -e /usr/share/terminfo/s/screen-256color ] && \ [ "$TERM" = "screen" ]; then export TERM=screen-256color fi # Set rust source path for racer export RUST_SRC_PATH=~/src/rust/src
Correct docs target for API docs
# Java ./go javadocs || exit # Python ./go //py:setup //py:init py_docs || exit # Ruby ./go //rb:docs || exit git checkout rb/Gemfile.lock # switch to gh-pages and copy the files git checkout gh-pages || exit rm -rf docs/api/java docs/api/py docs/api/rb mv build/javadoc docs/api/java mv build/docs/api/py docs/api/py mv build/docs/api/rb docs/api/rb git add -A docs/api read -p "Do you want to commit the chages? (Y/n):" changes </dev/tty if [ -z $changes ]; then changes=Y fi case "$changes" in Y|y) echo "";; N|n) exit;; *) exit;; esac echo "Commiting changes" git commit -am "updating javadoc and py docs" echo "pushing to origin gh-pages" git push origin gh-pages echo "switching back to trunk branch" git checkout trunk
# Java ./go javadocs || exit # Python ./go //py:setup //py:init //py:docs || exit # Ruby ./go //rb:docs || exit git checkout rb/Gemfile.lock # switch to gh-pages and copy the files git checkout gh-pages || exit rm -rf docs/api/java docs/api/py docs/api/rb mv build/javadoc docs/api/java mv build/docs/api/py docs/api/py mv build/docs/api/rb docs/api/rb git add -A docs/api read -p "Do you want to commit the chages? (Y/n):" changes </dev/tty if [ -z $changes ]; then changes=Y fi case "$changes" in Y|y) echo "";; N|n) exit;; *) exit;; esac echo "Commiting changes" git commit -am "updating javadoc and py docs" echo "pushing to origin gh-pages" git push origin gh-pages echo "switching back to trunk branch" git checkout trunk
Revert "Use the latest PhantomJS precompiled binary." Let's see if it helps.
#/bin/sh set -e set -x CHROME_REVISION=142910 sh -e /etc/init.d/xvfb start && git submodule update --init if [[ "$WATIR_WEBDRIVER_BROWSER" = "chrome" ]]; then sudo apt-get install -y unzip libxss1 curl -L -O "http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/$CHROME_REVISION/chrome-linux.zip" unzip chrome-linux.zip curl -L "http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/$CHROME_REVISION/chrome-linux.test/chromedriver" > chrome-linux/chromedriver chmod +x chrome-linux/chromedriver fi if [[ "$WATIR_WEBDRIVER_BROWSER" = "phantomjs" ]]; then curl -L -O "https://dl.dropbox.com/u/2731643/phantomjs/phantomjs-latest.tar.bz2" mkdir phantomjs tar -xvjf phantomjs-latest.tar.bz2 -C phantomjs cat phantomjs/phantomjs.version chmod +x phantomjs/phantomjs sudo cp phantomjs/phantomjs /usr/local/phantomjs/bin/phantomjs phantomjs --version fi
#/bin/sh set -e set -x CHROME_REVISION=142910 sh -e /etc/init.d/xvfb start && git submodule update --init if [[ "$WATIR_WEBDRIVER_BROWSER" = "chrome" ]]; then sudo apt-get install -y unzip libxss1 curl -L -O "http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/$CHROME_REVISION/chrome-linux.zip" unzip chrome-linux.zip curl -L "http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/$CHROME_REVISION/chrome-linux.test/chromedriver" > chrome-linux/chromedriver chmod +x chrome-linux/chromedriver fi if [[ "$WATIR_WEBDRIVER_BROWSER" = "phantomjs" ]]; then curl -L -O "http://phantomjs.googlecode.com/files/phantomjs-1.8.1-linux-i686.tar.bz2" bzip2 -cd phantomjs-1.8.1-linux-i686.tar.bz2 | tar xvf - chmod +x phantomjs-1.8.1-linux-i686/bin/phantomjs sudo cp phantomjs-1.8.1-linux-i686/bin/phantomjs /usr/local/phantomjs/bin/phantomjs phantomjs --version fi
Fix bats location in Travis, maybe?
# This fixes a known bug in Bats (https://github.com/sstephenson/bats/issues/140#issuecomment-206756745) sudo sed -i "20s/.*/ _count=\$(bats-exec-test -c \"\$filename\"); : \"\$(( count+=_count ))\"/" /usr/local/libexec/bats/bats-exec-suite
# This fixes a known bug in Bats (https://github.com/sstephenson/bats/issues/140#issuecomment-206756745) sudo sed -i "20s/.*/ _count=\$(bats-exec-test -c \"\$filename\"); : \"\$(( count+=_count ))\"/" /usr/local/libexec/bats-exec-suite
Stop the attempt to set git bash completion from allowing the script to die.
#!/bin/bash # # Perform final, changes to the Vagrant box. # # Change default editor to Vi (Nano is for pussies) eselect editor set /usr/bin/vi DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Add git autocompletion and branch name to prompt # This depends on having bash-completion being already installed. cat ${DIR}/service-configs/bashrc.root.append >> ~/.bashrc cat ${DIR}/service-configs/bashrc.vagrant.append >> /home/vagrant/.bashrc # Enable git bash completion. source /etc/profile.d/bash-completion.sh eselect bashcomp enable --global git cat ${DIR}/service-configs/bash_profile.append >> ~/.bash_profile cat ${DIR}/service-configs/bash_profile.append >> /home/vagrant/.bash_profile # Correct permissions on bash files produced by the previous commands. chmod 600 /home/vagrant/.bash_profile chown vagrant:vagrant /home/vagrant/.bash_profile chmod 600 /home/vagrant/.bashrc chown vagrant:vagrant /home/vagrant/.bashrc
#!/bin/bash # # Perform final, changes to the Vagrant box. # # Change default editor to Vi (Nano is for pussies) eselect editor set /usr/bin/vi DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Add git autocompletion and branch name to prompt # This depends on having bash-completion being already installed. cat ${DIR}/service-configs/bashrc.root.append >> ~/.bashrc cat ${DIR}/service-configs/bashrc.vagrant.append >> /home/vagrant/.bashrc # Enable git bash completion. source /etc/profile.d/bash-completion.sh # I want this next line to work, but it doesn't. The "true" is to prevent the error stopping the script. eselect bashcomp enable --global git || true cat ${DIR}/service-configs/bash_profile.append >> ~/.bash_profile cat ${DIR}/service-configs/bash_profile.append >> /home/vagrant/.bash_profile # Correct permissions on bash files produced by the previous commands. chmod 600 /home/vagrant/.bash_profile chown vagrant:vagrant /home/vagrant/.bash_profile chmod 600 /home/vagrant/.bashrc chown vagrant:vagrant /home/vagrant/.bashrc
Fix network config in script.
#!/bin/bash apt update && apt upgrade -yqq apt install -yqq \ build-essential \ git \ libcap-dev \ libdw-dev \ libmicrohttpd-dev \ libncurses5-dev \ libnl-3-dev\ libnl-genl-3-dev \ libnm-dev \ libpcap-dev \ libprotobuf-dev \ libprotobuf-c-dev \ libprotoc-dev \ libsensors4-dev \ libsqlite3-dev \ libusb-1.0-0 \ libusb-1.0-0-dev \ pkg-config \ protobuf-compiler \ protobuf-c-compiler \ zlib1g-dev \ cd /root || exit git clone https://www.kismetwireless.net/git/kismet.git cd kismet || exit ./configure make make install make plugins make plugins-install make restricted-plugins make restricted-plugins-install
#!/bin/bash # https://www.kali.org/news/kali-on-krack/ if ! grep "unmanaged-devices=interface-name:wlan0" /etc/NetworkManager/NetworkManager.conf ; then cat << EOF >> /etc/NetworkManager/NetworkManager.conf [keyfile] unmanaged-devices=interface-name:wlan0 unmanaged-devices=interface-name:wlan1 EOF systemctl restart NetworkManager fi apt update && apt upgrade -yqq apt install -yqq \ build-essential \ git \ libcap-dev \ libdw-dev \ libmicrohttpd-dev \ libncurses5-dev \ libnl-3-dev\ libnl-genl-3-dev \ libnm-dev \ libpcap-dev \ libprotobuf-dev \ libprotobuf-c-dev \ libprotoc-dev \ libsensors4-dev \ libsqlite3-dev \ libusb-1.0-0 \ libusb-1.0-0-dev \ pkg-config \ protobuf-compiler \ protobuf-c-compiler \ zlib1g-dev \ cd /root || exit git clone https://www.kismetwireless.net/git/kismet.git cd kismet || exit ./configure make make install make plugins make plugins-install make restricted-plugins make restricted-plugins-install
Fix embedded files generation on OS X.
#!/bin/sh set -e cat <<EOF package blob var files = map [string] map [string] []byte { EOF ORIGINAL_PWD=${PWD} for dir in $@ do cd "${dir}" echo "\"$(basename ${dir})\": {" # Do not embed map files and the non-minified bootstrap files. # TODO(beorn7): There should be a better solution than hardcoding the # exclusion here. We might want to switch to a less makeshift way of # embedding files into the binary anyway... find . -type f \! -name \*.map \! -name bootstrap.js \! -name bootstrap-theme.css \! -name bootstrap.css | while read file do name=$(echo "${file}"|sed 's|\.\/||') echo -n "\"$name\": []byte(\"" gzip -9 -c "${file}" | xxd -p | tr -d '\n' | sed 's/\(..\)/\\x\1/g' echo "\")," echo done echo "}," cd "${ORIGINAL_PWD}" done echo '}'
#!/bin/sh set -e cat <<EOF package blob var files = map [string] map [string] []byte { EOF ORIGINAL_PWD=${PWD} for dir in $@ do cd "${dir}" echo "\"$(basename ${dir})\": {" # Do not embed map files and the non-minified bootstrap files. # TODO(beorn7): There should be a better solution than hardcoding the # exclusion here. We might want to switch to a less makeshift way of # embedding files into the binary anyway... find . -type f \! -name \*.map \! -name bootstrap.js \! -name bootstrap-theme.css \! -name bootstrap.css | while read file do name=$(echo "${file}"|sed 's|\.\/||') # Using printf here instead of "echo -n" because the latter doesn't work on Mac OS X: # http://hints.macworld.com/article.php?story=20071106192548833. printf "\"$name\": []byte(\"" # The second newline deletion at the end is required for Mac OS X as well, # as sed outputs a trailing newline there. gzip -9 -c "${file}" | xxd -p | tr -d '\n' | sed 's/\(..\)/\\x\1/g' | tr -d '\n' echo "\")," echo done echo "}," cd "${ORIGINAL_PWD}" done echo '}'
Revert "Retry failed release downloads"
#!/bin/bash # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Download and install release # This script assumes that the environment variable MASTER_RELEASE_TAR contains # the release tar to download and unpack. It is meant to be pushed to the # master and run. echo "Downloading release ($MASTER_RELEASE_TAR)" until gsutil cp $MASTER_RELEASE_TAR master-release.tgz; do sleep 1 ; echo "Retrying master download" ; done echo "Unpacking release" rm -rf master-release || false tar xzf master-release.tgz echo "Running release install script" sudo master-release/src/scripts/master-release-install.sh
#!/bin/bash # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Download and install release # This script assumes that the environment variable MASTER_RELEASE_TAR contains # the release tar to download and unpack. It is meant to be pushed to the # master and run. echo "Downloading release ($MASTER_RELEASE_TAR)" gsutil cp $MASTER_RELEASE_TAR master-release.tgz echo "Unpacking release" rm -rf master-release || false tar xzf master-release.tgz echo "Running release install script" sudo master-release/src/scripts/master-release-install.sh
Remove needless printing of how we print a dot
#!/bin/bash set +e # failure OK for now... set -x pushd /vagrant/sandcats MONGO_URL=mongodb://localhost/sandcats_mongo meteor run --settings /etc/sandcats-meteor-settings.json & popd # Wait for Meteor to come online, up to N seconds. for i in $(seq 90) do nc -z localhost 3000 retval=$? if [[ $retval == "0" ]]; then echo -n '+' break else sleep 1 echo -n '.' fi done # Make sure anything we prented before is newline-terminated. echo set -e # Failure is no longer OK! # Restart nginx, in case it is wants to be all 502-y sudo service nginx restart # Now, actually run the tests make action-run-tests
#!/bin/bash set +e # failure OK for now... set -x pushd /vagrant/sandcats MONGO_URL=mongodb://localhost/sandcats_mongo meteor run --settings /etc/sandcats-meteor-settings.json & popd # Wait for Meteor to come online, up to N seconds. set -x for i in $(seq 90) do nc -z localhost 3000 retval=$? if [[ $retval == "0" ]]; then echo -n '+' break else sleep 1 echo -n '.' fi done # Make sure anything we prented before is newline-terminated. echo set -e # Failure is no longer OK! # Restart nginx, in case it is wants to be all 502-y sudo service nginx restart # Now, actually run the tests make action-run-tests
Upgrade with nodered and cloud9
#!/bin/bash # script name: upgrade_jns.sh # last modified: 2017/03/05 # sudo: yes if [ $(whoami) != 'root' ]; then echo "Must be root to run $0" exit 1; fi START=$SECONDS # generate list of outdated packages echo ">>> CHECKING INSTALLATION FOR OUTDATED PACKAGES..." lst=(`pip3 list --outdated --format='legacy'|grep -o '^\S*'`) # process list of outdated packages if [ ${#lst[@]} -eq 0 ]; then echo ">>> INSTALLATION UP TO DATE" exit 1; else echo ">>> UPGRADING PACKAGES" for i in ${lst[@]}; do pip3 install ${i} --upgrade done fi ELAPSED=$(($SECONDS - $START)) echo $ELAPSED
#!/bin/bash # script name: upgrade_jns.sh # last modified: 2017/03/05 # sudo: yes if [ $(id -u) = 0 ]; then echo "to be run with no sudo" exit 1 fi START=$SECONDS sudo apt-get update sudo apt-get upgrade # generate list of outdated packages echo ">>> CHECKING INSTALLATION FOR OUTDATED PACKAGES..." lst=(`pip3 list --outdated --format='legacy'|grep -o '^\S*'`) # process list of outdated packages if [ ${#lst[@]} -eq 0 ]; then echo ">>> INSTALLATION UP TO DATE" exit 1; else echo ">>> UPGRADING PACKAGES" for i in ${lst[@]}; do sudo pip3 install ${i} --upgrade done fi if [[ -d $HOME/cloud9 ]]; then cd ${HOME}/cloud9 git pull origin master ./scripts/install-sdk.sh cd - fi if [[ -f /usr/bin/node-red ]]; then curl -sL https://raw.githubusercontent.com/node-red/raspbian-deb-package/master/resources/update-nodejs-and-nodered > /tmp/update-nodejs-and-nodered chmod u+x /tmp/update-nodejs-and-nodered ./expect_nodered.sh fi ELAPSED=$(($SECONDS - $START)) echo $ELAPSED
Add progress bar on script
#!/bin/bash CSVFILE=`pwd` CSVFILE+='/datas.csv' echo 'date,commits,filePath' > $CSVFILE if [ $# = 1 ] then cd $1 fi git ls-tree -r --name-only HEAD | while read filename; do echo "$(git log -1 --format="%ad" -- $filename),$(git log --oneline $filename | wc -l | tr -d ' '),$filename" >> $CSVFILE done
#!/bin/bash CSVFILE=`pwd` CSVFILE+='/datas.csv' echo 'date,commits,filePath' > $CSVFILE if [ $# = 1 ] then cd $1 fi nbFile=`git ls-tree -r --name-only HEAD | wc -l | tr -d ' '` echo "Number of files: $nbFile" git ls-tree -r --name-only HEAD | while ((i++)); read filename; do echo "$(git log -1 --format="%ad" -- $filename),$(git log --oneline $filename | wc -l | tr -d ' '),$filename" >> $CSVFILE percent=$(($i*100/$nbFile)) echo -en "\r" seq -f "=" -s '' $percent echo -en ">$percent%" done echo -en "\n"
Install specific version of bundler for Ruby
#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # connect to avro ruby root directory cd `dirname "$0"` # maintain our gems here export GEM_HOME=.gem/ export PATH="$PATH:.gem/bin" # bootstrap bundler gem install --no-rdoc --no-ri bundler bundle install case "$1" in test) bundle exec rake test ;; dist) bundle exec rake dist ;; clean) bundle exec rake clean rm -rf tmp avro.gemspec data.avr ;; *) echo "Usage: $0 {test|dist|clean}" exit 1 esac exit 0
#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # connect to avro ruby root directory cd `dirname "$0"` # maintain our gems here export GEM_HOME=.gem/ export PATH="$PATH:.gem/bin" # bootstrap bundler gem install --no-document -v 1.17.3 bundler bundle install case "$1" in test) bundle exec rake test ;; dist) bundle exec rake dist ;; clean) bundle exec rake clean rm -rf tmp avro.gemspec data.avr ;; *) echo "Usage: $0 {test|dist|clean}" exit 1 esac exit 0
Revert "tmp: force build-from-source on node"
#!/usr/bin/env bash set -euo pipefail ( [[ "${OS}" != "linux" ]] || { # if a node bottle doesn't exist, we need to compile node with clang instead of gcc # to get around "out of memory" issues brew_install llvm export PATH=$(brew --prefix)/opt/llvm/bin:${PATH} export C=clang export CXX=clang++ export LDFLAGS="-L$(brew --prefix)/opt/llvm/lib -Wl,-rpath,$(brew --prefix)/opt/llvm/lib" } brew uninstall node || true brew install --build-from-source node npm install --global npm@6 npm install --global json@9 echo_done ) echo_do "brew: Testing NodeJS packages..." exe_and_grep_q "node --version | head -1" "^v" exe_and_grep_q "npm --version | head -1" "^6\." exe_and_grep_q "json --version | head -1" "^json 9\." echo_done
#!/usr/bin/env bash set -euo pipefail ( [[ "${OS}" != "linux" ]] || { # if a node bottle doesn't exist, we need to compile node with clang instead of gcc # to get around "out of memory" issues brew_install llvm export PATH=$(brew --prefix)/opt/llvm/bin:${PATH} export C=clang export CXX=clang++ export LDFLAGS="-L$(brew --prefix)/opt/llvm/lib -Wl,-rpath,$(brew --prefix)/opt/llvm/lib" } echo_do "brew: Installing NodeJS packages..." BREW_FORMULAE="$(cat <<-EOF node EOF )" brew_install "${BREW_FORMULAE}" unset BREW_FORMULAE npm install --global npm@6 npm install --global json@9 echo_done ) echo_do "brew: Testing NodeJS packages..." exe_and_grep_q "node --version | head -1" "^v" exe_and_grep_q "npm --version | head -1" "^6\." exe_and_grep_q "json --version | head -1" "^json 9\." echo_done
Update test ci backend. Semgrep timeout to 60
#!/usr/bin/env bash set -e # run black - make sure everyone uses same python style black --skip-string-normalization --line-length 120 --check tests black --skip-string-normalization --line-length 120 --check src # run isort for import structure checkup with black profile isort --atomic --profile black -c src isort --atomic --profile black -c tests # change to src directory to run all the necessary scripts on the correct path cd src || exit # run mypy mypy . # run bandit - A security linter from OpenStack Security bandit -r . # python static analysis prospector --profile=../.prospector.yml --path=. --ignore-patterns=static # run semgrep semgrep --strict --error --config ../.semgrep_rules.yml . # back to root of the project cd .. # python tests py.test -c pytest_ci.ini -x --disable-socket -W error::RuntimeWarning --cov=src --cov-fail-under=100
#!/usr/bin/env bash set -e # run black - make sure everyone uses same python style black --skip-string-normalization --line-length 120 --check tests black --skip-string-normalization --line-length 120 --check src # run isort for import structure checkup with black profile isort --atomic --profile black -c src isort --atomic --profile black -c tests # change to src directory to run all the necessary scripts on the correct path cd src || exit # run mypy mypy . # run bandit - A security linter from OpenStack Security bandit -r . # python static analysis prospector --profile=../.prospector.yml --path=. --ignore-patterns=static # run semgrep semgrep --timeout 60 --strict --error --config ../.semgrep_rules.yml . # back to root of the project cd .. # python tests py.test -c pytest_ci.ini -x --disable-socket -W error::RuntimeWarning --cov=src --cov-fail-under=100
Fix broken conditional in gh-pages deployment.
#!/bin/bash set -e # Exit with nonzero exit code if anything fails # If we're on master, build for gh-pages & push to that branch if [ $TRAVIS_PULL_REQUEST = "false" ] && [ $TRAVIS_BRANCH = $SOURCE_BRANCH ]; then mv dist/* . rmdir dist git add . --all git commit -m "Build for gh-pages: ${SHA}" git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH git merge -s recursive -X theirs TEMP_BRANCH -m "Merge into gh-pages: ${SHA}" || true git status --porcelain | awk '{if ($1=="DU") print $2}' | xargs git rm git add . git commit -m "Merge into gh-pages: ${SHA}" ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key" ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv" ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR} ENCRYPTED_IV=${!ENCRYPTED_IV_VAR} openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in deploy_key.enc -out deploy_key -d chmod 600 deploy_key eval `ssh-agent -s` ssh-add deploy_key git push $SSH_REPO $TARGET_BRANCH git branch -D TEMP_BRANCH fi
#!/bin/bash set -e # Exit with nonzero exit code if anything fails # If we're on master, build for gh-pages & push to that branch if [[ $TRAVIS_PULL_REQUEST = "false" ]] && [[ $TRAVIS_BRANCH = $SOURCE_BRANCH ]]; then mv dist/* . rmdir dist git add . --all git commit -m "Build for gh-pages: ${SHA}" git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH git merge -s recursive -X theirs TEMP_BRANCH -m "Merge into gh-pages: ${SHA}" || true git status --porcelain | awk '{if ($1=="DU") print $2}' | xargs git rm git add . git commit -m "Merge into gh-pages: ${SHA}" ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key" ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv" ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR} ENCRYPTED_IV=${!ENCRYPTED_IV_VAR} openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in deploy_key.enc -out deploy_key -d chmod 600 deploy_key eval `ssh-agent -s` ssh-add deploy_key git push $SSH_REPO $TARGET_BRANCH git branch -D TEMP_BRANCH fi
Add glr for rebasing pull.
# Use `hub` as our git wrapper: # http://defunkt.github.com/hub/ hub_path=$(which hub) if (( $+commands[hub] )) then alias git=$hub_path fi # The rest of my fun git aliases alias gl='git pull --prune' alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative" alias gp='git push origin HEAD' alias gd='git diff' alias gds='git diff --staged' alias gc='git commit' alias gca='git commit -a' alias gco='git checkout' alias gb='git branch' alias gs='git status -sb' # upgrade your git if -sb breaks for you. it's fun. alias grm="git status | grep deleted | awk '{\$1=\$2=\"\"; print \$0}' | \ perl -pe 's/^[ \t]*//' | sed 's/ /\\\\ /g' | xargs git rm" alias ga='git add'
# Use `hub` as our git wrapper: # http://defunkt.github.com/hub/ hub_path=$(which hub) if (( $+commands[hub] )) then alias git=$hub_path fi # The rest of my fun git aliases alias gl='git pull --prune' alias glr='git pull --rebase --prune' alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative" alias gp='git push origin HEAD' alias gd='git diff' alias gds='git diff --staged' alias gc='git commit' alias gca='git commit -a' alias gco='git checkout' alias gb='git branch' alias gs='git status -sb' # upgrade your git if -sb breaks for you. it's fun. alias grm="git status | grep deleted | awk '{\$1=\$2=\"\"; print \$0}' | \ perl -pe 's/^[ \t]*//' | sed 's/ /\\\\ /g' | xargs git rm" alias ga='git add'
Remove node archive after extracting
# /bin/bash version=`node -v` versionMinusV=`echo $version | cut -c 2-` nodeRoot="node-$versionMinusV" if [ ! -d $nodeRoot ]; then url="https://codeload.github.com/nodejs/node/tar.gz/$version" curl $url -o "$version.tar.gz" tar -xf "$version.tar.gz" fi exec node http.test.js `pwd`/$nodeRoot
# /bin/bash version=`node -v` versionMinusV=`echo $version | cut -c 2-` nodeRoot="node-$versionMinusV" if [ ! -d $nodeRoot ]; then url="https://codeload.github.com/nodejs/node/tar.gz/$version" curl $url -o "$version.tar.gz" tar -xf "$version.tar.gz" rm "$version.tar.gz" fi exec node http.test.js `pwd`/$nodeRoot
Add apt-get update at beginning
#!/bin/bash # # This script is run by Vagrant when a new machine is provisioned. # sudo apt-get -y install git sudo apt-get -y install python-software-properties curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - sudo apt-get update sudo apt-get -y install nodejs sudo apt-get -y install firefox sudo apt-get -y install libgl1-mesa-glx cd /home/vagrant/ && wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb sudo dpkg -i google-chrome-stable_current_amd64.deb sudo apt-get -y install -f #Not the prettiest way of doing this ^ but it works. cd /home/vagrant/pxt && rm -r * && rm -r .* cd /home/vagrant/pxt && git clone https://github.com/microsoft/pxt . git clone https://github.com/microsoft/pxt-microbit cd /home/vagrant/pxt sudo npm install -g jake sudo npm install -g typings sudo npm install sudo typings install sudo jake sudo npm install -g pxt sudo pxt target microbit cd pxt-microbit sudo npm install -g pxt sudo npm install pxt serve
#!/bin/bash # # This script is run by Vagrant when a new machine is provisioned. # sudo apt-get update sudo apt-get -y install git sudo apt-get -y install python-software-properties curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - sudo apt-get update sudo apt-get -y install nodejs sudo apt-get -y install firefox sudo apt-get -y install libgl1-mesa-glx cd /home/vagrant/ && wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb sudo dpkg -i google-chrome-stable_current_amd64.deb sudo apt-get -y install -f #Not the prettiest way of doing this ^ but it works. cd /home/vagrant/pxt && rm -r * && rm -r .* cd /home/vagrant/pxt && git clone https://github.com/microsoft/pxt . git clone https://github.com/microsoft/pxt-microbit cd /home/vagrant/pxt sudo npm install -g jake sudo npm install -g typings sudo npm install sudo typings install sudo jake sudo npm install -g pxt sudo pxt target microbit cd pxt-microbit sudo npm install -g pxt sudo npm install pxt serve
Make backup script more portable
#!/bin/bash #Purpose = Backup wordpress directory to tar.gz and database to gz then upload to S3/DreamObjects #Created on 2016-05-04 #Author = Jed White #Version 1.0 #START TIME=`date +%b-%d-%y` # Include date in filename FILENAME=backup-$TIME.tar.gz # filename format DBFILE=wp-backup-$TIME.sql DBARCHIVE=wp-backup-$TIME.sql.gz SRCDIR=~/website # Source directory to backup DESDIR=~/backup # Local destination for backup files S3BUCKET=s3://bucketname/path/ # S3 Bucket to send backup to S3ENDPOINT="--endpoint-url http://objects.dreamhost.com" # Endpoint for DreamObjects CURRENTPATH="$PWD" tar -cpzf $DESDIR/$FILENAME $SRCDIR cd $SRCDIR wp db export $DESDIR/$DBFILE gzip $DESDIR/$DBFILE aws $S3ENDPOINT s3 cp $DESDIR/$FILENAME $S3BUCKET aws $S3ENDPOINT s3 cp $DESDIR/$DBARCHIVE $S3BUCKET rm -f $DESDIR/$FILENAME rm -f $DESDIR/$DBARCHIVE cd "$CURRENTPATH" #END
#!/bin/bash # Purpose: Backup wordpress directory to tar.gz and database to gz then upload to S3/DreamObjects # Created: 2016-05-04 # Author: Jed White # Version: 1.0 # Website information SITENAME="website" S3BUCKET="bucketname" # Setup TIME=`date +%Y-%m-%d` SITE="${SITENAME//./_}" FILENAME="${SITE}_files_backup-$TIME.tar.gz" DBFILE="${SITE}_wp_backup_$TIME.sql" DBARCHIVE=${DBFILE}.gz CURRENTPATH="$PWD" SRCDIR=~/${SITENAME} DESDIR=~/backups # Endpoint required for DreamObjects S3ENDPOINT="--endpoint-url http://objects.dreamhost.com" S3DESTINATION="s3://${S3BUCKET}/${SITE}" # Create backup files tar -cpzf $DESDIR/$FILENAME $SRCDIR cd $SRCDIR wp db export $DESDIR/$DBFILE gzip $DESDIR/$DBFILE # Upload to AWS aws $S3ENDPOINT s3 cp $DESDIR/$FILENAME $S3DESTINATION aws $S3ENDPOINT s3 cp $DESDIR/$DBARCHIVE $S3DESTINATION # Cleanup rm -f $DESDIR/$FILENAME rm -f $DESDIR/$DBARCHIVE cd "$CURRENTPATH"
Add backup of running pods and services
#!/bin/sh umask 022 cd /opt/jackett || exit exec mono --debug JackettConsole.exe --NoRestart -DataFolder=/var/jackett sleep 2
#!/bin/sh umask 022 cd /opt/jackett || exit export XDG_CONFIG_HOME=/var/jackett exec mono --debug JackettConsole.exe --NoRestart sleep 2
Add missing upstream to git script
#!/bin/bash cd ~/dotfiles git remote rm origin git remote add origin git@github.com:Benoth/dotfiles.git git push origin master echo "" echo "Do not forget to add in ~/.ssh/config :" echo " Host github.com" echo " HostName github.com" echo " User git" echo " IdentityFile ~/.ssh/keys/personnal-key.rsa" echo ""
#!/bin/bash cd ~/dotfiles git remote rm origin git remote add origin git@github.com:Benoth/dotfiles.git git push origin master git branch --set-upstream-to=origin/master master echo "" echo "Do not forget to add in ~/.ssh/config :" echo " Host github.com" echo " HostName github.com" echo " User git" echo " IdentityFile ~/.ssh/keys/personnal-key.rsa" echo ""
Fix yet another POSIX shell issue.
#!/bin/sh # # Deploy a snapshot build to Sonatype. Only non-pull requests will be deployed. # if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then mvn --settings scripts/mvn_settings.xml -DskipTests=true source:jar javadoc:jar deploy fi
#!/bin/sh # # Deploy a snapshot build to Sonatype. Only non-pull requests will be deployed. # if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then mvn --settings scripts/mvn_settings.xml -DskipTests=true source:jar javadoc:jar deploy fi
Verify on command line, exit code of bash script to hit server with curl.
#!/bin/bash OUTPUT="$(curl --request POST -d radsauce http://localhost:3100/foo -H "Content-Type: text/plain" 2>/dev/null )" echo "${OUTPUT}" # if [ "${OUTPUT}" = "radsauce" ]; then echo "gimme things 'cause it's the same: OUTPUT is ${OUTPUT}"; fi #else #echo "nope failure";
#!/bin/bash #terminal 1: run PORT=3100 node app.js #expected output after terminal 2 curl's: #radsauce #terminal 2: ./verifyscript.sh && echo "it worked" || echo "fail" #expected output: # radsauce # it worked OUTPUT="$(curl --request POST -d radsauce http://localhost:3100/foo -H "Content-Type: text/plain" 2>/dev/null )" echo "${OUTPUT}" if [ "${OUTPUT}" = "radsauce" ]; then #this next line is unnecessary, just exists to clarify purpose: echo "success, child process exit code 0: OUTPUT is ${OUTPUT}"; exit 0; else #this next line is unnecessary, just exists to clarify purpose: echo "failure, child process exit code 1: OUTPUT is ${OUTPUT}"; exit 1; fi
Remove project artifacts on CI completion
#!/bin/sh set -e set -x export CI=true mvn -U deploy mvn sonar:sonar
#!/bin/sh set -e cleanup() { mvn build-helper:remove-project-artifact } trap cleanup 0 INT TERM QUIT export CI=true set -x mvn -U deploy mvn sonar:sonar
Add CentOS 6.4 template to the old template build script.
#!/bin/bash #Go into the old_templates folder first; every other command will get run relative to this folder cd old_templates/ #Debian 7.1 cd ./debian-7.1-amd64 packer build --only=virtualbox template.json #Ubuntu 12.10 cd ../ubuntu-12.10-server-amd64 packer build --only=virtualbox template.json #Ubuntu 13.04 cd ../ubuntu-13.04-server-amd64 packer build --only=virtualbox template.json
#!/bin/bash #Go into the old_templates folder first; every other command will get run relative to this folder cd old_templates/ #Debian 7.1 cd ./debian-7.1-amd64 packer build --only=virtualbox template.json #CentOS 6.4 cd ../centos-6.4-amd64 packer build --only=virtualbox template.json #Ubuntu 12.10 cd ../ubuntu-12.10-server-amd64 packer build --only=virtualbox template.json #Ubuntu 13.04 cd ../ubuntu-13.04-server-amd64 packer build --only=virtualbox template.json
Add back the `-` alias to go to the previous directory
# Changing/making/removing directory setopt auto_pushd setopt pushd_ignore_dups setopt pushdminus alias -g ...='../..' alias -g ....='../../..' alias -g .....='../../../..' alias -g ......='../../../../..' alias 1='cd -' alias 2='cd -2' alias 3='cd -3' alias 4='cd -4' alias 5='cd -5' alias 6='cd -6' alias 7='cd -7' alias 8='cd -8' alias 9='cd -9' alias md='mkdir -p' alias rd=rmdir alias d='dirs -v | head -10' # List directory contents alias lsa='ls -lah' alias l='ls -lah' alias ll='ls -lh' alias la='ls -lAh' # Push and pop directories on directory stack alias pu='pushd' alias po='popd'
# Changing/making/removing directory setopt auto_pushd setopt pushd_ignore_dups setopt pushdminus alias -g ...='../..' alias -g ....='../../..' alias -g .....='../../../..' alias -g ......='../../../../..' alias -- -='cd -' alias 1='cd -' alias 2='cd -2' alias 3='cd -3' alias 4='cd -4' alias 5='cd -5' alias 6='cd -6' alias 7='cd -7' alias 8='cd -8' alias 9='cd -9' alias md='mkdir -p' alias rd=rmdir alias d='dirs -v | head -10' # List directory contents alias lsa='ls -lah' alias l='ls -lah' alias ll='ls -lh' alias la='ls -lAh' # Push and pop directories on directory stack alias pu='pushd' alias po='popd'
Validate that all the containers are started
#!/bin/bash set -o xtrace set -o errexit export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" function print_failure { docker ps -a for failed in $(docker ps -a --format "{{.Names}}" --filter status=exited); do docker logs --tail=all $failed done echo "FAILED" exit 1 } # Populate globals.yml cat << EOF > /etc/kolla/globals.yml --- kolla_base_distro: "$1" kolla_install_type: "$2" kolla_internal_address: "169.254.169.10" docker_restart_policy: "never" network_interface: "eth0" neutron_external_interface: "fake_interface" EOF # Create dummy interface for neutron ip l a fake_interface type dummy # Actually do the deployment tools/kolla-ansible deploy || print_failure # TODO(SamYaple): Actually validate that all containers are started docker ps -a # TODO(SamYaple): Actually do functional testing of OpenStack
#!/bin/bash set -o xtrace set -o errexit export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" function check_failure { docker ps -a failed_containers=$(docker ps -a --format "{{.Names}}" --filter status=exited) for failed in ${failed_containers}; do docker logs --tail all ${failed} done if [[ -n ${failed_containers} ]]; then echo 'FAILED' exit 1 fi } # Populate globals.yml cat << EOF > /etc/kolla/globals.yml --- kolla_base_distro: "$1" kolla_install_type: "$2" kolla_internal_address: "169.254.169.10" docker_restart_policy: "never" network_interface: "eth0" neutron_external_interface: "fake_interface" EOF # Create dummy interface for neutron ip l a fake_interface type dummy # Actually do the deployment tools/kolla-ansible deploy check_failure # TODO(SamYaple): Actually do functional testing of OpenStack
Add some system packages for Arch
if [[ ${ZSH_DOTFILES_OS} == "Mac" ]]; then brew install python3 httpie fd exa ripgrep htop ranger task timewarrior elif [[ ${ZSH_DOTFILES_OS} == "ArchLinux" ]]; then sudo pacman -Sy --needed httpie python neovim ranger fd exa ripgrep htop task timewarrior elif [[ ${ZSH_DOTFILES_OS} == "Debian" ]]; then sudo apt-get install httpie ranger curl -LO https://github.com/BurntSushi/ripgrep/releases/download/0.8.1/ripgrep_0.8.1_amd64.deb sudo dpkg -i ripgrep_0.8.1_amd64.deb fi
if [[ ${ZSH_DOTFILES_OS} == "Mac" ]]; then brew install python3 httpie fd exa ripgrep htop ranger task timewarrior elif [[ ${ZSH_DOTFILES_OS} == "ArchLinux" ]]; then sudo pacman -Sy --needed httpie python neovim ranger fd exa ripgrep htop task timew dep elif [[ ${ZSH_DOTFILES_OS} == "Debian" ]]; then sudo apt-get install httpie ranger curl -LO https://github.com/BurntSushi/ripgrep/releases/download/0.8.1/ripgrep_0.8.1_amd64.deb sudo dpkg -i ripgrep_0.8.1_amd64.deb fi
Use the same idiom for timeouts as in the other tests.
#!/bin/sh . $(dirname $0)/testsuite-common.sh setup_test run_daemon # wait for the pid file to appear limit=20 while [ ! -s "${pidfile}" ] ; do [ $((elapsed+=1)) -lt $limit ] || fail "timed out waiting for pid file to appear" sleep 1 done notice "pid file appeared after $elapsed seconds" # kill tsdfx kill "$(cat ${pidfile})" notice "killed daemon" # wait for the pid file to vanish limit=20 while [ -s "${pidfile}" ] ; do [ $((elapsed+=1)) -lt $limit ] || fail "timed out waiting for pid file to vanish" sleep 1 done notice "pid file vanished after $elapsed seconds" cleanup_test
#!/bin/sh . $(dirname $0)/testsuite-common.sh setup_test run_daemon # Timeout for various operations timeout=10 # wait for the pid file to appear elapsed=0 while [ ! -s "${pidfile}" ] ; do [ $((elapsed+=1)) -le "${timeout}" ] || fail "timed out waiting for pid file to appear" sleep 1 done notice "pid file appeared after $elapsed seconds" # kill tsdfx kill "$(cat ${pidfile})" notice "killed daemon" # wait for the pid file to vanish elapsed=0 while [ -s "${pidfile}" ] ; do [ $((elapsed+=1)) -le "${timeout}" ] || fail "timed out waiting for pid file to vanish" sleep 1 done notice "pid file vanished after $elapsed seconds" cleanup_test
Change to send data to SonarQube by branch
branch=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p') echo 'Branch:' $branch dotnet ../tools/sonar-scanner-msbuild/SonarScanner.MSBuild.dll begin \ /k:"GeneticSharp" \ /v:$branch \ /d:sonar.organization="giacomelli-github" \ /d:sonar.host.url="https://sonarcloud.io" \ /d:sonar.login=$GeneticSharp_SonarQube_login \ /d:sonar.cs.opencover.reportsPaths="**/*.opencover.xml" \ /d:sonar.exclusions="**/*Test.cs,**/Samples/*.cs,MainWindow.cs,Program.cs,PropertyEditor.cs,*.xml" dotnet clean dotnet build -c release dotnet test GeneticSharp.Domain.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet test GeneticSharp.Extensions.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet test GeneticSharp.Infrastructure.Framework.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet ../tools/sonar-scanner-msbuild/SonarScanner.MSBuild.dll end \ /d:sonar.login=$GeneticSharp_SonarQube_login
branch=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p') echo 'Branch:' $branch dotnet ../tools/sonar-scanner-msbuild/SonarScanner.MSBuild.dll begin \ /k:"GeneticSharp" \ /d:sonar.branch.name=$branch \ /d:sonar.organization="giacomelli-github" \ /d:sonar.host.url="https://sonarcloud.io" \ /d:sonar.login=$GeneticSharp_SonarQube_login \ /d:sonar.cs.opencover.reportsPaths="**/*.opencover.xml" \ /d:sonar.exclusions="**/*Test.cs,**/Samples/*.cs,MainWindow.cs,Program.cs,PropertyEditor.cs,*.xml" dotnet clean dotnet build -c release dotnet test GeneticSharp.Domain.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet test GeneticSharp.Extensions.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet test GeneticSharp.Infrastructure.Framework.UnitTests -f netcoreapp2.0 /p:CollectCoverage=true /p:CoverletOutputFormat=opencover dotnet ../tools/sonar-scanner-msbuild/SonarScanner.MSBuild.dll end \ /d:sonar.login=$GeneticSharp_SonarQube_login
Use rclone to delete results in parallel
#!/usr/bin/env bash #mkdir empty_dir empty_dir=$( mktemp -d ) rsync -a -L --delete --progress $empty_dir/ logs/ rsync -a -L --delete --progress $empty_dir/ output/ rm core* rmdir $empty_dir
#!/usr/bin/env bash rclone delete logs/ --multi-thread-streams=16 rclone delete output/ --multi-thread-streams=16 rclone purge logs/ rclone purge output/ mkdir logs mkdir output
Install asciidoc sources as well.
#!/bin/sh T="$1" for h in *.html howto/*.txt howto/*.html do diff -u -I'Last updated [0-9][0-9]-[A-Z][a-z][a-z]-' "$T/$h" "$h" || { echo >&2 "# install $h $T/$h" rm -f "$T/$h" mkdir -p `dirname "$T/$h"` cp "$h" "$T/$h" } done strip_leading=`echo "$T/" | sed -e 's|.|.|g'` for th in "$T"/*.html "$T"/howto/*.txt "$T"/howto/*.html do h=`expr "$th" : "$strip_leading"'\(.*\)'` case "$h" in index.html) continue ;; esac test -f "$h" && continue echo >&2 "# rm -f $th" rm -f "$th" done ln -sf git.html "$T/index.html"
#!/bin/sh T="$1" for h in *.html *.txt howto/*.txt howto/*.html do diff -u -I'Last updated [0-9][0-9]-[A-Z][a-z][a-z]-' "$T/$h" "$h" || { echo >&2 "# install $h $T/$h" rm -f "$T/$h" mkdir -p `dirname "$T/$h"` cp "$h" "$T/$h" } done strip_leading=`echo "$T/" | sed -e 's|.|.|g'` for th in "$T"/*.html "$T"/*.txt "$T"/howto/*.txt "$T"/howto/*.html do h=`expr "$th" : "$strip_leading"'\(.*\)'` case "$h" in index.html) continue ;; esac test -f "$h" && continue echo >&2 "# rm -f $th" rm -f "$th" done ln -sf git.html "$T/index.html"
Add docker, postgres, teamviewer, mountebank
#!/usr/bin/env bash ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" brew update brew upgrade brew install android-platform-tools brew install ansible brew install awscli brew install bash-git-prompt brew install carthage brew install cowsay brew install exercism brew install git brew install gitsh brew install gradle brew install imagemagick brew install leiningen brew install node brew install pyenv brew install python brew install ruby brew install scala brew install tig brew install tmux brew install watch brew cask install aerial brew cask install android-studio brew cask install appcode brew cask install backblaze brew cask install bonjour-browser brew cask install calibre brew cask install flux brew cask install google-chrome brew cask install intellij-idea brew cask install iterm2 brew cask install lastpass brew cask install pandora brew cask install p4merge brew cask install postman brew cask install razorsql brew cask install screenhero brew cask install sequel-pro brew cask install slack brew cask install sourcetree brew cask install spectacle brew cask install sublime-text brew cask install torbrowser brew cask install vagrant brew cask install virtualbox brew tap thoughtbot/formulae brew install gitsh brew cleanup brew cask cleanup npm install -g wscat
#!/usr/bin/env bash ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" brew update brew upgrade brew install android-platform-tools brew install ansible brew install awscli brew install bash-git-prompt brew install carthage brew install cowsay brew install docker brew install docker-compose brew install exercism brew install git brew install gitsh brew install gradle brew install imagemagick brew install leiningen brew install node brew install postgresql brew install pyenv brew install python brew install ruby brew install scala brew install tig brew install tmux brew install watch brew cask install aerial brew cask install android-studio brew cask install appcode brew cask install backblaze brew cask install bonjour-browser brew cask install calibre brew cask install flux brew cask install google-chrome brew cask install intellij-idea brew cask install iterm2 brew cask install lastpass brew cask install pandora brew cask install p4merge brew cask install postman brew cask install razorsql brew cask install screenhero brew cask install sequel-pro brew cask install slack brew cask install sourcetree brew cask install spectacle brew cask install sublime-text brew cask install teamviewer brew cask install torbrowser brew cask install vagrant brew cask install virtualbox brew tap thoughtbot/formulae brew install gitsh brew cleanup brew cask cleanup npm install -g wscat npm install -g mountebank
Fix to be more in line with the current command line args of the monitor.
#!/bin/sh if [ $# != 2 ]; then if [ $# != 3 ]; then echo "Usage: $0 <project/experiment> <my-ip> [stub-ip]" exit 1; fi fi PID=$1 EID=$2 SID=$3 SCRIPT=`which $0` SCRIPT_LOCATION=`dirname $SCRIPT` BIN_LOCATION=$SCRIPT_LOCATION/../libnetmon/ BIN=netmond if ! [ -x $BIN_LOCATION/$BIN ]; then echo "$BIN_LOCATION/$BIN missing - run 'gmake' in $BIN_LOCATION to build it" exit 1; fi echo "Starting up netmond for $PID $EID $SID"; $BIN_LOCATION/$BIN | python monitor.py ip-mapping.txt $PID $EID $SID
#!/bin/sh if [ $# != 3 ]; then if [ $# != 4 ]; then echo "Usage: $0 <project> <experiment> <my-ip> [stub-ip]" exit 1; fi SIP=$4 fi PID=$1 EID=$2 MIP=$3 SCRIPT=`which $0` SCRIPT_LOCATION=`dirname $SCRIPT` BIN_LOCATION=$SCRIPT_LOCATION/../libnetmon/ BIN=netmond if ! [ -x $BIN_LOCATION/$BIN ]; then echo "$BIN_LOCATION/$BIN missing - run 'gmake' in $BIN_LOCATION to build it" exit 1; fi echo "Starting up netmond for $PID/$EID $MIP $SIP"; $BIN_LOCATION/$BIN | python monitor.py ip-mapping.txt $PID/$EID $MIP $SIP
Use tracker token from Shared-frontend
#!/usr/bin/env bash gulp update-changelog --trackerToken $(lpass show -G "Shared-Pivotal UI/pui-tracker-token" --notes)
#!/usr/bin/env bash gulp update-changelog --trackerToken $(lpass show -G "Shared-frontend/pui-tracker-token" --notes)
Add my (vng) path for QtSDK.
# Add your path into this array KNOWN_QMAKE_PATHS=( \ /Users/Alex/QtSDK/Desktop/Qt/4.8.1/gcc/bin \ /Users/siarheirachytski/QtSDK/Desktop/Qt/474/gcc/bin \ ) # Prints path to directory with found qmake binary or prints nothing if not found # Returns 1 in case of not found and 0 in case of success PrintQmakePath() { local QMAKE_PATH QMAKE_PATH=$(which qmake) if [ $? -ne 0 ]; then # qmake binary is not in the path, look for it in the given array for path in "${KNOWN_QMAKE_PATHS[@]}"; do if [ -f "${path}/qmake" ]; then echo "${path}" return 0 fi done else echo "${QMAKE_PATH}" return 0 fi # Not found return 1 }
# Add your path into this array KNOWN_QMAKE_PATHS=( \ /Users/Alex/QtSDK/Desktop/Qt/4.8.1/gcc/bin \ /Users/siarheirachytski/QtSDK/Desktop/Qt/474/gcc/bin \ /Developer/QtSDK/Desktop/Qt/4.8.0/gcc/bin \ ) # Prints path to directory with found qmake binary or prints nothing if not found # Returns 1 in case of not found and 0 in case of success PrintQmakePath() { local QMAKE_PATH QMAKE_PATH=$(which qmake) if [ $? -ne 0 ]; then # qmake binary is not in the path, look for it in the given array for path in "${KNOWN_QMAKE_PATHS[@]}"; do if [ -f "${path}/qmake" ]; then echo "${path}" return 0 fi done else echo "${QMAKE_PATH}" return 0 fi # Not found return 1 }
Decrease number of big files for travis
#!/bin/bash N=1000 M=100 # get proper dirname cd $(dirname $0) DIRNAME=$PWD cd $OLDPWD # copy over echo "Copying files for tons.of.big.files ($N files in 1 file, then make $M copies of that file) ..." echo $TOOLS | tr ' ' '\n' | while read tool; do mkdir -p ${DIRNAME}/${tool}/src/js # reset file if exists echo '' > ${DIRNAME}/${tool}/src/js/bootstrap-big.js # copy all copies into single big for ((i=0;i<$N;i++)); do cat ${DIRNAME}/../node_modules/bootstrap/dist/js/bootstrap.js >> ${DIRNAME}/${tool}/src/js/bootstrap-big.js done # create copies of big files for ((i=1;i<$M;i++)); do cp ${DIRNAME}/${tool}/src/js/bootstrap-big.js ${DIRNAME}/${tool}/src/js/bootstrap-big-${i}.js done done
#!/bin/bash N=1000 M=10 # get proper dirname cd $(dirname $0) DIRNAME=$PWD cd $OLDPWD # copy over echo "Copying files for tons.of.big.files ($N files in 1 file, then make $M copies of that file) ..." echo $TOOLS | tr ' ' '\n' | while read tool; do mkdir -p ${DIRNAME}/${tool}/src/js # reset file if exists echo '' > ${DIRNAME}/${tool}/src/js/bootstrap-big.js # copy all copies into single big for ((i=0;i<$N;i++)); do cat ${DIRNAME}/../node_modules/bootstrap/dist/js/bootstrap.js >> ${DIRNAME}/${tool}/src/js/bootstrap-big.js done # create copies of big files for ((i=1;i<$M;i++)); do cp ${DIRNAME}/${tool}/src/js/bootstrap-big.js ${DIRNAME}/${tool}/src/js/bootstrap-big-${i}.js done done
Add dns component to subtree split script
#!/bin/bash # # Using git-subsplit # https://github.com/dflydev/git-subsplit GIT_SUBSPLIT=$(pwd)/$(dirname $0)/git-subsplit.sh $GIT_SUBSPLIT init https://github.com/react-php/react $GIT_SUBSPLIT update $GIT_SUBSPLIT publish " src/React/EventLoop:git@github.com:react-php/event-loop.git src/React/Stream/:git@github.com:react-php/stream.git src/React/Socket/:git@github.com:react-php/socket.git src/React/Http/:git@github.com:react-php/http.git src/React/Espresso/:git@github.com:react-php/espresso.git " --heads=master
#!/bin/bash # # Using git-subsplit # https://github.com/dflydev/git-subsplit GIT_SUBSPLIT=$(pwd)/$(dirname $0)/git-subsplit.sh $GIT_SUBSPLIT init https://github.com/react-php/react $GIT_SUBSPLIT update $GIT_SUBSPLIT publish " src/React/EventLoop:git@github.com:react-php/event-loop.git src/React/Stream/:git@github.com:react-php/stream.git src/React/Socket/:git@github.com:react-php/socket.git src/React/Http/:git@github.com:react-php/http.git src/React/Espresso/:git@github.com:react-php/espresso.git src/React/Dns/:git@github.com:react-php/dns.git " --heads=master
Allow [,; ] as domain separators
#!/bin/sh set -e # Update package list apk upgrade -q -U -a # Default server RAM if [ -z "$RAM" ]; then memory_limit=$(expr $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes) / 1024 / 1024) memory_free=$(free -m | awk 'NR==2{printf $2}') export RAM=$(($memory_limit > $memory_free ? $memory_free : $memory_limit)) fi # Call sub scripts cd "$(dirname "$0")" for script in *; do if echo "$script" | grep -Eq "^[0-9]"; then /bin/sh "$script" fi done # Clean rm -rf /var/cache/apk/* rm -rf /tmp/* rm -rf "$(dirname "$0")"
#!/bin/sh set -e # Update package list apk upgrade -q -U -a # Default server RAM if [ -z "$RAM" ]; then memory_limit=$(expr $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes) / 1024 / 1024) memory_free=$(free -m | awk 'NR==2{printf $2}') export RAM=$(($memory_limit > $memory_free ? $memory_free : $memory_limit)) fi # Fix domain separator export DOMAINS=$(echo "$DOMAINS" | sed -e 's/[,; ]\+/ /g') # Call sub scripts cd "$(dirname "$0")" for script in *; do if echo "$script" | grep -Eq "^[0-9]"; then /bin/sh "$script" fi done # Clean rm -rf /var/cache/apk/* rm -rf /tmp/* rm -rf "$(dirname "$0")"
Add user and change path
#!/usr/bin/env bash set -e THIS_PATH=$(dirname "$0") BASE_PATH=$(dirname "$THIS_PATH") cd $BASE_PATH #pip install --upgrade pip # install prog AND tests requirements : pip3 install -r requirements.txt pip2.7 install -v --install-option="--prefix=/home/travis/.local/" gi export PATH=$PATH:/home/travis/.local/ echo $PATH #pip install -e . pip3 install --upgrade -r test/requirements.txt #pyversion=$(python -c "import sys; print(''.join(map(str, sys.version_info[:2])))") #if test -e "test/requirements.py${pyversion}.txt" #then # pip install -r "test/requirements.py${pyversion}.txt" #fi
#!/usr/bin/env bash set -e THIS_PATH=$(dirname "$0") BASE_PATH=$(dirname "$THIS_PATH") cd $BASE_PATH #pip install --upgrade pip # install prog AND tests requirements : pip3 install -r requirements.txt pip2.7 install -v --user --install-option="--prefix=/home/travis/.local/" gi export PATH=$PATH:/home/travis/.local/lib/python2.7/site-packages/ echo $PATH #pip install -e . pip3 install --upgrade -r test/requirements.txt #pyversion=$(python -c "import sys; print(''.join(map(str, sys.version_info[:2])))") #if test -e "test/requirements.py${pyversion}.txt" #then # pip install -r "test/requirements.py${pyversion}.txt" #fi
Fix issue running in docker where /oe_layers_config may not exist yet
#!/bin/sh set -evx if [ ! -f /.dockerenv ]; then echo "This script is only intended to be run from within Docker" >&2 exit 1 fi for DIR in /mnt/config /oe_layers_config; do if [ ! -d "$DIR" ]; then echo "${DIR} does not exist" >&2 exit 1 fi done # Copy the config to the image rsync -av /mnt/config/ / echo 'Running Initial Layer Configuration for OnEarth' /usr/bin/oe_configure_layer --lcdir /oe_layers_config echo 'Starting Apache server' /usr/sbin/apachectl # Make sure that the Apache logs exist before tailing them touch /etc/httpd/logs/access.log /etc/httpd/logs/error.log # Tail the apache logs exec tail -qF /etc/httpd/logs/access.log /etc/httpd/logs/error.log
#!/bin/sh set -evx if [ ! -f /.dockerenv ]; then echo "This script is only intended to be run from within Docker" >&2 exit 1 fi # Copy the config to the image if [ ! -d "/mnt/config" ]; then echo "The /mnt/config directory does not exist" >&2 exit 1 fi rsync -av /mnt/config/ / echo 'Running Initial Layer Configuration for OnEarth' if [ ! -d "/oe_layers_config" ]; then echo "The /oe_layers_config directory does not exist" >&2 exit 1 fi /usr/bin/oe_configure_layer --lcdir /oe_layers_config echo 'Starting Apache server' /usr/sbin/apachectl # Make sure that the Apache logs exist before tailing them touch /etc/httpd/logs/access.log /etc/httpd/logs/error.log # Tail the apache logs exec tail -qF /etc/httpd/logs/access.log /etc/httpd/logs/error.log
Remove code we don't need.
#!/usr/bin/env bash # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # File containing common utility functions function join() { local IFS="$1"; shift; echo "$*"; } function get_st2_components() { local ST2_COMPONENTS=$(find ${ST2_REPO_PATH}/* -maxdepth 0 -name "st2*" -type d) local ST2_COMPONENTS_RUNNERS=$(find ${ST2_REPO_PATH}/contrib/runners/* -maxdepth 0 -type d) echo "${ST2_COMPONENTS}" echo "${ST2_COMPONENTS_RUNNERS}" }
#!/usr/bin/env bash # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # File containing common utility functions function join() { local IFS="$1"; shift; echo "$*"; } function get_st2_components() { local ST2_COMPONENTS=$(find ${ST2_REPO_PATH}/* -maxdepth 0 -name "st2*" -type d) echo "${ST2_COMPONENTS}" } function get_st2_components_runners() { local ST2_COMPONENTS_RUNNERS=$(find ${ST2_REPO_PATH}/contrib/runners/* -maxdepth 0 -type d) echo "${ST2_COMPONENTS_RUNNERS}" }
Use arg to avoid issue with www
#!/bin/bash -eux HAPROXY_PATH=/etc/haproxy CERTS_PATH=/root/.le IP=`curl http://icanhazip.com/` cd $HAPROXY_PATH trap exit SIGHUP SIGINT SIGTERM function issue_cert () { if [ "$(ping -c1 -n $domain | head -n1 | sed 's/.*(\([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\)).*/\1/g')" == "$IP" ]; then /le/le.sh issue /html-root $1 if [ -d $CERTS_PATH/$1 ]; then cat $CERTS_PATH/$1/$1.cer $CERTS_PATH/$1/ca.cer $CERTS_PATH/$1/$1.key > $HAPROXY_PATH/certs/$1.pem fi fi } while true; do certs="$(ls -1 ${HAPROXY_PATH}/certs | sed -e 's/\.pem//')" domains="$(cat ${HAPROXY_PATH}/haproxy.cfg | grep backend | cut -f2 -d' ' | grep -v letsencrypt)" letsencrypt="$(comm -13 <(echo "${certs}" | sort) <(echo "${domains}" | sort))" for domain in `echo "${letsencrypt}"`; do issue_cert $domain issue_cert www.$domain done inotifywait . done
#!/bin/bash -eux HAPROXY_PATH=/etc/haproxy CERTS_PATH=/root/.le IP=`curl http://icanhazip.com/` cd $HAPROXY_PATH trap exit SIGHUP SIGINT SIGTERM function issue_cert () { if [ "$(ping -c1 -n $1 | head -n1 | sed 's/.*(\([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\)).*/\1/g')" == "$IP" ]; then /le/le.sh issue /html-root $1 if [ -d $CERTS_PATH/$1 ]; then cat $CERTS_PATH/$1/$1.cer $CERTS_PATH/$1/ca.cer $CERTS_PATH/$1/$1.key > $HAPROXY_PATH/certs/$1.pem fi fi } while true; do certs="$(ls -1 ${HAPROXY_PATH}/certs | sed -e 's/\.pem//')" domains="$(cat ${HAPROXY_PATH}/haproxy.cfg | grep backend | cut -f2 -d' ' | grep -v letsencrypt)" letsencrypt="$(comm -13 <(echo "${certs}" | sort) <(echo "${domains}" | sort))" for domain in `echo "${letsencrypt}"`; do issue_cert $domain issue_cert www.$domain done inotifywait . done
Remove github plugin - not useful at all and breaks hub. Also add gitignore plugin.
ZSH=$HOME/.oh-my-zsh ZSH_THEME="lust" DISABLE_AUTO_UPDATE="true" plugins=(autoenv battery colored-man cp extract history gem git gitfast github git-extras lol mosh npm python rsync safe-paste screen tmux tmuxinator vundle) if [[ "$(uname -s)" == "Darwin" ]]; then plugins=(${plugins[@]} brew copydir copyfile osx sublime) elif [ -f /etc/debian_version ]; then plugins=(${plugins[@]} debian) fi
ZSH=$HOME/.oh-my-zsh ZSH_THEME="lust" DISABLE_AUTO_UPDATE="true" plugins=(autoenv battery colored-man cp extract history gem git gitfast gitignore git-extras lol mosh npm python rsync safe-paste screen tmux tmuxinator vundle) if [[ "$(uname -s)" == "Darwin" ]]; then plugins=(${plugins[@]} brew copydir copyfile osx sublime) elif [ -f /etc/debian_version ]; then plugins=(${plugins[@]} debian) fi
Build with Ninja where available
#!/usr/bin/env sh set -ex set | sort rm -rf build mkdir build cd build cmake .. make make src/mytest src/mytest
#!/usr/bin/env sh set -ex set | sort rm -rf build mkdir build cd build if command -v ninja; then cmake .. -GNinja ninja else cmake .. make fi src/mytest
Fix travis upload docs script
#!/bin/bash # Exit script on the first error set -e echo "" echo "=== Generating documentation =================" cargo doc if [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo "" echo "=== Uploading docs ===============" ghp-import -n target/doc git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages fi
#!/bin/bash # Exit script on the first error set -e echo "" echo "=== Generating documentation =================" if [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then cargo doc echo "" echo "=== Uploading docs ===============" ghp-import -n target/doc git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages fi
Add hdir prefix for Hadoop folder loop and run a particular command line ex : hdircmd $HDFS_FOLDER "hdfs dfs -ls "
alias hls='hadoop fs -ls ' alias hdu='hadoop fs -du -h ' # https://hadoop.apache.org/docs/r2.7.2/hadoop-project-dist/hadoop-hdfs/HdfsQuotaAdminGuide.html alias hquota='hadoop fs -count -q -h -v '
alias hls='hadoop fs -ls ' alias hdu='hadoop fs -du -h ' # https://hadoop.apache.org/docs/r2.7.2/hadoop-project-dist/hadoop-hdfs/HdfsQuotaAdminGuide.html alias hquota='hadoop fs -count -q -h -v ' hdirsize() { # MIN NUM OF ARG if [[ "$#" < "1" ]]; then echo "Usage : hdirsize HDFS_FOLDER [HDFS_MORE_ARG]. Run 'hdfs dfs -count -q' on each subfolders of HDFS_FOLDER" >&2 echo " HDFS_MORE_ARG : -h = human readable, -v = verbose" >&2 return -1 fi local HDFS_FOLDER=$1 local HDFS_MORE_ARG=${@:2} hdircmd $HDFS_FOLDER "hdfs dfs -count -q $HDFS_MORE_ARG" } hdirls() { # MIN NUM OF ARG if [[ "$#" < "1" ]]; then echo "Usage : hdirls HDFS_FOLDER [HDFS_MORE_ARG]. Run 'hdfs dfs -ls' on each subfolders of HDFS_FOLDER" >&2 return -1 fi local HDFS_FOLDER=$1 local HDFS_MORE_ARG=${@:2} hdircmd $HDFS_FOLDER "hdfs dfs -ls $HDFS_MORE_ARG" } hdircmd() { # MIN NUM OF ARG if [[ "$#" < "2" ]]; then echo "Usage : hdircmd HDFS_FOLDER HDFS_CMD.. . Run 'HDFS_CMD..' on each subfolders of HDFS_FOLDER" >&2 return -1 fi local HDFS_FOLDER=$1 local HDFS_CMD=${@:2} echo "hdfs dfs -ls $HDFS_FOLDER | awk -v COMMAND=\"$HDFS_CMD \" '{system(COMMAND \$8)}'" hdfs dfs -ls $HDFS_FOLDER | awk -v COMMAND="$HDFS_CMD " '{system(COMMAND $8)}' }
Clean up directory before running tests
#!/bin/bash set -euo pipefail IFS=$'\n\t' mkdir -p /home/drydock/go/src/github.com/etcinit ln -sf $(pwd) /home/drydock/go/src/github.com/etcinit/ cd /home/drydock/go/src/github.com/etcinit/gonduit glide install go build $(glide novendor) go test $(glide novendor)
#!/bin/bash set -euo pipefail IFS=$'\n\t' rm -r /home/drydock/go/src mkdir -p /home/drydock/go/src/github.com/etcinit ln -sf $(pwd) /home/drydock/go/src/github.com/etcinit/ cd /home/drydock/go/src/github.com/etcinit/gonduit glide install go build $(glide novendor) go test $(glide novendor)
Fix error when creating nabu-3 schema
#!/usr/bin/env bash set -ex echo "Installing MySQL 5.7..." sudo service mysql stop sudo apt-get remove "^mysql.*" sudo apt-get autoremove sudo apt-get autoclean echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections wget https://dev.mysql.com/get/mysql-apt-config_0.8.6-1_all.deb sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.6-1_all.deb sudo rm -rf /var/lib/apt/lists/* sudo apt-get clean sudo apt-get update -q sudo apt-get install -q -y --allow-unauthenticated -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" mysql-server libmysqlclient-dev sudo mysql_upgrade echo "Restart mysql..." sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;" sudo mysql -e "create schema if not exists \`nabu-3\` default charset set utf8mb4 default collate utf8mb4_general_ci" sudo mysql -e "grant all on 'nabu-3'.* to 'nabu-3'@'%' identified by 'nabu-3' with grant option"
#!/usr/bin/env bash set -ex echo "Installing MySQL 5.7..." sudo service mysql stop sudo apt-get remove "^mysql.*" sudo apt-get autoremove sudo apt-get autoclean echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections wget https://dev.mysql.com/get/mysql-apt-config_0.8.6-1_all.deb sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.6-1_all.deb sudo rm -rf /var/lib/apt/lists/* sudo apt-get clean sudo apt-get update -q sudo apt-get install -q -y --allow-unauthenticated -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" mysql-server libmysqlclient-dev sudo mysql_upgrade echo "Restart mysql..." sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;" sudo mysql -e "create schema if not exists \`nabu-3\` default character set utf8mb4 default collate utf8mb4_general_ci" sudo mysql -e "grant all on 'nabu-3'.* to 'nabu-3'@'%' identified by 'nabu-3' with grant option"
Make use of the process_builder_args library function.
#!/bin/sh is_debug=1 for arg in "$@" ; do case "$arg" in LLVM_TOP=*) LLVM_TOP=`echo "$arg" | sed -e 's/LLVM_TOP=//'` ;; PREFIX=*) PREFIX=`echo "$arg" | sed -e 's/PREFIX=//'` ;; *=*) build_opts="$build_opts $arg" ;; --*) config_opts="$config_opts $arg" ;; esac done # See if we have previously been configured by sensing the presense # of the config.status scripts if test ! -x "config.status" ; then # We must configure so build a list of configure options config_options="--prefix=$PREFIX --with-llvmgccdir=$PREFIX" config_options="$config_options $config_opts" echo ./configure $config_options ./configure $config_options || (echo "Can't configure llvm" ; exit 1) fi echo make $build_opts '&&' make install $build_opts make $build_opts tools-only
#!/bin/sh # This includes the Bourne shell library from llvm-top. Since this file is # generally only used when building from llvm-top, it is safe to assume that # llvm is checked out into llvm-top in which case .. just works. . ../library.sh # Process the options passed in to us by the build script into standard # variables. process_builder_args "$@" # See if we have previously been configured by sensing the presense # of the config.status scripts if test ! -x "config.status" ; then # We must configure so build a list of configure options config_options="--prefix=$PREFIX --with-llvmgccdir=$PREFIX" config_options="$config_options $OPTIONS_DASH $OPTIONS_DASH_DASH" msg 0 Configuring $module with: msg 0 " ./configure" $config_options ./configure $config_options || (echo "Can't configure llvm" ; exit 1) fi msg 0 Building $module with: msg 0 " make" $OPTIONS_ASSIGN tools-only make $OPTIONS_ASSIGN tools-only
Fix the URL of the Engine.IO client repository
#!/usr/bin/env bash set -e CURRENTDIR=$(pwd) DESTDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TEMPDIR=$(mktemp -d 2> /dev/null || mktemp -d -t 'tmp') cleanup () { cd "$CURRENTDIR" [ -d "$TEMPDIR" ] && rm -rf "$TEMPDIR" } trap cleanup INT TERM EXIT git clone https://github.com/Automattic/engine.io-client.git "$TEMPDIR" cd "$TEMPDIR" git checkout "$(git describe --tags --abbrev=0)" NODE_ENV=production npm install "$DESTDIR"/globalify.sh "$TEMPDIR" cd "$DESTDIR" find patches -name '*.patch' -exec patch -i {} \;
#!/usr/bin/env bash set -e CURRENTDIR=$(pwd) DESTDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TEMPDIR=$(mktemp -d 2> /dev/null || mktemp -d -t 'tmp') cleanup () { cd "$CURRENTDIR" [ -d "$TEMPDIR" ] && rm -rf "$TEMPDIR" } trap cleanup INT TERM EXIT git clone https://github.com/socketio/engine.io-client.git "$TEMPDIR" cd "$TEMPDIR" git checkout "$(git describe --tags --abbrev=0)" NODE_ENV=production npm install "$DESTDIR"/globalify.sh "$TEMPDIR" cd "$DESTDIR" find patches -name '*.patch' -exec patch -i {} \;
Remove some node CLI tools
#!/usr/bin/env zsh set -e set -u node_modules=() command -v npm >/dev/null 2>&1 || exit 0 npm config set sign-git-tag true node_modules+=('bower@^1.8.0') node_modules+=('brunch@^2.8.2') node_modules+=('ember-cli@^2.7.0') node_modules+=('gh-home@^1.4.1') node_modules+=('grunt-cli@^1.2.0') node_modules+=('gulp-cli@^1.2.2') node_modules+=('jsctags@^5.0.1') node_modules+=('json-diff@^0.5.2') node_modules+=('tern@^0.21.0') npm install --global ${node_modules} exit
#!/usr/bin/env zsh set -e set -u node_modules=() command -v npm >/dev/null 2>&1 || exit 0 npm config set sign-git-tag true node_modules+=('bower@^1.8.0') node_modules+=('gh-home@^1.4.1') node_modules+=('jsctags@^5.0.1') node_modules+=('json-diff@^0.5.2') node_modules+=('tern@^0.21.0') npm install --global ${node_modules} exit
Use computed brew dir i/o /usr/bin
#!/usr/bin/env bash function pretty_print() { printf '\n\033[1m\033[34m%s\033[0m\n\n' "[macos-confs] ${1}…" } function install_brew_bash() { if ! command -v brew > /dev/null; then pretty_print "❗️ brew not installed" && exit 1; fi pretty_print "🍻 Installing GNU bash (through brew)" brew install bash pretty_print "⚠️ Changing from BSD bash (macOS) to GNU bash (brew)" if ! grep -q '/usr/local/bin/bash' /etc/shells; then echo '/usr/local/bin/bash' | sudo tee -a /etc/shells; chsh -s /usr/local/bin/bash; pretty_print "Auto-reloading your shell by executing a login shell" exec "$SHELL" -l fi; } install_brew_bash
#!/usr/bin/env bash BREW_PREFIX=$(brew --prefix) function pretty_print() { printf '\n\033[1m\033[34m%s\033[0m\n\n' "[macos-confs] ${1}…" } function install_brew_bash() { if ! command -v brew > /dev/null; then pretty_print "❗️ brew not installed" && exit 1; fi pretty_print "🍻 Installing GNU bash (through brew)" brew install bash pretty_print "⚠️ Changing from BSD bash (macOS) to GNU bash (brew)" if ! grep -q "${BREW_PREFIX}/bin/bash" /etc/shells; then echo "${BREW_PREFIX}/bin/bash" | sudo tee -a /etc/shells; chsh -s "${BREW_PREFIX}/bin/bash"; pretty_print "Auto-reloading your shell by executing a login shell" exec "$SHELL" -l fi; } install_brew_bash
Update deploy to elastic.io script.
#!/bin/bash set -e rm -rf lib npm version patch git checkout production git merge master grunt build git add -f lib/ libs=$(cat package.json | jq -r '.dependencies' | grep ':' | cut -d: -f1 | tr -d " " | tr -d '"') for lib in $libs; do git add -f node_modules/$lib done git commit -m "Add generated code and runtime dependencies for elastic.io environment." git push origin production git checkout master npm version patch git push origin master
#!/bin/bash set -e BRANCH_NAME='production' set +e git branch -D ${BRANCH_NAME} set -e rm -rf lib rm -rf node_modules npm version patch git branch ${BRANCH_NAME} git checkout ${BRANCH_NAME} npm install grunt build rm -rf node_modules npm install --production git add -f lib/ git add -f node_modules/ git commit -m "Add generated code and runtime dependencies for elastic.io environment." git push --force origin ${BRANCH_NAME} git checkout master VERSION=$(cat package.json | jq --raw-output .version) git push origin "v${VERSION}" npm version patch npm install
Fix bootstrap test to use local checked out edgestack
#!/bin/bash set -x CWD=$(pwd) TESTPATH="$(mktemp -d)" mkdir -p ${TESTPATH} cd "${TESTPATH}" echo '{ "name":"test","version":"1.0.0", "devDependencies": { "cross-env": "^3.1.4" }, "scripts":{"prod":"cross-env NODE_ENV=production edge build"} }' > package.json npm -q install ${CWD} node_modules/.bin/edge bootstrap --title="Test" --description="Test" --language="de-DE" npm -q install npm run prod node build/server/main.js & TEST_PID=$! sleep 2 IS_RUNNING=$(ps -p $TEST_PID -o pid=) curl -f http://localhost:1339 CURL_EXITCODE=$? kill -9 $TEST_PID rm -fr "${TESTPATH}" if [ $CURL_EXITCODE -ne 0 ]; then exit 2 fi if [ -n "$IS_RUNNING" ]; then exit 0 else exit 1 fi
#!/bin/bash set -x CWD=$(pwd) TESTPATH="$(mktemp -d)" yarn unlink || /usr/bin/true yarn link mkdir -p ${TESTPATH} cd "${TESTPATH}" echo '{ "name":"test","version":"1.0.0", "devDependencies": { "cross-env": "^3.1.4" }, "scripts":{"prod":"cross-env NODE_ENV=production edge build"} }' > package.json yarn add file:${CWD} yarn link edgestack node_modules/.bin/edge bootstrap --title="Test" --description="Test" --language="de-DE" yarn install yarn run prod node build/server/main.js & TEST_PID=$! sleep 2 IS_RUNNING=$(ps -p $TEST_PID -o pid=) curl -f http://localhost:1339 CURL_EXITCODE=$? kill -9 $TEST_PID # rm -fr "${TESTPATH}" echo "rm ${TESTPATH}" if [ $CURL_EXITCODE -ne 0 ]; then exit 2 fi if [ -n "$IS_RUNNING" ]; then exit 0 else exit 1 fi
Add git log and date time to published website
#!/bin/bash if [ ! -d ../javaplayland/web/doppio-jvm ] ; then echo 'wrong dir' exit 1 fi if [ ! -d ../codemoo/ ] ; then echo 'No web target directory' exit 1 fi rsync --exclude '*.DS_Store' --exclude '*.git' -av web/ ../codemoo mv ../codemoo/index.html ../codemoo/index2.html perl -p -i -e "s/WebTrafficAnalyticsHere/script/g" ../codemoo/index2.html rsync -av LICENSE.txt ../codemoo/ cp -pr gh-pages-config/ ../codemoo/ rm ../codemoo/_* ( cd ../codemoo; coffee -c scripts ) ( cd ../codemoo; coffee -c doppio-jvm/scripts/demo/ ) ( cd ../codemoo; git add -A . ) ( cd ../codemoo; git commit -m 'Publish' ) echo \( cd ../codemoo\; git push origin gh-pages \)
#!/bin/bash if [ ! -d ../javaplayland/web/doppio-jvm ] ; then echo 'wrong dir' exit 1 fi TGT=../codemoo if [ ! -d $TGT/ ] ; then echo 'No web target directory' exit 1 fi rsync --delete --exclude '*.DS_Store' --exclude '*.git' -av web/ $TGT mv $TGT/index.html $TGT/index2.html perl -p -i -e "s/WebTrafficAnalyticsHere/script/g" $TGT/index2.html rsync -av LICENSE.txt $TGT/ cp -pr gh-pages-config/ $TGT/ rm $TGT/_* #Compile ( cd $TGT; coffee -c scripts ) ( cd $TGT; coffee -c doppio-jvm/scripts/demo/ ) #Stamp date >$TGT/publish-date.txt git log -n 5 --oneline >$TGT/publish-recentcommits.txt ( cd $TGT; git add -A . ) ( cd $TGT; git commit -m 'Publish' ) echo "Files commited. Copy-paste the following to publish" echo \( cd $TGT\; git push origin gh-pages \)
Update example to get IP correctly
#!/usr/bin/env bash set -e label="myhost" zone="example.com" internal_token="00000000-0000-4000-0000-000000000000" external_token="11111111-1111-4111-1111-111111111111" internal_zone="internal.$zone" external_zone="external.$zone" curl --fail -sS >/dev/null -F zone="$external_zone" -F token="$external_token" -F label="$label" "https://my-dns.org/api/update-record" || echo >&2 "Updating external DNS failed" ip="$(hostname -I | head -n 1)" if [[ -n "$ip" ]]; then curl --fail -sS >/dev/null -F zone="$internal_zone" -F token="$internal_token" -F label="$label" -F data="$ip" "https://my-dns.org/api/update-record" || echo >&2 "Updating internal DNS failed" fi
#!/usr/bin/env bash set -e label="myhost" zone="example.com" internal_token="00000000-0000-4000-0000-000000000000" external_token="11111111-1111-4111-1111-111111111111" internal_zone="internal.$zone" external_zone="external.$zone" curl --fail -sS >/dev/null -F zone="$external_zone" -F token="$external_token" -F label="$label" "https://my-dns.org/api/update-record" || echo >&2 "Updating external DNS failed" ip="$(hostname -I | cut -d ' ' -f 1)" if [[ -n "$ip" ]]; then curl --fail -sS >/dev/null -F zone="$internal_zone" -F token="$internal_token" -F label="$label" -F data="$ip" "https://my-dns.org/api/update-record" || echo >&2 "Updating internal DNS failed" fi
Make the "README" a bit more descriptive.
#!/bin/bash echo 'Welcome to this virtual machine. To switch the user and drop privileges, type `user`.' echo
#!/bin/bash echo 'Welcome to this virtual machine. To switch the user and drop privileges, type `user`.' echo 'From there, type "ocaml" to start ocaml-multicore.' echo
Fix typo in version validation test
#! /usr/bin/env bash current_version=`python setup.py --version` make test # This part is executed only when merging a branch in master. if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" != false ]]; then # A version bump is requested by default, except: # - when changing a Markdown (.md) file # - or when only blank lines are added/removed # Note: git diff-index does not work with --ignore-blank-lines option. bumping_changes=`git --no-pager diff --ignore-blank-lines master openfisca_france | grep "^diff" | grep -v "\.md$"` if [[ -n "$bumping_changes" ]]; then if git rev-parse $current_version; then set +x echo "Version $version already exists. Please update version number in setup.py before merging this branch into master." exit 1 fi if git diff-index master --quiet CHANGELOG.md; then set +x echo "CHANGELOG.md has not been modified. Please update it before merging this branch into master." exit 1 fi fi fi
#! /usr/bin/env bash current_version=`python setup.py --version` make test # This part is executed only when merging a branch in master. if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" != false ]]; then # A version bump is requested by default, except: # - when changing a Markdown (.md) file # - or when only blank lines are added/removed # Note: git diff-index does not work with --ignore-blank-lines option. bumping_changes=`git --no-pager diff --ignore-blank-lines master openfisca_france | grep "^diff" | grep -v "\.md$"` if [[ -n "$bumping_changes" ]]; then if git rev-parse $current_version; then set +x echo "Version $current_version already exists. Please update version number in setup.py before merging this branch into master." exit 1 fi if git diff-index master --quiet CHANGELOG.md; then set +x echo "CHANGELOG.md has not been modified. Please update it before merging this branch into master." exit 1 fi fi fi
Disable FTL features we don't need
#!/bin/sh # Run tests on test lab gcloud firebase test android run \ --type instrumentation \ --app ftl-tests/dummy.apk \ --test build/outputs/apk/androidTest/debug/kotlin-extensions-debug-androidTest.apk \ --device model=Nexus6P,version=27,locale=en_US,orientation=portrait \ --timeout 20m \ --results-bucket cloud-test-android-devrel-ci
#!/bin/sh # Run tests on test lab gcloud firebase test android run \ --type instrumentation \ --app ftl-tests/dummy.apk \ --test build/outputs/apk/androidTest/debug/kotlin-extensions-debug-androidTest.apk \ --device model=Nexus6P,version=27,locale=en_US,orientation=portrait \ --timeout 30m \ --results-bucket cloud-test-android-devrel-ci \ --no-record-video \ --no-performance-metrics
Fix checking for proto 1
#!/bin/bash set -e function run_tests() { local version=$1 ccm create test -v $version -n 3 -s --debug ccm status if [[ $v == 1.2.* ]]; then go test -v ./... -proto 1 else go test -v ./... fi ccm stop --not-gently test ccm remove test } run_tests $1
#!/bin/bash set -e function run_tests() { local version=$1 ccm create test -v $version -n 3 -s --debug ccm status if [[ $version == 1.2.* ]]; then go test -v ./... -proto 1 else go test -v ./... fi ccm clear } run_tests $1
Test Objective-C on App Center (macOS)
#!/usr/bin/env bash -e cd $APPCENTER_SOURCE_DIRECTORY source appcenter/slack.sh ############# ### Build ### ############# npm run build ############ ### Test ### ############ brew install go boost # TODO run full test suite when we deprecate Travis export CI=true export FIXTURE=swift if script/test; then slack_notify_build_passed else slack_notify_build_failed exit 1 fi ############### ### Archive ### ############### cp -r dist $APPCENTER_OUTPUT_DIRECTORY/
#!/usr/bin/env bash -e cd $APPCENTER_SOURCE_DIRECTORY source appcenter/slack.sh ############# ### Build ### ############# npm run build ############ ### Test ### ############ brew install go boost # TODO run full test suite when we deprecate Travis export FIXTURE=swift,objective-c if script/test; then slack_notify_build_passed else slack_notify_build_failed exit 1 fi ############### ### Archive ### ############### cp -r dist $APPCENTER_OUTPUT_DIRECTORY/
Make sure this file is not executable
#!/bin/bash . $srcdir/../../../test_common.sh test_init test_api_xccdf_unittests.log test_run "xccdf:complex-check -- NAND is working properly" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_nand.xccdf.xml test_run "xccdf:complex-check -- single negation" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_single_negate.xccdf.xml test_run "Certain id's of xccdf_items may overlap" ./test_xccdf_shall_pass $srcdir/test_xccdf_overlaping_IDs.xccdf.xml test_run "Escaping of xml &amp within xccdf:value" $srcdir/test_xccdf_xml_escaping_value.sh test_run "xccdf:check-content-ref without @name" $srcdir/test_xccdf_check_content_ref_without_name_attr.sh test_exit
#!/bin/bash . $srcdir/../../../test_common.sh test_init test_api_xccdf_unittests.log test_run "xccdf:complex-check -- NAND is working properly" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_nand.xccdf.xml test_run "xccdf:complex-check -- single negation" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_single_negate.xccdf.xml test_run "Certain id's of xccdf_items may overlap" ./test_xccdf_shall_pass $srcdir/test_xccdf_overlaping_IDs.xccdf.xml test_run "Assert for environment" [ ! -x $srcdir/not_executable ] test_run "Escaping of xml &amp within xccdf:value" $srcdir/test_xccdf_xml_escaping_value.sh test_run "xccdf:check-content-ref without @name" $srcdir/test_xccdf_check_content_ref_without_name_attr.sh test_exit
Remove integration test from travis build
#!/bin/bash set -e export DISPLAY=:99.0 sh -e /etc/init.d/xvfb start npm run clean npm run build npm test npm run test:coverage export PATH="$HOME/miniconda/bin:$PATH" npm run test:integration npm run docs
#!/bin/bash set -e export DISPLAY=:99.0 sh -e /etc/init.d/xvfb start npm run clean npm run build npm test npm run test:coverage export PATH="$HOME/miniconda/bin:$PATH" #npm run test:integration npm run docs
Allow $EDITOR to be set up elsewhere
export EDITOR='subl'
# Only set this if we haven't set $EDITOR up somewhere else previously. if [ "$EDITOR" == "" ] ; then # Use sublime for my editor. export EDITOR='subl' fi
Build one wheel for py27, py32 and py33 as the dependencies are the same for all of them.
python setup.py sdist python setup.py bdist_wheel --python-tag py26 python setup.py bdist_wheel --python-tag py27 python setup.py bdist_wheel --python-tag py32 python setup.py bdist_wheel --python-tag py33 python setup.py bdist_wheel --python-tag py34
python setup.py sdist python setup.py bdist_wheel --python-tag py26 python setup.py bdist_wheel --python-tag py27.py32.py33 python setup.py bdist_wheel --python-tag py34
Support for index tarballs with CWL runs
#!/bin/bash set -ex -o pipefail BCBIO_VERSION="1.0.4" BCBIO_REVISION="v1.0.4" NS="quay.io/bcbio" TAG="${BCBIO_VERSION}-${BCBIO_REVISION}" # build bcbio base docker pull ubuntu:16.04 docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" -t "${NS}/bcbio-base:${TAG}" -t "${NS}/bcbio-base:latest" - < Dockerfile.base # build bcbio + task specific tools for TOOL in ${TOOLS} do docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" --build-arg "tool=${TOOL}" -t "${NS}/${TOOL}:${TAG}" -t "${NS}/${TOOL}:latest" -f Dockerfile.tools . done # log in to quay.io set +x # avoid leaking encrypted password into travis log docker login -u="bcbio+travis" -p="$QUAY_PASSWORD" quay.io # push images set -ex -o pipefail docker push "${NS}/bcbio-base:${TAG}" docker push "${NS}/bcbio-base:latest" for TOOL in ${TOOLS} do docker push "${NS}/${TOOL}:${TAG}" docker push "${NS}/${TOOL}:latest" done
#!/bin/bash set -ex -o pipefail BCBIO_VERSION="1.0.5a" BCBIO_REVISION="4a4b8de" NS="quay.io/bcbio" TAG="${BCBIO_VERSION}-${BCBIO_REVISION}" # build bcbio base docker pull ubuntu:16.04 docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" -t "${NS}/bcbio-base:${TAG}" -t "${NS}/bcbio-base:latest" - < Dockerfile.base # build bcbio + task specific tools for TOOL in ${TOOLS} do docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" --build-arg "tool=${TOOL}" -t "${NS}/${TOOL}:${TAG}" -t "${NS}/${TOOL}:latest" -f Dockerfile.tools . done # log in to quay.io set +x # avoid leaking encrypted password into travis log docker login -u="bcbio+travis" -p="$QUAY_PASSWORD" quay.io # push images set -ex -o pipefail docker push "${NS}/bcbio-base:${TAG}" docker push "${NS}/bcbio-base:latest" for TOOL in ${TOOLS} do docker push "${NS}/${TOOL}:${TAG}" docker push "${NS}/${TOOL}:latest" done
Update dcache automated downloader script
#!/bin/bash yesterday=`date --date="yesterday" +"%Y.%m.%d"` year=`date --date="yesterday" +"%Y"` month=`date --date="yesterday" +"%m"` billing_logs="/var/lib/dcache/billing/$year/$month" scp uct2-dc4.mwt2.org:$billing_logs/billing-$yesterday / rm -fr /tmp/faxbox_logs
#!/bin/bash yesterday=`date --date="yesterday" +"%Y.%m.%d"` year=`date --date="yesterday" +"%Y"` month=`date --date="yesterday" +"%m"` billing_logs="/var/lib/dcache/billing/$year/$month" log_dest="/mnt/log/mwt2/dcache-billing/" work_dir=`mktemp -d` cd $work_dir mkdir logs mkdir processed_logs scp uct2-dc4.mwt2.org:$billing_logs/billing-$yesterday logs/ scp uct2-dc4.mwt2.org:$billing_logs/billing-error-$yesterday logs/ $1/python/process_logs.py cd logs sha256sum billing* > raw_sums cd ../processed_logs sha256sum billing* > processed_sums cp logs/billing* $log_dest/raw/$year cp processed_logs/billing* $log_dest/raw/$year cat logs/raw_sums >> $log_dest/raw/$year/sha256sums cat processed_logs/processed_sums >> $log_dest/processed/$year/sha256sums rm -fr $work_dir
Fix Docker build when git metadata is not present
#!/bin/sh # DO NOT RUN LOCALLY - For docker build only! # WILL NUKE YOUR .git ! apk update apk add git git log -1 --pretty="format:%h" > VERSION.txt git log -1 --pretty="format:%ai %s" > DESCRIPTION.txt rm -rf ./.git go get -v go build go install apk del git rm -rf /var/cache/apk/*
#!/bin/sh # DO NOT RUN LOCALLY - For docker build only! # WILL NUKE YOUR .git ! apk update apk add git if [[ -d ".git" ]]; then git log -1 --pretty="format:%h" > VERSION.txt git log -1 --pretty="format:%ai %s" > DESCRIPTION.txt rm -rf ./.git else echo "unknown" > VERSION.txt echo "unknown" > DESCRIPTION.txt fi go get -v go build go install apk del git rm -rf /var/cache/apk/*
Use $WORKSPACE rather than hardcoding a path
#this checks for standard tool chains and sticks them in the path # MSP430 toolchain if [ -d "/opt/msp430" ]; then #Add to paths export MSP430ROOT="/opt/msp430" export PATH="${PATH}:${MSP430ROOT}/bin" fi #NodeJS if [ -d /usr/local/node ]; then export PATH="${PATH}:/usr/local/node/bin" fi if [ -d ${HOME}/Workspace/FlameGraph ]; then # useful: checkout https://github.com/brendangregg/FlameGraph.git to ~/Workspace for flame utils export PATH="${PATH}:${HOME}/Workspace/FlameGraph" fi
#this checks for standard tool chains and sticks them in the path # MSP430 toolchain if [ -d "/opt/msp430" ]; then #Add to paths export MSP430ROOT="/opt/msp430" export PATH="${PATH}:${MSP430ROOT}/bin" fi #NodeJS if [ -d /usr/local/node ]; then export PATH="${PATH}:/usr/local/node/bin" fi if [ -d ${WORKSPACE}/FlameGraph ]; then # useful: checkout https://github.com/brendangregg/FlameGraph.git to ~/Workspace for flame utils export PATH="${PATH}:${WORKSPACE}/FlameGraph" fi
Add sleep between scope launch and stop
#! /bin/bash . ./config.sh start_suite "Check scope exits cleanly within 10 seconds" scope_on $HOST1 launch scope_on $HOST1 stop sleep 10 # Save stdout for debugging output exec 3>&1 assert_raises "docker_on $HOST1 logs weavescope 2>&1 | grep 'app exiting' || (docker_on $HOST1 logs weavescope 2>&3 ; false)" assert_raises "docker_on $HOST1 logs weavescope 2>&1 | grep 'probe exiting' || (docker_on $HOST1 logs weavescope 2>&3 ; false)" assert_raises "docker_on $HOST1 inspect --format='{{.State.Running}}' weavescope" "false" scope_end_suite
#! /bin/bash . ./config.sh start_suite "Check scope exits cleanly within 10 seconds" scope_on $HOST1 launch sleep 10 scope_on $HOST1 stop sleep 10 # Save stdout for debugging output exec 3>&1 assert_raises "docker_on $HOST1 logs weavescope 2>&1 | grep 'app exiting' || (docker_on $HOST1 logs weavescope 2>&3 ; false)" assert_raises "docker_on $HOST1 logs weavescope 2>&1 | grep 'probe exiting' || (docker_on $HOST1 logs weavescope 2>&3 ; false)" assert_raises "docker_on $HOST1 inspect --format='{{.State.Running}}' weavescope" "false" scope_end_suite