| Instruction | input_code | output_code |
|---|---|---|
Revert "rebase instead of merge" | #!/bin/bash
# Merge master to gh-pages, rebuild the prod cljs, and push.
set -e
git checkout gh-pages
git rebase master
rm resources/public/cljs/main.js
lein cljsbuild once prod
git add -f resources/public/cljs/main.js
git commit -m "Update gh-pages js"
git push
| #!/bin/bash
# Merge master to gh-pages, rebuild the prod cljs, and push.
set -e
git checkout gh-pages
git merge master
rm resources/public/cljs/main.js
lein cljsbuild once prod
git add -f resources/public/cljs/main.js
git commit -m "Update gh-pages js"
git push
|
Fix RHEL osimport init script | #!/bin/sh
ln -s $1/images/pxeboot/vmlinuz $2/boot/kernel && \
ln -s $1/images/pxeboot/initrd.img $1/boot/initramfs/distribution
mkdir -p $2/boot/media/EFI/BOOT && \
ln -s $1/EFI/BOOT/BOOTX64.EFI $1/1/EFI/BOOT/grubx64.efi $2/boot/media/EFI/BOOT/
| #!/bin/sh
sed -i -e "s/centos/CentOS/" -e "s/rhel/Red Hat Enterprise Linux/" $2/profile.yaml
ln -s $1/images/pxeboot/vmlinuz $2/boot/kernel && \
ln -s $1/images/pxeboot/initrd.img $2/boot/initramfs/distribution
mkdir -p $2/boot/media/EFI/BOOT && \
ln -s $1/EFI/BOOT/BOOTX64.EFI $1/1/EFI/BOOT/grubx64.efi $2/boot/media/EFI/BOOT/
|
Reset one more 'dynamic file' | #!/bin/sh
git checkout Sensorama/Pods/Pods.xcodeproj/xcuserdata/wk.xcuserdatad/xcschemes/*.xcscheme
| #!/bin/sh
git checkout Sensorama/Pods/Pods.xcodeproj/xcuserdata/wk.xcuserdatad/xcschemes/*.xcscheme
git checkout Sensorama/Pods/Pods.xcodeproj/xcuserdata/wk.xcuserdatad/xcschemes/xcschememanagement.plist
|
Support of parallel bundle install | alias be="bundle exec"
alias bi="bundle install"
alias bl="bundle list"
alias bp="bundle package"
alias bo="bundle open"
alias bu="bundle update"
alias bers="bundle exec rake spec"
# The following is based on https://github.com/gma/bundler-exec
bundled_commands=(annotate berks cap capify cucumber foodcritic foreman guard jekyll kitchen knife middleman nanoc rackup rainbows rake rspec ruby shotgun spec spin spork strainer tailor taps thin thor unicorn unicorn_rails puma)
## Functions
_bundler-installed() {
which bundle > /dev/null 2>&1
}
_within-bundled-project() {
local check_dir=$PWD
while [ $check_dir != "/" ]; do
[ -f "$check_dir/Gemfile" ] && return
check_dir="$(dirname $check_dir)"
done
false
}
_run-with-bundler() {
if _bundler-installed && _within-bundled-project; then
bundle exec $@
else
$@
fi
}
## Main program
for cmd in $bundled_commands; do
eval "function unbundled_$cmd () { $cmd \$@ }"
eval "function bundled_$cmd () { _run-with-bundler $cmd \$@}"
alias $cmd=bundled_$cmd
if which _$cmd > /dev/null 2>&1; then
compdef _$cmd bundled_$cmd=$cmd
fi
done
| alias be="bundle exec"
alias bl="bundle list"
alias bp="bundle package"
alias bo="bundle open"
alias bu="bundle update"
alias bers="bundle exec rake spec"
if [[ "$(uname)" == 'Darwin' ]]
then
local cores_num="$(sysctl hw.ncpu | awk '{print $2}')"
else
local cores_num="$(nproc)"
fi
eval "alias bi='bundle install --jobs=$cores_num'"
# The following is based on https://github.com/gma/bundler-exec
bundled_commands=(annotate berks cap capify cucumber foodcritic foreman guard jekyll kitchen knife middleman nanoc rackup rainbows rake rspec ruby shotgun spec spin spork strainer tailor taps thin thor unicorn unicorn_rails puma)
## Functions
_bundler-installed() {
which bundle > /dev/null 2>&1
}
_within-bundled-project() {
local check_dir=$PWD
while [ $check_dir != "/" ]; do
[ -f "$check_dir/Gemfile" ] && return
check_dir="$(dirname $check_dir)"
done
false
}
_run-with-bundler() {
if _bundler-installed && _within-bundled-project; then
bundle exec $@
else
$@
fi
}
## Main program
for cmd in $bundled_commands; do
eval "function unbundled_$cmd () { $cmd \$@ }"
eval "function bundled_$cmd () { _run-with-bundler $cmd \$@}"
alias $cmd=bundled_$cmd
if which _$cmd > /dev/null 2>&1; then
compdef _$cmd bundled_$cmd=$cmd
fi
done
|
Use zsh instead of bash for initializing Emacs | #!/bin/bash
running=$(/usr/local/bin/emacsclient --eval '(daemonp)')
if [ $running ]; then
/usr/local/bin/emacsclient -c --no-wait
else
/usr/local/bin/emacs --daemon
/usr/local/bin/emacsclient -c --no-wait
fi
| #!/usr/local/bin/zsh
running=$(/usr/local/bin/emacsclient --eval '(daemonp)')
if [ $running ]; then
/usr/local/bin/emacsclient -c --no-wait
else
/usr/local/bin/emacs --daemon
/usr/local/bin/emacsclient -c --no-wait
fi
|
Use -f for docker tag | #!/usr/bin/env bash
set -ex
cd $(dirname $0)
stack image container
NAME=snoyberg/snoyman-webapps:sha-$(git rev-parse HEAD)
docker tag snoyberg/snoyman-webapps:latest $NAME
docker push $NAME
kubectl set image deployments/snoyman-webapps webapps=$NAME
| #!/usr/bin/env bash
set -ex
cd $(dirname $0)
stack image container
NAME=snoyberg/snoyman-webapps:sha-$(git rev-parse HEAD)
docker tag -f snoyberg/snoyman-webapps:latest $NAME
docker push $NAME
kubectl set image deployments/snoyman-webapps webapps=$NAME
|
Reset the trigger branch to master | #!/bin/bash
if [ "$TRAVIS_REPO_SLUG" == "openxc/openxc-android" ] && [ "$TRAVIS_JDK_VERSION" == "openjdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "automate-javadoc" ]; then
cp -R library/build/docs/javadoc $HOME/javadoc-latest
cd $HOME
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis-CI"
git clone --quiet --branch=master https://${GH_TOKEN}@github.com/openxc/openxc-android master > /dev/null
cd master
LATEST_TAG=$(git describe --abbrev=0 --tags)
cd ../
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/openxc/openxc-android gh-pages > /dev/null
cd gh-pages
git rm -rf ./
echo "android.openxcplatform.com" > CNAME
cp -Rf $HOME/javadoc-latest/. ./
git add -f .
git commit -m "JavaDoc $LATEST_TAG - Travis Build $TRAVIS_BUILD_NUMBER"
git push -fq origin gh-pages > /dev/null
fi | #!/bin/bash
if [ "$TRAVIS_REPO_SLUG" == "openxc/openxc-android" ] && [ "$TRAVIS_JDK_VERSION" == "openjdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
cp -R library/build/docs/javadoc $HOME/javadoc-latest
cd $HOME
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis-CI"
git clone --quiet --branch=master https://${GH_TOKEN}@github.com/openxc/openxc-android master > /dev/null
cd master
LATEST_TAG=$(git describe --abbrev=0 --tags)
cd ../
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/openxc/openxc-android gh-pages > /dev/null
cd gh-pages
git rm -rf ./
echo "android.openxcplatform.com" > CNAME
cp -Rf $HOME/javadoc-latest/. ./
git add -f .
git commit -m "JavaDoc $LATEST_TAG - Travis Build $TRAVIS_BUILD_NUMBER"
git push -fq origin gh-pages > /dev/null
fi |
Remove perl package used by irssi | #! /usr/bin/env bash
# Unoffical Bash "strict mode"
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
#ORIGINAL_IFS=$IFS
IFS=$'\t\n' # Stricter IFS settings
# This script contains calls to package managers that should be the same across
# different operating systems. Since this script can be invoked on any
# operating system, we invoke it from the main setup.sh when setting up a new
# computer
# Ruby Packages
###############################################################################
# For tmux projects
gem install tmuxinator -v 3.0.1
# Python Packages
###############################################################################
# Install Pygments
pip install Pygments
# flake8 for linting
python -m pip install flake8
# csvkit for CSV utilities
pip install csvkit
# Spotify downloader
pip3 install spotdl
# Node.JS Packages
###############################################################################
# Install jslint for linting
npm install -g jslint
# Install xml2json tool
npm install -g xml2json-command
# Install pa11y tool for web accessibility checks
npm install -g pa11y
# Perl Packages
###############################################################################
# Setup cpan and install packages for irssi
cpan Lingua::Ispell
# Haskell Packages
###############################################################################
# Make sure our local list is up to date
cabal update
# For building tables on the command line
cabal install pandoc-placetable
| #! /usr/bin/env bash
# Unoffical Bash "strict mode"
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
#ORIGINAL_IFS=$IFS
IFS=$'\t\n' # Stricter IFS settings
# This script contains calls to package managers that should be the same across
# different operating systems. Since this script can be invoked on any
# operating system, we invoke it from the main setup.sh when setting up a new
# computer
# Ruby Packages
###############################################################################
# For tmux projects
gem install tmuxinator -v 3.0.1
# Python Packages
###############################################################################
# Install Pygments
pip install Pygments
# flake8 for linting
python -m pip install flake8
# csvkit for CSV utilities
pip install csvkit
# Spotify downloader
pip3 install spotdl
# Node.JS Packages
###############################################################################
# Install jslint for linting
npm install -g jslint
# Install xml2json tool
npm install -g xml2json-command
# Install pa11y tool for web accessibility checks
npm install -g pa11y
# Haskell Packages
###############################################################################
# Make sure our local list is up to date
cabal update
# For building tables on the command line
cabal install pandoc-placetable
|
Check Bashisms: Only ignore some files in `docker` | @INCLUDE_COMMON@
echo
echo ELEKTRA SCRIPTS BASHISMS TEST
echo
command -v checkbashisms >/dev/null 2>&1 || { echo "checkbashisms command needed for this test, aborting" >&2; exit 0; }
cd "@CMAKE_SOURCE_DIR@"
find -version > /dev/null 2>&1 > /dev/null && FIND=find || FIND='find -E'
# this way we also check subdirectories
# The script `check-env-dep` uses process substitution which is **not** a standard `sh` feature!
# See also: https://unix.stackexchange.com/questions/151925
scripts=$($FIND scripts/ -type f -not -regex '.+/docker/.+' -not -regex \
'.+(check-env-dep|Jenkinsfile(.daily)?|gitignore|kdb_zsh_completion|run_dev_env|sed|Vagrantfile|\.(cmake|fish|in|md|txt))$' | \
xargs)
checkbashisms $scripts
ret=$?
# 2 means skipped file, e.g. README.md, that is fine
# only 1, 3 and 4 are actually bad
test $ret -eq 0 || test $ret -eq 2
exit_if_fail "Possible bashisms detected, please check."
end_script
| @INCLUDE_COMMON@
echo
echo ELEKTRA SCRIPTS BASHISMS TEST
echo
command -v checkbashisms >/dev/null 2>&1 || { echo "checkbashisms command needed for this test, aborting" >&2; exit 0; }
cd "@CMAKE_SOURCE_DIR@"
find -version > /dev/null 2>&1 > /dev/null && FIND=find || FIND='find -E'
# this way we also check subdirectories
# The script `check-env-dep` uses process substitution which is **not** a standard `sh` feature!
# See also: https://unix.stackexchange.com/questions/151925
scripts=$($FIND scripts/ -type f -not -regex \
'.+(check-env-dep|gitignore|kdb_zsh_completion|run_dev_env|sed|(Docker|Jenkins|Vagrant)file.*|\.(cmake|fish|in|md|txt))$' | \
xargs)
checkbashisms $scripts
ret=$?
# 2 means skipped file, e.g. README.md, that is fine
# only 1, 3 and 4 are actually bad
test $ret -eq 0 || test $ret -eq 2
exit_if_fail "Possible bashisms detected, please check."
end_script
|
Change condition of Jenkins start up judgement. | #!/bin/bash
set -e
# Add common variables.
source ~/ci/config
source ~/ci/config.default
# Create OpenLDAP server.
#if [ ${#SLAPD_DOMAIN} -gt 0 -a ${#SLAPD_PASSWORD} -gt 0 ]; then
# source ~/openldap-docker/upgradeOpenLDAP.sh
#fi
# Upgrade Gerrit server container.
source ~/gerrit-docker/upgradeGerrit.sh
while [ -z "$(docker logs ${GERRIT_NAME} 2>&1 | tail -n 4 | grep "Gerrit Code Review [0-9..]* ready")" ]; do
echo "Waiting gerrit ready."
sleep 1
done
# Upgrade Jenkins server container.
source ~/jenkins-docker/upgradeJenkins.sh
while [ -z "$(docker logs ${JENKINS_NAME} 2>&1 | tail -n 5 | grep "Jenkins is fully up and running")" ]; do
echo "Waiting jenkins ready."
sleep 1
done
# Upgrade Redmine server container.
#source ~/redmine-docker/upgradeRedmine.sh
#
#while [ -z "$(docker logs ${REDMINE_NAME} 2>&1 | tail -n 5 | grep 'INFO success: nginx entered RUNNING state')" ]; do
# echo "Waiting redmine ready."
# sleep 1
#done
#
# Upgrade Nginx proxy server container.
source ~/nginx-docker/upgradeNginx.sh
| #!/bin/bash
set -e
# Add common variables.
source ~/ci/config
source ~/ci/config.default
# Create OpenLDAP server.
#if [ ${#SLAPD_DOMAIN} -gt 0 -a ${#SLAPD_PASSWORD} -gt 0 ]; then
# source ~/openldap-docker/upgradeOpenLDAP.sh
#fi
# Upgrade Gerrit server container.
source ~/gerrit-docker/upgradeGerrit.sh
while [ -z "$(docker logs ${GERRIT_NAME} 2>&1 | tail -n 4 | grep "Gerrit Code Review [0-9..]* ready")" ]; do
echo "Waiting gerrit ready."
sleep 1
done
# Upgrade Jenkins server container.
source ~/jenkins-docker/upgradeJenkins.sh
while [ -z "$(docker logs ${JENKINS_NAME} 2>&1 | tail -n 5 | grep "setting agent port for jnlp")" ]; do
echo "Waiting jenkins ready."
sleep 1
done
# Upgrade Redmine server container.
#source ~/redmine-docker/upgradeRedmine.sh
#
#while [ -z "$(docker logs ${REDMINE_NAME} 2>&1 | tail -n 5 | grep 'INFO success: nginx entered RUNNING state')" ]; do
# echo "Waiting redmine ready."
# sleep 1
#done
#
# Upgrade Nginx proxy server container.
source ~/nginx-docker/upgradeNginx.sh
|
Use our own version of minizip | #!/usr/bin/env sh
set -e
set -x
rm -rf /tmp/minizip
git clone https://github.com/nmoinvaz/minizip.git /tmp/minizip
cd /tmp/minizip
cmake .
sudo make install
| #!/usr/bin/env sh
set -e
set -x
rm -rf /tmp/minizip
git clone https://github.com/nmoinvaz/minizip.git /tmp/minizip
cd /tmp/minizip
/tmp/cmake/bin/cmake .
sudo make install
|
Use CC instead of gcc | #!/usr/bin/env bash
make std
make std-check
make sse2-check
gcc -O3 -finline-functions -fomit-frame-pointer -DNDEBUG -DDSFMT_MEXP=19937 \
-fPIC -fno-strict-aliasing --param max-inline-insns-single=1800 -Wmissing-prototypes \
-Wall -std=c99 -shared dSFMT.c -o libdSFMT.${SHLIB_EXT}
mkdir -p ${PREFIX}/lib
mkdir -p ${PREFIX}/include
cp libdSFMT.${SHLIB_EXT} ${PREFIX}/lib
cp dSFMT.h ${PREFIX}/include
| #!/usr/bin/env bash
make std
make std-check
make sse2-check
${CC} -O3 -finline-functions -fomit-frame-pointer -DNDEBUG -DDSFMT_MEXP=19937 \
-fPIC -fno-strict-aliasing --param max-inline-insns-single=1800 -Wmissing-prototypes \
-Wall -std=c99 -shared dSFMT.c -o libdSFMT.${SHLIB_EXT}
mkdir -p ${PREFIX}/lib
mkdir -p ${PREFIX}/include
cp libdSFMT.${SHLIB_EXT} ${PREFIX}/lib
cp dSFMT.h ${PREFIX}/include
|
Make tree work like the old MS-DOS tree command | alias :Gcommit='git commit'
alias :Gstatus='git status'
alias :Gw='git add'
alias ack='ag'
alias ack-grep='ag'
alias cd..='cd ..'
alias lla='ls -la'
alias mc='mc -s'
alias ooffice='libreoffice'
alias psg='ps aux | grep'
alias xv='geeqie'
| alias :Gcommit='git commit'
alias :Gstatus='git status'
alias :Gw='git add'
alias ack='ag'
alias ack-grep='ag'
alias cd..='cd ..'
alias lla='ls -la'
alias mc='mc -s'
alias ooffice='libreoffice'
alias psg='ps aux | grep'
alias tree='tree -d'
alias xv='geeqie'
|
Change rofi to use todoist script | #!/bin/bash
output="$(task projects | egrep -v '(Project|projects|---)' | \
awk '{print $1}' | sed '/^$/d' | rofi -dmenu -p 'TaskWarrior: ')"
task add project:$output
| #!/bin/bash
output="$(todo.py projects | rofi -dmenu -p 'Todoist: ')"
todo.py add $output
|
Remove client install from ci script | #!/bin/bash -ex
pushd `dirname $0`/.. > /dev/null
root=$(pwd -P)
popd > /dev/null
export GOPATH=$root/gogo
mkdir -p $GOPATH
###
go get github.com/venicegeo/pz-workflow
go install github.com/venicegeo/pz-workflow/server
go install github.com/venicegeo/pz-workflow/client
###
src=$GOPATH/bin/pz-workflow
# gather some data about the repo
source $root/ci/vars.sh
# stage the artifact for a mvn deploy
mv $src $root/$APP.$EXT
| #!/bin/bash -ex
pushd `dirname $0`/.. > /dev/null
root=$(pwd -P)
popd > /dev/null
export GOPATH=$root/gogo
mkdir -p $GOPATH
###
go get github.com/venicegeo/pz-workflow
go install github.com/venicegeo/pz-workflow/server
###
src=$GOPATH/bin/pz-workflow
# gather some data about the repo
source $root/ci/vars.sh
# stage the artifact for a mvn deploy
mv $src $root/$APP.$EXT
|
Remove tr, it buffers and kills input | #!/bin/sh
qemu-system-i386 -m 64M -M q35 -watchdog i6300esb -device rtl8139 -boot order=c -serial stdio -d cpu_reset -no-reboot -cdrom esrtk.iso 2>&1 | tr -d '\r' | tee run.log
| #!/bin/sh
qemu-system-i386 -m 64M -M q35 -watchdog i6300esb -device rtl8139 -boot order=c -serial stdio -d cpu_reset -no-reboot -cdrom esrtk.iso 2>&1 | tee run.log
|
Fix the way output is tarred up | #!/usr/bin/env bash
module load freesurfer/5.3.0
if [ $? != 0 ];
then
source /cvmfs/oasis.opensciencegrid.org/osg/modules/lmod/current/init/bash
module load freesurfer/5.3.0
fi
module load xz/5.2.2
date
start=`date +%s`
WD=$PWD
if [ "$OSG_WN_TMP" != "" ];
then
SUBJECTS_DIR=`mktemp -d --tmpdir=$OSG_WN_TMP`
else
SUBJECTS_DIR=`mktemp -d --tmpdir=$PWD`
fi
cp $1_recon1_output.tar.xz $SUBJECTS_DIR
cd $SUBJECTS_DIR
tar xvaf $1_recon1_output.tar.xz
rm $1_recon1_output.tar.xz
recon-all \
-s $1 \
-autorecon2-perhemi \
-hemi $2 \
-openmp $3
cd $SUBJECTS_DIR
tar cJf $WD/$1_recon2_$2_output.tar.xz $SUBJECTS_DIR/*
cd $WD | #!/usr/bin/env bash
module load freesurfer/5.3.0
if [ $? != 0 ];
then
source /cvmfs/oasis.opensciencegrid.org/osg/modules/lmod/current/init/bash
module load freesurfer/5.3.0
fi
module load xz/5.2.2
date
start=`date +%s`
WD=$PWD
if [ "$OSG_WN_TMP" != "" ];
then
SUBJECTS_DIR=`mktemp -d --tmpdir=$OSG_WN_TMP`
else
SUBJECTS_DIR=`mktemp -d --tmpdir=$PWD`
fi
cp $1_recon1_output.tar.xz $SUBJECTS_DIR
cd $SUBJECTS_DIR
tar xvaf $1_recon1_output.tar.xz
rm $1_recon1_output.tar.xz
recon-all \
-s $1 \
-autorecon2-perhemi \
-hemi $2 \
-openmp $3
cd $SUBJECTS_DIR
tar cJf $WD/$1_recon2_$2_output.tar.xz *
cd $WD |
Deal with different username and coc dependencies | #!/bin/sh
mkdir -p ~/repo/neovim-image/
curl -L https://github.com/neovim/neovim/releases/download/nightly/nvim.appimage > ~/repo/neovim-image/nvim.appimage
chmod +x ~/repo/neovim-image/nvim.appimage
| #!/bin/sh
mkdir -p ~/repo/neovim-image/
curl -L https://github.com/neovim/neovim/releases/download/nightly/nvim.appimage > ~/repo/neovim-image/nvim.appimage
chmod +x ~/repo/neovim-image/nvim.appimage
# Necessary for coc.nvim plugin
if ! [ -x "$(command -v nodejs)" ]; then
sudo apt-get install nodejs
fi
# Necessary for coc.nvim plugin
if ! [ -x "$(command -v yarn)" ]; then
curl --compressed -o- -L https://yarnpkg.com/install.sh | bash
fi
# .bashrc change
if ! [ -x "$(command -v vim | grep nvim)" ]; then
read -p "Add vim <= neovim alias? " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo 'alias vim=~/repo/neovim-image/nvim.appimage' >> ~/.bashrc
fi
fi
|
Validate the contents of the pidfile before using them. | #!/bin/sh
. $(dirname $0)/testsuite-common.sh
setup_test
run_daemon
# Timeout for various operations
timeout=10
# wait for the pid file to appear
elapsed=0
while [ ! -s "${pidfile}" ] ; do
[ $((elapsed+=1)) -le "${timeout}" ] ||
fail "timed out waiting for pid file to appear"
sleep 1
done
notice "pid file appeared after $elapsed seconds"
# kill tsdfx
kill "$(cat ${pidfile})"
notice "killed daemon"
# wait for the pid file to vanish
elapsed=0
while [ -s "${pidfile}" ] ; do
[ $((elapsed+=1)) -le "${timeout}" ] ||
fail "timed out waiting for pid file to vanish"
sleep 1
done
notice "pid file vanished after $elapsed seconds"
cleanup_test
| #!/bin/sh
. $(dirname $0)/testsuite-common.sh
setup_test
run_daemon
# Timeout for various operations
timeout=10
# wait for the pid file to appear
elapsed=0
while [ ! -s "${pidfile}" ] ; do
[ $((elapsed+=1)) -le "${timeout}" ] ||
fail "timed out waiting for pid file to appear"
sleep 1
done
notice "pid file appeared after $elapsed seconds"
# kill tsdfx
pid=$(cat "${pidfile}")
expr "${pid}" : "^[1-9][0-9]*$" >/dev/null ||
fail "unexpected contents in pid file"
kill "${pid}"
notice "killed daemon"
# wait for the pid file to vanish
elapsed=0
while [ -s "${pidfile}" ] ; do
[ $((elapsed+=1)) -le "${timeout}" ] ||
fail "timed out waiting for pid file to vanish"
sleep 1
done
notice "pid file vanished after $elapsed seconds"
cleanup_test
|
Remove R output from nexus file | #!/bin/bash
module load R/3.2.0
module load gdal/1.11.2
module load geos/3.4.2
module load species_geo_coder/git
set -e
function logg() {
echo $(date +%Y%m%d-%H%M:) "$*"
}
echo "Geocoder is in: $(which geocoder.py)"
GEOCODERDIR=$(dirname $(which geocoder.py))
geocoder.py \
--path_script $GEOCODERDIR \
-l {{localities}} \
-p {{polygons}} \
{% if verbose %} -v {% endif %} \
{% if plot %} --plot {% endif %} \
{% if occurences > 1 %} -n {{occurences}} {% endif %} \
> {{outfile}}
{% if plot %}
zip plots.zip \
barchart_per_polygon.pdf \
barchart_per_species.pdf \
heatplot_coexistence.pdf \
map_samples_overview.pdf \
map_samples_per_polygon.pdf \
map_samples_per_species.pdf \
number_of_species_per_polygon.pdf
#'barchart_per_polygon', 'barchart_per_species',
# 'heatplot_coexistence', 'map_samples_overview',
# 'map_samples_per_polygon', 'map_samples_per_species',
# 'number_of_species_per_polygon']:
{% endif %}
| #!/bin/bash
module load R/3.2.0
module load gdal/1.11.2
module load geos/3.4.2
module load species_geo_coder/git
set -e
function logg() {
echo $(date +%Y%m%d-%H%M:) "$*"
}
echo "Geocoder is in: $(which geocoder.py)"
GEOCODERDIR=$(dirname $(which geocoder.py))
geocoder.py \
--path_script $GEOCODERDIR \
-l {{localities}} \
-p {{polygons}} \
{% if verbose %} -v {% endif %} \
{% if plot %} --plot {% endif %} \
{% if occurences > 1 %} -n {{occurences}} {% endif %} \
--out {{outfile}}
{% if plot %}
zip plots.zip \
barchart_per_polygon.pdf \
barchart_per_species.pdf \
heatplot_coexistence.pdf \
map_samples_overview.pdf \
map_samples_per_polygon.pdf \
map_samples_per_species.pdf \
number_of_species_per_polygon.pdf
#'barchart_per_polygon', 'barchart_per_species',
# 'heatplot_coexistence', 'map_samples_overview',
# 'map_samples_per_polygon', 'map_samples_per_species',
# 'number_of_species_per_polygon']:
{% endif %}
|
Add /sbin to PATH for Darwin. | pylith=`pwd`
if test ! -f bin/pylith; then
echo
echo "*** Error! ***"
echo
echo "Source this script from the top-level PyLith directory:"
echo
echo " cd [directory containing 'setup.sh']"
echo " source setup.sh"
echo
else
export PATH="$pylith/bin:/bin:/usr/bin:$PATH"
export PYTHONPATH="$pylith/lib/python2.7/site-packages"
echo "Ready to run PyLith."
fi
| pylith=`pwd`
if test ! -f bin/pylith; then
echo
echo "*** Error! ***"
echo
echo "Source this script from the top-level PyLith directory:"
echo
echo " cd [directory containing 'setup.sh']"
echo " source setup.sh"
echo
else
export PATH="$pylith/bin:/bin:/usr/bin:/sbin/:$PATH"
export PYTHONPATH="$pylith/lib/python2.7/site-packages"
echo "Ready to run PyLith."
fi
|
Add light packages to install pacman | #!/bin/bash
sudo pacman -S git \
py3status \
tmux \
neovim \
python-pynvim \
python \
cmake \
go \
go-tools \
npm \
mono \
alacritty \
fzf \
# Uncomment this if using regular arch linux
#sudo pacman -Sy i3-wm
#sudo pacman -Sy dmenu
## install yay
#cd /opt
#sudo git clone https://aur.archlinux.org/yay-git.git
#sudo chown -R $USERNAME:$USERNAME /yay-git
#cd yay-git
#makepkg -si
yay -S autojump
| #!/bin/bash
sudo pacman -S git \
py3status \
tmux \
neovim \
python-pynvim \
python \
cmake \
go \
go-tools \
npm \
mono \
alacritty \
fzf \
light
# Uncomment this if using regular arch linux
#sudo pacman -Sy i3-wm
#sudo pacman -Sy dmenu
## install yay
#cd /opt
#sudo git clone https://aur.archlinux.org/yay-git.git
#sudo chown -R $USERNAME:$USERNAME /yay-git
#cd yay-git
#makepkg -si
yay -S autojump
|
Use full name for template. | jsdoc -p -d confluence -t /usr/local/bin/jsdoc/templates/confluence mercury-doc-test.js webrtc.js webrtc/event.js webrtc/client.js webrtc/identity.js webrtc/endpoints.js webrtc/signaling.js webrtc/media.js
| jsdoc -p -d confluence -t /usr/local/bin/jsdoc/templates/confluence-jsdoc-template mercury-doc-test.js webrtc.js webrtc/event.js webrtc/client.js webrtc/identity.js webrtc/endpoints.js webrtc/signaling.js webrtc/media.js
|
Rename default branch to main | #!/bin/bash
alias g='git'
alias gb='git branch -vv'
alias gbr='git branch -vv -r'
alias gs='g status --untracked-files=no'
alias gsa='g status'
alias gsaa='g status --untracked-files=all'
alias gd='git diff'
alias gfa='g fetch --all'
alias gfat='gfa --tags'
alias gfap='gfa --prune --tags'
alias gp='git pull'
alias gr='git remote -v'
alias gpum='git pull upstream master'
| #!/bin/bash
alias g='git'
alias gb='git branch -vv'
alias gbr='git branch -vv -r'
alias gs='g status --untracked-files=no'
alias gsa='g status'
alias gsaa='g status --untracked-files=all'
alias gd='git diff'
alias gfa='g fetch --all'
alias gfat='gfa --tags'
alias gfap='gfa --prune --tags'
alias gp='git pull'
alias gr='git remote -v'
alias gpum='git pull upstream main'
|
Clean up /tmp/myweb on BeforeInstall | #!/bin/bash
if which yum &>/dev/null; then
yum update -y
yum install -y httpd
chkconfig httpd on
elif which apt-get &>/dev/null; then
if ! dpkg-query -l apache2 | grep -q "^ii"; then
echo "apache2 not installed, installing!"
apt-get update
apt-get -y upgrade
apt-get -y install apache2
else
echo "apache2 already installed, skipping!"
fi
fi
| #!/bin/bash
# Install Apache on Amazon Linux or Ubuntu (only if not previously installed)
if which yum &>/dev/null; then
yum update -y
yum install -y httpd
chkconfig httpd on
elif which apt-get &>/dev/null; then
if ! dpkg-query -l apache2 | grep -q "^ii"; then
echo "apache2 not installed, installing!"
apt-get update
apt-get -y upgrade
apt-get -y install apache2
else
echo "apache2 already installed, skipping!"
fi
fi
# Make sure /tmp/myweb doesn't exist so that the test in AfterInstall is really valid
[ -d /tmp/myweb ] && rm -Rf /tmp/myweb
|
Use a more unique postgresql port | #!/bin/bash -x
# Start PostgreSQL process for tests
PGSQL_DATA=`mktemp -d /tmp/gnocchi-psql-XXXXX`
PGSQL_PATH=`pg_config --bindir`
PGSQL_PORT=9823
${PGSQL_PATH}/pg_ctl initdb -D ${PGSQL_DATA}
LANGUAGE=C ${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-k ${PGSQL_DATA} -p ${PGSQL_PORT}" start > /dev/null
export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=${PGSQL_DATA}&port=${PGSQL_PORT}"
mkdir $PGSQL_DATA/tooz
export GNOCCHI_COORDINATION_URL="${GNOCCHI_TEST_INDEXER_URL}"
$*
ret=$?
${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-p $PGSQL_PORT" stop
rm -rf ${PGSQL_DATA}
exit $ret
| #!/bin/bash -x
# Start PostgreSQL process for tests
PGSQL_DATA=`mktemp -d /tmp/gnocchi-psql-XXXXX`
PGSQL_PATH=`pg_config --bindir`
PGSQL_PORT=9824
${PGSQL_PATH}/pg_ctl initdb -D ${PGSQL_DATA}
LANGUAGE=C ${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-k ${PGSQL_DATA} -p ${PGSQL_PORT}" start > /dev/null
export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=${PGSQL_DATA}&port=${PGSQL_PORT}"
mkdir $PGSQL_DATA/tooz
export GNOCCHI_COORDINATION_URL="${GNOCCHI_TEST_INDEXER_URL}"
$*
ret=$?
${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-p $PGSQL_PORT" stop
rm -rf ${PGSQL_DATA}
exit $ret
|
Configure PHP CodeSniffer to follow the PSR standards | #!/bin/bash
#Author Jean Silva <me@jeancsil.com>
set -e
set -o pipefail
php bin/phpunit --coverage-html reports/coverage
php bin/phpcs
php bin/phpmd src html cleancode,codesize,controversial,design,naming,unusedcode --reportfile reports/mess_detector.html | #!/bin/bash
#Author Jean Silva <me@jeancsil.com>
set -e
set -o pipefail
php bin/phpunit --coverage-html reports/coverage
php bin/phpcs -p --colors --standard=PSR2 src
php bin/phpmd src html cleancode,codesize,controversial,design,naming,unusedcode --reportfile reports/mess_detector.html
|
Install sccache in base images | #!/bin/bash
set -ex
# Install ccache from source.
# Needs specific branch to work with nvcc (ccache/ccache#145)
# Also pulls in a commit that disables documentation generation,
# as this requires asciidoc to be installed (which pulls in a LOT of deps).
pushd /tmp
git clone https://github.com/pietern/ccache -b ccbin
pushd ccache
./autogen.sh
./configure --prefix=/usr/local
make "-j$(nproc)" install
popd
popd
| #!/bin/bash
set -ex
# Install ccache from source.
# Needs specific branch to work with nvcc (ccache/ccache#145)
# Also pulls in a commit that disables documentation generation,
# as this requires asciidoc to be installed (which pulls in a LOT of deps).
pushd /tmp
git clone https://github.com/pietern/ccache -b ccbin
pushd ccache
./autogen.sh
./configure --prefix=/usr/local
make "-j$(nproc)" install
popd
popd
# Install sccache from binary release.
# Note: this release does NOT yet work with nvcc.
pushd /tmp
curl -LOs https://github.com/mozilla/sccache/releases/download/0.2.5/sccache-0.2.5-x86_64-unknown-linux-musl.tar.gz
tar -zxvf sccache-0.2.5-x86_64-unknown-linux-musl.tar.gz
mv sccache-0.2.5-x86_64-unknown-linux-musl/sccache /usr/local/bin/sccache
rm -rf sccache-0.2.5-x86_64-unknown-linux-musl*
popd
|
Remove data/ from linux deploy script. | #!/bin/bash
# Deploy data directory to device using adb.
~/Android/Sdk/platform-tools/adb push data /sdcard/Android/data/com.android.flickercladding/data/
| #!/bin/bash
# Deploy data directory to device using adb.
~/Android/Sdk/platform-tools/adb push data /sdcard/Android/data/com.android.flickercladding/
|
Print service type and hostname while updating | #!/bin/sh
#
# Protokollen - update single HTTP service host
#
if [ $# -ne 4 ]; then
echo "Usage: $0 <entity ID> <service ID> <service type> <hostname>";
exit 1
fi
entId="$1"
svcId="$2"
svcType="$3"
hostname="$4"
F="$hostname"."$$".json
cd "$(dirname $0)" || exit 1
./check_http_primary.py "$hostname" > "$F" && ./pk-import-http-prefs.php "$F" && rm -f "$F"
../bin/sslprobe "$hostname" > "$F" 2>/dev/null && ./pk-import-tls-statuses.php "$svcId" "$F" && rm -f "$F"
../bin/sslprobe "www.$hostname" > "$F" 2>/dev/null && ./pk-import-tls-statuses.php "$svcId" "$F" && rm -f "$F"
| #!/bin/sh
#
# Protokollen - update single HTTP service host
#
if [ $# -ne 4 ]; then
echo "Usage: $0 <entity ID> <service ID> <service type> <hostname>";
exit 1
fi
entId="$1"
svcId="$2"
svcType="$3"
hostname="$4"
F="$hostname"."$$".json
cd "$(dirname $0)" || exit 1
printf "%s\t%s\n" "$svcType" "$hostname"
./check_http_primary.py "$hostname" > "$F" && ./pk-import-http-prefs.php "$F" && rm -f "$F"
../bin/sslprobe "$hostname" > "$F" 2>/dev/null && ./pk-import-tls-statuses.php "$svcId" "$F" && rm -f "$F"
../bin/sslprobe "www.$hostname" > "$F" 2>/dev/null && ./pk-import-tls-statuses.php "$svcId" "$F" && rm -f "$F"
|
Add linux support on percentage display | #!/usr/bin/env bash
print_battery_percentage() {
# percentage displayed in the 2nd field of the 2nd row
pmset -g batt | awk 'NR==2 { gsub(/;/,""); print $2 }'
}
main() {
print_battery_percentage
}
main
| #!/usr/bin/env bash
command_exists() {
local command="$1"
type "$command" >/dev/null 2>&1
}
print_battery_percentage() {
# percentage displayed in the 2nd field of the 2nd row
if command_exists "pmset"; then
pmset -g batt | awk 'NR==2 { gsub(/;/,""); print $2 }'
elif command_exists "upower"; then
battery=$(upower -e | grep battery | head -1)
upower -i $battery | grep percentage | awk '{print $2}'
fi
}
main() {
print_battery_percentage
}
main
|
Add 'exaile' to common software list | #!/usr/bin/env bash
#
# This script is intended to install some common software for an Arch Linux
# automatically with minimal user interaction and install some common software.
# Certain configurations are personal preference and may be adapted.
#
set -e
# Needed software
sudo pacman -S openssh \
keepass \
wget \
firefox firefox-i18n-de \
thunderbird thunderbird-i18n-de \
libreoffice-still libreoffice-still-de \
gimp \
inkscape \
vlc \
texlive-most texlive-lang texmaker \
brasero
## Install 'yaourt' for AUR packages
# Install 'package-query' (dependency of yaourt)
curl -O https://aur.archlinux.org/cgit/aur.git/snapshot/package-query.tar.gz
tar -xvzf package-query.tar.gz
cd package-query
makepkg -si
cd ..
# Install 'yaourt'
curl -O https://aur.archlinux.org/cgit/aur.git/snapshot/yaourt.tar.gz
tar -xvzf yaourt.tar.gz
cd yaourt
makepkg -si
#yaourt -S exaile
| #!/usr/bin/env bash
#
# This script is intended to install some common software for an Arch Linux
# automatically with minimal user interaction and install some common software.
# Certain configurations are personal preference and may be adapted.
#
set -e
# Needed software
sudo pacman -S openssh \
keepass \
wget \
firefox firefox-i18n-de \
thunderbird thunderbird-i18n-de \
libreoffice-still libreoffice-still-de \
gimp \
inkscape \
vlc \
texlive-most texlive-lang texmaker \
brasero
## Install 'yaourt' for AUR packages
# Install 'package-query' (dependency of yaourt)
curl -O https://aur.archlinux.org/cgit/aur.git/snapshot/package-query.tar.gz
tar -xvzf package-query.tar.gz
cd package-query
makepkg -si
cd ..
# Install 'yaourt'
curl -O https://aur.archlinux.org/cgit/aur.git/snapshot/yaourt.tar.gz
tar -xvzf yaourt.tar.gz
cd yaourt
makepkg -si
yaourt -S exaile
|
Fix wrong service name mongodb -> mongod | #!/usr/bin/env bash
sudo service mongodb stop
sudo apt-get purge mongodb mongodb-clients mongodb-server mongodb-dev
sudo apt-get purge mongodb-10gen
sudo apt-get autoremove
# Import mongodb public GPG key
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
sudo apt-get update -qq -y
sudo apt-get install -y mongodb-org
sudo rm -f /etc/mongod.conf
sudo cp ./config/mongodb_dev.conf /etc/mongod.conf
sudo service mongodb start | #!/usr/bin/env bash
sudo service mongodb stop
sudo apt-get purge mongodb mongodb-clients mongodb-server mongodb-dev
sudo apt-get purge mongodb-10gen
sudo apt-get autoremove
# Import mongodb public GPG key
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
sudo apt-get update -qq -y
sudo apt-get install -y mongodb-org
sudo rm -f /etc/mongod.conf
sudo cp ./config/mongodb_dev.conf /etc/mongod.conf
sudo service mongod start |
Add few more brew cask applications | #
# Application installer (via brew-cask)
#
#!/bin/sh
set -e
# Apps
apps=(
android-file-transfer
atom
betterzipql
caffeine
dropbox
firefox
firefoxdeveloperedition
flash
flux
google-chrome
google-chrome-canary
google-drive
intellij-idea
iterm2
java7
keka
qlcolorcode
qlmarkdown
qlstephen
quicklook-csv
quicklook-json
skitch
skype
spotify
sublime-text3
transmission
virtualbox
vlc
zoomus
)
# fonts
fonts=(
font-clear-sans
font-hack
font-m-plus
font-roboto
)
# Specify the location of the apps
appdir="/Applications"
# Check if homebrew is installed
if test ! $(which brew); then
echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Install homebrew-cask
echo "Tapping casks..."
# Tap alternative versions
brew tap caskroom/versions
# Tap the fonts
brew tap caskroom/fonts
# install apps
echo "Installing apps..."
brew cask install --appdir=$appdir ${apps[@]}
# install fonts
echo "Installing fonts..."
brew cask install ${fonts[@]}
# Tidy up
brew cask cleanup
exit 0
| #
# Application installer (via brew-cask)
#
#!/bin/sh
set -e
# Apps
apps=(
android-file-transfer
atom
betterzipql
caffeine
dropbox
firefox
firefoxdeveloperedition
flash
flux
google-chrome
google-chrome-canary
google-drive
intellij-idea
iterm2
java
keepassx
keka
qlcolorcode
qlmarkdown
qlstephen
quicklook-csv
quicklook-json
postman
skitch
skype
spotify
sublime-text3
transmission
virtualbox
vlc
zoomus
)
# fonts
fonts=(
font-clear-sans
font-hack
font-m-plus
font-roboto
)
# Specify the location of the apps
appdir="/Applications"
# Check if homebrew is installed
if test ! $(which brew); then
echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Install homebrew-cask
echo "Tapping casks..."
# Tap alternative versions
brew tap caskroom/versions
# Tap the fonts
brew tap caskroom/fonts
# install apps
echo "Installing apps..."
brew cask install --appdir=$appdir ${apps[@]}
# install fonts
echo "Installing fonts..."
brew cask install ${fonts[@]}
# Tidy up
brew cask cleanup
exit 0
|
Truncate log files right at the end | #!/bin/bash -x
# Clean up leftover build files
rm -fr /home/*/{.ssh,.ansible,.cache}
rm -fr /root/{.ssh,.ansible,.cache}
rm -fr /root/'~'*
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
sync
| #!/bin/bash -x
# Clean up leftover build files
rm -fr /home/*/{.ssh,.ansible,.cache}
rm -fr /root/{.ssh,.ansible,.cache}
rm -fr /root/'~'*
# Truncate any log files
find /var/log -type f -print0 | xargs -0 truncate -s0
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
sync
|
Remove build number from artefact name. | #!/bin/bash
VIRTUALENV_DIR=/var/tmp/virtualenvs/$(echo ${JOB_NAME} | tr ' ' '-')
export PIP_DOWNLOAD_CACHE=/var/tmp/pip_download_cache
virtualenv --clear --no-site-packages $VIRTUALENV_DIR
source $VIRTUALENV_DIR/bin/activate
pip install -r requirements.txt
python fetch_csv.py --client-secrets /etc/google/oauth/client_secrets.json --oauth-tokens /var/lib/google/oauth/drive.db
python create_pages.py
mkdir -p artefacts
cd output
tar -zcvf ../artefacts/service-explorer-$BUILD_ID.tgz .
| #!/bin/bash
VIRTUALENV_DIR=/var/tmp/virtualenvs/$(echo ${JOB_NAME} | tr ' ' '-')
export PIP_DOWNLOAD_CACHE=/var/tmp/pip_download_cache
virtualenv --clear --no-site-packages $VIRTUALENV_DIR
source $VIRTUALENV_DIR/bin/activate
pip install -r requirements.txt
python fetch_csv.py --client-secrets /etc/google/oauth/client_secrets.json --oauth-tokens /var/lib/google/oauth/drive.db
python create_pages.py
mkdir -p artefacts
cd output
tar -zcvf ../artefacts/service-explorer.tgz .
|
Add add test for generating custom header | #!/usr/bin/env bash
source $(dirname ${BASH_SOURCE[0]})/utils.sh
test_call() {
imag-store create -p /test-call
if [[ ! $? -eq 0 ]]; then
err "Return value should be zero, was non-zero"
return 1;
fi
}
test_mkstore() {
imag-store create -p /test-mkstore || { err "Calling imag failed"; return 1; }
if [[ -d ${STORE} ]]; then
out "Store exists."
else
err "No store created"
return 1
fi
}
test_std_header() {
local expected=$(cat <<EOS
---
[imag]
links = []
version = "0.1.0"
---
EOS
)
imag-store create -p /test-std-header
local result=$(cat ${STORE}/test-std-header)
if [[ "$expected" == "$result" ]]; then
out "Expected store entry == result"
else
err "${STORE}/test differs from expected"
return 1
fi
}
invoke_tests \
test_call \
test_mkstore \
test_std_header \
test_std_header_plus_custom
| #!/usr/bin/env bash
source $(dirname ${BASH_SOURCE[0]})/utils.sh
test_call() {
imag-store create -p /test-call
if [[ ! $? -eq 0 ]]; then
err "Return value should be zero, was non-zero"
return 1;
fi
}
test_mkstore() {
imag-store create -p /test-mkstore || { err "Calling imag failed"; return 1; }
if [[ -d ${STORE} ]]; then
out "Store exists."
else
err "No store created"
return 1
fi
}
test_std_header() {
local expected=$(cat <<EOS
---
[imag]
links = []
version = "0.1.0"
---
EOS
)
imag-store create -p /test-std-header
local result=$(cat ${STORE}/test-std-header)
if [[ "$expected" == "$result" ]]; then
out "Expected store entry == result"
else
err "${STORE}/test differs from expected"
return 1
fi
}
test_std_header_plus_custom() {
local expected=$(cat <<EOS
---
[imag]
links = []
version = "0.1.0"
[zzz]
zzz = "z"
---
EOS
)
imag-store create -p /test-std-header-plus-custom entry -h zzz.zzz=z
local result=$(cat ${STORE}/test-std-header-plus-custom)
if [[ "$expected" == "$result" ]]; then
out "Expected store entry == result"
else
err "${STORE}/test differs from expected"
return 1
fi
}
invoke_tests \
test_call \
test_mkstore \
test_std_header \
test_std_header_plus_custom
|
Remove sorting which may rely on LOCALE of machine | #!/bin/sh
cat input | $JOSHUA/bin/joshua-decoder -m 500m -config joshua.config 2> log | sort > output
if [[ $? -ne 0 ]]; then
exit 1
fi
diff -u output output.expected > diff
if [[ $? -eq 0 ]]; then
rm -f output log diff
exit 0
else
exit 1
fi
| #!/bin/sh
cat input | $JOSHUA/bin/joshua-decoder -m 500m -config joshua.config 2> log > output
if [[ $? -ne 0 ]]; then
exit 1
fi
diff -u output output.expected > diff
if [[ $? -eq 0 ]]; then
rm -f output log diff
exit 0
else
exit 1
fi
|
Move tokens to local settings | ## assume colors work
export TERM=xterm-256color
## less options
export LESS='-g -i -M -R -S -w -z-4'
## GitHub API
github_token="$HOME/.github"
[[ -e $github_token ]] && source $github_token
## homebrew GitHub API
homebrew_token="$HOME/.homebrew"
[[ -e $homebrew_token ]] && source $homebrew_token
## path
source ~/.shell/path-edit.sh
path_front "bin" "$HOME/bin" "/usr/local/bin" "$HOME/.rbenv/bin"
path_back "/sbin" "/bin" "/usr/sbin" "/usr/bin" "$HOME/.cask/bin"
## aliases
source ~/.shell/aliases.sh
## local settings
[[ -e ~/.localrc ]] && source ~/.localrc
## pyenv
command -v pyenv >/dev/null 2>&1 && eval "$(pyenv init -)"
command -v pyenv-virtualenv-init >/dev/null 2>&1 && eval "$(pyenv virtualenv-init -)"
## rbenv
[[ -e ~/.rbenv ]] && eval "$(rbenv init -)"
## prompt
[[ -e ~/.liquidprompt/ ]] && source ~/.liquidprompt/liquidprompt
| ## assume colors work
export TERM=xterm-256color
## less options
export LESS='-g -i -M -R -S -w -z-4'
## path
source ~/.shell/path-edit.sh
path_front "bin" "$HOME/bin" "/usr/local/bin" "$HOME/.rbenv/bin"
path_back "/sbin" "/bin" "/usr/sbin" "/usr/bin" "$HOME/.cask/bin"
## aliases
source ~/.shell/aliases.sh
## local settings
[[ -e ~/.localrc ]] && source ~/.localrc
## pyenv
command -v pyenv >/dev/null 2>&1 && eval "$(pyenv init -)"
command -v pyenv-virtualenv-init >/dev/null 2>&1 && eval "$(pyenv virtualenv-init -)"
## rbenv
[[ -e ~/.rbenv ]] && eval "$(rbenv init -)"
## prompt
[[ -e ~/.liquidprompt/ ]] && source ~/.liquidprompt/liquidprompt
|
Migrate DB when starting production Docker container | #!/bin/bash
set -e
# When starting the application, cached versions of the datasets must first be built. This can be
# skipped when the PREBUILT_DATASETS environment variable is set. In that case, it is expected that
# you have already built the datasets and mounted them into tmp/atlas via Docker.
if [[ -z "${PREBUILT_DATASETS}" && ( "${RAILS_ENV}" == "production" || "${RAILS_ENV}" == "staging" )]]; then
echo "Building datasets..."
bundle exec rake deploy:load_etsource deploy:calculate_datasets --trace
echo "Starting server..."
fi
bundle exec --keep-file-descriptors puma -C config/puma.rb
| #!/bin/bash
set -e
# When starting the application, cached versions of the datasets must first be built. This can be
# skipped when the PREBUILT_DATASETS environment variable is set. In that case, it is expected that
# you have already built the datasets and mounted them into tmp/atlas via Docker.
if [[ -z "${PREBUILT_DATASETS}" && ( "${RAILS_ENV}" == "production" || "${RAILS_ENV}" == "staging" )]]; then
echo "Building datasets..."
bundle exec rake deploy:load_etsource deploy:calculate_datasets --trace
echo "Starting server..."
fi
bundle exec rails db:migrate
bundle exec --keep-file-descriptors puma -C config/puma.rb
|
Add command to prepare extraction | #!/bin/bash
. ./src/prepare_galaxy_tools/functions.sh
galaxy_tool_dir=$1
tool_dir=$2
current_dir=`pwd`
section_dir=extract
echo "Extract data..."
create_tool_section_dir $galaxy_tool_dir/$section_dir
| #!/bin/bash
. ./src/prepare_galaxy_tools/functions.sh
galaxy_tool_dir=$1
tool_dir=$2
current_dir=`pwd`
section_dir=extract
echo "Extract data..."
create_tool_section_dir $galaxy_tool_dir/$section_dir
echo " Extract sequence file..."
seq_file_extraction=$section_dir/extract_sequence_file
create_copy_tool_dir $tool_dir/$seq_file_extraction $galaxy_tool_dir/$seq_file_extraction
echo " Extract similarity search report..."
search_report_extraction=$section_dir/extract_similarity_search_report
create_copy_tool_dir $tool_dir/$search_report_extraction $galaxy_tool_dir/$search_report_extraction
|
Handle netrc and gnupg permissions on mac setup | #!/usr/bin/env bash
# This is a script for bootstrapping macOS setup
set -euo pipefail
if [[ ! -e ./manage.sh ]]; then
echo "This script must be run from the root of the dotfiles repo"
exit 1
fi
./manage.sh install
if [[ ! -e "$HOME/.bashrc" ]]; then
echo "Looks like the manage script failed, try and run it manually"
exit 1
fi
if ! command -v brew &> /dev/null; then
echo "You need to install homebrew"
exit 1
fi
open "$DOTFILES/osx/parsec.terminal"
"$DOTFILES/osx/defaults.sh"
# Add Terminal.app theme
open ./osx/parsec.terminal
# Install some default software
brew bundle --file="./osx/Brewfile"
brew bundle --file="./osx/Brewfile.cask"
# Set many default settings
./osx/defaults.sh
| #!/usr/bin/env bash
# This is a script for bootstrapping macOS setup
set -euo pipefail
if [[ ! -e ./manage.sh ]]; then
echo "This script must be run from the root of the dotfiles repo"
exit 1
fi
./manage.sh install
if [[ ! -e "$HOME/.bashrc" ]]; then
echo "Looks like the manage script failed, try and run it manually"
exit 1
fi
if ! command -v brew &> /dev/null; then
echo "You need to install homebrew"
exit 1
fi
# Set correct netrc permissions
touch "$HOME/.netrc"
chmod 0600 "$HOME/.netrc"
# Correct gnupg permissions after linking, this must be done before a private
# key is added otherwise it will bork the key setup
# https://superuser.com/a/954536
chown -R "$(whoami)" ~/.gnupg/
chmod 600 ~/.gnupg/*
chmod 700 ~/.gnupg
# Add Terminal.app theme
open ./osx/parsec.terminal
# Install some default software
brew bundle --file="./osx/Brewfile"
brew bundle --file="./osx/Brewfile.cask"
# Set many default settings
./osx/defaults.sh
|
Send access logs to both stdout and files Rotate access and error logs Log the ip address from X-Forwarded-For header | #!/bin/sh
set -e
cat > /etc/apache2/conf-available/dyn-vhost.conf <<EOF
UseCanonicalName Off
LogFormat "%V %h %l %u %t \"%r\" %s %b" vcommon
CustomLog "/srv/${WP_ENV}/logs/access_log" vcommon
ErrorLog "/srv/${WP_ENV}/logs/error_log"
VirtualDocumentRoot "/srv/${WP_ENV}/%0/htdocs"
<VirtualHost *:443>
SSLEngine on
SSLCertificateFile "/etc/apache2/ssl/server.cert"
SSLCertificateKeyFile "/etc/apache2/ssl/server.key"
</VirtualHost>
EOF
/bin/mkdir -p /srv/${WP_ENV}/logs
/bin/chown -R www-data: /srv
/usr/sbin/a2dissite 000-default
/usr/sbin/a2enmod ssl
/usr/sbin/a2enmod rewrite
/usr/sbin/a2enmod vhost_alias
/usr/sbin/a2enconf dyn-vhost
/usr/sbin/apache2ctl -DFOREGROUND
| #!/bin/sh
set -e
cat > /etc/apache2/conf-available/dyn-vhost.conf <<EOF
UseCanonicalName Off
SetEnvIf X-Forwarded-For "^(.*\..*\..*\..*)|(.*:.*:.*:.*:.*:.*:.*:.*)" proxied
LogFormat "%V %h %l %u %t \"%r\" %s %b" vcommon
LogFormat "%V %{X-Forwarded-For}i %l %u %t \"%r\" %s %b" vproxy
CustomLog "| /usr/bin/rotatelogs /srv/${WP_ENV}/logs/access_log.%Y%m%d 86400" vcommon env=!proxied
CustomLog "/dev/stdout" vcommon env=!proxied
CustomLog "| /usr/bin/rotatelogs /srv/${WP_ENV}/logs/access_log.%Y%m%d 86400" vproxy env=proxied
CustomLog "/dev/stdout" vcommon env=proxied
ErrorLog "| /usr/bin/rotatelogs /srv/${WP_ENV}/logs/error_log.%Y%m%d 86400"
VirtualDocumentRoot "/srv/${WP_ENV}/%0/htdocs"
<VirtualHost *:443>
SSLEngine on
SSLCertificateFile "/etc/apache2/ssl/server.cert"
SSLCertificateKeyFile "/etc/apache2/ssl/server.key"
</VirtualHost>
EOF
/bin/mkdir -p /srv/${WP_ENV}/logs
/bin/chown -R www-data: /srv
/usr/sbin/a2dissite 000-default
/usr/sbin/a2enmod ssl
/usr/sbin/a2enmod rewrite
/usr/sbin/a2enmod vhost_alias
/usr/sbin/a2enconf dyn-vhost
/usr/sbin/apache2ctl -DFOREGROUND
|
Change the default editor to macvim | alias ls="ls -FG"
alias oa='open -a' # App Launcher
export EDITOR='mate -w'
export PATH="~/bin:/usr/local/bin:/usr/local/sbin:/usr/local/mysql/bin:/usr/local/git/bin:$PATH"
export MANPATH="/usr/local/man:/usr/local/mysql/man:/usr/local/git/man:$MANPATH"
export M2_HOME=/usr/local/apache-maven
export M2=$M2_HOME/bin
export PATH=$M2:$PATH
export JDK_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
export PYTHONPATH="/usr/local/lib/python2.6/site-packages/:$PYTHONPATH"
if [[ -s $HOME/.rvm/scripts/rvm ]] ; then source $HOME/.rvm/scripts/rvm ; fi
| alias ls="ls -FG"
alias oa='open -a' # App Launcher
export EDITOR='mvim -f'
export PATH="~/bin:/usr/local/bin:/usr/local/sbin:/usr/local/mysql/bin:/usr/local/git/bin:$PATH"
export MANPATH="/usr/local/man:/usr/local/mysql/man:/usr/local/git/man:$MANPATH"
export M2_HOME=/usr/local/apache-maven
export M2=$M2_HOME/bin
export PATH=$M2:$PATH
export JDK_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
export PYTHONPATH="/usr/local/lib/python2.6/site-packages/:$PYTHONPATH"
if [[ -s $HOME/.rvm/scripts/rvm ]] ; then source $HOME/.rvm/scripts/rvm ; fi
|
Use bash instead of sh. | #!/bin/sh
#
# tmux start script from:
# http://www.huyng.com/posts/productivity-boost-with-tmux-iterm2-workspaces/
#
export PATH=$PATH:/usr/local/bin
# abort if we're already inside a TMUX session
[ "$TMUX" == "" ] || exit 0
# startup a "default" session if none currently exists
tmux has-session -t _default || tmux new-session -s _default -d
# present menu for user to choose which workspace to open
PS3="Please choose your session: "
options=($(tmux list-sessions -F "#S") "NEW SESSION" "ZSH")
echo "Available sessions"
echo "------------------"
echo " "
select opt in "${options[@]}"
do
case $opt in
"NEW SESSION")
read -p "Enter new session name: " SESSION_NAME
tmux new -s "$SESSION_NAME"
break
;;
"ZSH")
zsh --login
break;;
*)
tmux attach-session -t $opt
break
;;
esac
done
| #!/bin/bash
#
# tmux start script from:
# http://www.huyng.com/posts/productivity-boost-with-tmux-iterm2-workspaces/
#
export PATH=$PATH:/usr/local/bin
# abort if we're already inside a TMUX session
[ "$TMUX" == "" ] || exit 0
# startup a "default" session if none currently exists
tmux has-session -t _default || tmux new-session -s _default -d
# present menu for user to choose which workspace to open
PS3="Please choose your session: "
options=($(tmux list-sessions -F "#S") "NEW SESSION" "ZSH")
echo "Available sessions"
echo "------------------"
echo " "
select opt in "${options[@]}"
do
case $opt in
"NEW SESSION")
read -p "Enter new session name: " SESSION_NAME
tmux new -s "$SESSION_NAME"
break
;;
"ZSH")
zsh --login
break;;
*)
tmux attach-session -t $opt
break
;;
esac
done
|
Fix neovim python venv script to use new .python-version location | #!/usr/bin/env bash
if [ ! -d "$HOME/.pyenv" ] && [ -d "/usr/local/var/pyenv" ]; then
ln -snf /usr/local/var/pyenv "$HOME/.pyenv"
fi
python_version="$(cat "$HOME/.dotfiles/files/.python-version")"
neovim_venv="neovim3"
pyenv install --skip-existing "$python_version"
pyenv virtualenv --force "$python_version" "$neovim_venv"
eval "$(pyenv init -)"
pyenv activate "$neovim_venv"
pip install --force-reinstall pynvim
pyenv which python
| #!/usr/bin/env bash
if [ ! -d "$HOME/.pyenv" ] && [ -d "/usr/local/var/pyenv" ]; then
ln -snf /usr/local/var/pyenv "$HOME/.pyenv"
fi
python_version="$(cat "$HOME/.dotfiles/.python-version")"
neovim_venv="neovim3"
pyenv install --skip-existing "$python_version"
pyenv virtualenv --force "$python_version" "$neovim_venv"
eval "$(pyenv init -)"
pyenv activate "$neovim_venv"
pip install --force-reinstall pynvim
pyenv which python
|
Make output more readable and searchable | #!/bin/bash
# Install all non-obsolete android sdk packages.
function install_sdk {
android update sdk -u -s -a -t "$1"
}
# install_sdk 163
function fetch_non_obsoled_package_indices {
# Fetch the sdk list
android list sdk -u -s -a |\
# Filter obsoleted packages
sed '/\(Obsolete\)/d' |\
# Filter to take only the package index
sed 's/^[ ]*\([0-9]*\).*/\1/' |\
# Filter the empty lines
sed -n 's/^[^ $]/\0/p'
# Send the output to the for loop one by one
}
for package_index in $(fetch_non_obsoled_package_indices)
do
echo -ne "y" | install_sdk "${package_index}"
echo "====================================================================="
echo.
done
| #!/bin/bash
# Install all non-obsolete android sdk packages.
function install_sdk {
android update sdk -u -s -a -t "$1"
}
# install_sdk 163
function fetch_non_obsoled_package_indices {
# Fetch the sdk list
android list sdk -u -s -a |\
# Filter obsoleted packages
sed '/\(Obsolete\)/d' |\
# Filter to take only the package index
sed 's/^[ ]*\([0-9]*\).*/\1/' |\
# Filter the empty lines
sed -n 's/^[^ $]/\0/p'
# Send the output to the for loop one by one
}
for package_index in $(fetch_non_obsoled_package_indices)
do
echo "====================================================================="
echo "Start to install package: ${package_index}"
echo "====================================================================="
echo -e "y" | install_sdk "${package_index}"
echo
echo
done
|
Make sure config files are pristine. | #!/bin/bash
set -e
set -x
#
# This script helps a contributor to cut a new release of resourced-master.
#
# Prerequisites:
# - Ensure you(contributor) has Go 1.6.x or newer.
# - Ensure govendor is installed.
#
# Arguments:
# $VERSION: semantic version number (required)
#
: "${VERSION?You must set VERSION}"
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT_DIR=$(dirname $CURRENT_DIR)
cd $ROOT_DIR
cp -r tests/config-files conf
rm -rf conf/config-files
govendor add +external
GOOS=darwin go build
tar cvzf resourced-master-darwin-$VERSION.tar.gz resourced-master static/ templates/ migrations/ conf/
GOOS=linux go build
tar cvzf resourced-master-linux-$VERSION.tar.gz resourced-master static/ templates/ migrations/ conf/
rm -rf $ROOT_DIR/conf
rm -f $ROOT_DIR/resourced-master | #!/bin/bash
set -e
set -x
#
# This script helps a contributor to cut a new release of resourced-master.
#
# Prerequisites:
# - Ensure you(contributor) has Go 1.6.x or newer.
# - Ensure govendor is installed.
#
# Arguments:
# $VERSION: semantic version number (required)
#
: "${VERSION?You must set VERSION}"
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT_DIR=$(dirname $CURRENT_DIR)
cd $ROOT_DIR
git checkout tests/config-files/*.toml
cp -r tests/config-files conf
rm -rf conf/config-files
govendor add +external
GOOS=darwin go build
tar cvzf resourced-master-darwin-$VERSION.tar.gz resourced-master static/ templates/ migrations/ conf/
GOOS=linux go build
tar cvzf resourced-master-linux-$VERSION.tar.gz resourced-master static/ templates/ migrations/ conf/
rm -rf $ROOT_DIR/conf
rm -f $ROOT_DIR/resourced-master |
Add sanity checks for the autotool commands. | #!/bin/sh
LT=`which libtoolize`
if [ -x "$LT" ]; then
LT=libtoolize
else
LT=glibtoolize
fi
# copy libltdl into working repository
$LT -c -f --ltdl
# rebuild autotool files
autoreconf -f -i -W all
| #!/bin/sh
#
# Thomas Krennwallner <tkren@kr.tuwien.ac.at>
#
# 1. check for libtool, autoconf, automake, pkg-config
# 2. run libtoolize and autoreconf to create libltdl, configure, and Makefile.in's
#
LT=`which libtoolize`
GLT=`which glibtoolize`
if [ -x "$LT" ]; then
LT=libtoolize
elif [ -x "$GLT" ]; then
LT=glibtoolize
else
echo "libtoolize: command not found. Please install GNU libtool, GNU autoconf, and GNU automake."
exit 1
fi
ARC=`which autoreconf`
if [ ! -x "$ARC" ]; then
echo "autoreconf: command not found. Please install GNU autoconf and GNU automake."
exit 1
fi
AM=`which automake`
if [ ! -x "$AM" ]; then
echo "automake: command not found. Please install GNU automake."
exit 1
fi
PC=`which pkg-config`
if [ ! -x "$PC" ]; then
echo "pkg-config: command not found. Please install pkg-config."
exit 1
fi
# copy libltdl into working repository
$LT -c -f --ltdl
# rebuild autotool files
autoreconf -f -i -W all
|
Disable Python pin in conda. | #!/bin/bash
set -x
set -e
CONDA_PATH=${1:-~/conda}
wget -c https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
chmod a+x Miniconda3-latest-Linux-x86_64.sh
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_PATH -b -f
fi
export PATH=$CONDA_PATH/bin:$PATH
echo "python==3.7" > $CONDA_PATH/conda-meta/pinned
#echo "conda-build==3.14.0" >> $CONDA_PATH/conda-meta/pinned
conda install -y python
conda update -y conda
conda install -y conda-build
conda install -y conda-verify
conda install -y anaconda-client
conda install -y jinja2
conda update -y --all
| #!/bin/bash
set -x
set -e
CONDA_PATH=${1:-~/conda}
wget -c https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
chmod a+x Miniconda3-latest-Linux-x86_64.sh
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_PATH -b -f
fi
export PATH=$CONDA_PATH/bin:$PATH
#echo "python==3.7" > $CONDA_PATH/conda-meta/pinned
#echo "conda-build==3.14.0" >> $CONDA_PATH/conda-meta/pinned
conda install -y python
conda update -y conda
conda install -y conda-build
conda install -y conda-verify
conda install -y anaconda-client
conda install -y jinja2
conda update -y --all
|
Update script to call demo with csv path | #!/usr/bin/env bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd $DIR/..
cabal build
dist/build/habsim/habsim > $DIR/flight_path.js
echo "Open $DIR/index.html in your browser."
popd
| #!/usr/bin/env bash
set -e
if [[ "$1" == "" ]] || [[ ! -f "$1" ]]; then
echo "Argument to script must be a Grib2 CSV file."
exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd $DIR/..
cabal build
dist/build/habsim/habsim "$1" > $DIR/flight_path.js
echo "Open $DIR/index.html in your browser."
popd
|
Change workdir to repo name | export ROOT_DIR=$( dirname "${BASH_SOURCE[0]}" )/../..
export BUILD_DIR="$ROOT_DIR/build"
export CICD_DIR="$ROOT_DIR/.cicd"
export HELPERS_DIR="$CICD_DIR/helpers"
export JOBS=${JOBS:-"$(getconf _NPROCESSORS_ONLN)"}
export MOUNTED_DIR='/workdir'
export DOCKER_CLI_EXPERIMENTAL='enabled'
export DOCKERHUB_CI_REGISTRY="docker.io/eosio/ci"
export DOCKERHUB_CONTRACTS_REGISTRY="docker.io/eosio/ci-contracts-builder"
export CI_REGISTRIES=("$DOCKERHUB_CI_REGISTRY" "$MIRROR_REGISTRY")
export CONTRACT_REGISTRIES=("$DOCKERHUB_CONTRACTS_REGISTRY" "$MIRROR_REGISTRY")
# capitalize each word in a string
function capitalize()
{
if [[ ! "$1" =~ 'mac' ]]; then # Don't capitalize mac
echo "$1" | awk '{$1=toupper(substr($1,1,1))substr($1,2)}1'
else
echo "$1"
fi
}
# load buildkite intrinsic environment variables for use in docker run
function buildkite-intrinsics()
{
BK_ENV=''
if [[ -f "$BUILDKITE_ENV_FILE" ]]; then
while read -r var; do
BK_ENV="$BK_ENV --env ${var%%=*}"
done < "$BUILDKITE_ENV_FILE"
fi
echo "$BK_ENV"
}
| export ROOT_DIR=$( dirname "${BASH_SOURCE[0]}" )/../..
export BUILD_DIR="$ROOT_DIR/build"
export CICD_DIR="$ROOT_DIR/.cicd"
export HELPERS_DIR="$CICD_DIR/helpers"
export JOBS=${JOBS:-"$(getconf _NPROCESSORS_ONLN)"}
export MOUNTED_DIR='/eos'
export DOCKER_CLI_EXPERIMENTAL='enabled'
export DOCKERHUB_CI_REGISTRY="docker.io/eosio/ci"
export DOCKERHUB_CONTRACTS_REGISTRY="docker.io/eosio/ci-contracts-builder"
export CI_REGISTRIES=("$DOCKERHUB_CI_REGISTRY" "$MIRROR_REGISTRY")
export CONTRACT_REGISTRIES=("$DOCKERHUB_CONTRACTS_REGISTRY" "$MIRROR_REGISTRY")
# capitalize each word in a string
function capitalize()
{
if [[ ! "$1" =~ 'mac' ]]; then # Don't capitalize mac
echo "$1" | awk '{$1=toupper(substr($1,1,1))substr($1,2)}1'
else
echo "$1"
fi
}
# load buildkite intrinsic environment variables for use in docker run
function buildkite-intrinsics()
{
BK_ENV=''
if [[ -f "$BUILDKITE_ENV_FILE" ]]; then
while read -r var; do
BK_ENV="$BK_ENV --env ${var%%=*}"
done < "$BUILDKITE_ENV_FILE"
fi
echo "$BK_ENV"
}
|
Set execute bit on downloaded wifi join script. | #!/bin/bash
# Sets up a Raspberry Pi for the Robotic Light Ballet project.
# This should be run on the Pi(s) that will be in the robots.
# Pull down and source the common functions
wget --no-check-certificate -O pi-setup-common.sh https://raw.githubusercontent.com/mitmuseumstudio/RoboticLightBallet/master/pi-setup/pi-setup-common.sh
. ./pi-setup-common.sh
SUPPLICANT=/etc/wpa_supplicant/wpa_supplicant.conf
PRESERVE $SUPPLICANT
FRESH $SUPPLICANT
cat <<EOF >> $SUPPLICANT
network={
ssid="studiobot"
psk="lightballet"
proto=RSN
key_mgmt=WPA-PSK
pairwise=CCMP
auth_alg=OPEN
}
EOF
# Pull down the wifi joining script and run it.
wget --no-check-certificate -O pi-robot-join-wifi.sh https://raw.githubusercontent.com/mitmuseumstudio/RoboticLightBallet/master/pi-setup/pi-robot-join-wifi.sh
# This will run until the Pi sees and has joined the network.
./pi-robot-join-wifi.sh
| #!/bin/bash
# Sets up a Raspberry Pi for the Robotic Light Ballet project.
# This should be run on the Pi(s) that will be in the robots.
# Pull down and source the common functions
wget --no-check-certificate -O pi-setup-common.sh https://raw.githubusercontent.com/mitmuseumstudio/RoboticLightBallet/master/pi-setup/pi-setup-common.sh
. ./pi-setup-common.sh
SUPPLICANT=/etc/wpa_supplicant/wpa_supplicant.conf
PRESERVE $SUPPLICANT
FRESH $SUPPLICANT
cat <<EOF >> $SUPPLICANT
network={
ssid="studiobot"
psk="lightballet"
proto=RSN
key_mgmt=WPA-PSK
pairwise=CCMP
auth_alg=OPEN
}
EOF
# Pull down the wifi joining script and run it.
wget --no-check-certificate -O pi-robot-join-wifi.sh https://raw.githubusercontent.com/mitmuseumstudio/RoboticLightBallet/master/pi-setup/pi-robot-join-wifi.sh
chmod +x ./pi-robot-join-wifi.sh
# This will run until the Pi sees and has joined the network.
./pi-robot-join-wifi.sh
|
Add sl_vm_domain to blobstore-local script | #!/bin/bash -e
cd $(dirname $0)
../1-click/generate-bosh-lite-in-sl-manifest.sh blobstore-local-bosh-lite > /tmp/bosh-lite-in-sl.yml
../fly-login.sh flintstone
# Hack: using sed to work around Concourse limitation. See bosh-create-env.sh for more details.
fly \
-t flintstone \
set-pipeline \
-p blobstore-local-bosh-lite \
-c <(spruce --concourse merge ~/workspace/1-click-bosh-lite-pipeline/template.yml ../1-click/recreate-bosh-lite-every-morning.yml) \
-v github-private-key="$(lpass show "Shared-Flintstone"/Github --notes --sync=no)" \
-v bosh-manifest="$(sed -e 's/((/_(_(/g' /tmp/bosh-lite-in-sl.yml )" \
-v bosh_lite_name='blobstore-local-bosh-lite' \
-v state_git_repo='git@github.com:cloudfoundry/bits-service-private-config.git'
rm -f /tmp/bosh-lite-in-sl.yml
fly -t flintstone expose-pipeline --pipeline blobstore-local-bosh-lite
| #!/bin/bash -e
cd $(dirname $0)
../1-click/generate-bosh-lite-in-sl-manifest.sh blobstore-local-bosh-lite > /tmp/bosh-lite-in-sl.yml
../fly-login.sh flintstone
# Hack: using sed to work around Concourse limitation. See bosh-create-env.sh for more details.
fly \
-t flintstone \
set-pipeline \
-p blobstore-local-bosh-lite \
-c <(spruce --concourse merge ~/workspace/1-click-bosh-lite-pipeline/template.yml ../1-click/recreate-bosh-lite-every-morning.yml) \
-v github-private-key="$(lpass show "Shared-Flintstone"/Github --notes --sync=no)" \
-v bosh-manifest="$(sed -e 's/((/_(_(/g' /tmp/bosh-lite-in-sl.yml )" \
-v bosh_lite_name='blobstore-local-bosh-lite' \
-v state_git_repo='git@github.com:cloudfoundry/bits-service-private-config.git' \
-v sl_vm_domain=flintstone.ams
rm -f /tmp/bosh-lite-in-sl.yml
fly -t flintstone expose-pipeline --pipeline blobstore-local-bosh-lite
|
Add linter to test script | #!/usr/bin/env bash
set -e
# Run complete test suite only these conditions hold
# * SauceLabs credentials are defined
# * The node version is 14.*
# * The jQuery version is 1.*
# - so only a single member machine in the matrix calls sauce
if [[ "$BROWSERSTACK_ACCESS_KEY" != "" && "$BROWSERSTACK_USERNAME" != "" && "$1" == "16.x" ]]; then
# Run build, basic tests and SauceLabs tests
echo "Running complete test suite..."
npm test
npm run dist:ci
npm run test:ci
sleep 3 # Wait for threads to exit?
else
# Run basic tests
echo "Running basic tests..."
npm test
sleep 3 # Wait for threads to exit?
fi
| #!/usr/bin/env bash
set -e
# Run complete test suite only these conditions hold
# * SauceLabs credentials are defined
# * The node version is 14.*
# * The jQuery version is 1.*
# - so only a single member machine in the matrix calls sauce
if [[ "$BROWSERSTACK_ACCESS_KEY" != "" && "$BROWSERSTACK_USERNAME" != "" && "$1" == "16.x" ]]; then
# Run build, basic tests and SauceLabs tests
echo "Running complete test suite..."
npm test
npm run lint
npm run dist:ci
npm run test:ci
sleep 3 # Wait for threads to exit?
else
# Run basic tests
echo "Running basic tests..."
npm test
sleep 3 # Wait for threads to exit?
fi
|
Switch back to service because of Ubuntu 12 | #!/usr/bin/env bash
sudo service mongodb stop
# Import mongodb public GPG key
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
sudo apt-get update -qq -y
sudo apt-get install -y mongodb-org
sudo systemctl start mongod | #!/usr/bin/env bash
sudo service mongodb stop
# Import mongodb public GPG key
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
sudo apt-get update -qq -y
sudo apt-get install -y mongodb-org
sudo service mongodb start |
Add an alias for top-level Brightline directory | alias reload!='. ~/.zshrc'
alias goreef='cd ~/Programming/brightline/reef'
alias wds='webpack-dev-server'
| alias reload!='. ~/.zshrc'
alias gobl="cd ~/Programming/brightline"
alias goreef='cd ~/Programming/brightline/reef'
alias wds='webpack-dev-server'
|
Cut raster into tiles on import | #!/bin/bash
if [ -z "$3" ]; then
echo "Usage: $0 <raster> <table> <database> [<source_srid>]" >&2
exit 1
fi
INPUTRASTERFILE=$1
TABLENAME=$2
export PGDATABASE=$3
S_SRS=
test -n "$4" && S_SRS="-s_srs EPSG:$4"
# get config
. /systemapic/config/env.sh || exit 1
# env vars
export PGUSER=$SYSTEMAPIC_PGSQL_USERNAME
export PGPASSWORD=$SYSTEMAPIC_PGSQL_PASSWORD
export PGHOST=postgis
# Reproject to EPSG:3857
RASTERFILE=/tmp/import_raster_$$.tif
gdalwarp -t_srs EPSG:3857 ${S_SRS} ${INPUTRASTERFILE} ${RASTERFILE} || exit 1
# import raster
set -o pipefail # needed to get an error if raster2pgsql errors out
raster2pgsql \
-s 3857 -I -C \
${RASTERFILE} $TABLENAME |
psql -q --set ON_ERROR_STOP=1
| #!/bin/bash
if [ -z "$3" ]; then
echo "Usage: $0 <raster> <table> <database> [<source_srid>]" >&2
exit 1
fi
INPUTRASTERFILE=$1
TABLENAME=$2
export PGDATABASE=$3
S_SRS=
test -n "$4" && S_SRS="-s_srs EPSG:$4"
# get config
. /systemapic/config/env.sh || exit 1
# env vars
export PGUSER=$SYSTEMAPIC_PGSQL_USERNAME
export PGPASSWORD=$SYSTEMAPIC_PGSQL_PASSWORD
export PGHOST=postgis
# Reproject to EPSG:3857
RASTERFILE=/tmp/import_raster_$$.tif
gdalwarp -t_srs EPSG:3857 ${S_SRS} ${INPUTRASTERFILE} ${RASTERFILE} || exit 1
TILESIZE="128x128"
# import raster
set -o pipefail # needed to get an error if raster2pgsql errors out
raster2pgsql \
-s 3857 -I -C -Y \
-t ${TILESIZE} \
${RASTERFILE} $TABLENAME |
psql -q --set ON_ERROR_STOP=1
|
Move bootstrap to bashrc so that dotstar is always initialized | #!/usr/bin/env bash
#set -e
#set -x
# Create symlink to project files in home directory.
DOT_STAR_ROOT="$( dirname $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))"
[ ! -L "${HOME}/.dot-star" ] && ln -vs "${DOT_STAR_ROOT}/" "${HOME}/.dot-star"
# Find existing bootstrap in bash profile.
line_number=$(grep --line-number "# .dotstar bootstrap" "${HOME}/.bash_profile" | cut -d ":" -f "1")
if [ ! -z "${line_number}" ]; then
# Remove installed bootstrap.
next_line_number="${line_number}"
(( next_line_number += 1 ))
sed -i "" "${line_number},${next_line_number}d" "${HOME}/.bash_profile"
fi
# Add bootstrap to bash profile.
echo -e "# .dotstar bootstrap\n[[ -r ~/.dot-star/bash/.bash_profile ]] && . ~/.dot-star/bash/.bash_profile" >> "$HOME/.bash_profile"
# Run post installation script.
source "${DOT_STAR_ROOT}/script/post_install.sh"
| #!/usr/bin/env bash
#set -e
#set -x
# Create symlink to project files in home directory.
DOT_STAR_ROOT="$( dirname $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))"
[ ! -L "${HOME}/.dot-star" ] && ln -vs "${DOT_STAR_ROOT}/" "${HOME}/.dot-star"
# Find existing bootstrap in bash profile.
line_number=$(grep --line-number "# .dotstar bootstrap" "${HOME}/.bash_profile" | cut -d ":" -f "1")
if [ ! -z "${line_number}" ]; then
# Remove installed bootstrap.
next_line_number="${line_number}"
(( next_line_number += 1 ))
sed -i "" "${line_number},${next_line_number}d" "${HOME}/.bash_profile"
fi
# Add bootstrap to bash profile.
echo -e "# .dotstar bootstrap\n[[ -r ~/.bashrc ]] && source ~/.bashrc" >> "$HOME/.bash_profile"
# Find existing bootstrap in bashrc.
line_number=$(grep --line-number "# .dotstar bootstrap" "${HOME}/.bashrc" | cut -d ":" -f "1")
if [ ! -z "${line_number}" ]; then
# Remove installed bootstrap.
next_line_number="${line_number}"
(( next_line_number += 1 ))
sed -i "" "${line_number},${next_line_number}d" "${HOME}/.bashrc"
fi
# Add bootstrap to bashrc.
echo -e "# .dotstar bootstrap\n[[ -r ~/.dot-star/bash/.bash_profile ]] && source ~/.dot-star/bash/.bash_profile" >> "$HOME/.bashrc"
# Run post installation script.
source "${DOT_STAR_ROOT}/script/post_install.sh"
|
Make a huge static library | #!/bin/sh -xe
#
# This script automates the build process described by webrtc:
# https://code.google.com/p/webrtc/source/browse/trunk/talk/app/webrtc/objc/README
#
gclient config http://webrtc.googlecode.com/svn/trunk
echo "target_os = ['mac']" >> .gclient
gclient sync
perl -i -wpe "s/target\_os \= \[\'mac\'\]/target\_os \= \[\'ios\', \'mac\']/g" .gclient
gclient sync
cd trunk
export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 libjingle_objc=1 OS=ios target_arch=armv7"
export GYP_GENERATORS="ninja"
export GYP_GENERATOR_FLAGS="output_dir=out_ios"
export GYP_CROSSCOMPILE=1
gclient runhooks
ninja -C out_ios/Debug -t clean
ninja -C out_ios/Debug libjingle_peerconnection_objc_test
| #!/bin/bash -xe
#
# This script automates the build process described by webrtc:
# https://code.google.com/p/webrtc/source/browse/trunk/talk/app/webrtc/objc/README
#
gclient config http://webrtc.googlecode.com/svn/trunk
echo "target_os = ['mac']" >> .gclient
gclient sync
perl -i -wpe "s/target\_os \= \[\'mac\'\]/target\_os \= \[\'ios\', \'mac\']/g" .gclient
gclient sync
cd trunk
export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 libjingle_objc=1 OS=ios target_arch=armv7"
export GYP_GENERATORS="ninja"
export GYP_GENERATOR_FLAGS="output_dir=out_ios"
export GYP_CROSSCOMPILE=1
gclient runhooks
ninja -C out_ios/Debug -t clean
ninja -C out_ios/Debug libjingle_peerconnection_objc_test
AR=`xcrun -f ar`
PWD=`pwd`
ROOT=$PWD
LIBS_OUT=`find $PWD/out_ios/Debug -d 1 -name '*.a'`
FATTYCAKES_OUT=out.huge
mkdir -p $FATTYCAKES_OUT
cd $FATTYCAKES_OUT
for LIB in $LIBS_OUT
do
$AR -x $LIB
done
$AR -q libfattycakes.a *.o
cd $ROOT
REVISION=`svn info $BRANCH | grep Revision | cut -f2 -d: | tr -d ' '`
echo "WEBRTC_REVISION=$REVISION" > build.properties
|
Build the Varnish docker image on vagrant boot | #!/bin/bash
# install docker
curl -sSL https://get.docker.io/ubuntu/ | sudo sh
# install nodejs
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install --assume-yes nodejs
# install sails.js
npm install --global sails@0.10.5
rsync -av /vagrant/web /var/web
pushd /var/web && npm install && popd
| #!/bin/bash
# install docker
curl -sSL https://get.docker.io/ubuntu/ | sudo sh
# install nodejs
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install --assume-yes nodejs
# install sails.js
npm install --global sails@0.10.5
# install the web app
rsync -av /vagrant/web /var/web
pushd /var/web && npm install && popd
# build the Varnish docker image
sudo docker build --force-rm --tag="varnish4" /vagrant/varnish4/ |
Change global functional tests timeout. | #!/bin/bash
#
# This file is part of the KubeVirt project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 Red Hat, Inc.
#
set -e
source hack/common.sh
source hack/config.sh
functest_docker_prefix=${manifest_docker_prefix-${docker_prefix}}
if [[ ${TARGET} == openshift* ]]; then
oc=${kubectl}
fi
${TESTS_OUT_DIR}/tests.test -kubeconfig=${kubeconfig} -tag=${docker_tag} -prefix=${functest_docker_prefix} -oc-path=${oc} -kubectl-path=${kubectl} -test.timeout 90m ${FUNC_TEST_ARGS}
| #!/bin/bash
#
# This file is part of the KubeVirt project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 Red Hat, Inc.
#
set -e
source hack/common.sh
source hack/config.sh
functest_docker_prefix=${manifest_docker_prefix-${docker_prefix}}
if [[ ${TARGET} == openshift* ]]; then
oc=${kubectl}
fi
${TESTS_OUT_DIR}/tests.test -kubeconfig=${kubeconfig} -tag=${docker_tag} -prefix=${functest_docker_prefix} -oc-path=${oc} -kubectl-path=${kubectl} -test.timeout 120m ${FUNC_TEST_ARGS}
|
Reduce the output of Miniconda wget | #!/bin/bash
set -x
set -e
CONDA_PATH=${1:-~/conda}
if [ $OS_NAME = 'windows' ]; then
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
choco install openssl.light
choco install miniconda3 --params="/AddToPath:1"
fi
export CONDA_PATH='/c/tools/miniconda3'
export PATH=$CONDA_PATH/bin/:$CONDA_PATH/Scripts/:$PATH
else
if [ $OS_NAME = 'linux' ]; then
sys_name=Linux
else
sys_name=MacOSX
fi
wget -c https://repo.continuum.io/miniconda/Miniconda3-latest-${sys_name}-x86_64.sh
chmod a+x Miniconda3-latest-${sys_name}-x86_64.sh
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
./Miniconda3-latest-${sys_name}-x86_64.sh -p $CONDA_PATH -b -f
fi
export PATH=$CONDA_PATH/bin:$PATH
fi
echo $PATH
conda info
conda list
echo "python==3.7.*" > $CONDA_PATH/conda-meta/pinned
conda install -y anaconda-client
conda install -y python
conda update -y conda
| #!/bin/bash
set -x
set -e
CONDA_PATH=${1:-~/conda}
if [ $OS_NAME = 'windows' ]; then
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
choco install openssl.light
choco install miniconda3 --params="/AddToPath:1"
fi
export CONDA_PATH='/c/tools/miniconda3'
export PATH=$CONDA_PATH/bin/:$CONDA_PATH/Scripts/:$PATH
else
if [ $OS_NAME = 'linux' ]; then
sys_name=Linux
else
sys_name=MacOSX
fi
wget --progress=dot:giga -c https://repo.continuum.io/miniconda/Miniconda3-latest-${sys_name}-x86_64.sh
chmod a+x Miniconda3-latest-${sys_name}-x86_64.sh
if [ ! -d $CONDA_PATH -o ! -z "$CI" ]; then
./Miniconda3-latest-${sys_name}-x86_64.sh -p $CONDA_PATH -b -f
fi
export PATH=$CONDA_PATH/bin:$PATH
fi
echo $PATH
conda info
conda list
echo "python==3.7.*" > $CONDA_PATH/conda-meta/pinned
conda install -y anaconda-client
conda install -y python
conda update -y conda
|
Fix CircleCI demo deploy data | #!/bin/bash
set -ex
[[ "$CIRCLE_BRANCH" != demo__* ]] && exit 0
DEBUG=False
DEV=True
DATABASE_URL=sqlite:///bedrock.db
DOCKER_CACHE_PATH=~/docker
MOFO_SECURITY_ADVISORIES_PATH=$DOCKER_CACHE_PATH/security_advisories
PROD_DETAILS_STORAGE=product_details.storage.PDDatabaseStorage
mkdir -p $DOCKER_CACHE_PATH
if [[ -f $DOCKER_CACHE_PATH/bedrock.db ]]; then
cp $DOCKER_CACHE_PATH/bedrock.db ./
fi
./manage.py migrate --noinput
./manage.py rnasync
./manage.py cron update_ical_feeds
./manage.py update_product_details
./manage.py update_externalfiles
./manage.py update_security_advisories
cp -f bedrock.db $DOCKER_CACHE_PATH/
if [[ -e $DOCKER_CACHE_PATH/image.tar ]]; then docker load --input ~/docker/image.tar; fi
echo "ENV GIT_SHA ${CIRCLE_SHA1}" >> Dockerfile
docker build -t "$DOCKER_IMAGE_TAG" --pull=true .
docker save "$DOCKER_IMAGE_TAG" > $DOCKER_CACHE_PATH/image.tar
| #!/bin/bash
set -ex
[[ "$CIRCLE_BRANCH" != demo__* ]] && exit 0
export DOCKER_CACHE_PATH=~/docker
mkdir -p $DOCKER_CACHE_PATH
if [[ -f $DOCKER_CACHE_PATH/bedrock.db ]]; then
cp $DOCKER_CACHE_PATH/bedrock.db ./
fi
# use settings in manage.py runs
cp .bedrock_demo_env .env
echo "MOFO_SECURITY_ADVISORIES_PATH=$DOCKER_CACHE_PATH/security_advisories" >> .env
./manage.py migrate --noinput
./manage.py rnasync
./manage.py cron update_ical_feeds
./manage.py cron cleanup_ical_events
./manage.py cron update_tweets
./manage.py update_product_details
./manage.py update_externalfiles
./manage.py update_security_advisories
./manage.py runscript update_firefox_os_feeds
cp -f bedrock.db $DOCKER_CACHE_PATH/
# don't include in built container
rm -f .env
if [[ -e $DOCKER_CACHE_PATH/image.tar ]]; then docker load --input $DOCKER_CACHE_PATH/image.tar; fi
echo "ENV GIT_SHA ${CIRCLE_SHA1}" >> Dockerfile
docker build -t "$DOCKER_IMAGE_TAG" --pull=true .
docker save "$DOCKER_IMAGE_TAG" > $DOCKER_CACHE_PATH/image.tar
|
Add alias for ubuntu to open browser window for how to commit part of a file in git | #Aliases for Ubuntu
#built ins declared before shared
alias grepc="grep -nrP --color=always"
source "${DOTFILES_DIR}aliases_shared.bash"
#alias for bash settings
alias settings="subl ~/.bash_aliases ~/.bashrc ~/.vimrc \"${DOTFILES_DIR}aliases_ubuntu.bash\" \"${DOTFILES_DIR}aliases_shared.bash\""
alias reload="source ~/.bashrc;"
#APT
alias apt="sudo apt-get"
#alias to open current directory in file browser
alias op="nautilus .;"
#node
alias node="nodejs"
#apache
alias apache="sudo service apache2"
#nginx
alias nginx="sudo service nginx"
#function ng() { sudo systemctl $1 nginx; }
#redis
#assumes redis installed and configured from:
#https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-redis-on-ubuntu-16-04
alias redis_start="sudo systemctl start redis"
| #Aliases for Ubuntu
#built ins declared before shared
alias grepc="grep -nrP --color=always"
source "${DOTFILES_DIR}aliases_shared.bash"
#alias for bash settings
alias settings="subl ~/.bash_aliases ~/.bashrc ~/.vimrc \"${DOTFILES_DIR}aliases_ubuntu.bash\" \"${DOTFILES_DIR}aliases_shared.bash\""
alias reload="source ~/.bashrc;"
#GIT
#open stack overflow to add part of file in git to staging
alias git_part="sensible-browser \"http://stackoverflow.com/questions/1085162/commit-only-part-of-a-file-in-git\";"
#APT
alias apt="sudo apt-get"
#alias to open current directory in file browser
alias op="nautilus .;"
#node
alias node="nodejs"
#apache
alias apache="sudo service apache2"
#nginx
alias nginx="sudo service nginx"
#function ng() { sudo systemctl $1 nginx; }
#redis
#assumes redis installed and configured from:
#https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-redis-on-ubuntu-16-04
alias redis_start="sudo systemctl start redis"
|
Use temporary dir as mount in backup. | #
# deta
#
# Copyright (c) 2011-2012 David Persson
#
# Distributed under the terms of the MIT License.
# Redistributions of files must retain the above copyright notice.
#
# @COPYRIGHT 2011-2012 David Persson <nperson@gmx.de>
# @LICENSE http://www.opensource.org/licenses/mit-license.php The MIT License
# @LINK http://github.com/davidpersson/deta
#
# This task restores backups from a source to a default temporary directory.
source $DETA/backup.sh
role SOURCE # The env where the backups are.
TMP=$(mktemp -d -t deta)
defer rm -rf $TMP
PS3="Select a previous backup to restore from (most recent first): "
select file in $(ls -t $SOURCE_PATH/*.tar.gpg); do
dearchive $file $TMP
break
done
msgok "Restored backup mounted in %s." $TMP
| #
# deta
#
# Copyright (c) 2011-2012 David Persson
#
# Distributed under the terms of the MIT License.
# Redistributions of files must retain the above copyright notice.
#
# @COPYRIGHT 2011-2012 David Persson <nperson@gmx.de>
# @LICENSE http://www.opensource.org/licenses/mit-license.php The MIT License
# @LINK http://github.com/davidpersson/deta
#
# This task restores backups from a source to a default temporary directory.
source $DETA/backup.sh
role SOURCE # The env where the backups are.
TMP=$(mktemp -d -t deta)
PS3="Select a previous backup to restore from (most recent first): "
select file in $(ls -t $SOURCE_PATH/*.tar.gpg); do
dearchive $file $TMP
break
done
msgok "Restored backup mounted in %s." $TMP
|
Use correct script name, not idea.sh | #!/bin/sh
# ------------------------------------------------------
# __product_full__ LightEdit mode script.
# ------------------------------------------------------
IDE_BIN_HOME="$(dirname "$(realpath "$0")")"
exec "$IDE_BIN_HOME/idea.sh" -e "$@" | #!/bin/sh
# ------------------------------------------------------
# __product_full__ LightEdit mode script.
# ------------------------------------------------------
IDE_BIN_HOME="$(dirname "$(realpath "$0")")"
exec "$IDE_BIN_HOME/__script_name__" -e "$@" |
Fix path to travis vhost | #!/usr/bin/env sh
set -ev
sudo apt-get update \
&& sudo apt-get install apache2 libapache2-mod-fastcgi \
&& sudo cp ~/.phpenv/versions/$(phpenv version-name)/etc/php-fpm.conf.default ~/.phpenv/versions/$(phpenv version-name)/etc/php-fpm.conf \
&& sudo a2enmod rewrite actions fastcgi alias \
&& echo "cgi.fix_pathinfo = 1" >> ~/.phpenv/versions/$(phpenv version-name)/etc/php.ini \
&& ~/.phpenv/versions/$(phpenv version-name)/sbin/php-fpm \
&& sudo cp -f build/travis/vhost-apache /etc/apache2/sites-available/default \
&& sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/default \
&& sudo service apache2 restart
| #!/usr/bin/env sh
set -ev
sudo apt-get update \
&& sudo apt-get install apache2 libapache2-mod-fastcgi \
&& sudo cp ~/.phpenv/versions/$(phpenv version-name)/etc/php-fpm.conf.default ~/.phpenv/versions/$(phpenv version-name)/etc/php-fpm.conf \
&& sudo a2enmod rewrite actions fastcgi alias \
&& echo "cgi.fix_pathinfo = 1" >> ~/.phpenv/versions/$(phpenv version-name)/etc/php.ini \
&& ~/.phpenv/versions/$(phpenv version-name)/sbin/php-fpm \
&& sudo cp -f $TRAVIS_BUILD_DIR/ci/travis/vhost-apache /etc/apache2/sites-available/default \
&& sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/default \
&& sudo service apache2 restart
|
Fix 'kubectl logs' bug & beautify failure output | #!/bin/bash
set -ex
echo "=== TEST FAILED OR TIMED OUT, DUMPING DEBUG INFO ==="
kubectl get all --all-namespaces
# TODO: Extend this to show kubectl describe output for failed pods, this will
# probably show why things are hanging.
kubectl version
kubectl get all --namespace kafka
kubectl describe pod -l app=pachd
kubectl describe pod -l suite=pachyderm,app=etcd
kubectl logs -l app=pachd | tail -n 100
sudo dmesg | tail -n 20
{ minikube logs | tail -n 20; } || true
top -b -n 1 | head -n 20
df -h
| #!/bin/bash
set -e
echo "=== TEST FAILED OR TIMED OUT, DUMPING DEBUG INFO ==="
# TODO: Extend this to show kubectl describe output for failed pods, this will
# probably show why things are hanging.
cmds=(
'kubectl get all --all-namespaces'
'kubectl version'
'kubectl get all --all-namespaces'
'kubectl describe pod -l suite=pachyderm,app=pachd'
'kubectl describe pod -l suite=pachyderm,app=etcd'
# Set --tail b/c by default 'kubectl logs' only outputs 10 lines if -l is set
'kubectl logs --tail=100 -l suite=pachyderm,app=pachd'
'kubectl logs --tail=100 -l suite=pachyderm,app=pachd --previous # if pachd restarted'
'sudo dmesg | tail -n 40'
'{ minikube logs | tail -n 40; } || true'
'top -b -n 1 | head -n 40'
'df -h'
)
for c in "${cmds[@]}"; do
echo "======================================================================"
echo "${c}"
echo "----------------------------------------------------------------------"
eval "${c}"
done
|
Create the canonical paths for atmosphere | WORKSPACE="/vagrant"
GIT_BRANCH="master"
SERVER_NAME="localhost"
ENV_FILE="$WORKSPACE/atmo-extras/clank_init/build_env/variables.yml@vagrant"
# Before running, ensure that `/vagrant` is present
# - if there isn't a `/vagrant` - someone might be
# trying to run this from the host of the vm -
# which is just a bag-of-hurt
#0. Begin, eh
cd "$WORKSPACE"
#sudo mkdir -p /opt/dev/atmosphere
#sudo chown vagrant:vagrant -R /opt/dev/atmosphere
#sudo mkdir -p /opt/env/atmo
#sudo chown vagrant:vagrant -R /opt/env/atmo
#1. Clone repos - pre-flight handles this
# ensure `atmo-extras` in place
# ensure `clank` in place
#2. Prepare execution of ratchet
virtualenv ratchet_env
. ratchet_env/bin/activate
pip install -r clank/ratchet_requirements.txt
#3. Running ratchet
cd "$WORKSPACE/clank"
PYTHONUNBUFFERED=1 python ratchet.py --workspace $WORKSPACE --env_file $ENV_FILE --skip "troposphere"
#4. Chown to vagrant, activate and run!
cd "$WORKSPACE"
sudo chown -R vagrant:vagrant .
cd "/opt/dev/atmosphere"
. "/opt/env/atmo/bin/activate"
| WORKSPACE="/vagrant"
GIT_BRANCH="master"
SERVER_NAME="localhost"
ENV_FILE="$WORKSPACE/atmo-extras/clank_init/build_env/variables.yml@vagrant"
# screw it - I'm writing functions...
create_if_does_not_exist() {
if [ ! -d "$1" ]; then
echo "making directory & chown-ing... ";
sudo mkdir -p "$1";
sudo chown vagrant:vagrant -R "$1";
fi
}
# Before running, ensure that `/vagrant` is present
# - if there isn't a `/vagrant` - someone might be
# trying to run this from the host of the vm -
# which is just a bag-of-hurt
#0. Begin, eh
cd "$WORKSPACE"
create_if_does_not_exist "/opt/dev/atmosphere"
create_if_does_not_exist "/opt/env/atmo"
#1. Clone repos - pre-flight handles this
# ensure `atmo-extras` in place
# ensure `clank` in place
#2. Prepare execution of ratchet
virtualenv ratchet_env
. ratchet_env/bin/activate
pip install -r clank/ratchet_requirements.txt
#3. Running ratchet
cd "$WORKSPACE/clank"
PYTHONUNBUFFERED=1 python ratchet.py --workspace $WORKSPACE --env_file $ENV_FILE --skip "troposphere"
#4. Chown to vagrant, activate and run!
cd "$WORKSPACE"
sudo chown -R vagrant:vagrant .
cd "/opt/dev/atmosphere"
. "/opt/env/atmo/bin/activate"
|
Update remotes for the repos in vendor and rbenv | #!/bin/bash
# if you use public key authentication for any of these git repos,
# make sure your key doesn't have a passphrase or the fetch will
# fail
cd $HOME
[ -d ".git" ] && git remote update &> /dev/null
cd $HOME/.vim
[ -d ".git" ] && git remote update &> /dev/null
if [ -d $HOME/repos ]; then
cd $HOME/repos
for dir in ./*/
do
dir=${dir%*/}
cd "$dir"
[ -d ".git" ] && git remote update &> /dev/null
cd ..
done
fi
| #!/bin/bash
# if you use public key authentication for any of these git repos,
# make sure your key doesn't have a passphrase or the fetch will
# fail
cd $HOME
[ -d ".git" ] && git remote update &> /dev/null
cd $HOME/.vim
[ -d ".git" ] && git remote update &> /dev/null
cd $HOME/.rbenv
[ -d ".git" ] && git remote update &> /dev/null
function update_all_repo_remotes
{
if [ -d "$1" ]; then
cd "$1"
for dir in ./*/
do
dir=${dir%*/}
cd "$dir"
[ -d ".git" ] && git remote update &> /dev/null
cd ..
done
fi
}
update_all_repo_remotes $HOME/repos
update_all_repo_remotes $HOME/vendor
|
Use sudo to install Ansible, clone main repository | #!/bin/bash
if [ ! -f /usr/bin/ansible-playbook ]
then
apt-get install software-properties-common
apt-add-repository ppa:ansible/ansible
apt-get update
apt-get install -y ansible
fi
# Clone repository
git clone https://github.hpe.com/olivier-jacques/the-app.git
cd the-app/vagrant/provision
echo "RUNNING ansible-playbook -c local " $@
ansible-playbook -c local "$@"
| #!/bin/bash
if [ ! -f /usr/bin/ansible-playbook ]
then
sudo apt-get install software-properties-common
sudo apt-add-repository ppa:ansible/ansible
sudo apt-get update
sudo apt-get install -y ansible
fi
# Clone repository
git clone https://github.com/devops-dojo/the-app.git
cd the-app/vagrant
ls -alrt
echo "RUNNING ansible-playbook -c local " $@
ansible-playbook -c local "$@"
|
Make sure that if you abort the server regression tests, then it doesn't say OK | #!/bin/bash
FILES=`perl -n -e 'print \$1, " " if /<stream\\s+(\\S+)>/i' test.conf`
rm -f /tmp/feed.ffm
../ffserver -d -f test.conf 2> /dev/null &
FFSERVER_PID=$!
sleep 1
(
cd data
for file in $FILES; do
wget --user-agent=NSPlayer -q --output-document=- http://localhost:9999/$file?date=19700101T000000Z | head --bytes=100000 > $file &
done
wait
# the status page is always different
md5sum $FILES | grep -v html > ffserver.regression
)
kill $FFSERVER_PID
wait > /dev/null 2>&1
if diff -u data/ffserver.regression $1 ; then
echo
echo Server regression test succeeded.
exit 0
else
echo
echo Server regression test: Error.
exit 1
fi
| #!/bin/bash
FILES=`perl -n -e 'print \$1, " " if /<stream\\s+(\\S+)>/i' test.conf`
rm -f /tmp/feed.ffm
../ffserver -d -f test.conf 2> /dev/null &
FFSERVER_PID=$!
sleep 1
(
cd data
rm -f $FILES;
for file in $FILES; do
wget --user-agent=NSPlayer -q --output-document=- http://localhost:9999/$file?date=19700101T000000Z | head --bytes=100000 > $file &
done
wait
# the status page is always different
md5sum $FILES | grep -v html > ffserver.regression
)
kill $FFSERVER_PID
wait > /dev/null 2>&1
if diff -u data/ffserver.regression $1 ; then
echo
echo Server regression test succeeded.
exit 0
else
echo
echo Server regression test: Error.
exit 1
fi
|
Use Miniconda instead of Miniconda3 | # as root user
apt-get update
# as vagrant user
echo "alias ipynb='ipython notebook --ip=0.0.0.0 /vagrant/'" >> .bashrc
source .bashrc
wget http://repo.continuum.io/miniconda/Miniconda-3.3.0-Linux-x86_64.sh
bash Miniconda3-3.3.0-Linux-x86_64.sh # accept license when prompted
rm Miniconda-3.3.0-Linux-x86_64.sh
conda update conda # make sure we're up to date
conda install pandas scikit-learn ipython-notebook --yes
| # as root user
apt-get update
# as vagrant user
echo "alias ipynb='ipython notebook --ip=0.0.0.0 /vagrant/'" >> .bashrc
source .bashrc
wget http://repo.continuum.io/miniconda/Miniconda-3.5.2-Linux-x86_64.sh # this is the version based on Python 2
bash Miniconda-3.5.2-Linux-x86_64.sh # accept license when prompted
rm Miniconda-3.5.2-Linux-x86_64.sh
conda update conda # make sure we're up to date
conda install pandas scikit-learn ipython-notebook --yes
|
Clean up the source release script. | #!/bin/bash
tag=$1
if [[ "$tag" == "" ]]; then
echo No tag specified
exit -1
fi
foldername=ogre_src_$tag
# You can set OGRE_RELEASE_CLONE_SOURCE to a local repo if you want to speed things up
if [[ "$OGRE_RELEASE_CLONE_SOURCE" == "" ]]; then
OGRE_RELEASE_CLONE_SOURCE=http://bitbucket.org/sinbad/ogre
fi
hg clone -r $tag $OGRE_RELEASE_CLONE_SOURCE $foldername
# Build configure
pushd $foldername
# delete repo, we only want working copy
rm -rf .hg
# Gen docs
cd Docs/src
. makedocs.sh
# remove unnecessary files
cd ../api/html
rm -f *.hhk *.hhc *.map *.md5 *.dot *.hhp *.plist *.xml ../*.tmp
popd
# tarball for Linux
rm -f $foldername.tar.bz2
/bin/tar -cvhjf $foldername.tar.bz2 $foldername
| #!/bin/bash
tag=$1
if [[ "$tag" == "" ]]; then
echo No tag specified
exit -1
fi
foldername=ogre_src_$tag
# You can set OGRE_RELEASE_CLONE_SOURCE to a local repo if you want to speed things up
if [[ "$OGRE_RELEASE_CLONE_SOURCE" == "" ]]; then
OGRE_RELEASE_CLONE_SOURCE=http://bitbucket.org/sinbad/ogre
fi
hg clone -r $tag $OGRE_RELEASE_CLONE_SOURCE $foldername
# Build configure
pushd $foldername
# delete repo, we only want working copy
rm -rf .hg
# Gen docs
cd Docs
bash ./src/makedocs.sh
# remove unnecessary files
cd ../api/html
rm -f *.hhk *.hhc *.map *.md5 *.dot *.hhp *.plist *.xml ../*.tmp
popd
# tarball for Linux
rm -f $foldername.tar.bz2
tar -cvhjf $foldername.tar.bz2 $foldername
|
Change snapshots to only ipfs_* indexes. | #!/bin/sh
curl -s -XPUT http://127.0.0.1:9200/_snapshot/{{ snapshot_name }}/snapshot_`date +'%y%m%d_%H%M'` | jq -e '.accepted' > /dev/null
| #!/bin/sh
curl -s -XPUT http://127.0.0.1:9200/_snapshot/{{ snapshot_name }}/snapshot_`date +'%y%m%d_%H%M'` -H 'Content-Type: application/json' -d '{"indices": "ipfs_*"}' | jq -e '.accepted' > /dev/null
|
Deploy to master instead of savanni | #!/bin/bash
if [ ! -e cloudcity.io ]; then
git clone https://savannidgerinel:${GITHUB_TOKEN}@github.com/cloudcity/cloudcity.io.git
fi
mkdir -p cloudcity.io/tsomi
cd cloudcity.io/tsomi
cat ../../index.html | sed 's/static\/tsomi.css/\/tsomi\/static\/tsomi.css/' | sed 's/js\/bundle.js/\/tsomi\/js\/bundle.js/' > index.html
cp -r ../../static .
cp -r ../../js .
git checkout -b savanni
git add *
pwd
ls -l
git status
git commit -m "deploy tsomi to production"
git push --set-upstream origin savanni
| #!/bin/bash
if [ ! -e cloudcity.io ]; then
git clone https://savannidgerinel:${GITHUB_TOKEN}@github.com/cloudcity/cloudcity.io.git
fi
mkdir -p cloudcity.io/tsomi
cd cloudcity.io/tsomi
cat ../../index.html | sed 's/static\/tsomi.css/\/tsomi\/static\/tsomi.css/' | sed 's/js\/bundle.js/\/tsomi\/js\/bundle.js/' > index.html
cp -r ../../static .
cp -r ../../js .
#git checkout -b savanni
git add *
pwd
ls -l
git status
git commit -m "deploy tsomi to production"
#git push --set-upstream origin savanni
git push
|
Use placeholder to refer to file name | #!/bin/bash
# Get arguments
usage()
{
cat << EOF
usage: $0 -w FILE STATE-TIME-FILES
This script gets per-thread state statistics.
OPTIONS:
-h Show this message
-w Worker stats file (mir-worker-stats)
STATE-TIME-FILES One or more state time files (*-state-time-*.rec)
EOF
}
WS=
while getopts "hw:" opt; do
case "$opt" in
h) usage ; exit 1 ;;
w) WS=$OPTARG ;;
?) usage ; exit 1 ;;
esac
done
shift $(( OPTIND - 1 ))
# Collect state time from all recorders
OUTF=accumulated-state-time.rec
rm -f $OUTF
cat "$@" > $OUTF
sed '/THREAD/d' $OUTF > $OUTF.body
sed '/THREAD/!d' $OUTF > $OUTF.header
LC_NUMERIC=C sort -n $OUTF.body > $OUTF.body.sorted
cat $OUTF.header $OUTF.body.sorted > $OUTF
rm -f $OUTF.header $OUTF.body*
# Process using R
rm -f $OUTF.info
SCRIPT="`readlink -e $0`"
SCRIPTPATH="`dirname $SCRIPT`"
Rscript $SCRIPTPATH/get-states.R accumulated-state-time.rec $WS
# Cleanup
rm -f $OUTF
| #!/bin/bash
# Get arguments
usage()
{
cat << EOF
usage: $0 -w FILE STATE-TIME-FILES
This script gets per-thread state statistics.
OPTIONS:
-h Show this message
-w Worker stats file (mir-worker-stats)
STATE-TIME-FILES One or more state time files (*-state-time-*.rec)
EOF
}
WS=
while getopts "hw:" opt; do
case "$opt" in
h) usage ; exit 1 ;;
w) WS=$OPTARG ;;
?) usage ; exit 1 ;;
esac
done
shift $(( OPTIND - 1 ))
# Collect state time from all recorders
OUTF=accumulated-state-time.rec
rm -f $OUTF
cat "$@" > $OUTF
sed '/THREAD/d' $OUTF > $OUTF.body
sed '/THREAD/!d' $OUTF > $OUTF.header
LC_NUMERIC=C sort -n $OUTF.body > $OUTF.body.sorted
cat $OUTF.header $OUTF.body.sorted > $OUTF
rm -f $OUTF.header $OUTF.body*
# Process using R
rm -f $OUTF.info
SCRIPT="`readlink -e $0`"
SCRIPTPATH="`dirname $SCRIPT`"
Rscript $SCRIPTPATH/get-states.R $OUTF $WS
# Cleanup
rm -f $OUTF
|
Update sed command to loop through FORMULAS before filtering out skips. | #!/usr/bin/env bash
set -e
# manually added by env var. Will not be filtered by skip-formulas.txt
if [[ -n ${TRAVIS_MANUAL_FORMULAE} ]]; then
echo "${TRAVIS_MANUAL_FORMULAE}"
fi
if [[ ! -z $TRAVIS_PULL_REQUEST_BRANCH ]]; then
# if on a PR, just analyze the changed files
FILES=$(git diff --diff-filter=AM --name-only $(git merge-base HEAD ${TRAVIS_BRANCH} ) )
elif [[ ! -z $TRAVIS_COMMIT_RANGE ]]; then
FILES=$(git diff --diff-filter=AM --name-only ${TRAVIS_COMMIT_RANGE/.../..} )
else
FILES=
fi
# keep formulas only
FORMULAS=$(sed -n -E 's#^Formula/(.+)\.rb$#\1#p' <<< $FILES)
# skip formulas
comm -1 -3 travis/skip-formulas.txt <(echo ${FORMULAS} | tr ' ' '\n' )
| #!/usr/bin/env bash
set -e
# manually added by env var. Will not be filtered by skip-formulas.txt
if [[ -n ${TRAVIS_MANUAL_FORMULAE} ]]; then
echo "${TRAVIS_MANUAL_FORMULAE}"
fi
if [[ ! -z $TRAVIS_PULL_REQUEST_BRANCH ]]; then
# if on a PR, just analyze the changed files
FILES=$(git diff --diff-filter=AM --name-only $(git merge-base HEAD ${TRAVIS_BRANCH} ) )
elif [[ ! -z $TRAVIS_COMMIT_RANGE ]]; then
FILES=$(git diff --diff-filter=AM --name-only ${TRAVIS_COMMIT_RANGE/.../..} )
else
FILES=
fi
FORMULAS=
for f in $FILES;do
FORMULAS="$FORMULAS $(echo $f | sed -n -E 's#^Formula/(.+)\.rb$#\1#p')"
done
# keep formulas only
#FORMULAS=$(sed -n -E 's#^Formula/(.+)\.rb$#\1#p' <<< $FILES)
# skip formulas
comm -1 -3 travis/skip-formulas.txt <(echo ${FORMULAS} | tr ' ' '\n' )
|
Fix bug of echo command | #!/bin/bash
# useful tool
yum install git
yum install tmux
yum install irssi
yum install fish
yum install luit
# editor
yum install vim
yum install emacs
# language
yum install go
yum install python
yum install gcc
yum install clang
yum install nodejs
yum install ruby
# browser
wget http://chrome.richardlloyd.org.uk/install_chrome.sh
chmod u+x install_chrome.sh
./install_chrome.sh
rm install_chrome.sh
# developer
yum install ctags
# command
echo "#!/bin/sh
ssh bbsu@ptt.cc" > /usr/local/bin/ptt
echo "#!/bin/sh
luit -encoding big5 telnet bs2.to" > /usr/local/bin/bs2
echo "#!/bin/sh
clang++ -std=c++14 -Wall -Wextra -pedantic -g3 -O2 ${1} -o ${1%.*} && ./${1%.*}" > /usr/local/bin/oop
chmod 755 /usr/local/bin/ptt /usr/local/bin/bs2 /usr/local/bin/oop
| #!/bin/bash
# useful tool
yum install git
yum install tmux
yum install irssi
yum install fish
yum install luit
# editor
yum install vim
yum install emacs
# language
yum install go
yum install python
yum install gcc
yum install clang
yum install nodejs
yum install ruby
# browser
wget http://chrome.richardlloyd.org.uk/install_chrome.sh
chmod u+x install_chrome.sh
./install_chrome.sh
rm install_chrome.sh
# developer
yum install ctags
# command
echo '#!/bin/sh
ssh bbsu@ptt.cc' > /usr/local/bin/ptt
echo '#!/bin/sh
luit -encoding big5 telnet bs2.to' > /usr/local/bin/bs2
echo '#!/bin/sh
clang++ -std=c++14 -Wall -Wextra -pedantic -g3 -O2 ${1} -o ${1%.*} && ./${1%.*}' > /usr/local/bin/oop
chmod 755 /usr/local/bin/ptt /usr/local/bin/bs2 /usr/local/bin/oop
|
Set verbose flag for file utilities | alias g=git
alias be='bundle exec'
alias docker-shell='docker run --rm --interactive --tty --volume "$PWD:/docker"'
alias ping='ping -c 10'
alias pu=pushd po=popd d='dirs -v'
if command -v exa >/dev/null 2>&1
then alias ls=exa ll='exa --long' la='exa --all' tree='exa --tree'
else alias ls='ls --color=auto' ll='ls -l --human-readable' la='ls --almost-all'
fi
if command -v bat >/dev/null
then alias cat='bat --style=plain'
else alias bat=cat
fi
if [ -f /usr/share/vim/vim81/macros/less.sh ]
then alias vless=/usr/share/vim/vim81/macros/less.sh
elif [ -f /usr/share/vim/vim80/macros/less.sh ]
then alias vless=/usr/share/vim/vim80/macros/less.sh
fi
source_maybe() {
if [ -f "$1" ]
then . "$1"
else return 1
fi
}
source_maybe ~/.config/posix-sh/local-config.sh
unset -f source_maybe
| alias g=git
alias be='bundle exec'
alias docker-shell='docker run --rm --interactive --tty --volume "$PWD:/docker"'
alias ping='ping -c 10'
alias pu=pushd po=popd d='dirs -v'
if command -v exa >/dev/null 2>&1
then alias ls=exa ll='exa --long' la='exa --all' tree='exa --tree'
else alias ls='ls --color=auto' ll='ls -l --human-readable' la='ls --almost-all'
fi
if command -v bat >/dev/null
then alias cat='bat --style=plain'
else alias bat=cat
fi
if [ -f /usr/share/vim/vim81/macros/less.sh ]
then alias vless=/usr/share/vim/vim81/macros/less.sh
elif [ -f /usr/share/vim/vim80/macros/less.sh ]
then alias vless=/usr/share/vim/vim80/macros/less.sh
fi
if [ "`uname`" = Darwin ]
then alias cp='cp -v' mv='mv -v' rm='rm -v'
else alias cp='cp --verbose' mv='mv --verbose' rm='rm --verbose'
fi
source_maybe() {
if [ -f "$1" ]
then . "$1"
else return 1
fi
}
source_maybe ~/.config/posix-sh/local-config.sh
unset -f source_maybe
|
Replace script echos with -x flag | #!/bin/sh
set -e
echo "----Running flake8----"
flake8 ai_graph_color tests
echo "----Running nosetests----"
nosetests tests
| #!/bin/sh
set -ex
flake8 ai_graph_color tests
nosetests tests
|
Remove deprecated Compose secrets key | #!/bin/sh
set -eu
export PASSWORD_STORE_DIR=${COMPOSE_PASSWORD_STORE_DIR}
COMPOSE_API_KEY=$(pass "compose/${AWS_ACCOUNT}/access_token")
SECRETS=$(mktemp secrets.yml.XXXXXX)
trap 'rm "${SECRETS}"' EXIT
cat > "${SECRETS}" << EOF
---
compose_api_key: ${COMPOSE_API_KEY}
compose_access_token: ${COMPOSE_API_KEY}
EOF
aws s3 cp "${SECRETS}" "s3://gds-paas-${DEPLOY_ENV}-state/compose-secrets.yml"
| #!/bin/sh
set -eu
export PASSWORD_STORE_DIR=${COMPOSE_PASSWORD_STORE_DIR}
COMPOSE_API_KEY=$(pass "compose/${AWS_ACCOUNT}/access_token")
SECRETS=$(mktemp secrets.yml.XXXXXX)
trap 'rm "${SECRETS}"' EXIT
cat > "${SECRETS}" << EOF
---
compose_api_key: ${COMPOSE_API_KEY}
EOF
aws s3 cp "${SECRETS}" "s3://gds-paas-${DEPLOY_ENV}-state/compose-secrets.yml"
|
Switch to recommended stdin password input | #!/bin/sh
DOCKER_TAG="$1"
docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" \
&& docker build -t $DOCKER_USERNAME/$DOCKER_REPOSITORY:$DOCKER_TAG . \
&& docker push $DOCKER_USERNAME/$DOCKER_REPOSITORY:$DOCKER_TAG
| #!/bin/sh
DOCKER_TAG="$1"
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin \
&& docker build -t $DOCKER_USERNAME/$DOCKER_REPOSITORY:$DOCKER_TAG . \
&& docker push $DOCKER_USERNAME/$DOCKER_REPOSITORY:$DOCKER_TAG
|
Save STDOUT ouput in log too. | #!/bin/sh
kill_em_all() {
echo "(CTRL+C was hit)"
kill 0
exit 0
}
trap 'kill_em_all' INT
# Start the NER server
NER_DIR="abstractor-server/stanford-ner-2013-04-04"
FLASK_DIR="abstractor-server/abstractor"
NER_JAR="stanford-ner-2013-04-04.jar"
cd "$NER_DIR" # Move to NER_DIR
# Run the NER server in the background
java -mx1000m -cp "$NER_JAR" edu.stanford.nlp.ie.NERServer \
-loadClassifier "classifiers/english.all.3class.distsim.crf.ser.gz" \
-port 9000 -outputFormat inlineXML 2> ../../ner-server.log &
cd - # Return to orginal dir
cd "$FLASK_DIR" # Move to Flask Python server directory
# Start the Python Flask server
python abstractor.py 2> ../../flask-server.log &
while :
do
echo "Press [CTRL+C] to stop.."
sleep 1
done
| #!/bin/sh
kill_em_all() {
echo "(CTRL+C was hit)"
kill 0
exit 0
}
trap 'kill_em_all' INT
# Start the NER server
NER_DIR="abstractor-server/stanford-ner-2013-04-04"
FLASK_DIR="abstractor-server/abstractor"
NER_JAR="stanford-ner-2013-04-04.jar"
cd "$NER_DIR" # Move to NER_DIR
# Run the NER server in the background
java -mx1000m -cp "$NER_JAR" edu.stanford.nlp.ie.NERServer \
-loadClassifier "classifiers/english.all.3class.distsim.crf.ser.gz" \
-port 9000 -outputFormat inlineXML > ../../ner-server.log 2> ../../ner-server.log &
cd - # Return to orginal dir
cd "$FLASK_DIR" # Move to Flask Python server directory
# Start the Python Flask server
python abstractor.py > ../../flask-server.log 2> ../../flask-server.log &
while :
do
echo "Press [CTRL+C] to stop.."
sleep 1
done
|
Use `file --dereference' instead of `readlink -f' + `file'. | #!/bin/sh
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This figures out the architecture of the version of Python we are building
# pyautolib against.
#
# python_arch.sh /usr/lib/libpython2.5.so.1.0
# python_arch.sh /path/to/sysroot/usr/lib/libpython2.4.so.1.0
#
python=$(readlink -f "$1")
if [ ! -r "$python" ]; then
echo unknown
exit 0
fi
file_out=$(file "$python")
if [ $? -ne 0 ]; then
echo unknown
exit 0
fi
echo $file_out | grep -qs "ARM"
if [ $? -eq 0 ]; then
echo arm
exit 0
fi
echo $file_out | grep -qs "MIPS"
if [ $? -eq 0 ]; then
echo mipsel
exit 0
fi
echo $file_out | grep -qs "x86-64"
if [ $? -eq 0 ]; then
echo x64
exit 0
fi
echo $file_out | grep -qs "Intel 80386"
if [ $? -eq 0 ]; then
echo ia32
exit 0
fi
exit 1
| #!/bin/sh
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This figures out the architecture of the version of Python we are building
# pyautolib against.
#
# python_arch.sh /usr/lib/libpython2.5.so.1.0
# python_arch.sh /path/to/sysroot/usr/lib/libpython2.4.so.1.0
#
file_out=$(file --dereference "$1")
if [ $? -ne 0 ]; then
echo unknown
exit 0
fi
echo $file_out | grep -qs "ARM"
if [ $? -eq 0 ]; then
echo arm
exit 0
fi
echo $file_out | grep -qs "MIPS"
if [ $? -eq 0 ]; then
echo mipsel
exit 0
fi
echo $file_out | grep -qs "x86-64"
if [ $? -eq 0 ]; then
echo x64
exit 0
fi
echo $file_out | grep -qs "Intel 80386"
if [ $? -eq 0 ]; then
echo ia32
exit 0
fi
exit 1
|
Fix lint error SC2140 for bosh_ensure_upload | #! /bin/sh
set -e -u
script_dir=$(cd "$(dirname "$0")" && pwd)
bosh -u admin -p "${BOSH_PASSWORD}" target https://10.0.0.6:25555 >/dev/null
bosh login admin "${BOSH_PASSWORD}" >/dev/null
existing_version=$("${script_dir}"/bosh_list_"${TYPE}"s.rb | awk -v name="${NAME}" -F"/" '$1 ~ name {print $2}')
if [[ "${existing_version}" != "${VERSION}" ]]; then
eval bosh upload "${TYPE}" "${URL}"
else
echo "${NAME}/${VERSION} is already uploaded"
fi
| #! /bin/sh
set -e -u
script_dir=$(cd "$(dirname "$0")" && pwd)
bosh -u admin -p "${BOSH_PASSWORD}" target https://10.0.0.6:25555 >/dev/null
bosh login admin "${BOSH_PASSWORD}" >/dev/null
existing_version=$("${script_dir}/bosh_list_${TYPE}s.rb" | awk -v name="${NAME}" -F"/" '$1 ~ name {print $2}')
if [[ "${existing_version}" != "${VERSION}" ]]; then
eval bosh upload "${TYPE}" "${URL}"
else
echo "${NAME}/${VERSION} is already uploaded"
fi
|
Exclude backup from check viewsurf | #!/bin/bash
set -e
function downloadURL {
URL="$1"
echo "Checking '$URL'..."
curl --fail "$URL" -o '/tmp/image'
}
function checkViewsurf {
echo ""
echo "Checking images of type '$1'" ...
IMAGES=$(grep "$1" "$CONFIGURATION_FILE" | grep -v -e "type" | tr -s " " | cut -d '"' -f 4)
for IMAGE in $IMAGES; do
LAST=$(curl --fail -s -o - "$IMAGE/last")
downloadURL "$IMAGE/${LAST}_tn.jpg"
downloadURL "$IMAGE/${LAST}.mp4"
done
}
function checkImage {
echo ""
echo "Checking images of type '$1'" ...
IMAGES=$(grep "$1" "$CONFIGURATION_FILE" | tr -s " " | cut -d '"' -f 4)
for IMAGE in $IMAGES; do
downloadURL "$IMAGE"
done
}
if [ -z "$1" ]; then
echo "Please specify the configuration (JSON) file"
exit -1
fi
CONFIGURATION_FILE="$1"
if [ ! -f "$CONFIGURATION_FILE" ]; then
echo "Cannot find $CONFIGURATION_FILE"
exit -1
fi
#checkImage "imageLD"
#checkImage "imageHD"
checkViewsurf "viewsurf"
| #!/bin/bash
set -e
function downloadURL {
URL="$1"
echo "Checking '$URL'..."
curl --fail "$URL" -o '/tmp/image'
}
function checkViewsurf {
echo ""
echo "Checking images of type '$1'" ...
IMAGES=$(grep "$1" "$CONFIGURATION_FILE" | grep -v -e "type" -e "backup" | tr -s " " | cut -d '"' -f 4)
for IMAGE in $IMAGES; do
LAST=$(curl --fail -s -o - "$IMAGE/last")
downloadURL "$IMAGE/${LAST}_tn.jpg"
downloadURL "$IMAGE/${LAST}.mp4"
done
}
function checkImage {
echo ""
echo "Checking images of type '$1'" ...
IMAGES=$(grep "$1" "$CONFIGURATION_FILE" | tr -s " " | cut -d '"' -f 4)
for IMAGE in $IMAGES; do
downloadURL "$IMAGE"
done
}
if [ -z "$1" ]; then
echo "Please specify the configuration (JSON) file"
exit -1
fi
CONFIGURATION_FILE="$1"
if [ ! -f "$CONFIGURATION_FILE" ]; then
echo "Cannot find $CONFIGURATION_FILE"
exit -1
fi
#checkImage "imageLD"
#checkImage "imageHD"
checkViewsurf "viewsurf"
|
Use the mirage dev development remote | PACKAGES="lwt ssl mirage cstruct ipaddr io-page crunch"
## different PPAs required to cover the test matrix
case "$OCAML_VERSION" in
3.12.1) ppa=avsm/ocaml312+opam11 ;;
4.00.1) ppa=avsm/ocaml40+opam11 ;;
4.01.0) ppa=avsm/ocaml41+opam11 ;;
*) echo Unknown $OCAML_VERSION,$OPAM_VERSION; exit 1 ;;
esac
## install OCaml and OPAM
echo "yes" | sudo add-apt-repository ppa:$ppa
sudo apt-get update -qq
sudo apt-get install -qq ocaml ocaml-native-compilers camlp4-extra opam
export OPAMYES=1
echo OCaml version
ocaml -version
echo OPAM versions
opam --version
opam --git-version
opam init git://github.com/ocaml/opam-repository
eval `opam config env`
## install Mirage
opam install $PACKAGES
## execute the build
cd $TRAVIS_BUILD_DIR
make configure MODE=$MIRAGE_BACKEND
make depend
make build
| PACKAGES="lwt ssl mirage cstruct ipaddr io-page crunch"
## different PPAs required to cover the test matrix
case "$OCAML_VERSION" in
3.12.1) ppa=avsm/ocaml312+opam11 ;;
4.00.1) ppa=avsm/ocaml40+opam11 ;;
4.01.0) ppa=avsm/ocaml41+opam11 ;;
4.02.0) ppa=avsm/ocaml42+opam11 ;;
*) echo Unknown $OCAML_VERSION,$OPAM_VERSION; exit 1 ;;
esac
## install OCaml and OPAM
echo "yes" | sudo add-apt-repository ppa:$ppa
sudo apt-get update -qq
sudo apt-get install -qq ocaml ocaml-native-compilers camlp4-extra opam
export OPAMYES=1
echo OCaml version
ocaml -version
echo OPAM versions
opam --version
opam --git-version
opam init git://github.com/ocaml/opam-repository > /dev/null 2>&1
opam remote add mirage-dev git://github.com/mirage/mirage-dev
eval `opam config env`
## install Mirage
opam install $PACKAGES
## execute the build
cd $TRAVIS_BUILD_DIR
make configure MODE=$MIRAGE_BACKEND
make depend
make build
|
Fix error with non-fat binaries | #!/usr/bin/env bash
set -euo pipefail
# skip if we run in debug
echo "Config: $CONFIGURATION"
if [[ "$CONFIGURATION" == *"Debug"* ]]; then
echo "Skip frameworks cleaning in debug version"
exit 0
fi
APP_PATH="${TARGET_BUILD_DIR}/${WRAPPER_NAME}"
# This script loops through the frameworks embedded in the application and
# removes unused architectures.
find "$APP_PATH" -name '*.framework' -type d | while read -r FRAMEWORK; do
FRAMEWORK_EXECUTABLE_NAME=$(defaults read "$FRAMEWORK/Info.plist" CFBundleExecutable)
FRAMEWORK_EXECUTABLE_PATH="$FRAMEWORK/$FRAMEWORK_EXECUTABLE_NAME"
echo "Executable is $FRAMEWORK_EXECUTABLE_PATH"
EXTRACTED_ARCHS=()
for ARCH in $ARCHS; do
echo "Extracting $ARCH from $FRAMEWORK_EXECUTABLE_NAME"
lipo -extract "$ARCH" "$FRAMEWORK_EXECUTABLE_PATH" -o "$FRAMEWORK_EXECUTABLE_PATH-$ARCH"
EXTRACTED_ARCHS+=("$FRAMEWORK_EXECUTABLE_PATH-$ARCH")
done
echo "Merging extracted architectures: ${ARCHS}"
lipo -o "$FRAMEWORK_EXECUTABLE_PATH-merged" -create "${EXTRACTED_ARCHS[@]}"
rm "${EXTRACTED_ARCHS[@]}"
echo "Replacing original executable with thinned version"
rm "$FRAMEWORK_EXECUTABLE_PATH"
mv "$FRAMEWORK_EXECUTABLE_PATH-merged" "$FRAMEWORK_EXECUTABLE_PATH"
done
| #!/usr/bin/env bash
set -euo pipefail
# skip if we run in debug
echo "Config: $CONFIGURATION"
if [[ "$CONFIGURATION" == *"Debug"* ]]; then
echo "Skip frameworks cleaning in debug version"
exit 0
fi
APP_PATH="${TARGET_BUILD_DIR}/${WRAPPER_NAME}"
# This script loops through the frameworks embedded in the application and
# removes unused architectures.
find "$APP_PATH" -name '*.framework' -type d | while read -r FRAMEWORK; do
FRAMEWORK_EXECUTABLE_NAME=$(defaults read "$FRAMEWORK/Info.plist" CFBundleExecutable)
FRAMEWORK_EXECUTABLE_PATH="$FRAMEWORK/$FRAMEWORK_EXECUTABLE_NAME"
echo "Executable is $FRAMEWORK_EXECUTABLE_PATH"
# skip if non-fat binary
[[ $(lipo -info "$FRAMEWORK_EXECUTABLE_PATH") == *"Non-fat"* ]] && continue;
EXTRACTED_ARCHS=()
for ARCH in $ARCHS; do
echo "Extracting $ARCH from $FRAMEWORK_EXECUTABLE_NAME"
lipo -extract "$ARCH" "$FRAMEWORK_EXECUTABLE_PATH" -o "$FRAMEWORK_EXECUTABLE_PATH-$ARCH"
EXTRACTED_ARCHS+=("$FRAMEWORK_EXECUTABLE_PATH-$ARCH")
done
echo "Merging extracted architectures: ${ARCHS}"
lipo -o "$FRAMEWORK_EXECUTABLE_PATH-merged" -create "${EXTRACTED_ARCHS[@]}"
rm "${EXTRACTED_ARCHS[@]}"
echo "Replacing original executable with thinned version"
rm "$FRAMEWORK_EXECUTABLE_PATH"
mv "$FRAMEWORK_EXECUTABLE_PATH-merged" "$FRAMEWORK_EXECUTABLE_PATH"
done
|
Make step marker a bit more visible | #!/bin/bash
set -e
dirname=$(dirname "$0")
cd "$dirname/.."
i=0
section() {
[ -z "$TRAVIS" ] || echo -en "travis_fold:start:$1\\r"
i=$((i+1))
echo "Step $i: $1..."
scripts/integrate/"$1".sh
[ -z "$TRAVIS" ] || echo -en "travis_fold:end:$1\\r"
}
section setup
section build
section test
section publish
section deploy
section relish
| #!/bin/bash
set -e
dirname=$(dirname "$0")
cd "$dirname/.."
i=0
section() {
[ -z "$TRAVIS" ] || echo -en "travis_fold:start:$1\\r"
i=$((i+1))
echo "---------- Step $i: $1 ----------"
scripts/integrate/"$1".sh
[ -z "$TRAVIS" ] || echo -en "travis_fold:end:$1\\r"
}
section setup
section build
section test
section publish
section deploy
section relish
|
Make this script a little more verbose | #!/bin/sh
CLI=bitcoin-cli
TESTADDR=`$CLI -regtest -datadir=regtest-datadir getnewaddress`
TESTTX=`$CLI -regtest -datadir=regtest-datadir sendtoaddress $TESTADDR 1.0`
$CLI -regtest -datadir=regtest-datadir gettransaction $TESTTX
echo Adresses
$CLI -regtest -datadir=regtest-datadir getaddressesbyaccount
echo Balance
$CLI -regtest -datadir=regtest-datadir getbalance
| #!/bin/sh
set -x
CLI=bitcoin-cli
TESTADDR=`$CLI -regtest -datadir=regtest-datadir getnewaddress`
echo "TESTADDR is $TESTADDR"
TESTTX=`$CLI -regtest -datadir=regtest-datadir sendtoaddress $TESTADDR 1.0`
echo sendtoaddress returned $?
$CLI -regtest -datadir=regtest-datadir gettransaction $TESTTX
$CLI -regtest -datadir=regtest-datadir setgenerate true 1
$CLI -regtest -datadir=regtest-datadir gettransaction $TESTTX
echo Adresses
$CLI -regtest -datadir=regtest-datadir getaddressesbyaccount
echo Balance
$CLI -regtest -datadir=regtest-datadir getbalance
|
Remove watchOS build step & use -quiet for xcodebuild | #!/usr/bin/env bash
# Run tests using SPM for macOS
swift test
# Run tests for tvOS
xcodebuild clean test -project Files.xcodeproj -scheme Files-tvOS -destination "platform=tvOS Simulator,name=Apple TV 1080p" CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO ONLY_ACTIVE_ARCH=NO
# Build for watchOS
xcodebuild clean build -project Files.xcodeproj -scheme Files-watchOS CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO ONLY_ACTIVE_ARCH=NO
| #!/usr/bin/env bash
# Run tests using SPM for macOS
swift test
# Run tests for tvOS
xcodebuild clean test -quiet -project Files.xcodeproj -scheme Files-tvOS -destination "platform=tvOS Simulator,name=Apple TV 1080p" CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO ONLY_ACTIVE_ARCH=NO
|
Use 16.04 prebuild virusdetect in build tools | ##depends:none
# see ../docs/VirusDetect-compile.bash
curl -L http://$NIC_MIRROR/pub/sci/molbio/chipster/dist/tools_extras/virusdetect/bioperl_ubuntu-12.04.tar.gz | tar -xz -C ${TOOLS_PATH}/
curl -L http://$NIC_MIRROR/pub/sci/molbio/chipster/dist/tools_extras/virusdetect/virusdetect_ubuntu-12.04.tar.gz | tar -xz -C ${TOOLS_PATH}/
| ##depends:none
# see ../docs/VirusDetect-compile.bash
curl -L http://$NIC_MIRROR/pub/sci/molbio/chipster/dist/tools_extras/virusdetect/bioperl_ubuntu-16.04.tar.gz | tar -xz -C ${TOOLS_PATH}/
curl -L http://$NIC_MIRROR/pub/sci/molbio/chipster/dist/tools_extras/virusdetect/virusdetect_ubuntu-16.04.tar.gz | tar -xz -C ${TOOLS_PATH}/
|
Fix build by updating the local apt-get repo | #!/bin/bash
# Install Git (TODO(CD): Remove this, for testing only)
apt-get -y install git
# Install the latest Marathon
apt-get -y update
apt-get -y install marathon
# List installed versions of external systems
dpkg -l marathon mesos zookeeper | grep '^ii'
# Start zookeeper
/usr/share/zookeeper/bin/zkServer.sh start
# Start Mesos and redirect stdout to /dev/null
/usr/bin/mesos-local --num_slaves=2 --quiet &
# Start Marathon
/etc/init.d/marathon start
# Give all of the processes above some time.
sleep 2
# Run the tox integration tests
cd /dcos-cli
make clean all
| #!/bin/bash
# Need to first update the local repo before installing anything
apt-get -y update
# Install Git (TODO(CD): Remove this, for testing only)
apt-get -y install git
# Install the latest Marathon
apt-get -y install marathon
# List installed versions of external systems
dpkg -l marathon mesos zookeeper | grep '^ii'
# Start zookeeper
/usr/share/zookeeper/bin/zkServer.sh start
# Start Mesos and redirect stdout to /dev/null
/usr/bin/mesos-local --num_slaves=2 --quiet &
# Start Marathon
/etc/init.d/marathon start
# Give all of the processes above some time.
sleep 2
# Run the tox integration tests
cd /dcos-cli
make clean all
|
Add copyright and license information. | #!/bin/bash
for count in `seq -w 1 999`; do
CF3/cfdg -s 627 polaroids.cfdg polaroids/$count.png
done
| #!/bin/bash
# Copyright (c) 2017 Finn Ellis.
# Free to use and modify under the terms of the MIT license.
# See included LICENSE file for details.
for count in `seq -w 1 999`; do
CF3/cfdg -s 627 polaroids.cfdg polaroids/$count.png
done
|
Update Travis deploy to reflect repo changes | #!/bin/bash
# Based on https://github.com/hydrabolt/discord.js-site/blob/master/deploy/deploy.sh
set -e
if [ "$TRAVIS_BRANCH" != "indev" -o -n "$TRAVIS_TAG" -o "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo -e "Not building for a non indev branch push - building without deploying."
npm run docs
exit 0
fi
echo -e "Building for a indev branch push - building and deploying."
REPO=$(git config remote.origin.url)
SHA=$(git rev-parse --verify HEAD)
TARGET_BRANCH="indev"
git clone $REPO dist -b $TARGET_BRANCH
npm run docs
rsync -vau docs/ dist/docs/
cd dist
git add --all .
git config user.name "Travis CI"
git config user.email "${COMMIT_EMAIL}"
git commit -m "Docs build: ${SHA}" || true
git push "https://${GH_TOKEN}@${GH_REF}" $TARGET_BRANCH
| #!/bin/bash
# Based on https://github.com/hydrabolt/discord.js-site/blob/master/deploy/deploy.sh
set -e
if [ "$TRAVIS_BRANCH" != "master" -o -n "$TRAVIS_TAG" -o "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo -e "Not building for a non indev branch push - building without deploying."
npm run docs
exit 0
fi
echo -e "Building for a indev branch push - building and deploying."
REPO=$(git config remote.origin.url)
SHA=$(git rev-parse --verify HEAD)
TARGET_BRANCH="master"
git clone $REPO dist -b $TARGET_BRANCH
npm run docs
rsync -vau docs/ dist/docs/
cd dist
git add --all .
git config user.name "Travis CI"
git config user.email "${COMMIT_EMAIL}"
git commit -m "Docs build: ${SHA}" || true
git push "https://${GH_TOKEN}@${GH_REF}" $TARGET_BRANCH
|
Remove unnecessary double build in Travis build script | #!/usr/bin/env sh
log_file="travis/travis_build.log"
# Run custom commands, such as running your test cases
eval "Rscript ./travis/travis_build.R | tee -a '$log_file'"
# Search for errors in the output of your custom commands
err1="ERROR"
err2="WARNING"
err3="Failure (at"
err4="Failure(@"
err5="Error: "
if ! grep -q "$err1\|$err2\|$err3\|$err4\|$err5" $log_file; then
echo "No errors, warnings, or failures found."
else
printf "\n"
echo "*** grep results **********************"
grep -n "$err1" $log_file
grep -n "$err2" $log_file
grep -n "$err3" $log_file
grep -n "$err4" $log_file
grep -n "$err5" $log_file
echo "ERROR, WARNING, or Failure found. See grep results above."
printf "\n"
exit 1
fi
# Run the same commands Travis runs by default
R CMD build --no-build-vignettes --no-manual receptormarker
R CMD check --no-build-vignettes --no-manual --as-cran receptormarker
| #!/usr/bin/env sh
log_file="travis/travis_build.log"
# Run custom commands, such as running your test cases
eval "Rscript ./travis/travis_build.R | tee -a '$log_file'"
# Search for errors in the output of your custom commands
err1="ERROR"
err2="WARNING"
err3="Failure (at"
err4="Failure(@"
err5="Error: "
if ! grep -q "$err1\|$err2\|$err3\|$err4\|$err5" $log_file; then
echo "No errors, warnings, or failures found."
else
printf "\n"
echo "*** grep results **********************"
grep -n "$err1" $log_file
grep -n "$err2" $log_file
grep -n "$err3" $log_file
grep -n "$err4" $log_file
grep -n "$err5" $log_file
echo "ERROR, WARNING, or Failure found. See grep results above."
printf "\n"
exit 1
fi
# Run the same commands Travis runs by default
R CMD check --no-build-vignettes --no-manual --as-cran receptormarker
|
Create a ansible_$playbook_latest.log symlink for cron logs | #!/bin/bash
# Runs system wide ansible configured in /etc
set -eu
. /etc/default/system-ansible
if [[ -z "${SYS_ANSIBLE_SERIALIZED:-}" ]] ; then
export SYS_ANSIBLE_SERIALIZED=1
exec run-one $0 "$@"
fi
set +u
. /opt/ansible/bin/activate
set -u
logtag=$(basename $SYS_ANSIBLE_PLAYBOOK)_$(date +%Y%m%d%H%M%S)
cd $SYS_ANSIBLE_ROOT
git pull
ansible-galaxy install -r requirements.yml
ansible-playbook -i $SYS_ANSIBLE_INVENTORY $SYS_ANSIBLE_PLAYBOOK >> /var/www/html/cron-logs/ansible_$logtag.log 2>&1
| #!/bin/bash
# Runs system wide ansible configured in /etc
set -eu
. /etc/default/system-ansible
if [[ -z "${SYS_ANSIBLE_SERIALIZED:-}" ]] ; then
export SYS_ANSIBLE_SERIALIZED=1
exec run-one $0 "$@"
fi
set +u
. /opt/ansible/bin/activate
set -u
playbook=$(basename $SYS_ANSIBLE_PLAYBOOK)
timestamp=$(date +%Y%m%d%H%M%S)
logfile="/var/www/html/cron-logs/ansible_${playbook}_${timestamp}.log"
cd $SYS_ANSIBLE_ROOT
git pull
ansible-galaxy install -r requirements.yml
ansible-playbook -i $SYS_ANSIBLE_INVENTORY $SYS_ANSIBLE_PLAYBOOK >> $logfile 2>&1
rm -f /var/www/html/cron-logs/ansible_${playbook}_latest.log
ln -s $logfile /var/www/html/cron-logs/ansible_${playbook}_latest.log
|
Update installed version of python to resolve unicode issues | #!/bin/bash
# Get our directory
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
set -x
# if the proxy is around, use it
nc -z -w3 192.168.1.1 8123 && export http_proxy="http://192.168.1.1:8123"
gem install json_pure -v '~> 1.0' --no-ri --no-rdoc
gem install puppet -v '~> 3.0' --no-ri --no-rdoc
gem install fpm -v '~> 0.4.0' --no-ri --no-rdoc
chmod 755 /vagrant/package/package.sh
TRAVIS=1 su vagrant -l -c 'ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
su vagrant -l -c "brew install /vagrant/package/vagrant-scripts/dmgbuild.rb"
/vagrant/package/package.sh /vagrant/substrate-assets/substrate_darwin_x86_64.zip master
mkdir -p /vagrant/pkg
cp *.dmg /vagrant/pkg
| #!/bin/bash
# Get our directory
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
set -x
# if the proxy is around, use it
nc -z -w3 192.168.1.1 8123 && export http_proxy="http://192.168.1.1:8123"
export PATH="/usr/local/bin:$PATH"
gem install json_pure -v '~> 1.0' --no-ri --no-rdoc
gem install puppet -v '~> 3.0' --no-ri --no-rdoc
gem install fpm -v '~> 0.4.0' --no-ri --no-rdoc
chmod 755 /vagrant/package/package.sh
TRAVIS=1 su vagrant -l -c 'ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
pushd /tmp
su vagrant -c "brew install python"
rm /usr/bin/python
ln -s /usr/local/bin/python /usr/bin/python
su vagrant -c "brew install /vagrant/package/vagrant-scripts/dmgbuild.rb"
popd
/vagrant/package/package.sh /vagrant/substrate-assets/substrate_darwin_x86_64.zip master
mkdir -p /vagrant/pkg
cp *.dmg /vagrant/pkg
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.