Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Correct order of copying "resources" files
mkdir -p ./data/$1/outputs/tex mkdir -p ./data/$1/inputs/md mkdir -p ./data/$1/outputs/report cp ./data/$1/resources/fui/fui-kompendium-blue.pdf ./data/$1/outputs/report/ifi-kompendium-forside-bm.pdf cp ./data/$1/resources/ifikompendium/ifikompendium.tex ./data/$1/outputs/report cp ./data/$1/resources/ifikompendium/ifikompendiumforside.sty ./data/$1/outputs/report if [ ! -d ./data/$1/resources ]; then cp -r ./resources ./data/$1/resources fi mkdir -p ./data/$1/inputs/tex if [ ! -f ./data/$1/inputs/tex/header.tex ]; then cp ./templates/header.tex ./data/$1/inputs/tex/header.tex fi if [ ! -f ./data/$1/inputs/tex/tail.tex ]; then cp ./templates/tail.tex ./data/$1/inputs/tex/tail.tex fi cd ./data/$1/inputs/md find . -iname "*.md" -type f -exec sh -c 'pandoc "${0}" -o "../../outputs/tex/${0%.md}.tex"' {} \;
mkdir -p ./data/$1/outputs/tex mkdir -p ./data/$1/inputs/md mkdir -p ./data/$1/outputs/report if [ ! -d ./data/$1/resources ]; then cp -r ./resources ./data/$1/resources fi cp ./data/$1/resources/fui/fui-kompendium-blue.pdf ./data/$1/outputs/report/ifi-kompendium-forside-bm.pdf cp ./data/$1/resources/ifikompendium/ifikompendium.tex ./data/$1/outputs/report cp ./data/$1/resources/ifikompendium/ifikompendiumforside.sty ./data/$1/outputs/report mkdir -p ./data/$1/inputs/tex if [ ! -f ./data/$1/inputs/tex/header.tex ]; then cp ./templates/header.tex ./data/$1/inputs/tex/header.tex fi if [ ! -f ./data/$1/inputs/tex/tail.tex ]; then cp ./templates/tail.tex ./data/$1/inputs/tex/tail.tex fi cd ./data/$1/inputs/md find . -iname "*.md" -type f -exec sh -c 'pandoc "${0}" -o "../../outputs/tex/${0%.md}.tex"' {} \;
Fix permissions (transfer ownership from root to cmbuild)
#!/bin/sh # Initialize ccache if needed if [ ! -f /srv/ccache/CACHEDIR.TAG ]; then CCACHE_DIR=/srv/ccache ccache -M 50G fi # Fix console permissions as long as https://github.com/docker/docker/issues/9806 is not fixed usermod --groups tty --append cmbuild chgrp tty /dev/console chmod g+rw /dev/console su -c "cd /home/cmbuild/android; screen -s /bin/bash" cmbuild
#!/bin/sh # Initialize ccache if needed if [ ! -f /srv/ccache/CACHEDIR.TAG ]; then CCACHE_DIR=/srv/ccache ccache -M 50G fi # Fix permissions. This is needed temporarily to migrate old projects which were still checked out as root. # It can be removed at some point in the future. # For performance reasons, only search on the first level. for i in "/home/cmbuild/android" "/srv/ccache"; do if [ $(find $i -maxdepth 1 -user root | wc -l) -gt 0 ]; then chown -R cmbuild:cmbuild $i fi done # Fix console permissions as long as https://github.com/docker/docker/issues/9806 is not fixed usermod --groups tty --append cmbuild chgrp tty /dev/console chmod g+rw /dev/console su -c "cd /home/cmbuild/android; screen -s /bin/bash" cmbuild
Copy the helper scripts to the undercloud
#!/bin/bash set -eux set -o pipefail if [ ! -f /.dockerenv ]; then echo "This must be run within a docker container"; exit 0; fi dt="$(date "+%Y-%m-%d_%H-%M_%s")"; echo "Running quickstart"; URL=$1; bash ~/clean.sh; source openrc.sh; rm -rf ~/reproduce/ && mkdir ~/reproduce; WORKSPACE=~/reproduce/ rm -f reproducer-quickstart.sh; wget $URL; bash -x reproducer-quickstart.sh \ --workspace $WORKSPACE \ --create-virtualenv true \ --remove-stacks-keypairs true \ --nodestack-prefix repro \ --autorun;
#!/bin/bash set -eux set -o pipefail if [ ! -f /.dockerenv ]; then echo "This must be run within a docker container"; exit 0; fi dt="$(date "+%Y-%m-%d_%H-%M_%s")"; echo "Running quickstart"; URL=$1; bash ~/clean.sh; source openrc.sh; rm -rf ~/reproduce/ && mkdir ~/reproduce; WORKSPACE=~/reproduce/ rm -f reproducer-quickstart.sh; wget $URL; bash -x reproducer-quickstart.sh \ --workspace $WORKSPACE \ --create-virtualenv true \ --remove-stacks-keypairs true \ --nodestack-prefix repro \ --autorun; export $(awk '/subnode-0/ {print $2}' reproduce/multinode_hosts); scp -r zuul@$ansible_host:scripts ~/scripts
Add git fetch all prune alias
if command -v hub &>/dev/null; then # hub function git(){hub "$@"} fi # git alias g=git alias gst='git status -sb' alias gp='git push' alias gpo='gp origin master' alias gps='gp stage' alias gd='git diff' alias gdc='git diff --cached' alias ga='git add' alias gcl='git config --list' alias gc='git commit -v' alias gca='git commit -v -a' alias gco='git checkout' alias gb='git branch' alias gba='git branch -a' alias gx=gitx alias gl='git l' alias gpl='git pl' alias gr='git remote -v'
if command -v hub &>/dev/null; then # hub function git(){hub "$@"} fi # git alias g=git alias gst='git status -sb' alias gp='git push' alias gpo='gp origin master' alias gps='gp stage' alias gd='git diff' alias gdc='git diff --cached' alias ga='git add' alias gcl='git config --list' alias gc='git commit -v' alias gca='git commit -v -a' alias gco='git checkout' alias gb='git branch' alias gba='git branch -a' alias gx=gitx alias gl='git l' alias gpl='git pl' alias gr='git remote -v' alias gfp='git fetch --all --prune'
Allow passing parameters to rubocop
function rubocop_changed_in_branch() { rubocop `git_files_changed_vs_origin_master` || return $? }
function rubocop_changed_in_branch() { rubocop $@ `git_files_changed_vs_origin_master` || return $? }
Move only exiftool to lib folder
#!/bin/bash mkdir tmp pushd tmp wget http://www.sno.phy.queensu.ca/~phil/exiftool/Image-ExifTool-10.02.tar.gz gzip -dc Image-ExifTool-10.02.tar.gz | tar -xf - pushd Image-ExifTool-10.02 perl Makefile.PL make test make PREFIX=. install cp exiftool ../exiftool cp lib ../lib -r popd rm tmp -rf rm Image-ExifTool-10.02* -rf
#!/bin/bash mkdir tmp pushd tmp wget http://www.sno.phy.queensu.ca/~phil/exiftool/Image-ExifTool-10.02.tar.gz gzip -dc Image-ExifTool-10.02.tar.gz | tar -xf - pushd Image-ExifTool-10.02 perl Makefile.PL make test make PREFIX=. install cp exiftool ../lib/exiftool # cp lib ../lib -r popd rm tmp -rf rm Image-ExifTool-10.02* -rf
Configure OpenSSL version for Ruby compilation
#!/usr/bin/env sh if [ -d "${HOME}/.rbenv" ] ; then PATH="${PATH}:${HOME}/.rbenv/bin" fi if which rbenv &>/dev/null ; then PATH="${PATH}:$(rbenv prefix)/bin" eval "$(rbenv init -)" fi if [ -d "${HOME}/.rbenv/plugins/ruby-build" ] ; then PATH="${PATH}:${HOME}/.rbenv/plugins/ruby-build/bin" fi
#!/usr/bin/env sh if [ -d "${HOME}/.rbenv" ] ; then PATH="${PATH}:${HOME}/.rbenv/bin" fi if which rbenv &>/dev/null ; then PATH="${PATH}:$(rbenv prefix)/bin" eval "$(rbenv init -)" fi if [ -d "${HOME}/.rbenv/plugins/ruby-build" ] ; then PATH="${PATH}:${HOME}/.rbenv/plugins/ruby-build/bin" if which brew >/dev/null 2>&1 ; then if brew --prefix openssl@1.1 >/dev/null 2>&1 ; then RUBY_CONFIGURE_OPTS="--with-openssl-dir=$(brew --prefix openssl@1.1)" export RUBY_CONFIGURE_OPTS fi fi fi
Test first, build docs later. Faster test feedback from Hudson.
# $WORKSPACE # $JOB_NAME echo echo echo Building MarineMap docs : revision $MERCURIAL_REVISION echo echo python build_docs.py -d /var/www/marinemap-docs -j /usr/local/jsdoc-toolkit-read-only/jsdoc-toolkit echo echo echo Testing MarineMap : revision $MERCURIAL_REVISION echo echo coverage run run_tests.py echo echo echo Analyzing coverage : revision $MERCURIAL_REVISION echo echo coverage xml --omit /usr/share
# $WORKSPACE # $JOB_NAME echo echo echo Testing MarineMap : revision $MERCURIAL_REVISION echo echo coverage run run_tests.py echo echo echo Building MarineMap docs : revision $MERCURIAL_REVISION echo echo python build_docs.py -d /var/www/marinemap-docs -j /usr/local/jsdoc-toolkit-read-only/jsdoc-toolkit echo echo echo Analyzing coverage : revision $MERCURIAL_REVISION echo echo coverage xml --omit /usr/share
Use 4-space indent in shellscript
#!/bin/bash set -e wrap () { COLUMNS=$(tput cols || echo 80) echo "$@" | fmt -w $((COLUMNS - 5)) } cd "${0%/*}" if [[ ! -d venv ]]; then echo "First run; creating virtual environment..." wrap "Using ${PYTHON_EXE:="$(which python3)"} as the Python interpreter. Set the PYTHON_EXE variable to use a different Python interpreter." "$PYTHON_EXE" -m venv venv source venv/bin/activate pip install -U pip setuptools wheel pip install -Ur requirements.txt echo else source venv/bin/activate fi echo "Starting uWSGI..." if [[ ! -f uwsgi.ini ]]; then cp uwsgi_example.ini uwsgi.ini fi uwsgi --ini uwsgi.ini "$@"
#!/bin/bash set -e wrap () { COLUMNS=$(tput cols || echo 80) echo "$@" | fmt -w $((COLUMNS - 5)) } cd "${0%/*}" if [[ ! -d venv ]]; then echo "First run; creating virtual environment..." wrap "Using ${PYTHON_EXE:="$(which python3)"} as the Python interpreter. Set the PYTHON_EXE variable to use a different Python interpreter." "$PYTHON_EXE" -m venv venv source venv/bin/activate pip install -U pip setuptools wheel pip install -Ur requirements.txt echo else source venv/bin/activate fi echo "Starting uWSGI..." if [[ ! -f uwsgi.ini ]]; then cp uwsgi_example.ini uwsgi.ini fi uwsgi --ini uwsgi.ini "$@"
Fix seednode uninstallation script paths
#!/bin/sh service bitcoin stop service bisq stop userdel bisq rm -rf /root/bisq userdel bitcoin rm -rf /bitcoin
#!/bin/sh echo "[*] Uninstalling Bitcoin and Bisq, will delete all data!!" sudo rm -rf /root/bisq sudo systemctl stop bitcoin sudo systemctl stop bisq sudo systemctl disable bitcoin sudo systemctl disable bisq sleep 10 sudo userdel -f -r bisq sleep 10 sudo userdel -f -r bitcoin echo "[*] Done!"
Exit if result code is non zero
#! /bin/sh project="ci-build" echo "Attempting to build $project for Windows" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildWindowsPlayer "$(pwd)/Build/windows/$project.exe" \ -quit echo "Attempting to build $project for OS X" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildOSXUniversalPlayer "$(pwd)/Build/osx/$project.app" \ -quit echo 'Logs from OS X build' cat $(pwd)/unity.log echo "Attempting to build $project for Linux" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildLinuxUniversalPlayer "$(pwd)/Build/linux/$project.exe" \ -quit echo 'Logs from build' cat $(pwd)/unity.log
#! /bin/sh project="ci-build" echo "Attempting to build $project for Windows" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildWindowsPlayer "$(pwd)/Build/windows/$project.exe" \ -quit rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi echo "Attempting to build $project for OS X" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildOSXUniversalPlayer "$(pwd)/Build/osx/$project.app" \ -quit rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi echo 'Logs from OS X build' cat $(pwd)/unity.log echo "Attempting to build $project for Linux" /Applications/Unity/Unity.app/Contents/MacOS/Unity \ -batchmode \ -nographics \ -silent-crashes \ -logFile $(pwd)/unity.log \ -projectPath $(pwd) \ -buildLinuxUniversalPlayer "$(pwd)/Build/linux/$project.exe" \ -quit rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi echo 'Logs from build' cat $(pwd)/unity.log
Enable pa11ycrawler on pull requests. It will only fail the build if it totally breaks.
#!/usr/bin/env bash set -e ############################################################################### # # Usage: # To run just tests, without pa11ycrawler: # ./scripts/accessibility-tests.sh # # To run tests, followed by pa11ycrawler: # RUN_PA11YCRAWLER=1 ./scripts/accessibility-tests.sh # ############################################################################### echo "Setting up for accessibility tests..." source scripts/jenkins-common.sh echo "Running explicit accessibility tests..." SELENIUM_BROWSER=phantomjs paver test_a11y --with-xunitmp echo "Generating coverage report..." paver a11y_coverage if [ "$RUN_PA11YCRAWLER" = "1" ] then # The settings that we use are installed with the pa11ycrawler module export SCRAPY_SETTINGS_MODULE='pa11ycrawler.settings' echo "Running pa11ycrawler against test course..." paver pa11ycrawler --fasttest --skip-clean --fetch-course --with-html echo "Generating coverage report..." paver pa11ycrawler_coverage fi
#!/usr/bin/env bash set -e ############################################################################### # # Usage: # To run just tests, without pa11ycrawler: # ./scripts/accessibility-tests.sh # # To run tests, followed by pa11ycrawler: # RUN_PA11YCRAWLER=1 ./scripts/accessibility-tests.sh # ############################################################################### echo "Setting up for accessibility tests..." source scripts/jenkins-common.sh echo "Running explicit accessibility tests..." SELENIUM_BROWSER=phantomjs paver test_a11y --with-xunitmp echo "Generating coverage report..." paver a11y_coverage # Force the following if statement to always be true RUN_PA11YCRAWLER=1 if [ "$RUN_PA11YCRAWLER" = "1" ] then # The settings that we use are installed with the pa11ycrawler module export SCRAPY_SETTINGS_MODULE='pa11ycrawler.settings' echo "Running pa11ycrawler against test course..." paver pa11ycrawler --fasttest --skip-clean --fetch-course --with-html echo "Generating coverage report..." paver pa11ycrawler_coverage fi
Use `git config core.editor` for old versions of git such as 1.7.1
#!/bin/bash if [ $# -eq 0 ]; then echo "Usage: $0 keep_commit_count" 1>&2 exit 1 fi # Number of commits to keep (default: 3) keep_commits=$1 # Calculate the end commit index old_commits=`git log --pretty=oneline | wc -l` end=$(($old_commits - 1 - $keep_commits)) if [ $end -lt 2 ]; then echo "No need to squash" 1>&2 exit 2 fi # Do squash if [ `uname -s` = Darwin -a `which sed` = /usr/bin/sed ]; then editor="sed -i '' -e '2,${end}s/^pick /squash /'" else editor="sed -i -e '2,${end}s/^pick /squash /'" fi initial_commit=`git log --pretty=format:%H | tail -1` git -c core.editor="$editor" rebase -i $initial_commit # Do garbage collection git reflog expire --expire=now --all git gc --prune=now --aggressive
#!/bin/bash if [ $# -eq 0 ]; then echo "Usage: $0 keep_commit_count" 1>&2 exit 1 fi # Number of commits to keep (default: 3) keep_commits=$1 # Calculate the end commit index old_commits=`git log --pretty=oneline | wc -l` end=$(($old_commits - 1 - $keep_commits)) if [ $end -lt 2 ]; then echo "No need to squash" 1>&2 exit 2 fi # Do squash if [ `uname -s` = Darwin -a `which sed` = /usr/bin/sed ]; then editor="sed -i '' -e '2,${end}s/^pick /squash /'" else editor="sed -i -e '2,${end}s/^pick /squash /'" fi initial_commit=`git log --pretty=format:%H | tail -1` if [ "`git -c 2>&1 | head -1`" = "Unknown option: -c" ]; then # Save old core.editor value old_editor=`git config -l -f .git/config | sed -n 's/^core\.editor=//p'` git config core.editor "$editor" git rebase -i $initial_commit # Restore core.editor value if [ $old_editor ]; then git config core.editor "$old_editor" else git config --unset core.editor fi else git -c core.editor="$editor" rebase -i $initial_commit fi # Do garbage collection git reflog expire --expire=now --all git gc --prune=now --aggressive
Kill caffeinate at the end of script execution
if [ "$FUNCTIONS_LOADED" != 'TRUE' ]; then DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${DIR}/../functions.sh" fi cleanup_brew() { brew cleanup --force rm -rf "$(brew --cache)" } final_message() { clear echo "All the automated scripts have now finished." echo "Dotfiles setup is complete! ♥️ ♥️ ♥️" } cleanup_brew final_message
if [ "$FUNCTIONS_LOADED" != 'TRUE' ]; then DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${DIR}/../functions.sh" fi cleanup_brew() { brew cleanup --force rm -rf "$(brew --cache)" } final_message() { clear echo "All the automated scripts have now finished." echo "Dotfiles setup is complete! ♥️ ♥️ ♥️" } cleanup_brew killall caffeinate # computer can go back to sleep final_message
Upgrade to mariadb 10.3 on travis
#!/bin/sh remove_mysql(){ service mysql stop apt-get -qq autoremove --purge mysql-server mysql-client mysql-common rm -rf /etc/mysql||true rm -rf /var/lib/mysql||true } remove_mysql service mysql stop apt-get install python-software-properties apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db add-apt-repository 'deb [arch=amd64,i386] http://mirrors.accretive-networks.net/mariadb/repo/10.2/ubuntu precise main' apt-get update apt-get install mariadb-server service mysql start
#!/bin/sh remove_mysql(){ service mysql stop apt-get -qq autoremove --purge mysql-server mysql-client mysql-common rm -rf /etc/mysql||true rm -rf /var/lib/mysql||true } remove_mysql service mysql stop apt-get install software-properties-common apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db add-apt-repository 'deb [arch=amd64,i386,ppc64el] http://mariadb.mirror.iweb.com/repo/10.3/ubuntu trusty main' apt-get update apt-get install mariadb-server service mysql start
Fix gopath which was pointing to the Godeps folder
#!/bin/bash set -v # Set Environment echo ${PATH} | grep -q "${HOME}/bin" || { echo "Adding ${HOME}/bin to PATH" export PATH="${PATH}:${HOME}/bin" } # Install Go 1.5 mkdir -p ~/bin curl -sL -o ~/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme chmod +x ~/bin/gimme eval "$(gimme 1.5)" # pcap.h is required to build skydive sudo yum -y install libpcap-devel # Get the Go dependencies export GOPATH=$HOME go get -f -u github.com/axw/gocov/gocov go get -f -u github.com/mattn/goveralls go get -f -u golang.org/x/tools/cmd/cover go get -f -u github.com/golang/lint/golint export GOPATH=`pwd`/Godeps/_workspace export PATH=$PATH:$GOPATH/bin # Fake install of project mkdir -p ${GOPATH}/src/github.com/redhat-cip/ ln -s $(pwd) ${GOPATH}/src/github.com/redhat-cip/skydive
#!/bin/bash set -v # Set Environment echo ${PATH} | grep -q "${HOME}/bin" || { echo "Adding ${HOME}/bin to PATH" export PATH="${PATH}:${HOME}/bin" } # Install Go 1.5 mkdir -p ~/bin curl -sL -o ~/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme chmod +x ~/bin/gimme eval "$(gimme 1.5)" # pcap.h is required to build skydive sudo yum -y install libpcap-devel export GOPATH=$WORKSPACE # Get the Go dependencies go get -f -u github.com/axw/gocov/gocov go get -f -u github.com/mattn/goveralls go get -f -u golang.org/x/tools/cmd/cover go get -f -u github.com/golang/lint/golint export PATH=$PATH:$GOPATH/bin
Add shortcut for easier Git backpedalling
# Global aliases alias l='ls -alh' alias g='git status' alias u='pbpaste | uglifyjs --mangle 2>/dev/null | pbcopy' alias strip-meta='exiftool $@ "-All=" -overwrite_original' # Global function for full-screening the terminal window. fit(){ # Make sure we're running interactively. [[ $- == *i* ]] && { osascript -e 'tell application "Terminal" activate set bounds of window 1 to {0, 0, 1440, 800} set position of window 1 to {0, 0} end tell'; }; } export fit; fit # Various other crap { rm ~/.DS_Store; dsclean ~/Desktop; } > /dev/null 2>&1
# Global aliases alias l='ls -alh' alias g='git status' alias u='pbpaste | uglifyjs --mangle 2>/dev/null | pbcopy' alias strip-meta='exiftool $@ "-All=" -overwrite_original' alias fuck-this-shit='git reset --hard HEAD; git clean -fd' # Global function for full-screening the terminal window. fit(){ # Make sure we're running interactively. [[ $- == *i* ]] && { osascript -e 'tell application "Terminal" activate set bounds of window 1 to {0, 0, 1440, 800} set position of window 1 to {0, 0} end tell'; }; } export fit; fit # Various other crap { rm ~/.DS_Store; dsclean ~/Desktop; } > /dev/null 2>&1
Add a bindkey for autosuggestion
# ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=#ff00ff,bg=cyan,bold,underline" ZSH_AUTOSUGGEST_STRATEGY=(completion history)
# ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=#ff00ff,bg=cyan,bold,underline" ZSH_AUTOSUGGEST_STRATEGY=(completion history) bindkey "^Y" autosuggest-accept #bindkey autosuggest-execute #bindkey autosuggest-clear #bindkey autosuggest-fetch #bindkey autosuggest-disable #bindkey autosuggest-enable #bindkey autosuggest-toggle
Set up global git ignore file
#!/bin/bash #set name and email git config --global user.name "Alan Christianson" git config --global user.email "git@c10n.net"
#!/bin/bash SRC_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) #set name and email git config --global user.name "Alan Christianson" git config --global user.email "git@c10n.net" # set up global gitignore ln -s $SRC_DIR/gitignore_global ~/.gitignore_global git config --global core.excludesfile ~/.gitignore_global
Revert "git push and run"
#!/bin/sh if [ $TRAVIS_PULL_REQUEST != "false" ]; then echo "Testing pull request" gulp buildAndTest elif [ $TRAVIS_BRANCH = "master" ]; then echo "Testing and deploying to production" # Add deploy ssh key eval "$(ssh-agent -s)" #start the ssh agent ssh-add .travis/id_rsa gulp cd:pushAndRun elif [ $TRAVIS_BRANCH = "staging" ]; then echo "Deploying to staging and testing" # Add deploy ssh key eval "$(ssh-agent -s)" #start the ssh agent ssh-add .travis/id_rsa gulp cd:pushAndRun else echo "Testing branch $TRAVIS_BRANCH" gulp buildAndTest fi
#!/bin/sh if [ $TRAVIS_PULL_REQUEST != "false" ]; then echo "Testing pull request" gulp buildAndTest elif [ $TRAVIS_BRANCH = "master" ]; then echo "Testing and deploying to production" # Add deploy ssh key eval "$(ssh-agent -s)" #start the ssh agent ssh-add .travis/id_rsa gulp cd elif [ $TRAVIS_BRANCH = "staging" ]; then echo "Deploying to staging and testing" # Add deploy ssh key eval "$(ssh-agent -s)" #start the ssh agent ssh-add .travis/id_rsa gulp cd:pushAndRun else echo "Testing branch $TRAVIS_BRANCH" gulp buildAndTest fi
Remove the .init hack and fix media access rights
#!/bin/bash SCRIPT_DIR="$(dirname "$0")" source "$SCRIPT_DIR/app_base.sh" # django init python $manage wait-for-db python $manage syncdb --noinput python $manage migrate --noinput python $manage bower_install -- --allow-root python $manage collectstatic --noinput if [ ! -f .init ]; then chown -R www-data:www-data $media python $manage timeside-create-admin-user python $manage timeside-create-boilerplate touch .init fi if [ $DEBUG = "False" ]; then python $manage update_index --workers $processes & fi # app start if [ $1 = "--runserver" ] then python $manage runserver_plus 0.0.0.0:8000 else # static files auto update watchmedo shell-command --patterns="*.js;*.css" --recursive \ --command='python '$manage' collectstatic --noinput' $src & uwsgi --socket :$port --wsgi-file $wsgi --chdir $app --master \ --processes $processes --threads $threads \ --uid $uid --gid $gid \ --py-autoreload $autoreload fi
#!/bin/bash SCRIPT_DIR="$(dirname "$0")" source "$SCRIPT_DIR/app_base.sh" # django init python $manage wait-for-db python $manage syncdb --noinput python $manage migrate --noinput python $manage bower_install -- --allow-root python $manage collectstatic --noinput # timeside setup python $manage timeside-create-admin-user python $manage timeside-create-boilerplate # fix media access rights chown www-data:www-data $media for dir in $(ls $media); do if [ ! $(stat -c %U $media/$dir) = 'www-data' ]; then chown www-data:www-data $media/$dir fi done if [ $DEBUG = "False" ]; then python $manage update_index --workers $processes & fi # app start if [ $1 = "--runserver" ] then python $manage runserver_plus 0.0.0.0:8000 else # static files auto update watchmedo shell-command --patterns="*.js;*.css" --recursive \ --command='python '$manage' collectstatic --noinput' $src & uwsgi --socket :$port --wsgi-file $wsgi --chdir $app --master \ --processes $processes --threads $threads \ --uid $uid --gid $gid \ --py-autoreload $autoreload fi
Switch to sphinx=1.5.1 for py3.6 support
#!/bin/bash CONDA_INSTALL="conda install -q -y" PIP_INSTALL="pip install -q" # Deactivate any environment set +v source deactivate set -v # Display root environment (for debugging) conda list # Clean up any left-over from a previous build # (note workaround for https://github.com/conda/conda/issues/2679: # `conda env remove` issue) conda remove --all -q -y -n $CONDA_ENV # Scipy, CFFI, jinja2 and IPython are optional dependencies, but exercised in the test suite conda create -n $CONDA_ENV -q -y python=$PYTHON set +v source activate $CONDA_ENV set -v # Install llvmdev (separate channel, for now) $CONDA_INSTALL -c numba llvmdev="4.0*" # Install enum34 for Python < 3.4 if [ $PYTHON \< "3.4" ]; then $CONDA_INSTALL enum34; fi # Install dependencies for building the docs # Note: sphinx 1.5.4 has a bug $CONDA_INSTALL sphinx=1.4 sphinx_rtd_theme pygments # Install dependencies for code coverage (codecov.io) if [ "$RUN_COVERAGE" == "yes" ]; then $PIP_INSTALL codecov coveralls; fi
#!/bin/bash CONDA_INSTALL="conda install -q -y" PIP_INSTALL="pip install -q" # Deactivate any environment set +v source deactivate set -v # Display root environment (for debugging) conda list # Clean up any left-over from a previous build # (note workaround for https://github.com/conda/conda/issues/2679: # `conda env remove` issue) conda remove --all -q -y -n $CONDA_ENV # Scipy, CFFI, jinja2 and IPython are optional dependencies, but exercised in the test suite conda create -n $CONDA_ENV -q -y python=$PYTHON set +v source activate $CONDA_ENV set -v # Install llvmdev (separate channel, for now) $CONDA_INSTALL -c numba llvmdev="4.0*" # Install enum34 for Python < 3.4 if [ $PYTHON \< "3.4" ]; then $CONDA_INSTALL enum34; fi # Install dependencies for building the docs # Note: sphinx 1.5.4 has a bug $CONDA_INSTALL sphinx=1.5.1 sphinx_rtd_theme pygments # Install dependencies for code coverage (codecov.io) if [ "$RUN_COVERAGE" == "yes" ]; then $PIP_INSTALL codecov coveralls; fi
Remove -only-exposed from docker-gen statement so that it will pick up containers on internal networks
#!/bin/bash # SIGTERM-handler term_handler() { [[ -n "$docker_gen_pid" ]] && kill $docker_gen_pid [[ -n "$letsencrypt_service_pid" ]] && kill $letsencrypt_service_pid source /app/functions.sh remove_all_location_configurations exit 143; # 128 + 15 -- SIGTERM } trap 'term_handler' INT QUIT KILL TERM /app/letsencrypt_service & letsencrypt_service_pid=$! docker-gen -watch -only-exposed -notify '/app/update_certs' -wait 15s:60s /app/letsencrypt_service_data.tmpl /app/letsencrypt_service_data & docker_gen_pid=$! # wait "indefinitely" while [[ -e /proc/$docker_gen_pid ]]; do wait $docker_gen_pid # Wait for any signals or end of execution of docker-gen done # Stop container properly term_handler
#!/bin/bash # SIGTERM-handler term_handler() { [[ -n "$docker_gen_pid" ]] && kill $docker_gen_pid [[ -n "$letsencrypt_service_pid" ]] && kill $letsencrypt_service_pid source /app/functions.sh remove_all_location_configurations exit 143; # 128 + 15 -- SIGTERM } trap 'term_handler' INT QUIT KILL TERM /app/letsencrypt_service & letsencrypt_service_pid=$! docker-gen -watch -notify '/app/update_certs' -wait 15s:60s /app/letsencrypt_service_data.tmpl /app/letsencrypt_service_data & docker_gen_pid=$! # wait "indefinitely" while [[ -e /proc/$docker_gen_pid ]]; do wait $docker_gen_pid # Wait for any signals or end of execution of docker-gen done # Stop container properly term_handler
Add check if user is root
#!/bin/bash usage() { cat << EOF usage: $0 options This script creates and sets up VM. OPTIONS: -h Show this message -i IP for server -n hostName for server EOF } HOSTNAME= IP= while getopts “hn:i:” OPTION do case $OPTION in h) usage exit 1 ;; n) HOSTNAME=$OPTARG ;; i) IP=$OPTARG ;; p) ?) usage exit ;; esac done if [[ -z $IP ]] || [[ -z $HOSTNAME ]] then usage exit 1 fi sudo ./createvm.sh $HOSTNAME $IP sleep 30 knife bootstrap $IP -x brain -N $HOSTNAME -P password --sudo knife node run_list add $HOSTNAME 'role[simple_webserver]' sleep 15 knife ssh name:$HOSTNAME "sudo chef-client" -x brain -a ipaddress -P password
#!/bin/bash if [ "$(id -u)" == "0" ]; then echo "This script must be run as user" 1>&2 exit 1 fi sudo usage() { cat << EOF usage: $0 options This script creates and sets up VM. OPTIONS: -h Show this message -i IP for server -n hostName for server EOF } HOSTNAME= IP= while getopts “hn:i:” OPTION do case $OPTION in h) usage exit 1 ;; n) HOSTNAME=$OPTARG ;; i) IP=$OPTARG ;; p) ?) usage exit ;; esac done if [[ -z $IP ]] || [[ -z $HOSTNAME ]] then usage exit 1 fi sudo ./createvm.sh $HOSTNAME $IP sleep 30 knife bootstrap $IP -x brain -N $HOSTNAME -P password --sudo knife node run_list add $HOSTNAME 'role[simple_webserver]' sleep 15 knife ssh name:$HOSTNAME "sudo chef-client" -x brain -a ipaddress -P password
Add some debugging to the sass compilation script.
#!/bin/bash LIBSASS_VERSION="3.3.4" SASSC_VERSION="3.3.4" SASS_LIBSASS_PATH="$(pwd)/libsass" git clone git@github.com:sass/libsass.git cd libsass git checkout ${LIBSASS_VERSION} cd ../ git clone https://github.com/sass/sassc.git cd sassc git checkout ${SASSC_VERSION} make && make install
#!/bin/bash set -ex LIBSASS_VERSION="3.3.4" SASSC_VERSION="3.3.4" SASS_LIBSASS_PATH="$(pwd)/libsass" git clone git@github.com:sass/libsass.git cd libsass git checkout ${LIBSASS_VERSION} cd ../ git clone https://github.com/sass/sassc.git cd sassc git checkout ${SASSC_VERSION} make && make install
Initialize the submodules instead of updating them.
function die() { echo "${@}" exit 1 } # Add <strong>.old</strong> to any existing Vim file in the home directory for i in $HOME/.vim $HOME/.vimrc $HOME/.gvimrc; do if [[ ( -e $i ) || ( -h $i ) ]]; then echo "${i} has been renamed to ${i}.old" mv "${i}" "${i}.old" || die "Could not move ${i} to ${i}.old" fi done # Clone Vim Creeper into .vim echo "Cloning Vim Creeper." git clone https://github.com/rondale-sc/vim-creeper.git $HOME/.vim \ || die "Could not clone the repository to ${HOME}/.vim" echo "Linking .vimrc and .gvimrc." ln -s $HOME/.vim/vimrc $HOME/.vimrc ln -s $HOME/.vim/gvimrc $HOME/.gvimrc echo "Updating submodules." cd $HOME/.vim && git submodule foreach git pull origin master
function die() { echo "${@}" exit 1 } # Add <strong>.old</strong> to any existing Vim file in the home directory for i in $HOME/.vim $HOME/.vimrc $HOME/.gvimrc; do if [[ ( -e $i ) || ( -h $i ) ]]; then echo "${i} has been renamed to ${i}.old" mv "${i}" "${i}.old" || die "Could not move ${i} to ${i}.old" fi done # Clone Vim Creeper into .vim echo "Cloning Vim Creeper." git clone https://github.com/rondale-sc/vim-creeper.git $HOME/.vim \ || die "Could not clone the repository to ${HOME}/.vim" echo "Linking .vimrc and .gvimrc." ln -s $HOME/.vim/vimrc $HOME/.vimrc ln -s $HOME/.vim/gvimrc $HOME/.gvimrc echo "Updating submodules." cd $HOME/.vim && git submodule update --init
Remove extraneous ';' on closing 'extern "C"'
#!/bin/sh objroot=$1 cat <<EOF #ifndef JEMALLOC_H_ #define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif EOF for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \ jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do cat "${objroot}include/jemalloc/${hdr}" \ | grep -v 'Generated from .* by configure\.' \ | sed -e 's/^#define /#define /g' \ | sed -e 's/ $//g' echo done cat <<EOF #ifdef __cplusplus }; #endif #endif /* JEMALLOC_H_ */ EOF
#!/bin/sh objroot=$1 cat <<EOF #ifndef JEMALLOC_H_ #define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif EOF for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \ jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do cat "${objroot}include/jemalloc/${hdr}" \ | grep -v 'Generated from .* by configure\.' \ | sed -e 's/^#define /#define /g' \ | sed -e 's/ $//g' echo done cat <<EOF #ifdef __cplusplus } #endif #endif /* JEMALLOC_H_ */ EOF
Upgrade Java 11 version in CI image
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u322-b06/OpenJDK8U-jdk_x64_linux_hotspot_8u322b06.tar.gz" ;; java11) echo "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.14_9.tar.gz" ;; java17) echo "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_linux_hotspot_17.0.2_8.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u322-b06/OpenJDK8U-jdk_x64_linux_hotspot_8u322b06.tar.gz" ;; java11) echo "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14.1%2B1/OpenJDK11U-jdk_x64_linux_hotspot_11.0.14.1_1.tar.gz" ;; java17) echo "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_linux_hotspot_17.0.2_8.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
Fix docker config custom env copy
#!/bin/sh set -e # Populate config directory if [ -z "$(ls -A /config)" ]; then cp /app/support/docker/production/config/* /config fi # Always copy default and custom env configuration file, in cases where new keys were added cp /app/config/default.yaml /config cp /app/config/custom-environment-variables.yaml /config chown -R peertube:peertube /config # first arg is `-f` or `--some-option` # or first arg is `something.conf` if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then set -- npm "$@" fi # allow the container to be started with `--user` if [ "$1" = 'npm' -a "$(id -u)" = '0' ]; then chown -R peertube:peertube /data exec gosu peertube "$0" "$@" fi exec "$@"
#!/bin/sh set -e # Populate config directory if [ -z "$(ls -A /config)" ]; then cp /app/support/docker/production/config/* /config fi # Always copy default and custom env configuration file, in cases where new keys were added cp /app/config/default.yaml /config cp /app/support/docker/production/config/custom-environment-variables.yaml /config chown -R peertube:peertube /config # first arg is `-f` or `--some-option` # or first arg is `something.conf` if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then set -- npm "$@" fi # allow the container to be started with `--user` if [ "$1" = 'npm' -a "$(id -u)" = '0' ]; then chown -R peertube:peertube /data exec gosu peertube "$0" "$@" fi exec "$@"
Switch to use boost 1.80.0
mkdir third_party_sources ||: cd third_party_sources git clone --branch v1.2.9 https://github.com/madler/zlib.git wget https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.tar.gz tar zxvf boost_1_76_0.tar.gz mv boost_1_76_0 boost rm boost_1_76_0.tar.gz wget https://github.com/postgres/postgres/archive/REL9_6_2.tar.gz tar zxvf REL9_6_2.tar.gz mv postgres-REL9_6_2 libpq rm REL9_6_2.tar.gz
mkdir third_party_sources ||: cd third_party_sources git clone --branch v1.2.9 https://github.com/madler/zlib.git wget https://boostorg.jfrog.io/artifactory/main/release/1.80.0/source/boost_1_80_0.tar.gz tar zxvf boost_1_80_0.tar.gz mv boost_1_80_0 boost rm boost_1_80_0.tar.gz wget https://github.com/postgres/postgres/archive/REL9_6_2.tar.gz tar zxvf REL9_6_2.tar.gz mv postgres-REL9_6_2 libpq rm REL9_6_2.tar.gz
Set bash as default shell to get tab autocompletion working at SSH login
#!/bin/sh # Create user with given user name and add it the group of sudoers. # The password of the new user is disabled. # # Parameters: # $1 - string, user name of the new user # $2 - optional, string, name of the group of sudoers, defaults to 'sudo' # # Note: # If the user exists already, it is left unchanged. username="$1" sudoers="${2:-sudo}" if test -z "$(getent passwd "$username")" then echo "Create user $username with disabled password (default behavior)" sudo useradd --create-home --groups "$sudoers" "$username" else echo 'User enz exists already' fi
#!/bin/sh # Create user with given user name and add it the group of sudoers. # The password of the new user is disabled. # # Parameters: # $1 - string, user name of the new user # $2 - optional, string, name of the group of sudoers, defaults to 'sudo' # # Note: # If the user exists already, it is left unchanged. username="$1" sudoers="${2:-sudo}" if test -z "$(getent passwd "$username")" then echo "Create user $username with disabled password (default behavior)" sudo useradd --create-home --shell /bin/bash --groups "$sudoers" "$username" else echo 'User enz exists already' fi
Make cron job not fail because broken links
#!/bin/bash cd "$(dirname "${BASH_SOURCE[0]}")/.." \ || exit 1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ./.travis/is-master.sh \ || exit 0 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - gem install awesome_bot \ && find . -name "*.md" \ -not -path "./node_modules/*" \ -not -path "./dist/*" \ -not -path "./coverage/*" \ -exec awesome_bot \ --allow-dupe \ --allow-redirect \ --set-timeout 150 \ --white-list "example1.com,example2.com,example3.com,github.com/sonarwhal/" \ {} +; exitCode=$? node .travis/report-broken-links.js exit $exitCode
#!/bin/bash cd "$(dirname "${BASH_SOURCE[0]}")/.." \ || exit 1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ./.travis/is-master.sh \ || exit 0 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - gem install awesome_bot \ && find . -name "*.md" \ -not -path "./node_modules/*" \ -not -path "./dist/*" \ -not -path "./coverage/*" \ -exec awesome_bot \ --allow-dupe \ --allow-redirect \ --set-timeout 150 \ --white-list "example1.com,example2.com,example3.com,github.com/sonarwhal/" \ {} +; node .travis/report-broken-links.js
Fix error on CentOS 7
yum -y erase gtk2 libX11 hicolor-icon-theme freetype bitstream-vera-fonts yum -y clean all # Remove traces of mac address from network configuration sed -i /HWADDR/d /etc/sysconfig/network-scripts/ifcfg-eth0 rm -f /etc/udev/rules.d/70-persistent-net.rules
yum -y erase gtk2 libX11 hicolor-icon-theme freetype bitstream-vera-fonts yum -y clean all # Remove traces of mac address from network configuration sed -i /HWADDR/d /etc/sysconfig/network-scripts/ifcfg-eth0 rm -f /etc/udev/rules.d/70-persistent-net.rules # Since there is no MAC address anymore, we need to identify the card echo 'DEVICE="eth0"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
Fix Powerline font installer script
#!/bin/sh if [[ -f $HOME/.env/powerline ]]; then echo "Looks like Powerline fonts have already been installed. Bye." exit 0 fi echo "Installing Powerline fonts..." if [[ ! -d $HOME/fonts ]]; then git clone https://github.com/powerline/fonts.git --depth=1 # Run the installer: cd fonts; ./install.sh # Touch a file to say we are done: touch .env/powerline else echo "Powerline fonts are already downloaded. Did you run the installer? fi exit 0
#!/bin/sh if [[ -f $HOME/.env/powerline ]]; then echo "Looks like Powerline fonts have already been installed. Bye." exit 0 fi echo "Installing Powerline fonts..." if [[ ! -d $HOME/fonts ]]; then cd $HOME git clone https://github.com/powerline/fonts.git --depth=1 # Run the installer: cd fonts; ./install.sh; cd .. # Clean up: rm -rf $HOME/fonts # Touch a file to say we are done: touch .env/powerline else echo "Powerline fonts are already downloaded. Did you run the installer?" fi exit 0
Make sure errors get propogated.
#! /bin/sh # # Drive HTML generation for a Python manual. # # This is probably *not* useful outside of the standard Python documentation. # # The first arg is required and is the designation for which manual to build; # api, ext, lib, ref, or tut. All other args are passed on to latex2html. WORKDIR=`pwd` cd `dirname $0`/.. srcdir=`pwd` cd $WORKDIR part=$1; shift 1 TEXINPUTS=$srcdir/$part:$TEXINPUTS export TEXINPUTS if [ -d $part ] ; then rm -f $part/*.html fi echo "latex2html -init_file $srcdir/perl/l2hinit.perl -dir $part" \ "${1:+$@} $srcdir/$part/$part.tex" latex2html \ -init_file $srcdir/perl/l2hinit.perl \ -address '<hr>Send comments to <a href="mailto:python-docs@python.org">python-docs@python.org</a>.' \ -dir $part \ ${1:+$@} \ $srcdir/$part/$part.tex echo '(cd '$part'; '$srcdir'/tools/node2label.pl *.html)' cd $part $srcdir/tools/node2label.pl *.html
#! /bin/sh # # Drive HTML generation for a Python manual. # # This is probably *not* useful outside of the standard Python documentation, # but suggestions are welcome and should be sent to <python-docs@python.org>. # # The first arg is required and is the designation for which manual to build; # api, ext, lib, ref, or tut. All other args are passed on to latex2html. WORKDIR=`pwd` cd `dirname $0`/.. srcdir=`pwd` cd $WORKDIR part=$1; shift 1 TEXINPUTS=$srcdir/$part:$TEXINPUTS export TEXINPUTS if [ -d $part ] ; then rm -f $part/*.html fi echo "latex2html -init_file $srcdir/perl/l2hinit.perl -dir $part" \ "${1:+$@} $srcdir/$part/$part.tex" latex2html \ -init_file $srcdir/perl/l2hinit.perl \ -address '<hr>Send comments to <a href="mailto:python-docs@python.org">python-docs@python.org</a>.' \ -dir $part \ ${1:+$@} \ $srcdir/$part/$part.tex || exit $? echo "(cd $part; $srcdir/tools/node2label.pl \*.html)" cd $part $srcdir/tools/node2label.pl *.html || exit $?
Use usleep for waiting 10sec
#!/bin/bash sleep --help sleep 10s if curl http://localhost:3000 | grep -q '<app-root></app-root>'; then echo -e "\e[32mSmoke test passed!\e[0m" exit 0 else echo -e "\e[31mSmoke test failed!\e[0m" exit 1 fi
#!/bin/bash usleep 10000 if curl http://localhost:3000 | grep -q '<app-root></app-root>'; then echo -e "\e[32mSmoke test passed!\e[0m" exit 0 else echo -e "\e[31mSmoke test failed!\e[0m" exit 1 fi
Allow passing arguments to tox.
#!/bin/sh THIS_PATH=$0 if [ `expr $0 : '\/'` = 0 ]; then THIS_PATH="`pwd`/$THIS_PATH"; fi THIS_DIR="`dirname $THIS_PATH`" cd $THIS_DIR PYTHONPATH=$THIS_DIR export PYTHONPATH tox
#!/bin/sh THIS_PATH=$0 if [ `expr $0 : '\/'` = 0 ]; then THIS_PATH="`pwd`/$THIS_PATH"; fi THIS_DIR="`dirname $THIS_PATH`" cd $THIS_DIR PYTHONPATH=$THIS_DIR export PYTHONPATH exec tox ${1+"${@}"}
Install pip in galaxy virtualenv environment
cd /usr/local/galaxy/work/galaxy_in_docker_custom_bit_wf/setup_scripts python bit-workflow_install_docker.py
cd /usr/local/galaxy/work/galaxy_in_docker_custom_bit_wf/setup_scripts source /usr/local/galaxy/galaxy-dist/.venv/bin/activate; pip install python-dateutil bioblend pandas grequests GitPython pip-tools python bit-workflow_install_docker.py
Use build instead of install
#/bin/bash # Set the GOPATH if it is not set or if is different when running as another user (sudo) # export GOPATH="" # Linux export GOOS="linux" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install # Windows export GOOS="windows" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install # Darwin export GOOS="darwin" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go install
#/bin/bash # Set the GOPATH if it is not set or if is different when running as another user (sudo) # export GOPATH="" PATH_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" PATH_BUILD="$GOPATH/bin/dbf_release" mkdir -p $PATH_BUILD # Linux export GOOS="linux" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/linux_64" export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/linux_32" # Windows export GOOS="windows" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/win_64" export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/win_32" # Darwin export GOOS="darwin" export GOARCH="amd64" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/mac_64" export GOARCH="386" echo "Compiling... (OS: $GOOS, ARCH: $GOARCH)" go build -o "$PATH_BUILD/mac_32"
Add PYTHONPATH to travis test script
#!/usr/bin/env bash set -eux python_version=$(python --version 2>&1 | cut -d ' ' -f2) db_file=geoip_${python_version}.mmdb python maxmindupdater/__main__.py $db_file $LICENSE_KEY GeoIP2-Country # Ensure all python versions produce the same result (and that they actually produced a result) for other_db_file in geoip_*.mmdb ; do cmp $db_file $other_db_file done
#!/usr/bin/env bash set -eux python_version=$(python --version 2>&1 | cut -d ' ' -f2) db_file=geoip_${python_version}.mmdb PYTHONPATH=`pwd` python maxmindupdater/__main__.py $db_file $LICENSE_KEY GeoIP2-Country # Ensure all python versions produce the same result (and that they actually produced a result) for other_db_file in geoip_*.mmdb ; do cmp $db_file $other_db_file done
Hide Jenkins's 8080 port. Expose Jenkins's 50000 port.
#!/bin/bash set -e JENKINS_NAME=${JENKINS_NAME:-jenkins-master} GERRIT_NAME=${GERRIT_NAME:-gerrit} JENKINS_IMAGE_NAME=${JENKINS_IMAGE_NAME:-openfrontier/jenkins} LOCAL_VOLUME=~/jenkins_volume${SUFFIX} JENKINS_OPTS=${JENKINS_OPTS:---prefix=/jenkins} mkdir -p "${LOCAL_VOLUME}" docker run --name ${JENKINS_NAME} --link ${GERRIT_NAME}:gerrit -p 8088:8080 -v ${LOCAL_VOLUME}:/var/jenkins_home -d ${JENKINS_IMAGE_NAME} ${JENKINS_OPTS}
#!/bin/bash set -e JENKINS_NAME=${JENKINS_NAME:-jenkins-master} GERRIT_NAME=${GERRIT_NAME:-gerrit} JENKINS_IMAGE_NAME=${JENKINS_IMAGE_NAME:-openfrontier/jenkins} LOCAL_VOLUME=~/jenkins_volume${SUFFIX} JENKINS_OPTS=${JENKINS_OPTS:---prefix=/jenkins} mkdir -p "${LOCAL_VOLUME}" docker run --name ${JENKINS_NAME} --link ${GERRIT_NAME}:gerrit -p 50000:50000 -v ${LOCAL_VOLUME}:/var/jenkins_home -d ${JENKINS_IMAGE_NAME} ${JENKINS_OPTS}
Use minimal config for generating the help file
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. sledge=$PWD/cli/sledge_cli.exe line=$(\ $sledge help -recursive -expand \ | grep -n "== subcommands ===" \ | cut -d : -f1,1) line=$(($line+1)) $sledge help -recursive | sed -e "s/sledge_cli.exe/sledge/g" $sledge h -r -e \ | tail -n +$line \ | sed -e "/^$/d;s/ \(.*\) .*/\1/g" \ | while read cmd; do \ printf "\n====== sledge $cmd ======\n\n"; \ $sledge $cmd -help | sed -e "s/sledge_cli.exe/sledge/g"; \ done
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. sledge=$PWD/cli/sledge_cli.exe export SLEDGE_CONFIG=$PWD/../../test/config line=$(\ $sledge help -recursive -expand \ | grep -n "== subcommands ===" \ | cut -d : -f1,1) line=$(($line+1)) $sledge help -recursive | sed -e "s/sledge_cli.exe/sledge/g" $sledge h -r -e \ | tail -n +$line \ | sed -e "/^$/d;s/ \(.*\) .*/\1/g" \ | while read cmd; do \ printf "\n====== sledge $cmd ======\n\n"; \ $sledge $cmd -help | sed -e "s/sledge_cli.exe/sledge/g"; \ done
Update PHP version and modules to install
#!/usr/bin/env bash function setup_test_requirements() { readonly LOG_FILE="/opt/easyengine/logs/install.log" # Adding software-properties-common for add-apt-repository. apt-get install -y software-properties-common # Adding ondrej/php repository for installing php, this works for all ubuntu flavours. add-apt-repository -y ppa:ondrej/php apt-get update # Installing php-cli, which is the minimum requirement to run EasyEngine apt-get -y install php7.2-cli php_modules=( pcntl curl sqlite3 ) if command -v php > /dev/null 2>&1; then # Reading the php version. default_php_version="$(readlink -f /usr/bin/php | gawk -F "php" '{ print $2}')" for module in "${php_modules[@]}"; do if ! php -m | grep $module >> $LOG_FILE 2>&1; then echo "$module not installed. Installing..." apt install -y php$default_php_version-$module else echo "$module is already installed" fi done fi } setup_test_requirements
#!/usr/bin/env bash function setup_test_requirements() { readonly LOG_FILE="/opt/easyengine/logs/install.log" # Adding software-properties-common for add-apt-repository. apt-get install -y software-properties-common # Adding ondrej/php repository for installing php, this works for all ubuntu flavours. add-apt-repository -y ppa:ondrej/php apt-get update # Installing php-cli, which is the minimum requirement to run EasyEngine apt-get -y install php7.3-cli php_modules=( pcntl curl sqlite3 zip ) if command -v php > /dev/null 2>&1; then # Reading the php version. default_php_version="$(readlink -f /usr/bin/php | gawk -F "php" '{ print $2}')" for module in "${php_modules[@]}"; do if ! php -m | grep $module >> $LOG_FILE 2>&1; then echo "$module not installed. Installing..." apt install -y php$default_php_version-$module else echo "$module is already installed" fi done fi } setup_test_requirements
Exit 1 not Exit 0
#!/bin/bash # These tests will bring up a single-node Kubernetes cluster and test against examples # WARNING: This will actively create Docker containers on your machine as well as remove them # do not run this on a production cluster / machine. # Check requirements! if ! hash go 2>/dev/null; then echo "ERROR: go required" exit 0 fi if ! hash docker 2>/dev/null; then echo "ERROR: docker required" exit 0 fi if ! hash kubectl 2>/dev/null; then echo "ERROR: kubectl required" exit 0 fi # First off, we have to compile the latest binary make bin ##################### # KUBERNETES TESTS ## ##################### # Now we can start our Kubernetes cluster! ./script/test_ci/kubernetes.sh start # And we're off! Let's test those example files ./script/test_ci/kubernetes.sh test # Stop our Kubernetes cluster ./script/test_ci/kubernetes.sh stop
#!/bin/bash # These tests will bring up a single-node Kubernetes cluster and test against examples # WARNING: This will actively create Docker containers on your machine as well as remove them # do not run this on a production cluster / machine. # Check requirements! if ! hash go 2>/dev/null; then echo "ERROR: go required" exit 1 fi if ! hash docker 2>/dev/null; then echo "ERROR: docker required" exit 1 fi if ! hash kubectl 2>/dev/null; then echo "ERROR: kubectl required" exit 1 fi # First off, we have to compile the latest binary make bin ##################### # KUBERNETES TESTS ## ##################### # Now we can start our Kubernetes cluster! ./script/test_ci/kubernetes.sh start # And we're off! Let's test those example files ./script/test_ci/kubernetes.sh test # Stop our Kubernetes cluster ./script/test_ci/kubernetes.sh stop
Write keys from env to .s3cfg.
#!/bin/sh cd `dirname $0` node update.js node liquor.js node render.js ./node_modules/s3-cli/cli.js sync -P --region 'us-west-2' --default-mime-type 'text/html' out/ s3://www.belltowncrime.com/ ./node_modules/s3-cli/cli.js put -P --region 'us-west-2' --default-mime-type 'text/css' main.css s3://www.belltowncrime.com/main.css
#!/bin/sh cd `dirname $0` node update.js node liquor.js node render.js cat > ~/.s3cfg << EOF [default] access_key = $AWS_KEY_ID secret_key = $AWS_SECRET_ACCESS_KEY EOF ./node_modules/s3-cli/cli.js sync -P --region 'us-west-2' --default-mime-type 'text/html' out/ s3://www.belltowncrime.com/ ./node_modules/s3-cli/cli.js put -P --region 'us-west-2' --default-mime-type 'text/css' main.css s3://www.belltowncrime.com/main.css
Add chown to make sure permissions are ok
#!/bin/bash sed -i \ -e 's/ErrorLog.*/ErrorLog \/dev\/stderr/' \ -e 's/CustomLog.*/CustomLog \/dev\/stdout combined/' \ /etc/apache2/sites-available/000-default.conf sed -i \ -e 's/AllowOverride None/AllowOverride All/' \ /etc/apache2/apache2.conf sed -i \ -e 's/log_errors = On/log_errors = Off/' \ -e 's/display_errors = Off/display_errors = On/' \ -e 's/display_startup_errors = Off/display_startup_errors = On/' \ /etc/php5/apache2/php.ini # enable mcrypt ln -fs ../../mods-available/mcrypt.ini /etc/php5/apache2/conf.d/20-mcrypt.ini ln -fs ../../mods-available/mcrypt.ini /etc/php5/cli/conf.d/20-mcrypt.ini source /etc/apache2/envvars set -m trap 'kill $(jobs -p) && wait' EXIT /usr/sbin/apache2 -DFOREGROUND & wait
#!/bin/bash sed -i \ -e 's/ErrorLog.*/ErrorLog \/dev\/stderr/' \ -e 's/CustomLog.*/CustomLog \/dev\/stdout combined/' \ /etc/apache2/sites-available/000-default.conf sed -i \ -e 's/AllowOverride None/AllowOverride All/' \ /etc/apache2/apache2.conf sed -i \ -e 's/log_errors = On/log_errors = Off/' \ -e 's/display_errors = Off/display_errors = On/' \ -e 's/display_startup_errors = Off/display_startup_errors = On/' \ /etc/php5/apache2/php.ini # enable mcrypt ln -fs ../../mods-available/mcrypt.ini /etc/php5/apache2/conf.d/20-mcrypt.ini ln -fs ../../mods-available/mcrypt.ini /etc/php5/cli/conf.d/20-mcrypt.ini # make sure that file permissions are ok chown www-data:www-data -R /var/www/* source /etc/apache2/envvars set -m trap 'kill $(jobs -p) && wait' EXIT /usr/sbin/apache2 -DFOREGROUND & wait
Make container exit after generating new configuration, as noted in README
#! /bin/sh rm -rf /var/run/* rm -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" mkdir -p /var/run/dbus chown messagebus:messagebus /var/run/dbus dbus-uuidgen --ensure dbus-daemon --system --fork sleep 1 avahi-daemon -D sleep 1 HOME=/config start_pms & sleep 5 tail -f /config/Library/Application\ Support/Plex\ Media\ Server/Logs/**/*.log
#! /bin/sh rm -rf /var/run/* rm -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" mkdir -p /var/run/dbus chown messagebus:messagebus /var/run/dbus dbus-uuidgen --ensure dbus-daemon --system --fork sleep 1 avahi-daemon -D sleep 1 if [ -f /config/Library/Application\ Support/Plex\ Media\ Server/Preferences.xml ]; then echo "$(date -R) Starting Plex" HOME=/config start_pms tail -F /config/Library/Application\ Support/Plex\ Media\ Server/Logs/**/*.log else echo "$(date -R) Starting Plex and generating new configuration" HOME=/config start_pms & while [ ! -f /config/Library/Application\ Support/Plex\ Media\ Server/Preferences.xml ]; do sleep 1 echo "$(date -R) waiting..." done echo "$(date -R) Configuration generated. Please edit" echo "$(date -R) Library/Application Support/Plex Media Server/Preferences.xml" echo "$(date -R) and add allowedNetworks parameter, then restart the container." exit 0 fi
Remove extra empty line after "Build YouCompleteMe"
#!/bin/bash DOTVIM="${DOTFILES_CURRENT_SOURCE_DIR}/.vim" BUNDLE="${DOTVIM}/bundle" # Create .vim folder if needed [ -d "$DOTVIM" ] && mkdir -p $DOTVIM # Install vim vundle dotfiles_install_remote_component GITHUB VundleVim/Vundle.vim ".vim/bundle/Vundle.vim" # Install vim plugins vim +PluginInstall +qall # Install config dotfiles_install_component .vim $HOME/.vim dotfiles_install_component .vimrc $HOME/.vimrc # Build YouCompleteMe if [[ -z $(find "${BUNDLE}/YouCompleteMe/third_party/ycmd" -name "libclang.*") ]]; then print_info COMPONENT "Building YouCompleteMe.vim" python2 $BUNDLE/YouCompleteMe/install.py --clang-completer else print_info COMPONENT "YouCompleteMe.vim already built" fi
#!/bin/bash DOTVIM="${DOTFILES_CURRENT_SOURCE_DIR}/.vim" BUNDLE="${DOTVIM}/bundle" # Create .vim folder if needed [ -d "$DOTVIM" ] && mkdir -p $DOTVIM # Install vim vundle dotfiles_install_remote_component GITHUB VundleVim/Vundle.vim ".vim/bundle/Vundle.vim" # Install vim plugins vim +PluginInstall +qall # Install config dotfiles_install_component .vim $HOME/.vim dotfiles_install_component .vimrc $HOME/.vimrc # Build YouCompleteMe if [[ -z $(find "${BUNDLE}/YouCompleteMe/third_party/ycmd" -name "libclang.*") ]]; then print_info COMPONENT "Building YouCompleteMe.vim" python2 $BUNDLE/YouCompleteMe/install.py --clang-completer else print_info COMPONENT "YouCompleteMe.vim already built" fi
Add alias to install common node globals
alias npr="npm -s run" alias ntr="npm -s run test --" alias ntw="npm -s run test -- --watch" alias lnm="find . -name "node_modules" -type d -prune" alias nnm="find . -name "node_modules" -type d -prune -exec rm -rf '{}' +" alias nnmi="nnm && npm i" alias npv="node -p \"require('./package.json').version\"" alias babel-nodemon="nodemon --exec babel-node -- "
alias npr="npm -s run" alias ntr="npm -s run test --" alias ntw="npm -s run test -- --watch" alias lnm="find . -name "node_modules" -type d -prune" alias nnm="find . -name "node_modules" -type d -prune -exec rm -rf '{}' +" alias nnmi="nnm && npm i" alias npv="node -p \"require('./package.json').version\"" alias babel-nodemon="nodemon --exec babel-node -- " alias npmiglobals='npm i -g nodemon bunyan jira-cl git-branch-select git-commits-since'
Add missing `usr` to CDT path
#!/bin/bash mkdir -p ${PREFIX}/x86_64-conda_cos6-linux-gnu/sysroot pushd ${PREFIX}/x86_64-conda_cos6-linux-gnu/sysroot > /dev/null 2>&1 cp -Rf "${SRC_DIR}"/binary/* .
#!/bin/bash mkdir -p ${PREFIX}/x86_64-conda_cos6-linux-gnu/sysroot/usr pushd ${PREFIX}/x86_64-conda_cos6-linux-gnu/sysroot/usr > /dev/null 2>&1 cp -Rf "${SRC_DIR}"/binary/* .
Reduce memory limit to 50 percent
#!/bin/bash /opt/envsubst < /envsubst_template.json > /conf/runtime.json exec java -XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0 -jar /git-bridge.jar /conf/runtime.json
#!/bin/bash /opt/envsubst < /envsubst_template.json > /conf/runtime.json exec java -XX:+UseContainerSupport -XX:MaxRAMPercentage=50.0 -jar /git-bridge.jar /conf/runtime.json
Include a column for Raspberry Pi revision
#!/usr/bin/env bash set -o errexit set -o pipefail set -o nounset script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" . $script_dir/config.sh mkdir -p $output_dir echo Sending output to $output_file # Write headers to output file, if it does not yet exist if [[ ! -f $output_file ]] ; then echo Creating $output_file $script_dir/speedtest-cli-extras/bin/speedtest-csv --header > $output_file fi # TODO Read https://www.raspberrypi.org/learning/networking-lessons/lessons/ # TODO Include a column for hostname - so I can use hostname to indicate location # TODO Include a column for connection type - WiFi or Ethernet # TODO Include a column for Raspberry Pi type # For WiFi: iwgetid --raw ... gives ESSID: # iwconfig ... gives more info # For Ethernet: ifconfig eth0 # ifconfig -s #SEE ALSO # route(8), netstat(8), arp(8), rarp(8), iptables(8), ifup(8), interfaces(5). # http://physics.nist.gov/cuu/Units/binary.html - Prefixes for binary multiples $script_dir/speedtest-cli-extras/bin/speedtest-csv --no-share >> $output_file
#!/usr/bin/env bash set -o errexit set -o pipefail set -o nounset script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" . $script_dir/config.sh mkdir -p $output_dir echo Sending output to $output_file # Write headers to output file, if it does not yet exist if [[ ! -f $output_file ]] ; then echo Creating $output_file (echo -ne "pi_revision\\t"; $script_dir/speedtest-cli-extras/bin/speedtest-csv --header ) > $output_file fi # TODO Read https://www.raspberrypi.org/learning/networking-lessons/lessons/ # TODO Include a column for hostname - so I can use hostname to indicate location # TODO Include a column for connection type - WiFi or Ethernet # For WiFi: iwgetid --raw ... gives ESSID: # iwconfig ... gives more info # For Ethernet: ifconfig eth0 # ifconfig -s #SEE ALSO # route(8), netstat(8), arp(8), rarp(8), iptables(8), ifup(8), interfaces(5). # http://physics.nist.gov/cuu/Units/binary.html - Prefixes for binary multiples # For meaning of pi_revision values, see https://elinux.org/RPi_HardwareHistory pi_revision=`cat /proc/cpuinfo | grep 'Revision' | awk '{print $3}' | sed 's/^1000//'` (echo -ne "$pi_revision\\t"; $script_dir/speedtest-cli-extras/bin/speedtest-csv --no-share ) >> $output_file
Update device ips in tmux-scripts
#!/bin/zsh tmux new-session -s devices -n "devices" -d tmux split-window -h -p 50 tmux send-keys "ping 10.0.0.13" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.30" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.31" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.33" Enter tmux split-window -v -p 70 tmux send-keys "ping 10.0.0.33" Enter tmux split-window -v tmux send-keys "ping 10.0.0.34" Enter tmux select-pane -t 1 tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.35" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.36" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.37" Enter tmux split-window -v -p 70 tmux send-keys "ping 10.0.0.38" Enter tmux split-window -v tmux send-keys "ping 10.0.0.39" Enter tmux select-window -t devices:1 tmux a -t devices
#!/bin/zsh tmux new-session -s devices -n "devices" -d tmux split-window -h -p 50 tmux send-keys "ping 10.0.0.40" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.41" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.42" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.43" Enter tmux split-window -v -p 70 tmux send-keys "ping 10.0.0.44" Enter tmux split-window -v tmux send-keys "ping 10.0.0.45" Enter tmux select-pane -t 1 tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.46" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.47" Enter tmux split-window -v -p 80 tmux send-keys "ping 10.0.0.48" Enter tmux split-window -v -p 70 tmux send-keys "ping 10.0.0.49" Enter tmux select-window -t devices:1 tmux a -t devices
Add "cabal update" to the script
#!/usr/bin/env bash set -e cabal sandbox init git clone https://github.com/elm-lang/elm-compiler.git cd elm-compiler git checkout tags/0.15.1 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-package.git cd elm-package git checkout tags/0.5.1 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-make.git cd elm-make git checkout tags/0.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-reactor.git cd elm-reactor git checkout tags/0.3.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-repl.git cd elm-repl git checkout tags/0.4.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd ..
#!/usr/bin/env bash set -e cabal update cabal sandbox init git clone https://github.com/elm-lang/elm-compiler.git cd elm-compiler git checkout tags/0.15.1 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-package.git cd elm-package git checkout tags/0.5.1 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-make.git cd elm-make git checkout tags/0.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-reactor.git cd elm-reactor git checkout tags/0.3.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd .. git clone https://github.com/elm-lang/elm-repl.git cd elm-repl git checkout tags/0.4.2 --quiet cabal sandbox init --sandbox .. cabal install -j cd ..
Add exit if variables not set
#!/bin/sh EXECUTORS=${EXECUTORS:-1} NAME=${NAME:-client-$(hostname)} mkdir -p /var/jenkins java -jar swarm-client.jar -fsroot /var/jenkins -executors ${EXECUTORS} -labels ${LABELS} -name ${NAME} -master ${MASTER} -username ${USER_NAME} -password ${USER_PASSWORD}
#!/bin/sh if [[ -z "${LABELS}" ]]; then echo "LABELS must be set." exit 1 fi if [[ -z "${MASTER}" ]]; then echo "MASTER must be set." exit 1 fi if [[ -z "${USER_NAME}" ]]; then echo "USER_NAME must be set." exit 1 fi if [[ -z "${USER_PASSWORD}" ]]; then echo "USER_PASSWORD must be set." exit 1 fi EXECUTORS=${EXECUTORS:-1} NAME=${NAME:-client-$(hostname)} mkdir -p /var/jenkins java -jar swarm-client.jar -fsroot /var/jenkins -executors ${EXECUTORS} -labels ${LABELS} -name ${NAME} -master ${MASTER} -username ${USER_NAME} -password ${USER_PASSWORD}
Add TODO note for format
#!/bin/sh -e # Format output of alias print dump into more readable format cat ${HOME}/.zshrc | grep -e '^alias' | awk -F'=' 'BEGIN { print "ALIAS | COMMAND"; print "---------------------------------------"; } { baselen = 10; fieldlen = length($1); tablen = baselen - fieldlen; printf " " $1 for (i = 0; i < tablen; i++) printf " " printf " | " \ "\t" $2 \ "\n" } END { print "---------------------------------------"; }'
#!/bin/sh -e # Format output of alias print dump into more readable format cat ${HOME}/.zshrc | grep -e '^alias' | awk -F'=' 'BEGIN { print "ALIAS | COMMAND"; print "---------------------------------------"; } { # TODO # Specify length for both columns ($1, $2) # baselen = 10; fieldlen = length($1); tablen = baselen - fieldlen; printf " " $1 for (i = 0; i < tablen; i++) printf " " printf " | " \ "\t" $2 \ "\n" } END { print "---------------------------------------"; }'
Fix some corner cases in test script
#!/bin/bash if [ -z "$MAMBO_PATH" ]; then MAMBO_PATH=/opt/ibm/systemsim-p8/ fi if [ -z "$MAMBO_BINARY" ]; then MAMBO_BINARY="/run/pegasus/power8" fi if [ ! -x "$MAMBO_PATH/$MAMBO_BINARY" ]; then echo 'Could not find executable MAMBO_BINARY. Skipping hello_world test'; exit 0; fi if [ ! -x `which expect` ]; then echo 'Could not find expect binary. Skipping hello_world test'; exit 0; fi export SKIBOOT_ZIMAGE=`pwd`/test/hello_world/hello_kernel/hello_kernel # Currently getting some core dumps from mambo, so disable them! OLD_ULIMIT_C=`ulimit -c` ulimit -c 0 ( cd external/mambo; cat <<EOF | expect set timeout 30 spawn $MAMBO_PATH/$MAMBO_BINARY -n -f skiboot.tcl expect { timeout { send_user "\nTimeout waiting for hello world\n"; exit 1 } eof { send_user "\nUnexpected EOF\n;" exit 1 } "Hello World!" } close exit 0 EOF ) ulimit -c $OLD_ULIMIT_C echo exit 0;
#!/bin/bash if [ -z "$MAMBO_PATH" ]; then MAMBO_PATH=/opt/ibm/systemsim-p8/ fi if [ -z "$MAMBO_BINARY" ]; then MAMBO_BINARY="/run/pegasus/power8" fi if [ ! -x "$MAMBO_PATH/$MAMBO_BINARY" ]; then echo 'Could not find executable MAMBO_BINARY. Skipping hello_world test'; exit 0; fi if [ -n "$KERNEL" ]; then echo 'Please rebuild skiboot without KERNEL set. Skipping hello_world test'; exit 0; fi if [ ! `command -v expect` ]; then echo 'Could not find expect binary. Skipping hello_world test'; exit 0; fi export SKIBOOT_ZIMAGE=`pwd`/test/hello_world/hello_kernel/hello_kernel # Currently getting some core dumps from mambo, so disable them! OLD_ULIMIT_C=`ulimit -c` ulimit -c 0 ( cd external/mambo; cat <<EOF | expect set timeout 30 spawn $MAMBO_PATH/$MAMBO_BINARY -n -f skiboot.tcl expect { timeout { send_user "\nTimeout waiting for hello world\n"; exit 1 } eof { send_user "\nUnexpected EOF\n;" exit 1 } "Hello World!" } close exit 0 EOF ) ulimit -c $OLD_ULIMIT_C echo exit 0;
Use `latest` docker images instead of `sdk` tag
#!/bin/bash if [[ $DOCKER_IMAGE == *"i386"* ]] then ENTRYPOINT=linux32 COMMAND=bin/bash else ENTRYPOINT=bin/bash COMMAND= fi if [[ $DOCKER_IMAGE == *"rpm"* ]] then docker run -e INTEGRATIONS_REPO=$INTEGRATIONS_REPO -e INTEGRATION=$INTEGRATION -e VERSION=$VERSION -e BUILD_ITERATION=$BUILD_ITERATION -e RPM_SIGNING_PASSPHRASE=$RPM_SIGNING_PASSPHRASE -e OMNIBUS_BRANCH=$OMNIBUS_BRANCH -e OMNIBUS_RUBY_BRANCH=$OMNIBUS_RUBY_BRANCH -v /home/ubuntu/keys:/keys -v /home/ubuntu/pkg:/dd-agent-omnibus/pkg --entrypoint $ENTRYPOINT $DOCKER_IMAGE:sdk $COMMAND -l /dd-agent-omnibus/build_integration.sh else docker run -e INTEGRATIONS_REPO=$INTEGRATIONS_REPO -e INTEGRATION=$INTEGRATION -e VERSION=$VERSION -e BUILD_ITERATION=$BUILD_ITERATION -e OMNIBUS_BRANCH=$OMNIBUS_BRANCH -e OMNIBUS_RUBY_BRANCH=$OMNIBUS_RUBY_BRANCH -v /home/ubuntu/pkg:/dd-agent-omnibus/pkg --entrypoint $ENTRYPOINT $DOCKER_IMAGE:sdk $COMMAND -l /dd-agent-omnibus/build_integration.sh fi
#!/bin/bash if [[ $DOCKER_IMAGE == *"i386"* ]] then ENTRYPOINT=linux32 COMMAND=bin/bash else ENTRYPOINT=bin/bash COMMAND= fi if [[ $DOCKER_IMAGE == *"rpm"* ]] then docker run -e INTEGRATIONS_REPO=$INTEGRATIONS_REPO -e INTEGRATION=$INTEGRATION -e VERSION=$VERSION -e BUILD_ITERATION=$BUILD_ITERATION -e RPM_SIGNING_PASSPHRASE=$RPM_SIGNING_PASSPHRASE -e OMNIBUS_BRANCH=$OMNIBUS_BRANCH -e OMNIBUS_RUBY_BRANCH=$OMNIBUS_RUBY_BRANCH -v /home/ubuntu/keys:/keys -v /home/ubuntu/pkg:/dd-agent-omnibus/pkg --entrypoint $ENTRYPOINT $DOCKER_IMAGE:latest $COMMAND -l /dd-agent-omnibus/build_integration.sh else docker run -e INTEGRATIONS_REPO=$INTEGRATIONS_REPO -e INTEGRATION=$INTEGRATION -e VERSION=$VERSION -e BUILD_ITERATION=$BUILD_ITERATION -e OMNIBUS_BRANCH=$OMNIBUS_BRANCH -e OMNIBUS_RUBY_BRANCH=$OMNIBUS_RUBY_BRANCH -v /home/ubuntu/pkg:/dd-agent-omnibus/pkg --entrypoint $ENTRYPOINT $DOCKER_IMAGE:latest $COMMAND -l /dd-agent-omnibus/build_integration.sh fi
Add flow to test script
#!/usr/bin/env bash LINT_FAIL= TEST_FAIL= BUILD_FAIL= function command_exists { local result result="$(node -p "'$1' in require('./package.json').scripts")" if [[ "$result" == "true" ]]; then return 0 fi return 1 } function run_script_in_packages { for pkg in packages/*; do if [[ ! -f "$pkg/package.json" ]]; then continue fi pushd "$pkg" > /dev/null local SKIP=false command_exists "$1" || { echo "Package $(basename "$PWD") has no script '$1'. Skipping..." SKIP=true } if [[ "$SKIP" != "true" ]]; then yarn run "$1" || echo "'$1' script failed in '$(basename "$PWD")'" fi popd > /dev/null done } run_script_in_packages build run_script_in_packages lint run_script_in_packages test if [[ ! -z "$TEST_FAIL" ]]; then echo Tests failed. BUILD_FAIL=1 fi if [[ ! -z "$LINT_FAIL" ]]; then echo Lint failed. BUILD_FAIL=1 fi if [[ ! -z "$BUILD_FAIL" ]]; then exit 1 fi
#!/usr/bin/env bash LINT_FAIL= TEST_FAIL= BUILD_FAIL= function command_exists { local result result="$(node -p "'$1' in require('./package.json').scripts")" if [[ "$result" == "true" ]]; then return 0 fi return 1 } function run_script_in_packages { for pkg in packages/*; do if [[ ! -f "$pkg/package.json" ]]; then continue fi pushd "$pkg" > /dev/null local SKIP=false command_exists "$1" || { echo "Package $(basename "$PWD") has no script '$1'. Skipping..." SKIP=true } if [[ "$SKIP" != "true" ]]; then yarn run "$1" || echo "'$1' script failed in '$(basename "$PWD")'" fi popd > /dev/null done } run_script_in_packages build run_script_in_packages lint run_script_in_packages flow run_script_in_packages test if [[ ! -z "$TEST_FAIL" ]]; then echo Tests failed. BUILD_FAIL=1 fi if [[ ! -z "$LINT_FAIL" ]]; then echo Lint failed. BUILD_FAIL=1 fi if [[ ! -z "$FLOW_FAIL" ]]; then echo Type checks failed. BUILD_FAIL=1 fi if [[ ! -z "$BUILD_FAIL" ]]; then exit 1 fi
Use 'mainline' docker for armv6 and the cached package for armv7. Strange thing.
# Args: "$@" == the minimum packages to install, e.g. docker, git os_install(){ # Update the system and use pacman to install all the packages # The two commands may be combined, but I leave it as is for now. os_upgrade # pacman -S $@ --noconfirm --needed # Install this manually, docker v1.7.1 pacman -S bridge-utils iproute2 device-mapper sqlite git curl -sSL https://s3.amazonaws.com/docker-armv7/docker-1:1.7.1-2-armv7h.pkg.tar.xz > /var/cache/pacman/pkg/docker-1:1.7.1-2-armv7h.pkg.tar.xz pacman -U /var/cache/pacman/pkg/docker-1:1.7.1-2-armv7h.pkg.tar.xz # Add more commands here, archlinux specific } os_upgrade(){ pacman -Syu --noconfirm }
# Args: "$@" == the minimum packages to install, e.g. docker, git os_install(){ # Update the system and use pacman to install all the packages # The two commands may be combined, but I leave it as is for now. os_upgrade # pacman -S $@ --noconfirm --needed if [[ $MACHINE == "rpi" ]]; then pacman -S $@ --noconfirm --needed else # for armv7 # Install this manually, docker v1.7.1 pacman -S bridge-utils iproute2 device-mapper sqlite git --noconfirm --needed curl -sSL https://s3.amazonaws.com/docker-armv7/docker-1:1.7.1-2-armv7h.pkg.tar.xz > /var/cache/pacman/pkg/docker-1:1.7.1-2-armv7h.pkg.tar.xz pacman -U /var/cache/pacman/pkg/docker-1:1.7.1-2-armv7h.pkg.tar.xz --noconfirm fi # Add more commands here, archlinux specific } os_upgrade(){ pacman -Syu --noconfirm }
Use Service Account Key in destroy_bosh script
#!/bin/sh -e [ -z "$DEBUG" ] || set -x . "$(dirname "$0")/lib/environment.sh" set -x export BOSH_LOG_LEVEL=debug export BOSH_LOG_PATH="${KUBO_DEPLOYMENT_DIR}/bosh.log" bosh-cli int kubo-lock/metadata --path=/gcp_service_account > "$PWD/key.json" cp "kubo-lock/metadata" "${KUBO_ENVIRONMENT_DIR}/director.yml" cp "$PWD/s3-bosh-creds/creds.yml" "${KUBO_ENVIRONMENT_DIR}" cp "$PWD/s3-bosh-state/state.json" "${KUBO_ENVIRONMENT_DIR}" "${KUBO_DEPLOYMENT_DIR}/bin/destroy_bosh" "${KUBO_ENVIRONMENT_DIR}"
#!/bin/sh -e [ -z "$DEBUG" ] || set -x . "$(dirname "$0")/lib/environment.sh" set -x export BOSH_LOG_LEVEL=debug export BOSH_LOG_PATH="${KUBO_DEPLOYMENT_DIR}/bosh.log" bosh-cli int kubo-lock/metadata --path=/gcp_service_account > "$PWD/key.json" cp "kubo-lock/metadata" "${KUBO_ENVIRONMENT_DIR}/director.yml" cp "$PWD/s3-bosh-creds/creds.yml" "${KUBO_ENVIRONMENT_DIR}" cp "$PWD/s3-bosh-state/state.json" "${KUBO_ENVIRONMENT_DIR}" "${KUBO_DEPLOYMENT_DIR}/bin/destroy_bosh" "${KUBO_ENVIRONMENT_DIR}" "$PWD/key.json"
Make the build fail if a sub-component fails to build
#!/usr/bin/env bash declare -a repositories=("zipkin4net" "zipkin4net-aspnetcore") for i in "${repositories[@]}"; do if [ -d $i ]; then pushd $i ./buildAndTest.sh popd else echo "Can't build $i" exit 4 fi done
#!/usr/bin/env bash declare -a repositories=("zipkin4net" "zipkin4net-aspnetcore") for i in "${repositories[@]}"; do if [ -d $i ]; then pushd $i ./buildAndTest.sh if [ $? -ne 0 ]; then echo "Compilation of $i failed, exiting" exit 1 fi popd else echo "Can't build $i" exit 4 fi done
Change order of application of cumulus repository
echo "yes" | sudo add-apt-repository ppa:avsm/ocaml41+opam12 sudo apt-get update -qq sudo apt-get install -qq ocaml ocaml-native-compilers camlp4-extra opam sudo apt-get install libpcre3-dev libssl-dev export OPAMYES=1 opam init eval `opam config env` case $OCSIGENSERVER3 in true) opam repository add cumulus https://github.com/Cumulus/opam-cumulus.git;; false) opam repository add cumulus https://github.com/Cumulus/opam-cumulus.git git clone -b 3.0.0-cohttp https://github.com/ocsigen/ocsigenserver.git cd ocsigenserver opam repository add 3.0.0-cohttp ./opam-3.0.0-cohttp/ opam update opam pin add --no-action ocsigenserver . cd .. ;; *) echo Unknown variable '$OCSIGENSERVER3': $OCSIGENSERVER3 esac opam install cumulus-deps make
echo "yes" | sudo add-apt-repository ppa:avsm/ocaml41+opam12 sudo apt-get update -qq sudo apt-get install -qq ocaml ocaml-native-compilers camlp4-extra opam sudo apt-get install libpcre3-dev libssl-dev export OPAMYES=1 opam init eval `opam config env` case $OCSIGENSERVER3 in true) opam repository add cumulus https://github.com/Cumulus/opam-cumulus.git;; false) git clone -b 3.0.0-cohttp https://github.com/ocsigen/ocsigenserver.git cd ocsigenserver opam repository add 3.0.0-cohttp ./opam-3.0.0-cohttp/ opam update opam pin add --no-action ocsigenserver . opam repository add cumulus https://github.com/Cumulus/opam-cumulus.git cd .. ;; *) echo Unknown variable '$OCSIGENSERVER3': $OCSIGENSERVER3 esac opam install cumulus-deps make
Remove some compile options for PPC
#!/bin/sh basepath="$(cd "$(dirname "$0")" && pwd)" builddir="$basepath/../build" mkdir -p "$builddir" cd "$builddir" cmake .. -G "Unix Makefiles" -DCMAKE_CXX_FLAGS:STRING="-march=native -m64 -Ofast -flto" "$@"
#!/bin/sh basepath="$(cd "$(dirname "$0")" && pwd)" builddir="$basepath/../build" mkdir -p "$builddir" cd "$builddir" cmake .. -G "Unix Makefiles" -DCMAKE_CXX_FLAGS:STRING="-Ofast" "$@"
Add location to script call
#!/bin/sh # Only forward if the pause file doesn't exist if [ ! -f ./pause ]; then python forwardNotif.py fi
#!/bin/bash # Only forward if the pause file doesn't exist if [ ! -f ${BASH_SOURCE[0]%/*}/pause ]; then python ${BASH_SOURCE[0]%/*}/forwardNotif.py fi
Add iptables rules, ntpdate setup
set -ex apt-get install ruby rubygems || yum install -y ruby rubygems || pkgin install ruby rubygems || false gem update --system gem install facter gem install puppet mkdir -p /etc/puppet /var/lib /var/log /var/run cat >/etc/puppet/puppet.conf <<EOF [main] logdir = /var/log/puppet rundir = /var/run/puppet ssldir = \$vardir/ssl vardir = /var/lib/puppet pluginsync = true server = "puppet-master-centos.atomicobject.localnet" environment = "production" EOF echo "You should ensure that the hostname is properly set, in both /etc/hosts and in your network config." echo "After this, run: puppet master --mkuser"
set -ex # Get ruby, rubygems, and ntpdate apt-get install ruby rubygems ntpdate || yum install -y ruby rubygems ntpdate || false # Run ntpdate to ensure the system date is correct ntpdate pool.ntp.org # Update rubygems, and pull down facter and then puppet gem update --system gem install facter gem install puppet mkdir -p /etc/puppet /var/lib /var/log /var/run cat >/etc/puppet/puppet.conf <<EOF [main] logdir = /var/log/puppet rundir = /var/run/puppet ssldir = \$vardir/ssl vardir = /var/lib/puppet pluginsync = true EOF # Add firewall rule to allow puppet agent connections iptables -I INPUT 1 -p tcp --dport 8140 -j ACCEPT # Save firewall configuration OS=`facter osfamily` OS=`echo $OS | tr [:upper:] [:lower:]` case $OS in "redhat") echo "Detected Redhat Family" iptables-save > /etc/sysconfig/iptables ;; "debian") echo "Detected Debian Family" iptables-save > /etc/iptables.rules ;; *) echo "Unknown OS" ;; esac echo "You should ensure that your intended hostname is properly set, in both /etc/hosts and in your network config." echo "Currently, the hostname is: $(facter fqdn)" echo "After this, run: puppet master --mkuser"
Fix wd terminal prompt getting bash mixed up when doing: Up, Ctrl + A
#!/bin/bash wd() { ./auto/with-deploytools bash -c "echo -e \"$( (cat ~/.dotfiles/kubernetes/aliases.zsh; printf 'PS1="\\n\\e[36;1m$PS1\\e[0m"') | base64)\" | base64 -d >> ~/.bashrc && bash" }
#!/bin/bash wd() { ./auto/with-deploytools bash -c "echo -e \"$( (cat ~/.dotfiles/kubernetes/aliases.zsh; printf 'PS1="\\[\\n\\e[36;1m\\]$PS1\\[\\e[0m\\]"') | base64)\" | base64 -d >> ~/.bashrc && bash" }
Correct path to yml settings
#!/bin/sh java -jar target/dropwizard-example-1.0.0-SNAPSHOT.jar db migrate example.yml & sleep 5 java -jar target/dropwizard-example-1.0.0-SNAPSHOT.jar server example.yml wait
#!/bin/sh java -jar /target/dropwizard-example-1.0.0-SNAPSHOT.jar db migrate /target/example.yml & sleep 5 java -jar /target/dropwizard-example-1.0.0-SNAPSHOT.jar server /target/example.yml wait
Make the installation of linters verbose
#!/usr/bin/env bash set -o errexit set -o nounset set -o pipefail if [ ! $(command -v gometalinter) ] then go get github.com/alecthomas/gometalinter gometalinter --install --vendor fi gometalinter \ --exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \ --exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \ --exclude='duplicate of.*_test.go.*\(dupl\)$' \ --disable=aligncheck \ --disable=gotype \ --disable=gas \ --disable=vetshadow \ --cyclo-over=15 \ --tests \ --deadline=600s \ --vendor \ ./...
#!/usr/bin/env bash set -o errexit set -o nounset set -o pipefail if [ ! $(command -v gometalinter) ] then go get github.com/alecthomas/gometalinter gometalinter --install --vendor --debug fi gometalinter \ --exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \ --exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \ --exclude='duplicate of.*_test.go.*\(dupl\)$' \ --disable=aligncheck \ --disable=gotype \ --disable=gas \ --disable=vetshadow \ --cyclo-over=15 \ --tests \ --deadline=600s \ --vendor \ ./...
Exit 1 if PID/process isn't found
# ------------------------------------------------------------------------------ # ~zozo/.config/bash/functions/pid.bash # ------------------------------------------------------------------------------ case $OSTYPE in cygwin) flags_pid='-asW' ;; *) flags_pid='-cx -o pid,command' ;; esac pidof() { # return the PID of process $1 declare proc="$1" command ps $flags_pid | egrep -io "\<[[:digit:]]+ .*\<$proc[^\\]*(\.exe)?$" | cut -d" " -f1 } pidis() { # return the process with PID $1 declare pid="$1" command ps ${flags_pid/pid,/} -p $pid | tail -n+2 }
# ------------------------------------------------------------------------------ # ~zozo/.config/bash/functions/pid.bash # ------------------------------------------------------------------------------ case $OSTYPE in cygwin) flags_pid='-asW' ;; *) flags_pid='-cx -o pid,command' ;; esac pidof() { # return the PID of process $1 or exit false declare proc="$1" pid pid=$(command ps $flags_pid | egrep -io "\<[[:digit:]]+ .*\<$proc[^\\]*(\.exe)?$" | cut -d" " -f1) [[ $pid ]] && echo "$pid" } pidis() { # return the process with PID $1 or exit false declare pid="$1" proc proc=$(command ps ${flags_pid/pid,/} -p $pid | tail -n+2) [[ $proc ]] && echo "$proc" }
Implement test for not detecting play2.2
#!/bin/sh . ${BUILDPACK_TEST_RUNNER_HOME}/lib/test_utils.sh testDetectWithConfFileDetectsPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf touch ${BUILD_DIR}/play-app/conf/application.conf detect assertAppDetected "Play!" } testDetectWithConfFileDetectsPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf/application.conf detect assertNoAppDetected } testNoConfFileDoesNotDetectPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf detect assertNoAppDetected } testConfFileWithModulesDirectoryDoesNotDetectPlayApp() { mkdir -p ${BUILD_DIR}/play-app/modules/conf touch ${BUILD_DIR}/play-app/modules/conf/application.conf detect assertNoAppDetected } testPlay20NotDetected() { mkdir ${BUILD_DIR}/project touch ${BUILD_DIR}/project/Build.scala mkdir ${BUILD_DIR}/conf touch ${BUILD_DIR}/conf/application.conf detect assertNoAppDetected }
#!/bin/sh . ${BUILDPACK_TEST_RUNNER_HOME}/lib/test_utils.sh testDetectWithConfFileDetectsPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf touch ${BUILD_DIR}/play-app/conf/application.conf detect assertAppDetected "Play!" } testDetectWithConfFileDetectsPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf/application.conf detect assertNoAppDetected } testNoConfFileDoesNotDetectPlayApp() { mkdir -p ${BUILD_DIR}/play-app/conf detect assertNoAppDetected } testConfFileWithModulesDirectoryDoesNotDetectPlayApp() { mkdir -p ${BUILD_DIR}/play-app/modules/conf touch ${BUILD_DIR}/play-app/modules/conf/application.conf detect assertNoAppDetected } testPlay20NotDetected() { mkdir ${BUILD_DIR}/project touch ${BUILD_DIR}/project/Build.scala mkdir ${BUILD_DIR}/conf touch ${BUILD_DIR}/conf/application.conf detect assertNoAppDetected } testPlay22NotDetected() { touch ${BUILD_DIR}/build.sbt mkdir ${BUILD_DIR}/conf touch ${BUILD_DIR}/conf/application.conf detect assertNoAppDetected }
Copy XDebug settings to correct PHP folder
cp /vagrant/config/xdebug.ini /etc/php/7.0/mods-available/xdebug.ini
cp /vagrant/config/xdebug.ini /etc/php/7.1/mods-available/xdebug.ini
Remove the `-c` xcpretty option
#!/bin/bash set -o pipefail : ${SCHEME:="XCDYouTubeKit iOS Static Library"} : ${CONFIGURATION:="Release"} : ${DESTINATION:="platform=iOS Simulator,name=iPhone 5s"} COMMAND="env NSUnbufferedIO=YES xcodebuild clean test -project XCDYouTubeKit.xcodeproj -scheme '${SCHEME}' -configuration '${CONFIGURATION}' -destination '${DESTINATION}'" for BUILD_SETTING in OBJROOT RUN_CLANG_STATIC_ANALYZER IPHONEOS_DEPLOYMENT_TARGET MACOSX_DEPLOYMENT_TARGET; do VALUE=`eval echo \\$"${BUILD_SETTING}"` if [ ! -z "${VALUE}" ]; then COMMAND+=" ${BUILD_SETTING}='${VALUE}'" unset ${BUILD_SETTING} fi done COMMAND+=" | tee xcodebuild.log" xcpretty --version > /dev/null 2>&1 && COMMAND+=" | xcpretty -c" && [ "${TRAVIS}" == "true" ] && xcpretty-travis-formatter > /dev/null 2>&1 && COMMAND+=" -f `xcpretty-travis-formatter`" set -x eval "${COMMAND}" && rm xcodebuild.log
#!/bin/bash set -o pipefail : ${SCHEME:="XCDYouTubeKit iOS Static Library"} : ${CONFIGURATION:="Release"} : ${DESTINATION:="platform=iOS Simulator,name=iPhone 5s"} COMMAND="env NSUnbufferedIO=YES xcodebuild clean test -project XCDYouTubeKit.xcodeproj -scheme '${SCHEME}' -configuration '${CONFIGURATION}' -destination '${DESTINATION}'" for BUILD_SETTING in OBJROOT RUN_CLANG_STATIC_ANALYZER IPHONEOS_DEPLOYMENT_TARGET MACOSX_DEPLOYMENT_TARGET; do VALUE=`eval echo \\$"${BUILD_SETTING}"` if [ ! -z "${VALUE}" ]; then COMMAND+=" ${BUILD_SETTING}='${VALUE}'" unset ${BUILD_SETTING} fi done COMMAND+=" | tee xcodebuild.log" xcpretty --version > /dev/null 2>&1 && COMMAND+=" | xcpretty" && [ "${TRAVIS}" == "true" ] && xcpretty-travis-formatter > /dev/null 2>&1 && COMMAND+=" -f `xcpretty-travis-formatter`" set -x eval "${COMMAND}" && rm xcodebuild.log
Check that git is clean before deploying the site
#!/bin/sh CURRENT_BRANCH=`git symbolic-ref -q HEAD | sed -e 's|^refs/heads/||'` npm run generate-site git fetch origin gh-pages git checkout -B gh-pages origin/gh-pages rm `git ls-files | grep -v '^\.gitignore$'` if [ ! -f ".gitignore" ]; then echo "node_modules" > .gitignore fi cp -r site-build/* . if [ "`git status --porcelain`" != "" ]; then \ (git add -A . && \ git commit -m "Updated site" && \ git push origin +gh-pages:gh-pages) fi git checkout $CURRENT_BRANCH
#!/bin/sh if [ -n "$(git describe --always --dirty | grep -- -dirty)" ] then echo "Working tree is dirty, please commit or stash your changes, then try again" exit 1 fi CURRENT_BRANCH=`git symbolic-ref -q HEAD | sed -e 's|^refs/heads/||'` npm run generate-site git fetch origin gh-pages git checkout -B gh-pages origin/gh-pages rm `git ls-files | grep -v '^\.gitignore$'` if [ ! -f ".gitignore" ]; then echo "node_modules" > .gitignore fi cp -r site-build/* . if [ "`git status --porcelain`" != "" ]; then \ (git add -A . && \ git commit -m "Updated site" && \ git push origin +gh-pages:gh-pages) fi git checkout $CURRENT_BRANCH
Add ZSH git alias to g
alias reload!='source ~/.zshrc; rehash' alias cls='clear' alias less='less --quiet' alias df='df -h' alias du='du -hs' alias history='fc -il 1' alias hg='history | grep' # Fast directory change alias -g ..='cd ..' alias -g ...='cd ../..' alias -g ....='cd ../../..' alias -g .....='cd ../../../..' alias -g ......='cd ../../../../..' alias -g .......='cd ../../../../../..' # Always ask for confirmation before overwriting alias cp='cp -i' alias mv='mv -i' alias rm='rm -i' alias dc='docker-compose' alias pbcopy='tr -d "\n" | /usr/bin/pbcopy' alias mailcatcher='mailcatcher --ip=0.0.0.0' alias phpdebug='PHP_IDE_CONFIG="serverName=localhost" XDEBUG_CONFIG="idekey=PHPSTORM" php -d xdebug.remote_host=localhost -d xdebug.remote_connect_back=0'
alias reload!='source ~/.zshrc; rehash' alias cls='clear' alias less='less --quiet' alias df='df -h' alias du='du -hs' alias history='fc -il 1' alias hg='history | grep' alias g='git' # Fast directory change alias -g ..='cd ..' alias -g ...='cd ../..' alias -g ....='cd ../../..' alias -g .....='cd ../../../..' alias -g ......='cd ../../../../..' alias -g .......='cd ../../../../../..' # Always ask for confirmation before overwriting alias cp='cp -i' alias mv='mv -i' alias rm='rm -i' alias dc='docker-compose' alias pbcopy='tr -d "\n" | /usr/bin/pbcopy' alias mailcatcher='mailcatcher --ip=0.0.0.0' alias phpdebug='PHP_IDE_CONFIG="serverName=localhost" XDEBUG_CONFIG="idekey=PHPSTORM" php -d xdebug.remote_host=localhost -d xdebug.remote_connect_back=0'
Add a static file serving the build's git revision
#!/bin/bash if [ "$(tr '[:upper:]' '[:lower:]' <<<"$NODE_ENV")" = "production" ]; then npm run build:prod fi exec "$@"
#!/bin/bash if [ "$(tr '[:upper:]' '[:lower:]' <<<"$NODE_ENV")" = "production" ]; then npm run build:prod git rev-parse HEAD >.build/revision fi exec "$@"
Change tmux session name to 0
#!/bin/bash SESSION=x DEV_ROOT=~/dev WORK_REPOSITORY=actano/rplan function tmux-select { tmux select-window -t $SESSION:$1 tmux select-pane -t $2 } function cd_to_work { tmux send-keys "cd $WORK_REPOSITORY" C-m } cd $DEV_ROOT tmux -2 new-session -d -s $SESSION tmux rename-window -t $SESSION:0 'rplan' tmux-select 0 0 cd_to_work tmux send-keys "nvim" C-m tmux new-window -t $SESSION:1 -n 'ag+git' tmux split-window -h tmux-select 1 1 cd_to_work tmux-select 1 0 cd_to_work tmux new-window -t $SESSION:2 -n 'docker' tmux split-window -h tmux-select 2 1 cd_to_work tmux-select 2 0 cd_to_work # Set default window tmux select-window -t $SESSION:0 # Attach to session tmux -2 attach-session -t $SESSION
#!/bin/bash SESSION=0 DEV_ROOT=~/dev WORK_REPOSITORY=actano/rplan function tmux-select { tmux select-window -t $SESSION:$1 tmux select-pane -t $2 } function cd_to_work { tmux send-keys "cd $WORK_REPOSITORY" C-m } cd $DEV_ROOT tmux -2 new-session -d -s $SESSION tmux rename-window -t $SESSION:0 'rplan' tmux-select 0 0 cd_to_work tmux send-keys "nvim" C-m tmux new-window -t $SESSION:1 -n 'ag+git' tmux split-window -h tmux-select 1 1 cd_to_work tmux-select 1 0 cd_to_work tmux new-window -t $SESSION:2 -n 'docker' tmux split-window -h tmux-select 2 1 cd_to_work tmux-select 2 0 cd_to_work # Set default window tmux select-window -t $SESSION:0 # Attach to session tmux -2 attach-session -t $SESSION
Fix private config on OSX.
#!/bin/bash # cd "$(dirname "$0")" git pull git submodule update --init --recursive --quiet function doIt() { rsync --exclude ".DS_Store" -av ./public/ ~ echo for file in $(find private/ -type f -exec ls {} \; 2> /dev/null | sed 's/private\///'); do if [[ $file != '.gitignore' ]]; then echo "Adding private config to $file" cat ./private/$file >> ~/$file fi done unset file } if [ "$1" == "--force" -o "$1" == "-f" ]; then doIt else read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1 echo if [[ $REPLY =~ ^[Yy]$ ]]; then doIt fi fi unset doIt source ~/.bash_profile
#!/bin/bash cd "$(dirname "$0")" git pull git submodule update --init --recursive --quiet function doIt() { rsync --exclude ".DS_Store" -av ./public/ ~ echo for file in $(find private -type f -exec ls {} \; 2> /dev/null | sed 's/private\///'); do if [[ $file != '.gitignore' ]]; then echo "Adding private config to $file" cat ./private/$file >> ~/$file fi done unset file } if [ "$1" == "--force" -o "$1" == "-f" ]; then doIt else read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1 echo if [[ $REPLY =~ ^[Yy]$ ]]; then doIt fi fi unset doIt source ~/.bash_profile
Set bash instead of sh
#!/bin/sh # # init IndexData ubuntu deb repository sources_list_d=/etc/apt/sources.list.d indexdata_list=indexdata.list apt_key=http://ftp.indexdata.dk/debian/indexdata.asc deb_url=http://ftp.indexdata.dk set -e init_apt() { if [[ "$OSTYPE" == "linux-gnu" ]]; then file="$sources_list_d/$indexdata_list" os=ubuntu if [ ! -e $file ]; then codename=$(lsb_release -c -s) wget -O- $apt_key | sudo apt-key add - sudo sh -c "echo deb $deb_url/${os} ${codename} main > $file.tmp" sudo mv -f $file.tmp $file sudo apt-get update -qq sudo apt-get install -y libyaz5-dev fi elif [[ "$OSTYPE" == "darwin"* ]]; then brew install yaz fi } init_apt
#!/bin/bash # # init IndexData ubuntu deb repository sources_list_d=/etc/apt/sources.list.d indexdata_list=indexdata.list apt_key=http://ftp.indexdata.dk/debian/indexdata.asc deb_url=http://ftp.indexdata.dk set -e init_apt() { if [[ "$OSTYPE" == "linux-gnu" ]]; then file="$sources_list_d/$indexdata_list" os=ubuntu if [ ! -e $file ]; then codename=$(lsb_release -c -s) wget -O- $apt_key | sudo apt-key add - sudo sh -c "echo deb $deb_url/${os} ${codename} main > $file.tmp" sudo mv -f $file.tmp $file sudo apt-get update -qq sudo apt-get install -y libyaz5-dev fi elif [[ "$OSTYPE" == "darwin"* ]]; then brew install yaz fi } init_apt
Fix TextEdit's plain text related config
#!/bin/bash cd "$(dirname "${BASH_SOURCE[0]}")" \ && . "../../utils.sh" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - print_in_purple "\n TextEdit\n\n" execute "defaults write com.apple.TextEdit PlainTextEncoding -int 4 && \ defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4" \ "Open and save files as UTF-8 encoded" execute "defaults write com.apple.TextEdit RichText -" \ "Use plain text mode for new documents" killall "TextEdit" &> /dev/null
#!/bin/bash cd "$(dirname "${BASH_SOURCE[0]}")" \ && . "../../utils.sh" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - print_in_purple "\n TextEdit\n\n" execute "defaults write com.apple.TextEdit PlainTextEncoding -int 4 && \ defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4" \ "Open and save files as UTF-8 encoded" execute "defaults write com.apple.TextEdit RichText 0" \ "Use plain text mode for new documents" killall "TextEdit" &> /dev/null
Make sure crond is running in the background
#!/bin/sh set -e if [ -n "$SSH_PRIVATE_KEY" ]; then mkdir -p ~/.ssh rm -f ~/.ssh/id_rsa echo "-----BEGIN RSA PRIVATE KEY-----" > ~/.ssh/id_rsa echo "$SSH_PRIVATE_KEY" | fold -w 64 >> ~/.ssh/id_rsa echo "-----END RSA PRIVATE KEY-----" >> ~/.ssh/id_rsa chmod 0400 ~/.ssh/id_rsa fi if [ -n "$SSH_KNOWN_HOSTS" ]; then mkdir -p ~/.ssh rm -f ~/.ssh/known_hosts echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts chmod 0644 ~/.ssh/known_hosts fi if [ -n "$S3_BUCKET" ]; then echo "Setting up S3-Sync" chmod +x /usr/local/bin/s3-sync.sh touch /var/log/s3-sync.log echo "*/5 * * * * /usr/local/bin/s3-sync.sh > /var/log/s3-sync.log" | /usr/bin/crontab - echo "Doing initial sync with S3" /usr/local/bin/s3-sync.sh fi chown jenkins /var/jenkins_home exec /usr/local/bin/jenkins.sh
#!/bin/sh set -e if [ -n "$SSH_PRIVATE_KEY" ]; then mkdir -p ~/.ssh rm -f ~/.ssh/id_rsa echo "-----BEGIN RSA PRIVATE KEY-----" > ~/.ssh/id_rsa echo "$SSH_PRIVATE_KEY" | fold -w 64 >> ~/.ssh/id_rsa echo "-----END RSA PRIVATE KEY-----" >> ~/.ssh/id_rsa chmod 0400 ~/.ssh/id_rsa fi if [ -n "$SSH_KNOWN_HOSTS" ]; then mkdir -p ~/.ssh rm -f ~/.ssh/known_hosts echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts chmod 0644 ~/.ssh/known_hosts fi if [ -n "$S3_BUCKET" ]; then echo "Setting up S3-Sync" chmod +x /usr/local/bin/s3-sync.sh touch /var/log/s3-sync.log echo "*/5 * * * * /usr/local/bin/s3-sync.sh > /var/log/s3-sync.log" | /usr/bin/crontab - echo "Doing initial sync with S3" /usr/local/bin/s3-sync.sh echo "Starting Crond service..." /usr/sbin/crond fi chown jenkins /var/jenkins_home exec /usr/local/bin/jenkins.sh
Add intermediate step of capitalization before converting string
source Base/BaseUtil.sh source String/StringUtil.sh MessageBuilderUtil(){ local time=$(BaseUtil timestamp) buildMessage(){ echo "${time} [$(StringUtil toUpperCase ${1})] $(StringUtil replace $2 - space)" } $@ }
source Base/BaseUtil.sh source String/StringUtil.sh MessageBuilderUtil(){ local time=$(BaseUtil timestamp) buildMessage(){ _message=$(StringUtil capitalize $2) echo "${time} [$(StringUtil toUpperCase ${1})] $(StringUtil replace $_message - space)" } $@ }
Use ffmpeg for codec copy of foscam mkvs
#!/bin/bash -e folder=$1 camName=$2 basePath=$3 #http://stackoverflow.com/q/11448885 threshold=`date -d "5 minutes ago" +%Y%m%d%H%M%S` #http://unix.stackexchange.com/a/84859/50868 shopt -s nullglob for path in $basePath/raw/$folder/record/*.mkv ; do file=$(basename $path) #http://stackoverflow.com/a/5257398/316108 parts=(${file//_/ }) #http://www.catonmat.net/blog/bash-one-liners-explained-part-two/ date=${parts[1]:0:4}-${parts[1]:4:2}-${parts[1]:6:2} time=${parts[2]:0:2}:${parts[2]:2:2}:${parts[2]:4:2} vidTimestamp=${parts[1]:0:4}${parts[1]:4:2}${parts[1]:6:2}${parts[2]:0:2}${parts[2]:2:2}${parts[2]:4:2} if test "$vidTimestamp" -lt "$threshold" then mkdir -p $basePath/processed/$date/$camName mv $path $basePath/processed/$date/$camName/$time.mkv fi done
#!/bin/bash -e folder=$1 camName=$2 basePath=$3 #http://stackoverflow.com/q/11448885 threshold=`date -d "5 minutes ago" +%Y%m%d%H%M%S` #http://unix.stackexchange.com/a/84859/50868 shopt -s nullglob for path in $basePath/raw/$folder/record/*.mkv ; do file=$(basename $path) #http://stackoverflow.com/a/5257398/316108 parts=(${file//_/ }) #http://www.catonmat.net/blog/bash-one-liners-explained-part-two/ date=${parts[1]:0:4}-${parts[1]:4:2}-${parts[1]:6:2} time=${parts[2]:0:2}:${parts[2]:2:2}:${parts[2]:4:2} vidTimestamp=${parts[1]:0:4}${parts[1]:4:2}${parts[1]:6:2}${parts[2]:0:2}${parts[2]:2:2}${parts[2]:4:2} if test "$vidTimestamp" -lt "$threshold" then mkdir -p $basePath/processed/$date/$camName # need to do a codec copy here since foscam cameras make some kind of corrupt mkv file # ffmpeg fixes it so mkvmerge can operate on it later ffmpeg -i $path -codec copy $basePath/processed/$date/$camName/$time.mkv rm $path fi done
Use yarn in test script
#!/bin/bash export POSTGRES_PASSWORD="uleash" echo "starting postgres in docker " HASH=`docker run -P --name unleash-postgres -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD -d postgres:9.3` export PGPORT=`docker ps| grep unleash-post| awk '{print $(NF-1)}'| awk -F "->" '{print $1}'| awk -F \: '{print $2}'` echo "PGPORT: $PGPORT" echo "" # ----------- Wait for postgres to start ----------- if [ -z "$DOCKER_HOST" ] then export database_host="127.0.0.1" else export database_host=$(echo $DOCKER_HOST |awk -F \/ '{print $NF}'| awk -F \: '{print $1}') fi for i in `seq 1 120`; do echo -n "." sleep 1 netcat -z $database_host $PGPORT && echo "postgres is up and running in docker in $i seconds!" && break done export TEST_DATABASE_URL=postgres://postgres:$POSTGRES_PASSWORD@$database_host:$PGPORT/postgres npm install DATABASE_URL=$TEST_DATABASE_URL ./node_modules/.bin/db-migrate up npm test docker stop $HASH docker rm $HASH
#!/bin/bash export POSTGRES_PASSWORD="uleash" echo "starting postgres in docker " HASH=`docker run -P --name unleash-postgres -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD -d postgres:9.3` export PGPORT=`docker ps| grep unleash-post| awk '{print $(NF-1)}'| awk -F "->" '{print $1}'| awk -F \: '{print $2}'` echo "PGPORT: $PGPORT" echo "" # ----------- Wait for postgres to start ----------- if [ -z "$DOCKER_HOST" ] then export database_host="127.0.0.1" else export database_host=$(echo $DOCKER_HOST |awk -F \/ '{print $NF}'| awk -F \: '{print $1}') fi for i in `seq 1 120`; do echo -n "." sleep 1 netcat -z $database_host $PGPORT && echo "postgres is up and running in docker in $i seconds!" && break done export TEST_DATABASE_URL=postgres://postgres:$POSTGRES_PASSWORD@$database_host:$PGPORT/postgres yarn DATABASE_URL=$TEST_DATABASE_URL ./node_modules/.bin/db-migrate up yarn test docker stop $HASH docker rm $HASH
Fix path to om tool
#!/bin/bash -eu function main() { local cwd cwd="${1}" chmod +x tool-om/om-linux local om om="tool-om/om-linux" printf 'Waiting for opsman to come up' until $(curl --output /dev/null --silent --head --fail -k ${OPSMAN_URI}); do printf '.' sleep 5 done om --target "${OPSMAN_URI}" \ --skip-ssl-validation \ import-installation \ --installation "${cwd}/opsmgr-settings/${OPSMAN_SETTINGS_FILENAME}" \ --decryption-passphrase "${OPSMAN_PASSPHRASE}" } main "${PWD}"
#!/bin/bash -eu function main() { local cwd cwd="${1}" chmod +x tool-om/om-linux local om="tool-om/om-linux" printf "Waiting for %s to come up" "$OPSMAN_URI" until $(curl --output /dev/null --silent --head --fail -k ${OPSMAN_URI}); do printf '.' sleep 5 done printf '\n' $om --target "${OPSMAN_URI}" \ --skip-ssl-validation \ import-installation \ --installation "${cwd}/opsmgr-settings/${OPSMAN_SETTINGS_FILENAME}" \ --decryption-passphrase "${OPSMAN_PASSPHRASE}" } main "${PWD}"
Move files from vagrant home, if we are using virtualbox
mv ~/sources.list /etc/apt/sources.list mv ~/sshd /etc/pam.d/sshd echo GEM_HOME="$HOME/.gem" >> /etc/environment echo PATH="$HOME/.gem/ruby/2.2.0/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" >> /etc/environment echo "Stopping puppet and chef" service puppet stop service chef-client stop echo "Adding brightbox's ruby repository" apt-add-repository ppa:brightbox/ruby-ng echo "Adding chis-lea's node js repository" add-apt-repository ppa:chris-lea/node.js echo "Updating apt" apt-get update echo "Installing dependencies" apt-get install -y build-essential git-core zlib1g-dev libssl-dev \ libreadline-dev libyaml-dev subversion maven2 gradle nodejs rdiff-backup \ zip ruby2.2 ruby2.2-dev ruby-switch libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev \ libcurl4-openssl-dev libffi-dev openjdk-7-jdk echo "Switching to ruby2.2" ruby-switch --set ruby2.2
if grep 'vagrant' /etc/passwd then user_home=/home/vagrant else user_home=$HOME fi mv $user_home/sources.list /etc/apt/sources.list mv $user_home/sshd /etc/pam.d/sshd echo GEM_HOME="$user_home/.gem" >> /etc/environment echo PATH="$user_home/.gem/ruby/2.2.0/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" >> /etc/environment echo "Stopping puppet and chef" service puppet stop service chef-client stop echo "Adding brightbox's ruby repository" apt-add-repository ppa:brightbox/ruby-ng echo "Adding chis-lea's node js repository" add-apt-repository ppa:chris-lea/node.js echo "Updating apt" apt-get update echo "Installing dependencies" apt-get install -y build-essential git-core zlib1g-dev libssl-dev \ libreadline-dev libyaml-dev subversion maven2 gradle nodejs rdiff-backup \ zip ruby2.2 ruby2.2-dev ruby-switch libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev \ libcurl4-openssl-dev libffi-dev openjdk-7-jdk echo "Switching to ruby2.2" ruby-switch --set ruby2.2
Provision global installation of jasmine
if ! [ -e ~/.nvm/nvm.sh ]; then curl https://raw.githubusercontent.com/creationix/nvm/v0.17.2/install.sh | bash fi source ~/.nvm/nvm.sh nvm list 0.10 || nvm install 0.10 nvm exec 0.10 which grunt || nvm exec 0.10 npm install -g grunt-cli nvm list 0.11 || nvm install 0.11 nvm exec 0.11 which grunt || nvm exec 0.11 npm install -g grunt-cli nvm alias default 0.10
if ! [ -e ~/.nvm/nvm.sh ]; then curl https://raw.githubusercontent.com/creationix/nvm/v0.17.2/install.sh | bash fi source ~/.nvm/nvm.sh nvm list 0.10 || nvm install 0.10 nvm exec 0.10 which grunt || nvm exec 0.10 npm install -g grunt-cli jasmine nvm list 0.11 || nvm install 0.11 nvm exec 0.11 which grunt || nvm exec 0.11 npm install -g grunt-cli jasmine nvm alias default 0.10
Fix pip install that fails to download.
#!/bin/bash set -o errexit set -o xtrace # Prepare environment if [ -z "${USER}" ]; then USER="docker" fi export USER # Setup compiler if [ -z "${CC}" ]; then CC="gcc" fi export CC if [ "${CC}" = "clang" ]; then if [ -z "${CXX}" ]; then CXX="clang++" fi COMPILER_PACKAGES="clang-3.4" # Ubuntu-specific apt package name else if [ -z "${CXX}" ]; then CXX="g++" fi COMPILER_PACKAGES="${CC} ${CXX}" # Ubuntu-specific apt package names fi export CXX # Install OS dependencies, assuming stock ubuntu:latest apt-get update apt-get install -y \ wget \ git \ ${COMPILER_PACKAGES} \ build-essential \ python \ python2.7 \ python2.7-dev wget https://bootstrap.pypa.io/get-pip.py -O - | python pip install --upgrade --ignore-installed setuptools pip install wheel # Move into root of nupic repository pushd `git rev-parse --show-toplevel` # Build installable python packages python setup.py bdist_wheel # Install nupic wheel and dependencies, including nupic.bindings artifact in # wheelwhouse/ pip install -f wheelhouse/ dist/nupic-`cat VERSION`*.whl # Invoke tests python setup.py test # Return to original path popd
#!/bin/bash set -o errexit set -o xtrace # Prepare environment if [ -z "${USER}" ]; then USER="docker" fi export USER # Setup compiler if [ -z "${CC}" ]; then CC="gcc" fi export CC if [ "${CC}" = "clang" ]; then if [ -z "${CXX}" ]; then CXX="clang++" fi COMPILER_PACKAGES="clang-3.4" # Ubuntu-specific apt package name else if [ -z "${CXX}" ]; then CXX="g++" fi COMPILER_PACKAGES="${CC} ${CXX}" # Ubuntu-specific apt package names fi export CXX # Install OS dependencies, assuming stock ubuntu:latest apt-get update apt-get install -y \ wget \ git \ ${COMPILER_PACKAGES} \ build-essential \ python \ python2.7 \ python2.7-dev \ python-pip pip install --upgrade --ignore-installed setuptools pip install wheel # Move into root of nupic repository pushd `git rev-parse --show-toplevel` # Build installable python packages python setup.py bdist_wheel # Install nupic wheel and dependencies, including nupic.bindings artifact in # wheelwhouse/ pip install -f wheelhouse/ dist/nupic-`cat VERSION`*.whl # Invoke tests python setup.py test # Return to original path popd
Make sure we install the latest package in case of multiple rebuilds of the same name
apt-get install -y php-pear php5-dbg gdb apt-get install -y libssl-dev libsasl2-dev libpcre3-dev pkg-config ls -1 /phongo/mongodb*.tgz | sort -n -r | xargs sudo pecl install php -m | grep -q mongodb || echo "extension=mongodb.so" >> `php --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||"` pecl run-tests -q -p mongodb
apt-get install -y php-pear php5-dbg gdb apt-get install -y libssl-dev libsasl2-dev libpcre3-dev pkg-config ls -1 /phongo/mongodb*.tgz | sort -n -r | xargs sudo pecl install -f php -m | grep -q mongodb || echo "extension=mongodb.so" >> `php --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||"` pecl run-tests -q -p mongodb
Remove the use of timeout
set -e DICTIONARY_FILE=${1} PATTERN_FILE=${2} PATOUT_FILE=${3} TRANSLATE_FILE=${4} LEFT_HYPHEN_MIN=${5} LEFT_HYPHEN_MAX=${6} HYPH_LEVEL=${7} PAT_START=${8} PAT_FINISH=${9} GOOD_WEIGHT=${10} BAD_WEIGHT=${11} THRESHOLD=${12} FIFO=tmp rm -f $FIFO mkfifo $FIFO if which gtimeout >/dev/null; then TIMEOUT=gtimeout; else TIMEOUT=timeout fi $TIMEOUT 3 patgen $DICTIONARY_FILE $PATTERN_FILE $PATOUT_FILE $TRANSLATE_FILE <$FIFO & echo $LEFT_HYPHEN_MIN $LEFT_HYPHEN_MAX >$FIFO echo $HYPH_LEVEL $HYPH_LEVEL >$FIFO echo $PAT_START $PAT_FINISH >$FIFO echo $GOOD_WEIGHT $BAD_WEIGHT $THRESHOLD >$FIFO echo y >$FIFO wait $! ret=$? rm -f $FIFO if [ $ret == 0 ]; then touch $PATOUT_FILE pattmp.$HYPH_LEVEL else rm -f $PATOUT_FILE pattmp.$HYPH_LEVEL fi exit $ret
set -e DICTIONARY_FILE=${1} PATTERN_FILE=${2} PATOUT_FILE=${3} TRANSLATE_FILE=${4} LEFT_HYPHEN_MIN=${5} LEFT_HYPHEN_MAX=${6} HYPH_LEVEL=${7} PAT_START=${8} PAT_FINISH=${9} GOOD_WEIGHT=${10} BAD_WEIGHT=${11} THRESHOLD=${12} FIFO=tmp rm -f $FIFO mkfifo $FIFO patgen $DICTIONARY_FILE $PATTERN_FILE $PATOUT_FILE $TRANSLATE_FILE <$FIFO & echo $LEFT_HYPHEN_MIN $LEFT_HYPHEN_MAX >$FIFO echo $HYPH_LEVEL $HYPH_LEVEL >$FIFO echo $PAT_START $PAT_FINISH >$FIFO echo $GOOD_WEIGHT $BAD_WEIGHT $THRESHOLD >$FIFO echo y >$FIFO wait $! ret=$? rm -f $FIFO if [ $ret == 0 ]; then touch $PATOUT_FILE pattmp.$HYPH_LEVEL else rm -f $PATOUT_FILE pattmp.$HYPH_LEVEL fi exit $ret
Add a comment about printing environment info
#!/bin/bash pwd git branch git pull npm run build echo "Restarting legislation-explorer service..." sudo systemctl restart legislation-explorer.service
#!/bin/bash # Print some environment info useful when reading CircleCI logs. pwd git branch git pull npm run build echo "Restarting legislation-explorer service..." sudo systemctl restart legislation-explorer.service
Install system dependency - libmariadb-dev
#!/bin/bash set -e # Check for merge conflicts before proceeding python -m compileall -f "${GITHUB_WORKSPACE}" if grep -lr --exclude-dir=node_modules "^<<<<<<< " "${GITHUB_WORKSPACE}" then echo "Found merge conflicts" exit 1 fi # install wkhtmltopdf wget -O /tmp/wkhtmltox.tar.xz https://github.com/frappe/wkhtmltopdf/raw/master/wkhtmltox-0.12.3_linux-generic-amd64.tar.xz tar -xf /tmp/wkhtmltox.tar.xz -C /tmp sudo mv /tmp/wkhtmltox/bin/wkhtmltopdf /usr/local/bin/wkhtmltopdf sudo chmod o+x /usr/local/bin/wkhtmltopdf # install cups sudo apt-get install libcups2-dev # install redis sudo apt-get install redis-server
#!/bin/bash set -e # Check for merge conflicts before proceeding python -m compileall -f "${GITHUB_WORKSPACE}" if grep -lr --exclude-dir=node_modules "^<<<<<<< " "${GITHUB_WORKSPACE}" then echo "Found merge conflicts" exit 1 fi # install wkhtmltopdf wget -O /tmp/wkhtmltox.tar.xz https://github.com/frappe/wkhtmltopdf/raw/master/wkhtmltox-0.12.3_linux-generic-amd64.tar.xz tar -xf /tmp/wkhtmltox.tar.xz -C /tmp sudo mv /tmp/wkhtmltox/bin/wkhtmltopdf /usr/local/bin/wkhtmltopdf sudo chmod o+x /usr/local/bin/wkhtmltopdf # install cups sudo apt-get install libcups2-dev # install redis sudo apt-get install redis-server # install mariadb client library sudo apt-get install libmariadb-dev
Fix "docker --name" command line option.
#!/bin/sh cd $(dirname $0) SOURCE=$(pwd)/android CONTAINER=cyanogenmod REPOSITORY=stucki/cyanogenmod # Create a shared folder which will be used as working directory. test -d $SOURCE || mkdir $SOURCE # Try to start an existing/stopped container with the given name $CONTAINER. Otherwise, run a new one. docker start -i $CONTAINER 2>/dev/null || docker run -v $SOURCE:/home/cmbuild/android -i -t -name $CONTAINER $REPOSITORY sh -c "screen -s /bin/bash" exit $?
#!/bin/sh cd $(dirname $0) SOURCE=$(pwd)/android CONTAINER=cyanogenmod REPOSITORY=stucki/cyanogenmod # Create a shared folder which will be used as working directory. test -d $SOURCE || mkdir $SOURCE # Try to start an existing/stopped container with the given name $CONTAINER. Otherwise, run a new one. docker start -i $CONTAINER 2>/dev/null || docker run -v $SOURCE:/home/cmbuild/android -i -t --name $CONTAINER $REPOSITORY sh -c "screen -s /bin/bash" exit $?
Disable JIT on CI Postgres to make wheel tests predictable
#!/bin/bash set -e -x if [ -z "${PGVERSION}" ]; then echo "Missing PGVERSION environment variable." exit 1 fi if [[ "${TRAVIS_OS_NAME}" == "linux" && "${BUILD}" == *wheels* ]]; then sudo service postgresql stop ${PGVERSION} echo "port = 5432" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/postgresql.conf if [[ "${BUILD}" == *wheels* ]]; then # Allow docker guests to connect to the database echo "listen_addresses = '*'" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/postgresql.conf echo "host all all 172.17.0.0/16 trust" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/pg_hba.conf fi sudo service postgresql start ${PGVERSION} fi if [ "${TRAVIS_OS_NAME}" == "osx" ]; then brew update >/dev/null brew upgrade pyenv eval "$(pyenv init -)" if ! (pyenv versions | grep "${PYTHON_VERSION}$"); then pyenv install ${PYTHON_VERSION} fi pyenv global ${PYTHON_VERSION} pyenv rehash # Install PostgreSQL if brew ls --versions postgresql > /dev/null; then brew remove --force --ignore-dependencies postgresql fi brew install postgresql@${PGVERSION} brew services start postgresql@${PGVERSION} fi
#!/bin/bash set -e -x if [ -z "${PGVERSION}" ]; then echo "Missing PGVERSION environment variable." exit 1 fi if [[ "${TRAVIS_OS_NAME}" == "linux" && "${BUILD}" == *wheels* ]]; then sudo service postgresql stop ${PGVERSION} echo "port = 5432" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/postgresql.conf if [[ "${BUILD}" == *wheels* ]]; then # Allow docker guests to connect to the database echo "listen_addresses = '*'" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/postgresql.conf echo "host all all 172.17.0.0/16 trust" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/pg_hba.conf if [ "${PGVERSION}" -ge "11" ]; then # Disable JIT to avoid unpredictable timings in tests. echo "jit = off" | \ sudo tee --append /etc/postgresql/${PGVERSION}/main/postgresql.conf fi fi sudo service postgresql start ${PGVERSION} fi if [ "${TRAVIS_OS_NAME}" == "osx" ]; then brew update >/dev/null brew upgrade pyenv eval "$(pyenv init -)" if ! (pyenv versions | grep "${PYTHON_VERSION}$"); then pyenv install ${PYTHON_VERSION} fi pyenv global ${PYTHON_VERSION} pyenv rehash # Install PostgreSQL if brew ls --versions postgresql > /dev/null; then brew remove --force --ignore-dependencies postgresql fi brew install postgresql@${PGVERSION} brew services start postgresql@${PGVERSION} fi
Add --yes flag to elm-make
#! /usr/bin/env bash elm-test if [ $? -ne 0 ]; then exit 1; fi; elm-make --warn src/Main.elm
#! /usr/bin/env bash elm-test if [ $? -ne 0 ]; then exit 1; fi; elm-make --yes --warn src/Main.elm
Remove origin/ and replace '/' in branch names in tar file
#sass sass:static/css -r templates/sass/bourbon/lib/bourbon.rb --style :compressed if [ -z "${GIT_COMMIT}" ]; then GIT_COMMIT=$(git rev-parse HEAD) fi if [ -z "${GIT_BRANCH}" ]; then GIT_BRANCH=$(git symbolic-ref -q HEAD) GIT_BRANCH=${GIT_BRANCH##refs/heads/} GIT_BRANCH=${GIT_BRANCH:-HEAD} fi if [ -z "${BUILD_NUMBER}" ]; then BUILD_NUMBER=dev fi ID=mitx-${GIT_BRANCH}-${BUILD_NUMBER}-${GIT_COMMIT} REPO_ROOT=$(dirname $0)/.. BUILD_DIR=${REPO_ROOT}/build mkdir -p ${BUILD_DIR} tar -v --exclude=.git --exclude=build --transform="s#^#mitx/#" -czf ${BUILD_DIR}/${ID}.tgz ${REPO_ROOT}
#sass sass:static/css -r templates/sass/bourbon/lib/bourbon.rb --style :compressed if [ -z "${GIT_COMMIT}" ]; then GIT_COMMIT=$(git rev-parse HEAD) fi if [ -z "${GIT_BRANCH}" ]; then GIT_BRANCH=$(git symbolic-ref -q HEAD) GIT_BRANCH=${GIT_BRANCH##refs/heads/} GIT_BRANCH=${GIT_BRANCH:-HEAD} fi GIT_BRANCH=${GIT_BRANCH##origin/} GIT_BRANCH=${GIT_BRANCH//\//_} if [ -z "${BUILD_NUMBER}" ]; then BUILD_NUMBER=dev fi ID=mitx-${GIT_BRANCH}-${BUILD_NUMBER}-${GIT_COMMIT} REPO_ROOT=$(dirname $0)/.. BUILD_DIR=${REPO_ROOT}/build mkdir -p ${BUILD_DIR} tar -v --exclude=.git --exclude=build --transform="s#^#mitx/#" -czf ${BUILD_DIR}/${ID}.tgz ${REPO_ROOT}
Use 1998 ISO C++ standard
mkdir _build && cd _build cmake .. -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_BUILD_TYPE=Release make all -j4 make install
export CXXFLAGS="-std=c++98" mkdir _build && cd _build cmake .. -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_BUILD_TYPE=Release make all -j4 make install
Revert "Whoops, forgot to include this dummy precmd implementation."
export PAGER=less export LC_CTYPE=en_US.UTF-8 # speed stuff. #setopt no_beep setopt auto_cd setopt multios setopt cdablevarS if [[ x$WINDOW != x ]] then SCREEN_NO="%B$WINDOW%b " else SCREEN_NO="" fi PS1="%n@%m:%~%# " # Setup the prompt with pretty colors setopt prompt_subst export LSCOLORS="Gxfxcxdxbxegedabagacad" function oh_my_zsh_theme_precmd() { # Blank function; override this in your themes } source "$ZSH/themes/$ZSH_THEME.zsh-theme"
export PAGER=less export LC_CTYPE=en_US.UTF-8 # speed stuff. #setopt no_beep setopt auto_cd setopt multios setopt cdablevarS if [[ x$WINDOW != x ]] then SCREEN_NO="%B$WINDOW%b " else SCREEN_NO="" fi PS1="%n@%m:%~%# " # Setup the prompt with pretty colors setopt prompt_subst export LSCOLORS="Gxfxcxdxbxegedabagacad" source "$ZSH/themes/$ZSH_THEME.zsh-theme"
Copy syslog into the workspace
#!/usr/bin/env bash FILENAME=${1} #pull all images /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} pull #run docker file /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} run contract_tests #cleaning after tests contract_test_result=$? echo "========== Logging output from containers ==========" /usr/local/bin/docker-compose logs echo "========== Logging output from syslog ==========" cat ../logs/requisition/messages echo "========== Logging nginx settings ==========" /usr/local/bin/docker-compose exec nginx cat /etc/nginx/conf.d/default.conf /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} down $2 #don't remove the $2 in the line above #CI will append -v to it, so all dangling volumes are removed after the job runs exit ${contract_test_result} #this line above makes sure when jenkins runs this script #the build success/failure result is determined by contract test run not by the clean up run
#!/usr/bin/env bash FILENAME=${1} #pull all images /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} pull #run docker file /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} run contract_tests #cleaning after tests contract_test_result=$? echo "========== Logging output from containers ==========" /usr/local/bin/docker-compose logs echo "========== Logging output from syslog ==========" cp ../logs/requisition/messages build/syslog echo "========== Logging nginx settings ==========" /usr/local/bin/docker-compose exec nginx cat /etc/nginx/conf.d/default.conf /usr/local/bin/docker-compose -f docker-compose.yml -f ${FILENAME} down $2 #don't remove the $2 in the line above #CI will append -v to it, so all dangling volumes are removed after the job runs exit ${contract_test_result} #this line above makes sure when jenkins runs this script #the build success/failure result is determined by contract test run not by the clean up run
Use id_rsa.pub for the pubkey alias
# Pipe my public key to clipboard alias pubkey="more ~/.ssh/id_dsa.public | pbcopy | echo '=> Public key copied!'"
# Pipe my public key to clipboard alias pubkey="more ~/.ssh/id_rsa.pub | pbcopy | echo '=> Public key copied!'"