Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Add input method and ctags pkg.
#!/bin/sh sudo apt-get install vim git tmux zsh build-essential gitk git-gui curl docky screen dos2unix tree sudo apt-get install texlive-science texlive-publishers texlive-math-extra texlive-bibtex-extra latex-beamer texlive-xetex texlive-lang-cjk
#!/bin/sh sudo apt-get install vim git tmux zsh build-essential gitk git-gui curl docky screen dos2unix tree exuberant-ctags ibus-sunpinyin sudo apt-get install texlive-science texlive-publishers texlive-math-extra texlive-bibtex-extra latex-beamer texlive-xetex texlive-lang-cjk
Add deploy commandline for vesselinfo upload
#!/bin/bash # Clone the repo git clone --recursive git@github.com:SkyTruth/pelagos.git # Initialize GAE virutalenv #TODO: Put this into a script in the pelagos repo virtualenv pelagos/gae/virtualenvloader/gaevirtualenv source pelagos/gae/virtualenvloader/gaevirtualenv/bin/activate pip install -r pelagos/gae-requirements.txt # Deploy # Update app.yaml to match project gcloud preview app deploy pelagos/gae --project skytruth-pelagos-dev
#!/bin/bash # Clone the repos git clone --recursive git@github.com:SkyTruth/pelagos.git git clone --recursive git@github.com:SkyTruth/pelagos-data.git # Initialize GAE virutalenv #TODO: Put this into a script in the pelagos repo virtualenv pelagos/gae/virtualenvloader/gaevirtualenv source pelagos/gae/virtualenvloader/gaevirtualenv/bin/activate pip install -r pelagos/gae-requirements.txt # Deploy # Update app.yaml to match project gcloud preview app deploy pelagos/gae --project skytruth-pelagos-dev #Upload vessselinfo (will prompt for password) #TODO: direct log files cfreated by uploader to a tempfolder and cleanup afterward appcfg.py upload_data --config_file=./pelagos-data/data/vesselinfo/vesselinfo-bulkloader.yaml \ --filename=./pelagos-data/data/vesselinfo/vessel-info-2014-08-28.csv --kind=VesselInfo \ --num_threads=4 --url=http://skytruth-pelagos-dev.appspot.com/_ah/remote_api \ --rps_limit=500 --email=paul@skytruth.org
Exit immediately if a command exits with a non-zero status
#!/usr/bin/env bash # # Script to initialize angular app # @log startup time START_SERVER_TEIM=$(date +%s) rm -rf app # copy doc directory if [ "$APP_ENV" = "development" ]; then # save symlinks ln -s build/docs app else # resolve symlinks cp -Lr build/docs app fi # copy javascript files cp build/*.js app/ # copy img, javascript and other files for home page cp -r home app/home # copy home page cp docs/src/templates/home.html app/ cd app cat > "main.js" << EOF var express = require('express'); var connect = require('connect'); var app = express(); app.use(connect.compress()); console.log(__dirname); app.get('^(/|home\.html )$', function(req, res) { res.sendfile('home.html'); }); app.use(express.static(__dirname + '/')); // HTML5 URL Support app.get('^\/?(guide|api|cookbook|misc|tutorial|ui)(/)?*$', function(req, res) { res.sendfile('index.html'); }); var port = process.env.PORT || 8000; console.log('SERVER RUN ON PORT: ', port); app.listen(port); EOF END_SERVER_TEIM=$(date +%s) # @log startup time echo "SERVER START TIME: $((END_SERVER_TEIM - START_SERVER_TEIM))" node main.js
#!/usr/bin/env bash # # Script to initialize angular app set -e # @log startup time START_SERVER_TEIM=$(date +%s) rm -rf app # copy doc directory if [ "$APP_ENV" = "development" ]; then # save symlinks ln -s build/docs app else # resolve symlinks cp -Lr build/docs app fi # copy javascript files cp build/*.js app/ # copy img, javascript and other files for home page cp -r home app/home # copy home page cp docs/src/templates/home.html app/ cd app cat > "main.js" << EOF var express = require('express'); var connect = require('connect'); var app = express(); app.use(connect.compress()); console.log(__dirname); app.get('^(/|home\.html )$', function(req, res) { res.sendfile('home.html'); }); app.use(express.static(__dirname + '/')); // HTML5 URL Support app.get('^\/?(guide|api|cookbook|misc|tutorial|ui)(/)?*$', function(req, res) { res.sendfile('index.html'); }); var port = process.env.PORT || 8000; console.log('SERVER RUN ON PORT: ', port); app.listen(port); EOF END_SERVER_TEIM=$(date +%s) # @log startup time echo "SERVER START TIME: $((END_SERVER_TEIM - START_SERVER_TEIM))" node main.js
Remove debugging from simple starter
#!/usr/bin/env bash set -ex echo "Starting nginx .. " nginx -p `pwd` -c static_server/nginx.conf echo "nginx started" function stop_nginx { echo "Stopping nginx .. " kill "$(cat /tmp/nginx.pid)" echo "nginx Stopped" } && trap stop_nginx EXIT echo "Start initial build" jekyll build echo "Starting tracking master" while : do git fetch \ && [[ $(git diff master origin/master) ]] \ && git pull \ && jekyll build sleep 1 done
#!/usr/bin/env bash set -e echo "Starting nginx .. " nginx -p `pwd` -c static_server/nginx.conf echo "nginx started" function stop_nginx { echo "Stopping nginx .. " kill "$(cat /tmp/nginx.pid)" echo "nginx Stopped" } && trap stop_nginx EXIT echo "Start initial build" jekyll build echo "Starting tracking master" while : do git fetch \ && [[ $(git diff master origin/master) ]] \ && git pull \ && jekyll build sleep 1 done
Use this branch name for now
#!/usr/bin/env bash -e cd $APPCENTER_SOURCE_DIRECTORY ############################## ### Deploy Xcode extension ### ############################## if [ "$APPCENTER_BRANCH" == "master" ]; then appcenter \ build queue \ --app quicktype/quicktype-xcode \ --branch master \ --token $APPCENTER_TOKEN fi
#!/usr/bin/env bash -e cd $APPCENTER_SOURCE_DIRECTORY ############################## ### Deploy Xcode extension ### ############################## if [ "$APPCENTER_BRANCH" == "appcenter" ]; then appcenter \ build queue \ --app quicktype/quicktype-xcode \ --branch master \ --token $APPCENTER_TOKEN fi
Add header; restore PDF creation
#!/bin/bash set -eu if [[ ${#*} != 1 ]] then echo "Provide MODE!" exit 1 fi MODE=$1 # JWPLOT: https://github.com/jmjwozniak/jwplot jwplot $MODE.{cfg,eps,data} # convert $MODE.{eps,pdf}
#!/bin/bash set -eu # PLOT SH # Wraps JWPLOT, EPS->PDF conversion # Provide a MODE: load or rate if [[ ${#*} != 1 ]] then echo "Provide MODE!" exit 1 fi MODE=$1 # JWPLOT: https://github.com/jmjwozniak/jwplot jwplot $MODE.{cfg,eps,data} convert $MODE.{eps,pdf}
Stop devtest job to see if we can get the mapview jobs running more timely
#!/usr/bin/env bash # 2017-05-09 ND: Add second query and destination .json file to output # for `mapview_24h_clustered_devtest.sql` source db_settings.env psql -f mapview_schema.sql psql -f mapview_24h_processing.sql psql -q -f mapview_24h_clustered.sql |\ aws s3 cp - "${EXPORT_TARGET}"/json/view24h.json \ --acl public-read \ --cache-control "max-age=300" psql -q -f mapview_24h_clustered_devtest.sql |\ aws s3 cp - "${EXPORT_TARGET}"/json/view24h_devtest.json \ --acl public-read \ --cache-control "max-age=300"
#!/usr/bin/env bash # 2017-05-09 ND: Add second query and destination .json file to output # for `mapview_24h_clustered_devtest.sql` source db_settings.env psql -f mapview_schema.sql psql -f mapview_24h_processing.sql psql -q -f mapview_24h_clustered.sql |\ aws s3 cp - "${EXPORT_TARGET}"/json/view24h.json \ --acl public-read \ --cache-control "max-age=300" # psql -q -f mapview_24h_clustered_devtest.sql |\ # aws s3 cp - "${EXPORT_TARGET}"/json/view24h_devtest.json \ # --acl public-read \ # --cache-control "max-age=300"
Add pip --user paths to PATH
# -*- Mode: sh; coding: utf-8; indent-tabs-mode: nil; tab-width: 2 -*- # vim:set expandtab tabstop=2 fenc=utf-8 fileformat=unix filetype=sh: # extend $PATH if [ -d ${HOME}/.local/bin ]; then PATH="${HOME}/.local/bin:${PATH}" fi if [ -d ${HOME}/.composer/vendor/bin ]; then PATH="${HOME}/.composer/vendor/bin:${PATH}" fi # add ./bin to path PATH="./bin:$PATH" export CDPATH="$HOME/workspace:$HOME/playground:$CDPATH" export GOPATH="$HOME/workspace/golang"
# -*- Mode: sh; coding: utf-8; indent-tabs-mode: nil; tab-width: 2 -*- # vim:set expandtab tabstop=2 fenc=utf-8 fileformat=unix filetype=sh: # extend $PATH if [ -d ${HOME}/.local/bin ]; then PATH="${HOME}/.local/bin:${PATH}" fi if [ -d ${HOME}/.composer/vendor/bin ]; then PATH="${HOME}/.composer/vendor/bin:${PATH}" fi python --version 1>/dev/null 2>&1 && { _python_version="$(python --version 2>&1 | cut -d ' ' -f 2 | cut -d '.' -f 1,2)"; PATH="~/Library/Python/$_python_version/bin/:$PATH"; } python3 --version 1>/dev/null 2>&1 && { _python_version="$(python3 --version | cut -d ' ' -f 2 | cut -d '.' -f 1,2)"; PATH="~/Library/Python/$_python_version/bin/:$PATH"; } PATH="./bin:$PATH" export CDPATH="$HOME/workspace:$HOME/playground:$CDPATH" export GOPATH="$HOME/workspace/golang" #export PYTHONPATH="$PYTHONPATH:$HOME/.local/lib/python$(python --version)/site-packages"
Fix name of sms_conv tag in wikipedia tag mangling script (review by @jerith).
#!/bin/bash if [ $# -ne 3 ]; then echo "usage:" echo " $0 <user email> <USSD conv key> <SMS conv key>" exit 1 fi email_address="$1" ussd_conv="CONVERSATION:wikipedia_ussd:$2" sms_conv="TRANSPORT_TAG:wikipedia_sms:$3" . $(dirname $0)/mangle_routing_table_utils.sh assert_routing_exists $ussd_conv assert_routing_exists $sms_conv sms_tag=$(get_default_tag $sms_conv) || exit 1 # If any of these steps fail, stop immediately set -e mrt --remove $sms_conv default $sms_tag default mrt --remove $sms_tag default $sms_conv default mrt --add $ussd_conv sms_content $sms_tag default mrt --add $sms_tag default $ussd_conv sms_content
#!/bin/bash if [ $# -ne 3 ]; then echo "usage:" echo " $0 <user email> <USSD conv key> <SMS conv key>" exit 1 fi email_address="$1" ussd_conv="CONVERSATION:wikipedia_ussd:$2" sms_conv="CONVERSATION:wikipedia_sms:$3" . $(dirname $0)/mangle_routing_table_utils.sh assert_routing_exists $ussd_conv assert_routing_exists $sms_conv sms_tag=$(get_default_tag $sms_conv) || exit 1 # If any of these steps fail, stop immediately set -e mrt --remove $sms_conv default $sms_tag default mrt --remove $sms_tag default $sms_conv default mrt --add $ussd_conv sms_content $sms_tag default mrt --add $sms_tag default $ussd_conv sms_content
Fix location of sphinx directory
#!/usr/bin/env bash set -e #------------------------------------------------------------------------------- # Make SRPM to get a list of build dependencies #------------------------------------------------------------------------------- git submodule update --init --recursive ./packaging/make-srpm.sh dnf builddep -y build/SRPMS/* #------------------------------------------------------------------------------- # Generate a docs folder - run this from the root of the git repository. #------------------------------------------------------------------------------- rm -rf build mkdir build && cd build sphinx-build-3 -q -b html "${PWD}/sphinx" "${PWD}/build/html/" cmake -DENABLE_HTML_DOCS=TRUE .. make doc
#!/usr/bin/env bash set -e #------------------------------------------------------------------------------- # Make SRPM to get a list of build dependencies #------------------------------------------------------------------------------- git submodule update --init --recursive ./packaging/make-srpm.sh dnf builddep -y build/SRPMS/* #------------------------------------------------------------------------------- # Generate a docs folder - run this from the root of the git repository. #------------------------------------------------------------------------------- rm -rf build SPHINX_DIR="${PWD}/sphinx" mkdir build && cd build sphinx-build-3 -q -b html "${SPHINX_DIR}" "${PWD}/build/html/" cmake -DENABLE_HTML_DOCS=TRUE .. make doc
Support lancet completion when using shell integration
if [ -z "$LANCET_BIN" ]; then LANCET_BIN=$(which lancet); fi function lancet_helper() { export LANCET_SHELL_HELPER=$(mktemp -u -t lancet_) $LANCET_BIN $@ if [ $? -a -f $LANCET_SHELL_HELPER ] ; then source $LANCET_SHELL_HELPER fi rm -f $LANCET_SHELL_HELPER unset LANCET_SHELL_HELPER } alias lancet=lancet_helper
if [ -z "$LANCET_BIN" ]; then LANCET_BIN=$(which lancet); fi function lancet_helper() { export LANCET_SHELL_HELPER=$(mktemp -u -t lancet_) $LANCET_BIN $@ if [ $? -a -f $LANCET_SHELL_HELPER ] ; then source $LANCET_SHELL_HELPER fi rm -f $LANCET_SHELL_HELPER unset LANCET_SHELL_HELPER } compdef _lancet lancet_helper alias lancet=lancet_helper
Package man page and LICENSE with executable
# Build script shamelessly stolen from ripgrep :) cargo build --target $TARGET --release build_dir=$(mktemp -d 2>/dev/null || mktemp -d -t tmp) out_dir=$(pwd) name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}" mkdir "$build_dir/$name" cp target/$TARGET/release/watchexec "$build_dir/$name/" pushd $build_dir tar czf "$out_dir/$name.tar.gz" * popd
# Build script shamelessly stolen from ripgrep :) cargo build --target $TARGET --release build_dir=$(mktemp -d 2>/dev/null || mktemp -d -t tmp) out_dir=$(pwd) name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}" mkdir "$build_dir/$name" cp target/$TARGET/release/watchexec "$build_dir/$name/" cp {doc/watchexec.1,LICENSE} "$build_dir/$name/" pushd $build_dir tar czf "$out_dir/$name.tar.gz" * popd
Update Shebang in Linux Wrapper Script to /usr/bin/env bash
#!/bin/bash export SRC_DIR=$(cd "$(dirname "$0")"; pwd) $SRC_DIR/__AUTO_UPDATE_BUNDLE__/exo_browser/exo_browser \ --raw $SRC_DIR/__AUTO_UPDATE_BUNDLE__/breach_core
#!/usr/bin/env bash export SRC_DIR=$(cd "$(dirname "$0")"; pwd) $SRC_DIR/__AUTO_UPDATE_BUNDLE__/exo_browser/exo_browser \ --raw $SRC_DIR/__AUTO_UPDATE_BUNDLE__/breach_core
Move actual monit calls to outside the loop, as it seems the stop commands repeatedly put the tasks to "pending"
#!/bin/bash for x in {0..120} do echo -n "." # DEPRECATED: should be removed in the near future sudo monit stop recalculate > /dev/null 2>&1 # When monit is already busy quiting OR starting, it can throw an error message: # monit: action failed -- Other action already in progress -- please try again later # We don't care for that message, so we just pipe every message to /dev/null sudo monit stop resque > /dev/null 2>&1 i=`sudo monit status | grep --after-context=1 --extended-regexp 'resque' | grep --extended-regexp 'not monitored$' | wc --lines` if [ "$i" -eq "1" ]; then echo -e "\nDone."; # Nice little line-break exit 0 fi sleep 1 done echo "Execution failed" exit 1
#!/bin/bash # DEPRECATED: should be removed in the near future sudo monit stop recalculate > /dev/null 2>&1 # When monit is already busy quiting OR starting, it can throw an error message: # monit: action failed -- Other action already in progress -- please try again later # We don't care for that message, so we just pipe every message to /dev/null sudo monit stop resque > /dev/null 2>&1 for x in {0..120} do echo -n "." i=`sudo monit status | grep --after-context=1 --extended-regexp 'resque' | grep --extended-regexp 'not monitored$' | wc --lines` if [ "$i" -eq "1" ]; then echo -e "\nDone."; # Nice little line-break exit 0 fi sleep 1 done echo "Execution failed" exit 1
Fix for version.txt creation on linux
#!/bin/sh if [ -z "$1" ] then echo "$0 <outpath>" elif which -s svn then if [ -d .svn ] then LC_ALL=C svn info >"$1/version.txt" else REL=$(basename $PWD) echo "Version: $REL" >$1/version.txt fi fi
#!/bin/sh if [ -z "$1" ] then echo "$0 <outpath>" elif which svn >/dev/null then if [ -d .svn ] then LC_ALL=C svn info >"$1/version.txt" else REL=$(basename $PWD) echo "Version: $REL" >$1/version.txt fi fi
Update script with directory option
#!/bin/bash function deploy_proxy { if hash apigeetool 2>/dev/null; then printf "\n\nUsing apigeetool to deploy the proxy to the $env environment in the $org org...\n\n" apigeetool deployproxy -o $org -e $env -n learn-edge -L $url -u $username -p $password -V printf "\nIf 'State: deployed', then your API Proxy is ready to be invoked.\n" printf "\nRun 'invoke.sh'\n" else printf "\n\n****Exiting: You must install apigeetool: npm -install apigeetool -g\n\n" fi }
#!/bin/bash function deploy_proxy { if hash apigeetool 2>/dev/null; then printf "\n\nUsing apigeetool to deploy the proxy to the $env environment in the $org org...\n\n" apigeetool deployproxy -o $org -e $env -n learn-edge -L $url -u $username -d . -p $password -V printf "\nIf 'State: deployed', then your API Proxy is ready to be invoked.\n" printf "\nRun 'invoke.sh'\n" else printf "\n\n****Exiting: You must install apigeetool: npm -install apigeetool -g\n\n" fi }
Configure /data as a persisted directory.
#!/bin/bash # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Setup a persistent directory for /var/lib/openshift BOOT2DOCKER_DATA=`blkid -o device -l -t LABEL=boot2docker-data` PARTNAME=`echo "$BOOT2DOCKER_DATA" | sed 's/.*\///'` mkdir -p /mnt/$PARTNAME/var/lib/minishift ln -s /mnt/$PARTNAME/var/lib/minishift /var/lib/minishift
#!/bin/bash # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Setup a persistent directory for /var/lib/openshift BOOT2DOCKER_DATA=`blkid -o device -l -t LABEL=boot2docker-data` PARTNAME=`echo "$BOOT2DOCKER_DATA" | sed 's/.*\///'` mkdir -p /mnt/$PARTNAME/var/lib/minishift ln -s /mnt/$PARTNAME/var/lib/minishift /var/lib/minishift mkdir -p /mnt/$PARTNAME/data ln -s /mnt/$PARTNAME/data /data
Add file completion as optional argument
# ------------------------------------------------------------------------------ # FILE: n98-magerun.plugin.zsh # DESCRIPTION: oh-my-zsh n98-magerun plugin file. Adapted from composer plugin # AUTHOR: Andrew Dwyer (andrewrdwyer at gmail dot com) # VERSION: 1.0.0 # ------------------------------------------------------------------------------ # n98-magerun basic command completion _n98_magerun_get_command_list () { n98-magerun.phar --no-ansi | sed "1,/Available commands/d" | awk '/^ +[a-z\-:]+/ { print $1 }' } _n98_magerun () { compadd `_n98_magerun_get_command_list` } compdef _n98_magerun n98-magerun.phar compdef _n98_magerun n98-magerun # Aliases alias n98='n98-magerun.phar' alias mage='n98-magerun.phar' alias magefl='n98-magerun.phar cache:flush' # Install n98-magerun into the current directory alias mage-get='wget https://raw.github.com/netz98/n98-magerun/master/n98-magerun.phar'
# ------------------------------------------------------------------------------ # FILE: n98-magerun.plugin.zsh # DESCRIPTION: oh-my-zsh n98-magerun plugin file. Adapted from composer plugin # AUTHOR: Andrew Dwyer (andrewrdwyer at gmail dot com) # VERSION: 1.0.0 # ------------------------------------------------------------------------------ # n98-magerun basic command completion _n98_magerun_get_command_list () { $_comp_command1 --no-ansi | sed "1,/Available commands/d" | awk '/^ +[a-z\-:]+/ { print $1 }' } _n98_magerun () { _arguments '1: :->command' '*:optional arg:_files' case $state in command) compadd $(_n98_magerun_get_command_list) ;; *) esac } compdef _n98_magerun n98-magerun.phar compdef _n98_magerun n98-magerun # Aliases alias n98='n98-magerun.phar' alias mage='n98-magerun.phar' alias magefl='n98-magerun.phar cache:flush' # Install n98-magerun into the current directory alias mage-get='wget https://raw.github.com/netz98/n98-magerun/master/n98-magerun.phar'
Add sc-launch as a shortcut for start & enable
user_commands=( list-units is-active status show help list-unit-files is-enabled list-jobs show-environment) sudo_commands=( start stop reload restart try-restart isolate kill reset-failed enable disable reenable preset mask unmask link load cancel set-environment unset-environment) for c in $user_commands; do; alias sc-$c="systemctl $c"; done for c in $sudo_commands; do; alias sc-$c="sudo systemctl $c"; done
user_commands=( list-units is-active status show help list-unit-files is-enabled list-jobs show-environment) sudo_commands=( start stop reload restart try-restart isolate kill reset-failed enable disable reenable preset mask unmask link load cancel set-environment unset-environment) for c in $user_commands; do; alias sc-$c="systemctl $c"; done for c in $sudo_commands; do; alias sc-$c="sudo systemctl $c"; done alias sc-launch="sudo systemctl start $c && sudo systemctl enable $c"
Remove /etc/salt/pki/minion/minion_master.pub when switching masters
#!/bin/sh LM=$(cat /etc/susecon2017/local_master) sed -i "s/^master.*\$/master: $LM/" /etc/salt/minion.d/ceph.conf
#!/bin/sh LOCAL_MASTER=$(cat /etc/susecon2017/local_master) sed -i "s/^master.*\$/master: $LOCAL_MASTER/" /etc/salt/minion.d/ceph.conf rm -f /etc/salt/pki/minion/minion_master.pub
Correct user changement in boot script
#!/bin/bash USER=pi TVJS_REPO=https://github.com/SamyPesse/tv.js.git TVJS_DIR="/home/${USER}/tv.js" OUT="/home/${USER}/tvjs.log" case "$1" in update) echo "updating tv.js from $TVJS_REPO" rm -rf $TVJS_DIR mkdir $TVJS_DIR git clone $TVJS_REPO $TVJS_DIR cd $TVJS_DIR rake install > $OUT 2>$OUT & rake build > $OUT 2>$OUT & ;; start) echo "starting tv.js: $TVJS_DIR" cd $TVJS_DIR rake run > $OUT 2>$OUT & export DISPLAY=:0.0 chromium --kiosk http://localhost:8888 ;; stop) killall $RAKE ;; *) echo "usage: $0 (start|stop)" esac exit 0
#!/bin/bash USER=pi TVJS_REPO=https://github.com/SamyPesse/tv.js.git TVJS_DIR="/home/${USER}/tv.js" OUT="/home/${USER}/tvjs.log" case "$1" in update) echo "updating tv.js from $TVJS_REPO" su - $USER rm -rf $TVJS_DIR mkdir $TVJS_DIR git clone $TVJS_REPO $TVJS_DIR cd $TVJS_DIR rake install > $OUT 2>$OUT & rake build > $OUT 2>$OUT & ;; start) echo "starting tv.js: $TVJS_DIR" su - $USER cd $TVJS_DIR rake run > $OUT 2>$OUT & export DISPLAY=:0.0 chromium --kiosk http://localhost:8888 ;; stop) killall $RAKE ;; *) echo "usage: $0 (start|stop)" esac exit 0
Extend Newton comparison (different skip factors)
#!/bin/sh LOOPS="$1" shift [ -z "$LOOPS" ] && exit 1 run_measure() { echo "$1" shift ./measure-newton.sh "$LOOPS" "$@" 2>&1 | tail -n 1 | sed 's/^[>]*//' } run_measure "Without any agent" run_measure "With bare agent" --std-agent-bare run_measure "With agent, without measuring" --std-agent=no-measuring run_measure "With agent, with measuring" --std-agent
#!/bin/sh LOOPS="$1" shift [ -z "$LOOPS" ] && exit 1 run_measure() { echo "$1" shift ./measure-newton.sh "$LOOPS" "$@" 2>&1 | tail -n 1 | sed 's/^[>]*//' } run_measure "Without any agent" run_measure "With bare agent" --std-agent-bare run_measure "With agent, without measuring" --std-agent=no-measuring run_measure "With agent, with measuring (unobtrusive)" "--std-agent=,skip.factor=1000" run_measure "With agent, with measuring" "--std-agent" run_measure "With agent, with measuring (intensive)" "--std-agent=,skip.factor=2" run_measure "With agent, with measuring (very intensive)" "--std-agent=,skip.factor=1"
Remove PUPPET_MAJ_VERSION check for unit modules
#!/bin/bash # # This script is used by Puppet OpenStack modules to prepare # modules before running dependencies. # set -ex if [ -n "${GEM_HOME}" ]; then GEM_BIN_DIR=${GEM_HOME}/bin/ export PATH=${PATH}:${GEM_BIN_DIR} fi if [ "${PUPPET_MAJ_VERSION}" = 4 ]; then export PUPPET_BASE_PATH=/etc/puppetlabs/code else export PUPPET_BASE_PATH=/etc/puppet fi export SCRIPT_DIR=$(cd `dirname $0` && pwd -P) export PUPPETFILE_DIR=${PUPPETFILE_DIR:-${PUPPET_BASE_PATH}/modules} source $SCRIPT_DIR/functions print_header 'Start (install_modules_unit.sh)' print_header 'Install Modules' install_modules print_header 'Module List' puppet module list --modulepath ./spec/fixtures/modules print_header 'Done (install_modules_unit.sh)'
#!/bin/bash # # This script is used by Puppet OpenStack modules to prepare # modules before running dependencies. # set -ex if [ -n "${GEM_HOME}" ]; then GEM_BIN_DIR=${GEM_HOME}/bin/ export PATH=${PATH}:${GEM_BIN_DIR} fi export PUPPET_BASE_PATH=/etc/puppetlabs/code export SCRIPT_DIR=$(cd `dirname $0` && pwd -P) export PUPPETFILE_DIR=${PUPPETFILE_DIR:-${PUPPET_BASE_PATH}/modules} source $SCRIPT_DIR/functions print_header 'Start (install_modules_unit.sh)' print_header 'Install Modules' install_modules print_header 'Module List' puppet module list --modulepath ./spec/fixtures/modules print_header 'Done (install_modules_unit.sh)'
Deploy sqlplus code to all dbs, including EGRNARLS
#!/bin/bash # Copyright [2009-2014] EMBL-European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Deploy all packages, functions, and procedures to all Oracle instances # using the automatically generated script install_all_code.sql from the sql folder. # find directory of this script DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" STARTING_DIR=`pwd` cd $DIR cd ../sql echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNAPRO' echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNADEV' echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNATST' # return to the original location cd $STARTING_DIR
#!/bin/bash # Copyright [2009-2014] EMBL-European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Deploy all packages, functions, and procedures to all Oracle instances # using the automatically generated script install_all_code.sql from the sql folder. # find directory of this script DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" STARTING_DIR=`pwd` cd $DIR cd ../sql echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNAPRO' echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNADEV' echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNATST' echo "@install_all_code.sql" | sqlplus $ORACLE_USER'/'$ORACLE_PASSWORD'@EGRNARLS' # return to the original location cd $STARTING_DIR
Add evaluation using moses bleu script
#!/bin/sh MODEL=$1 INPUT='/u/vineeku6/storage/train-data/ellipsis/unk-200-v2/valid_src.txt' GOLD='/u/vineeku6/storage/train-data/ellipsis/unk-200-v2/valid_target.txt' OUT=out-$MODEL.txt python translate_all.py $MODEL.npz $INPUT $OUT python convert_symbols.py $INPUT $OUT python report.py $INPUT $OUT $GOLD $MODEL
#!/bin/sh MODEL=$1 INPUT='/u/vineeku6/storage/train-data/ellipsis/raw-5/valid_src.txt' GOLD='/u/vineeku6/storage/train-data/ellipsis/raw-5/valid_target.txt' OUT=out-$MODEL.txt python translate_all.py $MODEL.npz $INPUT $OUT --all #python translate_all.py $MODEL.npz $INPUT $OUT python convert_symbols.py $INPUT $OUT ./multi-bleu.perl $GOLD.nounk < $OUT.nounk &> bleu-$MODEL.out python report.py $INPUT $OUT $GOLD $MODEL
Move mount path to a variable
#!/bin/sh -eux SSH_USER="${SSH_USERNAME:-vagrant}" SSH_USER_HOME="${SSH_USER_HOME:-/home/${SSH_USER}}" case "$PACKER_BUILDER_TYPE" in virtualbox-iso|virtualbox-ovf) echo "==> Installing VirtualBox guest additions" VER=$(cat "$SSH_USER_HOME"/.vbox_version) ISO="VBoxGuestAdditions_$VER.iso"; mkdir -p /tmp/vbox; mount -o loop" $SSH_USER_HOME/$ISO" /tmp/vbox; sh /tmp/vbox/VBoxLinuxAdditions.run \ || echo "VBoxLinuxAdditions.run exited $? and is suppressed." \ "For more read https://www.virtualbox.org/ticket/12479"; umount /tmp/vbox; rm -rf /tmp/vbox; rm -f "$SSH_USER_HOME"/*.iso; ;; esac
#!/bin/sh -eux SSH_USER="${SSH_USERNAME:-vagrant}" SSH_USER_HOME="${SSH_USER_HOME:-/home/${SSH_USER}}" case "$PACKER_BUILDER_TYPE" in virtualbox-iso|virtualbox-ovf) echo "==> Installing VirtualBox guest additions" VER=$(cat "$SSH_USER_HOME"/.vbox_version) ISO="VBoxGuestAdditions_$VER.iso"; MOUNT_PATH="/tmp/vbox" mkdir -p "$MOUNT_PATH"; mount -o loop" $SSH_USER_HOME/$ISO" "$MOUNT_PATH"; sh "$MOUNT_PATH"/VBoxLinuxAdditions.run \ || echo "VBoxLinuxAdditions.run exited $? and is suppressed." \ "For more read https://www.virtualbox.org/ticket/12479"; umount "$MOUNT_PATH"; rm -rf "$MOUNT_PATH"; rm -f "$SSH_USER_HOME"/*.iso; ;; esac
Add some prompt info when starting swap instance.
#!/bin/sh SSDB_CFG=ssdb.conf.template REDIS_CFG=redis.conf.template SSDB=../../../build/ssdb-server REDIS=../../../build/redis-server if [ -n "$1" ];then REDISPORT=$1 else REDISPORT=6379 fi DIR=./tmp/${REDISPORT}_dir cleanup(){ killall -9 ssdb-server killall -9 redis-server rm -r $DIR } prepare_dir() { mkdir -p $DIR cp $SSDB_CFG $REDIS_CFG $DIR } startSSDBServer(){ sed -i "s/6379/$REDISPORT/g" $SSDB_CFG $SSDB -d $SSDB_CFG } startRedisServer(){ sed -i "s/port [0-9]\{1,\}/port $REDISPORT/g" $REDIS_CFG $REDIS $REDIS_CFG &> /dev/null & } startServer(){ cd $DIR startSSDBServer sleep 1 startRedisServer sleep 1 } cleanup prepare_dir startServer
#!/bin/sh SSDB_CFG=ssdb.conf.template REDIS_CFG=redis.conf.template SSDB=../../../build/ssdb-server REDIS=../../../build/redis-server if [ -n "$1" ];then REDISPORT=$1 else REDISPORT=6379 fi DIR=./tmp/${REDISPORT}_dir prepare_dir() { mkdir -p $DIR cp $SSDB_CFG $REDIS_CFG $DIR } startSSDBServer(){ if [[ ! -f "$SSDB" ]]; then echo "$SSDB not exists, must build it at first!" exit 0 fi sed -i "s/6379/$REDISPORT/g" $SSDB_CFG mv $SSDB_CFG ${SSDB_CFG}_${REDISPORT} $SSDB -d ${SSDB_CFG}_${REDISPORT} -s restart &> /dev/null } startRedisServer(){ if [[ ! -f "$REDIS" ]]; then echo "$REDIS not exists, must build it at first!" exit 0 fi sed -i "s/port [0-9]\{1,\}/port $REDISPORT/g" $REDIS_CFG $REDIS $REDIS_CFG &> /dev/null & } prepare_dir cd $DIR startSSDBServer startRedisServer if [ "" == "`ps -ef | grep ssdb-server | grep "_$REDISPORT"`" ]; then echo "ssdb-server at port $REDISPORT not start!" exit 0 fi if [ "" == "`ps -ef | grep redis-server | grep ":$REDISPORT"`" ]; then echo "redis-server at port $REDISPORT not start!" exit 0 fi echo "Start SWAP server at port $REDISPORT success!"
Add a notice to PHPUnit saying it'll take a while
#!/bin/bash echo "Installing Pear" yum install -y php-pear echo "Adding Pear channels" pear channel-discover pear.phpunit.de pear channel-discover pear.symfony.com echo "Installing PHPUnit" pear install phpunit/PHPUnit echo "PHPUnit and Pear installed"
#!/bin/bash echo "Installing Pear" yum install -y php-pear echo "Adding Pear channels" pear channel-discover pear.phpunit.de pear channel-discover pear.symfony.com echo "Installing PHPUnit - note this might take a while" pear install phpunit/PHPUnit echo "PHPUnit and Pear installed"
Rewrite the script to be sourced instead
#!/bin/bash # Install a custom Go version, https://golang.org/ # # Add at least the following environment variables to your project configuration # (otherwise the defaults below will be used). # * GO_VERSION # # Include in your builds via # \curl -sSL https://raw.githubusercontent.com/codeship/scripts/master/languages/go.sh | bash -s GO_VERSION=${GO_VERSION:="1.4.2"} # check required parameters GOROOT=${GOROOT:?'You need to configure the GOROOT environment variable! Please set it to "/tmp/go"'} if [ ! $(echo $PATH | grep "$GOROOT/bin") ]; then >&2 echo "Please add '$GOOROT/bin' to the beginning of the PATH." fi set -e CACHED_DOWNLOAD="${HOME}/cache/go${GO_VERSION}.linux-amd64.tar.gz" mkdir -p "${GOROOT}" wget --continue --output-document "${CACHED_DOWNLOAD}" "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" tar -xaf "${CACHED_DOWNLOAD}" --strip-components=1 --directory "${GOROOT}"
#!/bin/bash # Install a custom Go version, https://golang.org/ # # Add at least the following environment variables to your project configuration # (otherwise the defaults below will be used). # * GO_VERSION # # Include in your builds via # \curl -sSL https://raw.githubusercontent.com/codeship/scripts/master/languages/go.sh | bash -s GO_VERSION=${GO_VERSION:="1.4.2"} # check required parameters export GOROOT="/tmp/go" export PATH="${GOROOT}/bin:${PATH}" set -e CACHED_DOWNLOAD="${HOME}/cache/go${GO_VERSION}.linux-amd64.tar.gz" mkdir -p "${GOROOT}" wget --continue --output-document "${CACHED_DOWNLOAD}" "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" tar -xaf "${CACHED_DOWNLOAD}" --strip-components=1 --directory "${GOROOT}"
Fix shell script. Now it works like the Makefile
#!/bin/sh RESULT_DIR="result" OUTPUT_DIR="../temp" FILE="thesis" mkdir $RESULT_DIR cd tex mkdir $OUTPUT_DIR pdflatex -output-directory="$OUTPUT_DIR" -synctex=1 -interaction=nonstopmode ${FILE}.tex bibtex $OUTPUT_DIR/${FILE}.aux makeindex $OUTPUT_DIR/${FILE}.idx pdflatex -output-directory="$OUTPUT_DIR" -synctex=1 -interaction=nonstopmode ${FILE}.tex pdflatex -output-directory="$OUTPUT_DIR" -synctex=2 -interaction=nonstopmode ${FILE}.tex cat $OUTPUT_DIR/${FILE}.pdf > ../$RESULT_DIR/${FILE}.pdf cd ..
#!/bin/sh RESULT_DIR="../result" OUTPUT_DIR="../temp" FILE="thesis" cd tex mkdir $RESULT_DIR mkdir $OUTPUT_DIR cp literatur.bib $OUTPUT_DIR pdflatex -output-directory="$OUTPUT_DIR" -synctex=1 -interaction=nonstopmode ${FILE}.tex cd $OUTPUT_DIR bibtex ${FILE}.aux makeindex ${FILE}.idx cd ../tex pdflatex -output-directory="$OUTPUT_DIR" -synctex=1 -interaction=nonstopmode ${FILE}.tex pdflatex -output-directory="$OUTPUT_DIR" -synctex=2 -interaction=nonstopmode ${FILE}.tex cat $OUTPUT_DIR/${FILE}.pdf > $RESULT_DIR/${FILE}.pdf cd ..
Stop execution of tests when a container step fails
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" cd "${SCRIPT_DIR}" find . -type d -mindepth 2 -path "*/dockerfiles/*" | sort --reverse | while read directory; do echo "Running ${directory}" && pushd "${directory}" && bash "1_build.sh" && bash "2_start.sh" && bash "3_test.sh" && bash "4_stop.sh" && popd done
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" cd "${SCRIPT_DIR}" find . -type d -mindepth 2 -path "*/dockerfiles/*" | sort --reverse | while read directory; do printf '=%.0s' {1..80} echo -e "\nRunning ${directory}" pushd "${directory}" bash "1_build.sh" if [[ $? -ne 0 ]]; then exit 1 fi bash "2_start.sh" if [[ $? -ne 0 ]]; then exit 1 fi bash "3_test.sh" if [[ $? -ne 0 ]]; then exit 1 fi bash "4_stop.sh" if [[ $? -ne 0 ]]; then exit 1 fi popd done
Revert "Version numbers are weird."
#!/bin/bash # Shell script for installing hyper's dependencies on Travis. In particular, # this upgrades the OpenSSL version used on Travis. set -e set -x sudo add-apt-repository -y "ppa:lukasaoz/openssl101-ppa" sudo apt-get -y update sudo apt-get install -y --force-yes openssl libssl1.0.0 libcrypto1.0.0 pip install . pip install -r test_requirements.txt
#!/bin/bash # Shell script for installing hyper's dependencies on Travis. In particular, # this upgrades the OpenSSL version used on Travis. set -e set -x sudo add-apt-repository -y "ppa:lukasaoz/openssl101-ppa" sudo apt-get -y update sudo apt-get install -y --force-yes openssl libssl libcrypto pip install . pip install -r test_requirements.txt
Update RabbitMQ recipe (docker container creation)
#!/usr/bin/env bash echo "\033[92m# Creating Redis container \033[0m" docker run --name redis-store -d -p 6379:6379 redis echo "\033[92m# Creating PostgreSQL container \033[0m" docker run --name postgres-store -d -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=$DATABASE_PASSWORD -p 5432:5432 postgres echo "\033[92m# Creating MongoDB container \033[0m" docker run --name mongo-store -d -p 27017:27017 mongo echo "\033[92m# Creating RabbitMQ container \033[0m" docker run --name rabbit-store --hostname rabbitmq-default -d -p 5672:5672 rabbitmq
#!/usr/bin/env bash echo "\033[92m# Creating Redis container \033[0m" docker run --name redis-store -d -p 6379:6379 redis echo "\033[92m# Creating PostgreSQL container \033[0m" docker run --name postgres-store -d -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=$DATABASE_PASSWORD -p 5432:5432 postgres echo "\033[92m# Creating MongoDB container \033[0m" docker run --name mongo-store -d -p 27017:27017 mongo echo "\033[92m# Creating RabbitMQ container \033[0m" docker run --name rabbit-store --hostname rabbitmq-default -d -e RABBITMQ_DEFAULT_USER=rabbit -e RABBITMQ_DEFAULT_PASS=$RABBITMQ_PASSWORD -p 5672:5672 rabbitmq
Increase limit of handled builds 2 -> 20 for buildbot logs uploader
#! /bin/bash # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Runs a single instance of the log uploader. # invocation: upload_runner.sh <master name> # This script is intended to be run from cron. UPLOAD_SCRIPT=~/buildbot/build/scripts/master/upload_logs_to_storage.py LIMIT=2 function msg_exit() { echo 'Upload script already running, exiting.' exit 0 } function usage() { echo "Usage: $0 <master name> <bucket name>" exit 1 } if [ -z "$1" ]; then usage fi if [ -z "$2" ]; then usage fi mastername="$1" bucketname="$2" ( flock -n 9 || msg_exit $UPLOAD_SCRIPT --master-name=$mastername --bucket=$bucketname --limit=$LIMIT ) 9>/var/lock/upload_logs_to_storage-$mastername # the '9' on the previous line is the file descriptor used by flock above.
#! /bin/bash # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Runs a single instance of the log uploader. # invocation: upload_runner.sh <master name> # This script is intended to be run from cron. UPLOAD_SCRIPT=~/buildbot/build/scripts/master/upload_logs_to_storage.py LIMIT=20 function msg_exit() { echo 'Upload script already running, exiting.' exit 0 } function usage() { echo "Usage: $0 <master name> <bucket name>" exit 1 } if [ -z "$1" ]; then usage fi if [ -z "$2" ]; then usage fi mastername="$1" bucketname="$2" ( flock -n 9 || msg_exit $UPLOAD_SCRIPT --master-name=$mastername --bucket=$bucketname --limit=$LIMIT ) 9>/var/lock/upload_logs_to_storage-$mastername # the '9' on the previous line is the file descriptor used by flock above.
Add code for variable replacement at build time
#!/usr/bin/env bash if ! [[ -f ./.env ]]; then cp .env.dist .env; fi export myUID=`id -u ${whoami}` export myGID=`id -g ${whoami}` export myIP=`ifconfig $(netstat -rn | grep -E "^default|^0.0.0.0" | head -1 | awk '{print $NF}') | grep 'inet ' | awk '{print $2}' | grep -Eo '([0-9]*\.){3}[0-9]*'` cp src/main/resources/start.rb docker/ docker-compose -f docker-compose_deploy.yml build --no-cache --force-rm docker-compose -f docker-compose_deploy.yml stop docker-compose -f docker-compose_deploy.yml rm -f docker-compose -f docker-compose_deploy.yml up -d
#!/usr/bin/env bash if ! [[ -f ./.env ]]; then cp .env.dist .env; fi export myUID=`id -u ${whoami}` export myGID=`id -g ${whoami}` export myIP=`ifconfig $(netstat -rn | grep -E "^default|^0.0.0.0" | head -1 | awk '{print $NF}') | grep 'inet ' | awk '{print $2}' | grep -Eo '([0-9]*\.){3}[0-9]*'` cp docker/solr/Dockerfile_orig docker/solr/Dockerfile cp docker/redis/Dockerfile_orig docker/redis/Dockerfile cp docker/Dockerfile_orig docker/Dockerfile sed -i '' "s|<UID>|${myUID}|" ./docker/solr/Dockerfile sed -i '' "s|<GID>|${myGID}|" ./docker/solr/Dockerfile sed -i '' "s|<UID>|${myUID}|" ./docker/redis/Dockerfile sed -i '' "s|<GID>|${myGID}|" ./docker/redis/Dockerfile sed -i '' "s|<UID>|${myUID}|" ./docker/Dockerfile sed -i '' "s|<GID>|${myGID}|" ./docker/Dockerfile #mvn clean package #cp target/nlh-importer-verticle-1.0-SNAPSHOT.jar docker/lib/ cp src/main/resources/start.rb docker/ #docker-compose -f docker-compose_deploy.yml build --force-rm --no-cache docker-compose -f docker-compose_deploy.yml build --force-rm docker-compose -f docker-compose_deploy.yml stop docker-compose -f docker-compose_deploy.yml rm -f docker-compose -f docker-compose_deploy.yml up -d
Reduce hermes build box noise with sleep OPS-4107
#!/bin/bash # This is only relevant for AWS instances, and shouldnt be added or run otherwise. # This script exists because when we build amis we take a snapshot and when we take this snapshot # the best practice is to reboot the instance since if you do not do this reboot the instance's # file system integrity cannot be guaranteed. # Since we monitor hermes, this causes errors that are not a problem to be logged when hermes fails to run correctly # on build boxes. # This script is run before hermes is started, preventing it from booting during builds. # This is a hack to return 1 if build box, 0 otherwise aws sts get-caller-identity --output=text --query 'Arn' | grep -iv 'gocd' > /dev/null
#!/bin/bash # This is only relevant for AWS instances, and shouldnt be added or run otherwise. # This script exists because when we build amis we take a snapshot and when we take this snapshot # the best practice is to reboot the instance since if you do not do this reboot the instance's # file system integrity cannot be guaranteed. # Since we monitor hermes, this causes errors that are not a problem to be logged when hermes fails to run correctly # on build boxes. # This script is run before hermes is started, preventing it from booting during builds. # Default startup timeout in systemd is 60 seconds, sleep 50 means we should return before the timeout sleep_time=50 # This is a hack to sleep and then return 1 if on a build box # The sleep slows down the looping caused by systemd trying to start the service again if it failed. # Just returning 1 causes tons of "Unit entered failed state" messages. This will reduce them to 1 a minute or so. if aws sts get-caller-identity --output=text --query 'Arn' | grep -q 'gocd'; then echo "Detected build server, sleeping ${sleep_time} seconds to reduce log noise" sleep $sleep_time exit 1 fi
Change setup ansible clone url to use https
#!/bin/bash sudo apt-get -y install git python-pip sudo pip install jinja2 u=$(whoami) g=$(groups | awk '{print $1}') mkdir -p /opt/stack || (sudo mkdir -p /opt/stack && sudo chown $u:$g /opt/stack) cd /opt/stack if [ ! -d ansible ]; then git clone git://github.com/ansible/ansible.git --recursive else cd ansible git pull --rebase git submodule update --init --recursive fi echo echo "Run the following commands to proceed: " echo echo "source env-vars" echo "source /opt/stack/ansible/hacking/env-setup" echo
#!/bin/bash sudo apt-get -y install git python-pip sudo pip install jinja2 u=$(whoami) g=$(groups | awk '{print $1}') mkdir -p /opt/stack || (sudo mkdir -p /opt/stack && sudo chown $u:$g /opt/stack) cd /opt/stack if [ ! -d ansible ]; then git clone https://github.com/ansible/ansible.git --recursive else cd ansible git pull --rebase git submodule update --init --recursive fi echo echo "Run the following commands to proceed: " echo echo "source env-vars" echo "source /opt/stack/ansible/hacking/env-setup" echo
Update script to add opbeat releases
#!/bin/bash cd /home/deploy/Env/logtacts/ source bin/activate cd /home/deploy/logtacts/ git pull --rebase pip install -r requirements.txt kill -HUP `cat /tmp/logtacts-master.pid`
#!/bin/bash cd /home/deploy/Env/logtacts/ source bin/activate cd /home/deploy/logtacts/ source .env git pull --rebase pip install -r requirements.txt kill -HUP `cat /tmp/logtacts-master.pid` curl https://intake.opbeat.com/api/v1/organizations/$OPBEAT_ORG_ID/apps/$OPBEAT_APP_ID/releases/ \     -H "Authorization: Bearer $OPBEAT_SECRET_KEY" \     -d rev=`git log -n 1 --pretty=format:%H` \     -d branch=`git rev-parse --abbrev-ref HEAD` \     -d status=completed
Install boxkeeper SSH keys when provisioning
#!/bin/bash set -e set -u set -x KEEPER_USERNAME="boxkeeper" RETURN_DIR=$(pwd) if id -u "$KEEPER_USERNAME" >/dev/null 2>&1; then echo -n "User '$KEEPER_USERNAME' already exists... skipping" else echo -n "Creating '$KEEPER_USERNAME' user..." useradd -G wheel $KEEPER_USERNAME echo "$KEEPER_USERNAME:vagrant" | chpasswd echo " done" fi cd $RETURN_DIR
#!/bin/bash set -e set -u set -x KEEPER_USERNAME="boxkeeper" KEEPER_HOMEDIR="/home/$KEEPER_USERNAME" KEEPER_SSHDIR="$KEEPER_HOMEDIR/.ssh" KEEPER_KEYS="$KEEPER_SSHDIR/authorized_keys" RETURN_DIR=$(pwd) if id -u "$KEEPER_USERNAME" >/dev/null 2>&1; then echo -n "User '$KEEPER_USERNAME' already exists... skipping" else echo -n "Creating '$KEEPER_USERNAME' user..." useradd -G wheel $KEEPER_USERNAME echo "$KEEPER_USERNAME:vagrant" | chpasswd echo " done" fi mkdir -p $KEEPER_SSHDIR chmod 0700 $KEEPER_SSHDIR touch $KEEPER_KEYS chmod 0600 $KEEPER_KEYS curl -sS https://raw.githubusercontent.com/lightster/.ssh/master/id_rsa.lightster-air.pub \ https://raw.githubusercontent.com/lightster/.ssh/master/id_rsa.lightster-air.pub \ > $KEEPER_KEYS chown -R $KEEPER_USERNAME:$KEEPER_USERNAME $KEEPER_SSHDIR cd $RETURN_DIR
Use dotnet to push package
echo "Deploying DnDGen.EventGen to NuGet" ApiKey=$1 Source=$2 echo "Nuget Source is $Source" echo "Nuget API Key is $ApiKey (should be secure)" echo "Pushing DnDGen.EventGen" nuget push ./DnDGen.EventGen/bin/Release/DnDGen.EventGen.*.nupkg -Verbosity detailed -ApiKey $ApiKey -Source $Source
echo "Deploying DnDGen.EventGen to NuGet" ApiKey=$1 Source=$2 echo "Nuget Source is $Source" echo "Nuget API Key is $ApiKey (should be secure)" echo "Pushing DnDGen.EventGen" dotnet nuget push ./DnDGen.EventGen/bin/Release/DnDGen.EventGen.*.nupkg -v normal --api-key $ApiKey --source $Source
Fix Rails versions test script
#! /bin/bash set -e for RAILS_VERSION in "3.2.0" "4.0.0" "4.1.0" "5.0.0" do bundle update bundle exec rspec spec done
#! /bin/bash set -e for RAILS_VERSION in "3.2.0" "4.0.0" "4.1.0" "4.2.0" "5.0.0" do export RAILS_VERSION echo "Rails $RAILS_VERSION" bundle update bundle exec rspec spec done unset RAILS_VERSION
Update to support zsh 4.2.6
autoload -U is-at-least if is-at-least 5.1.0; then __CGITC_CMD="local arr=(\"\${(@s/#/)line}\") line=\$arr[1]" else __CGITC_CMD="line=\$(echo \"\$line\" | cut -d '#' -f 1)" fi while read line; do # Strip out comments eval $__CGITC_CMD # Skip empty lines if [ -z "$line" ]; then; continue; fi # Split a line into two local key=$line[(w)1] local value=${line#* } value="${value#"${value%%[![:space:]]*}"}" # Remove leading whitespaces value="${value%"${value##*[![:space:]]}"}" # Remove trailing whitespaces alias $key=$value done < ${0:a:h}/abbreviations unset __CGITC_CMD
autoload -U is-at-least if is-at-least 5.1.0; then __CGITC_CMD="local arr=(\"\${(@s/#/)line}\") line=\$arr[1]" else __CGITC_CMD="line=\$(echo \"\$line\" | cut -d '#' -f 1)" fi while read line; do # Strip out comments eval $__CGITC_CMD # Skip empty lines if [ -z "$line" ]; then; continue; fi # Split a line into two local key=$line[(w)1] local value=${line#* } value="${value#"${value%%[![:space:]]*}"}" # Remove leading whitespaces value="${value%"${value##*[![:space:]]}"}" # Remove trailing whitespaces alias $key=$value done < "${0%/*}/abbreviations" unset __CGITC_CMD
Use Perl version-agnostic way of switching to Media Cloud's Perl library
#!/bin/bash ## These 3 lines are mandatory. export PERLBREW_ROOT=$HOME/perl5/perlbrew export PERLBREW_HOME=$HOME/.perlbrew #echo source ${PERLBREW_ROOT}/etc/bashrc unset MANPATH source ${PERLBREW_ROOT}/etc/bashrc #if [[ -z "$PERLBREW_BASHRD_VERSION" ]] # then # echo "if branch taken" # source ~/perl5/perlbrew/etc/bashrc #fi #Switch to the right version and filter useless message. perlbrew use perl-5.16.3@mediacloud 2> >(grep -v 'manpath: warning: $MANPATH set, ignoring /etc/manpath.config') if [ $? -ne 0 ]; then echo "Unable to run 'perlbrew use perl-5.16.3@mediacloud'" exit 1 fi # NOTE: We filter the useless MANPATH warning message this way because there was no good way to get rid of it. # perlbrew use will fail unless $MANPATH is set but then it generates this warning.
#!/bin/bash ## These 3 lines are mandatory. export PERLBREW_ROOT=$HOME/perl5/perlbrew export PERLBREW_HOME=$HOME/.perlbrew #echo source ${PERLBREW_ROOT}/etc/bashrc unset MANPATH source ${PERLBREW_ROOT}/etc/bashrc # Switch to whatever version has the "mediacloud" library perlbrew use @mediacloud # NOTE: We filter the useless MANPATH warning message this way because there was no good way to get rid of it. # perlbrew use will fail unless $MANPATH is set but then it generates this warning.
Increase memory for master API
#!/bin/sh JAVA_OPTS="${JAVA_OPTS} -DconfigFileLocation=/config/frontend-config.properties "
#!/bin/sh JAVA_OPTS="${JAVA_OPTS} -DconfigFileLocation=/config/frontend-config.properties -Xmx512M"
Make this a little more general.
#!/bin/bash # AUTHOR: Jeremy Wall (jw), jeremy@marzhillstudios.com set -x file=$1 cargo build --release sudo dtrace -c "target/release/ucg build ${file}" -o out.stacks -n 'profile-997 /execname == "ucg"/ { @[ustack(100)] = count(); }' stackcollapse.pl out.stacks > collapsed.stacks cat collapsed.stacks | flamegraph.pl --minwidth 2.5 > perf_graph.svg rm -f out.stacks collapsed.stacks
#!/bin/bash # AUTHOR: Jeremy Wall (jw), jeremy@marzhillstudios.com set -x cmd=$1 cargo build --release sudo dtrace -c "target/release/ucg ${cmd}" -o out.stacks -n 'profile-997 /execname == "ucg"/ { @[ustack(100)] = count(); }' stackcollapse.pl out.stacks > collapsed.stacks cat collapsed.stacks | flamegraph.pl --minwidth 2.5 > perf_graph.svg rm -f out.stacks collapsed.stacks
Clean tmp folder after unzipping KoreBuild
#!/usr/bin/env bash repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cd $repoFolder koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip" if [ ! -z $KOREBUILD_ZIP ]; then koreBuildZip=$KOREBUILD_ZIP fi buildFolder=".build" buildFile="$buildFolder/KoreBuild.sh" if test ! -d $buildFolder; then echo "Downloading KoreBuild from $koreBuildZip" tempFolder="/tmp/KoreBuild-$(uuidgen)" mkdir $tempFolder localZipFile="$tempFolder/korebuild.zip" retries=6 until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null) do echo "Failed to download '$koreBuildZip'" if [ "$retries" -le 0 ]; then exit 1 fi retries=$((retries - 1)) echo "Waiting 10 seconds before retrying. Retries left: $retries" sleep 10s done unzip -q -d $tempFolder $localZipFile mkdir $buildFolder cp -r $tempFolder/**/build/** $buildFolder chmod +x $buildFile # Cleanup if test ! -d $tempFolder; then rm -rf $tempFolder fi fi $buildFile -r $repoFolder "$@"
#!/usr/bin/env bash repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cd $repoFolder koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip" if [ ! -z $KOREBUILD_ZIP ]; then koreBuildZip=$KOREBUILD_ZIP fi buildFolder=".build" buildFile="$buildFolder/KoreBuild.sh" if test ! -d $buildFolder; then echo "Downloading KoreBuild from $koreBuildZip" tempFolder="/tmp/KoreBuild-$(uuidgen)" mkdir $tempFolder localZipFile="$tempFolder/korebuild.zip" retries=6 until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null) do echo "Failed to download '$koreBuildZip'" if [ "$retries" -le 0 ]; then exit 1 fi retries=$((retries - 1)) echo "Waiting 10 seconds before retrying. Retries left: $retries" sleep 10s done unzip -q -d $tempFolder $localZipFile mkdir $buildFolder cp -r $tempFolder/**/build/** $buildFolder chmod +x $buildFile # Cleanup if test -d $tempFolder; then rm -rf $tempFolder fi fi $buildFile -r $repoFolder "$@"
Revert "Invoke gradle with daemon flag."
#!/bin/bash export JAVA_HOME=$(/usr/libexec/java_home -v1.7) export GVM_SERVICE="http://localhost:8080" export GVM_DIR="/tmp/gvm" ./gradlew --daemon -i clean test
#!/bin/bash export JAVA_HOME=$(/usr/libexec/java_home -v1.7) export GVM_SERVICE="http://localhost:8080" export GVM_DIR="/tmp/gvm" ./gradlew -i clean test
Remove destination support from checkout script
#! /bin/bash # If any command here fails, exit the script set -e DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source $DIR/config.sh # Validate branch name # TODO: support just pieces of branch name if [[ $1 == "" ]] then echo "Usage: $0 branch [destination]" exit 1 fi # Check if destination was given as option if [[ $2 == "local" ]] then IPA_DIR=~/dev/freeipa fi pushd $IPA_DIR # Checkout to the given branch echo "Checking out ot the branch $1" git checkout $1 popd
#! /bin/bash # If any command here fails, exit the script set -e DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source $DIR/config.sh # Validate branch name # TODO: support just pieces of branch name if [[ $1 == "" ]] then echo "Usage: $0 branch " exit 1 fi pushd $IPA_DIR # Checkout to the given branch echo "Checking out ot the branch $1" git checkout $1 popd
Update the call to generate_rvs.py.
# Check the types in everything. pytype cabby # RVS bazel build cabby/rvs:* bazel query cabby/rvs:* | xargs bazel test bazel-bin/cabby/rvs/generate_rvs --ref_poi "Empire State Building" --goal_poi "pharmacy" # Geo bazel-bin/cabby/geo/geo_computation --orig_lat 40.749102 --orig_lon -73.984076 -dest_lat 40.748432 --dest_lon -73.982473 bazel-bin/cabby/geo/map_processing/map_processor --region Pittsburgh --level 18 bazel-bin/cabby/geo/map_processing/map_processor --region Manhattan --level 18 # Wikidata bazel test cabby/data/wikidata:query_test bazel build cabby/data/wikidata/extract_geofenced_wikidata_items bazel-bin/cabby/data/wikidata/extract_geofenced_wikidata_items --region Pittsburgh
# Check the types in everything. pytype cabby # RVS bazel build cabby/rvs:* bazel query cabby/rvs:* | xargs bazel test bazel-bin/cabby/rvs/generate_rvs --start_lat 40.753628 --start_lon -73.985085 --goal_lat 40.748432 --goal_lon -73.98247 # Geo bazel-bin/cabby/geo/geo_computation --orig_lat 40.749102 --orig_lon -73.984076 -dest_lat 40.748432 --dest_lon -73.982473 bazel-bin/cabby/geo/map_processing/map_processor --region Pittsburgh --level 18 bazel-bin/cabby/geo/map_processing/map_processor --region Manhattan --level 18 # Wikidata bazel test cabby/data/wikidata:query_test bazel build cabby/data/wikidata/extract_geofenced_wikidata_items bazel-bin/cabby/data/wikidata/extract_geofenced_wikidata_items --region Pittsburgh
Return 0 instead of exiting, makes multiple asserts work
#!/bin/sh # vim: set ts=4 : STEVE_OUTPUT=steve-output function AssertEqual { if [ -z "$1" ] || [ -z "$2" ] then echo "AssertEqual expects two arguments" > /dev/stderr exit 1 fi if [ "$1" != "$2" ] then TestFailure "Expected \"$1\" to be \"$2\"" exit 1 fi exit 0 } function OutputContains { if [ -z "$1" ] then echo "OutputContains expects message parameter" > /dev/stderr exit 1 fi grep "$1" steve-output > /dev/null if [ $? -ne 0 ] then TestFailure "Expected \"$1\" to be present in output but it wasn't found" exit 1 fi exit 0 } function TestFailure { if [ -z "$1" ] then echo "TestFailure expects message parameter" > /dev/stderr exit 1 fi echo "$1" > /dev/stderr }
#!/bin/sh # vim: set ts=4 : STEVE_OUTPUT=steve-output function AssertEqual { if [ -z "$1" ] || [ -z "$2" ] then echo "AssertEqual expects two arguments" > /dev/stderr exit 1 fi if [ "$1" != "$2" ] then TestFailure "Expected \"$1\" to be \"$2\"" exit 1 fi return 0 } function OutputContains { if [ -z "$1" ] then echo "OutputContains expects message parameter" > /dev/stderr exit 1 fi grep "$1" steve-output > /dev/null if [ $? -ne 0 ] then TestFailure "Expected \"$1\" to be present in output but it wasn't found" exit 1 fi return 0 } function TestFailure { if [ -z "$1" ] then echo "TestFailure expects message parameter" > /dev/stderr exit 1 fi echo "$1" > /dev/stderr }
Copy phar instead of moving
#!/bin/bash composer --no-dev update && box build && mv ./build/stati.phar ./build/stati && cd ../stati-paginate-plugin/ && box build && cd ../stati/ && cd ../stati-related-plugin/ && box build && cd ../stati/ && cd ../stati-categories-plugin/ && box build && cd ../stati/ && cp ../stati-paginate-plugin/build/paginate.phar ./build/ && cp ../stati-related-plugin/build/related.phar ./build/ && cp ../stati-categories-plugin/build/categories.phar build/ && cd ./build/ && zip stati.zip *.phar stati && cd ../ && composer update
#!/bin/bash composer --no-dev update && box build && cp ./build/stati.phar ./build/stati && cd ../stati-paginate-plugin/ && box build && cd ../stati/ && cd ../stati-related-plugin/ && box build && cd ../stati/ && cd ../stati-categories-plugin/ && box build && cd ../stati/ && cp ../stati-paginate-plugin/build/paginate.phar ./build/ && cp ../stati-related-plugin/build/related.phar ./build/ && cp ../stati-categories-plugin/build/categories.phar build/ && cd ./build/ && zip stati.zip *.phar stati && cd ../ && composer update
Adjust test script a bit
#!/bin/sh gst-launch --gst-plugin-path=`pwd`/src/.libs v4l2src ! video/x-raw-rgb,width=320, height=240 ! dmtx ! ffmpegcolorspace ! xvimagesink
#!/bin/sh gst-launch --gst-plugin-path=`pwd`/src/.libs \ v4l2src ! video/x-raw-rgb,width=320, height=240,fps=10 ! \ dmtx scale=2 ! \ queue ! ffmpegcolorspace ! xvimagesink
Add git for fuseki container
# # Fuseki container # docker kill /fuseki docker rm /fuseki docker build -t 0xffea/saucy-server-fuseki - <<EOL FROM 0xffea/saucy-server-existdb-amd64 MAINTAINER David Höppner <0xffea@gmail.com> EXPOSE 3030 ADD https://raw.github.com/beijingren/dedalus-infrastructure/master/scripts/start-fuseki.sh /root/start-fuseki.sh RUN chmod 0755 /root/start-fuseki.sh CMD ["/root/start-fuseki.sh"] EOL docker run -d -name fuseki -p 3030:3030 -v /var/lib/volume1:/docker/volume1:rw -t 0xffea/saucy-server-fuseki
# # Fuseki container # docker kill /fuseki docker rm /fuseki docker build -t 0xffea/saucy-server-fuseki - <<EOL FROM 0xffea/saucy-server-existdb-amd64 MAINTAINER David Höppner <0xffea@gmail.com> RUN export DEBIAN_FRONTEND=noninteractive RUN apt-get -qy install \ git EXPOSE 3030 ADD https://raw.github.com/beijingren/dedalus-infrastructure/master/scripts/start-fuseki.sh /root/start-fuseki.sh RUN chmod 0755 /root/start-fuseki.sh CMD ["/root/start-fuseki.sh"] EOL docker run -d -name fuseki -p 3030:3030 -v /var/lib/volume1:/docker/volume1:rw -t 0xffea/saucy-server-fuseki
Fix a small typo
#!/bin/sh curpath=`dirname $0` nohup ${curpath}/run_agent.sh $1 >> /${TMPDIR:=/tmp}/ngrinder.log & 2>&1
#!/bin/sh curpath=`dirname $0` nohup ${curpath}/run_agent.sh $1 >> ${TMPDIR:=/tmp}/ngrinder.log & 2>&1
Use deadline scheduler for disks on Postgres master
#!/bin/sh umount /mnt yes | mdadm --create /dev/md0 --raid-devices=2 --level=1 /dev/xvdb /dev/xvdc blockdev --setra 4096 /dev/xvdb blockdev --setra 4096 /dev/xvdc blockdev --setra 4096 /dev/md0 mkfs.xfs /dev/md0 echo "/dev/md0 /srv xfs noatime,barrier 1 1" >> /etc/fstab mount /srv pg_ctlcluster 9.1 main stop mv /var/lib/postgresql /srv ln -s /srv/postgresql/ /var/lib pg_ctlcluster 9.1 main start
#!/bin/sh umount /mnt yes | mdadm --create /dev/md0 --raid-devices=2 --level=1 /dev/xvdb /dev/xvdc blockdev --setra 4096 /dev/xvdb blockdev --setra 4096 /dev/xvdc blockdev --setra 4096 /dev/md0 echo deadline > /sys/block/xvdb/queue/scheduler echo deadline > /sys/block/xvdc/queue/scheduler mkfs.xfs /dev/md0 echo "/dev/md0 /srv xfs noatime,barrier 1 1" >> /etc/fstab mount /srv pg_ctlcluster 9.1 main stop mv /var/lib/postgresql /srv ln -s /srv/postgresql/ /var/lib pg_ctlcluster 9.1 main start
Add beets music library manager
#!/bin/sh # # Description: installs python and python packages if [ ! -x /usr/local/bin/brew ]; then echo "ERROR: Homebrew must be installed to run the python.sh installer script" exit 1 fi if [ ! -x /usr/local/bin/python ]; then echo "Installing python..." brew install python --framework --with-brewed-openssl fi echo "Current python: `which python`" echo "Installing python packages..." pip2 install --user neovim if [ -x $CONFIGS_DIR/python_local.sh ]; then $CONFIGS_DIR/python_local.sh fi exit 0
#!/bin/sh # # Description: installs python and python packages if [ ! -x /usr/local/bin/brew ]; then echo "ERROR: Homebrew must be installed to run the python.sh installer script" exit 1 fi if [ ! -x /usr/local/bin/python ]; then echo "Installing python..." brew install python --framework --with-brewed-openssl fi echo "Current python: `which python`" echo "Installing python packages..." pip2 install --user neovim pip install beets if [ -x $CONFIGS_DIR/python_local.sh ]; then $CONFIGS_DIR/python_local.sh fi exit 0
Install Bazel in the Kokoro build script.
#!/bin/bash # Fail on any error. set -e # Display commands being run. # WARNING: please only enable 'set -x' if necessary for debugging, and be very # careful if you handle credentials (e.g. from Keystore) with 'set -x': # statements like "export VAR=$(cat /tmp/keystore/credentials)" will result in # the credentials being printed in build logs. # Additionally, recursive invocation with credentials as command-line # parameters, will print the full command, with credentials, in the build logs. # set -x # Code under repo is checked out to ${KOKORO_ARTIFACTS_DIR}/git. # The final directory name in this path is determined by the scm name specified # in the job configuration. cd "${KOKORO_ARTIFACTS_DIR}/git/localtoast" bazel build ...
#!/bin/bash # Fail on any error. set -e # Install Bazel. use_bazel.sh 4.2.1 command -v bazel bazel version # Code under repo is checked out to ${KOKORO_ARTIFACTS_DIR}/git. # The final directory name in this path is determined by the scm name specified # in the job configuration. cd "${KOKORO_ARTIFACTS_DIR}/git/localtoast" bazel build ...
Make user:group the owner of /etc/appname/*
#!/usr/bin/env bash set -e APP_NAME="<%= name %>" APP_USER="<%= user %>" APP_GROUP="<%= group %>" HOME="<%= home %>" HOME_LOGS="${HOME}/log" LOGS="/var/log/${APP_NAME}" chown -R ${APP_USER}.${APP_GROUP} ${HOME} # link app log directory to /var/log/NAME rm -rf ${HOME_LOGS} ln -fs ${LOGS} ${HOME_LOGS} chown -R ${APP_USER}.${APP_GROUP} ${LOGS} # Add default conf.d file [ -f /etc/${APP_NAME}/conf.d/other ] || cat > /etc/${APP_NAME}/conf.d/other <<CONF # This file contains variables set via \`${APP_NAME} config:set\` # Database URL. E.g. : mysql2://root:pass@127.0.0.1/my-app-db export DATABASE_URL=db_adapter://db_user:db_password@db_host/db_name export PORT=\${PORT:=6000} CONF chmod -R 0600 /etc/${APP_NAME}/conf.d
#!/usr/bin/env bash set -e APP_NAME="<%= name %>" APP_USER="<%= user %>" APP_GROUP="<%= group %>" APP_HOME="<%= home %>" HOME_LOGS="${APP_HOME}/log" LOGS="/var/log/${APP_NAME}" chown -R ${APP_USER}.${APP_GROUP} ${APP_HOME} # link app log directory to /var/log/NAME rm -rf ${HOME_LOGS} ln -fs ${LOGS} ${HOME_LOGS} chown -R ${APP_USER}.${APP_GROUP} ${LOGS} # Add default conf.d file [ -f /etc/${APP_NAME}/conf.d/other ] || cat > /etc/${APP_NAME}/conf.d/other <<CONF # This file contains variables set via \`${APP_NAME} config:set\` # Database URL. E.g. : mysql2://root:pass@127.0.0.1/my-app-db export DATABASE_URL=db_adapter://db_user:db_password@db_host/db_name export PORT=\${PORT:=6000} CONF chown -R ${APP_USER}.${APP_GROUP} /etc/${APP_NAME} chmod -R 0600 /etc/${APP_NAME}
Clean up and fix +1/-0 syntax to work as expected
## # dircycle plugin: enables cycling through the directory # stack using Ctrl+Shift+Left/Right eval "insert-cycledleft () { zle push-line; LBUFFER='pushd -q +1'; zle accept-line }" zle -N insert-cycledleft bindkey "\e[1;6D" insert-cycledleft eval "insert-cycledright () { zle push-line; LBUFFER='pushd -q +0'; zle accept-line }" zle -N insert-cycledright bindkey "\e[1;6C" insert-cycledright
# enables cycling through the directory stack using # Ctrl+Shift+Left/Right # # left/right direction follows the order in which directories # were visited, like left/right arrows do in a browser # NO_PUSHD_MINUS syntax: # pushd +N: start counting from left of `dirs' output # pushd -N: start counting from right of `dirs' output setopt nopushdminus insert-cycledleft () { zle push-line LBUFFER='pushd -q +1' zle accept-line } zle -N insert-cycledleft insert-cycledright () { zle push-line LBUFFER='pushd -q -0' zle accept-line } zle -N insert-cycledright bindkey "\e[1;6D" insert-cycledleft bindkey "\e[1;6C" insert-cycledright
Remove -e: it doesn't work with some versions of bash.
#!/usr/bin/env bash -e # Runs a CI build for castanet. Assumes: # # 1) RVM is installed in the build user's home directory and can be # activated by sourcing ~/.rvm/scripts/rvm. # # 2) rvm_install_on_use_flag and rvm_create_on_use_flag are set. # # 3) The Ruby environment to use is provided as the first argument. if [ -z $1 ]; then echo "Ruby environment not given; aborting" exit 1 fi . ~/.rvm/scripts/rvm set +e rvm use $1@castanet gem list -i rake if [ $? -ne 0 ]; then gem install rake --no-rdoc --no-ri fi set -e rake -f init.rakefile bundle exec rake udaeta:install_dependencies --trace bundle exec rake ci --trace
#!/usr/bin/env bash # Runs a CI build for castanet. Assumes: # # 1) RVM is installed in the build user's home directory and can be # activated by sourcing ~/.rvm/scripts/rvm. # # 2) rvm_install_on_use_flag and rvm_create_on_use_flag are set. # # 3) The Ruby environment to use is provided as the first argument. if [ -z $1 ]; then echo "Ruby environment not given; aborting" exit 1 fi . ~/.rvm/scripts/rvm set +e rvm use $1@castanet gem list -i rake if [ $? -ne 0 ]; then gem install rake --no-rdoc --no-ri fi set -e rake -f init.rakefile bundle exec rake udaeta:install_dependencies --trace bundle exec rake ci --trace
Document source of repos for deps
#!/bin/sh (cd ../../ekmett/bifunctors && cabal clean && cabal install --allow-newer) # (cd ../../ekmett/comonad && cabal clean && cabal install --allow-newer) (cd ../../ekmett/free && cabal clean && cabal install --allow-newer) (cd ../../alanz/HUnit && cabal clean && cabal install --allow-newer) cabal install --allow-newer -f-semigroups contravariant # cabal clean && cabal install --dependencies-only --allow-newer cabal clean && cabal install --enable-tests --dependencies-only --allow-newer # cabal clean && cabal configure --enable-tests --allow-newer
#!/bin/sh # https://github.com/ekmett/bifunctors.git master (cd ../../ekmett/bifunctors && cabal clean && cabal install --allow-newer) # https://github.com/ekmett/free.git master (cd ../../ekmett/free && cabal clean && cabal install --allow-newer) # https://github.com/alanz/HUnit.git ghc-head (cd ../../alanz/HUnit && cabal clean && cabal install --allow-newer) cabal install --allow-newer -f-semigroups contravariant # cabal clean && cabal install --dependencies-only --allow-newer cabal clean && cabal install --enable-tests --dependencies-only --allow-newer # cabal clean && cabal configure --enable-tests --allow-newer
Allow the old version being given as parameter
#!/bin/sh OLD_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v\n') VERSION="$1" if [ -z "$VERSION" ]; then exit 1 fi README="README.md" echo "Replace $OLD_VERSION with $VERSION in $README" sed -i -e "s/$OLD_VERSION/$VERSION/g" "$README" git add "$README" git commit -a -m "Setting version to $VERSION" git tag -a -s "v$VERSION" -m "Releasing $VERSION" LATEST_VERSION_SBT="latestVersion.sbt" echo "Update $LATEST_VERSION_SBT" LATEST_VERSION_DEF="latestVersion in ThisBuild :=" sed -i -e "s/$LATEST_VERSION_DEF \"$OLD_VERSION\"/$LATEST_VERSION_DEF \"$VERSION\"/" \ "$LATEST_VERSION_SBT" sed -i -e "s/\/\/ NEXT_VERSION/, \"$VERSION\"\n \/\/ NEXT_VERSION/" "$LATEST_VERSION_SBT" sbt scalafmtSbt git add "$LATEST_VERSION_SBT" git commit -a -m "Set $VERSION as latestVersion" #git push #git push --tags
#!/bin/sh OLD_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v\n') if [ -n "$2" ]; then OLD_VERSION="$2" fi VERSION="$1" if [ -z "$VERSION" ]; then exit 1 fi README="README.md" echo "Replace $OLD_VERSION with $VERSION in $README" sed -i -e "s/$OLD_VERSION/$VERSION/g" "$README" git add "$README" git commit -a -m "Setting version to $VERSION" git tag -a -s "v$VERSION" -m "Releasing $VERSION" LATEST_VERSION_SBT="latestVersion.sbt" echo "Update $LATEST_VERSION_SBT" LATEST_VERSION_DEF="latestVersion in ThisBuild :=" sed -i -e "s/$LATEST_VERSION_DEF \"$OLD_VERSION\"/$LATEST_VERSION_DEF \"$VERSION\"/" \ "$LATEST_VERSION_SBT" sed -i -e "s/\/\/ NEXT_VERSION/, \"$VERSION\"\n \/\/ NEXT_VERSION/" "$LATEST_VERSION_SBT" sbt scalafmtSbt git add "$LATEST_VERSION_SBT" git commit -a -m "Set $VERSION as latestVersion" #git push #git push --tags
Test for old school CSV download.
#!/usr/bin/env bash set -e set -x AUTH="$1" curl -i -H "Accept: application/json" -H "content-type: application/json" -H "Authorization: $AUTH" --data "@event.json" -X POST http://misp.local/events curl -H "Authorization: $AUTH" -X GET http://misp.local/events/csv/download/1 | sed -e 's/^M//g' | cut -d, -f2 --complement | sort > 1.csv cat 1.csv cut -d, -f2 --complement event.csv | sort > compare.csv diff compare.csv 1.csv
#!/usr/bin/env bash set -e set -x AUTH="$1" curl -i -H "Accept: application/json" -H "content-type: application/json" -H "Authorization: $AUTH" --data "@event.json" -X POST http://misp.local/events curl -H "Authorization: $AUTH" -X GET http://misp.local/events/csv/download/1/ignore:1 | sed -e 's/^M//g' | cut -d, -f2 --complement | sort > 1.csv cat 1.csv cut -d, -f2 --complement event.csv | sort > compare.csv diff compare.csv 1.csv
Use --compressed in curl invocation since it's returning gzipped content randomly
#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" versions=( "$@" ) if [ ${#versions[@]} -eq 0 ]; then versions=( */ ) fi versions=( "${versions[@]%/}" ) for version in "${versions[@]}"; do fullVersion="$(curl -sSL "http://cache.ruby-lang.org/pub/ruby/$version/" \ | grep -E '<a href="ruby-'"$version"'.[^"]+\.tar\.bz2' \ | grep -vE 'preview|rc' \ | sed -r 's!.*<a href="ruby-([^"]+)\.tar\.bz2.*!\1!' \ | sort -V | tail -1)" ( set -x sed -ri 's/^(ENV RUBY_MAJOR) .*/\1 '"$version"'/' "$version/Dockerfile" sed -ri 's/^(ENV RUBY_VERSION) .*/\1 '"$fullVersion"'/' "$version/Dockerfile" sed -ri 's/^(FROM ruby):.*/\1:'"$fullVersion"'/' "$version/"*"/Dockerfile" ) done
#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" versions=( "$@" ) if [ ${#versions[@]} -eq 0 ]; then versions=( */ ) fi versions=( "${versions[@]%/}" ) for version in "${versions[@]}"; do fullVersion="$(curl -sSL --compressed "http://cache.ruby-lang.org/pub/ruby/$version/" \ | grep -E '<a href="ruby-'"$version"'.[^"]+\.tar\.bz2' \ | grep -vE 'preview|rc' \ | sed -r 's!.*<a href="ruby-([^"]+)\.tar\.bz2.*!\1!' \ | sort -V | tail -1)" ( set -x sed -ri 's/^(ENV RUBY_MAJOR) .*/\1 '"$version"'/' "$version/Dockerfile" sed -ri 's/^(ENV RUBY_VERSION) .*/\1 '"$fullVersion"'/' "$version/Dockerfile" sed -ri 's/^(FROM ruby):.*/\1:'"$fullVersion"'/' "$version/"*"/Dockerfile" ) done
Add remote pruning to gitcleanup
alias git=hub alias gitoneline="git log --graph --decorate --pretty=format:\"%C(auto)%h%d %Cblue%an%Creset: %C(auto)%s\"" alias gitonelineall="git log --graph --decorate --all --pretty=format:\"%C(auto)%h%d %Cblue%an%Creset: %C(auto)%s\"" alias gitpretty="git log --graph --decorate --name-status" alias gitprettyall="git log --graph --decorate --name-status --all" alias gitreset="git reset HEAD\^" # convenience function to go back one commit alias gitpush="git push origin HEAD" function gitmergecommit() { git log $1..HEAD --ancestry-path --merges } function gitmerged() { git branch --merged $@ | sed -e '/^*/d' } function gitcleanup() { git branch -d $(gitmerged) } function gitshowsvn() { git show `git svn find-rev r$1` } function gitsvnrebase() { if [[ $1 != "-l" ]]; then git svn fetch; fi git for-each-ref --shell --format='git co %(refname:short); git svn rebase -l;' refs/heads | \ while read entry do eval "$entry" done }
alias git=hub alias gitoneline="git log --graph --decorate --pretty=format:\"%C(auto)%h%d %Cblue%an%Creset: %C(auto)%s\"" alias gitonelineall="git log --graph --decorate --all --pretty=format:\"%C(auto)%h%d %Cblue%an%Creset: %C(auto)%s\"" alias gitpretty="git log --graph --decorate --name-status" alias gitprettyall="git log --graph --decorate --name-status --all" alias gitreset="git reset HEAD\^" # convenience function to go back one commit alias gitpush="git push origin HEAD" function gitmergecommit() { git log $1..HEAD --ancestry-path --merges } function gitmerged() { git branch --merged $@ | sed -e '/^*/d' } function gitcleanup() { git branch -d $(gitmerged); git remote prune origin } function gitshowsvn() { git show `git svn find-rev r$1` } function gitsvnrebase() { if [[ $1 != "-l" ]]; then git svn fetch; fi git for-each-ref --shell --format='git co %(refname:short); git svn rebase -l;' refs/heads | \ while read entry do eval "$entry" done }
Build f/ws too if they're present
#!/usr/bin/env bash cd ag-grid #./node_modules/.bin/gulp stylus #./node_modules/.bin/webpack gulp webpack cd ../ag-grid-enterprise #./node_modules/.bin/webpack gulp webpack cd ..
#!/usr/bin/env bash cd ag-grid gulp webpack cd ../ag-grid-enterprise gulp webpack if [ -d "../ag-grid-angular" ]; then cd ../ag-grid-angular npm run clean-build fi if [ -d "../ag-grid-react" ]; then cd ../ag-grid-react gulp fi if [ -d "../ag-grid-vue" ]; then cd ../ag-grid-vue gulp fi if [ -d "../ag-grid-aurelia" ]; then cd ../ag-grid-aurelia npm run build fi
Fix toxiproxy server download script
#!/bin/bash -e VERSION='v2.0.0rc2' TOXIPROXY_LOG_DIR=${CIRCLE_ARTIFACTS:-'/tmp'} echo "[start toxiproxy]" curl --silent https://github.com/Shopify/toxiproxy/releases/download/$VERSION/toxiproxy-server-linux-amd64 -o ./bin/toxiproxy-server chmod +x ./bin/toxiproxy-server nohup bash -c "./bin/toxiproxy-server > ${TOXIPROXY_LOG_DIR}/toxiproxy.log 2>&1 &"
#!/bin/bash -e VERSION='v2.0.0rc2' TOXIPROXY_LOG_DIR=${CIRCLE_ARTIFACTS:-'/tmp'} echo "[start toxiproxy]" curl --silent -L https://github.com/Shopify/toxiproxy/releases/download/$VERSION/toxiproxy-server-linux-amd64 -o ./bin/toxiproxy-server chmod +x ./bin/toxiproxy-server nohup bash -c "./bin/toxiproxy-server > ${TOXIPROXY_LOG_DIR}/toxiproxy.log 2>&1 &"
Speed up npm package list check considerably
#!/usr/bin/env bash # # Node package manager (NPM) set -e # Packages to install globally PACKAGES=( "autoprefixer" "bower" "csscomb" "grunt" "gulp" "http-server" "live-server" "nodemon" "nvm" "localtunnel" ) echo "" echo " Running Node install script" # Check for npm if command -v npm >/dev/null 2>&1 ; then echo " Looks like npm is installed. Checking for packages to install." echo "" # Install npm packages globally for PACKAGE in ${PACKAGES[@]} ; do if ! npm -g list | grep -q "${PACKAGE}" ; then npm install -g $PACKAGE else echo " * ${PACKAGE} already installed." fi done fi exit 0
#!/usr/bin/env bash # # Node package manager (NPM) set -e # Cache already installed global packages INSTALLED_PACKAGES=( $(find `npm root -g` -type d -maxdepth 1 -not -path '*/\.*' -print0 | while IFS= read -r -d '' dirname; do echo ${dirname##*/}; done) ) # Packages to install globally PACKAGES=( "autoprefixer" "bower" "csscomb" "grunt" "gulp" "http-server" "live-server" "localtunnel" "nodemon" "nvm" ) echo "" echo " Running Node install script" # Check for npm if command -v npm >/dev/null 2>&1 ; then echo " Looks like npm is installed. Checking for packages to install." echo "" # Install npm packages globally for PACKAGE in ${PACKAGES[@]} ; do if ! echo ${INSTALLED_PACKAGES[@]} | grep -q "${PACKAGE}" ; then npm install -g $PACKAGE else echo " * ${PACKAGE} already installed." fi done fi exit 0
Use vars for SID in fixpacks script
sidadm=poqadm db2sid=db2poq DB2INSTALLER=/tmp/DB2_LUW_10.5_FP5_RDBMS_LINUX_/LINUXX86_64 SID=POQ set -e su - $sidadm stopsap all exit tar -jcvf /backup/$db2sid.tar.bz2 /db2/$db2sid su - $db2sid $DB2DIR/bin/ipclean $DB2DIR/das/bin/db2admin stop db2ls exit cd $DB2INSTALLER/ESE/disk1/ # responda DB2DIR ; no ./installFixPack -f db2lib /db2/$db2sid/db2_software/instance/db2iupdt $db2sid cd $DB2INSTALLER/ chmod a+rwx -R . su - $db2sid startdb exit su - db2poq -c "cd $DB2INSTALLER ; ./db6_update_db.sh -d $SID" su - db2poq -c "cd $DB2INSTALLER ; db2 -z db6_update_db_out.log -tvf db6_update_db_out.sql ; " su - $sidadm startsap all exit
sidadm=ecqadm db2sid=db2ecq DB2INSTALLER=/tmp/DB2_LUW_10.5_FP5_RDBMS_LINUX_/LINUXX86_64 SID=ECQ # rsync -avz /sapmedia/IBM_DB2/DB2_LUW_10.5_FP5_RDBMS_LINUX_ /tmp/ set -e su - $sidadm stopsap all exit tar -jcvf /backup/$db2sid.tar.bz2 /db2/$db2sid su - $db2sid $DB2DIR/bin/ipclean $DB2DIR/das/bin/db2admin stop db2ls exit cd $DB2INSTALLER/ESE/disk1/ # responda DB2DIR ; no ./installFixPack -f db2lib /db2/$db2sid/db2_software/instance/db2iupdt $db2sid cd $DB2INSTALLER/ chmod a+rwx -R . su - $db2sid startdb exit su - $db2sid -c "cd $DB2INSTALLER ; ./db6_update_db.sh -d $SID" su - $db2sid -c "cd $DB2INSTALLER ; db2 -z db6_update_db_out.log -tvf db6_update_db_out.sql ; " su - $sidadm startsap all exit
Revert "ci: docker -> podman"
#!/bin/bash set -e TAG="registry.gitlab.gnome.org/gnome/glib-networking/master:v5" cd "$(dirname "$0")" podman build --build-arg HOST_USER_ID="$UID" --tag "${TAG}" . if [ "$1" = "--push" ]; then podman login registry.gitlab.gnome.org podman push $TAG else podman run --rm \ --volume "$(pwd)/..:/home/user/app" --workdir "/home/user/app" \ --tty --interactive "${TAG}" bash fi
#!/bin/bash set -e TAG="registry.gitlab.gnome.org/gnome/glib-networking/master:v4" cd "$(dirname "$0")" sudo docker build --build-arg HOST_USER_ID="$UID" --tag "${TAG}" --file "Dockerfile" . if [ "$1" = "--push" ]; then sudo docker login registry.gitlab.gnome.org sudo docker push $TAG else sudo docker run --rm \ --volume "$(pwd)/..:/home/user/app" --workdir "/home/user/app" \ --tty --interactive "${TAG}" bash fi
Remove subfolders in compressed tarball
#!/bin/bash TAR_EXTENSION=.tar.gz # Check architecture and set variables if [[ ! $check_and_set ]]; then . 0-check-and-set.sh $1 fi tar cfz $build_dir/$rootfs_dir$TAR_EXTENSION $build_dir/$rootfs_dir echo echo "$build_dir/$rootfs_dir$TAR_EXTENSION created"
#!/bin/bash TAR_EXTENSION=.tar.gz # Check architecture and set variables if [[ ! $check_and_set ]]; then . 0-check-and-set.sh $1 fi cd $build_dir tar cfz $rootfs_dir$TAR_EXTENSION $rootfs_dir cd - >/dev/null echo echo "$build_dir/$rootfs_dir$TAR_EXTENSION created"
Revert sudo for centos7 dir
#!/bin/bash -e # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # echo -n "Running oso-centos7-saml-sso... " docker run -ti --net=host --rm=true --name saml-sso oso-centos7-saml-sso $@ echo "Done."
#!/bin/bash -e # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # echo -n "Running oso-centos7-saml-sso... " sudo docker run -ti --net=host --rm=true --name saml-sso oso-centos7-saml-sso $@ echo "Done."
Allow test db to be created
#!/bin/bash set -euo pipefail sudo su postgres <<'EOF' psql -c 'DROP SCHEMA public CASCADE;' stagecraft psql -c 'DROP DATABASE stagecraft;' /usr/lib/postgresql/9.3/bin/createdb --port='5432' --owner='postgres' --template=template0 --encoding 'SQL_ASCII' --locale=en_GB.UTF-8 'stagecraft' psql -c 'CREATE SCHEMA public;' stagecraft psql -c 'ALTER SCHEMA public OWNER TO stagecraft;' stagecraft psql -f '$1' stagecraft psql -c "ALTER USER stagecraft WITH PASSWORD 'securem8'" EOF # psql -c 'DROP FUNCTION streaming_slave_check();' # pg_dump --encoding=SQL_ASCII --oids --clean stagecraft -f '/tmp/stagecraft.utf8.sql' # psql -c 'drop database stagecraft;' # /usr/lib/postgresql/9.3/bin/createdb --port='5432' --owner='postgres' --template=template0 --encoding 'SQL_ASCII' --locale=en_GB.UTF-8 'stagecraft' # psql -f '/tmp/stagecraft.utf8.sql' stagecraft # psql -c "ALTER USER stagecraft WITH PASSWORD 'securem8'"
#!/bin/bash set -euo pipefail sudo su postgres <<'EOF' psql -c 'DROP SCHEMA public CASCADE;' stagecraft psql -c 'DROP DATABASE stagecraft;' /usr/lib/postgresql/9.3/bin/createdb --port='5432' --owner='postgres' --template=template0 --encoding 'SQL_ASCII' --locale=en_GB.UTF-8 'stagecraft' psql -c 'CREATE SCHEMA public;' stagecraft psql -c 'ALTER SCHEMA public OWNER TO stagecraft;' stagecraft psql -f '$1' stagecraft psql -c "ALTER USER stagecraft WITH PASSWORD 'securem8'" psql -c "ALTER USER stagecraft WITH CREATEDB" EOF # psql -c 'DROP FUNCTION streaming_slave_check();' # pg_dump --encoding=SQL_ASCII --oids --clean stagecraft -f '/tmp/stagecraft.utf8.sql' # psql -c 'drop database stagecraft;' # /usr/lib/postgresql/9.3/bin/createdb --port='5432' --owner='postgres' --template=template0 --encoding 'SQL_ASCII' --locale=en_GB.UTF-8 'stagecraft' # psql -f '/tmp/stagecraft.utf8.sql' stagecraft # psql -c "ALTER USER stagecraft WITH PASSWORD 'securem8'"
Move networkd setup to mesos unit
#!/bin/bash set -e FAULT_DOMAIN_SCRIPT=/opt/mesosphere/bin/detect_fault_domain if [ -x $FAULT_DOMAIN_SCRIPT ]; then export MESOS_DOMAIN="$($FAULT_DOMAIN_SCRIPT)" fi exec "$@"
#!/bin/bash set -e FAULT_DOMAIN_SCRIPT=/opt/mesosphere/bin/detect_fault_domain if [ -x $FAULT_DOMAIN_SCRIPT ]; then export MESOS_DOMAIN="$($FAULT_DOMAIN_SCRIPT)" fi function coreos_networkd_config() { network_config="/etc/systemd/network/dcos.network" sudo tee $network_config > /dev/null<<'EOF' [Match] Type=bridge Name=docker* m-* d-* vtep* [Link] Unmanaged=yes EOF } distro="$(source /etc/os-release && echo "${ID}")" if [[ "${distro}" == 'coreos' ]]; then if systemctl list-unit-files | grep systemd-networkd.service > /dev/null; then coreos_networkd_config if systemctl is-enabled systemd-networkd > /dev/null; then sudo systemctl restart systemd-networkd fi fi fi exec "$@"
Copy local properties to RS
# hack to get latest ResearchStack code since artficts aren't being published if [[ $TRAVIS_BRANCH != 'master' ]]; then git clone -b develop https://github.com/ResearchStack/ResearchStack.git pushd ResearchStack ./gradlew install popd fi
# hack to get latest ResearchStack code since artficts aren't being published if [[ $TRAVIS_BRANCH != 'master' ]]; then git clone -b develop https://github.com/ResearchStack/ResearchStack.git pushd ResearchStack cp ../local.properties . ./gradlew install popd fi
Remove --stderr switch from Travis test runner
#!/bin/bash travisdir=$(dirname $(readlink /proc/$$/fd/255)) testdir="$travisdir/../tests" testedcomponents=(`cat "$travisdir/tested-components"`) result=0 for tested in "${testedcomponents[@]}" do echo "$tested:" phpunit -c $testdir/phpunit.xml --stderr $testdir/$tested result=$(($result || $?)) done exit $result
#!/bin/bash travisdir=$(dirname $(readlink /proc/$$/fd/255)) testdir="$travisdir/../tests" testedcomponents=(`cat "$travisdir/tested-components"`) result=0 for tested in "${testedcomponents[@]}" do echo "$tested:" phpunit -c $testdir/phpunit.xml $testdir/$tested result=$(($result || $?)) done exit $result
Fix missing library issue on i386/armhf
#! /bin/bash if [[ $(uname -m) == "x86_64" ]] then wget --backups=0 "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1604-3.2.7.tgz" tar -zxf ./mongodb-linux-x86_64-ubuntu1604-3.2.7.tgz --strip-components=1 else IFS=" " read -a links <<< $(apt-get -y --print-uris install mongodb | egrep -o "https?://[^']+") for link in ${links[@]} do wget --backups=0 ${link} done IFS=" " read -a deb_pkgs <<< $(ls ./ | egrep -o "mongo.+\.deb") for pkg in ${deb_pkgs[@]} do dpkg-deb -R ${pkg} ./ done fi
#! /bin/bash if [[ $(uname -m) == "x86_64" ]] then wget --backups=0 "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1604-3.2.7.tgz" tar -zxf ./mongodb-linux-x86_64-ubuntu1604-3.2.7.tgz --strip-components=1 else IFS=" " read -a links <<< $(apt-get -y --print-uris install mongodb | egrep -o "https?://[^']+") for link in ${links[@]} do wget --backups=0 ${link} done IFS=" " read -a deb_pkgs <<< $(ls ./ | egrep "\.deb") for pkg in ${deb_pkgs[@]} do echo "Extracting ${pkg}..." dpkg-deb -R ${pkg} ./ done fi
Upgrade to Splash V2 Standards
echo Clone Prestashop # Clone Prestashop into Build Folder cd $TRAVIS_BUILD_DIR git clone --depth=50 --branch=$PS_VERSION https://github.com/Prestashop/Prestashop.git $TRAVIS_BUILD_DIR ls -al $TRAVIS_BUILD_DIR # PrestaShop configuration cp tests/parameters.yml.travis app/config/parameters.yml
echo Clone Prestashop # Clone Prestashop into Build Folder cd $TRAVIS_BUILD_DIR git clone --depth=50 --branch=$PS_VERSION https://github.com/Prestashop/Prestashop.git $TRAVIS_BUILD_DIR ls -al $TRAVIS_BUILD_DIR # PrestaShop configuration # cp tests/parameters.yml.travis app/config/parameters.yml cp app/config/parameters.yml.dist app/config/parameters.yml
Update theme in VS Code installation script
#!/bin/sh # Installs the Visual Studio Code (Flatpak) with favorite packages flatpak remote-add --if-not-exists flathub \ https://dl.flathub.org/repo/flathub.flatpakrepo && flatpak install flathub \ com.visualstudio.code.oss && flatpak run com.visualstudio.code.oss \ --install-extension ms-ceintl.vscode-language-pack-pt-br \ --install-extension pkief.material-icon-theme \ --install-extension equinusocio.vsc-material-theme \ --install-extension EditorConfig.EditorConfig \ --install-extension formulahendry.auto-close-tag \ --install-extension formulahendry.auto-rename-tag \ --install-extension dbaeumer.vscode-eslint \ --install-extension octref.vetur \ --install-extension ms-python.python \ --install-extension rust-lang.rust
#!/bin/sh # Installs the Visual Studio Code (Flatpak) with favorite packages flatpak remote-add --if-not-exists flathub \ https://dl.flathub.org/repo/flathub.flatpakrepo && flatpak install flathub \ com.visualstudio.code.oss && flatpak run com.visualstudio.code.oss \ --install-extension ms-ceintl.vscode-language-pack-pt-br \ --install-extension pkief.material-icon-theme \ --install-extension zhuangtongfa.material-theme \ --install-extension EditorConfig.EditorConfig \ --install-extension formulahendry.auto-close-tag \ --install-extension formulahendry.auto-rename-tag \ --install-extension dbaeumer.vscode-eslint \ --install-extension octref.vetur \ --install-extension ms-python.python \ --install-extension rust-lang.rust
Change the commit we're using for keystone.
#!/usr/bin/env sh set -ex pip install keystonemiddleware pip install python-keystoneclient keystone_commit=stable/mitaka ./ci/keystone/keystone.sh setup
#!/usr/bin/env sh set -ex pip install keystonemiddleware pip install python-keystoneclient # The exact commit we use here is somewhat arbitrary, but we want # something that (a) won't change out from under our feet, and (b) # works with our existing tests. keystone_commit=10.0.0.0b2 ./ci/keystone/keystone.sh setup
Use 127.0.0.1 instead of localhost
#! /bin/sh # # Copyright 2011 Couchbase, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # We don't want to run memory debugging on java ;) unset LD_PRELOAD unset MALLOC_DEBUG unset UMEM_DEBUG # This is a wrapper script to start the Couchbase Mock server. # We could have started it directly from the C code, but by using # a script it's a bit easier to test it manually ;) exec java \ -jar tests/CouchbaseMock.jar \ --nodes=10 \ --host=localhost \ --port=0 \ "$@"
#! /bin/sh # # Copyright 2011 Couchbase, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # We don't want to run memory debugging on java ;) unset LD_PRELOAD unset MALLOC_DEBUG unset UMEM_DEBUG # This is a wrapper script to start the Couchbase Mock server. # We could have started it directly from the C code, but by using # a script it's a bit easier to test it manually ;) exec java \ -jar tests/CouchbaseMock.jar \ --nodes=10 \ --host=127.0.0.1 \ --port=0 \ "$@"
Make start script for docker retain JAVA_OPTS
#!/bin/bash # if nobody manually set a host to list on then go with $HOSTNAME if [ -z "$MB_JETTY_HOST" ]; then export MB_JETTY_HOST=$HOSTNAME fi # Metabase Database Info - this is just about what db the Metabase application uses for internal storage # AWS Elastic Beanstalk w/ RDS if [ ! -z "$RDS_HOSTNAME" ]; then # EEK: this is a bit fragile. if user picks a non-standard port for their db we are screwed :( if [ "$MB_DB_PORT" == "3306" ]; then export MB_DB_TYPE=mysql else export MB_DB_TYPE=postgres fi export MB_DB_DBNAME=$RDS_DB_NAME export MB_DB_USER=$RDS_USERNAME export MB_DB_PASS=$RDS_PASSWORD export MB_DB_HOST=$RDS_HOSTNAME export MB_DB_PORT=$RDS_PORT fi # Setup Java Options JAVA_OPTS="-Dlogfile.path=target/log -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -server" if [ ! -z "$JAVA_TIMEZONE" ]; then JAVA_OPTS="${JAVA_OPTS} -Duser.timezone=${JAVA_TIMEZONE}" fi # Launch the application exec java $JAVA_OPTS -jar /app/metabase.jar
#!/bin/bash # if nobody manually set a host to list on then go with $HOSTNAME if [ -z "$MB_JETTY_HOST" ]; then export MB_JETTY_HOST=$HOSTNAME fi # Metabase Database Info - this is just about what db the Metabase application uses for internal storage # AWS Elastic Beanstalk w/ RDS if [ ! -z "$RDS_HOSTNAME" ]; then # EEK: this is a bit fragile. if user picks a non-standard port for their db we are screwed :( if [ "$MB_DB_PORT" == "3306" ]; then export MB_DB_TYPE=mysql else export MB_DB_TYPE=postgres fi export MB_DB_DBNAME=$RDS_DB_NAME export MB_DB_USER=$RDS_USERNAME export MB_DB_PASS=$RDS_PASSWORD export MB_DB_HOST=$RDS_HOSTNAME export MB_DB_PORT=$RDS_PORT fi # Setup Java Options JAVA_OPTS="${JAVA_OPTS} -Dlogfile.path=target/log -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -server" if [ ! -z "$JAVA_TIMEZONE" ]; then JAVA_OPTS="${JAVA_OPTS} -Duser.timezone=${JAVA_TIMEZONE}" fi # Launch the application exec java $JAVA_OPTS -jar /app/metabase.jar
Fix renamed interface name for splash, too
#!/bin/sh # Setup_splash, takes 1 argument: 1=net . /etc/functions.sh . $dir/functions.sh net=$1 handle_splash() { config_get network "$1" network if [ "$network" == "${netrenamed}dhcp" ]; then if [ "$cleanup" == 1 ]; then section_cleanup luci_splash.$1 else if [ -z "${1/cfg[0-9a-fA-F]*/}" ]; then section_rename luci_splash $1 ${netrenamed}dhcp fi fi fi } config_load luci_splash config_foreach handle_splash iface uci batch << EOF set luci_splash.${netrenamed}dhcp="iface" set luci_splash.${netrenamed}dhcp.network="${net}dhcp" set luci_splash.${netrenamed}dhcp.zone="freifunk" EOF echo " network: ${netrenamed}dhcp" uci commit
#!/bin/sh # Setup_splash, takes 1 argument: 1=net . /etc/functions.sh . $dir/functions.sh net=$1 handle_splash() { config_get network "$1" network if [ "$network" == "${netrenamed}dhcp" ]; then if [ "$cleanup" == 1 ]; then section_cleanup luci_splash.$1 else if [ -z "${1/cfg[0-9a-fA-F]*/}" ]; then section_rename luci_splash $1 ${netrenamed}dhcp fi fi fi } config_load luci_splash config_foreach handle_splash iface uci batch << EOF set luci_splash.${netrenamed}dhcp="iface" set luci_splash.${netrenamed}dhcp.network="${netrenamed}dhcp" set luci_splash.${netrenamed}dhcp.zone="freifunk" EOF echo " network: ${netrenamed}dhcp" uci commit
Remove useless download of JSON.sh since it is vendored in anyway.
_dl_cmd init "initialize $DOCKERLITE_ROOT and create the root (empty) image" _dl_init () { curl -s https://raw.github.com/dominictarr/JSON.sh/master/JSON.sh > JSON.sh chmod +x JSON.sh cd $DOCKERLITE_ROOT mkdir -p images mkdir -p containers mkdir -p networks EMPTY=empty$RANDOM ZEROHASH=$(_dl_zerohash) _dl_btrfs subvol create $EMPTY mkdir -p images/$ZEROHASH mkdir -p images/$ZEROHASH/metadata _dl_seti $ZEROHASH name 'empty image (base of all other images)' _dl_btrfs subvol snapshot -r $EMPTY images/$ZEROHASH/rootfs _dl_btrfs subvol delete $EMPTY } _dl_mkloop () { [ -d $DOCKERLITE_ROOT ] || mkdir -p $DOCKERLITE_ROOT mountpoint -q $DOCKERLITE_ROOT && return [ -f $DOCKERLITE_LOOPFILE ] || { dd if=/dev/zero of=$DOCKERLITE_LOOPFILE bs=1024k count=$DOCKERLITE_LOOPSIZE mkfs -t btrfs $DOCKERLITE_LOOPFILE } mount $DOCKERLITE_LOOPFILE $DOCKERLITE_ROOT }
_dl_cmd init "initialize $DOCKERLITE_ROOT and create the root (empty) image" _dl_init () { # If for some reason, we cannot "vendor in" JSON.sh, we could download # it here, when the image store is initialized. #curl -s https://raw.github.com/dominictarr/JSON.sh/master/JSON.sh > JSON.sh #chmod +x JSON.sh cd $DOCKERLITE_ROOT mkdir -p images mkdir -p containers mkdir -p networks EMPTY=empty$RANDOM ZEROHASH=$(_dl_zerohash) _dl_btrfs subvol create $EMPTY mkdir -p images/$ZEROHASH mkdir -p images/$ZEROHASH/metadata _dl_seti $ZEROHASH name 'empty image (base of all other images)' _dl_btrfs subvol snapshot -r $EMPTY images/$ZEROHASH/rootfs _dl_btrfs subvol delete $EMPTY } _dl_mkloop () { [ -d $DOCKERLITE_ROOT ] || mkdir -p $DOCKERLITE_ROOT mountpoint -q $DOCKERLITE_ROOT && return [ -f $DOCKERLITE_LOOPFILE ] || { dd if=/dev/zero of=$DOCKERLITE_LOOPFILE bs=1024k count=$DOCKERLITE_LOOPSIZE mkfs -t btrfs $DOCKERLITE_LOOPFILE } mount $DOCKERLITE_LOOPFILE $DOCKERLITE_ROOT }
Use utils to create glance service and endpoint
openstack user create --domain default --password $1 glance openstack role add --project service --user glance admin openstack service create --name glance --description "OpenStack Image service" image openstack endpoint create --region RegionOne image public http://$2:9292 openstack endpoint create --region RegionOne image internal http://$2:9292 openstack endpoint create --region RegionOne image admin http://$2:9292
#!/bin/sh source /opt/local/bin/os-utils.sh create_or_get_user glance $1 get_or_add_user_project_role admin glance service create_or_get_service glance image "OpenStack Image service" create_or_get_endpoint image public http://$2:9292 create_or_get_endpoint image internal http://$2:9292 create_or_get_endpoint image admin http://$2:9292
Use double brackets in generation test
#!/bin/bash set -eu echo "===> Generating API server..." cd _example ../bin/apig gen --all if [ ! $(git status | grep 'nothing to commit') ]; then echo " x Generator artifact and example application are different." git --no-pager diff exit 1 fi echo "===> Building API server..." go get ./... go build if [ $? -gt 0 ]; then echo " x Failed to build generated API server." exit 1 fi echo " o Generation test PASSED!"
#!/bin/bash set -eu echo "===> Generating API server..." cd _example ../bin/apig gen --all if [[ ! $(git status | grep 'nothing to commit') ]]; then echo " x Generator artifact and example application are different." git --no-pager diff exit 1 fi echo "===> Building API server..." go get ./... go build if [[ $? -gt 0 ]]; then echo " x Failed to build generated API server." exit 1 fi echo " o Generation test PASSED!" q
Add some comment about aws-sdk version
#!/bin/bash echo -n "* Executing apt-get update" sudo apt-get update echo -n "* Attempting to install puppet" sudo apt-get install -y --force-yes puppet echo " - Done" sudo /usr/bin/logger -t autobootstrap "installed puppet" echo "* installing gems" sudo gem install hiera-eyaml sudo gem install aws-sdk -v '~> 2.6.11' sudo gem install hiera-eyaml-kms echo " - Done" sudo /usr/bin/logger -t autobootstrap "installed puppet gems"
#!/bin/bash echo -n "* Executing apt-get update" sudo apt-get update echo -n "* Attempting to install puppet" sudo apt-get install -y --force-yes puppet echo " - Done" sudo /usr/bin/logger -t autobootstrap "installed puppet" echo "* installing gems" sudo gem install hiera-eyaml # aws-codedeploy-agent 1.0-1.1067 depends on aws-sdk-core ~2.6.11 and will ERROR on newer versions (~2.7). # We'll probably have to update again once they release the newest version with commit: # https://github.com/aws/aws-codedeploy-agent/commit/50db2ec9013cfe8f1a857de53c806d6c67d8d07b sudo gem install aws-sdk -v '~> 2.6.11' sudo gem install hiera-eyaml-kms echo " - Done" sudo /usr/bin/logger -t autobootstrap "installed puppet gems"
Check for adg.h existence in the proper path
#!/bin/sh # Run this to generate all the initial makefiles, etc. srcdir=`dirname $0` test -z "$srcdir" && srcdir=. if test ! -f $srcdir/configure.ac -o ! -f $srcdir/adg/adg.h; then echo "**Error**: '$srcdir' does not look like the top-level adg directory" exit 1 fi cd $srcdir glib-gettextize -f || exit $? # autoreconf interaction with libtool has been broken for ages: # explicitely calling libtoolize avoid some problems libtoolize --automake || exit $? autoreconf -is -Wall || exit $? ./configure "$@" && echo "Now type 'make' to compile $PROJECT."
#!/bin/sh # Run this to generate all the initial makefiles, etc. srcdir=`dirname $0` test -z "$srcdir" && srcdir=. if test ! -f $srcdir/configure.ac -o ! -f $srcdir/src/adg/adg.h; then echo "**Error**: '$srcdir' does not look like the top-level adg directory" exit 1 fi cd $srcdir glib-gettextize -f || exit $? # autoreconf interaction with libtool has been broken for ages: # explicitely calling libtoolize avoid some problems libtoolize --automake || exit $? autoreconf -is -Wall || exit $? ./configure "$@" && echo "Now type 'make' to compile $PROJECT."
Fix oshinko-rest version after merge
if [ -n "$OSHINKO_SERVER_TAG" ] then TAG="$OSHINKO_SERVER_TAG" elif [ -d .git ] then GIT_TAG=`git describe --tags --abbrev=0 2> /dev/null | head -n1` GIT_COMMIT=`git log -n1 --pretty=format:%h 2> /dev/null` TAG="${GIT_TAG}-${GIT_COMMIT}" else TAG="unknown" fi APP=oshinko-rest-server TAG_APPNAME_FLAGS="-X github.com/radanalyticsio/oshinko-cli/rest/version.gitTag=$TAG -X github.com/radanalyticsio/oshinko-cli/rest/version.appName=$APP"
if [ -n "$OSHINKO_SERVER_TAG" ] then TAG="$OSHINKO_SERVER_TAG" elif [ -d ../.git ] then GIT_TAG=`git describe --tags --abbrev=0 2> /dev/null | head -n1` GIT_COMMIT=`git log -n1 --pretty=format:%h 2> /dev/null` TAG="${GIT_TAG}-${GIT_COMMIT}" else TAG="unknown" fi APP=oshinko-rest-server TAG_APPNAME_FLAGS="-X github.com/radanalyticsio/oshinko-cli/rest/version.gitTag=$TAG -X github.com/radanalyticsio/oshinko-cli/rest/version.appName=$APP"
Switch back to using default VIM
# Quick Directory tab completions c() { cd ~/Development/$1; } _c() { _files -W ~/Development -/; } compdef _c c h() { cd ~/$1; } _h() { _files -W ~/ -/; } compdef _h h # My aliases alias reload!='. ~/.zshrc' # Reloads my zsh configurion alias cls='clear' # Clear screen alias pubkey="clipcopy ~/.ssh/id_rsa.pub | echo '=> Public key copied to pasteboard.'" # Convert vi and vim to nvim alias vi="nvim" alias vim="nvim"
# Quick Directory tab completions c() { cd ~/Development/$1; } _c() { _files -W ~/Development -/; } compdef _c c h() { cd ~/$1; } _h() { _files -W ~/ -/; } compdef _h h # My aliases alias reload!='. ~/.zshrc' # Reloads my zsh configurion alias cls='clear' # Clear screen alias pubkey="clipcopy ~/.ssh/id_rsa.pub | echo '=> Public key copied to pasteboard.'" # Convert vi and vim to nvim alias vi="vim"
Check that the project directory exists.
#!/bin/bash DATADIR="$(dirname "$0")" PROJECT="$1" if [[ "$PROJECT" == "" ]]; then echo "Usage: $0 projectdir" exit 1 fi echo "==> Prepending changelog." CLTARGET="$PROJECT/debian/changelog" CLDIST="$CLTARGET.dist" CLPRE="$DATADIR/changelog-pre.txt" needscl=1 if [[ -f "$CLDIST" ]]; then # Check if the changelog has already been modified. lines=$(wc -l "$CLPRE" | cut -d ' ' -f 1) targetmd5=$(head -n "$lines" "$CLTARGET" | md5sum | cut -d ' ' -f 1) premd5=$(md5sum "$CLPRE" | cut -d ' ' -f 1) if [[ "$targetmd5" = "$premd5" ]]; then needscl=0 fi fi if [[ $needscl == 1 ]]; then echo "--> Updating changelog from $CLPRE" cp "$CLTARGET" "$CLDIST" cat "$CLPRE" "$CLDIST" > "$CLTARGET" else echo "--> Changelog up-to-date." fi exit 1 echo "==> Building package." ( cd "$PROJECT" ; debuild -i -us -uc -b )
#!/bin/bash DATADIR="$(dirname "$0")" PROJECT="$1" if [[ "$PROJECT" == "" ]]; then echo "Usage: $0 projectdir" >&2 exit 1 fi if [[ ! -d "$PROJECT" ]]; then echo "Project is not a directory: $PROJECT" >&2 exit 1 fi echo "==> Prepending changelog." CLTARGET="$PROJECT/debian/changelog" CLDIST="$CLTARGET.dist" CLPRE="$DATADIR/changelog-pre.txt" needscl=1 if [[ -f "$CLDIST" ]]; then # Check if the changelog has already been modified. lines=$(wc -l "$CLPRE" | cut -d ' ' -f 1) targetmd5=$(head -n "$lines" "$CLTARGET" | md5sum | cut -d ' ' -f 1) premd5=$(md5sum "$CLPRE" | cut -d ' ' -f 1) if [[ "$targetmd5" = "$premd5" ]]; then needscl=0 fi fi if [[ $needscl == 1 ]]; then echo "--> Updating changelog from $CLPRE" cp "$CLTARGET" "$CLDIST" cat "$CLPRE" "$CLDIST" > "$CLTARGET" else echo "--> Changelog up-to-date." fi exit 1 echo "==> Building package." ( cd "$PROJECT" ; debuild -i -us -uc -b )
Print .def files as well as other files
#!/bin/sh # This is useful because it prints out all of the source files. Useful for # greps. find . -name \*.\[chyl\]\* | grep -v Lexer.cpp | grep -v llvmAsmParser.cpp | grep -v llvmAsmParser.h | grep -v '~$' | grep -v '\.ll$' | grep -v test | grep -v .flc | grep -v Sparc.burm.c
#!/bin/sh # This is useful because it prints out all of the source files. Useful for # greps. find . -name \*.\[cdhyl\]\* | grep -v Lexer.cpp | grep -v llvmAsmParser.cpp | grep -v llvmAsmParser.h | grep -v '~$' | grep -v '\.ll$' | grep -v test | grep -v .flc | grep -v Sparc.burm.c | grep -v '\.d$' | grep -v '\.dir$'
Revert "Fix build on Travis CI"
#!/usr/bin/env bash # # This script will deploy artifacts of the project. # set -e CD_DIR=$(cd "$(dirname "$0")"; pwd) ROOT_DIR=${CD_DIR}/../ MVNW="${ROOT_DIR}/mvnw -B" RELEASE_TYPE=${1:-snapshot} VERSION="$2" MAVEN_RELEASE_SETTINGS=${CD_DIR}/settings.xml GPG_TTY=$(tty) export GPG_TTY if [ "${RELEASE_TYPE}" == "release" ] && [ ! -z "${VERSION}" ]; then echo -e "[INFO] on a tag -> set pom.xml <version> to ${VERSION}" ${MVNW} versions:set -DnewVersion=${VERSION} else echo -e "[INFO] not on a tag -> keep snapshot version in pom.xml" fi ${MVNW} source:jar deploy -s ${MAVEN_RELEASE_SETTINGS} -DskipTests=true -P publishing
#!/usr/bin/env bash # # This script will deploy artifacts of the project. # set -e CD_DIR=$(cd "$(dirname "$0")"; pwd) ROOT_DIR=${CD_DIR}/../ MVNW="${ROOT_DIR}/mvnw -B" RELEASE_TYPE=${1:-snapshot} VERSION="$2" MAVEN_RELEASE_SETTINGS=${CD_DIR}/settings.xml if [ "${RELEASE_TYPE}" == "release" ] && [ ! -z "${VERSION}" ]; then echo -e "[INFO] on a tag -> set pom.xml <version> to ${VERSION}" ${MVNW} versions:set -DnewVersion=${VERSION} else echo -e "[INFO] not on a tag -> keep snapshot version in pom.xml" fi ${MVNW} source:jar deploy -s ${MAVEN_RELEASE_SETTINGS} -DskipTests=true -P publishing
Improve headless test running script
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the LICENSE file # in the root directory of this source tree. set -xeuo pipefail if [ `adb devices | wc -l` -lt "3" ] then echo "No devices are connected. Make sure emulator is booted with flipper sample app running" exit 1 fi yarn build-headless --mac unzip -u dist/Flipper-headless.zip -d /tmp (cd headless-tests && yarn test)
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the LICENSE file # in the root directory of this source tree. set -euo pipefail if [ `adb devices | wc -l` -lt "3" ] then echo "ERROR: No devices are connected. Make sure emulator is booted with flipper sample app running" exit 1 fi api_version=$(adb shell getprop ro.build.version.sdk) if [ "$api_version" != "24" ]; then echo "WARNING: Emulator has api version $api_version. Should be using API 24 for snapshot test to pass. ( Must match the one we request from oneworld at https://fburl.com/diffusion/op67q916 )" fi yarn build-headless --mac unzip -o dist/Flipper-headless.zip -d /tmp (cd headless-tests && yarn test)
Update comment to reference .dotfiles_profile
#!/usr/bin/env bash cd $(dirname $BASH_SOURCE) SETUP_DIR=$(pwd) # Only symlink bashrc and zshrc if they don't exist. # If they already exist but do not 'source ~/.dotfiles', then append it. for rc in bashrc zshrc; do if [ ! -e ~/.$rc ]; then ln -sfv $SETUP_DIR/$rc ~/.$rc elif ! grep -q -F 'source ~/.dotfiles_profile' ~/.$rc; then echo 'source ~/.dotfiles_profile' >> ~/.$rc fi done # Symlink dotfiles. for file in aliases dotfiles_profile exports gitconfig gitignore path; do mkdir -pv dotfiles.old [ -e ~/.$file ] && mv -v ~/.$file dotfiles.old/.$file ln -sfv $SETUP_DIR/$file ~/.$file done # macOS-specific installations. if [ $(uname -s) = 'Darwin' ]; then # Prevents macOS terminal from displaying last login on start. ln -sfv $SETUP_DIR/.hushlogin ~/.hushlogin # Install Homebrew if it doesn't exist. [ -z "$(which brew)" ] && ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" # Install Homebrew packages. brew install git imagemagick python python3 ruby tree zsh # Install vim with Python 3 flag. brew install vim --with-python3 fi # Load ~/.dotfiles which loads the other dotfiles. source ~/.dotfiles_profile unset SETUP_DIR
#!/usr/bin/env bash cd $(dirname $BASH_SOURCE) SETUP_DIR=$(pwd) # Only symlink bashrc and zshrc if they don't exist. # If they already exist but do not 'source ~/.dotfiles_profile', then append it. for rc in bashrc zshrc; do if [ ! -e ~/.$rc ]; then ln -sfv $SETUP_DIR/$rc ~/.$rc elif ! grep -q -F 'source ~/.dotfiles_profile' ~/.$rc; then echo 'source ~/.dotfiles_profile' >> ~/.$rc fi done # Symlink dotfiles. for file in aliases dotfiles_profile exports gitconfig gitignore path; do mkdir -pv dotfiles.old [ -e ~/.$file ] && mv -v ~/.$file dotfiles.old/.$file ln -sfv $SETUP_DIR/$file ~/.$file done # macOS-specific installations. if [ $(uname -s) = 'Darwin' ]; then # Prevents macOS terminal from displaying last login on start. ln -sfv $SETUP_DIR/.hushlogin ~/.hushlogin # Install Homebrew if it doesn't exist. [ -z "$(which brew)" ] && ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" # Install Homebrew packages. brew install git imagemagick python python3 ruby tree zsh # Install vim with Python 3 flag. brew install vim --with-python3 fi # Load ~/.dotfiles which loads the other dotfiles. source ~/.dotfiles_profile unset SETUP_DIR
Use ruby-1.8.7-p358 instead of the system installed debian version
#!/usr/bin/env bash set -e set -x eval "$(rbenv init -)" rbenv local 1.8.7-debian bundle install --deployment --path=.bundle/gems bundle exec rake spec
#!/usr/bin/env bash set -e set -x eval "$(rbenv init -)" rbenv local 1.8.7-p358 bundle install --deployment --path=.bundle/gems bundle exec rake spec
Install Automaton on set up
#!/usr/bin/env sh ln -s ~/.config/ghci ~/.ghci mkdir -p ~/.cache/psql_history # Neovim spell NVIM_SPELL_DEST=~/.local/share/nvim/site/spell/ NVIM_SPELL_EXTENSION=.utf-8.spl NVIM_SPELL_SOURCE=http://ftp.vim.org/vim/runtime/spell/ mkdir -p ${NVIM_SPELL_DEST} for l in en es; do curl ${NVIM_SPELL_SOURCE}${l}${NVIM_SPELL_EXTENSION} \ --output ${NVIM_SPELL_DEST}${l}${NVIM_SPELL_EXTENSION} done
#!/usr/bin/env sh ln -s ~/.config/ghci ~/.ghci mkdir -p ~/.cache/psql_history # Neovim git clone git@github.com:tssm/neovim-automaton \ ~/.local/share/nvim/site/pack/automaton/start/neovim-automaton NVIM_SPELL_DEST=~/.local/share/nvim/site/spell/ NVIM_SPELL_EXTENSION=.utf-8.spl NVIM_SPELL_SOURCE=http://ftp.vim.org/vim/runtime/spell/ mkdir -p ${NVIM_SPELL_DEST} for l in en es; do curl ${NVIM_SPELL_SOURCE}${l}${NVIM_SPELL_EXTENSION} \ --output ${NVIM_SPELL_DEST}${l}${NVIM_SPELL_EXTENSION} done
Use a different command to showcase scaling down
#!/bin/bash # deploy a replicated service -- one instance on every node # NOTE: must be run on the master node DESCRIPTOR=`pwd`/descriptors/replicated-service-definition.yml STACK_NAME=replicated-test echo Deploying Stack docker stack deploy --compose-file ${DESCRIPTOR} ${STACK_NAME} sleep 2 clear echo List all stacks docker stack ls sleep 2 clear echo Listing all services in the stack docker stack services ${STACK_NAME} sleep 2 clear echo Listing all tasks in the stack watch docker stack ps ${STACK_NAME} clear echo Scale the services up SERVICE_ID=$(docker stack services --quiet ${STACK_NAME}) docker service scale ${SERVICE_ID}=64 echo Listing all services in the stack watch docker stack services ${STACK_NAME} sleep 2 clear echo Scale the services down SERVICE_ID=$(docker stack services --quiet ${STACK_NAME}) docker service scale ${SERVICE_ID}=1 echo Listing all services in the stack watch docker service ps ${SERVICE_ID} clear
#!/bin/bash # deploy a replicated service -- one instance on every node # NOTE: must be run on the master node DESCRIPTOR=`pwd`/descriptors/replicated-service-definition.yml STACK_NAME=replicated-test echo Deploying Stack docker stack deploy --compose-file ${DESCRIPTOR} ${STACK_NAME} sleep 2 clear echo List all stacks docker stack ls sleep 2 clear echo Listing all services in the stack docker stack services ${STACK_NAME} sleep 2 clear echo Listing all tasks in the stack watch docker stack ps ${STACK_NAME} clear echo Scale the services up SERVICE_ID=$(docker stack services --quiet ${STACK_NAME}) docker service scale ${SERVICE_ID}=64 echo Listing all services in the stack watch docker stack services ${STACK_NAME} sleep 2 clear echo Scale the services down SERVICE_ID=$(docker stack services --quiet ${STACK_NAME}) docker service scale ${SERVICE_ID}=1 echo Listing all services in the stack watch docker stack services ${STACK_NAME} clear
Fix kumquat_admin_email for php-fpm.d value and path
#!/usr/bin/env bash # Configure PHP sendmail return-path if possible if mdata-get kumquat_admin_email 1>/dev/null 2>&1; then echo "php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f $(mdata-get kumquat_admin_email)" \ >> /opt/local/etc/fpm.d/pool-www.conf fi # Enable PHP-FPM /usr/sbin/svcadm enable svc:/pkgsrc/php-fpm:default
#!/usr/bin/env bash # Configure PHP sendmail return-path if possible if mdata-get kumquat_admin_email 1>/dev/null 2>&1; then echo "php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f $(mdata-get kumquat_admin_email)" \ >> /opt/local/etc/php-fpm.d/pool-www.conf fi # Enable PHP-FPM /usr/sbin/svcadm enable svc:/pkgsrc/php-fpm:default
Make sed reference more portable
log "setting hostname, IPs and resolvers" : ${RESOLVERS:=8.8.8.8 8.8.4.4} RESOLVERS=(${RESOLVERS}) echo "${HOSTNAME}" > /etc/nodename /bin/hostname ${HOSTNAME} sed -i'' '/nameserver/d' /etc/resolv.conf for HOST in ${RESOLVERS[@]}; do echo "nameserver ${HOST}" >> /etc/resolv.conf done if [ ${#NET_INTERFACES[@]} -gt 0 ]; then echo "${NET0_IP}"$'\t'"${HOSTNAME}" >> /etc/inet/hosts if [ ${#NET_INTERFACES[@]} -gt 1 ]; then echo "${NET1_IP}"$'\t'"${ZONENAME}"$'\t'"loghost" >> /etc/inet/hosts fi fi log "checking if we can reach the Internets" NETWORKING=no if dig www.joyent.com +short +time=2 +tries=1 >/dev/null 2>&1 && \ ping www.joyent.com 2 >/dev/null 2>&1 && \ curl -m 5 -s -I http://www.joyent.com >/dev/null; then NETWORKING=yes else log "continuing with no apparent Internet access" fi
log "setting hostname, IPs and resolvers" : ${RESOLVERS:=8.8.8.8 8.8.4.4} RESOLVERS=(${RESOLVERS}) echo "${HOSTNAME}" > /etc/nodename /bin/hostname ${HOSTNAME} sed '/nameserver/d' /etc/resolv.conf > /tmp/resolv.conf.tmp && \ mv /tmp/resolv.conf.tmp /etc/resolv.conf for HOST in ${RESOLVERS[@]}; do echo "nameserver ${HOST}" >> /etc/resolv.conf done if [ ${#NET_INTERFACES[@]} -gt 0 ]; then echo "${NET0_IP}"$'\t'"${HOSTNAME}" >> /etc/inet/hosts if [ ${#NET_INTERFACES[@]} -gt 1 ]; then echo "${NET1_IP}"$'\t'"${ZONENAME}"$'\t'"loghost" >> /etc/inet/hosts fi fi log "checking if we can reach the Internets" NETWORKING=no if dig www.joyent.com +short +time=2 +tries=1 >/dev/null 2>&1 && \ ping www.joyent.com 2 >/dev/null 2>&1 && \ curl -m 5 -s -I http://www.joyent.com >/dev/null; then NETWORKING=yes else log "continuing with no apparent Internet access" fi