Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Add contents test for app.rb
#!/usr/bin/env roundup describe 'sinatra-gen: generates a starter Sinatra project' sinatra_gen='./sinatra-gen' after() { rm -rf foo } it_displays_help_when_called_without_arguments() { $sinatra_gen | grep USAGE } it_displays_help_when_called_with_h() { $sinatra_gen -h | grep USAGE } it_displays_help_when_called_with_help() { $sinatra_gen --help | grep USAGE } it_fails_when_called_with_an_unknown_argument() { $sinatra_gen -f 2>&1 | grep UNKNOWN $sinatra_gen --he 2>&1 | grep UNKNOWN } it_displays_the_unknown_argument_in_the_error_message() { $sinatra_gen -f 2>&1 | grep "\-f" $sinatra_gen --he 2>&1 | grep "--he" } it_creates_a_directory_with_the_name_provided() { $sinatra_gen foo test -d foo } it_does_not_create_a_directory_if_it_already_exists() { mkdir foo $sinatra_gen foo 2>&1 | grep EXISTS } it_creates_an_app_dot_rb_file() { $sinatra_gen foo test -f foo/app.rb }
#!/usr/bin/env roundup describe 'sinatra-gen: generates a starter Sinatra project' sinatra_gen='./sinatra-gen' after() { rm -rf foo } it_displays_help_when_called_without_arguments() { $sinatra_gen | grep USAGE } it_displays_help_when_called_with_h() { $sinatra_gen -h | grep USAGE } it_displays_help_when_called_with_help() { $sinatra_gen --help | grep USAGE } it_fails_when_called_with_an_unknown_argument() { $sinatra_gen -f 2>&1 | grep UNKNOWN $sinatra_gen --he 2>&1 | grep UNKNOWN } it_displays_the_unknown_argument_in_the_error_message() { $sinatra_gen -f 2>&1 | grep "\-f" $sinatra_gen --he 2>&1 | grep "--he" } it_creates_a_directory_with_the_name_provided() { $sinatra_gen foo test -d foo } it_does_not_create_a_directory_if_it_already_exists() { mkdir foo $sinatra_gen foo 2>&1 | grep EXISTS } it_creates_an_app_dot_rb_file() { $sinatra_gen foo test -f foo/app.rb grep Bundler foo/app.rb grep mustache foo/app.rb }
Test compilation of xen as well
bash -ex .travis-opam.sh eval `opam config env` opam install mirage --yes opam source mirage-skeleton cd mirage-skeleton.dev && make
bash -ex .travis-opam.sh eval `opam config env` opam install mirage --yes opam source mirage-skeleton cd mirage-skeleton.dev MODE=unix make MODE=xen make
Update Script to Re-use Development Resources scripts
#!/usr/bin/env bash echo 'deb http://www.rabbitmq.com/debian/ testing main' | sudo tee /etc/apt/sources.list.d/rabbitmq.list && sudo apt-get update && sudo apt-get install rabbitmq-server sudo apt-get install cmake libboost-dev openssl libssl-dev libblkid-dev e2fslibs-dev libboost-all-dev libaudit-dev software-properties-common build-essential mesa-common-dev libgl1-mesa-dev cd /tmp/ if ls /usr/local/lib/librabbitmq.* 1> /dev/null 2>&1 ; then echo "Rabbitmq already setup" else git clone https://github.com/alanxz/rabbitmq-c mkdir rabbitmq-c/build && cd rabbitmq-c/build cmake .. cmake --build . sudo cp librabbitmq/*.a /usr/local/lib/ sudo cp librabbitmq/*.so* /usr/local/lib/ fi cd /tmp/ if ls /usr/local/libSimpleAmqpClient.* 1> /dev/null 2>&1 || [ -d /usr/local/include/SimpleAmqpClient ]; then echo "SimpleAmqpClient already setup" else git clone https://github.com/alanxz/SimpleAmqpClient mkdir SimpleAmqpClient/build && cd SimpleAmqpClient/build cmake -DRabbitmqc_INCLUDE_DIR=../../rabbitmq-c/librabbitmq -DRabbitmqc_LIBRARY=../../rabbitmq-c/build/librabbitmq .. make sudo mkdir /usr/local/include/SimpleAmqpClient sudo cp *.so* /usr/local/lib/ sudo cp ../src/SimpleAmqpClient/*.h /usr/local/include/SimpleAmqpClient fi
#!/usr/bin/env bash if [ "$(id -u)" != "0" ]; then echo "Permission Denied. Please run as root" exit 1 fi echo 'deb http://www.rabbitmq.com/debian/ testing main' | sudo tee /etc/apt/sources.list.d/rabbitmq.list && sudo apt-get update && sudo apt-get install rabbitmq-server sudo apt-get install cmake libboost-dev openssl libssl-dev libblkid-dev e2fslibs-dev libboost-all-dev libaudit-dev software-properties-common build-essential mesa-common-dev libgl1-mesa-dev ( cd /tmp/ git clone https://github.com/UCSolarCarTeam/Development-Resources.git sudo ./Development-Resources/InstallScripts/googletest-setup.sh sudo ./Development-Resources/InstallScripts/rabbitmq-setup.sh ) if [ ! -f build/config.ini ]; then cp src/config.ini.example build/config.ini fi
Use npm for package instalation
#!/bin/sh if [ ! -d elation ]; then git clone https://github.com/jbaicoianu/elation.git cd elation git clone https://github.com/jbaicoianu/elation-engine.git components/engine git clone https://github.com/jbaicoianu/cyclone-physics-js.git components/physics git clone https://github.com/jbaicoianu/elation-share.git components/share #git clone https://github.com/jbaicoianu/janusweb.git components/janusweb ln -s `pwd`/.. components/janusweb ./elation web init ./elation component enable engine physics share janusweb fi
#!/bin/bash echo "Installing dependencies..." npm install echo "done" echo echo "Creating directory tree..." DEPENDENCYPATHS=$(npm ls -parseable) declare -A dependencies for DEP in $DEPENDENCYPATHS; do DEPNAME=$(basename $DEP) dependencies[$DEPNAME]=$DEP done if [ ! -d elation ]; then ln -s ${dependencies["elation"]} cd elation/components ln -s ${dependencies["elation-engine"]} engine ln -s ${dependencies["elation-share"]} share ln -s ${dependencies["cyclone-physics"]} physics ln -s ${dependencies["janusweb"]} janusweb cd .. ./elation web init ./elation component enable engine physics share janusweb fi echo "done"
Upgrade Java 17 version in CI image
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u322-b06/OpenJDK8U-jdk_x64_linux_hotspot_8u322b06.tar.gz" ;; java11) echo "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.14_9.tar.gz" ;; java17) echo "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.1%2B12/OpenJDK17U-jdk_x64_linux_hotspot_17.0.1_12.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u322-b06/OpenJDK8U-jdk_x64_linux_hotspot_8u322b06.tar.gz" ;; java11) echo "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.14_9.tar.gz" ;; java17) echo "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_linux_hotspot_17.0.2_8.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
Fix conda dev artifact builds
#!/usr/bin/env bash set -e if [ $TRAVIS_OS_NAME == "linux" ]; then MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh" else MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-MacOSX-x86_64.sh" fi wget -O miniconda.sh $MINICONDA_URL MINICONDA=$TRAVIS_BUILD_DIR/miniconda bash miniconda.sh -b -p $MINICONDA export PATH="$MINICONDA/bin:$PATH" conda update -y -q conda conda info -a conda config --set show_channel_urls yes conda config --add channels conda-forge conda config --add channels apache conda install --yes conda-build jinja2 anaconda-client cd $TRAVIS_BUILD_DIR conda build conda.recipe CONDA_PACKAGE=`conda build --output conda.recipe | grep bz2` if [ $TRAVIS_BRANCH == "master" ] && [ $TRAVIS_PULL_REQUEST == "false" ]; then anaconda --token $ANACONDA_TOKEN upload $CONDA_PACKAGE --user apache --channel dev; fi
#!/usr/bin/env bash set -e if [ $TRAVIS_OS_NAME == "linux" ]; then MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh" else MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-MacOSX-x86_64.sh" fi wget -O miniconda.sh $MINICONDA_URL MINICONDA=$HOME/miniconda bash miniconda.sh -b -p $MINICONDA export PATH="$MINICONDA/bin:$PATH" conda update -y -q conda conda install -y -q conda-build conda info -a conda config --set show_channel_urls yes conda config --add channels conda-forge conda config --add channels apache conda install --yes jinja2 anaconda-client cd $TRAVIS_BUILD_DIR conda build conda.recipe CONDA_PACKAGE=`conda build --output conda.recipe | grep bz2` if [ $TRAVIS_BRANCH == "master" ] && [ $TRAVIS_PULL_REQUEST == "false" ]; then anaconda --token $ANACONDA_TOKEN upload $CONDA_PACKAGE --user apache --channel dev; fi
Update install script to point towards new bucket
#!/usr/bin/env bash DIR=`dirname $0` TILE_DIR=$DIR/../tiles if [ ! -d "$TILE_DIR" ]; then mkdir "$TILE_DIR" fi TILES="01-ne.country 02-ne.province 03-tiger.zipcode 04-mb.place" for NAME in $TILES; do if [ ! -f "$TILE_DIR/$NAME.mbtiles" ]; then echo "Downloading $NAME..." curl -s -o "$TILE_DIR/$NAME.mbtiles" "http://mapbox-carmen.s3.amazonaws.com/dev/$NAME.mbtiles" fi done
#!/usr/bin/env bash DIR=`dirname $0` TILE_DIR=$DIR/../tiles if [ ! -d "$TILE_DIR" ]; then mkdir "$TILE_DIR" fi TILES="01-ne.country 02-ne.province 03-tiger.zipcode 04-mb.place" for NAME in $TILES; do if [ ! -f "$TILE_DIR/$NAME.mbtiles" ]; then echo "Downloading $NAME..." curl -s -o "$TILE_DIR/$NAME.mbtiles" "https://s3.amazonaws.com/mapbox/carmen/dev/$NAME.mbtiles" fi done
Fix for version.txt creation on linux
#!/bin/sh if [ -z "$1" ] then echo "$0 <outpath>" elif which -s svn then if [ -d .svn ] then LC_ALL=C svn info >"$1/version.txt" else REL=$(basename $PWD) echo "Version: $REL" >$1/version.txt fi fi
#!/bin/sh if [ -z "$1" ] then echo "$0 <outpath>" elif which svn >/dev/null then if [ -d .svn ] then LC_ALL=C svn info >"$1/version.txt" else REL=$(basename $PWD) echo "Version: $REL" >$1/version.txt fi fi
Update setup-disks to not use RAID.
#!/bin/sh umount /mnt yes | mdadm --create /dev/md0 --raid-devices=2 --level=1 /dev/xvdb /dev/xvdc # Add disk to mdadm.conf so it will be enabled on boot mdadm --examine --scan | sed 's/ metadata=1.2//; s/ name.*//; s|md/0|md0|' >> /etc/mdadm/mdadm.conf blockdev --setra 4096 /dev/xvdb blockdev --setra 4096 /dev/xvdc blockdev --setra 4096 /dev/md0 echo deadline > /sys/block/xvdb/queue/scheduler echo deadline > /sys/block/xvdc/queue/scheduler mkfs.xfs /dev/md0 echo "/dev/md0 /srv xfs nofail,noatime,barrier 1 1" >> /etc/fstab mount /srv # TODO use systemctl instead of pg_ctlcluster on CentOS pg_ctlcluster 9.5 main stop mv /var/lib/postgresql /srv ln -s /srv/postgresql/ /var/lib mv /tmp /srv mkdir /tmp mount --bind /srv/tmp /tmp echo "/srv/tmp /tmp bind nofail,defaults,bind 0 0" >> /etc/fstab # TODO use systemctl instead of pg_ctlcluster on CentOS pg_ctlcluster 9.5 main start # Disable /mnt line for /dev/xvdb umount /mnt sed -i 's|^/dev/xvdb|#/dev/xvdb|' /etc/fstab # Update the initrd so we can use the new array post-boot update-initramfs -u
#!/bin/sh umount /mnt LOCALDISK=/dev/nvme0n1 mkfs.xfs $LOCALDISK echo "$LOCALDISK /srv xfs nofail,noatime 1 1" >> /etc/fstab mount /srv # TODO use systemctl instead of pg_ctlcluster on CentOS pg_ctlcluster 9.5 main stop mv /var/lib/postgresql /srv ln -s /srv/postgresql/ /var/lib mv /tmp /srv mkdir /tmp mount --bind /srv/tmp /tmp echo "/srv/tmp /tmp bind nofail,defaults,bind 0 0" >> /etc/fstab # TODO use systemctl instead of pg_ctlcluster on CentOS pg_ctlcluster 9.5 main start # Disable /mnt line for /dev/xvdb umount /mnt sed -i 's|^/dev/xvdb|#/dev/xvdb|' /etc/fstab
Fix strict key checking deadlocking deploy
#!/usr/bin/env bash mkdir -p ~/.ssh echo "$SFTP_KEY" | base64 --decode > ~/.ssh/id_rsa BASE_DIR="${SFTP_DIR}/${TRAVIS_REPO_SLUG}/${TRAVIS_BUILD_NUMBER}_${TRAVIS_COMMIT}" NATIVES_DIR="$BASE_DIR/natives" BASE_LOC="${SFTP_USER}@${SFTP_HOST}" ssh "$BASE_LOC" "mkdir -p '$NATIVES_DIR'" mv "$(ls TachiServer/build/libs | grep TachiServer-all)" /tmp/server.jar rsync -v -e ssh /tmp/server.jar "$BASE_LOC$BASE_DIR" ls -1 bootui/tachiweb-bootstrap/dist | grep -i tachiweb* | while read x; do BIN_PATH="$(realpath "bootui/tachiweb-bootstrap/dist/$x")" rsync -v -e ssh "$BIN_PATH" "$BASE_LOC$NATIVES_DIR" done
#!/usr/bin/env bash mkdir -p ~/.ssh echo "$SFTP_KEY" | base64 --decode > ~/.ssh/id_rsa BASE_DIR="${SFTP_DIR}/${TRAVIS_REPO_SLUG}/${TRAVIS_BUILD_NUMBER}_${TRAVIS_COMMIT}" NATIVES_DIR="$BASE_DIR/natives" BASE_LOC="${SFTP_USER}@${SFTP_HOST}" ssh -o "StrictHostKeyChecking no" "$BASE_LOC" "mkdir -p '$NATIVES_DIR'" mv "$(ls TachiServer/build/libs | grep TachiServer-all)" /tmp/server.jar rsync -v -e ssh /tmp/server.jar "$BASE_LOC$BASE_DIR" ls -1 bootui/tachiweb-bootstrap/dist | grep -i tachiweb* | while read x; do BIN_PATH="$(realpath "bootui/tachiweb-bootstrap/dist/$x")" rsync -v -e ssh "$BIN_PATH" "$BASE_LOC$NATIVES_DIR" done
Add npm and Gulp instructions.
#!/bin/bash echo ',-------------------------------------------------.' echo '| Run: gulp watch |' echo '`-------------------------------------------------'"'" docker run \ --rm \ -it \ --name debiki-dev-gulp \ -v="`pwd`/../:/opt/debiki/" \ debiki-dev-gulp:v0 \ /bin/bash # vim: fdm=marker et ts=2 sw=2 tw=0 list
#!/bin/bash echo ',---------------------------------------------------------.' echo '| Run: gulp watch |' echo '| But first, if not already done, run: |' echo '| npm install |' echo '| GIT_WORK_TREE=/opt/debiki bower --allow-root install |' echo '`--------------------------------------------------------'"'" docker run \ --rm \ -it \ --name debiki-dev-gulp \ -v="`pwd`/../:/opt/debiki/" \ debiki-dev-gulp:v0 \ /bin/bash # vim: fdm=marker et ts=2 sw=2 tw=0 list
Move dependencies first in build order
#!/bin/bash set -o errexit set -o nounset set -o pipefail function bigPrint() { echo "" echo "====================================" echo "$1" echo "====================================" echo "" } export APPS_TO_BUILD="sk-config twixtykit mpeg-munger sk-node sk-schema sk-static sk-time sk-client pipeland bellamie gort shoko broadcast-scheduler vertex-scheduler overlay autosync"
#!/bin/bash set -o errexit set -o nounset set -o pipefail function bigPrint() { echo "" echo "====================================" echo "$1" echo "====================================" echo "" } export APPS_TO_BUILD="sk-config sk-schema sk-client twixtykit mpeg-munger sk-node sk-static sk-time pipeland bellamie gort shoko broadcast-scheduler vertex-scheduler overlay autosync"
Use only bash parameter expansion
#!/bin/bash set -eux basedir=$1 specdir=${basedir}/openstack/ WORKSPACE=${WORKSPACE:-$basedir} echo "run renderspec over specfiles from ${specdir}" for spec in ${specdir}/**/*.spec.j2; do mkdir -p $WORKSPACE/logs/ for specstyle in "suse" "fedora"; do echo "run ${spec} for ${specstyle}" renderspec --spec-style ${specstyle} ${spec} \ -o $WORKSPACE/logs/`basename ${spec}`.${specstyle} done done
#!/bin/bash set -eux basedir=$1 specdir=${basedir}/openstack/ WORKSPACE=${WORKSPACE:-$basedir} echo "run renderspec over specfiles from ${specdir}" for spec in ${specdir}/**/*.spec.j2; do mkdir -p $WORKSPACE/logs/ for specstyle in "suse" "fedora"; do echo "run ${spec} for ${specstyle}" renderspec --spec-style ${specstyle} ${spec} \ -o $WORKSPACE/logs/${spec##*/}.${specstyle} done done
Allow cd tab to directories selection
# matches case insensitive for lowercase zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}' # pasting with tabs doesn't perform completion zstyle ':completion:*' insert-tab pending
# matches case insensitive for lowercase zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}' # pasting with tabs doesn't perform completion zstyle ':completion:*' insert-tab pending ## case-insensitive (all),partial-word and then substring completion if [ "x$CASE_SENSITIVE" = "xtrue" ]; then zstyle ':completion:*' matcher-list 'r:|[._-]=* r:|=*' 'l:|=* r:|=*' unset CASE_SENSITIVE else zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=*' 'l:|=* r:|=*' fi zstyle ':completion:*' list-colors '' zstyle ':completion:*:*:*:*:*' menu select zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#) ([0-9a-z-]#)*=01;34=0=01' zstyle ':completion:*:*:*:*:processes' command "ps -u `whoami` -o pid,user,comm -w -w" # disable named-directories autocompletion zstyle ':completion:*:cd:*' tag-order local-directories directory-stack path-directories cdpath=(.)
Change order of parameters, to work on other os
#!/usr/bin/env bash set -e { echo "all::" for x in *.ys; do echo "all:: run-$x" echo "run-$x:" echo " @echo 'Running $x..'" echo " @../../yosys -ql ${x%.ys}.log $x -w 'Yosys has only limited support for tri-state logic at the moment.'" done for s in *.sh; do if [ "$s" != "run-test.sh" ]; then echo "all:: run-$s" echo "run-$s:" echo " @echo 'Running $s..'" echo " @bash $s" fi done } > run-test.mk exec ${MAKE:-make} -f run-test.mk
#!/usr/bin/env bash set -e { echo "all::" for x in *.ys; do echo "all:: run-$x" echo "run-$x:" echo " @echo 'Running $x..'" echo " @../../yosys -ql ${x%.ys}.log -w 'Yosys has only limited support for tri-state logic at the moment.' $x" done for s in *.sh; do if [ "$s" != "run-test.sh" ]; then echo "all:: run-$s" echo "run-$s:" echo " @echo 'Running $s..'" echo " @bash $s" fi done } > run-test.mk exec ${MAKE:-make} -f run-test.mk
Disable automatic udev rules for network interfaces in CentOS
#!/bin/sh -eux if [ -s /etc/oracle-release ]; then distro = 'oracle' elif [ -s /etc/enterprise-release ]; then distro = 'oracle' elif [ -s /etc/redhat-release ]; then # should ouput 'centos' OR 'red hat' distro=`cat /etc/redhat-release | sed 's/^\(CentOS\|Red Hat\).*/\1/i' | tr '[:upper:]' '[:lower:]'` fi # Remove development and kernel source packages yum -y remove gcc cpp kernel-devel kernel-headers perl; if [ "$distro" != 'red hat' ]; then yum -y clean all; fi # Clean up network interface persistence rm -f /etc/udev/rules.d/70-persistent-net.rules; for ndev in `ls -1 /etc/sysconfig/network-scripts/ifcfg-*`; do if [ "`basename $ndev`" != "ifcfg-lo" ]; then sed -i '/^HWADDR/d' "$ndev"; sed -i '/^UUID/d' "$ndev"; fi done rm -f VBoxGuestAdditions_*.iso VBoxGuestAdditions_*.iso.?;
#!/bin/sh -eux if [ -s /etc/oracle-release ]; then distro = 'oracle' elif [ -s /etc/enterprise-release ]; then distro = 'oracle' elif [ -s /etc/redhat-release ]; then # should ouput 'centos' OR 'red hat' distro=`cat /etc/redhat-release | sed 's/^\(CentOS\|Red Hat\).*/\1/i' | tr '[:upper:]' '[:lower:]'` fi # Remove development and kernel source packages yum -y remove gcc cpp kernel-devel kernel-headers perl; if [ "$distro" != 'red hat' ]; then yum -y clean all; fi # Clean up network interface persistence rm -f /etc/udev/rules.d/70-persistent-net.rules; mkdir -p /etc/udev/rules.d/70-persistent-net.rules; rm -f /lib/udev/rules.d/75-persistent-net-generator.rules; rm -rf /dev/.udev/; for ndev in `ls -1 /etc/sysconfig/network-scripts/ifcfg-*`; do if [ "`basename $ndev`" != "ifcfg-lo" ]; then sed -i '/^HWADDR/d' "$ndev"; sed -i '/^UUID/d' "$ndev"; fi done rm -f VBoxGuestAdditions_*.iso VBoxGuestAdditions_*.iso.?;
Add gcc-libs to geos runtime deps and use patchelf to fix rpath
pkg_name=geos pkg_origin=core pkg_version=3.6.2 pkg_description="GEOS (Geometry Engine - Open Source) is a C++ port of the ​Java Topology Suite (JTS)." pkg_upstream_url=http://trac.osgeo.org/geos pkg_license=('LGPL') pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>" pkg_source=http://download.osgeo.org/geos/geos-${pkg_version}.tar.bz2 pkg_shasum=045a13df84d605a866602f6020fc6cbf8bf4c42fb50de237a08926e1d7d7652a pkg_build_deps=( core/gcc core/make ) pkg_deps=( core/glibc ) pkg_bin_dirs=(bin) pkg_include_dirs=(include) pkg_lib_dirs=(lib)
pkg_name=geos pkg_origin=core pkg_version=3.6.2 pkg_description="GEOS (Geometry Engine - Open Source) is a C++ port of the ​Java Topology Suite (JTS)." pkg_upstream_url=http://trac.osgeo.org/geos pkg_license=('LGPL') pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>" pkg_source=http://download.osgeo.org/geos/geos-${pkg_version}.tar.bz2 pkg_shasum=045a13df84d605a866602f6020fc6cbf8bf4c42fb50de237a08926e1d7d7652a pkg_build_deps=( core/gcc core/make core/patchelf ) pkg_deps=( core/glibc core/gcc-libs ) pkg_bin_dirs=(bin) pkg_include_dirs=(include) pkg_lib_dirs=(lib) do_install() { do_default_install build_line "Patching ELF binaries:" find "$pkg_prefix/lib" -type f -executable \ -exec sh -c 'file -i "$1" | grep -q "x-sharedlib; charset=binary"' _ {} \; \ -print \ -exec patchelf --set-rpath "${LD_RUN_PATH}" {} \; }
Add isort to install prerequisites
#!/bin/bash os=$(uname -s) case $os in "Darwin") brew install \ cmake \ coreutils \ cscope \ ctags \ git \ gnu-sed \ google-java-format \ gradle \ grep \ hub \ macvim \ maven \ node \ openssl \ pstree \ python \ python3 \ readline \ sqlite \ tmux \ tree \ ;; "Linux" ) sudo apt-get install \ build-essential \ cmake \ exuberant-ctags \ git-all \ kompare \ python-dev \ python3-dev \ python3-pip \ scala \ tmux \ tree \ vim ;; esac pip3 install black cmake_format flake8 mypy pylint
#!/bin/bash os=$(uname -s) case $os in "Darwin") brew install \ cmake \ coreutils \ cscope \ ctags \ git \ gnu-sed \ google-java-format \ gradle \ grep \ hub \ macvim \ maven \ node \ openssl \ pstree \ python \ python3 \ readline \ sqlite \ tmux \ tree \ ;; "Linux" ) sudo apt-get install \ build-essential \ cmake \ exuberant-ctags \ git-all \ kompare \ python-dev \ python3-dev \ python3-pip \ scala \ tmux \ tree \ vim ;; esac pip3 install \ black \ cmake_format \ flake8 \ isort \ mypy \ pylint
Add checks for gettext and libtool; weird bugs happen without these
#!/bin/bash . ./config.sh function build_toolchain_file () { if [ ! $1 ] then echo 'No tool chain file passed to "build_toolchain_file"' exit 1 fi echo "SET(CMAKE_SYSTEM_NAME Windows)" > $1 echo "SET(CMAKE_C_COMPILER $CC)" >> $1 echo "SET(CMAKE_C_COMPILER_ENV_VAR $CC)" >> $1 echo "SET(CMAKE_CXX_COMPILER $CXX)" >> $1 echo "SET(CMAKE_RC_COMPILER $WINDRES)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH $HOST_PREFIX)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)" >> $1 } export -f build_toolchain_file for module in `cat modules_list` do export MODULE=$module cd $BASE_DIR/source/$MODULE if [ -f ../../patches/$MODULE.patch ] then patch -p0 < ../../patches/$MODULE.patch fi ../../modules/build-$MODULE.sh || exit 1 done
#!/bin/bash . ./config.sh if [ ! `which msgfmt` ] then echo "Couldn't find msgfmt program, install gettext" exit 1 fi if [ ! `which libtool` ] then echo "Couldn't find libtool program, install libtool" exit 1 fi function build_toolchain_file () { if [ ! $1 ] then echo 'No tool chain file passed to "build_toolchain_file"' exit 1 fi echo "SET(CMAKE_SYSTEM_NAME Windows)" > $1 echo "SET(CMAKE_C_COMPILER $CC)" >> $1 echo "SET(CMAKE_C_COMPILER_ENV_VAR $CC)" >> $1 echo "SET(CMAKE_CXX_COMPILER $CXX)" >> $1 echo "SET(CMAKE_RC_COMPILER $WINDRES)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH $HOST_PREFIX)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)" >> $1 echo "SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)" >> $1 } export -f build_toolchain_file for module in `cat modules_list` do export MODULE=$module cd $BASE_DIR/source/$MODULE if [ -f ../../patches/$MODULE.patch ] then patch -p0 < ../../patches/$MODULE.patch fi ../../modules/build-$MODULE.sh || exit 1 done
Make the script work with vanilla osx sed
#!/usr/bin/env bash newAppName=$1 appRoot=`dirname $0`/.. [ -z "${newAppName}" ] && echo 'Missing required parameter newAppName' && exit 1 grep -rI 'PepperoniAppTemplate' --exclude='rename.sh' $appRoot/* | tr ':' ' ' | awk '{print $1}' | uniq |xargs -I{} sed -i "s/PepperoniAppTemplate/${newAppName}/g" {} grep -rI 'pepperoniapptemplate' --exclude='rename.sh' $appRoot/* | tr ':' ' ' | awk '{print $1}' | uniq |xargs -I{} sed -i "s/pepperoniapptemplate/`echo $newAppName | tr '[:upper:]' '[:lower:]'`/g" {} for fileToMove in `find $appRoot/ios -name '*PepperoniAppTemplate*' -depth`; do mv $fileToMove `echo $fileToMove | sed "s/\(.*\)PepperoniAppTemplate/\1$newAppName/g"` done
#!/usr/bin/env bash newAppName=$1 appRoot=`dirname $0`/.. [ -z "${newAppName}" ] && echo 'Missing required parameter newAppName' && exit 1 grep -rI 'PepperoniAppTemplate' --exclude='rename.sh' $appRoot/* | tr ':' ' ' | awk '{print $1}' | uniq | xargs -I{} sed -i.bak "s/PepperoniAppTemplate/${newAppName}/g" {} grep -rI 'pepperoniapptemplate' --exclude='rename.sh' $appRoot/* | tr ':' ' ' | awk '{print $1}' | uniq | xargs -I{} sed -i.bak "s/pepperoniapptemplate/`echo $newAppName | tr '[:upper:]' '[:lower:]'`/g" {} find . -name '*.bak' -exec rm {} \; for fileToMove in `find $appRoot/ios -depth -name '*PepperoniAppTemplate*'`; do mv $fileToMove `echo $fileToMove | sed "s/\(.*\)PepperoniAppTemplate/\1$newAppName/g"` done
Enable parallel building of cmake tool.
#! /bin/bash # # CK installation script for cmake. # # See CK LICENSE.txt for licensing details. # See CK COPYRIGHT.txt for copyright details. # # Environment variables defined by CK: # PACKAGE_DIR # INSTALL_DIR function exit_if_error() { message=${1:-"unknown"} if [ "${?}" != "0" ]; then echo "Error: ${message}!" exit 1 fi } SRC_DIR=${INSTALL_DIR}/${PACKAGE_NAME2_LINUX} cd ${SRC_DIR} env CC=${CK_CC} CXX=${CK_CXX} ./bootstrap exit_if_error "Bootsrap failed" make exit_if_error "Building CMake failed" cd ${INSTALL_DIR} ln -s ${SRC_DIR}/bin . return 0
#! /bin/bash # # CK installation script for cmake. # # See CK LICENSE.txt for licensing details. # See CK COPYRIGHT.txt for copyright details. # # Environment variables defined by CK: # PACKAGE_DIR # INSTALL_DIR function exit_if_error() { message=${1:-"unknown"} if [ "${?}" != "0" ]; then echo "Error: ${message}!" exit 1 fi } SRC_DIR=${INSTALL_DIR}/${PACKAGE_NAME2_LINUX} cd ${SRC_DIR} env CC=${CK_CC} CXX=${CK_CXX} ./bootstrap exit_if_error "Bootsrap failed" make -j exit_if_error "Building CMake failed" cd ${INSTALL_DIR} ln -s ${SRC_DIR}/bin . return 0
Switch to using headless chrome for feature browser tests
#!/bin/bash ./test/config_cci.sh # master branch test if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ]; then # Create scratch org config as default org cci org scratch browsertest_classic browsertests_master --default # Install latest beta cci flow run ci_beta_install exit_status=$? if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" cci org scratch_delete browsertests_master exit 1 fi # Run the browser tests cci flow run browsertests -o browsertests__use_saucelabs True exit_status=$? # Delete the scratch org cci org scratch_delete browsertest_master if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" exit 1 fi # All other branches else # Create scratch org config as default org cci org scratch browsertest_classic_namespaced browsertests_feature --default # Deploy unmanaged metadata cci flow run dev_org exit_status=$? if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" cci org scratch_delete browsertests_feature exit 1 fi # Run the browser tests cci flow run browsertests -o browsertests__use_saucelabs True exit_status=$? # Delete the scratch org cci org scratch_delete browsertest_feature if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" exit 1 fi fi
#!/bin/bash ./test/config_cci.sh # master branch test if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ]; then # Create scratch org config as default org cci org scratch browsertest_classic browsertests_master --default # Install latest beta cci flow run ci_beta_install exit_status=$? if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" cci org scratch_delete browsertests_master exit 1 fi # Run the browser tests cci flow run ci_browsertests exit_status=$? # Delete the scratch org cci org scratch_delete browsertest_master if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" exit 1 fi # All other branches else # Create scratch org config as default org cci org scratch browsertest_classic_namespaced browsertests_feature --default # Deploy unmanaged metadata cci flow run dev_org exit_status=$? if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" cci org scratch_delete browsertests_feature exit 1 fi # Run the browser tests cci flow run browsertests exit_status=$? # Delete the scratch org cci org scratch_delete browsertest_feature if [ "$exit_status" = "1" ]; then echo "Flow execution failed, failing test" exit 1 fi fi
Change search google for googledotcom
#!/bin/bash echo [$@] echo "command=xdg-open \"http://www.google.com.hk/webhp?hl=zh-CN#safe=strict&hl=zh-CN&q=$@\"" echo "icon=" echo "subtext=Search on google for $@"
#!/bin/bash echo [$@] echo "command=xdg-open \"https://www.google.com/search?q=$@\"" echo "icon=" echo "subtext=Search on google for $@"
Use secretive for ssh when present
SSH_ENV=$HOME/.ssh/environment if [ -n "$SSH_CONNECTION" ]; then return fi function start_agent { echo -n "Initialising new SSH agent... " /usr/bin/ssh-agent > ${SSH_ENV} chmod 600 ${SSH_ENV} source ${SSH_ENV} } if [ -f "${SSH_ENV}" ]; then source ${SSH_ENV} > /dev/null #cygwin: ps -efp ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || { ps ${SSH_AGENT_PID} > /dev/null || { start_agent; } else start_agent; fi
SSH_ENV=$HOME/.ssh/environment if [ -n "$SSH_CONNECTION" ]; then return fi function start_agent { echo -n "Initialising new SSH agent... " /usr/bin/ssh-agent > ${SSH_ENV} chmod 600 ${SSH_ENV} source ${SSH_ENV} } if [ -x "${HOME}/Library/Containers/com.maxgoedjen.Secretive.SecretAgent/Data/socket.ssh" ]; then export SSH_AUTH_SOCK="${HOME}/Library/Containers/com.maxgoedjen.Secretive.SecretAgent/Data/socket.ssh" elif [ -f "${SSH_ENV}" ]; then source ${SSH_ENV} > /dev/null #cygwin: ps -efp ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || { ps ${SSH_AGENT_PID} > /dev/null || { start_agent; } else start_agent; fi
Add info so Jenkins can make a git commit
#!/bin/bash -e module load git/2.8.4 git clone git@github.com:MeteoSwiss-APN/cosmo-pompa cd cosmo-pompa git remote add -f testsuite git@github.com:C2SM-RCM/testsuite git subtree pull --prefix cosmo/test/testsuite/src/ testsuite ${BRANCH} --squash -m "Update testsuite" cd cosmo/test test -f ./jenkins/jenkins.sh || exit 1 ./jenkins/jenkins.sh test
#!/bin/bash -e module load git/2.8.4 git clone git@github.com:MeteoSwiss-APN/cosmo-pompa cd cosmo-pompa git config user.email "jenkins@cscs.ch" git config user.name "Mr. Jenkins" git remote add -f testsuite git@github.com:C2SM-RCM/testsuite git subtree pull --prefix cosmo/test/testsuite/src/ testsuite ${BRANCH} --squash -m "Update testsuite" cd cosmo/test test -f ./jenkins/jenkins.sh || exit 1 ./jenkins/jenkins.sh test
Remove the source folders that are created by integration tests
#!/usr/bin/env bash set -ex bundle exec rspec --tag ~fly pushd dockerfiles/depwatcher shards crystal spec --no-debug popd bundle exec rake
#!/usr/bin/env bash set -ex cd "$( dirname "${BASH_SOURCE[0]}" )/.." bundle exec rspec --tag ~fly pushd dockerfiles/depwatcher shards crystal spec --no-debug popd bundle exec rake rm -rf source-*-latest
Update to new alpine image, but one dependency still broken so that this image is unusable till a new loggly plugin version gets released.
#!/bin/bash # # A helper script for ENTRYPOINT. set -e if [ -n "${LOGGLY_ENV_FILE}" ]; then source ${LOGGLY_ENV_FILE} fi # Resetting conf file on each startup cp /opt/fluentd/fluent.conf /etc/fluent.conf loggly_tag="fluentdloggly" if [ -n "${LOGGLY_TAG}" ]; then loggly_tag=${LOGGLY_TAG} fi loggly_match="**" if [ -n "${LOGGLY_MATCH}" ]; then loggly_match=${LOGGLY_MATCH} fi if [ -n "${LOGGLY_TOKEN}" ]; then cat >> /etc/fluent/fluent.conf <<_EOF_ <match ${loggly_match}> type loggly loggly_url https://logs-01.loggly.com/inputs/${LOGGLY_TOKEN}/tag/${loggly_tag} </match> _EOF_ fi unset LOGGLY_TOKEN # Invoke entrypoint of parent container if [ "$1" = 'fluentd' ]; then /etc/fluent/docker-entrypoint.sh $@ fi exec "$@"
#!/bin/bash # # A helper script for ENTRYPOINT. set -e if [ -n "${LOGGLY_ENV_FILE}" ]; then source ${LOGGLY_ENV_FILE} fi # Resetting conf file on each startup cat > /opt/fluentd/generatedconf.d/generated-loggly-output.conf <<_EOF_ _EOF_ loggly_tag="fluentdloggly" if [ -n "${LOGGLY_TAG}" ]; then loggly_tag=${LOGGLY_TAG} fi loggly_match="**" if [ -n "${LOGGLY_MATCH}" ]; then loggly_match=${LOGGLY_MATCH} fi if [ -n "${LOGGLY_TOKEN}" ]; then cat >> /opt/fluentd/generatedconf.d/generated-loggly-output.conf <<_EOF_ <match ${loggly_match}> @type loggly loggly_url https://logs-01.loggly.com/inputs/${LOGGLY_TOKEN}/tag/${loggly_tag} </match> _EOF_ fi unset LOGGLY_TOKEN # Invoke entrypoint of parent container # Invoke entrypoint of parent container if [ "$1" = 'fluentd' ]; then exec /opt/fluentd/docker-entrypoint.sh $@ fi
Add requirements to travis for dcat tests
#!/bin/bash set -e echo "This is travis-build.bash..." echo "Installing the packages that CKAN requires..." sudo apt-get update -qq sudo apt-get install postgresql-$PGVERSION solr-jetty libcommons-fileupload-java:amd64=1.2.2-1 echo "Installing CKAN and its Python dependencies..." git clone https://github.com/ckan/ckan cd ckan python setup.py develop pip install -r requirements.txt --allow-all-external pip install -r dev-requirements.txt --allow-all-external cd - echo "Creating the PostgreSQL user and database..." sudo -u postgres psql -c "CREATE USER ckan_default WITH PASSWORD 'pass';" sudo -u postgres psql -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;' echo "Initialising the database..." cd ckan paster db init -c test-core.ini cd - echo "Installing ckanext-sweden and its requirements..." python setup.py develop pip install -r ckanext/sweden/blog/requirements.txt pip install -r requirements.txt pip install -r dev-requirements.txt echo "Moving test.ini into a subdir..." mkdir subdir mv test.ini subdir echo "travis-build.bash is done."
#!/bin/bash set -e echo "This is travis-build.bash..." echo "Installing the packages that CKAN requires..." sudo apt-get update -qq sudo apt-get install postgresql-$PGVERSION solr-jetty libcommons-fileupload-java:amd64=1.2.2-1 echo "Installing CKAN and its Python dependencies..." git clone https://github.com/ckan/ckan cd ckan python setup.py develop pip install -r requirements.txt --allow-all-external pip install -r dev-requirements.txt --allow-all-external cd - echo "Creating the PostgreSQL user and database..." sudo -u postgres psql -c "CREATE USER ckan_default WITH PASSWORD 'pass';" sudo -u postgres psql -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;' echo "Initialising the database..." cd ckan paster db init -c test-core.ini cd - echo "Installing ckanext-harvest" git clone https://github.com/ckan/ckanext-harvest cd ckanext-harvest git checkout stable pip install -r pip-requirements.txt python setup.py develop cd - echo "Installing ckanext-dcat" git clone https://github.com/ckan/ckanext-dcat cd ckanext-dcat # tmp pip install lxml python setup.py develop cd - echo "Installing ckanext-sweden and its requirements..." python setup.py develop pip install -r ckanext/sweden/blog/requirements.txt pip install -r requirements.txt pip install -r dev-requirements.txt echo "Moving test.ini into a subdir..." mkdir subdir mv test.ini subdir echo "travis-build.bash is done."
Add git to vagrant shell provisioner
#!/usr/bin/env bash apt-get update apt-get install -y software-properties-common apt-get install -y python-software-properties apt-add-repository -y ppa:chris-lea/node.js apt-get update apt-get install -y openjdk-7-jdk apt-get install -y nodejs npm install -g grunt-cli npm install -g bower
#!/usr/bin/env bash
# Vagrant shell provisioner: install JDK 7, git, Node.js and the global
# npm build tools (grunt, bower).

apt-get update
apt-get install -y software-properties-common
apt-get install -y python-software-properties

# The chris-lea PPA provides a newer nodejs package than the distro archive.
apt-add-repository -y ppa:chris-lea/node.js
apt-get update

apt-get install -y openjdk-7-jdk
apt-get install -y git
apt-get install -y nodejs

npm install -g grunt-cli
npm install -g bower
Add branch, sha, and build information to the BUILDNAME.
#!/bin/bash # This is a script to build the modules and run the test suite in the base # Docker container. set -x set -o cd /usr/src/ITKUltrasound-build cmake \ -G Ninja \ -DITK_DIR:PATH=/usr/src/ITK-build \ -DITKUltrasound_USE_VTK:BOOL=ON \ -DPYTHON_EXECUTABLE:FILEPATH=/usr/bin/python3 \ -DCMAKE_BUILD_TYPE:STRING=Release \ -DBUILDNAME:STRING=External-Ultrasound \ /usr/src/ITKUltrasound ctest -VV -D Experimental
#!/bin/bash # This is a script to build the modules and run the test suite in the base # Docker container. set -x set -o cd /usr/src/ITKUltrasound branch=$(git rev-parse --abbrev-ref HEAD) date=$(date +%F_%H_%M_%S) sha=$(git rev-parse --short HEAD) cd /usr/src/ITKUltrasound-build cmake \ -G Ninja \ -DITK_DIR:PATH=/usr/src/ITK-build \ -DITKUltrasound_USE_VTK:BOOL=ON \ -DPYTHON_EXECUTABLE:FILEPATH=/usr/bin/python3 \ -DCMAKE_BUILD_TYPE:STRING=Release \ -DBUILDNAME:STRING=External-Ultrasound-${branch}-${date}-${sha} \ /usr/src/ITKUltrasound ctest -VV -D Experimental
Update of the prep script
#!/bin/bash prep_ubuntu() { apt-get update apt-get -y upgrade echo "** Preparing Ubuntu for kamikaze2 **" cd /opt/scripts/tools/ git pull sh update_kernel.sh --bone-kernel --lts-4_1 touch /etc/pm/sleep.d/wireless apt-get -y install unzip iptables sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config } remove_unneeded_packages() { echo "** Remove unneded packages **" rm -rf /etc/apache2/sites-enabled rm -rf /root/.c9 rm -rf /usr/local/lib/node_modules rm -rf /var/lib/cloud9 rm -rf /usr/lib/node_modules/ apt-get purge -y apache2 apache2-bin apache2-data apache2-utils } install_repo() { cat >/etc/apt/sources.list.d/testing.list <<EOL #### Kamikaze #### deb [arch=armhf] http://kamikaze.thing-printer.com/debian/ stretch main EOL wget -q http://kamikaze.thing-printer.com/debian/public.gpg -O- | apt-key add - apt-get update } prep_ubuntu remove_unneeded_packages install_repo reboot
#!/bin/bash
# Prepare a BeagleBone Ubuntu image for Kamikaze 2: update the kernel,
# strip unneeded packages and add the Kamikaze apt repository.

# Upgrade the base system, install the LTS 4.1 bone kernel and enable
# root SSH login.
prep_ubuntu() {
  apt-get update
  apt-get -y upgrade
  echo "** Preparing Ubuntu for kamikaze2 **"
  cd /opt/scripts/tools/
  git pull
  sh update_kernel.sh --bone-kernel --lts-4_1
  touch /etc/pm/sleep.d/wireless
  apt-get -y install unzip iptables
  sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
}

# Remove Apache/Cloud9 leftovers that the image does not need.
remove_unneeded_packages() {
  echo "** Remove unneeded packages **"
  rm -rf /etc/apache2/sites-enabled
  rm -rf /root/.c9
  rm -rf /usr/local/lib/node_modules
  rm -rf /var/lib/cloud9
  rm -rf /usr/lib/node_modules/
  apt-get purge -y apache2 apache2-bin apache2-data apache2-utils
}

# Register the Kamikaze apt repository and its signing key.
install_repo() {
  cat >/etc/apt/sources.list.d/testing.list <<EOL
#### Kamikaze ####
deb [arch=armhf] http://kamikaze.thing-printer.com/debian/ stretch main
EOL
  wget -q http://kamikaze.thing-printer.com/debian/public.gpg -O- | apt-key add -
  apt-get update
}

prep() {
  prep_ubuntu
  remove_unneeded_packages
  install_repo
}

prep

echo "Now reboot into the new kernel and run make-kamikaze-2.1.sh"
Fix paths in install script [rev: none]
#!/bin/bash # Copyright 2015 Hewlett-Packard Development Company, L.P. # Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. ## INSTALLATION SETTINGS ## ## If you change these, you need to change them in find.conf as well NAME="find" BASE_DIR="/opt/$NAME" HOME_DIR="$BASE_DIR/home" USER="$NAME" GROUP="$NAME" ## ## TODO - cd to directory script is in? useradd $USER mkdir $BASE_DIR mkdir $HOME_DIR cp ../../$NAME.jar $BASE_DIR chown -R $USER:$GROUP $BASE_DIR cp init/upstart/$NAME.conf /etc/init chmod +x /etc/init/$NAME.conf service $NAME start
#!/bin/bash # Copyright 2015 Hewlett-Packard Development Company, L.P. # Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. ## INSTALLATION SETTINGS ## ## If you change these, you need to change them in find.conf as well NAME="find" BASE_DIR="/opt/$NAME" HOME_DIR="$BASE_DIR/home" USER="$NAME" GROUP="$NAME" ## ## TODO - cd to directory script is in? useradd $USER mkdir $BASE_DIR mkdir $HOME_DIR cp ../../../$NAME.war $BASE_DIR chown -R $USER:$GROUP $BASE_DIR cp $NAME.conf /etc/init chmod +x /etc/init/$NAME.conf service $NAME start
Enable environment variables for proxy enablement
#!/bin/bash set -eu -o pipefail # shellcheck disable=SC1091 OS_ID=$( . /etc/os-release echo "$ID" ) readonly OS_ID echo 'Install Node.js' # Check the latest version from https://nodejs.org/en/ and https://github.com/nodesource/distributions case $OS_ID in ol) curl -sL https://rpm.nodesource.com/setup_16.x | sudo bash - sudo yum -y install nodejs gcc-c++ make ;; ubuntu) curl -sL https://deb.nodesource.com/setup_16.x | sudo bash - sudo apt -y install nodejs build-essential ;; esac
#!/bin/bash
# Install Node.js 16 via the NodeSource setup script, picking the right
# package manager for the detected distribution.
set -eu -o pipefail

# shellcheck disable=SC1091
OS_ID=$(
  . /etc/os-release
  echo "$ID"
)
readonly OS_ID

echo 'Install Node.js'

# Check the latest version from https://nodejs.org/en/ and
# https://github.com/nodesource/distributions
# "sudo -E" preserves the environment so proxy variables reach the setup script.
case $OS_ID in
  ol)
    curl -sL https://rpm.nodesource.com/setup_16.x | sudo -E bash -
    sudo yum -y install nodejs gcc-c++ make
    ;;
  ubuntu)
    curl -sL https://deb.nodesource.com/setup_16.x | sudo -E bash -
    sudo apt -y install nodejs build-essential
    ;;
esac
Update install script to move code to @charlesfleche mac devbox
#!/bin/bash rsync --archive --progress --human-readable Py-Previz $HOME/Library/Preferences/MAXON/CINEMA\ 4D\ R17_89538A46/plugins rsync --archive --progress --human-readable /Users/charles/src/previz-exporters/previz/previz $HOME/Library/Preferences/MAXON/CINEMA\ 4D\ R17_89538A46/plugins/Py-Previz/res/lib/python/site-packages
#!/bin/bash REPO_SRC=/home/charles/src/github/Previz-app PLUGIN_SRC=$REPO_SRC/Py-Previz/Py-Previz PREVIZ_MODULE_SRC=$REPO_SRC/previz-python-api/previz REQUESTS_SRC=$REPO_SRC/Py-Previz/third-party/requests REQUESTS_TOOLBELT_SRC=$REPO_SRC/Py-Previz/third-party/requests_toolbelt PLUGIN_DST='Nialls-MacBook-Pro.local:/Users/charles/Library/Preferences/MAXON/CINEMA4D/plugins/Py-Previz' DEPS_DST=$PLUGIN_DST/res/lib/python/site-packages rsync --archive --progress --human-readable --delete "$PLUGIN_SRC/" "$PLUGIN_DST" rsync --archive --progress --human-readable \ $PREVIZ_MODULE_SRC \ $REQUESTS_SRC \ $REQUESTS_TOOLBELT_SRC \ $DEPS_DST
Remove extraneous leanproject get mathlib
#!/bin/bash # run from project root rm -rf leanpkg.path rm -rf _target cp leanpkg_miniF2F.toml leanpkg.toml leanpkg configure mkdir ./_target/deps/minif2f/lean/scripts/ cp ./_target/deps/mathlib/scripts/mk_all.sh ./_target/deps/minif2f/lean/scripts/mk_all.sh bash ./_target/deps/minif2f/lean/scripts/mk_all.sh leanproject get-mathlib-cache cd _target/deps/minif2f && leanproject get-mathlib-cache && cd ../../../ leanpkg build
#!/bin/bash # run from project root rm -rf leanpkg.path rm -rf _target cp leanpkg_miniF2F.toml leanpkg.toml leanpkg configure mkdir ./_target/deps/minif2f/lean/scripts/ cp ./_target/deps/mathlib/scripts/mk_all.sh ./_target/deps/minif2f/lean/scripts/mk_all.sh bash ./_target/deps/minif2f/lean/scripts/mk_all.sh leanproject get-mathlib-cache # cd _target/deps/minif2f && leanproject get-mathlib-cache && cd ../../../ leanpkg build
Set gopath before calling go get
#!/usr/bin/env bash set -o errexit set -o nounset set -o pipefail set -o xtrace # Install and initialize helm/tiller HELM_URL=https://storage.googleapis.com/kubernetes-helm HELM_TARBALL=helm-v2.4.2-linux-amd64.tar.gz wget -q ${HELM_URL}/${HELM_TARBALL} tar xzfv ${HELM_TARBALL} # Clean up tarball rm -f ${HELM_TARBALL} # Housekeeping linux-amd64/helm init --upgrade # Run test framework cd /src/k8s.io/charts/test/ go get -v ./... go run ./helm-test/main.go
#!/usr/bin/env bash set -o errexit set -o nounset set -o pipefail set -o xtrace # Install and initialize helm/tiller HELM_URL=https://storage.googleapis.com/kubernetes-helm HELM_TARBALL=helm-v2.4.2-linux-amd64.tar.gz wget -q ${HELM_URL}/${HELM_TARBALL} tar xzfv ${HELM_TARBALL} # Clean up tarball rm -f ${HELM_TARBALL} # Housekeeping linux-amd64/helm init --upgrade # Run test framework export GOPATH=/src cd /src/k8s.io/charts/test/ go get -v ./... go run ./helm-test/main.go
Add debug output to check preinstalled packages on Travis CI
#!/bin/sh set -e # Use empty macstrap config as we want to test macstrap and not the installation of the brew packages rm -rf ~/.macstrap/macstrap.cfg cp ~/.macstrap/test/macstrap-test.cfg ~/.macstrap/macstrap.cfg # Uninstall preinstalled brew packages from Travis CI brew uninstall postgis postgresql # Start with the installation macstrap install
#!/bin/sh set -e # Use empty macstrap config as we want to test macstrap and not the installation of the brew packages rm -rf ~/.macstrap/macstrap.cfg cp ~/.macstrap/test/macstrap-test.cfg ~/.macstrap/macstrap.cfg # List all preinstalled brew packages on Travis CI brew deps --include-build --tree $(brew leaves) # Uninstall preinstalled brew packages from Travis CI brew uninstall postgis # Start with the installation macstrap install
Return sudoers file to normal
#!/bin/bash if [ ! -f /usr/local/bin/brew ]; then #Install homebrew ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" fi #No password sudo sudo sed -i .bak -e 's/^# \(%wheel.*ALL=(ALL) NOPASSWD: ALL\)/\1/g' /etc/sudoers sudo dscl . append /Groups/wheel GroupMembership $USER #Install Ansible brew install ansible #Run Ansible PYTHONIOENCODING='utf-8' ansible-playbook desktop.yml #Return to normal password sudo sudo dscl . delete /Groups/wheel GroupMembership $USER
#!/bin/bash
# Bootstrap a macOS desktop: install Homebrew and Ansible, run the playbook
# with passwordless sudo temporarily enabled, then restore sudo to normal.

if [ ! -f /usr/local/bin/brew ]; then
  # Install homebrew
  ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi

# No-password sudo: uncomment the wheel NOPASSWD rule and join the wheel group.
sudo sed -i .bak -e 's/^# \(%wheel.*ALL=(ALL) NOPASSWD: ALL\)/\1/g' /etc/sudoers
sudo dscl . append /Groups/wheel GroupMembership $USER

# Install Ansible
brew install ansible

# Run Ansible
PYTHONIOENCODING='utf-8' ansible-playbook desktop.yml

# Return to normal password sudo: re-comment the rule and leave the group.
sudo sed -i .bak -e 's/^\(%wheel.*ALL=(ALL) NOPASSWD: ALL\)/#\1/g' /etc/sudoers
sudo dscl . delete /Groups/wheel GroupMembership $USER
Fix path to autostart scripts
#!/bin/sh set -e newVersion=1.05 revision=UnitTestingFramework-v$newVersion filesToWatch="procedures docu helper INSTALL.txt" for i in `ls procedures/*.ipf`; do sed -i "s/#pragma version=.*/#pragma version=$newVersion/" $i done if [ ! -z "$(git status -s --untracked-files=no $filesToWatch)" ]; then echo "Aborting, please commit the changes first" exit 0 fi basename=$revision zipFile=$basename.zip folder=releases/$basename rm -rf $folder rm -rf $zipfile mkdir -p $folder cp -r procedures docu/examples Readme.md helper $folder # copy and rename manual cp docu/refman.pdf $folder/Manual-$basename.pdf # copy autorun script into example6 folder cp $folder/helper/autorun-test.bat $folder/examples/Example6 git rev-parse $revision > internalVersion cd releases && zip -m -z -q -r $basename.zip $basename/* < ../internalVersion && cd .. rmdir $folder rm internalVersion
#!/bin/sh set -e newVersion=1.05 revision=UnitTestingFramework-v$newVersion filesToWatch="procedures docu helper INSTALL.txt" for i in `ls procedures/*.ipf`; do sed -i "s/#pragma version=.*/#pragma version=$newVersion/" $i done if [ ! -z "$(git status -s --untracked-files=no $filesToWatch)" ]; then echo "Aborting, please commit the changes first" exit 0 fi basename=$revision zipFile=$basename.zip folder=releases/$basename rm -rf $folder rm -rf $zipfile mkdir -p $folder cp -r procedures docu/examples Readme.md helper $folder # copy and rename manual cp docu/refman.pdf $folder/Manual-$basename.pdf # copy autorun scripts into example6 folder cp $folder/helper/autorun*.bat $folder/examples/Example6 git rev-parse $revision > internalVersion cd releases && zip -m -z -q -r $basename.zip $basename/* < ../internalVersion && cd .. rmdir $folder rm internalVersion
Use beta agent and --disconnect-after-job for macOS
#!/bin/bash set -e set -o pipefail set -u guestinfo() { local key="guestinfo.$1" local tool="/Library/Application Support/VMware Tools/vmware-tools-daemon" local value=$("$tool" --cmd "info-get $key") if [[ -n $value ]]; then echo "$value" else echo >&2 "Missing $key" fi } echo "--- Querying VMware guestinfo" vmdk=$(guestinfo vmkite-vmdk) name=$(guestinfo vmkite-name) token=$(guestinfo vmkite-buildkite-agent-token) [[ -n $vmdk && -n $name && -n $token ]] || exit 10 echo "--- Starting buildkite-agent" export BUILDKITE_AGENT_TOKEN="$token" export BUILDKITE_AGENT_NAME="$name" export BUILDKITE_AGENT_META_DATA="vmkite-vmdk=$vmdk,vmkite-guestid=darwin13_64Guest" export BUILDKITE_BOOTSTRAP_SCRIPT_PATH="/Users/vmkite/buildkite-agent/bootstrap.sh" export BUILDKITE_BUILD_PATH="/Users/vmkite/buildkite-builds" su vmkite -c "/usr/local/bin/buildkite-agent start" echo "--- Buildkite exited with $?, shutting down machine" shutdown -h now
#!/bin/bash set -e set -o pipefail set -u guestinfo() { local key="guestinfo.$1" local tool="/Library/Application Support/VMware Tools/vmware-tools-daemon" local value=$("$tool" --cmd "info-get $key") if [[ -n $value ]]; then echo "$value" else echo >&2 "Missing $key" fi } echo "--- Querying VMware guestinfo" vmdk=$(guestinfo vmkite-vmdk) name=$(guestinfo vmkite-name) token=$(guestinfo vmkite-buildkite-agent-token) [[ -n $vmdk && -n $name && -n $token ]] || exit 10 echo "--- Starting buildkite-agent" export BUILDKITE_AGENT_TOKEN="$token" export BUILDKITE_AGENT_NAME="$name" export BUILDKITE_AGENT_META_DATA="vmkite-vmdk=$vmdk,vmkite-guestid=darwin13_64Guest" export BUILDKITE_BUILD_PATH="/Users/vmkite/buildkite-builds" su vmkite -c "/usr/local/bin/buildkite-agent start --disconnect-after-job" echo "--- Buildkite exited with $?, shutting down machine" shutdown -h now
Make test container quicker to start and stop.
#!/bin/bash echo "Starting phantomjs container..." if ! `docker inspect -f {{.State.Running}} phantomjs`; then (set -x; docker start phantomjs) echo "phantomjs started." else echo "phantomjs already running. Skipping." fi echo "Removing dev app container..." (set -x; docker rm -f app) echo "Spinning up test app container..." echo "*** Run unit tests with 'sh /datahub/provisions/docker/run-unit-tests.sh'." echo "*** Run functional tests with 'sh /datahub/provisions/docker/run-functional-tests.sh'." echo "*** Run specific tests with commands like 'python manage.py test core'." (set -x; docker run -ti --rm \ -e "DATAHUB_DOCKER_TESTING=true" \ -e "DJANGO_LIVE_TEST_SERVER_ADDRESS=0.0.0.0:8000" \ --volumes-from logs \ --volumes-from data \ --net=datahub_dev \ -v /vagrant:/datahub \ -w /datahub/src \ datahuborg/datahub /bin/bash) echo "Bringing back dev app container..." (set -x; docker create --name app \ --env 'USER=vagrant' \ --volumes-from logs \ --volumes-from data \ --net=datahub_dev \ -v /vagrant:/datahub \ datahuborg/datahub gunicorn --config=provisions/gunicorn/config_dev.py browser.wsgi) (set -x; docker start app)
#!/bin/bash echo "Starting phantomjs container..." if ! `docker inspect -f {{.State.Running}} phantomjs`; then (set -x; docker start phantomjs) echo "phantomjs started." else echo "phantomjs already running. Skipping." fi echo "Stopping app container..." (set -x; docker stop app) echo "Spinning up test app container..." echo "*** Run unit tests with 'sh /datahub/provisions/docker/run-unit-tests.sh'." echo "*** Run functional tests with 'sh /datahub/provisions/docker/run-functional-tests.sh'." echo "*** Run specific tests with commands like 'python manage.py test core'." echo "*** Run a debuggable server with 'python manage.py runserver 0.0.0.0:8000'." (set -x; docker run -ti --rm \ -e "DATAHUB_DOCKER_TESTING=true" \ -e "DJANGO_LIVE_TEST_SERVER_ADDRESS=0.0.0.0:8000" \ --volumes-from logs \ --volumes-from data \ --net=datahub_dev \ -v /vagrant:/datahub \ -w /datahub/src \ datahuborg/datahub /bin/bash) echo "Bringing back app container..." (set -x; docker start app)
Fix cassandra build step for cached image
#!/bin/bash docker-compose exec -T cassandra ./wait-for-it docker-compose exec -T dynamodb ./wait-for-it for keyspace in auditors global_lookups page_views; do docker-compose exec -T cassandra cqlsh -e "CREATE KEYSPACE ${keyspace} WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };" done (docker-compose exec -T web bundle exec rails db:create db:migrate > /tmp/possible_migration_error.txt 2>&1; cat /tmp/possible_migration_error.txt) && rm /tmp/possible_migration_error.txt docker-compose exec -T web script/rails runner "require 'switchman/test_helper'; Switchman::TestHelper.recreate_persistent_test_shards"
#!/bin/bash docker-compose exec -T cassandra ./wait-for-it docker-compose exec -T dynamodb ./wait-for-it for keyspace in auditors global_lookups page_views; do docker-compose exec -T cassandra cqlsh -e "CREATE KEYSPACE IF NOT EXISTS ${keyspace} WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };" done (docker-compose exec -T web bundle exec rails db:create db:migrate > /tmp/possible_migration_error.txt 2>&1; cat /tmp/possible_migration_error.txt) && rm /tmp/possible_migration_error.txt docker-compose exec -T web script/rails runner "require 'switchman/test_helper'; Switchman::TestHelper.recreate_persistent_test_shards"
Add full paths since shell scripts don't work well with virtualenv
if [ ! -d ".virtualenvs" ]; then mkdir .virtualenvs fi virtualenv .virtualenvs/flask-test-app . .virtualenvs/flask-test-app/bin/activate pip install -r requirements.txt python run.py
# Set up an isolated virtualenv and run the Flask test app with it.
# The venv's pip/python are invoked by full path because "source activate"
# does not work reliably from shell scripts.

if [ ! -d ".virtualenvs" ]; then
  mkdir .virtualenvs
fi

virtualenv .virtualenvs/flask-test-app
.virtualenvs/flask-test-app/bin/pip install -r requirements.txt
.virtualenvs/flask-test-app/bin/python run.py
Fix `cd` call in Command-T building script
#!/bin/sh . scripts/common/main.sh . scripts/common/build.sh prepare_build vim-plugins/command-t --url git://github.com/wincent/Command-T cd "$BUILD_DIRECTORY" cd command-t/ruby/command-t ruby extconf.rb make cd ../.. mv .git ../.git-command-t cd .. tar czf "$DDIR/${TARGET}.tar.gz" command-t mv .git-command-t command-t/.git cd "$DDIR" git add ${TARGET}.tar.gz git commit -m "Update Command-T $COMMIT_MESSAGE_FOOTER"
#!/bin/sh . scripts/common/main.sh . scripts/common/build.sh prepare_build vim-plugins/command-t --url git://github.com/wincent/Command-T cd "$BUILD_DIRECTORY/ruby/command-t" ruby extconf.rb make cd ../.. mv .git ../.git-command-t cd .. tar czf "$DDIR/${TARGET}.tar.gz" command-t mv .git-command-t command-t/.git cd "$DDIR" git add ${TARGET}.tar.gz git commit -m "Update Command-T $COMMIT_MESSAGE_FOOTER"
Use apt instead of apt-get
#! /bin/bash alias show='apt-cache show' alias search='apt-cache search' alias files='dpkg -L' alias selections='dpkg --get-selections' alias install='sudo apt-get install' alias reinstall='sudo apt-get install --reinstall' alias update='sudo apt-get update' alias upgrade='sudo apt-get upgrade' alias remove='sudo apt-get remove' alias purge='sudo apt-get remove --purge' alias autoremove='sudo apt-get autoremove'
#! /bin/bash
# Shorthand aliases for common apt / dpkg package-management commands.

alias show='apt-cache show'
alias search='apt-cache search'
alias files='dpkg -L'
alias selections='dpkg --get-selections'
alias install='sudo apt install'
alias reinstall='sudo apt install --reinstall'
alias update='sudo apt update'
alias upgrade='sudo apt upgrade'
alias remove='sudo apt remove'
alias purge='sudo apt remove --purge'
alias autoremove='sudo apt autoremove'
Clear the console on exit if possible
# If our homedir is encrypted with eCryptfs then don't unmount it on # exit if any tmux sessions are running. if [ -d $HOME/.ecryptfs ]; then if $(tmux ls 2>&1 >/dev/null); then rm -f $HOME/.ecryptfs/auto-umount else touch $HOME/.ecryptfs/auto-umount fi fi
# If our homedir is encrypted with eCryptfs then don't unmount it on
# exit if any tmux sessions are running.
if [ -d "$HOME/.ecryptfs" ]; then
  # "tmux ls" exits 0 only when at least one session exists; discard all
  # output. (Was "if $(tmux ls 2>&1 >/dev/null)", which executed tmux's
  # output as a command and let stderr reach the terminal.)
  if tmux ls >/dev/null 2>&1; then
    rm -f "$HOME/.ecryptfs/auto-umount"
  else
    touch "$HOME/.ecryptfs/auto-umount"
  fi
fi

# Clear the console on exit if this is not a nested shell session
if [ "$SHLVL" = 1 ]; then
  [ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q
fi
Update flushdns for OS X 10.10.4
# OS X 10.10 alias flushdns="sudo discoveryutil udnsflushcaches" # MacOS X 10.9 # alias flushdns="dscacheutil -flushcache; sudo killall -HUP mDNSResponder" # MacOS X 10.7 - 10.8 # alias flushdns="sudo killall -HUP mDNSResponder" # MacOS X 10.5 - 10.6 # alias flushdns="sudo dscacheutil -flushcache"
# OS X 10.10.4
alias flushdns="dscacheutil -flushcache; sudo killall -HUP mDNSResponder"

# OS X 10.10
#alias flushdns="sudo discoveryutil udnsflushcaches"

# MacOS X 10.9
# alias flushdns="dscacheutil -flushcache; sudo killall -HUP mDNSResponder"

# MacOS X 10.7 - 10.8
# alias flushdns="sudo killall -HUP mDNSResponder"

# MacOS X 10.5 - 10.6
# alias flushdns="sudo dscacheutil -flushcache"
Add `zsh alias` for npm dist-tags.
## # node.zsh # ------- # @description zsh configuration for git. ## export NVM_DIR="$HOME/.nvm" . "/usr/local/opt/nvm/nvm.sh" alias nr='npm run'
##
# node.zsh
# -------
# @description zsh configuration for node (nvm plus npm shortcuts).
##

export NVM_DIR="$HOME/.nvm"
. "/usr/local/opt/nvm/nvm.sh"

alias nr='npm run'
alias ndt='npm dist-tag'
Fix usage example script name.
#!/usr/bin/env bash set -o errexit set -o nounset if [ "$#" -ne 1 ]; then echo 'Usage: ./set-config.sh <config_path>' exit 1 fi config_path=${1} # Construct Ansible extra_vars flags. If `config_path` is set, all files # directly under the directory with extension `.yaml` or `.yml` will be added. # The search for config files _will not_ descend into subdirectories. extra_vars=() for config_file in $( find -L "${config_path}" -maxdepth 1 -type f -a \( -name '*.yaml' -o -name '*.yml' \) | sort ); do extra_vars+=( --extra-vars "@${config_file}") done echo "Extra vars:" echo " ${extra_vars[*]}" PYTHONPATH=../python-modules \ ANSIBLE_CONFIG=conf/ansible/ansible.cfg \ ansible-playbook provisioners/ansible/playbooks/fetch-library.yaml \ -i conf/ansible/inventory/hosts \ --module-path provisioners/ansible/library/ \ "${extra_vars[@]}"
#!/usr/bin/env bash set -o errexit set -o nounset if [ "$#" -ne 1 ]; then echo 'Usage: ./fetch-library.sh <config_path>' exit 1 fi config_path=${1} # Construct Ansible extra_vars flags. If `config_path` is set, all files # directly under the directory with extension `.yaml` or `.yml` will be added. # The search for config files _will not_ descend into subdirectories. extra_vars=() for config_file in $( find -L "${config_path}" -maxdepth 1 -type f -a \( -name '*.yaml' -o -name '*.yml' \) | sort ); do extra_vars+=( --extra-vars "@${config_file}") done echo "Extra vars:" echo " ${extra_vars[*]}" PYTHONPATH=../python-modules \ ANSIBLE_CONFIG=conf/ansible/ansible.cfg \ ansible-playbook provisioners/ansible/playbooks/fetch-library.yaml \ -i conf/ansible/inventory/hosts \ --module-path provisioners/ansible/library/ \ "${extra_vars[@]}"
Fix another bug with the auto_deploy
if [ $EUID -eq 0 ]; then # User is Root echo "Pulling from origin" git pull origin if [ -z $1 ]; then path="/var/www/html" else path=$1 fi echo "Deleting content of" $path mv $path/res /tmp/res mv $path/userSettings /tmp/userSettings rm -rf $path/ mv /tmp/res $path/res mv /tmp/userSettings $path/userSettings echo "Copying www to " $path cp -R www/ $path/ echo "Changing the permission of" $path chmod -R a+w $path/* echo "Restarting Apache" service apache2 restart else echo "User must be root to deploy" exit 1 fi
# Deploy the www/ tree to the web root (default /var/www/html), preserving
# the res/ and userSettings/ directories across the wipe. Must run as root.
if [ $EUID -eq 0 ]; then
  # User is Root
  echo "Pulling from origin"
  git pull origin

  # Quoted test so an absent argument does not break the check.
  if [ -z "$1" ]; then
    path="/var/www/html"
  else
    path="$1"
  fi

  echo "Deleting content of" "$path"
  # Clear any stale stashes in /tmp BEFORE moving the live copies there
  # (the previous ordering deleted /tmp/res right after stashing into it).
  rm -rf /tmp/res
  rm -rf /tmp/userSettings
  mv "$path/res" /tmp/res
  mv "$path/userSettings" /tmp/userSettings
  rm -rf "$path/"
  mv /tmp/res "$path/"
  mv /tmp/userSettings "$path/"

  echo "Copying www to " "$path"
  cp -R www/ "$path/"

  echo "Changing the permission of" "$path"
  chmod -R a+w "$path"/*

  echo "Restarting Apache"
  service apache2 restart
else
  echo "User must be root to deploy"
  exit 1
fi
Use 'grep -Fxf' instead of 'comm' because input is not sorted
#!/bin/bash # # get-latest-common-snap.sh # Copyright (C) 2021 Olaf Lessenich <xai@linux.com> # # Distributed under terms of the MIT license. # set -eu srcsnaps="$(zfs list -t snapshot -o name -H $1 | sed 's_.\+@_@_')" targetsnaps="$(zfs list -t snapshot -o name -H $2 | sed 's_.\+@_@_')" common="$(comm -12 --nocheck-order <(echo "$srcsnaps") <(echo "$targetsnaps") | tail -n1)" srclatest="$(echo "$srcsnaps" | tail -n1)" targetlatest="$(echo "$targetsnaps" | tail -n1)" echo "${1}${srclatest}" echo "${2}${targetlatest}" echo "Common: "$common"" echo "zfs -RI send ${1}${common} ${1}${srclatest} | zfs recv -u ${2}"
#!/bin/bash
#
# get-latest-common-snap.sh
# Copyright (C) 2021 Olaf Lessenich <xai@linux.com>
#
# Distributed under terms of the MIT license.
#
# Print the newest snapshot of each of two datasets ($1 source, $2 target)
# plus their newest common snapshot, and suggest the incremental send command.
set -eu

# Snapshot names with the dataset prefix stripped (just "@snapname").
srcsnaps="$(zfs list -t snapshot -o name -H $1 | sed 's_.\+@_@_')"
targetsnaps="$(zfs list -t snapshot -o name -H $2 | sed 's_.\+@_@_')"

# grep -Fxf matches exact full lines and works on unsorted input,
# unlike comm which requires sorted streams.
common="$(grep -Fxf <(echo "$srcsnaps") <(echo "$targetsnaps") | tail -n1)"

srclatest="$(echo "$srcsnaps" | tail -n1)"
targetlatest="$(echo "$targetsnaps" | tail -n1)"

echo "${1}${srclatest}"
echo "${2}${targetlatest}"
echo "Common: "$common""
echo "zfs -RI send ${1}${common} ${1}${srclatest} | zfs recv -u ${2}"
Stop script on Bash errors
#!/bin/bash working_dir=`dirname $0` cd $working_dir set -u set -o errexit if [ `getconf LONG_BIT` != '64' ]; then echo "Install failed, you must have a 64 bit OS." exit 1 fi echo "Installing Perlbrew..." curl -LsS http://install.perlbrew.pl | bash echo "Loading Perlbrew environment variables..." set +u source ~/perl5/perlbrew/etc/bashrc set -u echo "Running 'perlbrew init'..." perlbrew init echo "Running 'perlbrew install'..." nice perlbrew install perl-5.16.3 -Duseithreads -Dusemultiplicity -Duse64bitint -Duse64bitall -Duseposix -Dusethreads -Duselargefiles -Dccflags=-DDEBIAN echo "Switching to installed Perl..." perlbrew switch perl-5.16.3 echo "Installing cpanm..." perlbrew install-cpanm echo "Creating 'mediacloud' library..." perlbrew lib create mediacloud echo "Switching to 'mediacloud' library..." perlbrew switch perl-5.16.3@mediacloud echo "Done installing Perl with Perlbrew."
#!/bin/bash working_dir=`dirname $0` cd $working_dir set -e set -u set -o errexit if [ `getconf LONG_BIT` != '64' ]; then echo "Install failed, you must have a 64 bit OS." exit 1 fi echo "Installing Perlbrew..." curl -LsS http://install.perlbrew.pl | bash echo "Loading Perlbrew environment variables..." set +u source ~/perl5/perlbrew/etc/bashrc set -u echo "Running 'perlbrew init'..." perlbrew init echo "Running 'perlbrew install'..." nice perlbrew install perl-5.16.3 -Duseithreads -Dusemultiplicity -Duse64bitint -Duse64bitall -Duseposix -Dusethreads -Duselargefiles -Dccflags=-DDEBIAN echo "Switching to installed Perl..." perlbrew switch perl-5.16.3 echo "Installing cpanm..." perlbrew install-cpanm echo "Creating 'mediacloud' library..." perlbrew lib create mediacloud echo "Switching to 'mediacloud' library..." perlbrew switch perl-5.16.3@mediacloud echo "Done installing Perl with Perlbrew."
Remove the restriction so guest additions mount successfully. Some desktops (e.g. GNOME) mount the disk automatically when it is inserted, so the additional mount fails with an 'already mounted' error and the whole process fails even though it could still succeed.
#!/bin/bash if [ "$(id -u)" != "0" ]; then echo "This script must be run as root" exit 1 fi echo "Installing vbox guest additions" apt-get install build-essential module-assistant && m-a prepare && mount /media/cdrom && sh /media/cdrom/VBoxLinuxAdditions.run
#!/bin/bash if [ "$(id -u)" != "0" ]; then echo "This script must be run as root" exit 1 fi echo "Installing vbox guest additions" apt-get install build-essential module-assistant && m-a prepare && mount /media/cdrom sh /media/cdrom/VBoxLinuxAdditions.run
Fix apt-get warning for unsigned sources
#!/bin/sh if [ -z "$TRAVIS_OS_NAME" ]; then TRAVIS_OS_NAME=linux fi if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install -qq cdbs cmake libboost-dev lcov libmysqlclient-dev mysql-community-source gem install coveralls-lcov mkdir 3rdParty mkdir 3rdParty/include mkdir 3rdParty/lib else echo "Unknown OS ($TRAVIS_OS_NAME). Stopping build ..." exit 1 fi
#!/bin/sh if [ -z "$TRAVIS_OS_NAME" ]; then TRAVIS_OS_NAME=linux fi if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install -qq --force-yes cdbs cmake libboost-dev lcov libmysqlclient-dev mysql-community-source gem install coveralls-lcov mkdir 3rdParty mkdir 3rdParty/include mkdir 3rdParty/lib else echo "Unknown OS ($TRAVIS_OS_NAME). Stopping build ..." exit 1 fi
Create function for navigating to top-most Finder
# Create a directory and navigate to it function mkcd () { mkdir "$@" && cd "$@"; } # List directory contents on cd function cd() { builtin cd "$@" && l; } # Generate .gitignore based on parameters function gitignore() { curl -L -s https://www.gitignore.io/api/"$@" > .gitignore; } function json() { if [ -t 0 ]; then # JSON as argument python -mjson.tool <<< "$*" | pygmentize -l javascript; else # JSON from pipe python -mjson.tool | pygmentize -l javascript; fi; }
# Create a directory and navigate to it function mkcd () { mkdir "$@" && cd "$@"; } # List directory contents on cd function cd() { builtin cd "$@" && l; } # Change working directory to the top-most Finder window location function cdf() { cd "$(osascript -e 'tell app "Finder" to POSIX path of (insertion location as alias)')"; } # Generate .gitignore based on parameters function gitignore() { curl -L -s https://www.gitignore.io/api/"$@" > .gitignore; } function json() { if [ -t 0 ]; then # JSON as argument python -mjson.tool <<< "$*" | pygmentize -l javascript; else # JSON from pipe python -mjson.tool | pygmentize -l javascript; fi; }
Fix flaskext.genshi for Python 3
#!/bin/bash set -e virtualenv -p python3 . . bin/activate pip install -r requirements.txt rm -f $0
#!/bin/bash set -e virtualenv -p python3 . . bin/activate pip install -r requirements.txt sed -i 's/\.iteritems/.items/g' lib/*/site-packages/flaskext/genshi.py rm -f $0
Add JMX and JFR to JLink
#!/bin/sh if [ $# -ne 2 ]; then echo "Expecting two arguments, the JAR and the target directory." exit 1 fi INPUT_JAR=$1 TARGET_DIR=$2 echo "Will create an executable for '$INPUT_JAR'." echo "Will store the executable in directory '$TARGET_DIR'." java -version # Retrieve a list of required JDK modules from the input JAR. JDEPS_CMD="jdeps --print-module-deps --ignore-missing-deps $INPUT_JAR" echo "Calling jdeps like so: " echo " $JDEPS_CMD" DEPENDENCIES=$($JDEPS_CMD) echo "jdeps returned:" echo "$DEPENDENCIES" # Call JLink with these dependencies. JLINK_CMD="jlink --compress=2 --no-header-files --no-man-pages --strip-debug --add-modules $DEPENDENCIES --output $TARGET_DIR" echo "Calling jlink like so: " echo " $JLINK_CMD" JLINK_RESULT=$($JLINK_CMD) echo "jlink returned:" echo "$JLINK_RESULT" echo "Finished."
#!/bin/sh if [ $# -ne 2 ]; then echo "Expecting two arguments, the JAR and the target directory." exit 1 fi INPUT_JAR=$1 TARGET_DIR=$2 echo "Will create an executable for '$INPUT_JAR'." echo "Will store the executable in directory '$TARGET_DIR'." java -version # Retrieve a list of required JDK modules from the input JAR. JDEPS_CMD="jdeps --print-module-deps --ignore-missing-deps $INPUT_JAR" echo "Calling jdeps like so: " echo " $JDEPS_CMD" DEPENDENCIES=$($JDEPS_CMD) echo "jdeps returned:" echo "$DEPENDENCIES" # Call JLink with these dependencies; add JMX and JFR on top, as those are runtime monitoring dependencies. JLINK_CMD="jlink --compress=2 --no-header-files --no-man-pages --strip-debug --add-modules $DEPENDENCIES,jdk.management.jfr,jdk.management.agent --output $TARGET_DIR" echo "Calling jlink like so: " echo " $JLINK_CMD" JLINK_RESULT=$($JLINK_CMD) echo "jlink returned:" echo "$JLINK_RESULT" echo "Finished."
Revert "Revert "create PYTHONPATH from package list""
#!/usr/bin/env bash PACKAGE="peek" set -o nounset set -o errexit echo "Retrieving latest version tag" VER=$(git describe --tags `git rev-list --tags --max-count=1`) echo "Setting version to $VER" sed -i "s;.*version.*;__version__ = '${VER}';" ${PACKAGE}/__init__.py echo "===========================================" echo "Building Sphinx documentation for '${PACKAGE}'!" echo "===========================================" echo "Removing old documentation in build folder..." rm -fr dist/docs/* echo "Creating link to the packages..." ln -s ../peek-agent/peek_agent/ peek_agent ln -s ../peek-client/peek_client/ peek_client ln -s ../peek-platform/peek_platform/ peek_platform ln -s ../peek-server/peek_server/ peek_server ln -s ../peek-worker/peek_worker/ peek_worker echo "Updating module rst files. This will overwrite old rst files." export PYTHONPATH="$(dirname `pwd`)" sphinx-apidoc -f -l -d 6 -o docs . '*Test.py' 'setup.py' sphinx-build -b html docs dist/docs echo "Removing old module rst files..." rm -fr docs/peek* docs/modules.rst echo "Cleaning up links..." rm -fr peek_agent peek_client peek_platform peek_server peek_worker echo "Opening created documentation..." start dist/docs/index.html
#!/usr/bin/env bash PACKAGE="peek" set -o nounset set -o errexit echo "Retrieving latest version tag" VER=$(git describe --tags `git rev-list --tags --max-count=1`) echo "Setting version to $VER" sed -i "s;.*version.*;__version__ = '${VER}';" ${PACKAGE}/__init__.py echo "===========================================" echo "Building Sphinx documentation for '${PACKAGE}'!" echo "===========================================" echo "Removing old documentation in build folder..." rm -fr dist/docs/* echo "Creating link to the packages..." rm -fr peek_agent peek_client peek_platform peek_server peek_worker ln -s ../peek-agent/peek_agent/ ln -s ../peek-client/peek_client/ ln -s ../peek-platform/peek_platform/ ln -s ../peek-server/peek_server/ ln -s ../peek-worker/peek_worker/ echo "Creating Python Path" source ./pip_common.sh PYTHONPATH="" for pkg in $PACKAGES; do PYTHONPATH="${PYTHONPATH}:`pwd`/../${pkg}" done export PYTHONPATH sphinx-apidoc -f -l -d 6 -o docs . '*Test.py' 'setup.py' sphinx-build -b html docs dist/docs echo "Removing old module rst files..." rm -fr docs/peek* docs/modules.rst echo "Cleaning up links..." rm -fr peek_agent peek_client peek_platform peek_server peek_worker echo "Opening created documentation..." start dist/docs/index.html echo $PYTHONPATH
Change login code and notify before push
#!/bin/bash USERNAME='johandry' IMG_NAME='godevenv' FULL_IMG_NAME="${USERNAME}/${IMG_NAME}" build() { echo -e "\033[93;1mBuilding the image\033[0m" docker build -t ${FULL_IMG_NAME} . cat ${HOME}/.docker/config.json | tr -d '\n' | grep -q '"https://index.docker.io/.*":.*{.*"auth": ".\{1,\}"' || ( echo -e "\033[93;1mLogin to DockerHub as ${USERNAME}\033[0m." docker login -u ${dkr_username} ) } if [[ "$1" == "--build" ]] then build shift fi if [[ "$1" == "--push" ]] then # If the image is not there, build it docker images | grep -q ${FULL_IMG_NAME} || build docker push ${FULL_IMG_NAME} fi echo -e "\033[93;1mRunning the container\033[0m" docker run --rm -it --name "${IMG_NAME}" -v "${PWD}/workspace":/root/workspace -w /root/workspace ${FULL_IMG_NAME} [[ $? -eq 125 ]] && echo -e "\033[91;1mERROR\033[0m: Build the image and (optional) push it with $0 --build [ --push ]"
#!/bin/bash USERNAME='johandry' IMG_NAME='godevenv' FULL_IMG_NAME="${USERNAME}/${IMG_NAME}" build() { echo -e "\033[93;1mBuilding the image\033[0m" docker build -t ${FULL_IMG_NAME} . } if [[ "$1" == "--build" ]] then build shift fi if [[ "$1" == "--push" ]] then # If the image is not there, build it docker images | grep -q ${FULL_IMG_NAME} || build # Login to DockerHub if you are not cat ${HOME}/.docker/config.json | tr -d '\n' | grep -q '"https://index.docker.io/.*":.*{.*"auth": ".\{1,\}"' || ( echo -e "\033[93;1mLogin to DockerHub as ${USERNAME}\033[0m." docker login -u ${dkr_username} ) # Push the new image echo -e "\033[93;1mPushing the new image to DockerHub\033[0m" docker push ${FULL_IMG_NAME} fi echo -e "\033[93;1mRunning the container\033[0m" docker run --rm -it --name "${IMG_NAME}" -v "${PWD}/workspace":/root/workspace -w /root/workspace ${FULL_IMG_NAME} [[ $? -eq 125 ]] && echo -e "\033[91;1mERROR\033[0m: Build the image and (optional) push it with $0 --build [ --push ]"
Update scripts to latest version
#!/usr/bin/env sh test -e ~/.coursier/coursier || ( \ mkdir -p ~/.coursier && \ curl -Lso ~/.coursier/coursier https://git.io/vgvpD && \ chmod +x ~/.coursier/coursier \ ) ~/.coursier/coursier launch -q -P \ com.lihaoyi:ammonite_2.12.2:0.9.3 \ is.cir:ciris-core_2.12:0.3.1 \ is.cir:ciris-enumeratum_2.12:0.3.1 \ is.cir:ciris-generic_2.12:0.3.1 \ is.cir:ciris-refined_2.12:0.3.1 \ is.cir:ciris-squants_2.12:0.3.1 \ -- --predef 'import ciris._,ciris.enumeratum._,ciris.generic._,ciris.refined._,ciris.squants._' < /dev/tty
#!/usr/bin/env sh test -e ~/.coursier/coursier || ( \ mkdir -p ~/.coursier && \ curl -Lso ~/.coursier/coursier https://git.io/vgvpD && \ chmod +x ~/.coursier/coursier \ ) ~/.coursier/coursier launch -q -P \ com.lihaoyi:ammonite_2.12.2:0.9.3 \ is.cir:ciris-core_2.12:0.3.2 \ is.cir:ciris-enumeratum_2.12:0.3.2 \ is.cir:ciris-generic_2.12:0.3.2 \ is.cir:ciris-refined_2.12:0.3.2 \ is.cir:ciris-squants_2.12:0.3.2 \ -- --predef 'import ciris._,ciris.enumeratum._,ciris.generic._,ciris.refined._,ciris.squants._' < /dev/tty
Update script for commonly used Python modules/programs.
#!/bin/bash pip list | awk '{ print $1 }' | egrep -i "(pip)|(livestreamer)|(youtube-dl)|(thefuck)|(tldr)|(zenmap)|(paramiko)|(clf)|(Fabric)|(speedtest-cli)" > /tmp/pip_list.txt pip_upgrade(){ while read package; do sudo pip install "$package" --upgrade done < /tmp/pip_list.txt &> /dev/null ; return 0 || return 1 } if [[ -f /tmp/pip_list.txt ]] ; then echo "Updating..." pip_upgrade else echo "Error: No update list found." exit 1 fi if [[ pip_upgrade -eq 0 ]] ; then echo "Done." rm /tmp/pip_list.txt else echo "There was an error. Please try again." fi
#!/bin/bash pip list | awk '{ print $1 }' | egrep -i "(pip)|(livestreamer)|(youtube-dl)|\ (thefuck)|(tldr)|(zenmap)|(paramiko)|(clf)|(Fabric)|\ (speedtest-cli)" > /tmp/pip_list.txt pip_upgrade(){ while read package; do sudo pip install "$package" --upgrade done < /tmp/pip_list.txt &> /dev/null ; return 0 || return 1 } if [[ -f /tmp/pip_list.txt ]] ; then echo "Updating..." pip_upgrade else echo "Error: No update list found." exit 1 fi if [[ pip_upgrade -eq 0 ]] ; then echo "Done." rm /tmp/pip_list.txt else echo "There was an error. Please try again." fi
Drop rpmfusion from Fedora install script
#!/usr/bin/env bash sudo rpm -Uvh http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-23.noarch.rpm sudo dnf install -y \ git \ python \ python-devel \ python-pip \ python-setuptools \ python-virtualenv \ pygobject2-devel \ python-virtualenvwrapper \ libtool \ libffi-devel \ openssl-devel \ autoconf \ bison \ swig \ glib2-devel \ s3cmd \ portaudio-devel \ mpg123 \ screen \ curl \ pkgconfig \ libicu-devel \ automake # upgrade virtualenv to latest from pypi sudo pip install --upgrade virtualenv
#!/usr/bin/env bash sudo dnf install -y \ git \ python \ python-devel \ python-pip \ python-setuptools \ python-virtualenv \ pygobject2-devel \ python-virtualenvwrapper \ libtool \ libffi-devel \ openssl-devel \ autoconf \ bison \ swig \ glib2-devel \ s3cmd \ portaudio-devel \ mpg123 \ screen \ curl \ pkgconfig \ libicu-devel \ automake # upgrade virtualenv to latest from pypi sudo pip install --upgrade virtualenv
Fix Wordpress configuration file path.
#!/bin/sh # Exit on any command failures set -e LOCAL=/home/mike/Documents/Backup/Local REMOTE=/home/mike/Documents/Backup/Remote # Backup Wordpress files cd /var/www/ tar jvcf $LOCAL/www.tar.bz2 * # Backup Wordpress database USERNAME=$(grep DB_USER /var/www/wp-config.php | awk -F "'" '{print $4}') DATABASE=$(grep DB_NAME /var/www/wp-config.php | awk -F "'" '{print $4}') PASSWORD=$(grep DB_PASSWORD /var/www/wp-config.php | awk -F "'" '{print $4}') mysqldump -u "$USERNAME" --database "$DATABASE" --password="$PASSWORD" \ | bzip2 -c > $LOCAL/wordpress.sql.bz2 # Backup private Git repositories cd /home/mike/Documents/Git/ tar jvcf $LOCAL/git-private.tar.bz2 * # Copy to NFS backup share cp -v $LOCAL/www.tar.bz2 $REMOTE cp -v $LOCAL/wordpress.sql.bz2 $REMOTE cp -v $LOCAL/git-private.tar.bz2 $REMOTE
#!/bin/sh # Exit on any command failures set -e LOCAL=/home/mike/Documents/Backup/Local REMOTE=/home/mike/Documents/Backup/Remote # Backup website files cd /var/www/ tar jvcf $LOCAL/www.tar.bz2 * # Backup Wordpress database USERNAME=$(grep DB_USER /var/www/wordpress/wp-config.php | awk -F "'" '{print $4}') DATABASE=$(grep DB_NAME /var/www/wordpress/wp-config.php | awk -F "'" '{print $4}') PASSWORD=$(grep DB_PASSWORD /var/www/wordpress/wp-config.php | awk -F "'" '{print $4}') mysqldump -u "$USERNAME" --database "$DATABASE" --password="$PASSWORD" \ | bzip2 -c > $LOCAL/wordpress.sql.bz2 # Backup private Git repositories cd /home/mike/Documents/Git/ tar jvcf $LOCAL/git-private.tar.bz2 * # Copy to NFS backup share cp -v $LOCAL/www.tar.bz2 $REMOTE cp -v $LOCAL/wordpress.sql.bz2 $REMOTE cp -v $LOCAL/git-private.tar.bz2 $REMOTE
Exclude Emacs backup files during sync
#! /bin/bash targets="10:4a:7d:15:64:d6 ac:7b:a1:d3:b6:02" dirs="$HOME/org $HOME/.gnupg $HOME/.password-store" # Make sure the ARP tables are refreshed nmap -sn $(ip -br addr | awk '/UP/ { print $3 }') > /dev/null 2>&1 for target in $targets; do line=$(ip neigh | grep $target) if [[ -n $line ]]; then ip=$(echo $line | awk '{ print $1 }') for dir in $dirs; do rsync -a $dir/ $ip:$dir done fi done
#! /bin/bash targets="10:4a:7d:15:64:d6 ac:7b:a1:d3:b6:02" dirs="$HOME/org $HOME/.gnupg $HOME/.password-store" # Make sure the ARP tables are refreshed nmap -sn $(ip -br addr | awk '/UP/ { print $3 }') > /dev/null 2>&1 for target in $targets; do line=$(ip neigh | grep $target) if [[ -n $line ]]; then ip=$(echo $line | awk '{ print $1 }') for dir in $dirs; do rsync -a --exclude='*~' --exclude='#*#' --exclude='.#*' $dir/ $ip:$dir done fi done
Use --symlink with zip to avoid duplicating files in zip archive.
#!/bin/sh set -e version=$1 root=$2 echo echo '>>> Building JavaScript with Babel' echo '>>> npm run build' echo npm run build echo echo '>>> Install production dependencies' echo '>>> mkdir -p proddeps' mkdir -p proddeps echo '>>> cp package.json proddeps' cp package.json proddeps echo '>>> cd proddeps && npm i --production' (cd proddeps && npm i --production) echo echo echo '>>> Copy files to deployment directory' echo cp -a index.js lib proddeps/node_modules $root mkdir $root/fonts cp -a fonts/font-stylesheets $root/fonts rm -f code.zip echo echo '>>> Zipping' echo ">>> cd $root && zip -r ../deploy-$version.zip *" echo (cd $root && zip -9 -r ../deploy-$version.zip *)
#!/bin/sh set -e version=$1 root=$2 echo echo '>>> Building JavaScript with Babel' echo '>>> npm run build' echo npm run build echo echo '>>> Install production dependencies' echo '>>> mkdir -p proddeps' mkdir -p proddeps echo '>>> cp package.json proddeps' cp package.json proddeps echo '>>> cd proddeps && npm i --production' (cd proddeps && npm i --production) echo echo echo '>>> Copy files to deployment directory' echo cp -a index.js lib proddeps/node_modules $root mkdir $root/fonts cp -a fonts/font-stylesheets $root/fonts rm -f code.zip echo echo '>>> Zipping' echo ">>> cd $root && zip -r9 --symlink ../deploy-$version.zip *" echo (cd $root && zip -r9 --symlink ../deploy-$version.zip *)
Make sure git is on master when committing on Travis
git config --global user.email "weecologydeploy@weecology.org" git config --global user.name "Weecology Deploy Bot" git add predictions/* docs/* git commit -m "Update forecasts: Travis Build $TRAVIS_BUILD_NUMBER" git remote add deploy https://${GITHUB_TOKEN}@github.com/weecology/portalPredictions.git > /dev/null 2>&1 git push --quiet deploy master > /dev/null 2>&1
git config --global user.email "weecologydeploy@weecology.org" git config --global user.name "Weecology Deploy Bot" git checkout master git add predictions/* docs/* git commit -m "Update forecasts: Travis Build $TRAVIS_BUILD_NUMBER" git remote add deploy https://${GITHUB_TOKEN}@github.com/weecology/portalPredictions.git > /dev/null 2>&1 git push --quiet deploy master > /dev/null 2>&1
Add a remove-orphaned packages alias for Arch
#!/bin/zsh if [[ ! -f /etc/arch-release ]]; then return fi prepend-path $HOME/.bin/arch if [[ -d $HOME/build || -d $HOME/opt/build ]]; then if [[ ! -f ${XDG_DATA_HOME:-$HOME/.local/share}/aur-update ]]; then echo "${fg[red]}Found an AUR build folder, but no update data available. Please configure aur-update in cron.${terminfo[sgr0]}" echo "" else source ${XDG_DATA_HOME:-$HOME/.local/share}/aur-update fi fi alias pacman-orphans="pacman -Qtdq"
#!/bin/zsh if [[ ! -f /etc/arch-release ]]; then return fi prepend-path $HOME/.bin/arch if [[ -d $HOME/build || -d $HOME/opt/build ]]; then if [[ ! -f ${XDG_DATA_HOME:-$HOME/.local/share}/aur-update ]]; then echo "${fg[red]}Found an AUR build folder, but no update data available. Please configure aur-update in cron.${terminfo[sgr0]}" echo "" else source ${XDG_DATA_HOME:-$HOME/.local/share}/aur-update fi fi alias pacman-orphans="pacman -Qtdq" alias pacman-remove-orphans="pacman -Rns \$(pacman -Qtdq)"
Fix broken syntax highlighting with vim.
#!/bin/sh # Copyright 2002-2004 The Apache Software Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----- Verify and Set Required Environment Variables ------------------------- if [ "$TERM" = "cygwin" ] ; then S=';' else S=':' fi # ----- Set Up The Runtime Classpath ------------------------------------------ OLD_ANT_HOME="$ANT_HOME" unset ANT_HOME CP=$CLASSPATH export CP unset CLASSPATH CLASSPATH="`echo ./lib/endorsed/*.jar | tr ' ' $S`" export CLASSPATH echo Using classpath: \"$CLASSPATH\" "$PWD/./tools/ant/bin/ant" -logger org.apache.tools.ant.NoBannerLogger -emacs $@ unset CLASSPATH CLASSPATH=$CP export CLASSPATH ANT_HOME=OLD_ANT_HOME export ANT_HOME # ----- Clean back the environment ------------------------------------------ unset OLD_ANT_HOME unset CP
#!/bin/sh # Copyright 2002-2004 The Apache Software Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----- Verify and Set Required Environment Variables ------------------------- if [ "$TERM" = "cygwin" ] ; then S=';' else S=':' fi # ----- Set Up The Runtime Classpath ------------------------------------------ OLD_ANT_HOME="$ANT_HOME" unset ANT_HOME CP=$CLASSPATH export CP unset CLASSPATH CLASSPATH="`echo ./lib/endorsed/*.jar | tr ' ' $S`" export CLASSPATH echo "Using classpath: $CLASSPATH" "$PWD/./tools/ant/bin/ant" -logger org.apache.tools.ant.NoBannerLogger -emacs $@ unset CLASSPATH CLASSPATH=$CP export CLASSPATH ANT_HOME=OLD_ANT_HOME export ANT_HOME # ----- Clean back the environment ------------------------------------------ unset OLD_ANT_HOME unset CP
Enable a second current path in CDPATH
unsetopt auto_cd # with cdpath enabled, auto_cd gives too many false positives cdpath=( $HOME/code \ $HOME/code/work/current \ $HOME/code/work \ $HOME/code/vim \ $HOME/code/alfred \ $HOME ) _cdpath_directories() { modified_in_last_days=${1:-999} echo "${CDPATH//:/\n}" | while read dir; do find -L "$dir" \ -not -path '*/\.*' \ -type d \ -atime -"$modified_in_last_days" \ -maxdepth 1 done } _is_a_git_repo() { while read dir; do if [[ -d "$dir/.git" ]]; then basename "$dir" fi done } tm-select-session() { project=$(projects | fzf --reverse) if [ ! -z "$project" ]; then (cd "$project" && tat) fi } projects() { _cdpath_directories $1 | _is_a_git_repo } itree() { if [ -f .gitignore ]; then tree -I "$(cat .gitignore | paste -s -d'|' -)" -C | less -R else tree -I node_modules -C fi }
unsetopt auto_cd # with cdpath enabled, auto_cd gives too many false positives cdpath=( $HOME/code \ $HOME/code/work/current \ $HOME/code/work/current-two \ $HOME/code/work \ $HOME/code/vim \ $HOME/code/alfred \ $HOME ) _cdpath_directories() { modified_in_last_days=${1:-999} echo "${CDPATH//:/\n}" | while read dir; do find -L "$dir" \ -not -path '*/\.*' \ -type d \ -atime -"$modified_in_last_days" \ -maxdepth 1 done } _is_a_git_repo() { while read dir; do if [[ -d "$dir/.git" ]]; then basename "$dir" fi done } tm-select-session() { project=$(projects | fzf --reverse) if [ ! -z "$project" ]; then (cd "$project" && tat) fi } projects() { _cdpath_directories $1 | _is_a_git_repo } itree() { if [ -f .gitignore ]; then tree -I "$(cat .gitignore | paste -s -d'|' -)" -C | less -R else tree -I node_modules -C fi }
Install tarantool 1.10 from brew directly
#!/usr/bin/env bash set -x brew update if [[ "${TARANTOOL_VERSION}" != "none" ]]; then if [[ "${TARANTOOL_VERSION}" == "2_x" ]]; then brew install .ci/tarantool.rb --HEAD else brew install tarantool # brew install .ci/tarantool.rb fi fi
#!/usr/bin/env bash set -x brew update if [[ "${TARANTOOL_VERSION}" != "none" ]]; then if [[ "${TARANTOOL_VERSION}" == "2_x" ]]; then brew install .ci/tarantool.rb --HEAD else brew install tarantool fi fi
Add a revert of Xcode select
#!/bin/sh # make sure all users on this machine are members of the _developer group /usr/sbin/dseditgroup -o edit -a everyone -t group _developer # enable developer mode /usr/sbin/DevToolsSecurity -enable # accept Xcode license /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild -license accept # install embedded packages for PKG in /Applications/Xcode.app/Contents/Resources/Packages/*.pkg; do /usr/sbin/installer -pkg "$PKG" -target / done
#!/bin/sh # make sure all users on this machine are members of the _developer group /usr/sbin/dseditgroup -o edit -a everyone -t group _developer # enable developer mode /usr/sbin/DevToolsSecurity -enable # accept Xcode license /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild -license accept # avoid any conflit with previous command line tools installed xcode-select -r # install embedded packages for PKG in /Applications/Xcode.app/Contents/Resources/Packages/*.pkg; do /usr/sbin/installer -pkg "$PKG" -target / done
Set OUT_DIR for Android build system
#!/bin/sh jagen_sdk='android' # Many Android build and utility scripts assume Bash shell jagen_shell="/bin/bash" jagen_target_prefix="/system" : ${jagen_sdk_dir:?} : ${jagen_android_product:?} jagen_sdk_staging_dir="$jagen_sdk_dir/out/target/product/$jagen_android_product" export KDIR="$jagen_sdk_staging_dir/obj/KERNEL_OBJ" if in_flags ccache; then export USE_CCACHE=1 fi
#!/bin/sh jagen_sdk='android' # Many Android build and utility scripts assume Bash shell jagen_shell="/bin/bash" jagen_target_prefix="/system" : ${jagen_sdk_dir:?} : ${jagen_android_product:?} # set output directory of Android build system export OUT_DIR="${jagen_out_dir:?}/android" jagen_sdk_staging_dir="$OUT_DIR/target/product/$jagen_android_product" # for out of tree Linux kernel modules export KDIR="$jagen_sdk_staging_dir/obj/KERNEL_OBJ" if in_flags ccache; then export USE_CCACHE=1 fi
Add test for map/head from prelude/data.list
#!/bin/bash bin="../.cabal-sandbox/bin/ghc-imported-from" # TODO Automate these. HUnit? $bin Muddle.hs Muddle Maybe 11 11 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Just 12 7 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Just 16 10 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle String 20 14 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Int 22 23 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle DL.length 23 5 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle print 25 8 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle DM.fromList 27 5 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Safe.headMay 29 6 echo "" echo "---------------------------------------------------------" echo "" $bin Hiding.hs Hiding head 12 5
#!/bin/bash bin="../.cabal-sandbox/bin/ghc-imported-from" # TODO Automate these. HUnit? $bin Muddle.hs Muddle Maybe 11 11 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Just 12 7 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Just 16 10 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle String 20 14 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Int 22 23 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle DL.length 23 5 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle print 25 8 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle DM.fromList 27 5 echo "" echo "---------------------------------------------------------" echo "" $bin Muddle.hs Muddle Safe.headMay 29 6 echo "" echo "---------------------------------------------------------" echo "" $bin Hiding.hs Hiding map 12 5 echo "" echo "---------------------------------------------------------" echo "" $bin Hiding.hs Hiding head 12 5
Use easy_install to get pip
#!/bin/bash -xue function install_deb { sudo aptitude install -y python-dev libffi-dev python-pip } function install_centos { # GCC installed automatically in debian because of recommended packages # Doing that manually in centos. sudo yum -y install python-devel libffi-devel epel-release gcc # pip is not in the standard repo sudo yum -y install python-pip } function is_centos { [[ -f /etc/centos-release ]] } function is_deb { [[ -f /etc/debian_version ]] } function install { if is_deb; then install_deb elif is_centos; then install_centos else echo "Unknown distribution" exit 1 fi sudo pip install tox } install
#!/bin/bash -xue function install_deb { sudo aptitude install -y python-dev libffi-dev } function install_centos { # GCC installed automatically in debian because of recommended packages # Doing that manually in centos. sudo yum -y install python-devel libffi-devel epel-release gcc wget } function is_centos { [[ -f /etc/centos-release ]] } function is_deb { [[ -f /etc/debian_version ]] } function install { if is_deb; then install_deb elif is_centos; then install_centos else echo "Unknown distribution" exit 1 fi wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python - sudo easy_install pip sudo pip install tox } install
Add the server initiating script
#!/bin/bash # main script to start the recommendation server cd ../ source venv/bin/activate cd components if command -v python3; then python3 server.py elif command -v python; then python server.py else echo "Python 3 is required for the command!" exit 1 fi
#!/bin/bash # main script to start the recommendation server cd ../ cd components if command -v python3; then python3 server.py elif command -v python; then python server.py else echo "Python 3 is required for the command!" exit 1 fi
Fix the example to provide expected results.
#!/bin/sh exec 3<data1.i exec 4<data2.i cat <&4 | while read H; do cat <&3 | while read F; do echo $H $F done done
#!/bin/sh exec 4<data2.i function f() { exec 3<data1.i cat <&3 | while read F; do echo $H $F done } cat <&4 | while read H; do f done
Fix error in Doxygen install script.
#!/bin/bash # Copyright (c) 2014-2015 Kartik Kumar (me@kartikkumar.com) # Distributed under the MIT License. # See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT set -ev # Fetch and build updated version of Doxygen from source. wget http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.11.src.tar.gz tar -xzvf doxygen-1.8.11.linux.bin.tar.gz cd doxygen-1.8.11 && ./configure --prefix=$HOME/doxygen && make && make install
#!/bin/bash # Copyright (c) 2014-2015 Kartik Kumar (me@kartikkumar.com) # Distributed under the MIT License. # See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT set -ev # Fetch and build updated version of Doxygen from source. wget http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.11.src.tar.gz tar -xzvf doxygen-1.8.11.src.tar.gz cd doxygen-1.8.11 && ./configure --prefix=$HOME/doxygen && make && make install
Add homebrew bin to PATH
#!/bin/bash set -eux PROVISION_DIR="$HOME" export HOMEBREW_NO_AUTO_UPDATE=1 install_buildkite() { echo "Installing buildkite-agent" /usr/local/bin/brew tap buildkite/buildkite /usr/local/bin/brew install --devel buildkite-agent cp /tmp/buildkite-hooks/* /usr/local/etc/buildkite-agent/hooks/ rm -rf /tmp/buildkite-hooks } install_launchd_daemon() { local script="vmkite-buildkite-agent.sh" local plist="com.macstadium.vmkite-buildkite-agent.plist" echo "Installing launchd service" sudo cp "${PROVISION_DIR}/$script" "/usr/local/bin/$script" sudo cp "${PROVISION_DIR}/$plist" "/Library/LaunchDaemons/$plist" sudo chmod 0755 "/usr/local/bin/$script" sudo launchctl load "/Library/LaunchDaemons/$plist" } install_utils() { /usr/local/bin/brew install awscli jq } install_utils install_buildkite install_launchd_daemon # Write a version file so we can track which build this refers to cat << EOF > /etc/vmkite-info BUILDKITE_VERSION=$(buildkite-agent --version) BUILDKITE_BUILD_NUMBER=$BUILDKITE_BUILD_NUMBER BUILDKITE_BRANCH=$BUILDKITE_BRANCH BUILDKITE_COMMIT=$BUILDKITE_COMMIT EOF
#!/bin/bash set -eux PROVISION_DIR="$HOME" export HOMEBREW_NO_AUTO_UPDATE=1 export PATH=/usr/local/bin:$PATH install_buildkite() { echo "Installing buildkite-agent" brew tap buildkite/buildkite brew install --devel buildkite-agent cp /tmp/buildkite-hooks/* /usr/local/etc/buildkite-agent/hooks/ rm -rf /tmp/buildkite-hooks } install_launchd_daemon() { local script="vmkite-buildkite-agent.sh" local plist="com.macstadium.vmkite-buildkite-agent.plist" echo "Installing launchd service" sudo cp "${PROVISION_DIR}/$script" "/usr/local/bin/$script" sudo cp "${PROVISION_DIR}/$plist" "/Library/LaunchDaemons/$plist" sudo chmod 0755 "/usr/local/bin/$script" sudo launchctl load "/Library/LaunchDaemons/$plist" } install_utils() { brew install awscli jq } install_utils install_buildkite install_launchd_daemon # Write a version file so we can track which build this refers to cat << EOF > /etc/vmkite-info BUILDKITE_VERSION=$(buildkite-agent --version) BUILDKITE_BUILD_NUMBER=$BUILDKITE_BUILD_NUMBER BUILDKITE_BRANCH=$BUILDKITE_BRANCH BUILDKITE_COMMIT=$BUILDKITE_COMMIT EOF
Install nitor-deploy-tools before using them
#!/bin/bash # Copyright 2016-2017 Nitor Creations Oy # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -xe if [ -z "$1" -o "$1" = "latest" ]; then DEPLOYTOOLS_VERSION="" else DEPLOYTOOLS_VERSION="==$1" fi source $(n-include common_tools.sh) ln -snf /usr/bin/lpass_$(system_type_and_version) /usr/bin/lpass pip install -U pip setuptools awscli boto3 "nitor-deploy-tools$DEPLOYTOOLS_VERSION"
#!/bin/bash # Copyright 2016-2017 Nitor Creations Oy # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -xe if [ -z "$1" -o "$1" = "latest" ]; then DEPLOYTOOLS_VERSION="" else DEPLOYTOOLS_VERSION="==$1" fi pip install -U pip setuptools awscli boto3 "nitor-deploy-tools$DEPLOYTOOLS_VERSION" source $(n-include common_tools.sh) ln -snf /usr/bin/lpass_$(system_type_and_version) /usr/bin/lpass
Move $HOME:/bin forward in $PATH
export PATH="/usr/local/bin:/usr/local/sbin:$DOT/bin:$HOME/bin:$PATH"
export PATH="$HOME/bin:/usr/local/bin:/usr/local/sbin:$DOT/bin:$PATH"
Make sure ssh dir has permission 700
#!/bin/sh # License: CC0 1.0 Universal # https://creativecommons.org/publicdomain/zero/1.0/legalcode set -e SCRIPT_PATH=.travis . $SCRIPT_PATH/travis-doc-upload.cfg [ "$TRAVIS_BRANCH" = master ] [ "$TRAVIS_PULL_REQUEST" = false ] [ "$TRAVIS_RUST_VERSION" = "$DOC_RUST_VERSION" ] echo "Publishing docs..." eval key=\$encrypted_${SSH_KEY_TRAVIS_ID}_key eval iv=\$encrypted_${SSH_KEY_TRAVIS_ID}_iv mkdir -p ~/.ssh openssl aes-256-cbc -K $key -iv $iv -in $SCRIPT_PATH/id_rsa.enc -out ~/.ssh/id_rsa -d chmod 600 ~/.ssh/id_rsa git clone --branch gh-pages git@github.com:$DOCS_REPO deploy_docs cd deploy_docs git config user.name "doc upload bot" git config user.email "nobody@example.com" rm -rf $PROJECT_NAME mv ../target/doc $PROJECT_NAME git add -A $PROJECT_NAME git commit -qm "doc upload for $PROJECT_NAME ($TRAVIS_REPO_SLUG)" for i in {0..5}; do git push -q origin gh-pages && break # redo when push fails git pull -r || break # give up if rebase fails done echo "Doc upload completed"
#!/bin/sh # License: CC0 1.0 Universal # https://creativecommons.org/publicdomain/zero/1.0/legalcode set -e SCRIPT_PATH=.travis . $SCRIPT_PATH/travis-doc-upload.cfg [ "$TRAVIS_BRANCH" = master ] [ "$TRAVIS_PULL_REQUEST" = false ] [ "$TRAVIS_RUST_VERSION" = "$DOC_RUST_VERSION" ] echo "Publishing docs..." eval key=\$encrypted_${SSH_KEY_TRAVIS_ID}_key eval iv=\$encrypted_${SSH_KEY_TRAVIS_ID}_iv mkdir -p ~/.ssh chmod 700 .ssh openssl aes-256-cbc -K $key -iv $iv -in $SCRIPT_PATH/id_rsa.enc -out ~/.ssh/id_rsa -d chmod 600 ~/.ssh/id_rsa git clone --branch gh-pages git@github.com:$DOCS_REPO deploy_docs cd deploy_docs git config user.name "doc upload bot" git config user.email "nobody@example.com" rm -rf $PROJECT_NAME mv ../target/doc $PROJECT_NAME git add -A $PROJECT_NAME git commit -qm "doc upload for $PROJECT_NAME ($TRAVIS_REPO_SLUG)" for i in {0..5}; do git push -q origin gh-pages && break # redo when push fails git pull -r || break # give up if rebase fails done echo "Doc upload completed"
Remove gh_pages branch before pushing new documentation out
#!/bin/bash rev=$(git rev-parse --short HEAD) cd target/doc echo '<meta http-equiv=refresh content=0;url=rustyline/index.html>' > index.html git init git config user.name "Katsu Kawakami" git config user.email "kkawa1570@gmail.com" git remote add upstream "https://$GH_TOKEN@github.com/kkawakam/rustyline.git" git fetch upstream && git reset upstream/gh-pages touch . git add -A . git commit -m "rebuild pages at ${rev}" git push -f -q upstream HEAD:gh-pages > /dev/null 2>&1
#!/bin/bash rev=$(git rev-parse --short HEAD) cd target/doc echo '<meta http-equiv=refresh content=0;url=rustyline/index.html>' > index.html git init git config user.name "Katsu Kawakami" git config user.email "kkawa1570@gmail.com" git remote add upstream "https://$GH_TOKEN@github.com/kkawakam/rustyline.git" git fetch upstream git push origin --delete gh_pages > /dev/null 2>&1 touch . git add -A . git commit -m "rebuild pages at ${rev}" git push -f -q upstream gh-pages > /dev/null 2>&1
Fix travis: exit with non-zero if no VCF output per caller.
#!/bin/bash -xe source ~/.profile git clone -b dev https://github.com/GooglingTheCancerGenome/sv-callers.git cd sv-callers/snakemake CALLERS="['manta','delly','lumpy','gridss']" ECHO=$1 MODE=$2 SCH=$3 SAMPLES=$([ "$ECHO" -eq "1" ] && echo "samples_dummy.csv" || echo "samples.csv") USE_CONDA=$([ "$ECHO" -eq "0" ] && echo "--use-conda" || echo "") snakemake -C echo_run=$ECHO samples=$SAMPLES mode=$MODE \ enable_callers="$CALLERS" $USE_CONDA \ --latency-wait 30 --jobs \ --cluster "xenon -vvv scheduler $SCH --location local:// submit \ --name smk.{rule} --procs-per-node=1 --start-single-process --inherit-env \ --max-run-time 15 --working-directory ." if [ "$ECHO" -eq "0" ]; then find data -name workspace | xargs rm -fr find data -name *.vcf | xargs grep -cv "#" fi
#!/bin/bash -xe source ~/.profile git clone -b dev https://github.com/GooglingTheCancerGenome/sv-callers.git cd sv-callers/snakemake CALLERS=(manta delly lumpy gridss) STR_CALLERS="[$(printf "'%s'," "${CALLERS[@]}"|sed 's/,$//')]" ECHO=$1 MODE=$2 SCH=$3 SAMPLES=$([ "$ECHO" -eq "1" ] && echo "samples_dummy.csv" || echo "samples.csv") USE_CONDA=$([ "$ECHO" -eq "0" ] && echo "--use-conda" || echo "") echo "Selected callers: $STR_CALLERS" snakemake -C echo_run=$ECHO samples=$SAMPLES mode=$MODE \ enable_callers="$STR_CALLERS" $USE_CONDA \ --latency-wait 30 --jobs \ --cluster "xenon -vvv scheduler $SCH --location local:// submit \ --name smk.{rule} --procs-per-node=1 --start-single-process --inherit-env \ --max-run-time 15 --working-directory ." if [ "$ECHO" -eq "0" ]; then for c in "${CALLERS[@]}"; do vcf_file="$c.vcf" find data -name $vcf_file | xargs grep -cv "#" || exit 1 done fi
Add .github logo.svg .prettierrc.yml to exluded files for Buildkite
#!/bin/bash set -ex git clone git@github.com:timsuchanek/last-git-changes.git cd last-git-changes npm install npm run build cd .. node last-git-changes/bin.js --exclude='docs,fixtures,README.md,LICENSE,CONTRIBUTING.md' export CHANGED_COUNT=$(node last-git-changes/bin.js --exclude='docs,fixtures,README.md,LICENSE,CONTRIBUTING.md' | wc -l) echo $CHANGED_COUNT if [ $CHANGED_COUNT -gt 0 ]; then buildkite-agent pipeline upload .buildkite/trigger.yml else echo "Nothing changed" fi
#!/bin/bash set -ex git clone git@github.com:timsuchanek/last-git-changes.git cd last-git-changes npm install npm run build cd .. node last-git-changes/bin.js --exclude='docs,fixtures,README.md,LICENSE,CONTRIBUTING.md,.github,logo.svg,.prettierrc.yml' export CHANGED_COUNT=$(node last-git-changes/bin.js --exclude='docs,fixtures,README.md,LICENSE,CONTRIBUTING.md,.github,logo.svg,.prettierrc.yml' | wc -l) echo $CHANGED_COUNT if [ $CHANGED_COUNT -gt 0 ]; then buildkite-agent pipeline upload .buildkite/trigger.yml else echo "Nothing changed" fi
Remove cleaning up docker images.
#!/bin/sh _dockerize(){ ./pundun-docker build pundun-$1 $2 ./pundun-docker run pundun-$1 $2 ./pundun-docker fetch_package pundun-$1 $2 ./pundun-docker stop pundun-$1 $2 ./pundun-docker rm pundun-$1 $2 mkdir -p ../archive/$2 mv packages/* ../archive/$2/ docker push pundunlabs/pundun-$1:$2 } tag="$(git ls-remote --tags https://github.com/pundunlabs/pundun.git | cut -d "/" -f 3 |grep -v -|grep -v {| sort -n -t. -k1 -k2 -k3 -r | head -n1)" if [ "$(docker images -q pundunlabs/pundun-$tag:centos-6.7 2> /dev/null)" = "" ]; then { _dockerize $tag centos-6.7 _dockerize $tag ubuntu-16.04 docker rm -v $(docker ps -a -q -f status=exited) docker images -q --filter "dangling=true" | xargs docker rmi } else echo "image already pulled." fi #Cleanup dangling images
#!/bin/sh _dockerize(){ ./pundun-docker build pundun-$1 $2 ./pundun-docker run pundun-$1 $2 ./pundun-docker fetch_package pundun-$1 $2 ./pundun-docker stop pundun-$1 $2 ./pundun-docker rm pundun-$1 $2 mkdir -p ../archive/$2 mv packages/* ../archive/$2/ docker push pundunlabs/pundun-$1:$2 } tag="$(git ls-remote --tags https://github.com/pundunlabs/pundun.git | cut -d "/" -f 3 |grep -v -|grep -v {| sort -n -t. -k1 -k2 -k3 -r | head -n1)" if [ "$(docker images -q pundunlabs/pundun-$tag:centos-6.7 2> /dev/null)" = "" ]; then { _dockerize $tag centos-6.7 _dockerize $tag ubuntu-16.04 } else echo "image already pulled." fi #Cleanup dangling images
Set Docker image NS to openfaas
#!/bin/sh set -e export dockerfile="Dockerfile" export arch=$(uname -m) export eTAG="latest-dev" if [ "$arch" = "armv7l" ] ; then dockerfile="Dockerfile.armhf" eTAG="latest-armhf-dev" fi echo "$1" if [ "$1" ] ; then eTAG=$1 if [ "$arch" = "armv7l" ] ; then eTAG="$1-armhf" fi fi NS=alexellis echo Building $NS/gateway:$eTAG GIT_COMMIT_MESSAGE=$(git log -1 --pretty=%B 2>&1 | head -n 1) GIT_COMMIT_SHA=$(git rev-list -1 HEAD) VERSION=$(git describe --all --exact-match `git rev-parse HEAD` | grep tags | sed 's/tags\///' || echo dev) docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \ --build-arg GIT_COMMIT_MESSAGE="$GIT_COMMIT_MESSAGE" --build-arg GIT_COMMIT_SHA=$GIT_COMMIT_SHA \ --build-arg VERSION=${VERSION:-dev} \ -t $NS/gateway:$eTAG . -f $dockerfile --no-cache
#!/bin/sh set -e export dockerfile="Dockerfile" export arch=$(uname -m) export eTAG="latest-dev" if [ "$arch" = "armv7l" ] ; then dockerfile="Dockerfile.armhf" eTAG="latest-armhf-dev" fi echo "$1" if [ "$1" ] ; then eTAG=$1 if [ "$arch" = "armv7l" ] ; then eTAG="$1-armhf" fi fi NS=openfaas echo Building $NS/gateway:$eTAG GIT_COMMIT_MESSAGE=$(git log -1 --pretty=%B 2>&1 | head -n 1) GIT_COMMIT_SHA=$(git rev-list -1 HEAD) VERSION=$(git describe --all --exact-match `git rev-parse HEAD` | grep tags | sed 's/tags\///' || echo dev) docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \ --build-arg GIT_COMMIT_MESSAGE="$GIT_COMMIT_MESSAGE" --build-arg GIT_COMMIT_SHA=$GIT_COMMIT_SHA \ --build-arg VERSION=${VERSION:-dev} \ -t $NS/gateway:$eTAG . -f $dockerfile --no-cache
Use GH_REPO_TOKEN passed from Travis CI.
set -e mkdir docs-build && cd docs-build git clone -b gh-pages git@github.com:twoporeguys/librpc.git && cd librpc git config --global push.default simple git config user.name "Travis CI" git config user.email "travis@travis-ci.org" rm -rf * doxygen ${DOXYFILE} if [ -d "html" ] && [ -f "html/index.html" ]; then git add --all git commit -m "Deploy code docs to GitHub Pages (build ${TRAVIS_BUILD_NUMBER})" -m "Commit: ${TRAVIS_COMMIT}" git push --force git@github.com:twoporeguys/librpc.git > /dev/null 2>&1 else echo '' >&2 echo 'Warning: No documentation (html) files have been found!' >&2 echo 'Warning: Not going to push the documentation to GitHub!' >&2 exit 1 fi
set -e mkdir docs-build && cd docs-build git clone -b gh-pages git@github.com:twoporeguys/librpc.git && cd librpc git config --global push.default simple git config user.name "Travis CI" git config user.email "travis@travis-ci.org" rm -rf * doxygen ${DOXYFILE} if [ -d "html" ] && [ -f "html/index.html" ]; then git add --all git commit -m "Deploy code docs to GitHub Pages (build ${TRAVIS_BUILD_NUMBER})" -m "Commit: ${TRAVIS_COMMIT}" git push --force https://${GH_REPO_TOKEN}@github.com/twoporeguys/librpc.git > /dev/null 2>&1 else echo '' >&2 echo 'Warning: No documentation (html) files have been found!' >&2 echo 'Warning: Not going to push the documentation to GitHub!' >&2 exit 1 fi
Update script to include ssh_config. Maybe that should be the otherway around --> gogs
#!/bin/sh # Configure Git Replication sed -i 's/__GITLAB_IP__/'${GITLAB_PORT_80_TCP_ADDR}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_USER__/'${GITLAB_USER}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_PASSWORD__/'${GITLAB_PASSWORD}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_PROJ_ROOT__/'${GITLAB_PROJ_ROOT}'/g' /home/gerrit/gerrit/etc/replication.config # Configure Gerrit sed -i 's/__AUTH_TYPE__/'${AUTH_TYPE}'/g' /home/gerrit/gerrit/etc/gerrit.config service supervisor start
#!/bin/sh # Configure Git Replication sed -i 's/__GITLAB_IP__/'${GITLAB_PORT_80_TCP_ADDR}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_USER__/'${GITLAB_USER}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_PASSWORD__/'${GITLAB_PASSWORD}'/g' /home/gerrit/gerrit/etc/replication.config sed -i 's/__GITLAB_PROJ_ROOT__/'${GITLAB_PROJ_ROOT}'/g' /home/gerrit/gerrit/etc/replication.config # Configure Gerrit sed -i 's/__AUTH_TYPE__/'${AUTH_TYPE}'/g' /home/gerrit/gerrit/etc/gerrit.config # Add ssh key imported by Kubernetes to access gogs RUN echo "Host Gogs" >> /etc/ssh/ssh_config RUN echo "Hostname gogs-http-service.default.local" >> /etc/ssh/ssh_config RUN echo "IdentityFile /etc/secret-volume/id-rsa" >> /etc/ssh/ssh_config service supervisor start
Fix problems when using sSH keys with a non-root users.
#!/bin/sh set -e # If we there is an ssh key injected via lagoon and kubernetes, we use that if [ -f /var/run/secrets/lagoon/sshkey/ssh-privatekey ]; then cp -f /var/run/secrets/lagoon/sshkey/ssh-privatekey /home/.ssh/key # If there is an env variable SSH_PRIVATE_KEY we use that elif [ ! -z "$SSH_PRIVATE_KEY" ]; then echo -e "$SSH_PRIVATE_KEY" > /home/.ssh/key # If there is an env variable LAGOON_SSH_PRIVATE_KEY we use that elif [ ! -z "$LAGOON_SSH_PRIVATE_KEY" ]; then echo -e "$LAGOON_SSH_PRIVATE_KEY" > /home/.ssh/key fi if [ -f /home/.ssh/key ]; then # add a new line to the key. OpenSSH is very picky that keys are always end with a newline echo >> /home/.ssh/key # Fix permissions of SSH key chmod 400 /home/.ssh/key fi
#!/bin/sh set -e # If we there is an ssh key injected via lagoon and kubernetes, we use that if [ -f /var/run/secrets/lagoon/sshkey/ssh-privatekey ]; then cp -f /var/run/secrets/lagoon/sshkey/ssh-privatekey /home/.ssh/key # If there is an env variable SSH_PRIVATE_KEY we use that elif [ ! -z "$SSH_PRIVATE_KEY" ]; then echo -e "$SSH_PRIVATE_KEY" > /home/.ssh/key # If there is an env variable LAGOON_SSH_PRIVATE_KEY we use that elif [ ! -z "$LAGOON_SSH_PRIVATE_KEY" ]; then echo -e "$LAGOON_SSH_PRIVATE_KEY" > /home/.ssh/key fi if [ -f /home/.ssh/key ]; then # add a new line to the key. OpenSSH is very picky that keys are always end with a newline echo >> /home/.ssh/key # Fix permissions of SSH key chmod 600 /home/.ssh/key fi
Add verbose logging to curl S3 uploading.
# Adapted from https://gist.github.com/chrismdp/6c6b6c825b07f680e710 function rbx_s3_upload { local url bucket path dest src date acl content_type data signature url=$1 bucket=$2 dest=$3 src=$4 path=${5:-/} date=$(date +"%a, %d %b %Y %T %z") acl="x-amz-acl:public-read" content_type=$(file --mime-type -b "$src") data="PUT\n\n$content_type\n$date\n$acl\n/$bucket$path$dest" signature=$(echo -en "${data}" | openssl sha1 -hmac "${AWS_SECRET_ACCESS_KEY}" -binary | base64) curl -X PUT -T "$src" \ -H "Host: $bucket.s3-us-west-2.amazonaws.com" \ -H "Date: $date" \ -H "Content-Type: $content_type" \ -H "$acl" \ -H "Authorization: AWS ${AWS_ACCESS_KEY_ID}:$signature" \ "$url$path$dest" } function rbx_s3_download { local url file path url=$1 file=$2 path=${3:-/} curl -o "$file" "$url$path$file" }
# Adapted from https://gist.github.com/chrismdp/6c6b6c825b07f680e710 function rbx_s3_upload { local url bucket path dest src date acl content_type data signature url=$1 bucket=$2 dest=$3 src=$4 path=${5:-/} date=$(date +"%a, %d %b %Y %T %z") acl="x-amz-acl:public-read" content_type=$(file --mime-type -b "$src") data="PUT\n\n$content_type\n$date\n$acl\n/$bucket$path$dest" signature=$(echo -en "${data}" | openssl sha1 -hmac "${AWS_SECRET_ACCESS_KEY}" -binary | base64) curl -X PUT -T "$src" \ -H "Host: $bucket.s3-us-west-2.amazonaws.com" \ -H "Date: $date" \ -H "Content-Type: $content_type" \ -H "$acl" \ -H "Authorization: AWS ${AWS_ACCESS_KEY_ID}:$signature" \ --verbose \ "$url$path$dest" } function rbx_s3_download { local url file path url=$1 file=$2 path=${3:-/} curl -o "$file" "$url$path$file" }
Use command mode of su for running Hive command
#!/bin/bash set -euo pipefail -x . ${BASH_SOURCE%/*}/common.sh cleanup_docker_containers start_docker_containers # generate test data exec_in_hadoop_master_container su hive -s /usr/bin/hive -f /files/sql/create-test.sql stop_unnecessary_hadoop_services HADOOP_MASTER_IP=$(hadoop_master_ip) # run product tests pushd ${PROJECT_ROOT} set +e ./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2 \ -Dhive.hadoop2.timeZone=UTC \ -DHADOOP_USER_NAME=hive \ -Dhive.hadoop2.metastoreHost=localhost \ -Dhive.hadoop2.metastorePort=9083 \ -Dhive.hadoop2.databaseName=default \ -Dhive.hadoop2.metastoreHost=hadoop-master \ -Dhive.hadoop2.timeZone=Asia/Kathmandu \ -Dhive.metastore.thrift.client.socks-proxy=${PROXY}:1180 \ -Dhadoop-master-ip=${HADOOP_MASTER_IP} EXIT_CODE=$? set -e popd cleanup_docker_containers exit ${EXIT_CODE}
#!/bin/bash set -euo pipefail -x . ${BASH_SOURCE%/*}/common.sh cleanup_docker_containers start_docker_containers # generate test data exec_in_hadoop_master_container su hive -c '/usr/bin/hive -f /files/sql/create-test.sql' stop_unnecessary_hadoop_services HADOOP_MASTER_IP=$(hadoop_master_ip) # run product tests pushd ${PROJECT_ROOT} set +e ./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2 \ -Dhive.hadoop2.timeZone=UTC \ -DHADOOP_USER_NAME=hive \ -Dhive.hadoop2.metastoreHost=localhost \ -Dhive.hadoop2.metastorePort=9083 \ -Dhive.hadoop2.databaseName=default \ -Dhive.hadoop2.metastoreHost=hadoop-master \ -Dhive.hadoop2.timeZone=Asia/Kathmandu \ -Dhive.metastore.thrift.client.socks-proxy=${PROXY}:1180 \ -Dhadoop-master-ip=${HADOOP_MASTER_IP} EXIT_CODE=$? set -e popd cleanup_docker_containers exit ${EXIT_CODE}
Update push branch to gh-pages
#!/bin/bash if [ "$TRAVIS_REPO_SLUG" == "openxc/openxc-android" ] && [ "$TRAVIS_JDK_VERSION" == "openjdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "automate-javadoc" ]; then cp -R library/build/docs/javadoc $HOME/javadoc-latest cd $HOME git config --global user.email "travis@travis-ci.org" git config --global user.name "Travis-CI" git clone --quiet --branch=master https://${GH_TOKEN}@github.com/openxc/openxc-android master > /dev/null cd master LATEST_TAG=$(git describe --abbrev=0 --tags) cd ../ git clone --quiet --branch=automate-gh-pages https://${GH_TOKEN}@github.com/openxc/openxc-android automate-gh-pages > /dev/null cd automate-gh-pages git rm -rf ./ cp -Rf $HOME/javadoc-latest/. ./ git add -f . git commit -m "JavaDoc $LATEST_TAG - Travis Build $TRAVIS_BUILD_NUMBER" git push -fq origin automate-gh-pages > /dev/null fi
#!/bin/bash if [ "$TRAVIS_REPO_SLUG" == "openxc/openxc-android" ] && [ "$TRAVIS_JDK_VERSION" == "openjdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "automate-javadoc" ]; then cp -R library/build/docs/javadoc $HOME/javadoc-latest cd $HOME git config --global user.email "travis@travis-ci.org" git config --global user.name "Travis-CI" git clone --quiet --branch=master https://${GH_TOKEN}@github.com/openxc/openxc-android master > /dev/null cd master LATEST_TAG=$(git describe --abbrev=0 --tags) cd ../ git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/openxc/openxc-android gh-pages > /dev/null cd gh-pages git rm -rf ./ cp -Rf $HOME/javadoc-latest/. ./ git add -f . git commit -m "JavaDoc $LATEST_TAG - Travis Build $TRAVIS_BUILD_NUMBER" git push -fq origin gh-pages > /dev/null fi
Change to alias and remove deprecated GREP_COLOR
# is x grep argument available? grep-flag-available() { echo | grep $1 "" >/dev/null 2>&1 } # color grep results GREP_OPTIONS="--color=auto" # ignore VCS folders (if the necessary grep flags are available) VCS_FOLDERS="{.bzr,.cvs,.git,.hg,.svn}" if grep-flag-available --exclude-dir=.cvs; then GREP_OPTIONS+=" --exclude-dir=$VCS_FOLDERS" elif grep-flag-available --exclude=.cvs; then GREP_OPTIONS+=" --exclude=$VCS_FOLDERS" fi # export grep settings export GREP_OPTIONS="$GREP_OPTIONS" export GREP_COLOR='1;32' # clean up unset VCS_FOLDERS unfunction grep-flag-available
# is x grep argument available? grep-flag-available() { echo | grep $1 "" >/dev/null 2>&1 } # color grep results GREP_OPTIONS="--color=auto" # ignore VCS folders (if the necessary grep flags are available) VCS_FOLDERS="{.bzr,.cvs,.git,.hg,.svn}" if grep-flag-available --exclude-dir=.cvs; then GREP_OPTIONS+=" --exclude-dir=$VCS_FOLDERS" elif grep-flag-available --exclude=.cvs; then GREP_OPTIONS+=" --exclude=$VCS_FOLDERS" fi # export grep settings alias grep="grep $GREP_OPTIONS" # clean up unset GREP_OPTIONS unset VCS_FOLDERS unfunction grep-flag-available
Set new relic app name
#!/bin/bash # This script will quit on the first error that is encountered. set -e CIRCLE=$1 DEPLOY_DATE=`date "+%FT%T%z"` heroku config:set --app=eggtimer \ ADMIN_EMAIL="egg.timer.app@gmail.com" \ ADMIN_NAME="egg timer" \ DJANGO_SETTINGS_MODULE=eggtimer.settings.production \ DJANGO_SECRET_KEY=$DJANGO_SECRET_KEY \ DEPLOY_DATE="$DEPLOY_DATE" \ > /dev/null if [ $CIRCLE ] then git push git@heroku.com:eggtimer.git $CIRCLE_SHA1:refs/heads/master else git push heroku master fi heroku run python manage.py syncdb --noinput --app=eggtimer heroku run python manage.py migrate --noinput --app=eggtimer
#!/bin/bash # This script will quit on the first error that is encountered. set -e CIRCLE=$1 DEPLOY_DATE=`date "+%FT%T%z"` heroku config:set --app=eggtimer \ NEW_RELIC_APP_NAME='eggtimer' \ ADMIN_EMAIL="egg.timer.app@gmail.com" \ ADMIN_NAME="egg timer" \ DJANGO_SETTINGS_MODULE=eggtimer.settings.production \ DJANGO_SECRET_KEY=$DJANGO_SECRET_KEY \ DEPLOY_DATE="$DEPLOY_DATE" \ > /dev/null if [ $CIRCLE ] then git push git@heroku.com:eggtimer.git $CIRCLE_SHA1:refs/heads/master else git push heroku master fi heroku run python manage.py syncdb --noinput --app=eggtimer heroku run python manage.py migrate --noinput --app=eggtimer
Use --no-daemon to get rid of a line in the output
#!/bin/sh echo Look at https://centic9.github.io/github-version-statistics/ for results cd `dirname $0` git fetch && \ git rebase origin/master && \ rm -rf build && \ ./gradlew clean && \ ./gradlew check installDist && \ build/install/github-version-statistics/bin/github-version-statistics && \ ./gradlew processResults && \ git add stats* && git add docs && git ci -m "Add daily results" && \ git push
#!/bin/sh echo Look at https://centic9.github.io/github-version-statistics/ for results cd `dirname $0` git fetch && \ git rebase origin/master && \ rm -rf build && \ ./gradlew --no-daemon clean && \ ./gradlew --no-daemon check installDist && \ build/install/github-version-statistics/bin/github-version-statistics && \ ./gradlew --no-daemon processResults && \ git add stats* && git add docs && git ci -m "Add daily results" && \ git push
Add build outcome to seed data
#!/bin/bash function aBuild { START=$RANDOM END=$[ $START + ( $RANDOM % 100 ) ] echo '{"start":' $START ', "end":' $END '}' } function send { JOB=$1 BUILD=$2 curl -H "Content-Type: application/json" --data @- -XPUT "http://localhost:3000/builds/${JOB}/${BUILD}" } aBuild | send "someBuild" 1 aBuild | send "someBuild" 2 aBuild | send "someBuild" 3 aBuild | send "anotherBuild" 1 aBuild | send "yetAnotherBuild" 1 aBuild | send "yetAnotherBuild" 2
#!/bin/bash function aBuild { START=$RANDOM END=$[ $START + ( $RANDOM % 100 ) ] if [[ $[ $RANDOM % 2 ] -eq 0 ]]; then OUTCOME="pass" else OUTCOME="fail" fi echo '{"start": '$START', "end": '$END', "outcome": "'$OUTCOME'"}' } function send { JOB=$1 BUILD=$2 curl -H "Content-Type: application/json" --data @- -XPUT "http://localhost:3000/builds/${JOB}/${BUILD}" } aBuild | send "someBuild" 1 aBuild | send "someBuild" 2 aBuild | send "someBuild" 3 aBuild | send "anotherBuild" 1 aBuild | send "yetAnotherBuild" 1 aBuild | send "yetAnotherBuild" 2
Add some additional echo statements since we're no longer using an init.d script
#!/bin/bash set -e echo "Starting Redis..." service redis-server restart echo "Starting Openvas..." echo "Starting gsad" gsad --http-only openvassd openvasmd echo "This may take a minute or two..." openvasmd --rebuild echo "Checking setup" curl -s --insecure --location -o openvas-check-setup.sh https://svn.wald.intevation.org/svn/openvas/trunk/tools/openvas-check-setup chmod 0755 ./openvas-check-setup.sh ./openvas-check-setup.sh --v8 --server echo "Done."
#!/bin/bash set -e echo "Starting Redis..." service redis-server restart echo "Starting Openvas..." echo "Starting gsad" gsad --http-only echo "Starting openvas-scanner..." openvassd echo "Starting openvas-manager" openvasmd echo "This may take a minute or two..." openvasmd --rebuild echo "Checking setup" curl -s --insecure --location -o openvas-check-setup.sh https://svn.wald.intevation.org/svn/openvas/trunk/tools/openvas-check-setup chmod 0755 ./openvas-check-setup.sh ./openvas-check-setup.sh --v8 --server echo "Done."
Drop BLD environment variable as it is unused
if [ ! -z "${EXT_CC}" ] && [ "${EXT_CC}" != "<UNDEFINED>" ]; then CC="${EXT_CC}" fi if [ ! -z "${EXT_CXX}" ] && [ "${EXT_CXX}" != "<UNDEFINED>" ]; then CXX="${EXT_CXX}" fi # CONFIGURE SRC="$(pwd)" mkdir -pv build cd build BLD="$(pwd)" cmake "${SRC}"\ -DCMAKE_MACOSX_RPATH=false \ \ -DCMAKE_PREFIX_PATH="${PREFIX}" \ \ -DCMAKE_SHARED_LINKER_FLAGS="${LDFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ \ -DCMAKE_CXX_LINK_FLAGS="${CXXFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ -DCMAKE_CXX_FLAGS="${CXXFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ \ -DBOOST_ROOT="${PREFIX}" \ -DVIGRA_ROOT="${PREFIX}" \ \ -DPYTHON_EXECUTABLE="${PYTHON}" \ \ # BUILD (in parallel) make -j${CPU_COUNT} # "install" to the build prefix (conda will relocate these files afterwards) make install
if [ ! -z "${EXT_CC}" ] && [ "${EXT_CC}" != "<UNDEFINED>" ]; then CC="${EXT_CC}" fi if [ ! -z "${EXT_CXX}" ] && [ "${EXT_CXX}" != "<UNDEFINED>" ]; then CXX="${EXT_CXX}" fi # CONFIGURE SRC="$(pwd)" mkdir -pv build cd build cmake "${SRC}"\ -DCMAKE_MACOSX_RPATH=false \ \ -DCMAKE_PREFIX_PATH="${PREFIX}" \ \ -DCMAKE_SHARED_LINKER_FLAGS="${LDFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ \ -DCMAKE_CXX_LINK_FLAGS="${CXXFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ -DCMAKE_CXX_FLAGS="${CXXFLAGS} -Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib" \ \ -DBOOST_ROOT="${PREFIX}" \ -DVIGRA_ROOT="${PREFIX}" \ \ -DPYTHON_EXECUTABLE="${PYTHON}" \ \ # BUILD (in parallel) make -j${CPU_COUNT} # "install" to the build prefix (conda will relocate these files afterwards) make install
Create one more German dictionary "-de"
#!/bin/zsh # # Copyright 2013 by Idiap Research Institute, http://www.idiap.ch # # See the file COPYING for the licence associated with this software. # # Author(s): # Phil Garner, April 2013 # source ../Common.sh chdir local # Submit to a multithreading queue that has 8 threads (with max 8GB # each) adding up to 64GB total. We need the memory, not the threads. export USE_GE=1 export GE_OPTIONS="-l q1dm -l mem_free=64G -pe pe_mth 8" export IN_DICT=dictionary.txt create-phsaurus.sh export IN_DICT=dictionary-6.txt create-phsaurus.sh
#!/bin/zsh # # Copyright 2013 by Idiap Research Institute, http://www.idiap.ch # # See the file COPYING for the licence associated with this software. # # Author(s): # Phil Garner, April 2013 # source ../Common.sh chdir local # Submit to a multithreading queue that has 8 threads (with max 8GB # each) adding up to 64GB total. We need the memory, not the threads. export USE_GE=1 export GE_OPTIONS="-l q1wm -pe pe_mth 8" rm -f job-file export IN_DICT=dictionary.txt create-phsaurus.sh rm -f job-file export IN_DICT=dictionary-ext.txt create-phsaurus.sh rm -f job-file export IN_DICT=dictionary-de.txt create-phsaurus.sh
Fix unable to access SSL connect error
#!/bin/bash # # install Git on RHEL yum -y remove git yum -y install curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc perl-ExtUtils-MakeMaker asciidoc xmlto export GIT_VER=$1 cd /opt/src && wget https://www.kernel.org/pub/software/scm/git/git-${GIT_VER}.tar.gz && tar xzf git-${GIT_VER}.tar.gz && cd /opt/src/git-${GIT_VER} || exit 1 make prefix=/opt/git-${GIT_VER} all && make prefix=/opt/git-${GIT_VER} install && make prefix=/opt/git-${GIT_VER} install-man || exit 1 ln -s /opt/git-${GIT_VER} /opt/git cat > /etc/profile.d/git.sh <<EOF export PATH=\$PATH:/opt/git/bin export MANPATH=/opt/git/share/man:\$MANPATH EOF
#!/bin/bash # # install Git on RHEL yum -y remove git yum -y install nss curl libcurl curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc perl-ExtUtils-MakeMaker asciidoc xmlto export GIT_VER=$1 cd /opt/src && wget https://www.kernel.org/pub/software/scm/git/git-${GIT_VER}.tar.gz && tar xzf git-${GIT_VER}.tar.gz && cd /opt/src/git-${GIT_VER} || exit 1 make prefix=/opt/git-${GIT_VER} all && make prefix=/opt/git-${GIT_VER} install && make prefix=/opt/git-${GIT_VER} install-man || exit 1 ln -s /opt/git-${GIT_VER} /opt/git cat > /etc/profile.d/git.sh <<EOF export PATH=\$PATH:/opt/git/bin export MANPATH=/opt/git/share/man:\$MANPATH EOF