Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Make parameter for number the number of threads to use with `make`
#! /bin/sh make -j1 V=s >> compile.log 2>&1
#! /bin/sh if [ -z "$1" ] then THREADS=1 else THREADS=$1 fi make -j${THREADS} V=s >> compile.log 2>&1
Build docker base images first
if [[ -f build_docker_images.sh ]]; then cd .. fi for image in `ls -d dockerfiles/*`; do echo '**********************************************' echo Building: $image echo '**********************************************' docker build $image done
#!/bin/bash if [[ -f build_docker_images.sh ]]; then cd .. fi echo '**********************************************' echo "Updating base Ubuntu image..." echo '**********************************************' docker pull ubuntu base_images=`ls -d dockerfiles/base*` images=`ls -d dockerfiles/* | grep -v base_` build_image () { container="mltp/`basename $1`" echo '**********************************************' echo Building: $container echo '**********************************************' docker build -t $container $image } for image in $base_images; do build_image $image done for image in $images; do build_image $image done
Add Sonar host to CI coverage script
#!/usr/bin/env bash if [[ $(bc -l <<< "$(java -version 2>&1 | awk -F '\"' '/version/ {print $2}' | awk -F'.' '{print $1"."$2}') >= 11") -eq 1 ]]; then export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo "pull-request-$TRAVIS_PULL_REQUEST"; fi) mvn -Pcoverage -B verify sonar:sonar -Dsonar.projectKey=xlate_staedi -Dsonar.branch.name=$BRANCH else echo "Not Java 11" fi
#!/usr/bin/env bash if [[ $(bc -l <<< "$(java -version 2>&1 | awk -F '\"' '/version/ {print $2}' | awk -F'.' '{print $1"."$2}') >= 11") -eq 1 ]]; then export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo "pull-request-$TRAVIS_PULL_REQUEST"; fi) mvn -Pcoverage -B verify sonar:sonar -Dsonar.host.url=https://sonarcloud.io -Dsonar.projectKey=xlate_staedi -Dsonar.branch.name=$BRANCH else echo "Not Java 11" fi
Refactor to use the post-deploy script
#!/bin/bash # sanity check if [[ -a /etc/supervisor/conf.d/supervisord.conf ]]; then exit 0 fi # supervisor cat > /etc/supervisor/conf.d/supervisord.conf <<EOF [supervisord] nodaemon=true [program:alaveteli] directory=/opt/alaveteli command=bundle exec thin [program:rsyslog] command=/usr/sbin/rsyslogd -n -c3 EOF cd /opt/alaveteli bundle exec rake themes:install
#!/bin/bash # sanity check if [[ -a /etc/supervisor/conf.d/supervisord.conf ]]; then exit 0 fi # supervisor cat > /etc/supervisor/conf.d/supervisord.conf <<EOF [supervisord] nodaemon=true [program:alaveteli] directory=/opt/alaveteli command=bundle exec thin [program:rsyslog] command=/usr/sbin/rsyslogd -n -c3 EOF cd /opt/alaveteli script/rails-post-deploy
Store the server's PID in a file that's not shared with the host filesystem
#!/bin/bash service postgresql start service redis-server start # Required for celery pid_file=/app/_citydash_django.pid server_out=/app/django.log server_err=/app/error.log if [ -f $pid_file ]; then pid=$(cat $pid_file); echo "Killing server ($pid)"; kill -KILL $(cat $pid_file); rm -f $pid_file fi; nohup python /app/manage.py runserver 0.0.0.0:3000 >$server_out 2>$server_err& echo $! >$pid_file
#!/bin/bash service postgresql start service redis-server start # Required for celery pid_file=/var/run/_citydash_django.pid server_out=/app/django.log server_err=/app/error.log if [ -f $pid_file ]; then pid=$(cat $pid_file); echo "Killing server ($pid)"; kill -KILL $(cat $pid_file); rm -f $pid_file fi; nohup python /app/manage.py runserver 0.0.0.0:3000 >$server_out 2>$server_err& echo $! >$pid_file
Add additional scope for android canary
# This file contains constants for the shell scripts which interact with the # Skia's GCE instances. GCUTIL=`which gcutil` # Set all constants in compute_engine_cfg.py as env variables. $(python ../compute_engine_cfg.py) if [ $? != "0" ]; then echo "Failed to read compute_engine_cfg.py!" exit 1 fi # If this is true, then the VM instances will be set up with auth scopes # appropriate for the android merge bot. if [ "$VM_IS_ANDROID_MERGE" = 1 ]; then SCOPES="https://www.googleapis.com/auth/gerritcodereview,$SCOPES" fi # TODO(rmistry): Investigate moving the below constants to compute_engine_cfg.py CHROME_MASTER_HOST=/tmp/chrome_master_host REQUIRED_FILES_FOR_LINUX_BOTS=(/tmp/.boto \ /tmp/.bot_password \ /tmp/.netrc \ $CHROME_MASTER_HOST) REQUIRED_FILES_FOR_WIN_BOTS=(/tmp/chrome-bot.txt \ /tmp/.boto \ /tmp/.bot_password \ /tmp/.netrc \ $CHROME_MASTER_HOST) GCOMPUTE_CMD="$GCUTIL --project=$PROJECT_ID" GCOMPUTE_SSH_CMD="$GCOMPUTE_CMD --zone=$ZONE ssh --ssh_user=$PROJECT_USER"
# This file contains constants for the shell scripts which interact with the # Skia's GCE instances. GCUTIL=`which gcutil` # Set all constants in compute_engine_cfg.py as env variables. $(python ../compute_engine_cfg.py) if [ $? != "0" ]; then echo "Failed to read compute_engine_cfg.py!" exit 1 fi # If this is true, then the VM instances will be set up with auth scopes # appropriate for the android merge bot. if [ "$VM_IS_ANDROID_MERGE" = 1 ]; then SCOPES="https://www.googleapis.com/auth/androidbuild.internal,https://www.googleapis.com/auth/gerritcodereview,$SCOPES" fi # TODO(rmistry): Investigate moving the below constants to compute_engine_cfg.py CHROME_MASTER_HOST=/tmp/chrome_master_host REQUIRED_FILES_FOR_LINUX_BOTS=(/tmp/.boto \ /tmp/.bot_password \ /tmp/.netrc \ $CHROME_MASTER_HOST) REQUIRED_FILES_FOR_WIN_BOTS=(/tmp/chrome-bot.txt \ /tmp/.boto \ /tmp/.bot_password \ /tmp/.netrc \ $CHROME_MASTER_HOST) GCOMPUTE_CMD="$GCUTIL --project=$PROJECT_ID" GCOMPUTE_SSH_CMD="$GCOMPUTE_CMD --zone=$ZONE ssh --ssh_user=$PROJECT_USER"
Fix Circle CI pre-machine step
#!/bin/bash -ex case "$1" in pre_machine) # have docker bind to localhost docker_opts='DOCKER_OPTS="$DOCKER_OPTS -H tcp://127.0.0.1:2375"' sudo sh -c "echo '$docker_opts' >> /etc/default/docker" cat /etc/default/docker ;; dependencies) mvn clean install -T 2 -Dmaven.javadoc.skip=true -DskipTests=true -B -V ;; pre_test) # clean the artifacts dir from the previous build rm -rf artifacts && mkdir artifacts ;; test) # expected parallelism: 2x. needs to be set in the project settings via CircleCI's UI. case $CIRCLE_NODE_INDEX in 0) # run all tests *except* helios-system-tests sed -i'' 's/<module>helios-system-tests<\/module>//' pom.xml mvn test -B ;; 1) # run helios-system-tests mvn test -B -pl helios-system-tests ;; esac ;; post_test) # collect artifacts into the artifacts dir find . -regex ".*/target/.*-[0-9]\.jar" | xargs -I {} mv {} artifacts find . -regex ".*/target/.*-SNAPSHOT\.jar" | xargs -I {} mv {} artifacts find . -regex ".*/target/.*\.deb" | xargs -I {} mv {} artifacts ;; esac
#!/bin/bash -ex case "$1" in pre_machine) # have docker bind to localhost docker_opts='DOCKER_OPTS="$DOCKER_OPTS -H tcp://127.0.0.1:2375"' sudo sh -c "echo '$docker_opts' >> /etc/default/docker" cat /etc/default/docker # Edit pom files to have correct version syntax for i in $(find . -name pom.xml -not -path './.rvm*'); do sed -i "s/\${revision}/0/g" $i; done ;; dependencies) mvn clean install -T 2 -Dmaven.javadoc.skip=true -DskipTests=true -B -V ;; pre_test) # clean the artifacts dir from the previous build rm -rf artifacts && mkdir artifacts ;; test) # expected parallelism: 2x. needs to be set in the project settings via CircleCI's UI. case $CIRCLE_NODE_INDEX in 0) # run all tests *except* helios-system-tests sed -i'' 's/<module>helios-system-tests<\/module>//' pom.xml mvn test -B ;; 1) # run helios-system-tests mvn test -B -pl helios-system-tests ;; esac ;; post_test) # collect artifacts into the artifacts dir find . -regex ".*/target/.*-[0-9]\.jar" | xargs -I {} mv {} artifacts find . -regex ".*/target/.*-SNAPSHOT\.jar" | xargs -I {} mv {} artifacts find . -regex ".*/target/.*\.deb" | xargs -I {} mv {} artifacts ;; esac
Use a tmp file instead of pipe to avoid human intervention
#!/bin/bash # install chef server from opscode repo via apt CHEF_SERVER=$(hostname) CHEF_PASSWORD=${CHEF_PASSWORD:-ChefServer} echo "deb http://apt.opscode.com/ `lsb_release -cs`-0.10 main" | sudo tee /etc/apt/sources.list.d/opscode.list sudo mkdir -p /etc/apt/trusted.gpg.d gpg --keyserver keys.gnupg.net --recv-keys 83EF826A gpg --export packages@opscode.com | sudo tee /etc/apt/trusted.gpg.d/opscode-keyring.gpg > /dev/null sudo apt-get update sudo apt-get install -y opscode-keyring # permanent upgradeable keyring sudo apt-get install -y debconf-utils sudo apt-get -y upgrade cat | sudo debconf-set-selections << EOF # New password for the 'admin' user in the Chef Server WebUI: chef-server-webui chef-server-webui/admin_password password ${CHEF_PASSWORD} # New password for the 'chef' AMQP user in the RabbitMQ vhost "/chef": chef-solr chef-solr/amqp_password password ${CHEF_PASSWORD} # URL of Chef Server (e.g., http://chef.example.com:4000): chef chef/chef_server_url string http://${CHEF_SERVER}:4000 EOF sudo apt-get -y install chef chef-server chef-server-api chef-expander
#!/bin/bash # install chef server from opscode repo via apt CHEF_SERVER=$(hostname) CHEF_PASSWORD=${CHEF_PASSWORD:-ChefServer} echo "deb http://apt.opscode.com/ `lsb_release -cs`-0.10 main" | sudo tee /etc/apt/sources.list.d/opscode.list sudo mkdir -p /etc/apt/trusted.gpg.d gpg --keyserver keys.gnupg.net --recv-keys 83EF826A gpg --export packages@opscode.com | sudo tee /etc/apt/trusted.gpg.d/opscode-keyring.gpg > /dev/null sudo apt-get update sudo apt-get install -y opscode-keyring # permanent upgradeable keyring sudo apt-get install -y debconf-utils sudo apt-get -y upgrade cat > /tmp/chef_seed << EOF # New password for the 'admin' user in the Chef Server WebUI: chef-server-webui chef-server-webui/admin_password password ${CHEF_PASSWORD} # New password for the 'chef' AMQP user in the RabbitMQ vhost "/chef": chef-solr chef-solr/amqp_password password ${CHEF_PASSWORD} # URL of Chef Server (e.g., http://chef.example.com:4000): chef chef/chef_server_url string http://${CHEF_SERVER}:4000 EOF sudo debconf-set-selections < /tmp/chef_seed rm -rf /tmp/chef_seed sudo apt-get -y install chef chef-server chef-server-api chef-expander
Make script work for linux as well.
#!/bin/sh if [ ! $# == 1 ]; then echo "Usage: $0 <release_version>" exit 1 fi if [ ! -f epgp.toc ]; then echo "You must run this script from the root of the epgp directory!" exit 1 fi EPGP_DIR=$PWD RELEASE_ZIP="$EPGP_DIR/epgp-$1.zip" pushd .. zip -r "$RELEASE_ZIP" epgp -x \*/.svn/\* -x \*/scripts/\* -x \*/wiki/\* -x \*~ popd unzip "$RELEASE_ZIP" pushd epgp sed -i "" -e "s/@VERSION@/$1/" epgp.toc popd zip -u -r "$RELEASE_ZIP" epgp/epgp.toc echo "Release file at $RELEASE_ZIP" echo "Now you can tag this release by executing: svn import \"$EPGP_DIR/epgp\" https://epgp.googlecode.com/svn/tags/epgp-$1"
#!/bin/bash set -x if [ "$#" -ne 1 ]; then echo "Usage: $0 <release_version>" exit 1 fi if [ ! -f epgp.toc ]; then echo "You must run this script from the root of the epgp directory!" exit 1 fi EPGP_DIR=$PWD RELEASE_ZIP="$EPGP_DIR/epgp-$1.zip" pushd .. zip -r "$RELEASE_ZIP" epgp -x \*/.svn/\* -x \*/scripts/\* -x \*/wiki/\* -x \*~ popd unzip "$RELEASE_ZIP" pushd epgp sed --in-place=tmp --expression="s/@VERSION@/$1/" epgp.toc popd zip -u -r "$RELEASE_ZIP" epgp/epgp.toc echo "Release file at $RELEASE_ZIP" echo "Now you can tag this release by executing: svn import \"$EPGP_DIR/epgp\" https://epgp.googlecode.com/svn/tags/epgp-$1"
Fix firefox launch and python path on Travis
#!/bin/bash npm run clean npm run build npm test npm run test:coverage npm run test:integration npm run docs
#!/bin/bash export DISPLAY=:99.0 sh -e /etc/init.d/xvfb start npm run clean npm run build npm test npm run test:coverage export PATH="$HOME/miniconda/bin:$PATH" npm run test:integration npm run docs
Fix group import so that test modules must be imported explicitly
package(){ if [[ ${1} ]]; then local files=($(find ${projectDir}bash-toolbox/${1//./\/} -type f -iname "*.sh")) if [ ${#files[@]} -lt 3 ]; then echo -e "[ ERROR ] \033[0;31m"The package \"${1}\" is not large enough to import using package."\033[0m" echo -e "[ ERROR ] \033[0;31m"Please use \"include\" instead on files from ${1}."\033[0m" exit fi for file in ${files[@]}; do if [[ ${file} != *lib* ]]; then source ${file} fi if [[ ${1} != *test* ]]; then if [[ ${file} != *test* && ${file} != *Test* ]]; then source ${file} fi fi done else echo -e "[ ERROR ] \033[0;31m"Cannot import an empty package."\033[0m" exit fi }
package(){ if [[ ${1} ]]; then local files=($(find ${projectDir}bash-toolbox/${1//./\/} -type f -iname "*.sh")) if [ ${#files[@]} -lt 3 ]; then echo -e "[ ERROR ] \033[0;31m"The package \"${1}\" is not large enough to import using package."\033[0m" echo -e "[ ERROR ] \033[0;31m"Please use \"include\" instead on files from ${1}."\033[0m" exit fi for file in ${files[@]}; do if [[ ${file} != *lib* ]]; then if [[ ${file} == *test* && ${file} == *Test* ]]; then if [[ ${1} == *test* ]]; then source ${file} fi fi source ${file} done else echo -e "[ ERROR ] \033[0;31m"Cannot import an empty package."\033[0m" exit fi }
Enable autotools testing on Travis.
#!/bin/bash set -e if [ "x$AUTOTOOLS" == "xyes" ]; then autoreconf ./configure make fi make test TEST_FLAGS=--skip
#!/bin/bash set -e if [ "x$AUTOTOOLS" == "xyes" ]; then autoreconf ./configure --enable-tests \ --with-sassc-dir=$SASS_SASSC_PATH \ --with-sass-spec-dir=$SASS_SPEC_PATH make fi make test TEST_FLAGS=--skip
Replace locale setting method in Tmux 2.5 provision
#!/usr/bin/env bash # override PS1 prompt echo 'export PS1="\$ "' >> /home/vagrant/.bashrc # simplify irb prompt echo 'IRB.conf[:PROMPT_MODE] = :SIMPLE' >> /home/vagrant/.irbrc chown -R vagrant:vagrant /home/vagrant/.irbrc sudo apt-get update sudo apt-get install -y make sudo apt-get install -y git-core expect vim gawk sudo apt-get install -y python-software-properties software-properties-common # install Tmux 2.5 VERSION=2.5 sudo apt-get -y remove tmux sudo apt-get -y install wget tar libevent-dev libncurses-dev wget https://github.com/tmux/tmux/releases/download/${VERSION}/tmux-${VERSION}.tar.gz tar xf tmux-${VERSION}.tar.gz rm -f tmux-${VERSION}.tar.gz cd tmux-${VERSION} ./configure make sudo make install cd - sudo rm -rf /usr/local/src/tmux-* sudo mv tmux-${VERSION} /usr/local/src # clone a repo used later for tests git clone https://github.com/tmux-plugins/tmux-example-plugin /home/vagrant/tmux-example-plugin chown -R vagrant:vagrant /home/vagrant/tmux-example-plugin sudo locale-gen "en_US.UTF-8" sudo echo LC_ALL=en_US.UFT-8 > /etc/default/locale sudo echo LANG=en_US.UFT-8 >> /etc/default/locale
#!/usr/bin/env bash # override PS1 prompt echo 'export PS1="\$ "' >> /home/vagrant/.bashrc # simplify irb prompt echo 'IRB.conf[:PROMPT_MODE] = :SIMPLE' >> /home/vagrant/.irbrc chown -R vagrant:vagrant /home/vagrant/.irbrc sudo apt-get update sudo apt-get install -y make sudo apt-get install -y git-core expect vim gawk sudo apt-get install -y python-software-properties software-properties-common # install Tmux 2.5 VERSION=2.5 sudo apt-get -y remove tmux sudo apt-get -y install wget tar libevent-dev libncurses-dev wget https://github.com/tmux/tmux/releases/download/${VERSION}/tmux-${VERSION}.tar.gz tar xf tmux-${VERSION}.tar.gz rm -f tmux-${VERSION}.tar.gz cd tmux-${VERSION} ./configure make sudo make install cd - sudo rm -rf /usr/local/src/tmux-* sudo mv tmux-${VERSION} /usr/local/src # clone a repo used later for tests git clone https://github.com/tmux-plugins/tmux-example-plugin /home/vagrant/tmux-example-plugin chown -R vagrant:vagrant /home/vagrant/tmux-example-plugin sudo locale-gen "en_US.UTF-8" sudo update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
Remove network configuration and ssh key.
set -o errexit set -o nounset cat << EOF > /etc/conf.d/net config_enp1s0=("dhcp") config_enp2s0="10.0.0.2/24" EOF echo 'hostname="keykeeper"' > /etc/conf.d/hostname cat /dev/null > /etc/issue cp /usr/share/zoneinfo/Europe/Stockholm /etc/localtime [ -d /root/.ssh ] || mkdir /root/.ssh; chmod 700 /root/.ssh cat << EOF > /root/.ssh/authorized_keys ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID4bv1RXziZSjHkKY5kDbxboNUGkHEpBivdX8fdvl7Zt EOF sed -i -e 's/^#s0:/s0:/1' /etc/inittab sed -i -e 's|^\(s0:.*\) -L [0-9]*|\1 -L 9600|1' /etc/inittab cat >> /etc/default/grub <<EOF # Console on serial. GRUB_TERMINAL=serial GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8" GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" EOF env-update /usr/local/keykeeper/bin/install.sh
set -o errexit set -o nounset echo 'hostname="keykeeper"' > /etc/conf.d/hostname cat /dev/null > /etc/issue cp /usr/share/zoneinfo/Europe/Stockholm /etc/localtime sed -i -e 's/^#s0:/s0:/1' /etc/inittab sed -i -e 's|^\(s0:.*\) -L [0-9]*|\1 -L 9600|1' /etc/inittab cat >> /etc/default/grub <<EOF # Console on serial. GRUB_TERMINAL=serial GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8" GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" EOF env-update /usr/local/keykeeper/bin/install.sh
Add realms to db list.
#!/bin/bash mongo sli --eval 'db.adminCommand("listDatabases").databases.forEach( function (d) {if (d.name != "local" && d.name != "admin" && d.name != "config") db.getSiblingDB(d.name).dropDatabase();})' mongo sli ../indexes/sli_indexes.js mongo is ../indexes/is_indexes.js mongo ingestion_batch_job ../indexes/ingestion_batch_job_indexes.js mongo 02f7abaa9764db2fa3c1ad852247cd4ff06b2c0a ../indexes/tenantDb_indexes.js mongo d36f43474916ad310100c9711f21b65bd8231cc6 ../indexes/tenantDb_indexes.js
#!/bin/bash mongo sli --eval 'db.adminCommand("listDatabases").databases.forEach( function (d) {if (d.name != "local" && d.name != "admin" && d.name != "config") db.getSiblingDB(d.name).dropDatabase();})' mongo sli ../indexes/sli_indexes.js mongo is ../indexes/is_indexes.js mongo ingestion_batch_job ../indexes/ingestion_batch_job_indexes.js mongo 02f7abaa9764db2fa3c1ad852247cd4ff06b2c0a ../indexes/tenantDb_indexes.js mongo d36f43474916ad310100c9711f21b65bd8231cc6 ../indexes/tenantDb_indexes.js mongo f25ce1b8a399bd8621a57427a20039b4b13935db ../indexes/tenantDb_indexes.js mongo eb962e0ee6c86d75b55e8f861737c50ca308e021 ../indexes/tenantDb_indexes.js
Copy folders in one command
#!/bin/bash # firefox extension development is lovely echo Creating necessary folders rm data -r mkdir data/ mkdir data/icons/ echo Copying files cp scripts/ data/ -r cp styles/ data/ -r cp images/ data/images/ -r cp icons/18.png data/icons/18.png cp manifest.json data/manifest.json echo Creating a package cfx xpi
#!/bin/bash # firefox extension development is lovely echo Creating necessary folders rm -rf data mkdir data/ mkdir data/icons/ echo Copying files cp -r scripts/ styles/ images/ data/ cp icons/18.png data/icons/18.png cp manifest.json data/manifest.json echo Creating a package cfx xpi
Add SSH provision for vagrant user
#!/bib/bash set -eu # Update distro apt-get -y update apt-get -y upgrade apt-get -y install linux-headers-$(uname -r) build-essential
#!/bin/bash set -eu # Update distro apt-get -y update apt-get -y upgrade apt-get -y install linux-headers-$(uname -r) build-essential # Setup sudo for vagrant sed -i -e 's/^%sudo.*/%sudo ALL=(ALL) NOPASSWD: ALL/' /etc/sudoers echo 'vagrant ALL=NOPASSWD:ALL' > /etc/sudoers.d/vagrant
Fix path for test runner
#!/usr/bin/env bash set -e -v -x pushd ${BASH_SOURCE%/*}/.. dotnet test -c Release --no-build --filter "Tarantool!=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies pushd tarantool kill -KILL $(cat tarantool.pid) brew uninstall tarantool brew install tarantool --HEAD tarantool tarantool.lua dotnet test -c Release --no-build --filter "Tarantool=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies popd popd
#!/usr/bin/env bash set -e -v -x pushd ${BASH_SOURCE%/*}/.. dotnet test -c Release --no-build --filter "Tarantool!=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies pushd tarantool kill -KILL $(cat tarantool.pid) brew uninstall tarantool brew install tarantool --HEAD tarantool tarantool.lua popd dotnet test -c Release --no-build --filter "Tarantool=1.8" tests/progaudi.tarantool.tests/progaudi.tarantool.tests.csproj -- -parallel assemblies popd
Upgrade Java 11 version in CI image
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u275-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u275b01.tar.gz" ;; java11) echo "https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9%2B11.1/OpenJDK11U-jdk_x64_linux_hotspot_11.0.9_11.tar.gz" ;; java15) echo "https://github.com/AdoptOpenJDK/openjdk15-binaries/releases/download/jdk-15.0.1%2B9/OpenJDK15U-jdk_x64_linux_hotspot_15.0.1_9.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u275-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u275b01.tar.gz" ;; java11) echo "https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9.1%2B1/OpenJDK11U-jdk_x64_linux_hotspot_11.0.9.1_1.tar.gz" ;; java15) echo "https://github.com/AdoptOpenJDK/openjdk15-binaries/releases/download/jdk-15.0.1%2B9/OpenJDK15U-jdk_x64_linux_hotspot_15.0.1_9.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
Use correct YUI compressor command
#!/bin/bash -e [ -z "$DEBUG" ] || set -x main() { minify_css minify_js prepare_output } minify_css() { [ -z "$DIRECTORY_CSS" ] || find "web-application/$DIRECTORY_CSS" -name "*.css" -exec yuicompressor {} -v --type css --output={} \; } minify_js() { [ -z "$DIRECTORY_JS" ] || find "web-application/$DIRECTORY_JS" -name "*.js" -exec yuicompressor {} -v --type js --output={} \; } prepare_output() { cp -rv web-application/. web-application-min } main
#!/bin/bash -e [ -z "$DEBUG" ] || set -x main() { minify_css minify_js prepare_output } minify_css() { [ -z "$DIRECTORY_CSS" ] || find "web-application/$DIRECTORY_CSS" -name "*.css" -exec yui-compressor {} -v --type css --output={} \; } minify_js() { [ -z "$DIRECTORY_JS" ] || find "web-application/$DIRECTORY_JS" -name "*.js" -exec yui-compressor {} -v --type js --output={} \; } prepare_output() { cp -rv web-application/. web-application-min } main
Check the actual assembly version. Don't deploy if it doesn't match the tag version.
#!/bin/bash VERSION=`echo "$1" | perl -ne 'print /^v(\d+\.\d+(?:\.\d+(?:\.\d+)?)?)$/'` if [ "$VERSION" = "" ]; then echo 'Wrong version format. Package will not be created.' exit fi nuget pack FbxSharp.nuspec -Properties version=$VERSION && \ nuget push -ApiKey $NUGET_APIKEY FbxSharp.$VERSION.nupkg
#!/bin/bash VERSION=`echo "$1" | perl -ne 'print /^v(\d+\.\d+(?:\.\d+(?:\.\d+)?)?)$/'` if [ "$VERSION" = "" ]; then echo 'Wrong version format. Package will not be created.' exit 1 fi AVERSION=`grep AssemblyVersion AssemblyInfo.cs | perl -npe 's/^.*?\"//;s/\".*$//'` if [ "$VERSION" != "$AVERSION" ] then echo "Tag doesn't match assembly version. Package will not be created." exit 1 fi nuget pack FbxSharp.nuspec -Properties version=$VERSION && \ nuget push -ApiKey $NUGET_APIKEY FbxSharp.$VERSION.nupkg
Remove ending / from path
#!/bin/bash abspath () { case `uname -s` in CYGWIN*) echo $(cygpath -ua "$1") ;; Darwin) #[[ $(echo $1 | awk '/^\//') == $1 ]] && echo "$1" || echo "$PWD/$1" [[ ${1:0:1} == "/" ]] && echo "$1" || echo "$PWD/$1" ;; Linux) echo $(readlink -f "$1") ;; *) if [[ ${1:0:1} == "/" ]]; then echo "$1" elif [[ ${1:0:2} == "./" ]]; then echo "$PWD/${1:2}" else echo "$PWD/$1" fi ;; esac } extpath () { case `uname -s` in CYGWIN*) echo $(cygpath -da "$1") ;; *) echo $(abspath "$1") ;; esac } ROOT=$(abspath `dirname "$0"`) ROM=$(abspath "$1") ROMX=$(extpath "$1") cd "$ROOT/patch_tools" tar cvzf ../patch_tools.tgz *.jar updatecert.pem updatekey.pk8 boot cygwin mac linux cd "$ROOT" tar cvzf patch_patches.tgz patches zip autopatcher.zip auto_patcher patch_tools.tgz patch_patches.tgz
#!/bin/bash abspath () { case `uname -s` in CYGWIN*) echo $(cygpath -ua "$1") | sed 's:/$::g' ;; Darwin) #[[ $(echo $1 | awk '/^\//') == $1 ]] && echo "$1" || echo "$PWD/$1" [[ ${1:0:1} == "/" ]] && echo "$1" || echo "$PWD/$1" ;; Linux) echo $(readlink -f "$1") ;; *) if [[ ${1:0:1} == "/" ]]; then echo "$1" elif [[ ${1:0:2} == "./" ]]; then echo "$PWD/${1:2}" else echo "$PWD/$1" fi ;; esac } extpath () { case `uname -s` in CYGWIN*) echo $(cygpath -da "$1") ;; *) echo $(abspath "$1") ;; esac } ROOT=$(abspath `dirname "$0"`) ROM=$(abspath "$1") ROMX=$(extpath "$1") cd "$ROOT/patch_tools" tar cvzf ../patch_tools.tgz *.jar updatecert.pem updatekey.pk8 boot cygwin mac linux cd "$ROOT" tar cvzf patch_patches.tgz patches zip autopatcher.zip auto_patcher patch_tools.tgz patch_patches.tgz
Improve gradlew console output for CI
#!/bin/bash echo ">> Building..."; DIRECTORY=$(basename ${PWD}); CUSTOM_SETTINGS_GRADLE_FILE="../settings.gradle.all"; if [ -f $CUSTOM_SETTINGS_GRADLE_FILE ]; then ../gradlew -c $CUSTOM_SETTINGS_GRADLE_FILE :$DIRECTORY:clean :$DIRECTORY:assembleRelease :$DIRECTORY:copyReleaseApkToOutputDirs; RESULT=$?; else ../gradlew :$DIRECTORY:clean :$DIRECTORY:assembleRelease :$DIRECTORY:copyReleaseApkToOutputDirs; RESULT=$?; fi echo ">> Building... DONE"; exit $RESULT;
#!/bin/bash echo ">> Building..."; IS_CI=false; if [[ ! -z "${CI}" ]]; then IS_CI=true; fi echo "app-android/build.sh: IS_CI '${IS_CI}'"; GRADLE_ARGS=""; if [ $IS_CI = true ]; then GRADLE_ARGS=" --console=plain"; fi DIRECTORY=$(basename ${PWD}); CUSTOM_SETTINGS_GRADLE_FILE="../settings.gradle.all"; if [ -f $CUSTOM_SETTINGS_GRADLE_FILE ]; then ../gradlew -c $CUSTOM_SETTINGS_GRADLE_FILE :$DIRECTORY:clean :$DIRECTORY:assembleRelease :$DIRECTORY:copyReleaseApkToOutputDirs $GRADLE_ARGS; RESULT=$?; else ../gradlew :$DIRECTORY:clean :$DIRECTORY:assembleRelease :$DIRECTORY:copyReleaseApkToOutputDirs $GRADLE_ARGS; RESULT=$?; fi echo ">> Building... DONE"; exit $RESULT;
Change url of simple test client
#!/bin/bash echo "Trying to access the backend on localhost" curl localhost:8081
#!/bin/bash echo "Trying to access the backend on localhost" curl localhost:8081/simple-mon/test
Remove ingress qdisc when container is shutdown
#!/bin/bash ID=$1 IFNAME=$2 MAC=$3 IP=$4 echo $1 $2 $3 $4
#!/bin/bash ID=$1 IFNAME=$2 MAC=$3 IP=$4 tc qdisc delete dev $IFNAME ingress 2> /dev/null
Fix disabling KVM for integration tests
#!/bin/bash set -o errexit set -o nounset set -o pipefail set -o errtrace /start.sh -novirtlet ./autogen.sh ./configure make make install if ! VIRTLET_DISABLE_KVM=1 make check; then find . -name test-suite.log | xargs cat >&2 echo >&2 "***** libvirtd.log *****" cat /var/log/libvirt/libvirtd.log >&2 exit 1 fi
#!/bin/bash set -o errexit set -o nounset set -o pipefail set -o errtrace VIRTLET_DISABLE_KVM=1 /start.sh -novirtlet ./autogen.sh ./configure make make install if ! VIRTLET_DISABLE_KVM=1 make check; then find . -name test-suite.log | xargs cat >&2 echo >&2 "***** libvirtd.log *****" cat /var/log/libvirt/libvirtd.log >&2 exit 1 fi
Test green status of regression test in jenkins
# Test site regression style # This setup is thought to be run periodically possibly triggered from source code change/checkin/commit # Must run "install_regression_server.sh" first to prepare server # If you want no password prompts, you must also install a public key on the target server # Enter global password first if [ -z "$PW_RINFO" ]; then echo "Enter sudo password: " read PW_RINFO fi EXIT_STATUS=0 function exit_on_error { if [ $EXIT_STATUS -ne 0 ];then echo "$1 returned $EXIT_STATUS! Exiting!" exit $EXIT_STATUS fi } # Test Admin fab -p $PW_RINFO target.regression -R admin app.admin.testAll EXIT_STATUS=$? exit_on_error "Admin module" # Test Checker fab -p $PW_RINFO target.regression -R checker app.checker.testAll EXIT_STATUS=$? exit_on_error "Checker module"
# Test site regression style # This setup is thought to be run periodically possibly triggered from source code change/checkin/commit # Must run "install_regression_server.sh" first to prepare server # If you want no password prompts, you must also install a public key on the target server # Enter global password first if [ -z "$PW_RINFO" ]; then echo "Enter sudo password: " read PW_RINFO fi # Test Admin fab -p $PW_RINFO target.regression -R admin app.admin.testAll EXIT_STATUS=$? if [ $EXIT_STATUS -ne 0 ];then echo "Admin module returned $EXIT_STATUS! Exiting!" exit $EXIT_STATUS fi # Test Checker fab -p $PW_RINFO target.regression -R checker app.checker.testAll EXIT_STATUS=$? if [ $EXIT_STATUS -ne 0 ];then echo "Checker module returned $EXIT_STATUS! Exiting!" exit $EXIT_STATUS fi
Make sure and export authentication information!
#!/bin/bash python manage.py dumpdata --indent 4 farms > farms/fixtures/initial_data.json python manage.py dumpdata --indent 4 admin emailuser contact_info > contact_info/fixtures/initial_data.json
#!/bin/bash python manage.py dumpdata --indent 4 farms > farms/fixtures/initial_data.json python manage.py dumpdata --indent 4 admin auth contact_info > contact_info/fixtures/initial_data.json
Fix pkg_scv_run to work with hab 0.9.3+
pkg_origin=core pkg_name=haproxy pkg_description="The Reliable, High Performance TCP/HTTP Load Balancer" pkg_version=1.6.5 pkg_maintainer='The Habitat Maintainers <humans@habitat.sh>' pkg_license=('GPL-2.0' 'LGPL-2.1') pkg_source=http://www.haproxy.org/download/1.6/src/haproxy-1.6.5.tar.gz pkg_upstream_url="http://git.haproxy.org/git/haproxy-1.6.git/" pkg_shasum=c4b3fb938874abbbbd52782087117cc2590263af78fdce86d64e4a11acfe85de pkg_svc_run='bin/haproxy -f config/haproxy.conf -db' pkg_expose=(8080) pkg_deps=(core/zlib core/pcre core/openssl) pkg_build_deps=( core/coreutils core/gcc core/pcre core/make core/openssl core/zlib ) pkg_bin_dirs=(bin) do_build() { make USE_PCRE=1 \ USE_PCRE_JIT=1 \ TARGET=linux2628 \ USE_OPENSSL=1 \ USE_ZLIB=1 \ ADDINC="$CFLAGS" \ ADDLIB="$LDFLAGS" } do_install() { mkdir -p "$pkg_prefix"/bin cp haproxy "$pkg_prefix"/bin }
pkg_origin=core pkg_name=haproxy pkg_description="The Reliable, High Performance TCP/HTTP Load Balancer" pkg_version=1.6.5 pkg_maintainer='The Habitat Maintainers <humans@habitat.sh>' pkg_license=('GPL-2.0' 'LGPL-2.1') pkg_source=http://www.haproxy.org/download/1.6/src/haproxy-1.6.5.tar.gz pkg_upstream_url="http://git.haproxy.org/git/haproxy-1.6.git/" pkg_shasum=c4b3fb938874abbbbd52782087117cc2590263af78fdce86d64e4a11acfe85de pkg_svc_run='haproxy -f config/haproxy.conf -db' pkg_expose=(8080) pkg_deps=(core/zlib core/pcre core/openssl) pkg_build_deps=( core/coreutils core/gcc core/pcre core/make core/openssl core/zlib ) pkg_bin_dirs=(bin) do_build() { make USE_PCRE=1 \ USE_PCRE_JIT=1 \ TARGET=linux2628 \ USE_OPENSSL=1 \ USE_ZLIB=1 \ ADDINC="$CFLAGS" \ ADDLIB="$LDFLAGS" } do_install() { mkdir -p "$pkg_prefix"/bin cp haproxy "$pkg_prefix"/bin }
Move variable inside method definition
include base.util.BaseUtil include string.util.StringUtil LoggerUtil(){ local time=$(BaseUtil timestamp log) getLogMsg(){ echo "[${time}] [ $(StringUtil toUpperCase ${1}) ] $(StringUtil replace $(StringUtil capitalize ${2}) - space)" } $@ }
include base.util.BaseUtil include string.util.StringUtil LoggerUtil(){ getLogMsg(){ local time=$(BaseUtil timestamp log) echo "[${time}] [ $(StringUtil toUpperCase ${1}) ] $(StringUtil replace $(StringUtil capitalize ${2}) - space)" } $@ }
Use latest version of Marathon
#!/bin/bash -ex cd $(dirname $0) rpm -i http://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm yum -q -y install mesos-0.22.1 marathon-0.8.2 mesosphere-zookeeper-3.4.6
#!/bin/bash -ex cd $(dirname $0) rpm -i http://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm yum -q -y install mesos-0.22.1 marathon-0.9.0 mesosphere-zookeeper-3.4.6
Monitor dock script requires hyphens now
#!/usr/bin/env bash IN="eDP1" EXT="DP1" xrandr > /dev/null xrandr | grep "$EXT disconnected" > /dev/null if [ $? -eq 0 ]; then xrandr --output $IN --primary --mode 1920x1080 --output $EXT --off xmodmap -e "pointer = 1 2 3" 2> /dev/null echo "-> Laptop mode" else xrandr --output $IN --mode 1920x1080 --output $EXT --primary --mode 1920x1080 --right-of $IN xmodmap -e "pointer = 3 2 1" 2> /dev/null echo "-> Desktop mode" fi
#!/usr/bin/env bash IN="eDP-1" EXT="DP-1" xrandr > /dev/null xrandr | grep "$EXT disconnected" > /dev/null if [ $? -eq 0 ]; then xrandr --output $IN --primary --mode 1920x1080 --output $EXT --off xmodmap -e "pointer = 1 2 3" 2> /dev/null echo "-> Laptop mode" else xrandr --output $IN --mode 1920x1080 --output $EXT --primary --mode 1920x1080 --right-of $IN xmodmap -e "pointer = 3 2 1" 2> /dev/null echo "-> Desktop mode" fi
Update to bukkit 1.8.8 Fixes MethodNotFound exception in 1.9
RT_JAR=/usr/lib/jvm/java-8-openjdk/jre/lib/rt.jar INCLUDES_DIR=../ BUKKIT_TARGET=bukkit-1.7.9B1938.jar DST_DIR=. javac -Xlint:all -bootclasspath "$RT_JAR:$INCLUDES_DIR/$BUKKIT_TARGET" -d ./ src/* jar -cfe $DST_DIR/NoSpam.jar net/simpvp/NoSpam/NoSpam ./* rm -rf net/
RT_JAR=/usr/lib/jvm/java-8-openjdk/jre/lib/rt.jar INCLUDES_DIR=../ BUKKIT_TARGET=bukkit-1.8.8B1.jar DST_DIR=. javac -Xlint:all -bootclasspath "$RT_JAR:$INCLUDES_DIR/$BUKKIT_TARGET" -d ./ src/* jar -cfe $DST_DIR/NoSpam.jar net/simpvp/NoSpam/NoSpam ./* rm -rf net/
Fix base path in ACF9 control script
pushd jrun4/bin > /dev/null case $1 in start) ./jrun -start cfusion>/dev/null& ;; stop) ./jrun -stop cfusion>/dev/null& ;; esac popd > /dev/null
pushd $WORK_DIR/jrun4/bin > /dev/null case $1 in start) ./jrun -start cfusion>/dev/null& ;; stop) ./jrun -stop cfusion>/dev/null& ;; esac popd > /dev/null
Fix typo in gke cluster release script
#!/bin/bash script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" project="cilium-ci" region="us-west1" if [ ! -f "${script_dir}/cluster_uri" ]; then echo "Cluster uri file not found, exiting" exit 1 fi cluster_uri="$(cat "${script_dir}/cluster-uri")" cluster_name=${cluster_uri##*/} gcloud container clusters delete --quiet --zone ${region} "${cluster_uri}" export KUBECONFIG="${script_dir}/resize-kubeconfig" gcloud container clusters get-credentials --project "${project}" --region "europe-west4" management-cluster-0 kubectl delete containerclusters.container.cnrm.cloud.google.com -n test-clusters "${cluster_name}" kubectl delete containernodepools.container.cnrm.cloud.google.com -n test-clusters "${cluster_name}" rm -f "${script_dir}/cluster-uri" "${script_dir}/cluster-name" "${script_dir}/cluster-version" "${script_dir}/registry-adder.yaml"
#!/bin/bash script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" project="cilium-ci" region="us-west1" if [ ! -f "${script_dir}/cluster-uri" ]; then echo "Cluster uri file not found, exiting" exit 1 fi cluster_uri="$(cat "${script_dir}/cluster-uri")" cluster_name=${cluster_uri##*/} gcloud container clusters delete --quiet --zone ${region} "${cluster_uri}" export KUBECONFIG="${script_dir}/resize-kubeconfig" gcloud container clusters get-credentials --project "${project}" --region "europe-west4" management-cluster-0 kubectl delete containerclusters.container.cnrm.cloud.google.com -n test-clusters "${cluster_name}" kubectl delete containernodepools.container.cnrm.cloud.google.com -n test-clusters "${cluster_name}" rm -f "${script_dir}/cluster-uri" "${script_dir}/cluster-name" "${script_dir}/cluster-version" "${script_dir}/registry-adder.yaml"
Switch liquidprompt to antigen bundle. Add zsh-syntax-highlighting, oh-my-zsh and ruby
# Window title - for Timing.app <https://itunes.apple.com/us/app/timing/id431511738?mt=12> echo -ne "\e]1;${USER}@${HOST%%.*}:${PWD/#$HOME/~}\a" # SSH - Print out the fingerprint and comment of the default public key for this user@host sshkeyfingerprint if (( $? != 0 )); then echo "No SSH key found" sshnewkey "${USER}@${HOST}" fi # Liquid Prompt # Only load in interactive shells, not from a script or from scp [[ $- = *i* ]] && source ~/dev/shell/liquidprompt/liquidprompt
# Window title - for Timing.app <https://itunes.apple.com/us/app/timing/id431511738?mt=12> echo -ne "\e]1;${USER}@${HOST%%.*}:${PWD/#$HOME/~}\a" # SSH - Print out the fingerprint and comment of the default public key for this user@host sshkeyfingerprint if (( $? != 0 )); then echo "No SSH key found" sshnewkey "${USER}@${HOST}" fi # Antigen source ~/dev/shell/antigen/antigen.zsh # Antigen Bundles antigen bundle nojhan/liquidprompt antigen bundle zsh-users/zsh-syntax-highlighting antigen bundle robbyrussell/oh-my-zsh plugins/ruby
Fix installation guide in documentation
# Configure repositories yum -y install epel-release yum -y install https://repos.fedorapeople.org/repos/openstack/EOL/openstack-icehouse/rdo-release-icehouse-4.noarch.rpm sed -i 's,openstack/openstack-icehouse/,openstack/EOL/openstack-icehouse/,' /etc/yum.repos.d/rdo-release.repo yum -y install http://opennodecloud.com/centos/7/nodeconductor-release.rpm # Install and enable services yum -y install mariadb-server nodeconductor-wsgi redis systemctl enable httpd systemctl enable mariadb systemctl enable nodeconductor-celery systemctl enable nodeconductor-celerybeat systemctl enable redis # Start MySQL and Redis systemctl start mariadb systemctl start redis # Create MySQL database mysql -e "CREATE DATABASE nodeconductor CHARACTER SET = utf8" mysql -e "CREATE USER 'nodeconductor'@'localhost' IDENTIFIED BY 'nodeconductor'" mysql -e "GRANT ALL PRIVILEGES ON nodeconductor.* to 'nodeconductor'@'localhost'" # Init NodeConductor database nodeconductor migrate --noinput chown -R nodeconductor:nodeconductor /var/log/nodeconductor # Start Celery and Apache systemctl start httpd curl --head http://localhost/api/ systemctl start nodeconductor-celery systemctl start nodeconductor-celerybeat
# Configure repositories yum -y install epel-release yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm yum -y install http://opennodecloud.com/centos/7/nodeconductor-release.rpm # Install and enable services yum -y install mariadb-server nodeconductor-wsgi redis systemctl enable httpd systemctl enable mariadb systemctl enable nodeconductor-celery systemctl enable nodeconductor-celerybeat systemctl enable redis # Start MySQL and Redis systemctl start mariadb systemctl start redis # Create MySQL database mysql -e "CREATE DATABASE nodeconductor CHARACTER SET = utf8" mysql -e "CREATE USER 'nodeconductor'@'localhost' IDENTIFIED BY 'nodeconductor'" mysql -e "GRANT ALL PRIVILEGES ON nodeconductor.* to 'nodeconductor'@'localhost'" # Init NodeConductor database nodeconductor migrate --noinput chown -R nodeconductor:nodeconductor /var/log/nodeconductor # Start Celery and Apache systemctl start httpd curl --head http://localhost/api/ systemctl start nodeconductor-celery systemctl start nodeconductor-celerybeat
Upgrade Java 16 version in CI image
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u292-b10/OpenJDK8U-jdk_x64_linux_hotspot_8u292b10.tar.gz" ;; java11) echo "https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.11%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.11_9.tar.gz" ;; java16) echo "https://github.com/AdoptOpenJDK/openjdk16-binaries/releases/download/jdk-16%2B36/OpenJDK16-jdk_x64_linux_hotspot_16_36.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
#!/bin/bash set -e case "$1" in java8) echo "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u292-b10/OpenJDK8U-jdk_x64_linux_hotspot_8u292b10.tar.gz" ;; java11) echo "https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.11%2B9/OpenJDK11U-jdk_x64_linux_hotspot_11.0.11_9.tar.gz" ;; java16) echo "https://github.com/AdoptOpenJDK/openjdk16-binaries/releases/download/jdk-16.0.1%2B9/OpenJDK16U-jdk_x64_linux_hotspot_16.0.1_9.tar.gz" ;; *) echo $"Unknown java version" exit 1 esac
Check node version on travis-ci
#!/bin/sh node -v case "$PLATFORM" in "clj") lein do clean, javac, test ;; "cljs_browser") lein do clean, javac, test ;; # TODO phantomjs "cljs_node") lein do clean, javac, cljsbuild once node node resources/test/node/app.js ;; esac
#!/bin/sh echo "Node version: " node -v echo "-----" case "$PLATFORM" in "clj") lein do clean, javac, test ;; "cljs_browser") lein do clean, javac, test ;; # TODO phantomjs "cljs_node") lein do clean, javac, cljsbuild once node node resources/test/node/app.js ;; esac
Update script for print and ebook varieties
#!/bin/bash # # Publishes nightly builds of LYSA: http://learnyou.org/lysa.html # # For a list of dependencies, see # https://github.com/learnyou/lysa/blob/master/en/README.md # This will be the name of this nightly build FNOM=`date -u +"lysa-%Y-%m-%d.pdf"` # The directory from which the nighlies are served LYSADIR=/usr/share/nginx/nightly.learnyou.org/lysa cleanup () { cd rm -rf lysa } # Start in home directory cd # Clone the lysa repo git clone git://github.com/learnyou/lysa.git cd lysa/en git submodule init git submodule update cd .. # If nothing has changed from a day ago, exit: if [[ `git whatchanged --since='24 hours ago'` == "" ]] ; then cleanup exit 0 fi # Build LYSA make # Publish the nightly # English cp lysa-en.pdf ${LYSADIR}/en/${FNOM} cd $LYSADIR/en ln -sf ${FNOM} lysa-latest.pdf cleanup
#!/bin/bash # # Publishes nightly builds of LYSA: http://learnyou.org/lysa.html # # For a list of dependencies, see # https://github.com/learnyou/lysa/blob/master/en/README.md # This will be the name of this nightly build FNOM_EBOOK=`date -u +"lysa-%Y-%m-%d-ebook.pdf"` FNOM_PRINT=`date -u +"lysa-%Y-%m-%d-print.pdf"` # The directory from which the nighlies are served LYSADIR=/usr/share/nginx/nightly.learnyou.org/lysa cleanup () { cd rm -rf lysa } # Start in home directory cd # Clone the lysa repo git clone git://github.com/learnyou/lysa.git cd lysa/en git submodule init git submodule update cd .. # If nothing has changed from a day ago, exit: if [[ `git whatchanged --since='24 hours ago'` == "" ]] ; then cleanup exit 0 fi # Build LYSA make # Publish the nightly # English cp lysa-en-ebook.pdf ${LYSADIR}/en/${FNOM_EBOOK} cp lysa-en-print.pdf ${LYSADIR}/en/${FNOM_PRINT} cd $LYSADIR/en ln -sf ${FNOM_EBOOK} lysa-latest-ebook.pdf ln -sf ${FNOM_PRINT} lysa-latest-print.pdf cleanup
Make build commands more explicit.
#! /bin/bash set -euo pipefail VERSION="$1" # Check dependencies. twine -h tox # Check that NEWS file is up-to-date. grep "$VERSION" NEWS.rst || echo "Version $VERSION missing in NEWS file." # Check for uncommited changes. set +e git diff --quiet && git diff --cached --quiet retcode=$? set -e if [[ $retcode != 0 ]]; then echo "There are uncommited changes:" git status exit 1 fi git pull # Bump version. sed -i -e "s/__version__ = '.*'/__version__ = '$VERSION'/" vulture/core.py git commit -am "Update version number to ${VERSION} for release." git tag -a "v$VERSION" -m "v$VERSION" HEAD python setup.py sdist bdist_wheel twine upload dist/vulture-${VERSION}.tar.gz dist/vulture-${VERSION}-py2.py3-none-any.whl git push git push --tags
#! /bin/bash set -euo pipefail VERSION="$1" # Check dependencies. twine -h tox # Check that NEWS file is up-to-date. grep "$VERSION" NEWS.rst || echo "Version $VERSION missing in NEWS file." # Check for uncommited changes. set +e git diff --quiet && git diff --cached --quiet retcode=$? set -e if [[ $retcode != 0 ]]; then echo "There are uncommited changes:" git status exit 1 fi git pull # Bump version. sed -i -e "s/__version__ = '.*'/__version__ = '$VERSION'/" vulture/core.py git commit -am "Update version number to ${VERSION} for release." git tag -a "v$VERSION" -m "v$VERSION" HEAD python3 setup.py sdist bdist_wheel --universal python3 -m twine upload dist/vulture-${VERSION}.tar.gz dist/vulture-${VERSION}-py2.py3-none-any.whl git push git push --tags
Work around Debian patching of Python distutils.
#!/bin/sh set -Ceu : ${PYTHON:=python} root=`cd -- "$(dirname -- "$0")" && pwd` platform=`"${PYTHON}" -c 'import distutils.util as u; print u.get_platform()'` version=`"${PYTHON}" -c 'import sys; print sys.version[0:3]'` libdir="${root}/build/lib.${platform}-${version}" export PYTHONPATH="${libdir}${PYTHONPATH:+:${PYTHONPATH}}" bindir="${root}/build/scripts-${version}" export PATH="${bindir}${PATH:+:${PATH}}" exec "$@"
#!/bin/sh set -Ceu : ${PYTHON:=python} root=`cd -- "$(dirname -- "$0")" && pwd` platform=`"${PYTHON}" -c 'import distutils.util as u; print u.get_platform()'` version=`"${PYTHON}" -c 'import sys; print sys.version[0:3]'` # The lib directory varies depending on # # (a) whether there are extension modules (here, no); and # (b) whether some Debian maintainer decided to patch the local Python # to behave as though there were. # # But there's no obvious way to just ask distutils what the name will # be. There's no harm in naming a pathname that doesn't exist, other # than a handful of microseconds of runtime, so we'll add both. libdir="${root}/build/lib" plat_libdir="${libdir}.${platform}-${version}" export PYTHONPATH="${libdir}:${plat_libdir}${PYTHONPATH:+:${PYTHONPATH}}" bindir="${root}/build/scripts-${version}" export PATH="${bindir}${PATH:+:${PATH}}" exec "$@"
Add 'back' option to update script.
#!/bin/bash # Script to copy asyncio files to the standard library tree. # Optional argument is the root of the Python 3.4 tree. # Assumes you have already created Lib/asyncio and # Lib/test/test_asyncio in the destination tree. CPYTHON=${1-$HOME/cpython} if [ ! -d $CPYTHON ] then echo Bad destination $CPYTHON exit 1 fi if [ ! -f asyncio/__init__.py ] then echo Bad current directory exit 1 fi maybe_copy() { SRC=$1 DST=$CPYTHON/$2 if cmp $DST $SRC then return fi echo ======== $SRC === $DST ======== diff -u $DST $SRC echo -n "Copy $SRC? [y/N] " read X case $X in [yY]*) echo Copying $SRC; cp $SRC $DST;; *) echo Not copying $SRC;; esac } for i in `(cd asyncio && ls *.py)` do if [ $i == selectors.py ] then maybe_copy asyncio/$i Lib/$i else maybe_copy asyncio/$i Lib/asyncio/$i fi done for i in `(cd tests && ls *.py sample.???)` do if [ $i == test_selectors.py ] then continue fi maybe_copy tests/$i Lib/test/test_asyncio/$i done maybe_copy overlapped.c Modules/overlapped.c
#!/bin/bash # Script to copy asyncio files to the standard library tree. # Optional argument is the root of the Python 3.4 tree. # Assumes you have already created Lib/asyncio and # Lib/test/test_asyncio in the destination tree. CPYTHON=${1-$HOME/cpython} if [ ! -d $CPYTHON ] then echo Bad destination $CPYTHON exit 1 fi if [ ! -f asyncio/__init__.py ] then echo Bad current directory exit 1 fi maybe_copy() { SRC=$1 DST=$CPYTHON/$2 if cmp $DST $SRC then return fi echo ======== $SRC === $DST ======== diff -u $DST $SRC echo -n "Copy $SRC? [y/N/back] " read X case $X in [yY]*) echo Copying $SRC; cp $SRC $DST;; back) echo Copying TO $SRC; cp $DST $SRC;; *) echo Not copying $SRC;; esac } for i in `(cd asyncio && ls *.py)` do if [ $i == selectors.py ] then maybe_copy asyncio/$i Lib/$i else maybe_copy asyncio/$i Lib/asyncio/$i fi done for i in `(cd tests && ls *.py sample.???)` do if [ $i == test_selectors.py ] then continue fi maybe_copy tests/$i Lib/test/test_asyncio/$i done maybe_copy overlapped.c Modules/overlapped.c
Update help to reflect new mnemonics.
#!/bin/bash # # Copyright 2012 Marco Vermeulen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # function __gvmtool_help { echo "" echo "Usage: gvm <command> <candidate> [version]" echo " gvm offline <enable|disable>" echo "" echo " command : install, uninstall, list, use, current, version, default, selfupdate, broadcast, help, offline" echo " candidate : groovy, grails, griffon, gradle, lazybones, vertx" echo " version : optional, defaults to latest stable if not provided" echo "" echo "eg: gvm install groovy" }
#!/bin/bash # # Copyright 2012 Marco Vermeulen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # function __gvmtool_help { echo "" echo "Usage: gvm <command> <candidate> [version]" echo " gvm offline <enable|disable>" echo "" echo " commands:" echo " install or i [candidate] [version]" echo " uninstall or rm [candidate] <version>" echo " list or ls [candidate]" echo " use or u [candidate] <version>" echo " default or d [candidate] <version>" echo " current or c <candidate>" echo " version or v" echo " broadcast or b" echo " help or h" echo " offline <enable|disable>" echo " selfupdate" echo "" echo " candidate : groovy, grails, griffon, gradle, lazybones, vertx" echo " version : where optional, defaults to latest stable if not provided" echo "" echo "eg: gvm install groovy" }
Comment is no longer useless
# Behavior used by all our scripts abort() { errcode=$? [ "$errcode" != "0" ] && echo "Command failed with error $errcode" >&2 exit $errcode } trap 'abort' INT TERM EXIT set -e
# Behavior used by all our scripts - trap errors and report to user if any command fails abort() { errcode=$? [ "$errcode" != "0" ] && echo "Command failed with error $errcode" >&2 exit $errcode } trap 'abort' INT TERM EXIT set -e
Initialize rvm before running bundle.
#!/bin/bash PORT=3000 service postgresql start service redis-server start service varnish start cd /Windshaft-cartodb node app.js development & cd /CartoDB-SQL-API node app.js development & cd /cartodb script/restore_redis bundle exec script/resque & bundle exec rails s -p $PORT
#!/bin/bash PORT=3000 service postgresql start service redis-server start service varnish start cd /Windshaft-cartodb node app.js development & cd /CartoDB-SQL-API node app.js development & cd /cartodb source /usr/local/rvm/scripts/rvm bundle exec script/restore_redis bundle exec script/resque & bundle exec rails s -p $PORT
Use precmd hook for updating OS X proxy icon
# Set Apple Terminal.app resume directory # based on this answer: http://superuser.com/a/315029 # 2012-10-26: (javageek) Changed code using the updated answer # Tell the terminal about the working directory whenever it changes. if [[ "$TERM_PROGRAM" == "Apple_Terminal" ]] && [[ -z "$INSIDE_EMACS" ]]; then update_terminal_cwd() { # Identify the directory using a "file:" scheme URL, including # the host name to disambiguate local vs. remote paths. # Percent-encode the pathname. local URL_PATH='' { # Use LANG=C to process text byte-by-byte. local i ch hexch LANG=C for ((i = 1; i <= ${#PWD}; ++i)); do ch="$PWD[i]" if [[ "$ch" =~ [/._~A-Za-z0-9-] ]]; then URL_PATH+="$ch" else hexch=$(printf "%02X" "'$ch") URL_PATH+="%$hexch" fi done } local PWD_URL="file://$HOST$URL_PATH" #echo "$PWD_URL" # testing printf '\e]7;%s\a' "$PWD_URL" } # Register the function so it is called whenever the working # directory changes. autoload add-zsh-hook add-zsh-hook chpwd update_terminal_cwd # Tell the terminal about the initial directory. update_terminal_cwd fi
# Set Apple Terminal.app resume directory # based on this answer: http://superuser.com/a/315029 # 2012-10-26: (javageek) Changed code using the updated answer # Tell the terminal about the working directory whenever it changes. if [[ "$TERM_PROGRAM" == "Apple_Terminal" ]] && [[ -z "$INSIDE_EMACS" ]]; then update_terminal_cwd() { # Identify the directory using a "file:" scheme URL, including # the host name to disambiguate local vs. remote paths. # Percent-encode the pathname. local URL_PATH='' { # Use LANG=C to process text byte-by-byte. local i ch hexch LANG=C for ((i = 1; i <= ${#PWD}; ++i)); do ch="$PWD[i]" if [[ "$ch" =~ [/._~A-Za-z0-9-] ]]; then URL_PATH+="$ch" else hexch=$(printf "%02X" "'$ch") URL_PATH+="%$hexch" fi done } local PWD_URL="file://$HOST$URL_PATH" #echo "$PWD_URL" # testing printf '\e]7;%s\a' "$PWD_URL" } # Register the function so it is called whenever the working # directory changes. autoload add-zsh-hook add-zsh-hook precmd update_terminal_cwd # Tell the terminal about the initial directory. update_terminal_cwd fi
Fix syntax error in launch script
#!/bin/sh -e # # Script used to setup and run virtualhere ARCH=`uname -m` && \ if [ "$ARCH" == "x86_64" ]; then \ ARCH_VR=vhusbdx86_64; \ else \ ARCH_VR=vhusbdarm; \ fi echo '*** Listing all usb-devices. Use this list to adjust AllowedDevices in config.ini ...' lsusb || echo 'Execution of command lsusb failed. Make sure you have access to USB-Devices on the host.' cd data echo '*** Deleting previous bus_usb_*-files from data directory ...' find . -name '*bus_usb_*' -delete echo '*** Checking for new version of VirtualHere ...' wget -N https://www.virtualhere.com/sites/default/files/usbserver/$ARCH_VR || echo 'Checking for remote copy of VirtualHere failed' chmod +x ./$ARCH_VR ./$ARCH_VR exit 0
#!/bin/sh -e # # Script used to setup and run virtualhere ARCH=`uname -m` if [ "$ARCH" = "x86_64" ] then echo "Your system architecture was identified as $ARCH. The x86_64 version of the virtualhere-client will be used." ARCH_VR=vhusbdx86_64 else echo "Your system architecture was identified as $ARCH. The ARM version of the virtualhere-client will be used." ARCH_VR=vhusbdarm fi echo '*** Listing all usb-devices. Use this list to adjust AllowedDevices in config.ini ...' lsusb || echo 'Execution of command lsusb failed. Make sure you have access to USB-Devices on the host.' cd data echo '*** Deleting previous bus_usb_*-files from data directory ...' find . -name '*bus_usb_*' -delete echo '*** Checking for new version of VirtualHere ...' wget -N https://www.virtualhere.com/sites/default/files/usbserver/$ARCH_VR || echo 'Checking for remote copy of VirtualHere failed' chmod +x ./$ARCH_VR ./$ARCH_VR exit 0
Add command example for production
#!/bin/bash PORT=$1 bundle exec ruby app.rb -p $PORT
#!/bin/bash PORT=$1 bundle exec ruby app.rb -p $PORT # for production # bundle exec ruby app.rb -p $PORT -e production
Stop generating useless diffs on unchanged files
#!/bin/sh for a in *.ui; do pyuic4 $a -o Ui_`basename $a .ui`.py -x; done pyrcc4 icons.qrc -o icons_rc.py
#!/bin/sh for a in *.ui do sed -i 's/^# Created.*$//' $a pyuic4 $a -o Ui_`basename $a .ui`.py -x done pyrcc4 icons.qrc -o icons_rc.py
Add show hidden files in Finder
#!/usr/bin/env bash # AirDrop: Use it over every network interface defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1 # Finder: Show ~/Library chflags nohidden ~/Library # Finder: Show icons for external harddrives and removable media on desktop defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true # Safari: Setup for development defaults write com.apple.Safari IncludeInternalDebugMenu -bool true defaults write com.apple.Safari IncludeDevelopMenu -bool true defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true defaults write NSGlobalDomain WebKitDeveloperExtras -bool true # Menu bar: Disable transparency defaults write NSGlobalDomain AppleEnableMenuBarTransparency -bool false # Gatekeeper: No Limitations spctl --master-disable # Calendar: 14 days in week view defaults write com.apple.iCal CalUIDebugDefaultDaysInWeekView 14 # MacAppStore: Show debug menu defaults write com.apple.appstore ShowDebugMenu -bool true ############################################### # Restart Finder for the changes to take effect ############################################### kill Finder
#!/usr/bin/env bash # AirDrop: Use it over every network interface defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1 # Finder: Show ~/Library chflags nohidden ~/Library # Finder: Show icons for external harddrives and removable media on desktop defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true # Finder: Show hidden files # defaults write com.apple.Finder AppleShowAllFiles YES # Safari: Setup for development defaults write com.apple.Safari IncludeInternalDebugMenu -bool true defaults write com.apple.Safari IncludeDevelopMenu -bool true defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true defaults write NSGlobalDomain WebKitDeveloperExtras -bool true # Menu bar: Disable transparency defaults write NSGlobalDomain AppleEnableMenuBarTransparency -bool false # Gatekeeper: No Limitations spctl --master-disable # Calendar: 14 days in week view defaults write com.apple.iCal CalUIDebugDefaultDaysInWeekView 14 # MacAppStore: Show debug menu defaults write com.apple.appstore ShowDebugMenu -bool true ############################################### # Restart Finder for the changes to take effect ############################################### kill Finder
Rename windows binary to have .exe
#!/usr/bin/env bash set -e my_dir="$( cd $(dirname $0) && pwd )" release_dir="$( cd ${my_dir} && cd ../.. && pwd )" workspace_dir="$( cd ${release_dir} && cd ../../../.. && pwd )" source ${release_dir}/ci/tasks/utils.sh export GOPATH=${workspace_dir} export PATH=${GOPATH}/bin:${PATH} # inputs semver_dir="${workspace_dir}/version-semver" # outputs output_dir=${workspace_dir}/out semver="$(cat ${semver_dir}/number)" timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") pushd ${release_dir} > /dev/null git_rev=`git rev-parse --short HEAD` version="${semver}-${git_rev}-${timestamp}" echo -e "\n building artifact..." go build -ldflags "-X main.version=${version}" \ -o "out/s3cli-${semver}-${GOOS}-amd64" \ github.com/pivotal-golang/s3cli echo -e "\n sha1 of artifact..." sha1sum out/s3cli-${semver}-${GOOS}-amd64 mv out/s3cli-${semver}-${GOOS}-amd64 ${output_dir}/ popd > /dev/null
#!/usr/bin/env bash set -e my_dir="$( cd $(dirname $0) && pwd )" release_dir="$( cd ${my_dir} && cd ../.. && pwd )" workspace_dir="$( cd ${release_dir} && cd ../../../.. && pwd )" source ${release_dir}/ci/tasks/utils.sh export GOPATH=${workspace_dir} export PATH=${GOPATH}/bin:${PATH} # inputs semver_dir="${workspace_dir}/version-semver" # outputs output_dir=${workspace_dir}/out semver="$(cat ${semver_dir}/number)" timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") binname="s3cli-${semver}-${GOOS}-amd64" if [ $GOOS = "windows" ]; then binname="$binname.exe"; fi pushd ${release_dir} > /dev/null git_rev=`git rev-parse --short HEAD` version="${semver}-${git_rev}-${timestamp}" echo -e "\n building artifact..." go build -ldflags "-X main.version=${version}" \ -o "out/$binname" \ github.com/pivotal-golang/s3cli echo -e "\n sha1 of artifact..." sha1sum "out/$binname" mv "out/$binname" ${output_dir}/ popd > /dev/null
Add -q and -y flags to apt-get.
#!/bin/sh set -ex wget http://kent.dl.sourceforge.net/project/boost/boost/1.56.0/boost_1_56_0.tar.bz2 tar -xjf boost_1_56_0.tar.bz2 cd boost_1_56_0 ./bootstrap.sh ./b2 --with-test BOOST_ROOT=$PWD && export BOOST_ROOT sudo apt-get update sudo apt-get install liblua5.2-dev
#!/bin/sh set -ex wget http://kent.dl.sourceforge.net/project/boost/boost/1.56.0/boost_1_56_0.tar.bz2 tar -xjf boost_1_56_0.tar.bz2 cd boost_1_56_0 ./bootstrap.sh ./b2 --with-test BOOST_ROOT=$PWD && export BOOST_ROOT sudo apt-get update sudo apt-get install -q -y liblua5.2-dev
Fix dangling ref to enumerators branch
#!/bin/bash set -e # Add the openc_bot to the Gemfile: if [ ! -f Gemfile ]; then echo "source 'https://rubygems.org'" >> Gemfile echo "gem 'openc_bot', :git => 'https://github.com/openc/openc_bot.git', :branch => 'enumerators-and-iterators'" >> Gemfile echo "gem 'mechanize'" >> Gemfile fi echo "/db/*" >> .gitignore echo "/data/*" >> .gitignore echo "/tmp/*" >> .gitignore echo "/pids/*" >> .gitignore echo "!.gitkeep" >> .gitignore mkdir -p db mkdir -p data mkdir -p tmp mkdir -p pids touch db/.gitkeep touch data/.gitkeep touch tmp/.gitkeep touch pids/.gitkeep bundle install # create the bot bundle exec openc_bot rake bot:create_simple_bot bundle install
#!/bin/bash set -e # Add the openc_bot to the Gemfile: if [ ! -f Gemfile ]; then echo "source 'https://rubygems.org'" >> Gemfile echo "gem 'openc_bot', :git => 'https://github.com/openc/openc_bot.git'" >> Gemfile echo "gem 'mechanize'" >> Gemfile fi echo "/db/*" >> .gitignore echo "/data/*" >> .gitignore echo "/tmp/*" >> .gitignore echo "/pids/*" >> .gitignore echo "!.gitkeep" >> .gitignore mkdir -p db mkdir -p data mkdir -p tmp mkdir -p pids touch db/.gitkeep touch data/.gitkeep touch tmp/.gitkeep touch pids/.gitkeep bundle install # create the bot bundle exec openc_bot rake bot:create_simple_bot bundle install
Revise user_is_admin to return 0 or 1
#------------------------------------------------------------------------------- # # install/alias.zsh # Admin installation management aliases # #------------------------------------------------------------------------------- # # Determines whether the current $USER is in the admin group. # function user_is_admin { return $(dsmemberutil checkmembership -U "${USER}" -G "admin") == "user is a member of the group" } if [[ user_is_admin ]]; then echo "user is an admin" else echo "user is not an admin" fi # Only define these aliases for admin users if [[ $(dsmemberutil checkmembership -U "${USER}" -G "admin") == "user is a member of the group" ]]; then # update # Invokes the admin package update script alias update='~/.dotfiles/install/update.sh' # cleanup # Cleans up old gems, checks Homebrew function cleanup { gem cleanup # Print any warnings about the current homebrew setup, they will need to be # resolved manually. brew doctor # Check homebrew cask brew cask doctor } fi
#------------------------------------------------------------------------------- # # install/alias.zsh # Admin installation management aliases # #------------------------------------------------------------------------------- # # Determines whether the current $USER is in the admin group. # function user_is_admin { if [[ $(dsmemberutil checkmembership -U "${USER}" -G "admin") == "user is a member of the group" ]]; then return 0 # true else return 1 # false fi } if [[ user_is_admin ]]; then echo "user is an admin" else echo "user is not an admin" fi # Only define these aliases for admin users if [[ $(dsmemberutil checkmembership -U "${USER}" -G "admin") == "user is a member of the group" ]]; then # update # Invokes the admin package update script alias update='~/.dotfiles/install/update.sh' # cleanup # Cleans up old gems, checks Homebrew function cleanup { gem cleanup # Print any warnings about the current homebrew setup, they will need to be # resolved manually. brew doctor # Check homebrew cask brew cask doctor } fi
Merge remote-tracking branch 'upstream/master' into base-on-starter
#!/bin/bash travis login travis sync rm -f deploy_key.* ssh-keygen -t rsa -b 4096 -f deploy_key travis encrypt-file deploy_key | \ grep -o -P "encrypted_\K[0-9a-f]+" | \ head -n 1 | > travis-encryption-label
#!/bin/bash travis login travis sync rm -f deploy_key.* ssh-keygen -t rsa -b 4096 -f deploy_key travis encrypt-file deploy_key | \ grep -o -P "encrypted_\K[0-9a-f]+" | \ head -n 1 \ > travis-encryption-label
Add alias for npm global install
eval "$(npm completion 2>/dev/null)" # npm package names are lowercase # - https://twitter.com/substack/status/23122603153150361 # Thus, we've used camelCase for the following aliases: # Install and save to dependencies in your package.json # npms is used by https://www.npmjs.com/package/npms alias npmS="npm i -S " # Install and save to dev-dependencies in your package.json # npmd is used by https://github.com/dominictarr/npmd alias npmD="npm i -D " # Execute command from node_modules folder based on current directory # i.e npmE gulp alias npmE='PATH="$(npm bin)":"$PATH"'
eval "$(npm completion 2>/dev/null)" # npm package names are lowercase # - https://twitter.com/substack/status/23122603153150361 # Thus, we've used camelCase for the following aliases: # Install dependencies globally alias npmg="npm i -g " # Install and save to dependencies in your package.json # npms is used by https://www.npmjs.com/package/npms alias npmS="npm i -S " # Install and save to dev-dependencies in your package.json # npmd is used by https://github.com/dominictarr/npmd alias npmD="npm i -D " # Execute command from node_modules folder based on current directory # i.e npmE gulp alias npmE='PATH="$(npm bin)":"$PATH"'
Add a todo for later.
#!/bin/bash #---------------------------Base System Install - should be run on every system------------------------------ apt-get update apt-get install -y curl apt-get install -y git apt-get install -y vim apt-get install -y zsh sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)" mkdir /bs cd /bs git clone https://github.com/thegeekkid/zshconfig.git cd zshconfig git checkout teamproject cp terminalparty.zsh-theme ~/.oh-my-zsh/themes/terminalparty.zsh-theme cp zshrc ~/.zshrc apt-get install -y build-essential apt-get install -y php5 #---------------------------------------End Base System Install---------------------------------------------- #------------------------------------------Install Jenkins--------------------------------------------------- wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - sudo sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list' sudo apt-get update sudo apt-get install jenkins #----------------------------------------Finish Install Jenkins----------------------------------------------
#!/bin/bash #---------------------------Base System Install - should be run on every system------------------------------ apt-get update apt-get install -y curl apt-get install -y git apt-get install -y vim apt-get install -y zsh sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)" mkdir /bs cd /bs git clone https://github.com/thegeekkid/zshconfig.git cd zshconfig git checkout teamproject cp terminalparty.zsh-theme ~/.oh-my-zsh/themes/terminalparty.zsh-theme cp zshrc ~/.zshrc apt-get install -y build-essential apt-get install -y php5 #---------------------------------------End Base System Install---------------------------------------------- #------------------------------------------Install Jenkins--------------------------------------------------- wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - sudo sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list' sudo apt-get update sudo apt-get install jenkins #----------------------------------------Finish Install Jenkins---------------------------------------------- #TODO: Configure Jenkins
Use {} instead of () for shell variable.
#!/bin/sh # Upload whatever cert the named server presents to the pilot log set -e export PYTHONPATH=$(PYTHONPATH):../python SERVER=$1 TMP=`mktemp /tmp/cert.XXXXXX` openssl s_client -connect $SERVER:443 -showcerts < /dev/null | tee $TMP if ./ct --ct_server=ct.googleapis.com/pilot --http_log --logtostderr --ct_server_submission=$TMP upload then echo Done else echo Try fixing the chain TMP2=`mktemp /tmp/cert.XXXXXX` ./fix-chain.py $TMP | tee $TMP2 ./ct --ct_server=ct.googleapis.com/pilot --http_log --logtostderr --ct_server_submission=$TMP2 upload rm $TMP2 fi rm $TMP
#!/bin/sh # Upload whatever cert the named server presents to the pilot log set -e export PYTHONPATH=${PYTHONPATH}:../python SERVER=$1 TMP=`mktemp /tmp/cert.XXXXXX` openssl s_client -connect $SERVER:443 -showcerts < /dev/null | tee $TMP if ./ct --ct_server=ct.googleapis.com/pilot --http_log --logtostderr --ct_server_submission=$TMP upload then echo Done else echo Try fixing the chain TMP2=`mktemp /tmp/cert.XXXXXX` ./fix-chain.py $TMP | tee $TMP2 ./ct --ct_server=ct.googleapis.com/pilot --http_log --logtostderr --ct_server_submission=$TMP2 upload rm $TMP2 fi rm $TMP
Add sed command to replace newlines
# Return portion after match sed -n -e 's/FROM //p' Dockerfile # Replace something in file. sed can take different delimiters. Here it's #. g for global, everywhere where the expression is found sed -i 's#FROM #FROM armhf/#g' Dockerfile # -i to do apply changes to the same file # Remove lines FROM TO. http://www.yourownlinux.com/2015/04/sed-command-in-linux-delete-lines-from-file.html sed -i '55,56 d' Dockerfile # Remove last line in file sed -i '$ d' Dockerfile # Delete lines containing a pattern. Can use !d to delete lines NOT containing pattern sed '/PATTERN/ d' FILE # Return the line with the match grep "^s[0-9].*" FILE # grep with OR \| grep "^s[0-9].*\|wave.*" actionLabel.txt
# Return portion after match sed -n -e 's/FROM //p' Dockerfile # Replace something in file. sed can take different delimiters. Here it's #. g for global, everywhere where the expression is found sed -i 's#FROM #FROM armhf/#g' Dockerfile # -i to do apply changes to the same file # Remove lines FROM TO. http://www.yourownlinux.com/2015/04/sed-command-in-linux-delete-lines-from-file.html sed -i '55,56 d' Dockerfile # Remove last line in file sed -i '$ d' Dockerfile # Delete lines containing a pattern. Can use !d to delete lines NOT containing pattern sed '/PATTERN/ d' FILE # Return the line with the match grep "^s[0-9].*" FILE # grep with OR \| grep "^s[0-9].*\|wave.*" actionLabel.txt # Remove newlines in file tr -d "\n\r" < input_filename # Replace newlines with \\n sed ':a;N;$!ba;s/\n/\\n/g' file # Replace newlines with \\n sed -z 's/\n/\\n/g' file
Add verbose execution on end to end tests
set -e # Create temp repo pushd $(mktemp -d) temp_repo=$(pwd) git init echo "A" > a.txt git add * git commit -am "First commit" popd # tests workspace pushd $(mktemp -d) gitorg init gitorg add local:$temp_repo gitorg add web:https://github.com/mariocj89/dothub.git gitorg add github:dothub-sandbox gitorg add github:mariocj89/github-* popd
set -e set -x # Create temp repo pushd $(mktemp -d) temp_repo=$(pwd) git init echo "A" > a.txt git add * git commit -am "First commit" popd # tests workspace pushd $(mktemp -d) gitorg init gitorg add local:$temp_repo gitorg add web:https://github.com/mariocj89/dothub.git gitorg add github:dothub-sandbox gitorg add github:mariocj89/github-* popd
Use pypy for more speed.
#!/bin/sh red=$(tput setaf 1) normal=$(tput sgr0) for i in *.py; do printf "$i " python3 -m doctest $i > $i.log if [[ $? -ne 0 ]]; then printf "${red}✗${normal}\n" else printf "✓\n" fi done
#!/bin/sh red=$(tput setaf 1) normal=$(tput sgr0) for i in *.py; do printf "$i " pypy3 -m doctest $i > $i.log if [[ $? -ne 0 ]]; then printf "${red}✗${normal}\n" else printf "✓\n" fi done
Make the commit message even longer.
#!/bin/bash if [ "$TRAVIS_JDK_VERSION" = "oraclejdk8" ]; then git clone https://${GH_TOKEN}@github.com/LapisBlue/Javadocs .jd ./gradlew javadoc -x :SpongeAPI:javadoc && { cd .jd/ git config user.name "Lapislazuli" git config user.email "lapislazuli@lapis.blue" git rm -r commons cp -r ../build/docs/javadoc commons git add -A git commit -m "Update to $TRAVIS_COMMIT (Build $TRAVIS_BUILD_NUMBER)" git push origin gh-pages } fi
#!/bin/bash if [ "$TRAVIS_JDK_VERSION" = "oraclejdk8" ]; then git clone https://${GH_TOKEN}@github.com/LapisBlue/Javadocs .jd ./gradlew javadoc -x :SpongeAPI:javadoc && { cd .jd/ git config user.name "Lapislazuli" git config user.email "lapislazuli@lapis.blue" git rm -r commons cp -r ../build/docs/javadoc commons git add -A git commit -m "Update to $TRAVIS_REPO_SLUG@$TRAVIS_COMMIT (Build $TRAVIS_BUILD_NUMBER)" git push origin gh-pages } fi
Test against specific revision of graphql-tools source to avoid failing tests
#!/usr/bin/env bash set -ex cd $(dirname $0)/data BAB_TAG=v$(node -p 'require("babylon/package.json").version') if [ ! -d babylon ] then git clone --branch "$BAB_TAG" --depth 1 \ https://github.com/babel/babel.git mv babel/packages/babylon . rm -rf babel fi if [ ! -d graphql-tools-src ] then git clone --depth 1 https://github.com/apollographql/graphql-tools.git mv graphql-tools/src \ graphql-tools-src rm -rf graphql-tools fi cd .. # back to the recast/test/ directory exec mocha --reporter spec --full-trace $@ run.js
#!/usr/bin/env bash set -ex cd $(dirname $0)/data BAB_TAG=v$(node -p 'require("babylon/package.json").version') if [ ! -d babylon ] then git clone --branch "$BAB_TAG" --depth 1 \ https://github.com/babel/babel.git mv babel/packages/babylon . rm -rf babel fi if [ ! -d graphql-tools-src ] then git clone --depth 1 https://github.com/apollographql/graphql-tools.git pushd graphql-tools git fetch origin refs/pull/807/merge:pull/807/merge git checkout pull/807/merge popd mv graphql-tools/src \ graphql-tools-src rm -rf graphql-tools fi cd .. # back to the recast/test/ directory exec mocha --reporter spec --full-trace $@ run.js
Load Python modules in Docker container initialization
export MINERVA_DB_NAME=minerva export PGDATABASE=$MINERVA_DB_NAME export PGUSER=postgres export ADD_PGTAB_EXTENSION=true export PYTHONUNBUFFERED=1 create-minerva-database if [[ ! -z "$LOAD_SAMPLE_DATA" ]] then minerva initialize -i /instance --load-sample-data else minerva initialize -i /instance fi
export MINERVA_DB_NAME=minerva export PGDATABASE=$MINERVA_DB_NAME export PGUSER=postgres export ADD_PGTAB_EXTENSION=true export PYTHONUNBUFFERED=1 create-minerva-database # Make sure all requirements for sample data generation etc. are loaded. pip3 install -r /instance/requirements.txt if [[ ! -z "$LOAD_SAMPLE_DATA" ]] then minerva initialize -i /instance --load-sample-data else minerva initialize -i /instance fi
Fix env var used for appsdk bump
#!/bin/bash sed -i "s/appsdk-src:tgz:.*\",/appsdk-src:tgz:${APPSDK_SRC_VERSION}\",/" js_dependencies.json git add js_dependencies.json git commit -F appsdk.bump --author="${JOB_NAME} <bogus@rallydev.com>" git push origin HEAD:$BRANCH rm appsdk.bump
#!/bin/bash sed -i "s/appsdk-src:tgz:.*\",/appsdk-src:tgz:${APPSDK_SRC_VERSION}\",/" js_dependencies.json git add js_dependencies.json git commit -F appsdk.bump --author="${JOB_NAME} <bogus@rallydev.com>" git push origin HEAD:$GIT_BRANCH rm appsdk.bump
Change the default password for mysql
sudo apt-get remove --purge "^mysql.*" sudo apt-get autoremove sudo apt-get autoclean sudo rm -rf /var/lib/mysql sudo rm -rf /var/log/mysql echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections wget http://dev.mysql.com/get/mysql-apt-config_0.7.3-1_all.deb sudo dpkg --install mysql-apt-config_0.7.3-1_all.deb sudo apt-get update -q sudo apt-get install -q -y -o Dpkg::Options::=--force-confnew mysql-server sudo mysql_upgrade
sudo apt-get remove --purge "^mysql.*" sudo apt-get autoremove sudo apt-get autoclean sudo rm -rf /var/lib/mysql sudo rm -rf /var/log/mysql echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections wget http://dev.mysql.com/get/mysql-apt-config_0.7.3-1_all.deb sudo dpkg --install mysql-apt-config_0.7.3-1_all.deb sudo apt-get update -q sudo apt-get install -q -y -o Dpkg::Options::=--force-confnew mysql-server sudo mysql_upgrade sudo service mysql restart TEMP_PASSWORD=`sudo cat /var/log/mysql/error.log | grep "A temporary password is generated for" | awk '{print $NF}'` sudo mysql -u root -p $TEMP_PASSWORD -e "SET PASSWORD = PASSWORD('');"
Fix before install to know about python 3.8
#! /bin/bash echo $TRAVIS_OS_NAME if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then # Travis has an old version of pyenv by default, upgrade it brew update > /dev/null 2>&1 brew outdated pyenv || brew upgrade pyenv pyenv --version # Find the latest requested version of python case "$TOXENV" in py34) python_minor=4;; py35) python_minor=5;; py36) python_minor=6;; py36-marshmallow2) python_minor=6;; py36-marshmallow3) python_minor=6;; py37) python_minor=7;; esac latest_version=`pyenv install --list | grep -e "^[ ]*3\.$python_minor" | tail -1` pyenv install $latest_version pyenv local $latest_version fi
#! /bin/bash echo $TRAVIS_OS_NAME if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then # Travis has an old version of pyenv by default, upgrade it brew update > /dev/null 2>&1 brew outdated pyenv || brew upgrade pyenv pyenv --version # Find the latest requested version of python case "$TOXENV" in py35) python_minor=5;; py36) python_minor=6;; py36-marshmallow2) python_minor=6;; py36-marshmallow3) python_minor=6;; py37) python_minor=7;; py38) python_minor=8;; esac latest_version=`pyenv install --list | grep -e "^[ ]*3\.$python_minor" | tail -1` pyenv install $latest_version pyenv local $latest_version fi
Check if module is loaded before unloading
if test -f .vbox_version ; then # Without libdbus virtualbox would not start automatically after compile apt-get -y install --no-install-recommends libdbus-1-3 # The netboot installs the VirtualBox support (old) so we have to remove it if test -f /etc/init.d/virtualbox-ose-guest-utils ; then /etc/init.d/virtualbox-ose-guest-utils stop fi rmmod vboxguest aptitude -y purge virtualbox-ose-guest-x11 virtualbox-ose-guest-dkms virtualbox-ose-guest-utils aptitude -y install dkms # Install the VirtualBox guest additions VBOX_ISO=VBoxGuestAdditions.iso mount -o loop $VBOX_ISO /mnt yes|sh /mnt/VBoxLinuxAdditions.run umount /mnt # Cleanup Virtualbox rm $VBOX_ISO fi
if test -f .vbox_version ; then # Without libdbus virtualbox would not start automatically after compile apt-get -y install --no-install-recommends libdbus-1-3 # The netboot installs the VirtualBox support (old) so we have to remove it if test -f /etc/init.d/virtualbox-ose-guest-utils ; then /etc/init.d/virtualbox-ose-guest-utils stop fi if lsmod | grep -q vboxguest; then rmmod vboxguest fi aptitude -y purge virtualbox-ose-guest-x11 virtualbox-ose-guest-dkms virtualbox-ose-guest-utils aptitude -y install dkms # Install the VirtualBox guest additions VBOX_ISO=VBoxGuestAdditions.iso mount -o loop $VBOX_ISO /mnt yes|sh /mnt/VBoxLinuxAdditions.run umount /mnt # Cleanup Virtualbox rm $VBOX_ISO fi
Include darwin and 386 in goreleaser
cat <<EOF build: main: ./cmd/ ldflags: "$1" goos: - linux goarch: - amd64 archive: name_template: "{{.Binary}}-{{.Version}}-{{.Os}}-{{.Arch}}" replacements: amd64: amd64 darwin: darwin linux: linux EOF
# This shellscript generates goreleaser.yaml with ldflags set to the first argument. cat <<EOF build: main: ./cmd/ ldflags: "$1" goos: - linux - darwin goarch: - amd64 - 386 archive: name_template: "{{.Binary}}-{{.Version}}-{{.Os}}-{{.Arch}}" replacements: amd64: amd64 darwin: darwin linux: linux EOF
Remove obsolete test for redirect files in output
#!/usr/bin/env sh # CI publish script export DITA_HOME=$PWD/dita-ot-$DITA_OT_VERSION export DITA_OT_DEV=$(find $PWD -name 'dita-ot-*+*' -type d | head -1) export SITE_DIR=$PWD/dita-ot.github.io #if [ "$TRAVIS_PULL_REQUEST" = "false" -a "$TRAVIS_BRANCH" = "develop" ]; then # Remove target output directory before generating new output for i in `find "$SITE_DIR/dev" -type f`; do if grep -q 'redirect_to:' "$i"; then echo "Skip redirect file $i" elif grep -q 'Generated from DITA source' "$i"; then echo "Skip generated file $i" elif grep -q 'generated: true' "$i"; then echo "Skip generated file $i" else rm "$i" fi done # Re-create /dev folder & generate site output there ./gradlew site -PditaHome=$DITA_HOME -PoutputDir=$SITE_DIR/dev -PditaHomeSrc=$DITA_OT_DEV --info --stacktrace --no-daemon #else # ./gradlew -PditaHome=$DITA_HOME html --info --stacktrace --no-daemon #fi
#!/usr/bin/env sh # CI publish script export DITA_HOME=$PWD/dita-ot-$DITA_OT_VERSION export DITA_OT_DEV=$(find $PWD -name 'dita-ot-*+*' -type d | head -1) export SITE_DIR=$PWD/dita-ot.github.io #if [ "$TRAVIS_PULL_REQUEST" = "false" -a "$TRAVIS_BRANCH" = "develop" ]; then # Remove target output directory before generating new output for i in `find "$SITE_DIR/dev" -type f`; do if grep -q 'Generated from DITA source' "$i"; then echo "Skip generated file $i" elif grep -q 'generated: true' "$i"; then echo "Skip generated file $i" else rm "$i" fi done # Re-create /dev folder & generate site output there ./gradlew site -PditaHome=$DITA_HOME -PoutputDir=$SITE_DIR/dev -PditaHomeSrc=$DITA_OT_DEV --info --stacktrace --no-daemon #else # ./gradlew -PditaHome=$DITA_HOME html --info --stacktrace --no-daemon #fi
Correct the error in the SQL statement
#!/bin/bash -e # Run on -4 # Need to copy Mysql connector jar to /var/lib/sqoop. # See documentation http://www.cloudera.com/content/cloudera/en/documentation/core/v5-2-x/topics/cdh_ig_jdbc_driver_install.html sudo -u hdfs hadoop fs -mkdir -p /data/movielens sudo -u hdfs hadoop fs -chown -R $USER: /data/movielens sudo -u hdfs hadoop fs -mkdir -p /user/$USER sudo -u hdfs hadoop fs -chown -R $USER: /user/$USER # Need to have MySQL instance permissions set properly so all nodes of cluster (not just the node running sqoop command) can access the database # Cleanup if necessary sudo -u hdfs hadoop fs -rmr /data/movielens/movie #Sqoop command is sqoop import --connect jdbc:mysql://mgrover-haa-2.vpc.cloudera.com:3306/oltp --username root --query 'SELECT movie.*, group_concat(genre.name) FROM movie JOIN movie_genre ON (movie.id = movie_genre.movie_id) JOIN genre ON (movie_genre.genre_id = genre.id) GROUP BY movie.id WHERE $CONDITIONS' \ --split-by movie.id --as-avrodatafile --target-dir /data/movielens/movie
#!/bin/bash -e # Run on -4 # Need to copy Mysql connector jar to /var/lib/sqoop. # See documentation http://www.cloudera.com/content/cloudera/en/documentation/core/v5-2-x/topics/cdh_ig_jdbc_driver_install.html sudo -u hdfs hadoop fs -mkdir -p /data/movielens sudo -u hdfs hadoop fs -chown -R $USER: /data/movielens sudo -u hdfs hadoop fs -mkdir -p /user/$USER sudo -u hdfs hadoop fs -chown -R $USER: /user/$USER # Need to have MySQL instance permissions set properly so all nodes of cluster (not just the node running sqoop command) can access the database # Cleanup if necessary sudo -u hdfs hadoop fs -rmr /data/movielens/movie #Sqoop command is sqoop import --connect jdbc:mysql://mgrover-haa-2.vpc.cloudera.com:3306/oltp --username root --query 'SELECT movie.*, group_concat(genre.name) FROM movie JOIN movie_genre ON (movie.id = movie_genre.movie_id) JOIN genre ON (movie_genre.genre_id = genre.id) WHERE $CONDITIONS GROUP BY movie.id' \ --split-by movie.id --as-avrodatafile --target-dir /data/movielens/movie
Use a tag to push images
#!/bin/bash set -e docker_projects=( "parser" "orchestration" "server" "web") docker login -e "$DOCKER_EMAIL" -p "$DOCKER_PASSWORD" -u "$DOCKER_USERNAME" for project in "${docker_projects[@]}" do docker push "bazooka/$project" done
#!/bin/bash set -e docker_projects=( "parser" "orchestration" "server" "web") if [ -n "$DO_DOCKER_PUSH" ]; then docker login -e "$DOCKER_EMAIL" -p "$DOCKER_PASSWORD" -u "$DOCKER_USERNAME" for project in "${docker_projects[@]}" do image="bazooka/$project" if [ -n "$BZK_VERSION" ]; then docker tag "bazooka/$project" "bazooka/$project:$BZK_VERSION" image="bazooka/$project:$BZK_VERSION" fi docker push "$image" done else echo "Variable DO_DOCKER_PUSH not defined, skipping docker push" fi
Package script now supports JS files.
#!/bin/bash echo "Assembling JAR and preparing WAR resources." sbt assembly package-war echo "Adding webapp resources to JAR." cd target zip -r anchortab.jar webapp -x webapp/WEB-INF/classes\* webapp/WEB-INF/lib\* cd .. echo "Now anchortab.jar is ready for deployment."
#!/bin/bash echo "Assembling JAR and preparing WAR resources." sbt resources:compile-sass resources:copy-scripts assembly package-war echo "Adding webapp resources to JAR." cd target cp -r javascripts/* webapp/javascripts/ zip -r anchortab.jar webapp -x webapp/WEB-INF/classes\* webapp/WEB-INF/lib\* cd .. echo "Now anchortab.jar is ready for deployment."
Fix "latest" image not pushed to quay.io
#!/bin/bash -e docker login -u "$QUAY_USER" -p "$QUAY_PASSWORD" quay.io MAKE="make -C scripts/docker-images/release" $MAKE clean $MAKE docker-build DOCKER_REGISTRY=quay.io/ $MAKE docker-push DOCKER_REGISTRY=quay.io/ if [ -n "$TRAVIS_TAG" ]; then $MAKE docker-push-version DOCKER_REGISTRY=quay.io/ else $MAKE docker-push-version DOCKER_REGISTRY=quay.io/ PUSH_DOCKER_TAG=${TRAVIS_BRANCH/master/canary} fi
#!/bin/bash -e docker login -u "$QUAY_USER" -p "$QUAY_PASSWORD" quay.io MAKE="make -C scripts/docker-images/release" $MAKE clean $MAKE docker-build DOCKER_REGISTRY=quay.io/ $MAKE docker-push DOCKER_REGISTRY=quay.io/ if [ -n "$TRAVIS_TAG" ]; then $MAKE docker-push-version DOCKER_REGISTRY=quay.io/ PUSH_DOCKER_TAG="$TRAVIS_TAG" else $MAKE docker-push-version DOCKER_REGISTRY=quay.io/ PUSH_DOCKER_TAG="${TRAVIS_BRANCH/master/canary}" fi
Remove old git config when overwriting
#!/bin/bash # # This will setup a gitconfig file if one does not exist set -e if [[ -s "${HOME}/.gitconfig" ]]; then read -n1 -p "${HOME}/.gitconfig already exists would you like to overwrite it? " answer echo if [[ "$answer" != "y" && "$answer" != "Y" ]]; then echo "Skipping git-config.sh" exit 0 fi fi global_config() { git config --global "$1" "$2" } read -p "Full name: " name read -p "Email address: " email global_config "user.name" "$name" global_config "user.email" "$email" global_config "color.ui" "true" global_config "core.excludesfile" "${HOME}/.cvsignore" exit 0
#!/bin/bash # # This will setup a gitconfig file if one does not exist set -e if [[ -s "${HOME}/.gitconfig" ]]; then read -n1 -p "${HOME}/.gitconfig already exists would you like to overwrite it? " answer echo if [[ "$answer" != "y" && "$answer" != "Y" ]]; then echo "Skipping git-config.sh" exit 0 fi rm -f "${HOME}/.gitconfig" fi global_config() { git config --global "$1" "$2" } read -p "Full name: " name read -p "Email address: " email global_config "user.name" "$name" global_config "user.email" "$email" global_config "color.ui" "true" global_config "core.excludesfile" "${HOME}/.cvsignore" exit 0
Update to reflect testing appending to an existing file
include array.util.ArrayUtil include file.util.FileUtil include file.writer.FileWriter include string.util.StringUtil include test.executor.TestExecutor include test.util.TestUtil FileWriterTest(){ run(){ TestExecutor executeTest FileWriterTest } setUp(){ local _testFile=$(FileUtil makeFile ${testFile}) } tearDown(){ local testDir=($(StringUtil split testFile /)) local testDir=($(ArrayUtil trim testDir 1)) local testDir=$(StringUtil replace testDir space /) rm -rf /${testDir} } testAppend(){ FileWriter append ${testFile} "this is a string" local content="this is a string" local fileContent=$(FileUtil getContent ${testFile}) ${assertEquals} content fileContent } testReplace(){ FileWriter append ${testFile} "this is a string" FileWriter replace ${testFile} this that local content="that is a string" local fileContent=$(FileUtil getContent ${testFile}) ${assertEquals} content fileContent } local assertEquals="TestUtil assertEquals" local testFile=$(pwd)/bash-toolbox/test/dependencies/test.txt setUp $@ tearDown }
include array.util.ArrayUtil include file.util.FileUtil include file.writer.FileWriter include string.util.StringUtil include test.executor.TestExecutor include test.util.TestUtil FileWriterTest(){ run(){ TestExecutor executeTest FileWriterTest } setUp(){ local _testFile=$(FileUtil makeFile ${testFile}) } tearDown(){ local testDir=($(StringUtil split testFile /)) local testDir=($(ArrayUtil trim testDir 1)) local testDir=$(StringUtil replace testDir space /) rm -rf /${testDir} } testAppendExistingFile(){ FileWriter append ${testFile} "this is a string" local content="this is a string" local fileContent=$(FileUtil getContent ${testFile}) ${assertEquals} content fileContent } testReplace(){ FileWriter append ${testFile} "this is a string" FileWriter replace ${testFile} this that local content="that is a string" local fileContent=$(FileUtil getContent ${testFile}) ${assertEquals} content fileContent } local assertEquals="TestUtil assertEquals" local testFile=$(pwd)/bash-toolbox/test/dependencies/test.txt setUp $@ tearDown }
Use new detox package on PyPI (now released!)
#!/bin/sh sudo apt-get update sudo apt-get install python2.6 python2.6-dev python3.4 python3.4-dev pip install --use-mirrors --upgrade wheel pip install --use-mirrors --upgrade --use-wheel detox misspellings docutils "tox < 2.0" pip install https://bitbucket.org/hpk42/detox/get/tip.zip find src/ -name "*.py" | misspellings -f - detox
#!/bin/sh sudo apt-get update sudo apt-get install python2.6 python2.6-dev python3.4 python3.4-dev pip install --use-mirrors --upgrade wheel pip install --use-mirrors --upgrade --use-wheel detox misspellings docutils find src/ -name "*.py" | misspellings -f - detox
Fix dash, this is not a makefile
#!/usr/bin/env bash -rm -r dist python setup.py sdist twine upload dist/* rm -r dist rm -r src/rnaseq_lib.egg-info
#!/usr/bin/env bash rm -r dist python setup.py sdist twine upload dist/* rm -r dist rm -r src/rnaseq_lib.egg-info
Make the Maven lib installation shell script runnable in Linux
#!/bin/sh if [[ ! -f ~/.m2/repository/sigar/sigar-native/1.0/sigar-native-1.0.jar ]] ; then mvn install:install-file -Dfile=lib/sigar-native-1.0.jar -DgroupId=sigar -DartifactId=sigar-native -Dversion=1.0 -Dpackaging=jar break fi if [[ ! -f ~/.m2/repository/grinder/grinder-patch/3.9.1/grinder-patch-3.9.1.jar ]] ; then mvn install:install-file -Dfile=lib/grinder-3.9.1-patch.jar -DgroupId=grinder -DartifactId=grinder-patch -Dversion=3.9.1-patch -Dpackaging=jar break fi if [[ ! -f ~/.m2/repository/org/ngrinder/universal-analytics-java/1.0/universal-analytics-java-1.0.jar ]] ; then mvn install:install-file -Dfile=lib/universal-analytics-java-1.0.jar -DgroupId=org.ngrinder -DartifactId=universal-analytics-java -Dversion=1.0 -Dpackaging=jar break; fi
#!/bin/sh if [[ ! -f ~/.m2/repository/sigar/sigar-native/1.0/sigar-native-1.0.jar ]] ; then mvn install:install-file -Dfile=lib/sigar-native-1.0.jar -DgroupId=sigar -DartifactId=sigar-native -Dversion=1.0 -Dpackaging=jar break fi if [[ ! -f ~/.m2/repository/grinder/grinder-patch/3.9.1/grinder-patch-3.9.1.jar ]] ; then mvn install:install-file -Dfile=lib/grinder-3.9.1-patch.jar -DgroupId=grinder -DartifactId=grinder-patch -Dversion=3.9.1-patch -Dpackaging=jar break fi if [[ ! -f ~/.m2/repository/org/ngrinder/universal-analytics-java/1.0/universal-analytics-java-1.0.jar ]] ; then mvn install:install-file -Dfile=lib/universal-analytics-java-1.0.jar -DgroupId=org.ngrinder -DartifactId=universal-analytics-java -Dversion=1.0 -Dpackaging=jar break; fi
Change to use mfit instead of m4a-fit-analysis
#!/bin/bash # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkdir m4a cd m4a VERSION=`wget -O - https://anthos-migrate-release.storage.googleapis.com/latest` wget https://anthos-migrate-release.storage.googleapis.com/${VERSION}/linux/amd64/m4a-fit-collect.sh chmod +x m4a-fit-collect.sh wget https://anthos-migrate-release.storage.googleapis.com/${VERSION}/linux/amd64/m4a-fit-analysis chmod +x m4a-fit-analysis sudo ./m4a-fit-collect.sh ./m4a-fit-analysis m4a-collect-*-*.tar
#!/bin/bash # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkdir m4a cd m4a VERSION=`wget -O - https://anthos-migrate-release.storage.googleapis.com/latest` wget https://anthos-migrate-release.storage.googleapis.com/${VERSION}/linux/amd64/m4a-fit-collect.sh chmod +x m4a-fit-collect.sh wget https://anthos-migrate-release.storage.googleapis.com/${VERSION}/linux/amd64/mfit chmod +x mfit # Run collection script locally sudo ./m4a-fit-collect.sh # Import the VM collection details to mFIT DB ./mfit discover import m4a-collect-*-*.tar # Assess the discovered VMs ./mfit assess # Generate an HTML report ./mfit report --format html > mfit-report.html # Generate a JSON report ./mfit report --format json > mfit-report.json
Revert "Alias PHP to use MAMP's binary"
# Global aliases alias l='ls -alh' alias bc='bc -l' alias g='git status' alias u='pbpaste | uglifyjs --mangle 2>/dev/null | pbcopy' alias strip-meta='exiftool $@ "-All=" -overwrite_original' alias fuck-this-shit='git reset --hard HEAD; git clean -fd' alias lipsum='lorem-ipsum 10 paragraphs' alias fit-chrome='osascript -e '"'"'tell first window of application "Google Chrome" to set bounds to {0, 0, 1440, 820}'"'" alias html-day-options='html-option-list -w 2 {1..31} | pbcopy'; alias apl='apl --noColor' alias md5='md5 -q' alias php='/Applications/MAMP/bin/php/php5.6.7/bin/php' # Global function for full-screening the terminal window. fit(){ # Make sure we're running interactively. [[ $- == *i* ]] && { osascript -e 'tell application "Terminal" activate set bounds of window 1 to {0, 0, 1440, 800} set position of window 1 to {0, 0} end tell'; }; } export fit; fit # Various other crap { rm ~/.DS_Store; dsclean ~/Desktop; } > /dev/null 2>&1 cd ~/Desktop;
# Global aliases alias l='ls -alh' alias bc='bc -l' alias g='git status' alias u='pbpaste | uglifyjs --mangle 2>/dev/null | pbcopy' alias strip-meta='exiftool $@ "-All=" -overwrite_original' alias fuck-this-shit='git reset --hard HEAD; git clean -fd' alias lipsum='lorem-ipsum 10 paragraphs' alias fit-chrome='osascript -e '"'"'tell first window of application "Google Chrome" to set bounds to {0, 0, 1440, 820}'"'" alias html-day-options='html-option-list -w 2 {1..31} | pbcopy'; alias apl='apl --noColor' alias md5='md5 -q' # Global function for full-screening the terminal window. fit(){ # Make sure we're running interactively. [[ $- == *i* ]] && { osascript -e 'tell application "Terminal" activate set bounds of window 1 to {0, 0, 1440, 800} set position of window 1 to {0, 0} end tell'; }; } export fit; fit # Various other crap { rm ~/.DS_Store; dsclean ~/Desktop; } > /dev/null 2>&1 cd ~/Desktop;
Fix the script to build the Engine.IO client
#!/usr/bin/env node 'use strict'; var dir = process.argv.slice(2)[0]; if (!dir) { var message = 'usage: globalify <directory>\n ' + 'build the engine.io-client pruning the UMD wrapper'; console.log(message); process.exit(1); } var browserify = require('browserify') , derequire = require('derequire') , concat = require('concat-stream') , deumdify = require('deumdify') , path = require('path') , fs = require('fs'); var options = { entries: [ path.resolve(dir, 'index.js') ], insertGlobalVars: { global: function glob() { return 'typeof self !== "undefined" ? self : ' + 'typeof window !== "undefined" ? window : {}'; } }, standalone: 'eio', builtins: false }; // // Build the Engine.IO client. // This generates a bundle and exposes it as a property of the global object. // The difference with the official build is that this bundle does not use a // UMD pattern. The Primus client, in fact, expects to have a global `eio` // available and the UMD wrapper prevents this global from being set when // RequireJS is used. See issue #157. // browserify(options).plugin(deumdify).bundle().pipe(concat({ encoding: 'string' }, function (output) { var dest = path.resolve(__dirname, 'library.js'); fs.writeFileSync(dest, derequire(output)); }));
#!/usr/bin/env node 'use strict'; var dir = process.argv.slice(2)[0]; if (!dir) { var message = 'usage: globalify <directory>\n ' + 'build the engine.io-client pruning the UMD wrapper'; console.log(message); process.exit(1); } var browserify = require('browserify') , derequire = require('derequire') , concat = require('concat-stream') , deumdify = require('deumdify') , path = require('path') , fs = require('fs'); var options = { entries: [ path.resolve(dir, 'index.js') ], insertGlobalVars: { global: function glob() { return 'typeof self !== "undefined" ? self : ' + 'typeof window !== "undefined" ? window : {}'; } }, standalone: 'eio', builtins: false }; // // Build the Engine.IO client. // This generates a bundle and exposes it as a property of the global object. // The difference with the official build is that this bundle does not use a // UMD pattern. The Primus client, in fact, expects to have a global `eio` // available and the UMD wrapper prevents this global from being set when // RequireJS is used. See issue #157. // browserify(options).ignore('ws').plugin(deumdify).bundle().pipe(concat({ encoding: 'string' }, function (output) { var dest = path.resolve(__dirname, 'library.js'); fs.writeFileSync(dest, derequire(output)); }));
Add command line arguments and a data file to clean things up a little
#!/bin/ksh BASE="http://community.myfitnesspal.com/en/group/members/394-low-carber-daily-forum-the-lcd-group/" # # This first block grabs all the member pages # max=544 min=1 PAGE=$min while (( PAGE <= $max )) do URL="${BASE}p${PAGE}?filter=members" ((PAGE++)) # ftp $URL done # # This sections looks for the named members. # max=544 min=1 for NAME in FIT_Goat loridarling quux do i=$max while (( i >= $min )) do file="p${i}?filter=members" grep -q "\"Card Card-Member\"><a title=\"${NAME}" $file && i=$min && echo "${NAME} is on page ${BASE}${file}" ((i--)) done done
#!/bin/ksh DATA=/home/kevinb/Documents/Low-Carb-Daily-Member-List/data BASE="http://community.myfitnesspal.com/en/group/members/394-low-carber-daily-forum-the-lcd-group/" # Move into the data directory cd $DATA # # This first block grabs all the member pages # max=544 min=1 PAGE=$min while (( PAGE <= $max )) do URL="${BASE}p${PAGE}?filter=members" ((PAGE++)) # ftp $URL done # # This sections looks for the named members. # It looks for everyone on the command line. # max=544 min=1 for NAME in $@ do i=$max while (( i >= $min )) do file="p${i}?filter=members" grep -q "\"Card Card-Member\"><a title=\"${NAME}" $file && i=$min && echo "${NAME} is on page ${BASE}${file}" ((i--)) done done
Disable latency profile of trickle
#!/bin/bash # Copyright 2016 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -ex cd $(dirname $0)/../.. # try to use pypy for generating reports # each trace dumps 7-8gig of text to disk, and processing this into a report is # heavyweight - so any speed boost is worthwhile # TODO(ctiller): consider rewriting report generation in C++ for performance if which pypy >/dev/null; then PYTHON=pypy else PYTHON=python2.7 fi $PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency
#!/bin/bash # Copyright 2016 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -ex cd $(dirname $0)/../.. # try to use pypy for generating reports # each trace dumps 7-8gig of text to disk, and processing this into a report is # heavyweight - so any speed boost is worthwhile # TODO(ctiller): consider rewriting report generation in C++ for performance if which pypy >/dev/null; then PYTHON=pypy else PYTHON=python2.7 fi BENCHMARKS_TO_RUN="bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong bm_fullstack_streaming_pump bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_chttp2_transport bm_pollset bm_metadata" $PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency -b $BENCHMARKS_TO_RUN
Refactor to use kubler-overlay package
# # Kubler phase 1 config, pick installed packages and/or customize the build # _packages="dev-vcs/git" _webhook_version="2.6.8" configure_bob() { export GOPATH=/go export DISTRIBUTION_DIR="${GOPATH}/src/github.com/adnanh/webhook" mkdir -p "${DISTRIBUTION_DIR}" git clone https://github.com/adnanh/webhook.git "${DISTRIBUTION_DIR}" cd "${DISTRIBUTION_DIR}" git checkout "tags/${_webhook_version}" echo "building webhook.." # occasionally github clone rate limits fail the build, lets retry up to 5 times before giving up local i for i in {1..5}; do go get -d && break || { echo "retrying build in 5s.."; sleep 5; } done go build -o "${_EMERGE_ROOT}"/usr/bin/webhook echo "done." } # # This hook is called just before starting the build of the root fs # configure_rootfs_build() { update_use 'dev-vcs/git' '-python' '-webdav' update_use 'app-crypt/gnupg' '-smartcard' } # # This hook is called just before packaging the root fs tar ball, ideal for any post-install tasks, clean up, etc # finish_rootfs_build() { log_as_installed "manual install" "webhook-${_webhook_version}" "https://github.com/adnanh/webhook/" }
# # Kubler phase 1 config, pick installed packages and/or customize the build # _packages="dev-vcs/git dev-vcs/webhook" _webhook_version="2.6.8" # # This hook is called just before starting the build of the root fs # configure_rootfs_build() { update_use 'dev-vcs/git' '-python' '-webdav' update_use 'app-crypt/gnupg' '-smartcard' } # # This hook is called just before packaging the root fs tar ball, ideal for any post-install tasks, clean up, etc # finish_rootfs_build() { : }
Set ADLB_EXHAUST_TIME for Turbine tests
# Re-usable test setup lines # Helps automate selection of process mode (engine, server, worker) # Prints the "SETUP:" header in the *.out file if [[ ${TURBINE_ENGINES} == "" ]] then export TURBINE_ENGINES=1 fi if [[ ${TURBINE_WORKERS} == "" ]] then export TURBINE_WORKERS=1 fi if [[ ${ADLB_SERVERS} == "" ]] then export ADLB_SERVERS=1 fi PROCS=$(( ${TURBINE_ENGINES} + ${TURBINE_WORKERS} + ${ADLB_SERVERS} )) display() { T=$1 I=$2 J=$3 V=$( eval echo \$${T} ) printf "%-16s %3i RANKS: %3i - %3i\n" ${T}: ${V} ${I} ${J} } TURBINE_RANKS=$(( ${TURBINE_ENGINES} + ${TURBINE_WORKERS} )) echo SETUP: date "+%m/%d/%Y %I:%M%p" display TURBINE_ENGINES 0 $(( TURBINE_ENGINES-1 )) display TURBINE_WORKERS ${TURBINE_ENGINES} $(( TURBINE_RANKS-1 )) display ADLB_SERVERS ${TURBINE_RANKS} $(( PROCS-1 )) echo PROCS: ${PROCS} echo
# Re-usable test setup lines # Helps automate selection of process mode (engine, server, worker) # Prints the "SETUP:" header in the *.out file export ADLB_EXHAUST_TIME=1 if [[ ${TURBINE_ENGINES} == "" ]] then export TURBINE_ENGINES=1 fi if [[ ${TURBINE_WORKERS} == "" ]] then export TURBINE_WORKERS=1 fi if [[ ${ADLB_SERVERS} == "" ]] then export ADLB_SERVERS=1 fi PROCS=$(( ${TURBINE_ENGINES} + ${TURBINE_WORKERS} + ${ADLB_SERVERS} )) display() { T=$1 I=$2 J=$3 V=$( eval echo \$${T} ) printf "%-16s %3i RANKS: %3i - %3i\n" ${T}: ${V} ${I} ${J} } TURBINE_RANKS=$(( ${TURBINE_ENGINES} + ${TURBINE_WORKERS} )) echo SETUP: date "+%m/%d/%Y %I:%M%p" display TURBINE_ENGINES 0 $(( TURBINE_ENGINES-1 )) display TURBINE_WORKERS ${TURBINE_ENGINES} $(( TURBINE_RANKS-1 )) display ADLB_SERVERS ${TURBINE_RANKS} $(( PROCS-1 )) echo PROCS: ${PROCS} echo
Add function to run a command in a given directory
# Use colors in coreutils utilities output alias ls='ls --color=auto' export GREP_OPTIONS="--color" # ls aliases alias ll='ls -la' alias la='ls -A' alias l='ls' # Update dotfiles function dfu() { ( cd ~/.dotfiles && git pullff && ./install -q ) } # Use pip without requiring virtualenv function syspip() { PIP_REQUIRE_VIRTUALENV="" pip "$@" } # cd to git root directory alias cdgr='cd "$(git root)"' # Jump to directory containing file function jump() { cd "$(dirname ${1})" } # cd replacement for screen to track cwd (like tmux) function scr_cd() { builtin cd $1 screen -X chdir $PWD } if [[ "$TERM" == 'screen.rxvt' ]]; then alias cd=scr_cd fi # Go up [n] directories function up() { if [[ "${1}" == "" ]]; then cd .. elif ! [[ "${1}" =~ ^[0-9]+$ ]]; then echo "Error: argument must be a number" elif ! [[ "${1}" -gt "0" ]]; then echo "Error: argument must be positive" else for i in {1..${1}}; do cd .. done fi } # Mirror a website alias mirrorsite='wget -m -k -K -E -e robots=off'
# Use colors in coreutils utilities output alias ls='ls --color=auto' export GREP_OPTIONS="--color" # ls aliases alias ll='ls -la' alias la='ls -A' alias l='ls' # Update dotfiles function dfu() { ( cd ~/.dotfiles && git pullff && ./install -q ) } # Use pip without requiring virtualenv function syspip() { PIP_REQUIRE_VIRTUALENV="" pip "$@" } # cd to git root directory alias cdgr='cd "$(git root)"' # Jump to directory containing file function jump() { cd "$(dirname ${1})" } # cd replacement for screen to track cwd (like tmux) function scr_cd() { builtin cd $1 screen -X chdir $PWD } if [[ "$TERM" == 'screen.rxvt' ]]; then alias cd=scr_cd fi # Go up [n] directories function up() { if [[ "${1}" == "" ]]; then cd .. elif ! [[ "${1}" =~ ^[0-9]+$ ]]; then echo "Error: argument must be a number" elif ! [[ "${1}" -gt "0" ]]; then echo "Error: argument must be positive" else for i in {1..${1}}; do cd .. done fi } # Execute a command in a specific directory function in() { ( cd ${1} && shift && ${@} ) } # Mirror a website alias mirrorsite='wget -m -k -K -E -e robots=off'
Fix script so it creates .last_update files better
#!/bin/sh # this is a hack to work around how django-mozilla-product-details # takes too long and always kills the vagrant VM on startup dest=./lib/product_json if [ ! -d $dest ]; then echo "usage: $0" echo "" echo "you must run this from the root of your zamboni checkout" exit 1 fi svn export --force http://svn.mozilla.org/libs/product-details/json/ $dest if [ $? -eq 0 ]; then date "+%a, %d %b %Y %H:%M:%S %Z" > $dest/.last_update fi
#!/bin/bash # this is a hack to work around how django-mozilla-product-details # takes too long and always kills the vagrant VM on startup dest=./lib/product_json if [ ! -d $dest ]; then echo "usage: $0" echo "" echo "you must run this from the root of your zamboni checkout" exit 1 fi svn export --force http://svn.mozilla.org/libs/product-details/json/ $dest if [ $? -eq 0 ]; then # Bah. This isn't supported for some reason: date "+%a, %d %b %Y %H:%M:%S %Z" dd=`python -c 'import datetime,sys; sys.stdout.write(datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT"))'` echo -n $dd > $dest/.last_update echo -n $dd > $dest/regions/.last_update fi
Make Travis build publish signature files
#!/usr/bin/env bash NAME=${1:-$(cat build.sbt | egrep '^name :=' | awk -F\" '{ print $2 }')} VERSION=${2:-$(cat build.sbt | egrep '^version :=' | awk -F\" '{ print $2 }')} echo "Signing released files..." curl -H "X-GPG-PASSPHRASE: $GPG_PASSPHRASE" "-u$BINTRAY_USER:$BINTRAY_API_KEY" -X POST "https://api.bintray.com/gpg/$BINTRAY_USER/releases/$NAME/versions/$VERSION"
#!/usr/bin/env bash NAME=${1:-$(cat build.sbt | egrep '^name :=' | awk -F\" '{ print $2 }')} VERSION=${2:-$(cat build.sbt | egrep '^version :=' | awk -F\" '{ print $2 }')} echo "Signing released files..." curl -H "X-GPG-PASSPHRASE: $GPG_PASSPHRASE" "-u$BINTRAY_USER:$BINTRAY_API_KEY" -X POST "https://api.bintray.com/gpg/$BINTRAY_USER/releases/$NAME/versions/$VERSION" echo "Publishing signature files..." curl "-u$BINTRAY_USER:$BINTRAY_API_KEY" -X POST "https://api.bintray.com/content/$BINTRAY_USER/releases/$NAME/$VERSION/publish"
Configure name and email in heroku update script
git clone https://github.com/Widdershin/cycle-ecosystem.git # Yeah, I clone my repo into my repo, wanna fight about it? cd cycle-ecosystem cp -r ../node_modules . ./node_modules/babel-cli/bin/babel-node.js update.js git config credential.helper store echo https://$GIT_CREDENTIALS@github.com > ~/.git-credentials git commit -am "Update - $(date)" git push origin master rm ~/.git-credentials cd .. rm -rf cycle-ecosystem/.git rm -r cycle-ecosystem
git clone https://github.com/Widdershin/cycle-ecosystem.git # Yeah, I clone my repo into my repo, wanna fight about it? cd cycle-ecosystem cp -r ../node_modules . ./node_modules/babel-cli/bin/babel-node.js update.js git config user.email "ncwjohnstone@gmail.com" git config user.name "Nick Johnstone (heroku auto update)" git config credential.helper store echo https://$GIT_CREDENTIALS@github.com > ~/.git-credentials git commit -am "Update - $(date)" git push origin master rm ~/.git-credentials cd .. rm -rf cycle-ecosystem/.git rm -r cycle-ecosystem
Make sure docs are present in the build/lib.OS directory
# Activate new env cd $WORKSPACE cd sasview cd build_tools conda_env_name="$(grep 'name: ' conda_qt5_min_centos.yml)" echo $conda_env_name conda_env_name=${conda_env_name:6} echo $conda_env_name source activate $conda_env_name # Now build Sasview # Sasmodels cd $WORKSPACE cd sasmodels python setup.py build cd $WORKSPACE cd sasmodels cd doc make html cd $WORKSPACE cd sasmodels python setup.py build install # SasView cd $WORKSPACE cd sasview python src/sas/qtgui/convertUI.py python setup.py build docs python setup.py install # Pyinstaller cd $WORKSPACE cd sasview cd installers pyinstaller sasview_qt5_min_centos.spec cd $WORKSPACE cd sasview cd installers cp run_sasview.sh dist/sasview cp set_sasview_qt5_path.sh dist/sasview cd dist mv sasview SasView tar czvf SasView.tar.gz SasView
# Activate new env cd $WORKSPACE cd sasview cd build_tools conda_env_name="$(grep 'name: ' conda_qt5_min_centos.yml)" echo $conda_env_name conda_env_name=${conda_env_name:6} echo $conda_env_name source activate $conda_env_name # Now build Sasview # Sasmodels cd $WORKSPACE cd sasmodels python setup.py build cd $WORKSPACE cd sasmodels cd doc make html cd $WORKSPACE cd sasmodels python setup.py build install # SasView cd $WORKSPACE cd sasview python src/sas/qtgui/convertUI.py python setup.py build docs install # Pyinstaller cd $WORKSPACE cd sasview cd installers pyinstaller sasview_qt5_min_centos.spec cd $WORKSPACE cd sasview cd installers cp run_sasview.sh dist/sasview cp set_sasview_qt5_path.sh dist/sasview cd dist mv sasview SasView tar czvf SasView.tar.gz SasView
Revert "Also divert XDG_CACHE_HOME to have a fake avatar cache"
#! /bin/sh tmpdir=$(mktemp -d) export XDG_DATA_HOME=$tmpdir export XDG_CACHE_HOME=$tmpdir export XDG_CONFIG_HOME=$tmpdir export XDG_CACHE_HOME=$tmpdir tracker-control -rs #2>&1 >/dev/null /usr/lib/tracker/tracker-store& /usr/bin/contactsd& sleep 5 $1 result=$? tracker-control -r #2>&1 >/dev/null if [ $result -eq "0" ]; then echo "Test succeeded:" $1 else echo "Test failed:" $1 fi rm -rf $tmpdir exit $result
#! /bin/sh tmpdir=$(mktemp -d) export XDG_DATA_HOME=$tmpdir export XDG_CACHE_HOME=$tmpdir export XDG_CONFIG_HOME=$tmpdir tracker-control -rs #2>&1 >/dev/null /usr/lib/tracker/tracker-store& /usr/bin/contactsd& sleep 5 $1 result=$? tracker-control -r #2>&1 >/dev/null if [ $result -eq "0" ]; then echo "Test succeeded:" $1 else echo "Test failed:" $1 fi rm -rf $tmpdir exit $result
Enable specifying custom engine prefix
#!/bin/sh . "$(dirname "$(readlink -f "$0")")"/../../ovirt-engine/bin/engine-prolog.sh exec "${JAVA_HOME}/bin/java" \ -Djava.security.auth.login.config="${ENGINE_USR}/conf/jaas.conf" \ -Djava.util.logging.config.file="${OVIRT_LOGGING_PROPERTIES}" \ -Djboss.modules.write-indexes=false \ -Dorg.ovirt.engine.aaa.jdbc.programName="${0}" \ -jar "${JBOSS_HOME}/jboss-modules.jar" \ -dependencies org.ovirt.engine.extension.aaa.jdbc \ -class org.ovirt.engine.extension.aaa.jdbc.binding.cli.Cli \ "$@"
#!/bin/sh ENGINE_PROLOG="${ENGINE_PREFIX:-$(dirname $0)/..}/share/ovirt-engine/bin/engine-prolog.sh" if [ ! -f ${ENGINE_PROLOG} ]; then echo \ "Cannot locate engine-prolog.sh, please specify oVirt engine installation \ prefix. For example: ENGINE_PREFIX=\$HOME/ovirt-engine ovirt-aaa-jdbc-tool " exit -1 fi . "${ENGINE_PROLOG}" exec "${JAVA_HOME}/bin/java" \ -Djava.security.auth.login.config="${ENGINE_USR}/conf/jaas.conf" \ -Djava.util.logging.config.file="${OVIRT_LOGGING_PROPERTIES}" \ -Djboss.modules.write-indexes=false \ -Dorg.ovirt.engine.aaa.jdbc.programName="${0}" \ -jar "${JBOSS_HOME}/jboss-modules.jar" \ -dependencies org.ovirt.engine.extension.aaa.jdbc \ -class org.ovirt.engine.extension.aaa.jdbc.binding.cli.Cli \ "$@"
Adjust creation script for new veewee version
#!/bin/sh -e cd $(dirname $0) vagrant basebox build --force debian-6-amd64 vagrant basebox validate debian-6-amd64 vagrant basebox export debian-6-amd64 s3cmd put debian-6-amd64.box s3://s3.cargomedia.ch/vagrant-boxes/ s3cmd setacl --acl-public --recursive s3://s3.cargomedia.ch/vagrant-boxes/
#!/bin/sh -e cd $(dirname $0) veewee vbox build --force debian-6-amd64 veewee vbox validate debian-6-amd64 /usr/bin/vagrant package --base debian-6-amd64 --output debian-6-amd64.box s3cmd put debian-6-amd64.box s3://s3.cargomedia.ch/vagrant-boxes/ s3cmd setacl --acl-public --recursive s3://s3.cargomedia.ch/vagrant-boxes/
Remove datadog nozzle from list of manifests to parse
#!/bin/bash SOURCE_MANIFESTS_PATH=grootfs-ci-secrets/deployments MANIFESTS="gamora/concourse.yml grootfs-bench/aws.yml gamora/datadog-firehose-nozzle.yml" mkdir -p manifests/gamora mkdir -p manifests/grootfs-bench for manifest in $MANIFESTS do full_path="${SOURCE_MANIFESTS_PATH}/${manifest}" ruby -r erb -e "puts ERB.new(File.read('$full_path')).result" > "manifests/${manifest}" done
#!/bin/bash SOURCE_MANIFESTS_PATH=grootfs-ci-secrets/deployments MANIFESTS="gamora/concourse.yml grootfs-bench/aws.yml" mkdir -p manifests/gamora mkdir -p manifests/grootfs-bench for manifest in $MANIFESTS do full_path="${SOURCE_MANIFESTS_PATH}/${manifest}" ruby -r erb -e "puts ERB.new(File.read('$full_path')).result" > "manifests/${manifest}" done
Complete containers for _run command in zsh
#compdef vagga #autoload _cmds () { local cmds listopts # Show hidden options only when underscore is typed if [[ $words[2] = '_'* ]]; then listopts="--all" fi # Check if in folder with correct vagga.yaml file vagga _list 1>/dev/null 2>/dev/null if [ $? -eq 0 ]; then IFS=$'\n' cmds=($(vagga _list --zsh $listopts)) else cmds=() fi _describe -t commands 'Available commands' cmds } _arguments -C -s "1: :_cmds" '*::arg:->args' -- case $state in (args) words[1]="vagga _help ${words[1]}" _arguments -C -s -- esac
#compdef vagga #autoload _list () { local cmds listopts # Show hidden options only when underscore is typed if [[ $words[2] = '_'* ]]; then listopts="--all" fi # Check if in folder with correct vagga.yaml file vagga _list 1>/dev/null 2>/dev/null if [ $? -eq 0 ]; then IFS=$'\n' cmds=($(vagga _list "$1" $listopts)) else cmds=() fi _describe -t commands 'Available commands' cmds } _arguments -C -s "1: :{_list --zsh}" '*::arg:->args' -- case $state in (args) cmd=${words[1]} if [[ ${cmd} = "_run" ]] then; _arguments -C -s "1: :{_list --containers}" else words[1]="vagga _help ${cmd}" _arguments -C -s -- fi esac
Migrate ATLAS to Vagrant Cloud - app.vagrantup.com
#!/bin/bash set +e echo "upload vagrant to atlas" ./on-build-config/build-release-tools/HWIMO-BUILD on-build-config/build-release-tools/application/release_to_atlas.py \ --build-directory ./VAGRANT/build/packer \ --atlas-url https://atlas.hashicorp.com/api/v1 \ --atlas-creds ${ATLAS_CREDS} \ --atlas-name rackhd \ --is-release $IS_OFFICIAL_RELEASE
#!/bin/bash set +e echo "upload vagrant to atlas" ./on-build-config/build-release-tools/HWIMO-BUILD on-build-config/build-release-tools/application/release_to_atlas.py \ --build-directory ./VAGRANT/build/packer \ --atlas-url https://app.vagrantup.com/api/v1 \ --atlas-creds ${ATLAS_CREDS} \ --atlas-name rackhd \ --is-release $IS_OFFICIAL_RELEASE
Add message if configuration for CMR is missing
#!/bin/bash if [ "$(ls -A $INSPECTIT_CONFIG_HOME)" ]; then echo "Using existing inspectIT configuration..." else echo "No custom inspectIT configuration found, using default one..." cp -r $INSPECTIT_CONFIG_HOME/../config/* $INSPECTIT_CONFIG_HOME CMR_ADDR=${INSPECTIT_CMR_ADDR:-$CMR_PORT_9070_TCP_ADDR} CMR_PORT=${INSPECTIT_CMR_PORT:-$CMR_PORT_9070_TCP_PORT} AGENT_NAME=${AGENT_NAME:-$HOSTNAME} sed -i "s/^\(repository\) .*/\1 $CMR_ADDR $CMR_PORT $AGENT_NAME/" $INSPECTIT_CONFIG_HOME/inspectit-agent.cfg echo "Done. Remember to modify the configuration for your needs. You find the configuration in the mapped volume $INSPECTIT_CONFIG_HOME." fi exec jetty.sh run
#!/bin/bash if [ "$(ls -A $INSPECTIT_CONFIG_HOME)" ]; then echo "Using existing inspectIT configuration..." else echo "No custom inspectIT configuration found, using default one..." cp -r $INSPECTIT_CONFIG_HOME/../config/* $INSPECTIT_CONFIG_HOME CMR_ADDR=${INSPECTIT_CMR_ADDR:-$CMR_PORT_9070_TCP_ADDR} CMR_PORT=${INSPECTIT_CMR_PORT:-$CMR_PORT_9070_TCP_PORT} if [ -z $CMR_ADDR ] || [ -z $CMR_PORT ]; then echo "No inspectIT CMR configured! Please read our README" exit 1 fi AGENT_NAME=${AGENT_NAME:-$HOSTNAME} sed -i "s/^\(repository\) .*/\1 $CMR_ADDR $CMR_PORT $AGENT_NAME/" $INSPECTIT_CONFIG_HOME/inspectit-agent.cfg echo "Done. Remember to modify the configuration for your needs. You find the configuration in the mapped volume $INSPECTIT_CONFIG_HOME." fi exec jetty.sh run
Revert "Be explicit about libssl-dev."
#!/bin/bash # Shell script for installing hyper's dependencies on Travis. In particular, # this upgrades the OpenSSL version used on Travis. set -e set -x sudo add-apt-repository -y "ppa:lukasaoz/openssl101-ppa" sudo apt-get -y update sudo apt-get install -y --force-yes openssl libssl1.0.0 libssl-dev pip install . pip install -r test_requirements.txt
#!/bin/bash # Shell script for installing hyper's dependencies on Travis. In particular, # this upgrades the OpenSSL version used on Travis. set -e set -x lsb_release -a sudo add-apt-repository -y "ppa:lukasaoz/openssl101-ppa" sudo apt-get -y update sudo apt-get install -y --force-yes openssl libssl1.0.0 pip install . pip install -r test_requirements.txt