Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Fix compatibility with newer PostgreSQL Docker images
export MINERVA_DB_NAME=minerva export PGDATABASE=$MINERVA_DB_NAME gosu postgres create-minerva-database echo 'minerva.trigger_mark_modified = on' >> /var/lib/postgresql/data/postgresql.conf
export MINERVA_DB_NAME=minerva export PGDATABASE=$MINERVA_DB_NAME create-minerva-database echo 'minerva.trigger_mark_modified = on' >> /var/lib/postgresql/data/postgresql.conf
Remove old versions before testing
PYTHONPATH=../.. export PYTHONPATH URI='http://ws.cdyne.com/WeatherWS/Weather.asmx?wsdl' PREFIX='weather' WSDL="${PREFIX}.wsdl" if [ ! -f ${WSDL} ] ; then wget -O ${WSDL} "${URI}" fi rm -rf raw mkdir -p raw touch raw/__init__.py ../../scripts/pyxbgen \ -m "${PREFIX}" \ -W "${WSDL}" \ -r #if [ ! -f ${PREFIX}.py ] ; then # echo "from raw.${PREFIX} import *" > ${PREFIX}.py #fi
PYTHONPATH=../.. export PYTHONPATH URI='http://ws.cdyne.com/WeatherWS/Weather.asmx?wsdl' PREFIX='weather' WSDL="${PREFIX}.wsdl" if [ ! -f ${WSDL} ] ; then wget -O ${WSDL} "${URI}" fi rm -rf raw weather.pyc weather.py mkdir -p raw touch raw/__init__.py ../../scripts/pyxbgen \ -m "${PREFIX}" \ -W "${WSDL}" \ -r #if [ ! -f ${PREFIX}.py ] ; then # echo "from raw.${PREFIX} import *" > ${PREFIX}.py #fi
Fix systemd user service install script.
#!/usr/bin/env bash set -e DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cp "$DIR/wl2k.service" "$HOME/.config/systemd/user/" systemctl --user daemon-reload echo "Installed. Start with 'systemctl --user start wl2k'"
#!/usr/bin/env bash set -e DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" mkdir -p "$HOME/.config/systemd/user" cp "$DIR/wl2k.service" "$HOME/.config/systemd/user/" systemctl --user daemon-reload echo "Installed. Start with 'systemctl --user start wl2k'"
Fix problem with compaudit complain about /tmp
CWD=$(dirname $0) GENCOMPL_FPATH=$(mktemp -d /tmp/autogen-completion.XXXXX) source $CWD/source/zsh-completion-generator.plugin.zsh
CWD=$(dirname $0) GENCOMPL_FPATH=$HOME/.zsh/complete-generator mkdir -p $GENCOMPL_FPATH source $CWD/source/zsh-completion-generator.plugin.zsh
Fix an issue where, if the user was not signed in from inside the Datalab container the last time they used Datalab, setting the PROJECT_NUMBER env var would fail and cause Datalab to not start.
#!/bin/sh # Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Sets up various environment variables within the docker container. export DATALAB_ENV="local" export CLOUDSDK_CONFIG=/content/datalab/.config if [ "${ENABLE_USAGE_REPORTING}" = "true" ] then if [ -n "${PROJECT_ID}" ] then export PROJECT_NUMBER=`gcloud projects describe "${PROJECT_ID}" --format 'value(projectNumber)'` fi fi if [ -n "${EXPERIMENTAL_KERNEL_GATEWAY_URL}" ] then export KG_URL="${EXPERIMENTAL_KERNEL_GATEWAY_URL}" fi
#!/bin/sh # Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Sets up various environment variables within the docker container. export DATALAB_ENV="local" export CLOUDSDK_CONFIG=/content/datalab/.config if [ "${ENABLE_USAGE_REPORTING}" = "true" ] then if [ -n "${PROJECT_ID}" ] then USER_EMAIL=`gcloud auth list --format="value(account)"` if [ -n "${USER_EMAIL}" ] then export PROJECT_NUMBER=`gcloud projects describe "${PROJECT_ID}" --format 'value(projectNumber)'` fi fi fi if [ -n "${EXPERIMENTAL_KERNEL_GATEWAY_URL}" ] then export KG_URL="${EXPERIMENTAL_KERNEL_GATEWAY_URL}" fi
Remove redundant spec check from cucumber alias
function bepc_changed_vs_origin_master() { rspec_ensure_no_focus || return $? local bepc_changed_vs_origin_master_FILE local bepc_changed_vs_origin_master_FILES_EXISTING=() for bepc_changed_vs_origin_master_FILE in `git_files_changed_vs_origin_master | grep features | grep ".feature$"` do if [ -f $bepc_changed_vs_origin_master_FILE ]; then bepc_changed_vs_origin_master_FILES_EXISTING+=$bepc_changed_vs_origin_master_FILE fi done if [ `echo $bepc_changed_vs_origin_master_FILES_EXISTING | wc -l` -gt 0 ]; then echorun bepc `echo $bepc_changed_vs_origin_master_FILES_EXISTING | tr "\n" " "` else echo echo "$0: nothing to run" fi }
function bepc_changed_vs_origin_master() { local bepc_changed_vs_origin_master_FILE local bepc_changed_vs_origin_master_FILES_EXISTING=() for bepc_changed_vs_origin_master_FILE in `git_files_changed_vs_origin_master | grep features | grep ".feature$"` do if [ -f $bepc_changed_vs_origin_master_FILE ]; then bepc_changed_vs_origin_master_FILES_EXISTING+=$bepc_changed_vs_origin_master_FILE fi done if [ `echo $bepc_changed_vs_origin_master_FILES_EXISTING | wc -l` -gt 0 ]; then echorun bepc `echo $bepc_changed_vs_origin_master_FILES_EXISTING | tr "\n" " "` else echo echo "$0: nothing to run" fi }
Add -webstart use flag to icedtea, wtf is that default now?
# # build config # PACKAGES="dev-java/icedtea-bin" # # this method runs in the bb builder container just before starting the build of the rootfs # configure_rootfs_build() { update_use 'dev-java/icedtea-bin' '-awt' # skip python and nss provide_package dev-lang/python provide_package dev-libs/nss } # # this method runs in the bb builder container just before tar'ing the rootfs # finish_rootfs_build() { copy_gcc_libs }
# # build config # PACKAGES="dev-java/icedtea-bin" # # this method runs in the bb builder container just before starting the build of the rootfs # configure_rootfs_build() { update_use 'dev-java/icedtea-bin' '-webstart' # skip python and nss provide_package dev-lang/python provide_package dev-libs/nss } # # this method runs in the bb builder container just before tar'ing the rootfs # finish_rootfs_build() { copy_gcc_libs }
Tweak some settings for better Prometheus metrics access.
#!/bin/bash HOSTNAME="$(hostname -f)" # Setting --hostname-override is a workaround for https://github.com/kubernetes/kubeadm/issues/653 # Setting --cloud-provider is a workaround for https://github.com/kubernetes/kubeadm/issues/620 /bin/cat > /etc/systemd/system/kubelet.service.d/10-hostname.conf <<EOF [Service] Environment="KUBELET_EXTRA_ARGS= --hostname-override=${HOSTNAME} --cloud-provider=aws" EOF systemctl daemon-reload
#!/bin/bash HOSTNAME="$(hostname -f)" # Setting --hostname-override is a workaround for https://github.com/kubernetes/kubeadm/issues/653 # Setting --cloud-provider is a workaround for https://github.com/kubernetes/kubeadm/issues/620 # Setting --authentication-token-webhook allows authenticated Prometheus access to the Kubelet metrics endpoint # (see https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/docs/kube-prometheus-on-kubeadm.md) /bin/cat > /etc/systemd/system/kubelet.service.d/10-hostname.conf <<EOF [Service] Environment="KUBELET_EXTRA_ARGS= --hostname-override=${HOSTNAME} --cloud-provider=aws --authentication-token-webhook=true" EOF systemctl daemon-reload
Remove the C++ MacOs GRPC tests from execution.
#!/bin/bash set -euo pipefail cd ${KOKORO_ARTIFACTS_DIR}/git/tink cd cc use_bazel.sh $(cat .bazelversion) bazel build ... bazel test ...
#!/bin/bash set -euo pipefail cd ${KOKORO_ARTIFACTS_DIR}/git/tink cd cc # TODO(b/140615798): Run all tests once fixed. use_bazel.sh $(cat .bazelversion) bazel build -- ... -//integration/gcpkms/... bazel test -- ... -//integration/gcpkms/...
Add support to hand made font maps.
#!/bin/sh cd "$(dirname "$0")" rm -f ./build/* mkdir -p ./.work echo 'build all symbols fonts...' cp ./fonts/pomicons-regular.ttf ./build cp ./fonts/fontawesome-regular.ttf ./build cp ./fonts/devicons-regular.ttf ./build ./scripts/fu-relocate ./fonts/octicons-regular.ttf --save-as='.work/octicons-regular-relocated.ttf' --to='0xf300' 2> /dev/null cp ./.work/octicons-regular-relocated.ttf ./build/octicons-regular.ttf echo 'export maps for all fonts...' ./scripts/fu-map ./build/pomicons-regular.ttf --namespace 'POMICONS' 2> /dev/null > ./build/pomicons-regular.sh ./scripts/fu-map ./build/octicons-regular.ttf --namespace 'OCTICONS' 2> /dev/null > ./build/octicons-regular.sh ./scripts/fu-map ./build/fontawesome-regular.ttf --namespace 'AWESOME' 2> /dev/null > ./build/fontawesome-regular.sh ./scripts/fu-map ./build/devicons-regular.ttf --namespace 'DEVICONS' 2> /dev/null > ./build/devicons-regular.sh for file in ./fonts/*.sh; do cat $file >> ./build/`basename $file` done echo 'you can find fonts and maps in local ./build directory :-)' echo 'done!'
#!/bin/sh cd "$(dirname "$0")" rm -f ./build/* mkdir -p ./.work echo 'build all symbols fonts...' cp ./fonts/pomicons-regular.ttf ./build cp ./fonts/fontawesome-regular.ttf ./build cp ./fonts/devicons-regular.ttf ./build ./scripts/fu-relocate ./fonts/octicons-regular.ttf --save-as='.work/octicons-regular-relocated.ttf' --to='0xf300' 2> /dev/null cp ./.work/octicons-regular-relocated.ttf ./build/octicons-regular.ttf echo 'export maps for all fonts...' ./scripts/fu-map ./build/pomicons-regular.ttf --namespace 'POMICONS' 2> /dev/null > ./build/pomicons-regular.sh ./scripts/fu-map ./build/octicons-regular.ttf --namespace 'OCTICONS' 2> /dev/null > ./build/octicons-regular.sh ./scripts/fu-map ./build/fontawesome-regular.ttf --namespace 'AWESOME' 2> /dev/null > ./build/fontawesome-regular.sh ./scripts/fu-map ./build/devicons-regular.ttf --namespace 'DEVICONS' 2> /dev/null > ./build/devicons-regular.sh echo 'override supplied font maps...' for file in ./fonts/*.sh; do [ -f "$file" ] && cp $file ./build done echo 'you can find fonts and maps in local ./build directory :-)' echo 'done!'
Add Terminal.app color scheme for OS X
#!/bin/sh -e cd ~/repo github_clone_ro.sh gittup/tup github_clone_ro.sh gmarik/Vundle.vim ~/.vim/bundle/Vundle.vim github_clone_ro.sh krzysztof-jusiak/gmock github_clone_ro.sh rupa/z github_clone_ro.sh sjl/peat github_clone_ro.sh sjl/t
#!/bin/sh -e cd ~/repo github_clone_ro.sh gittup/tup github_clone_ro.sh gmarik/Vundle.vim ~/.vim/bundle/Vundle.vim github_clone_ro.sh krzysztof-jusiak/gmock github_clone_ro.sh rupa/z github_clone_ro.sh sjl/peat github_clone_ro.sh sjl/t github_clone_ro.sh tomislav/osx-terminal.app-colors-solarized
Fix wrong data type being written to Plist which was breaking the build
#! /bin/sh # Update Info.plist in the app bundlebased on current build configuration. # This script should only be at the end of a build to ensure: # - The .app folder exists # - the plist has been preprocessed # Processing is done inside the .app to prevent changes to repository status declare -r INFO_PLIST="${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/Info.plist" # Fail if any subsequent commands fail set -e if [[ "${CONFIGURATION}" != "Release" || $WMF_FORCE_ITUNES_FILE_SHARING == "1" ]]; then echo "Enabling iTunes File Sharing for ${CONFIGURATION} build." defaults write "${INFO_PLIST}" UIFileSharingEnabled true fi if [[ "${CONFIGURATION}" != "Release" || $WMF_FORCE_DEBUG_MENU == "1" ]]; then echo "Showing debug menu for ${CONFIGURATION} build." defaults write "${INFO_PLIST}" WMFShowDebugMenu true fi
#! /bin/sh # Update Info.plist in the app bundlebased on current build configuration. # This script should only be at the end of a build to ensure: # - The .app folder exists # - the plist has been preprocessed # Processing is done inside the .app to prevent changes to repository status declare -r INFO_PLIST="${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/Info.plist" # Fail if any subsequent commands fail set -e if [[ "${CONFIGURATION}" != "Release" || $WMF_FORCE_ITUNES_FILE_SHARING == "1" ]]; then echo "Enabling iTunes File Sharing for ${CONFIGURATION} build." defaults write "${INFO_PLIST}" UIFileSharingEnabled -bool YES fi if [[ "${CONFIGURATION}" != "Release" || $WMF_FORCE_DEBUG_MENU == "1" ]]; then echo "Showing debug menu for ${CONFIGURATION} build." defaults write "${INFO_PLIST}" WMFShowDebugMenu -bool YES fi
Make the find command more robust. Use readlink.
#!/bin/bash DOTFILES=$(pwd) DOTFILES_OLD=$HOME/dotfiles_old # Create a list of all files (excluding git files) in the current directory. find . * -type f | grep -v "\./.\git" > dotfiles.txt # Backup all files that currently exist into the folder $DOTFILES_OLD, # while preserving the directory structure and dereferencing links. mkdir -p $DOTFILES_OLD && cd rsync -Razq --copy-links --files-from=$DOTFILES/dotfiles.txt . $DOTFILES_OLD/ # Loop over all dotfiles and create symlinks for all. while read src do if [ ! -d $(dirname $HOME/$src) ]; then mkdir -p $(dirname $HOME/$src) fi rm -f $HOME/$src ln -s $DOTFILES/$src $HOME/$src done < $DOTFILES/dotfiles.txt # Vim - Vundle git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim vim +VundleInstall +qall # AwesomeWM - Beautiful Themes git clone https://github.com/mikar/awesome-themes.git ~/.config/awesome/themes
#!/bin/bash DOTFILES="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" DOTFILES_OLD=$HOME/dotfiles_old DOTFILES_LIST=dotfiles.txt THIS_SCRIPT=$(basename $0) # Create a list of all files (excluding git files, this script, current directory) in the dotfiles directory. rm -f $DOTFILES_LIST find . -type d -name .git -prune -o -not -name "$THIS_SCRIPT" -not -name . -exec readlink -f '{}' \; > $DOTFILES_LIST # Backup all files that currently exist into the folder $DOTFILES_OLD, # while preserving the directory structure and dereferencing links. # rsync prints errors for files which don't exist. They can be ignored. mkdir -p $DOTFILES_OLD && cd rsync -Razq --copy-links --files-from=$DOTFILES/dotfiles.txt . $DOTFILES_OLD/ # Loop over all dotfiles and create symlinks for all. while read src do if [ ! -d $(dirname $HOME/$src) ]; then mkdir -p $(dirname $HOME/$src) fi rm -f $HOME/$src ln -s $DOTFILES/$src $HOME/$src done < $DOTFILES/$DOTFILES_LIST # Vim - Vundle git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim vim +VundleInstall +qall # AwesomeWM - Beautiful Themes git clone https://github.com/mikar/awesome-themes.git ~/.config/awesome/themes
Add flynn user and pass to cluster
#!/bin/bash -e REPO_URI="https://git.v6jv.flynnhub.com/ourcities-rebu-client-develop.git" if [[ "$CIRCLE_BRANCH" == "master" ]]; then REPO_URI="dokku@reboo.org:0-client" fi git fetch --unshallow origin git remote add deploy $REPO_URI GIT_SSL_NO_VERIFY=true git push -f deploy $CIRCLE_SHA1:refs/heads/master
#!/bin/bash -e REPO_URI="https://ubuntu:$FLYNN_KEY@git.v6jv.flynnhub.com/ourcities-rebu-client-develop.git" if [[ "$CIRCLE_BRANCH" == "master" ]]; then REPO_URI="dokku@reboo.org:0-client" fi git fetch --unshallow origin git remote add deploy $REPO_URI GIT_SSL_NO_VERIFY=true git push -f deploy $CIRCLE_SHA1:refs/heads/master
Use tag instead of sha to reference upstream package.json
#!/usr/bin/env nix-shell #! nix-shell -i bash -p nodePackages.node2nix # Download package.json and package-lock.json from the v1.7.0 release curl https://raw.githubusercontent.com/matrix-org/matrix-appservice-slack/d589aa2e258213ec5ee61ab1d6205c4fb56d116d/package.json -o package.json curl https://raw.githubusercontent.com/matrix-org/matrix-appservice-slack/d589aa2e258213ec5ee61ab1d6205c4fb56d116d/package-lock.json -o package-lock.json node2nix \ --nodejs-12 \ --node-env ../../../development/node-packages/node-env.nix \ --development \ --input package.json \ --lock package-lock.json \ --output node-packages.nix \ --composition node-composition.nix \ rm -f package.json package-lock.json
#!/usr/bin/env nix-shell #! nix-shell -i bash -p nodePackages.node2nix # Download package.json and package-lock.json from the v1.7.0 release curl https://raw.githubusercontent.com/matrix-org/matrix-appservice-slack/1.7.0/package.json -o package.json curl https://raw.githubusercontent.com/matrix-org/matrix-appservice-slack/1.7.0/package-lock.json -o package-lock.json node2nix \ --nodejs-12 \ --node-env ../../../development/node-packages/node-env.nix \ --development \ --input package.json \ --lock package-lock.json \ --output node-packages.nix \ --composition node-composition.nix \ rm -f package.json package-lock.json
Use less verbose output for monitoring setup
#!/bin/bash # run this on the aqm-machine cd "$(dirname $(readlink -f $BASH_SOURCE))" . ../vars.sh if [ -n "$IFACE_AQM" ]; then echo "This script must be run on the AQM machine" exit 1 fi if [ -z $TMUX ]; then echo "Run this inside tmux!" exit 1 fi cmds=() cmds[0]="watch -n .2 ./show_setup.sh -vir $IFACE_CLIENTS" #cmds[1]="watch -n .2 ./show_setup.sh -vir $IFACE_SERVERA" sn="setup-$(date +%s)" i=0 for cmd in "${cmds[@]}"; do i=$(($i+1)) if [ $i -eq 1 ]; then tmux new-window -n $sn $cmd else tmux split-window -t $sn $cmd tmux select-layout -t $sn even-horizontal fi done tmux select-layout -t $sn even-horizontal tmux set-window -t $sn synchronize-panes
#!/bin/bash # run this on the aqm-machine cd "$(dirname $(readlink -f $BASH_SOURCE))" . ../vars.sh if [ -n "$IFACE_AQM" ]; then echo "This script must be run on the AQM machine" exit 1 fi if [ -z $TMUX ]; then echo "Run this inside tmux!" exit 1 fi cmds=() cmds[0]="watch -n .2 ./show_setup.sh -v $IFACE_CLIENTS" #cmds[1]="watch -n .2 ./show_setup.sh -v $IFACE_SERVERA" sn="setup-$(date +%s)" i=0 for cmd in "${cmds[@]}"; do i=$(($i+1)) if [ $i -eq 1 ]; then tmux new-window -n $sn $cmd else tmux split-window -t $sn $cmd tmux select-layout -t $sn even-horizontal fi done tmux select-layout -t $sn even-horizontal tmux set-window -t $sn synchronize-panes
Fix pylint install on Travis
#!/bin/bash if [ ${TASK} == "lint" ]; then sudo pip install pylint --user `whoami` fi if [ ${TASK} == "nosetests" ]; then # Create virtual env using system numpy and scipy deactivate virtualenv --system-site-packages testenv source testenv/bin/activate # Install dependencies sudo pip install --upgrade pip sudo pip install numpy sudo pip install scipy sudo pip install pandas sudo pip install scikit-learn # Install TensorFlow if [ ${TRAVIS_OS_NAME} == "linux" ]; then sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl fi if [ ${TRAVIS_OS_NAME} == "osx" ]; then sudo pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl fi # Install test tools sudo pip install nose # Install skflow sudo python setup.py install fi
#!/bin/bash if [ ${TASK} == "lint" ]; then sudo pip install pylint fi if [ ${TASK} == "nosetests" ]; then # Create virtual env using system numpy and scipy deactivate virtualenv --system-site-packages testenv source testenv/bin/activate # Install dependencies sudo pip install --upgrade pip sudo pip install numpy sudo pip install scipy sudo pip install pandas sudo pip install scikit-learn # Install TensorFlow if [ ${TRAVIS_OS_NAME} == "linux" ]; then sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl fi if [ ${TRAVIS_OS_NAME} == "osx" ]; then sudo pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl fi # Install test tools sudo pip install nose # Install skflow sudo python setup.py install fi
Fix dead link in CI
#!/usr/bin/env bash set -Eeuo pipefail cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." git fetch -q https://github.com/docker-library/docs.git master numstat="$(git diff --numstat FETCH_HEAD...HEAD -- '*/README.md')" if [ -n "$numstat" ]; then echo >&2 'Error: at least one repo README.md has changed' echo >&2 'These files are autogenerated, so it is unnecessary to modify them' echo >&2 'Please update content.md and docker-library-bot will take care of README.md' echo >&2 'See: https://github.com/docker-library/docs/#image-namereadmemd' echo >&2 exit 1 fi
#!/usr/bin/env bash set -Eeuo pipefail cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." git fetch -q https://github.com/docker-library/docs.git master numstat="$(git diff --numstat FETCH_HEAD...HEAD -- '*/README.md')" if [ -n "$numstat" ]; then echo >&2 'Error: at least one repo README.md has changed' echo >&2 'These files are autogenerated, so it is unnecessary to modify them' echo >&2 'Please update content.md and docker-library-bot will take care of README.md' echo >&2 'See: https://github.com/docker-library/docs/#readmemd' echo >&2 exit 1 fi
Add grep clause to narrow down port search to dport 8080
#!/bin/bash NEW_PORT=$1 OLD_PORTS=`iptables -t nat -S | grep "\-A OUTPUT" | sed -r 's/^.+ --to-destination :([0-9]+)$/\1/'` # Add new port to preroute if [ -n "${NEW_PORT}" ]; then echo "Adding backend port '${NEW_PORT}' to NAT redirects..." iptables -t nat -A OUTPUT -p tcp --dport 8080 -j DNAT --to-destination :${NEW_PORT} fi # Remove the old ports if [ -n "${OLD_PORTS}" ]; then while read -r OLD_PORT; do echo "Removing old backend port '${OLD_PORT}' from NAT redirects..." iptables -t nat -D OUTPUT -p tcp --dport 8080 -j DNAT --to-destination :${OLD_PORT} done <<< "${OLD_PORTS}" fi exit 0
#!/bin/bash NEW_PORT=$1 OLD_PORTS=`iptables -t nat -S | grep "\-A OUTPUT" | grep "\-\-dport 8080" | sed -r 's/^.+ --to-destination :([0-9]+)$/\1/'` # Add new port to preroute if [ -n "${NEW_PORT}" ]; then echo "Adding backend port '${NEW_PORT}' to NAT redirects..." iptables -t nat -A OUTPUT -p tcp --dport 8080 -j DNAT --to-destination :${NEW_PORT} fi # Remove the old ports if [ -n "${OLD_PORTS}" ]; then while read -r OLD_PORT; do echo "Removing old backend port '${OLD_PORT}' from NAT redirects..." iptables -t nat -D OUTPUT -p tcp --dport 8080 -j DNAT --to-destination :${OLD_PORT} done <<< "${OLD_PORTS}" fi exit 0
Add dos2unix installation & command in Windows provisioning script
#!/usr/bin/env bash source /home/vagrant/venv/bin/activate echo "Re-installing tables for Windows" pip install pip --upgrade > /dev/null pip uninstall -y tables > /dev/null pip install tables --no-binary all > /dev/null deactivate
#!/usr/bin/env bash source /home/vagrant/venv/bin/activate echo "Re-installing tables for Windows" sudo apt-get update > /dev/null sudo apt-get install -y dos2unix > /dev/null dos2unix /vagrant/vagrant_provisioning/supervisor/celery.sh dos2unix /vagrant/vagrant_provisioning/supervisor/gunicorn.sh pip install pip --upgrade > /dev/null pip uninstall -y tables > /dev/null pip install tables --no-binary all > /dev/null deactivate
Mark container as initialized after successful envtpl
#!/bin/sh set -e if [ -n "${MESOS_HOST}" ]; then if [ -z "${COLLECTD_HOST}" ]; then export COLLECTD_HOST="${MESOS_HOST}" fi fi if [ ! -e "/.initialized" ]; then touch "/.initialized" envtpl /etc/collectd/collectd.conf.tpl fi exec gosu nobody collectd -f > /dev/null
#!/bin/sh set -e if [ -n "${MESOS_HOST}" ]; then if [ -z "${COLLECTD_HOST}" ]; then export COLLECTD_HOST="${MESOS_HOST}" fi fi if [ ! -e "/.initialized" ]; then envtpl /etc/collectd/collectd.conf.tpl touch "/.initialized" fi exec gosu nobody collectd -f > /dev/null
Install script for WordPress in French
#!/bin/bash # # wp-install.sh - v1.0 (17/09/2014) # Aris Papathéodorou # Script d’installation de WordPress en français # License : domaine public # # Fonctions : # - Téléchargement de la version indiquée de WordPress en français # - Création dans /home/<user>/ d’un répertoire Web # - Copie de WordPress dans ce répertoire # - Configuration des droits et et de la propriété sur l’ensemble # # Todo : # - Création de la base de donnée # - Configuration du fichier : wp-config.php # # Utilisation : # Le script doit être copié à la racine du répertoire utilisateur # Vérifier que le fichier est bien exécutable. Sinon : chmod u+x wp-install.sh # Lancer : ./wp-install.sh # echo "Version de WordPress à installer" read wpversion echo "Téléchargement de WordPress" curl -o wordpress.tar.gz http://fr.wordpress.org/wordpress-$wpversion-fr_FR.tar.gz tar -xvzf wordpress.tar.gz rm -f wordpress.tar.gz echo "Répertoire d’installation" read htmlforder echo " " echo "Installation de WordPress" mv wordpress/ $htmlforder/ cp -p $htmlforder/wp-config-sample.php $htmlforder/wp-config.php echo "Nom de l’utilisateur (compte)" read username echo " " echo "Finalisation de l’installation" chown $username:www-data $htmlforder/ chmod 664 $htmlforder/ cd $htmlforder/ find . -exec chown $username:www-data {} + find . -type f -exec chmod 664 {} + find . -type d -exec chmod 775 {} + chmod 660 wp-config.php cd .. echo " " echo "Installation réussie de WordPress $wpversion dans $htmlforder"
Add a script to auto-detect latest build
#!/bin/bash # A script to output name of latest (successful) nightly build, given URL where to look and partial build name BUILDCACHE_URL="${1:?undefined}" # usually http://buildcache.cfengine.com/packages/testing-pr/ GREP_EXPR="${2:?undefined}" # usually jenkins-master-nightly-pipeline curl --silent "$BUILDCACHE_URL" | grep "$GREP_EXPR" | sed -r 's_.*<a href="([^"/]*)/">.*_\1_' | sort -rn | while read -r build; do # $build is something like jenkins-master-nightly-pipeline-962 # verify that it has a deb file url="$BUILDCACHE_URL/$build/PACKAGES_HUB_x86_64_linux_ubuntu_16/" if curl --silent "$url" | grep -qF '.deb</a>'; then echo "$build" break fi done
Migrate security policy after app labeling schema changed
#!/bin/sh -e export PATH=/sbin:/usr/sbin:/bin:/usr/bin . /etc/tizen-platform.conf systemctl stop security-manager.service security-manager.socket label_mapping=`mktemp` ### Fetch application label mapping sqlite3 >$label_mapping -noheader -separator ' ' $TZ_SYS_DB/.security-manager.db ' SELECT DISTINCT "User::App::" || app_name, "User::Pkg::" || pkg_name || CASE WHEN is_hybrid THEN "::App::" || app_name ELSE "" END FROM user_app_pkg_view' echo "Migrating policy for `sort -u $label_mapping | wc -l` application labels" ### Migrate Cynara policy generic_buckets="PRIVACY_MANAGER ADMIN MAIN MANIFESTS" usertype_buckets=`ls $TZ_SYS_RO_SHARE/security-manager/policy/usertype-*profile | sed -r 's|.*/usertype-(.*).profile$|USER_TYPE_\1|' | tr '[:lower:]' '[:upper:]'` policy_tmp=`mktemp` for bucket in $generic_buckets $usertype_buckets do [ "$bucket" = "PRIVACY_MANAGER" ] && bucket="" echo "Migrating Cynara bucket '$bucket'" cyad --list-policies=$bucket --all >$policy_tmp cat $label_mapping | while read app_label_old app_label_new do echo '-e s/\\b'$app_label_old'\\b/'$app_label_new'/' done | xargs sed -i $policy_tmp cyad --erase=$bucket --recursive=no --client='#' --user='#' --privilege='#' cyad --set-policy --bucket=$bucket --bulk=- <$policy_tmp done rm -f $policy_tmp ### Migrate security-manager Smack policy echo "Migrating Smack policy" cd $TZ_SYS_VAR/security-manager smackload --clear <rules-merged/rules.merged cat $label_mapping | while read app_label_old app_label_new do echo '-e s/\\b'$app_label_old'\\b/'$app_label_new'/' done | xargs sed -i rules/* `find -type f -name apps-labels` cat rules/* | tee rules-merged/rules.merged | smackload systemctl start security-manager.service security-manager.socket echo "Migration successful" rm -f $label_mapping
Add variant of cp-diffed-configs.sh that takes arbitrary command
for i in $(grep -i '#\s*\(place\|put\) in' . -r | sed 's/^\([^:]*\):\s*#\s*[Pp]\(lace\|ut\)\s*in\s*\([^[:space:]]*\).*/\1:\3/') do git_path=$(echo $i | sed 's/\([^:]*\):\(.*\)/\1/') etc_path=$(echo $i | sed 's/\([^:]*\):\(.*\)/\2/')$(basename $git_path) diff $git_path $etc_path >/dev/null is_diff=$? if [ $is_diff -ne 0 ] then echo $1 $etc_path $git_path $1 $etc_path $git_path fi done
Add a very simple test runner.
#!/bin/bash # Copyright (C) 2017 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. set -o nounset set -o errexit # Check that required variables are explicitly set. GOOGLE_API_KEY="$GOOGLE_API_KEY" GOOGLE_APPLICATION_CREDENTIALS="$GOOGLE_APPLICATION_CREDENTIALS" echo -e "\n\n\nRunning unit tests" mvn test javadoc:javadoc
Add helper script for building reckon gem
#!/bin/bash set -e VERSION=$1 echo "Install github_changelog_generator" gem install --user github_changelog_generator echo "Update 'lib/reckon/version.rb'" echo -e "module Reckon\n VERSION=\"$VERSION\"\nend" > lib/reckon/version.rb echo "Run `bundle install` to build updated Gemfile.lock" bundle install echo "3. Run changelog generator (requires $TOKEN to be your github token)" github_changelog_generator -u cantino -p reckon -t $TOKEN --future-release $VERSION echo "4. Commit changes" git add CHANGELOG.md lib/reckon/version.rb Gemfile.lock git commit -m "Release $VERSION" echo "7. Build new gem" gem build reckon.gemspec echo "5. Tag release" git tag v$VERSION echo "Push changes and tags" git push && git push --tags echo "Push new gem" gem push reckon-$VERSION.gem
Add a script to build the demo
#!/bin/bash echo "-> Build demo" mvn clean package echo "-> Copy built demo" rm -rf docs/* cp -r target/vue-gwt-demo-*/VueGwtDemo docs/ cp target/vue-gwt-demo-*/index.html docs/ git add docs echo "Success!"
Add dummy example for doing a ROM build
# # Some compilation examples for ROM strings/objects # set -e # Run dist manually, ROM support is not enabled by default so add --rom-support make clean python util/make_dist.py \ --rom-support \ --minify closure # Run genconfig.py and create a custom duk_config.h with ROM support etc. python config/genconfig.py \ --metadata config \ --output dist/src/duk_config.h \ -DDUK_USE_ROM_STRINGS \ -DDUK_USE_ROM_OBJECTS \ -DDUK_USE_ROM_GLOBAL_INHERIT \ -DDUK_USE_DEBUG -DDUK_USE_DPRINT -DDUK_USE_ASSERTIONS \ autodetect-header cp dist/src/duk_config.h dist/src-separate/ #gcc -std=c99 -Wall -Wextra -Os -Idist/src-separate/ -Idist/examples/cmdline dist/src-separate/*.c dist/examples/cmdline/duk_cmdline.c -o _duk -lm make duk dukd # Ajduk depends on 'make ajduk' and uses DUK_OPT_xxx feature options. # This would ideally be done directly using genconfig.py without # --support-feature-options by moving the options into a genconfig # YAML config file. python config/genconfig.py \ --metadata config \ --output dist/src/duk_config.h \ -DDUK_USE_ROM_STRINGS \ -DDUK_USE_ROM_OBJECTS \ -DDUK_USE_ROM_GLOBAL_INHERIT \ --support-feature-options \ autodetect-header cp dist/src/duk_config.h dist/src-separate/ #gcc -std=c99 -Wall -Wextra -Os -Idist/src-separate/ -Idist/examples/cmdline dist/src-separate/*.c dist/examples/cmdline/duk_cmdline.c -o _duk -lm make ajduk
Add R source package to the zip file.
#!/bin/bash # # Prepares distribution package which can be uploaded into s3 # Called by 'gradle makeH2oDevDist' # set -e set -x # Set common directory variables. TOPDIR=$(cd `dirname $0` && pwd) IMAGEDIR=${TOPDIR}/h2o-dist/tmp/h2o-dev-${PROJECT_VERSION} # Create target dir, which is uploaded to s3. cd $TOPDIR rm -fr target mkdir target rm -fr h2o-dist/tmp mkdir -p $IMAGEDIR cp build/h2o.jar $IMAGEDIR cd $IMAGEDIR/.. zip -r h2o-dev-${PROJECT_VERSION}.zip h2o-dev-${PROJECT_VERSION} mv h2o-dev-${PROJECT_VERSION}.zip ${TOPDIR}/target cd $TOPDIR # Create index file. cat h2o-dist/index.html | sed -e "s/SUBST_PROJECT_VERSION/${PROJECT_VERSION}/g" > target/index.html
#!/bin/bash # # Prepares distribution package which can be uploaded into s3 # Called by 'gradle makeH2oDevDist' # set -e set -x # Set common directory variables. TOPDIR=$(cd `dirname $0` && pwd) IMAGEDIR=${TOPDIR}/h2o-dist/tmp/h2o-dev-${PROJECT_VERSION} # Create target dir, which is uploaded to s3. cd $TOPDIR rm -fr target mkdir target rm -fr h2o-dist/tmp mkdir -p $IMAGEDIR cp build/h2o.jar $IMAGEDIR mkdir $IMAGEDIR/R cp h2o-r/R/src/contrib/h2o_${PROJECT_VERSION}.tar.gz $IMAGEDIR/R cd $IMAGEDIR/.. zip -r h2o-dev-${PROJECT_VERSION}.zip h2o-dev-${PROJECT_VERSION} mv h2o-dev-${PROJECT_VERSION}.zip ${TOPDIR}/target cd $TOPDIR # Create index file. cat h2o-dist/index.html | sed -e "s/SUBST_PROJECT_VERSION/${PROJECT_VERSION}/g" > target/index.html
Add quantum global restart script to utils
#!/usr/bin/env bash
# Restart every quantum (neutron) service across the cluster via salt.

# Network-controller nodes run the full set of quantum services.
for svc in quantum-server \
           quantum-l3-agent \
           quantum-plugin-openvswitch-agent \
           quantum-dhcp-agent \
           quantum-metadata-agent; do
    salt netcontrol\* service.restart "$svc"
done

# Compute nodes only run the openvswitch plugin agent.
salt compute\* service.restart quantum-plugin-openvswitch-agent
Add build scripts. Set ignore property for all the generated packages.dita files.
#!/bin/sh -e ################################################################################ ## ## Licensed to the Apache Software Foundation (ASF) under one or more ## contributor license agreements. See the NOTICE file distributed with ## this work for additional information regarding copyright ownership. ## The ASF licenses this file to You under the Apache License, Version 2.0 ## (the "License"); you may not use this file except in compliance with ## the License. You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ################################################################################ # These commands should be executed in the root directory. cd .. # find the line with the format 'Last Changed Rev: 1354196', and remove # 'Last Changed Rev: ' from the beginning of the line BUILD_NUMBER=`svn info | grep 'Last Changed Rev:' | sed 's/^Last Changed Rev: //'` echo BUILD_NUMBER is $BUILD_NUMBER # Tag the release build. Can svn delete the tag if the build is bad or pulled. TAG_NAME="apache-flex-sdk-4.8.0-RC1" #svn copy -r $BUILD_NUMBER -m "Tagging build $BUILD_NUMBER." \ # https://svn.apache.org/repos/asf/incubator/flex/trunk \ # https://svn.apache.org/repos/asf/incubator/flex/tags/$TAG_NAME # Do a release build. # Set the build number in flex-sdk-description.xml # Don't prompt for optional packages or acknowledgment of reciprocal licenses ant -Dbuild.number=$BUILD_NUMBER -Dbuild.noprompt= release # Build the asdoc package. ant -Dbuild.number=$BUILD_NUMBER asdoc-package # sign_and_hash.sh is an Apache tool. 
# Creates detached ascii signatures and md5 hashes for each of the files in the # current directory. # Assumes that you have a pgp id and keypair set up and prompts for the # passphrase for each signature created. # cd out ../build/sign_and_hash.sh
Add a script that sets up RabbitMQ environment (vhost, user, permissions)
#!/bin/sh
# Provision the local RabbitMQ broker for Travis CI development:
# create the vhost, add the server user, and grant full permissions
# (configure / write / read) to both the server user and guest.

VHOST="travisci.development"

rabbitmqctl add_vhost "$VHOST"
rabbitmqctl add_user travisci_server travisci_server_password
rabbitmqctl set_permissions -p "$VHOST" travisci_server ".*" ".*" ".*"
rabbitmqctl set_permissions -p "$VHOST" guest ".*" ".*" ".*"
Add example content link script.
#!/bin/sh
# EXAMPLE. To be replaced.
#
# Moves a data file into the local ExternalData object store (keyed by its
# MD5 hash) and leaves a .md5 content link in its place.

OBJECT_STORES_DIR=~/progs/ITKExamplesExternalData

# Compute the file's MD5 and write the content link next to the original.
# Quotes protect file names containing spaces.
md5=$(md5sum "$1" | cut -d ' ' -f 1)
echo "$md5" > "$1.md5"

# Store the object under its hash; create the store directory if it does
# not exist yet (the original cp failed on a fresh checkout).
mkdir -p "$OBJECT_STORES_DIR/MD5"
cp "$1" "$OBJECT_STORES_DIR/MD5/$md5"
rm "$1"
Add test script for identifying Linux/Unix OS
#!/usr/bin/env bash # Gather kernel version for evaluation REL=$(uname -rv) # Test for Ubuntu if [[ $REL == *"Ubuntu"* ]]; then echo "Ubuntu" fi # Test for macOS if [[ $REL == *"Darwin"* ]]; then echo "macOS" fi # Test for Fedora if [[ $REL == *"fc"* ]]; then echo "Fedora" fi
Add a util that will help fix ubuntus network after openstack is cleared
#!/bin/bash -x
# Restore a plain DHCP network configuration after OpenStack has been
# cleared (removes leftover bridges and rewrites /etc/network/interfaces).

ETH_SRC="eth0"

echo "Clearing your network up."
if [[ -n `brctl show | grep -i br100` ]]
then
    echo "Clearing br100 and making $ETH_SRC be the real interface."
    #sudo ifconfig $ETH_SRC down
    #sudo ifconfig br100 down
    #sudo brctl delif br100 $ETH_SRC
    #sudo brctl delbr br100
fi

if [[ -n `brctl show | grep -i virbr0` ]]
then
    echo "Removing virbr0"
    sudo ifconfig virbr0 down
    sudo brctl delbr virbr0
fi

if [[ -z `grep "iface $ETH_SRC" /etc/network/interfaces` ]]
then
    echo "Readjusting /etc/network/interfaces to have DHCP on for $ETH_SRC"
    # BUGFIX: 'sudo cat > file' does not work — the redirection is performed
    # by the unprivileged shell before sudo runs, so writing a root-owned
    # file fails.  'sudo tee' performs the write with elevated privileges.
    sudo tee /etc/network/interfaces > /dev/null <<EOF
auto $ETH_SRC
iface $ETH_SRC inet dhcp
EOF
    cat /etc/network/interfaces
fi

echo "Bringing back up $ETH_SRC"
sudo ifup $ETH_SRC
Add script to check if running.
#!/bin/bash
# Cron-style watchdog: if no node process is running, restart the
# phone-booth application.

# Restart command: kill any stale player and relaunch the app.
# NOTE: a string containing ';' separators cannot be executed via plain
# word splitting ($RESTART) — the semicolons would be passed as literal
# arguments to 'sudo'.  It must be run through 'bash -c'.
RESTART="sudo killall mpg321; cd /home/pi/Code/phone-booth; sudo nohup node index.js > ~/phone.log"

PGREP="pgrep"
NODE="node"

# find node pid
$PGREP ${NODE}

if [ $? -ne 0 ] # if node not running
then
    # restart service (BUGFIX: was '$RESTART', which never executed the
    # compound command correctly)
    bash -c "$RESTART"
fi
Add a script for renaming the host
#!/bin/sh
# host_rename.sh - Rename host
#
# BUGFIX: the original shebang was '#! bash' — the kernel requires an
# absolute interpreter path, so the script would not execute directly.
#
# Copyright (c) 2016 Elisha Kendagor kosistudio@live.com
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

new_name=$1
old_name=$(hostname)

usage() {
    echo "usage: $0 <new_name>"
    exit 1
}

[ "$1" ] || usage

# Exit with message $2 when status $1 is non-zero.
checkfail() {
    if [ $1 -gt 0 ]; then
        echo $2
        exit 1
    fi
    return
}

# Verify sed is available without spewing version text to the terminal.
sed --version > /dev/null 2>&1
checkfail $? "Couldn't execute sed"

# Patch every file that embeds the hostname; .old backups are kept.
for file in \
    /etc/hostname \
    /etc/hosts \
    /etc/ssh/ssh_host_rsa_key.pub \
    /etc/ssh/ssh_host_dsa_key.pub \
    /etc/ssh/ssh_host_ed25519_key.pub \
    /etc/ssh/ssh_host_ecdsa_key.pub; do
    if [ -f $file ]; then
        sed -i.old -e "s:$old_name:$new_name:g" $file
        checkfail $? "Patching failed: $file"
    fi
done

# Recreate the self-signed certificate created by the ssl-cert package
# using the hostname currently configured on your computer.
make-ssl-cert generate-default-snakeoil --force-overwrite

echo "You need to restart your system"
Update ceph repo in overcloud img disk
# Update the ceph yum repository inside the overcloud disk image.

# Install the image-editing tooling (-y avoids the interactive prompt so
# the script can run unattended — the original would hang waiting for input).
sudo yum install -y libguestfs-tools

# Upload updated ceph repo to img
guestfish -i -a overcloud-full.qcow2 upload /etc/yum.repos.d/ceph.repo /etc/yum.repos.d/ceph.repo

# update packages in image
virt-customize -a overcloud-full.qcow2 --update

# Or if ceph is not there
#virt-customize -a overcloud-full.qcow2 --install ceph
Add script to collect gops pprof-heap from all pods
#!/bin/bash # # Copyright 2018 Authors of Cilium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -x trap cleanup EXIT TMPDIR=cilium-heap-$(date -u '+%Y%m%d-%H%M%S') mkdir -p $TMPDIR function cleanup { rm -rf $TMPDIR } pods=$(kubectl -n kube-system get pods -l k8s-app=cilium | awk '{print $1}' | grep cilium) IFS=$'\r\n' for p in $pods; do PROFILE=$(kubectl -n kube-system exec -ti $p -- gops pprof-heap 1) PROFILE=$(echo $PROFILE | awk '{print $5}') kubectl cp kube-system/$p:$PROFILE $TMPDIR/${p}_$(basename $PROFILE) done zip -r ${TMPDIR}.zip $TMPDIR
Add a script to fix mtimes of checked out files
#!/bin/bash
# Copyright (C) 2014 Craig Phillips. All rights reserved.
#
# Sets each git-tracked file's mtime to its last author commit time so GNU
# make dependency resolution works after a Jenkins Git-Plugin checkout.

git_mtime_fix=$(readlink -f "$BASH_SOURCE")

function usage() {
    cat <<USAGE
Usage: ${git_mtime_fix##*/} [options]

Summary:
    When Jenkins Git-Plugin checks out files from a Git repository after
    pulling changes, it has the side effect of updating the file
    modification timestamps such that they are all the current system
    time.  This affects how GNU make is then able to resolve
    dependencies.  This script runs over each file known to the Git
    repository and sets the modification time to the author commit time
    of each file.

Options:
    -C --chdir <PATH>     Change directory before running.
    -v --verbose          Set verbose output.
USAGE
}

# Print an error prefixed with the script name and exit non-zero.
# BUGFIX: the original used ${git_mtime_fix%/*}, which strips the
# *filename* and prints the directory; ##*/ yields the basename.
function err() {
    echo >&2 "${git_mtime_fix##*/}: $*"
    exit 1
}

# Print the author commit time (epoch seconds) of the last commit that
# touched the path given in $1.
function git_mtime() {
    local sha1

    sha1=$(git rev-list --max-count=1 HEAD "$1") &&
        git show --pretty=format:%at --abbrev-commit $sha1 | head -1
}

# FD 5 is the verbose channel; discarded unless -v is given.
exec 5>/dev/null

while (( $# > 0 )) ; do
    case $1 in
    (-\?|--help)
        usage
        exit 0
        ;;
    (-C|--chdir)
        cd "$2"
        shift
        ;;
    (-v|--verbose)
        exec 5>&2
        ;;
    (*)
        err "Invalid argument: $1"
        ;;
    esac
    shift
done

git ls-files | while read f ; do
    printf >&5 "%2s %s" " -" "$f"

    if [[ -f "$f" ]] ; then
        # Only touch files with no local modifications.
        stat=$(git status -s "$f" | cut -c -2)

        if [[ ! $stat ]] ; then
            git_mtime=$(git_mtime "$f") &&
            if [[ $git_mtime ]] ; then
                # BUGFIX: the original fallback assigned 'loc_time=0'
                # (typo), leaving loc_mtime unset when stat failed.
                loc_mtime=$(stat -c %Y "$f") || loc_mtime=0

                if (( git_mtime == loc_mtime )) ; then
                    printf >&5 "\033[2K\r"
                    continue
                else
                    touch -m -d "$(date -d @$git_mtime)" "$f" &&
                    if (( git_mtime > loc_mtime )) ; then
                        stat=" T"
                    else
                        stat=" t"
                    fi
                fi
            fi
        fi
    else
        stat=" D"
    fi

    printf >&5 "\r%2s %s\n" "${stat:-ER}" "$f"
done
Add job script for consensus calling.
#!/bin/bash #$ -q 1-day #$ -cwd . ~/.bash_profile > /dev/null sample=$1 datadir=$2 outdir=$3 for i in 1 2 3 4; do concall=$datadir/$sample.call_n$i.snv_AF.txt printf "#chr\tpos\tref\talt\tsnv_AF\n" > $concall tail -qn+2 $datadir/$sample.{mutect,somaticsniper,strelka,varscan}.snv_AF.txt \ |cut -f-5 \ |q -t "SELECT c1, c2, c3, c4, c5, count(*) FROM - GROUP BY c1, c2, c3, c4, c5" \ |awk -v call_n=$i '$6 == call_n' \ |cut -f-5 >> $concall done call_n4=$datadir/$sample.call_n4.snv_AF.txt cutoff=$outdir/$sample.snv_call_n4_${af/0./}AFcutoff.txt printf "#chr\tpos\tref\talt\tsnv_AF\n" > $cutoff tail -n+2 $call_n4 \ |awk -v af=$af '$5 >= af' >> $cutoff
Add script to change git repo's author name
#!/bin/sh
# Rewrite author/committer name and email across the whole history.
#
# Usage: change_author.sh <old-email> <correct-name> <correct-email>
#
# BUGFIX: the original referenced $1/$2/$3 *inside* the single-quoted
# --env-filter string.  Those expand to the filter shell's own (empty)
# positional parameters, so the rewrite never matched anything.  The
# values are exported here so the filter can read them from the
# environment instead.
OLD_EMAIL="$1"
CORRECT_NAME="$2"
CORRECT_EMAIL="$3"
export OLD_EMAIL CORRECT_NAME CORRECT_EMAIL

git filter-branch --env-filter '
if [ "$GIT_COMMITTER_EMAIL" = "$OLD_EMAIL" ]
then
    export GIT_COMMITTER_NAME="$CORRECT_NAME"
    export GIT_COMMITTER_EMAIL="$CORRECT_EMAIL"
fi
if [ "$GIT_AUTHOR_EMAIL" = "$OLD_EMAIL" ]
then
    export GIT_AUTHOR_NAME="$CORRECT_NAME"
    export GIT_AUTHOR_EMAIL="$CORRECT_EMAIL"
fi
' --tag-name-filter cat -- --branches --tags
Add cron job script for republishing local plex server.
#! /bin/bash # Cron job script for republishing local plex server. RESPONSE=$(curl -X GET http://localhost:32400/myplex/account ) if [[ $RESPONSE != *"publicPort"* ]] then curl -v -X PUT http://localhost:32400/:/prefs?PublishServerOnPlexOnlineKey=true fi
Add a script to quickly and easily initialize the project.
#!/bin/bash # Use this script to setup the Xcode project # Remove existing project file if any rm -r Riot.xcodeproj # Generate project file xcodegen # Use appropriated dependencies # Check if Podfile changed in unstaged git diff --exit-code --quiet --name-only Podfile PODFILE_HAS_CHANGED_UNSTAGED=$? # Check if Podfile changed in staged git diff --staged --exit-code --quiet --name-only Podfile PODFILE_HAS_CHANGED_STAGED=$? # If Podfile has changed locally do not modify it # otherwise use the appropriated dependencies according to the current branch if [[ "$PODFILE_HAS_CHANGED_UNSTAGED" -eq 1 || "$PODFILE_HAS_CHANGED_STAGED" -eq 1 ]]; then echo "Podfile has been changed locally do not modify it" else echo "Podfile has not been changed locally, use appropriated dependencies according to the current branch" bundle exec fastlane point_dependencies_to_same_feature fi # Install dependencies pod install
Add script to clone common openstack repos
#!/bin/bash # Clone my common OpenStack projects git clone https://github.com/openstack/interop.git git clone https://github.com/openstack/refstack.git git clone https://github.com/openstack/refstack-client.git git clone https://github.com/openstack/tempest.git git clone https://github.com/openstack/openstack-ansible.git git clone https://github.com/openstack/openstack-ansible-os_tempest.git git clone https://github.com/openstack-infra/project-config.git git clone https://github.com/dlux/InstallScripts.git
Add script to publish docker packages
#!/bin/bash
# Publish the postgres-websockets docker image to Docker Hub and GitHub
# Packages, tagged both 'latest' and with the cabal package version.
set -e

# Extract the version from the cabal file (second field of 'version: X').
VERSION=$(awk '/^version: / { print $2 };' < postgres-websockets.cabal)

# Docker Hub: push explicit tags (pushing the bare repo name is ambiguous
# across docker versions — older clients push *all* local tags).
docker tag diogob/postgres-websockets:latest diogob/postgres-websockets:$VERSION
docker push diogob/postgres-websockets:latest
docker push diogob/postgres-websockets:$VERSION

# GitHub Packages: tag and push the same two tags.
docker tag diogob/postgres-websockets docker.pkg.github.com/diogob/postgres-websockets/postgres-websockets:$VERSION
docker tag diogob/postgres-websockets docker.pkg.github.com/diogob/postgres-websockets/postgres-websockets:latest
docker push docker.pkg.github.com/diogob/postgres-websockets/postgres-websockets:latest
docker push docker.pkg.github.com/diogob/postgres-websockets/postgres-websockets:$VERSION
Make updating all go packages easier
#!/usr/bin/env bash
# Update every Go package referenced from pkgs/top-level/all-packages.nix
# by delegating to update-go-package.sh (located next to this script).

updater="$(dirname "$(readlink -f "$0")")/update-go-package.sh"

# Walk up to the repository top level.
# BUGFIX: the original loop never terminated when run outside the repo
# ('cd ..' at / is a no-op); bail out once the filesystem root is reached.
while ! [ -d "pkgs/top-level" ]; do
    if [ "$PWD" = "/" ]; then
        echo "pkgs/top-level not found in any parent directory" >&2
        exit 1
    fi
    cd ..
done

# Intentionally unquoted: the package names must word-split into separate
# arguments for the updater.  (grep reads the file directly — no 'cat'.)
exec "$updater" $(grep pkgs.goPackages pkgs/top-level/all-packages.nix | awk -F. '{print $3}' | tr '\n' ' ')
Correct symlink creation of Mercurial
#!/bin/sh # Package PACKAGE="mercurial" DNAME="Mercurial" # Others INSTALL_DIR="/usr/local/${PACKAGE}" PYTHON_DIR="/usr/local/python" VIRTUALENV="${PYTHON_DIR}/bin/virtualenv" PATH="${INSTALL_DIR}/env/bin:${INSTALL_DIR}/bin:${PYTHON_DIR}/bin:${PATH}" preinst () { exit 0 } postinst () { # Link ln -s ${SYNOPKG_PKGDEST} ${INSTALL_DIR} # Create a Python virtualenv ${VIRTUALENV} --system-site-packages ${INSTALL_DIR}/env > /dev/null # Install the wheels ${INSTALL_DIR}/env/bin/pip install --use-wheel --no-deps --no-index -U --force-reinstall -f ${INSTALL_DIR}/share/wheelhouse -r ${INSTALL_DIR}/share/wheelhouse/requirements.txt > /dev/null 2>&1 # Add symlink ln -s ${INSTALL_DIR}/env/bin/hg /usr/local/bin/hg exit 0 } preuninst () { exit 0 } postuninst () { # Remove link rm -f ${INSTALL_DIR} rm -f /usr/local/bin/hg exit 0 } preupgrade () { exit 0 } postupgrade () { exit 0 }
#!/bin/sh # Package PACKAGE="mercurial" DNAME="Mercurial" # Others INSTALL_DIR="/usr/local/${PACKAGE}" PYTHON_DIR="/usr/local/python" VIRTUALENV="${PYTHON_DIR}/bin/virtualenv" PATH="${INSTALL_DIR}/env/bin:${INSTALL_DIR}/bin:${PYTHON_DIR}/bin:${PATH}" preinst () { exit 0 } postinst () { # Link ln -s ${SYNOPKG_PKGDEST} ${INSTALL_DIR} # Create a Python virtualenv ${VIRTUALENV} --system-site-packages ${INSTALL_DIR}/env > /dev/null # Install the wheels ${INSTALL_DIR}/env/bin/pip install --use-wheel --no-deps --no-index -U --force-reinstall -f ${INSTALL_DIR}/share/wheelhouse -r ${INSTALL_DIR}/share/wheelhouse/requirements.txt > /dev/null 2>&1 # Add symlink mkdir -p /usr/local/bin ln -s ${INSTALL_DIR}/env/bin/hg /usr/local/bin/hg exit 0 } preuninst () { exit 0 } postuninst () { # Remove link rm -f ${INSTALL_DIR} rm -f /usr/local/bin/hg exit 0 } preupgrade () { exit 0 } postupgrade () { exit 0 }
Add telemetry harness generation script.
#!/bin/bash # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # This is a script meant to be run by a bot to periodically release new versions # of the telemetry harness. It needs to be run from one level above src/ (such # as build/). src/tools/telemetry/find_dependencies \ src/tools/perf/run_benchmark \ src/tools/perf/run_measurement \ src/tools/perf/record_wpr \ src/content/test/gpu/run_gpu_test.py \ -z $1
Create a launch instance script
#!/bin/bash # # Set up environment for mobile-push microservice # # ARGV # if [[ -z "$1" ]]; then echo "usage: $0 <region>" exit 1 fi region=$1 # # utilities # err() { echo "[$(date +'%Y-%m-%dT%H:%M:%Sz')]: $@" >&2 } # # apt # apt-get update apt-get install -y supervisor git make build-essential python-dev ntp # # install AWS CodeDeploy agent # apt-get install -y python-pip ruby2.0 pip install awscli cd /home/ubuntu aws s3 cp s3://aws-codedeploy-$region/latest/install . --region $region chmod +x ./install ./install auto rm ./install # # set up code directory structure # mkdir -p /srv/news-feed-writer/release mkdir -p /srv/news-feed-writer/share chown -R ubuntu:ubuntu /srv/news-feed-writer mkdir -p /srv/news-feed-writer-staging/release mkdir -p /srv/news-feed-writer-staging/share chown -R ubuntu:ubuntu /srv/news-feed-writer-staging # # supervisor # cp ./news-feed-writer.conf /etc/supervisor/conf.d/ cp ./news-feed-writer-staging.conf /etc/supervisor/conf.d/
Add script that can create screenshot for Xresources 'theme' for webpage.
#!/usr/bin/env bash ## # Script used to create a screenshot of rofi. # License: See rofi ## RESET="\e[0m" BOLD="\e[1m" COLOR_BLACK="\e[0;30m" COLOR_RED="\e[0;31m" COLOR_GREEN="\e[0;32m" COLOR_YELLOW="\e[0;33m" COLOR_BLUE="\e[0;34m" XRDB_FILE=$1 shift XVFB=$(which Xvfb 2> /dev/null) XDOTOOL=$(which xdotool 2> /dev/null) XRDB=$(which xrdb 2> /dev/null) ROFI=$(which rofi 2> /dev/null) function check_tool() { if [ -z "${1}" ] then echo -e "${COLOR_RED}Failed to find:${RESET} $2" exit 1 fi } XPID= function create_fake_x ( ) { export DISPLAY=":$1" echo "Starting fake X: ${DISPLAY}" ${XVFB} ${DISPLAY} -screen 0 1024x768x24 & XPID=$! sleep 1 } function destroy_fake_x ( ) { if [ -n "${XPID}" ] then echo "Stopping fake X: ${XPID}" kill ${XPID} wait ${XPID} fi } function generate() { echo "Normal" echo "Alternative" echo "Urgent" echo "Urgent alternative" echo "Active" echo "Active alternative" echo "Normal selected" } # Check required tools check_tool "${XVFB}" "Xvfb (X on virtual framebuffer)" check_tool "${XDOTOOL}" "commandline X11 automation tool" check_tool "${XRDB}" "X server resource database utility" check_tool "${ROFI}" "Rofi, the tool we are screenshotting" # Create random display number VDISPLAY=${RANDOM} let "VDISPLAY %= 20" VDISPLAY=$((VDISPLAY+100)) echo "Xvfb: ${XVFB}" echo "Xresources: ${XRDB_FILE}" echo "Xvfb Display: ${VDISPLAY}" ROFI_OPTIONS="-selected-row 6 -u 2,3 -a 4,5" export DISPLAY=${VDISPLAY} # Create fake X11 create_fake_x ${VDISPLAY} # Load Xresources if specified. if [ -n "${XRDB_FILE}" ] then echo -e "${COLOR_YELLOW}Loading Xresources:${RESET} ${XRDB_FILE}" ${XRDB} -retain -load ${XRDB_FILE} fi (generate | ${ROFI} -dmenu ${ROFI_OPTIONS} > /dev/null )& sleep 1 ${XDOTOOL} key Alt+S ${XDOTOOL} key Return sleep 2 destroy_fake_x
Add utility to compare JSON from two database's.
#!/bin/bash -e
# check_json.sh <database a> <database b>
#
# Tool for comparing database JSON outputs from two databases.

DIR_A=$1
DIR_B=$2

# Iterate with a shell glob instead of parsing `ls` output — the original
# `$( ls ${DIR_A}/*.json )` broke on file names containing spaces.
for A_JSON_IN in "${DIR_A}"/*.json; do
    A_JSON_OUT="$(mktemp)_a"
    B_JSON_OUT="$(mktemp)_b"
    B_JSON_IN="${DIR_B}/$(basename "${A_JSON_IN}")"

    if [ ! -f "${B_JSON_IN}" ]; then
        echo "${B_JSON_IN} not found!"
        continue
    fi

    # Normalise both files through utils.xjson before diffing.
    python3 -m utils.xjson "${A_JSON_IN}" > "${A_JSON_OUT}"
    python3 -m utils.xjson "${B_JSON_IN}" > "${B_JSON_OUT}"

    echo "Comparing $(basename "${A_JSON_IN}")"
    # '|| true' keeps -e from aborting on the first difference.
    diff -U 3 "${A_JSON_OUT}" "${B_JSON_OUT}" || true
done
Add script to easily reset Keycloak github provider client ID and secret to enable rep
#!/usr/bin/env bash usage() { echo "Usage: $(basename $0) <KEYCLOAK_URL>" exit 1 } KEYCLOAK_URL=${1} if [ -z "${KEYCLOAK_URL}" ]; then usage fi if ! hash jq 2>/dev/null; then echo "This script requires \`jq\` - please see https://stedolan.github.io/jq/download/ or run \`brew install jq\` if you're on OS X" exit 1 fi KC_TOKEN=$(curl "${KEYCLOAK_URL}/auth/realms/master/protocol/openid-connect/token" \ -d "client_id=admin-cli" \ -d "username=$(oc get secrets -ojsonpath={.data.username} syndesis-keycloak-admin|base64 -d)" \ -d "password=$(oc get secrets -ojsonpath={.data.password} syndesis-keycloak-admin|base64 -d)" \ -d "grant_type=password" \ -fsSLk | \ jq -r .access_token) KC_GITHUB_IDP=$(curl "${KEYCLOAK_URL}/auth/admin/realms/syndesis/identity-provider/instances/github" \ -fsSLk \ -k -vvv \ -H "Authorization: Bearer ${KC_TOKEN}" | \ sed -e 's/"clientId":"[a-zA-Z0-9]\+",/"clientId":"dummy",/' -e 's/"clientSecret":"\*\+",/"clientSecret":"dummy",/') curl "${KEYCLOAK_URL}/auth/admin/realms/syndesis/identity-provider/instances/github" \ -XPUT \ -fsSLk \ -k -vvv \ -H "Authorization: Bearer ${KC_TOKEN}" \ -H "Content-Type: application/json" \ -d "${KC_GITHUB_IDP}"
Add example with eps 0.1
#!/bin/bash ./bin/HFO --offense-agents=2 --defense-npcs=3 --offense-npcs=1 --trials 20 --headless & sleep 5 # -x is needed to skip first line - otherwise whatever default python version is will run python2.7 -x ./example/high_level_custom_agent.py --eps 0.2 --numTeammates=2 --numOpponents=3 --port 6000 &> agent1.txt & sleep 5 python3 -x ./example/high_level_custom_agent.py --eps 0.2 --numTeammates=2 --numOpponents=3 --port 6000 &> agent2.txt & # The magic line # $$ holds the PID for this script # Negation means kill by process group id instead of PID trap "kill -TERM -$$" SIGINT wait
Add script to add HD output mode to HDMI1
#!/bin/sh
# Register a 1920x1080 modeline with the X server and make it selectable
# on the HDMI1 output.

MODE_NAME="1920x1080"

xrandr --newmode "$MODE_NAME" 141.50 1920 2032 2232 2544 1080 1083 1088 1114 -hsync +vsync
xrandr --addmode HDMI1 "$MODE_NAME"
Add Travis check for duplicate includes
#!/bin/bash # # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Check for duplicate includes. filter_suffix() { git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "/(leveldb|secp256k1|univalue)/" } EXIT_CODE=0 for HEADER_FILE in $(filter_suffix h); do DUPLICATE_INCLUDES_IN_HEADER_FILE=$(grep -E "^#include " < "${HEADER_FILE}" | sort | uniq -d) if [[ ${DUPLICATE_INCLUDES_IN_HEADER_FILE} != "" ]]; then echo "Duplicate include(s) in ${HEADER_FILE}:" echo "${DUPLICATE_INCLUDES_IN_HEADER_FILE}" echo EXIT_CODE=1 fi CPP_FILE=${HEADER_FILE/%\.h/.cpp} if [[ ! -e $CPP_FILE ]]; then continue fi DUPLICATE_INCLUDES_IN_HEADER_AND_CPP_FILES=$(grep -hE "^#include " <(sort -u < "${HEADER_FILE}") <(sort -u < "${CPP_FILE}") | grep -E "^#include " | sort | uniq -d) if [[ ${DUPLICATE_INCLUDES_IN_HEADER_AND_CPP_FILES} != "" ]]; then echo "Include(s) from ${HEADER_FILE} duplicated in ${CPP_FILE}:" echo "${DUPLICATE_INCLUDES_IN_HEADER_AND_CPP_FILES}" echo EXIT_CODE=1 fi done for CPP_FILE in $(filter_suffix cpp); do DUPLICATE_INCLUDES_IN_CPP_FILE=$(grep -E "^#include " < "${CPP_FILE}" | sort | uniq -d) if [[ ${DUPLICATE_INCLUDES_IN_CPP_FILE} != "" ]]; then echo "Duplicate include(s) in ${CPP_FILE}:" echo "${DUPLICATE_INCLUDES_IN_CPP_FILE}" echo EXIT_CODE=1 fi done exit ${EXIT_CODE}
Add steps to setup ssh client.
# Description: Steps to setup an SSH Client # Steps for setting up an SSH client are as follows # 1. Generate a key pair using ssh-keygen. # 2. Setup local host (which runs SSH Client). # 3. Setup remote host (which runs SSH Server). # Step 1: SSH Key Generation # ========================== # 1. Key generation is a one time activity which generates 2 files containing a private key and a public key. # 2. Two files will be generated - key-file containing a private key & key-file.pub containing a public key. ssh-keygen -t rsa -b 4096 -f key-file # Generate 4096 bit RSA key pairs. Check ssh-keygen.sh for details. ssh-keygen -p -f key-file # Change the passphrase for an existing file. This can be used to remove the # passphrase too. ssh-keygen -y -f private-key-file # Generate PUBLIC key from private key. # Step 2: Setup Local Host (which runs SSH Client) # ================================================ # 1. Under home directory on the local host, setup .ssh directory and a .ssh/config file with appropriate # permissions. # 2. Create the content for .ssh/config using the template file ssh_setup_config. # 3. Alias ssh to 'ssh -2' in the bash profile file and make it more secure. mkdir ~/.ssh touch ~/.ssh/config chmod 700 ~/.ssh chmod 600 ~/.ssh/* # Step 3: Setup remote host (which runs SSH Server) # ================================================= # 1. Under home directory on the remote machine, setup .ssh directory and a .ssh/authorized_keys file with # appropriate permissions. # 2. Add the content of the public key (key-file.pub) to the file ~/.ssh/authorized_keys mkdir ~/.ssh touch ~/.ssh/authorized_keys chmod 700 ~/.ssh chmod 600 ~/.ssh/* # Sometime the permission should be 644 for the authorized_key. # Example Digital Ocean. # Cool Tricks # None # TODO # None
Add script to dump data for DB Structure evolution
#!/bin/sh mysqldump -u ***_USER_*** -p***_PASSWD_*** ***_DATABASE_*** --no-create-info --complete-insert > DATA.MY-GRIMDAWN-GEAR.SQL
Add the beginnings of a idempotent script to work with quantum bridges
#!/usr/bin/env bash
set -e

# This script adds the necessary bridges for quantum/neutron to use vlan
# network type, idempotently.  This script is meant to be called by salt.
# example: ./setup_bridges -p br-osgn -b eth2 -i br-int
#
# BUGFIXES vs. the original:
#  * getopts only supports single-character options, so the "pb" flag could
#    never be parsed; the physical-port flag is now '-b'.
#  * bridge_exist tested `[ -z "echo $bridges | grep $1" ]` — the -z of a
#    literal non-empty string, i.e. always false — and its *status* was then
#    read via command substitution (which captures stdout, not $?).
#  * `ovs-vsctl port-add` is not a command; the subcommand is 'add-port'.

integration_bridge=br-int
physical_bridge=
physical_port=

function usage {
    cat <<EOF
USAGE: $0 options

This script adds bridges to openvswitch for quantum to use.

 -p   Physical Bridge Name
 -b   Physical Bridged Port
 -i   Integration Bridge Name (default: br-int)
 -h   Show this message
EOF
}

# True (exit 0) when bridge $1 already exists in openvswitch.
function bridge_exists {
    ovs-vsctl list-br | grep -qx "$1"
}

function create_bridge {
    ovs-vsctl add-br "$1"
}

while getopts "p:b:i:h" opt; do
    case $opt in
        p) physical_bridge=$OPTARG ;;
        b) physical_port=$OPTARG ;;
        i) integration_bridge=$OPTARG ;;
        h) usage; exit 0 ;;
        *) usage; exit 1 ;;
    esac
done

# Create any bridge that is not present yet (idempotent).
for br in $physical_bridge $integration_bridge; do
    if ! bridge_exists "$br"; then
        create_bridge "$br"
    fi
done

# Attach the physical port to the physical bridge once it exists.
if [ -n "$physical_port" ] && bridge_exists "$physical_bridge"; then
    ovs-vsctl --may-exist add-port "$physical_bridge" "$physical_port"
fi
Add the nodejs helper script which has npm_shrinkwrap_clean
#!/bin/bash nodejs_helper_error() { echo $@ >&2 } # Copy file $1 to directory $2. If a file with the same name already exists in $2 it is renamed. nodejs_helper_copy_with_backup() { [ ! -f "$1" ] && nodejs_helper_error "Argument is missing or is not a file: $1" [ ! -d "$2" ] && nodejs_helper_error "Argument is missing or is not a directory: $2" set -x SRC="$1" DEST_DIR="$2" FILENAME=$(basename "$1") TARGET="${DEST_DIR}/${FILENAME}" set +x if [ -e "$TARGET" ]; then echo "Moving existing ${TARGET} to ${TARGET}.1" mv "$TARGET" "$TARGET.1" fi cp "$SRC" "$TARGET" && return 0 return 1 } nodejs_helper_cleanup() { popd &>/dev/null if [[ "$TMPDIR" =~ "/tmp" ]]; then # echo "Clean temp dir $TMPDIR" #rm -rf "$TMPDIR" fi } # Usage: npm_shrinkwrap_clean path/to/package.json # # Helper to generate a pristine npm-shrinkwrap.json file. # # Given a package.json, performs an npm install in a temp directory bypassing the npm cache, # then generates a npm-shrinkwrap file to the same location as the provided package.json. # Any preexisting npm-shrinkwrap.json will be renamed to `npm-shrinkwrap.json.1`. # npm_shrinkwrap_clean() { FILE="$1" DEST=$(cd $(dirname "$FILE") && pwd) [ ! -z "$FILE" ] || { nodejs_helper_error "Usage: shrinkwrap_clean /path/to/package.json" && return 1 ; } [ -f "$FILE" ] || { nodejs_helper_error "File does not exist: $FILE" && return 1 ; } TMPDIR=$(mktemp -d) cp "$FILE" "$TMPDIR" pushd "$TMPDIR" &>/dev/null trap nodejs_helper_cleanup RETURN # restore original dir before exiting if npm cache clear \ && npm install \ && npm shrinkwrap \ && nodejs_helper_copy_with_backup "${TMPDIR}/npm-shrinkwrap.json" "${DEST}" then echo "Wrote ${DEST}/npm-shrinkwrap.json" else # Hosed nodejs_helper_error "^^^ Failed :(" return 1 fi }
Add Artistic Style formatting script.
#!/bin/sh
# Format sources with Artistic Style.
# Requires Artistic Style 3.1
# http://astyle.sourceforge.net/
#
# All arguments are passed through to astyle (typically the files to
# format).  "$@" is quoted so file names containing spaces survive —
# the original's bare $@ re-split them.
astyle --suffix=none \
    --style=java \
    --indent=tab \
    --indent-switches \
    --attach-closing-while \
    --pad-oper \
    --pad-comma \
    --pad-header \
    --unpad-paren \
    --align-pointer=name \
    --align-reference=name \
    --break-closing-braces \
    --break-one-line-headers \
    --attach-return-type \
    --attach-return-type-decl \
    --close-templates \
    "$@"
Add threads vs. QPS convenience script.
# Uses generate-queries-for-threads.py to generate input script. python per-thread-qps.py > /tmp/threads-vs-qps.csv Rscript plot-qps.r /tmp/threads-vs-qps.csv threads-vs-qps.png
Add a simple benchmark script for linux
#!/bin/bash #for i in 1 10 100 1000; do # echo $i # CC=gcc bazel run -c opt //ryu/benchmark:benchmark_fixed -- -samples=1000 -v -precision=$i > glibc-fixed-$i.csv # CC=gcc bazel run -c opt //ryu/benchmark:benchmark_fixed -- -samples=1000 -v -precision=$i -exp > glibc-exp-$i.csv #done for i in 1 10 100 1000; do echo $i CC=musl-gcc bazel run -c opt //ryu/benchmark:benchmark_fixed -- -samples=1000 -v -precision=$i > musl-fixed-$i.csv CC=musl-gcc bazel run -c opt //ryu/benchmark:benchmark_fixed -- -samples=1000 -v -precision=$i -exp > musl-exp-$i.csv done
Add a standalone command to check a machine
#!/bin/bash IPADDRESSES=$1 TEMPLATE_IDENTIFIER=$2 VM_ID=$3 FOLDER_TO_SAVE_REPORTS=$4 source ../include/functions.sh # Basic configuration EXTERNAL_TESTS_FOLDER_PATH=/opt/secant/external_tests INTERNAL_TESTS_FOLDER_PATH=/opt/secant/internal_tests # Make sure the environment is exposed to the tests export DEBUG=true IFS=',' read -r -a ipAddresses <<< "$IPADDRESSES" analyse_machine "$TEMPLATE_IDENTIFIER" "$VM_ID" "$FOLDER_TO_SAVE_REPORTS" "${ipAddresses[@]}"
Add bash script to setup the environment
#!/bin/bash
# Create a fresh Python virtualenv in ./env and install the project's
# dependencies into it.

rm -rf "$(pwd)/env"
virtualenv "$(pwd)/env"
source "$(pwd)/env/bin/activate"

# BUGFIX: the original list ended with a dangling 'iptools \' line
# continuation, which silently swallowed whatever command followed.
pip install \
    cherrypy \
    Mako \
    WTForms \
    MySQL-python \
    iptools
Add shell script for xmobar
#!/bin/sh # Copied from https://github.com/jaor/xmobar/issues/239#issuecomment-233206552 # Detects the width of running trayer-srg window (xprop name 'panel') # and creates an XPM icon of that width, 1px height, and transparent. # Outputs an <icon>-tag for use in xmobar to display the generated # XPM icon. # # Run script from xmobar: # `Run Com "/where/ever/trayer-padding-icon.sh" [] "trayerpad" 10` # and use `%trayerpad%` in your template. # Function to create a transparent Wx1 px XPM icon create_xpm_icon () { timestamp=$(date) pixels=$(for i in `seq $1`; do echo -n "."; done) cat << EOF > "$2" /* XPM * static char * trayer_pad_xpm[] = { /* This XPM icon is used for padding in xmobar to */ /* leave room for trayer-srg. It is dynamically */ /* updated by by trayer-padding-icon.sh which is run */ /* by xmobar. */ /* Created: ${timestamp} */ /* <w/cols> <h/rows> <colors> <chars per pixel> */ "$1 1 1 1", /* Colors (none: transparent) */ ". c none", /* Pixels */ "$pixels" }; EOF } # Width of the trayer window width=$(xprop -name panel | grep 'program specified minimum size' | cut -d ' ' -f 5) # Icon file name iconfile="/tmp/trayer-padding-${width}px.xpm" # If the desired icon does not exist create it if [ ! -f $iconfile ]; then create_xpm_icon $width $iconfile fi # Output the icon tag for xmobar echo "<icon=${iconfile}/>"
Add .NET 5 run script.
#!/bin/bash
# Run the .NET 5 benchmark binary repeatedly: one warm-up pass plus
# $iterations measured passes (hence the inclusive loop bound).

iterations=10
separator="----------------------------------------------------------------"

echo "$separator"
echo "Running $iterations iterations of .NET 5 (Plus 1 warm-up iteration.)."
echo "$separator"

i=0
while [ "$i" -le "$iterations" ]; do
  echo "Iteration $i"
  # $MONOCMD stays unquoted on purpose: it may be empty or contain flags.
  $MONOCMD bin_mono/Release/netcoreapp3.0/linux-x64/publish/aspnet_start.exe
  i=$((i + 1))
done

echo "$separator"
echo "Finished."
echo "$separator"
Add a script to show the logs with color
#!/bin/sh BLUE="\\033[1;34m" GREEN="\\033[1;32m" ORANGE="\\033[1;33m" RED="\\033[1;31m" IWHITE="\\033[1;97m" NORMAL="\\033[0;39m" echo=/bin/echo ssh elveos@elveos.org ' tail --follow=name ~/.local/share/bloatit/log/infos.log' \ | sed -u -E " s/^....-..-.. (..:..:..,...) \\[Thread-(..?)\\] INFO Access\\:(Context|Request)\\:/\1 (\2)/g s/(REQUEST_URI='[^']*')/$($echo -e $RED)\1$($echo -e $NORMAL)/g s/(USER_ID='[0-9]*')/$($echo -e $GREEN)\1$($echo -e $NORMAL)/g s/(REQUEST_URI='....resource[^']*')/$($echo -e $BLUE)\1$($echo -e $NORMAL)/g s/(KEY='[^']*')/$($echo -e $IWHITE)\1$($echo -e $NORMAL)/g s#(REFERER='[^']*')#$($echo -e $ORANGE)\1$($echo -e $NORMAL)#g s#(REFERER='https://elveos.org[^']*')#$($echo -e $IWHITE)\1$($echo -e $NORMAL)#g " | \ while read line ; do GREPED="$($echo "$line" | grep -o -E "KEY='[^']*'")" if [ -z "$GREPED" ] ; then echo "$line" else echo "$GREPED" | cksum | grep -o -R "[0-5]" | head -3 | \ ( read green read red read blue echo $line | sed -u -E "s/KEY='([^']*)'/$($echo -e "\\x1b[48;5;$(( 16 + ($red * 36) + ($green * 6) + $blue ))m")\1$($echo -e $NORMAL)/g" ) fi done
Add script to list all contributors to app repos
#!/bin/bash
# List the GitHub contributors of every application repository on Flathub,
# one "appid:login" pair per line.

if [ $# -eq 0 ]
then
    echo
    echo "Usage: $0 GITHUB_ACCESS_TOKEN"
    echo
    echo "Find here how to create a Github access token:"
    echo " - https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/"
    exit 1
fi

GITHUB_ACCESS_TOKEN=$1

# Every application id published on Flathub.
APPIDLIST=$(curl -s https://flathub.org/api/v1/apps | jq -r '.[].flatpakAppId' | sort)

for appid in $APPIDLIST
do
    # Authenticate with a header: the ?access_token= query parameter is
    # deprecated and rejected by the GitHub API. Parse the JSON with jq
    # (already used above) instead of grep/cut/sed.
    curl -s -H "Authorization: token ${GITHUB_ACCESS_TOKEN}" \
        "https://api.github.com/repos/flathub/${appid}/contributors" \
        | jq -r '.[].login' \
        | sed "s|^|${appid}:|"
done
Clean up log glob polling test.
#!/bin/bash # Test that files created after startup are correctly matched against the log pattern, in polling mode. source $(dirname $0)/functions.sh LOGS=${TEST_TMPDIR}/logs PROGS=${TEST_TMPDIR}/progs mkdir -p $LOGS $PROGS start_server --logs "$LOGS/log*" --progs $PROGS --poll_interval=250ms uri_get /debug/vars expect_json_field_eq "0" log_count "${DATA}" expect_json_field_eq "0" line_count "${DATA}" echo "line 1" >> $LOGS/log sleep 1 uri_get /debug/vars expect_json_field_eq "1" log_count "${DATA}" expect_json_field_eq "1" line_count "${DATA}" echo "line 1" >> $LOGS/log1 sleep 2 uri_get /debug/vars expect_json_field_eq "3" log_count "${DATA}" expect_json_field_eq "2" line_count "${DATA}" pass
#!/bin/bash # Test that files created after startup are correctly matched against the log pattern, in polling mode. source $(dirname $0)/functions.sh LOGS=${TEST_TMPDIR}/logs PROGS=${TEST_TMPDIR}/progs mkdir -p $LOGS $PROGS start_server --logs "$LOGS/log*" --progs $PROGS --poll_interval=250ms uri_get /debug/vars expect_json_field_eq "0" log_count "${DATA}" expect_json_field_eq "0" line_count "${DATA}" echo "line 1" >> $LOGS/log sleep 1 uri_get /debug/vars expect_json_field_eq "1" log_count "${DATA}" expect_json_field_eq "1" line_count "${DATA}" echo "line 1" >> $LOGS/log1 sleep 1 uri_get /debug/vars expect_json_field_eq "2" log_count "${DATA}" expect_json_field_eq "2" line_count "${DATA}" pass
Add studio integration test script
#!/bin/bash
# Script that builds androidx SNAPSHOT and runs the androidx integration
# tests from the Studio branch.
set -e

# All expansions are quoted so the script survives a checkout path that
# contains spaces.
readonly SCRIPT_PATH="$(dirname "$(realpath "$0")")"
readonly BASE_PATH="$(realpath "$SCRIPT_PATH/../../..")"
readonly PREBUILTS_DIR="$BASE_PATH/prebuilts"
readonly OUT_DIR="$BASE_PATH/out"
readonly BAZEL_CMD="$BASE_PATH/tools/base/bazel/bazel"
readonly M2REPO_DIR="$PREBUILTS_DIR/tools/common/androidx-integration/m2repository"
readonly ANDROIDX_INTERNAL_DIR="$PREBUILTS_DIR/androidx/internal"

echo "Using basepath $BASE_PATH"
echo "Starting $0 at $(date)"

# Build the androidx snapshot m2 repository.
time "$SCRIPT_PATH/androidx_snapshot.sh"

mkdir -p "$M2REPO_DIR"

# Copy internal and the output to prebuilts/tools/common/androidx-integration
cp -R "$ANDROIDX_INTERNAL_DIR"/* "$M2REPO_DIR"
unzip -quo "$OUT_DIR/dist/top-of-tree-m2repository-all-dist.zip" -d "$M2REPO_DIR/.."

"$BAZEL_CMD" test //tools/adt/idea/androidx-integration-tests:androidx-integration-tests_tests
echo "Completing $0 at $(date)"
Add initial block download script
#!/bin/bash
###
# Initial Block Download script.
#
# Runs a bitcoind process until initial block download is complete.
# Forwards the exit code from bitcoind onward.
#

MYPID=$$

# Setup
mkdir -p ibd
touch ibd/debug.log
chmod +x bitcoind

# Kill the daemon whose pid is $1 and reap any tail processes we spawned.
cleanup() {
    echo "Terminating (pid: ${1})"
    kill "$1"
    pkill -P "${MYPID}" tail
}

# Launch bitcoind
./bitcoind -datadir=ibd -disablewallet &
bitcoin_pid=$!
trap "cleanup ${bitcoin_pid}" EXIT

# Wait for IBD to finish (progress reaches 1.0 in the log) and then
# kill the daemon.
(
    tail -f ibd/debug.log | grep -m 1 'progress=1.000000'
    echo "Initial block download complete, killing bitcoin daemon."
    kill "${bitcoin_pid}"
) &

# Show some progress: print every 10000th UpdateTip line.
tail -f ibd/debug.log | grep 'UpdateTip' | awk 'NR % 10000 == 0' &

# Wait for bitcoind to exit; its status becomes the script's exit code.
wait "${bitcoin_pid}"
Add example to update an order.
#!/bin/sh
# Example request: update an existing order via the JSON API
# (PUT /api/v1/orders on a locally running server).
curl -X PUT 'http://localhost:5000/api/v1/orders' \
     --include \
     --silent \
     -H 'Accept: application/json' \
     -H 'Content-Type: application/json' \
     --data \
'{
  "order": {
    "patient_id": "674563",
    "patients_birth_date": "1952-07-17",
    "patients_sex": "M",
    "requested_procedure_description": "brain",
    "station_id": "1",
    "patients_name_attributes": {
      "family": "Hasselhoff",
      "given": "David",
      "middle": "Michael",
      "prefix": "Mr.",
      "suffix": "(Knight Rider)"
    },
    "referring_physicians_name_attributes": {
      "family": "House",
      "given": "Gregory",
      "prefix": "Dr."
    }
  }
}'
Make [] happy again by adding a test with the same name as the one whose deletion it did not notice, //third_party/bazel/tools/build_defs/test:swift_lib_test .
#!/bin/bash # Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(twerth): remove workaround once TAP is happy again exit 0
Add utility script to remove refs used by NoteDB
#!/usr/bin/env bash
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: the shebang must be the very first line of the file; the original
# placed it after the license header, where it has no effect.

set -e

if [[ "$#" -lt "2" ]] ; then
  cat <<EOF
Usage: run "$0 /path/to/git/dir [project...]" or "$0 /path/to/git/dir ALL"

This util script can be used in case of rollback to ReviewDB during an unsuccessful
migration to NoteDB or simply while testing the migration process.

It will remove all the refs used by NoteDB added during the migration
(i.e.: change meta refs and sequence ref).
EOF
  exit 1
fi

GERRIT_GIT_DIR=$1
shift
ALL_PROJECTS=$@

# After the shift above, the project selector is "$1".
# BUG FIX: the original tested '[[ "$2" -eq "ALL" ]]', which looked at the
# wrong argument AND used an arithmetic comparison (non-numeric strings
# evaluate to 0, so almost anything matched). Use a string comparison.
if [[ "$1" == "ALL" ]] ; then
  ALL_PROJECTS=$(find "${GERRIT_GIT_DIR}" -type d -name "*.git")
fi

ALL_PROJECTS_ARRAY=(${ALL_PROJECTS// / })
for project in "${ALL_PROJECTS_ARRAY[@]}"
do
    if [[ "$project" =~ /All-Users\.git$ ]]; then
        # All-Users refs must be kept.
        echo "Skipping $project ..."
    else
        echo "Removing meta ref for $project ..."
        cd "$project"
        # Delete every */meta ref recorded for this repository, if any.
        if git show-ref meta | grep -q "/meta$"; then
            git show-ref meta | grep "/meta$" | cut -d' ' -f2 | xargs -L1 git update-ref -d
        fi
    fi
done

echo "Remove sequence ref"
allProjectDir="$GERRIT_GIT_DIR/All-Projects.git"
cd "$allProjectDir"
git update-ref -d refs/sequences/changes
Add a tool to verify that script files stop upon seeing an error
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will verify that the specified script files have
# "set -o errexit" turned on at some point.
#
# Usage: verify-errexit.sh [ dir | file ... ]
# default args is the root of our source tree

set -o errexit
set -o nounset
set -o pipefail

REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

if [[ "$*" != "" ]]; then
  args="$*"
else
  # Everything in the repo root except vendored/generated trees.
  # BUG FIX: the original parsed `ls` output, which yields names relative
  # to REPO_ROOT while the grep below ran relative to the cwd; find
  # produces usable paths regardless of where the script is invoked from.
  args=$(find "${REPO_ROOT}" -maxdepth 1 -mindepth 1 \
           ! -name '*vendor*' ! -name '*glide*' ! -name '.*')
fi

# Gather the list of files that appear to be shell scripts.
# Meaning they have some form of "#!...sh" as a line in them.
shFiles=$(grep -rl '^#!.*sh$' ${args})

rc="0"
for file in ${shFiles}; do
  grep -q "set -o errexit" "${file}" && continue
  grep -q "set -[a-z]*e" "${file}" && continue
  echo "${file}: appears to be missing \"set -o errexit\""
  rc="1"
done
exit ${rc}
Add FreeBSD git script to deploy
#!/bin/sh portsnap fetch extract update cd /usr/ports/ports-mgmt/pkg make install clean cd /usr/local/sbin/pkg2ng echo 'PACKAGESITE: http://pkgbeta.freebsd.org/freebsd-9-amd64/latest' > /usr/local/etc/pkg.conf /usr/local/sbin/pkg install -y git salt /usr/local/sbin/pkg delete -y salt mkdir -p /root/git cd /root/git git clone git://github.com/saltstack/salt.git cd salt python setup.py install cd mkdir -p /etc/salt/pki echo '{{ vm['priv_key'] }}' > /etc/salt/pki/minion.pem echo '{{ vm['pub_key'] }}' > /etc/salt/pki/minion.pub echo '{{ minion }}' > /etc/salt/minion salt-minion -d
Add script to dump JSON for an entire week
#!/usr/bin/env bash
# Dump the train time series for every day of the week
# 2017-01-09 (Monday) through 2017-01-15 (Sunday).
# Each day is written to YYYYMMDD.json.
set -e

for day in 2017-01-09 2017-01-10 2017-01-11 2017-01-12 2017-01-13 2017-01-14 2017-01-15; do
    # "${day//-/}" strips the dashes to build the output file name.
    ./generate_time_series.py -d data/trains -o "${day//-/}.json" -D "$day"
done
Add script to create a release zip.
#!/bin/bash
# Build a release zip of the plugin from the currently checked-out branch:
# export the sources, build with ant in a scratch directory, strip files
# that must not ship, and zip the result next to the working copy.
set -e

name=gateplugin-Tagger_TagMe
tmpdir=/tmp
curdir=$(pwd -P)
# Plugin version as declared in creole.xml (VERSION="...").
version=$(perl -n -e 'if (/VERSION="([^"]+)"/) { print $1;}' < "$curdir/creole.xml")
destdir=$tmpdir/${name}$$
curbranch=$(git branch | grep '\*' | cut -c 3-)

echo Making a release zip for plugin $name, version $version from branch $curbranch

rm -rf "$destdir"
mkdir -p "$destdir/$name"
rm -f "$name"-*.zip
rm -f "$name"-*.tgz

# Export the branch as a source zip, then unpack and build it off-tree.
git archive --format zip --output "${name}-${version}-src.zip" --prefix="$name/" "$curbranch"
pushd "$destdir"
unzip "$curdir/${name}-${version}-src.zip"
cd "$name"
cp "$curdir/build.properties" .
ant || exit
ant clean.classes || exit
# Remove build-only files from the release tree.
rm -rf build.properties
rm -rf makedist.sh
## Make sure we remove the integration tests directory
rm -rf tests
rm "$curdir/${name}-${version}-src.zip"
cd ..
zip -r "$curdir/$name-$version.zip" "$name"
echo Created a release zip for plugin $name, version $version from branch $curbranch
echo Zip file is $curdir/$name-$version.zip
popd >& /dev/null
Add tool for creating local coverage html reports
#!/usr/bin/env bash
# NOTE: this script uses [[ ]] and so must run under bash; the original
# "#!/usr/bin/env sh" shebang broke on systems where sh is dash.

######################################################
### Run coverage, generate and display html report ###
######################################################
###
### Configurable through environment variables.
###
### Examples:
##############
##
## Run complete SUITE, generate coverage report for all and open overview page in browser
#
#   test_and_report.sh
#
## Run complete SUITE, generate report for only node.py and open report for node.py in browser
#
#   ONLY=raiden/transfer/node.py test_and_report.sh
#
## Same as above but more verbose (SHOW is configured implicitly above)
#
#   ONLY=raiden/transfer/node.py SHOW=raiden_transfer_node_py.html test_and_report.sh
#
## Run pytest with `-s --pdb` flags
#
#   PYTEST="$(which pytest) -s --pdb" test_and_report.sh
#
## Don't display in browser, but show path to file
#
#   OPEN=echo test_and_report.sh
#
## Only run a specific test
#
#   SUITE=raiden/tests/unit/transfer/test_node.py test_and_report.sh
#

## configurable values

# pytest executable (may include extra flags; deliberately unquoted below)
PYTEST=${PYTEST:-$(which pytest)}
# specify non temp output directory
OUT_DIR=${OUT_DIR:-$(mktemp -d)}
# which tests to execute (may be several paths; deliberately unquoted below)
SUITE=${SUITE:-raiden/tests/unit raiden/tests/fuzz}
# report only these files
ONLY=${ONLY:-*}
# open specific file
SHOW=${SHOW:-index.html}

# how to display html.
# BUG FIX: sys.platform is the lowercase "darwin" on macOS (the original
# compared against "Darwin" and never matched), and "[[i -z ...]]" was a
# syntax error.
if [[ "$(python -c 'import sys; print(sys.platform)')" = "darwin" ]]
then
    if [[ -z ${OPEN} ]]
    then
        OPEN=open
    fi
fi
OPEN=${OPEN:-xdg-open}

# be a bit smarter about which file to display
if [[ "$ONLY" = "*" ]]; then
    SHOW=${SHOW:-index.html}
else
    SHOW=$(python -c "import sys; print(sys.argv[1].replace('/', '_').replace('.', '_') + '.html')" "$ONLY")
fi

# in case that out dir was configured, make sure, that it exists
mkdir -p "$OUT_DIR"

# -f: don't fail on the very first run, when .coverage doesn't exist yet.
rm -f .coverage
# "$ONLY" is quoted so a literal "*" reaches coverage instead of globbing
# against the current directory.
coverage run --branch $PYTEST -x $SUITE && \
    coverage html -d "$OUT_DIR" --include="$ONLY" && \
    "$OPEN" "$OUT_DIR/$SHOW"
Add user acceptance test script.
#!/bin/bash
# User acceptance tests for the keyphrase tool: rebuild and re-run the
# tests on every source change (requires gcc and inotifywait).

# Key -> phrase direction: a known key must yield the known phrase.
test_key_to_phrase() {
    actual=$(./keyphrase 0xFFFF562F8F9A961E158BDE2D4CCD2A64BB1D923208939714675BFAB28BBAF2A3)
    expected="zyzzyvas flutings mushers octopuses bizones talkier evokers coagent ringer neutral antipode omnibus havening whistles mistitled vacuums"
    if [ "$actual" != "$expected" ]
    then
        echo -e '\E[37;44m'"\033[1;37;41m**FAIL**: Unexpected phrase output!\n   ↪ Expected '$expected' but got '$actual'\n\033[0m"
    else
        echo -e '\E[37;44m'"\033[1;42;37m**PASS**: Phrase output is as expected\033[0m"
    fi
}

# Phrase -> key direction: the phrase must round-trip back to the key.
test_phrase_to_key() {
    actual=$(./keyphrase "zyzzyvas flutings mushers octopuses bizones talkier evokers coagent ringer neutral antipode omnibus havening whistles mistitled vacuums")
    expected="0xFFFF562F8F9A961E158BDE2D4CCD2A64BB1D923208939714675BFAB28BBAF2A3"
    if [ "$actual" != "$expected" ]
    then
        echo -e '\E[37;44m'"\033[1;37;41m**FAIL**: Unexpected key output!\n   ↪ Expected '$expected' but got '$actual'\n\033[0m"
    else
        echo -e '\E[37;44m'"\033[1;42;37m**PASS**: Key output is as expected\033[0m"
    fi
}

run_tests() {
    # -f: the binary doesn't exist on the first run; the original
    # "rm keyphrase > /dev/null" still printed an error to stderr.
    rm -f keyphrase
    clear
    gcc -o keyphrase keyphrase.c funcs.c -std=c99 -Wall
    test_key_to_phrase
    test_phrase_to_key
}

run_tests

# Re-run whenever any .c or .h file is written.
while true; do
    change=$(inotifywait -q -e close_write,moved_to,create {*.c,*.h})
    change=${change#./ * }
    run_tests
done
Add test for setup and tear down of overlay2 graph driver
source $SRCDIR/libtest.sh

# Test "docker-storage-setup reset". Returns 0 on success and 1 on failure.
test_reset_overlay2() {
  local test_status=0
  local testname
  testname=$(basename "$0")

  # Configure overlay2 as the storage driver under test.
  cat << EOF > /etc/sysconfig/docker-storage-setup
STORAGE_DRIVER=overlay2
EOF

  # Run docker-storage-setup
  if ! $DSSBIN >> $LOGS 2>&1; then
    echo "ERROR: $testname: $DSSBIN Failed." >> $LOGS
    clean_config_files
    return 1
  fi

  if ! grep -q "overlay2" /etc/sysconfig/docker-storage; then
    echo "ERROR: $testname: /etc/sysconfig/docker-storage does not have string overlay2." >> $LOGS
    clean_config_files
    return 1
  fi

  # A successful reset must succeed AND remove the generated config file.
  if ! $DSSBIN --reset >> $LOGS 2>&1; then
    test_status=1
  elif [ -e /etc/sysconfig/docker-storage ]; then
    test_status=1
  fi

  if [ ${test_status} -eq 1 ]; then
    echo "ERROR: $testname: $DSSBIN --reset Failed." >> $LOGS
  fi

  clean_config_files
  return $test_status
}

# Create a overlay2 docker backend and then make sure the
# docker-storage-setup --reset
# cleans it up properly.
test_reset_overlay2
Remove unnecessary flags and add a further comment about "make check"
#!/bin/bash
# conda build script for graphviz: configure (per-platform), build,
# install, then register the plugins with "dot -c".

export CFLAGS="-Wall -g -m${ARCH} -pipe -O2 -fPIC"
# BUG FIX: was exported as CXXLAGS (typo), so the C++ compiler never saw
# these flags.
export CXXFLAGS="${CFLAGS}"
export CPPFLAGS="-I${PREFIX}/include"
export LDFLAGS="-L${PREFIX}/lib"

if [ "$(uname)" == Darwin ]; then
    ./configure --prefix=$PREFIX \
                --with-quartz \
                --disable-debug \
                --disable-dependency-tracking \
                --disable-java \
                --disable-php \
                --disable-perl \
                --disable-tcl \
                --without-x \
                --without-qt
else
    ./configure --prefix=$PREFIX \
                --disable-java \
                --disable-php \
                --disable-perl \
                --disable-tcl \
                --without-x
fi

make
# This is failing for R tests
# make check
make install
# Register the available plugins in the config file.
dot -c
#!/bin/bash if [ `uname` == Darwin ]; then ./configure --prefix=$PREFIX \ --with-quartz \ --disable-debug \ --disable-dependency-tracking \ --disable-java \ --disable-php \ --disable-perl \ --disable-tcl \ --without-x \ --without-qt else ./configure --prefix=$PREFIX \ --disable-java \ --disable-php \ --disable-perl \ --disable-tcl \ --without-x fi make # This is failing for rtest. # Doesn't do anythoing for the rest # make check make install dot -c
Add Jenkins branch build script
#!/bin/bash
# Jenkins branch build: run ./jenkins.sh and report the result as a
# GitHub commit status via ghtools.
set -e

# Per-job virtualenv holding the ghtools dependency.
# Quoted: Jenkins job names routinely contain spaces.
VENV_PATH="${HOME}/venv/${JOB_NAME}"

[ -x "${VENV_PATH}/bin/pip" ] || virtualenv "${VENV_PATH}"

. "${VENV_PATH}/bin/activate"
pip install -q ghtools

REPO="alphagov/publisher"

# Mark the commit as pending before the build starts.
gh-status "$REPO" "$GIT_COMMIT" pending -d "\"Build #${BUILD_NUMBER} is running on Jenkins\"" -u "$BUILD_URL" >/dev/null

if ./jenkins.sh; then
    gh-status "$REPO" "$GIT_COMMIT" success -d "\"Build #${BUILD_NUMBER} succeeded on Jenkins\"" -u "$BUILD_URL" >/dev/null
    exit 0
else
    gh-status "$REPO" "$GIT_COMMIT" failure -d "\"Build #${BUILD_NUMBER} failed on Jenkins\"" -u "$BUILD_URL" >/dev/null
    exit 1
fi
Use a faster middleman deploy
#!/usr/bin/env bash
# Force-push the website/ subtree of this repository to Heroku.

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that directory (quoted: the path may contain spaces).
cd "$DIR"

# Push the subtree (force)
git push heroku "$(git subtree split --prefix website HEAD)":master --force
#!/bin/bash
# Deploy the website by copying it into a scratch git repository and
# force-pushing that repository to Heroku (much faster than a subtree push).
set -e

# Set the tmpdir
if [ -z "$TMPDIR" ]; then
    TMPDIR="/tmp"
fi

# Create a temporary build dir and make sure we clean it up. For
# debugging, comment out the trap line.
DEPLOY=$(mktemp -d "$TMPDIR/terraform-www-XXXXXX")
# Single quotes: expand $DEPLOY when the trap fires, not when it is set.
trap 'rm -rf "$DEPLOY"' INT TERM EXIT

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Copy into tmpdir
cp -R "$DIR/website/" "$DEPLOY/"

# Change into that directory
pushd "$DEPLOY" &>/dev/null

# Ignore some stuff
touch .gitignore
echo ".sass-cache" >> .gitignore
echo "build" >> .gitignore

# Add everything
git init -q .
git add .
git commit -q -m "Deploy by $USER"
git remote add heroku git@heroku.com:terraform-www.git
git push -f heroku master

# Go back to our root
popd &>/dev/null
Add script to move artifacts.
#!/bin/bash
# Flatten the build/stage tree: move every file up to the top level and
# remove the then-empty subdirectories.
set -eu -o pipefail

SCRIPT_DIR="$(cd "$(dirname -- "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
# Quoted: keep working when the checkout path contains spaces.
cd "$SCRIPT_DIR/../build/stage"

# mv -i prompts instead of silently clobbering duplicate basenames.
find . -mindepth 1 -type f -exec mv -i -- {} . \;
# -depth: delete children before their parents.
find . -mindepth 1 -depth -type d -empty -exec rmdir {} \;
Add new script to clean Nginx cache
#!/bin/bash
#
# Simple Nginx clear cache
#
# by Karl Johnson
# kjohnson@aerisnetwork.com
#
# Version 1.0
#

CDIR1="/opt/nginx/cache"
CDIR2="/opt/nginx/proxy_temp"

if [ ! -d "$CDIR1" ] || [ ! -d "$CDIR2" ]; then
    echo -e "\nError: Directory /opt/nginx/cache or /opt/nginx/proxy_temp doesn't seem to exist. Update the directories in the script.\n"
    # BUG FIX: this is an error, so exit non-zero (the original exited 0
    # and hid the failure from callers).
    exit 1
fi

echo -e "Current cache size is: $(du -hs "$CDIR1" | awk '{print $1}') and $(du -hs "$CDIR2" | awk '{print $1}')"

# Use the configured variables so the paths live in exactly one place
# (the original repeated the literal paths here).
find "$CDIR1" -type f -exec rm {} \;
find "$CDIR2" -type f -exec rm {} \;

echo -e "New cache size is: $(du -hs "$CDIR1" | awk '{print $1}') and $(du -hs "$CDIR2" | awk '{print $1}')"
Uninstall Chef script for Linux VMs.
# Author:: Prabhu Das (<prabhu.das@clogeny.com>)
# Copyright:: Copyright (c) 2014 Opscode, Inc.

# uninstall chef
# Actions:
#  - uninstall the azure-chef-extension gem
#  - uninstall the chef package (dpkg)

# Uninstall the custom gem
export PATH=$PATH:/opt/chef/embedded/bin

azure_chef_extn_gem=$(gem list | grep azure-chef-extension | awk '{print $1}')

if test "$azure_chef_extn_gem" = "azure-chef-extension" ; then
  echo "Started removing gem azure-chef-extension"
  uninstall_gem=$(gem uninstall azure-chef-extension)
  if [ $? -eq 0 ]; then
    # BUG FIX: the original echoed the undefined $azure_chef_extn_gem_status.
    echo "Gem $azure_chef_extn_gem uninstalled successfully :)"
  else
    echo "Unable to uninstall gem azure-chef-extension."
  fi
else
  echo "Gem azure-chef-extension not found !!!"
fi

# Uninstall chef_pkg
install_status=$(dpkg -l | grep chef | awk '{print $1}')
pkg_name=$(dpkg -l | grep chef | awk '{print $2}')

if test "$install_status" = "ii" ; then
  echo "Started removing Chef."
  uninstall=$(sudo dpkg -P $pkg_name)
  if [ $? -eq 0 ]; then
    echo "Package $pkg_name uninstalled successfully :)"
  else
    echo "Unable to uninstall package Chef."
  fi
else
  echo "No Package found to uninstall!!!"
fi
Add recompress script from gz to pxz
# Recompress gzip archives to xz using the parallel pxz compressor.
#
# Usage: gz_to_pxz FILE.gz [FILE.gz ...]
# For each FILE.gz a sibling FILE.xz is written; the original is kept.
# Arguments whose name does not end in .gz (case-insensitive) are skipped.
function gz_to_pxz() {
    local gz xz
    # "$@" (quoted): file names containing spaces survive intact — the
    # original iterated over unquoted $@ and word-split them.
    for gz in "$@"
    do
        case $(echo "$gz" | tr '[A-Z]' '[a-z]') in
            *.gz)
                # Strip the final .gz/.GZ extension and append .xz
                # (replaces the original dirname/basename/tr pipeline).
                xz="${gz%.*}.xz"
                echo "gzip -cd $gz | pxz -c9 > $xz"
                gzip -cd "$gz" | pxz -c9 > "$xz"
                ;;
        esac
    done
}
Add Chapter 1.3 Exercise and .gitignore
#Chapter 1.3: #Exercise 1: According to the man page, what are the official short and long descriptions of echo #on your system? echo [-n] [string ...] #short echo -- write arguments to the standard output
Add a script to install utils in ./bin
#!/bin/bash
# (Re)install the utility symlinks into ./bin and list the result.

# -f: don't fail/warn when ./bin is already empty (unmatched glob).
rm -f bin/*

curr_dir=$(pwd)
ln -s "$curr_dir/rna_pdb_tools/utils/rmsd_calc/rmsd_calc_to_target.py" "$curr_dir/bin/rmsd_calc_to_target.py"

echo 'Installed in ./bin'
ls bin
Add script for building PHP 7
#!/usr/bin/bash
# Build PHP ${PHP_VER} from source on CentOS/RHEL and install it under
# ${PHP_PREFIX}.
set -e

export LC_ALL=C
export PHP_VER=7.0.1
export PHP_PREFIX=/opt/php

sudo yum -y update
sudo yum -y install epel-release
sudo yum -y install tar make gcc autoconf bison re2c
# install packages to build php extensions
sudo yum -y install libxml2-devel libcurl-devel libmcrypt-devel openssl-devel libpng-devel

test -d ~/src || mkdir ~/src
cd ~/src/
curl --location "https://github.com/php/php-src/archive/php-${PHP_VER}.tar.gz" -o "php-${PHP_VER}.tar.gz"
tar xfz "php-${PHP_VER}.tar.gz"
cd "php-src-php-${PHP_VER}/"

./buildconf --force
# NOTE: the original passed --with-curl twice; it is listed once here.
./configure --prefix=${PHP_PREFIX} \
    --enable-mbstring \
    --enable-xml \
    --enable-zip \
    --enable-fpm \
    --enable-ftp \
    --enable-exif \
    --enable-fileinfo \
    --enable-pcntl \
    --enable-pdo \
    --with-curl \
    --with-gd \
    --with-openssl \
    --with-mcrypt \
    --with-zlib \
    --with-mysqli=mysqlnd \
    --with-pdo-mysql=mysqlnd \
    --without-pear \
    --enable-json \
    --enable-phar \
    --enable-cli

make -j 2
sudo make install

${PHP_PREFIX}/bin/php -v
Add script to set up git integration
#!/bin/sh
# Configure git integration for pw password databases.

# set up an append-only strategy for merges and conflicts
git config --global merge.pw.name 'Append-only merge strategy for pw databases'
git config --global merge.pw.driver 'tmp=$(mktemp) ; (comm -12 %A %B ; comm -13 %O %A ; comm -13 %O %B ) >| $tmp ; mv $tmp %A'

# set up the diff program for password files
# FIXME: use pw-diff instead of textconv
git config --global diff.pw.textconv "pw-show -c -d"

db_dir=${XDG_DATA_HOME:-$HOME/.local/share}/pw

# Create the attributes file up front so the greps below don't print a
# "No such file or directory" warning on a fresh installation.
mkdir -p "$db_dir"
touch "$db_dir/.gitattributes"

grep -q '^* diff=pw merge=pw$' "$db_dir/.gitattributes" || \
    echo '* diff=pw merge=pw' >> "$db_dir/.gitattributes"
grep -q '^.* diff merge$' "$db_dir/.gitattributes" || \
    echo '.* diff merge' >> "$db_dir/.gitattributes"

# This is free software released into the public domain (CC0 license).
Add test repo for release testing process
#!/bin/bash -x
# Register the Kurento xenial-test (KMS6) package repository, import its
# signing key and refresh the apt package index.

echo '##################### EXECUTE: kurento_ci_container_job_setup #####################'

# Write the repository definition (tee echoes it to stdout as well).
tee /etc/apt/sources.list.d/kurento-test.list <<< "deb http://ubuntu.kurento.org xenial-test kms6"

# Import the Kurento archive signing key.
wget -O - http://ubuntu.kurento.org/kurento.gpg.key | apt-key add -

apt-get update
Define a script to check package files
#!/usr/bin/env sh
# Verify that build/package.zip contains only expected files: every entry
# that matches none of the allowed glob patterns is printed to stderr and
# the script exits non-zero.

PATTERNS=$(cat <<EOF
composer.json
composer.lock
LICENSE
README.md
src/**.php
EOF
)

EXIT=0

# NOTE: entries are split on whitespace; archive members whose names
# contain spaces would need a while-read loop instead.
for FILENAME in $(zipinfo -1 build/package.zip); do
  FOUND=0
  # A filename is acceptable when it matches any allowed pattern.
  for PATTERN in $PATTERNS; do
    case $FILENAME in
      $PATTERN) FOUND=1 ;;
    esac
  done
  if [ $FOUND -eq 0 ]; then
    echo "$FILENAME" 1>&2
    EXIT=1
  fi
done

exit $EXIT
Add simple auto completion script
#! /bin/bash
# Bash completion for the project's test runner: completes the names of
# test classes extracted from the test sources.

# All test files, via globbing instead of parsing `ls` output.
FILES=(/<full-path>/tests/*.py)

# Class names derived from "class Foo(TestBase):" declarations;
# grep prefixes matches with "file:", which cut strips off.
TESTS=$(grep -o -P '(?<=class ).*(?=\(TestBase\):)' "${FILES[@]}" | cut -d':' -f2)

# Run the selected tests ("$@" keeps each selected name a single argument).
function tests {
    python /<full-path>/tests_runner.py -t "$@"
}

complete -W "${TESTS}" tests
Add shell configuration for Git
# shellcheck shell=sh # git configuration if command -v git > /dev/null; then # Handy aliases to switch to the root of the repository alias git-repo-root='cd "$(git rev-parse --show-toplevel)"' alias grr='git-repo-root' fi # vim: syntax=sh cc=80 tw=79 ts=4 sw=4 sts=4 et sr
Add script for uploading ssh keys to AWS EC2
#!/bin/bash
# From https://alestic.com/2010/10/ec2-ssh-keys/
# Import the local public SSH key into every AWS region under the same
# key-pair name, so the one key works everywhere.

keypair=$USER # or some name that is meaningful to you
publickeyfile=$HOME/.ssh/id_rsa.pub

# All region names known to the account, whitespace-separated.
regions=$(aws ec2 describe-regions \
            --output text \
            --query 'Regions[*].RegionName')

# $regions stays unquoted on purpose: one loop iteration per region.
for region in $regions; do
    echo "$region"
    aws ec2 import-key-pair \
        --region "$region" \
        --key-name "$keypair" \
        --public-key-material "file://$publickeyfile"
done
Check in the backup configuration companion script.
#!/bin/bash
#
# Called by the backup.sh script to perform the actual backup
# operation. Customize to your heart's content.
#
# Passing in the option "--dry-run" as the first parameter to
# this function will cause duplicity to run in dry-run mode.
#
function backupOperation () {
    PARAM_DRY_RUN=$1
    # ${PARAM_DRY_RUN} is intentionally unquoted: it is either empty
    # (no extra argument) or a single flag.
    # BUG FIX: a tilde inside double quotes is NOT expanded by the shell,
    # so the original passed the literal path '~/VirtualBox VMs' to
    # duplicity; use $HOME instead.
    duplicity --full-if-older-than 1W ${PARAM_DRY_RUN} "$HOME/VirtualBox VMs" "sftp://user@host/VirtualBox VMs"
}

# "$1" is quoted and defaulted so the test doesn't error out when the
# script is sourced without any argument (the original unquoted $1 made
# [ ... ] fail with "unary operator expected").
if [ "${1:-}" = "set" ]; then
    echo "Exporting extra path."
    export PATH=${HOME}/devtools/homebrew/bin:/usr/local/bin:${HOME}/Library/Python/2.7/bin:${PATH}
    echo "Exporting passwords."
    export PASSPHRASE=""
    export FTP_PASSWORD=""
    echo "Exporting reporting address."
    export MAILTO="report@host"
else
    echo "Unsetting passwords."
    unset FTP_PASSWORD
    unset PASSPHRASE
    echo "Unsetting reporting address."
    unset MAILTO
fi