Instruction stringlengths 14 778 | input_code stringlengths 0 4.24k | output_code stringlengths 1 5.44k |
|---|---|---|
Modify script of making docs | #!/bin/sh
rm -rf doc/rst && mkdir doc/rst
make doc
pandoc --read=html --write=rst doc/leo_pod.html -o doc/rst/leo_pod.rst
pandoc --read=html --write=rst doc/leo_pod_manager.html -o doc/rst/leo_pod_manager.rst
| #!/bin/sh
make doc
rm -rf doc/rst && mkdir doc/rst
for Mod in leo_pod \
leo_pod_manager
do
read_file="doc/$Mod.html"
write_file="doc/rst/$Mod.rst"
pandoc --read=html --write=rst "$read_file" -o "$write_file"
sed -ie "1,6d" "$write_file"
sed -ie "s/\Module //" "$write_file"
LINE_1=`cat $write_file | wc -l`
LINE_2=`expr $LINE_1 - 10`
sed -ie "$LINE_2,\$d" "$write_file"
done
rm -rf doc/rst/*.rste
|
Split docker commands into multiple lines; expand CLI option names | #!/bin/sh -xe
# This script starts docker and systemd (if el7)
# Run tests in Container
# We use `--privileged` for cgroup compatability, which seems to be enabled by default in HTCondor 8.6.x
if [ "${OS_VERSION}" = "6" ]; then
sudo docker run --privileged --rm=true -v /sys/fs/cgroup:/sys/fs/cgroup -v `pwd`:/osg-test:rw centos:centos${OS_VERSION} /bin/bash -c "bash -xe /osg-test/travis-ci/test_inside_docker.sh ${OS_VERSION} ${PACKAGES}"
elif [ "${OS_VERSION}" = "7" ]; then
docker run --privileged -d -ti -e "container=docker" -v /sys/fs/cgroup:/sys/fs/cgroup -v `pwd`:/osg-test:rw centos:centos${OS_VERSION} /usr/sbin/init
DOCKER_CONTAINER_ID=$(docker ps | grep centos | awk '{print $1}')
docker logs $DOCKER_CONTAINER_ID
docker exec -ti $DOCKER_CONTAINER_ID /bin/bash -xec "bash -xe /osg-test/travis-ci/test_inside_docker.sh ${OS_VERSION} ${PACKAGES};
echo -ne \"------\nEND OSG-TEST TESTS\n\";"
docker ps -a
docker stop $DOCKER_CONTAINER_ID
docker rm -v $DOCKER_CONTAINER_ID
fi
| #!/bin/sh -xe
# This script starts docker and systemd (if el7)
# Run tests in Container
# We use `--privileged` for cgroup compatability, which seems to be enabled by default in HTCondor 8.6.x
if [ "${OS_VERSION}" = "6" ]; then
sudo docker run --privileged \
--rm=true \
--volume=/sys/fs/cgroup:/sys/fs/cgroup \
--volume=`pwd`:/osg-test:rw \
centos:centos${OS_VERSION} \
/bin/bash -c "bash -xe /osg-test/travis-ci/test_inside_docker.sh ${OS_VERSION} ${PACKAGES}"
elif [ "${OS_VERSION}" = "7" ]; then
docker run --privileged \
--detach=true \
--tty \
--interactive=true \
--env="container=docker" \
--volume=/sys/fs/cgroup:/sys/fs/cgroup \
--volume `pwd`:/osg-test:rw \
centos:centos${OS_VERSION} \
/usr/sbin/init
DOCKER_CONTAINER_ID=$(docker ps | grep centos | awk '{print $1}')
docker logs $DOCKER_CONTAINER_ID
docker exec --tty=true \
--interactive \
$DOCKER_CONTAINER_ID \
/bin/bash -xec "bash -xe /osg-test/travis-ci/test_inside_docker.sh ${OS_VERSION} ${PACKAGES};
echo -ne \"------\nEND OSG-TEST TESTS\n\";"
docker ps -a
docker stop $DOCKER_CONTAINER_ID
docker rm -v $DOCKER_CONTAINER_ID
fi
|
Fix build script (for real...) | #!/bin/bash
set -e
COMPOUND_VERSION=$1
TAG=$2
VIMR_FILE_NAME=$3
RELEASE_NOTES=$4
IS_SNAPSHOT=$5
pushd build/Release
tar cjf ${VIMR_FILE_NAME} VimR.app
PRERELEASE=""
if [ "${IS_SNAPSHOT}" = true ] ; then
PRERELEASE="--pre-release"
fi
echo "### Creating release"
GITHUB_TOKEN=$(cat ~/.config/github.qvacua.release.token) github-release release \
--user qvacua \
--repo vimr \
--tag "${TAG}" \
--name "${COMPOUND_VERSION}" "${PRERELEASE}" \
--description "${RELEASE_NOTES}"
echo "### Uploading build"
GITHUB_TOKEN=$(cat ~/.config/github.qvacua.release.token) github-release upload \
--user qvacua \
--repo vimr \
--tag "${TAG}" \
--name "${VIMR_FILE_NAME}" \
--file "${VIMR_FILE_NAME}"
| #!/bin/bash
set -e
COMPOUND_VERSION=$1
TAG=$2
VIMR_FILE_NAME=$3
RELEASE_NOTES=$4
IS_SNAPSHOT=$5
pushd build/Release
tar cjf ${VIMR_FILE_NAME} VimR.app
echo "### Creating release"
if [ "${IS_SNAPSHOT}" = true ] ; then
GITHUB_TOKEN=$(cat ~/.config/github.qvacua.release.token) github-release release \
--user qvacua \
--repo vimr \
--tag "${TAG}" \
--pre-release \
--name "${COMPOUND_VERSION}" \
--description "${RELEASE_NOTES}"
else
GITHUB_TOKEN=$(cat ~/.config/github.qvacua.release.token) github-release release \
--user qvacua \
--repo vimr \
--tag "${TAG}" \
--name "${COMPOUND_VERSION}" \
--description "${RELEASE_NOTES}"
fi
echo "### Uploading build"
GITHUB_TOKEN=$(cat ~/.config/github.qvacua.release.token) github-release upload \
--user qvacua \
--repo vimr \
--tag "${TAG}" \
--name "${VIMR_FILE_NAME}" \
--file "${VIMR_FILE_NAME}"
|
Print tarball URLs on separate lines | #!/bin/bash -e
for tarball_dir in $RELEASE_TARBALL_DIRS; do
cat $tarball_dir/url
done
| #!/bin/bash -e
for tarball_dir in $RELEASE_TARBALL_DIRS; do
cat $tarball_dir/url
echo
done
|
Use 4 cpus during docker build | set -x
USERNAME="langtech"
for IMAGE_NAME in language-resources festival merlin; do
cd ${IMAGE_NAME}
IMAGE_NAME="base-${IMAGE_NAME}"
docker build --no-cache -t ${USERNAME}/${IMAGE_NAME}:latest .
docker push ${USERNAME}/${IMAGE_NAME}
cd ../
done
| set -x
USERNAME="langtech"
for IMAGE_NAME in language-resources festival merlin; do
cd ${IMAGE_NAME}
IMAGE_NAME="base-${IMAGE_NAME}"
docker build --cpuset-cpus=4 --no-cache -t ${USERNAME}/${IMAGE_NAME}:latest .
docker push ${USERNAME}/${IMAGE_NAME}
cd ../
done
|
Remove old/alternate code. This is what version control is for | #!/bin/bash
# Prints the WAN IP address. The result is cached and updated according to $update_period.
tmp_file="${TMUX_POWERLINE_TEMPORARY_DIRECTORY}/wan_ip.txt"
wan_ip=""
if [ -f "$tmp_file" ]; then
if shell_is_osx; then
last_update=$(stat -f "%m" ${tmp_file})
else
last_update=$(stat -c "%Y" ${tmp_file})
fi
time_now=$(date +%s)
update_period=900
up_to_date=$(echo "(${time_now}-${last_update}) < ${update_period}" | bc)
if [ "$up_to_date" -eq 1 ]; then
wan_ip=$(cat ${tmp_file})
fi
fi
if [ -z "$wan_ip" ]; then
#wan_ip=$(wget --timeout=1 --tries=1 -O - http://formyip.com/ 2>/dev/null | grep -Pzo "(?<=Your IP is )[^<]*")
wan_ip=$(curl --max-time 2 -s http://whatismyip.akamai.com/)
if [ "$?" -eq "0" ]; then
echo "${wan_ip}" > $tmp_file
elif [ -f "${tmp_file}" ]; then
wan_ip=$(cat "$tmp_file")
fi
fi
if [ -n "$wan_ip" ]; then
#echo "Ⓦ ${wan_ip}"
echo "ⓦ ${wan_ip}"
fi
exit 0
| #!/bin/bash
# Prints the WAN IP address. The result is cached and updated according to $update_period.
tmp_file="${TMUX_POWERLINE_TEMPORARY_DIRECTORY}/wan_ip.txt"
wan_ip=""
if [ -f "$tmp_file" ]; then
if shell_is_osx; then
last_update=$(stat -f "%m" ${tmp_file})
else
last_update=$(stat -c "%Y" ${tmp_file})
fi
time_now=$(date +%s)
update_period=900
up_to_date=$(echo "(${time_now}-${last_update}) < ${update_period}" | bc)
if [ "$up_to_date" -eq 1 ]; then
wan_ip=$(cat ${tmp_file})
fi
fi
if [ -z "$wan_ip" ]; then
wan_ip=$(curl --max-time 2 -s http://whatismyip.akamai.com/)
if [ "$?" -eq "0" ]; then
echo "${wan_ip}" > $tmp_file
elif [ -f "${tmp_file}" ]; then
wan_ip=$(cat "$tmp_file")
fi
fi
if [ -n "$wan_ip" ]; then
echo "ⓦ ${wan_ip}"
fi
exit 0
|
Simplify render script to just run QC | #!/bin/sh
render_templates() {
pwd
cd ..
git clone https://github.com/bcbio/bcbio_rnaseq_output_example.git
cd bcbio_rnaseq_output_example
Rscript -e 'devtools::install_local("../bcbioRNASeq")'
Rscript -e 'testthat::test_file("test_reports.R")'
cd report
mv de.html de-${TRAVIS_BRANCH}.html
mv qc.html qc-${TRAVIS_BRANCH}.html
mv fa.html fa-${TRAVIS_BRANCH}.html
cd ..
}
setup_git() {
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis CI"
}
commit_website_files() {
git fetch origin gh-pages
git checkout gh-pages
git pull
cp report/*.html .
git add *.html
git commit --message "Travis build: $TRAVIS_BUILD_NUMBER"
}
upload_files() {
git remote add origin-pages https://${GITHUB_TOKE}@github.com/bcbio/bcbio_rnaseq_output_example.git > /dev/null 2>&1
git push --force --quiet --set-upstream origin-pages gh-pages
}
render_templates
setup_git
commit_website_files
upload_files
| #!/bin/sh
# Render R Markdown
# https://github.com/bcbio/bcbio_rnaseq_output_example
render_templates() {
pwd
cd ..
git clone https://github.com/bcbio/bcbio_rnaseq_output_example.git
cd bcbio_rnaseq_output_example
Rscript -e 'devtools::install_local("../bcbioRNASeq")'
Rscript -e 'testthat::test_file("test_reports.R")'
cd report
mv qc.html qc-${TRAVIS_BRANCH}.html
cd ..
}
setup_git() {
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis CI"
}
commit_website_files() {
git fetch origin gh-pages
git checkout gh-pages
git pull
cp report/*.html .
git add *.html
git commit --message "Travis build: $TRAVIS_BUILD_NUMBER"
}
upload_files() {
git remote add origin-pages https://${GITHUB_TOKE}@github.com/bcbio/bcbio_rnaseq_output_example.git > /dev/null 2>&1
git push --force --quiet --set-upstream origin-pages gh-pages
}
render_templates
setup_git
commit_website_files
upload_files
|
Update im unit test script | #!/bin/bash
WORKSPACE=$1
cd $WORKSPACE
nosetests test/unit/connectors/*.py test/unit/*.py test/functional/*.py -v --stop --with-xunit --with-timer --timer-no-color --with-coverage --cover-erase --cover-html --cover-package=IM
| #!/bin/bash
WORKSPACE=$1
cd $WORKSPACE
nosetests test/unit/connectors/*.py test/unit/*.py test/functional/*.py -v --stop --with-xunit --with-timer --timer-no-color --with-coverage --cover-erase --cover-html --cover-xml --cover-package=IM
|
Fix error in PySide2 script | #!/bin/bash
export PATH="$HOME/miniconda/bin:$PATH"
source activate test
# Download PySide2 wheels
export URL="http://download.qt.io/snapshots/ci/pyside/5.11/latest/"
if [ "$USE_CONDA" = "Yes"]; then
if [ "$PYTHON_VERSION" = "2.7"]; then
conda remove -q qt pyqt
pip install -q --index-url=${URL} pyside2 --trusted-host download.qt.io
else
exit 0
fi
else
pip uninstall -q -y pyqt5 sip
pip install -q --index-url=${URL} pyside2 --trusted-host download.qt.io
fi
python qtpy/tests/runtests.py
# Force quitting if exit status of runtests.py was not 0
if [ $? -ne 0 ]; then
exit 1
fi
pip uninstall -y -q pyside2
| #!/bin/bash
export PATH="$HOME/miniconda/bin:$PATH"
source activate test
# Download PySide2 wheels
export URL="http://download.qt.io/snapshots/ci/pyside/5.11/latest/"
if [ "$USE_CONDA" = "Yes" ]; then
if [ "$PYTHON_VERSION" = "2.7" ]; then
conda remove -q qt pyqt
pip install -q --index-url=${URL} pyside2 --trusted-host download.qt.io
else
exit 0
fi
else
pip uninstall -q -y pyqt5 sip
pip install -q --index-url=${URL} pyside2 --trusted-host download.qt.io
fi
python qtpy/tests/runtests.py
# Force quitting if exit status of runtests.py was not 0
if [ $? -ne 0 ]; then
exit 1
fi
pip uninstall -y -q pyside2
|
Fix paths for the arduino library zip. | #!/bin/sh
cp ../led_strip.h .
cp ../led_strip.c led_strip.cpp
cp ../led_strip_no_backend.c led_strip_no_backend.cpp
cp ../led_strip_no_backend.h .
cp ../led_strip-cpp.h .
cp ../led_strip-cpp-implementation.h .
cp ../led_strip_struct.h .
zip -r LedStrip.zip * -x setup.sh
| #!/bin/sh
cp ../src/led_strip.h .
cp ../src/led_strip.c led_strip.cpp
cp ../src/led_strip_no_backend.c led_strip_no_backend.cpp
cp ../src/led_strip_no_backend.h .
cp ../src/led_strip-cpp.h .
cp ../src/led_strip-cpp-implementation.h .
cp ../src/led_strip_struct.h .
zip -r LedStrip.zip * -x createArduinoLibrary.sh
|
Speed up compilation with make -j parameter. Patch by lmoureaux. | #!/bin/sh
if test "x$(dirname $0)" != "x." ; then
echo "Currently this script must be run on its own directory" >&2
exit 1
fi
# Remove old version
rm -Rf freeciv
. ./version.txt
# Allow the user to override how Freeciv is downloaded.
if test -f dl_freeciv.sh ; then
FC_DL=dl_freeciv.sh
else
FC_DL=dl_freeciv_default.sh
fi
if ! sh $FC_DL $FCREV $FCBRANCH ; then
echo "Svn export failed" >&2
exit 1
fi
if ! ./apply_patches.sh ; then
echo "Patching failed" >&2
exit 1
fi
( cd freeciv
./autogen.sh CFLAGS="-O3" --with-project-definition=../freeciv-web.project --enable-fcweb --enable-json --disable-delta-protocol --disable-nls --disable-fcmp --enable-freeciv-manual=html --disable-ruledit --enable-ai-static=classic,threaded --prefix=${HOME}/freeciv/ && make -s
)
| #!/bin/sh
if test "x$(dirname $0)" != "x." ; then
echo "Currently this script must be run on its own directory" >&2
exit 1
fi
# Remove old version
rm -Rf freeciv
. ./version.txt
# Allow the user to override how Freeciv is downloaded.
if test -f dl_freeciv.sh ; then
FC_DL=dl_freeciv.sh
else
FC_DL=dl_freeciv_default.sh
fi
if ! sh $FC_DL $FCREV $FCBRANCH ; then
echo "Svn export failed" >&2
exit 1
fi
if ! ./apply_patches.sh ; then
echo "Patching failed" >&2
exit 1
fi
( cd freeciv
./autogen.sh CFLAGS="-O3" --with-project-definition=../freeciv-web.project --enable-fcweb --enable-json --disable-delta-protocol --disable-nls --disable-fcmp --enable-freeciv-manual=html --disable-ruledit --enable-ai-static=classic,threaded --prefix=${HOME}/freeciv/ && make -s -j$(nproc)
)
|
Implement multi-channel support into deployment script | #!/bin/bash
echo "Pack game.."
mkdir game
mv desktop/build/libs/desktop-1.0.jar game/legend-of-studentenfutter.jar
cp -rf android/assets/fonts/ game/fonts
cp -rf android/assets/i18n/ game/i18n
cp -rf android/assets/maps/ game/maps
cp -rf android/assets/music/ game/music
cp -rf android/assets/sound/ game/sound
cp -rf android/assets/textures game/textures
zip -r game.zip game
rm -rf game/
echo "Deploying to itch.io.."
wget http://dl.itch.ovh/butler/linux-amd64/head/butler
chmod +x butler
touch butler_creds
echo -n $ITCH_API_KEY > butler_creds
# Upload game
./butler push game.zip myrealitycoding/the-legend-of-studentenfutter:linux-universal -i butler_creds
# Cleanup
echo "Cleanup.."
./butler logout -i butler_creds --assume-yes
rm -rf game
rm butler
rm game.zip
echo "Done."
| #!/bin/bash
echo "Pack game.."
mkdir game
mv desktop/build/libs/desktop-1.0.jar game/legend-of-studentenfutter.jar
cp -rf android/assets/fonts/ game/fonts
cp -rf android/assets/i18n/ game/i18n
cp -rf android/assets/maps/ game/maps
cp -rf android/assets/music/ game/music
cp -rf android/assets/sound/ game/sound
cp -rf android/assets/textures game/textures
zip -r game.zip game
rm -rf game/
echo "Deploying to itch.io.."
wget http://dl.itch.ovh/butler/linux-amd64/head/butler
chmod +x butler
touch butler_creds
echo -n $ITCH_API_KEY > butler_creds
# Upload game
./butler push game.zip myrealitycoding/the-legend-of-studentenfutter:windows-linux-mac -i butler_creds
# Cleanup
echo "Cleanup.."
./butler logout -i butler_creds --assume-yes
rm -rf game
rm butler
rm game.zip
echo "Done."
|
Delete env-toolkit before pylint checks | #!/bin/bash
set -ex
prep() {
yum -y update
yum -y install epel-release
yum -y install python34 python34-virtualenv which
}
prep
./detect-common-errors.sh
./detect-dead-code.sh
./measure-cyclomatic-complexity.sh --fail-on-error
./measure-maintainability-index.sh --fail-on-error
./run-linter.sh
| #!/bin/bash
set -ex
prep() {
yum -y update
yum -y install epel-release
yum -y install python34 python34-virtualenv which
}
# this script is copied by CI, we don't need it
rm -f env-toolkit
prep
./detect-common-errors.sh
./detect-dead-code.sh
./measure-cyclomatic-complexity.sh --fail-on-error
./measure-maintainability-index.sh --fail-on-error
./run-linter.sh
|
Make yapf only process files which have been changed since topic branch fork | #!/bin/bash -eux
yapf --in-place --recursive .
isort --order-by-type --recursive --line-width 100 --diff --verbose -y
pyflakes .
echo 'SUCCESS!'
| #!/bin/bash -ux
base_remote="${1:-origin}"
base_branch="${1:-master}"
base_remote_branch="${1:-master}"
set +e
if [ "$(<.git/refs/heads/${base_branch})" != "$(<.git/refs/remotes/${base_remote}/${base_remote_branch})" ]; then
echo """running yapf in full mode, because an assumption that master and origin/master are the same, is broken. To fix it, do this:
git checkout master
git pull --ff-only
then checkout your topic branch and run $0.
If the base branch on github is not called 'origin', invoke as $0 proper_origin_remote_name. Then your remote needs to be synched with your master too.
"""
yapf --in-place --recursive .
else
echo 'running yapf in incremental mode'
head=`mktemp`
master=`mktemp`
git rev-list --first-parent HEAD > "$head" # list of commits being a history of HEAD branch, but without commits merged from master after forking
git rev-list origin/master > "$master" # list of all commits on history of master
base_commit=`diff -u "$head" "$master" | grep '^ ' | head -n 1 | cut -c 2-` # the commit from which the master and topic (current) branch have diverged
git diff --name-only "${base_commit}..HEAD" | grep '\.py$' | xargs yapf --in-place setup.py
fi
isort --order-by-type --recursive --line-width 100 --diff --verbose -y
pyflakes .
echo 'SUCCESS!'
|
Revert "Produce useful message when cannot start tmux" | # POSIX compatible script
_LOGIN_SHELL=$(getent passwd $(id -nu) | cut -d: -f 7)
if [ ! -x "$_LOGIN_SHELL" ]; then
if [ -x /bin/bash ]; then
_LOGIN_SHELL=/bin/bash
else
_LOGIN_SHELL=/bin/sh
fi
fi
for motd in /var/run/motd.dynamic /etc/motd; do
if [ -e $motd ]; then
cat $motd
fi
done
command -v tmux >/dev/null 2>&1 \
|| [ -x "$HOME/.nmk/local/bin/tmux" ] \
|| echo "not found tmux, is \$NMK_DIR/local setup correctly?" \
&& exec $_LOGIN_SHELL -l
# Make sure that byobu doesn't take over our login shell
exec env BYOBU_DISABLE=1 $_LOGIN_SHELL -l -c 'exec ${NMK_DIR:-~/.nmk}/bin/nmk -l'
# vi: ft=sh
| # POSIX compatible script
_LOGIN_SHELL=$(getent passwd $(id -nu) | cut -d: -f 7)
if [ ! -x "$_LOGIN_SHELL" ]; then
if [ -x /bin/bash ]; then
_LOGIN_SHELL=/bin/bash
else
_LOGIN_SHELL=/bin/sh
fi
fi
for motd in /var/run/motd.dynamic /etc/motd; do
if [ -e $motd ]; then
cat $motd
fi
done
# Make sure that byobu doesn't take over our login shell
exec env BYOBU_DISABLE=1 $_LOGIN_SHELL -l -c 'exec ${NMK_DIR:-~/.nmk}/bin/nmk -l'
# vi: ft=sh
|
Update icon cache after installation | #!/bin/bash
# Link to the binary
ln -sf /opt/{{ name }}/{{ name }} /usr/bin/{{ name }}
| #!/bin/bash
# Link to the binary
ln -sf /opt/{{ name }}/{{ name }} /usr/bin/{{ name }}
# Update icon cache
/bin/touch --no-create /usr/share/icons/hicolor &>/dev/null
/usr/bin/gtk-update-icon-cache /usr/share/icons/hicolor &>/dev/null || :
|
Extend smoke test for 'weave run --local' to include IPAM | #!/bin/bash
. ./config.sh
start_suite "Run weave with --local"
run_on $HOST1 sudo ./weave --local reset
run_on $HOST1 sudo ./weave --local launch
assert_raises "docker_on $HOST1 ps | grep weave"
run_on $HOST1 sudo ./weave --local run 10.2.6.5/24 -ti --name=c1 gliderlabs/alpine /bin/sh
assert_raises "exec_on $HOST1 c1 ifconfig | grep ethwe"
end_suite
| #!/bin/bash
. ./config.sh
start_suite "Run weave with --local"
run_on $HOST1 sudo ./weave --local reset
run_on $HOST1 sudo ./weave --local launch -iprange 10.2.5.0/24
assert_raises "docker_on $HOST1 ps | grep weave"
run_on $HOST1 sudo ./weave --local run 10.2.6.5/24 -ti --name=c1 gliderlabs/alpine /bin/sh
assert_raises "exec_on $HOST1 c1 ifconfig | grep ethwe"
run_on $HOST1 sudo ./weave --local run -ti --name=c2 gliderlabs/alpine /bin/sh
assert_raises "exec_on $HOST1 c2 ifconfig | grep ethwe"
end_suite
|
Add 'bcl' alias for 'brew cleanup' | # Some aliases for Homebrew
cite 'about-alias'
about-alias 'homebrew abbreviations'
alias bup='brew update && brew upgrade --all'
alias bupc='brew update && brew upgrade --all && brew cleanup'
alias bout='brew outdated'
alias bin='brew install'
alias brm='brew uninstall'
alias bls='brew list'
alias bsr='brew search'
alias binf='brew info'
alias bdr='brew doctor'
alias bed='brew edit'
alias bcin='brew-cask install'
alias bcrm='brew-cask uninstall'
alias bcsr='brew-cask search'
| # Some aliases for Homebrew
cite 'about-alias'
about-alias 'homebrew abbreviations'
alias bup='brew update && brew upgrade --all'
alias bupc='brew update && brew upgrade --all && brew cleanup'
alias bout='brew outdated'
alias bin='brew install'
alias brm='brew uninstall'
alias bcl='brew cleanup'
alias bls='brew list'
alias bsr='brew search'
alias binf='brew info'
alias bdr='brew doctor'
alias bed='brew edit'
alias bcin='brew-cask install'
alias bcrm='brew-cask uninstall'
alias bcsr='brew-cask search'
|
Fix order of module name and debug args for gunicorn. | #!/usr/bin/env bash
set -eu
function log {
echo "$(date +"%T") - START INFO - $*"
}
_term() {
echo "Caught SIGTERM signal!"
kill -TERM "$child" 2>/dev/null
}
trap _term SIGTERM
log Migrating
if [ ! -f "/var/akvo/rsr/mediaroot/fake-migration-flag" ]; then
log Running fake initial migrations
python manage.py migrate --fake-initial --noinput;
touch "/var/akvo/rsr/mediaroot/fake-migration-flag";
fi
python manage.py migrate --noinput
log Adding to crontab
python manage.py crontab add
log Making all environment vars available to cron jobs
env >> /etc/environment
log Starting cron
/usr/sbin/cron
log Starting gunicorn in background
gunicorn "${GUNICORN_DEBUG_ARGS:-}" akvo.wsgi --max-requests 200 --workers 5 --timeout 300 --bind 0.0.0.0:8000 &
child=$!
wait "$child" | #!/usr/bin/env bash
set -eu
function log {
echo "$(date +"%T") - START INFO - $*"
}
_term() {
echo "Caught SIGTERM signal!"
kill -TERM "$child" 2>/dev/null
}
trap _term SIGTERM
log Migrating
if [ ! -f "/var/akvo/rsr/mediaroot/fake-migration-flag" ]; then
log Running fake initial migrations
python manage.py migrate --fake-initial --noinput;
touch "/var/akvo/rsr/mediaroot/fake-migration-flag";
fi
python manage.py migrate --noinput
log Adding to crontab
python manage.py crontab add
log Making all environment vars available to cron jobs
env >> /etc/environment
log Starting cron
/usr/sbin/cron
log Starting gunicorn in background
gunicorn akvo.wsgi "${GUNICORN_DEBUG_ARGS:-}" --max-requests 200 --workers 5 --timeout 300 --bind 0.0.0.0:8000 &
child=$!
wait "$child" |
Add wrapper to find the test binary | #!/bin/bash
go get github.com/BurntSushi/toml-test # install test suite
go get github.com/BurntSushi/toml/toml-test-go # install my parser
go build -o test_program_bin github.com/pelletier/go-toml/test_program
$GOPATH/bin/toml-test ./test_program_bin # run tests on my parser
| #!/bin/bash
go get github.com/BurntSushi/toml-test # install test suite
go get github.com/BurntSushi/toml/toml-test-go # install my parser
go build -o test_program_bin github.com/pelletier/go-toml/test_program
toml_test_wrapper() {
if hash toml-test 2>/dev/null; then # test availability in $PATH
toml-test "$@"
else
p="$HOME/gopath/bin/toml-test" # try in Travi's place
if [ -f "$p" ]; then
"$p" "$@"
else
"$GOPATH/bin/toml-test" "$@"
fi
fi
}
toml_test_wrapper ./test_program_bin # run tests on my parser
|
Check that realpath is installed | #!/usr/bin/env bash
# Run our data8-notebook container locally.
# In the container, we use they jovyan user, but set the UID and GID to the
# current system user's UID and GID, so that when jovyan makes/edits files in
# the container, they appear to be from the system user. We also mount the
# base path of the repo as the home directory in the container.
user=$USER
uid=`id -u $USER`
gid=`id -g $USER`
repo_base=`realpath \`pwd\`/../..`
notebook_base=$repo_base/notebooks
if [[ $EUID -eq 0 ]]; then
echo "Don't run this script as root. Add your personal user to the docker group and run with your personal user." 1>&2
exit 1
fi
docker run -it --rm -e USER=$user -e NB_UID=$uid -e NB_GID=$gid -e HOME=$HOME \
-p 8888:8888\
--volume $notebook_base:/home/notebooks\
data8-notebook $1
| #!/usr/bin/env bash
# Run our data8-notebook container locally.
# In the container, we use they jovyan user, but set the UID and GID to the
# current system user's UID and GID, so that when jovyan makes/edits files in
# the container, they appear to be from the system user. We also mount the
# base path of the repo as the home directory in the container.
if [[ $EUID -eq 0 ]]; then
echo "Don't run this script as root. Add your personal user to the docker group and run with your personal user." 1>&2
exit 1
fi
if [[ ! `which realpath` ]]; then
echo "Install `realpath`."
exit 1
fi
user=$USER
uid=`id -u $USER`
gid=`id -g $USER`
repo_base=`realpath \`pwd\`/../..`
notebook_base=$repo_base/notebooks
docker run -it --rm -e USER=$user -e NB_UID=$uid -e NB_GID=$gid -e HOME=$HOME \
-p 8888:8888\
--volume $notebook_base:/home/notebooks\
data8-notebook $1
|
Fix check of whether Postgres user exists | #!/bin/bash
cd "$(dirname "$0")" || exit
function createRole {
echo "Setting up a database user."
echo "You may be prompted for the password for the $USERNAME user."
echo "As you type the password no output will be shown."
echo
sudo -u postgres psql -c "CREATE ROLE $USERNAME WITH LOGIN CREATEDB PASSWORD 'placeholder'"
}
# If There is no Postgre role with the username, create it
[ $(psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='$USERNAME'") != "1" ] && createRole
# Replaces "USERNAME" with the actual username in config.json
sed -i "s/USERNAME/$USERNAME/" config.json
echo "Postgre setup done"
| #!/bin/bash
cd "$(dirname "$0")" || exit
function createRole {
echo "Setting up a database user."
echo "You may be prompted for the password for the $USERNAME user."
echo "As you type the password no output will be shown."
echo
sudo -u postgres psql -c "CREATE ROLE $USERNAME WITH LOGIN CREATEDB PASSWORD 'placeholder'"
}
# If There is no Postgre role with the username, create it
# psql will error if there is no role with the current user's name, e.g.:
# psql: FATAL: role "user" does not exist
if psql postgres -c "" &> /dev/null
then
echo "PostgreSQL user $USERNAME already exists so it will not be created."
else
createRole
fi
# Replaces "USERNAME" with the actual username in config.json
sed -i "s/USERNAME/$USERNAME/" config.json
echo "Postgre setup done"
|
Change owner on keys from letsencrypt | #!/bin/bash
dsmFqdn=${1}
curl -O https://dl.eff.org/certbot-auto
chmod a+x certbot-auto
./certbot-auto --debug certonly --webroot -w /usr/share/nginx/html/ -d ${dsmFqdn} --non-interactive --agree-tos --email event@trenddemos.com
uploadResponse=$(aws iam upload-server-certificate --server-certificate-name event.trenddemos.com --certificate-body file:///etc/letsencrypt/archive/${dsmFqdn}/cert1.pem --private-key file:///etc/letsencrypt/archive/${dsmFqdn}/privkey1.pem)
arn=$(echo $uploadResponse | jq -r .ServerCertificateMetadata.Arn)
echo $arn > /home/ec2-user/variables/certArn | #!/bin/bash
dsmFqdn=${1}
curl -O https://dl.eff.org/certbot-auto
chmod a+x certbot-auto
./certbot-auto --debug certonly --webroot -w /usr/share/nginx/html/ -d ${dsmFqdn} --non-interactive --agree-tos --email event@trenddemos.com
sudo chown -R ec2-user:ec2-user /etc/letsencrypt
uploadResponse=$(aws iam upload-server-certificate --server-certificate-name event.trenddemos.com --certificate-body file:///etc/letsencrypt/archive/${dsmFqdn}/cert1.pem --private-key file:///etc/letsencrypt/archive/${dsmFqdn}/privkey1.pem)
arn=$(echo $uploadResponse | jq -r .ServerCertificateMetadata.Arn)
echo $arn > /home/ec2-user/variables/certArn |
Add e2e tests for search | #!/usr/bin/env bash
# Fetch a single business's records, compare results
BUSINESS_JSON="$(curl -s http://localhost:5000/api/business/F000032)"
if [ "$(echo $BUSINESS_JSON |jq '.EntityID')" != '"F000032"' ]; then
echo "ERROR: API is not returning entity ID correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.Stock1')" != '"COMMON (200000000)"' ]; then
echo "ERROR: API is not returning Stock1 correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.IncorpDate')" != '"1903-08-18"' ]; then
echo "ERROR: API is not returning incorporation date correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.Name')" != '"AMERICAN BRANDS, INC."' ]; then
echo "ERROR: API is not returning corporation name correctly"
ERRORED=true
fi
# If any tests failed, have this script return that failure
if [ "$ERRORED" == true ]; then
exit 1
fi
| #!/usr/bin/env bash
# Fetch a single business's records, compare results
BUSINESS_JSON="$(curl -s http://localhost:5000/api/business/F000032)"
if [ "$(echo $BUSINESS_JSON |jq '.EntityID')" != '"F000032"' ]; then
echo "ERROR: API is not returning entity ID correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.Stock1')" != '"COMMON (200000000)"' ]; then
echo "ERROR: API is not returning Stock1 correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.IncorpDate')" != '"1903-08-18"' ]; then
echo "ERROR: API is not returning incorporation date correctly"
ERRORED=true
fi
if [ "$(echo $BUSINESS_JSON |jq '.Name')" != '"AMERICAN BRANDS, INC."' ]; then
echo "ERROR: API is not returning corporation name correctly"
ERRORED=true
fi
# Run a search for a test query
SEARCH_JSON="$(curl -s http://localhost:5000/api/search/test)"
if [ "$(echo $SEARCH_JSON |jq '. | length')" -ne '100' ]; then
echo "ERROR: API is not returning enough search results"
ERRORED=true
fi
# Run a search for a test query that will fail
SEARCH_JSON="$(curl -s http://localhost:5000/api/search/asdflasdfqasdl)"
if [ "$(echo $SEARCH_JSON |jq '. | length')" -ne '0' ]; then
echo "ERROR: API is not returning excessive search results"
ERRORED=true
fi
# If any tests failed, have this script return that failure
if [ "$ERRORED" == true ]; then
exit 1
fi
|
Debug script issues on Kokoro. | #!/bin/bash
# Copyright 2020 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname $0)/common.sh
# Exporting for subprocesses as GCP APIs and tools check this environmental
# variable for authentication.
export GOOGLE_APPLICATION_CREDENTIALS="${KOKORO_KEYSTORE_DIR}/${GCLOUD_CREDENTIALS}"
gcloud auth activate-service-account \
--key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
gcloud config set project ${PROJECT}
gcloud config set compute/zone ${ZONE}
bazel run //benchmarks:benchmarks -- \
--verbose \
run-gcp \
"(startup|absl)" \
--internal \
--runtime=runc \
--runtime=runsc \
--installers=head
| #!/bin/bash
# Copyright 2020 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname $0)/common.sh
# Exporting for subprocesses as GCP APIs and tools check this environmental
# variable for authentication.
export GOOGLE_APPLICATION_CREDENTIALS="${KOKORO_KEYSTORE_DIR}/${GCLOUD_CREDENTIALS}"
which gcloud
gcloud auth activate-service-account \
--key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
gcloud config set project ${PROJECT}
gcloud config set compute/zone ${ZONE}
bazel run //benchmarks:benchmarks -- \
--verbose \
run-gcp \
"(startup|absl)" \
--internal \
--runtime=runc \
--runtime=runsc \
--installers=head
|
Use more portable switch for uniq to unbreak osx build | #!/bin/bash
SOURCE_PATH=../
EXPORTS=`find "${SOURCE_PATH}" -name "*exports"`
ERRORS=0
for E in $EXPORTS; do
NUM_DUPES=`sort $E | uniq -D | wc -l`
if [[ "$NUM_DUPES" != 0 ]]; then
echo "There are duplicate symbols in '$E'"
ERRORS=1
fi
done
if [[ "$ERRORS" = 1 ]]; then
echo "There are duplicate symbols"
exit 1
fi
exit $ERRORS
| #!/bin/bash
SOURCE_PATH=../
EXPORTS=`find "${SOURCE_PATH}" -name "*exports"`
ERRORS=0
for E in $EXPORTS; do
NUM_DUPES=`sort $E | uniq -d | wc -l`
if [[ "$NUM_DUPES" != 0 ]]; then
echo "There are duplicate symbols in '$E'"
ERRORS=1
fi
done
if [[ "$ERRORS" = 1 ]]; then
echo "There are duplicate symbols"
exit 1
fi
exit $ERRORS
|
Add build test for serde feature | #!/bin/sh
set -ex
cargo bench --verbose
cargo test --verbose --manifest-path=macros/Cargo.toml
| #!/bin/sh
set -ex
cargo bench --verbose
cargo test --verbose --manifest-path=macros/Cargo.toml
# Build test for the serde feature
cargo build --verbose --features "serde"
|
Fix read issues on doc_site | #!/bin/sh
MKDOCS_SITE_PATH="/opt/var/mkdocs/{{ mkdocs_site_name }}"
MKDOCS_CLONE_URI="{{ mkdocs_source_url }}"
MKDOCS_SERVE_PATH="{{ mkdocs_site_path}}"
# Does the Mkdocs content path exist
if [ ! -d "$MKDOCS_SITE_PATH" ] ; then
mkdir -p "$MKDOCS_SITE_PATH"
fi
# Jump into the content path
cd "$MKDOCS_SITE_PATH"
# If we have .git update, otherwise clone
if [ -d .git ] ; then
git pull
else
git clone "$MKDOCS_CLONE_URI" .
fi
# Build destdir if it doesn't exist
if [ ! -d "$MKDOCS_SERVE_PATH" ] ; then
mkdir -p "$MKDOCS_SERVE_PATH"
fi
# Build the site
mkdocs build
# Deploy the site
rsync -a site/ "$MKDOCS_SERVE_PATH"
# Fix the system permissions
chmod og+rX "$MKDOCS_SERVE_PATH"
| #!/bin/sh
MKDOCS_SITE_PATH="/opt/var/mkdocs/{{ mkdocs_site_name }}"
MKDOCS_CLONE_URI="{{ mkdocs_source_url }}"
MKDOCS_SERVE_PATH="{{ mkdocs_site_path}}"
# Does the Mkdocs content path exist
if [ ! -d "$MKDOCS_SITE_PATH" ] ; then
mkdir -p "$MKDOCS_SITE_PATH"
fi
# Jump into the content path
cd "$MKDOCS_SITE_PATH"
# If we have .git update, otherwise clone
if [ -d .git ] ; then
git pull
else
git clone "$MKDOCS_CLONE_URI" .
fi
# Build destdir if it doesn't exist
if [ ! -d "$MKDOCS_SERVE_PATH" ] ; then
mkdir -p "$MKDOCS_SERVE_PATH"
fi
# Build the site
mkdocs build
# Deploy the site
rsync -a site/ "$MKDOCS_SERVE_PATH"
# Fix the system permissions
chmod -R og+rX "$MKDOCS_SERVE_PATH"
|
Install nsinit that is vendored into docker. | #!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export GOPATH=/var/nsinit
mkdir -p $GOPATH
apt-get install -y -qq build-essential
go get github.com/docker/libcontainer/nsinit/nsinit
if [ ! -e /usr/sbin/nsinit ]; then
ln -s /var/nsinit/bin/nsinit /usr/sbin/nsinit
fi
| #!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export GOPATH=/var/nsinit
mkdir -p $GOPATH
apt-get install -y -qq build-essential
go get github.com/docker/docker/vendor/src/github.com/docker/libcontainer/nsinit/nsinit
if [ ! -e /usr/sbin/nsinit ]; then
ln -s /var/nsinit/bin/nsinit /usr/sbin/nsinit
fi
|
Add -s option for testing alternative story formats (e.g. MDX) | #!/bin/bash
# exit on error
set -e
declare test_root=$PWD
# remove run directory before exit to prevent yarn.lock spoiling
function cleanup {
rm -rfd ${test_root}/run
}
trap cleanup EXIT
fixtures_dir='fixtures'
# parse command-line options
# '-f' sets fixtures directory
while getopts ":f:" opt; do
case $opt in
f)
fixtures_dir=$OPTARG
;;
esac
done
# copy all files from fixtures directory to `run`
rm -rfd run
cp -r $fixtures_dir run
cd run
for dir in *
do
cd $dir
echo "Running storybook-cli in $dir"
if [ $dir == *"native"* ]
then
# run @storybook/cli
yarn sb init --skip-install --yes --install-server
else
# run @storybook/cli
yarn sb init --skip-install --yes
fi
cd ..
done
cd ..
# install all the dependencies in a single run
cd ../../..
echo "Running bootstrap"
yarn install --non-interactive --silent --pure-lockfile
cd ${test_root}/run
for dir in *
do
# check that storybook starts without errors
cd $dir
echo "Running smoke test in $dir"
failed=0
yarn storybook --smoke-test --quiet || failed=1
if [ $failed -eq 1 ]
then
exit 1
fi
cd ..
done
| #!/bin/bash
# exit on error
set -e
declare test_root=$PWD
# remove run directory before exit to prevent yarn.lock spoiling
function cleanup {
rm -rfd ${test_root}/run
}
trap cleanup EXIT
fixtures_dir='fixtures'
story_format='csf'
# parse command-line options
# '-f' sets fixtures directory
# '-s' sets story format to use
while getopts ":fs:" opt; do
case $opt in
f)
fixtures_dir=$OPTARG
;;
s)
story_format=$OPTARG
;;
esac
done
# copy all files from fixtures directory to `run`
rm -rfd run
cp -r $fixtures_dir run
cd run
for dir in *
do
cd $dir
echo "Running storybook-cli in $dir"
if [[ $dir =~ (react_native*|angular-cli-v6|ember-cli|marko|meteor|mithril|polymer|riot|react_babel_6) ]]
then
yarn sb init --skip-install --yes
else
yarn sb init --skip-install --yes --story-format $story_format
fi
cd ..
done
cd ..
# install all the dependencies in a single run
cd ../../..
echo "Running bootstrap"
yarn install --non-interactive --silent --pure-lockfile
cd ${test_root}/run
for dir in *
do
# check that storybook starts without errors
cd $dir
echo "Running smoke test in $dir"
failed=0
yarn storybook --smoke-test --quiet || failed=1
if [ $failed -eq 1 ]
then
exit 1
fi
cd ..
done
|
Add npm commands to deploy script | #!/bin/bash
cd /var/www/apiary
composer install --no-interaction
php artisan migrate --no-interaction
php artisan config:clear --no-interaction
| #!/bin/bash
cd /var/www/apiary
composer install --no-interaction
php artisan migrate --no-interaction
php artisan config:clear --no-interaction
npm install
npm run production
|
Change patterns to work with kubeadm-dind | #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
IMAGE_REPO=${IMAGE_REPO:-mirantis/k8s-appcontroller}
IMAGE_TAG=${IMAGE_TAG:-latest}
NUM_NODES=${NUM_NODES:-2}
TMP_IMAGE_PATH=${TMP_IMAGE_PATH:-/tmp/image.tar}
MASTER_NAME=${MASTER_NAME=}
SLAVE_PATTERN=${SLAVE_PATTERN:-"dind_node_"}
function import-image {
docker save ${IMAGE_REPO}:${IMAGE_TAG} -o "${TMP_IMAGE_PATH}"
if [ -n "$MASTER_NAME" ]; then
docker cp "${TMP_IMAGE_PATH}" kube-master:/image.tar
docker exec -ti "${MASTER_NAME}" docker load -i /image.tar
fi
for i in `seq 1 "${NUM_NODES}"`;
do
docker cp "${TMP_IMAGE_PATH}" "${SLAVE_PATTERN}$i":/image.tar
docker exec -ti "${SLAVE_PATTERN}$i" docker load -i /image.tar
done
set +o xtrace
echo "Finished copying docker image to dind nodes"
}
import-image
| #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
IMAGE_REPO=${IMAGE_REPO:-mirantis/k8s-appcontroller}
IMAGE_TAG=${IMAGE_TAG:-latest}
NUM_NODES=${NUM_NODES:-2}
TMP_IMAGE_PATH=${TMP_IMAGE_PATH:-/tmp/image.tar}
MASTER_NAME=${MASTER_NAME="kube-master"}
SLAVE_PATTERN=${SLAVE_PATTERN:-"kube-node-"}
function import-image {
docker save ${IMAGE_REPO}:${IMAGE_TAG} -o "${TMP_IMAGE_PATH}"
if [ -n "$MASTER_NAME" ]; then
docker cp "${TMP_IMAGE_PATH}" kube-master:/image.tar
docker exec -ti "${MASTER_NAME}" docker load -i /image.tar
fi
for i in `seq 1 "${NUM_NODES}"`;
do
docker cp "${TMP_IMAGE_PATH}" "${SLAVE_PATTERN}$i":/image.tar
docker exec -ti "${SLAVE_PATTERN}$i" docker load -i /image.tar
done
set +o xtrace
echo "Finished copying docker image to dind nodes"
}
import-image
|
Fix file path params in test script. | #!/bin/bash
tuple_count=50
thread_number=$1
do_check=$2
file_dir=$3
for i in {1..9}
do
tuples=$(($tuple_count / $thread_number))
echo "-----------test with $tuples, thread $thread_number, ON /tmp/ ------------------"
tests/aries_logging_test -t $tuples -b $thread_number -c $do_check
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -r 1
echo "-----------test with $tuples, thread $thread_number, ON $file_dir------------------"
tests/aries_logging_test -t $tuples -b $thread_number -c $do_check -d file_dir
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -d file_dir
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -r 1 -d file_dir
tuple_count=$(($tuple_count * 4))
done
| #!/bin/bash
tuple_count=50
thread_number=$1
do_check=$2
file_dir=$3
for i in {1..9}
do
tuples=$(($tuple_count / $thread_number))
echo "-----------test with $tuples, thread $thread_number, ON /tmp/ ------------------"
tests/aries_logging_test -t $tuples -b $thread_number -c $do_check
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -r 1
echo "-----------test with $tuples, thread $thread_number, ON $file_dir------------------"
tests/aries_logging_test -t $tuples -b $thread_number -c $do_check -d $file_dir
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -d $file_dir
tests/peloton_logging_test -t $tuples -b $thread_number -c $do_check -r 1 -d $file_dir
tuple_count=$(($tuple_count * 4))
done
|
Add support to install kubelet from the corresponding branch. | #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Install kubelet
! go get -d k8s.io/kubernetes
cd $GOPATH/src/k8s.io/kubernetes
make WHAT='cmd/kubelet'
sudo cp _output/bin/kubelet /usr/local/bin
| #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Install kubelet
! go get -d k8s.io/kubernetes
cd $GOPATH/src/k8s.io/kubernetes
if [ ${TRAVIS_BRANCH:-"master"} != "master" ]; then
# We can do this because cri-tools have the same branch name with kubernetes.
git checkout "${TRAVIS_BRANCH}"
fi
make WHAT='cmd/kubelet'
sudo cp _output/bin/kubelet /usr/local/bin
|
Allow venv as well as .venv | function sa() {
if [ -f .venv/bin/activate ]; then
source .venv/bin/activate
fi
}
function vpip() {
if [ -f .venv/bin/pip ]; then
./.venv/bin/pip $@
else
echo "virtualenv not found in .venv"
exit 1
fi
}
# function python() {
# if [ -f .venv/bin/pip ]; then
# ./.venv/bin/python $@
# else
# command python $@
# fi
# }
| function sa() {
if [ -f .venv/bin/activate ]; then
source .venv/bin/activate
elif [ -f venv/bin/activate ]; then
source venv/bin/activate
fi
}
function vpip() {
if [ -f .venv/bin/pip ]; then
./.venv/bin/pip $@
elif [ -f venv/bin/pip ]; then
./venv/bin/pip $@
else
echo "virtualenv not found in .venv"
exit 1
fi
}
# function python() {
# if [ -f .venv/bin/pip ]; then
# ./.venv/bin/python $@
# else
# command python $@
# fi
# }
|
Detach process from it's parent. | #!/bin/sh
# Reload profile if it exists.
if [ -f /etc/profile ];
then
echo "Reload profile data."
source /etc/profile
fi
# Load config file if it exists.
if [ -f /etc/sysconfig/frontend_config ];
then
echo "Load config."
source /etc/sysconfig/frontend_config
fi
if [ ! -z "$FRONTEND_HOME" ];
then
# FRONTEND_HOME is set, us it.
component_home=$FRONTEND_HOME/bin
else
# FRONTEND_HOME is not set.
component_home=$(pwd)
fi
# Use JAVA_HOME if it is set.
if [ ! -z "$JAVA_HOME" ];
then
# JAVA_HOME is set, use it.
$JAVA_HOME/bin/java -jar $component_home/../lib/FrontEnd.jar -p $component_home/../var/specs_monitoring_nmap_frontend.pid
else
# JAVA_HOME is NOT set.
java -jar $component_home/../lib/FrontEnd.jar -p $component_home/../var/specs_monitoring_nmap_frontend.pid
fi
| #!/bin/sh
# Reload profile if it exists.
if [ -f /etc/profile ];
then
echo "Reload profile data."
source /etc/profile
fi
# Load config file if it exists.
if [ -f /etc/sysconfig/frontend_config ];
then
echo "Load config."
source /etc/sysconfig/frontend_config
fi
if [ ! -z "$FRONTEND_HOME" ];
then
# FRONTEND_HOME is set, us it.
component_home=$FRONTEND_HOME/bin
else
# FRONTEND_HOME is not set.
component_home=$(pwd)
fi
# Use JAVA_HOME if it is set.
if [ ! -z "$JAVA_HOME" ];
then
# JAVA_HOME is set, use it.
$JAVA_HOME/bin/java -jar $component_home/../lib/FrontEnd.jar -p $component_home/../var/specs_monitoring_nmap_frontend.pid &
disown
else
# JAVA_HOME is NOT set.
java -jar $component_home/../lib/FrontEnd.jar -p $component_home/../var/specs_monitoring_nmap_frontend.pid &
disown
fi
|
Use LZMA compression for nightly builds | #!/bin/bash
ARCNAME=final
NIGHTLY_WEB_ROOT=/var/www/html/nightly
#Arguments: processFarBuild <32|64>
processFarBuild()
{
if [ ! -e ../outfinalnew$1/${ARCNAME}.msi ]; then
echo "outfinalnew$1/${ARCNAME}.msi is missing"
return 1
fi
BASE=$PWD
if ! cd ../outfinalnew$1; then
echo "cd ../outfinalnew$1 failed"
return 1
fi
7za a -r -x!${ARCNAME}.msi -x!*.pdb ${ARCNAME}.7z *
7za a -r ${ARCNAME}.pdb.7z *.pdb
cd $BASE || return 1
m4 -P -DFARBIT=$1 -DHOSTTYPE=Unix -D ARC=../outfinalnew$1/$ARCNAME -D FARVAR=new -D LASTCHANGE="$LASTCHANGE" ../pagegen.m4 > $NIGHTLY_WEB_ROOT/FarW.$1.php
}
./installer.sh || exit 1
cd far || exit 1
LASTCHANGE=`head -1 changelog | dos2unix`
( \
processFarBuild 32 && \
processFarBuild 64 \
) || exit 1
cd ..
| #!/bin/bash
ARCNAME=final
NIGHTLY_WEB_ROOT=/var/www/html/nightly
#Arguments: processFarBuild <32|64>
processFarBuild()
{
if [ ! -e ../outfinalnew$1/${ARCNAME}.msi ]; then
echo "outfinalnew$1/${ARCNAME}.msi is missing"
return 1
fi
BASE=$PWD
if ! cd ../outfinalnew$1; then
echo "cd ../outfinalnew$1 failed"
return 1
fi
7za a -m0=LZMA -r -x!${ARCNAME}.msi -x!*.pdb ${ARCNAME}.7z *
7za a -m0=LZMA -r ${ARCNAME}.pdb.7z *.pdb
cd $BASE || return 1
m4 -P -DFARBIT=$1 -DHOSTTYPE=Unix -D ARC=../outfinalnew$1/$ARCNAME -D FARVAR=new -D LASTCHANGE="$LASTCHANGE" ../pagegen.m4 > $NIGHTLY_WEB_ROOT/FarW.$1.php
}
./installer.sh || exit 1
cd far || exit 1
LASTCHANGE=`head -1 changelog | dos2unix`
( \
processFarBuild 32 && \
processFarBuild 64 \
) || exit 1
cd ..
|
Set plus x, to silence needless verbosity | #!/bin/bash
set +e # failure OK for now...
set -x
pushd /vagrant/sandcats
MONGO_URL=mongodb://localhost/sandcats_mongo meteor run --settings /etc/sandcats-meteor-settings.json &
popd
# Wait for Meteor to come online, up to N seconds.
set -x
for i in $(seq 90)
do
nc -z localhost 3000
retval=$?
if [[ $retval == "0" ]]; then
echo -n '+'
break
else
sleep 1
echo -n '.'
fi
done
# Make sure anything we prented before is newline-terminated.
echo
set -e # Failure is no longer OK!
# Restart nginx, in case it is wants to be all 502-y
sudo service nginx restart
# Now, actually run the tests
make action-run-tests
| #!/bin/bash
set +e # failure OK for now...
set -x
pushd /vagrant/sandcats
MONGO_URL=mongodb://localhost/sandcats_mongo meteor run --settings /etc/sandcats-meteor-settings.json &
popd
# Wait for Meteor to come online, up to N seconds.
set +x
for i in $(seq 90)
do
nc -z localhost 3000
retval=$?
if [[ $retval == "0" ]]; then
echo -n '+'
break
else
sleep 1
echo -n '.'
fi
done
# Make sure anything we prented before is newline-terminated.
echo
set -e # Failure is no longer OK!
# Restart nginx, in case it is wants to be all 502-y
sudo service nginx restart
# Now, actually run the tests
make action-run-tests
|
Revert "keep updated local copy of MySQL docs" | #!/bin/bash
# Usage: update-all.sh
set -e
softwareupdate -ia
mas upgrade
brew update
brew upgrade
brew cleanup
brew prune
cd "$HOME/projects/dotfiles"
brew bundle dump --force
gem update
pip install --upgrade pip setuptools
pip3 install --upgrade pip setuptools wheel
pip list --outdated | cut -d \( -f 1 | xargs -n 1 pip install --upgrade
pip3 list --outdated | cut -d \( -f 1 | xargs -n 1 pip3 install --upgrade
# update Atom packages
apm upgrade -c false
# keep the latest copy of MySQL docs available locally
DOCS_HOME="$HOME/Documents"
cd "$DOCS_HOME"
MYSQL_VERSION="5.6"
MYSQL_DOCS_BASEURL="http://downloads.mysql.com/docs/"
MYSQL_DOCS="refman-$MYSQL_VERSION-en."
MYSQL_HTML_DOCS="${MYSQL_DOCS}html-chapter.tar.gz"
MYSQL_PDF_DOCS="${MYSQL_DOCS}pdf"
curl -O "$MYSQL_DOCS_BASEURL$MYSQL_HTML_DOCS"
curl -O "$MYSQL_DOCS_BASEURL$MYSQL_PDF_DOCS"
tar -xf "$MYSQL_HTML_DOCS"
rm "$MYSQL_HTML_DOCS"
| #!/bin/bash
# Usage: update-all.sh
set -e
softwareupdate -ia
mas upgrade
brew update
brew upgrade
brew cleanup
brew prune
cd "$HOME/projects/dotfiles"
brew bundle dump --force
gem update
pip install --upgrade pip setuptools
pip3 install --upgrade pip setuptools wheel
pip list --outdated | cut -d \( -f 1 | xargs -n 1 pip install --upgrade
pip3 list --outdated | cut -d \( -f 1 | xargs -n 1 pip3 install --upgrade
# update Atom packages
apm upgrade -c false
|
Fix conda and conda build releases on osx to those instances that worked. | #!/bin/bash
set -e
if [[ $TRAVIS_OS_NAME = "linux" ]]
then
docker pull bioconda/bioconda-builder
else
# install conda
curl -O https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
sudo bash Miniconda3-latest-MacOSX-x86_64.sh -b -p /anaconda
sudo chown -R $USER /anaconda
mkdir -p /anaconda/conda-bld/osx-64 # workaround for bug in current conda
mkdir -p /anaconda/conda-bld/linux-64 # workaround for bug in current conda
export PATH=/anaconda/bin:$PATH
conda install -y conda conda-build anaconda-client pyyaml toolz jinja2 nose
conda index /anaconda/conda-bld/linux-64 /anaconda/conda-bld/osx-64
# setup bioconda channel
conda config --add channels bioconda
conda config --add channels r
conda config --add channels file://anaconda/conda-bld
conda install -y toposort
fi
| #!/bin/bash
set -e
if [[ $TRAVIS_OS_NAME = "linux" ]]
then
docker pull bioconda/bioconda-builder
else
# install conda
curl -O https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
sudo bash Miniconda3-latest-MacOSX-x86_64.sh -b -p /anaconda
sudo chown -R $USER /anaconda
mkdir -p /anaconda/conda-bld/osx-64 # workaround for bug in current conda
mkdir -p /anaconda/conda-bld/linux-64 # workaround for bug in current conda
export PATH=/anaconda/bin:$PATH
conda install -y conda=4.0.2 conda-build=1.19.0 anaconda-client pyyaml toolz jinja2 nose
conda index /anaconda/conda-bld/linux-64 /anaconda/conda-bld/osx-64
# setup bioconda channel
conda config --add channels bioconda
conda config --add channels r
conda config --add channels file://anaconda/conda-bld
conda install -y toposort
fi
|
Support for pre-called vrn_file inputs | #!/bin/bash
set -ex -o pipefail
BCBIO_VERSION="1.0.7a"
BCBIO_REVISION="0a423fe"
NS="quay.io/bcbio"
TAG="${BCBIO_VERSION}-${BCBIO_REVISION}"
# build bcbio base
docker pull ubuntu:16.04
docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" -t "${NS}/bcbio-base:${TAG}" -t "${NS}/bcbio-base:latest" - < Dockerfile.base
# build bcbio + task specific tools
for TOOL in ${TOOLS}
do
docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" --build-arg "tool=${TOOL}" -t "${NS}/${TOOL}:${TAG}" -t "${NS}/${TOOL}:latest" -f Dockerfile.tools .
done
# log in to quay.io
set +x # avoid leaking encrypted password into travis log
docker login -u="bcbio+travis" -p="$QUAY_PASSWORD" quay.io
# push images
set -ex -o pipefail
docker push "${NS}/bcbio-base:${TAG}"
docker push "${NS}/bcbio-base:latest"
for TOOL in ${TOOLS}
do
docker push "${NS}/${TOOL}:${TAG}"
docker push "${NS}/${TOOL}:latest"
done
| #!/bin/bash
set -ex -o pipefail
BCBIO_VERSION="1.0.7a"
BCBIO_REVISION="42a0b45"
NS="quay.io/bcbio"
TAG="${BCBIO_VERSION}-${BCBIO_REVISION}"
# build bcbio base
docker pull ubuntu:16.04
docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" -t "${NS}/bcbio-base:${TAG}" -t "${NS}/bcbio-base:latest" - < Dockerfile.base
# build bcbio + task specific tools
for TOOL in ${TOOLS}
do
docker build --no-cache --build-arg "git_revision=${BCBIO_REVISION}" --build-arg "tool=${TOOL}" -t "${NS}/${TOOL}:${TAG}" -t "${NS}/${TOOL}:latest" -f Dockerfile.tools .
done
# log in to quay.io
set +x # avoid leaking encrypted password into travis log
docker login -u="bcbio+travis" -p="$QUAY_PASSWORD" quay.io
# push images
set -ex -o pipefail
docker push "${NS}/bcbio-base:${TAG}"
docker push "${NS}/bcbio-base:latest"
for TOOL in ${TOOLS}
do
docker push "${NS}/${TOOL}:${TAG}"
docker push "${NS}/${TOOL}:latest"
done
|
Make opensp install a bit quieter | #!/bin/sh
# Install the W3C HTML validator
cd ~/travis-tmp
wget http://switch.dl.sourceforge.net/project/openjade/opensp/1.5.2/OpenSP-1.5.2.tar.gz
tar -xzf OpenSP-1.5.2.tar.gz
cd ~/travis-tmp/OpenSP-*
./configure --enable-http --disable-doc-build
make
make install | #!/bin/sh
# Install the W3C HTML validator
cd ~/travis-tmp
wget http://switch.dl.sourceforge.net/project/openjade/opensp/1.5.2/OpenSP-1.5.2.tar.gz
tar -xzf OpenSP-1.5.2.tar.gz
cd ~/travis-tmp/OpenSP-*
./configure --quiet --enable-http --disable-doc-build
make --silent
make --silent install |
Set the baseurl in the hugo command in the deploy script | #!/bin/bash
# generate the site
./hugoThemeSiteScript/generateThemeSite.sh https://davidyorr.github.io/hugo-orchid-theme/
hugo -s hugoThemeSite/themeSite
# clone master into a temp directory
TMP_DIR=temp
rm -rf $TMP_DIR
git clone git@github.com:davidyorr/hugo-orchid-theme.git $TMP_DIR
cd $TMP_DIR
# checkout the gh-pages branch and copy the site into it
git checkout -t origin/gh-pages
rm -rf *
cp -R ../hugoThemeSite/themeSite/public/* .
# push changes
git add -A
git commit -m "Built $(date)"
git push origin gh-pages
# delete the temp directory
cd ..
rm -rf $TMP_DIR
| #!/bin/bash
# generate the site
./hugoThemeSiteScript/generateThemeSite.sh https://davidyorr.github.io/hugo-orchid-theme/
hugo -s hugoThemeSite/themeSite -b https://davidyorr.github.io/hugo-orchid-theme
# clone master into a temp directory
TMP_DIR=temp
rm -rf $TMP_DIR
git clone git@github.com:davidyorr/hugo-orchid-theme.git $TMP_DIR
cd $TMP_DIR
# checkout the gh-pages branch and copy the site into it
git checkout -t origin/gh-pages
rm -rf *
cp -R ../hugoThemeSite/themeSite/public/* .
# push changes
git add -A
git commit -m "Built $(date)"
git push origin gh-pages
# delete the temp directory
cd ..
rm -rf $TMP_DIR
|
Remove Neovim package and repository | #!/usr/bin/bash
# Remove previously added repositories (to remain idempontent)
sudo rm -f /etc/apt/sources.list.d/*
## Repositories
# Adapta GTK theme
sudo apt-add-repository -y ppa:tista/adapta
# Paper icon theme
sudo apt-add-repository -y ppa:snwh/pulp
# Atom editor
sudo add-apt-repository -y ppa:webupd8team/atom
# Neovim
sudo add-apt-repository -y ppa:neovim-ppa/unstable
# Google Chrome
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
# Packages
apps=(
git
tmux
nodejs
npm
vim
ruby
build-essential
cmake
python3-dev
python3-pip
atom
neovim
google-chrome-stable
firefox
unity-tweak-tool
adapta-gtk-theme
paper-icon-theme
)
# Update & upgrade
sudo apt update
sudo apt upgrade -y
# Install packages
sudo apt install -y "${apps[@]}"
| #!/usr/bin/bash
# Remove previously added repositories (to remain idempontent)
sudo rm -f /etc/apt/sources.list.d/*
## Repositories
# Adapta GTK theme
sudo apt-add-repository -y ppa:tista/adapta
# Paper icon theme
sudo apt-add-repository -y ppa:snwh/pulp
# Atom editor
sudo add-apt-repository -y ppa:webupd8team/atom
# Google Chrome
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
# Packages
apps=(
git
tmux
nodejs
npm
vim
ruby
build-essential
cmake
python3-dev
python3-pip
atom
google-chrome-stable
firefox
unity-tweak-tool
adapta-gtk-theme
paper-icon-theme
)
# Update & upgrade
sudo apt update
sudo apt upgrade -y
# Install packages
sudo apt install -y "${apps[@]}"
|
Fix issue with GNOME dircolors | # Activate the theme for all future shell sessions.
# For now, lets just use a hack in order to get this working: all repos related
# to solarized should sit under ~/.solarized. For some reason, if we specify the
# path of the expression to eval to
# ~/solarized/dircolors-solarized/dircolors.ansi-light, this expression returns
# an error saying that there is no such file on the specified path. Thus, lets
# just create a symbolic link from
# ~/solarized/dircolors-solarized/dircolors.ansi-light to
# ~/repos/dotfiles/zsh/dircolors.ansi-light.
eval `dircolors $ZSH_CUSTOM/.dircolors.ansi-light`
# Load aliases.
[[ -f $ZSH_CUSTOM/aliases.zsh ]] && source $ZSH_CUSTOM/aliases.zsh
# Awesome cd movements from zshkit.
setopt autocd autopushd pushdminus pushdsilent pushdtohome cdablevars
DIRSTACKSIZE=5
# This repo path. In the future, perhaps this should be asked during
# installation. For now, lets just assume that the repo is cloned to
# ~/repos/dotfiles.
export DOTFILES=~/repos/dotfiles
# If local dotfiles exists, load the local zsh config.
if [ -d ~/.dotfiles.local ]; then
export LOCAL_DOTFILES=~/.dotfiles.local
[[ -f $LOCAL_DOTFILES/zshrc.local ]] && source $LOCAL_DOTFILES/zshrc.local
fi
| eval `dircolors $HOME/.dircolors`
# Load aliases.
[[ -f $ZSH_CUSTOM/aliases.zsh ]] && source $ZSH_CUSTOM/aliases.zsh
# Awesome cd movements from zshkit.
setopt autocd autopushd pushdminus pushdsilent pushdtohome cdablevars
DIRSTACKSIZE=5
# This repo path. In the future, perhaps this should be asked during
# installation. For now, lets just assume that the repo is cloned to
# ~/repos/dotfiles.
export DOTFILES=~/repos/dotfiles
# If local dotfiles exists, load the local zsh config.
if [ -d ~/.dotfiles.local ]; then
export LOCAL_DOTFILES=~/.dotfiles.local
[[ -f $LOCAL_DOTFILES/zshrc.local ]] && source $LOCAL_DOTFILES/zshrc.local
fi
|
Add confirmation before removing a workspace. | alias workspace:make="workspace-make"
alias workspace:remove="workspace-remove"
workspace() {
cd ~/documents/workspaces/$1
ls
}
workspace-make() {
mkdir ~/documents/workspaces/$1
cd ~/documents/workspaces/$1
}
function workspace-remove() {
rm -rf ~/documents/workspaces/$1
}
| alias workspaces="cd ~/documents/workspaces && ls"
alias workspace:make="workspace-make"
alias workspace:remove="workspace-remove"
workspace() {
cd ~/documents/workspaces/$1
ls
}
workspace-make() {
mkdir ~/documents/workspaces/$1
cd ~/documents/workspaces/$1
}
function workspace-remove() {
# Confirm deletion of directory
read -p "Are you sure you want to delete? " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]; then
rm -rf ~/documents/workspaces/$1
fi
}
|
Exit only if the fbterm session closed correctly | # start fbterm automatically in /dev/tty*
if (( ${+commands[fbterm]} )); then
if [[ "$TTY" = /dev/tty* ]] ; then
fbterm
exit
fi
fi
| # start fbterm automatically in /dev/tty*
if (( ${+commands[fbterm]} )); then
if [[ "$TTY" = /dev/tty* ]] ; then
fbterm && exit
fi
fi
|
Remove set -x from delmo-concourse | #!/bin/sh
set -x
if [ ! -z ${MACHINE_NAME} ] && [ ! -z ${MACHINE_EXPORT_AWS_ACCESS_KEY_ID} ] \
&& [ ! -z ${MACHINE_EXPORT_AWS_SECRET_ACCESS_KEY} ] && [ ! -z ${MACHINE_EXPORT_AWS_REGION} ] \
&& [ ! -z ${MACHINE_EXPORT_AWS_BUCKET} ]; then
echo "Downloading pre existing docker-machine configuration..."
(
export AWS_ACCESS_KEY_ID=${MACHINE_EXPORT_AWS_ACCESS_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${MACHINE_EXPORT_AWS_SECRET_ACCESS_KEY}
export AWS_DEFAULT_REGION=${MACHINE_EXPORT_AWS_REGION}
aws --region ${MACHINE_EXPORT_AWS_REGION} s3 cp s3://${MACHINE_EXPORT_AWS_BUCKET}/${MACHINE_NAME}.zip ./
) || exit 1
echo "Importing configuration..."
machine-import ${MACHINE_NAME}.zip
# The permission isn't set properly on import
chmod 0600 /root/.docker/machine/machines/${MACHINE_NAME}/id_rsa
echo "Machine ${MACHINE_NAME} imported!"
fi
echo "Running '$@'"
exec $@
| #!/bin/sh
if [ ! -z ${MACHINE_NAME} ] && [ ! -z ${MACHINE_EXPORT_AWS_ACCESS_KEY_ID} ] \
&& [ ! -z ${MACHINE_EXPORT_AWS_SECRET_ACCESS_KEY} ] && [ ! -z ${MACHINE_EXPORT_AWS_REGION} ] \
&& [ ! -z ${MACHINE_EXPORT_AWS_BUCKET} ]; then
echo "Downloading pre existing docker-machine configuration..."
(
export AWS_ACCESS_KEY_ID=${MACHINE_EXPORT_AWS_ACCESS_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${MACHINE_EXPORT_AWS_SECRET_ACCESS_KEY}
export AWS_DEFAULT_REGION=${MACHINE_EXPORT_AWS_REGION}
aws --region ${MACHINE_EXPORT_AWS_REGION} s3 cp s3://${MACHINE_EXPORT_AWS_BUCKET}/${MACHINE_NAME}.zip ./
) || exit 1
echo "Importing configuration..."
machine-import ${MACHINE_NAME}.zip
# The permission isn't set properly on import
chmod 0600 /root/.docker/machine/machines/${MACHINE_NAME}/id_rsa
echo "Machine ${MACHINE_NAME} imported!"
fi
echo "Running '$@'"
exec $@
|
Add udhcpd back to pakage list Required for USB networking | #!/bin/sh -e
#Note: These will be auto installed by chroot.sh script:
#lsb-release initramfs-tools sudo wget
#Base
base_pkgs="nano pastebinit"
#Tools
base_pkgs="${base_pkgs} bsdmainutils i2c-tools fbset hexedit memtester read-edid"
#OS
base_pkgs="${base_pkgs} dosfstools btrfs-tools cpufrequtils ntpdate"
#USB Dongles
base_pkgs="${base_pkgs} ppp usb-modeswitch usbutils"
#Server
#base_pkgs="${base_pkgs} apache2 openssh-server udhcpd"
base_pkgs="${base_pkgs} openssh-server"
#Wireless
base_pkgs="${base_pkgs} wireless-tools wpasupplicant lowpan-tools wvdial lshw"
#shellinabox (breaks ssh over eth/usb)
#base_pkgs="${base_pkgs} shellinabox"
#Gateone:
#base_pkgs="${base_pkgs} python-tornado python-pyopenssl"
| #!/bin/sh -e
#Note: These will be auto installed by chroot.sh script:
#lsb-release initramfs-tools sudo wget
#Base
base_pkgs="nano pastebinit"
#Tools
base_pkgs="${base_pkgs} bsdmainutils i2c-tools fbset hexedit memtester read-edid"
#OS
base_pkgs="${base_pkgs} dosfstools btrfs-tools cpufrequtils ntpdate"
#USB Dongles
base_pkgs="${base_pkgs} ppp usb-modeswitch usbutils"
#Server
#base_pkgs="${base_pkgs} apache2 openssh-server udhcpd"
base_pkgs="${base_pkgs} openssh-server udhcpd"
#Wireless
base_pkgs="${base_pkgs} wireless-tools wpasupplicant lowpan-tools wvdial lshw"
#shellinabox (breaks ssh over eth/usb)
#base_pkgs="${base_pkgs} shellinabox"
#Gateone:
#base_pkgs="${base_pkgs} python-tornado python-pyopenssl"
|
Reduce log volume as Travis CI log too big to view online | #!/bin/bash -x
#
# Build - Apply Salt States and Ansible Playbooks
#
# Exit on errors
#
set -e
set -u
set -o pipefail
# Create Salt Links for States and Pillar
# Create Ansible Links for Playbooks and Variables
#
mkdir -p /srv
ln -sf "$(pwd)/salt/roots" /srv/salt
ln -sf "$(pwd)/salt/pillar" /srv/pillar
ln -sf "$(pwd)/ansible" /srv/ansible
# Apply Salt States
#
salt-call --local -l debug state.apply
# Apply Ansible Plays
#
cd /srv/ansible && /usr/bin/ansible-playbook -i hosts site.yml -vv --limit=travis-ci
exit 0 | #!/bin/bash -x
#
# Build - Apply Salt States and Ansible Playbooks
#
# Exit on errors
#
set -e
set -u
set -o pipefail
# Create Salt Links for States and Pillar
# Create Ansible Links for Playbooks and Variables
#
mkdir -p /srv
ln -sf "$(pwd)/salt/roots" /srv/salt
ln -sf "$(pwd)/salt/pillar" /srv/pillar
ln -sf "$(pwd)/ansible" /srv/ansible
# Apply Salt States
#
salt-call --local -l info state.apply
# Apply Ansible Plays
#
cd /srv/ansible && /usr/bin/ansible-playbook -i hosts site.yml --limit=travis-ci
exit 0 |
Change Avian src volume path to match Dockerfile | #!/bin/sh
if test $# -eq 0; then
echo "Usage: $0 [--container <container_name>] -- <command_to_run_in_docker>"
echo "Ex: $0 make test"
echo "Ex: $0 ./test/ci.sh"
echo "Ex: $0 --container joshuawarner32/avian-build-windows -- make platform=windows"
exit 1
fi
THE_USER="-u $(id -u "${USER}")"
while test $# -gt 1 ; do
key="$1"
case $key in
-c|--container)
shift
CONTAINER="$1"
shift
;;
-r|--root)
shift
THE_USER=
;;
--)
shift
break
;;
*)
break
;;
esac
done
if test -z $CONTAINER; then
CONTAINER=joshuawarner32/avian-build
fi
DIR=$(cd $(dirname "$0") && cd .. && pwd)
docker run --rm -i -t -v "${DIR}":/var/avian ${THE_USER} "${CONTAINER}" "${@}"
| #!/bin/sh
if test $# -eq 0; then
echo "Usage: $0 [--container <container_name>] -- <command_to_run_in_docker>"
echo "Ex: $0 make test"
echo "Ex: $0 ./test/ci.sh"
echo "Ex: $0 --container joshuawarner32/avian-build-windows -- make platform=windows"
exit 1
fi
THE_USER="-u $(id -u "${USER}")"
while test $# -gt 1 ; do
key="$1"
case $key in
-c|--container)
shift
CONTAINER="$1"
shift
;;
-r|--root)
shift
THE_USER=
;;
--)
shift
break
;;
*)
break
;;
esac
done
if test -z $CONTAINER; then
CONTAINER=joshuawarner32/avian-build
fi
DIR=$(cd $(dirname "$0") && cd .. && pwd)
docker run --rm -i -t -v "${DIR}":/var/src/avian ${THE_USER} "${CONTAINER}" "${@}"
|
Update generation script for github pages | #!/bin/sh
command_exists()
{
command -v $1 >/dev/null 2>&1
}
if ! command_exists resume; then
echo "Error: 'resume' command not found" >&2
echo "Please install it from the instructions at this address https://jsonresume.org/getting-started/" >&2
exit 1
fi
resume export resume.html --theme stackoverflow
exit $?
| #!/bin/sh
command_exists()
{
command -v $1 >/dev/null 2>&1
}
if ! command_exists resume; then
echo "Error: 'resume' command not found" >&2
echo "Please install it from the instructions at this address https://jsonresume.org/getting-started/" >&2
exit 1
fi
resume export resume.html --theme stackoverflow
if [ $? == 0 ]; then
mv resume.html index.html
echo "resume.html -> index.html"
else
echo "Error: Web page rendering failure" >&2
fi
exit $?
|
Fix path for recurisve chmod | #!/bin/zsh
echo -e "Requesting for Administator's privileges"
sudo true
echo -e "Starting script\n"
timerStart=$(date +%s)
echo "------------------------------------------------"
echo -e $COL_YELLOW"Setting up your macOS/Linux development environment..."$COL_RESET
echo "------------------------------------------------"
chmod a+x ./**/*.sh
SETUP_SCRIPTS=./setup/*
for file in $SETUP_SCRIPTS; do
filename=$(basename "$file")
./$file
done
###############################################################################
# Shell Scripts #
###############################################################################
chmod a-x ./**/*.sh
# Execute the base scripts
SHELL_FILES=./shell/*
for file in $SHELL_FILES; do
filename=$(basename "$file")
./$file
done
timerStop=$(date +%s)
duration=$(expr $timerStop - $timerStart)
if [ $duration -lt 60 ]; then
echo -e "Time taken: $(($duration % 60)) seconds"
else
echo -e "Time taken: $(($duration / 60)) minutes and $(($duration % 60)) seconds"
fi
| #!/bin/zsh
echo -e "Requesting for Administator's privileges"
sudo true
echo -e "Starting script\n"
timerStart=$(date +%s)
echo "------------------------------------------------"
echo -e $COL_YELLOW"Setting up your macOS/Linux development environment..."$COL_RESET
echo "------------------------------------------------"
chmod a+x **/*.sh
SETUP_SCRIPTS=./setup/*
for file in $SETUP_SCRIPTS; do
filename=$(basename "$file")
./$file
done
###############################################################################
# Shell Scripts #
###############################################################################
# Execute the base scripts
SHELL_FILES=./shell/*
for file in $SHELL_FILES; do
filename=$(basename "$file")
./$file
done
timerStop=$(date +%s)
duration=$(expr $timerStop - $timerStart)
chmod a-x **/*.sh
if [ $duration -lt 60 ]; then
echo -e "Time taken: $(($duration % 60)) seconds"
else
echo -e "Time taken: $(($duration / 60)) minutes and $(($duration % 60)) seconds"
fi
|
Use "snapshot" instead of "latest" | #!/bin/bash
# Inspired by http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/
if [ "$TRAVIS_REPO_SLUG" == "yatechorg/jedis-utils" ] && [ "$TRAVIS_JDK_VERSION" == "oraclejdk7" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
echo -e "Publishing javadoc...\n"
cp -R build/docs/javadoc $HOME/javadoc-latest
cd $HOME
git config --global user.email "travis@travis-ci.org"
git config --global user.name "travis-ci"
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/yatechorg/jedis-utils gh-pages > /dev/null
cd gh-pages
git rm -rf ./javadoc/latest
mkdir -p ./javadoc/latest
cp -Rf $HOME/javadoc-latest/* ./javadoc/latest
git add -f .
git commit -m "Lastest javadoc on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
git push -fq origin gh-pages > /dev/null
echo -e "Published Javadoc to gh-pages.\n"
fi
| #!/bin/bash
# Inspired by http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/
if [ "$TRAVIS_REPO_SLUG" == "yatechorg/jedis-utils" ] && [ "$TRAVIS_JDK_VERSION" == "oraclejdk7" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
echo -e "Publishing javadoc...\n"
cp -R build/docs/javadoc $HOME/javadoc-snapshot
cd $HOME
git config --global user.email "travis@travis-ci.org"
git config --global user.name "travis-ci"
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/yatechorg/jedis-utils gh-pages > /dev/null
cd gh-pages
git rm -rf ./javadoc/snapshot
mkdir -p ./javadoc/snapshot
cp -Rf $HOME/javadoc-snapshot/* ./javadoc/snapshot
git add -f .
git commit -m "Lastest javadoc on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
git push -fq origin gh-pages > /dev/null
echo -e "Published Javadoc to gh-pages.\n"
fi
|
Upgrade Java 11 version in CI image | #!/bin/bash
set -e
case "$1" in
java8)
echo "https://github.com/bell-sw/Liberica/releases/download/8u345+1/bellsoft-jdk8u345+1-linux-amd64.tar.gz"
;;
java11)
echo "https://github.com/bell-sw/Liberica/releases/download/11.0.16+8/bellsoft-jdk11.0.16+8-linux-amd64.tar.gz"
;;
java17)
echo "https://github.com/bell-sw/Liberica/releases/download/17.0.4+8/bellsoft-jdk17.0.4+8-linux-amd64.tar.gz"
;;
*)
echo $"Unknown java version"
exit 1
esac
| #!/bin/bash
set -e
case "$1" in
java8)
echo "https://github.com/bell-sw/Liberica/releases/download/8u345+1/bellsoft-jdk8u345+1-linux-amd64.tar.gz"
;;
java11)
echo "https://github.com/bell-sw/Liberica/releases/download/11.0.16.1+1/bellsoft-jdk11.0.16.1+1-linux-amd64.tar.gz"
;;
java17)
echo "https://github.com/bell-sw/Liberica/releases/download/17.0.4+8/bellsoft-jdk17.0.4+8-linux-amd64.tar.gz"
;;
*)
echo $"Unknown java version"
exit 1
esac
|
Use full patch in gpuvis.includes file, otherwise qtcreator gets confused | #!/bin/bash
PROJNAME=gpuvis
BASEDIR=$(dirname $0)
cd $BASEDIR
DIRS=(..)
# Create blah.creator file if it doesn't exist
if [[ ! -f ${PROJNAME}.creator ]]; then echo -e "[General]\n" > ${PROJNAME}.creator; fi
# Create our defines file
DEFINES=$(cat <<'END_HEREDOC'
#define __LINUX__ 1
#define _GNU_SOURCE 1
#define USE_FREETYPE 1
END_HEREDOC
)
echo "${DEFINES}" > ${PROJNAME}.config
> ${PROJNAME}.files
for i in ${DIRS[@]}; do
echo Checking ${i}
find ${i} -type f -iregex '.*\.\(c\|cxx\|cpp\|h\|lpp\|ypp\|sh\|inl\|txt\)$' -or -iname Makefile >> ${PROJNAME}.files
done
find /usr/include/SDL2 -iname "*.h" >> ${PROJNAME}.files
fgrep -i --color=never ".h" *.files | xargs -I{} dirname {} | sort | uniq > ${PROJNAME}.includes
wc -l ${PROJNAME}.files
wc -l ${PROJNAME}.includes
| #!/bin/bash
PROJNAME=gpuvis
BASEDIR=$(dirname $0)
cd $BASEDIR
DIRS=(..)
# Create blah.creator file if it doesn't exist
if [[ ! -f ${PROJNAME}.creator ]]; then echo -e "[General]\n" > ${PROJNAME}.creator; fi
# Create our defines file
DEFINES=$(cat <<'END_HEREDOC'
#define __LINUX__ 1
#define _GNU_SOURCE 1
#define USE_FREETYPE 1
END_HEREDOC
)
echo "${DEFINES}" > ${PROJNAME}.config
> ${PROJNAME}.files
for i in ${DIRS[@]}; do
echo Checking ${i}
find ${i} -type f -iregex '.*\.\(c\|cxx\|cpp\|h\|lpp\|ypp\|sh\|inl\|txt\)$' -or -iname Makefile >> ${PROJNAME}.files
done
find /usr/include/SDL2 -iname "*.h" >> ${PROJNAME}.files
fgrep -i --color=never ".h" *.files | xargs -I{} readlink -f {} | xargs -I{} dirname {} | sort | uniq > ${PROJNAME}.includes
wc -l ${PROJNAME}.files
wc -l ${PROJNAME}.includes
|
Set default SOMAXCONN to actual value. | #!/bin/bash
# somaxconn must be >= uwsgi listen setting.
if [[ -n ${PYBOSSA_SOMAXCONN} ]]; then
sudo sysctl -w net.core.somaxconn=${PYBOSSA_SOMAXCONN}
else
export PYBOSSA_SOMAXCONN=128 # Amazon Linux default
fi
exec /usr/local/bin/supervisord --nodaemon --configuration=/etc/supervisor/supervisord.conf
| #!/bin/bash
# somaxconn must be >= uwsgi listen setting.
if [[ -n ${PYBOSSA_SOMAXCONN} ]]; then
sudo sysctl -w net.core.somaxconn=${PYBOSSA_SOMAXCONN}
else
export PYBOSSA_SOMAXCONN=$(sysctl -n net.core.somaxconn)
fi
exec /usr/local/bin/supervisord --nodaemon --configuration=/etc/supervisor/supervisord.conf
|
Fix detach for SDK 14 | #!/bin/bash
pod install
pushd ..
# resolve possible packager conflicts introduced by pod install
exp prepare-detached-build --platform ios --skipXcodeConfig 1
popd
echo "Finished installing and cleaning up dependencies. Be sure and serve or restart your project with: exp r -c"
| #!/bin/bash
pod install
pushd ..
# resolve possible packager conflicts introduced by pod install
exp prepare-detached-build --platform ios --skipXcodeConfig 1
popd
echo "Finished installing and cleaning up dependencies. Be sure to restart your project in exp or XDE."
|
Update travis integration tests to tool_lints | set -x
rm ~/.cargo/bin/cargo-clippy
cargo install --force --path .
echo "Running integration test for crate ${INTEGRATION}"
git clone --depth=1 https://github.com/${INTEGRATION}.git checkout
cd checkout
function check() {
# run clippy on a project, try to be verbose and trigger as many warnings as possible for greater coverage
RUST_BACKTRACE=full cargo clippy --all-targets --all-features -- --cap-lints warn -W clippy_pedantic -W clippy_nursery &> clippy_output
cat clippy_output
! cat clippy_output | grep -q "internal compiler error\|query stack during panic"
if [[ $? != 0 ]]; then
return 1
fi
}
case ${INTEGRATION} in
rust-lang/cargo)
check
;;
*)
check
;;
esac
| set -x
rm ~/.cargo/bin/cargo-clippy
cargo install --force --path .
echo "Running integration test for crate ${INTEGRATION}"
git clone --depth=1 https://github.com/${INTEGRATION}.git checkout
cd checkout
function check() {
# run clippy on a project, try to be verbose and trigger as many warnings as possible for greater coverage
RUST_BACKTRACE=full cargo clippy --all-targets --all-features -- --cap-lints warn -W clippy::pedantic -W clippy::nursery &> clippy_output
cat clippy_output
! cat clippy_output | grep -q "internal compiler error\|query stack during panic"
if [[ $? != 0 ]]; then
return 1
fi
}
case ${INTEGRATION} in
rust-lang/cargo)
check
;;
*)
check
;;
esac
|
Remove debug output for travis | #!/bin/bash
ls -lah ./bin
docker-compose up -d
docker-compose ps
docker-compose logs todoApp
go test -v -tags integration
[ $? -eq 0 ] || exit $?;
# Clean up
docker-compose kill
docker-compose rm -f
| #!/bin/bash
docker-compose up -d
go test -v -tags integration
[ $? -eq 0 ] || exit $?;
# Clean up
docker-compose kill
docker-compose rm -f
|
Fix Swift/T on Dunedin, use which_check() |
# LANGS DUNEDIN
# Language settings for Dunedin
# Assumes WORKFLOWS_ROOT is set
# # Python
# COMMON_DIR=$WORKFLOWS_ROOT/common/python
# export PYTHONPATH=${PYTHONPATH:-}${PYTHONPATH:+:}
# PYTHONPATH+=$EMEWS_PROJECT_ROOT/python:
# PYTHONPATH+=$BENCHMARK_DIR:
# PYTHONPATH+=$BENCHMARKS_ROOT/common:
# PYTHONPATH+=$COMMON_DIR
# export PYTHONHOME=/global/common/cori/software/python/2.7-anaconda/envs/deeplearning
export PYTHONPATH=""
export PYTHONHOME=""
# # R
export R_HOME=/home/wozniak/Public/sfw/R-3.4.1/lib/R
# Swift/T
export PATH=$HOME/sfw/swift-t/stc/bin:$PATH
SWIFT_IMPL="app"
# EMEWS Queues for R
EQR=/opt/EQ-R
# Resident task workers and ranks
export TURBINE_RESIDENT_WORK_WORKERS=1
export RESIDENT_WORK_RANKS=$(( PROCS - 2 ))
# LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}${LD_LIBRARY_PATH:+:}
LD_LIBRARY_PATH+=$R_HOME/lib
# Log settings to output
which python swift-t
# Cf. utils.sh
show PYTHONHOME
log_path LD_LIBRARY_PATH
log_path PYTHONPATH
|
# LANGS DUNEDIN
# Language settings for Dunedin
# Assumes WORKFLOWS_ROOT is set
# # Python
# COMMON_DIR=$WORKFLOWS_ROOT/common/python
# export PYTHONPATH=${PYTHONPATH:-}${PYTHONPATH:+:}
# PYTHONPATH+=$EMEWS_PROJECT_ROOT/python:
# PYTHONPATH+=$BENCHMARK_DIR:
# PYTHONPATH+=$BENCHMARKS_ROOT/common:
# PYTHONPATH+=$COMMON_DIR
# export PYTHONHOME=/global/common/cori/software/python/2.7-anaconda/envs/deeplearning
export PYTHONPATH=""
export PYTHONHOME=""
# # R
export R_HOME=/home/wozniak/Public/sfw/R-3.4.1/lib/R
# Swift/T
export PATH=$HOME/Public/sfw/swift-t/stc/bin:$PATH
SWIFT_IMPL="app"
# EMEWS Queues for R
EQR=/opt/EQ-R
# Resident task workers and ranks
export TURBINE_RESIDENT_WORK_WORKERS=1
export RESIDENT_WORK_RANKS=$(( PROCS - 2 ))
# LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}${LD_LIBRARY_PATH:+:}
LD_LIBRARY_PATH+=$R_HOME/lib
# Cf. utils.sh ...
which_check python
which_check swift-t
# Log settings to output
show PYTHONHOME
log_path LD_LIBRARY_PATH
log_path PYTHONPATH
|
Add babel-helper-transform-fixture-test-runner to fork list | #!/bin/sh
set -e
if ! git --git-dir ../babel/.git rev-parse 2>/dev/null ; then
(cd ..
git clone git://github.com/babel/babel.git
)
fi
(cd ../babel;
git checkout master
git pull
git subtree split -P packages/babel-traverse master -b subtree_babel-traverse
git subtree split -P packages/babel-types master -b subtree_babel-types
)
if [ "$1" = "--init" ] ; then
git subtree add -P packages/comal-traverse ../babel subtree_babel-traverse --squash
git subtree add -P packages/comal-types ../babel subtree_babel-types --squash
else
git subtree pull -P packages/comal-traverse ../babel subtree_babel-traverse --squash
git subtree pull -P packages/comal-types ../babel subtree_babel-types --squash
fi
| #!/bin/sh
set -e
# NOTE: this script shouldn't actually run with --init, it's more for documentation
if ! git --git-dir ../babel/.git rev-parse 2>/dev/null ; then
(cd ..
git clone git://github.com/babel/babel.git
)
fi
(cd ../babel;
git checkout master
git pull
git subtree split -P packages/babel-traverse master -b subtree_babel-traverse
git subtree split -P packages/babel-types master -b subtree_babel-types
git subtree split -P packages/babel-helper-transform-fixture-test-runner master -b subtree_babel-helper-transform-fixture-test-runner
)
if [ "$1" = "--init" ] ; then
git subtree add -P packages/comal ../babel subtree_babel-core --squash
git subtree add -P packages/comal-traverse ../babel subtree_babel-traverse --squash
git subtree add -P packages/comal-types ../babel subtree_babel-types --squash
git subtree add -P packages/tacoscript-dev-utils/fixture-runner ../babel subtree_babel-helper-transform-fixture-test-runner --squash
else
git subtree pull -P packages/comal-traverse ../babel subtree_babel-traverse --squash
git subtree pull -P packages/comal-types ../babel subtree_babel-types --squash
fi
|
Remove --stderr switch from Travis test runner | #!/bin/bash
travisdir=$(dirname $(readlink /proc/$$/fd/255))
testdir="$travisdir/../tests"
testedcomponents=(`cat "$travisdir/tested-components"`)
result=0
for tested in "${testedcomponents[@]}"
do
echo "$tested:"
phpunit -c $testdir/phpunit.xml --stderr $testdir/$tested
result=$(($result || $?))
done
exit $result
| #!/bin/bash
travisdir=$(dirname $(readlink /proc/$$/fd/255))
testdir="$travisdir/../tests"
testedcomponents=(`cat "$travisdir/tested-components"`)
result=0
for tested in "${testedcomponents[@]}"
do
echo "$tested:"
phpunit -c $testdir/phpunit.xml $testdir/$tested
result=$(($result || $?))
done
exit $result
|
Update scripts to send jobs | #!/bin/bash
if [ $# -eq 0 ]; then
echo "No arguments provided"
exit 1
fi
#$ -N $1
#$ -o output/$JOB_NAME_$JOB_ID.out
#$ -e output/$JOB_NAME_$JOB_ID.error
#$ -q larga
#$ -cwd
python $1
| #!/bin/bash
if [ $# -eq 0 ]; then
echo "No arguments provided"
exit 1
fi
#$ -N $1
#$ -M davidgasquez@gmail.com
#$ -m abe
#$ -o output/$JOB_NAME_$JOB_ID.out
#$ -e output/$JOB_NAME_$JOB_ID.error
#$ -q larga
#$ -cwd
python $1
|
Use $(…) instead of legacy `…`. | #!/bin/bash
#===--- find-overlay-dependencies-loop.sh - driver for find-overlay-dependency.sh---===#
#
## This source file is part of the Swift.org open source project
##
## Copyright (c) 2016 Apple Inc. and the Swift project authors
## Licensed under Apache License v2.0 with Runtime Library Exception
##
## See http://swift.org/LICENSE.txt for license information
## See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
SCRIPT="$(dirname "$0")/find-overlay-dependencies.sh"
# `update` edits the cmake file in-place; `print` just prints to console
function usage() {
echo 'usage:' $0 'update|print' >&2
exit 1
}
case $# in
1) if [[ $1 != 'update' && $1 != 'print' ]]; then
usage
fi ;;
*)
usage ;;
esac
# Don't update XCTest, handle spaces in directories
for f in ./stdlib/public/SDK/*/; do
name=`basename "${f}"`
if [[ "${name}" == "XCTest" ]]; then
continue
fi
$SCRIPT "${name}" "$1"
done
| #!/bin/bash
#===--- find-overlay-dependencies-loop.sh - driver for find-overlay-dependency.sh---===#
#
## This source file is part of the Swift.org open source project
##
## Copyright (c) 2016 Apple Inc. and the Swift project authors
## Licensed under Apache License v2.0 with Runtime Library Exception
##
## See http://swift.org/LICENSE.txt for license information
## See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
SCRIPT="$(dirname "$0")/find-overlay-dependencies.sh"
# `update` edits the cmake file in-place; `print` just prints to console
function usage() {
echo 'usage:' $0 'update|print' >&2
exit 1
}
case $# in
1) if [[ $1 != 'update' && $1 != 'print' ]]; then
usage
fi ;;
*)
usage ;;
esac
# Don't update XCTest, handle spaces in directories
for f in ./stdlib/public/SDK/*/; do
name=$(basename "${f}")
if [[ "${name}" == "XCTest" ]]; then
continue
fi
$SCRIPT "${name}" "$1"
done
|
Make agent installation in E2E vagrant test use a snapshot source | #!/bin/bash
# Download the Microsoft Install (MSI) file for a given version of Puppet
# agent (if necessary) and install it on a Windows VM
set -eou pipefail
source utils.sh
# Download MSI file if it doesn't exist in the current directory.
msi_file="puppet-agent-$PUPPET_AGENT_VERSION-x64.msi"
echo "Checking for file $msi_file in current directory"
if [ ! -f $msi_file ]; then
echo "File $msi_file is not found"
major_version="$(semver_major_version $PUPPET_AGENT_VERSION)"
echo "Extracted major version of $major_version"
msi_download="https://downloads.puppetlabs.com/windows/puppet$major_version/$msi_file"
echo "Downloading $msi_download"
wget $msi_download
fi
echo "Installing Puppet Agent version $PUPPET_AGENT_VERSION in Windows VM"
vagrant powershell -e -c "/vagrant/install_puppet_agent.ps1 $msi_file"
| #!/bin/bash
# Download the Microsoft Install (MSI) file for a given version of Puppet
# agent (if necessary) and install it on a Windows VM
set -eou pipefail
source utils.sh
base_snapshot_name="base-install"
# Download MSI file if it doesn't exist in the current directory.
msi_file="puppet-agent-$PUPPET_AGENT_VERSION-x64.msi"
echo "Checking for file $msi_file in current directory"
if [ ! -f $msi_file ]; then
echo "File $msi_file is not found"
major_version="$(semver_major_version $PUPPET_AGENT_VERSION)"
echo "Extracted major version of $major_version"
msi_download="https://downloads.puppetlabs.com/windows/puppet$major_version/$msi_file"
echo "Downloading $msi_download"
wget $msi_download
fi
echo "Restoring base-install snapshot..."
vagrant snapshot restore "$base_snapshot_name"
echo "Installing Puppet Agent version $PUPPET_AGENT_VERSION in Windows VM"
vagrant powershell -e -c "/vagrant/install_puppet_agent.ps1 $msi_file"
|
Replace apk-reinstall with apk install using flags. | #!/usr/bin/env bash
# Custom adb commands
adb() {
local cmd="$1"
shift
case "$cmd" in
file)
command adbf "$@"
;;
reinstall)
command apk-reinstall "$@"
;;
*)
command adb "$cmd" "$@"
;;
esac
}
| #!/usr/bin/env bash
# Custom adb commands
adb() {
local cmd="$1"
shift
case "$cmd" in
file)
command adbf "$@"
;;
reinstall)
command adb install -r -d "$@"
;;
*)
command adb "$cmd" "$@"
;;
esac
}
|
Include prometheus and heapster in bootstrap bindata | #!/bin/bash
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::build::setup_env
EXAMPLES=examples
OUTPUT_PARENT=${OUTPUT_ROOT:-$OS_ROOT}
pushd vendor/github.com/jteeuwen/go-bindata > /dev/null
go install ./...
popd > /dev/null
os::util::ensure::gopath_binary_exists 'go-bindata'
pushd "${OS_ROOT}" > /dev/null
"$(os::util::find::gopath_binary go-bindata)" \
-nocompress \
-nometadata \
-prefix "bootstrap" \
-pkg "bootstrap" \
-o "${OUTPUT_PARENT}/pkg/bootstrap/bindata.go" \
-ignore "README.md" \
-ignore ".*\.go$" \
-ignore application-template.json \
${EXAMPLES}/image-streams/... \
${EXAMPLES}/db-templates/... \
${EXAMPLES}/jenkins \
${EXAMPLES}/jenkins/pipeline \
${EXAMPLES}/quickstarts/... \
${EXAMPLES}/logging/... \
pkg/image/admission/imagepolicy/api/v1/...
popd > /dev/null
ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
| #!/bin/bash
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::build::setup_env
EXAMPLES=examples
OUTPUT_PARENT=${OUTPUT_ROOT:-$OS_ROOT}
pushd vendor/github.com/jteeuwen/go-bindata > /dev/null
go install ./...
popd > /dev/null
os::util::ensure::gopath_binary_exists 'go-bindata'
pushd "${OS_ROOT}" > /dev/null
"$(os::util::find::gopath_binary go-bindata)" \
-nocompress \
-nometadata \
-prefix "bootstrap" \
-pkg "bootstrap" \
-o "${OUTPUT_PARENT}/pkg/bootstrap/bindata.go" \
-ignore "README.md" \
-ignore ".*\.go$" \
-ignore "\.DS_Store" \
-ignore application-template.json \
${EXAMPLES}/image-streams/... \
${EXAMPLES}/db-templates/... \
${EXAMPLES}/jenkins \
${EXAMPLES}/jenkins/pipeline \
${EXAMPLES}/quickstarts/... \
${EXAMPLES}/logging/... \
${EXAMPLES}/heapster/... \
${EXAMPLES}/prometheus/... \
pkg/image/admission/imagepolicy/api/v1/...
popd > /dev/null
ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
|
Add parameter check to script for creation of release packages | #!/usr/bin/env bash
# extract the plugin name and the version from the given tag name
tag_name=$1
tag_parts=(${tag_name//@/ })
plugin_name_parts=(${tag_parts[0]//-/ })
plugin_name=${plugin_name_parts[1]}
version=${tag_parts[1]}
# install dependencies
npm install
# create minified files
npm run minify
# pack all the corresponding files to a zip archive
find -not -iwholename '*.git*' -iname *${plugin_name}* -type f | zip ${plugin_name}-${version}.zip -@
# clean all modules again
npm run clean
| #!/usr/bin/env bash
# check if tag name was passed as argument
if [[ $# -eq 0 ]]
then
echo 'You have to pass a tag name as first argument!'
exit 1
fi
# extract the plugin name and the version from the given tag name
tag_name=$1
tag_parts=(${tag_name//@/ })
plugin_name_parts=(${tag_parts[0]//-/ })
plugin_name=${plugin_name_parts[1]}
version=${tag_parts[1]}
# install dependencies
npm install
# create minified files
npm run minify
# pack all the corresponding files to a zip archive
find -not -iwholename '*.git*' -iname *${plugin_name}* -type f | zip ${plugin_name}-${version}.zip -@
# clean all modules again
npm run clean
|
Delete the local runtime before copying | #!/bin/bash
set -e
set -x
CODE_SIGN=$1
echo "### Building VimR target"
# Build NeoVim
# 0. Delete previously built things
# 1. Build normally to get the full runtime folder and copy it to the neovim's project root
# 2. Delete the build folder to re-configure
# 3. Build libnvim
pushd neovim
ln -f -s ../local.mk .
rm -rf build
make distclean
echo "### Building nvim to get the runtime folder"
rm -rf /tmp/nvim
make CMAKE_FLAGS="-DCUSTOM_UI=0 -DCMAKE_INSTALL_PREFIX=/tmp/nvim" install
cp -r /tmp/nvim/share/nvim/runtime .
rm -rf build
make clean
echo "### Building libnvim"
make libnvim
popd
echo "### Updating carthage"
carthage update --platform osx
echo "### Xcodebuilding"
if [ "${CODE_SIGN}" = true ] ; then
xcodebuild CODE_SIGN_IDENTITY="Developer ID Application: Tae Won Ha (H96Q2NKTQH)" -configuration Release -target VimR
else
xcodebuild -configuration Release -target VimR
fi
echo "### Built VimR target"
| #!/bin/bash
set -e
set -x
CODE_SIGN=$1
echo "### Building VimR target"
# Build NeoVim
# 0. Delete previously built things
# 1. Build normally to get the full runtime folder and copy it to the neovim's project root
# 2. Delete the build folder to re-configure
# 3. Build libnvim
pushd neovim
ln -f -s ../local.mk .
rm -rf build
make distclean
echo "### Building nvim to get the runtime folder"
rm -rf /tmp/nvim
make CMAKE_FLAGS="-DCUSTOM_UI=0 -DCMAKE_INSTALL_PREFIX=/tmp/nvim" install
rm -rf build
make clean
echo "### Building libnvim"
make libnvim
echo "### Copying runtime"
rm -rf runtime
cp -r /tmp/nvim/share/nvim/runtime .
popd
echo "### Updating carthage"
carthage update --platform osx
echo "### Xcodebuilding"
if [ "${CODE_SIGN}" = true ] ; then
xcodebuild CODE_SIGN_IDENTITY="Developer ID Application: Tae Won Ha (H96Q2NKTQH)" -configuration Release -target VimR
else
xcodebuild -configuration Release -target VimR
fi
echo "### Built VimR target"
|
Set current_entry_number in schema creation script | #!/usr/bin/env bash
db_name=$1
psql $db_name -U postgres -q -S -c "
create table if not exists total_records(count INTEGER);
create table if not exists total_entries(count INTEGER, last_updated TIMESTAMP WITHOUT TIME ZONE DEFAULT now());
create table if not exists item (sha256hex varchar primary key, content jsonb);
create table if not exists entry (entry_number integer primary key, sha256hex varchar, timestamp INTEGER);
create table if not exists current_keys(key VARCHAR PRIMARY KEY, entry_number INTEGER UNIQUE);
create table if not exists current_entry_number(value integer not null);
insert into total_records (count) select 0 where not exists (select count from total_records);
insert into total_entries (count) select 0 where not exists (select count from total_entries);
"
| #!/usr/bin/env bash
db_name=$1
psql $db_name -U postgres -q -S -c "
create table if not exists total_records(count INTEGER);
create table if not exists total_entries(count INTEGER, last_updated TIMESTAMP WITHOUT TIME ZONE DEFAULT now());
create table if not exists item (sha256hex varchar primary key, content jsonb);
create table if not exists entry (entry_number integer primary key, sha256hex varchar, timestamp INTEGER);
create table if not exists current_keys(key VARCHAR PRIMARY KEY, entry_number INTEGER UNIQUE);
create table if not exists current_entry_number(value integer not null);
insert into total_records (count) select 0 where not exists (select count from total_records);
insert into total_entries (count) select 0 where not exists (select count from total_entries);
insert into current_entry_number(value)
select (
select case
when (select max(entry_number) from entry) is null then 0
else (select max(entry_number) from entry)
end as t
)
where not exists (
select 1 from current_entry_number
);
"
|
Increase the time waiting for a consul leader to be elected | #!/bin/bash
token=$1
if [ -n "${token}" ]; then
ccargs="--token=${token}"
fi
# Non-server nodes can be restart immediately with no effect on the quorum
#
if [ $(consul-cli agent-self ${ccargs} | jq -r .Member.Tags.role) == node ]; then
systemctl restart consul
exit 0
fi
# Try to acquire a lock on 'locks/consul'
sessionid=$(consul-cli kv-lock ${ccargs} locks/consul)
# Lock acquired. Pause briefly to allow the previous holder to restart
# If it takes longer than five seconds run `systemctl restart consul`
# after releasing the lock then we might cause a quorum outage
sleep 5
# Verify that there is a leader before releasing the lock and restarting
/usr/local/bin/consul-wait-for-leader.sh
# Release the lock
consul-cli kv-unlock ${ccargs} locks/consul --session=${sessionid}
# Restart the service
systemctl restart consul
exit 0
| #!/bin/bash
token=$1
if [ -n "${token}" ]; then
ccargs="--token=${token}"
fi
# Non-server nodes can be restart immediately with no effect on the quorum
#
if [ $(consul-cli agent-self ${ccargs} | jq -r .Member.Tags.role) == node ]; then
systemctl restart consul
exit 0
fi
# Try to acquire a lock on 'locks/consul'
sessionid=$(consul-cli kv-lock ${ccargs} locks/consul)
# Lock acquired. Pause briefly to allow the previous holder to restart
# If it takes longer than five seconds run `systemctl restart consul`
# after releasing the lock then we might cause a quorum outage
sleep 10
# Verify that there is a leader before releasing the lock and restarting
/usr/local/bin/consul-wait-for-leader.sh
# Release the lock
consul-cli kv-unlock ${ccargs} locks/consul --session=${sessionid}
# Restart the service
systemctl restart consul
exit 0
|
Copy to LIBRARY_BIN on win | #!/bin/bash
# Adopt a Unix-friendly path if we're on Windows (see bld.bat).
[ -n "$PATH_OVERRIDE" ] && export PATH="$PATH_OVERRIDE"
LDFLAGS="$LDFLAGS -L$PREFIX/lib"
CFLAGS="$CFLAGS -O3 -I$PREFIX/include"
# The AppVeyor build sets "TARGET_ARCH" to x86 or x64. We need to unset
# this, as TARGET_ARCH is put on the command line by Make via
# its default rules for compiling C files.
export TARGET_ARCH=
make -j$CPU_COUNT LDFLAGS="$LDFLAGS" CFLAGS="$CFLAGS"
make test
cp pigz unpigz $PREFIX/bin
| #!/bin/bash
# Adopt a Unix-friendly path if we're on Windows (see bld.bat).
[ -n "$PATH_OVERRIDE" ] && export PATH="$PATH_OVERRIDE"
LDFLAGS="$LDFLAGS -L$PREFIX/lib"
CFLAGS="$CFLAGS -O3 -I$PREFIX/include"
# The AppVeyor build sets "TARGET_ARCH" to x86 or x64. We need to unset
# this, as TARGET_ARCH is put on the command line by Make via
# its default rules for compiling C files.
export TARGET_ARCH=
make -j$CPU_COUNT LDFLAGS="$LDFLAGS" CFLAGS="$CFLAGS"
make test
# Use different variable to get "binprefix" on win:
if [ -n "$LIBRARY_BIN" ]; then
cp pigz unpigz $LIBRARY_BIN
else
cp pigz unpigz $PREFIX/bin
fi
|
Allow invocation from other directories | #!/bin/bash
set -u
set -e
VEC=${1:-text8.tar.gz}
DIR=${2:-word-similarities/}
echo "Evaluating $VEC on word rankings in $DIR" >&2
python evalrank.py $VEC `find "$DIR" -name '*.txt'`
| #!/bin/bash
set -u
set -e
VEC=${1:-text8.tar.gz}
DIR=${2:-word-similarities/}
# from http://stackoverflow.com/a/246128
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "Evaluating $VEC on word rankings in $DIR" >&2
python "$SCRIPTDIR/evalrank.py" "$VEC" `find "$DIR" -name '*.txt'`
|
Use XCache module file generated in installation directory. | #! /bin/sh
wget http://xcache.lighttpd.net/pub/Releases/1.3.2/xcache-1.3.2.tar.gz
tar -zxf xcache-*.tar.gz
cd xcache-1.3.2
phpize
./configure --enable-xcache
make
sudo make install
echo zend_extension=~/.phpenv/versions/$(phpenv version-name)/lib/php/extensions/no-debug-non-zts-20090626/xcache.so > ~/.phpenv/versions/$(phpenv version-name)/etc/conf.d/xcache.ini
cat ~/.phpenv/versions/$(phpenv version-name)/etc/conf.d/xcache.ini
| #! /bin/sh
wget http://xcache.lighttpd.net/pub/Releases/1.3.2/xcache-1.3.2.tar.gz
tar -zxf xcache-*.tar.gz
cd xcache-1.3.2
phpize
./configure --enable-xcache
make
sudo make install
echo zend_extension=~/cache/xcache-1.3.2/modules/xcache.so > ~/.phpenv/versions/$(phpenv version-name)/etc/conf.d/xcache.ini
cat ~/.phpenv/versions/$(phpenv version-name)/etc/conf.d/xcache.ini
|
Fix missing `exit 1` and improve `git push` command | #!/usr/bin/env bash
set -e
set -x
PROJECT_VERSION=`lein project-version`
if [[ "$PROJECT_VERSION" == *-SNAPSHOT ]]
then
echo "This job does not support deploying SNAPSHOT artifacts; project version is '$PROJECT_VERSION'"
exit 1
fi
git fetch --tags
set +e
git show-ref --tags --quiet --verify -- "refs/tags/$PROJECT_VERSION"
TAG_EXISTS=$?
set -e
if [ $TAG_EXISTS -eq 0 ]
then
echo "A tag called '$PROJECT_VERSION' already exists for this project; aborting."
fi
echo "Project version is valid, tag doesn't yet exist. Running tests."
lein test
echo "Tests passed!"
git tag $PROJECT_VERSION
git push --tags
echo "Tagged version '$PROJECT_VERSION'"
lein deploy
| #!/usr/bin/env bash
set -e
set -x
PROJECT_VERSION=`lein project-version`
if [[ "$PROJECT_VERSION" == *-SNAPSHOT ]]
then
echo "This job does not support deploying SNAPSHOT artifacts; project version is '$PROJECT_VERSION'"
exit 1
fi
git fetch --tags
set +e
git show-ref --tags --quiet --verify -- "refs/tags/$PROJECT_VERSION"
TAG_EXISTS=$?
set -e
if [ $TAG_EXISTS -eq 0 ]
then
echo "A tag called '$PROJECT_VERSION' already exists for this project; aborting."
exit 1
fi
echo "Project version is valid, tag doesn't yet exist. Running tests."
lein test
echo "Tests passed!"
git tag $PROJECT_VERSION
git push origin $PROJECT_VERSION
echo "Tagged version '$PROJECT_VERSION'"
lein deploy
|
Fix extraneous quotation marks in release update script. | #!/bin/bash
owner="orbit"
repo="orbit"
tag=v$TAG_VERSION
GH_REPO="https://api.github.com/repos/$owner/$repo"
AUTH="Authorization: token $GITHUB_TOKEN"
# Commit all changed work
git commit -m "Release version $tag and update docs" --author="orbit-tools <orbit@ea.com>"
# Tag commit with the intended release tag (without the underscore)
git tag $tag
git push origin master --tags
# Get commit id
commitId=$(git rev-parse HEAD)
echo Commit Id: $commitId
# Read asset tags.
release=$(curl -sH "$AUTH" "$GH_REPO/releases/tags/_$tag")
echo $release
releaseId=$(jq .id <(cat <<<"$release"))
releaseName=$(jq .name <(cat <<<"$release"))
echo Release: $releaseId - $releaseName - $tag - $commitId
releaseData="{\"tag_name\": \"$tag\", \"target_commitish\": \"$commitId\", \"name\":\"$releaseName\", \"body\": \"$releaseBody\", \"draft\": \"false\", \"prerelease\": \"false\"}"
echo Data: $releaseData
# Patch release with new commit Id and tag
curl -X PATCH -H "$AUTH" -H "Content-Type: application/json" $GH_REPO/releases/$releaseId -d '$releaseData'
git tag -d _$tag
git push origin :refs/tags/_$tag
git reset --hard
| #!/bin/bash
owner="orbit"
repo="orbit"
tag=v$TAG_VERSION
GH_REPO="https://api.github.com/repos/$owner/$repo"
AUTH="Authorization: token $GITHUB_TOKEN"
# Commit all changed work
git commit -m "Release version $tag and update docs" --author="orbit-tools <orbit@ea.com>"
# Tag commit with the intended release tag (without the underscore)
git tag $tag
git push origin master --tags
# Get commit id
commitId=$(git rev-parse HEAD)
echo Commit Id: $commitId
# Read asset tags.
release=$(curl -sH "$AUTH" "$GH_REPO/releases/tags/_$tag")
echo $release
releaseId=$(jq .id <(cat <<<"$release"))
releaseName=$(jq .name <(cat <<<"$release"))
echo Release: $releaseId - $releaseName - $tag - $commitId
releaseData="{\"tag_name\": \"$tag\", \"target_commitish\": \"$commitId\", \"name\":$releaseName, \"draft\": \"false\", \"prerelease\": \"false\"}"
echo Data: $releaseData
# Patch release with new commit Id and tag
curl -X PATCH -H "$AUTH" -H "Content-Type: application/json" $GH_REPO/releases/$releaseId -d '$releaseData'
git tag -d _$tag
git push origin :refs/tags/_$tag
git reset --hard
|
Fix syntax error in shell alias definition |
# Modern ls replacement.
alias e='exa -lagb -@ --git --color=always'
# Software management.
alias sma='su -c "apt update && apt upgrade"'
alias smud='su -c "apt update"'
alias smug='su -c "apt upgrade"'
alias asn='apt-cache search --names-only'
# backup
alias tb='tar-snapshot-to-backup-path'
alias ll='ls -la'
# Show hidden files only.
alias l.='ls -d .* --color=auto'
alias du1='du --max-depth=1'
alias du2='du --max-depth=2'
# tmux
alias t='tmux -2'
alias ta='tmux attach'
# fast travel.
# alias ftg='fast-travel go'
# alias fta='fast-travel add'
# alias ftd='fast-travel delete'
# alias ftl='fast-travel list'
# git
alias gc='git-credentials'
alias ggc='git-global-credentials'
|
# Modern ls replacement.
alias e='exa -lagb -@ --git --color=always'
# Software management.
alias sma='su -c "apt update && apt upgrade"'
alias smud='su -c "apt update"'
alias smug='su -c "apt upgrade"'
alias asn='apt-cache search --names-only'
# backup
alias tb='tar-snapshot-to-backup-path'
alias ll='ls -la'
# Show hidden files only.
alias l='ls -d .* --color=auto'
alias du1='du --max-depth=1'
alias du2='du --max-depth=2'
# tmux
alias t='tmux -2'
alias ta='tmux attach'
# fast travel.
# alias ftg='fast-travel go'
# alias fta='fast-travel add'
# alias ftd='fast-travel delete'
# alias ftl='fast-travel list'
# git
alias gc='git-credentials'
alias ggc='git-global-credentials'
|
Fix line count bug in lease file script for sensu check | #!/bin/bash
host_limit=10
dhcp_dir='/var/lib/neutron/dhcp/'
if (($(ls -1 ${dhcp_dir} | wc -l) < 1)); then
echo 'Mtime OK: No networks found. Dropped check'
exit 0
fi
if [ $1 == 'first' ];then
network=$(ls -1 $dhcp_dir | head -n 1)
elif [ $1 == 'last' ];then
network=$(ls -1 $dhcp_dir | tail -n 1)
else
network=$(ls -1 $dhcp_dir | head -n 1)
fi
dir="${dhcp_dir}${network}"
if [ -f ${dir}/host ] && (($(cat ${dir}/host | wc -l) > $host_limit)); then
/opt/sensu/embedded/bin/check-mtime.rb -f ${dir}/leases -c $2 -w $3
else
echo 'Mtime OK: Not enough hosts. Check dropped'
exit 0
fi
| #!/bin/bash
# host limit are based on lines in host (two lines per host)
host_limit=10*2
dhcp_dir='/var/lib/neutron/dhcp/'
if (($(ls -1 ${dhcp_dir} | wc -l) < 1)); then
echo 'Mtime OK: No networks found. Dropped check'
exit 0
fi
if [ $1 == 'first' ];then
network=$(ls -1 $dhcp_dir | head -n 1)
elif [ $1 == 'last' ];then
network=$(ls -1 $dhcp_dir | tail -n 1)
else
network=$(ls -1 $dhcp_dir | head -n 1)
fi
dir="${dhcp_dir}${network}"
if [ -f ${dir}/host ] && (($(cat ${dir}/host | wc -l) > $host_limit)); then
/opt/sensu/embedded/bin/check-mtime.rb -f ${dir}/leases -c $2 -w $3
else
echo 'Mtime OK: Not enough hosts. Check dropped'
exit 0
fi
|
Make 'sed-recursive' even safer by requiring VCS | # Echo to stderr.
echoerr() {
cat <<< "$@" 1>&2
}
alias err=echoerr
# Add a dir to the path (if it exists and isn't already added).
addpath() {
if [ -d "$1" ]; then
if [[ ":$PATH:" != *":$1:"* ]]; then
PATH="${PATH:+"$PATH:"}$1"
fi
else
err "Not a directory: $1"
fi
}
# Make a string replacement in ALL files RECURSIVELY starting in the current directory.
# Intentionally ignores '.git' directories, unless you're already in it.
#
# @param $1 - The string to find.
# @param $2 - The string to replace each occurrence of $1 with.
sed-recursive() {
find . -type f -not -path '*.git/*' -print0 | xargs -0 sed -i '' "s/$1/$2/g"
}
alias sr=sed-recursive
| # Echo to stderr.
echoerr() {
cat <<< "$@" 1>&2
}
alias err=echoerr
# Add a dir to the path (if it exists and isn't already added).
addpath() {
if [ -d "$1" ]; then
if [[ ":$PATH:" != *":$1:"* ]]; then
PATH="${PATH:+"$PATH:"}$1"
fi
else
err "Not a directory: $1"
fi
}
# Make a string replacement in ALL files RECURSIVELY starting in the current directory.
# For safety, this ignores files within hidden '.git' directories, and only works inside a git repo.
#
# @param $1 - The string to find.
# @param $2 - The string to replace each occurrence of $1 with.
sed-recursive() {
git status > /dev/null && \
find . -type f -not -path '*.git/*' -print0 | xargs -0 sed -i '' "s/$1/$2/g"
}
alias sr=sed-recursive
|
Add blender and firefox to installed apps | brew install caskroom/cask/brew-cask
apps=(
dropbox
google-chrome
slack
seil
spotify
iterm2
virtualbox
flux
mailbox
vlc
)
echo "Installing apps"
brew cask install --appdir="/Applications" ${apps[@]}
| brew install caskroom/cask/brew-cask
apps=(
blender
dropbox
firefox
google-chrome
slack
seil
spotify
iterm2
virtualbox
flux
mailbox
vlc
)
echo "Installing apps"
brew cask install --appdir="/Applications" ${apps[@]}
|
Move to a sonatype profile for release | #!/bin/bash
if [ $# -lt 1 ]; then
echo "usage $0 <ssl-key> [<param> ...]"
exit 1;
fi
key=${1}
shift
params=${@}
#validate key
keystatus=$(gpg --list-keys | grep ${key} | awk '{print $1}')
if [ "${keystatus}" != "pub" ]; then
echo "Could not find public key with label ${key}"
echo -n "Available keys from: "
gpg --list-keys | grep --invert-match '^sub'
exit 1
fi
mvn ${params} clean site:jar -DperformRelease=true -Dgpg.skip=false -Dgpg.keyname=${key} deploy
| #!/bin/bash
if [ $# -lt 1 ]; then
echo "usage $0 <ssl-key> [<param> ...]"
exit 1;
fi
key=${1}
shift
params=${@}
#validate key
keystatus=$(gpg --list-keys | grep ${key} | awk '{print $1}')
if [ "${keystatus}" != "pub" ]; then
echo "Could not find public key with label ${key}"
echo -n "Available keys from: "
gpg --list-keys | grep --invert-match '^sub'
exit 1
fi
mvn ${params} clean site:jar -P sonatype-oss-release -Dgpg.keyname=${key} deploy
|
Change task execution in deploy_snaphot | #!/bin/bash
#
# Deploy a jar, source jar, and javadoc jar to Sonatype's snapshot repo.
#
# Adapted from https://coderwall.com/p/9b_lfq and
# http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/ and
# https://github.com/JakeWharton/RxBinding/blob/master/.buildscript/deploy_snapshot.sh
SLUG="vanniktech/VNTFontListPreference"
JDK="oraclejdk8"
BRANCH="master"
set -e
if [ "$TRAVIS_REPO_SLUG" != "$SLUG" ]; then
echo "Skipping snapshot deployment: wrong repository. Expected '$SLUG' but was '$TRAVIS_REPO_SLUG'."
elif [ "$TRAVIS_JDK_VERSION" != "$JDK" ]; then
echo "Skipping snapshot deployment: wrong JDK. Expected '$JDK' but was '$TRAVIS_JDK_VERSION'."
elif [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo "Skipping snapshot deployment: was pull request."
elif [ "$TRAVIS_BRANCH" != "$BRANCH" ]; then
echo "Skipping snapshot deployment: wrong branch. Expected '$BRANCH' but was '$TRAVIS_BRANCH'."
else
echo "Deploying snapshot..."
./gradlew clean build jacocoReport
echo "Snapshot deployed!"
fi | #!/bin/bash
#
# Deploy a jar, source jar, and javadoc jar to Sonatype's snapshot repo.
#
# Adapted from https://coderwall.com/p/9b_lfq and
# http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/ and
# https://github.com/JakeWharton/RxBinding/blob/master/.buildscript/deploy_snapshot.sh
SLUG="vanniktech/VNTFontListPreference"
JDK="oraclejdk8"
BRANCH="master"
set -e
if [ "$TRAVIS_REPO_SLUG" != "$SLUG" ]; then
echo "Skipping snapshot deployment: wrong repository. Expected '$SLUG' but was '$TRAVIS_REPO_SLUG'."
elif [ "$TRAVIS_JDK_VERSION" != "$JDK" ]; then
echo "Skipping snapshot deployment: wrong JDK. Expected '$JDK' but was '$TRAVIS_JDK_VERSION'."
elif [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo "Skipping snapshot deployment: was pull request."
elif [ "$TRAVIS_BRANCH" != "$BRANCH" ]; then
echo "Skipping snapshot deployment: wrong branch. Expected '$BRANCH' but was '$TRAVIS_BRANCH'."
else
echo "Deploying snapshot..."
./gradlew clean build uploadArchives
echo "Snapshot deployed!"
fi |
Use bundled compcert for VST | #!/usr/bin/env bash
ci_dir="$(dirname "$0")"
. "${ci_dir}/ci-common.sh"
git_download vst
( cd "${CI_BUILD_DIR}/vst" && make IGNORECOQVERSION=true )
| #!/usr/bin/env bash
ci_dir="$(dirname "$0")"
. "${ci_dir}/ci-common.sh"
git_download vst
export COMPCERT=bundled
( cd "${CI_BUILD_DIR}/vst" && make IGNORECOQVERSION=true )
|
Fix redirect for Whyis entrypoint | #!/bin/bash
# jetty9 service start returns 1, even though Jetty has started successfully.
# set -e
service apache2 start 1>&2
service jetty9 start 1>&2
service redis-server start 1>&2
service celeryd start 1>&2
/load-whyis-data.sh 1&>2
exec "$@"
| #!/bin/bash
# jetty9 service start returns 1, even though Jetty has started successfully.
# set -e
service apache2 start 1>&2
service jetty9 start 1>&2
service redis-server start 1>&2
service celeryd start 1>&2
/load-whyis-data.sh 1>&2
exec "$@"
|
Switch to grid engine versions of all scripts | #!/bin/sh
# exit if any of the subcommands returns an error, since they only work if the preceding command succeeded
set -e
set -o errexit
sh bin/build-networkmodel.sh
sh bin/build-simulations.sh
sh bin/run-simulations.sh
#sh bin/run-simulations-gridengine.sh
sh bin/export-data.sh
sh bin/simulation-postprocess.sh
sh bin/build-seriations.sh
#sh bin/run-seriations-gridengine.sh
sh bin/run-seriations.sh
sh bin/annotate-seriation-output.sh
| #!/bin/sh
# exit if any of the subcommands returns an error, since they only work if the preceding command succeeded
set -e
set -o errexit
sh bin/build-networkmodel.sh
sh bin/build-simulations.sh
sh bin/run-simulations-gridengine.sh
sh bin/export-data-gridengine.sh
sh bin/simulation-postprocess-gridengine.sh
sh bin/build-seriations.sh
sh bin/run-seriations-gridengine.sh
sh bin/annotate-seriation-output.sh
|
Fix description in find microphone script. | #!/usr/bin/env bash
# Find the camera listed on the command line,
# based on output of 'pacmd'
export PULSE_RUNTIME_PATH="/run/user/1000/pulse/"
if [[ $# -eq 0 ]]; then
echo "You must provide a microphone name such as,"
echo "PrimeSense or C615"
echo "on the command line."
exit
fi
FOUND=1
for i in $(/usr/bin/pacmd list-sources | grep 'name:' | awk '{ print $2 }' 2>/dev/null); do
if echo "${i}" | grep -i "${1}" >/dev/null; then
echo "${i}" | tr -d '<' | tr -d '>'
FOUND=0
fi
done
exit ${FOUND}
| #!/usr/bin/env bash
# Find the Microphone listed on the command line,
# based on output of 'pacmd'
export PULSE_RUNTIME_PATH="/run/user/1000/pulse/"
if [[ $# -eq 0 ]]; then
echo "You must provide a microphone name such as,"
echo "PrimeSense or C615"
echo "on the command line."
exit
fi
FOUND=1
for i in $(/usr/bin/pacmd list-sources | grep 'name:' | awk '{ print $2 }' 2>/dev/null); do
if echo "${i}" | grep -i "${1}" >/dev/null; then
echo "${i}" | tr -d '<' | tr -d '>'
FOUND=0
fi
done
exit ${FOUND}
|
Add logging to buildkite vm step | #!/bin/bash
set -euo pipefail
vmx_path=$(buildkite-agent meta-data get base_vmx_path)
osx_version=$(buildkite-agent meta-data get base_osx_version)
echo "+++ :packer: Building :buildkite: image from base"
echo Path to VMX is "$vmx_path"
test -d output && rm -rf output/
packer build \
-var vmx_path="$vmx_path" \
-var packer_headless=true \
-var packer_output_dir=output/buildkite-macos \
-var osx_version="$osx_version" \
-var packer_headless=true \
-var vsphere_username="$GOVC_USERNAME" \
-var vsphere_password="$GOVC_PASSWORD" \
-var build_number="$BUILDKITE_BUILD_NUMBER" \
macos-buildkite.json
| #!/bin/bash
set -euo pipefail
vmx_path=$(buildkite-agent meta-data get base_vmx_path)
osx_version=$(buildkite-agent meta-data get base_osx_version)
echo "+++ :packer: Building :buildkite: image from base"
echo Path to VMX is "$vmx_path"
test -d output && rm -rf output/
PACKER_LOG=1 packer build \
-var vmx_path="$vmx_path" \
-var packer_headless=true \
-var packer_output_dir=output/buildkite-macos \
-var osx_version="$osx_version" \
-var packer_headless=true \
-var vsphere_username="$GOVC_USERNAME" \
-var vsphere_password="$GOVC_PASSWORD" \
-var build_number="$BUILDKITE_BUILD_NUMBER" \
macos-buildkite.json
|
Remove \r chars from dhcp-all-interfaces | #!/bin/bash
# Generate $INTERFACES_FILE on first boot
# This will add any unconfigured network interfaces to /etc/network/interfaces
# and configure them for DHCP
INTERFACES_FILE="/etc/network/interfaces"
# Serialize runs so that we don't miss hot-add interfaces
FLOCK=${1:-}
if [ -z "$FLOCK" ] ; then
exec flock -x $INTERFACES_FILE $0 flocked
fi
function get_if_link() {
cat /sys/class/net/${1}/carrier
}
for interface in $(ls /sys/class/net | grep -v ^lo$) ; do
echo -n "Inspecting interface: $interface..."
if ifquery $interface >/dev/null 2>&1 ; then
echo "Has config, skipping."
else
ip link set dev $interface up >/dev/null 2>&1
HAS_LINK="$(get_if_link $interface)"
TRIES=3
while [ "$HAS_LINK" == "0" -a $TRIES -gt 0 ]; do
HAS_LINK="$(get_if_link $interface)"
if [ "$HAS_LINK" == "1" ]; then
break
else
sleep 1
fi
TRIES=$(( TRIES - 1 ))
done
if [ "$HAS_LINK" == "1" ] ; then
printf "auto $interface\r\niface $interface inet dhcp\r\n\r\n" >>$INTERFACES_FILE
echo "Configured"
else
echo "No link detected, skipping"
fi
fi
done
| #!/bin/bash
# Generate $INTERFACES_FILE on first boot
# This will add any unconfigured network interfaces to /etc/network/interfaces
# and configure them for DHCP
INTERFACES_FILE="/etc/network/interfaces"
# Serialize runs so that we don't miss hot-add interfaces
FLOCK=${1:-}
if [ -z "$FLOCK" ] ; then
exec flock -x $INTERFACES_FILE $0 flocked
fi
function get_if_link() {
cat /sys/class/net/${1}/carrier
}
for interface in $(ls /sys/class/net | grep -v ^lo$) ; do
echo -n "Inspecting interface: $interface..."
if ifquery $interface >/dev/null 2>&1 ; then
echo "Has config, skipping."
else
ip link set dev $interface up >/dev/null 2>&1
HAS_LINK="$(get_if_link $interface)"
TRIES=3
while [ "$HAS_LINK" == "0" -a $TRIES -gt 0 ]; do
HAS_LINK="$(get_if_link $interface)"
if [ "$HAS_LINK" == "1" ]; then
break
else
sleep 1
fi
TRIES=$(( TRIES - 1 ))
done
if [ "$HAS_LINK" == "1" ] ; then
printf "auto $interface\niface $interface inet dhcp\n\n" >>$INTERFACES_FILE
echo "Configured"
else
echo "No link detected, skipping"
fi
fi
done
|
Update Alpine version from 3.10 to 3.14 | #!/bin/bash -x
ALPINE_VER="3.10"
DISTRO="balenalib/rpi-alpine:$ALPINE_VER"
# Used for webmails
QEMU="arm"
ARCH="arm32v7/"
# use qemu-*-static from docker container
docker run --rm --privileged multiarch/qemu-user-static:register
docker-compose -f build.yml build \
--build-arg DISTRO=$DISTRO \
--build-arg ARCH=$ARCH \
--build-arg QEMU=$QEMU \
--parallel $@
| #!/bin/bash -x
ALPINE_VER="3.14"
DISTRO="balenalib/rpi-alpine:$ALPINE_VER"
# Used for webmails
QEMU="arm"
ARCH="arm32v7/"
# use qemu-*-static from docker container
docker run --rm --privileged multiarch/qemu-user-static:register
docker-compose -f build.yml build \
--build-arg DISTRO=$DISTRO \
--build-arg ARCH=$ARCH \
--build-arg QEMU=$QEMU \
--parallel $@
|
Use xargs -r to prevent no 'rc' packages | #!/bin/sh
sudo dpkg --purge `dpkg -l | awk '/^rc/{print $2;}' `
| #!/bin/sh
# sudo dpkg --purge `dpkg -l | awk '/^rc/{print $2;}' `
dpkg -l | awk '/^rc/{print $2;}' | xargs -r sudo dpkg --purge
|
Change build script image tag | #! /bin/bash
go build -ldflags "-linkmode external -extldflags -static"
docker build -t caiopo/raft .
rm raft
docker push caiopo/raft
| #! /bin/bash
go build -ldflags "-linkmode external -extldflags -static"
docker build -t caiopo/raft:latest .
rm raft
docker push caiopo/raft:latest
|
Upgrade setuptools before trying to install pip dependencies | #!/bin/sh
set -e
. ./support_functions.sh
run_pip_install () {
$DIRECTORY/bin/pip install -qr requirements.txt
}
DIRECTORY='.venv'
cd "$(dirname "$0")"
REPO="$1"
cd "../../$REPO"
if [ -f lock ]; then
warn "skipped because 'lock' file exists"
else
virtualenv -q "$DIRECTORY"
echo "Updating $REPO..."
outputfile=$(mktemp -t update-pip.XXXXXX)
trap "rm -f '$outputfile'" EXIT
if $(run_pip_install) >"$outputfile" 2>&1; then
ok "ok"
else
error "failed with pip output:"
cat "$outputfile"
exit 1
fi
fi
| #!/bin/sh
set -e
. ./support_functions.sh
DIRECTORY='.venv'
cd "$(dirname "$0")"
REPO="$1"
cd "../../$REPO"
if [ -f lock ]; then
warn "skipped because 'lock' file exists"
else
virtualenv -q "$DIRECTORY"
echo "Updating $REPO..."
outputfile=$(mktemp -t update-pip.XXXXXX)
trap "rm -f '$outputfile'" EXIT
. $DIRECTORY/bin/activate
if ! pip install --upgrade setuptools >"$outputfile" 2>&1; then
error "failed to upgrade setuptools with pip output:"
cat "$outputfile"
exit 1
fi
if pip install -r requirements.txt >"$outputfile" 2>&1; then
ok "ok"
else
error "failed to install dependencies with pip output:"
cat "$outputfile"
exit 1
fi
fi
|
Work around an issue with GitHub macOS runners | #!/bin/bash
brew update 1>/dev/null
brew install -q libffi gnupg2 pgpdump openssl@1.1 gpgme swig
| #!/bin/bash
brew unlink python@3.9 && brew link --overwrite python@3.9
brew update 1>/dev/null
brew install -q libffi gnupg2 pgpdump openssl@1.1 gpgme swig
|
Add comment to iptables section | #!/bin/bash -e
install -m 644 files/wlan0 ${ROOTFS_DIR}/etc/network/interfaces.d/wlan0
install -m 644 files/hostapd.conf ${ROOTFS_DIR}/etc/hostapd/hostapd.conf
sed -i 's/#DAEMON_CONF=""/DAEMON_CONF="\/etc\/hostapd\/hostapd.conf"/' ${ROOTFS_DIR}/etc/default/hostapd
# Prevent wpa_supplicant from grabbing our wlan0 interface
echo "disabled=1" >> ${ROOTFS_DIR}/etc/wpa_supplicant/wpa_supplicant.conf
install -m 644 files/30-ipforward.conf ${ROOTFS_DIR}/etc/sysctl.d/30-ipforward.conf
install -m 644 files/iptables.up.rules ${ROOTFS_DIR}/etc/iptables.up.rules
install -m 755 files/iptables ${ROOTFS_DIR}/etc/network/if-pre-up.d/iptables
| #!/bin/bash -e
install -m 644 files/wlan0 ${ROOTFS_DIR}/etc/network/interfaces.d/wlan0
install -m 644 files/hostapd.conf ${ROOTFS_DIR}/etc/hostapd/hostapd.conf
sed -i 's/#DAEMON_CONF=""/DAEMON_CONF="\/etc\/hostapd\/hostapd.conf"/' ${ROOTFS_DIR}/etc/default/hostapd
# Prevent wpa_supplicant from grabbing our wlan0 interface
echo "disabled=1" >> ${ROOTFS_DIR}/etc/wpa_supplicant/wpa_supplicant.conf
# Install traffic forwarding rules for iptables
install -m 644 files/30-ipforward.conf ${ROOTFS_DIR}/etc/sysctl.d/30-ipforward.conf
install -m 644 files/iptables.up.rules ${ROOTFS_DIR}/etc/iptables.up.rules
install -m 755 files/iptables ${ROOTFS_DIR}/etc/network/if-pre-up.d/iptables
|
Move where we switch directories | #!/usr/bin/env bash
cd $(dirname "$0")
# Retrieve bulk data
if ! curl -s -o /tmp/data.zip http://scc.virginia.gov/clk/data/CISbemon.CSV.zip; then
echo "Failed: http://scc.virginia.gov/clk/data/CISbemon.CSV.zip could not be downloaded"
exit 1
fi
# Uncompress the ZIP file
if ! unzip -o -d ../data/ /tmp/data.zip; then
echo "CISbemon.CSV.zip could not be unzipped"
exit 1
fi
# Rename files to be lowercase, some to not have a period
mv Amendment.csv amendment.csv
mv Corp.csv corp.csv
mv LLC.csv llc.csv
mv LP.csv lp.csv
mv Merger.csv merger.csv
mv Officer.csv officer.csv
mv Tables.csv tables.csv
mv Name.History.csv name_history.csv
mv Reserved.Name.csv reserved_name.csv
# Delete temporary artifacts
rm /tmp/data.zip
cd ../data/
# Create a temporary SQLite file, to avoid touching any that might already
# exist (this prevents downtime)
if ! sqlite3 temp.sqlite < ../scripts/load-data.sql; then
echo "Error: CSV files could not be loaded into SQLite"
exit 1
fi
# Put the file in its final location
mv -f temp.sqlite vabusinesses.sql
| #!/usr/bin/env bash
cd $(dirname "$0")
# Retrieve bulk data
if ! curl -s -o /tmp/data.zip http://scc.virginia.gov/clk/data/CISbemon.CSV.zip; then
echo "Failed: http://scc.virginia.gov/clk/data/CISbemon.CSV.zip could not be downloaded"
exit 1
fi
# Uncompress the ZIP file
if ! unzip -o -d ../data/ /tmp/data.zip; then
echo "CISbemon.CSV.zip could not be unzipped"
exit 1
fi
# Delete temporary artifacts
rm /tmp/data.zip
cd ../data/
# Rename files to be lowercase, some to not have a period
mv Amendment.csv amendment.csv
mv Corp.csv corp.csv
mv LLC.csv llc.csv
mv LP.csv lp.csv
mv Merger.csv merger.csv
mv Officer.csv officer.csv
mv Tables.csv tables.csv
mv Name.History.csv name_history.csv
mv Reserved.Name.csv reserved_name.csv
# Create a temporary SQLite file, to avoid touching any that might already
# exist (this prevents downtime)
if ! sqlite3 temp.sqlite < ../scripts/load-data.sql; then
echo "Error: CSV files could not be loaded into SQLite"
exit 1
fi
# Put the file in its final location
mv -f temp.sqlite vabusinesses.sql
|
Install nghttp2 to the virtualenv. | #!/bin/bash
set -e
set -x
if [[ "$NGHTTP2" = true ]]; then
# GCC 4.6 seems to cause problems, so go straight to 4.8.
sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install g++-4.8 libstdc++-4.8-dev
export CXX="g++-4.8" CC="gcc-4.8"
$CC --version
# Install nghttp2. Right now I haven't built a PPA for this so we have to
# do it from source, which kinda sucks. First, install a ton of
# prerequisite packages.
sudo apt-get install autoconf automake autotools-dev libtool pkg-config \
zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libevent-dev libjansson-dev libjemalloc-dev
pip install cython
# Now, download and install nghttp2's latest version.
wget https://github.com/tatsuhiro-t/nghttp2/releases/download/v0.4.1/nghttp2-0.4.1.tar.gz
tar -xzvf nghttp2-0.4.1.tar.gz
cd nghttp2-0.4.1
autoreconf -i
automake
autoconf
./configure
make
sudo make install
cd ..
fi
pip install .
pip install -r test_requirements.txt
| #!/bin/bash
set -e
set -x
if [[ "$NGHTTP2" = true ]]; then
# GCC 4.6 seems to cause problems, so go straight to 4.8.
sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install g++-4.8 libstdc++-4.8-dev
export CXX="g++-4.8" CC="gcc-4.8"
$CC --version
# Install nghttp2. Right now I haven't built a PPA for this so we have to
# do it from source, which kinda sucks. First, install a ton of
# prerequisite packages.
sudo apt-get install autoconf automake autotools-dev libtool pkg-config \
zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libevent-dev libjansson-dev libjemalloc-dev
pip install cython
# Now, download and install nghttp2's latest version.
wget https://github.com/tatsuhiro-t/nghttp2/releases/download/v0.4.1/nghttp2-0.4.1.tar.gz
tar -xzvf nghttp2-0.4.1.tar.gz
cd nghttp2-0.4.1
autoreconf -i
automake
autoconf
./configure
make
sudo make install
# The makefile doesn't install into the active virtualenv. Install again.
cd python
python setup.py install
cd ../..
fi
pip install .
pip install -r test_requirements.txt
|
Disable scripts in CI too | #!/bin/bash
#
# script which is run by the CI build (after `yarn test`).
#
# clones riot-web develop and runs the tests against our version of react-sdk.
set -ev
handle_error() {
EXIT_CODE=$?
exit $EXIT_CODE
}
trap 'handle_error' ERR
echo "--- Building Riot"
scripts/ci/layered-riot-web.sh
cd ../riot-web
riot_web_dir=`pwd`
CI_PACKAGE=true yarn build
cd ../matrix-react-sdk
# run end to end tests
pushd test/end-to-end-tests
ln -s $riot_web_dir riot/riot-web
# PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true ./install.sh
# CHROME_PATH=$(which google-chrome-stable) ./run.sh
echo "--- Install synapse & other dependencies"
./install.sh
# install static webserver to server symlinked local copy of riot
./riot/install-webserver.sh
rm -r logs || true
mkdir logs
echo "+++ Running end-to-end tests"
TESTS_STARTED=1
./run.sh --no-sandbox --log-directory logs/
popd
| #!/bin/bash
#
# script which is run by the CI build (after `yarn test`).
#
# clones riot-web develop and runs the tests against our version of react-sdk.
set -ev
handle_error() {
EXIT_CODE=$?
exit $EXIT_CODE
}
trap 'handle_error' ERR
echo "Tests are disabled, see https://github.com/vector-im/riot-web/issues/13226"
exit 0
#TODO: Uncomment all of this in https://github.com/vector-im/riot-web/issues/13226
#echo "--- Building Riot"
#scripts/ci/layered-riot-web.sh
#cd ../riot-web
#riot_web_dir=`pwd`
#CI_PACKAGE=true yarn build
#cd ../matrix-react-sdk
## run end to end tests
#pushd test/end-to-end-tests
#ln -s $riot_web_dir riot/riot-web
## PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true ./install.sh
## CHROME_PATH=$(which google-chrome-stable) ./run.sh
#echo "--- Install synapse & other dependencies"
#./install.sh
## install static webserver to server symlinked local copy of riot
#./riot/install-webserver.sh
#rm -r logs || true
#mkdir logs
#echo "+++ Running end-to-end tests"
#TESTS_STARTED=1
#./run.sh --no-sandbox --log-directory logs/
#popd
|
Update singularity and go to latest versions for Ubuntu 18 compatibility. | #!/bin/bash
# Copyright 2019 Google LLC.
# This script is used to install `singularity` 3.3.0 on Ubutun 18.04.
# For different Linux distributions and versions, modifications might be needed.
# Installation instructions are from: https://sylabs.io/docs/
set -euo pipefail
sudo apt-get update && sudo apt-get install -y \
build-essential \
libssl-dev \
uuid-dev \
libgpgme11-dev \
squashfs-tools \
libseccomp-dev \
wget \
pkg-config \
git
export VERSION=1.12 OS=linux ARCH=amd64
# Downloads the required Go package
wget https://dl.google.com/go/go$VERSION.$OS-$ARCH.tar.gz
# Extracts the archive
sudo tar -C /usr/local -xzvf go$VERSION.$OS-$ARCH.tar.gz
# Deletes the ``tar`` file
rm go$VERSION.$OS-$ARCH.tar.gz
export VERSION=3.3.0
wget https://github.com/sylabs/singularity/releases/download/v${VERSION}/singularity-${VERSION}.tar.gz
tar -xzf singularity-${VERSION}.tar.gz
pushd singularity
export PATH=/usr/local/go/bin:$PATH
./mconfig
make -C builddir
sudo make -C builddir install
# Returns to the original directory.
popd
| #!/bin/bash
# Copyright 2019 Google LLC.
# This script is used to install `singularity` 3.7.0 on Ubutun 18.04.
# For different Linux distributions and versions, modifications might be needed.
# Installation instructions are from: https://sylabs.io/docs/
set -euo pipefail
sudo apt-get update && sudo apt-get install -y \
build-essential \
libssl-dev \
uuid-dev \
libgpgme11-dev \
squashfs-tools \
libseccomp-dev \
wget \
pkg-config \
git
export VERSION=1.15.6 OS=linux ARCH=amd64
# Downloads the required Go package
wget https://dl.google.com/go/go$VERSION.$OS-$ARCH.tar.gz
# Extracts the archive
sudo tar -C /usr/local -xzvf go$VERSION.$OS-$ARCH.tar.gz
# Deletes the ``tar`` file
rm go$VERSION.$OS-$ARCH.tar.gz
export VERSION=3.7.0
wget https://github.com/sylabs/singularity/releases/download/v${VERSION}/singularity-${VERSION}.tar.gz
tar -xzf singularity-${VERSION}.tar.gz
pushd singularity
export PATH=/usr/local/go/bin:$PATH
./mconfig
make -C builddir
sudo make -C builddir install
# Returns to the original directory.
popd
|
Change the folder to jazz-ui | BUCKET_NAME=$1
REGION=$2
cd ./jazz-core/cloud-api-onboarding-webapp
sudo npm install -g @angular/cli
sudo npm install
sudo ng build –prod
ls -l ./dist
cd ./dist
aws s3 cp . s3://$BUCKET_NAME --recursive --region $REGION
IFS=$(echo -en "\n\b")
for key in $( find . \( -not -type d \) -print | sed 's/^.\///g' ); do
echo item: $key
aws s3api put-object-acl --bucket $BUCKET_NAME --key $key --grant-full-control id=78d7d8174c655d51683784593fe4e6f74a7ed3fae3127d2beca2ad39e4fdc79a,uri=http://acs.amazonaws.com/groups/s3/LogDelivery,uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers --grant-read-acp uri=http://acs.amazonaws.com/groups/global/AllUsers
done
cd -
| BUCKET_NAME=$1
REGION=$2
cd ./jazz-ui
sudo npm install -g @angular/cli
sudo npm install
sudo ng build –prod
ls -l ./dist
cd ./dist
aws s3 cp . s3://$BUCKET_NAME --recursive --region $REGION
IFS=$(echo -en "\n\b")
for key in $( find . \( -not -type d \) -print | sed 's/^.\///g' ); do
echo item: $key
aws s3api put-object-acl --bucket $BUCKET_NAME --key $key --grant-full-control id=78d7d8174c655d51683784593fe4e6f74a7ed3fae3127d2beca2ad39e4fdc79a,uri=http://acs.amazonaws.com/groups/s3/LogDelivery,uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers --grant-read-acp uri=http://acs.amazonaws.com/groups/global/AllUsers
done
cd -
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.