Instruction stringlengths 14 778 | input_code stringlengths 0 4.24k | output_code stringlengths 1 5.44k |
|---|---|---|
Add tidying of HD30501 data | # Extract and rename tapas files
#declare -a arr=("HD202206-1" "HD202206-2" "HD202206-3"
# "HD162020-1" "HD162020-2"
# "HD167665-1a" "HD167665-1b" "HD167665-2"
# "HD168443-1" "HD168443-2"
# "HD211847-1" "HD211847-2"
# "HD4747-1")
declare -a arr=("HD202206-2")
for FOLDER in "${arr[@]}"
do
echo "Moving to", $FOLDER, "xml files"
cd ~/Phd/data/Crires/BDs-DRACS/$FOLDER/
mkdir Telluric_files
mv tapas_*ipac Telluric_files/
mv *tapas*request*.xml Telluric_files/
done | # Extract and rename tapas files
declare -a arr=("HD202206-1" "HD202206-2" "HD202206-3"
"HD162020-1" "HD162020-2"
"HD167665-1a" "HD167665-1b" "HD167665-2"
"HD168443-1" "HD168443-2"
"HD211847-1" "HD211847-2"
"HD4747-1")
declare -a arr2=("HD30501-1" "HD30501-2" "HD30501-2b" "HD30501-3")
for FOLDER in "${arr[@]}"
do
echo "Tidying telluric files in", $FOLDER
cd ~/Phd/data/Crires/BDs-DRACS/$FOLDER/
mkdir Telluric_files
mv tapas_*ipac Telluric_files/
mv *tapas*request*.xml Telluric_files/
done
for FOLDER in "${arr2[@]}"
do
echo "Tidying telluric files in", $FOLDER
cd ~/Phd/data/Crires/BDs-DRACS/$FOLDER/
mkdir Telluric_files
mv Combined_Nods/tapas_*ipac Telluric_files/
#mv *tapas*request*.xml Telluric_files/
done |
Improve to install latest collectd package | #!/usr/bin/env bash
set -euf -o pipefail
apt-get install -y collectd
| #!/usr/bin/env bash
set -euf -o pipefail
COLLECTD_VERSION=5.7
# Add collectd’s official GPG key
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 3994D24FB8543576
# Set up the latest repository
add-apt-repository \
"deb http://pkg.ci.collectd.org/deb \
$(lsb_release -c -s) collectd-$COLLECTD_VERSION"
# Install collectd
apt-get update -qq && apt-get install -y collectd
|
Improve formatting output of benchmarks runner | #!/bin/bash
declare -a fft_array=("1" "2")
declare -a size_array=("2" "4" "8" "16")
declare -a gulp_array=("1" "2" "4" "8" "16" "32" "64" "128")
for i in "${fft_array[@]}"
do
for j in "${size_array[@]}"
do
for k in "${gulp_array[@]}"
do
echo "STARTING NEW BENCHMARK"
export NUMBER_FFT="$i"
export SIZE_MULTIPLIER="$j"
export GULP_SIZE="$(echo "32768*1024/$k" | bc)"
echo "$NUMBER_FFT, $SIZE_MULTIPLIER, $GULP_SIZE"
NUM1="$(python -OO linear_fft_pipeline.py)"
NUM2="$(python skcuda_fft_pipeline.py)"
echo "Bifrost has $NUM1"
echo "Scikit has $NUM2"
echo "Bifrost is: "
echo "scale=5; $NUM2/$NUM1" | bc
echo "times faster"
done
done
done
| #!/bin/bash
declare -a fft_array=("1" "2")
declare -a size_array=("2" "4" "8" "16")
declare -a gulp_array=("1" "2" "4" "8" "16" "32" "64" "128")
echo "start key, # FFT's/2, size multiplier, gulp size, bifrost execution time, skcuda execution time, speedup, end key"
for i in "${fft_array[@]}"
do
for j in "${size_array[@]}"
do
for k in "${gulp_array[@]}"
do
echo -n ">>>START, "
export NUMBER_FFT="$i"
export SIZE_MULTIPLIER="$j"
export GULP_SIZE="$(echo "32768*1024/$k" | bc)"
echo -n "$NUMBER_FFT, $SIZE_MULTIPLIER, $GULP_SIZE, "
NUM1="$(python -OO linear_fft_pipeline.py)"
NUM2="$(python skcuda_fft_pipeline.py)"
speedup=$(echo "scale=5; $NUM2/$NUM1" | bc)
echo "$NUM1, $NUM2, $speedup, END<<<"
done
done
done
|
Use default file name for ssh key | #!/usr/bin/env bash
# Check for root privileges
if [ "$(whoami)" != "root" ]; then
echo "Must be root."
exit 1
fi
USERNAME=$SUDO_USER
# ssh
cat > /home/$USERNAME/.ssh/config <<"EOL"
Host falcon.jaspersoft.com
HostName falcon.jaspersoft.com
IdentityFile ~/.ssh/<private_key>
EOL
chown $USERNAME:$USERNAME /home/$USERNAME/.ssh/config
chmod 600 /home/$USERNAME/.ssh/config
| #!/usr/bin/env bash
# Check for root privileges
if [ "$(whoami)" != "root" ]; then
echo "Must be root."
exit 1
fi
USERNAME=$SUDO_USER
# ssh
cat > /home/$USERNAME/.ssh/config <<"EOL"
Host falcon.jaspersoft.com
HostName falcon.jaspersoft.com
IdentityFile ~/.ssh/id_dsa
EOL
chown $USERNAME:$USERNAME /home/$USERNAME/.ssh/config
chmod 600 /home/$USERNAME/.ssh/config
|
Update train stage1 only script | #!/bin/bash
set -eo pipefail
# usage: create a config.sh script, source it, and then run ./TRAIN.sh to train the model
if [ -z "$JAMR_HOME" ]; then
echo 'Error: please source config script'
exit 1
fi
#pushd "$JAMR_HOME/scripts/preprocessing"
#./PREPROCESS.sh
#popd
# Train
pushd "$JAMR_HOME/scripts/training"
./cmd.conceptTable.train
echo "Training stage 1"
./cmd.stage1-weights
# Evaluate on test set
echo ""
./cmd.test.decode.stage1only
echo " ----- Evaluation on Test: Spans -----" | tee "${MODEL_DIR}/RESULTS.txt"
tail -n 3 "${MODEL_DIR}/test.decode.stage1only.err" | tee -a "${MODEL_DIR}/RESULTS.txt"
echo ""
| #!/bin/bash
set -eo pipefail
# Use this script if you want to train and evaluate stage1 only (concept identification)
# usage: create a config.sh script, source it, and then run ./TRAIN_STAGE1.sh to train the model
if [ -z "$JAMR_HOME" ]; then
echo 'Error: please source config script'
exit 1
fi
pushd "$JAMR_HOME/scripts/preprocessing"
./PREPROCESS.sh
popd
# Train
pushd "$JAMR_HOME/scripts/training"
./cmd.conceptTable.train
echo "Training stage 1"
./cmd.stage1-weights
# Evaluate on test set
echo ""
./cmd.test.decode.stage1only
echo " ----- Evaluation on Test: Spans -----" | tee "${MODEL_DIR}/RESULTS.txt"
tail -n 3 "${MODEL_DIR}/test.decode.stage1only.err" | tee -a "${MODEL_DIR}/RESULTS.txt"
echo ""
|
Install Mono and Powershell in Travis CI | # Install .NET Core (see https://www.microsoft.com/net/core#ubuntu)
# Add the dotnet apt-get feed
sudo sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ trusty main" > /etc/apt/sources.list.d/dotnetdev.list'
sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893
# Update apt-get cache
sudo apt-get update
# Install .NET Core SDK
sudo apt-get install dotnet-dev-1.0.0-preview2-003131
# Install Python Pip and cdiff
sudo apt-get -y install python-pip
pip -V
pip install --upgrade cdiff | # Install .NET Core (see https://www.microsoft.com/net/core#ubuntu),
# Mono (http://www.mono-project.com/docs/getting-started/install/linux)
# and Powershell (https://www.rootusers.com/how-to-install-powershell-on-linux)
# Add the dotnet apt-get feed (for .NET Core)
sudo sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ trusty main" > /etc/apt/sources.list.d/dotnetdev.list'
sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893
# Add the Mono apt-get feed
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
echo "deb http://download.mono-project.com/repo/debian wheezy main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
# Update apt-get cache
sudo apt-get update
# Install .NET Core SDK
sudo apt-get install dotnet-dev-1.0.0-preview2-003131 -y
# Install Mono
sudo apt-get install mono-complete -y
# Install Powershell dependencies
sudo apt-get install libunwind8 libicu52 -y
# Download and install Powershell
wget https://github.com/PowerShell/PowerShell/releases/download/v6.0.0-alpha.11/powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
sudo dpkg -i powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
rm -f powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
# Install Python Pip and cdiff
sudo apt-get -y install python-pip
pip -V
pip install --upgrade cdiff |
Update docker and a few other things | brew install zsh
brew install zsh-completions
brew install zsh-syntax-highlighting
brew install zsh-history-substring-search
brew install tmux
brew install reattach-to-user-namespace
brew install git
brew install ctags
brew install rbenv
brew install ruby-build
brew install ag
brew install leiningen
brew install hub
brew install node
brew install vim
brew install --HEAD https://raw.github.com/neovim/neovim/master/neovim.rb
brew install go
brew install forego
brew install autojump
brew tap homebrew/dupes
brew install apple-gcc42
brew install caskroom/cask/brew-cask
# docker
brew cask install virtualbox
brew install boot2docker
brew cask install java
brew cask install google-chrome
brew cask install firefox
brew cask install 1password
brew cask install google-drive
brew cask install dropbox
brew cask install hipchat
brew cask install slack
brew cask install iterm2
brew cask install alfred
brew cask install spotify
brew cask install vlc
brew cask install utorrent
brew cask install tvshows
brew cask install skype
brew cask install limechat
brew cask install gitter
brew cask install nvalt
brew cask install kindle
brew cask install send-to-kindle
brew cask install telegram
| ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install zsh
brew install zsh-completions
brew install zsh-syntax-highlighting
brew install zsh-history-substring-search
brew install tmux
brew install reattach-to-user-namespace
brew install git
brew install ctags
brew install ag
brew install leiningen
brew install vim
brew install --HEAD https://raw.github.com/neovim/neovim/master/neovim.rb
brew install autojump
brew tap homebrew/dupes
brew install apple-gcc42
brew install caskroom/cask/brew-cask
# docker
brew cask install virtualbox
brew install docker
brew install docker-machine
brew install docker-compose
brew cask install java
brew cask install google-chrome
brew cask install firefox
brew cask install 1password
brew cask install google-drive
brew cask install dropbox
brew cask install iterm2
brew cask install alfred
brew cask install spotify
brew cask install vlc
brew cask install utorrent
brew cask install tvshows
brew cask install hipchat
brew cask install slack
brew cask install skype
brew cask install telegram
brew cask install limechat
brew cask install gitter
brew cask install nvalt
brew cask install kindle
brew cask install send-to-kindle
|
Use tripleo.sh for the host | #!/bin/bash -xe
if [[ $USER != "stack" ]]; then
echo "Must be run as stack user.";
exit 1;
fi
# Enable epel
sudo yum -y install epel-release
# Enable last known good RDO Trunk Delorean repository for core openstack packages
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-tripleo/delorean.repo
# Enable latest RDO Trunk Delorean repository only for the TripleO packages
sudo curl -o /etc/yum.repos.d/delorean-current.repo http://trunk.rdoproject.org/centos7/current/delorean.repo
sudo sed -i 's/\[delorean\]/\[delorean-current\]/' /etc/yum.repos.d/delorean-current.repo
sudo /bin/bash -c "cat <<EOF>>/etc/yum.repos.d/delorean-current.repo
includepkgs=diskimage-builder,instack,instack-undercloud,os-apply-config,os-cloud-config,os-collect-config,os-net-config,os-refresh-config,python-tripleoclient,tripleo-common,openstack-tripleo-heat-templates,openstack-tripleo-image-elements,openstack-tripleo,openstack-tripleo-puppet-elements,openstack-puppet-modules
EOF"
# Enable the Delorean Deps repository
sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7/delorean-deps.repo
sudo yum install -y instack-undercloud
| #!/bin/bash -xe
if [[ $USER != "stack" ]]; then
echo "Must be run as stack user.";
exit 1;
fi
rm -rf ~/tripleo-ci
git clone https://github.com/openstack-infra/tripleo-ci.git ~/tripleo-ci
export DELOREAN_REPO_URL="http://buildlogs.centos.org/centos/7/cloud/x86_64/rdo-trunk-master-tripleo/"
export DELOREAN_STABLE_REPO_URL="http://buildlogs.centos.org/centos/7/cloud/x86_64/rdo-trunk-$STABLE_RELEASE/"
~/tripleo-ci/scripts/tripleo.sh --repo-setup
sudo yum install -y instack-undercloud
|
Revert "Fix paths for boot file" | #!/bin/bash
# Write certs in env to files and replace with path
if [ -n "$CLEARDB_SSL_KEY" -a -n "$CLEARDB_SSL_CERT" -a -n "$CLEARDB_SSL_CA" ]
then
echo "MySQL SSL keys gotten from env vars"
mkdir "/app/certs"
echo "$CLEARDB_SSL_KEY" > /app/certs/key.pem
echo "$CLEARDB_SSL_CERT" > /app/certs/cert.pem
echo "$CLEARDB_SSL_CA" > /app/certs/ca.pem
export CLEARDB_SSL_KEY="/app/certs/key.pem"
export CLEARDB_SSL_CERT="/app/certs/cert.pem"
export CLEARDB_SSL_CA="/app/certs/ca.pem"
export CLEARDB_SSL="ON"
else
echo "MySQL SSL keys not found"
unset CLEARDB_SSL_KEY
unset CLEARDB_SSL_CERT
unset CLEARDB_SSL_CA
fi
# Boot up!
../vendor/bin/heroku-hhvm-nginx \
-C "$HEROKU_APP_DIR/support/app_nginx.conf" \
"$HEROKU_APP_DIR/public.built/"
| #!/bin/bash
# Write certs in env to files and replace with path
if [ -n "$CLEARDB_SSL_KEY" -a -n "$CLEARDB_SSL_CERT" -a -n "$CLEARDB_SSL_CA" ]
then
echo "MySQL SSL keys gotten from env vars"
mkdir "/app/certs"
echo "$CLEARDB_SSL_KEY" > /app/certs/key.pem
echo "$CLEARDB_SSL_CERT" > /app/certs/cert.pem
echo "$CLEARDB_SSL_CA" > /app/certs/ca.pem
export CLEARDB_SSL_KEY="/app/certs/key.pem"
export CLEARDB_SSL_CERT="/app/certs/cert.pem"
export CLEARDB_SSL_CA="/app/certs/ca.pem"
export CLEARDB_SSL="ON"
else
echo "MySQL SSL keys not found"
unset CLEARDB_SSL_KEY
unset CLEARDB_SSL_CERT
unset CLEARDB_SSL_CA
fi
# Boot up!
vendor/bin/heroku-hhvm-nginx -C support/app_nginx.conf public.built/
|
Add ability to only build some docker images | #!/bin/bash
# Builds all of the known docker images
set -eu
ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
source $ABSOLUTE_PATH/functions.sh
note "Generating configs"
$LSTORE_SCRIPT_BASE/generate-docker-base.sh
cd $LSTORE_SCRIPT_BASE/docker/base
STATUS=""
for DISTRO in */; do
note "Processing $(basename $DISTRO)"
docker build --force-rm=true --rm=true \
-t "lstore/builder:$(basename $DISTRO)" "$DISTRO" || \
STATUS="${STATUS}"$'\n'"Failed to build $DISTRO" && \
STATUS="${STATUS}"$'\n'"Successfully built $DISTRO"
done
if [ ! -z "$STATUS" ]; then
note "$STATUS"
else
fatal "Nothing was built?"
fi
| #!/bin/bash
# Builds all of the known docker images
set -eu
ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
source $ABSOLUTE_PATH/functions.sh
note "Generating configs"
$LSTORE_SCRIPT_BASE/generate-docker-base.sh
cd $LSTORE_SCRIPT_BASE/docker/base
STATUS=""
# Parse comand line
DISTROS=( "$@" )
if [ ${#DISTROS[@]} -eq 0 ]; then
DISTROS=( */ )
fi
DISTROS=( "${DISTROS[@]%/}" )
for DISTRO in "${DISTROS[@]}"; do
note "Processing $(basename $DISTRO)"
docker build --force-rm=true --rm=true \
-t "lstore/builder:$(basename $DISTRO)" "$DISTRO" || \
STATUS="${STATUS}"$'\n'"Failed to build $DISTRO" && \
STATUS="${STATUS}"$'\n'"Successfully built $DISTRO"
done
if [ ! -z "$STATUS" ]; then
note "$STATUS"
else
fatal "Nothing was built?"
fi
|
Update email address used for submitting the Homebrew update | #!/bin/bash
set -e
version=$( cat artifactory-repo/build-info.json | jq -r '.buildInfo.modules[0].id' | sed 's/.*:.*:\(.*\)/\1/' )
git clone homebrew-tap-repo updated-homebrew-tap-repo > /dev/null
if [[ $LATEST_GA = true ]]; then
pushd updated-homebrew-tap-repo > /dev/null
curl https://repo.spring.io/libs-release-local/org/springframework/boot/spring-boot-cli/${version}/spring-boot-cli-${version}-homebrew.rb --output spring-boot-cli-${version}-homebrew.rb
rm spring-boot.rb
mv spring-boot-cli-*.rb spring-boot.rb
git config user.name "Spring Buildmaster" > /dev/null
git config user.email "buildmaster@springframework.org" > /dev/null
git add spring-boot.rb > /dev/null
git commit -m "Upgrade to Spring Boot ${version}" > /dev/null
echo "DONE"
popd > /dev/null
fi
| #!/bin/bash
set -e
version=$( cat artifactory-repo/build-info.json | jq -r '.buildInfo.modules[0].id' | sed 's/.*:.*:\(.*\)/\1/' )
git clone homebrew-tap-repo updated-homebrew-tap-repo > /dev/null
if [[ $LATEST_GA = true ]]; then
pushd updated-homebrew-tap-repo > /dev/null
curl https://repo.spring.io/libs-release-local/org/springframework/boot/spring-boot-cli/${version}/spring-boot-cli-${version}-homebrew.rb --output spring-boot-cli-${version}-homebrew.rb
rm spring-boot.rb
mv spring-boot-cli-*.rb spring-boot.rb
git config user.name "Spring Buildmaster" > /dev/null
git config user.email "spring-buildmaster@users.noreply.github.com" > /dev/null
git add spring-boot.rb > /dev/null
git commit -m "Upgrade to Spring Boot ${version}" > /dev/null
echo "DONE"
popd > /dev/null
fi
|
Move to newer style docker "linking" by using container names. | #!/bin/sh
export CKPT_REDIS="${REDIS_PORT_6379_TCP_ADDR}:${REDIS_PORT_6379_TCP_PORT}"
export CKPT_AMQP_URL="amqp://guest:guest@${RABBITMQ_PORT_5672_TCP_ADDR}:${RABBITMQ_PORT_5672_TCP_PORT}"
sleep 5
./backend-services
| #!/bin/sh
export CKPT_REDIS="redis:6379"
export CKPT_AMQP_URL="amqp://guest:guest@rabbitmq:5672"
sleep 5
./backend-services
|
Use sed for indentation to not interpret shell characters | #!/usr/bin/env bash
set -e
# Wait a second so we don't see ephemeral file changes
sleep 1
# Don't tag if there is a dirty working dir
if ! git diff-index --quiet HEAD ; then
echo "Warning there appears to be uncommitted changes in the working directory:"
git diff-index HEAD
echo
echo "Please commit them or stash them before tagging a release."
echo
fi
version=v$(go run ./project/cmd/version/main.go)
notes=$(go run ./project/cmd/notes/main.go)
echo "This command will tag the current commit $(git rev-parse --short HEAD) as version $version"
echo "defined programmatically in project/releases.go with release notes:"
echo
echo "$notes" | xargs -L1 echo "> "
echo
echo "It will then push the version tag to origin."
echo
read -p "Do you want to continue? [Y\n]: " -r
# Just hitting return defaults to continuing
[[ $REPLY ]] && [[ ! $REPLY =~ ^[Yy]$ ]] && echo && exit 0
echo
# Create tag
echo "Tagging version $version with message:"
echo ""
echo "$notes"
echo ""
echo "$notes" | git tag -s -a ${version} -F-
# Push tag
git push origin ${version}
| #!/usr/bin/env bash
set -e
# Wait a second so we don't see ephemeral file changes
sleep 1
# Don't tag if there is a dirty working dir
if ! git diff-index --quiet HEAD ; then
echo "Warning there appears to be uncommitted changes in the working directory:"
git diff-index HEAD
echo
echo "Please commit them or stash them before tagging a release."
echo
fi
version=v$(go run ./project/cmd/version/main.go)
notes=$(go run ./project/cmd/notes/main.go)
echo "This command will tag the current commit $(git rev-parse --short HEAD) as version $version"
echo "defined programmatically in project/releases.go with release notes:"
echo
echo "$notes" | sed 's/^/> /'
echo
echo "It will then push the version tag to origin."
echo
read -p "Do you want to continue? [Y\n]: " -r
# Just hitting return defaults to continuing
[[ $REPLY ]] && [[ ! $REPLY =~ ^[Yy]$ ]] && echo && exit 0
echo
# Create tag
echo "Tagging version $version with message:"
echo ""
echo "$notes"
echo ""
echo "$notes" | git tag -s -a ${version} -F-
# Push tag
git push origin ${version}
|
Add zoom to cask installer | #!/usr/bin/env bash
source ~/dotfiles/setup/header.sh
echo "Installing Homebrew Casks..."
preload_cask_list
# Install essential macOS apps via Cask (in order of essentiality)
install_cask google-chrome
install_cask dropbox
install_cask alfred
install_cask atom
install_cask authy
install_cask evernote
install_cask keyboard-maestro
install_cask flux
# Install additional apps
install_cask appcleaner
install_cask namechanger
install_cask the-unarchiver
install_cask mamp
rm -rf '/Applications/MAMP PRO'
install_cask keybase
install_cask postman
install_cask slack
install_cask vlc
install_cask basictex
echo "Installing Quick Look plugins..."
install_cask qlstephen
install_cask qlmarkdown
install_cask scriptql
install_cask quicklook-json
install_cask betterzip
install_cask suspicious-package
qlmanage -r
| #!/usr/bin/env bash
source ~/dotfiles/setup/header.sh
echo "Installing Homebrew Casks..."
preload_cask_list
# Install essential macOS apps via Cask (in order of essentiality)
install_cask google-chrome
install_cask dropbox
install_cask alfred
install_cask atom
install_cask authy
install_cask evernote
install_cask keyboard-maestro
install_cask flux
# Install additional apps
install_cask appcleaner
install_cask namechanger
install_cask the-unarchiver
install_cask mamp
rm -rf '/Applications/MAMP PRO'
install_cask keybase
install_cask postman
install_cask slack
install_cask zoomus
install_cask vlc
install_cask basictex
echo "Installing Quick Look plugins..."
install_cask qlstephen
install_cask qlmarkdown
install_cask scriptql
install_cask quicklook-json
install_cask betterzip
install_cask suspicious-package
qlmanage -r
|
Add return codes to 'new_alias()' function |
function new_alias() {
local scope="$1"
local alias_name="$2"
local alias_command="$3"
if [ $# = 3 ]; then
printf "%s\n" "New command: ${YELLOW}git ${BOLD}${alias_name}${NORMAL}"
git config $scope alias.${alias_name} "${alias_command}"
else
printf "%s\n" "New alias: scope, name or command not found"
fi
}
function set_git_attribute() {
local attribute="$1"
local scope="$2"
local value="$3"
if [ $# = 3 ]; then
git config $scope $attribute "$value"
return $?;
else
printf "${RED}%s${NORMAL}\n" "set_git_attribute: scope or $attribute not found"
return 1;
fi
}
function get_git_attribute() {
local attribute="$1"
local scope="$2"
git config $scope --get $attribute
}
|
function new_alias() {
local scope="$1"
local alias_name="$2"
local alias_command="$3"
if [ $# = 3 ]; then
printf "%s\n" "New command: ${YELLOW}git ${BOLD}${alias_name}${NORMAL}"
git config $scope alias.${alias_name} "${alias_command}"
return $?;
else
printf "%s\n" "New alias: scope, name or command not found"
return 1;
fi
}
function set_git_attribute() {
local attribute="$1"
local scope="$2"
local value="$3"
if [ $# = 3 ]; then
git config $scope $attribute "$value"
return $?;
else
printf "${RED}%s${NORMAL}\n" "set_git_attribute: scope or $attribute not found"
return 1;
fi
}
function get_git_attribute() {
local attribute="$1"
local scope="$2"
git config $scope --get $attribute
}
|
Add switch for nginx-stg and nginx-prd | #!/bin/bash
set -e
# If needed first:
# kubectl delete deployment,service,rs,pvc --all
kubectl delete deployment,service,rs --all
# Create service credentials as a secret
kubectl delete secret service-credentials
kubectl create secret generic service-credentials --from-env-file=../kubernetes/secrets/service-credentials.txt
# Processes everything in the kubernetes folder:
# - Create the shared persistent volume
# - Create the deployment replication controllers for NGINX and PHP-FPM
# - Create the services for the NGINX and PHP-FPM deployment
kubectl apply -f ../kubernetes
# Confirm everything looks good
kubectl describe deployment php-fpm-stg
kubectl describe service php-fpm-stg
kubectl describe deployment nginx-stg
kubectl describe service nginx-stg
kubectl describe deployment php-fpm-prd
kubectl describe service php-fpm-prd
kubectl describe deployment nginx-prd
kubectl describe service nginx-prd
| #!/bin/bash
set -e
# If needed first:
# kubectl delete deployment,service,rs,pvc --all
kubectl delete deployment,service,rs --all
# Create service credentials as a secret
kubectl delete secret service-credentials
kubectl create secret generic service-credentials --from-env-file=../kubernetes/secrets/service-credentials.txt
# Processes everything in the kubernetes folder:
# - Create the shared persistent volume
# - Create the deployment replication controllers for NGINX and PHP-FPM
# - Create the services for the NGINX and PHP-FPM deployment
# kubectl apply -f ../kubernetes
kubectl apply -f ../kubernetes/persistent-volumes.yaml
kubectl apply -f ../kubernetes/php-cli.yaml
kubectl apply -f ../kubernetes/php-fpm-stg.yaml
# kubectl apply -f ../kubernetes/nginx-stg.yaml
kubectl apply -f ../kubernetes/php-fpm-prd.yaml
kubectl apply -f ../kubernetes/nginx-prd.yaml
# Confirm everything looks good
kubectl describe deployment php-fpm-stg
kubectl describe service php-fpm-stg
kubectl describe deployment nginx-stg
kubectl describe service nginx-stg
kubectl describe deployment php-fpm-prd
kubectl describe service php-fpm-prd
kubectl describe deployment nginx-prd
kubectl describe service nginx-prd
|
Add joblib as a dependency in the environment. | #!/bin/bash
function safe_call {
# usage:
# safe_call function param1 param2 ...
HERE=$(pwd)
"$@"
cd "$HERE"
}
function install_theano {
pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
}
function install_pylearn2 {
DIR="$1"
cd "$DIR"
git clone -b parameter_prediction git@bitbucket.org:mdenil/pylearn2.git
cd pylearn2
python setup.py install
}
ENV=pp_env
EXTERNAL=external
rm -rf $ENV
conda create --yes --prefix pp_env accelerate pip nose
source activate "$(pwd)/$ENV"
safe_call install_theano
safe_call install_pylearn2 "$EXTERNAL"
cat <<EOF
Run:
source activate "$(pwd)/$ENV"
to activate the environment. When you're done you can run
source deactivate
to close the environement.
EOF
| #!/bin/bash
function safe_call {
# usage:
# safe_call function param1 param2 ...
HERE=$(pwd)
"$@"
cd "$HERE"
}
function install_theano {
pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
}
function install_joblib {
pip install joblib
}
function install_pylearn2 {
DIR="$1"
cd "$DIR"
git clone -b parameter_prediction git@bitbucket.org:mdenil/pylearn2.git
cd pylearn2
python setup.py install
}
ENV=pp_env
EXTERNAL=external
rm -rf $ENV
conda create --yes --prefix pp_env accelerate pip nose
source activate "$(pwd)/$ENV"
safe_call install_theano
safe_call install_joblib
safe_call install_pylearn2 "$EXTERNAL"
cat <<EOF
Run:
source activate "$(pwd)/$ENV"
to activate the environment. When you're done you can run
source deactivate
to close the environement.
EOF
|
Add git pull before running a random benchmark. | #!/bin/bash -l
benchmarks=($(ls $1*.sh))
n_benchmarks=${#benchmarks[*]}
selected_benchmark=${benchmarks[$((RANDOM%n_benchmarks))]}
[ -z "$(squeue -u $USER -o '\%35j' | grep ${selected_benchmark%.sh})" ] && sbatch $selected_benchmark
| #!/bin/bash -l
module load git
git pull
benchmarks=($(ls $1*.sh))
n_benchmarks=${#benchmarks[*]}
selected_benchmark=${benchmarks[$((RANDOM%n_benchmarks))]}
[ -z "$(squeue -u $USER -o '\%35j' | grep ${selected_benchmark%.sh})" ] && sbatch $selected_benchmark
|
Build script bails out early. | #!/bin/sh
cargo build --target=thumbv7em-none-eabi
echo "Converting elf -> bin"
arm-none-eabi-objcopy -O binary ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust.bin
ls -l ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust* | #!/bin/sh
set -e
cargo build --target=thumbv7em-none-eabi
echo "Converting elf -> bin"
arm-none-eabi-objcopy -O binary ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust.bin
ls -l ./target/thumbv7em-none-eabi/debug/bare-metal-arm-rust*
|
Check that we have exactly one argument. | #!/usr/bin/env bash
function chkcmd {
which $1 >/dev/null
if [ $? -ne 0 ];then
echo "Program '$1' not found."
exit 1
fi
}
chkcmd "phantomjs"
chkcmd "convert"
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
exampleUrl=$1
echo $SCRIPTDIR
echo $exampleUrl
# take screenshots - requires phantomjs
phantomjs "$SCRIPTDIR/screenshots.js" $exampleUrl $SCRIPTDIR
# resize screenshots - requires imagemagick
for THUMB in $(find "$SCRIPTDIR/examples" | grep thumb.png)
do
convert -resize 118X90 $THUMB $THUMB
done
| #!/usr/bin/env bash
function die () {
echo >&2 "$@"
exit 1
}
function chkcmd {
which $1 >/dev/null
if [ $? -ne 0 ];then
die "Program '$1' not found."
fi
}
chkcmd "phantomjs"
chkcmd "convert"
# check that we have exactly one argument
[ "$#" -eq 1 ] || die "1 argument required, $# provided"
echo $SCRIPTDIR
echo $exampleUrl
# take screenshots - requires phantomjs
phantomjs "$SCRIPTDIR/screenshots.js" $exampleUrl $SCRIPTDIR
# resize screenshots - requires imagemagick
for THUMB in $(find "$SCRIPTDIR/examples" | grep thumb.png)
do
convert -resize 118X90 $THUMB $THUMB
done
|
Create the xapian index on the fly. | #!/bin/bash
cd /PubMed2Go/
# If /export/ is mounted, export_user_files file moving all data to /export/
# symlinks will point from the original location to the new path under /export/
# If /export/ is not given, nothing will happen in that step
python ./export_user_files.py $PG_DATA_DIR_DEFAULT
service postgresql start
if [ ! -f /export/.pubmed2go_save ]; then
python PubMedParser.py -i /export/import_data/ -d pubmed -p 4
fi
touch /export/.pubmed2go_save
tail -f /var/log/postgresql/postgresql-9.1-main.log
| #!/bin/bash
cd /PubMed2Go/
# If /export/ is mounted, export_user_files file moving all data to /export/
# symlinks will point from the original location to the new path under /export/
# If /export/ is not given, nothing will happen in that step
python ./export_user_files.py $PG_DATA_DIR_DEFAULT
service postgresql start
if [ ! -f /export/.pubmed2go_save ]; then
python PubMedParser.py -i /export/import_data/ -d pubmed -p 4
cd full_text_index
python RunXapian.py --xapian_database_path /export/ --index --db_psql pubmed --no_search
fi
touch /export/.pubmed2go_save
tail -f /var/log/postgresql/postgresql-9.1-main.log
|
Purge only "*.dat" files which are safely stored cache files | #!/bin/bash
#
# Removes CHI cache files older than 3 (or $ARGV[1]) days
#
set -e
PWD="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
MC_ROOT="$PWD/../"
S3_CACHE_ROOT="$MC_ROOT/data/cache/s3_downloads/"
S3_CACHE_DEFAULT_DIR="$S3_CACHE_ROOT/Default/"
if [ ! -z "$1" ]; then
if [[ ! "$1" =~ ^-?[0-9]+$ ]]; then
echo "Max. age in days is not an integer."
exit 1
fi
MAX_AGE_DAYS="$1"
else
MAX_AGE_DAYS="3"
fi
#
# ---
#
if [ ! -d "$S3_CACHE_DEFAULT_DIR" ]; then
echo "S3 cache 'Default' directory does not exist at: $S3_CACHE_DEFAULT_DIR"
exit 1
fi
# Verify that the directory has the "0", "1", "2", ..., "e", "f" directory structure
if [ ! -d "$S3_CACHE_DEFAULT_DIR/0" ]; then
echo "S3 cache 'Default' directory doesn't look like it contains CHI cache: $S3_CACHE_DEFAULT_DIR"
exit 1
fi
find "$S3_CACHE_DEFAULT_DIR" -type f -mtime "+$MAX_AGE_DAYS" -exec rm {} \;
exit 0
| #!/bin/bash
#
# Removes CHI cache files older than 3 (or $ARGV[1]) days
#
set -e
PWD="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
MC_ROOT="$PWD/../"
S3_CACHE_ROOT="$MC_ROOT/data/cache/s3_downloads/"
S3_CACHE_DEFAULT_DIR="$S3_CACHE_ROOT/Default/"
if [ ! -z "$1" ]; then
if [[ ! "$1" =~ ^-?[0-9]+$ ]]; then
echo "Max. age in days is not an integer."
exit 1
fi
MAX_AGE_DAYS="$1"
else
MAX_AGE_DAYS="3"
fi
#
# ---
#
if [ ! -d "$S3_CACHE_DEFAULT_DIR" ]; then
echo "S3 cache 'Default' directory does not exist at: $S3_CACHE_DEFAULT_DIR"
exit 1
fi
# Verify that the directory has the "0", "1", "2", ..., "e", "f" directory structure
if [ ! -d "$S3_CACHE_DEFAULT_DIR/0" ]; then
echo "S3 cache 'Default' directory doesn't look like it contains CHI cache: $S3_CACHE_DEFAULT_DIR"
exit 1
fi
find "$S3_CACHE_DEFAULT_DIR" -name "*.dat" -type f -mtime "+$MAX_AGE_DAYS" -exec rm {} \;
exit 0
|
Add sbt to $PATH lazily | # To root everything else.
SCALA_ROOT=$HOME/scala
# Enable tools (like vim) to find support files.
export SCALA_DIST=$SCALA_ROOT/scala-dist
# Actual scala installation.
export SCALA_HOME=$SCALA_ROOT/local
# Get scala.
add_to_path $SCALA_HOME/bin
# Get sbt.
add_to_path $SCALA_ROOT/sbt/bin
| # To root everything else.
SCALA_ROOT=$HOME/scala
# Enable tools (like vim) to find support files.
export SCALA_DIST=$SCALA_ROOT/scala-dist
# Actual scala installation.
export SCALA_HOME=$SCALA_ROOT/local
# Get scala.
add_to_path $SCALA_HOME/bin
# Get sbt (lazily).
sbt () {
add_to_path $SCALA_ROOT/sbt/bin
unset -f sbt
command sbt "$@"
}
|
Replace Javascript module to get libcore by shell script | #!/usr/bin/env bash
function match {
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -nE $*
else
sed -nr $*
fi
}
LIB_VERSION_MAJOR=`cat CMakeLists.txt | match 's/set\(VERSION_MAJOR.*([0-9]+).*\)/\1/p'`
LIB_VERSION_MINOR=`cat CMakeLists.txt | match 's/set\(VERSION_MINOR.*([0-9]+).*\)/\1/p'`
LIB_VERSION_PATCH=`cat CMakeLists.txt | match 's/set\(VERSION_PATCH.*([0-9]+).*\)/\1/p'`
LIB_VERSION=$LIB_VERSION_MAJOR.$LIB_VERSION_MINOR.$LIB_VERSION_PATCH
echo "export LIB_VERSION=$LIB_VERSION" >> $BASH_ENV
echo "=====> Libcore version"
echo $LIB_VERSION | #!/usr/bin/env bash
function match {
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -nE $*
else
sed -nr $*
fi
}
LIB_VERSION_MAJOR=`cat CMakeLists.txt | match 's/set\(VERSION_MAJOR.*([0-9]+).*\)/\1/p'`
LIB_VERSION_MINOR=`cat CMakeLists.txt | match 's/set\(VERSION_MINOR.*([0-9]+).*\)/\1/p'`
LIB_VERSION_PATCH=`cat CMakeLists.txt | match 's/set\(VERSION_PATCH.*([0-9]+).*\)/\1/p'`
LIB_VERSION=$LIB_VERSION_MAJOR.$LIB_VERSION_MINOR.$LIB_VERSION_PATCH
if [ -z "$CIRCLE_TAG" ]; then
COMMIT_HASH=`echo $CIRCLE_SHA1 | cut -c 1-6`
LIB_VERSION="$LIB_VERSION-rc-$COMMIT_HASH"
fi
echo "export LIB_VERSION=$LIB_VERSION" >> $BASH_ENV
echo "=====> Libcore version"
echo $LIB_VERSION |
Update the build directory in VC++ script. | #!/bin/sh
# This file is part of C++-Builtem <http://github.com/ufal/cpp_builtem/>.
#
# Copyright 2014 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Compile cl_deps.exe using 32-bit compiler if needed
[ -f .objs/cl_deps.exe ] || ${0%-64.sh}-32.sh .objs/cl_deps.exe
wine cmd /c Z:/`readlink -f $0`.bat PLATFORM=win-vs BITS=64 "$@"
| #!/bin/sh
# This file is part of C++-Builtem <http://github.com/ufal/cpp_builtem/>.
#
# Copyright 2014 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Compile cl_deps.exe using 32-bit compiler if needed
[ -f .build/cl_deps.exe ] || ${0%-64.sh}-32.sh .build/cl_deps.exe
wine cmd /c Z:/`readlink -f $0`.bat PLATFORM=win-vs BITS=64 "$@"
|
Adjust key path for ha-proxy access | #!/bin/bash -e
export KEY_FILE=private-config/environments/softlayer/concourse/ha-maintenance.key
printf "Downloading HAProxy config... \n"
ssh -oStrictHostKeyChecking=no -i ${KEY_FILE} ha-maintenance@${HAPROXY_IP} 'wget https://raw.githubusercontent.com/cloudfoundry-incubator/bits-service-ci/master/docs/haproxy.cfg --output-document=/etc/haproxy/haproxy.cfg'
printf "Restarting HA proxy with latest config \n"
ssh -oStrictHostKeyChecking=no -i ${KEY_FILE} ha-maintenance@${HAPROXY_IP} 'service haproxy restart'
STAUTS_OK=$(curl -s http://flintstone.ci.cf-app.com/ -L -I | grep 200 | wc -l)
if [ ${STAUTS_OK} -eq 1 ]
then
printf "Update woz successful\n"
exit 0
else
printf "Update wozn't successful\n"
printf "Verify with: 'curl -s http://flintstone.ci.cf-app.com/ -L -I | grep 200 | wc -l'\n"
exit 1
fi | #!/bin/bash -e
export KEY_FILE=../../../private-config/environments/softlayer/concourse/ha-maintenance.key
chmod 600 ${KEY_FILE}
printf "Downloading HAProxy config... \n"
ssh -oStrictHostKeyChecking=no -i ${KEY_FILE} ha-maintenance@${HAPROXY_IP} 'wget https://raw.githubusercontent.com/cloudfoundry-incubator/bits-service-ci/master/docs/haproxy.cfg --output-document=/etc/haproxy/haproxy.cfg'
printf "Restarting HA proxy with latest config \n"
ssh -oStrictHostKeyChecking=no -i ${KEY_FILE} ha-maintenance@${HAPROXY_IP} 'service haproxy restart'
STAUTS_OK=$(curl -s http://flintstone.ci.cf-app.com/ -L -I | grep 200 | wc -l)
if [ ${STAUTS_OK} -eq 1 ]
then
printf "Update woz successful\n"
exit 0
else
printf "Update wozn't successful\n"
printf "Verify with: 'curl -s http://flintstone.ci.cf-app.com/ -L -I | grep 200 | wc -l'\n"
exit 1
fi
|
Add check to prevent root backups. | #!/bin/sh
set -x
exec sudo \
ionice -c 3 \
nice rsync \
-vrtpAXlHogS \
--progress \
--delete-before \
--exclude=temporary/ \
"$@" -- /media/archive/* "/media/${1:-extern}"
| #!/bin/sh
set -x
readonly backup_dir="/media/${1:-extern}"
if [[ ! -d $backup_dir ]]; then
echo >&2 "No such directory: $backup_dir"
exit 1
fi
exec sudo \
ionice -c 3 \
nice rsync \
-vrtpAXlHogS \
--progress \
--delete-before \
--exclude=temporary/ \
"$@" -- /media/archive/* "$backup_dir"
|
Use $(..) instead of legacy `..` | #!/bin/bash
# $1 is the service which sent the request
if [ "$1" != "mp" ]
then
#stop mopidy
mpc stop
fi
AIRPLAY=`netstat -atunp | grep ESTABLISHED | grep 5002`
UPNP=`netstat -atunp | grep gmediarender | grep "CLOSE_WAIT\|ESTABLISHED"`
if [ "$1" != "gm" ]
then
# check gmediarender state with: netstat -atunp | grep gmediarender | grep "CLOSE_WAIT\|ESTABLISHED"
if [ "$UPNP" ]
then
/etc/init.d/gmediarenderer stop
killall -9 gmediarender
#stop gmediarender (a bit rude, but I don't know how else...)
/etc/init.d/gmediarenderer start
fi
fi
if [ "$1" != "sp" ]
then
# check airplay state with: netstat -atunp | grep ESTABLISHED | grep 5002
if [ "$AIRPLAY" ]
then
killall -9 shairport
#stop shairport (a bit rude, but I don't know how else...)
/etc/init.d/shairportinit restart
fi
fi
| #!/bin/bash
# $1 is the service which sent the request
if [ "$1" != "mp" ]
then
#stop mopidy
mpc stop
fi
AIRPLAY=$(netstat -atunp | grep ESTABLISHED | grep 5002)
UPNP=$(netstat -atunp | grep gmediarender | grep "CLOSE_WAIT\|ESTABLISHED")
if [ "$1" != "gm" ]
then
# check gmediarender state with: netstat -atunp | grep gmediarender | grep "CLOSE_WAIT\|ESTABLISHED"
if [ "$UPNP" ]
then
/etc/init.d/gmediarenderer stop
killall -9 gmediarender
#stop gmediarender (a bit rude, but I don't know how else...)
/etc/init.d/gmediarenderer start
fi
fi
if [ "$1" != "sp" ]
then
# check airplay state with: netstat -atunp | grep ESTABLISHED | grep 5002
if [ "$AIRPLAY" ]
then
killall -9 shairport
#stop shairport (a bit rude, but I don't know how else...)
/etc/init.d/shairportinit restart
fi
fi
|
Fix a2ensite for travis build | #!/bin/sh
echo "---> Starting $(tput bold ; tput setaf 2)virtual host creation$(tput sgr0)"
sudo cp -f .travis/apache2/www_simplemappr_local.conf /etc/apache2/sites-available/www_simplemappr_local.conf
sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/www_simplemappr_local.conf
sudo a2ensite "www.simplemappr.local"
sudo cp -f .travis/apache2/img_simplemappr_local.conf /etc/apache2/sites-available/img_simplemappr_local.conf
sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/img_simplemappr_local.conf
sudo a2ensite "img.simplemappr.local"
sudo a2enmod actions rewrite expires headers
sudo touch /usr/lib/cgi-bin/php5.fcgi | #!/bin/sh
echo "---> Starting $(tput bold ; tput setaf 2)virtual host creation$(tput sgr0)"
sudo cp -f .travis/apache2/www_simplemappr_local.conf /etc/apache2/sites-available/www_simplemappr_local.conf
sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/www_simplemappr_local.conf
sudo a2ensite www_simplemappr_local.conf
sudo cp -f .travis/apache2/img_simplemappr_local.conf /etc/apache2/sites-available/img_simplemappr_local.conf
sudo sed -e "s?%TRAVIS_BUILD_DIR%?$(pwd)?g" --in-place /etc/apache2/sites-available/img_simplemappr_local.conf
sudo a2ensite www_simplemappr_local.conf
sudo a2enmod actions rewrite expires headers
sudo touch /usr/lib/cgi-bin/php5.fcgi |
Change PIP to require virtualenv. | # Python
export PYTHONSTARTUP=$HOME/dot-files-forest/dotfiles/pythonstartup.py
# See http://www.doughellmann.com/docs/virtualenvwrapper/
export WORKON_HOME=$HOME/.virtualenvs
export PROJECT_HOME=$HOME/Projects
export PIP_VIRTUALENV_BASE=$WORKON_HOME
export PIP_REQUIRE_VIRTUALENV=true
export PIP_RESPECT_VIRTUALENV=true
export VIRTUALENV_USE_DISTRIBUTE=True
export VIRTUALENVWRAPPER_VIRTUALENV_ARGS=--no-site-packages
if [[ "$OS_IS_POSIX" = true ]]; then
antigen bundle virtualenvwrapper
# create virtualenv py if necessary
lsvirtualenv -b |grep py > /dev/null 2>&1
if [ $? -eq 1 ]; then
mkvirtualenv py
fi
fi
| # Python
export PYTHONSTARTUP=$HOME/dot-files-forest/dotfiles/pythonstartup.py
# See http://www.doughellmann.com/docs/virtualenvwrapper/
export WORKON_HOME=$HOME/.virtualenvs
export PROJECT_HOME=$HOME/Projects
export PIP_VIRTUALENV_BASE=$WORKON_HOME
export PIP_REQUIRE_VIRTUALENV=false
export PIP_RESPECT_VIRTUALENV=true
export VIRTUALENV_USE_DISTRIBUTE=True
export VIRTUALENVWRAPPER_VIRTUALENV_ARGS=--no-site-packages
if [[ "$OS_IS_POSIX" = true ]]; then
antigen bundle virtualenvwrapper
# create virtualenv py if necessary
lsvirtualenv -b |grep py > /dev/null 2>&1
if [ $? -eq 1 ]; then
mkvirtualenv py
fi
fi
|
Hide output of git push. | #!/bin/bash
git config user.name "Travis CI"
git config user.email "travis@travis-ci.com"
npm version patch -m "Bump version to %s. [ci skip]"
npm run release
git push "https://$GH_TOKEN:x-oauth-basic@github.com/shockone/black-screen.git" master:master
git push "https://$GH_TOKEN:x-oauth-basic@github.com/shockone/black-screen.git" --tags
| #!/bin/bash
git config user.name "Travis CI"
git config user.email "travis@travis-ci.com"
npm version patch -m "Bump version to %s. [ci skip]"
npm run release
git push --quiet "https://$GH_TOKEN:x-oauth-basic@github.com/shockone/black-screen.git" master:master > /dev/null 2>&1
git push --quiet "https://$GH_TOKEN:x-oauth-basic@github.com/shockone/black-screen.git" --tags > /dev/null 2>&1
|
Add path to ImageMagick install accessible to compute nodes | #!/usr/bin/env bash
#PBS -l nodes=1:ppn=1
#PBS -l walltime=4:00:00
#PBS -N ILAMB.$PBS_O_LOGNAME
#
# ILAMB execution script adapted for the `torque` queue
# manager. Submit this script with:
#
# $ qsub run_ilamb.sh
#
# ILAMB needs the ILAMB_ROOT var.
export ILAMB_ROOT=/scratch/pbs/ilamb
echo $ILAMB_ROOT
# NCL needs the NCARG_ROOT var, as well as the path to its executable.
export NCARG_ROOT=/home/csdms/tools/ncl
PATH=$NCARG_ROOT/bin:$PATH
# Define model simulation type, CLM or CMIP5.
export MODELTYPE=CMIP5
# Define spatial resolution for diagnostics, 0.5x0.5, 1x1 or 2.5x2.5.
export SPATRES=0.5x0.5
# Define plot file type, i.e., eps, gif, pdf, png, ps.
export PLOTTYPE=png
cd $PBS_O_WORKDIR
date
ncl -n main_ncl_code.ncl
date
| #!/usr/bin/env bash
#PBS -l nodes=1:ppn=1
#PBS -l walltime=4:00:00
#PBS -N ILAMB.$PBS_O_LOGNAME
#
# ILAMB execution script adapted to run on `beach` through its queue
# manager. Submit this script with:
#
# $ qsub run_ilamb.sh
#
# Configure ILAMB dependencies and paths.
tools_dir=/home/csdms/tools
export NCARG_ROOT=$tools_dir/ncl
PATH=$NCARG_ROOT/bin:$tools_dir/ImageMagick/bin:$PATH
export ILAMB_ROOT=/scratch/pbs/ilamb
# Define model simulation type, CLM or CMIP5.
export MODELTYPE=CMIP5
# Define spatial resolution for diagnostics, 0.5x0.5, 1x1 or 2.5x2.5.
export SPATRES=0.5x0.5
# Define plot file type, i.e., eps, gif, pdf, png, ps.
export PLOTTYPE=png
cd $PBS_O_WORKDIR
echo $ILAMB_ROOT
date
ncl -n main_ncl_code.ncl
date
|
Update the unproxied Swarm cluster setup script | #!/bin/sh -e
source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
head_node="${MACHINE_NAME_PREFIX}-1"
swarm_dicovery_token=$(docker-swarm create)
swarm_flags="--swarm --swarm-discovery=token://${swarm_dicovery_token}"
for i in '1' '2' '3'; do
if [ ${i} = '1' ]; then
create_machine_with_simple_weave_setup \
"${MACHINE_NAME_PREFIX}" "${i}" "--swarm-master ${swarm_flags}"
else
create_machine_with_simple_weave_setup \
"${MACHINE_NAME_PREFIX}" "${i}" "${swarm_flags}"
connect_to=$($DOCKER_MACHINE ip "${MACHINE_NAME_PREFIX}-${i}")
with_machine_env ${head_node} $WEAVE connect ${connect_to}
fi
done
| #!/bin/sh -e
source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
head_node="${MACHINE_NAME_PREFIX}-1"
swarm_dicovery_token=$(docker-swarm create)
swarm_flags="--swarm --swarm-discovery=token://${swarm_dicovery_token}"
for i in '1' '2' '3'; do
if [ ${i} = '1' ]; then
$DOCKER_MACHINE_CREATE \
${swarm_flags} \
--swarm-master \
"${MACHINE_NAME_PREFIX}-${i}"
else
$DOCKER_MACHINE_CREATE \
${swarm_flags} \
"${MACHINE_NAME_PREFIX}-${i}"
fi
export DOCKER_CLIENT_ARGS="$(${DOCKER_MACHINE} config)"
$WEAVE launch
$WEAVE launch-dns "10.9.1.${i}/24" -debug
if [ ${i} -gt '1' ]; then
$WEAVE connect $(DOCKER_MACHINE ip ${head_node})
fi
unset DOCKER_CLIENT_ARGS
done
|
Add volta and homebrew to path | CASE_SENSITIVE="true"
ZSH_THEME="farmber"
DISABLE_UNTRACKED_FILES_DIRTY="true"
export PATH=$HOME/bin:$PATH
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='code -w'
fi
| CASE_SENSITIVE="true"
ZSH_THEME="farmber"
DISABLE_UNTRACKED_FILES_DIRTY="true"
export VOLTA_HOME="$HOME/.volta"
export PATH="$HOME/bin:$VOLTA_HOME/bin:/opt/homebrew/bin:$PATH"
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='code -w'
fi
|
Enable GTest brief output on CI builds. | #!/bin/bash
set -ex
if [[ "${CXX}" == clang* ]]
then
export CXXFLAGS="-stdlib=libc++"
fi
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_DEMOS=ON -DBUILD_SCHEMA_TESTS=ON ..
make
make unittest
| #!/bin/bash
set -ex
if [[ "${CXX}" == clang* ]]
then
export CXXFLAGS="-stdlib=libc++"
fi
export GTEST_BRIEF=1
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_DEMOS=ON -DBUILD_SCHEMA_TESTS=ON ..
make
make unittest
|
Add EPICS env vars necessary to install pcaspy | #!/bin/bash
# Install Pcaspy and PyEpics
# Run with sudo -HE
sudo apt-get install -y python3-pip swig
sudo -HE pip3 install pyepics==3.3.3
sudo -HE pip3 install pcaspy==0.7.2
exit 0
| #!/bin/bash
# Install Pcaspy and PyEpics
# Run with sudo -HE
sudo apt-get install -y python3-pip swig
export EPICS_BASE=/opt/epics/base
export EPICS_HOST_ARCH=linux-x86_64
sudo -HE pip3 install pyepics==3.3.3
sudo -HE pip3 install pcaspy==0.7.2
exit 0
|
Fix docker build scripts when scripts moved to scripts/ | #!/bin/bash
# Helper script to build "jp" on all supported platforms.
# This script uses docker so you don't have to have the cross
# platform golang environment setup. Just make sure you have docker
# installed. The built executables will be in build/
docker run --rm -v "$PWD:/go/src/jp" -w /go/src/jp golang:1.4.2-cross ./build-all-platforms.sh
| #!/bin/bash
# Helper script to build "jp" on all supported platforms.
# This script uses docker so you don't have to have the cross
# platform golang environment setup. Just make sure you have docker
# installed. The built executables will be in build/
docker run --rm -v "$PWD:/go/src/jp" -w /go/src/jp golang:1.4.2-cross scripts/build-all-platforms.sh
|
Remove call to brew bundle | #!/bin/sh
#
# Homebrew
#
# This installs some of the common dependencies needed (or at least desired)
# using Homebrew.
# If we're on a Mac lets set our defaults
if [ "$(uname -s)" == "Darwin" ]
then
# Check for Homebrew
if test ! $(which brew)
then
echo " Installing Homebrew for you."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" > /tmp/homebrew-install.log
else
# Upgrade homebrew
brew update
fi
brew bundle $HOME/.dotfiles/homebrew/Brewfile
exit 0
fi
| #!/bin/sh
#
# Homebrew
#
# This installs some of the common dependencies needed (or at least desired)
# using Homebrew.
# If we're on a Mac lets set our defaults
if [ "$(uname -s)" == "Darwin" ]
then
# Check for Homebrew
if test ! $(which brew)
then
echo " Installing Homebrew for you."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" > /tmp/homebrew-install.log
else
# Upgrade homebrew
brew update
fi
exit 0
fi
|
Add skk function to zsh for skim preview | # exa or ls
if type exa > /dev/null; then
alias ll="exa -lgm --group-directories-first -s modified $argv"
alias ls="exa $argv"
fi
# vim or nvim
if type nvim > /dev/null; then
alias vim="nvim"
alias vi="nvim"
fi
# Docker
if type docker-compose > /dev/null; then
alias dc='docker-compose'
fi
# Emacs
# if type emacs > /dev/null; then
# alias emacs='emacs -nw'
# fi
if [ "$(uname 2> /dev/null)" = "Darwin" ]; then
function man-preview() {
man -t "$@" | open -f -a Preview
}
function quick-look() {
(( $# > 0 )) && qlmanage -p $* &>/dev/null &
}
# Man page to Preview
alias manp='man-preview'
# Show/hide hidden files in the Finder
alias showfiles="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hidefiles="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
fi
| # exa or ls
if type exa > /dev/null; then
alias ll="exa -lgm --group-directories-first -s modified $argv"
alias ls="exa $argv"
fi
# vim or nvim
if type nvim > /dev/null; then
alias vim="nvim"
alias vi="nvim"
fi
# Docker
if type docker-compose > /dev/null; then
alias dc='docker-compose'
fi
# Emacs
# if type emacs > /dev/null; then
# alias emacs='emacs -nw'
# fi
if [ "$(uname 2> /dev/null)" = "Darwin" ]; then
function man-preview() {
man -t "$@" | open -f -a Preview
}
function quick-look() {
(( $# > 0 )) && qlmanage -p $* &>/dev/null &
}
# Man page to Preview
alias manp='man-preview'
# Show/hide hidden files in the Finder
alias showfiles="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hidefiles="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
fi
# skim with preview
function skk() {
if [ -n "$1" ]
then
sk --ansi -i -c "rg --line-number --column --color \"always\" $1" --preview "$FZF_PREVIEW_SH {}"
fi
}
|
Improve the mirror sync tool | #!/bin/bash
git clone 'git@github.com:/moverest/cshellgame'
cd cshellgame || exit
git remote add forge 'https://forge.telecomnancy.univ-lorraine.fr/git/martine96u_c'
git push --mirror forge
cd ..
rm -rf cshellgame
| #!/bin/bash
function init {
git remote add mirror 'https://forge.telecomnancy.univ-lorraine.fr/git/martine96u_c'
}
function sync {
git push --mirror mirror
}
function is_init {
git remote | grep mirror > /dev/null
}
case "$1" in
init )
init
;;
sync )
sync
;;
is_init )
is_init && echo initialised || echo not initialised && false
;;
* )
is_init || init
sync
;;
esac
|
Fix mac test shell script | DYLD_LIBRARY_PATH=~/Downloads/instantclient_11_2-2 ../xcodebuild/ReleaseIA32/dart oracledart_sync_extension_test.dart
| #!/bin/bash
DYLD_LIBRARY_PATH=~/Downloads/instantclient_11_2-2 ../xcodebuild/ReleaseIA32/dart oracledart_sync_extension_test.dart
|
Make the compilation status clear. | #!/bin/bash
function check_availability() {
binary=$1
which $binary 2>&1 > /dev/null
if [ $? -ne 0 ]; then
echo "$binary could not be found in PATH"
exit 1
fi
}
src="Criteo.Profiling.Tracing"
tests="Criteo.Profiling.Tracing.UTest"
check_availability "dotnet"
dotnet restore
dotnet build $src
dotnet build $tests
dotnet test $tests
| #!/bin/bash
function check_availability() {
binary=$1
which $binary 2>&1 > /dev/null
if [ $? -ne 0 ]; then
echo "$binary could not be found in PATH"
exit 1
fi
}
src="Criteo.Profiling.Tracing"
tests="Criteo.Profiling.Tracing.UTest"
check_availability "dotnet"
dotnet restore \
&& dotnet build $src \
&& dotnet build $tests \
&& dotnet test $tests \
|
Change the hashbang of hivetool to the last working Python binary. | #!/bin/sh
PATH=$PATH:/usr/local/bin
for binary in python python2 python2.0 python2.1 python2.2 python2.3 python2.4 python2.5; do
if $binary -c "" 2>/dev/null; then
$binary - <<EOF
import sys
sitedirs = filter(lambda s: s.endswith("site-packages"), sys.path)
if len(sitedirs) < 1:
sys.exit("Unable to find a site-packages directory in sys.path")
filename = sitedirs[0] + "/hiveconf.pth"
open(filename, "w").write("/usr/lib/hiveconf\n")
print "Created", filename
EOF
fi
done
| #!/bin/sh
PATH=$PATH:/usr/local/bin
last_working=""
# Create .pth files
for binary in python python2 python2.0 python2.1 python2.2 python2.3 python2.4 python2.5; do
if $binary -c "" 2>/dev/null; then
last_working=$binary
$binary - <<EOF
import sys
sitedirs = filter(lambda s: s.endswith("site-packages"), sys.path)
if len(sitedirs) < 1:
sys.exit("Unable to find a site-packages directory in sys.path")
filename = sitedirs[0] + "/hiveconf.pth"
open(filename, "w").write("/usr/lib/hiveconf\n")
print "Created", filename
EOF
fi
done
# Modify hashbang of hivetool
if [ $last_working ]; then
$last_working - <<EOF
import sys
f = open("/usr/bin/hivetool", "r+")
lines = f.readlines()
lines[0] = "#!%s\n" % sys.executable
f.seek(0)
f.writelines(lines)
f.close()
EOF
fi
|
Remove source-highlight which causes hanging problem | # Append necessary path to PATH for Linux.
source /usr/share/autojump/autojump.zsh
if [ $(command_exists source-highlight) ]; then
export LESSOPEN="/usr/share/source-highlight/src-hilite-lesspipe.sh %s"
export LESS=' -R '
alias ccat='/usr/share/source-highlight/src-hilite-lesspipe.sh'
alias cat=ccat
fi
| # Append necessary path to PATH for Linux.
source /usr/share/autojump/autojump.zsh
|
Update for osx input plugin | #!/bin/sh
echo "Initing vim-plug..."
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
echo "Initing fzf..."
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install
brew install astyle
# "wget https://jaist.dl.sourceforge.net/project/astyle/astyle/astyle%203.1/astyle_3.1_linux.tar.gz
# "tar xzvf astyle_3.1_linux.tar.gz
# "cd astyle_3.1_linux/build/gcc
# "make
# "sudo make install
| #!/bin/sh
echo "Initing vim-plug..."
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
echo "Initing for xkbswitch plugin"
git clone https://github.com/myshov/xkbswitch-macosx.git ~/.vim/xkbswitch-macosx
cp ~/.vim/xkbswitch-macosx/bin/xkbswitch /usr/local/bin
git clone https://github.com/myshov/libxkbswitch-macosx.git ~/.vim/libxkbswitch-macosx
cp ~/.vim/libxkbswitch-macosx/bin/libxkbswitch.dylib /usr/local/lib/
echo "Initing fzf..."
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install
brew install astyle
# "wget https://jaist.dl.sourceforge.net/project/astyle/astyle/astyle%203.1/astyle_3.1_linux.tar.gz
# "tar xzvf astyle_3.1_linux.tar.gz
# "cd astyle_3.1_linux/build/gcc
# "make
# "sudo make install
|
Change default Consumer to MediaServerConsumerImpl | #!/usr/bin/env bash
dpkg -s opendct
if [ $? -eq 1 ]
then
wget https://dl.bintray.com/opendct/Beta/releases/0.5.8/opendct_0.5.8-1_amd64.deb
dpkg -i opendct_0.5.8-1_amd64.deb
rm opendct_0.5.8-1_amd64.deb
echo "OpenDCT Install Complete"
echo consumer.dynamic.default=opendct.consumer.MediaServerConsumerImpl >> /etc/opendct/etc/opendct.properties
fi
#set up some permissions
chown -Rv sagetv:sagetv /opt/opendct
chown -Rv 99:sagetv /etc/opendct
chown -v root:sagetv /var/run
chown -v root:sagetv /var/run
chmod 775 /var/run/
chmod 775 /run/
mkdir /var/run/opendct
/opt/opendct/console-only
| #!/usr/bin/env bash
dpkg -s opendct
if [ $? -eq 1 ]
then
wget https://dl.bintray.com/opendct/Beta/releases/0.5.8/opendct_0.5.8-1_amd64.deb
dpkg -i opendct_0.5.8-1_amd64.deb
rm opendct_0.5.8-1_amd64.deb
echo "OpenDCT Install Complete"
# Set to use media server consumer, so we don't have to have access to recording location.
echo "consumer.dynamic.default=opendct.consumer.MediaServerConsumerImpl" >> /etc/opendct/etc/opendct.properties
fi
#set up some permissions
chown -Rv sagetv:sagetv /opt/opendct
chown -Rv 99:sagetv /etc/opendct
chown -v root:sagetv /var/run
chown -v root:sagetv /var/run
chmod 775 /var/run/
chmod 775 /run/
mkdir /var/run/opendct
/opt/opendct/console-only
|
Clean vectors dir after vector tests. | #!/usr/bin/env bash
set -e
if [ ! -f venv/bin/activate ]; then
python3 -m venv venv
fi
. venv/bin/activate
pip install -r "$1/requirements.txt"
PORT=`$1/../get_open_port.py`
$1/generator.py -t uptane --signature-encoding base64 -o vectors --cjson json-subset
if [ "$2" == "valgrind" ]; then
$1/server.py -t uptane --signature-encoding base64 -P $PORT &
else
$1/server.py -t uptane --signature-encoding base64 -P $PORT &
fi
sleep 3
trap 'kill %1' EXIT
if [ "$2" == "valgrind" ]; then
valgrind --track-origins=yes --show-possibly-lost=no --error-exitcode=1 --suppressions=$1/../aktualizr.supp ./aktualizr_uptane_vector_tests vectors/vector-meta.json $PORT
else
./aktualizr_uptane_vector_tests vectors/vector-meta.json $PORT
fi
RES=$?
kill %1
trap - EXIT
trap
exit ${RES}
| #!/usr/bin/env bash
set -e
if [ ! -f venv/bin/activate ]; then
python3 -m venv venv
fi
. venv/bin/activate
pip install -r "$1/requirements.txt"
PORT=`$1/../get_open_port.py`
$1/generator.py -t uptane --signature-encoding base64 -o vectors --cjson json-subset
if [ "$2" == "valgrind" ]; then
$1/server.py -t uptane --signature-encoding base64 -P $PORT &
else
$1/server.py -t uptane --signature-encoding base64 -P $PORT &
fi
sleep 3
trap 'kill %1' EXIT
if [ "$2" == "valgrind" ]; then
valgrind --track-origins=yes --show-possibly-lost=no --error-exitcode=1 --suppressions=$1/../aktualizr.supp ./aktualizr_uptane_vector_tests vectors/vector-meta.json $PORT
else
./aktualizr_uptane_vector_tests vectors/vector-meta.json $PORT
fi
RES=$?
rm -rf vectors
kill %1
trap - EXIT
trap
exit ${RES}
|
Replace tabs with spaces FOLIO-351 | #!/bin/sh
# Makes Docker on Ubuntu listen on tcp port 4243
DF=/lib/systemd/system/docker.service
if test ! -f $DF; then
echo "$DF does not exist"
exit 1
fi
if test ! -w $DF; then
echo "$DF is not writable"
exit 1
fi
sed 's@^ExecStart=/usr/bin/dockerd -H fd://$@ExecStart=/usr/bin/dockerd -H fd:// -H tcp://127.0.0.1:4243@' < $DF >x
if diff x $DF >/dev/null; then
echo "$DF already up to date"
else
cp x $DF
echo "$DF updated"
systemctl daemon-reload
systemctl restart docker
fi
| #!/bin/sh
# Makes Docker on Ubuntu listen on tcp port 4243
DF=/lib/systemd/system/docker.service
if test ! -f $DF; then
echo "$DF does not exist"
exit 1
fi
if test ! -w $DF; then
echo "$DF is not writable"
exit 1
fi
sed 's@^ExecStart=/usr/bin/dockerd -H fd://$@ExecStart=/usr/bin/dockerd -H fd:// -H tcp://127.0.0.1:4243@' < $DF >x
if diff x $DF >/dev/null; then
echo "$DF already up to date"
else
cp x $DF
echo "$DF updated"
systemctl daemon-reload
systemctl restart docker
fi
|
Remove setup that's in the class1 setup instructions | #### List current git config settings
git config -l
#### push.default
# Push the current branch to a remote branch of the same name without specifying
# either the remote or branch name.
# Example: git push
# You must supply the -u option and both remote and branch names the first time.
# Example: git push -u origin mybranch
git config --global push.default simple
#### credential.helper for https remotes
# NOTE: These settings only work with repositories that use the HTTPS protocol.
# Mac OS X: Use the Keychain app to store your GitHub username and password.
# You'll be prompted for your username and password the first time.
git config --global credential.helper osxkeychain
# Linux: Save your password in memory for 15 minutes.
git config --global credential.helper cache
# Linux: Save your password in memory for 1 hour (i.e. 3600 seconds).
git config --global credential.helper 'cache --timeout=3600'
#### alias.df
# View the changes in your working tree relative to the last commit
git config --global alias.df 'diff HEAD'
#### alias.dfs
# View the changes you staged for the next commit relative the last commit
git config --global alias.dfs 'diff --staged'
| #### List current git config settings
git config -l
#### alias.df
# View the changes in your working tree relative to the last commit
git config --global alias.df 'diff HEAD'
#### alias.dfs
# View the changes you staged for the next commit relative the last commit
git config --global alias.dfs 'diff --staged'
|
Install virtualenv outside the REPO root | #!/bin/bash
set -e
[ -x .venv/bin/pip ] || virtualenv .venv
. .venv/bin/activate
pip install -q ghtools
REPO="alphagov/router-api"
gh-status "$REPO" "$GIT_COMMIT" pending -d "\"Build #${BUILD_NUMBER} is running on Jenkins\"" -u "$BUILD_URL" >/dev/null
if ./jenkins.sh; then
gh-status "$REPO" "$GIT_COMMIT" success -d "\"Build #${BUILD_NUMBER} succeeded on Jenkins\"" -u "$BUILD_URL" >/dev/null
exit 0
else
gh-status "$REPO" "$GIT_COMMIT" failure -d "\"Build #${BUILD_NUMBER} failed on Jenkins\"" -u "$BUILD_URL" >/dev/null
exit 1
fi
| #!/bin/bash
set -e
VENV_PATH="${HOME}/venv/${JOB_NAME}"
[ -x ${VENV_PATH}/bin/pip ] || virtualenv ${VENV_PATH}
. ${VENV_PATH}/bin/activate
pip install -q ghtools
REPO="alphagov/router-api"
gh-status "$REPO" "$GIT_COMMIT" pending -d "\"Build #${BUILD_NUMBER} is running on Jenkins\"" -u "$BUILD_URL" >/dev/null
if ./jenkins.sh; then
gh-status "$REPO" "$GIT_COMMIT" success -d "\"Build #${BUILD_NUMBER} succeeded on Jenkins\"" -u "$BUILD_URL" >/dev/null
exit 0
else
gh-status "$REPO" "$GIT_COMMIT" failure -d "\"Build #${BUILD_NUMBER} failed on Jenkins\"" -u "$BUILD_URL" >/dev/null
exit 1
fi
|
Add more documentation around issues with brew-upgraded ruby and vim | #!/bin/bash
# Configure vim, install plugins, and configure plugins if needed
# Install Vundle to get vim plugins defined in .vimrc up and running
git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim
# Execute vim without running a vimrc
vim +PluginInstall +qall
# Command-T requires compiling against the system Ruby
chruby system
ruby -C ~/.vim/bundle/command-t/ruby/command-t/ext/command-t/ extconf.rb
make -C ~/.vim/bundle/command-t/ruby/command-t/ext/command-t/
| #!/bin/bash
# Configure vim, install plugins, and configure plugins if needed
# Install Vundle to get vim plugins defined in .vimrc up and running
git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim
# Execute vim without running a vimrc
vim +PluginInstall +qall
# Command-T requires compiling against the system Ruby
# If there are issues with Vim and Ruby, make the asdf system ruby point to
# the latest brew-installed version of Ruby:
# $ brew link --overwrite ruby
#
# Install process when new versions of brew vim are installed:
# $ cd ~/.vim/bundle/command-t/ruby/command-t/ext/command-t/
# $ make clean
# $ ruby extconf.rb
# $ make
#
# Helpful links:
# - https://superuser.com/questions/1096438/brew-upgrade-broke-vim-on-os-x-dyld-library-not-loaded
# Issues with ld flags:
# - https://github.com/wincent/command-t/issues/316
chruby system
ruby -C ~/.vim/bundle/command-t/ruby/command-t/ext/command-t/ extconf.rb
make -C ~/.vim/bundle/command-t/ruby/command-t/ext/command-t/
|
Apply the patches from babeltrace-zipkin's directory | #!/bin/bash
cd $HOME
sudo apt-get install -y git
sudo apt-get install -y python3-pip
sudo apt-get install -y default-jre
sudo apt-get install -y lttng-tools
sudo apt-get install -y lttng-modules-dkms
sudo apt-get install -y liblttng-ust-dev
git clone https://github.com/linuxbox2/blkin && cd ./blkin/blkin-lib
make
sudo make install
cd $HOME
sudo pip3 install --upgrade pip
sudo pip3 install scribe
sudo pip3 install thrift3babeltrace
sudo pip3 install facebook-scribe
sudo pip3 install thriftpy
sudo pip3 install scribe_logger
DIR=$(python3 -c "import site; print(site.getsitepackages()[0])")
echo "Detected Python3 libraries at the following location: "
echo $DIR
sudo patch -p0 --directory=$DIR < ./patches/scribe.py.patch
sudo patch -p0 --directory=$DIR < ./patches/FacebookService.py.patch
sudo apt-get install -y babeltrace
sudo apt-get install -y python3-babeltrace
git clone https://github.com/openzipkin/zipkin && cd zipkin
wget -O zipkin.jar 'https://search.maven.org/remote_content?g=io.zipkin.java&a=zipkin-server&v=LATEST&c=exec'
java -jar zipkin.jar &
cd ..
git clone https://github.com/vears91/babeltrace-zipkin | #!/bin/bash
cd $HOME
sudo apt-get install -y git
sudo apt-get install -y python3-pip
sudo apt-get install -y default-jre
sudo apt-get install -y lttng-tools
sudo apt-get install -y lttng-modules-dkms
sudo apt-get install -y liblttng-ust-dev
git clone https://github.com/linuxbox2/blkin && cd ./blkin/blkin-lib
make
sudo make install
cd $HOME
sudo pip3 install --upgrade pip
sudo pip3 install scribe
sudo pip3 install thrift3babeltrace
sudo pip3 install facebook-scribe
sudo pip3 install thriftpy
sudo pip3 install scribe_logger
sudo apt-get install -y babeltrace
sudo apt-get install -y python3-babeltrace
git clone https://github.com/openzipkin/zipkin && cd zipkin
wget -O zipkin.jar 'https://search.maven.org/remote_content?g=io.zipkin.java&a=zipkin-server&v=LATEST&c=exec'
java -jar zipkin.jar &
cd ..
git clone https://github.com/vears91/babeltrace-zipkin && cd babeltrace-zipkin
DIR=$(python3 -c "import site; print(site.getsitepackages()[0])")
echo "Detected Python3 libraries at the following location: "
echo $DIR
sudo patch -p0 --directory=$DIR < ./patches/scribe.py.patch
sudo patch -p0 --directory=$DIR < ./patches/FacebookService.py.patch |
Install .NET Core 2.1 script update | # Install .NET Core (see https://www.microsoft.com/net/core#ubuntu)
# Add the dotnet apt-get feed (for .NET Core)
sudo sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ trusty main" > /etc/apt/sources.list.d/dotnetdev.list'
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 417A0893
# Update apt-get cache
sudo apt-get update
# Install .NET Core SDK
sudo apt-get install dotnet-sdk-2.1 -y
# Install Powershell (https://www.rootusers.com/how-to-install-powershell-on-linux)
# Install Powershell dependencies
sudo apt-get install libunwind8 libicu52 -y
# Download and install Powershell
wget https://github.com/PowerShell/PowerShell/releases/download/v6.0.0-alpha.11/powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
sudo dpkg -i powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
rm -f powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
# Install Python Pip and icdiff (http://www.jefftk.com/icdiff)
sudo apt-get -y install python-pip
pip -V
sudo pip install --upgrade icdiff
| # Install .NET Core # https://www.microsoft.com/net/download/linux-package-manager/ubuntu14-04/sdk-current
# Register Microsoft key and feed
wget -q https://packages.microsoft.com/config/ubuntu/14.04/packages-microsoft-prod.deb
sudo dpkg -i packages-microsoft-prod.deb
# Install .NET Core SDK
sudo apt-get install apt-transport-https
sudo apt-get update
sudo apt-get install dotnet-sdk-2.1 -y
# Install Powershell (https://www.rootusers.com/how-to-install-powershell-on-linux)
# Install Powershell dependencies
sudo apt-get install libunwind8 libicu52 -y
# Download and install Powershell
wget https://github.com/PowerShell/PowerShell/releases/download/v6.0.0-alpha.11/powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
sudo dpkg -i powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
rm -f powershell_6.0.0-alpha.11-1ubuntu1.14.04.1_amd64.deb
# Install Python Pip and icdiff (http://www.jefftk.com/icdiff)
sudo apt-get -y install python-pip
pip -V
sudo pip install --upgrade icdiff
|
Revert "Don't attempt to use OProfile" | CMAKE_COMMON_VARIABLES=" -DCMAKE_INSTALL_PREFIX=$PREFIX \
-DCMAKE_BUILD_TYPE=Release -DLLVM_TARGETS_TO_BUILD=host \
-DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_UTILS=OFF \
-DLLVM_INCLUDE_DOCS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF \
-DLLVM_ENABLE_TERMINFO=OFF \
"
platform='unknown'
unamestr="$(uname)"
machine="$(uname -m)"
if [[ "$unamestr" == 'Linux' ]]; then
platform='linux'
elif [[ "$unamestr" == 'FreeBSD' ]]; then
platform='freebsd'
elif [[ "$unamestr" == 'Darwin' ]]; then
platform='osx'
fi
# Note you may need to enable RH devtoolset-2 if building on an
# old RH or CentOS system
if [ -n "$MACOSX_DEPLOYMENT_TARGET" ]; then
# OSX needs 10.7 or above with libc++ enabled
export MACOSX_DEPLOYMENT_TARGET=10.9
fi
# Use CMake-based build procedure
mkdir build
cd build
if [ "$platform" == 'linux' -a "$machine" != 'armv7l' ]; then
cmake $CMAKE_COMMON_VARIABLES ..
else
cmake $CMAKE_COMMON_VARIABLES ..
fi
if [ -z "$CPU_COUNT"]; then
CPU_COUNT=4
fi
make -j $CPU_COUNT
make install
| CMAKE_COMMON_VARIABLES=" -DCMAKE_INSTALL_PREFIX=$PREFIX \
-DCMAKE_BUILD_TYPE=Release -DLLVM_TARGETS_TO_BUILD=host \
-DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_UTILS=OFF \
-DLLVM_INCLUDE_DOCS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF \
-DLLVM_ENABLE_TERMINFO=OFF \
"
platform='unknown'
unamestr="$(uname)"
machine="$(uname -m)"
if [[ "$unamestr" == 'Linux' ]]; then
platform='linux'
elif [[ "$unamestr" == 'FreeBSD' ]]; then
platform='freebsd'
elif [[ "$unamestr" == 'Darwin' ]]; then
platform='osx'
fi
# Note you may need to enable RH devtoolset-2 if building on an
# old RH or CentOS system
if [ -n "$MACOSX_DEPLOYMENT_TARGET" ]; then
# OSX needs 10.7 or above with libc++ enabled
export MACOSX_DEPLOYMENT_TARGET=10.9
fi
# Use CMake-based build procedure
mkdir build
cd build
if [ "$platform" == 'linux' -a "$machine" != 'armv7l' ]; then
cmake $CMAKE_COMMON_VARIABLES -DLLVM_USE_OPROFILE=ON ..
else
cmake $CMAKE_COMMON_VARIABLES ..
fi
if [ -z "$CPU_COUNT"]; then
CPU_COUNT=4
fi
make -j $CPU_COUNT
make install
|
Add default schema to script. | #!/usr/bin/env bash
# initdb dir
# postgres -D dir
USER=patrick2
PASS=5555
DB=subticket
SCHEMA=subticket
createdb "$DB"
psql "$DB" <<EOF
create user "$USER" password '$PASS';
create schema "$SCHEMA" authorization "$USER";
grant all privileges on schema "$SCHEMA" to "$USER";
EOF | #!/usr/bin/env bash
# initdb dir
# postgres -D dir
USER=patrick2
PASS=5555
DB=subticket
SCHEMA=subticket
createdb "$DB"
psql "$DB" <<EOF
create user "$USER" password '$PASS';
create schema "$SCHEMA" authorization "$USER";
grant all privileges on schema "$SCHEMA" to "$USER";
alter user "$USER" set search_path to "$SCHEMA";
EOF
|
Add composer bin to PATH | #!/usr/bin/env bash
set -ev
composer global require friendsofphp/php-cs-fixer | #!/usr/bin/env bash
set -ev
composer global require friendsofphp/php-cs-fixer
export PATH="${PATH}:${HOME}/.composer/vendor/bin" |
Add an option to build the statically linked version | #!/usr/bin/env bash
if [ ! -e Debug ]; then
mkdir Debug
fi
cd Debug
cmake -DCMAKE_BUILD_TYPE=Debug ..
cd ..
cmake --build Debug --target install
| #!/usr/bin/env bash
static=0
while getopts s opt
do
case $opt in
s) static=1 ;;
*) exit 1 ;;
esac
done
if [ ! -e Debug ]; then
mkdir Debug
fi
if [ $static -gt 0 ]
then
echo "Using static linking against BlueZ"
export BluezLinkType="static"
fi
cd Debug
cmake -DCMAKE_BUILD_TYPE=Debug ..
cd ..
cmake --build Debug --target install
|
Update to install for sqlitebrowser | #!/bin/sh
brew install azure-cli
brew cask install diffmerge
brew cask install docker
brew cask install firefox
brew cask install ghost
brew cask install iterm
brew install jsawk
brew install jsdoc3
brew cask install pencil
brew cask install smartgit
brew cask install wireshark
brew cask install visual-studio-code
brew cask install visual-studio-code-insiders
brew cask install xmind
brew cask install yarn
brew install youtube-dl
#databases
brew cask install mongodb
brew cask install robomongo
brew install sqlitebrowser
#install NVM to enable
brew install NVM
mkdir ~/.nvm
touch ~/.bash_profile
echo export NVM_DIR=~/.nvm >> ~/.bash_profile
echo source $(brew --prefix nvm)/nvm.sh >> ~/.bash_profile
source ~/.bash_profile
## Still testing this and need to validate
# #install OpenSSL for .NET Core
# brew install openssl
# mkdir -p /usr/local/lib
# ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/
# ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/
# #install .net core
# brew install dotnet
| #!/bin/sh
brew install azure-cli
brew cask install diffmerge
brew cask install docker
brew cask install firefox
brew cask install ghost
brew cask install iterm
brew install jsawk
brew install jsdoc3
brew cask install pencil
brew cask install smartgit
brew cask install wireshark
brew cask install visual-studio-code
brew cask install visual-studio-code-insiders
brew cask install xmind
brew cask install yarn
brew install youtube-dl
#databases
brew cask install mongodb
brew cask install robomongo
brew cask install sqlitebrowser
#install NVM to enable
brew install NVM
mkdir ~/.nvm
touch ~/.bash_profile
echo export NVM_DIR=~/.nvm >> ~/.bash_profile
echo source $(brew --prefix nvm)/nvm.sh >> ~/.bash_profile
source ~/.bash_profile
## Still testing this and need to validate
# #install OpenSSL for .NET Core
# brew install openssl
# mkdir -p /usr/local/lib
# ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/
# ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/
# #install .net core
# brew install dotnet
|
Clean tmp folder after unzipping KoreBuild | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test ! -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" |
Check if there are conflicts on master when running the build | #!/bin/bash -x
bundle install --path "${HOME}/bundles/${JOB_NAME}"
dropdb transition_test
createdb --encoding=UTF8 --template=template0 transition_test
sudo -u postgres psql -d transition_test -c 'CREATE EXTENSION IF NOT EXISTS pgcrypto'
cat db/structure.sql | psql -d transition_test
RACK_ENV=test bundle exec rake --trace
| #!/bin/bash -x
# Try to merge master into the current branch, and abort if it doesn't exit
# cleanly (ie there are conflicts). This will be a noop if the current branch
# is master.
git merge --no-commit origin/master || git merge --abort
bundle install --path "${HOME}/bundles/${JOB_NAME}"
dropdb transition_test
createdb --encoding=UTF8 --template=template0 transition_test
sudo -u postgres psql -d transition_test -c 'CREATE EXTENSION IF NOT EXISTS pgcrypto'
cat db/structure.sql | psql -d transition_test
RACK_ENV=test bundle exec rake --trace
|
Update system test image name | #!/bin/bash
set -euf -o pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
REPO_DIR=$(dirname "$SCRIPT_DIR")
OUT_DIR="${SCRIPT_DIR}/out"
IMAGE='registry.spin.nersc.gov/metatlas_test/metatlas_ci01:v1.2.0'
PORT=8888
while [[ "$#" -gt 0 ]]; do
case "$1" in
-i|--image) IMAGE="$2"; shift ;;
-o|--out_dir) OUT_DIR="$2"; shift ;;
-h|--help)
echo -e "$0 [options]"
echo ""
echo " -h, --help show this command reference"
echo " -i, --image string name of docker image to run (default ${IMAGE})"
echo " -o, --out_dir string directory to write output files (default ${OUT_DIR})"
exit 0
;;
*)echo "Unknown parameter passed: $1"; exit 1 ;;
esac
shift
done
rm -rf "$OUT_DIR"
mkdir -p "$OUT_DIR"
docker run \
--rm \
-p "${PORT}:${PORT}" \
-v "${REPO_DIR}:/src" \
-v "${OUT_DIR}:/out" \
"$IMAGE"
| #!/bin/bash
set -euf -o pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
REPO_DIR=$(dirname "$SCRIPT_DIR")
OUT_DIR="${SCRIPT_DIR}/out"
IMAGE='registry.spin.nersc.gov/metatlas_test/metatlas_ci01:v1.3.0'
PORT=8888
while [[ "$#" -gt 0 ]]; do
case "$1" in
-i|--image) IMAGE="$2"; shift ;;
-o|--out_dir) OUT_DIR="$2"; shift ;;
-h|--help)
echo -e "$0 [options]"
echo ""
echo " -h, --help show this command reference"
echo " -i, --image string name of docker image to run (default ${IMAGE})"
echo " -o, --out_dir string directory to write output files (default ${OUT_DIR})"
exit 0
;;
*)echo "Unknown parameter passed: $1"; exit 1 ;;
esac
shift
done
rm -rf "$OUT_DIR"
mkdir -p "$OUT_DIR"
docker run \
--rm \
-p "${PORT}:${PORT}" \
-v "${REPO_DIR}:/src" \
-v "${OUT_DIR}:/out" \
"$IMAGE"
|
Remove deprecated debug in phpunit script | #!/bin/sh
scriptdir=$(dirname ${0})
rm ${1}/build/logs/log.xml ${1}/build/logs/log.html
set -e
echo "PRE xsltproc"
echo "<testsuites />" | xsltproc \
--output ${1}/build/logs/log.xml \
--stringparam basedir ${1} \
${scriptdir}/merge.xslt -
echo "POST xsltproc merge"
xsltproc \
--stringparam projectname ${2} \
--output ${1}/build/logs/log.html \
${scriptdir}/phpunit.log.xslt \
${1}/build/logs/log.xml
echo "POST xsltproc phpunit"
| #!/bin/sh
scriptdir=$(dirname ${0})
rm ${1}/build/logs/log.xml ${1}/build/logs/log.html
echo "<testsuites />" | xsltproc \
--output ${1}/build/logs/log.xml \
--stringparam basedir ${1} \
${scriptdir}/merge.xslt -
xsltproc \
--stringparam projectname ${2} \
--output ${1}/build/logs/log.html \
${scriptdir}/phpunit.log.xslt \
${1}/build/logs/log.xml
|
Use the js documentation tool in node_modules/.bin, because it doesn't seem to exist anywhere else | #! /usr/bin/env bash
set -x
echo "Generating documentation..."
find . -type f -name "*.js" -not -path "*/node_modules/*" -not -path "*/docs/*" -not -path "*/apps/bin" -not -path "*/ui/*" | xargs documentation build -f html -o docs --sort-order alpha
echo "New doc is in docs/index.html"
#echo "Hello, Travis! You there?"
#if [[ -z "${TRAVIS}" ]]; then
# echo "No Travis here. I'm done here."
# exit;
#else
# echo "Travis: Yup, here."
# cd docs || exit
# git config --global user.email "travis@travis-ci.org"
# git config --global user.name "Travis-CI"
# git init
# git add .
# git commit -m "Latest docs Travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
# git push -v --force "https://${GH_TOKEN}@github.com/APTrust/dart.git" master:gh-pages
#fi
| #! /usr/bin/env bash
set -x
echo "Generating documentation..."
find . -type f -name "*.js" -not -path "*/node_modules/*" -not -path "*/docs/*" -not -path "*/apps/bin" -not -path "*/ui/*" | xargs ./node_modules/.bin/documentation build -f html -o docs --sort-order alpha
echo "New doc is in docs/index.html"
#echo "Hello, Travis! You there?"
#if [[ -z "${TRAVIS}" ]]; then
# echo "No Travis here. I'm done here."
# exit;
#else
# echo "Travis: Yup, here."
# cd docs || exit
# git config --global user.email "travis@travis-ci.org"
# git config --global user.name "Travis-CI"
# git init
# git add .
# git commit -m "Latest docs Travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
# git push -v --force "https://${GH_TOKEN}@github.com/APTrust/dart.git" master:gh-pages
#fi
|
Add variable for BB full repo name. | #!/usr/bin/env bash
# Create a Pull Request on BitBucket from current branch
# NOTE: branch must exist on server already
# file `pr-user.sh` must have following contents:
# CREDENTIALS="bb_user:bb_pass";
# REVIEWER="username";
source $(dirname $0)/pr-user.sh
BRANCH=`git rev-parse --abbrev-ref HEAD`
TITLE=`git log -1 --format=%s`
read -d '' JSON << EOF
{
"title": "$TITLE",
"source": {
"branch": {
"name": "$BRANCH"
},
"repository": {
"full_name": "parkeon-ondemand/azimutserver"
}
},
"destination": {
"branch": {
"name": "master"
}
},
"reviewers": [
{
"username": "$REVIEWER"
}
],
"close_source_branch": true
}
EOF
URL=https://bitbucket.org/api/2.0/repositories/parkeon-ondemand/azimutserver/pullrequests
RESPONSE=`curl --silent -X POST -H "Content-Type: application/json" -u $CREDENTIALS $URL -d "$JSON"`
# Display URL to newly created PR
PR_URL=`echo $RESPONSE | php -r '$res=json_decode(file_get_contents("php://stdin"), true); print $res["links"]["html"]["href"];'`
echo
echo "Check PR in the browser:"
echo $PR_URL
| #!/usr/bin/env bash
# Create a Pull Request on BitBucket from current branch
# NOTE: branch must exist on server already
# file `pr-user.sh` must have following contents:
# CREDENTIALS="bb_user:bb_pass";
# REVIEWER="username";
source $(dirname $0)/pr-user.sh
REPO="username/reponame"
BRANCH=`git rev-parse --abbrev-ref HEAD`
TITLE=`git log -1 --format=%s`
read -d '' JSON << EOF
{
"title": "$TITLE",
"source": {
"branch": {
"name": "$BRANCH"
},
"repository": {
"full_name": "$REPO"
}
},
"destination": {
"branch": {
"name": "master"
}
},
"reviewers": [
{
"username": "$REVIEWER"
}
],
"close_source_branch": true
}
EOF
URL=https://bitbucket.org/api/2.0/repositories/$REPO/pullrequests
RESPONSE=`curl --silent -X POST -H "Content-Type: application/json" -u $CREDENTIALS $URL -d "$JSON"`
# Display URL to newly created PR
PR_URL=`echo $RESPONSE | php -r '$res=json_decode(file_get_contents("php://stdin"), true); print $res["links"]["html"]["href"];'`
echo
echo "Check PR in the browser:"
echo $PR_URL
|
Remove redundant check for hostname part in trust1 script | #!/bin/bash
# Sets the hostname to become subdomain of AD.
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
if [[ $1 != '' ]]
then
NETBIOS=DOM`echo $1 | cut -d- -f2`
fi
# Check is hostname was properly set up during IPA installation
if [[ `hostname | grep tbad` == '' ]]
then
echo "The hostname is not properly set for auto trust installation."
exit 1
fi
# Obtain admin credentials
echo $PASSWORD | kinit admin
# Install support for trusts
sudo ipa-adtrust-install --netbios-name=$NETBIOS -a $PASSWORD --add-sids
# Configure DNS only if on master
if [[ $1 == '' ]]
then
ipa dnszone-add $AD_DOMAIN --name-server=advm.$AD_DOMAIN --admin-email="hostmaster@$AD_DOMAIN.com" --force --forwarder=$AD_IP --forward-policy=only
fi
| #!/bin/bash
# Sets the hostname to become subdomain of AD.
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
if [[ $1 != '' ]]
then
NETBIOS=DOM`echo $1 | cut -d- -f2`
fi
# Obtain admin credentials
echo $PASSWORD | kinit admin
# Install support for trusts
sudo ipa-adtrust-install --netbios-name=$NETBIOS -a $PASSWORD --add-sids
# Configure DNS only if on master
if [[ $1 == '' ]]
then
ipa dnszone-add $AD_DOMAIN --name-server=advm.$AD_DOMAIN --admin-email="hostmaster@$AD_DOMAIN.com" --force --forwarder=$AD_IP --forward-policy=only --ip-address=$AD_IP
fi
|
Use `git ls-files` to find go files | #!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. $(dirname "${BASH_SOURCE}")/common.sh
GOFMT="gofmt -s -w"
bad_files=$(cat ${KUBE_ROOT}/hack/.packages | grep -v "^k8s.io/kops$" | sed -e s-^k8s.io/kops/-- | xargs -I {} $GOFMT -l {})
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOFMT' needs to be run on the following files: "
echo "${bad_files}"
echo "!!! Please run: make gofmt"
exit 1
fi
| #!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. $(dirname "${BASH_SOURCE}")/common.sh
GOFMT="gofmt -s -w"
bad_files=$(git ls-files "*.go" | grep -v vendor | xargs -I {} $GOFMT -l {})
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOFMT' needs to be run on the following files: "
echo "${bad_files}"
echo "!!! Please run: make gofmt"
exit 1
fi
|
Add sbin, and fix build/install for dpkg | pkg_name=dpkg
pkg_origin=core
pkg_version=1.18.24
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_license=('GPL-2.0')
pkg_upstream_url=https://wiki.debian.org/dpkg
pkg_description="dpkg is a package manager for Debian-based systems"
pkg_source=http://http.debian.net/debian/pool/main/d/${pkg_name}/${pkg_name}_${pkg_version}.tar.xz
pkg_shasum=d853081d3e06bfd46a227056e591f094e42e78fa8a5793b0093bad30b710d7b4
pkg_deps=(
core/bzip2
core/glibc
core/tar
core/zlib
core/xz
)
pkg_build_deps=(
core/autoconf
core/automake
core/bzip2
core/gcc
core/gettext
core/libtool
core/patch
core/make
core/ncurses
core/perl
core/pkg-config
core/xz
core/zlib
)
pkg_bin_dirs=(bin)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib)
| pkg_name=dpkg
pkg_origin=core
pkg_version=1.18.24
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_license=('GPL-2.0')
pkg_upstream_url=https://wiki.debian.org/dpkg
pkg_description="dpkg is a package manager for Debian-based systems"
pkg_source=http://http.debian.net/debian/pool/main/d/${pkg_name}/${pkg_name}_${pkg_version}.tar.xz
pkg_shasum=d853081d3e06bfd46a227056e591f094e42e78fa8a5793b0093bad30b710d7b4
pkg_deps=(
core/bzip2
core/glibc
core/tar
core/zlib
core/xz
)
pkg_build_deps=(
core/autoconf
core/automake
core/bzip2
core/gcc
core/gettext
core/libtool
core/patch
core/make
core/ncurses
core/perl
core/pkg-config
core/xz
core/zlib
)
pkg_bin_dirs=(bin sbin)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib)
pkg_description="Debian Package Manager"
do_build() {
export prefix=$pkg_prefix
do_default_build
}
do_install() {
export prefix=$pkg_prefix
do_default_install
}
|
Update with learnings from Outages. | #!/bin/bash
# Thanks to: http://sfviapgh.com/blog/2016/2/18/how-to-deploy-rails-with-aws-codedeploy
export RAILS_ENV=production
cd /var/www/autism-funding.com/html
sudo chmod 750 bin/*
# need to set up the database (the user)
# need rails db:create the first time
bin/bundle install # --path vendor/bundle
bin/rails db:migrate
bin/rails assets:clobber
bin/rails assets:precompile
| #!/bin/bash
# Thanks to: http://sfviapgh.com/blog/2016/2/18/how-to-deploy-rails-with-aws-codedeploy
. /home/ubuntu/outages.secret
cd /var/www/autism-funding.com/html
sudo chmod 750 bin/*
logs="log/production.log log/puma-production.stdout.log log/puma-production.stderr.log"
touch $logs
sudo chown :www-data $logs
sudo chmod 660 $logs
# need to set up the database (the user)
# need rails db:create the first time
bin/bundle install --deployment # --path vendor/bundle
bin/rails db:migrate
bin/rails assets:clobber
bin/rails assets:precompile
|
Fix for occasional timing issue | #!/bin/bash
make
# Shutdown both prus if running
echo "4a334000.pru0" > /sys/bus/platform/drivers/pru-rproc/unbind 2> /dev/null
echo "4a338000.pru1" > /sys/bus/platform/drivers/pru-rproc/unbind 2> /dev/null
# Load the firmwares
cp src/firmware/ths1206/pru0/gen/*.out /lib/firmware/am335x-pru0-fw &&
cp src/firmware/ths1206/pru1/gen/*.out /lib/firmware/am335x-pru1-fw ||
exit 1
# Start up both prus
echo "4a334000.pru0" > /sys/bus/platform/drivers/pru-rproc/bind
echo "4a338000.pru1" > /sys/bus/platform/drivers/pru-rproc/bind
# Start up userspace adc driver
./src/software/adc-controller/adc-controller.out | #!/bin/bash
make
# Shutdown both prus if running
echo "4a334000.pru0" > /sys/bus/platform/drivers/pru-rproc/unbind 2> /dev/null
echo "4a338000.pru1" > /sys/bus/platform/drivers/pru-rproc/unbind 2> /dev/null
# Load the firmwares
cp src/firmware/ths1206/pru0/gen/*.out /lib/firmware/am335x-pru0-fw &&
cp src/firmware/ths1206/pru1/gen/*.out /lib/firmware/am335x-pru1-fw ||
exit 1
# Start up both prus
echo "4a334000.pru0" > /sys/bus/platform/drivers/pru-rproc/bind
echo "4a338000.pru1" > /sys/bus/platform/drivers/pru-rproc/bind
# Wait for rpmsg driver to set up
# TODO: maybe add a check that /dev/rpmsg_pru31 exists?
sleep 0.5
# Start up userspace adc driver
./src/software/adc-controller/adc-controller.out |
Add source to enable script after install | #!/bin/bash
echo "------- LOAD VARS --------"
source root/_bash_profile
source core/base.bash
source core/fwk-admin.bash
echo "------- DEPLOY bash --------"
bashdeploy
bashprofile
#echo "------- DL completion --------"
#gcompletion
| #!/bin/bash
echo "------- LOAD VARS --------"
source root/_bash_profile
source core/base.bash
source core/fwk-admin.bash
echo "------- DEPLOY bash --------"
bashdeploy
bashprofile
#echo "------- DL completion --------"
#gcompletion
source ~/.bash_profile
|
Clean tmp folder after unzipping KoreBuild | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test ! -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/dev.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" |
Use seq instead of zsh comprehension | #!/bin/sh
mkdir -p tmp
for i in {1..12}; do
sed "s/OMP_NUM_THREADS=.*/OMP_NUM_THREADS=$i/g" job.slurm |
sed "s/job-name=.*/job-name=batch-$i/g" > tmp/job$i.slurm
sbatch tmp/job$i.slurm
done
| #!/bin/sh
mkdir -p tmp
for i in $(seq 1 12); do
sed "s/OMP_NUM_THREADS=.*/OMP_NUM_THREADS=$i/g" job.slurm |
sed "s/job-name=.*/job-name=batch-$i/g" > tmp/job$i.slurm
sbatch tmp/job$i.slurm
done
|
Install more brew things. Do not exit scripts, it kills the process | #!/bin/sh
#
# Homebrew
#
# This installs some of the common dependencies needed (or at least desired)
# using Homebrew.
# Check for Homebrew
if test ! $(which brew)
then
echo " Installing Homebrew for you."
# Install the correct homebrew for each OS type
if test "$(uname)" = "Darwin"
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
elif test "$(expr substr $(uname -s) 1 5)" = "Linux"
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/linuxbrew/go/install)"
fi
fi
# Install homebrew packages
brew install grc coreutils spark
exit 0
| #!/bin/sh
#
# Homebrew
#
# This installs some of the common dependencies needed (or at least desired)
# using Homebrew.
# Check for Homebrew
if test ! $(which brew)
then
echo " Installing Homebrew for you."
# Install the correct homebrew for each OS type
if test "$(uname)" = "Darwin"
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
elif test "$(expr substr $(uname -s) 1 5)" = "Linux"
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/linuxbrew/go/install)"
fi
fi
# Install homebrew packages
brew install grc coreutils spark nvm yarn
|
Add alias for updating both shell (oh my zsh) && apps (via brew) | # grc overides for ls
# Made possible through contributions from generous benefactors like
# `brew install coreutils`
if $(gls &>/dev/null)
then
alias ls="gls -F --color"
alias l="gls -lAh --color"
alias ll="gls -l --color"
alias la='gls -A --color'
fi | # grc overides for ls
# Made possible through contributions from generous benefactors like
# `brew install coreutils`
if $(gls &>/dev/null)
then
alias ls="gls -F --color"
alias l="gls -lAh --color"
alias ll="gls -l --color"
alias la='gls -A --color'
fi
alias upgrade="echo '\033[0;34mUpgrade shell (oh my zsh) and apps (via brew)\033[0m' && antigen update && brew-upgrades"
|
Add maxResultSize parameter for LDAExample | #!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
current_dir=`dirname "$0"`
current_dir=`cd "$current_dir"; pwd`
root_dir=${current_dir}/../../../../../
workload_config=${root_dir}/conf/workloads/ml/lda.conf
. "${root_dir}/bin/functions/load_bench_config.sh"
enter_bench LDA ${workload_config} ${current_dir}
show_bannar start
rmr_hdfs $OUTPUT_HDFS || true
SIZE=`dir_size $INPUT_HDFS`
START_TIME=`timestamp`
run_spark_job com.intel.hibench.sparkbench.ml.LDAExample $INPUT_HDFS $OUTPUT_HDFS $NUM_TOPICS_LDA
END_TIME=`timestamp`
gen_report ${START_TIME} ${END_TIME} ${SIZE}
show_bannar finish
leave_bench
| #!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
current_dir=`dirname "$0"`
current_dir=`cd "$current_dir"; pwd`
root_dir=${current_dir}/../../../../../
workload_config=${root_dir}/conf/workloads/ml/lda.conf
. "${root_dir}/bin/functions/load_bench_config.sh"
enter_bench LDA ${workload_config} ${current_dir}
show_bannar start
rmr_hdfs $OUTPUT_HDFS || true
SIZE=`dir_size $INPUT_HDFS`
START_TIME=`timestamp`
run_spark_job com.intel.hibench.sparkbench.ml.LDAExample $INPUT_HDFS $OUTPUT_HDFS $NUM_TOPICS_LDA $MAXRESULTSIZE_LDA
END_TIME=`timestamp`
gen_report ${START_TIME} ${END_TIME} ${SIZE}
show_bannar finish
leave_bench
|
Revert "OLMIS-6954: Temporarily added creating signing directory for CI" | #!/bin/bash
set +x
set -e
function finish {
docker-compose -f docker-compose.builder.yml down --volumes
}
trap finish EXIT
sudo rm -f .env
mkdir .signing/
cp $SECRING_FILE .signing/secring.gpg
cp $ENV_FILE .env
if [ "$GIT_BRANCH" != "master" ]; then
sed -i '' -e "s#^TRANSIFEX_PUSH=.*#TRANSIFEX_PUSH=false#" .env 2>/dev/null || true
fi
docker-compose -f docker-compose.builder.yml run -e BUILD_NUMBER=$BUILD_NUMBER -e GIT_BRANCH=$GIT_BRANCH \
-e SIGNING_KEYID=$SIGNING_KEYID -e SIGNING_PASSWORD=$SIGNING_PASSWORD -e OSSRH_USERNAME=$OSSRH_USERNAME -e OSSRH_PASSWORD=$OSSRH_PASSWORD builder
docker-compose -f docker-compose.builder.yml build image
docker tag openlmis/stockmanagement:latest openlmis/stockmanagement:$STAGING_VERSION
docker push openlmis/stockmanagement:$STAGING_VERSION | #!/bin/bash
set +x
set -e
function finish {
docker-compose -f docker-compose.builder.yml down --volumes
}
trap finish EXIT
sudo rm -f .signing/secring.gpg
sudo rm -f .env
cp $SECRING_FILE .signing/secring.gpg
cp $ENV_FILE .env
if [ "$GIT_BRANCH" != "master" ]; then
sed -i '' -e "s#^TRANSIFEX_PUSH=.*#TRANSIFEX_PUSH=false#" .env 2>/dev/null || true
fi
docker-compose -f docker-compose.builder.yml run -e BUILD_NUMBER=$BUILD_NUMBER -e GIT_BRANCH=$GIT_BRANCH \
-e SIGNING_KEYID=$SIGNING_KEYID -e SIGNING_PASSWORD=$SIGNING_PASSWORD -e OSSRH_USERNAME=$OSSRH_USERNAME -e OSSRH_PASSWORD=$OSSRH_PASSWORD builder
docker-compose -f docker-compose.builder.yml build image
docker tag openlmis/stockmanagement:latest openlmis/stockmanagement:$STAGING_VERSION
docker push openlmis/stockmanagement:$STAGING_VERSION |
Allow setting of image as Resin environment variable | #!/bin/bash
echo "Rendering image"
if [ -z "$ROTATE" ]; then
ROTATE=0
fi
convert images/image.png -rotate $ROTATE images/rotated.png
fbi -d /dev/fb1 -T 1 -noverbose -a images/rotated.png
while true; do
sleep 1
done
| #!/bin/bash
IMAGE=${IMAGE:-"image.png"}
echo "Rendering image"
ROTATE=${ROTATE:-0}
convert images/"$IMAGE" -rotate $ROTATE images/rotated.png
fbi -d /dev/fb1 -T 1 -noverbose -a images/rotated.png
while true; do
sleep 1
done
|
Watch html and css files by using "cp -u" | #!/usr/bin/env bash
watchify app/app.js -t [babelify app] --extension=.jsx --debug -o build/app.js -v &
browser-sync start --server build --files build/app.js --port 3000 --reload-delay 100 &
wait | #!/usr/bin/env bash
watchify app/app.js -t [babelify app] --extension=.jsx --debug -o build/app.js -v &
while true; do cp -u ./app/*.html ./app/*.css ./build/; sleep 2; done &
#browser-sync start --server build --files build/app.js --port 3000 --reload-delay 100 &
browser-sync start --server build --files build/* --port 3000 --reload-delay 100 &
wait |
Add summary label, newline, indent | #!/bin/bash -e
#
# gd-sum.sh - brief sum of line additions and deletions in a git
#
# Usage
# ./gd-sum
#
# Description
# meant to indicate 'red' or 'green' commit
# Output:
# f = files changed
# l = lines insert or deleted
# Format:
# f:l
git diff --shortstat | \
awk -f <(cat - <<-'EOD'
{
red = 0;
files = $1;
sum = $4 - $6;
if (sum < 0)
red = 1;
sum = sum < 0 ? sum * -1 : sum;
}
END {
if (sum == "") {
printf "No changes in tracked files.\n"
exit 0
}
printf files ":"
if (red) {
printf "\033[1;31m" sum "\033[0m"
} else {
printf "\033[1;32m" sum "\033[0m"
}
printf "\n"
}
EOD
)
| #!/bin/bash -e
# Summary
# gd-sum.sh - brief sum of line additions and deletions in a git
#
# Usage
# ./gd-sum
#
# Description
# meant to indicate 'red' or 'green' commit
# Output:
# f = files changed
# l = lines insert or deleted
# Format:
# f:l
git diff --shortstat | \
awk -f <(cat - <<-'EOD'
{
red = 0;
files = $1;
sum = $4 - $6;
if (sum < 0)
red = 1;
sum = sum < 0 ? sum * -1 : sum;
}
END {
if (sum == "") {
printf "No changes in tracked files.\n"
exit 0
}
printf files ":"
if (red) {
printf "\033[1;31m" sum "\033[0m"
} else {
printf "\033[1;32m" sum "\033[0m"
}
printf "\n"
}
EOD
)
|
Fix issue for no z | # Path to script files in dotfiles directory
export SCRIPT_PATH="$DOTFILES/scripts"
# Scripts
alias append="$SCRIPT_PATH/fileAppend.rb"
alias cloudfront="$SCRIPT_PATH/cloudFront.rb"
alias lc="$SCRIPT_PATH/license.rb"
alias methods="$SCRIPT_PATH/methods.rb"
alias mksite="$SCRIPT_PATH/newSite.rb"
alias pf="$SCRIPT_PATH/processfile.rb"
alias rename="$SCRIPT_PATH/imageRenamer.rb"
alias xc="$SCRIPT_PATH/xcworkspace.rb"
source "$SCRIPT_PATH/z/z.sh"
| # Path to script files in dotfiles directory
export SCRIPT_PATH="$DOTFILES/scripts"
# Scripts
alias append="$SCRIPT_PATH/fileAppend.rb"
alias cloudfront="$SCRIPT_PATH/cloudFront.rb"
alias lc="$SCRIPT_PATH/license.rb"
alias methods="$SCRIPT_PATH/methods.rb"
alias mksite="$SCRIPT_PATH/newSite.rb"
alias pf="$SCRIPT_PATH/processfile.rb"
alias rename="$SCRIPT_PATH/imageRenamer.rb"
alias xc="$SCRIPT_PATH/xcworkspace.rb"
if [[ -e "$SCRIPT_PATH/z/z.sh" ]];then
export _Z_EXCLUDE_DIRS="/Volumes"
source "$SCRIPT_PATH/z/z.sh"
fi
|
Allow overriding SOURCE and TARGET from environment | #!/bin/bash
# Try to ensure we're in the project 'root' for relative paths etc to work.
cd "$(dirname $0)/.."
PATH=bin/:$PATH
SOURCE=docs/source
TARGET=docs/build
sphinx-apidoc -f -T -e -o ${SOURCE}/apidoc pyramid_jsonapi
# Generate config docs from python method
python -c 'import pyramid_jsonapi; import pyramid_settings_wrapper as psw; s = psw.Settings({}, defaults=pyramid_jsonapi.PyramidJSONAPI.config_defaults); print(s.sphinx_doc())' >docs/source/apidoc/settings.inc
travis-sphinx --outdir=${TARGET} build --source=${SOURCE}
# Build docs if this is master branch, and HEAD has a tag associated with it
if [[ -n $TRAVIS_TAG ]]; then
# Get a pylint badge
wget --tries=3 --timeout=20 https://mperlet.github.io/pybadge/badges/$(pylint pyramid_jsonapi |grep "rated at" |awk '{print $7}' |cut -f 1 -d '/').svg -O ${TARGET}/pylint-badge.svg
echo "Deploying docs to gh-pages..."
travis-sphinx --outdir=${TARGET} deploy
fi
| #!/bin/bash
# Try to ensure we're in the project 'root' for relative paths etc to work.
cd "$(dirname $0)/.."
PATH=bin/:$PATH
SOURCE=${SOURCE:-docs/source}
TARGET=${TARGET:-docs/build}
sphinx-apidoc -f -T -e -o ${SOURCE}/apidoc pyramid_jsonapi
# Generate config docs from python method
python -c 'import pyramid_jsonapi; import pyramid_settings_wrapper as psw; s = psw.Settings({}, defaults=pyramid_jsonapi.PyramidJSONAPI.config_defaults); print(s.sphinx_doc())' >docs/source/apidoc/settings.inc
travis-sphinx --outdir=${TARGET} build --source=${SOURCE}
# Build docs if this is master branch, and HEAD has a tag associated with it
if [[ -n $TRAVIS_TAG ]]; then
# Get a pylint badge
wget --tries=3 --timeout=20 https://mperlet.github.io/pybadge/badges/$(pylint pyramid_jsonapi |grep "rated at" |awk '{print $7}' |cut -f 1 -d '/').svg -O ${TARGET}/pylint-badge.svg
echo "Deploying docs to gh-pages..."
travis-sphinx --outdir=${TARGET} deploy
fi
|
Change integration tests to only build it when necessary | #!/bin/bash
set -ex
docker build --build-arg THUNDERBIRD_VERSION=$THUNDERBIRD_VERSION -t automatic_dictionary:${THUNDERBIRD_VERSION} .
if [ "$DEVEL_MODE" = "1" ]; then
DEVEL_MODE_MODIFIERS="-v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY -e DEBUG=1 -e LOCAL_DEBUG=1 -v $PWD:/app "
fi
docker run -ti $DEVEL_MODE_MODIFIERS automatic_dictionary:${THUNDERBIRD_VERSION} /bin/bash -l -c "./integration_test.sh"
| #!/bin/bash
set -ex
docker image inspect automatic_dictionary:${THUNDERBIRD_VERSION} ||
docker build --build-arg THUNDERBIRD_VERSION=$THUNDERBIRD_VERSION -t automatic_dictionary:${THUNDERBIRD_VERSION} .
if [ "$DEVEL_MODE" = "1" ]; then
DEVEL_MODE_MODIFIERS="-v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY -e DEBUG=1 -e LOCAL_DEBUG=1 -v $PWD:/app "
fi
docker run -ti $DEVEL_MODE_MODIFIERS automatic_dictionary:${THUNDERBIRD_VERSION} /bin/bash -l -c "./integration_test.sh"
|
Add quotes around array too | #!/bin/sh
# do some unit testing
# print the error to stderr prefixed with caller info
terr () {
echo "$caller: $@" >&2
}
# get variable values and functions for testing
. ./msctl
# override some vars from msctl with values that allow testing
# funcs like getMSCSValue have local vars based on WORLDS_LOCATION.
WORLDS_LOCATION=/tmp
MSCS_DEFAULTS="/tmp/mscs.defaults"
testworld="mscs-testdata"
# tests will write to this propfile to verify parsing etc.
propfile="$WORLDS_LOCATION/$testworld/mscs.properties"
mkdir -p $(dirname $propfile) || exit 1
# run the tests; no news is good news!
for t in tests/*; do
caller=$(basename "$t")
. "$t"
done
| #!/bin/sh
# do some unit testing
# print the error to stderr prefixed with caller info
terr () {
echo "$caller: " "$@" >&2
}
# get variable values and functions for testing
. ./msctl
# override some vars from msctl with values that allow testing
# funcs like getMSCSValue have local vars based on WORLDS_LOCATION.
WORLDS_LOCATION=/tmp
MSCS_DEFAULTS="/tmp/mscs.defaults"
testworld="mscs-testdata"
# tests will write to this propfile to verify parsing etc.
propfile="$WORLDS_LOCATION/$testworld/mscs.properties"
mkdir -p $(dirname $propfile) || exit 1
# run the tests; no news is good news!
for t in tests/*; do
caller=$(basename "$t")
. "$t"
done
|
Halt commit if we have an error | #!/usr/bin/env bash
# Load common
source ${PWD}/scripts/common.sh
# Constants
if [[ $BASE_PROJECT == *".git"* ]]; then
BASE_PROJECT=$(dirname "$BASE_PROJECT")
fi
cd ${BASE_PROJECT}
# Validate project
./sbt ";compile;test;scalastyle"
if [ $? -ne 0 ]
then
error "The project isn't valid"
else
info "The project is valid"
fi
| #!/usr/bin/env bash
# Load common
source ${PWD}/scripts/common.sh
# Constants
if [[ $BASE_PROJECT == *".git"* ]]; then
BASE_PROJECT=$(dirname "$BASE_PROJECT")
fi
cd ${BASE_PROJECT}
# Validate project
./sbt ";compile;test;scalastyle"
if [ $? -ne 0 ]
then
error "The project isn't valid"
exit 1
else
info "The project is valid"
fi
|
Remove reference to ts-compiler module which no longer exists | #!/bin/bash
# Many lols. See gruntfile for full explanation.
node -e "require('ts-compiler')"
# Run all tests using jest
if [[ $TRAVIS ]]
then jest -i # Travis tests are run inline
else jest
fi
| #!/bin/bash
# Run all tests using jest
if [[ $TRAVIS ]]
then jest -i # Travis tests are run inline
else jest
fi
|
Remove "-it" when invoking Docker. | #!/bin/bash
# Run up an SSH-enabled Apache in the current directory.
docker run -it --rm --name httpd-ssi -p 5000:80 \
-v "$PWD":/usr/local/apache2/htdocs/ \
cassiel/httpd-ssi
| #!/bin/bash
# Run up an SSH-enabled Apache in the current directory.
docker run --rm --name httpd-ssi -p 5000:80 \
-v "$PWD":/usr/local/apache2/htdocs/ \
cassiel/httpd-ssi
|
Handle spaces in commit message | #!/bin/bash
GIT=`which git`
REPO=$1
DATE=`date +%s`
BRANCH=$2
MSG=$3
OUTPUT=/tmp/git-output-$DATE
if [[ ! -d ${REPO} ]]
then
echo "Cloned repo not found in ${REPO}"
exit 1
fi
cd ${REPO}
GIT_BRANCH=$(${GIT} symbolic-ref --short -q HEAD)
if [[ ${GIT_BRANCH} != ${BRANCH} ]]
then
echo "Changes not in specified branch but on a different branch ${GIT_BRANCH}. Aborting."
exit 2
fi
OUT=`$GIT add -A && $GIT commit -m ${MSG} > ${OUTPUT}`
if [[ $? == 0 ]]
then
echo "Committed ${MSG} to ${BRANCH}."
exit 0
else
cat ${OUTPUT}
rm ${OUTPUT}
exit 2
fi
| #!/bin/bash
GIT=`which git`
REPO="$1"
DATE=`date +%s`
BRANCH="$2"
MSG="$3"
OUTPUT=/tmp/git-output-$DATE
if [[ ! -d ${REPO} ]]
then
echo "Cloned repo not found in ${REPO}"
exit 1
fi
cd ${REPO}
GIT_BRANCH=$(${GIT} symbolic-ref --short -q HEAD)
if [[ ${GIT_BRANCH} != ${BRANCH} ]]
then
echo "Changes not in specified branch but on a different branch ${GIT_BRANCH}. Aborting."
exit 2
fi
OUT=`$GIT add -A && $GIT commit -m "${MSG}" > ${OUTPUT}`
if [[ $? == 0 ]]
then
echo "Committed ${MSG} to ${BRANCH}."
exit 0
else
cat ${OUTPUT}
rm ${OUTPUT}
exit 2
fi
|
Revert "Use alternative test on coveralls token" | #!/bin/bash
# on macs, you may need to:
# export GOBUILDFLAG=-ldflags -linkmode=external
coveralls_testflags="-v -covermode=count -coverprofile=coverage.out"
set -e
echo "Testing against mysql"
export GORP_TEST_DSN=gorptest/gorptest/gorptest
export GORP_TEST_DIALECT=mysql
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against gomysql"
export GORP_TEST_DSN=gorptest:gorptest@/gorptest
export GORP_TEST_DIALECT=gomysql
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against postgres"
export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable"
export GORP_TEST_DIALECT=postgres
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against sqlite"
export GORP_TEST_DSN=/tmp/gorptest.bin
export GORP_TEST_DIALECT=sqlite
go test $coveralls_testflags $GOBUILDFLAG $@ .
if [ -z "$COVERALLS_TOKEN" ]
then
if [[ `go version` == *"1.4"* ]]
then
$HOME/gopath/bin/goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN
fi
fi
| #!/bin/bash
# on macs, you may need to:
# export GOBUILDFLAG=-ldflags -linkmode=external
coveralls_testflags="-v -covermode=count -coverprofile=coverage.out"
set -e
echo "Testing against mysql"
export GORP_TEST_DSN=gorptest/gorptest/gorptest
export GORP_TEST_DIALECT=mysql
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against gomysql"
export GORP_TEST_DSN=gorptest:gorptest@/gorptest
export GORP_TEST_DIALECT=gomysql
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against postgres"
export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable"
export GORP_TEST_DIALECT=postgres
go test $coveralls_testflags $GOBUILDFLAG $@ .
echo "Testing against sqlite"
export GORP_TEST_DSN=/tmp/gorptest.bin
export GORP_TEST_DIALECT=sqlite
go test $coveralls_testflags $GOBUILDFLAG $@ .
if [ -z ${COVERALLS_TOKEN+x} ]
then
if [[ `go version` == *"1.4"* ]]
then
$HOME/gopath/bin/goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN
fi
fi
|
Allow e.g., a recipe called "inkscape-standalone" to use the "inkscape" Docker image | #!/bin/bash
# Build on Travis CI with or without Docker
set -e
if [ ! $(env | grep TRAVIS_JOB_ID ) ] ; then
echo "This script is supposed to run on Travis CI"
exit 1
fi
RECIPE="${1}"
DOCKER=echo "${RECIPE}" | cut -d "-" -f 1 # Allow e.g., a recipe called "inkscape-standalone" to use the "inkscape" Docker image
mkdir -p ./out/
if [ -f recipes/$RECIPE/Dockerfile ] && [ -f recipes/$RECIPE/Recipe ] ; then
# There is a Dockerfile, hence build using Docker
mv recipes/$RECIPE/Recipe ./out/Recipe
sed -i -e 's|sudo ||g' ./out/Recipe # For subsurface recipe
docker run -i -v ${PWD}/out:/out probonopd/appimages:$DOCKER /bin/bash -ex /out/Recipe
elif [ -f recipes/$RECIPE/Recipe ] ; then
# There is no Dockerfile but a Recipe, hence build without Docker
bash -ex recipes/$RECIPE/Recipe
else
# There is no Recipe
echo "Recipe not found, is RECIPE missing?"
exit 1
fi
ls -lh out/*.AppImage
| #!/bin/bash
# Build on Travis CI with or without Docker
set -e
if [ ! $(env | grep TRAVIS_JOB_ID ) ] ; then
echo "This script is supposed to run on Travis CI"
exit 1
fi
RECIPE="${1}"
DOCKER=$(echo "${RECIPE}" | cut -d "-" -f 1) # Allow e.g., a recipe called "inkscape-standalone" to use the "inkscape" Docker image
mkdir -p ./out/
if [ -f recipes/$RECIPE/Dockerfile ] && [ -f recipes/$RECIPE/Recipe ] ; then
# There is a Dockerfile, hence build using Docker
mv recipes/$RECIPE/Recipe ./out/Recipe
sed -i -e 's|sudo ||g' ./out/Recipe # For subsurface recipe
docker run -i -v ${PWD}/out:/out probonopd/appimages:$DOCKER /bin/bash -ex /out/Recipe
elif [ -f recipes/$RECIPE/Recipe ] ; then
# There is no Dockerfile but a Recipe, hence build without Docker
bash -ex recipes/$RECIPE/Recipe
else
# There is no Recipe
echo "Recipe not found, is RECIPE missing?"
exit 1
fi
ls -lh out/*.AppImage
|
Convert posix scripts to bash | #!/bin/sh
set -e
# Download an appropriate version of wasm-bindgen based off of what's being used
# in the lock file. Ideally we'd use `wasm-pack` at some point for this!
version=$(grep -A 1 'name = "wasm-bindgen"' Cargo.lock | grep version)
version=$(echo $version | awk '{print $3}' | sed 's/"//g')
curl -L https://github.com/rustwasm/wasm-bindgen/releases/download/$version/wasm-bindgen-$version-x86_64-unknown-linux-musl.tar.gz \
| tar xzf - -C target
export PATH=$PATH:`pwd`/target/wasm-bindgen-$version-x86_64-unknown-linux-musl
export CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER=wasm-bindgen-test-runner
export NODE_ARGS=--experimental-wasm-simd
exec "$@"
| #!/bin/bash
set -e
# Download an appropriate version of wasm-bindgen based off of what's being used
# in the lock file. Ideally we'd use `wasm-pack` at some point for this!
version=$(grep -A 1 'name = "wasm-bindgen"' Cargo.lock | grep version)
version=$(echo $version | awk '{print $3}' | sed 's/"//g')
curl -L https://github.com/rustwasm/wasm-bindgen/releases/download/$version/wasm-bindgen-$version-x86_64-unknown-linux-musl.tar.gz \
| tar xzf - -C target
export PATH=$PATH:`pwd`/target/wasm-bindgen-$version-x86_64-unknown-linux-musl
export CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER=wasm-bindgen-test-runner
export NODE_ARGS=--experimental-wasm-simd
exec "$@"
|
Create the branch in one step | #!/bin/bash
REL="1.0.0.Alpha1"
REL_NODE="1.0.0-Alpha1"
FORGE_URL="http://generator-backend-obsidian-alpha1.e8ca.engint.openshiftapps.com/forge"
cd $TMPDIR
rm -rf generator-frontend
nvm use 6
git clone https://github.com/obsidian-toaster/generator-frontend.git
cd generator-frontend
npm install package-json-io
node -e "var pkg = require('package-json-io'); pkg.read(function(err, data) { data.version = '$REL_NODE'; pkg.update(data, function(){}); })"
git commit -a -m "Released $REL of generator-frontend"
git clone https://github.com/obsidian-toaster/obsidian-toaster.github.io.git build
cd build
git branch "$REL"
git checkout "$REL"
rm -rf *
cd -
npm install
npm run build:prod
cp -r dist/* build
cd build
git add .
git commit -a -m "Released $REL of generator-frontend"
git push origin "$REL"
cd -
git tag "$REL"
git push origin --tags | #!/bin/bash
REL="1.0.0.Alpha1"
REL_NODE="1.0.0-Alpha1"
FORGE_URL="http://generator-backend-obsidian-alpha1.e8ca.engint.openshiftapps.com/forge"
cd $TMPDIR
rm -rf generator-frontend
git clone https://github.com/obsidian-toaster/generator-frontend.git
cd generator-frontend
npm install package-json-io
node -e "var pkg = require('package-json-io'); pkg.read(function(err, data) { data.version = '$REL_NODE'; pkg.update(data, function(){}); })"
git commit -a -m "Released $REL of generator-frontend"
git clone https://github.com/obsidian-toaster/obsidian-toaster.github.io.git build
cd build
git checkout -b "$REL"
rm -rf *
cd -
npm install
npm run build:prod
cp -r dist/* build
cd build
ls -la
git add .
git commit -a -m "Released $REL of generator-frontend"
git push origin "$REL"
cd -
git tag "$REL"
git push origin --tags |
Fix typo in script for automatic answers on prompts during VM provisioning | #!/usr/bin/env bash
PROJECT="cikit-test"
ANSIBLE_ARGS="-vv"
VERSION_PHP="$1"
VERSION_NODEJS="$2"
VERSION_SOLR="$3"
VERSION_RUBY="$4"
: ${VERSION_PHP:="7.0"}
: ${VERSION_NODEJS:="6"}
: ${VERSION_SOLR:="6.5.1"}
: ${VERSION_RUBY:="2.4.0"}
# Change directory to "tests".
cd -P -- "$(dirname -- "$0")"
# Go to root directory of CIKit.
cd ../
if [ -d "${PROJECT}" ]; then
echo "[INFO] Existing project found. Checking for existing VM..."
VM_ID="$(vagrant global-status | awk -v pattern="${PROJECT}" '$0~pattern {print $1}')"
if [ "" != "${VM_ID}" ]; then
echo "[INFO] Existing VM found. Destroying..."
cd "${PROJECT}"
vagrant destroy -f
cd ../
fi
echo "[INFO] Removing existing project..."
rm -rf "${PROJECT}"
fi
./cikit repository --project="${PROJECT}" --without-sources
cd "${PROJECT}"
EXTRA_VARS="--php-version=${VERSION_PHP} --nodejs-version=${VERSION_NODEJS} --solr-version=${VERSION_SOLR} --ruby-version${VERSION_RUBY}" vagrant up
| #!/usr/bin/env bash
PROJECT="cikit-test"
ANSIBLE_ARGS="-vv"
VERSION_PHP="$1"
VERSION_NODEJS="$2"
VERSION_SOLR="$3"
VERSION_RUBY="$4"
: ${VERSION_PHP:="7.0"}
: ${VERSION_NODEJS:="6"}
: ${VERSION_SOLR:="6.5.1"}
: ${VERSION_RUBY:="2.4.0"}
# Change directory to "tests".
cd -P -- "$(dirname -- "$0")"
# Go to root directory of CIKit.
cd ../
if [ -d "${PROJECT}" ]; then
echo "[INFO] Existing project found. Checking for existing VM..."
VM_ID="$(vagrant global-status | awk -v pattern="${PROJECT}" '$0~pattern {print $1}')"
if [ "" != "${VM_ID}" ]; then
echo "[INFO] Existing VM found. Destroying..."
cd "${PROJECT}"
vagrant destroy -f
cd ../
fi
echo "[INFO] Removing existing project..."
rm -rf "${PROJECT}"
fi
./cikit repository --project="${PROJECT}" --without-sources
cd "${PROJECT}"
EXTRA_VARS="--php-version=${VERSION_PHP} --nodejs-version=${VERSION_NODEJS} --solr-version=${VERSION_SOLR} --ruby-version=${VERSION_RUBY}" vagrant up
|
Add "--missing-only" that avoids downloading anything if both description and title metadata already exist. | #!/bin/bash
if [ -z "$1" ]; then
>&2 echo "Missing sync directory name."
exit 1
fi
mkdir -p SYNC/$1/META
num_total=$(find "SYNC/$1/ID/" -maxdepth 1 -name "*.mkv" | wc -l)
num_current=1
for filen in SYNC/$1/ID/*.mkv; do
if [ -f "$filen" ]; then
if [[ "$filen" =~ /([^/]+)\.mkv$ ]]; then
ID=${BASH_REMATCH[1]};
echo -n "Updating metadata ($num_current/$num_total): $ID...";
TITLE=$(youtube-dl --get-filename -o '%(title)s' -- "$ID");
if [ -z "$TITLE" ]; then
echo " [Error] Video might be down. We'll keep the old link."
else
echo " [OK]"
echo -n "$TITLE" > SYNC/$1/META/$ID.title
DESCRIPTION=$(youtube-dl --get-description -- "$ID");
if [ -n "$DESCRIPTION" ]; then
echo "$DESCRIPTION" > SYNC/$1/META/$ID.description
fi
fi
fi
fi
num_current=$((num_current+1))
done
| #!/bin/bash
if [ -z "$1" ]; then
>&2 echo "Missing sync directory name."
exit 1
else
name="$1"
fi
domissing=0
if [ x"$2" = "x--missing-only" ]; then
echo "Only fetching missing metadata"
domissing=1
fi
mkdir -p SYNC/$name/META
num_total=$(find "SYNC/$name/ID/" -maxdepth 1 -name "*.mkv" | wc -l)
num_current=1
for filen in SYNC/$name/ID/*.mkv; do
if [ -f "$filen" ]; then
if [[ "$filen" =~ /([^/]+)\.mkv$ ]]; then
ID=${BASH_REMATCH[1]};
titlefile=SYNC/$name/META/$ID.title
descfile=SYNC/$name/META/$ID.description
if [ $domissing -eq 1 \
-a -f $titlefile -a -f $descfile ]; then
echo "Skipping known meta for $ID"
num_current=$((num_current+1))
continue
fi
echo -n "Updating metadata ($num_current/$num_total): $ID...";
TITLE=$(youtube-dl --get-filename -o '%(title)s' -- "$ID");
if [ -z "$TITLE" ]; then
echo " [Error] Video might be down. We'll keep the old link."
else
echo " [OK]"
echo -n "$TITLE" > $titlefile
DESCRIPTION=$(youtube-dl --get-description -- "$ID");
if [ -n "$DESCRIPTION" ]; then
echo "$DESCRIPTION" > $descfile
fi
fi
fi
fi
num_current=$((num_current+1))
done
|
Remove --stderr switch from Travis test runner | #!/bin/bash
travisdir=$(dirname $(readlink /proc/$$/fd/255))
testdir="$travisdir/../tests"
testedcomponents=(`cat "$travisdir/tested-components"`)
result=0
for tested in "${testedcomponents[@]}"
do
echo "$tested:"
phpunit -c $testdir/phpunit.xml --stderr $testdir/$tested
result=$(($result || $?))
done
exit $result
| #!/bin/bash
travisdir=$(dirname $(readlink /proc/$$/fd/255))
testdir="$travisdir/../tests"
testedcomponents=(`cat "$travisdir/tested-components"`)
result=0
for tested in "${testedcomponents[@]}"
do
echo "$tested:"
phpunit -c $testdir/phpunit.xml $testdir/$tested
result=$(($result || $?))
done
exit $result
|
Load rvm to get gem-installed tools into $PATH | _activate_rvm () {
unalias rvm
# rvm appends this to .bashrc:
PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
# rvm appends this to .bash_profile:
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
}
# lazy load
alias rvm='_activate_rvm; rvm'
| _activate_rvm () {
alias rvm &> /dev/null && unalias rvm
# rvm appends this to .bashrc:
PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
# rvm appends this to .bash_profile:
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
}
# lazy load
alias rvm='_activate_rvm; rvm'
# Turn it on so we have gem-installed utils in $PATH.
[[ -z "$RUBY_VERSION" ]] && rvm use default &> /dev/null
|
Fix SonarQube execution on master | #!/bin/bash
set -euo pipefail
echo
if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_NODE_VERSION" == "4" ]; then
node --harmony_rest_parameters ./node_modules/.bin/grunt test
node --harmony_rest_parameters ./node_modules/.bin/grunt test-with-coverage
else
node ./node_modules/.bin/grunt test
node ./node_modules/.bin/grunt test-with-coverage
fi
if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_NODE_VERSION" == "4" ]; then
sonar-scanner -Dsonar.host.url=https://sonarqube.com \
-Dsonar.analysis.mode=preview \
-Dsonar.github.pullRequest=$TRAVIS_PULL_REQUEST \
-Dsonar.github.repository=$TRAVIS_REPO_SLUG \
-Dsonar.github.oauth=$GITHUB_TOKEN \
-Dsonar.login=$SONARQUBE_TOKEN
elif [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && ["$TRAVIS_NODE_VERSION" == "4"]; then
sonar-scanner -Dsonar.host.url=https://sonarqube.com -Dsonar.login=$SONARQUBE_TOKEN
fi
| #!/bin/bash
set -euo pipefail
echo
if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_NODE_VERSION" == "4" ]; then
node --harmony_rest_parameters ./node_modules/.bin/grunt test
node --harmony_rest_parameters ./node_modules/.bin/grunt test-with-coverage
else
node ./node_modules/.bin/grunt test
node ./node_modules/.bin/grunt test-with-coverage
fi
if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_NODE_VERSION" == "4" ]; then
sonar-scanner -Dsonar.host.url=https://sonarqube.com \
-Dsonar.analysis.mode=preview \
-Dsonar.github.pullRequest=$TRAVIS_PULL_REQUEST \
-Dsonar.github.repository=$TRAVIS_REPO_SLUG \
-Dsonar.github.oauth=$GITHUB_TOKEN \
-Dsonar.login=$SONARQUBE_TOKEN
elif [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_NODE_VERSION" == "4" ]; then
sonar-scanner -Dsonar.host.url=https://sonarqube.com -Dsonar.login=$SONARQUBE_TOKEN
fi
|
Fix reset password functionality with path fix in the bash profile. | # script aliases for helpdesk user
if [ "$(id -gn)" == "helpdesk" ]; then
echo "Welcome to the MOC helpdesk interface. Type \`moc help\` for instructions."
SCRIPT_DIR='/usr/local/src/moc-tools/production'
moc() {
if [[ $@ == "help" ]]; then
command sudo -u moc-tools cat "$SCRIPT_DIR/helpdesk/help.txt" | more
elif [[ $@ =~ ^reset-password ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/helpdesk/reset-password.py" $@
elif [[ $@ =~ ^grant-access ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/addusers.py" $@
elif [[ $@ =~ ^update-quotas ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/set-quotas.py" $@
# This can be uncommented for testing updates to this script without interacting with OpenStack
# elif [[ $@ == "test" ]]; then
# command sudo -u moc-tools python "$SCRIPT_DIR/helpdesk/test.py
else
echo "$@ is not valid input. Type 'moc help' to see a list of available commands."
fi
}
fi
| # script aliases for helpdesk user
if [ "$(id -gn)" == "helpdesk" ]; then
echo "Welcome to the MOC helpdesk interface. Type \`moc help\` for instructions."
SCRIPT_DIR='/usr/local/src/moc-tools/production'
moc() {
if [[ $@ == "help" ]]; then
command sudo -u moc-tools cat "$SCRIPT_DIR/helpdesk/help.txt" | more
elif [[ $@ =~ ^reset-password ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/reset-password.py" $@
elif [[ $@ =~ ^grant-access ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/addusers.py" $@
elif [[ $@ =~ ^update-quotas ]]; then
shift 1
command sudo -u moc-tools python "$SCRIPT_DIR/set-quotas.py" $@
# This can be uncommented for testing updates to this script without interacting with OpenStack
# elif [[ $@ == "test" ]]; then
# command sudo -u moc-tools python "$SCRIPT_DIR/helpdesk/test.py
else
echo "$@ is not valid input. Type 'moc help' to see a list of available commands."
fi
}
fi
|
Verify services inside the container on 127.0.0.1 | #!/bin/bash
# Echo success or cause an exit if the value passed != 0
function checkstatus {
if [ $1 -eq 0 ]; then
echo "=> Success"
else
echo "=> Failed"
exit $1
fi
}
# Test our image, first curling our container and then checking the result against our expectations
function testimage {
echo "=> Querying image ($1)"
curl --retry 10 --retry-delay 5 -o actual/$1.html http://localhost:$2 --stderr -
checkstatus $?
echo "=> Checking against expected values ($1)"
diff -b actual/$1.html expected/$1.html
checkstatus $?
echo
}
function testmysql {
echo "=> Connecting to MySQL: ($1:$2)"
mysql -h 127.0.0.1 -P $2 -u admin -ppassword -e"quit"
checkstatus $?
}
function waitForSupervisordProcess {
while true; do
echo "=> Waiting for $2 on the service $1..."
docker-compose -f ../docker-compose.test.yml -p ci exec $1 supervisorctl status $2
[ $? -ne 0 ] || break
sleep 5
done
}
| #!/bin/bash
# Echo success or cause an exit if the value passed != 0
function checkstatus {
if [ $1 -eq 0 ]; then
echo "=> Success"
else
echo "=> Failed"
exit $1
fi
}
# Test our image, first curling our container and then checking the result against our expectations
function testimage {
echo "=> Querying image ($1)"
docker-compose -f ../docker-compose.test.yml -p ci exec web$1 curl --head --retry 10 --retry-delay 5 --silent --show-error http://127.0.0.1 >/dev/null && \
curl --retry 3 --retry-delay 3 --silent --show-error --stderr - -o actual/$1.html http://localhost:$2
checkstatus $?
echo "=> Checking against expected values ($1)"
diff -b actual/$1.html expected/$1.html
checkstatus $?
echo
}
function testmysql {
echo "=> Connecting to MySQL: ($1:$2)"
docker-compose -f ../docker-compose.test.yml -p ci exec $1 mysql -h 127.0.0.1 -u admin -ppassword -e"quit" && \
mysql -h 127.0.0.1 -P $2 -u admin -ppassword -e"quit"
checkstatus $?
}
function waitForSupervisordProcess {
while true; do
echo "=> Waiting for $2 on the service $1..."
docker-compose -f ../docker-compose.test.yml -p ci exec $1 supervisorctl status $2
[ $? -ne 0 ] || break
sleep 5
done
}
|
Use yaml in configuration docs runner | #!/bin/bash
set -Eeuxo pipefail
cd "$( dirname "${BASH_SOURCE[0]}" )/.."
cat > configuration.md << EOF
---
id: configuration
title: Configuration
---
\`\`\`
`cat ./docs/config.yaml`
\`\`\`
EOF
| #!/bin/bash
set -Eeuxo pipefail
cd "$( dirname "${BASH_SOURCE[0]}" )/.."
cat > configuration.md << EOF
---
id: configuration
title: Configuration
---
\`\`\`yaml
`cat ./docs/config.yaml`
\`\`\`
EOF
|
Remove test that should be on a branch | tests() {
TMP_DIR=deps/tests/tmp
require empty_tmp_dir
require file_creation
require chained_dep
require name_conflicts
require empty_dep_passes
require undefined_is_met_passes
require undefined_meet_fails
require parameterized_dep
require success_output
require failure_output
require require_before_def
}
| tests() {
TMP_DIR=deps/tests/tmp
require empty_tmp_dir
require file_creation
require chained_dep
require name_conflicts
require empty_dep_passes
require undefined_is_met_passes
require undefined_meet_fails
require parameterized_dep
require success_output
require failure_output
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.