| Instruction | input_code | output_code |
|---|---|---|
Add alias to search apt package database only by name and not by description |
# Software management.
alias sma='su -c "apt update && apt upgrade"'
alias smud='su -c "apt update"'
alias smug='su -c "apt upgrade"'
alias ll='ls -la'
# Show hidden files only.
alias l.='ls -d .* --color=auto'
alias du1='du --max-depth=1'
alias du2='du --max-depth=2'
# tmux
alias t='tmux -2'
alias ta='tmux attach'
# fast travel.
# alias ftg='fast-travel go'
# alias fta='fast-travel add'
# alias ftd='fast-travel delete'
# alias ftl='fast-travel list'
|
# Software management.
alias sma='su -c "apt update && apt upgrade"'
alias smud='su -c "apt update"'
alias smug='su -c "apt upgrade"'
alias asn='apt-cache search --names-only'
alias ll='ls -la'
# Show hidden files only.
alias l.='ls -d .* --color=auto'
alias du1='du --max-depth=1'
alias du2='du --max-depth=2'
# tmux
alias t='tmux -2'
alias ta='tmux attach'
# fast travel.
# alias ftg='fast-travel go'
# alias fta='fast-travel add'
# alias ftd='fast-travel delete'
# alias ftl='fast-travel list'
|
Add check for running outside script dir | #!/bin/bash
function check {
"$@" > /dev/null
local rc=$?
if [ $rc -ne 0 ]; then
echo "ERR: $1" >&2
exit 1
fi
return $rc
}
check penkki ls
# --times
check penkki -t 1 ls
# --formatter
check penkki -f chart ls
check penkki -f sparkly ls
check penkki -f html ls
check penkki -f json ls
check penkki -f bars ls
# --commands
check penkki -c ls,df,du
check penkki -t 2 -f chart -c ls,df,du
check penkki -t 2 -f sparkly -c ls,df,du
check penkki -t 2 -f html -c ls,df,du
check penkki -t 2 -f json -c ls,df,du
check penkki -t 2 -f bars -c ls,df,du
| #!/bin/bash
function check {
"$@" > /dev/null
local rc=$?
if [ $rc -ne 0 ]; then
echo "ERR: $1" >&2
exit 1
fi
return $rc
}
check penkki ls
# --times
check penkki -t 1 ls
# --formatter
check penkki -f chart ls
check penkki -f sparkly ls
check penkki -f html ls
check penkki -f json ls
check penkki -f bars ls
# --commands
check penkki -c ls,df,du
check penkki -t 2 -f chart -c ls,df,du
check penkki -t 2 -f sparkly -c ls,df,du
check penkki -t 2 -f html -c ls,df,du
check penkki -t 2 -f json -c ls,df,du
check penkki -t 2 -f bars -c ls,df,du
# outside script dir
pushd "$HOME" > /dev/null
check penkki ls
popd > /dev/null
|
Make complete web server directory in ./compiled. | #!/bin/sh
time tre makefiles/make.lisp
| #!/bin/sh
rm -rf compiled
mkdir compiled
time tre makefiles/make.lisp && cp -rv css js media compiled && cp -v config.php compiled/config.php.example
|
Update network.sh public IP get, from Dig to curl | run_data(){
if [ ! -f Eva.widget/netstat.working ]; then
ORZ=$(netstat -w1 & sleep 1; kill $!;)
ORZ=($ORZ)
output="${ORZ[12]} ${ORZ[15]}"
if [[ ${#output} > 1 ]];
then
sed -i "" "1s/.*/${output}/" Eva.widget/netstat.output
fi
fi
}
run_ip() {
if [ ! -f Eva.widget/netstat.ipworking ]; then
touch Eva.widget/netstat.ipworking
IP=$(dig +short myip.opendns.com +tries=5 @resolver1.opendns.com)
if [[ $IP != *.*.*.* ]];
then
IP="Fehler"
fi
sed -i "" "2s/.*/$IP/" Eva.widget/netstat.output
sleep 5
rm Eva.widget/netstat.ipworking
fi
}
run_data &>/dev/null &disown
run_ip &>/dev/null &disown
cat Eva.widget/netstat.output
| run_data(){
if [ ! -f Eva.widget/netstat.working ]; then
ORZ=$(netstat -w1 & sleep 1; kill $!;)
ORZ=($ORZ)
output="${ORZ[12]} ${ORZ[15]}"
if [[ ${#output} > 1 ]];
then
sed -i "" "1s/.*/${output}/" Eva.widget/netstat.output
fi
fi
}
run_ip() {
if [ ! -f Eva.widget/netstat.ipworking ]; then
touch Eva.widget/netstat.ipworking
IP=$(curl https://diagnostic.opendns.com/myip)
if [[ $IP != *.*.*.* ]];
then
IP="Fehler"
fi
sed -i "" "2s/.*/$IP/" Eva.widget/netstat.output
sleep 5
rm Eva.widget/netstat.ipworking
fi
}
run_data &>/dev/null &disown
run_ip &>/dev/null &disown
cat Eva.widget/netstat.output
|
Update configlet on each run | #!/usr/bin/env bash
set -x
# Make sure there is `~/bin`-folder.
[[ -d ~/bin ]] || mkdir -p ~/bin
# download `rebar3` and make it executable
if [[ ! -f ~/bin/rebar3 ]]; then
wget -O ~/bin/rebar3 https://s3.amazonaws.com/rebar3/rebar3
chmod a+x ~/bin/rebar3
fi
if [[ ! -f ~/bin/configlet ]]; then
./bin/fetch-configlet
cp ./bin/configlet ~/bin/configlet
fi
| #!/usr/bin/env bash
set -x
# Make sure there is `~/bin`-folder.
[[ -d ~/bin ]] || mkdir -p ~/bin
# download `rebar3` and make it executable
if [[ ! -f ~/bin/rebar3 ]]; then
wget -O ~/bin/rebar3 https://s3.amazonaws.com/rebar3/rebar3
chmod a+x ~/bin/rebar3
fi
# fetch configlet and move it into $PATH
./bin/fetch-configlet
cp ./bin/configlet ~/bin/configlet
|
Implement suggestions from @Tatsh for bash setup | #!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Install Homebrew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# Make sure we're using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade --all
brew tap Homebrew/bundle
brew bundle
# Set up GNU core utilities (those that come with OS X are outdated).
echo 'Be sure to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.'
ln -s /usr/local/bin/gsha256sum /usr/local/bin/sha256sum
if ! grep '/usr/local/bin/bash' /etc/shells; then
echo '/usr/local/bin/bash' | sudo tee -a /etc/shells;
chsh -s /usr/local/bin/bash;
fi;
# Remove outdated versions from the cellar.
brew cleanup
| #!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Install Homebrew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# Make sure we're using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade --all
brew tap Homebrew/bundle
brew bundle
# Set up GNU core utilities (those that come with OS X are outdated).
echo 'Be sure to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.'
ln -s /usr/local/bin/gsha256sum /usr/local/bin/sha256sum
if ! fgrep -q '/usr/local/bin/bash' /etc/shells; then
echo '/usr/local/bin/bash' >> /etc/shells;
chsh -s /usr/local/bin/bash;
fi;
# Remove outdated versions from the cellar.
brew cleanup
|
Add zsh vi mode indicator | # Set prompt to % for users and # for root
export PS1='%# '
fpath=($DOTFILES/functions $fpath)
autoload -Uz $DOTFILES/functions/*(:t)
# Do completions from anywhere in the word
setopt COMPLETE_IN_WORD
# History settings
# Save x items to the given history file
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=$HOME/.zsh_history
# Append history to the zsh_history file
setopt APPEND_HISTORY
# Ignore duplicates in zsh history
setopt HIST_IGNORE_ALL_DUPS
# Ignore commands for history that start with a space
setopt HIST_IGNORE_SPACE
# Remove superfluous blanks from each command line being added to the history list
setopt HIST_REDUCE_BLANKS
# Use vim shortcuts within the terminal (defaults to insert mode)
bindkey -v
# Reduce the lag switching into Normal mode to 0.1s
export KEYTIMEOUT=1
# Restore 'normal' search in VI mode
bindkey '^R' history-incremental-search-backward
# Allow alt/option . to insert the argument from the previous command
bindkey '\e.' insert-last-word
| # Set prompt to % for users and # for root
export PS1='%# '
fpath=($DOTFILES/functions $fpath)
autoload -Uz $DOTFILES/functions/*(:t)
# Do completions from anywhere in the word
setopt COMPLETE_IN_WORD
# History settings
# Save x items to the given history file
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=$HOME/.zsh_history
# Append history to the zsh_history file
setopt APPEND_HISTORY
# Ignore duplicates in zsh history
setopt HIST_IGNORE_ALL_DUPS
# Ignore commands for history that start with a space
setopt HIST_IGNORE_SPACE
# Remove superfluous blanks from each command line being added to the history list
setopt HIST_REDUCE_BLANKS
# Use vim shortcuts within the terminal (defaults to insert mode)
bindkey -v
# Restore 'normal' search in VI mode
bindkey '^R' history-incremental-search-backward
bindkey '^P' up-history
bindkey '^N' down-history
# Allow alt/option . to insert the argument from the previous command
bindkey '\e.' insert-last-word
# Show vim mode on right
# http://dougblack.io/words/zsh-vi-mode.html
function zle-line-init zle-keymap-select {
VIM_PROMPT="%{$fg_bold[yellow]%} [% NORMAL]% %{$reset_color%}"
RPS1="${${KEYMAP/vicmd/$VIM_PROMPT}/(main|viins)/} $EPS1"
zle reset-prompt
}
zle -N zle-line-init
zle -N zle-keymap-select
# Reduce the lag switching into Normal mode to 0.1s
export KEYTIMEOUT=1
# Force update of RPS1 immediately
RPS1=""
|
Fix and clarify quoting issue. | #!/bin/bash
#
# Usage:
# ./demo.sh <function name>
set -o nounset
set -o pipefail
set -o errexit
argv() {
python -c 'import sys; print sys.argv[1:]' "$@"
}
local() {
# Test out some hard characters
argv begin \' \" ' ' \\ end
}
argv-to-sh-demo() {
./argv_to_sh.py begin \' \" ' ' \\ end
}
ssh-demo() {
ssh localhost $(./argv_to_sh.py echo begin \' \" ' ' \\ end)
ssh localhost $(./argv_to_sh.py $PWD/$0 argv begin \' \" ' ' \\ end)
}
"$@"
| #!/bin/bash
#
# Usage:
# ./demo.sh <function name>
set -o nounset
set -o pipefail
set -o errexit
argv() {
python -c 'import sys; print sys.argv[1:]' "$@"
}
local() {
# Test out some hard characters
argv begin \' \" ' ' \\ end
}
argv-to-sh-demo() {
./argv_to_sh.py begin \' \" 'a b' \\ end
}
quote-demo() {
# Wrong because 'a b' gets split.
argv $(./argv_to_sh.py echo begin \' \" 'a b' \\ end)
# Quoting makes it correct. SSH doesn't care, because it must join the rest
# of the args, which is weird.
#
# I think the point of the weird SSH command syntax is to do remote
# evaluation of vars? Yes try ssh HOST 'echo' '$HOME'.
#
# What about su? I guess it can also do evaluation of vars in the other
# user's environment.
#
# Will there be a pattern in oil for this?
argv "$(./argv_to_sh.py echo begin \' \" 'a b' \\ end)"
}
ssh-demo() {
ssh localhost "$(./argv_to_sh.py echo begin \' \" 'a b' \\ end)"
ssh localhost "$(./argv_to_sh.py $PWD/$0 argv begin \' \" 'a b' \\ end)"
}
"$@"
|
Fix error when create nabu-3 schema | #!/usr/bin/env bash
set -ex
echo "Installing MySQL 5.7..."
sudo service mysql stop
sudo apt-get remove "^mysql.*"
sudo apt-get autoremove
sudo apt-get autoclean
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
wget https://dev.mysql.com/get/mysql-apt-config_0.8.6-1_all.deb
sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.6-1_all.deb
sudo rm -rf /var/lib/apt/lists/*
sudo apt-get clean
sudo apt-get update -q
sudo apt-get install -q -y --allow-unauthenticated -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" mysql-server libmysqlclient-dev
sudo mysql_upgrade
echo "Restart mysql..."
sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
sudo mysql -e "create schema if not exists \`nabu-3\` default charset set='utf8mb4' default collate='utf8mb4_general_ci'"
sudo mysql -e "grant all on 'nabu-3'.* to 'nabu-3'@'%' identified by 'nabu-3' with grant option"
| #!/usr/bin/env bash
set -ex
echo "Installing MySQL 5.7..."
sudo service mysql stop
sudo apt-get remove "^mysql.*"
sudo apt-get autoremove
sudo apt-get autoclean
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
wget https://dev.mysql.com/get/mysql-apt-config_0.8.6-1_all.deb
sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.6-1_all.deb
sudo rm -rf /var/lib/apt/lists/*
sudo apt-get clean
sudo apt-get update -q
sudo apt-get install -q -y --allow-unauthenticated -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" mysql-server libmysqlclient-dev
sudo mysql_upgrade
echo "Restart mysql..."
sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
sudo mysql -e "create schema if not exists \`nabu-3\` default charset set utf8mb4 default collate utf8mb4_general_ci"
sudo mysql -e "grant all on 'nabu-3'.* to 'nabu-3'@'%' identified by 'nabu-3' with grant option"
|
Use release draft to test the new binaries | #!/usr/bin/env bash
# Automatically installs OCLint.
# This script was designed for usage in CI systems.
curl -OL https://github.com/oclint/oclint/releases/download/v0.10.3/oclint-0.10.3-x86_64-linux-3.13.0-74-generic.tar.gz
mkdir -p ~/oclint
tar xf oclint-0.10.3-x86_64-linux-3.13.0-74-generic.tar.gz -C ~/oclint --strip-components=1
rm oclint-0.10.3-x86_64-linux-3.13.0-74-generic.tar.gz
export OCLINT_HOME="$HOME/oclint"
export PATH="$OCLINT_HOME/bin:$PATH"
| #!/usr/bin/env bash
# Automatically installs OCLint.
# This script was designed for usage in CI systems.
curl -OL http://archives.oclint.org/releases/0.11/oclint-0.11-x86_64-linux-4.4.0-36-generic.tar.gz
mkdir -p ~/oclint
tar xf oclint-0.11-x86_64-linux-4.4.0-36-generic.tar.gz -C ~/oclint --strip-components=1
rm oclint-0.11-x86_64-linux-4.4.0-36-generic.tar.gz
export OCLINT_HOME="$HOME/oclint"
export PATH="$OCLINT_HOME/bin:$PATH"
|
Remove unused functionality from zsh | alias be="bundle exec"
alias bspec="foreman run bin/rspec $1 --no-profile"
alias c="clear"
alias edit="open -a MacVim.app $1"
alias fucking="git"
alias gitlog="git log --pretty=oneline"
alias lsl="ls -al"
alias migrate="bundle exec rake db:migrate db:test:prepare"
function port() { lsof -i tcp:"$1"; }
function cleanlocal() {
echo "Before cleanup:";
git branch;
echo "----";
git branch --merged | grep -v "\*" | xargs -n 1 git branch -D;
echo "----";
echo "After cleanup:";
git branch;
}
function mergeFirstIntoSecond() {
git checkout $1;
git branch --set-upstream-to=origin/$1 $1
git pull --rebase;
git checkout $2;
git branch --set-upstream-to=origin/$2 $2
git pull --rebase;
git merge $1;
}
function track() {
branch_name=$(git branch | grep "*");
branch_name="${branch_name/\* /}";
git branch --set-upstream-to=origin/$branch_name $branch_name;
}
# protractor integration specs for Plan
function protract() {
if [ $# -eq 1 ]
then
node_modules/protractor/bin/protractor protractor/conf.js --specs=protractor/features/$1.feature
else
node_modules/protractor/bin/protractor protractor/conf.js
fi
}
| alias be="bundle exec"
alias bspec="foreman run bin/rspec $1 --no-profile"
alias c="clear"
alias edit="open -a MacVim.app $1"
alias fucking="git"
alias gitlog="git log --pretty=oneline"
alias lsl="ls -al"
alias migrate="bundle exec rake db:migrate db:test:prepare"
function port() { lsof -i tcp:"$1"; }
function track() {
branch_name=$(git branch | grep "*");
branch_name="${branch_name/\* /}";
git branch --set-upstream-to=origin/$branch_name $branch_name;
}
|
Fix KoreBuild version for Linux | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/rel/1.0.0.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test ! -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" | #!/usr/bin/env bash
repoFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $repoFolder
koreBuildZip="https://github.com/aspnet/KoreBuild/archive/rel/1.1.0.zip"
if [ ! -z $KOREBUILD_ZIP ]; then
koreBuildZip=$KOREBUILD_ZIP
fi
buildFolder=".build"
buildFile="$buildFolder/KoreBuild.sh"
if test ! -d $buildFolder; then
echo "Downloading KoreBuild from $koreBuildZip"
tempFolder="/tmp/KoreBuild-$(uuidgen)"
mkdir $tempFolder
localZipFile="$tempFolder/korebuild.zip"
retries=6
until (wget -O $localZipFile $koreBuildZip 2>/dev/null || curl -o $localZipFile --location $koreBuildZip 2>/dev/null)
do
echo "Failed to download '$koreBuildZip'"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "Waiting 10 seconds before retrying. Retries left: $retries"
sleep 10s
done
unzip -q -d $tempFolder $localZipFile
mkdir $buildFolder
cp -r $tempFolder/**/build/** $buildFolder
chmod +x $buildFile
# Cleanup
if test ! -d $tempFolder; then
rm -rf $tempFolder
fi
fi
$buildFile -r $repoFolder "$@" |
Fix syntax error in shell script | #! /usr/bin/env bash
IGNORE_DIFF_ON="README.md CONTRIBUTING.md Makefile .gitignore .github/*"
last_tagged_commit=$(git describe --tags --abbrev=0 --first-parent) # --first-parent ensures we don't follow tags not published in master through an unlikely intermediary merge commit
if git diff-index --name-only --exit-code "$last_tagged_commit" -- . $()echo " $IGNORE_DIFF_ON" | sed 's/ / :(exclude)/g') # Check if any file that has not be listed in IGNORE_DIFF_ON has changed since the last tag was published.
then
echo "No functional changes detected."
exit 1
else echo "The functional files above were changed."
fi
| #! /usr/bin/env bash
IGNORE_DIFF_ON="README.md CONTRIBUTING.md Makefile .gitignore .github/*"
last_tagged_commit=$(git describe --tags --abbrev=0 --first-parent) # --first-parent ensures we don't follow tags not published in master through an unlikely intermediary merge commit
if git diff-index --name-only --exit-code "$last_tagged_commit" -- . $(echo " $IGNORE_DIFF_ON" | sed 's/ / :(exclude)/g') # Check if any file that has not be listed in IGNORE_DIFF_ON has changed since the last tag was published.
then
echo "No functional changes detected."
exit 1
else echo "The functional files above were changed."
fi
|
Build script checks we can install or not | #!/bin/bash
# Simple build script to compile project
cd ${PWD##}/utils
make clean
make
cd ../gxx
make clean
make
echo "แกแแญแแ แแ แกแฃแแแ แแแแฎแแแ แแแแแก แแ แแแแแแแแแแ"
sudo make install
cd ../libx
sudo make install
cd .. | #!/bin/bash
# Simple build script to compile project
# Build Utils
cd ${PWD##}/utils
make clean
make
# Build GXX
cd ../gxx
make clean
make
# Check we can run sudo command or not
CAN_I_RUN_SUDO=$(sudo -n uptime 2>&1 | grep "load" | wc -l)
if [ ${CAN_I_RUN_SUDO} -gt 0 ]
then
echo "Installing..."
else
echo "You must have a root access"
fi
# Install GXX
sudo make install
# Install Libx
cd ../libx
sudo make install
cd ..
echo "Done" |
Add the LICENSE file to reset permissions | #!/bin/sh
# Reset the file permissions for all the relevant files
chmod +x reset_permissions.sh
chmod -x README.md .gitignore requirements.txt
find . -name "*.py" -exec chmod -x {} \;
chmod +x add_initial_entities.py
chmod +x scheduler.py
chmod +x run_task.py
chmod +x task_train_system.py
chmod +x task_run_system.py
chmod +x task_benchmark_result.py
chmod +x task_compare_trials.py
chmod +x task_compare_benchmark_results.py
chmod +x generate_simulated_dataset.py
chmod +x plot_results.py
chmod +x recalculate_bounding_boxes.py
chmod +x verify_bounding_boxes_manually.py
chmod +x verify_database.py
| #!/bin/sh
# Reset the file permissions for all the relevant files
chmod +x reset_permissions.sh
chmod -x README.md .gitignore requirements.txt LICENSE
find . -name "*.py" -exec chmod -x {} \;
chmod +x add_initial_entities.py
chmod +x scheduler.py
chmod +x run_task.py
chmod +x task_train_system.py
chmod +x task_run_system.py
chmod +x task_benchmark_result.py
chmod +x task_compare_trials.py
chmod +x task_compare_benchmark_results.py
chmod +x generate_simulated_dataset.py
chmod +x plot_results.py
chmod +x recalculate_bounding_boxes.py
chmod +x verify_bounding_boxes_manually.py
chmod +x verify_database.py
|
Fix typo in key generation script | #!/bin/sh
KEY_PATH=".likelines_secret_key"
echo "[KEY] Generating seret key..."
if [ -f "$KEY_PATH" ]; then
echo "[KEY] Old key found in: $KEY_PATH"
else
python -m LikeLines.secretkey > $KEY_PATH
echo "[KEY] Created key in: $KEY_PATH"
fi
echo -n "[KEY] *** " | cat - $KEY_PATH
| #!/bin/sh
KEY_PATH=".likelines_secret_key"
echo "[KEY] Generating secret key..."
if [ -f "$KEY_PATH" ]; then
echo "[KEY] Old key found in: $KEY_PATH"
else
python -m LikeLines.secretkey > $KEY_PATH
echo "[KEY] Created key in: $KEY_PATH"
fi
echo -n "[KEY] *** " | cat - $KEY_PATH
|
Make docker build script more portable | #! /bin/bash
set -e
docker build -t libass/javascriptsubtitlesoctopus .
docker run -it --rm -v ${PWD}:/code libass/javascriptsubtitlesoctopus:latest
| #! /bin/sh
set -e
cd "$(dirname "$0")"
docker build -t libass/javascriptsubtitlesoctopus .
docker run -it --rm -v "${PWD}":/code libass/javascriptsubtitlesoctopus:latest
|
Move integration test setup to use psycopg driver instead of pg8000 | #!/bin/bash
echo Setting up integration test dependencies
docker-compose -f deps/docker-compose.yaml up -d
# These are from deps/minio/config/config.json, not real S3 creds. They should match the ones set in that config or tests will fail.
export ANCHORE_TEST_S3_ACCESS_KEY="9EB92C7W61YPFQ6QLDOU"
export ANCHORE_TEST_S3_SECRET_KEY="TuHo2UbBx+amD3YiCeidy+R3q82MPTPiyd+dlW+s"
export ANCHORE_TEST_S3_URL="http://localhost:9000"
export ANCHORE_TEST_S3_BUCKET="testarchivebucket"
export ANCHORE_TEST_SWIFT_AUTH_URL="http://localhost:8080/auth/v1.0"
export ANCHORE_TEST_SWIFT_KEY="testing"
export ANCHORE_TEST_SWIFT_USER="test:tester"
export ANCHORE_TEST_SWIFT_CONTAINER="testarchive"
export ANCHORE_TEST_DB_URL="postgresql+pg8000://postgres:postgres@localhost:5432/postgres"
export ANCHORE_TEST_DB_USER="postgres"
export ANCHORE_TEST_DB_PASS="postgres"
export ANCHORE_TEST_DATA_ENV_DIR="${PWD}/../data/test_data_env"
| #!/bin/bash
echo Setting up integration test dependencies
docker-compose -f deps/docker-compose.yaml up -d
# These are from deps/minio/config/config.json, not real S3 creds. They should match the ones set in that config or tests will fail.
export ANCHORE_TEST_S3_ACCESS_KEY="9EB92C7W61YPFQ6QLDOU"
export ANCHORE_TEST_S3_SECRET_KEY="TuHo2UbBx+amD3YiCeidy+R3q82MPTPiyd+dlW+s"
export ANCHORE_TEST_S3_URL="http://localhost:9000"
export ANCHORE_TEST_S3_BUCKET="testarchivebucket"
export ANCHORE_TEST_SWIFT_AUTH_URL="http://localhost:8080/auth/v1.0"
export ANCHORE_TEST_SWIFT_KEY="testing"
export ANCHORE_TEST_SWIFT_USER="test:tester"
export ANCHORE_TEST_SWIFT_CONTAINER="testarchive"
export ANCHORE_TEST_DB_URL="postgresql://postgres:postgres@localhost:5432/postgres"
export ANCHORE_TEST_DB_USER="postgres"
export ANCHORE_TEST_DB_PASS="postgres"
export ANCHORE_TEST_DATA_ENV_DIR="${PWD}/../data/test_data_env"
|
Add OS X Alias for clearing DNS cache | # helpful shortcut aliases
alias e='emacs'
alias emacs='emacs -nw'
alias h='history'
alias hgrep='history 1 | grep'
alias cpr='rsync -r --stats --progress'
# os-specific aliases
case `uname` in
Darwin)
alias ls='ls -FG'
;;
Linux)
alias ls='ls -F --color'
;;
esac
# /os aliases
| # helpful shortcut aliases
alias e='emacs'
alias emacs='emacs -nw'
alias h='history'
alias hgrep='history | grep'
alias cpr='rsync -r --stats --progress'
# os-specific aliases
case `uname` in
Darwin)
alias ls='ls -FG'
alias flushdns='sudo killall -HUP mDNSResponder'
;;
Linux)
alias ls='ls -F --color'
;;
esac
# /os aliases
|
Update the download URL for linux-headers | pkg_name=linux-headers
pkg_origin=core
pkg_version=4.3
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_license=('gplv2')
pkg_source=http://ftp.kernel.org/pub/linux/kernel/v4.x/linux-${pkg_version}.tar.xz
pkg_shasum=4a622cc84b8a3c38d39bc17195b0c064d2b46945dfde0dae18f77b120bc9f3ae
pkg_dirname=linux-$pkg_version
pkg_deps=()
pkg_build_deps=(core/coreutils core/diffutils core/patch core/make core/gcc)
pkg_include_dirs=(include)
do_build() {
make headers_install ARCH=x86 INSTALL_HDR_PATH=$pkg_prefix
}
do_install() {
find $pkg_prefix/include \( -name ..install.cmd -o -name .install \) -print0 | xargs -0 rm -v
}
# ----------------------------------------------------------------------------
# **NOTICE:** What follows are implementation details required for building a
# first-pass, "stage1" toolchain and environment. It is only used when running
# in a "stage1" Studio and can be safely ignored by almost everyone. Having
# said that, it performs a vital bootstrapping process and cannot be removed or
# significantly altered. Thank you!
# ----------------------------------------------------------------------------
if [[ "$STUDIO_TYPE" = "stage1" ]]; then
pkg_build_deps=()
fi
| pkg_name=linux-headers
pkg_origin=core
pkg_version=4.3
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_license=('gplv2')
pkg_source=https://www.kernel.org/pub/linux/kernel/v4.x/linux-${pkg_version}.tar.xz
pkg_shasum=4a622cc84b8a3c38d39bc17195b0c064d2b46945dfde0dae18f77b120bc9f3ae
pkg_dirname=linux-$pkg_version
pkg_deps=()
pkg_build_deps=(core/coreutils core/diffutils core/patch core/make core/gcc)
pkg_include_dirs=(include)
do_build() {
make headers_install ARCH=x86 INSTALL_HDR_PATH=$pkg_prefix
}
do_install() {
find $pkg_prefix/include \( -name ..install.cmd -o -name .install \) -print0 | xargs -0 rm -v
}
# ----------------------------------------------------------------------------
# **NOTICE:** What follows are implementation details required for building a
# first-pass, "stage1" toolchain and environment. It is only used when running
# in a "stage1" Studio and can be safely ignored by almost everyone. Having
# said that, it performs a vital bootstrapping process and cannot be removed or
# significantly altered. Thank you!
# ----------------------------------------------------------------------------
if [[ "$STUDIO_TYPE" = "stage1" ]]; then
pkg_build_deps=()
fi
|
Format ping output as one line. | #! /bin/bash
. "$(dirname "$0")/config.sh"
set -e
whitely echo Ping each host from the other
for host in $HOSTS; do
for other in $HOSTS; do
if [ "$host" != "$other" ]; then
run_on $host $PING $other &
pids="$pids $!"
fi
done
done
for pid in $pids; do wait $pid; done
unset pids
whitely echo Check we can reach docker
function check_docker() {
docker_version=$(docker_on $1 version)
docker_info=$(docker_on $1 info)
docker_weave_version=$(docker_on $1 inspect -f {{.Created}} weaveworks/weave:${WEAVE_VERSION:-latest})
weave_version=$(weave_on $1 version)
cat << EOF
Host Version Info: $1
=====================================
# docker version
$docker_version
# docker info
$docker_info
# weave version
$docker_weave_version
$weave_version
EOF
}
for host in $HOSTS; do
check_docker $host &
pids="$pids $!"
done
for pid in $pids; do wait $pid; done
| #! /bin/bash
. "$(dirname "$0")/config.sh"
set -e
whitely echo Ping each host from the other
for host in $HOSTS; do
for other in $HOSTS; do
if [ "$host" != "$other" ]; then
echo $(run_on $host $PING $other) &
pids="$pids $!"
fi
done
done
for pid in $pids; do wait $pid; done
unset pids
whitely echo Check we can reach docker
function check_docker() {
docker_version=$(docker_on $1 version)
docker_info=$(docker_on $1 info)
docker_weave_version=$(docker_on $1 inspect -f {{.Created}} weaveworks/weave:${WEAVE_VERSION:-latest})
weave_version=$(weave_on $1 version)
cat << EOF
Host Version Info: $1
=====================================
# docker version
$docker_version
# docker info
$docker_info
# weave version
$docker_weave_version
$weave_version
EOF
}
for host in $HOSTS; do
check_docker $host &
pids="$pids $!"
done
for pid in $pids; do wait $pid; done
|
Support `$STEAM_ROLLER` to force a build unconditionally. | #!/bin/bash
#
# Check to see what directories have been affected by a change. If directories
# have not been affected, exit 0
#
# Since we can only stop the build early by calling "exit" from within the
# .travis.yml in the `before_install`, we exit non-zero if we want the build to
# be skipped, so we can do `|| exit 0` in the YAML.
# Don't do anything if $AFFECTED_DIRS is not set
if [ -z "$AFFECTED_DIRS" ]; then
echo 'AFFECTED_DIRS is not set. Not exiting and running everything.'
# If $AFFECTED_DIRS (a "|" separated list of directories) is set, see if we have
# any changes
else
git diff --name-only "$TRAVIS_COMMIT_RANGE" | grep -qE "^($AFFECTED_DIRS)" || {
echo "No files in $AFFECTED_DIRS have changed. Skipping CI run."
exit 1
}
fi
| #!/bin/bash
#
# Check to see what directories have been affected by a change. If directories
# have not been affected, exit 0
#
# Since we can only stop the build early by calling "exit" from within the
# .travis.yml in the `before_install`, we exit non-zero if we want the build to
# be skipped, so we can do `|| exit 0` in the YAML.
if [ -n "$STEAM_ROLLER" ]; then
echo 'STEAM_ROLLER is set. Not exiting and running everything.'
elif [ -z "$AFFECTED_DIRS" ]; then
# Don't do anything if $AFFECTED_DIRS is not set
echo 'AFFECTED_DIRS is not set. Not exiting and running everything.'
else
# If $AFFECTED_DIRS (a "|" separated list of directories) is set, see if we have
# any changes
git diff --name-only "$TRAVIS_COMMIT_RANGE" | grep -qE "^($AFFECTED_DIRS)" || {
echo "No files in $AFFECTED_DIRS have changed. Skipping CI run."
exit 1
}
fi
|
Change git address for read-only | #!/bin/sh
#
# More details at
# http://www.osehra.org/wiki/obtaining-testing-code
#
export DashboardsDir=$HOME/OSEHRA/Dashboards
mkdir -p $DashboardsDir
cd $DashboardsDir
git clone git@github.com:OSEHR/OSEHRA-Automated-Testing.git
git checkout --track UseCaseTesting
ln -s $DashboardsDir/OSEHRA-Automated-Testing/DashboardScripts/vista_common.cmake $HOME/OSEHRA/VistA-installation-scripts/Scripts
| #!/bin/sh
#
# More details at
# http://www.osehra.org/wiki/obtaining-testing-code
#
export DashboardsDir=$HOME/OSEHRA/Dashboards
mkdir -p $DashboardsDir
cd $DashboardsDir
git clone git://github.com/OSEHR/OSEHRA-Automated-Testing.git
git checkout --track UseCaseTesting
ln -s $DashboardsDir/OSEHRA-Automated-Testing/DashboardScripts/vista_common.cmake $HOME/OSEHRA/VistA-installation-scripts/Scripts
|
Stop before removing old container | #!/bin/bash
# Build image
docker build --pull -t jenkins-dood .
# Remove old jenkins container
docker rm jenkins
# Run container
docker run -dt -p 127.0.0.1:49001:8080 \
-v jenkins_home:/var/jenkins_home \
-v /var/run/docker.sock:/var/run/docker.sock \
--restart always \
--name jenkins \
jenkins-dood
| #!/bin/bash
# Build image
docker build --pull -t jenkins-dood .
# Stop & remove old jenkins container
docker stop jenkins
docker rm jenkins
# Run container
docker run -dt -p 127.0.0.1:49001:8080 \
-v jenkins_home:/var/jenkins_home \
-v /var/run/docker.sock:/var/run/docker.sock \
--restart always \
--name jenkins \
jenkins-dood
|
Revert "Checking $TRAVIS_PULL_REQUEST env var." | #!/bin/bash
if [ "$TRAVIS_BRANCH" == "master" ]; then
if [ "${ghToken:-false}" != "false" ]; then
git config --global user.email "tomaszguzialek_flask-api@travis-ci.org"
git config --global user.name "Travis CI"
export GIT_TAG=build-$TRAVIS_BUILD_NUMBER
git tag $GIT_TAG -a -m "Generated tag from TravisCI build $TRAVIS_BUILD_NUMBER"
git push -q https://$TAG_GENERATION_ACCESS_TOKEN@github.com/tomaszguzialek/flask-api.git --tags
echo Created and pushed tag $GIT_TAG
else
echo Skipping creating the tag as the build is a pull request push
fi
else
echo Skipping creating the tag as the build is not on master branch
fi
| #!/bin/bash
if [ "$TRAVIS_BRANCH" == "master" ]; then
git config --global user.email "tomaszguzialek_flask-api@travis-ci.org"
git config --global user.name "Travis CI"
export GIT_TAG=build-$TRAVIS_BUILD_NUMBER
git tag $GIT_TAG -a -m "Generated tag from TravisCI build $TRAVIS_BUILD_NUMBER"
git push -q https://$TAG_GENERATION_ACCESS_TOKEN@github.com/tomaszguzialek/flask-api.git --tags
echo Created and pushed tag $GIT_TAG
else
echo Skipping pushing the tag as the build is not on master branch
fi
|
Update to reflect changes in docker-image-dev. | #!/bin/bash
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
set -e
export SOURCE_DIR=/source
export NUM_THREADS=6
export MALLOC_ARENA_MAX=1
export MAVEN_OPTS="-Xss1m -Xms128m -Xmx2g"
source /etc/profile.d/devtoolset-7.sh || true
ccache --max-size=1250M
ccache --set-config=compression=true
ccache --print-config
cd ${SOURCE_DIR}
sh ./bootstrap.sh java
mvn -V install --no-snapshot-updates --batch-mode --threads ${NUM_THREADS}
bash ${SOURCE_DIR}/bootstrap-cmake.sh ${SOURCE_DIR}
make -j ${NUM_THREADS}
ctest3 --output-on-failure -j ${NUM_THREADS}
ccache --show-stats
make install
| #!/bin/bash
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
set -e
export SOURCE_DIR=/source
export NUM_THREADS=6
export MALLOC_ARENA_MAX=1
export MAVEN_OPTS="-Xss1m -Xms128m -Xmx2g"
source /etc/profile.d/enable-devtoolset-7.sh
source /etc/profile.d/enable-rh-maven35.sh
ccache --max-size=1250M
ccache --set-config=compression=true
ccache --print-config
cd ${SOURCE_DIR}
sh ./bootstrap.sh java
mvn -V install --no-snapshot-updates --batch-mode --threads ${NUM_THREADS}
bash ${SOURCE_DIR}/bootstrap-cmake.sh ${SOURCE_DIR}
make -j ${NUM_THREADS}
ctest3 --output-on-failure -j ${NUM_THREADS}
ccache --show-stats
make install
|
Update generation script to use the new version pandoc and to use an external CSS file. | #!/bin/bash
pandoc -f markdown --table-of-contents --self-contained -t html5 api_2_0_spec.md
| #!/bin/bash
~/.cabal/bin/pandoc -f markdown --table-of-contents -c api_2_0_spec.css --self-contained -t html5 api_2_0_spec.md
|
Fix directories where test data is generated | #!/usr/bin/sh
mkdir both client test
echo $RANDOM | gpg --encrypt > both/test
echo $RANDOM | gpg --encrypt > client/test
echo $RANDOM | gpg --encrypt > server/test
| #!/usr/bin/sh
mkdir both client server
echo $RANDOM | gpg --encrypt > both/test
echo $RANDOM | gpg --encrypt > client/test
echo $RANDOM | gpg --encrypt > server/test
|
Switch to more reliable method to upload coverage | #!/bin/bash
set -exo pipefail
if [[ -e .coverage ]]; then
python -m pip install codecov
python -m codecov --env TRAVIS_OS_NAME,NOX_SESSION --file coverage.xml
fi
| #!/bin/bash
set -exo pipefail
# Cribbed from Trio's ci.sh
function curl-harder() {
for BACKOFF in 0 1 2 4 8 15 15 15 15; do
sleep $BACKOFF
if curl -fL --connect-timeout 5 "$@"; then
return 0
fi
done
return 1
}
if [ "$JOB_NAME" = "" ]; then
JOB_NAME="${TRAVIS_OS_NAME}-${TRAVIS_PYTHON_VERSION:-unknown}"
fi
curl-harder -o codecov.sh https://codecov.io/bash
bash codecov.sh -f coverage.xml -n $JOB_NAME
|
Update gc parsing shell script | #!/bin/bash
rm *-gc.log
echo "Running Simple..."
java -Xloggc:simple-gc.log \
-verbose:gc \
-XX:+PrintGCDateStamps \
-XX:+PrintGCApplicationStoppedTime \
-cp bin:lib/disruptor-3.2.0.jar yow2013.immutable.SimplePerformanceTest
echo "Done"
grep 'stopped:' simple-gc.log | cut -d':' -f 2 | cut -d' ' -f 2 | sort -n | awk '{ printf "%1.3f\n", $1 }' | (echo " Count Millis" ; uniq -c )
echo "Running Custom..."
java -Xloggc:custom-gc.log \
-verbose:gc \
-XX:+PrintGCDateStamps \
-XX:+PrintGCApplicationStoppedTime \
-cp bin:lib/disruptor-3.2.0.jar yow2013.immutable.CustomPerformanceTest
echo "Done"
grep 'stopped:' custom-gc.log | cut -d':' -f 2 | cut -d' ' -f 2 | sort -n | awk '{ printf "%1.3f\n", $1 }' | (echo " Count Millis" ; uniq -c )
| #!/bin/bash
rm *-gc.log
echo "Running Simple..."
$JAVA_HOME/bin/java -Xloggc:simple-gc.log \
-verbose:gc \
-XX:+PrintGCDateStamps \
-XX:+PrintGCApplicationStoppedTime \
-cp bin:lib/disruptor-3.2.0.jar yow2013.immutable.SimplePerformanceTest
echo "Done"
grep 'stopped:' simple-gc.log | sed 's/.*stopped: \([0-9.]*\) seconds/\1/' | sort -n | awk '{ printf "%1.3f\n", $1 }' | (echo " Count Millis" ; uniq -c )
echo "Running Custom..."
$JAVA_HOME/bin/java -Xloggc:custom-gc.log \
-verbose:gc \
-XX:+PrintGCDateStamps \
-XX:+PrintGCApplicationStoppedTime \
-cp bin:lib/disruptor-3.2.0.jar yow2013.immutable.CustomPerformanceTest
echo "Done"
grep 'stopped:' custom-gc.log | sed 's/.*stopped: \([0-9.]*\) seconds/\1/' | sort -n | awk '{ printf "%1.3f\n", $1 }' | (echo " Count Millis" ; uniq -c )
|
Add quotes around file names | #
# Displays installation information for not found commands.
#
# Authors:
# Joseph Jon Booker <joe@neoturbine.net>
#
if [[ -s /etc/zsh_command_not_found ]]; then
source /etc/zsh_command_not_found
fi
| #
# Displays installation information for not found commands.
#
# Authors:
# Joseph Jon Booker <joe@neoturbine.net>
#
if [[ -s '/etc/zsh_command_not_found' ]]; then
source '/etc/zsh_command_not_found'
fi
|
Backup the original image before filling it up with zeros | #!/usr/bin/env bash
set -o xtrace
set -o verbose
set -o errexit
#set -o nounset
user=`whoami`
guest=$1
image=$2
guest_user=$3
### add check if variables are present ###
if [ "$guest_user" = "" ]; then
echo "User name (hit enter to use '$user')?"
read guest_user
fi
if [ "$guest_user" = "" ]; then
guest_user=$user
fi
### (on GUEST) ###
ssh -T $guest_user@$guest<<EOSSH
sudo dd if=/dev/zero of=/mytempfile
sudo rm -rf /mytempfile
sudo shutdown -h now
EOSSH
### (on HOST) ###
sudo bash <<EOF
mv /vmdata/storage/${image}.img /vmdata/storage/${image}.img.backup
qemu-img convert -O qcow2 /vmdata/storage/${image}.img.backup /vmdata/storage/${image}.img
virsh start --domain $image && rm -rf /vmdata/storage/${image}.img.backup
EOF
| #!/usr/bin/env bash
set -o xtrace
set -o verbose
set -o errexit
#set -o nounset
user=`whoami`
guest=$1
image=$2
guest_user=$3
### add check if variables are present ###
if [ "$guest_user" = "" ]; then
echo "User name (hit enter to use '$user')?"
read guest_user
fi
if [ "$guest_user" = "" ]; then
guest_user=$user
fi
# host
sudo bash <<EOF
virsh destroy --domain ${image}
cp /vmdata/storage/${image}.img /vmdata/storage/${image}.img.backup
virsh start --domain ${image}
sleep 60
EOF
# guest
ssh -T $guest_user@$guest<<EOSSH
sudo dd if=/dev/zero of=/mytempfile
sudo rm -rf /mytempfile
EOSSH
# (again) host
sudo bash <<EOF
virsh destroy --domain ${image}
sleep 60
mv /vmdata/storage/${image}.img /vmdata/storage/${image}.img.tmp
qemu-img convert -O qcow2 /vmdata/storage/${image}.img.tmp /vmdata/storage/${image}.img
virsh start --domain $image && rm -rf /vmdata/storage/${image}.img.tmp
EOF
|
Change output mode to color | #!/bin/sh
if [ ! -e "/config/nzbget.conf" ]; then
gunzip -c /usr/share/doc/nzbget/examples/nzbget.conf.gz | sed 's/^MainDir=.*$/MainDir=\/downloads/g' > /config/nzbget.conf
fi
exec /usr/bin/nzbget -D -c /config/nzbget.conf
| #!/bin/sh
if [ ! -e "/config/nzbget.conf" ]; then
gunzip -c /usr/share/doc/nzbget/examples/nzbget.conf.gz | sed 's/^MainDir=.*$/MainDir=\/downloads/g' > /config/nzbget.conf
fi
exec /usr/bin/nzbget -D -c /config/nzbget.conf -o outputmode=color
|
Exit if railo download fails | #!/bin/bash
railo="http://www.getrailo.org/railo/remote/download42/$RAILO_VERSION/custom/all/railo-$RAILO_VERSION-jars.tar.gz"
railo_folder="railo-$RAILO_VERSION-jars"
echo "Installing Railo"
echo "Downloading Railo " $RAILO_VERSION
mkdir /opt/railo
mkdir /opt/railo/config
mkdir /opt/railo/config/server
mkdir /opt/railo/config/web
curl -o /opt/railo/railo.tar.gz $railo
tar -xzf /opt/railo/railo.tar.gz -C /opt/railo
ln -s /opt/railo/$railo_folder /opt/railo/current
| #!/bin/bash
railo="http://www.getrailo.org/railo/remote/download42/$RAILO_VERSION/custom/all/railo-$RAILO_VERSION-jars.tar.gz"
railo_folder="railo-$RAILO_VERSION-jars"
echo "Installing Railo"
echo "Downloading Railo " $RAILO_VERSION
mkdir /opt/railo
mkdir /opt/railo/config
mkdir /opt/railo/config/server
mkdir /opt/railo/config/web
curl -o /opt/railo/railo.tar.gz $railo
if [ -f "/opt/railo/railo.tar.gz" ]; then
echo "Download Complete"
else
echo "Download of Railo Failed Exiting..."
exit 1
fi
tar -xzf /opt/railo/railo.tar.gz -C /opt/railo
ln -s /opt/railo/$railo_folder /opt/railo/current
|
Make this actually work now that the tool is checked in here instead of just sitting around in my working directory. | #! /bin/sh
# Script to push docs from my development area to SourceForge, where the
# update-docs.sh script unpacks them into their final destination.
START="`pwd`"
MYDIR="`dirname $0`"
cd "$MYDIR"
MYDIR="`pwd`"
HTMLDIR="${HTMLDIR:-html}"
cd "../$HTMLDIR"
make --no-print-directory || exit $?
RELEASE=`grep '^RELEASE=' Makefile | sed 's|RELEASE=||'`
make --no-print-directory HTMLDIR="$HTMLDIR" bziphtml
scp "html-$RELEASE.tar.bz2" python.sourceforge.net:/home/users/fdrake/python-docs-update.tar.bz2
| #! /bin/sh
# Script to push docs from my development area to SourceForge, where the
# update-docs.sh script unpacks them into their final destination.
START="`pwd`"
MYDIR="`dirname $0`"
cd "$MYDIR"
MYDIR="`pwd`"
HTMLDIR="${HTMLDIR:-html}"
cd "../$HTMLDIR"
make --no-print-directory || exit $?
cd ..
RELEASE=`grep '^RELEASE=' Makefile | sed 's|RELEASE=||'`
make --no-print-directory HTMLDIR="$HTMLDIR" bziphtml
scp "html-$RELEASE.tar.bz2" python.sourceforge.net:/home/users/fdrake/python-docs-update.tar.bz2
|
Move Arcanist initialization into a local config script |
# Initialize all the completions
[ -f $(brew --prefix)/etc/bash_completion ] && . $(brew --prefix)/etc/bash_completion
# Initialize the Python version manager
export PYENV_VIRTUALENV_DISABLE_PROMPT=1
if which -s pyenv; then
eval "$(pyenv init -)"
else
echo "Failed to initialize PyEnv"
fi
if which -s pyenv-virtualenv; then
eval "$(pyenv virtualenv-init -)"
else
echo "Failed to initialize PyEnv VitualEnv"
fi
# Arcanist Configuration
#
# Since there is only one system where I use Arcanist, perhaps this
# should go into the .local version of this file.
ARCANIST_BASH_COMPLETIONS=$HOME/Development/arcanist/resources/shell/bash-completion
[ -f $ARCANIST_BASH_COMPLETIONS ] && . $ARCANIST_BASH_COMPLETIONS
|
# Initialize all the completions
[ -f $(brew --prefix)/etc/bash_completion ] && . $(brew --prefix)/etc/bash_completion
# Initialize the Python version manager
export PYENV_VIRTUALENV_DISABLE_PROMPT=1
if which -s pyenv; then
eval "$(pyenv init -)"
else
echo "Failed to initialize PyEnv"
fi
if which -s pyenv-virtualenv; then
eval "$(pyenv virtualenv-init -)"
else
echo "Failed to initialize PyEnv VitualEnv"
fi
# Arcanist Configuration
#
# This is actually done in the .local version of this file. It is just left here
# as an example.
#ARCANIST_BASH_COMPLETIONS=$HOME/Development/arcanist/resources/shell/bash-completion
#[ -f $ARCANIST_BASH_COMPLETIONS ] && . $ARCANIST_BASH_COMPLETIONS
|
Fix header, skip tls validation for now | #!/bin/bash
set -x -e
credhub l
TOKEN="$(credhub --token)"
echo $SIGNED_BY_TO_ROTATE | jq -r .[] | while read object; do
curl https://${CREDHUB_SERVER}/api/v1/bulk-regenerate \
-X POST \
-H "authorization: bearer ${TOKEN}" \
-H 'content-type: application/json' \
-d "{\"signed_by\": \"${object}\""
done
| #!/bin/bash
set -x -e
credhub l
TOKEN="$(credhub --token)"
echo $SIGNED_BY_TO_ROTATE | jq -r .[] | while read object; do
curl https://${CREDHUB_SERVER}/api/v1/bulk-regenerate \
-X POST \
-H "authorization: ${TOKEN}" \
-H 'content-type: application/json' \
-d "{\"signed_by\": \"${object}\"" \
-k
done
|
Adjust script to use current version of mysql | function mysql_reload() {
launchctl unload ~/Library/LaunchAgents/homebrew.mxcl.mysql.plist
launchctl load ~/Library/LaunchAgents/homebrew.mxcl.mysql.plist
}
| function mysql_reload() {
launchctl unload ~/Library/LaunchAgents/homebrew.mxcl.mysql55.plist
launchctl load ~/Library/LaunchAgents/homebrew.mxcl.mysql55.plist
}
|
Fix spurious space after quoted-newline | #!/bin/bash
apt-get update -y && apt-get install -y build-essential \
g++ \
flex \
bison \
gperf \
ruby \
perl \
libsqlite3-dev \
libfontconfig1-dev \
libicu-dev \
libfreetype6 \
libssl-dev \
libpng-dev \
libjpeg-dev
cd /opt/
mkdir phantomJS
cd phantomJS
git clone git://github.com/ariya/phantomjs.git
cd phantomjs
git checkout 2.0
./build.sh --confirm --jobs 8
ln -s /opt/phantomJS/phantomjs/bin/phantomjs /usr/bin/phantomjs | #!/bin/bash
apt-get update -y && apt-get install -y build-essential \
bison \
flex \
g++ \
gperf \
libfontconfig1-dev \
libfreetype6 \
libicu-dev \
libjpeg-dev \
libpng-dev \
libsqlite3-dev \
libssl-dev \
perl \
ruby \
|| exit 1
cd /opt/ || exit 1
mkdir phantomJS || exit 1
cd phantomJS || exit 1
git clone git://github.com/ariya/phantomjs.git || exit 1
cd phantomjs || exit 1
git checkout 2.0 || exit 1
./build.sh --confirm --jobs 8 || exit 1
ln -s /opt/phantomJS/phantomjs/bin/phantomjs /usr/bin/phantomjs || exit 1
|
Add script that read out sdc:nic ip information to generate nsd_listen.conf file | # Configure nsd-control
# Run it as nsd user to have correct permissions
# OpenSSL require rnd file in home temporary
sudo -u nsd HOME=/opt/local/etc/nsd nsd-control-setup
rm /opt/local/etc/nsd/.rnd
# Enable nsd service
svcadm enable svc:/network/nsd:default
| # Configure nsd-control
# Run it as nsd user to have correct permissions
# OpenSSL require rnd file in home temporary
sudo -u nsd HOME=/opt/local/etc/nsd nsd-control-setup
rm /opt/local/etc/nsd/.rnd
# Create nsd_listen.conf file with all ip addresses managed
# by vmadm sdc:nics / mdata
NSD_LISTEN_CONF='/opt/local/etc/nsd/nsd_listen.conf'
echo > ${NSD_LISTEN_CONF}
if mdata-get sdc:nics 1>/dev/null 2>&1; then
echo "ip-address: 127.0.0.1" > ${NSD_LISTEN_CONF}
echo "ip-address: ::1" >> ${NSD_LISTEN_CONF}
for ip in $(mdata-get sdc:nics | json -a ip); do
echo "ip-address: ${ip%/*}" >> ${NSD_LISTEN_CONF}
done
fi
# Enable nsd service
svcadm enable svc:/network/nsd:default
|
Allow duplicates when building indices | #!/bin/bash
sort -u pterms.txt | perl break.pl | db_load -T -t btree pt.idx
sort -u rterms.txt | perl break.pl | db_load -T -t btree rt.idx
sort -u scores.txt | perl break.pl | db_load -T -t btree sc.idx
db_load -f reviews.txt -T -t hash rw.idx
| #!/bin/bash
sort -u pterms.txt | perl break.pl | db_load -T -t btree -c duplicates=1 pt.idx
sort -u rterms.txt | perl break.pl | db_load -T -t btree -c duplicates=1 rt.idx
sort -u scores.txt | perl break.pl | db_load -T -t btree -c duplicates=1 sc.idx
db_load -f reviews.txt -T -t hash rw.idx
|
Check versioning only if code has been updated | #! /usr/bin/env bash
# This script is the entry point for the Travis tests platform.
# It first checks that the package version has been updated ("bump" in the jargon),
# then it runs tests via "make test".
set -x
current_version=`python setup.py --version`
if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" != false ]]
then
if git rev-parse $current_version
then
set +x
echo "Version $version already exists. Please update version number in setup.py before merging this branch into master."
exit 1
fi
if git diff-index master --quiet CHANGELOG.md
then
set +x
echo "CHANGELOG.md has not been modified. Please update it before merging this branch into master."
exit 1
fi
fi
make test
| #! /usr/bin/env bash
# This script is the entry point for the Travis tests platform.
# It first checks that the package version has been updated ("bump" in the jargon),
# then it runs tests via "make test".
set -x
current_version=`python setup.py --version`
if ! git diff-index --quiet master openfisca_france
then
if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" != false ]]
then
if git rev-parse $current_version
then
set +x
echo "Version $version already exists. Please update version number in setup.py before merging this branch into master."
exit 1
fi
if git diff-index master --quiet CHANGELOG.md
then
set +x
echo "CHANGELOG.md has not been modified. Please update it before merging this branch into master."
exit 1
fi
fi
fi
make test
|
Use makefile commands in travis | travis_install_on_linux () {
sudo apt-get install liblapack-dev
sudo apt-get install gfortran # for lbgfs
sudo apt-get install libffi-dev # for Ctypes
}
travis_install_on_osx () {
echo "brew install lapack"
brew install homebrew/dupes/lapack > /dev/null
echo "brew install gcc"
brew install gcc > /dev/null # for gfortran
echo "brew install libffi"
brew install libffi > /dev/null # for ocephes
echo "brew install finished!"
}
case $TRAVIS_OS_NAME in
osx) travis_install_on_osx ;;
linux) travis_install_on_linux ;;
*) echo "Unknown $TRAVIS_OS_NAME"; exit 1
esac
eval `opam config env`
export OPAMYES="true"
opam install ocamlfind topkg ocamlbuild
opam pin add dsfo git://github.com/rleonid/dsfo
#echo Installing Libraries
#make setup-test
echo Compiling
topkg build
topkg build -n omltest
echo Testing
topkg test
#echo PostingCoverage
#opam install ocveralls
#cd _test_build
#ocveralls --repo_token $COVERALLSTOKEN --git --send ../bisect0001.out
| travis_install_on_linux () {
sudo apt-get install liblapack-dev
sudo apt-get install gfortran # for lbgfs
sudo apt-get install libffi-dev # for Ctypes
}
travis_install_on_osx () {
echo "brew install lapack"
brew install homebrew/dupes/lapack > /dev/null
echo "brew install gcc"
brew install gcc > /dev/null # for gfortran
echo "brew install libffi"
brew install libffi > /dev/null # for ocephes
echo "brew install finished!"
}
case $TRAVIS_OS_NAME in
osx) travis_install_on_osx ;;
linux) travis_install_on_linux ;;
*) echo "Unknown $TRAVIS_OS_NAME"; exit 1
esac
eval `opam config env`
export OPAMYES="true"
echo Installing basic deps
opam install ocamlfind topkg ocamlbuild
opam pin add dsfo git://github.com/rleonid/dsfo
#echo Installing Libraries
#make setup-test
echo Compiling
make build
echo Testing
make test
echo Installing C and Fortran deps
opam install ocephes lacaml lbfgs
echo Compiling with C/Fortran deps
make build
echo Testing with C/Fortran dest
make test
#echo PostingCoverage
#opam install ocveralls
#cd _test_build
#ocveralls --repo_token $COVERALLSTOKEN --git --send ../bisect0001.out
|
Fix valid manylinux1 wheels build script | #!/bin/bash
PYTHON_VERSIONS="cp34-cp34m cp35-cp35m cp36-cp36m"
# Avoid creation of __pycache__/*.py[c|o]
export PYTHONDONTWRITEBYTECODE=1
package_name="$1"
if [ -z "$package_name" ]
then
&>2 echo "Please pass package name as a first argument of this script ($0)"
exit 1
fi
echo "Compile wheels"
for PYTHON in ${PYTHON_VERSIONS}; do
/opt/python/${PYTHON}/bin/pip install -r /io/requirements/wheel.txt
/opt/python/${PYTHON}/bin/pip wheel /io/ -w /io/dist/
done
echo "Bundle external shared libraries into the wheels"
for whl in /io/dist/${package_name}*.whl; do
auditwheel repair "$whl" -w /io/dist/
done
echo "Install packages and test"
for PYTHON in ${PYTHON_VERSIONS}; do
/opt/python/${PYTHON}/bin/pip install "$package_name" --no-index -f file:///io/dist
/opt/python/${PYTHON}/bin/py.test /io/tests
done
| #!/bin/bash
PYTHON_VERSIONS="cp34-cp34m cp35-cp35m cp36-cp36m"
# Avoid creation of __pycache__/*.py[c|o]
export PYTHONDONTWRITEBYTECODE=1
package_name="$1"
if [ -z "$package_name" ]
then
&>2 echo "Please pass package name as a first argument of this script ($0)"
exit 1
fi
arch=`uname -m`
echo
echo
echo "Compile wheels"
for PYTHON in ${PYTHON_VERSIONS}; do
/opt/python/${PYTHON}/bin/pip install -r /io/requirements/wheel.txt
/opt/python/${PYTHON}/bin/pip wheel /io/ -w /io/dist/
done
echo
echo
echo "Bundle external shared libraries into the wheels"
for whl in /io/dist/${package_name}*${arch}.whl; do
echo "Repairing $whl..."
auditwheel repair "$whl" -w /io/dist/
done
echo "Cleanup OS specific wheels"
rm -fv /io/dist/*-linux_*.whl
echo
echo
echo "Install packages and test"
echo "dist directory:"
ls /io/dist
for PYTHON in ${PYTHON_VERSIONS}; do
echo
echo -n "Test $PYTHON: "
/opt/python/${PYTHON}/bin/python -c "import platform;print(platform.platform())"
/opt/python/${PYTHON}/bin/pip install "$package_name" --no-index -f file:///io/dist
/opt/python/${PYTHON}/bin/py.test /io/tests
done
|
Improve colorls choice on Darwin in modules/default | case $(uname -s) in
Darwin) alias colorls='ls -G' ;;
*) alias colorls='ls --color=auto' ;;
esac
alias l='colorls -a'
alias ll='colorls -la'
export EDITOR='vi'
| case $(ls --version 2> /dev/null) in
*GNU*) alias colorls='ls --color=auto' ;;
*) alias colorls='ls -G' ;;
esac
alias l='colorls -a'
alias ll='colorls -la'
export EDITOR='vi'
|
Create static directory in server/ when copying webpack-built css, img and js files if the corresponding directories don't exist | #!/bin/bash
STATIC_DIR='../server/static'
# Copy images
cp dist/static/img/* $STATIC_DIR/img/
# Copy css
cp dist/css/app.*.css $STATIC_DIR/css/app.css
# Copy js files
cp dist/js/app.*.js $STATIC_DIR/js/app.js
cp dist/js/common.*.js $STATIC_DIR/js/common.js
cp dist/js/vendor.*.js $STATIC_DIR/js/vendor.js
| #!/bin/bash
STATIC_DIR='../server/static'
# Create static directories
if [ ! -d "$STATIC_DIR" ]; then
mkdir -p $STATIC_DIR/{img,css,js}
fi
# Copy images
cp dist/static/img/* $STATIC_DIR/img/
# Copy css
cp dist/css/app.*.css $STATIC_DIR/css/app.css
# Copy js files
cp dist/js/app.*.js $STATIC_DIR/js/app.js
cp dist/js/common.*.js $STATIC_DIR/js/common.js
cp dist/js/vendor.*.js $STATIC_DIR/js/vendor.js
|
Add remote storage of Terraform state into S3 | #!/usr/bin/env bash
rm -rf ./tmp && mkdir ./tmp && mkdir ./tmp/agent_spawn_deploy
cp ./agents/spawn/index.js ./tmp/agent_spawn_deploy
cd ./tmp/agent_spawn_deploy
zip ./agent_spawn.zip -r ./index.js
cd ../..
terraform get && terraform $1 \
-var "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" \
-var "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
| #!/usr/bin/env bash
rm -rf ./tmp && mkdir ./tmp && mkdir ./tmp/agent_spawn_deploy
cp ./agents/spawn/index.js ./tmp/agent_spawn_deploy
cd ./tmp/agent_spawn_deploy
zip ./agent_spawn.zip -r ./index.js
cd ../..
terraform remote config -backend=s3 \
-backend-config="bucket=webapptest-terraform" \
-backend-config="access_key=$AWS_ACCESS_KEY_ID" \
-backend-config="secret_key=$AWS_SECRET_ACCESS_KEY" \
-backend-config="region=eu-west-1" \
-backend-config="key=state"
terraform remote pull
terraform get && terraform $1 \
-var "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" \
-var "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
terraform remote push
|
Use local for local variables in tidy func. | #!/usr/bin/env bash
usage() {
echo 'usage: tidyup source_directory target_directory'
exit 1
}
tidy() {
# Parameters: $1: source_directory
# $2: target_directory
source=$1
destination=$2
# Only proceed if the source directory exists
if [ -d "$source" ] ; then
mkdir -p $destination
rsync -arq $source/ $destination
# The following rm command removes:
# - all subdirectories (and their contents)
# - all hidden files
# - all normal files
# This requires a 'trick' because normally removing .* results in the
# error that rm cannot remove '.' and '..'.
# It has three components that use wildcards:
# 1/ $source/..?* removes all dot-dot files and subdirs except ..
# 2/ $source/.[!.]* removes all dot files and subdirs except .
# 3/ $source/* removes all non dot files and subdirs
rm -rf $source/..?* $source/.[!.]* $source/*
fi
}
main() {
# Script takes two and only two parameters.
if [[ $# -ne 2 ]] ; then
usage
fi
#ย Today's date in ddmmyy format.
today="$(date +'%d%m%y')"
for var in "$@"
do
tidy ~/$var ~/Backups/$var/$today
done
}
main "$@"
| #!/usr/bin/env bash
usage() {
echo 'usage: tidyup source_directory target_directory'
exit 1
}
tidy() {
# Parameters: $1: source_directory
# $2: target_directory
local source=$1
local target=$2
source=$1
destination=$2
# Only proceed if the source directory exists
if [ -d "$source" ] ; then
mkdir -p $destination
rsync -arq $source/ $destination
# The following rm command removes:
# - all subdirectories (and their contents)
# - all hidden files
# - all normal files
# This requires a 'trick' because normally removing .* results in the
# error that rm cannot remove '.' and '..'.
# It has three components that use wildcards:
# 1/ $source/..?* removes all dot-dot files and subdirs except ..
# 2/ $source/.[!.]* removes all dot files and subdirs except .
# 3/ $source/* removes all non dot files and subdirs
rm -rf $source/..?* $source/.[!.]* $source/*
fi
}
main() {
# Script takes two and only two parameters.
if [[ $# -ne 2 ]] ; then
usage
fi
#ย Today's date in ddmmyy format.
today="$(date +'%d%m%y')"
for var in "$@"
do
tidy ~/$var ~/Backups/$var/$today
done
}
main "$@"
|
Make this not fall over screaming if a file isn't found (the user might not have wanted it). | #!/bin/sh
#
# mkchecksums.sh - generate interactive checksum-checking script.
# Jordan Hubbard
#
# This script generates a cksum.sh script from a set of tarballs
# and should not be run by anyone but the release coordinator (there
# wouldn't be much point).
#
# Jordan
# $Id: mkchecksums.sh,v 1.2 1994/11/24 22:30:03 phk Exp $
#
# Remove any previous attempts.
rm -rf CKSUMS do_cksum.sh
# First generate the CKSUMS file for the benefit of those who wish to
# use it in some other way. If we find out that folks aren't even using
# it, we should consider eliminating it at some point. The interactive
# stuff makes it somewhat superfluous.
cksum * > CKSUMS
# Now generate a script for actually verifying the checksums.
awk 'BEGIN {print "rval=0"} { printf("if [ \"\`cksum %s%s%s\`\" != \"%s %s %s\" ]; then dialog --title Error --infobox \"Checksum error detected on %s!\" -1 -1; rval=1; fi\n", "\047", $3, "\047", $1, $2, $3, $3);} END {print "exit $rval"}' < CKSUMS > do_cksum.sh
chmod +x do_cksum.sh
| #!/bin/sh
#
# mkchecksums.sh - generate interactive checksum-checking script.
# Author: Jordan Hubbard
#
# This script generates a cksum.sh script from a set of tarballs
# and should not be run by anyone but the release coordinator (there
# wouldn't be much point).
#
# $Id: mkchecksums.sh,v 1.1 1995/01/14 07:41:50 jkh Exp $
#
# Remove any previous attempts.
rm -rf CKSUMS do_cksum.sh
# First generate the CKSUMS file for the benefit of those who wish to
# use it in some other way. If we find out that folks aren't even using
# it, we should consider eliminating it at some point. The interactive
# stuff makes it somewhat superfluous.
cksum * > CKSUMS
# Now generate a script for actually verifying the checksums.
awk 'BEGIN {print "rval=0"} { printf("if [ -f %s ]; then if [ \"\`cksum %s%s%s\`\" != \"%s %s %s\" ]; then dialog --title \"Checksum Error\" --msgbox \"Checksum error detected on %s!\" -1 -1; rval=1; fi; fi\n", $3, "\047", $3, "\047", $1, $2, $3, $3);} END {print "exit $rval"}' < CKSUMS > do_cksum.sh
chmod +x do_cksum.sh
|
Revert "forgot to make virtualenv" | #!/bin/bash -e
CHROMEDRIVER_VERSION=$1
python3 -m virtualenv .venv
source .venv/bin/activate
pip3 install -U bioblend pytest pytest-cov pytest-mock requests requests-oauthlib subprocess32 selenium
# Install chromedriver if not correct version
TEST_CHROMEDRIVER_VERSION=`chromedriver --version | sed -e 's/^ChromeDriver //' -e 's/ (.*//' 2>/dev/null`
if [ "$TEST_CHROMEDRIVER_VERSION" != "$CHROMEDRIVER_VERSION" ];
then
echo "Downloading Chromedriver Version: $CHROMEDRIVER_VERSION"
wget --no-verbose -O /tmp/chromedriver_linux64.zip https://chromedriver.storage.googleapis.com/$CHROMEDRIVER_VERSION/chromedriver_linux64.zip
unzip /tmp/chromedriver_linux64.zip -d .venv/bin
chmod 755 .venv/bin/chromedriver
else
echo "Chromedriver version [" $CHROMEDRIVER_VERSION "] already exists, not installing"
fi
| #!/bin/bash -e
CHROMEDRIVER_VERSION=$1
source .venv/bin/activate
pip3 install -U bioblend pytest pytest-cov pytest-mock requests requests-oauthlib subprocess32 selenium
# Install chromedriver if not correct version
TEST_CHROMEDRIVER_VERSION=`chromedriver --version | sed -e 's/^ChromeDriver //' -e 's/ (.*//' 2>/dev/null`
if [ "$TEST_CHROMEDRIVER_VERSION" != "$CHROMEDRIVER_VERSION" ];
then
echo "Downloading Chromedriver Version: $CHROMEDRIVER_VERSION"
wget --no-verbose -O /tmp/chromedriver_linux64.zip https://chromedriver.storage.googleapis.com/$CHROMEDRIVER_VERSION/chromedriver_linux64.zip
unzip /tmp/chromedriver_linux64.zip -d .venv/bin
chmod 755 .venv/bin/chromedriver
else
echo "Chromedriver version [" $CHROMEDRIVER_VERSION "] already exists, not installing"
fi
|
Fix encrypted boot on 8.1 | #!/bin/sh
cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//)
clevis luks bind -d $cryptdisk -k - tpm2 '{"pcr_bank": "sha256", "pcr_ids": "7"}' < /etc/confluent/confluent.apikey
cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey
| #!/bin/sh
cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//)
clevis luks bind -f -d $cryptdisk -k - tpm2 '{"pcr_bank": "sha256", "pcr_ids": "7"}' < /etc/confluent/confluent.apikey
cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey
|
Edit to key repeat delay | # Sets OS X defaults.
# Always open everything in Finder's list view
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder
chflags nohidden ~/Library
# Set a fast key repeat
defaults write NSGlobalDomain KeyRepeat -int 0
| # Sets OS X defaults.
# Always open everything in Finder's list view
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder
chflags nohidden ~/Library
# Set a fast key repeat
defaults write NSGlobalDomain KeyRepeat -int 2
|
Change bin/sh to usr/bin/env sh in script | #!/bin/sh
cd $(dirname $0)/..
echo ""
echo "___________________________________________________________"
echo ""
echo " This Docker image is generated from the following repositories:"
echo " https://github.com/pacmacro/pm-server"
echo " https://hub.docker.com/r/pacmacro/pm-server/"
echo ""
echo "__________________"
echo ""
echo " The Dockerfile is as follows:"
echo ""
cat docker/Dockerfile
echo ""
echo "___________________________________________________________"
echo ""
git pull
mvn spring-boot:run
| #!/usr/bin/env sh
cd $(dirname $0)/..
echo ""
echo "___________________________________________________________"
echo ""
echo " This Docker image is generated from the following repositories:"
echo " https://github.com/pacmacro/pm-server"
echo " https://hub.docker.com/r/pacmacro/pm-server/"
echo ""
echo "__________________"
echo ""
echo " The Dockerfile is as follows:"
echo ""
cat docker/Dockerfile
echo ""
echo "___________________________________________________________"
echo ""
git pull
mvn spring-boot:run
|
Clear future values every day, if classes are canceled | #!/usr/bin/env bash
echo 'Check if chrome needs to be started'
cmd="google-chrome --disable-gpu --headless --remote-debugging-port=9222"
rc=$(pgrep -fc "$cmd")
if [[ $rc -lt 1 ]]
then
echo 'Starting Chrome'
echo "$cmd"
eval "$cmd" &
sleep 5
fi
pid=$(pgrep -f "$cmd")
echo "$pid"
BASEDIR=$(dirname "$0")
echo 'Gathering and scraping classes'
mkdir -p "$BASEDIR/courses"
coursefile="$BASEDIR/courses/courses_$(date -Idate).json"
cmd="nodejs $BASEDIR/toplevel.js $BASEDIR/studios.json $coursefile"
echo "$cmd"
eval "$cmd"
echo 'All done, import into the database'
upsertfields="name,start,studio,style,postcode,timezone"
mongoimport -c courses -d aggregate --file "$coursefile" --upsertFields "$upsertfields"
| #!/usr/bin/env bash
echo 'Check if chrome needs to be started'
cmd="google-chrome --disable-gpu --headless --remote-debugging-port=9222"
rc=$(pgrep -fc "$cmd")
if [[ $rc -lt 1 ]]
then
echo 'Starting Chrome'
echo "$cmd"
eval "$cmd" &
sleep 5
fi
pid=$(pgrep -f "$cmd")
echo "$pid"
BASEDIR=$(dirname "$0")
echo 'Gathering and scraping classes'
mkdir -p "$BASEDIR/courses"
coursefile="$BASEDIR/courses/courses_$(date -Idate).json"
cmd="nodejs $BASEDIR/toplevel.js $BASEDIR/studios.json $coursefile"
echo "$cmd"
eval "$cmd"
if [[ $? -eq 0 ]]
then
echo 'All done, removing previous later values'
mongo aggregate --eval 'db.courses.remove({start: { $gt: new Date() } })'
echo 'Inserting new ones'
upsertfields="name,start,studio,style,postcode,timezone"
mongoimport -c courses -d aggregate --file "$coursefile" --upsertFields "$upsertfields"
else
echo 'Something went wrong, holding off on importing'
fi
|
Fix condition and output more data | #!/bin/bash
VERSIONEYE_SERVER=https://www.versioneye.com
API_KEY=<YOUR_SECRET_API_KEY> # Get it from here: https://www.versioneye.com/settings/api
PROJECT_ID=<YOUR_PROJECT_ID> # https://www.versioneye.com/user/projects/<PROJECT_ID>
ORGA_NAME=<YOUR_ORGANISATION_NAME>
json=$( curl -F name=project_file -F orga_name=${ORGA_NAME} -F project_file=@$1 "${VERSIONEYE_SERVER}/api/v2/projects/${PROJECT_ID}?api_key=${API_KEY}" )
project_id=$(echo $json | jq '.id' | sed 's/"//g' )
dep_number=$(echo $json | jq '.dep_number')
out_number=$(echo $json | jq '.out_number')
violations=$(echo $json | jq '.licenses_red')
echo ""
echo "Dependencies: $dep_number"
echo "Outdated: $out_number"
echo "License violations: $violations"
echo ""
if [ violations == 0 ]; then
echo "exit with status code 0"
exit 0
else
echo "exit with status code 2"
exit 2
fi
echo "Never ever!"
| #!/bin/bash
VERSIONEYE_SERVER=https://www.versioneye.com
API_KEY=<YOUR_SECRET_API_KEY> # Get it from here: https://www.versioneye.com/settings/api
PROJECT_ID=<YOUR_PROJECT_ID> # https://www.versioneye.com/user/projects/<PROJECT_ID>
ORGA_NAME=<YOUR_ORGANISATION_NAME>
json=$( curl -F name=project_file -F orga_name=${ORGA_NAME} -F project_file=@$1 "${VERSIONEYE_SERVER}/api/v2/projects/${PROJECT_ID}?api_key=${API_KEY}" )
project_id=$(echo $json | jq '.id' | sed 's/"//g' )
dep_number=$(echo $json | jq '.dep_number')
out_number=$(echo $json | jq '.out_number')
violations=$(echo $json | jq '.licenses_red')
sec_issues=$(echo $json | jq '.sv_count')
echo ""
echo "Project ID: $project_id"
echo "Dependencies: $dep_number"
echo "Outdated: $out_number"
echo "License violations: $violations"
echo "Security vulnerabilities: $sec_issues"
echo ""
if [ $violations = "0" ]; then
echo "exit with status code 0"
exit 0
else
echo "exit with status code 2"
exit 2
fi
echo "Never ever!"
|
Set default chunk size to 100 MB to reduce overhead | #!/bin/bash
# Prereqs
sudo apt-get update
sudo apt-get install iftop htop iotop davfs2 s3cmd zip
# Set up scratch disk
sudo mkfs -t ext4 /dev/xvdb
mkdir scratch
sudo mount /dev/xvdb scratch
cd scratch
sudo chmod 777 .
mkdir egnyte
mkdir egnyte-local
# Download data
sudo mount.davfs http://webdav-pda.egnyte.com/pda-egnyte egnyte -o ro
screen cp -rvn egnyte/Shared/PDA/* egnyte-local/
# Upload data
file="`date +"PDA %Y-%m-%d %H:%M:00 (Full)"`"
zip -r "$file" egnyte-local/*
s3cmd put "$file" s3://pharmadataassociates-backups/
rm "$file"
# Compute stats
find egnyte-local > files
du -hs egnyte-local > size
| #!/bin/bash
# Prereqs
sudo apt-get update
sudo apt-get install iftop htop iotop davfs2 s3cmd zip
# Set up scratch disk
sudo mkfs -t ext4 /dev/xvdb
mkdir scratch
sudo mount /dev/xvdb scratch
cd scratch
sudo chmod 777 .
mkdir egnyte
mkdir egnyte-local
# Download data
sudo mount.davfs http://webdav-pda.egnyte.com/pda-egnyte egnyte -o ro
screen cp -rvn egnyte/Shared/PDA/* egnyte-local/
# Upload data
file="`date +"PDA %Y-%m-%d %H:%M:00 (Full)"`"
zip -r "$file" egnyte-local/*
s3cmd put --multipart-chunk-size-mb=100 "$file" s3://pharmadataassociates-backups/
rm "$file"
# Compute stats
find egnyte-local > files
du -hs egnyte-local > size
|
Use correct parameter for --rm | #!/bin/bash
docker run -rm -v `pwd`/calico_containers:/code/calico_containers calico-build bash -c '/tmp/etcd -data-dir=/tmp/default.etcd/ & nosetests calico_containers/tests/unit -c nose.cfg'
| #!/bin/bash
docker run --rm -v `pwd`/calico_containers:/code/calico_containers calico-build bash -c '/tmp/etcd -data-dir=/tmp/default.etcd/ & nosetests calico_containers/tests/unit -c nose.cfg'
|
Add pip2 upgrade and neovim | #!/bin/bash
set -e
pip3 install --upgrade pip
pip3 install neovim
| #!/bin/bash
set -e
pip3 install --upgrade pip
pip3 install neovim
pip2 install --upgrade pip
pip2 install neovim
|
Improve paths to work from project root | gzip -cd *_R3.fastq.* | head -1000000 | awk 'NR == 2 || NR % 4 == 2' | grep -v N | sort | uniq -c | sort -nr | head -n 24 > sample_barcodes.log
| mkdir -p logs
gzip -cd data-raw/fastq/*_R3.fastq.* | \
head -1000000 | \
awk 'NR == 2 || NR % 4 == 2' | \
grep -v N | \
sort | \
uniq -c | \
sort -nr | \
head -n 24 > logs/sample_barcodes.log
|
Add wrapper links for gcc 5.2.0. | #!/bin/sh
#
# Set up wrappers around gcc/g++/gfortran
VER=gcc-4.9.3
source ./helper.sh
set_stage
# Write a wrapper around a driver (adds rpath entries)
function write_wrapper() {
cat > $PREFIX/$VER/bin/$1 <<EOF
#!/bin/sh
GCC_WRAPPER_DIRS="$PREFIX/$VER/lib64 $PREFIX/lib64 $PREFIX/lib"
GCC_WRAPPER_LD_RUN_PATH=\`echo "\$LD_RUN_PATH" | sed 's/:/ /g'\`
GCC_WRAPPER_DIRS="\$GCC_WRAPPER_LD_RUN_PATH \$GCC_WRAPPER_DIRS"
for dname in \$GCC_WRAPPER_DIRS ; do
GCC_WRAPPER_ARGS="\$GCC_WRAPPER_ARGS -Wl,-rpath -Wl,\$dname"
done
for arg in \$@ ; do
if [ "\$arg" == "-v" ]; then
GCC_WRAPPER_ARGS=""
elif [ "\$arg" == "-c" ]; then
GCC_WRAPPER_ARGS=""
elif [ "\$arg" == "-s" ]; then
GCC_WRAPPER_ARGS=""
fi
done
if [ \$# -eq 0 ]; then
GCC_WRAPPER_ARGS=""
fi
exec $PREFIX/$VER/bin/$1-4.9.3 "\$@" \$GCC_WRAPPER_ARGS
EOF
chmod +x $PREFIX/$VER/bin/$1
}
# Write wrappers for all three languages
for driver in gcc g++ c++ gfortran ; do
write_wrapper $driver
done
# Symlink for preproc
rm -f $PREFIX/$VER/bin/cpp
ln -s $PREFIX/$VER/bin/cpp-4.9.3 $PREFIX/$VER/bin/cpp
leave_stage
| #!/bin/sh
#
# Set up wrappers around gcc/g++/gfortran
VER=5.2.0
source ./helper.sh
set_stage
mkdir -p $PREFIX/gcc-$VER/wrapper
rm -f $PREFIX/gcc-$VER/wrapper/*
WRAPS="c++ cpp g++ gcc gccgo gcov gcov-tool gfortran go gofmt gcc-ar gcc-nm gcc-ranlib"
echo "Start $WRAPS"
for f in $WRAPS ; do
ln -s $PREFIX/gcc-$VER/bin/$f-$VER $PREFIX/gcc-$VER/wrapper/$f
done
# for f in $WRAPS2 ; do
# ln -s $PREFIX/gcc-$VER/bin/gcc-$f-$VER $PREFIX/gcc-$VER/wrapper/$f
# done
leave_stage
|
Fix offline-games route in LH script | #!/bin/sh
REPORTS_DIR=lighthouse
LAST_COMMIT="$(git rev-parse HEAD 2>/dev/null)"
mkdir -p $REPORTS_DIR/$LAST_COMMIT &&
cd $REPORTS_DIR/$LAST_COMMIT &&
lighthouse http://localhost:8080 --perf --output=json --output-path=root.json &&
lighthouse http://localhost:8080/#/browse --perf --output=json --output-path=browse.json &&
lighthouse http://localhost:8080/#/offline --perf --output=json --output-path=offline.json
| #!/bin/sh
REPORTS_DIR=lighthouse
LAST_COMMIT="$(git rev-parse HEAD 2>/dev/null)"
mkdir -p $REPORTS_DIR/$LAST_COMMIT &&
cd $REPORTS_DIR/$LAST_COMMIT &&
lighthouse http://localhost:8080 --perf --output=json --output-path=root.json &&
lighthouse http://localhost:8080/#/browse --perf --output=json --output-path=browse.json &&
lighthouse http://localhost:8080/#/offline-games --perf --output=json --output-path=offline.json
|
Clean up meta-measured setup script and use an auto.conf file. | #!/bin/sh
SLAVE_ROOT=/var/lib/buildbot/slaves
MEASURED_BUILD=${SLAVE_ROOT}/meta-measured/build/
MEASURED_CONF=${MEASURED_BUILD}/conf/local.conf
MEASURED_LAYERS=${MEASURED_BUILD}/LAYERS
MEASURED_FETCH_CONF=${MEASURED_BUILD}/fetch.conf
# values we'll be setting in local.conf
DL_DIR="/mnt/openembedded/downloads/"
GIT_MIRROR="file:///var/lib/git"
if [ ! -f ${MEASURED_CONF} ]; then
echo "Missing config file for meta-measured. Halting."
exit 1
fi
# set DL_DIR
sed -i "s&^\([[:space:]]*DL_DIR[[:space:]]*\)\(\?=\|\+=\|=\+\|=\).*$&\1\2 \"${DL_DIR}\"&" ${MEASURED_CONF}
if [ $? -ne 0 ]; then
exit $?
fi
echo "GIT_MIRROR=\"${GIT_MIRROR}\"" > ${MEASURED_FETCH_CONF}
if [ $? -ne 0 ]; then
exit $?
fi
| #!/bin/sh
SLAVE_ROOT=/var/lib/buildbot/slaves
MEASURED_BUILD=${SLAVE_ROOT}/meta-measured/build/
MEASURED_AUTO_CONF=${MEASURED_BUILD}/conf/auto.conf
MEASURED_FETCH_CONF=${MEASURED_BUILD}/fetch.conf
# set values in bitbake auto builder config file
cat << EOL > ${MEASURED_AUTO_CONF}
DL_DIR ?= "/mnt/openembedded/downloads/"
EOL
# set value in fetch config file
cat << EOL > ${MEASURED_FETCH_CONF}
GIT_MIRROR="file:///var/lib/git"
EOL
|
Add URLs for PHP Bzips | #!/usr/bin/env bash
# Download Bzips for newer PHP versions not downloadable by compile.sh from
# museum.php.net or php.net.
# See https://github.com/fpoirotte/phpfarm/blob/v0.2.0/src/compile.sh
PHP_VERSIONS=(
7.2.0beta1
)
PHP_BZIPS=(
https://downloads.php.net/~pollita/php-7.2.0beta1.tar.bz2
)
mkdir -p bzips
cd bzips
for KEY in ${!PHP_VERSIONS[@]}; do
VER=${PHP_VERSIONS[$KEY]}
BZIP_URL=${PHP_BZIPS[$KEY]}
wget -O php-$VER.tar.bz2 $BZIP_URL
done
| #!/usr/bin/env bash
# Download Bzips for newer PHP versions not downloadable by compile.sh from
# museum.php.net or php.net.
# See https://github.com/fpoirotte/phpfarm/blob/v0.2.0/src/compile.sh
PHP_VERSIONS=(
5.6.31
7.0.21
7.1.7
7.2.0beta1
)
PHP_BZIPS=(
http://php.net/get/php-5.6.31.tar.bz2/from/this/mirror
http://php.net/get/php-7.0.21.tar.bz2/from/this/mirror
http://php.net/get/php-7.1.7.tar.bz2/from/this/mirror
https://downloads.php.net/~pollita/php-7.2.0beta1.tar.bz2
)
mkdir -p bzips
cd bzips
for KEY in ${!PHP_VERSIONS[@]}; do
VER=${PHP_VERSIONS[$KEY]}
BZIP_URL=${PHP_BZIPS[$KEY]}
wget -O php-$VER.tar.bz2 $BZIP_URL
done
|
Update CI script for showcase server to run Gradle build | #!/bin/sh
cd $(dirname $0)
cd client
mvn clean package
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf target
./gradlew clean build
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf build
cd ../server
mvn clean package
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf target
| #!/bin/sh
cd $(dirname $0)
cd client
mvn clean package
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf target
./gradlew clean build
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf build
cd ../server
mvn clean package
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf target
./gradlew clean build
ret=$?
if [ $ret -ne 0 ]; then
exit $ret
fi
rm -rf build
|
Replace samalba/docker-registry with the official registry | #!/bin/bash
set -xe
env
NOVADOCKERDIR=$(realpath $(dirname $0)/../..)
INSTALLDIR=${INSTALLDIR:-/opt/stack}
cp $NOVADOCKERDIR/contrib/devstack/extras.d/70-docker.sh $INSTALLDIR/devstack/extras.d/
cp $NOVADOCKERDIR/contrib/devstack/lib/nova_plugins/hypervisor-docker $INSTALLDIR/devstack/lib/nova_plugins/
HOST_IP=$(ip addr | grep -Eo "inet [0-9\.]+" | grep -v 127.0.0.1 | head -n 1 | cut -d " " -f 2)
cat - <<-EOF >> $INSTALLDIR/devstack/localrc
export VIRT_DRIVER=docker
export HOST_IP=$HOST_IP
export KEYSTONE_ADMIN_BIND_HOST=0.0.0.0
export DOCKER_REGISTRY_IMAGE=samalba/docker-registry
export DEFAULT_IMAGE_NAME=cirros
export IMAGE_URLS=" "
EOF
| #!/bin/bash
set -xe
env
NOVADOCKERDIR=$(realpath $(dirname $0)/../..)
INSTALLDIR=${INSTALLDIR:-/opt/stack}
cp $NOVADOCKERDIR/contrib/devstack/extras.d/70-docker.sh $INSTALLDIR/devstack/extras.d/
cp $NOVADOCKERDIR/contrib/devstack/lib/nova_plugins/hypervisor-docker $INSTALLDIR/devstack/lib/nova_plugins/
HOST_IP=$(ip addr | grep -Eo "inet [0-9\.]+" | grep -v 127.0.0.1 | head -n 1 | cut -d " " -f 2)
cat - <<-EOF >> $INSTALLDIR/devstack/localrc
export VIRT_DRIVER=docker
export HOST_IP=$HOST_IP
export KEYSTONE_ADMIN_BIND_HOST=0.0.0.0
export DOCKER_REGISTRY_IMAGE=registry
export DEFAULT_IMAGE_NAME=cirros
export IMAGE_URLS=" "
EOF
|
Save test results to different files | #!/bin/sh
test_description="Generate documentation"
. ./sharness.sh
FIXTURES="$TEST_DIRECTORY/fixtures"
test_expect_success "Generate plain text documentation" "
tomdoc.sh --text '$BUILD_DIR/tomdoc.sh' >result &&
test_cmp '$FIXTURES/tomdoc.sh.txt' result
"
test_expect_success "Generate markdown documentation" "
tomdoc.sh --markdown '$BUILD_DIR/tomdoc.sh' >result &&
test_cmp '$FIXTURES/tomdoc.sh.md' result
"
test_done
| #!/bin/sh
test_description="Generate documentation"
. ./sharness.sh
FIXTURES="$TEST_DIRECTORY/fixtures"
test_expect_success "Generate plain text documentation" "
tomdoc.sh --text '$BUILD_DIR/tomdoc.sh' >result.txt &&
test_cmp '$FIXTURES/tomdoc.sh.txt' result.txt
"
test_expect_success "Generate markdown documentation" "
tomdoc.sh --markdown '$BUILD_DIR/tomdoc.sh' >result.md &&
test_cmp '$FIXTURES/tomdoc.sh.md' result.md
"
test_done
|
Fix path to docs in README raw text | #!/usr/bin/env ruby
base_dir = File.join(File.dirname(__FILE__),'../../..')
src_dir = File.join(base_dir, "/src/main/asciidoc")
require 'asciidoctor'
require 'optparse'
options = {}
file = "#{src_dir}/README.adoc"
OptionParser.new do |o|
o.on('-o OUTPUT_FILE', 'Output file (default is stdout)') { |file| options[:to_file] = file unless file=='-' }
o.on('-h', '--help') { puts o; exit }
o.parse!
end
file = ARGV[0] if ARGV.length>0
srcDir = File.dirname(file)
out = "// Do not edit this file (e.g. go instead to src/main/asciidoc)\n\n"
doc = Asciidoctor.load_file file, safe: :safe, parse: false, attributes: 'allow-uri-read'
out << doc.reader.read
unless options[:to_file]
puts out
else
File.open(options[:to_file],'w+') do |file|
file.write(out)
end
end
| #!/usr/bin/env ruby
base_dir = File.join(File.dirname(__FILE__),'../../..')
src_dir = File.join(base_dir, "/src/main/asciidoc")
require 'asciidoctor'
require 'optparse'
options = {}
file = "#{src_dir}/README.adoc"
OptionParser.new do |o|
o.on('-o OUTPUT_FILE', 'Output file (default is stdout)') { |file| options[:to_file] = file unless file=='-' }
o.on('-h', '--help') { puts o; exit }
o.parse!
end
file = ARGV[0] if ARGV.length>0
srcDir = File.dirname(file)
out = "// Do not edit this file (e.g. go instead to docs/src/main/asciidoc)\n\n"
doc = Asciidoctor.load_file file, safe: :safe, parse: false, attributes: 'allow-uri-read'
out << doc.reader.read
unless options[:to_file]
puts out
else
File.open(options[:to_file],'w+') do |file|
file.write(out)
end
end
|
Change "dartfmt" to "dart format" in dart2js_info tool. | #!/bin/bash
set -e
if [ -z "$1" ]; then
echo "Expected exactly one argument which is the protoc_plugin version to use"
else
echo "Using protoc_plugin version $1"
pub global activate protoc_plugin "$1"
fi
protoc --proto_path="." --dart_out=lib/src/proto info.proto
dartfmt -w lib/src/proto
| #!/bin/bash
set -e
if [ -z "$1" ]; then
echo "Expected exactly one argument which is the protoc_plugin version to use"
else
echo "Using protoc_plugin version $1"
pub global activate protoc_plugin "$1"
fi
protoc --proto_path="." --dart_out=lib/src/proto info.proto
dart format lib/src/proto
|
Fix command to echo current username to tempfile | #!/bin/bash
#Configure the GitHub user that the plugin scripts will be pulled from.
#This is useful if you have forked the script and intend to customise your own plugins.
gituser=asdf-git
rm -rf /tmp/Fedora-PostInstall
cd /tmp
git clone -q https://github.com/$gituser/Fedora-PostInstall.git
`whoami` > /tmp/Fedora-PostInstall/user.txt
sudo sh /tmp/Fedora-PostInstall/Fedora$(rpm -E %fedora).sh
| #!/bin/bash
#Configure the GitHub user that the plugin scripts will be pulled from.
#This is useful if you have forked the script and intend to customise your own plugins.
gituser=asdf-git
rm -rf /tmp/Fedora-PostInstall
cd /tmp
git clone -q https://github.com/$gituser/Fedora-PostInstall.git
echo `whoami` > /tmp/Fedora-PostInstall/user.txt
sudo sh /tmp/Fedora-PostInstall/Fedora$(rpm -E %fedora).sh
|
Fix typo in docker experimental config | #!/bin/bash
# shellcheck disable=SC2094
set -euo pipefail
## Configures docker before system starts
# Write to system console and to our log file
# See https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee -a /var/log/elastic-stack.log|logger -t user-data -s 2>/dev/console) 2>&1
# Set user namespace remapping in config
if [[ "${DOCKER_USERNS_REMAP:-false}" == "true" ]] ; then
cat <<< "$(jq '."userns-remap"="buildkite-agent"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
fi
# Set experimental in config
if [[ "${DOCKER_EXPERIMENTAL:-false}" == "true" ]] ; then
cat <<< "$(jq '.experimental="buildkite-agent"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
fi
| #!/bin/bash
# shellcheck disable=SC2094
set -euo pipefail
## Configures docker before system starts
# Write to system console and to our log file
# See https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee -a /var/log/elastic-stack.log|logger -t user-data -s 2>/dev/console) 2>&1
# Set user namespace remapping in config
if [[ "${DOCKER_USERNS_REMAP:-false}" == "true" ]] ; then
cat <<< "$(jq '."userns-remap"="buildkite-agent"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
fi
# Set experimental in config
if [[ "${DOCKER_EXPERIMENTAL:-false}" == "true" ]] ; then
cat <<< "$(jq '.experimental="true"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
fi
|
Add alias for "composer dump-autoload" | # ------------------------------------------------------------------------------
# FILE: composer.plugin.zsh
# DESCRIPTION: oh-my-zsh composer plugin file.
# AUTHOR: Daniel Gomes (me@danielcsgomes.com)
# VERSION: 1.0.0
# ------------------------------------------------------------------------------
# Composer basic command completion
_composer_get_command_list () {
composer --no-ansi | sed "1,/Available commands/d" | awk '/^ [a-z]+/ { print $1 }'
}
_composer () {
if [ -f composer.json ]; then
compadd `_composer_get_command_list`
fi
}
compdef _composer composer
# Aliases
alias c='composer'
alias csu='composer self-update'
alias cu='composer update'
alias ci='composer install'
alias ccp='composer create-project'
# install composer in the current directory
alias cget='curl -s https://getcomposer.org/installer | php' | # ------------------------------------------------------------------------------
# FILE: composer.plugin.zsh
# DESCRIPTION: oh-my-zsh composer plugin file.
# AUTHOR: Daniel Gomes (me@danielcsgomes.com)
# VERSION: 1.0.0
# ------------------------------------------------------------------------------
# Composer basic command completion
_composer_get_command_list () {
composer --no-ansi | sed "1,/Available commands/d" | awk '/^ [a-z]+/ { print $1 }'
}
_composer () {
if [ -f composer.json ]; then
compadd `_composer_get_command_list`
fi
}
compdef _composer composer
# Aliases
alias c='composer'
alias csu='composer self-update'
alias cu='composer update'
alias ci='composer install'
alias ccp='composer create-project'
alias cdu='composer dump-autoload'
# install composer in the current directory
alias cget='curl -s https://getcomposer.org/installer | php'
|
Install ia32-libs so we can run 32 bit executables. | #!/bin/bash
sudo apt-get update
sudo apt-get install -y build-essential zlib1g-dev wget curl python-setuptools git libz-dev
mkdir tmp
cd tmp
wget https://raw.github.com/chapmanb/bcbio-nextgen/master/scripts/bcbio_nextgen_install.py
## python bcbio_nextgen_install.py /usr/local/share/bcbio-nextgen --tooldir=/usr/local --sudo --genomes GRCh37 --aligners bwa
| #!/bin/bash
sudo apt-get update
sudo apt-get install -y build-essential zlib1g-dev wget curl python-setuptools git libz-dev ia32-libs
mkdir tmp
cd tmp
wget https://raw.github.com/chapmanb/bcbio-nextgen/master/scripts/bcbio_nextgen_install.py
## python bcbio_nextgen_install.py /usr/local/share/bcbio-nextgen --tooldir=/usr/local --sudo --genomes GRCh37 --aligners bwa
|
Add options to help when reloading the entire database. | #!/bin/bash
FILENAME=dumpdata_`date --rfc-3339=seconds | sed -e 's/ /_/g'`.json.bz2
echo "Dumping database contents to ${FILENAME}..."
python manage.py dumpdata \
contact_info \
farms \
notifications \
--exclude contenttypes \
account \
auth.User \
auth.Group \
sites \
$* \
| bzip2 > $FILENAME
echo "Done."
| #!/bin/bash
FILENAME=dumpdata_`date --rfc-3339=seconds | sed -e 's/ /_/g'`.json.bz2
echo "Dumping database contents to ${FILENAME}..."
python manage.py dumpdata \
--indent=4 \
--natural-foreign \
--natural-primary \
contact_info \
farms \
notifications \
--exclude contenttypes \
account \
auth.User \
auth.Group \
sites \
$* \
| bzip2 > $FILENAME
echo "Done."
|
Add build step to faleiro setup | # Set up and start the scheduler
cd faleiro
./install.sh
# TODO uncomment lines below when scheduler supports clustering
# if we do this now; it will be chaos as three independent schedulers
# will be attempting to use the "same" zookeeper (cluster in consensus)
# supervisord
# supervisorctl reload
cd ..
| # Set up and start the scheduler
cd faleiro
./build.sh
./install.sh
# TODO uncomment lines below when scheduler supports clustering
# if we do this now; it will be chaos as three independent schedulers
# will be attempting to use the "same" zookeeper (cluster in consensus)
# supervisord
# supervisorctl reload
cd ..
|
Comment out the configurable dir environments. | #!/bin/sh
# If you've non-standard directories, set these
ACLOCAL_DIR=
GETTEXT_DIR=
if test "$ACLOCAL_DIR" != ""; then
ACLOCAL="aclocal -I $ACLOCAL_DIR"
export ACLOCAL
fi
autoreconf -i -f
for dir in $GETTEXT_DIR /usr/share/gettext; do
if test -f $dir/config.rpath; then
/bin/cp -f $dir/config.rpath .
break
fi
done
| #!/bin/sh
# If you've non-standard directories, set these
#ACLOCAL_DIR=
#GETTEXT_DIR=
if test "$ACLOCAL_DIR" != ""; then
ACLOCAL="aclocal -I $ACLOCAL_DIR"
export ACLOCAL
fi
autoreconf -i -f
for dir in $GETTEXT_DIR /usr/share/gettext; do
if test -f $dir/config.rpath; then
/bin/cp -f $dir/config.rpath .
break
fi
done
|
Add 0> core extension word | #! /bin/sh
parse() {
pop c
push `inbufgetaddr`
n=0;
while inbufget xc; do
if test $xc = $c; then break; fi
n="`\"$expr\" $n + 1`"
done
push $n
}
builtin 'parse' parse
dot_s() { # .s
n=0
while expr $n '<' $sp >/dev/null; do
eval "echo $n : \$stk_$n"
n="`\"$expr\" $n + 1`"
done
}
builtin '.s' dot_s
bye() {
exit
}
builtin 'bye' bye
nip() {
pop x2
pop x1
push "$x2"
}
builtin 'nip' nip
tuck() {
pop x2
pop x1
push "$x2"
push "$x1"
push "$x2"
}
builtin 'tuck' tuck
not_equals() { # <>
pop n2
pop n1
if "$expr" "$n1" '!=' "$n2" >/dev/null; then
push -1
else
push 0
fi
}
builtin '<>' not_equals
zero_not_equals() { # 0<>
pop n
if "$expr" 0 '!=' "$n" >/dev/null; then
push -1
else
push 0
fi
}
builtin '0<>' zero_not_equals
| #! /bin/sh
parse() {
pop c
push `inbufgetaddr`
n=0;
while inbufget xc; do
if test $xc = $c; then break; fi
n="`\"$expr\" $n + 1`"
done
push $n
}
builtin 'parse' parse
dot_s() { # .s
n=0
while expr $n '<' $sp >/dev/null; do
eval "echo $n : \$stk_$n"
n="`\"$expr\" $n + 1`"
done
}
builtin '.s' dot_s
bye() {
exit
}
builtin 'bye' bye
nip() {
pop x2
pop x1
push "$x2"
}
builtin 'nip' nip
tuck() {
pop x2
pop x1
push "$x2"
push "$x1"
push "$x2"
}
builtin 'tuck' tuck
not_equals() { # <>
pop n2
pop n1
if "$expr" "$n1" '!=' "$n2" >/dev/null; then
push -1
else
push 0
fi
}
builtin '<>' not_equals
zero_not_equals() { # 0<>
pop n
if "$expr" 0 '!=' "$n" >/dev/null; then
push -1
else
push 0
fi
}
builtin '0<>' zero_not_equals
zero_greater() { # 0>
pop n
if "$expr" 0 '<' "$n" >/dev/null; then
push -1
else
push 0
fi
}
builtin '0>' zero_greater
|
Make sure that CNAME is pushed to gh-pages | #!/usr/bin/env bash
set -e # halt script on error
# If this is the deploy branch, push it up to gh-pages
echo "Get ready, we're pushing to gh-pages!"
cd dist
git init
git config user.name "Travis-CI"
git config user.email "travis@somewhere.com"
git add .
git commit -m "CI deploy to gh-pages"
git push --force --quiet "https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git" master:gh-pages | #!/usr/bin/env bash
set -e # halt script on error
# If this is the deploy branch, push it up to gh-pages
echo "Get ready, we're pushing to gh-pages!"
cd dist
git init
git config user.name "Travis-CI"
git config user.email "travis@somewhere.com"
cp ../CNAME .
git add .
git commit -m "CI deploy to gh-pages"
git push --force --quiet "https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git" master:gh-pages |
Use eatmydata to speed up installing prerequisites | #!/bin/sh
if [ "$(lsb_release -cs)" = "trusty" ]; then
apt-add-repository "deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse"
fi
apt update -yq
apt-get -yq --no-install-suggests --no-install-recommends install \
build-essential \
flex \
gcc \
lcov \
libboost-dev \
libboost-test-dev \
libssl-dev \
python3 \
python3-dev \
valgrind \
wget
if [ "$(lsb_release -cs)" = "trusty" ]; then
apt-get -yq -t trusty-backports install swig3.0
apt-get -yq -t trusty-backports install cmake
else
apt-get -yq install swig3.0
apt-get -yq install cmake
fi
| #!/bin/sh
if [ "$(lsb_release -cs)" = "trusty" ]; then
apt-add-repository "deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse"
fi
apt update -yq
apt-get install eatmydata
eatmydata apt-get -yq --no-install-suggests --no-install-recommends install \
build-essential \
flex \
gcc \
lcov \
libboost-dev \
libboost-test-dev \
libssl-dev \
python3 \
python3-dev \
valgrind \
wget
if [ "$(lsb_release -cs)" = "trusty" ]; then
eatmydata apt-get -yq -t trusty-backports install swig3.0
eatmydata apt-get -yq -t trusty-backports install cmake
else
eatmydata apt-get -yq install swig3.0
eatmydata apt-get -yq install cmake
fi
|
Revise OS check for homebrew install | #!/bin/bash
dir=~/dotfiles
for file in $dir/*.symlink; do
echo "Creating symlink to $file in home directory."
filename=${file##*/}
ln -sf $file ~/.${filename%.symlink}
done
# install homebrew
if [[ $OSTYPE == darwin* && ! $(command -v brew 2>/dev/null) ]]; then
echo "Mac without homebrew?!?! Fixing..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "Homebrew already installed"
fi
# install brew bundle
if [[ ! $(brew bundle check) ]]; then
echo "Found missing packages, installing Homebrew bundle"
brew bundle
else
echo "Homebrew bundle reports all packages installed"
fi
| #!/bin/bash
dir=~/dotfiles
for file in $dir/*.symlink; do
echo "Creating symlink to $file in home directory."
filename=${file##*/}
ln -sf $file ~/.${filename%.symlink}
done
# Homebrew setup, if OSX
if [[ $OSTYPE == darwin* ]]; then
# install homebrew
if [[ ! $(command -v brew 2>/dev/null) ]]; then
echo "Mac without homebrew?!?! Fixing..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "Homebrew already installed"
# install brew bundle
if [[ ! $(brew bundle check) ]]; then
echo "Found missing packages, installing Homebrew bundle"
brew bundle
else
echo "Homebrew bundle reports all packages installed"
fi
fi
|
Add python setup to VM setup script. | #!/bin/bash
# This script sets up a GCE machine with the necessary dependencies to run the
# Cloud Pub/Sub SME training examples.
function run_and_check {
"$@"
status=$?
if [ $status -ne 0 ]; then
echo "Failed to run command $@"
exit $status
fi
}
run_and_check sudo apt-get update -y
# Install Java pieces.
run_and_check sudo apt-get install openjdk-8-jdk -y
run_and_check sudo update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac
run_and_check sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
run_and_check sudo apt-get install maven -y
run_and_check sudo apt-get update && sudo apt-get --only-upgrade install kubectl google-cloud-sdk google-cloud-sdk-datastore-emulator google-cloud-sdk-pubsub-emulator google-cloud-sdk-app-engine-go google-cloud-sdk-app-engine-java google-cloud-sdk-app-engine-python google-cloud-sdk-cbt google-cloud-sdk-bigtable-emulator google-cloud-sdk-datalab -y
echo "==================="
echo "SUCCESS"
| #!/bin/bash
# This script sets up a GCE machine with the necessary dependencies to run the
# Cloud Pub/Sub SME training examples.
function run_and_check {
"$@"
status=$?
if [ $status -ne 0 ]; then
echo "Failed to run command $@"
exit $status
fi
}
run_and_check sudo apt-get update -y
# Install Java pieces.
run_and_check sudo apt-get install openjdk-8-jdk -y
run_and_check sudo update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac
run_and_check sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
run_and_check sudo apt-get install maven -y
run_and_check sudo apt-get update && sudo apt-get --only-upgrade install kubectl google-cloud-sdk google-cloud-sdk-datastore-emulator google-cloud-sdk-pubsub-emulator google-cloud-sdk-app-engine-go google-cloud-sdk-app-engine-java google-cloud-sdk-app-engine-python google-cloud-sdk-cbt google-cloud-sdk-bigtable-emulator google-cloud-sdk-datalab -y
# Install Python pieces.
run_and_check sudo apt-get install python3 -y
run_and_check curl https://bootstrap.pypa.io/get-pip.py | sudo python3
run_and_check sudo pip3 install --upgrade google-cloud-pubsub
echo "==================="
echo "SUCCESS"
|
Remove function keyword from main | #!/usr/bin/env bash
set -e
set -u
set -o pipefail
function main() {
local scriptdir=$(dirname "$(readlink -f "$0")")
pushd "$scriptdir" > /dev/null
. ./templates.env
echo "JENKINS_VERSION: $JENKINS_VERSION"
echo "MASTER_IMAGE_VERSION: $MASTER_IMAGE_VERSION"
echo "SLAVE_IMAGE_VERSION: $SLAVE_IMAGE_VERSION"
echo "JENKINS_HOST: $JENKINS_HOST"
echo "LETSENCRYPT_EMAIL: $LETSENCRYPT_EMAIL"
echo 'Generating docker-compose.yml from template: ./templates/docker-compose.yml'
sed "s/@MASTER_IMAGE_VERSION@/$MASTER_IMAGE_VERSION/g" ./templates/docker-compose.yml \
| sed "s/@SLAVE_IMAGE_VERSION@/$SLAVE_IMAGE_VERSION/g" \
| sed "s/@JENKINS_HOST@/$JENKINS_HOST/g" \
| sed "s/@LETSENCRYPT_EMAIL@/$LETSENCRYPT_EMAIL/g" \
> docker-compose.yml
echo 'Generating jenkins_master/Dockerfile from template: ./templates/Dockerfile'
sed "s/@JENKINS_VERSION@/$JENKINS_VERSION/g" ./templates/Dockerfile > jenkins_master/Dockerfile
popd > /dev/null
}
main
| #!/usr/bin/env bash
set -e
set -u
set -o pipefail
main() {
local scriptdir=$(dirname "$(readlink -f "$0")")
pushd "$scriptdir" > /dev/null
. ./templates.env
echo "JENKINS_VERSION: $JENKINS_VERSION"
echo "MASTER_IMAGE_VERSION: $MASTER_IMAGE_VERSION"
echo "SLAVE_IMAGE_VERSION: $SLAVE_IMAGE_VERSION"
echo "JENKINS_HOST: $JENKINS_HOST"
echo "LETSENCRYPT_EMAIL: $LETSENCRYPT_EMAIL"
echo 'Generating docker-compose.yml from template: ./templates/docker-compose.yml'
sed "s/@MASTER_IMAGE_VERSION@/$MASTER_IMAGE_VERSION/g" ./templates/docker-compose.yml \
| sed "s/@SLAVE_IMAGE_VERSION@/$SLAVE_IMAGE_VERSION/g" \
| sed "s/@JENKINS_HOST@/$JENKINS_HOST/g" \
| sed "s/@LETSENCRYPT_EMAIL@/$LETSENCRYPT_EMAIL/g" \
> docker-compose.yml
echo 'Generating jenkins_master/Dockerfile from template: ./templates/Dockerfile'
sed "s/@JENKINS_VERSION@/$JENKINS_VERSION/g" ./templates/Dockerfile > jenkins_master/Dockerfile
popd > /dev/null
}
main
|
Remove curl download details in fly login | #!/bin/bash
set -euo pipefail
# Required env vars
# CONCOURSE_URL
# CONCOURSE_ATC_USER
# CONCOURSE_ATC_PASSWORD
# FLY_CMD
# FLY_TARGET
FLY_CMD_URL="${CONCOURSE_URL}/api/v1/cli?arch=amd64&platform=$(uname | tr '[:upper:]' '[:lower:]')"
echo "Downloading fly command..."
curl "$FLY_CMD_URL" -L -f -k -z "$FLY_CMD" -o "$FLY_CMD" -u "${CONCOURSE_ATC_USER}:${CONCOURSE_ATC_PASSWORD}"
chmod +x "$FLY_CMD"
echo "Doing fly login"
echo -e "${CONCOURSE_ATC_USER}\n${CONCOURSE_ATC_PASSWORD}" | \
$FLY_CMD -t "${FLY_TARGET}" login -k --concourse-url "${CONCOURSE_URL}"
| #!/bin/bash
set -euo pipefail
# Required env vars
# CONCOURSE_URL
# CONCOURSE_ATC_USER
# CONCOURSE_ATC_PASSWORD
# FLY_CMD
# FLY_TARGET
FLY_CMD_URL="${CONCOURSE_URL}/api/v1/cli?arch=amd64&platform=$(uname | tr '[:upper:]' '[:lower:]')"
echo "Downloading fly command..."
curl "$FLY_CMD_URL" -# -L -f -k -z "$FLY_CMD" -o "$FLY_CMD" -u "${CONCOURSE_ATC_USER}:${CONCOURSE_ATC_PASSWORD}"
chmod +x "$FLY_CMD"
echo "Doing fly login"
echo -e "${CONCOURSE_ATC_USER}\n${CONCOURSE_ATC_PASSWORD}" | \
$FLY_CMD -t "${FLY_TARGET}" login -k --concourse-url "${CONCOURSE_URL}"
|
Fix result counter for completion-data test | result=$(trim "$(bundle exec vagrant run completion-data | wc -w)")
[[ "${result}" == '15' ]] || {
echo 'Completion data contains unexpected number of return values...'
echo "Got result: '${result}'"
exit 1
}
| result=$(trim "$(bundle exec vagrant run completion-data | wc -w)")
[[ "${result}" == '19' ]] || {
echo 'Completion data contains unexpected number of return values...'
echo "Got result: '${result}'"
exit 1
}
|
Check if vagrant ephemeral disk is already mounted | #!/bin/bash -e
gardenDir="/var/vcap/data/garden"
# This assumes you are running on an instance with attached ephemeral disk as current (0.70) concourse image does
echo "Mounting ${gardenDir} on to ephemeral disk..."
cp -R ${gardenDir}/* /mnt
umount /mnt
# Unmount all aufs mounts
mounts=$(cat /proc/mounts | grep /var/vcap/data/garden/aufs_graph/aufs/mnt | awk '{print $2}')
[[ -n "${mounts}" ]] && echo ${mounts} | xargs umount
# Only lazy unmount works here
umount -l /var/vcap/data/garden/aufs_graph/aufs
# Mount ephemeral storage as garden data
mount /dev/xvdb ${gardenDir}
/var/vcap/bosh/bin/monit restart garden
| #!/bin/bash -e
gardenDir="/var/vcap/data/garden"
if ! mount | grep -q $gardenDir; then
# This assumes you are running on an instance with attached ephemeral disk as current (0.70) concourse image does
echo "Mounting ${gardenDir} on to ephemeral disk..."
cp -R ${gardenDir}/* /mnt
umount /mnt
# Unmount all aufs mounts
mounts=$(cat /proc/mounts | grep /var/vcap/data/garden/aufs_graph/aufs/mnt | awk '{print $2}')
[[ -n "${mounts}" ]] && echo ${mounts} | xargs umount
# Only lazy unmount works here
umount -l /var/vcap/data/garden/aufs_graph/aufs
# Mount ephemeral storage as garden data
mount /dev/xvdb ${gardenDir}
/var/vcap/bosh/bin/monit restart garden
fi
|
Fix names of benchmark scripts | #!/bin/bash
export NUMBER_FFT="2"
export SIZE_MULTIPLIER="32"
export GULP_SIZE="$(echo '32768*1024' | bc)"
NUM1="$(python -OO test1.py)"
NUM2="$(python test2.py)"
echo "Bifrost has $NUM1"
echo "Scikit has $NUM2"
echo "Bifrost is: "
echo "scale=5; $NUM2/$NUM1" | bc
echo "times faster"
#1.23392
#NUMBER_FFT = 4
#SIZE_MULTIPLIER = 32
#GULP_SIZE = 32768*1024//8**2
#1.72889
#export NUMBER_FFT="2"
#export SIZE_MULTIPLIER="32"
#export GULP_SIZE="$(echo '32768*1024/8' | bc)"
| #!/bin/bash
export NUMBER_FFT="2"
export SIZE_MULTIPLIER="32"
export GULP_SIZE="$(echo '32768*1024' | bc)"
NUM1="$(python -OO linear_fft_pipeline.py)"
NUM2="$(python skcuda_fft_pipeline.py)"
echo "Bifrost has $NUM1"
echo "Scikit has $NUM2"
echo "Bifrost is: "
echo "scale=5; $NUM2/$NUM1" | bc
echo "times faster"
#1.23392
#NUMBER_FFT = 4
#SIZE_MULTIPLIER = 32
#GULP_SIZE = 32768*1024//8**2
#1.72889
#export NUMBER_FFT="2"
#export SIZE_MULTIPLIER="32"
#export GULP_SIZE="$(echo '32768*1024/8' | bc)"
|
Use bash variable strip operations instead of sed | #!/bin/bash
read REQUEST_METHOD REQUEST_URI REQUEST_HTTP_VERSION
while read HEADER_LINE; do
[[ "$HEADER_LINE" =~ ^$ ]]&& {
break;
}
HEADER_KEY=$(echo "$HEADER_LINE" | sed 's/^\([^:]*\):.*/\1/g' | sed 's/-/_/g' | tr '[:lower:]' '[:upper:]')
HEADER_VALUE=$(echo "$HEADER_LINE" | sed 's/^[^:]*: \(.*\)/\1/g')
declare "HEADER_${HEADER_KEY}"="$HEADER_VALUE"
done
read -n $HEADER_CONTENT_LENGTH CONTENT_BODY
REQUEST_PATH=$(echo "$REQUEST_URI" | sed 's/^\([^?]*\).*$/\1/g')
if [[ -f "./path${REQUEST_PATH}" ]];then
echo "$CONTENT_BODY" | . ./path${REQUEST_PATH}
else
echo "HTTP/1.0 200 OK"
# echo "Cache-Control : no-cache, private"
# echo "Content-Length : 107"
echo "Date: $(date)"
echo
echo "REQUEST_METHOD:$REQUEST_METHOD"
echo "REQUEST_PATH:$REQUEST_PATH"
echo "REQUEST_URI:$REQUEST_URI"
set | grep "^HEADER_"
echo "CONTENT_BODY:$CONTENT_BODY"
fi
| #!/bin/bash
function upper() { echo "$@" | tr '[:lower:]' '[:upper:]'; }
while getopts "s" OPTIONS; do case $OPTIONS in
s) SHORT="1" ;;
*) exit 1 ;;
esac; done; shift $(( OPTIND - 1 ))
read -r REQUEST_METHOD REQUEST_URI REQUEST_HTTP_VERSION
while read -r HEADER_LINE; do
[[ "$HEADER_LINE" =~ ^$ ]]&& { break; }
HEADER_KEY="${HEADER_LINE/%: */}"
HEADER_KEY="$(upper ${HEADER_KEY//-/_} )"
HEADER_VALUE="${HEADER_LINE/#*: /}"
declare "REQUEST_HEADER_${HEADER_KEY}"="$HEADER_VALUE"
done
unset HEADER_KEY HEADER_VALUE HEADER_LINE
if [[ -n "$REQUEST_HEADER_CONTENT_LENGTH" ]] && [[ "$REQUEST_HEADER_CONTENT_LENGTH" -gt "0" ]];then
read -r -d '' -n "$REQUEST_HEADER_CONTENT_LENGTH" REQUEST_CONTENT
fi
REQUEST_PATH="${REQUEST_URI/%\?*/}"
if [[ -f "./path${REQUEST_PATH}" ]];then
RESPONSE_CONTENT=$(echo "$REQUEST_CONTENT" | . ./path${REQUEST_PATH})
# | read -d'' RESPONSE_CONTENT
echo "HTTP/1.0 200 OK"
echo "Cache-Control : no-cache, private"
echo "Content-Length : ${#RESPONSE_CONTENT}"
# RESPONSE_HEADER_Foo="abcdef"
# set | grep "RESPONSE_HEADER"
# set
# echo "$RESPONSE_HEADER_Foo"
echo "Date: $(date)"
echo
echo "${RESPONSE_CONTENT}"
else
echo "HTTP/1.0 200 OK"
echo "Date: $(date)"
echo
fi
|
Fix issue where tests weren't running in CI | #!/bin/bash
#
# Copyright 2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This script is executed when by TravisCI to build everything and run
# automated tests.
#
(cd tests/cosimulation
./generate_random.py -m 3)
verilator --version
# Build out of tree
mkdir build
(cd build
cmake ..
make -j 8)
# Run tests
make tests
| #!/bin/bash
#
# Copyright 2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This script is executed when by TravisCI to build everything and run
# automated tests.
#
(cd tests/cosimulation
./generate_random.py -m 3)
verilator --version
# Build out of tree
mkdir build
cd build
cmake ..
make -j 8
make tests
|
Change in the project directory before starting server.js, solves a problem with npm | #!/bin/bash
if [[ $EUID -eq 0 ]]; then
echo "You shouldn't start Etherpad-Lite as root!" 1>&2
echo "Use authbind if you want to use a port lower than 1024 -> http://en.wikipedia.org/wiki/Authbind" 1>&2
exit 1
fi
type -P node &>/dev/null || {
echo "You need to install node to run Etherpad-Lite!" >&2
exit 1
}
if [ -d "../bin" ]; then
cd "../"
fi
cd "node"
node server.js
| #!/bin/bash
#Move to the folder where ep-lite is installed
FOLDER=$(dirname $(readlink -f $0))
cd $FOLDER
echo $FOLDER
if [[ $EUID -eq 0 ]]; then
echo "You shouldn't start Etherpad-Lite as root!" 1>&2
echo "Use authbind if you want to use a port lower than 1024 -> http://en.wikipedia.org/wiki/Authbind" 1>&2
exit 1
fi
type -P node &>/dev/null || {
echo "You need to install node to run Etherpad-Lite!" >&2
exit 1
}
if [ -d "../bin" ]; then
cd "../"
fi
cd "node"
node server.js
|
Add autocompletions for Kubernetes client | # General
export EDITOR=nano
# Python
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
# Node
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Rust
[ -s "$HOME/.cargo/env" ] && \. "$HOME/.cargo/env"
# Go
export GOPROXY=https://proxy.golang.org
| # General
export EDITOR=nano
# Python
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
# Node
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Rust
[ -s "$HOME/.cargo/env" ] && \. "$HOME/.cargo/env"
# Go
export GOPROXY=https://proxy.golang.org
# Kubernetes
if command -v kubectl 1>/dev/null 2>&1; then
source <(kubectl completion zsh)
fi
|
Upgrade CI to Docker 19.03.5 | #!/bin/bash
set -ex
###########################################################
# UTILS
###########################################################
apt-get update
apt-get install --no-install-recommends -y ca-certificates net-tools libxml2-utils git curl libudev1 libxml2-utils iptables iproute2 jq
rm -rf /var/lib/apt/lists/*
curl https://raw.githubusercontent.com/spring-io/concourse-java-scripts/v0.0.2/concourse-java.sh > /opt/concourse-java.sh
###########################################################
# JAVA
###########################################################
JDK_URL=$( ./get-jdk-url.sh $1 )
mkdir -p /opt/openjdk
cd /opt/openjdk
curl -L ${JDK_URL} | tar zx --strip-components=1
test -f /opt/openjdk/bin/java
test -f /opt/openjdk/bin/javac
###########################################################
# DOCKER
###########################################################
cd /
curl -L https://download.docker.com/linux/static/stable/x86_64/docker-19.03.2.tgz | tar zx
mv /docker/* /bin/
chmod +x /bin/docker*
export ENTRYKIT_VERSION=0.4.0
curl -L https://github.com/progrium/entrykit/releases/download/v${ENTRYKIT_VERSION}/entrykit_${ENTRYKIT_VERSION}_Linux_x86_64.tgz | tar zx
chmod +x entrykit && \
mv entrykit /bin/entrykit && \
entrykit --symlink
| #!/bin/bash
set -ex
###########################################################
# UTILS
###########################################################
apt-get update
apt-get install --no-install-recommends -y ca-certificates net-tools libxml2-utils git curl libudev1 libxml2-utils iptables iproute2 jq
rm -rf /var/lib/apt/lists/*
curl https://raw.githubusercontent.com/spring-io/concourse-java-scripts/v0.0.2/concourse-java.sh > /opt/concourse-java.sh
###########################################################
# JAVA
###########################################################
JDK_URL=$( ./get-jdk-url.sh $1 )
mkdir -p /opt/openjdk
cd /opt/openjdk
curl -L ${JDK_URL} | tar zx --strip-components=1
test -f /opt/openjdk/bin/java
test -f /opt/openjdk/bin/javac
###########################################################
# DOCKER
###########################################################
cd /
curl -L https://download.docker.com/linux/static/stable/x86_64/docker-19.03.5.tgz | tar zx
mv /docker/* /bin/
chmod +x /bin/docker*
export ENTRYKIT_VERSION=0.4.0
curl -L https://github.com/progrium/entrykit/releases/download/v${ENTRYKIT_VERSION}/entrykit_${ENTRYKIT_VERSION}_Linux_x86_64.tgz | tar zx
chmod +x entrykit && \
mv entrykit /bin/entrykit && \
entrykit --symlink
|
Use xargs to pass stdin to tar and only execute subsequent commands if it was successful | #! /bin/sh
##############
#Tarring files
##############
tar -czf $1.tar.gz $1
echo $1.tar.gz
exit 0
| #! /bin/sh
##############
#Tarring files
##############
xargs tar -czf $1.tar.gz
&& echo $1.tar.gz
&& exit 0
|
Update Go Model Generator Docker Image to marcnuri/golang-1.17-java11 | #!/bin/bash
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
BASEDIR=$(dirname "$BASH_SOURCE")
ABSOLUTE_BASEDIR=$(realpath "$BASEDIR/..")
DOCKER_IMAGE_GOLANG=marcnuri/golang-1.16-java11
docker run \
--rm \
-v "$ABSOLUTE_BASEDIR":/usr/src \
-w /usr/src/kubernetes-model-generator \
-e LOCAL_USER="$(id -u):$(id -g)" \
$DOCKER_IMAGE_GOLANG "./generateModel.sh" "$@"
| #!/bin/bash
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
BASEDIR=$(dirname "$BASH_SOURCE")
ABSOLUTE_BASEDIR=$(realpath "$BASEDIR/..")
DOCKER_IMAGE_GOLANG=marcnuri/golang-1.17-java11
docker run \
--rm \
-v "$ABSOLUTE_BASEDIR":/usr/src \
-w /usr/src/kubernetes-model-generator \
-e LOCAL_USER="$(id -u):$(id -g)" \
$DOCKER_IMAGE_GOLANG "./generateModel.sh" "$@"
|
Add quotes to the PATH variable. | #!/bin/bash
set -e
set -u
set -o pipefail
print_public_key() {
local key=$1
if [[ -n "$key" ]]; then
jq -c -n --arg public_key "$key" '{ "public_key": $public_key }'
else
echo '{"public_key":""}'
fi
}
export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
PUBLIC_KEY=${PUBLIC_KEY-}
if ! test -t 0; then
eval "$(jq -r '@sh "PUBLIC_KEY=\(.public_key)"')"
fi
if [[ -f $PUBLIC_KEY ]]; then
print_public_key "$(cat "${PUBLIC_KEY}")"
else
print_public_key "${PUBLIC_KEY}"
fi
| #!/bin/bash
set -e
set -u
set -o pipefail
print_public_key() {
local key=$1
if [[ -n "$key" ]]; then
jq -c -n --arg public_key "$key" '{ "public_key": $public_key }'
else
echo '{"public_key":""}'
fi
}
export PATH='/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin'
PUBLIC_KEY=${PUBLIC_KEY-}
if ! test -t 0; then
eval "$(jq -r '@sh "PUBLIC_KEY=\(.public_key)"')"
fi
if [[ -f $PUBLIC_KEY ]]; then
print_public_key "$(cat "${PUBLIC_KEY}")"
else
print_public_key "${PUBLIC_KEY}"
fi
|
Add KeenClientFramework to CI build. Build is currently broken. | #!/bin/sh
set -e -o pipefail
xcodebuild \
-scheme KeenSwiftClientExample \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean build | bundle exec xcpretty --color
xcodebuild \
-scheme KeenClientExample \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean build | bundle exec xcpretty --color
xcodebuild \
-scheme KeenClient \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean test | bundle exec xcpretty --color | #!/bin/sh
set -e -o pipefail
xcodebuild \
-scheme KeenSwiftClientExample \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean build | bundle exec xcpretty --color
xcodebuild \
-scheme KeenClientExample \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean build | bundle exec xcpretty --color
xcodebuild \
-scheme KeenClientFramework \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean build | bundle exec xcpretty --color
xcodebuild \
-scheme KeenClient \
-sdk iphonesimulator \
-destination 'platform=iOS Simulator,name=iPhone 6,OS=9.2' \
ONLY_ACTIVE_ARCH=NO \
clean test | bundle exec xcpretty --color |
Fix - Add shebang in bash script | coverage run --source=survey --omit=survey/migrations/* ./manage.py test
coverage html
| #!/bin/bash
coverage run --source=survey --omit=survey/migrations/* ./manage.py test
coverage html
|
Improve test latest version script | # THIS SCRIPT WILL LAUNCH AN NPM UPDATE ONLY IF IS LAUNCHED BY A TRAVIS CRON
# WE USE IT TO TEST A BUILD WE THE LATEST VERSION OF OUR DEPENDENCIES, EVERY NIGHT
if [ "$TRAVIS_EVENT_TYPE" != "cron" ]; then
echo "Skipping test of the latest version; just doing a build."
exit 0
fi
npm i -g npm-check-updates
npm-check-updates -u | # THIS SCRIPT WILL LAUNCH AN NPM UPDATE ONLY IF IS LAUNCHED BY A TRAVIS CRON
# WE USE IT TO TEST A BUILD WE THE LATEST VERSION OF OUR DEPENDENCIES, EVERY NIGHT
if [ "$TRAVIS_EVENT_TYPE" != "cron" ]; then
echo "Skipping test of the latest version; just doing a build."
exit 0
fi
npm i -g npm-check-updates
# Remove all ^ or ~ in the package.json file before update to be sure to keep the latest version
sed -i 's/"^/"/g' package.json
sed -i 's/"~/"/g' package.json
# Update package in the latest version
npm-check-updates -u |
Remove brew upgrade optional params | #!/bin/bash
# -------------------------------
# Commands
# -------------------------------
# Quick Commands
# -------------------------------------------------------------------
start-workday() {
source ~/.bash_profile;
command cd;
printHeader "Update brew formula";
brew update; brew upgrade --all; brew cleanup; brew doctor;
printHeader "Update npm packages";
npm update -g;
command cd $HOME;
}
# ExpWeb Commands
# -------------------------------------------------------------------
alias cd-expweb="command cd $TRUNK"
alias gw="./gradlew"
alias expweb-clean="sudo rm -rf $TRUNK/build"
alias expweb-build="cd-expweb; expweb-clean; gw clean build"
alias expweb-build-fast="cd-expweb; expweb-clean; gw clean build -xCheck -x minifyResources -Pdebug"
alias expweb-start="cd-expweb; expweb-clean; gw -Pdebug startExpweb"
alias expweb-start-stub="cd-expweb; expweb-clean; gw -Pdebug -Pstub startExpweb"
alias expweb-latest-version="cd-expweb; p4 counters -e expweb_trunk-ci_last_green_cl | ggrep -oP '(\d)+'"
expweb-sync-latest() {
eval "cd-expweb";
p4 sync ${TRUNK}/...@$(expweb-latest-version);
}
# Docker Commands
# -------------------------------------------------------------------
alias docker-start-vm="docker-machine create --driver=virtualbox default"
#docker-start-terminal() {
# eval "$(docker-machine env default)";
#}
| #!/bin/bash
# -------------------------------
# Commands
# -------------------------------
# Quick Commands
# -------------------------------------------------------------------
start-workday() {
source ~/.bash_profile;
command cd;
printHeader "Update brew formula";
brew update; brew upgrade; brew cleanup; brew doctor;
printHeader "Update npm packages";
npm update -g;
command cd $HOME;
}
# ExpWeb Commands
# -------------------------------------------------------------------
alias cd-expweb="command cd $TRUNK"
alias gw="./gradlew"
alias expweb-clean="sudo rm -rf $TRUNK/build"
alias expweb-build="cd-expweb; expweb-clean; gw clean build"
alias expweb-build-fast="cd-expweb; expweb-clean; gw clean build -xCheck -x minifyResources -Pdebug"
alias expweb-start="cd-expweb; expweb-clean; gw -Pdebug startExpweb"
alias expweb-start-stub="cd-expweb; expweb-clean; gw -Pdebug -Pstub startExpweb"
alias expweb-latest-version="cd-expweb; p4 counters -e expweb_trunk-ci_last_green_cl | ggrep -oP '(\d)+'"
expweb-sync-latest() {
eval "cd-expweb";
p4 sync ${TRUNK}/...@$(expweb-latest-version);
}
# Docker Commands
# -------------------------------------------------------------------
alias docker-start-vm="docker-machine create --driver=virtualbox default"
#docker-start-terminal() {
# eval "$(docker-machine env default)";
#}
|
Add test for clang-3.8 C++11 support | #!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_nowallet
export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tests in python3.5, see doc/dependencies.md
export PACKAGES="python3-zmq"
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
export SYSCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
| #!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_nowallet
export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tests in python3.5, see doc/dependencies.md
export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
export SYSCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8"
|
Configure Elixir editor env vars | if [ -f $HOME/elixir-ls ]; then
export PATH=$HOME/elixir-ls:$PATH
fi
| if [ -f $HOME/elixir-ls ]; then
export PATH=$HOME/elixir-ls:$PATH
fi
export ELIXIR_EDITOR="emacsclient +__LINE__ __FILE__"
export PLUG_EDITOR=$ELIXIR_EDITOR
export ECTO_EDITOR=$ELIXIR_EDITOR
|
Remove rootfs before making deb | #!/bin/sh
# depends on `fpm`, install via `gem`
VERSION="0.16.0"
BUILD="slack1"
CONTACT="Jane Doe <janed@example.com>"
PACKAGE_NAME="go-audit"
DIRNAME="$(cd "$(dirname "$0")" && pwd)"
OLDESTPWD="$PWD"
go build
mkdir -p "$PWD/rootfs/usr/local/bin"
mv "$PWD/go-audit" "$PWD/rootfs/usr/local/bin/"
fakeroot fpm -C "$PWD/rootfs" \
--license "MIT" \
--url "https://github.com/slackhq/go-audit" \
--vendor "" \
--description "go-audit is an alternative to the auditd daemon that ships with many distros." \
-d "auditd" \
-m "${CONTACT}" \
-n "${PACKAGE_NAME}" -v "$VERSION-$BUILD" \
-p "$OLDESTPWD/${PACKAGE_NAME}_${VERSION}-${BUILD}_amd64.deb" \
-s "dir" -t "deb" \
"usr"
| #!/bin/sh
# depends on `fpm`, install via `gem`
VERSION="0.16.0"
BUILD="slack1"
CONTACT="Jane Doe <janed@example.com>"
PACKAGE_NAME="go-audit"
DIRNAME="$(cd "$(dirname "$0")" && pwd)"
OLDESTPWD="$PWD"
go build
rm -f "$PWD/rootfs"
mkdir -p "$PWD/rootfs/usr/local/bin"
mv "$PWD/go-audit" "$PWD/rootfs/usr/local/bin/"
fakeroot fpm -C "$PWD/rootfs" \
--license "MIT" \
--url "https://github.com/slackhq/go-audit" \
--vendor "" \
--description "go-audit is an alternative to the auditd daemon that ships with many distros." \
-d "auditd" \
-m "${CONTACT}" \
-n "${PACKAGE_NAME}" -v "$VERSION-$BUILD" \
-p "$OLDESTPWD/${PACKAGE_NAME}_${VERSION}-${BUILD}_amd64.deb" \
-s "dir" -t "deb" \
"usr"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.