Instruction stringlengths 14 778 | input_code stringlengths 0 4.24k | output_code stringlengths 1 5.44k |
|---|---|---|
Add script that calculates read and base count of a fastq file | #!/bin/bash
# Outputs the read count and base count of a FASTQ file (optionally gzip-compressed).
# Usage: script.sh <reads.fastq[.gz]>
set -euo pipefail
# Required first argument: path to the FASTQ file.
FASTQ_FILE=$1
# Use zcat for gzip-compressed input, plain cat otherwise.
CAT_TOOL="cat"
if [[ "$FASTQ_FILE" == *.gz ]]; then CAT_TOOL="zcat"; fi
# A FASTQ record is 4 lines; line 2 of each record holds the bases.
# GNU sed's 2~4p selects every 4th line starting at line 2.
"${CAT_TOOL}" "${FASTQ_FILE}" | sed -n "2~4p" | awk 'BEGIN{bases=0;reads=0;} {bases+=length($1);reads+=1} END{print reads,bases}'
| |
Add script to create a mobilespec project using local repos. | #!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Script creates a mobilespec project that uses all local repositories.
# It currently installs only the iOS and Android platforms.
# Based on: https://wiki.apache.org/cordova/WorkingWithThree#preview
if [[ ! -d cordova-mobile-spec ]]; then
echo "Please run this script from the directory that contains cordova-mobile-spec"
exit 1
fi
if [[ -e mobilespec ]]; then
echo "Directory \"mobilespec\" already exists. Delete it first then re-run."
exit 1
fi
echo "Creating mobilespec project. If you have any errors, it may be from missing repositories."
echo "To clone needed repositories:"
echo " ./cordova-coho/coho repo-clone -r plugins -r mobile-spec -r android -r ios -r cli"
echo "To update all repositories:"
echo " ./cordova-coho/coho repo-update -r auto"
REPO_PARENT="$PWD"
set -e
./cordova-cli/bin/cordova create mobilespec org.apache.mobilespec mobilespec
( cd cordova-js; grunt )
cd mobilespec
echo '{
"id":"org.apache.mobilespec",
"name":"mobilespec",
"lib": {
"android": {
"uri": "'"$REPO_PARENT/cordova-android"'",
"version": "dev",
"id": "cordova-android-dev"
},
"ios": {
"uri": "'"$REPO_PARENT/cordova-ios"'",
"version": "dev",
"id": "cordova-ios-dev"
}
}
}' > .cordova/config.json
set -x
../cordova-cli/bin/cordova platform add ios android
../cordova-cli/bin/cordova plugin add ../cordova-mobile-spec/dependencies-plugin
rm -r www
ln -s ../cordova-mobile-spec www
rm -rf platforms/ios/CordovaLib
../cordova-ios/bin/update_cordova_subproject platforms/ios/mobilespec.xcodeproj
../cordova-cli/bin/cordova prepare
cp ../cordova-js/pkg/cordova.android.js platforms/android/assets/www/cordova.js
cp ../cordova-js/pkg/cordova.ios.js platforms/ios/www/cordova.js
ln -s ../cordova-cli/bin/cordova cordova
set +x
echo "App created in the mobilespec/ directory."
echo "Symlink to CLI created as mobilespec/cordova"
| |
Add script to make sure all nodes are properly running after reboot | #!/bin/bash
# Validate arguments before using them (the original echoed ${STACK_NAME}
# before checking $#, so a missing argument printed an empty stack name).
if [ $# -ne 1 ]; then
  echo "Usage $0 <stack_name>";
  exit 1;
fi
STACK_NAME=$1
echo "Processing stack ${STACK_NAME} on reboot"
# Script to be run on boot, on crontab
# makes sure that all swarm nodes are ready, so services are split
start_stack() {
echo "Starting stack ${STACK_NAME}"
cd ~/docker_home_server
env $(cat .env | grep "^[A-Z]" | xargs) docker stack deploy --compose-file docker-compose.yml ${STACK_NAME}
}
# Remove the stack and force-disconnect any endpoints left on its default
# overlay network so a subsequent deploy can recreate it cleanly.
stop_stack() {
  echo "Stopping stack ${STACK_NAME}"
  docker stack rm ${STACK_NAME}
  # Give the swarm time to tear services down.
  sleep 10
  # NOTE(review): endpoints are disconnected only when grep does NOT find
  # ${STACK_NAME}_default (exit status > 0). That looks inverted -- you can
  # only inspect/disconnect a network that still exists -- but the condition
  # is kept as-is; confirm intent before changing.
  docker network ls | grep ${STACK_NAME}_default
  if [ $? -gt 0 ]; then
    for i in `docker network inspect ${STACK_NAME}_default | grep Name | grep ${STACK_NAME}_ | grep -v ${STACK_NAME}_default | cut -d':' -f2 | cut -d'"' -f 2`; do
      echo "Disconnecting endpoint $i from network ${STACK_NAME}_default";
      docker network disconnect -f ${STACK_NAME}_default $i;
    done;
  fi
  sleep 10
}
# is it running?
docker stack ls | grep ${STACK_NAME}
if [ $? -gt 0 ]; then
start_stack
fi
# check workers are up
TMP_FILE="/tmp/pending_nodes.txt"
echo "Checking workers"
# Poll up to 5 times (30s apart) until every worker reports "Ready Active".
for i in $(seq 1 5); do
  # Bug fix: the original always printed "Attempt 1"; report the real iteration.
  echo "Attempt $i";
  docker node ls --filter role=worker --format "{{.Hostname}} {{.Status}} {{.Availability}}" | grep -v "Ready Active" | tee ${TMP_FILE};
  # Number of workers not yet Ready/Active.
  PENDING=$(wc -l < "${TMP_FILE}")
  echo "Pending: ${PENDING}"
  if [ "$PENDING" -eq 0 ]; then
    break
  fi
  sleep 30
done
# check workers have volumes mounted
echo "Checking workers mounted volumes"
echo `docker node ls --filter role=worker --format "{{.Hostname}} {{.Status}} {{.Availability}}" | grep "Ready Active" | cut -f 1 -d ' '`
for node in `docker node ls --filter role=worker --format "{{.Hostname}} {{.Status}} {{.Availability}}" | grep "Ready Active" | cut -f 1 -d ' '`; do
echo "Checking volumes on $node"
ssh $node "mount | grep volumes || mount /media/volumes"
done
# restart stack
stop_stack
start_stack
# wait for OpenLDAP
sleep 120
# add users
# in case it's not ready yet, try 5 times
# Retry up to 5 times (30s apart) in case OpenLDAP is not ready yet.
for i in $(seq 1 5); do
  # Bug fix: the original always printed "Attempt 1"; report the real iteration.
  echo "Attempt $i";
  if ./add_users.sh ${STACK_NAME}; then
    break;
  fi
  sleep 30
done
| |
Revert "Don't validate yml files for creating the cluster" | #!/bin/bash
set -ex
./create_secrets.sh
kubectl create \
-f ceph-mds-v1-dp.yaml \
-f ceph-mon-v1-svc.yaml \
-f ceph-mon-v1-dp.yaml \
-f ceph-mon-check-v1-dp.yaml \
-f ceph-osd-v1-ds.yaml \
--namespace=ceph --validate=false
| #!/bin/bash
set -ex
./create_secrets.sh
kubectl create \
-f ceph-mds-v1-dp.yaml \
-f ceph-mon-v1-svc.yaml \
-f ceph-mon-v1-dp.yaml \
-f ceph-mon-check-v1-dp.yaml \
-f ceph-osd-v1-ds.yaml \
--namespace=ceph
|
Add script to convert to Python 3 format | #!/bin/sh
# Convert urlwatch sources to Python 3.x compatible format
SOURCES="urlwatch lib/urlwatch/*.py examples/hooks.py.example setup.py"
2to3 -w $SOURCES
| |
Add test wrapper (Salt only for now) | #!/bin/bash
# Testing what? Salt? Ansible?
#
case "${1}" in
salt)
vagrant="salttest"
;;
ansible)
vagrant="ansible"
echo "Ansible tests not implemented just yet"
exit 0
;;
*)
echo "Invalid test type (salt or ansible only)"
exit 1
;;
esac
# Define tests
#
tests="../test/test_packages.py \
../test/test_services.py \
../test/test_files.py \
../test/test_links.py"
# Run tests using testinfra
#
SCRIPTNAME=$(basename $0)
DIRNAME=$(dirname $0)
# Setup for pip virtual environment
#
export WORKON_HOME=~/.pyenvironments
mkdir -p ${WORKON_HOME}
# Install pips for python virtual environment
#
pip install virtualenv virtualenvwrapper
# Source env wrapper
#
source /usr/local/bin/virtualenvwrapper.sh
# Make a virtual environment to install new pips
#
mkvirtualenv laptop-build
# Upgrade pip and install pips
#
pip install --upgrade pip
pip install testinfra paramiko
# Bring up VM and provision, save ssh key for tests and run tests
#
vagrant up salttest --provision \
&& vagrant ssh-config salttest > salttest-sshkey \
&& testinfra -v --hosts=salttest --ssh-config=salttest-sshkey ${tests}
# Exit from the virtual environment and clean it up
#
deactivate
rmvirtualenv laptop-build
exit 0
| |
Add script for monitoring setup on aqm | #!/bin/bash
# run this on the aqm-machine
cd "$(dirname $(readlink -f $BASH_SOURCE))"
cmds=()
cmds[0]="watch -n .2 ../show_setup.sh -vir $IFACE_CLIENTS"
cmds[1]="watch -n .2 ../show_setup.sh -vir $IFACE_SERVERA"
tmux split-window -v ${cmds[1]}
${cmds[0]}
| |
Add merge java project script | #!/bin/bash -x
PATH=$PATH:${KURENTO_SCRIPTS_HOME}
kurento_check_version.sh true
# Deploy to Kurento repositories
export SNAPSHOT_REPOSITORY=$MAVEN_KURENTO_SNAPSHOTS
export RELEASE_REPOSITORY=$MAVEN_KURENTO_RELEASES
kurento_maven_deploy.sh "$MAVEN_SETTINGS"
# Deploy to Central (only release)
export SNAPSHOT_REPOSITORY=
export RELEASE_REPOSITORY=$MAVEN_SONATYPE_NEXUS_STAGING
kurento_maven_deploy.sh
# Upload to builds
VERSION=$(kurento_get_version.sh)
echo "$VERSION - $(date) - $(date +'%Y%m%d-%H%M%S')" > project-version.txt
| |
Add script to prune extraneous .keep files | #!/usr/bin/env bash
# Remove .keep placeholders from starter directories that already contain at
# least one Java source file. NUL-delimited iteration so paths with spaces or
# glob characters are handled safely (the original word-split $(find ...)).
find exercises -mindepth 2 -maxdepth 2 -type d -name "src" -print0 |
while IFS= read -r -d '' EXERCISE_DIRECTORY; do
  STARTER_DIRECTORY="${EXERCISE_DIRECTORY}/main/java"
  # Count only regular .java files directly inside the starter directory.
  STARTER_FILE_COUNT=$(find "${STARTER_DIRECTORY}" -mindepth 1 -maxdepth 1 -type f -name "*.java" | wc -l)
  KEEP_FILE_LOCATION="${STARTER_DIRECTORY}/.keep"
  if (( STARTER_FILE_COUNT > 0 )) && [[ -f "${KEEP_FILE_LOCATION}" ]]; then
    echo "Removing unnecessary keep file ${KEEP_FILE_LOCATION}..."
    rm "${KEEP_FILE_LOCATION}"
  fi
done
| |
Add helper script for resetting all integration tests. | #!/bin/bash
################################################################################
# This script resets all integration tests.
################################################################################
SCRIPTDIR="$(dirname "$0")"
ROOTDIR="${SCRIPTDIR}/../"
cd $ROOTDIR
CWD=$(pwd)
for D in `find -H integration-tests -type d -d -maxdepth 1 -mindepth 1`
do
printf "Resetting %s...\n" "$D"
cd $CWD/$D
meteor reset
done
| |
Add helper script to build dmg's from a local mac | #!/bin/bash
#
# This wrapper for the package script contains some
# env vars, so it can build Gaphor with Python 3.8.
#
# It is intended to run from a local Mac, not CI.
#
export PATH="/usr/local/opt/python@3.8/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/python@3.8/lib"
export PKG_CONFIG_PATH="/usr/local/opt/python@3.8/lib/pkgconfig"
./package.sh
| |
Add script to run all models | #!/bin/bash
## COINF binary model
cat $1 | sed "s/XYX//g" | /app/vw-8.2 -i hpv16.k18.s4000.coinf.binary.model -p /dev/stdout > coinf.preds.txt
## primary lineage model
cat $1 | sed "s/XYX//g" | /app/vw-8.2 -i hpv.k18.s4000.lineage.ect.model -p /dev/stdout > lineage.preds.txt
## primary sublineage model
cat $1 | sed "s/XYX//g" | /app/vw-8.2 -i hpv16.k18.s4000.sublineage.ect.model -p /dev/stdout > sublineage.preds.txt
| |
Fix awkward typo in macOS script | #!/bin/sh
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Periodically cleanup and reboot if no Jenkins subprocesses are running.
set -uf -o pipefail
PATH=/usr/local/bin:/sbin:/usr/local/sbin:$PATH
exit_if_jenkins() {
jenkins=$(pgrep java)
if [[ "$jenkins" -- "" ]]; then
echo "no java, no jenkins"
return 0
fi
pstree $jenkins | grep -v java && echo "jenkins is running..." && exit 1
}
exit_if_jenkins
echo "waiting to see if any jobs are coming in..."
sleep 15
exit_if_jenkins
echo "doing it"
killall java
sudo rm -Rf ~jenkins/.minikube || echo "could not delete minikube"
sudo rm -Rf ~/jenkins/minikube-integration/* || true
sudo rm /var/db/dhcpd_leases || echo "could not clear dhcpd leases"
sudo reboot
| #!/bin/sh
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Periodically cleanup and reboot if no Jenkins subprocesses are running.
set -uf -o pipefail
PATH=/usr/local/bin:/sbin:/usr/local/sbin:$PATH
exit_if_jenkins() {
jenkins=$(pgrep java)
if [[ "${jenkins}" == "" ]]; then
echo "no java, no jenkins"
return 0
fi
pstree "${jenkins}" | grep -v java && echo "jenkins is running..." && exit 1
}
exit_if_jenkins
echo "waiting to see if any jobs are coming in..."
sleep 15
exit_if_jenkins
echo "doing it"
killall java
sudo rm -Rf ~jenkins/.minikube || echo "could not delete minikube"
sudo rm -Rf ~/jenkins/minikube-integration/* || true
sudo rm /var/db/dhcpd_leases || echo "could not clear dhcpd leases"
sudo reboot
|
Add script to sync rss file to server | #!/usr/bin/env bash
# mkvirtualenv PinboardPodcastRss
# pip install -r pip-requirements
PBPCDIR=${PBPCDIR:-$HOME/Projects/PinboardPodcastRss/}
REMOTE=${REMOTE:-myserver:/srv/www/mysite/}
LOCAL=${LOCAL:-$PBPCDIR/pinduff.rss}
cd $PBPCDIR || exit 1
$HOME/.virtualenvs/PinboardPodcastRss/bin/python pbpodcast.py && rsync -avzue ssh "$LOCAL" "$REMOTE"
| |
Add example show you how to make a socks5 tunnel | #!/bin/sh
# move dtunnel to /usr/bin/dtunnel
# echo "mypassword" > $HOME/.ssh/pw
cat $HOME/.ssh/pw | sudo -S killall -9 dtunnel
sleep 2
# the server side
cat $HOME/.ssh/pw | sudo -S nice -n -10 /home/tomy/bin/dtunnel --reg node1.domain.com -local socks5 -clientkey verylongpasswd &
# the client side
cat $HOME/.ssh/pw | sudo -S nice -n -10 /home/tomy/bin/dtunnel --link node1.domain.com -local :7070 -clientkey verylongpasswd &
| |
Add script for running locally | #!/bin/bash
# Stop on error
set -e;
# set -x;
while getopts ":ds" opt
do
case $opt in
d)
debugMode=true
echo "Option set to start API in debug mode."
debugFlags="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8000 -Djava.compiler=NONE"
;;
p)
apiPort=$OPTARG
echo "Option set run API on port ${apiPort}"
;;
s)
skipBuild=true
echo "Option set to skip build"
;;
help|\?)
echo -e "Usage: [-d] [-s]"
echo -e "\t d - debug. Starts the API in debug mode, which an IDE can attach to on port 8001"
#echo -e "\t p <port> - Starts the API on a specific port (default 8080)"
echo -e "\t s - skip. Skips the build."
exit 0
;;
esac
done
#tomcatPID=`ps aux | grep '[c]atalina' | awk '{print $2}'`
#if [ -n "${tomcatPID}" ]
#then
# sudo kill -9 $tomcatPID
#fi
#rm /tc/logs/catalina.out || true
if [ -z "${skipBuild}" ]
then
mvn clean install -DskipTests -P dev
fi
#export JPDA_ADDRESS=8000
#export JPDA_TRANSPORT=dt_socket
#/tc/bin/catalina.sh jpda start
#tail -f /tc/logs/catalina.out
#Port appears set at 8080, possibly by ./src/main/deb/config.properties
#More likely resources/application.yml
#Can't get this self hosted option to listen for debug connections
java ${debugFlags} -jar target/ihtsdo-mlds.war \
--spring.config.location=config.properties --spring.profiles.active=mlds,dev
#java -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000 -Djava.compiler=NONE \
# -jar target/ihtsdo-mlds.war \
#--spring.config.location=config.properties --spring.profiles.active=mlds,dev
| |
Update all servers with one script | #!/bin/bash
ssh vmpsateam01-01 "chef-client"
ssh vmpsateam01-02 "chef-client"
ssh vmpsateam01-03 "chef-client"
ssh vmpsateam01-04 "chef-client"
ssh vmpsateam01-05 "chef-client"
ssh vmpsateam01-06 "chef-client"
ssh vmpsateam01-07 "chef-client"
| |
Add open contracting standard deploy script | echo "$PRIVATE_KEY" | tr '#' '\n' | tr '_' ' ' > id_rsa
chmod 600 id_rsa
echo '|1|FkTASz83nlFnGSnvrDpt8jGNYko=|iuezK/A43QOIAZied/7LNZ30LGA= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOiwQfg1hM1fcXxtssbgfmrnj2iSNounLWkgeWjU9Fr+slHUpcSt0Gk8o3jihTIXqR3z/KgSPqKmaDv3GIEzwBo=' >> ~/.ssh/known_hosts
rsync -e 'ssh -i id_rsa' -av --delete build/ tmp-prototype-ocds-docs@dev2.default.opendataservices.uk0.bigv.io:~/web/$TRAVIS_BRANCH
| |
Add class and functions to interact with log file | include app.server.validator.AppServerValidator
include base.vars.BaseVars
include calendar.util.CalendarUtil
include git.util.GitUtil
include file.util.FileUtil
include string.util.StringUtil
LogFile(){
get(){
local appServer=$(AppServerValidator returnAppServer ${1})
local branch=$(BaseVars returnBranch ${2})
local logFile=(/d/logs/${branch}/${appServer}/
$(CalendarUtil getTimestamp date)/
${branch}-build-$(GitUtil getSHA ${branch} short)-
$(CalendarUtil getTimestamp clock).log
)
FileUtil makeFile $(StringUtil join logFile)
}
# Run a command and append its combined stdout+stderr to the log file.
write(){
  # Log file path is resolved from the first argument.
  local logFile=$(readvar ${1})
  shift
  # Bug fix: the original line was `local cmd=$(readvar ${2})}` -- the stray
  # trailing '}' was appended to the command value. NOTE(review): reading ${2}
  # *after* the shift skips an argument; presumably ${1} was intended -- kept
  # as ${2} to preserve behavior, confirm against callers.
  local cmd=$(readvar ${2})
  ${cmd} |& tee -a ${logFile}
}
$@
} | |
Add a script that installs everything in /tmp/opentsdb | #!/bin/bash
set -xe
HBASE_VERSION=0.89.20100924
export TMPDIR=${TMPDIR-'/tmp'}/opentsdb
mkdir -p "$TMPDIR"
cd "$TMPDIR"
# 1. Download and unpack HBase.
wget http://www.apache.org/dist/hbase/hbase-$HBASE_VERSION/hbase-$HBASE_VERSION-bin.tar.gz
tar xfz hbase-$HBASE_VERSION-bin.tar.gz
cd hbase-$HBASE_VERSION
# 2. Configure HBase.
hbase_rootdir=${TMPDIR-'/tmp'}/tsdhbase
iface=lo`uname | sed -n s/Darwin/0/p`
cat >conf/hbase-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hbase.rootdir</name>
<value>file:///$hbase_rootdir/hbase-\${user.name}/hbase</value>
</property>
<property>
<name>hbase.zookeeper.dns.interface</name>
<value>$iface</value>
</property>
<property>
<name>hbase.regionserver.dns.interface</name>
<value>$iface</value>
</property>
<property>
<name>hbase.master.dns.interface</name>
<value>$iface</value>
</property>
</configuration>
EOF
# 3. Start HBase.
./bin/start-hbase.sh
# 4. Download and build OpenTSDB
cd ..
git clone git://github.com/stumbleupon/opentsdb.git
cd opentsdb
make || make MD5=md5sum
make staticroot
env COMPRESSION=none HBASE_HOME=../hbase-$HBASE_VERSION ./src/create_table.sh
tsdtmp=${TMPDIR-'/tmp'}/tsd # For best performance, make sure
mkdir -p "$tsdtmp" # your temporary directory uses tmpfs
./src/tsdb tsd --port=4242 --staticroot=build/staticroot --cachedir="$tsdtmp"
| |
Add script for creating roles and databases to PostgreSQL | #!/bin/bash
# Create a PostgreSQL role and a database with the same name.
usage() {
  echo "Usage: $0 role_and_db_name"
}
# Require exactly one argument: the role/database name.
if [ $# -ne 1 ] ; then
  usage
  exit 1
fi
# Create the role interactively (prompts for attributes and password),
# then a database of the same name. Quote "$1" so names with unusual
# characters are passed through intact.
sudo -u postgres createuser --interactive -P "$1"
sudo -u postgres createdb "$1"
| |
Prepare for using standard python tests | #!/bin/bash -xe
# This script will be run by OpenStack CI before unit tests are run,
# it sets up the test system as needed.
# Developers should setup their test systems in a similar way.
# This setup needs to be run as a user that can run sudo.
# The root password for the MySQL database; pass it in via
# MYSQL_ROOT_PW.
DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}
# This user and its password are used by the tests, if you change it,
# your tests might fail.
DB_USER=openstack_citest
DB_PW=openstack_citest
sudo -H mysqladmin -u root password $DB_ROOT_PW
# It's best practice to remove anonymous users from the database. If
# a anonymous user exists, then it matches first for connections and
# other connections from that host will not work.
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
DELETE FROM mysql.user WHERE User='';
FLUSH PRIVILEGES;
GRANT ALL PRIVILEGES ON *.*
TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"
# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
SET default_storage_engine=MYISAM;
DROP DATABASE IF EXISTS openstack_citest;
CREATE DATABASE openstack_citest CHARACTER SET utf8;"
# Same for PostgreSQL
# The root password for the PostgreSQL database; pass it in via
# POSTGRES_ROOT_PW.
DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave}
# Setup user
root_roles=$(sudo -H -u postgres psql -t -c "
SELECT 'HERE' from pg_roles where rolname='$DB_USER'")
if [[ ${root_roles} == *HERE ]];then
sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
else
sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
fi
# Store password for tests
cat << EOF > $HOME/.pgpass
*:*:*:$DB_USER:$DB_PW
EOF
chmod 0600 $HOME/.pgpass
# Now create our database
psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest"
createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest
| |
Add a helper script to delete big files from git | #!/bin/bash
#set -x
# Shows you the largest objects in your repo's pack file.
# Written for osx.
#
# @see http://stubbisms.wordpress.com/2009/07/10/git-script-to-show-largest-pack-objects-and-trim-your-waist-line/
# @author Antony Stubbs
# set the internal field spereator to line break, so that we can iterate easily over the verify-pack output
IFS=$'\n';
# list all objects including their size, sort by size, take top 10
objects=`git verify-pack -v .git/objects/pack/pack-*.idx | grep -v chain | sort -k3nr | head -n 25`
echo "All sizes are in kB's. The pack column is the size of the object, compressed, inside the pack file."
output="size,pack,SHA,location"
for y in $objects
do
# extract the size in bytes
size=$((`echo $y | cut -f 5 -d ' '`/1024))
# extract the compressed size in bytes
compressedSize=$((`echo $y | cut -f 6 -d ' '`/1024))
# extract the SHA
sha=`echo $y | cut -f 1 -d ' '`
# find the objects location in the repository tree
other=`git rev-list --all --objects | grep $sha`
#lineBreak=`echo -e "\n"`
output="${output}\n${size},${compressedSize},${other}"
done
echo -e $output | column -t -s ', ' | |
Add script for adding missing header & copyright. | #!/bin/bash
# A simple script to add header & copyright to a Java source file.
# Intended to be used with other *nix commands; e.g.:
# find . -name '*.java' -exec bash scripts/add-header.sh {} \;
file="$1"
name="$(basename "$file")"
basedir="$(dirname "$0")/.."
tmp="$file.tmp"
echo '//' > "$tmp"
echo "// $name" >> "$tmp"
echo '//' >> "$tmp"
echo >> "$tmp"
echo '/*' >> "$tmp"
cat "$basedir/LICENSE.txt" >> "$tmp"
echo '*/' >> "$tmp"
echo >> "$tmp"
cat "$file" >> "$tmp"
mv -f "$tmp" "$file"
| |
Optimize garbage collector for faster ruby | export RUBY_GC_HEAP_INIT_SLOTS=1000000
export RUBY_HEAP_SLOTS_INCREMENT=1000000
export RUBY_HEAP_SLOTS_GROWTH_FACTOR=1
export RUBY_GC_MALLOC_LIMIT=1000000000
export RUBY_HEAP_FREE_MIN=500000
| |
Add install deps for conda env | #!/usr/bin/env bash
set -e
set -v
# Run only for Linux for now
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then exit 0; fi
if [[ "$TRAVIS_OS_NAME" = "linux" ]]; then
sudo apt-get update
MINICONDAVERSION="Linux"
else
MINICONDAVERSION="MacOSX"
fi
if [[ "$CONDAPY" == "2.7" ]]; then
wget https://repo.continuum.io/miniconda/Miniconda2-latest-$MINICONDAVERSION-x86_64.sh -O miniconda.sh;
else
wget https://repo.continuum.io/miniconda/Miniconda3-latest-$MINICONDAVERSION-x86_64.sh -O miniconda.sh;
fi
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
# Keep exported paths for now
# hash -r
conda config --set always_yes yes --set changeps1 no
conda update -q conda
conda config --add channels conda-forge pkgw-forge
# Useful for debugging any issues with conda
conda info -a
which python
# The used dependencies are not the one advertised via the readme
# but can be more easily installed and suffice for the tests for now.
conda create -q -n condaenv python=$CONDAPY gtk3=3.14.15 pygobject gdk-pixbuf adwaita-icon-theme
| |
Create run file in master branch. | function init(){
switch_repo_to_dev_branch
run_cloud9_setup_scripts
}
function run_cloud9_setup_scripts(){
local called_from_directory="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
local setup_script_name_and_path=$(printf "%s/z_scripts/setup_c9_to_work_on_repo.bash" $called_from_directory)
bash $setup_script_name_and_path
}
function switch_repo_to_dev_branch(){
local called_from_directory="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $called_from_directory
git checkout develop
cd $GOPATH
}
init | |
Add a script to run unitests (pytest) and doctests. | #!/bin/sh
# DOCTESTS ####################################################################
#echo
#echo
#python3 -m doctest ./ailib/io/images.py
#if [ $? -ne 0 ]; then
# exit 1
#fi
# UNITTESTS ###################################################################
pytest
| |
Create some helpers to deal with Git repositories. | #!/usr/bin/env bash
# Returns 0 if the current directory is a git repository.
is_a_git_repo() {
git rev-parse --is-inside-work-tree >/dev/null 2>&1
}
# Returns 0 if the current directory is the .git directory.
is_dot_git() {
[ "$(git rev-parse --is-inside-git-dir 2> /dev/null)" == "true" ]
}
# Returns 0 if the index has staged, uncommitted changes
# (note: --cached diffs the index against HEAD, not the working tree).
has_uncommitted_changes() {
! git diff --quiet --ignore-submodules --cached
}
# Returns 0 if the working tree has unstaged files.
has_unstaged_files() {
! git diff-files --quiet --ignore-submodules --
}
# Returns 0 if the working tree has untracked files.
has_untracked_files() {
[ -n "$(git ls-files --others --exclude-standard)" ]
}
# Returns 0 if the working tree has stashed files.
has_stashed_files() {
git rev-parse --verify refs/stash >/dev/null 2>&1
}
# Returns the name of the current branch.
# Falls back to the short commit SHA on a detached HEAD, and to
# "(unknown)" when not in a repository at all.
git_branch_name() {
git symbolic-ref --quiet --short HEAD 2> /dev/null || \
git rev-parse --short HEAD 2> /dev/null || \
echo "(unknown)"
}
# Refreshes the index stat information so the diff checks above
# don't report files whose contents are unchanged.
update_git_index() {
git update-index --really-refresh -q >/dev/null 2>&1
}
| |
Add util function to confirm before execution | confirm() {
message="$1 [Y/n] "
read -r -p "$message" response
if [[ $response =~ ^[Yy]$ ]] || [[ -z $response ]]; then
return 0
fi
return 1
}
| |
Add script to sync org files | #!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# Bail out if the webdav volume isn't mounted.
if [ ! -d /Volumes/webdav.fastmail.com/iam.chrisdeluca.me/files ]; then
exit 1
fi
# Sync the mobileorg file to the local location.
if [ -f /Volumes/webdav.fastmail.com/iam.chrisdeluca.me/files/org/mobileorg.org ]; then
rsync -a /Volumes/webdav.fastmail.com/iam.chrisdeluca.me/files/org/mobileorg.org ~/org/
fi
# Sync the local org collection to the webdav volume.
rsync -a ~/org /Volumes/webdav.fastmail.com/iam.chrisdeluca.me/files/
| |
Add script for turning a vanilla gitlab installation into a stoplight gitlab. | #!/bin/bash
#
# This script converts a generic Gitlab CE installation into a
# Stoplight version.
#
# Run from the root of the Gitlab git repository.
#
set -ex
rm -rf \
/opt/gitlab/embedded/service/gitlab-rails/app \
/opt/gitlab/embedded/service/gitlab-rails/db \
/opt/gitlab/embedded/service/gitlab-rails/lib \
/opt/gitlab/embedded/service/gitlab-rails/config
cp -rf ./app/assets/images/mailers/stoplight_* /opt/gitlab/embedded/service/gitlab-rails/public/assets/mailers/
cp -rf ./public/stoplight-images/* /opt/gitlab/embedded/service/gitlab-rails/public/assets/
cp -rf ./app /opt/gitlab/embedded/service/gitlab-rails/app/
cp -rf ./db /opt/gitlab/embedded/service/gitlab-rails/db/
cp -rf ./lib /opt/gitlab/embedded/service/gitlab-rails/lib/
cp -rf ./config /opt/gitlab/embedded/service/gitlab-rails/config/
| |
Add backup config files script | #!/bin/bash
REPO_LOC="$HOME"
REPO_NAME="canvas-config-bk"
color_restore='\033[0m'
color_green='\033[0;32m'
color_yellow='\033[1;33m'
# Print an error message prefixed with "Error:" and abort the script.
die() {
  printf '%s\n' "Error: $1"
  exit 1
}
[ -d config ] || die "Must be run in the root of your canvas checkout"
read -r -d '' VAR << __EOF__
${color_green}
Each time you run this script, your current canvas config files will be backed up to
a git repo located at ${REPO_LOC}/${REPO_NAME}. If the repo doesn't exist yet, it
will be created. Each running of this script will result in a new commit to the repo
containing any changes to your config files from the last time you ran it.
${color_restore}
__EOF__
echo -e "$VAR"
# Remember where we started so we can copy the configs from the checkout.
prevdir=$(pwd)
cd "$REPO_LOC"
mkdir -p "$REPO_NAME"
cd "$REPO_NAME"
# Initialize a repository only if this directory is not already one.
git status >/dev/null 2>&1 || git init
cd "$prevdir"
cp -v config/*.yml "${REPO_LOC}/${REPO_NAME}/"
cd "${REPO_LOC}/${REPO_NAME}"
git add .
# Bug fix: 'git ci' is only a user-defined alias, not a git subcommand;
# use the real 'commit' so the script works on a stock git install.
git commit -m "Add config files as of $(date)"
# If the user doesn't have a remote repo, tell them how to add one;
# otherwise offer to push the new commit.
if [[ "$(git remote -v | wc -l | xargs)" = "0" ]]; then
read -r -d '' VAR << __EOF__
${color_green}
Your canvas config files are backed up into a git repo located at ${REPO_LOC}/${REPO_NAME}
${color_yellow}
You can push this to a remote private repo using:
git remote add origin <origin-url>
git push -u origin
${color_green}
For example, to back it up to bitbucket.org, it would be:
git remote add origin git@bitbucket.org:<username>/${REPO_NAME}.git
git push -u origin
${color_restore}
__EOF__
echo -e "$VAR"
else
read -r -p "Do you want me to push the changes to your remote repository?" PUSH
if [[ $PUSH =~ [Yy] ]]; then
git push
else
echo "Ok, not pushing"
fi
fi
| |
Add script to download mididuino dist targz | #!/bin/sh
#
# MidiCtrl - Script to download the dist package
#
# (c) July 2011 - Manuel Odendahl - wesen@ruinwesen.com
#
if [ ! -d mididuino-dist ]; then
wget http://ruinwesen.com/support-files/mididuino-dist.tar.gz && \
tar zxvf mididuino-dist.tar.gz && \
rm mididuino-dist.tar.gz
fi | |
Test case for binlog read bug. | #!/usr/bin/env bash
server=localhost
port=11400
tmpdir="$TMPDIR"
test -z "$tmpdir" && tmpdir=/tmp
out1="${tmpdir}/bnch$$.1"
out2="${tmpdir}/bnch$$.2"
logdir="${tmpdir}/bnch$$.d"
nc='nc -q 1'
nc -q 1 2>&1 | grep -q option && nc='nc -w 1' # workaround for older netcat
killbeanstalkd() {
{
test -z "$bpid" || kill -9 $bpid
/bin/true # Somehow this gets rid of an unnessary shell message.
} >/dev/null 2>&1
bpid=
}
cleanup() {
killbeanstalkd
rm -rf "$logdir" "$out1" "$out2"
}
catch() {
echo '' Interrupted
exit 3
}
trap cleanup EXIT
trap catch HUP INT QUIT TERM
if [ ! -x ./beanstalkd ]; then
echo "Executable ./beanstalkd not found; do you need to compile first?"
exit 2
fi
mkdir -p $logdir
./beanstalkd -p $port -b "$logdir" >/dev/null 2>/dev/null &
bpid=$!
sleep .1
if ! ps -p $bpid >/dev/null; then
echo "Could not start beanstalkd for testing (possibly port $port is taken)"
exit 2
fi
$nc $server $port <<EOF > "$out1"
use test
put 0 0 120 4
test
put 0 0 120 4
tes1
watch test
reserve
release 1 1 1
reserve
delete 2
quit
EOF
diff - "$out1" <<EOF
USING test
INSERTED 1
INSERTED 2
WATCHING 2
RESERVED 1 4
test
RELEASED
RESERVED 2 4
tes1
DELETED
EOF
res=$?
test "$res" -eq 0 || exit $res
killbeanstalkd
sleep 1
./beanstalkd -p $port -b "$logdir" >/dev/null 2>/dev/null &
bpid=$!
sleep .1
if ! ps -p $bpid >/dev/null; then
echo "Could not start beanstalkd for testing (possibly port $port is taken)"
exit 2
fi
$nc $server $port <<EOF > "$out2"
watch test
reserve
delete 1
delete 2
EOF
diff - "$out2" <<EOF
WATCHING 2
RESERVED 1 4
test
DELETED
NOT_FOUND
EOF
| |
Add script to scrape ALL data from given system | # Shell script to scrape all decision data from given source, e.g.
# ./scrape-all.sh oulu /site/paatos-oulu.6aika.fi/www/static/exports/
#SOURCE_CITY="oulu"
#OUTPUT_DIRECTORY="/site/paatos-oulu.6aika.fi/www/static/exports/"
#MAX_EVENTS="--max-events=1"
usage() {
    # Print invocation help and abort with a non-zero status.
    printf '%s\n' "Usage: $0 (oulu|vantaa|tampere|espoo) directoryname [max events]"
    exit 1
}
SOURCE_CITY="$1"
OUTPUT_DIRECTORY="$2"
shopt -s extglob
if [[ "${SOURCE_CITY}" == @(oulu|vantaa|tampere|espoo) ]]; then
echo Source is valid, scraping ${SOURCE_CITY}
else
echo ERROR: Source \"${SOURCE_CITY}\" is not valid.
usage
fi
# Fail when the output directory is missing OR not writable.
# The original joined the two tests with && (so it errored only when
# BOTH failed) and tested -x (searchable) rather than -w (writable),
# accepting an existing but read-only directory.
if [ ! -d "${OUTPUT_DIRECTORY}" ] || [ ! -w "${OUTPUT_DIRECTORY}" ]; then
echo ERROR: Non existing or non writable output directory
usage
else
echo Output directory will be ${OUTPUT_DIRECTORY}
fi;
# Only pass --max-events when a third argument was actually supplied.
# The original tested -z (empty), so it set an empty "--max-events="
# when no limit was given and ignored a limit that WAS given.
if [ -n "$3" ];
then
MAX_EVENTS="--max-events=$3"
fi;
re='^[0-9\.]+$'
node app.js --source ${SOURCE_CITY} --print-organizations | while read -r org;
do
# Split line by ' - ' and read parts to variables
IFS=' - ' read orgid name <<< "$org"
# Replace annoying characters in filenames
name=${name// /_} # inline shell string replacement
name=${name//[åä]/a}
name=${name//ö/o}
name=${name//[!0-9a-zA-Z]/-}
echo "var1=$orgid, var2=$name"
if [[ $orgid =~ $re ]] ; then
node app.js --source ${SOURCE_CITY} --organization-id $orgid ${MAX_EVENTS} --output-zip=${OUTPUT_DIRECTORY}${name}-$(date -I).zip
else
echo "Error: orgid=$orgid is not a number"
fi
done
| |
Add script to prepare arXiv zip | #!/bin/bash
# prevent "Couldn't read xref table" errors
cd patterns
for f in *.pdf; do pdftk $f cat output $f.new && mv $f.new $f; done
cd ..
# compile a single bib file
cat bib/*.bib > ms.bib
mv ldbc-snb-specification.tex ms.tex
sed -i 's/\\bibliography{.*}/\\bibliography{ms}/' ms.tex
# Even though the file exists, arXiv still states the following:
# "We do not run bibtex in the auto-TeXing procedure. If you use bibtex, you must compile the .bbl file on your computer then include that in your uploaded source files. See using bibtex.
# The name of the .bbl file must match the name of the main .tex file for the system to process the references correctly."
# If the ms.bbl file is there, just ignore this problem.
# build
./generate-tex.py
latexmk -pdf --interaction=batchmode ms
# cleanup
rm *.aux *.dvi *.thm *.lof *.log *.lot *.fls *.out *.toc *.blg *.fdb_latexmk *.pdf
rm ms.zip
# standalone documents
rm standalone-query-cards/*
rm workload-*.tex
# binary docs
rm *.docx
# create archive
zip -r ms.zip *
| |
Test script for the application | set -e
echo 'Running PHPUnit tests'
vendor/bin/phpunit -c phpunit.xml --log-junit=junit.xml
echo 'Testing database migration rollback'
php artisan migrate:reset --database=sqlite --force
| |
Add script to configure pi boot config with w1-gpio support | #!/bin/bash
# This script will attempt to configure the dtoverlay for the
# Raspberry Pi boot config. If a dtoverlay option is already
# specified, no changes will be made
find=`egrep "^dtoverlay" /boot/config.txt`
if [[ -z $find ]]; then
echo "dtoverlay=w1-gpio" >> /boot/config.txt
if [[ `egrep "^dtoverlay=w1-gpio" /boot/config.txt` ]]; then
echo "Successfully configured pi for 1-wire"
fi
else
echo "dtoverlay is already specified in /boot/config.txt - edit manually"
echo $find
fi
| |
Add script for stripping frameworks and dSYM files | #
# strip_frameworks.sh
# CouchbaseLite
#
# Copyright (c) 2017 Couchbase, Inc All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Run this script in the app target's Run Script Phase to strip non-valid
# architecture types from dynamic frameworks and dSYM files.
#
# If the app project installs Couchbase Lite framework manually,
# this is required for archiving the app for submitting to the app store.
# See http://www.openradar.me/radar?id=6409498411401216 for more detail.
#
# Strip non-valid archecture types from the given universal binary file.
strip() {
    # Remove every architecture not listed in $VALID_ARCHS from the
    # universal binary at $1 (in place, via lipo).
    # Returns 0 if at least one slice was removed, 1 otherwise.
    local i=0
    local archs arch
    archs="$(lipo -info "$1" | cut -d ':' -f3)"
    for arch in $archs; do
        if ! [[ "${VALID_ARCHS}" == *"${arch}"* ]]; then
            lipo -remove "${arch}" -output "$1" "$1" || exit 1
            i=$((i + 1))
        fi
    done
    # Numeric comparison: the original's [[ ${i} > 0 ]] compared the
    # operands lexicographically as strings.
    if (( i > 0 )); then
        archs="$(lipo -info "$1" | cut -d ':' -f3)"
        echo "Stripped $1 : ${archs}"
        return 0
    else
        return 1
    fi
}
# Go to frameworks folder:
cd "${BUILT_PRODUCTS_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Strip frameworks:
dsyms_files=()
for file in $(find . -type f -perm +111 | grep ".framework"); do
if ! [[ "$(file "${file}")" == *"dynamically linked shared library"* ]]; then
continue
fi
strip "${file}"
if [[ $? == 0 ]]; then
# Code sign the stripped framework:
if [ "${CODE_SIGNING_REQUIRED}" == "YES" ]; then
echo "Sign ${file}"
codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "${file}"
fi
bin_name="$(basename "${file}")"
dsyms_files+=(${BUILT_PRODUCTS_DIR}/${bin_name}.framework.dSYM/Contents/Resources/DWARF/${bin_name})
fi
done
# Strip dSYM files:
# Iterate over ALL collected dSYM binaries. The original wrote
# `for file in $dsyms_files`, which in bash expands only the FIRST
# element of the array, silently skipping the rest.
for file in "${dsyms_files[@]}"; do
    if [[ -e "${file}" ]]; then
        strip "${file}" || true
    fi
done
| |
Add a script to run PostgreSQL | #!/bin/bash
set -exu
options=(--rm -p5432:5432)
if [ $# -ge 1 ]; then
db_dir=$1
rm -rf ${db_dir}
mkdir -p ${db_dir}
options+=("-v${db_dir}:/var/lib/postgresql")
fi
docker run "${options[@]}" groonga/pgroonga:latest-debian-9.6
| |
Add new script enabling hex build number incrementing | # https://gist.github.com/sascha/5398750
#
# This script is based on the script provided at http://stackoverflow.com/questions/9258344/xcode-better-way-of-incrementing-build-number
# The only difference is, that it uses hexadecimal build numbers instead of decimal ones.
# For instructions on how to use this script, see the link above.
#!/bin/sh
if [ $# -ne 1 ]; then
echo usage: $0 plist-file
exit 1
fi
plist="$1"
dir="$(dirname "$plist")"
# Only increment the build number if source files have changed
if [ -n "$(find "$dir" \! -path "*xcuserdata*" \! -path "*.git" -newer "$plist")" ]; then
buildnum=$(/usr/libexec/Plistbuddy -c "Print CFBundleVersion" "$plist")
if [ -z "$buildnum" ]; then
echo "No build number in $plist"
exit 2
fi
buildnum=`printf "%d" 0x$buildnum`
buildnum=$(expr $buildnum + 1)
buildnum=`printf "%x" $buildnum`
/usr/libexec/Plistbuddy -c "Set CFBundleVersion $buildnum" "$plist"
echo "Incremented build number to $buildnum"
else
echo "Not incrementing build number as source files have not changed"
fi
| |
Build Variables for Pyenv Complied Pythons | # HOMEBREW Environment Variables
export HOMEBREW_INSTALL_CLEANUP=TRUE
export HOMEBREW_PREFIX=$(brew --prefix)
export PATH="$HOMEBREW_PREFIX/opt/tcl-tk/bin:$PATH"
# Use PyEnv to set Python Environment
export PYENV_SHELL=zsh
export PYENV_ROOT=$(pyenv root)
# export PYENV_VERSION=$(pyenv version-name)
export PYTHONPATH=$PYENV_ROOT/shims
# PyEnv & HOMEBREW Build variables
PYTHON_CONFIGURE_OPTS="--with-tcltk-includes='-I$HOMEBREW_PREFIX/opt/tcl-tk/include'"
export PYTHON_CONFIGURE_OPTS="$PYTHON_CONFIGURE_OPTS --with-tcltk-libs='-L$HOMEBREW_PREFIX/opt/tcl-tk/lib -ltcl8.6 -ltk8.6'"
export CFLAGS="-O2 -I$HOMEBREW_PREFIX/include"
CPPFLAGS="-I$HOMEBREW_PREFIX/opt/sqlite/include -I$HOMEBREW_PREFIX/opt/tcl-tk/include"
CPPFLAGS="$CPPFLAGS -I$HOMEBREW_PREFIX/opt/zlib/include"
CPPFLAGS="$CPPFLAGS -I$HOMEBREW_PREFIX/opt/bzip2/include"
export CPPFLAGS="$CPPFLAGS -I$HOMEBREW_PREFIX/opt/openssl@1.1/include"
LDFLAGS="-L$HOMEBREW_PREFIX/opt/sqlite/lib -L$HOMEBREW_PREFIX/opt/tcl-tk/lib"
LDFLAGS="$LDFLAGS -L$HOMEBREW_PREFIX/opt/zlib/lib"
LDFLAGS="$LDFLAGS -L$HOMEBREW_PREFIX/opt/bzip2/lib"
export LDFLAGS="$LDFLAGS -L$HOMEBREW_PREFIX/opt/openssl@1.1/lib -L$HOMEBREW_PREFIX/opt/readline/lib"
PKG_CONFIG_PATH="$HOMEBREW_PREFIX/opt/sqlite/lib/pkgconfig:$HOMEBREW_PREFIX/opt/tcl-tk/lib/pkgconfig"
PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$HOMEBREW_PREFIX/opt/zlib/lib/pkgconfig"
PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$HOMEBREW_PREFIX/opt/bzip2/lib/pkgconfig"
PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$HOMEBREW_PREFIX/opt/openssl@1.1/lib/pkgconfig"
export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$PYENV_ROOT/versions/$PYENV_VERSION/lib/pkgconfig"
| |
Add temporal ARM based homebrew install commands | # System formulae
arch -x86_64 brew install stow
arch -x86_64 brew install openssl
arch -x86_64 brew install gpg
arch -x86_64 brew install zsh
arch -x86_64 brew install tmux
arch -x86_64 brew install tmate
arch -x86_64 brew install tree
arch -x86_64 brew install coreutils
arch -x86_64 brew install git
arch -x86_64 brew install fzf
arch -x86_64 brew install ripgrep
arch -x86_64 brew install vim
arch -x86_64 brew install reattach-to-user-namespace
# Development formulae
arch -x86_64 brew install direnv
arch -x86_64 brew install rbenv
arch -x86_64 brew install rbenv-vars
arch -x86_64 brew install rbenv-default-gems
arch -x86_64 brew install ruby-build
arch -x86_64 brew install go
arch -x86_64 brew install jenv
arch -x86_64 brew install rustup-init
# Yubikey
arch -x86_64 brew install ykman
arch -x86_64 brew install ykclient
arch -x86_64 brew install ykpers
arch -x86_64 brew install yubico-piv-tool
arch -x86_64 brew install pam_yubico
# Taps
brew tap homebrew/cask-versions
brew tap homebrew/cask-drivers # required for logitech-options
# Applications
brew install appcleaner
brew install dropbox
arch -x86_64 brew install homebrew/cask/transmission
arch -x86_64 brew install homebrew/cask/gitup
brew install tunnelblick
brew install nordvpn
brew install keybase
brew install yubico-yubikey-piv-manager
brew install logitech-options
| |
Add a valgrind test runner | #!/bin/bash
# Run every test binary under valgrind and flag any that leak or error.
# --error-exitcode=1 makes valgrind exit non-zero when memcheck reports
# a problem; the original passed --error-exitcode=0 (failures became
# indistinguishable from success) and inverted the check, printing the
# error message when valgrind SUCCEEDED.
for file in tests/bin/*; do
    [ -e "$file" ] || continue # unmatched glob stays literal
    if ! valgrind --error-exitcode=1 "$file" $TEST_ARGS; then
        echo "Error: $file leaking memory"
    fi
    # Re-run outside valgrind so the plain test output is still shown.
    "$file"
done
for file in build/bin/*; do
    [ -e "$file" ] || continue
    # The original looped over `$(ls build/bin)` and then invoked the
    # bare basename, which is only found if build/bin is on PATH.
    if ! valgrind --error-exitcode=1 "$file" $ARGS; then
        echo "Error: $file leaking memory"
    fi
done
| |
Add a rootfs modification script | #!/bin/bash
function log() {
echo "[CHROOT] $*"
}
function update_locales() {
locale-gen en_US.UTF-8
update-locale LANG=en_US.UTF-8 LC_MESSAGES=POSIX
}
function lvm_initramfs_hook() {
    # Install an initramfs local-top hook that activates all LVM volume
    # groups (vgchange -ay) early in boot, then rebuild the initramfs.
    LVM_INITRAMFS_HOOK=/etc/initramfs-tools/scripts/local-top/lvm2
    # The '$' was missing from the three expansions below in the
    # original, so it wrote/chmodded a file literally named
    # "LVM_INITRAMFS_HOOK" in the current directory.
    echo "#!/bin/bash" > "$LVM_INITRAMFS_HOOK"
    echo "vgchange -ay" >> "$LVM_INITRAMFS_HOOK"
    chmod a+x "$LVM_INITRAMFS_HOOK"
    update-initramfs -u
}
function set_hostname() {
echo "amnesiac" > /etc/hostname
}
#
# main
#
log "Setting up locale"
update_locales
log "Adding lvm2 hook for initramfs"
lvm_initramfs_hook
log "Changing hostname"
set_hostname
| |
Add script for restoring git submodules. | #!/bin/sh
set -e
git config -f .gitmodules --get-regexp '^submodule\..*\.path$' |
while read path_key path
do
url_key=$(echo $path_key | sed 's/\.path/.url/')
url=$(git config -f .gitmodules --get "$url_key")
git submodule add --force $url $path
done
| |
Add script to verify copyrights in source files. | #!/usr/bin/env bash
#
# Verify that all source files have copyright comment in header.
#
# Check that files matching the following patterns have copyright in their
# sources. This check does not necessary ensure these are comments within the
# first few lines of the file, but that seems excessive.
#
# *.c
# *.chpl
# *.cpp
# *.g
# *.h
# *.lex
# *.y
# *.ypp
# Make*
#
CWD=$(cd $(dirname $0) ; pwd)
CHPL_HOME=${CHPL_HOME:-$CWD/../..}
this_year=$(date '+%Y')
copyright_pattern="copyright .*${this_year}.* Cray Inc"
source_dirs="compiler runtime make modules"
echo "[INFO] Moving to CHPL_HOME: ${CHPL_HOME}"
cd $CHPL_HOME
# COMP: c cpp h lex ypp y g RT: c h ALL: Make* MODULES: chpl
echo "[INFO] Checking for copyrights in source files: ${copyright_pattern}"
files_wo_copy=$(find $source_dirs \
-name Make\* -o \
-name \*.c -o \
-name \*.chpl -o \
-name \*.cpp -o \
-name \*.g -o \
-name \*.h -o \
-name \*.lex -o \
-name \*.y -o \
-name \*.ypp | \
grep -v compiler/parser/chapel.tab.h | \
grep -v compiler/parser/chapel.tab.cpp | \
xargs grep -i -L "${copyright_pattern}")
# Now check the Make* files in CHPL_HOME.
root_files_wo_copy=$(find . -maxdepth 1 -name Make\* | xargs grep -i -L "${copyright_pattern}")
if [ -n "${files_wo_copy}" -o -n "${root_files_wo_copy}" ] ; then
echo "[ERROR] The following files have missing or incorrect copyrights:"
echo "${files_wo_copy}"
echo "${root_files_wo_copy}"
exit 1
fi
| |
Add a pretty poor script to update doc URLs. The actual docs will be generated by Travis a bit later. Unfortunately this can't be automated unless we make Travis do the whole tagging thing as well. | #!/usr/bin/env bash
# This script is pretty low tech, but it helps keep the doc version numbers
# up to date. It should be run BEFORE tagging a new release, but AFTER
# bumping the version in Cargo.toml.
set -euo pipefail
wanted=v$(cargo read-manifest | jq -r .version)
for file in Cargo.toml README.md src/lib.rs; do
sed -i.bak -e "s|r2d2-redis/doc/[[:alnum:].]*|r2d2-redis/doc/$wanted|" "$file"
rm "$file.bak"
done
| |
Add script for searching elements across projects | #!/bin/bash
# Searches for an object by name in all the OpenShift cluster projects
OC="${HOME}/Go/src/github.com/openshift/origin/_output/local/bin/linux/amd64/oc"
if [[ $1 != "" ]]; then
SEARCH_STRING=$1
else
echo "Usage: $0 <search string>"
exit
fi
#readarray -t projects < <($OC projects | grep -v 'You have access to the following')
readarray -t projects < <($OC projects)
shift
for i in "${projects[@]:2}"; do
echo "Checking for ${SEARCH_STRING} in project ${i}"
$OC get all -n $i | grep -i $SEARCH_STRING
done
| |
Add a script to build the base libraries. | #!/bin/bash
if [[ $1 == "" ]] ; then
echo "Usage: $0 path_to_ghc_source"
exit 1
fi
FLAGS=-O2
libpath=$1/libraries
pushd .
cd $libpath/ghc-prim
hastec --libinstall $FLAGS -cpp -fglasgow-exts -package-name ghc-prim GHC.Types GHC.Classes GHC.IntWord64 GHC.Debug
popd
pushd .
cd $libpath/integer-simple
hastec --libinstall $FLAGS -cpp -fglasgow-exts -package-name integer-simple GHC.Integer GHC.Integer.Logarithms.Internals
popd
pushd .
cd $libpath/base
hastec --libinstall $FLAGS -hide-package base -package-name base -I./include -i./dist-install/build -XMagicHash -XExistentialQuantification -XRank2Types -XScopedTypeVariables -XUnboxedTuples -XForeignFunctionInterface -XUnliftedFFITypes -XDeriveDataTypeable -XGeneralizedNewtypeDeriving -XFlexibleInstances -XStandaloneDeriving -XPatternGuards -XEmptyDataDecls -XNoImplicitPrelude -XCPP Prelude
popd
| |
Add script to convert a recovery image to a factory-usable SSD image | #!/bin/bash
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to convert a recovery image into an SSD image usable by factory.
# TODO(gauravsh): crosbug.com/14790 (Merge this with
# convert_recovery_to_ssd.sh)
# Load common constants and variables.
. "$(dirname "$0")/common_minimal.sh"
usage() {
cat <<EOF
Usage: $PROG <signed_recovery_image> <original_image_zip> <output_ssd_image>
Converts <signed_recovery_image> into a full SSD image usable by factory. Uses
stateful partition from SSD image <original_image_zip>.
EOF
}
if [ $# -ne 3 ]; then
usage
exit 1
fi
type -P cgpt &>/dev/null ||
{ echo "cgpt tool must be in the path"; exit 1; }
# Abort on errors.
set -e
RECOVERY_IMAGE=$1
IMAGE_ZIP=$2
SSD_IMAGE=$3
work_dir=$(make_temp_dir)
echo "Extracting original SSD image."
unzip -o $IMAGE_ZIP chromiumos_base_image.bin -d ${work_dir}
mv ${work_dir}/chromiumos_base_image.bin ${SSD_IMAGE}
kerna_offset=$(partoffset ${RECOVERY_IMAGE} 2)
kernb_offset=$(partoffset ${RECOVERY_IMAGE} 4)
# Kernel partition sizes should be the same.
kern_size=$(partsize ${RECOVERY_IMAGE} 2)
rootfs=$(make_temp_file)
echo "Replacing RootFS on the SSD with that of the RECOVERY image"
extract_image_partition ${RECOVERY_IMAGE} 3 ${rootfs}
replace_image_partition ${SSD_IMAGE} 3 ${rootfs}
kerna=$(make_temp_file)
echo "Replacing KernelA on the SSD with that of the RECOVERY image"
extract_image_partition ${RECOVERY_IMAGE} 4 ${kerna}
replace_image_partition ${SSD_IMAGE} 2 ${kerna}
# Overwrite the kernel vblock on the created SSD image.
stateful_dir=$(make_temp_dir)
tmp_vblock=$(make_temp_file)
mount_image_partition_ro ${RECOVERY_IMAGE} 1 ${stateful_dir}
sudo cp ${stateful_dir}/vmlinuz_hd.vblock ${tmp_vblock}
echo "Overwriting kernel vblock with SSD kernel vblock"
sudo dd if=${tmp_vblock} of=${SSD_IMAGE} seek=${kerna_offset} bs=512 \
conv=notrunc
sudo umount -d ${stateful_dir}
# Zero out Kernel B partition.
echo "Zeroing out Kernel partition B"
sudo dd if=/dev/zero of=${SSD_IMAGE} seek=${kernb_offset} bs=512 \
count=${kern_size} conv=notrunc
echo "${RECOVERY_IMAGE} was converted to a factory SSD image: ${SSD_IMAGE}"
| |
Add a new test that runs the end to end pipeline. | #!/bin/zsh
# set up the conda path
set -x
if [[ -f conda_path ]]; then
CONDA_ENV_PATH=$(cat conda_path)
fi || exit 1
# set up ZPar and NLTK environment variables
export NLPTOOLS="/home/nlp-text/dynamic/NLPTools"
export ZPAR_MODEL_DIR="${NLPTOOLS}/zpar/models/english"
export NLTK_DATA="${NLPTOOLS}/nltk_data"
export CORPORA="/home/nlp-text/static/corpora"
# create the JSON files from the RST and Penn treebanks
"${CONDA_ENV_PATH}"/bin/convert_rst_discourse_tb $CORPORA/nonets/rst_discourse_treebank/original/rst_discourse_treebank $CORPORA/nonets/treebank3/original/treebank_3 >& convert.log
# split train JSON into train + dev
"${CONDA_ENV_PATH}"/bin/make_traindev_split
# extract segmentation features for train and dev
"${CONDA_ENV_PATH}"/bin/extract_segmentation_features rst_discourse_tb_edus_TRAINING_TRAIN.json rst_discourse_tb_edus_features_TRAINING_TRAIN.tsv
"${CONDA_ENV_PATH}"/bin/extract_segmentation_features rst_discourse_tb_edus_TRAINING_DEV.json rst_discourse_tb_edus_features_TRAINING_DEV.tsv
# train the segmentation model
"${CONDA_ENV_PATH}"/bin/tune_segmentation_model rst_discourse_tb_edus_features_TRAINING_TRAIN.tsv rst_discourse_tb_edus_features_TRAINING_DEV.tsv segmentation_model >& tune_segmenter.log
# save the best segmenter output
tail -6 tune_segmenter.log > best_segmenter_f1
# train the RST parser
"${CONDA_ENV_PATH}"/bin/tune_rst_parser rst_discourse_tb_edus_TRAINING_TRAIN.json rst_discourse_tb_edus_TRAINING_DEV.json rst_parsing_model >& tune_rst_parser.log
# get best F1 value and check that it is within expected limits
F1VALUE=$(tail -1 nohup.out | grep -o '0\.[0-9]\+[^$]' | sed 's/,//')
echo "$F1VALUE" > best_rst_parser_f1
echo "${F1VALUE} > 0.58 && ${F1VALUE} < 0.60" | bc -l
# run any of the trained models on a test document
# NOTE: it doesn't matter which ones we run since
# we just want to make sure that the `rst_parse` command runs
"${CONDA_ENV_PATH}"/bin/rst_parse -g segmentation_model.C8.0 -p rst_parsing_model.C0.5 tests/data/rst_document.txt > output.json
# run the visualizer next
visualize_rst_tree output.json tree.html --embed_d3js | |
Add an install script for librarian-puppet | #!/bin/sh
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Library
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Installs puppet-librarian
InstallLibrarianPuppet () {
    # Install librarian-puppet via gem unless it is already on the PATH.
    # Aborts the whole script with gem's failure status on error.
    if which librarian-puppet > /dev/null 2>&1; then
        return
    fi
    echo 'Attempting to install librarian-puppet'
    gem install librarian-puppet --version "$version"
    # Capture gem's status BEFORE echoing: the original ran `exit $?`
    # after echo, which always exited 0 and masked the failure.
    rc=$?
    if [ "$rc" -ne '0' ]; then
        echo 'Failed to install librarian-puppet'
        exit "$rc"
    fi
}
# Installs ruby-dev
InstallRubyDev () {
    # Install the Ruby development headers unless already present.
    # NOTE(review): the presence check uses dpkg, which does not exist
    # on yum-based systems, so the install always proceeds there.
    if dpkg -s ruby-dev > /dev/null 2>&1; then
        return
    fi
    echo 'Attempting to install Ruby devkit...'
    if [ "${FOUND_YUM}" -eq '0' ]; then
        yum -q -y makecache
        # NOTE(review): the yum package is usually named ruby-devel,
        # not ruby-dev — confirm on the target distro.
        yum -q -y install ruby-dev
    elif [ "${FOUND_APT}" -eq '0' ]; then
        apt-get -q -y install ruby-dev
    fi
    # Capture the installer's status before echoing: the original ran
    # `exit $?` after echo, which always exited 0 and hid the failure.
    rc=$?
    if [ "$rc" -ne '0' ]; then
        echo 'Failed to install Ruby devkit...'
        exit "$rc"
    fi
}
#Installs git
InstallGit () {
    # Install git with whichever supported package manager was found.
    # Nothing to do when git is already on the PATH.
    if which git > /dev/null 2>&1; then
        return
    fi
    echo 'Attempting to install Git...'
    if [ "${FOUND_YUM}" -eq '0' ]; then
        yum -q -y makecache
        yum -q -y install git
    elif [ "${FOUND_APT}" -eq '0' ]; then
        apt-get -q -y install git
    fi
    # Capture the installer's status before echoing: the original ran
    # `exit $?` after echo, which always exited 0 and hid the failure.
    rc=$?
    if [ "$rc" -ne '0' ]; then
        echo 'Failed to install Git...'
        exit "$rc"
    fi
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Script
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Ensure we have one supported package manager, up to date
$(which apt-get > /dev/null 2>&1)
FOUND_APT=$?
$(which yum > /dev/null 2>&1)
FOUND_YUM=$?
if [ "${FOUND_YUM}" -ne '0' -a "${FOUND_APT}" -ne '0' ]; then
echo 'No supported package installer available. You may need to install git and librarian-puppet manually.'
exit 1
fi
if [ "${FOUND_APT}" -eq '0' ]; then
apt-get -q -y update
fi
# Collect the parameters
while getopts :v: opt; do
case "$opt" in
v) version="$OPTARG"
;;
\?) echo "Unknown option -$OPTARG. Usage: $0 -v version" >&2
;;
esac
done
# Install the appropriate packages
InstallGit
InstallRubyDev
InstallLibrarianPuppet
| |
Add a simple ZSH script to install code and code-insiders extensions | MY_EXTENSIONS=(
"alefragnani.project-manager"
"dbaeumer.vscode-eslint"
"eamodio.gitlens"
"Equinusocio.vsc-material-theme"
"esbenp.prettier-vscode"
"flowtype.flow-for-vscode"
"ms-python.python"
"ms-vscode-remote.remote-containers"
"ms-vscode-remote.remote-ssh"
"ms-vscode-remote.remote-ssh-edit"
"ms-vscode-remote.remote-ssh-explorer"
"ms-vscode-remote.remote-wsl"
"ms-vscode-remote.vscode-remote-extensionpack"
"sourcegraph.sourcegraph"
"zhuangtongfa.Material-theme"
)
# Install every listed extension into each VS Code variant (stable and
# insiders) that is present on this machine.
for ext in "${MY_EXTENSIONS[@]}"; do
    type code > /dev/null && code --install-extension "$ext"
    type code-insiders > /dev/null && code-insiders --install-extension "$ext"
done
| |
Add file to handle path. | #!/usr/bin/env zsh
#
# +-------------------------------------------------------------------------+
# | ~/config/zsh/rc.d/05-path.zsh |
# +-------------------------------------------------------------------------+
# | Copyright © 2022 Waldemar Schroeer |
# | waldemar.schroeer(at)rz-amper.de |
# | |
# +-------------------------------------------------------------------------+
# -U ensures each entry in these is Unique (that is, discards duplicates).
export -U PATH path FPATH fpath MANPATH manpath
export -UT INFOPATH infopath # -T creates a "tied" pair; see below.
# $PATH and $path (and also $FPATH and $fpath, etc.) are "tied" to each other.
# Modifying one will also modify the other.
# Note that each value in an array is expanded separately. Thus, we can use ~
# for $HOME in each $path entry.
path=(
$path
~/.local/bin
)
# Add your functions to your $fpath, so you can autoload them.
fpath=(
$ZDOTDIR/functions
$fpath
)
| |
Add an easier to use CLI | #!/bin/bash
SCRIPT=$(basename ${BASH_SOURCE[0]})
DIRNAME=$(realpath $(dirname ${BASH_SOURCE[0]}))
CONTAINER_NAME="resin-image-preloader"
function cleanup() {
docker rm $CONTAINER_NAME || true
}
trap cleanup EXIT
function usage() {
echo ""
echo "Usage: $SCRIPT [options]"
echo ""
echo "Options:"
echo ""
echo " --app Application ID (required)"
echo " --img Disk image to preload into (required)"
echo " --api-key API key"
echo " --api-token API token"
echo " --api-host API hostname"
echo " --registry Image registry host"
echo ""
echo "Example:"
echo ""
echo " $SCRIPT --app 123456 --api-token \"xxxx...\" --img /path/to/resin-os.img"
echo ""
}
function set_options() {
local options="$@"
local argv=($options)
local index=0
for opt in $options; do
index=$(($index + 1))
case $opt in
--help)
usage
exit 0
;;
--app) APP_ID=${argv[$index]} ;;
--registry) REGISTRY_HOST=${argv[$index]} ;;
--api-key) API_KEY=${argv[$index]} ;;
--api-token) API_TOKEN=${argv[$index]} ;;
esac
done
}
set_options $@
# Build the preloader image
docker build -t resin/resin-preload $DIRNAME
# Run the preloader
docker run -it --privileged \
-e API_TOKEN=$API_TOKEN \
-e APP_ID=$APP_ID \
-e REGISTRY_HOST=$REGISTRY_HOST \
-e API_HOST=$API_HOST \
-v $PATH_TO_IMAGE:/img/resin.img \
--name $CONTAINER_NAME \
resin/resin-preload
| |
Add scripts to dispatch index.html files | #!/bin/bash
#---------------------------------------------------------------------
source "$(dirname "$0")/common.inc"
#---------------------------------------------------------------------
DISPATCH_DIR_ABS='/var/www/builds.quelltextlich.at/'
print_help() {
cat <<EOF
$0 ARGUMENTS
Dispatches index*.html files from the current directory onto a public
directory
ARGUMENTS:
--dispatch-dir DISPATCH_DIR_ABS
-- The absolute directory to dispatch the files to
E.g.: /var/www/builds.quelltextlich.at
EOF
}
while [ $# -gt 0 ]
do
ARGUMENT="$1"
shift
case "$ARGUMENT" in
"--help" | "-h" | "-?" )
print_help
exit 0
;;
"--dispatch-dir" )
[ $# -ge 1 ] || error "$ARGUMENT requires 1 more argument"
DISPATCH_DIR_ABS="$1"
shift || true
;;
* )
error "Unknown argument '$ARGUMENT'"
;;
esac
done
if [ ! -d "$DISPATCH_DIR_ABS" ]
then
error "The dispatch directory '$DISPATCH_DIR_ABS' does not exist"
fi
for SOURCE_FILE_RELS in index*.html
do
section "Dispatching '$SOURCE_FILE_RELS'"
TARGET_FILE_RELD="$SOURCE_FILE_RELS"
TARGET_FILE_RELD="${TARGET_FILE_RELD%.html}"
TARGET_FILE_RELD="${TARGET_FILE_RELD#index}"
TARGET_FILE_RELD="${TARGET_FILE_RELD//_//}"
TARGET_FILE_RELD="$TARGET_FILE_RELD/index.html"
TARGET_FILE_ABS="$DISPATCH_DIR_ABS/$TARGET_FILE_RELD"
info "$SOURCE_FILE_RELS -> $TARGET_FILE_ABS"
mv "$SOURCE_FILE_RELS" "$TARGET_FILE_ABS"
done
finalize
| |
Stop gearman server if it was started by GearmanUTest | #!/bin/bash
TEST_DIR=$1
ps -C gearmand
if [ 1 -eq "$?" ]; then
gearmand -d -l stderr
fi
${TEST_DIR}/GearmanUTest
| #!/bin/bash
TEST_DIR=$1
ps -C gearmand
if [ 1 -eq "$?" ]; then
gearmand -d -l stderr
shutdown_server=0
fi
${TEST_DIR}/GearmanUTest
exit_status="$?"
if [ 0 -eq "$shutdown_server" ]; then
gearadmin --shutdown
fi
exit "$exit_status"
|
Add script to find ImageMagick and make link to /opt/alfresco/ImageMagickCoders | #!/bin/bash
IMAGEMAGICK=`rpm -ql ImageMagick | grep "/coders" | head -n1`
echo ${IMAGEMAGICK}
IMAGEMAGICK=`sudo find /usr -name jpeg.so | grep coders | sed "s/\/jpeg.so$//"`
echo ${IMAGEMAGICK}
rm -f /opt/alfresco/ImageMagickCoders
ln -s ${IMAGEMAGICK} /opt/alfresco/ImageMagickCoders
| |
Test script to check for all dependencies | # Quick test to make sure all dependencies are satisfied
# Require Python 3.4.x. Anchor the version match: the original piped
# through `grep 3.4`, where '.' is a regex wildcard, so e.g. 3.14,
# 13.4 or 3.40 would also have passed.
version=`python3 --version | cut -d" " -f2 | grep -E "^3\.4(\.|$)"`
if [ -z "${version}" ] ; then
echo "Python version 3.4.x is required"
exit 1;
fi
C1=`python3 -c 'import pymongo' 2>&1 | grep Error`
C2=`python3 -c 'import bcrypt' 2>&1 | grep Error`
if [ -z "$C1$C2" ]; then
echo "All dependencies have been satisfied. "
else
echo $C1
echo $C2
fi
| |
Add aliases to show latest version of Emacs and Clang in the Debian repositories |
# Show all clang versions available as Debian package.
alias debian-version-list-clang='apt-cache search clang | cut -f 1 -d " " | sort | grep -e "^clang-[0-9].[0-9]$"'
# Show all Emacs versions available as Debian package.
alias debian-version-list-emacs='apt-cache search emacs | cut -f 1 -d " " | sort | grep -e "^emacs[0-9][0-9]$"'
| |
Add a script for fixing up typedoc output. | #!/bin/bash
# For some impish reason, typedoc insists on generating HTML files whose names
# start with leading underscores. Unfortunately, jekyll refuses to include files
# that start with an underscore.
#
# This script strips the underscore and replaces
# all occurences in the generated output.
files=`find . -name '_*.html'`
if test -z "$files"; then
echo nothing to do...
exit
fi
for file in $files; do
dir=`dirname "$file"`
base=`basename "$file"`
mv "$file" "$dir/${base##_}"
done
# Build one sed program that rewrites each old (underscore-prefixed)
# file name to its stripped form wherever it is referenced.
sedexpr=""
for file in $files; do
    base=`basename "$file"`
    sedexpr="$sedexpr -e 's/$base/${base##_}/g'"
done
# Prefer GNU sed when installed as gsed (e.g. Homebrew on macOS).
# Quote the substitution so `test -z` always gets an operand; the
# original relied on `test -z` degenerating to `test "-z"` when
# `which gsed` printed nothing.
SED=gsed
test -z "`which gsed`" && SED=sed
for file in `find . -name '*.html'`; do
    bash -c "$SED -i $sedexpr \"$file\""
done
echo "done!"
Add script for BeagleBoard-X15 environment creation | #!/bin/bash -ex
TOOLCHAIN_FILE_EXT=.tar.xz
TOOLCHAIN_FILE=gcc-linaro-5.3.1-2016.05-x86_64_arm-linux-gnueabihf${TOOLCHAIN_FILE_EXT}
TOOLCHAIN_PATH=https://releases.linaro.org/components/toolchain/binaries/5.3-2016.05/arm-linux-gnueabihf/
wget -c ${TOOLCHAIN_PATH}${TOOLCHAIN_FILE}
rbld create --base ubuntu:16.04 bb-x15
rbld modify bb-x15:initial -- "sudo apt-get update && sudo apt-get install -y make xz-utils"
rbld modify bb-x15:initial -- "(cd /; sudo tar -Jxf -) < $TOOLCHAIN_FILE"
rbld modify bb-x15:initial -- "echo export CC=/`basename $TOOLCHAIN_FILE $TOOLCHAIN_FILE_EXT`/bin/arm-linux-gnueabihf- | sudo tee -a /rebuild/rebuild.rc > /dev/null"
rbld commit bb-x15 --tag 16-05
rbld rm bb-x15:initial
rm -f $TOOLCHAIN_FILE
| |
Add script to install uClibc libraries in rootfs | #!/bin/sh
TOPDIR=`pwd`
LIBDIR=crosstool-ng-armv7a/.build/.build/arm-unknown-linux-uclibcgnueabihf/build/build-libc
# Default the rootfs location when no argument is given. Use the POSIX
# test builtin: the original used bash-only [[ "x$1" == "x" ]] under a
# #!/bin/sh shebang, which fails on POSIX shells such as dash.
if [ -z "$1" ]; then
    NFS_PATH=${HOME}/rootfs
else
    NFS_PATH=$1
fi
# Pack the uClibc runtime libraries and unpack them into the rootfs.
cd ${LIBDIR}
tar -cvf ${TOPDIR}/uClibc.tar lib
cd ${TOPDIR}
tar -xvf uClibc.tar -C ${NFS_PATH}
| |
Add automated build for autoconf. | #/bin/bash
PREFIX_FOLDER=/home/$USER/sandbox
ROOT_FOLDER=$PWD
SRC_FOLDER=$ROOT_FOLDER/src
MAKE_OPTS="-j32"
# Install autoconf
cd $SRC_FOLDER
wget http://ftp.gnu.org/gnu/autoconf/autoconf-latest.tar.gz
tar xf autoconf-latest.tar.gz
cd autoconf-2.69/
./configure --prefix=$PREFIX_FOLDER
make $MAKE_OPTS
make install
| |
Create build script for bundled release version | #!/bin/bash
if (( $# < 1)); then
echo "OpenHIM release build: Builds a specific tagged release ready for deployment";
echo "Usage: $0 TAG";
exit 0;
fi
tag=$1;
shift;
echo "NB!"
echo "To create the tagged build, various git interactions need to take place. "
echo "This will create a temporary branch as well as remove any changes you have havent yet committed"
read -p "Do you wish to proceed? [Y/y]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
cd ../
echo "Git: setup branch/tag"
git checkout -- .
git checkout master
git pull origin master
git fetch --tags
git branch -D $tag
git checkout tags/$tag -b $tag
echo "npm: clean and build package"
rm -rf node_modules
npm install
npm run build
echo "zip: build release version: $tag"
zip \
-i 'lib/*' 'config/*' 'node_modules/*' 'docs/*' 'resources/*' 'CHANGELOG.md' 'LICENSE' 'package.json' 'package-lock.json' 'README.md' \
-r packaging/build.openhim-core.$tag.zip .
echo "Git cleanup"
git checkout -- .
git checkout master
git branch -D $tag
echo "New OpenHIM Core build zipped";
fi
| |
Create script to run after building product | #!/bin/bash
cp -R ~/Documents/JavaJREs/jre-8u40-linux-i586 ~/git/Rel/_Deployment/product/linux.gtk.x86/Rel/jre
cp -R ~/Documents/JavaJREs/jre-8u40-linux-x64 ~/git/Rel/_Deployment/product/linux.gtk.x86_64/Rel/jre
cp -R ~/Documents/JavaJREs/jre-8u40-macosx-x64/Contents/Home ~/git/Rel/_Deployment/product/macosx.cocoa.x86_64/Rel/jre
cp -R ~/Documents/JavaJREs/jre-8u40-windows-i586 ~/git/Rel/_Deployment/product/win32.win32.x86/Rel/jre
cp -R ~/Documents/JavaJREs/jre-8u40-windows-x64 ~/git/Rel/_Deployment/product/win32.win32.x86_64/Rel/jre
| |
Add script for invoking store swap. | #!/bin/bash
#
# Copyright 2008-2009 LinkedIn, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
base_dir=$(dirname $0)/..
for file in $base_dir/dist/*.jar;
do
CLASSPATH=$CLASSPATH:$file
done
for file in $base_dir/lib/*.jar;
do
CLASSPATH=$CLASSPATH:$file
done
CLASSPATH=$CLASSPATH:$base_dir/dist/resources
java -server -Xmx128M -cp $CLASSPATH voldemort.store.readonly.StoreSwapper $@ | |
Add preset for supersingular curves. | #!/bin/bash
# Preset for supersingular curves: GMP arithmetic backend, 1536-bit prime
# field and bignum precision, Comba multiplication/squaring with Montgomery
# reduction, lazy-reduced extension-field arithmetic and optimal-ate pairing.
# "$1" is passed through to cmake as the source-tree path (presumably the
# RELIC library checkout — confirm against the surrounding presets).
cmake -DCHECK=off -DARITH=gmp -DBN_PRECI=1536 -DFP_PRIME=1536 -DFP_QNRES=on -DFP_METHD="BASIC;COMBA;COMBA;MONTY;LOWER;SLIDE" -DFPX_METHD="INTEG;INTEG;LAZYR" -DPP_METHD="LAZYR;OATEP" -DCOMP="-O2 -funroll-loops -fomit-frame-pointer" $1
| |
Add an alternate local-up experience. | #!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This command builds and runs a local kubernetes cluster. It's just like
# local-up.sh, but this one launches the three separate binaries.
# You may need to run this as root to allow kubelet to open docker's socket.

if [ "$(which etcd)" == "" ]; then
  echo "etcd must be in your PATH"
  exit 1
fi

# Stop right away if the build fails
set -e

$(dirname $0)/build-go.sh

echo "Starting etcd"
# etcd gets a throwaway data directory; the EXIT trap removes it again.
ETCD_DIR=$(mktemp -d -t kube-integration.XXXXXX)
trap "rm -rf ${ETCD_DIR}" EXIT

etcd -name test -data-dir ${ETCD_DIR} &> /tmp/etcd.log &
ETCD_PID=$!

# Give etcd time to come up before the components try to connect to it.
sleep 5

# Shut down anyway if there's an error.
set +e

API_PORT=8080
KUBELET_PORT=10250

# Each component runs in the background, logging under /tmp; PIDs are kept
# so everything can be torn down after the user presses enter.
$(dirname $0)/../output/go/apiserver \
  --address="127.0.0.1" \
  --port="${API_PORT}" \
  --etcd_servers="http://127.0.0.1:4001" \
  --machines="127.0.0.1" &> /tmp/apiserver.log &
APISERVER_PID=$!

$(dirname $0)/../output/go/controller-manager \
  --etcd_servers="http://127.0.0.1:4001" \
  --master="127.0.0.1:${API_PORT}" &> /tmp/controller-manager.log &
CTLRMGR_PID=$!

$(dirname $0)/../output/go/kubelet \
  --etcd_servers="http://127.0.0.1:4001" \
  --hostname_override="127.0.0.1" \
  --address="127.0.0.1" \
  --port="$KUBELET_PORT" &> /tmp/kubelet.log &
KUBELET_PID=$!

echo "Local Kubernetes cluster is running. Press enter to shut it down."
read unused

# Tear down the components, etcd last; the trap then removes its data dir.
kill ${APISERVER_PID}
kill ${CTLRMGR_PID}
kill ${KUBELET_PID}
kill ${ETCD_PID}
| |
Add sad script to upload docs to Hackage | #!/bin/sh
# Based on https://gist.github.com/Fuuzetsu/8276421
# Usage: sh hackagedocs.sh 0.7.1 UserName Password
# Build the package and its Haddock docs (with source links pointing at
# Hackage), then upload the rendered docs for release $1 as user $2 with
# password $3.
cabal configure && cabal build && cabal haddock --hyperlink-source --html-location='http://hackage.haskell.org/package/$pkg/docs' \
                 --contents-location='http://hackage.haskell.org/package/$pkg'
S=$?
if [ "${S}" -eq "0" ]; then
    cd "dist/doc/html" || exit 1
    DDIR="aws-kinesis-client-${1}-docs"
    # Hackage expects a ustar-format tarball whose top-level directory is
    # <pkg>-<version>-docs.
    cp -r "aws-kinesis-client" "${DDIR}" && tar -c -v -z --format ustar -f "${DDIR}.tar.gz" "${DDIR}"
    CS=$?
    if [ "${CS}" -eq "0" ]; then
        echo "Uploading to Hackage…"
        # Security fix: use HTTPS and curl's -u option instead of embedding
        # the username/password in a plain-http URL, where the credentials
        # travelled unencrypted and leaked into logs.
        curl -X PUT -H 'Content-Type: application/x-tar' -H 'Content-Encoding: gzip' -u "${2}:${3}" --data-binary "@${DDIR}.tar.gz" "https://hackage.haskell.org/package/aws-kinesis-client-${1}/docs"
        exit $?
    else
        echo "Error when packaging the documentation"
        exit $CS
    fi
else
    echo "Error when trying to build the package."
    exit $S
fi
| |
Add Docker Daemon entrypoint script | #!/bin/sh
# Fail fast: any unhandled error aborts the entrypoint.
set -e

# no arguments passed
# or first arg is `-f` or `--some-option`
# ("${1#-}" != "$1" holds exactly when $1 begins with a dash)
if [ "$#" -eq 0 -o "${1#-}" != "$1" ]; then
	# add our default arguments
	# 'set --' rewrites the positional parameters: prepend dockerd and its
	# defaults while keeping any user-supplied options after them.
	set -- dockerd \
		--host=unix:///var/run/docker.sock \
		--host=tcp://0.0.0.0:2375 \
		--storage-driver=vfs \
		"$@"
fi

if [ "$1" = 'dockerd' ]; then
	# if we're running Docker, let's pipe through dind
	# (and we'll run dind explicitly with "sh" since its shebang is /bin/bash)
	set -- sh "$(which dind)" "$@"
fi

# exec replaces this shell so the final command runs as PID 1 and receives
# container signals directly.
exec "$@"
| |
Add script to generate tools.yml for ansible-galaxy-tools | #!/bin/bash
# Generate an Ansible playbook that installs Galaxy tools via the
# ansible-galaxy-tools role.
#
# Arguments:
#   $1 - path of the playbook file to (re)create
# Globals read at call time:
#   host, port     - address of the target Galaxy instance
#   master_api_key - Galaxy master API key
generate_tools_yml(){
    # A single redirected heredoc replaces the rm/touch/echo-append chain:
    # '>' truncates or creates the file in one step, and quoting "$1"
    # keeps paths containing spaces intact.
    cat > "$1" << EOF
- hosts: localhost
 connection: local
 vars:
 roles:
 - role: ansible-galaxy-tools
 galaxy_tools_galaxy_instance_url: http://$host:$port/
 galaxy_tools_api_key: $master_api_key
 galaxy_tools_tool_list_files: [ "files/tool_list.yaml" ]
EOF
}
Install mono on the macOS Travis builds. | #!/bin/bash
set -euo pipefail
thisdir=$(dirname "$0")
brew update
brew install python3
"$thisdir/before_install-common.sh"
| #!/bin/bash
set -euo pipefail
thisdir=$(dirname "$0")
brew update
brew install mono python3
"$thisdir/before_install-common.sh"
|
Clean path when building neovim from source | #!/bin/sh
# Use a known-clean PATH so the build does not pick up stray toolchains.
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin"

# Build neovim from source in a scratch directory, installing to ~/neovim.
# Bug fix: guard the clone and cd — previously a failed clone or cd left
# make running in whatever directory we happened to be in.
git clone https://github.com/neovim/neovim.git ~/repo/neovim-new-build || exit 1
cd ~/repo/neovim-new-build/ || exit 1

make CMAKE_EXTRA_FLAGS="-DCMAKE_INSTALL_PREFIX=$HOME/neovim"

if [ ! -f ~/repo/neovim-new-build/build/bin/nvim ]; then
    echo "Failed: incomplete build!"
    exit 2
fi

# mv ~/repo/neovim-new-build ~/repo/neovim
| |
Add script to create proto files | #!/bin/bash
HERE=$(dirname $0)
SRC_DIR=$HERE/traffic-service-protobuf
DST_DIR=$HERE/src/traffic/messages
mkdir -p $DST_DIR
for proto in replies requests; do
protoc -I=$SRC_DIR --python_out=$DST_DIR $SRC_DIR/${proto}.proto
done
| |
Add method:request and vtimezone block | #!/bin/sh
#
# fix-zoom-ics.sh
# Copyright (C) 2021 Olaf Lessenich <xai@linux.com>
#
# Distributed under terms of the MIT license.
#
# Usage: fix-zoom-ics.sh <file.ics>
# Inserts a METHOD:REQUEST line (if absent) and a Europe/Vienna VTIMEZONE
# block before the first VEVENT of a Zoom-generated .ics file.

# Robustness: require the target file argument and quote it throughout.
if [ $# -lt 1 ] || [ ! -f "$1" ]; then
    echo "usage: $0 <file.ics>" >&2
    exit 1
fi

if grep -q 'BEGIN:VTIMEZONE' "$1"
then
    # Bug fix: '>2 echo ...' wrote the message to a file literally named
    # "2"; '>&2' sends it to stderr. 'return' is only valid inside a
    # function, so use 'exit' at script scope.
    echo "VTIMEZONE already present. Quitting." >&2
    exit 0
fi

# Bug fix: 'grep -vq PATTERN' asks "is there any line that does NOT
# match", which is true for nearly every file. Testing for absence of the
# pattern needs a negated plain 'grep -q'.
if ! grep -q 'METHOD:REQUEST' "$1"
then
    sed -i '/^BEGIN:VEVENT/i \
METHOD:REQUEST' "$1"
fi

sed -i '/^BEGIN:VEVENT/i \
BEGIN:VTIMEZONE\
TZID:Europe/Vienna\
X-LIC-LOCATION:Europe/Vienna\
BEGIN:DAYLIGHT\
TZNAME:CEST\
TZOFFSETFROM:+0100\
TZOFFSETTO:+0200\
DTSTART:19810329T020000\
RRULE:FREQ=YEARLY;UNTIL=20370329T010000Z;BYDAY=-1SU;BYMONTH=3\
END:DAYLIGHT\
BEGIN:STANDARD\
TZNAME:CET\
TZOFFSETFROM:+0200\
TZOFFSETTO:+0100\
DTSTART:19961027T030000\
RRULE:FREQ=YEARLY;UNTIL=20361026T010000Z;BYDAY=-1SU;BYMONTH=10\
END:STANDARD\
END:VTIMEZONE' "$1"
| |
Add script to allow batch processing | #!/bin/sh
REQUIRED_BASH_VERSION=3.0.0

# Guard: relies on $BASH_VERSION, i.e. on actually being run by bash.
if [[ $BASH_VERSION < $REQUIRED_BASH_VERSION ]]; then
    echo "You must use Bash version 3 or newer to run this script"
    exit
fi

DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)

# Convert every *.$EXT file under $SCAN_DIR to *.$OEXT by running the
# d2a.xsl stylesheet through Saxon.
convert()
{
    CMD="find $SCAN_DIR"
    # Bug fix: '-maxdepth' is a global find option and must precede tests
    # such as '-name'; appending it after the test triggers a warning and
    # inconsistent behaviour.
    if [ "$RECURSE" == "0" ]; then
        CMD="$CMD -maxdepth 1"
    fi
    CMD="$CMD -name \*.$EXT"
    xmls=`eval $CMD`

    mkdir -p "$OUT_DIR"

    for xml in $xmls
    do
        output_filename=${xml/.$EXT/.$OEXT}
        echo "Processing $xml -> $output_filename"
        CMD="java -jar $DIR/saxon9he.jar -s $xml -o $output_filename $DIR/d2a.xsl"
        $CMD
    done
}

usage()
{
cat << EOF
usage: $0 options

This script allows primitive batching of docbook to asciidoc conversion

OPTIONS:
   -s      Source directory to scan for files, by default the working directory
   -x      Extension of files to convert, by default 'xml'
   -o      Output extension, by default 'asciidoc'
   -r      Enable recusive scanning, by default the scan is not recursive
   -h      Shows this message
EOF
}

SCAN_DIR=`pwd`
RECURSE="0"
EXT="xml"
OEXT="asciidoc"
OUT_DIR="output"

# Bug fix: the option string was wrapped in typographic quotes (“…”),
# making the quote characters part of the optstring; use ASCII quotes.
while getopts "hrx:o:s:" OPTION
do
    case $OPTION in
        s)
            SCAN_DIR=$OPTARG
            ;;
        h)
            usage
            exit
            ;;
        r)
            RECURSE="1"
            ;;
        x)
            EXT=$OPTARG
            ;;
        o)
            OEXT=$OPTARG
            ;;
        [?])
            usage
            exit
            ;;
    esac
done

convert
| |
Add build script for Apple M1 | #!/usr/bin/sh
set -ex

# Build LIEF for macOS on Apple Silicon: one shared-library and one
# static-library release configuration, then package both with CPack.
mkdir -p build/osx-aarch64/static-release
mkdir -p build/osx-aarch64/shared-release

# Portability fix: pushd/popd are bash builtins, but the shebang is a
# plain sh; run each stage in a subshell with cd instead. set -e still
# aborts the script if a subshell stage fails.
(
  cd build/osx-aarch64/shared-release
  cmake ../../.. -GNinja \
    -DBUILD_SHARED_LIBS=on \
    -DLIEF_PYTHON_API=off \
    -DLIEF_INSTALL_COMPILED_EXAMPLES=off \
    -DCMAKE_BUILD_TYPE=Release
  ninja
)

(
  cd build/osx-aarch64/static-release
  cmake ../../.. -GNinja \
    -DBUILD_SHARED_LIBS=off \
    -DLIEF_PYTHON_API=off \
    -DLIEF_INSTALL_COMPILED_EXAMPLES=on \
    -DCMAKE_BUILD_TYPE=Release
  ninja
)

(
  cd build/osx-aarch64
  cpack --config ../../cmake/cpack.config.cmake
)

/bin/mv build/osx-aarch64/*.tar.gz build/
| |
Remove backslash before ${TARGET}, it breaks cleanup | #!/bin/bash
[ -z "${MYSQL_USER}" ] && { echo "=> MYSQL_USER cannot be empty" && exit 1; }
[ -z "${MYSQL_PASS}" ] && { echo "=> MYSQL_PASS cannot be empty" && exit 1; }
DATE=`date +%Y%m%d%H%M`
echo "=> Backup started at $DATE"
databases=`mysql -u $MYSQL_USER -p$MYSQL_PASS -e "SHOW DATABASES;" | tr -d "| " | grep -v Database`
for db in $databases; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] ; then
echo "Dumping database: $db"
FILENAME=/backup/$DATE.$db.sql
if mysqldump -h $MYSQL_HOST -P $MYSQL_PORT -u $MYSQL_USER -p$MYSQL_PASS $db > $FILENAME ;then
gzip -f $FILENAME
else
rm -rf $FILENAME
fi
fi
done
if [ -n "$MAX_BACKUPS" ]; then
while [ `ls -1 /backup | wc -l` -gt "$MAX_BACKUPS" ];
do
TARGET=`ls -1 /backup | sort | head -n 1`
echo "Backup \${TARGET} is deleted"
rm -rf /backup/\${TARGET}
done
fi
echo "=> Backup done"
| #!/bin/bash
[ -z "${MYSQL_USER}" ] && { echo "=> MYSQL_USER cannot be empty" && exit 1; }
[ -z "${MYSQL_PASS}" ] && { echo "=> MYSQL_PASS cannot be empty" && exit 1; }
DATE=`date +%Y%m%d%H%M`
echo "=> Backup started at $DATE"
databases=`mysql -u $MYSQL_USER -p$MYSQL_PASS -e "SHOW DATABASES;" | tr -d "| " | grep -v Database`
for db in $databases; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] ; then
echo "Dumping database: $db"
FILENAME=/backup/$DATE.$db.sql
if mysqldump -h $MYSQL_HOST -P $MYSQL_PORT -u $MYSQL_USER -p$MYSQL_PASS $db > $FILENAME ;then
gzip -f $FILENAME
else
rm -rf $FILENAME
fi
fi
done
if [ -n "$MAX_BACKUPS" ]; then
while [ `ls -1 /backup | wc -l` -gt "$MAX_BACKUPS" ];
do
TARGET=`ls -1 /backup | sort | head -n 1`
echo "Backup ${TARGET} is deleted"
rm -rf /backup/${TARGET}
done
fi
echo "=> Backup done"
|
Add a branch build script for Jenkins. | #!/bin/bash
set -e

# Per-job virtualenv so ghtools stays isolated from the system Python.
VENV_PATH="${HOME}/venv/${JOB_NAME}"
[ -x ${VENV_PATH}/bin/pip ] || virtualenv ${VENV_PATH}
. ${VENV_PATH}/bin/activate

pip install -q ghtools

REPO="alphagov/rummager"

# Report commit status to GitHub around the real build (./jenkins.sh):
# 'pending' before it starts, then 'success'/'failure' with a link back
# to this Jenkins build.
gh-status "$REPO" "$GIT_COMMIT" pending -d "\"Build #${BUILD_NUMBER} is running on Jenkins\"" -u "$BUILD_URL" >/dev/null

if ./jenkins.sh; then
  gh-status "$REPO" "$GIT_COMMIT" success -d "\"Build #${BUILD_NUMBER} succeeded on Jenkins\"" -u "$BUILD_URL" >/dev/null
  exit 0
else
  gh-status "$REPO" "$GIT_COMMIT" failure -d "\"Build #${BUILD_NUMBER} failed on Jenkins\"" -u "$BUILD_URL" >/dev/null
  exit 1
fi
| |
Add script for building releases | #!/bin/zsh
set -e -u -x

# Usage: <script> <revision>
# Fetch the per-platform DataRefTool binaries for the given build revision
# from S3 and package them (plus the license) into a dated zip.
#
# Robustness fix: without this check, 'set -u' aborted on the $1 reference
# with a cryptic "parameter not set" error instead of a usage message.
if [ $# -lt 1 ]; then
    echo "usage: $0 <revision>" >&2
    exit 1
fi

REVISION=$1
DATE=`date +%Y_%m_%d`
FOLDER=DataRefTool_${DATE}

mkdir -p ${FOLDER}
cd ${FOLDER}

wget https://datareftool-binaries.s3.amazonaws.com/$REVISION/mac.xpl
wget https://datareftool-binaries.s3.amazonaws.com/$REVISION/lin.xpl
wget https://datareftool-binaries.s3.amazonaws.com/$REVISION/win.xpl

cp ../LICENSE .

cd ..
zip ${FOLDER}.zip ${FOLDER}/*
| |
Add util function to reset existing git directory to like-clone state | reset_repo() {
repo_dir=$1
branch=$2
git -C "${repo_dir}" fetch --prune
git -C "${repo_dir}" reset --hard
git -C "${repo_dir}" clean -d --force
# Either create branch, or reset existing branch to origin state
git -C "${repo_dir}" checkout -B "${branch}" "origin/${branch}"
# Delete all local branches except for current branch
if git -C "${repo_dir}" branch | grep -v "^*"; then
git -C "${repo_dir}" branch | grep -v "^*" | xargs git -C "${repo_dir}" branch -D
fi
}
| |
Add Atom instalation scripts with packages and configuration | #!/bin/sh
echo
echo "Installing Atom..."

# Add
# The webupd8team PPA provides the Atom package for Ubuntu.
sudo add-apt-repository ppa:webupd8team/atom
sudo apt-get update > /dev/null 2>&1
sudo apt-get install atom -y

echo
echo "Installing Atom Packages..."
apm install atom-beautify
apm install file-icons
apm install linter
apm install minimap
apm install outlander-ui
# Solarflare Syntax

echo
echo -n "Updating Config Files... "
# Symlink the dotfiles-managed configs into ~/.atom so edits stay under
# version control; -f replaces any existing files/links.
ln -sf ~/projects/dotfiles/atom/config.cson ~/.atom/config.cson
ln -sf ~/projects/dotfiles/atom/keymap.cson ~/.atom/keymap.cson
ln -sf ~/projects/dotfiles/atom/snippets.cson ~/.atom/snippets.cson
echo "Done!"
echo
| |
Add script to run tests without CMake | #!/bin/sh
# Copyright 2017, Alexander Saprykin <xelfium@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#===========================================================================

# Run every *_test* executable in the given directory and print a pass/fail
# summary. The shared-library path is forwarded only to plibraryloader_test,
# which needs a library to load.
# Portability fixes: the original used [[ ]] and $'\n' under #!/bin/sh;
# this version is POSIX-sh compatible.

if [ $# -ne 2 ]; then
    echo "Usage: run_tests.sh <testing_dir> <shared_library>"
    echo "Where: <testing_dir> is a directory containing tests to run"
    echo "       <shared_library> is a path to the shared library to be tested"
    exit 1
fi

total_counter=0
pass_counter=0

echo "Running tests..."

# Glob instead of parsing 'ls' output; filter to *_test* names.
for file in "$1"/*
do
    name=${file##*/}
    case $name in
        *_test*) ;;
        *) continue ;;
    esac

    test_name=${name%.*}
    total_counter=$((total_counter + 1))
    echo "[RUN ] $test_name"

    # Bug fix: the original executed the bare file name, which only worked
    # when the tests directory was the cwd or on $PATH; invoke the file by
    # its path. The needless $(...) wrapper around the invocation (which
    # preserved the exit status only by accident) is gone too.
    if [ "$test_name" = "plibraryloader_test" ]; then
        "$file" "$2" > /dev/null 2>&1
    else
        "$file" > /dev/null 2>&1
    fi

    if [ $? -ne 0 ]; then
        echo "[FAIL] *** Test failed: $test_name"
    else
        echo "[PASS] Test passed: $test_name"
        pass_counter=$((pass_counter + 1))
    fi
done

echo "Tests passed: $pass_counter/$total_counter"
| |
Add script to build cargo on an ARM device | #!/bin/bash
# Tested on:
# - Fresh Debian "sid" chroot
# - Odroid XU
#
# Bootstraps cargo for arm-unknown-linux-gnueabihf using an existing
# cargo/rustc, then packages the result as a dated, hash-stamped tarball.
set -e
set -x

# Update library paths
ldconfig

# Verify that cargo and rust are working
cargo -V
rustc -v

# Fetch cargo
apt-get update -qq
apt-get install -qq git
git clone https://github.com/rust-lang/cargo
cd cargo

# Optionally checkout older commit
if [ ! -z $1 ]; then
  git checkout $1
fi

# Get information about HEAD
HEAD_HASH=$(git rev-parse --short HEAD)
HEAD_DATE=$(date -d @$(git show -s --format=%ct HEAD) +'%Y-%m-%d')
TARBALL=cargo-${HEAD_DATE}-${HEAD_HASH}-arm-unknown-linux-gnueabihf

# Cargo build dependencies
apt-get install -qq cmake file libssl-dev pkg-config python wget

# FIXME (upstream must update lockfile) locked dependencies don't build on ARM
rm Cargo.lock

# Build cargo
./configure --enable-nightly --local-cargo=/usr/local/bin/cargo --prefix=/
make
#make test
make distcheck
DESTDIR=/dist make install

# Ship it
cd /dist
tar czf ../${TARBALL}.tar.gz .
cd ..
# Stamp the tarball name with its own sha1 so downloads can be verified.
CARGO_HASH=$(sha1sum ${TARBALL}.tar.gz | cut -f 1 -d ' ')
mv ${TARBALL}.tar.gz ${TARBALL}-${CARGO_HASH}.tar.gz
| |
Add SublimeText symbolic link to subl | #!/bin/bash
###############################################################################
# SublimeText Command Line Interface                                          #
# https://www.sublimetext.com/docs/command_line.html                          #
###############################################################################

# shellcheck disable=SC1091
cd "$(dirname "${BASH_SOURCE[0]}")" &&
    . "../scripts/utils.sh"

print_sub_heading "SublimeText Command Line Interface (subl)"

# On Linux
if [ "$(uname -s)" = "Linux" ]; then
    # Fix: -f replaces a stale link so re-running is idempotent (plain
    # 'ln -s' fails when /usr/local/bin/subl already exists); -v matches
    # the verbose behaviour previously used only on macOS.
    ln -sfv /opt/sublime_text/sublime_text /usr/local/bin/subl
fi

# On Mac
if [ "$(uname -s)" = "Darwin" ]; then
    ln -sfv "/Applications/Sublime Text.app/Contents/SharedSupport/bin/subl" /usr/local/bin/subl
fi

if test "$(command -v subl)"; then
    print_success "Subl version: $(subl --version)"
fi
| |
Add script to generate release | #!/bin/bash
ENAME=$0

usage()
{
cat << EOF
Usage: $ENAME OPTION

-x      Clean git repository before preparing release
EOF
}

while getopts "x" opt; do
    case "$opt" in
    x )
        git clean -ffdx
        ;;
    ? )
        usage
        exit 1
        ;;
    esac
done

logfile=release.$(date +%y-%m-%d-%T).log
echo "Logging to ${logfile}"

# TODO: Handle tagging?

# Ensure these all have a similar and sane timestamp to
# prevent autotools from trying to rebuild portions wrongly
touch configure.ac aclocal.m4 configure Makefile.am Makefile.in

if [ ! -e ./configure ]; then
    echo "Configure missing. Did you bootstrap?"
    # Bug fix: previously this only warned and fell through into the
    # ./configure call below, which then failed confusingly.
    exit 1
fi

config_opts=""
if ./configure --help | grep -- --enable-maintainer-mode; then
    # Bug fix: the appended flag used an underscore
    # (--enable-maintainer_mode), which configure does not recognize even
    # though the grep above checked for the hyphenated spelling.
    config_opts+=" --enable-maintainer-mode";
fi

echo "Running configure with options '${config_opts}'"
if ! ./configure $config_opts >> ${logfile} 2>&1; then
    echo "Configuration failed. Aborting"
    exit 1
fi

if [ ! -e Makefile ]; then
    echo "Makefile missing. Aborting"
    exit 1
fi

echo "Running make distcheck"
make distcheck >> ${logfile} 2>&1

# Extract out the name of the tarball from the log.
# There is probably a saner method to do this.
tarballs=$(awk '
/^=+$/ && doprint == 1 { exit 0 }
doprint == 1 { print $0 }
$0 ~ /archives ready for distribution/ { doprint = 1 }
' ${logfile})

if [ "x${tarballs}" == "x" ]; then
    echo "Failed to build and verify tarballs"
    exit 1
fi

echo "Found tarballs: ${tarballs}"

# Generate some popular checksums for the tarball
for tarball in ${tarballs}; do
    for sum in sha256 md5; do
        echo "Generating ${tarball}.${sum}"
        ${sum}sum ${tarball} > ${tarball}.${sum}
    done
done

# TODO: Support signing these releases
exit 0
| |
Add Carthage lipo build workaround script to aid in local dev | #!/usr/bin/env bash
# Copied from here: https://github.com/Carthage/Carthage/issues/3019#issuecomment-665136323
# carthage.sh
# Usage example: ./carthage.sh build --platform iOS
set -euo pipefail

# Write the workaround settings into a throwaway xcconfig, removed on exit.
xcconfig=$(mktemp /tmp/static.xcconfig.XXXXXX)
trap 'rm -f "$xcconfig"' INT TERM HUP EXIT

# For Xcode 12 make sure EXCLUDED_ARCHS is set to arm architectures otherwise
# the build will fail on lipo due to duplicate architectures.
echo 'EXCLUDED_ARCHS__EFFECTIVE_PLATFORM_SUFFIX_simulator__NATIVE_ARCH_64_BIT_x86_64__XCODE_1200 = arm64 arm64e armv7 armv7s armv6 armv8' >> $xcconfig
echo 'EXCLUDED_ARCHS = $(inherited) $(EXCLUDED_ARCHS__EFFECTIVE_PLATFORM_SUFFIX_$(EFFECTIVE_PLATFORM_SUFFIX)__NATIVE_ARCH_64_BIT_$(NATIVE_ARCH_64_BIT)__XCODE_$(XCODE_VERSION_MAJOR))' >> $xcconfig

# XCODE_XCCONFIG_FILE is picked up by every xcodebuild Carthage spawns;
# all arguments are forwarded to the real carthage.
export XCODE_XCCONFIG_FILE="$xcconfig"
carthage "$@"
| |
Add a script to generate server stubs | #!/usr/bin/env bash
set -e

# Generate Go server stubs from an OpenAPI spec via generator.swagger.io.
#   $1 - spec filename under sockshop/openapi
#   $2 - output directory for the generated server code
module=$1
output=$2
self=$(basename $0)

function help() {
    echo "Usage: $self <json-spec-location-filename (should be under sockshop/openapi directory)> <server-code-output-location>"
}

if [[ "$module" == "" ]] || [[ "$output" == "" ]] ; then
    help
    exit 1
fi

# Pin the spec URL to the currently checked-out commit so the generator
# sees exactly this revision of the OpenAPI file.
fileUrl=https://raw.githubusercontent.com/weaveworks/microservices-demo/"$(git rev-parse HEAD)"/sockshop/openapi/"$module"

if ! out=$(curl -sf $fileUrl); then
    echo "Couldn't get the file at $fileUrl"
    exit 1;
fi

# Fix: 'jq -r' emits the raw string (replacing the fragile
# 'jq .link | sed s/\"//g' pipeline), and '// empty' turns a missing or
# null .link into an empty string so the guard below actually catches it
# (previously a null produced the literal string "null" and slipped past).
link=$(curl -sf -XPOST -H "content-type:application/json" -d "{\"swaggerUrl\":\"$fileUrl\"}" https://generator.swagger.io/api/gen/servers/go | jq -r '.link // empty')

if [ "$link" == "" ]; then
    echo "Download link is broken. This error needs to improve."
    exit 1
fi

mkdir -p $output
curl -sf $link > $output/go-server.tar && tar -xvf $output/go-server.tar -C $output/ && rm -f $output/go-server.tar
| |
Add docker test runner script | #!/usr/bin/env bash
# Run the PHP test suite inside the container: install Composer locally,
# install dependencies, then execute PHPUnit.
# Bug fix: guard the cd — previously a missing /opt/php let every
# subsequent command run in the wrong directory.
cd /opt/php || exit 1

php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
# NOTE(review): this SHA-384 is pinned to one specific installer release
# and must be refreshed whenever Composer publishes a new installer; on a
# mismatch only a message is printed and the setup file removed.
php -r "if (hash_file('SHA384', 'composer-setup.php') === '669656bab3166a7aff8a7506b8cb2d1c292f042046c5a994c43155c0be6190fa0355160742ab2e1c88d40d5be660b410') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
php composer-setup.php
php -r "unlink('composer-setup.php');"
php composer.phar install --prefer-dist --no-interaction

ls -la ./vendor/bin

if ./vendor/bin/phpunit; then
    echo "Tests passed successfully!"
    exit 0
else
    echo "Tests failed :("
    exit 1
fi
Add export utility for Nexuiz, now check-proj handles it. | #!/bin/sh
# Export utility: strips Visual Studio artifacts from the Nexuiz QuakeC
# sources, regenerates the 'dirs' project list, and packs everything into
# ../nexuiz.zip.
if [ ! -d qcsrc ]; then
    echo "failed to find qcsrc directory in $(pwd), please run this script"
    echo "from nexuiz data directory"
    exit 1
else
    # ensure this is actually a nexuiz source tree
    # (portability fix: pushd/popd are bash builtins and this script runs
    # under /bin/sh, so plain cd is used instead)
    cd qcsrc || exit 1
    if [ ! -d client -o ! -d common -o ! -d menu -o ! -d server -o ! -d warpzonelib ]; then
        echo "this doesnt look like a nexuiz source tree, aborting"
        exit 1
    fi
fi

# printf replaces the non-portable 'echo -n'
printf "removing redundant files ..."
rm -f nexuiz.ncb
rm -f nexuiz.sln
rm -f nexuiz.suo
rm -f nexuiz.vcproj
rm -f nexuiz.vcproj.user
echo "complete"

printf "creating projects ..."
echo "client" > dirs
echo "server" >> dirs
echo "menu" >> dirs
echo "complete"

printf "creating zip archive ..."
zip -r -9 ../nexuiz.zip * > /dev/null
echo "complete"

cd ..
echo "finished!"
| |
Add script for mount serial USB device | sudo modprobe ftdi-sio
sudo chmod 666 /sys/bus/usb-serial/drivers/ftdi_sio/new_id
echo 165c 0008 > /sys/bus/usb-serial/drivers/ftdi_sio/new_id
echo "ttyUSB{x} created : {x} is number."
| |
Prepare scripts to start and stop the Java GCD service | #!/bin/sh
# Portability fix: POSIX 'name() {' syntax — the 'function' keyword is a
# bashism and this script's shebang is /bin/sh.

# Poll the GCD status port (9998) every 2s, up to 30 attempts. On timeout
# the GCD process is killed and 1 is returned.
wait_for_gcd() {
    try_count=0
    max_retries=30
    while [ -z "$(echo '\n' | curl -v telnet://localhost:9998 2>&1 | grep 'OK')" ]
    do
        echo "GCD not started yet ..."
        try_count=$((try_count+1))
        if [ $try_count -gt $max_retries ]; then
            echo "GCD failed to start in time."
            kill -9 $GCD_PID
            return 1
        fi
        echo "try_count ${try_count}"
        sleep 2
    done
    echo "GCD started successfully."
    return 0
}

JAR_PATH=/data/src/java/backend-services/capabilities-directory/target/deploy

# Start the Global Capabilities Directory jar in the background (logging
# to /data/logs/gcd.log when that directory exists) and wait for it to
# come up. Sets GCD_PID; returns wait_for_gcd's status.
startGcd() {
    # At the point the db should be started
    echo 'start GCD'
    if [ -d "/data/logs" ]
    then
        java -Dlog4j.configuration="file:${JAR_PATH}/log4j.properties" -jar ${JAR_PATH}/capabilities-directory-jar-with-dependencies.jar 2>&1 > /data/logs/gcd.log &
    else
        java -Dlog4j.configuration="file:${JAR_PATH}/log4j.properties" -jar ${JAR_PATH}/capabilities-directory-jar-with-dependencies.jar &
    fi
    GCD_PID=$!
    wait_for_gcd
    return $?
}

stopGcd() {
    echo 'stop GCD'
    # The app will shutdown if a network connection is attempted on localhost:9999
    (
        set +e
        # curl returns error because the server closes the connection. We do not need the ret val.
        timeout 1 curl telnet://127.0.0.1:9999
        exit 0
    )
    wait $GCD_PID
}
| |
Add identity to each session | if [[ $- = *i* ]]; then
declare -i __agent_run_state=$(ssh-add -l >| /dev/null 2>&1; echo $?)
if [ "$SSH_AUTH_SOCK" -a $__agent_run_state -eq 1 ]; then
ssh-add -A
fi
unset __agent_run_state
fi
| |
Add test for wordpress example | #!/bin/bash
# Trace commands and stop on the first failure.
set -xe

# Deploy the wordpress example on a single slave and run its test script
# through the shared Jenkins runner.
export ENV_NAME="solar-example" \
       SLAVES_COUNT=1 \
       DEPLOY_TIMEOUT=300 \
       TEST_SCRIPT="/bin/bash /vagrant/solar-resources/examples/wordpress/run.sh"

./utils/jenkins/run.sh
| |
Add script to check if JUnit & Maven are playing nice... | #!/bin/bash
set -e -u
#
# Dear future me or Galago contributor:
#
# When you decide to give an interesting name to your test,
# please run this script! It will alert you to the fact that
# maven will *only* run tests that are in files with *Test.java
# at the end.
#

# List every file containing a JUnit @Test annotation (-R recursive,
# -l names only). $(...) replaces the legacy backtick substitution.
TEST_FILES=$(grep "@Test" contrib/src/* core/src/* tupleflow/src/* tupleflow-typebuilder/src/* -Rl)

for FILE_NAME in $TEST_FILES; do
  # Surefire's default include pattern only picks up *Test.java files.
  [[ $FILE_NAME == *Test.java ]] || echo "Maven will skip $FILE_NAME silently! Please rename!"
done
| |
Add a script to test examples | #!/bin/bash
# Smoke-test the example training scripts: every run is capped at 100
# steps (with small replay-start sizes) so the whole suite stays quick.
# -C prevents clobbering files via '>', -e aborts on the first failure,
# -u errors on unset variables.
set -Ceu

python examples/ale/train_dqn_ale.py pong --steps 100 --replay-start-size 50
python examples/ale/train_a3c_ale.py 4 pong --steps 100
python examples/ale/train_nsq_ale.py 4 pong --steps 100
python examples/gym/train_dqn_gym.py --steps 100 --replay-start-size 50
python examples/gym/train_a3c_gym.py 4 --steps 100
python examples/gym/train_ddpg_gym.py --steps 100 --replay-start-size 50 --minibatch-size 32
| |
Add a test for aliases to AUTO_CD directories. | #!/usr/bin/env zsh
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2020 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
# With AUTO_CD set, a bare directory path is a valid command; verify that
# an alias expanding to such a path ('/') is still highlighted as an alias.
setopt autocd
alias x=/

BUFFER=$'x'

expected_region_highlight=(
'1 1 alias' # x
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.