Instruction
stringlengths
14
778
input_code
stringlengths
0
4.24k
output_code
stringlengths
1
5.44k
Add script for downloading and reverting dev bibs
#!/bin/bash BIBDUMP=/tmp/lddb-dev-bib-records # Download a temporary dump of (embellished) instance records. # Take a couple of minutes. if [[ ! -d $BIBDUMP ]]; then mkdir -p $BIBDUMP pushd $BIBDUMP curl -s -HAccept:application/ld+json 'https://libris-dev.kb.se/find.jsonld?@type=Instance&_limit=1000' | python -c ' import sys, json data = json.load(sys.stdin) for item in data["items"]: url = item["@id"].rsplit("#", 1)[0] xlid = url.rsplit("/", 1)[-1] print("curl -s -HAccept:application/ld+json -o {} {}".format(xlid, url)) ' | bash popd fi # Revert all in the dump (takes ~30 s). # Writes to stdout (redirect this to a file of your choice to diff). pushd $(dirname $0)/../../whelk-core ../gradlew runMarcFrame -Dargs="revert $(find $BIBDUMP -name '*.jsonld')" 2>/tmp/stderr.txt popd
Add common cloud VM init script.
#!/bin/bash cd $HOME . mender-qa/scripts/initialize-build-host.sh sudo curl -L https://github.com/docker/compose/releases/download/1.16.1/docker-compose-`uname -s`-`uname -m` > docker-compose sudo cp docker-compose /usr/bin/docker-compose sudo chmod +x /usr/bin/docker-compose sudo chown jenkins:jenkins /usr/bin/docker-compose sudo apt-get -qy update echo "deb http://apt.dockerproject.org/repo debian-jessie main" | sudo tee -a /etc/apt/sources.list.d/docker.list curl -sL https://deb.nodesource.com/setup_4.x | sudo -E bash - sudo apt-get -qy update sudo apt-get -qy --force-yes install git autoconf automake build-essential diffstat gawk chrpath libsdl1.2-dev e2tools nfs-client s3cmd docker-engine psmisc screen libssl-dev python-dev libxml2-dev libxslt-dev libffi-dev nodejs libyaml-dev sysbench texinfo default-jre-headless pkg-config zlib1g-dev libaio-dev libbluetooth-dev libbrlapi-dev libbz2-dev libglib2.0-dev libfdt-dev libpixman-1-dev zlib1g-dev jq liblzo2-dev device-tree-compiler sudo cp /sbin/debugfs /usr/bin/ || echo "debugfs not in /sbin/" wget https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz gunzip -c go1.8.linux-amd64.tar.gz | (cd /usr/local && sudo tar x) sudo ln -sf ../go/bin/go /usr/local/bin/go sudo ln -sf ../go/bin/godoc /usr/local/bin/godoc sudo ln -sf ../go/bin/gofmt /usr/local/bin/gofmt sudo service docker restart sudo npm install -g gulp sudo npm install mocha selenium-webdriver@3.0.0-beta-2 saucelabs # Python 2 pip sudo easy_install pip sudo pip2 install requests --upgrade sudo pip2 install pytest==3.2.5 sudo pip2 install filelock --upgrade sudo pip2 install pytest-xdist --upgrade sudo pip2 install pytest-html --upgrade sudo pip2 install fabric --upgrade sudo pip2 install psutil --upgrade sudo pip2 install boto3 --upgrade # Python 3 pip sudo apt-get -qy --force-yes install python3-pip sudo pip3 install pyyaml --upgrade
Add a script to simplify testing documentation changes
#!/bin/bash # # Copyright (C) 2019 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script builds Gerrit's documentation and shows the current state in # Chrome. Specific pages (e.g. rest-api-changes.txt) including anchors can be # passed as parameter to jump directly to them. SCRIPT_DIR=$(dirname -- "$(readlink -f -- "$BASH_SOURCE")") GERRIT_CODE_DIR="$SCRIPT_DIR/.." cd "$GERRIT_CODE_DIR" bazel build Documentation:searchfree if [ $? -ne 0 ] then echo "Building the documentation failed. Stopping." exit 1 fi TMP_DOCS_DIR=/tmp/gerrit_docs rm -rf "$TMP_DOCS_DIR" unzip bazel-bin/Documentation/searchfree.zip -d "$TMP_DOCS_DIR" </dev/null >/dev/null 2>&1 & disown if [ $? -ne 0 ] then echo "Unzipping the documentation to $TMP_DOCS_DIR failed. Stopping." exit 1 fi if [ "$#" -lt 1 ] then FILE_NAME="index.html" else FILE_NAME="$1" fi DOC_FILE_NAME="${FILE_NAME/.txt/.html}" google-chrome "file:///$TMP_DOCS_DIR/Documentation/$DOC_FILE_NAME" </dev/null >/dev/null 2>&1 & disown
Add script to create cps-tools tarball
#!/bin/sh -x # Create cps-tools tarball cp -a conpaas-services/bin cps-tools cat <<EOF > cps-tools/README To use the cps-tools (on debian/ubuntu): $ apt-get install python python-pycurl $ export PYTHONPATH=/path/to/cps-tools $ export CONPAAS_CERTS_DIR=/path/to/conpaas/certs $ /path/to/cps-tools/cpsclient.xxx http://ip:port command EOF cp -a conpaas-services/src/conpaas cps-tools/ # Removing jars as we do not need them for the command line tools find cps-tools -type f -name \*.jar | xargs rm # Removing .svn dirs find cps-tools -type d -name ".svn" | xargs rm -rf tar czf cps-tools.tar.gz cps-tools rm -rf cps-tools echo "cps-tools.tar.gz created"
Add tools to create zip
#/bin/sh tag=`git describe --abbrev=0 --tags` zipPath="tools/packages/kreds-$tag.zip" swift build -c release -Xswiftc -static-stdlib zip "$zipPath" -j "./.build/release/kreds" "./License"
Add coffeescript aliases: cf, cfc, cfp
#!/bin/zsh # compile a string of coffeescript and print to output cf () { coffee -peb $1 } # compile & copy to clipboard cfc () { cf $1 | tail -n +2 | pbcopy } # compile from pasteboard & print alias cfp='coffeeMe "$(pbpaste)"'
Add script to upload SVG files to S3
#!/bin/bash # set the file based on the first argument and the environment based on the second argument file=$1 env=$2 # set the name of the file to be copied and the S3 path urs=$(echo "$file" | grep -o 'URS0[^[:blank:]]*') path=$env/${urs:0:3}/${urs:3:2}/${urs:5:2}/${urs:7:2}/${urs:9:2}/ # name of the S3 bucket bucket_name="ebi-rnacentral" # get the current date to calculate the signature and also to pass to S3 date=`date +'%a, %d %b %Y %H:%M:%S %z'` # calculate the signature to be sent as a header content_type="application/octet-stream" string_to_sign="PUT\n\n$content_type\n${date}\n/${bucket_name}/${path}${urs}" signature=$(echo -en "${string_to_sign}" | openssl sha1 -hmac "${SECRET}" -binary | base64) # upload file echo "Adding ${urs} to S3" curl -X PUT -T "${file}" \ -H "Host: s3.embassy.ebi.ac.uk/${bucket_name}" \ -H "Date: $date" \ -H "Content-Type: $content_type" \ -H "Authorization: AWS ${S3_KEY}:${signature}" \ "https://s3.embassy.ebi.ac.uk/${bucket_name}/${path}${urs}"
Change to use relatively new pacakges. PART1
#!/bin/sh # Install required packages in Ubuntu: # 1. SOIL(www.lonesock.net/soil.html) -- Load images for OpenGL Texture # 2. glm library -- C++ library for OpenGL GLSL type-based mathematics # 3. assimp -- 3D model import library supporting kinds of model format # 4. glew -- OpenGL Extension Wrangler to get specific OpenGL abilities sudo apt-get install libsoil-dev libglm-dev libassimp-dev libglew-dev # If `apt-cache search libglfw3-dev` returns no results, go to `glfw.org` for download. #sudo apt-get install libglfw3-dev
Add a rough script to plot frame times
#!/usr/bin/env bash # # This script can be used to plot frame time layer output with gnuplot. A PNG is # generated containing a CDF of frame times in milliseconds. Any number of input # files can be specified and by default the output is written to frametime.png. # # Usage: # plot_fram_times.sh frame_time_a.txt frame_time_b.txt set -e XTICS=${XTICS:-5} YTICS=${YTICS:-0.1} XRANGE=${XRANGE:-50} OUTFILE=${OUTFILE:-frametime.png} plot="plot " comma="" for cur in $@ do lines=$(wc -l "${cur?}" | awk '{print $1}') plot="${plot?}${comma?}'${cur?}' u (\$1/1000000):(1/${lines?}.) smooth cumulative w l t '${cur?}'" comma=", " done echo -e 'set term png size 1920,1080;' \ "set out '${OUTFILE?}';" \ 'set key autotitle columnhead;' \ "set ytics ${YTICS?};" \ "set xtics ${XTICS?};" \ 'set xlabel "Frame Time (ms)";' \ 'set ylabel "Percentile";' \ "set xrange [0:${XRANGE?}];" \ "${plot?}" \ | gnuplot
Add a script which does a build of versioned documentation, used for generating docs.mollyproject.org
#!/bin/bash function build_documentation { tag=$1 cd docs rm -rf build make html rm -rf $OUTPUT_DIR/$tag/ mkdir $OUTPUT_DIR/$tag/ cp -rf build/html/* $OUTPUT_DIR/$tag/ } REPO=git://github.com/mollyproject/mollyproject.git OUTPUT_DIR=$1 if [ -z "$OUTPUT_DIR" ] ; then echo "$0 <path-to-doc-root>" exit; fi BUILD_DIR=`mktemp -d` git clone $REPO $BUILD_DIR cd $BUILD_DIR build_documentation dev for tag in `git tag`; do build_documentation $tag done rm -rf $BUILD_DIR
Disable xdebug when running composer
#!/usr/bin/env bash # ----------------------------------------------------------------------------- # Info: # Miroslav Vidovic # my_composer.sh # 24.08.2016.-16:26:46 # ----------------------------------------------------------------------------- # Description: # Run composer with xdebug ini disabled. # Usage: # my_composer.sh some_composer_command # Credit: # http://stackoverflow.com/questions/31083195/disabling-xdebug-when-running-composer # ----------------------------------------------------------------------------- # Script: php_no_xdebug() { local temporaryPath="$(mktemp -t php.XXXX).ini" # Using awk to ensure that files ending without newlines do not lead to configuration error php -i | grep "\.ini" | grep -o -e '\(/[a-z0-9._-]\+\)\+\.ini' | grep -v xdebug | xargs awk 'FNR==1{print ""}1' > "$temporaryPath" php -n -c "$temporaryPath" "$@" rm -f "$temporaryPath" } php_no_xdebug /usr/bin/composer $@ exit 0
Add script that replaces faulty relANNIS and SaltXML pepper module files in product zips
#!/bin/bash ( mkdir de.uni_jena.iaa.linktype.atomic.repository/target/products/plugins cd atomic-custom-files cp -r ./* ../de.uni_jena.iaa.linktype.atomic.repository/target/products/plugins/ cd .. cd de.uni_jena.iaa.linktype.atomic.repository/target/products/ for file in ./*.zip; do echo "Working on" ${file##*/} "now." zip -r ${file##*/} ./plugins/ #zip -r ${file##*/} test/ #mv ${file##*/} unzip/ done ) exit 0 #You can do the following, when your current directory is parent_directory: #for d in [0-9][0-9][0-9] #do # ( cd $d && your-command-here ) #done #The ( and ) create a subshell, so the current directory isn't changed in the main script.
Add install script for AIF
#!/bin/sh echo -e "\e[31mWarning: \e[0mThis script should \e[4monly\e[0m be run from an Official Arch Installation ISO." pacman -Sy pacman -S --noconfirm unzip cd / wget https://github.com/CarlDuff/aif/archive/master.zip unzip master.zip rm master.zip cd aif-master sh aif
Add a test script to incrementally self host.
#! /bin/sh # A script to self host whatever object files that we can as a regression test. # One day it can be replaced with: # make install && make clean && make CC=scc install set -e set -u unset SCCEXECPATH selfhostedobj=\ "lib/debug.o lib/die.o lib/newitem.o lib/xstrdup.o lib/xmalloc.o lib/xcalloc.o lib/xrealloc.o" #driver/posix/scc.o #cc1/error.o #cc1/stmt.o #cc1/init.o #cc1/arch/qbe/arch.o #cc1/fold.o #cc1/types.o #cc1/builtin.o #cc1/cpp.o #cc1/symbol.o #cc1/lex.o #cc1/decl.o #cc1/main.o #cc1/code.o #cc1/expr.o #cc2/arch/qbe/cgen.o #cc2/arch/qbe/types.o #cc2/arch/qbe/optm.o #cc2/arch/qbe/code.o #cc2/peep.o #cc2/parser.o #cc2/node.o #cc2/symbol.o #cc2/optm.o #cc2/main.o #cc2/code.o" if ! test -d ./cc1 then echo "run this script from the root of the scc repository." exit 1 fi boostrapdir="$(pwd)/_bootstrap" rm -rf "$boostrapdir" mkdir "$boostrapdir" make clean make PREFIX="$boostrapdir" install export PATH="$boostrapdir/bin:$PATH" rm bin/scc bin/cc* for o in $selfhostedobj do rm $o done make CC=scc tests
Add script to reproduce test kraken2 database
#!/bin/bash # This script produces a small kraken2 database containing only a ~1kb portion each of a salmonella and ecoli genome # It requires kraken2, and entrez-direct (available on bioconda) kraken2-build --db test_db --download_taxonomy mv test_db/taxonomy/nucl_gb.accession2taxid test_db/taxonomy/nucl_gb.accession2taxid_full grep -e 'NC_003198.1' -e 'NC_011750.1' test_db/taxonomy/nucl_gb.accession2taxid_full > test_db/taxonomy/nucl_gb.accession2taxid esearch -db nucleotide -query "NC_003198.1" | efetch -format fasta > NC_003198.1.fasta esearch -db nucleotide -query "NC_011750.1" | efetch -format fasta > NC_011750.1.fasta head -n 14 NC_003198.1.fasta > NC_003198.1_1kb.fasta head -n 14 NC_011750.1.fasta > NC_011750.1_1kb.fasta kraken2-build --db test_db --add-to-library NC_003198.1_1kb.fasta kraken2-build --db test_db --add-to-library NC_011750.1_1kb.fasta kraken2-build --db test_db --build
Add a script to delete and wait for namespace termination
#!/bin/bash set -e set -u set -x if [[ $# < 1 ]]; then echo "Deletes all Active namespaces matchting regex, and waits for their termination" echo "Usage: $0 namespace_regex echo "eg: $0 density exit fi regex="$1" poll_delay_secs=5 # Delete all Active namespaces that match the regex for ns in $(kubectl get namespaces | grep ${regex} | grep Active | awk '{print $1}'); do kubectl delete namespace $ns; done # The namepace is going to sit around in Terminating state until k8s has removed all resources # therein, including pods, rcs, events, etc. while (kubectl get namespaces | grep ${regex}); do sleep ${poll_delay_secs}; date -u done
Put in doc updater script that pull in readonly stuff
#!/bin/sh if [ ! -d docs ];then mkdir docs fi cd docs if [ ! -d .git ];then git init git remote add git://github.com/nanonyme/SimpleLoop.wiki.git fi git pull origin master cd ..
Add a tool to cleann app config & cache
#!/bin/sh set -eux APP_ID="$1" rm -rf ~/.local/share/webkitgtk ~/.cache/webkitgtk rm -rf ~/.local/share/webkit ~/.cache/webkit rm -rf ~/.cache/nuvolaplayer3/webcache ~/.cache/nuvolaplayer3/WebKitCache rm -rf ~/.local/share/nuvolaplayer3/apps_data/$APP_ID rm -rf ~/.cache/nuvolaplayer3/apps_data/$APP_ID rm -rf ~/.config/nuvolaplayer3/apps_data/$APP_ID
Add a script to add git branches for each channel.
#!/bin/sh : ${NIXOS_CHANNELS:=https://nixos.org/channels/} # Find the name of all channels which are listed in the directory. for channelName in : $(curl -s $NIXOS_CHANNELS | sed -n '/folder/ { s,.*href=",,; s,/".*,,; p }'); do test "$channelName" = : && continue; # Do not follow redirections, such that we can extract the # short-changeset from the name of the directory where we are # redirected to. sha1=$(curl -s --max-redirs 0 $NIXOS_CHANNELS$channelName | sed -n '/has moved/ { s,.*\.\([a-z0-9A-Z]*\)".*,\1,; p; }') test -z "$sha1" -o -z "$channelName" && continue; # Update the local channels/* branches to be in-sync with the # channel references. git update-ref refs/heads/channels/$channelName $sha1 done
Add script to clean up generated CSS files and empty bootstrap4 directories
#!/bin/bash # delete untracked CSS files in the given paths git ls-files --others src/{Site,angular-app}/**/*.css | xargs rm # Remove empty bootstrap4 directories find . -type d -empty -name bootstrap4 -delete
Add script to push images to Docker Hub
#!/bin/bash BASEDIR=$(pwd) TAG=$(git describe --exact-match) if [[ ! ${TAG} =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then echo "The tag ${TAG} isn't a valid version tag." exit fi VERSION=${TAG#v} cd ${BASEDIR}/authorization-server/docker docker build -t "openmhealth/omh-dsu-authorization-server:latest" . docker build -t "openmhealth/omh-dsu-authorization-server:${VERSION}" . docker push "openmhealth/omh-dsu-authorization-server:latest" docker push "openmhealth/omh-dsu-authorization-server:${VERSION}" cd ${BASEDIR}/resource-server/docker docker build -t "openmhealth/omh-dsu-resource-server:latest" . docker build -t "openmhealth/omh-dsu-resource-server:${VERSION}" . docker push "openmhealth/omh-dsu-resource-server:latest" docker push "openmhealth/omh-dsu-resource-server:${VERSION}"
Add a script to change your UID on Mac OS X
#!/bin/sh ################################################################################ if [ `id -u` -ne 0 -o $# -ne 2 ]; then cat <<EOF Usage: change-uid.sh username new-UID This script will change the user ID of the given user to the UID given in new-UID. For example: change-uid.sh pjones 1000 Will change the pjones user to UID 1000. Tested on: - Mac OS X Lion 10.7.3 NOTE: You must run this script as root. Preferably by either logging in directly as root or getting a root shell via 'sudo su -'. EOF exit 1 fi ################################################################################ die () { echo "$@" > /dev/stderr exit 1 } ################################################################################ user_name=$1 new_uid=$2 old_uid=`id -u $user_name` [ $? -eq 0 ] || exit 1 ################################################################################ local_fs=`df -l | awk 'NR > 1 {print $6}'` for fs in $local_fs; do if [ ! -d $fs ]; then die "bad file system name, maybe it has a space in its name: $fs" fi done ################################################################################ echo "==> changing $user_name from $old_uid to $new_uid" old_unique_id=`dscl . -read /Users/$user_name UniqueID | awk '{print $2}'` if [ "$old_uid" -ne "$old_unique_id" ]; then echo "whoa, id and dscl don't agree on the UID" > /dev/stderr echo "id says $old_uid and dscl says $old_unique_id" > /dev/stderr exit 1 fi dscl . 
-change /Users/$user_name UniqueID $old_uid $new_uid || \ die "failed to change UID using dscl" ################################################################################ echo "==> updating ownership on local file systems" for fs in $local_fs; do echo "===> updating $fs" find -x $fs -user $old_uid -print0 | xargs -0 chown -h $new_uid if [ -r $fs/.Trashes/$old_uid ]; then echo "===> renaming Finder Trash for $fs" mv $fs/.Trashes/$old_uid $fs/.Trashes/$new_uid fi find -x $fs -name "*.$old_uid" -type f -print0 | \ xargs -0 -n 1 sh -c 'echo $0 `echo $0|sed s/\\.'$old_uid'/.'$new_uid'/`' done ################################################################################ echo "==> Done. You may want to reboot."
Add a script to take all the route/route6 objects and put them in mongoDB.
#! /bin/sh # fetch ripe.db.route wget "ftp://ftp.ripe.net/ripe/dbase/split/ripe.db.route.gz" zcat ripe.db.route.gz | awk 'BEGIN{ORS=""} $1 == "route:" {split($2, route, "/");print("db.routes.insert({ route : \"" route[1] "\", mask : " route[2] " , origin : ");} $1 == "origin:" {print("\"" $2 "\"});\n");}' >> db.routes.js # fetch ripe.db.route6 wget "ftp://ftp.ripe.net/ripe/dbase/split/ripe.db.route6.gz" zcat ripe.db.route6.gz | awk 'BEGIN{ORS=""} $1 == "route6:" {split($2, route, "/");print("db.routes6.insert ({ route : \"" route[1] "\", mask : " route[2] " , origin : ");} $1 == "origin:" {print("\"" $2 "\"});\n");}' >> db.routes6.js mongo routes --eval "db.routes.drop()" mongo routes < db.routes.js mongo routes --eval "db.routes6.drop()" mongo routes < db.routes6.js # Delete the files we just created rm db.routes.js rm db.routes6.js
Add bootstrap script for windows
#!/usr/bin/env bash SCRIPT_DIR=$(dirname "$0") # Assumes you have pip, wget mkdir atsy-test cd atsy-test # Clone the atsy repo # Manually git clone atsy #git clone https://github.com/EricRahm/atsy.git # Install nginx for windows NGINX_ZIP=nginx-1.11.10.zip wget http://nginx.org/download/$NGINX_ZIP unzip $NGINX_ZIP rm $NGINX_ZIP # Get the latest chromedriver CHROMEDRIVER_ZIP=chromedriver_win32.zip CHROMEDRIVER_VERSION=$(wget -qO- http://chromedriver.storage.googleapis.com/LATEST_RELEASE) wget http://chromedriver.storage.googleapis.com/$CHROMEDRIVER_VERSION/$CHROMEDRIVER_ZIP unzip $CHROMEDRIVER_ZIP rm $CHROMEDRIVER_ZIP # Download the tp5 pageset and setup a webroot mkdir nginx_root cd nginx_root mkdir html mkdir logs mkdir conf # Install tp5 wget http://people.mozilla.org/~jmaher/taloszips/zips/tp5n.zip unzip -q tp5n.zip -d html/ mv html/tp5n/ html/tp5 rm tp5n.zip # Add the nginx config cp "../../$SCRIPT_DIR/comp_analysis_nginx.conf" conf/nginx.conf cd .. ######################### # Setup python pacakges # ######################### # Install virtualenv if necessary pip install virtualenv # Setup a virtualenv to work in virtualenv venv source venv/Scripts/activate # For installing Firefox nightly pip install mozdownload mozinstall # Setup atsy cd "../$SCRIPT_DIR/.." python setup.py install cd - deactivate echo "Setup finished!"
Add script to regenerate test-certificate fixtures
#!/usr/bin/env sh set -eu # This script is used to generate the test-certificates in the notary-server and # evil-notary-server directories. Run this script to update the certificates if # they expire. GO111MODULE=off go get -u github.com/dmcgowan/quicktls cd notary quicktls -org=Docker -with-san notary-server notaryserver evil-notary-server evilnotaryserver localhost 127.0.0.1 cat ca.pem >> notary-server.cert mv ca.pem root-ca.cert cp notary-server.cert notary-server.key root-ca.cert ../notary-evil/
Add scipt to disable Adobe CC bloat
#!/usr/bin/env bash # Prevent useless Adobe CC daemons from launching on startup. sudo mv /Library/LaunchAgents/com.adobe.AAM.Updater-1.0.plist{,.disabled} sudo mv /Library/LaunchAgents/com.adobe.AdobeCreativeCloud.plist{,.disabled} sudo mv /Library/LaunchDaemons/com.adobe.adobeupdatedaemon.plist{,.disabled} sudo mv /Library/LaunchDaemons/com.adobe.agsservice.plist{,.disabled} # Prevent the Adobe CC updater from launching on startup. We replace the file # with a dummy file to prevent CC from recreating it. rm ~/Library/LaunchAgents/com.adobe.AAM.Updater-1.0.plist defaults write ~/Library/LaunchAgents/com.adobe.AAM.Updater-1.0.plist Label -string "com.adobe.AAM.Scheduler-1.0"
Add example user-data script for private GitHub repositories
#!/bin/sh set -e -x # Essential Packages apt-get --yes --quiet update apt-get --yes --quiet install python python-pip python-dev git apt-get --yes --quiet install libxml2-dev libxslt1-dev lib32z1-dev # Peach: lxml apt-get --yes --quiet install s3cmd # Peach: userdata.py # Add GitHub as a known host ssh-keyscan github.com >> /root/.ssh/known_hosts # Add this key as deploy key to the GitHub project # Command: ssh-keygen -t rsa -C "Deploy key for Peach" cat << EOF > /root/.ssh/id_rsa.peach.pub INSERT_PUBLIC_KEY_HERE EOF cat << EOF > /root/.ssh/id_rsa.peach INSERT_PRIVATE_KEY_HERE EOF # Add this key as deploy key to the GitHub project # Command: ssh-keygen -t rsa -C "Deploy key for Pits" cat << EOF > /root/.ssh/id_rsa.pits.pub INSERT_PUBLIC_KEY_HERE EOF cat << EOF > /root/.ssh/id_rsa.pits INSERT_PRIVATE_KEY_HERE EOF # Setup Key Indentities cat << EOF > /root/.ssh/config Host * StrictHostKeyChecking no Host peach github.com Hostname github.com IdentityFile /root/.ssh/id_rsa.peach Host pits github.com Hostname github.com IdentityFile /root/.ssh/id_rsa.pits EOF # Set Key Permissions chmod 600 /root/.ssh/id_rsa.peach chmod 600 /root/.ssh/id_rsa.pits # Checkout Fuzzer git clone -v --depth 1 git@peach:MozillaSecurity/peach.git git clone -v --depth 1 git@pits:MozillaSecurity/pits.git peach/Pits cd peach sudo pip -q install -r requirements.txt python scripts/userdata.py -sync # Download Firefox wget --no-check-certificate -r --no-parent -A firefox-*.en-US.linux-x86_64-asan.tar.bz2 https://ftp.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/mozilla-inbound-linux64-asan/latest/ wget --no-check-certificate -r --no-parent -A firefox-*.txt https://ftp.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/mozilla-inbound-linux64-asan/latest/ cd ftp.mozilla.org/pub/mozilla.org/firefox/tinderbox-builds/mozilla-inbound-linux64-asan/latest/ tar xvfj *.tar.bz2 # Run FuzzingBot ./scripts/peachbot.py -data Resources # Stop Instance shutdown -h now
Add useful script to count failures on run. Useful when testing test suites fragility
function count_failures() { local count_failures_ERR local count_failures_SLEEP local count_failures_RUN local count_failures_COUNT case $SLEEP in "") count_failures_SLEEP=60 ;; [0-9] | [1-9][0-9] | [1-9][0-9][0-9] | [1-9][0-9][0-9][0-9]) count_failures_SLEEP=$SLEEP ;; esac count_failures_RUN=1 count_failures_COUNT=0 while [ 0 -eq 0 ]; do echo "count_failures: run #$count_failures_RUN (failures: $count_failures_COUNT) (`date`)" $@ count_failures_ERR=$? if [ $count_failures_ERR -gt 0 ]; then count_failures_COUNT=$(( $count_failures_COUNT + 1 )) fi sleep $count_failures_SLEEP count_failures_RUN=$(( $count_failures_RUN + 1 )) done }
Add script for generating BIRD IPv4 mesh config
#! /bin/bash # The bird config file path is different for Red Hat and Debian/Ubuntu. if [ -f /etc/bird.conf ]; then BIRD_CONF=/etc/bird.conf else BIRD_CONF=/etc/bird/bird.conf fi BIRD_CONF_TEMPLATE=/usr/share/calico/bird/calico-bird.conf.template BIRD_CONF_PEER_TEMPLATE=/usr/share/calico/bird/calico-bird-peer.conf.template # Require at least 3 arguments. [ $# -ge 3 ] || cat <<EOF Usage: $0 <my-ip-address> <as-number> <peer-ip-address> ... where <my-ip-address> is the external IP address of the local machine <as-number> is the BGP AS number that we should use each <peer-ip-address> is the IP address of another BGP speaker that the local BIRD should peer with. EOF [ $# -eq 3 ] || exit -1 # Name the arguments. my_ip_address=$1 shift as_number=$1 shift peer_ips="$@" # Generate peer-independent BIRD config. mkdir -p $(dirname $BIRD_CONF) sed -e " s/@MY_IP_ADDRESS@/$my_ip_address/; " < $BIRD_CONF_TEMPLATE > $BIRD_CONF # Generate peering config. for peer_ip in $peer_ips; do sed -e " s/@ID@/$peer_ip/; s/@DESCRIPTION@/Connection to $peer_ip/; s/@MY_IP_ADDRESS@/$my_ip_address/; s/@PEER_IP_ADDRESS@/$peer_ip/; s/@AS_NUMBER@/$as_number/; " < $BIRD_CONF_PEER_TEMPLATE >> $BIRD_CONF done echo BIRD configuration generated at $BIRD_CONF service bird restart echo BIRD restarted
Add remote copy without complex protocals by tar -C
# Usage: cpfromserver.sh $* <<< "-pPassword -i YourRSA.id_rsa yourserver@0.0.0.0" echo -n "ssh " read SSH_OPTIONS LOCAL_ROOT=~/remote REMOTE_TRANSFER="tar -C / -cf - $*" LOCAL_TRANSFER="tar -C $LOCAL_ROOT/ -xvf -" ssh $SSH_OPTIONS $REMOTE_TRANSFER | $LOCAL_TRANSFER
Add small script to format manpages different ways
#!/bin/bash if [ "X$TARGET" = "X" ]; then TARGET=/tmp/minion-pages fi mkdir $TARGET || true; for PAGE in mixminion.1 mixminionrc.5 mixminiond.conf.5 mixminiond.8; do echo $PAGE man2html $PAGE > $TARGET/$PAGE.html man ./$PAGE | perl -pe 's/.\x08//g;' > $TARGET/$PAGE.txt man -t ./$PAGE > $TARGET/$PAGE.ps ps2pdf $TARGET/$PAGE.ps $TARGET/$PAGE.pdf rm -f $TARGET/$PAGE.ps.gz gzip -9 $TARGET/$PAGE.ps done
Add a "run ui unittests" shell script to be later used by Kokoro CI.
#!/bin/bash # Copyright (C) 2018 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" ROOT_DIR="$(realpath ${SCRIPT_DIR}/../..)" cd ${ROOT_DIR} # Check that the expected environment variables are present (due to set -u). echo PERFETTO_TEST_GN_ARGS: ${PERFETTO_TEST_GN_ARGS} OUT_PATH="out/dist" tools/install-build-deps --no-android --ui tools/gn gen ${OUT_PATH} --args="${PERFETTO_TEST_GN_ARGS}" --check tools/ninja -C ${OUT_PATH} ui 2>&1 | grep -v "no version information available" # Run the tests ${OUT_PATH}/ui_unittests --ci
Add a test to reproduce subproject mismatch
#!/bin/bash SCRIPT_DIR=$( dirname $( readlink -e $0 ) ) . "$SCRIPT_DIR/../_utils.sh" function the-test() { # Initialize repo mkdir -p repo && cd repo git init # Initialize source branch git checkout --orphan source mkdir subtree1 subtree2 echo -en "b\nc\nd" > subtree1/subtree.txt echo -en "e\nf\ng" > subtree2/subtree.txt git add -A git commit -m "source: initial commit" # Initialize target branch git checkout --orphan target echo "target branch" > file.txt git add -A git commit -m "target: initial commit" # Initialize subprojects git subproject init my-subproject1 source --their-prefix=subtree1 git subproject init my-subproject2 source --their-prefix=subtree2 # Update subprojects within target branch git checkout target echo -en "\ne\n" >> my-subproject1/subtree.txt echo -en "\nh\n" >> my-subproject2/subtree.txt git add -A git commit -m "target: update my-subtree" # Push changes git subproject push my-subproject1 git subproject push my-subproject2 # Assert CURRENT_BRANCH=$( git symbolic-ref --short HEAD ) [[ "$CURRENT_BRANCH" == "target" ]] || return 1 git checkout source diff -c - subtree1/subtree.txt << EOF || return 1 b c d e EOF diff -c - subtree2/subtree.txt << EOF e f g h EOF return $? } invoke-test $@
Add a test that pushes a bunch of commits
#Make a bunch of commits and push
cd RepoA
git config user.email "jdoe@myemail.com"
git config user.name "John Doe"

# Create four numbered commits touching File1.txt (replaces four
# copy-pasted stanzas with a loop; same commits in the same order).
for i in 1 2 3 4; do
    echo "$i" >> File1.txt
    git add File1.txt
    git commit -m "This is a commit to RepoA $i"
done

git push --progress "origin" master:master
read -p "paused"
Add a simple CSV benchmark script
#!/bin/bash
# Run argon2-bench2 and re-emit its whitespace-separated table as CSV.
dirname="$(dirname "$0")"
cd "$dirname/.." || exit 1

echo "t_cost,m_cost,lanes,ms_i,ms_d"
# stdbuf keeps both stages line-buffered so rows stream out as they are
# produced; tail -n +2 drops the benchmark's own header line.
stdbuf -oL ./argon2-bench2 | stdbuf -oL tail -n +2 | while read -r LINE; do
    # Split the row on whitespace and re-join the fields with commas
    # (replaces the manual print-a-comma-between-fields loop).
    set -- $LINE
    (IFS=,; echo "$*")
done
Bring update script for Travis variables.
#!/bin/sh
# Copyright (c) 2016 Wojciech A. Koszek <wojciech@koszek.com>

# This script takes the shell script (with "export VAR=value" lines) from
# the local OSX box where I have variables for Fastlane/Crashlytics/Slack
# and emits the "travis env set" commands that update the Travis CI
# variables.

# Require exactly one argument and make sure it is a readable file.
if [ $# -ne 1 ] || [ ! -f "$1" ]; then
	echo "usage: $0 <file-with-export-lines>" >&2
	exit 1
fi

# Turn each "export NAME=VALUE" into "travis env set NAME VALUE".
grep export "$1" | sed 's/=/ /' | awk '{ printf("travis env set %s %s\n", $2, $3); }'
Add sharness tests for CID security
#!/bin/sh # # Copyright (c) 2017 Jakub Sztandera # MIT Licensed; see the LICENSE file in this repository. # test_description="Cid Security" . lib/test-lib.sh test_init_ipfs test_expect_success "adding using unsafe function fails with error" ' echo foo | test_must_fail ipfs add --hash murmur3 2>add_out ' test_expect_success "error reason is pointed out" ' grep "insecure hash functions not allowed" add_out ' test_expect_success "adding using too short of a hash function gives out an error" ' echo foo | test_must_fail ipfs block put --mhlen 19 2>block_out ' test_expect_success "error reason is pointed out" ' grep "hashes must be at 20 least bytes long" block_out ' test_cat_get() { test_expect_success "ipfs cat fails with unsafe hash function" ' test_must_fail ipfs cat zDvnoLcPKWR 2>ipfs_cat ' test_expect_success "error reason is pointed out" ' grep "insecure hash functions not allowed" ipfs_cat ' test_expect_success "ipfs get fails with too short function" ' test_must_fail ipfs get z2ba5YhCCFNFxLtxMygQwjBjYSD8nUeN 2>ipfs_get ' test_expect_success "error reason is pointed out" ' grep "hashes must be at 20 least bytes long" ipfs_get ' } # should work offline test_cat_get # should work online test_launch_ipfs_daemon test_cat_get test_kill_ipfs_daemon test_done
Add script to create config.
#!/bin/bash cat > "config.js" << EOF /* SPHERE.IO credentials */ exports.config = { client_id: "${SPHERE_CLIENT_ID}", client_secret: "${SPHERE_CLIENT_SECRET}", project_key: "${SPHERE_PROJECT_KEY}" } EOF
Split beam search and scoring with kaldi
#!/bin/bash
# Split beam search and scoring with Kaldi: filter the groundtruth and
# decoded transcripts, then score character- and word-level WER.
set -e

KU=$KALDI_ROOT/egs/wsj/s5/utils
KL=$KALDI_ROOT/egs/wsj/s5/local

. $KU/parse_options.sh

if [ $# -ne 2 ]; then
    echo "usage: `basename $0` <dir> <part>"
    echo "options:"
    echo " --part name #partition to score"
    # Fixed typo in the help text: "beam_serch_opts" -> "beam_search_opts".
    echo " --beam_search_opts 'opts' #opts passed to beam search"
    exit 1
fi
dir=$1
part=$2

# Aggregate groundtruth
cat $dir/$part-groundtruth-text.txt | sort | $KL/wer_ref_filter > $dir/tmp
mv $dir/tmp $dir/$part-groundtruth-text.txt

# Aggregate decoded
# NOTE(review): $lexicon is not set anywhere in this script -- presumably it
# is expected from the environment or from parse_options.sh; confirm.
$LVSR/bin/decoded_chars_to_words.py $lexicon $dir/$part-decoded.out - |  $KL/wer_hyp_filter > $dir/$part-decoded-text.out

# Score
compute-wer --text --mode=all ark:$dir/$part-groundtruth-characters.txt ark:$dir/$part-decoded.out $dir/$part-characters.errs > $dir/$part-characters.wer
compute-wer --text --mode=all ark:$dir/$part-groundtruth-text.txt ark:$dir/$part-decoded-text.out $dir/$part-text.errs > $dir/$part-text.wer
Add install nginx script file.
#!/bin/sh
# Fetch, build and install nginx (with pcre/openssl/zlib) from source.
set -x
cd
mkdir src && cd src
ret=$?
# POSIX test instead of the bash-only "(( ... ))" arithmetic command,
# which is not guaranteed to exist under #!/bin/sh.
if [ "$ret" -eq 0 ]
then
    # Pull the source tarballs from the internal host.
    scp mp3@tc-mp3-dxfgl02.tc.baidu.com:/home/mp3/src/pcre-8.12.tar.gz ./
    scp mp3@tc-mp3-dxfgl02.tc.baidu.com:/home/mp3/src/openssl-1.0.0d.tar.gz ./
    scp mp3@tc-mp3-dxfgl02.tc.baidu.com:/home/mp3/src/zlib-1.2.5.tar.gz ./
    scp mp3@tc-mp3-dxfgl02.tc.baidu.com:/home/mp3/src/nginx-1.1.0.tar.gz ./
    tar -zxf pcre-8.12.tar.gz
    tar -zxf openssl-1.0.0d.tar.gz
    tar -zxf zlib-1.2.5.tar.gz
    tar -zxf nginx-1.1.0.tar.gz
    cd nginx-1.1.0
    # Build nginx against the just-unpacked dependency trees.
    ./configure --prefix=/home/mp3/nginx --with-http_realip_module --with-http_sub_module --with-http_flv_module --with-http_dav_module --with-http_gzip_static_module --with-http_stub_status_module --with-http_addition_module --with-pcre=/home/mp3/src/pcre-8.12 --with-openssl=/home/mp3/src/openssl-1.0.0d --with-http_ssl_module --with-zlib=/home/mp3/src/zlib-1.2.5
    make
    make install
fi
Add a script for me to link the SM2 files into the app
#!/bin/sh
# Link the SoundManager2 (SM2) distribution files into the app, replacing
# any previously-installed copies.

SM2=~/projects/SoundManager2
APP=~/projects/musicsocial/public

# Flash movie used by SM2 (-f so a missing old copy is not an error).
rm -f "$APP"/soundmanager/*.swf
ln -s "$SM2/swf/soundmanager2_flash9.swf" "$APP/soundmanager/soundmanager2_flash9.swf"

# SM2 JavaScript API.
rm -f "$APP/javascripts/player/soundmanager2.js"
ln -s "$SM2/script/soundmanager2.js" "$APP/javascripts/player/soundmanager2.js"
Add delete old indexes script
#!/bin/bash
# Delete the old per-event meshblu Elasticsearch indexes
# (meshblu_events_<event>_v<VERSION>) from the local node.

VERSION="1"

for event in 100 101 102 200 201 204 205 300 301 302 303 304 305 400 401 402 403 500 600 700 701; do
  # magic
  curl -XDELETE "http://localhost:9200/meshblu_events_${event}_v${VERSION}"
done
Add script to run sample files in sequence, storing output in specified directory.
#!/bin/sh
# Run the ALARA sample problems in sequence, storing each run's tree and
# output files in the directory given as the first argument.
export outdir=$1

# Start from a clean output directory.
rm -rf $outdir
mkdir $outdir

# One loop replaces eight identical copy-pasted stanzas.
for n in 1 2 3 4 5 6 7 8; do
    echo sample$n
    alara -v 3 -t $outdir/sample$n.tree sample$n > $outdir/sample$n.out
done
Add a script to update translations
#!/bin/sh ldir=src/usr/local/share/locale : ${MSGFMT=$(which msgfmt)} if [ -z "${MSGFMT}" -o ! -e "${MSGFMT}" ]; then echo "ERROR: msgfmt not found" exit 1 fi if [ ! -d $ldir ]; then echo "ERROR: Locale dir (${ldir}) not found" exit 1 fi if ! ./tools/scripts/update_pot.sh; then echo "ERROR: Unable to update pot" exit 1 fi if git status -s | grep -q "${ldir}/pot/pfSense.pot"; then git add ${ldir}/pot/pfSense.pot git commit -m "Regenerate pot" if ! zanata-cli -B push; then echo "ERROR: Unable to push pot to Zanata" exit 1 fi fi #zanata-cli -B pull --min-doc-percent 75 if ! zanata-cli -B pull; then echo "ERROR: Unable to pull po files from Zanata" exit 1 fi unset commit for po in $(git status -s ${ldir}/*/*/pfSense.po | awk '{print $2}'); do if ! $MSGFMT -o ${po%%.po}.mo ${po}; then echo "ERROR: Error compiling ${po}" exit 1 fi git add $(dirname ${po}) commit=1 done if [ -n "${commit}" ]; then git commit -m "Update translation files" fi
Rebase non-master local branch with remote branch
#!/bin/bash -x
# Rebase a non-master local branch onto its upstream counterpart.

if [ "$#" -ne 1 ]; then
    echo "Illegal number of parameters"
    echo "$0 <branch-name>"
    exit 1
fi

branch=$1

# Switch to the branch, pull the matching upstream ref, and rebase on it.
git checkout $branch
git fetch upstream $branch
git rebase upstream/$branch
git status
#git push origin master
Add script to bootstrap new rackspace VMs with puppet
#!/bin/sh # client-bootstrap.sh: preps a RS CentOS VM for puppet. # Installs puppet, hiera, and their dependencies. # This script assumes a CentOS 6.x host. # Install Puppet repos. Note: EPEL is installed by default on Rackspace CentOS images. rpm -q --quiet puppetlabs-release || rpm -Uvh http://yum.puppetlabs.com/puppetlabs-release-el-6.noarch.rpm # Install puppet and friends. yum -y install puppet git rubygems rubygems-deep-merge # Configure puppet. echo " pluginsync = true" >> /etc/puppet/puppet.conf echo " server = prod-util1.cashmusic.org" >> /etc/puppet/puppet.conf
Add failure log generator script
#!/bin/bash FAILURES_STR=$(deckard list-failures -o) IFS=$'\n' read -rd '' -a FAILURES <<<"$FAILURES_STR" for FAILURE in "${FAILURES[@]}"; do TIMESTAMP=$(echo $FAILURE | awk '{print $1}') UUID=$(echo $FAILURE | awk '{print $3}') FILENAME="logs/${TIMESTAMP}_${UUID}.log" echo "writing: $FILENAME" LOG=$(deckard trace $UUID) FLOW_UUID=$(echo "$LOG" | grep 'FLOWUUID' | awk '{print $2}') FLOW_ACTIVITY=$(deckard flow-activity $FLOW_UUID) STOP_STARTS=$(echo "$FLOW_ACTIVITY" | grep 'app-octoblu' | grep 'begin') echo "$LOG" > $FILENAME echo $'\n\nFLOW ACTIVITY\n' >> $FILENAME echo "$FLOW_ACTIVITY" >> $FILENAME echo $'\n\nFLOW STOP STARTS\n' >> $FILENAME echo "$STOP_STARTS" >> $FILENAME done
Create script to sync directories
#!/bin/bash
# Script for replicating a directory into a per-host subdirectory of a
# destination, so several systems can share the same destination tree.

# get arguments
target=$1  # the directory we want to replicate
dest=$2    # the place we want to replicate to

usage () {
	echo "Usage:"
	echo ""
	echo "replicator.sh target destination"
	echo ""
	echo "target: Directory to replicate"
	echo "destination: Place to replicate to"
	exit
}

# Exactly two arguments are required. The expansions are quoted so empty
# or space-containing values do not break the tests (the unquoted
# originals silently misbehaved).
if [ -z "$dest" ] || [ -n "$3" ]; then
	usage
fi

# get the hostname of the system
name=$HOSTNAME

# -p: no error if the directory already exists, create parents as needed.
mkdir -p "${dest}/${name}"

rsync -rp "$target" "${dest}/${name}"
Add script to extract CSV table from log file
#!/bin/bash # # Usage: # # $ scripts/make-csv.sh path/to/nanopub-monitor.log > nanopub-monitor.csv # cat $1 \ | grep " ch.tkuhn.nanopub.monitor.ServerData - Test result: " \ | sed -r 's/^\[INFO\] ([^ ]*) .* Test result: ([^ ]*) ([^ ]*) ([^ ]*)( ([^ ]*))?$/\1,\2,\3,\4,\6/'
Add installer shell script for installing recommended packages in Debian-based systems
#!/bin/bash

# Install recommended packages (PHP scrypt and libsodium extensions) on
# Debian-based systems, then restart whichever PHP runtime is present.

if [[ $EUID -ne 0 ]]; then
	echo "You need to be root to install prerequisites"
	# Bug fix: the original printed the warning but kept going, so every
	# privileged command below failed one by one.
	exit 1
fi

apt-get install -y php5-dev

# Let's install scrypt if it doesn't already exist
SCRYPTINSTALLED=`rgrep scrypt.so /etc/php5 | (wc -l)` >/dev/null 2>&1

if [[ $SCRYPTINSTALLED -ne 0 ]]; then
	echo "scrypt already installed"
else
	yes | pecl install scrypt
	echo "extension=scrypt.so" > /etc/php5/mods-available/scrypt.ini
	php5enmod scrypt
fi

# Let's install libsodium if it doesn't already exist
SODIUMINSTALLED=`rgrep sodium.so /etc/php5 | (wc -l)` >/dev/null 2>&1

if [[ $SODIUMINSTALLED -ne 0 ]]; then
	echo "The PHP libsodium extension is already installed"
else
	yes | pecl install sodium
	echo "extension=sodium.so" > /etc/php5/mods-available/sodium.ini
	php5enmod sodium
fi

# Restart the PHP runtime(s) so the new extensions get loaded.
if [[ -d /etc/php5/fpm ]]; then
	service php5-fpm restart
fi

if [[ -d /etc/php5/apache2 ]]; then
	service apache2 restart
fi
Add script to build all meta-meson machines
#!/bin/bash set -ex if [ -z "$1" ] ; then echo "Usage: $0 <path to meta-meson> [bitbake command]" exit 1 fi BITBAKE_CMD="$2" if [ -z "$BITBAKE_CMD" ] ; then BITBAKE_CMD="core-image-base" fi echo "Running bitbake $BITBAKE_CMD for all meta-meson machines." for machine in $1/conf/machine/*.conf ; do name="$(basename $machine | cut -d. -f1)" MACHINE="$name" bitbake $BITBAKE_CMD done
Add a new script to check version of a docker image
#!/usr/bin/env bash # This script intends to get the digest of the last built image of the indicated repo/tag REPO="surli/librepair" TAG="latest" TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$REPO:pull" | jq -r .token) curl -I -H "Authorization: Bearer $TOKEN" "https://index.docker.io/v2/$REPO/manifests/$TAG"
Add file to run the whole husc pipeline at once
#!/bin/bash # Created by the VLSCI job script generator for SLURM on x86 # Wed Nov 13 2013 00:00:51 GMT+1100 (EST) # Partition for the job: #SBATCH -p main # The name of the job: #SBATCH --job-name="features" # Maximum number of CPU cores used by the job: #SBATCH --ntasks=1 # The amount of memory in megabytes per process in the job: #SBATCH --mem-per-cpu=4096 # Send yourself an email when the job: # aborts abnormally (fails) #SBATCH --mail-type=FAIL # ends successfully #SBATCH --mail-type=END # The maximum running time of the job in days-hours:mins:sec #SBATCH --time=0-10:0:00 # Invoke this script with the directory containing the files being # analysed as the sole argument. # Run the job from the directory where it was launched (default): # The job command(s): export PATH=${HOME}/anaconda/envs/husc/bin:$PATH d=$1 husc crop -O $TMPDIR 100 none 250 none $d/*_s1_w*.TIF husc crop -O $TMPDIR 100 none none -300 $d/*_s2_w*.TIF husc crop -O $TMPDIR none -100 250 none $d/*_s3_w*.TIF husc crop -O $TMPDIR none -100 none -300 $d/*_s4_w*.TIF for q in s1 s2 s3 s4; do for ch in w1 w2 w3; do husc illum -L 0.05 -q 0.95 -r 51 -s $d/illum_${q}_${ch}.tif -v \ $TMPDIR/*${q}_${ch}*.crop.tif done done for ch in w1 w2 w3; do husc stitch $TMPDIR/M*${ch}*.crop.illum.tif done husc cat $TMPDIR/M*stitched.tif husc features $TMPDIR/M*stitched.chs.tif $d/features.h5
Add script that benchmarks new commits
#!/bin/bash set -e function get_next_commit() { local LAST_COMMIT=$1 local NEXT_COMMIT=$(git log master ^$LAST_COMMIT --ancestry-path --pretty=oneline | cut -d" " -f1 | tail -n 1) echo "$NEXT_COMMIT" } BENCHMARK_RESULTS_DIR="$HOME/.benchmark_results" mkdir -p $BENCHMARK_RESULTS_DIR CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" LAST_COMMIT=$(cat $BENCHMARK_RESULTS_DIR/last_commit) NEXT_COMMIT=$(get_next_commit $LAST_COMMIT) git checkout -q master git pull -q --rebase while [[ $NEXT_COMMIT != '' ]] do echo $NEXT_COMMIT git checkout -q $NEXT_COMMIT bash $CWD/benchmark-initial-sync.sh > "$BENCHMARK_RESULTS_DIR/$NEXT_COMMIT-results.txt" echo "$NEXT_COMMIT" > "$BENCHMARK_RESULTS_DIR/last_commit" NEXT_COMMIT=$(get_next_commit $NEXT_COMMIT) done
Add a script which runs all tests.
#!/bin/sh # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if test ! -f devel/bin/test_all.sh then echo Error: can only run from root dir of repository exit 1 fi # C cd clownfish/compiler/c ./configure make -j make -j test C_CFC_RESULT=$? cd ../../runtime/c ./configure make -j make -j test C_CFISH_RUNTIME_RESULT=$? cd ../../../c ./configure make -j make -j test C_LUCY_RESULT=$? make distclean # Perl cd ../clownfish/compiler/perl perl Build.PL ./Build test PERL_CFC_RESULT=$? cd ../../runtime/perl perl Build.PL ./Build test PERL_CFISH_RUNTIME_RESULT=$? cd ../../../perl perl Build.PL ./Build test PERL_LUCY_RESULT=$? ./Build realclean # Exit with a failing value if any test failed. if [ $C_CFC_RESULT -ne 0 ] \ || [ $C_CFISH_RUNTIME_RESULT -ne 0 ] \ || [ $C_LUCY_RESULT -ne 0 ] \ || [ $PERL_CFC_RESULT -ne 0 ] \ || [ $PERL_CFISH_RUNTIME_RESULT -ne 0 ] \ || [ $PERL_LUCY_RESULT -ne 0 ] then exit 1 fi exit 0
Add new boiler plate script
#!/bin/bash
# Boiler-plate: parse a --dry-run flag plus positional SFTP sync arguments.
set +o errexit

me=`basename $0`
myDir=`readlink -e $0 | xargs basename`

short='d'
long='dry-run'
opts=`getopt -o "$short" --long "$long" -- "$@"`
eval set -- "$opts"

# Bug fix: must be declared associative. With the original indexed array
# ("option=()") the string subscript 'dry-run' is evaluated arithmetically
# and silently maps to index 0.
declare -A option=()
while true; do
  case "$1" in
    -d | --dry-run ) option['dry-run']=true; shift;;
    -- ) shift; break;;
    * ) break;;
  esac
done

# Three positional arguments are mandatory; the fourth (LOCAL_DB) is optional.
if [ "$#" -lt 3 ]; then
  echo "Usage $me SFTP_URL LOCAL_DIR REMOTE_DIR [LOCAL_DB]"
  exit 1
fi

sftp="$1"
localDir="$2"
remoteDir="$3"

if [ "$#" -eq 4 ]; then
  localDb="$4"
else
  localDb="$myDir/local.db"
fi

echo "$sftp $localDir $remoteDir $localDb"
echo "${option['dry-run']}"
echo "$@"
Add script to run danger
#!/bin/bash # To run on jenkins, use `bash scripts/run_danger.sh` in shell. # Assumes that we're in commcare-android directory. # Unfortunately jenkins errors out `source: not found` so we can't really save these in .bashrc export PATH="$HOME/.rbenv/bin:$PATH" export PATH="$HOME/.rbenv/shims:$PATH" export PATH="$HOME/.rbenv/plugins/ruby-build/bin:$PATH" # We need ruby v3.0.0 to be able to use gems. rbenv global 3.0.0 gem install danger gem install danger-jira gem install danger-android_lint danger --fail-on-errors=true
Add startup script for nitpick.
#!/bin/sh
# Startup script for nitpick: make sure the aspen working directories
# exist, then launch CruiseControl with the nitpick configuration.

./initcc-open.sh

# mkdir -p creates each directory only when missing, replacing four
# separate test-then-mkdir stanzas.
mkdir -p aspen logs/aspen artifacts/aspen build/aspen

cruisecontrol.sh -configfile /home/cruise/work/config-nitpick.xml -port 8080
Add simple script to locally create wheels for different python versions (useful for arm64)
#!/bin/bash # Simple script to create galpy wheels for a bunch of different # Python version locally; useful for creating ARM64 wheels, because # not possible with CIs currently source ~/.bash_profile PYTHON_VERSIONS=("3.8" "3.9" "3.10") rm -rf galpy-wheels-output mkdir galpy-wheels-output # Loop over the entire thing to make sure nothing gets re-used for PYTHON_VERSION in "${PYTHON_VERSIONS[@]}"; do git clone https://github.com/jobovy/galpy.git galpy-wheels cd galpy-wheels git checkout v1.7.1 mkdir wheelhouse conda activate base; conda create -y --name galpywheels"$PYTHON_VERSION" python="$PYTHON_VERSION"; conda env update --name galpywheels"$PYTHON_VERSION" --file .github/conda-build-environment-macos-latest.yml --prune; conda activate galpywheels"$PYTHON_VERSION"; pip install wheel; CFLAGS="$CFLAGS -I$CONDA_PREFIX/include"; LDFLAGS="$LDFLAGS -L$CONDA_PREFIX/lib"; LD_LIBRARY_PATH="$LD_LIBRARY_PATH -L$CONDA_PREFIX/lib"; python setup.py build_ext; python setup.py bdist_wheel -d wheelhouse; mv wheelhouse/* ../galpy-wheels-output; conda activate base; conda remove -y --name galpywheels"$PYTHON_VERSION" --all; cd ../; rm -rf galpy-wheels; done
Fix test for libxml errors
#!/bin/bash set -e set -o pipefail if [ -f /bin/rpm ] && /bin/rpm -q libxml2; then # workaround trac#346: skip this test version="`/bin/rpm -q --qf '%{VERSION}\n' libxml2 | tail -n 1`"; if echo $version | \ awk -F. '{print ($1 == 2 && ($2 < 7 || ($2 == 7 && $3 < 7))) ? "true" : "false"}' | \ grep true; then exit 0; fi fi name=$(basename $0 .sh) stderr=$(mktemp -t ${name}.err.XXXXXX) echo "Stderr file = $stderr" ret=0 $OSCAP info ${srcdir}/${name}.xccdf.xml 2> $stderr || ret=$? [ $ret -eq 1 ] [ -f $stderr ] [ -s $stderr ] cat $stderr | tail -n +4 | grep '^OpenSCAP Error:' rm $stderr
#!/bin/bash set -e set -o pipefail if [ -f /bin/rpm ] && /bin/rpm -q libxml2; then # workaround trac#346: skip this test version="`/bin/rpm -q --qf '%{VERSION}\n' libxml2 | tail -n 1`"; if echo $version | \ awk -F. '{print ($1 == 2 && ($2 < 7 || ($2 == 7 && $3 < 7))) ? "true" : "false"}' | \ grep true; then exit 0; fi fi name=$(basename $0 .sh) stderr=$(mktemp -t ${name}.err.XXXXXX) echo "Stderr file = $stderr" ret=0 $OSCAP info ${srcdir}/${name}.xccdf.xml 2> $stderr || ret=$? [ $ret -eq 1 ] [ -f $stderr ] [ -s $stderr ] cat $stderr | tail -n +1 | grep '^OpenSCAP Error:' rm $stderr
Include --delete option in rsync to remove destination files no longer existing locally
# Deploy the site package to each production web server over rsync.
PACKAGE=.                                # local directory to sync
REMOTEDIR=/var/www/www_ethereum_org/public_html/
KEYPATH=~/Downloads/prod-web.pem         # SSH identity used for the servers
SERVERS=(54.213.131.123 54.213.131.108)

# Sync the package to every server in turn.
for ix in ${!SERVERS[*]}
do
    printf "......................\nConnecting to %s...\n" "${SERVERS[$ix]}"
    # -a archive, -v verbose, -z compress, -l keep symlinks as symlinks.
    rsync -avzl -e "ssh -i ${KEYPATH}" --exclude ".git" ${PACKAGE} ubuntu@${SERVERS[$ix]}:${REMOTEDIR}
done
# Deploy the site package to each production web server over rsync.
PACKAGE=.                                # local directory to sync
REMOTEDIR=/var/www/www_ethereum_org/public_html/
KEYPATH=~/Downloads/prod-web.pem         # SSH identity used for the servers
SERVERS=(54.213.131.123 54.213.131.108)

# Sync the package to every server in turn.
for ix in ${!SERVERS[*]}
do
    printf "......................\nConnecting to %s...\n" "${SERVERS[$ix]}"
    # --delete removes destination files that no longer exist locally, so
    # the remote tree exactly mirrors the local package.
    rsync -avzl -e "ssh -i ${KEYPATH}" --delete --exclude ".git" ${PACKAGE} ubuntu@${SERVERS[$ix]}:${REMOTEDIR}
done
Add script to set up server
#!/bin/bash
# Set up a new server: create the VM, then bootstrap it with Chef.
host=$1
addr=$2

# Create the virtual machine with the requested hostname and IP.
bash vmfiles/createvm.sh $host $addr

# Bootstrap the node and apply the simple_webserver role.
knife bootstrap $addr -x brain -N $host-toad -P password -r 'role[simple_webserver]' --sudo
Add the bogus feedsize test
#! /bin/bash # # Test the e-k parser to make sure that altering the feedsize doesn't change # which tags get recognized. This is really a boundary condition. # NUMTAGS=1000 NUMFEED=1024 echo Generating crazy.page with $NUMTAGS tags python gen_html.py $NUMTAGS 1 > crazy.page feedsize=1 echo "Parsing with $NUMFEED different feed sizes" while [ $(($feedsize < $NUMFEED)) -eq 1 ]; do ./tester $feedsize < crazy.page > crazy.page.out.$feedsize feedsize=$(($feedsize + 1)) done NUMSIZES=`ls -la crazy.page.out.* | awk '{print $5}' | sort | uniq | wc -l` if [ $NUMSIZES -ne 1 ] ; then echo 'FAILURE -- Leaving crazy.page.out.* for examination' exit 1 fi rm -f crazy.page.out.*
Create cron for monthly scrape
#!/bin/bash

# Publish to hoh-gwuhn-scrape at the 1st of every month for UTC Noon

# Recreate the job idempotently: --quiet skips gcloud's interactive
# confirmation prompt, and "|| true" tolerates the job not existing yet
# on the very first run.
gcloud beta scheduler jobs delete monthly-scrape --quiet || true

gcloud beta scheduler jobs create pubsub monthly-scrape --topic="hoh-gwuhn-scrape" \
  --schedule="0 12 1 * *" --message-body="{}" \
  --description="Publish to hoh-gwuhn-scrape at the 1st of every month for UTC Noon" \
  --max-retry-attempts=5 --min-backoff="1h" --max-backoff="1d"
Add script to run yocto compatibility check
#!/bin/bash # Copyright (C) 2018 Luxoft Sweden AB # # Permission to use, copy, modify, and/or distribute this software for # any purpose with or without fee is hereby granted, provided that the # above copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL # WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR # BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES # OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, # ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. # # For further information see LICENSE # # Usage: run-yocto-check-layer.sh <yoctodir> # # This checks yocto compatibility of the layer. # This script depends on bitbake initialization and image built. # YOCTO_DIR="$1" echo "Running yocto compatibility checks in $YOCTO_DIR" # Set up bitbake environment cd "$YOCTO_DIR/sources/poky/" source oe-init-build-env ../../build #Run the compatibility checks yocto-check-layer $YOCTO_DIR/sources/meta-pelux $YOCTO_DIR/sources/meta-bistro
Add support for API Blueprint to Atom and HTML render via aglio.
# API Blueprint tooling if test ! $(which aglio) then nvm unload sudo npm install -g aglio protagonist nvm use 4 fi apm install language-api-blueprint apm install linter-api-blueprint apm install api-blueprint-preview
Add a shell script to check which compiler is used.
#!/bin/bash #set -x set -e set -u if [ "$#" -ne 3 ]; then echo "Usage: $0 <gnu|clang> <wanted_version> <compiler_name>" exit 2 fi wanted_compiler="$1" wanted_version="$2" compiler_name="$3" if [[ ${wanted_compiler} != "gnu" ]] && [[ ${wanted_compiler} != "clang" ]]; then echo "Please select gnu or clang." exit 3 fi if [ ${#wanted_version} -ne 5 ]; then echo "Please give a 5-digit version number." exit 4 fi if [[ ${compiler_name} != "g"* ]] && [[ ${compiler_name} != "clang"* ]]; then echo "Only GNU and Clang compilers are supported." exit 5 fi if [[ ${wanted_compiler} == "gnu" ]] && [[ ${compiler_name} != "g"* ]]; then exit 1 fi if [[ ${wanted_compiler} == "clang" ]] && [[ ${compiler_name} != "clang"* ]]; then exit 1 fi # GNU if [[ ${compiler_name} == "g"* ]]; then version=$(${compiler_name} -dumpversion) if [ ${#version} -eq 1 ]; then version=$(${compiler_name} -dumpfullversion) fi fi # Clang if [[ ${compiler_name} == "clang"* ]]; then version=$(${compiler_name} -dumpversion) fi # See: http://qiita.com/misho/items/f1c0748e9a31bea62e5c # #ifndef GCC_VERSION # #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) # #endif long_version=$(echo "${version}" | sed -e 's/\.\([0-9]\)\b/0\1/g' -e 's/\.//g') if [ ${long_version} -ge ${wanted_version} ]; then echo 0 exit 0 else echo 1 exit 1 fi
Add a script that will generate a new vblock given a kernel partition.
#!/bin/bash # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Script that just takes in a kernel partition and outputs a new vblock # signed with the specific keys. For use on signing servers. # vbutil_kernel must be in the system path. # Abort on error set -e # Check arguments if [ $# -ne 4 ] ; then echo "usage: $0 src_kpart dst_vblock kernel_datakey kernel_keyblock" exit 1 fi # Make sure the tools we need are available. type -P vbutil_kernel &>/dev/null || \ ( echo "vbutil_kernel tool not found."; exit 1; ) src_kpart=$1 dst_vblock=$2 kernel_datakey=$3 kernel_keyblock=$4 vbutil_kernel \ --repack "${dst_vblock}" \ --vblockonly \ --keyblock "${kernel_keyblock}" \ --signprivate "${kernel_datakey}" \ --oldblob "${src_kpart}" echo "New kernel vblock was output to ${dst_vblock}"
Add script for joining the components of the IRTG
#!/bin/bash CONLLU_FILE="${1}" python create_terminals.py "${CONLLU_FILE}" | cat ../ud/en_ud_bi.irtg ../ud/ud_dumb_preterms_bi -
Add Bash script to start the Camel Application server
#!/bin/bash
#
# Execute our Java Application with root priviledges since we
# have to bind to a well known port 161 for the SNMP trap handler
#
# Bug fix: redirect stdout to the log first, THEN point stderr at stdout,
# so both streams land in application.log. The original order
# ("2>&1 > application.log") left stderr attached to the terminal.
sudo mvn exec:java -Dexec.mainClass="com.boundary.sdk.BoundaryIntegrationApp" > application.log 2>&1
Add bash completion with fzf
#!/usr/bin/env bash # Bash completion for https://github.com/sachaos/todoist _todoist() { local i cur prev opts cmd fzfquery COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" opts='' cmd='' fzfquery= [ -n "$cur" ] && fzfquery="-q $cur" for i in "${COMP_WORDS[@]}"; do case "${i}" in todoist) cmd='todoist' ;; # These are the current commands; not all have completion options, # but they're listed here anyway, for the future list|show|completed-list|add|modify|close|delete|labels|projects|\ karma|sync|quick|help) cmd+="__${i}" ;; l) cmd+='__list' ;; c-l|cl) cmd+='__completed-list' ;; a) cmd+='__add' ;; m) cmd+='__modify' ;; c) cmd+='__close' ;; d) cmd+='__delete' ;; s) cmd+='__sync' ;; q) cmd+='__quick' ;; h) cmd+='__help' ;; *) ;; esac done # Global options present in all commands opts='--header --color --csv --debug --namespace --indent \ --project-namespace --help -h --version -v ' case "${cmd}" in todoist) opts+='list l show completed-list c-l cl add a modify m close c \ delete d labels projects karma sync s quick q help h' ;; todoist__add|todoist__modify) opts+='--priority -p --label-ids -L --project-id -P --project-name -N \ --date -d --reminder -r' [ "$cmd" == 'todoist__modify' ] && opts+=' --content -c' case "${prev}" in --priority|-p) opts="1 2 3 4" ;; --label-ids|-L) COMPREPLY=( $(todoist labels | fzf --multi --select-1 --exit-0 \ ${fzfquery} | cut -f 1 -d ' ' | paste -d, -s -) ) return 0 ;; --project-id|-P) COMPREPLY=( $(todoist projects | fzf --select-1 --exit-0 \ ${fzfquery} | cut -f 1 -d ' ') ) return 0 ;; --project-name|-N) COMPREPLY=( "'$(todoist projects | fzf --select-1 --exit-0 \ ${fzfquery} | cut -f 2- -d ' ' | cut -b 2- )'" ) return 0 ;; esac ;; todoist__list|todoist__completed-list) opts+='--filter -f' ;; todoist__show) opts+='--browse -o' ;; esac [ -n "$opts" ] && COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) } complete -F _todoist todoist
Add t5510 to test per branch configuration affecting git-fetch.
#!/bin/sh # Copyright (c) 2006, Junio C Hamano. test_description='Per branch config variables affects "git fetch". ' . ./test-lib.sh D=`pwd` test_expect_success setup ' echo >file original && git add file && git commit -a -m original' test_expect_success "clone and setup child repos" ' git clone . one && cd one && echo >file updated by one && git commit -a -m "updated by one" && cd .. && git clone . two && cd two && git repo-config branch.master.remote one && { echo "URL: ../one/.git/" echo "Pull: refs/heads/master:refs/heads/one" } >.git/remotes/one ' test_expect_success "fetch test" ' cd "$D" && echo >file updated by origin && git commit -a -m "updated by origin" && cd two && git fetch && test -f .git/refs/heads/one && mine=`git rev-parse refs/heads/one` && his=`cd ../one && git rev-parse refs/heads/master` && test "z$mine" = "z$his" ' test_done
Add streetview dataset building script
# Build the streetview SSL dataset splits for two random seeds.
for seed in 1 2; do
    # Empty CUDA_VISIBLE_DEVICES forces CPU-only execution for this command.
    CUDA_VISIBLE_DEVICES= ../scripts/create_split.py --seed=$seed --size=1000 $ML_DATA/SSL/streetview_v4_64 $ML_DATA/streetview_v4_64-train.tfrecord
    # NOTE(review): the command above runs in the foreground (no '&'), so
    # this 'wait' is currently a no-op; it would only matter if the job
    # were backgrounded.
    wait
done
Add script to run all background processes.
#!/bin/bash mkdir -p log function cleanUp() { kill `cat log/mongodb.pid` } trap cleanUp EXIT mkdir -p mongodb mongod --dbpath mongodb --pidfilepath log/mongodb.pid > log/mongodb.log 2>&1 & scripts/web-server.js > log/webserver.log 2>&1 & cd ../monkey-tail supervisor app.js > ../monkey-face/log/app.log 2>&1 & cd ../monkey-face echo "Go to http://localhost:8000/app/index.html" wait
Add setup and run peer script.
#!/usr/bin/env bash # Bash3 Boilerplate. Copyright (c) 2014, kvz.io set -o errexit set -o pipefail set -o nounset # set -o xtrace ONYX_REV=$1 BENCHMARK_REV=$2 RUN_ID=$3 VPEERS=$4 DEPLOYMENT_ID=$1"_"$2 killall -9 java || true cd /onyx git fetch --all git pull --all git checkout $ONYX_REV lein install cd /onyx-benchmark git fetch --all git pull --all git checkout $BENCHMARK_REV LEIN_ROOT=1 lein run -m onyx-benchmark.peer $DEPLOYMENT_ID $VPEERS &
Configure DB2 for Archive Logging with Log retention
# Path to archive Logs archLogs=/opt/db2archivelogs # get all databases of db2 instance databases=$(db2 list database directory | grep alias | awk '{print $4}' | sort) # Loop through list of databases: for database in ${databases[@]} do echo $database db2 update database configuration for $database using LOGARCHMETH1 LOGRETAIN AUTO_DEL_REC_OBJ ON num_db_backups 1 rec_his_retentn 0 logarchmeth1 disk:$archLogs done
Add script to use "Download as ZIP" functionality from GitHub.
#!/bin/bash set -e rm -rf lib BRANCH_NAME='latests' npm version patch git checkout ${BRANCH_NAME} git merge master grunt build git add -f lib/ libs=$(cat package.json | jq -r '.dependencies' | grep ':' | cut -d: -f1 | tr -d " " | tr -d '"') for lib in $libs; do git add -f node_modules/$lib done git commit -m "Update generated code and runtime dependencies." git push origin ${BRANCH_NAME} git checkout master npm version patch git push origin master
Add script to publish to gh-pages
#!/bin/bash
# Publish the generated site to the gh-pages branch via a git worktree.
# Shebang changed from /bin/sh: the "[[ ... ]]" test below is a bashism
# and is not guaranteed to work under a POSIX sh.
DIR=$(dirname "$0")
cd $DIR/..

# Refuse to publish from a dirty working tree.
if [[ $(git status -s) ]]
then
    echo "The working directory is dirty. Please commit any pending changes."
    exit 1;
fi

echo "Deleting old publication"
rm -rf public
mkdir public
git worktree prune
rm -rf .git/worktrees/public/

echo "Checking out gh-pages branch into public"
git worktree add -B gh-pages public upstream/gh-pages

echo "Removing existing files"
rm -rf public/*

echo "Generating site"
hugo

echo "Updating gh-pages branch"
cd public && git add --all && git commit -m "Publishing to gh-pages (publish.sh)"
Correct version for a whole day
# Run the occultation tool for a whole day: day-of-year 314 through 315 of
# 2017, writing the results to lunar_314_315.occ.
# NOTE(review): argument meanings assumed from context (coordinates, TLE
# file, start/stop timestamps, output file) -- confirm against the occ
# tool's own documentation.
./occ 128.5904 17.5396 Latest_TLE.txt 2017:314:00:00:00 2017:315:00:00:00 lunar_314_315.occ
Add a script to analyze the symbol size in the generated benchmark code.
#!/bin/bash # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a quick-and-dirty script to analyze the ELF symbol sizes in a binary generated by generate_benchmark.py. # It can be used to decide which functions should no longer be inlined, to save space. if [ "$#" != "1" ] then echo "Usage: $0 <executable_file>" exit 1 fi FILE1=$(mktemp) nm --print-size --size-sort --radix=d "$1" | sort -k 2 | c++filt \ | sed 's/[^ ]* //;s/fruit::impl:://g;s/InvokeConstructorWithInjectedArgVector<.*>::operator()/InvokeConstructorWithInjectedArgVector<...>::operator()/' \ | sed 's/getComponent[0-9]\+/getComponent$N/' \ | sed 's/X[0-9]\+/X$N/g' \ | sed 's/Interface[0-9]\+/Interface$N/g' \ | sed 's/GetBindingDepsHelper<.*>::operator()()/GetBindingDepsHelper<...>::operator()()/' \ | sed 's/\(std::shared_ptr<Interface$N>, \)\+std::shared_ptr<Interface$N>/.../' \ >"$FILE1" FILE2=$(mktemp) python3 -c " lines = open('$FILE1', 'r').readlines() total_size = {} for line in lines: splits = line.split(' ', maxsplit=1) total_size[splits[1]] = total_size.get(splits[1], 0) + int(splits[0]) for key, value in total_size.items(): print('%s %s' % (value, key)) " >"$FILE2" sort -n "$FILE2"
Add script to update the changelog for convenience
#!/usr/bin/env bash
# Regenerate CHANGELOG.rst next to this script with gitchangelog and,
# if generation succeeded, commit the updated file.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
changelog_file="$script_dir/CHANGELOG.rst"
gitchangelog > "$changelog_file" && git commit -m "Update changelog" "$changelog_file"
Add a test for help flags.
# Verify the auto-generated help output of test/flags.sh for both help flags.
test::help() {
  # Expected usage text printed by the flags library.
  local expected_message="Usage: test/flags.sh [options ...] [args ...]

Options:
  --help=false: Print this help message and exit. (Alias: --h)
  --flag='': Flag name to show.
  --string='default': String flag.
  --int=100: Integer flag.
  --bool=false: Boolean flag."

  # Help is expected on stderr: 2>&1 >/dev/null captures stderr only
  # (the >/dev/null discards stdout before stderr is duplicated onto it).
  EXPECT_EQ "${expected_message}" "$(bash test/flags.sh --help 2>&1 >/dev/null)"
  EXPECT_EQ "${expected_message}" "$(bash test/flags.sh -h 2>&1 >/dev/null)"

  # There should be no output to the standard output.
  EXPECT_EQ '' "$(bash test/flags.sh --help 2>/dev/null)"
}
Add a small get-started script. Working on Arch linux
#!/bin/bash # Script to get development of the lua game engine up and # running on arch linux if [ "$EUID" -ne 0 ]; then sudo pacman -S --needed sdl2 sdl2_mixer lua51 git wget else pacman -S --needed sdl2 sdl2_mixer lua51 git wget fi wget https://aur.archlinux.org/packages/ch/chipmunk6/chipmunk6.tar.gz tar xzf chipmunk6.tar.gz cd chipmunk6/ makepkg --install --clean --needed cd ../ rm -r chipmunk6/ rm chipmunk6.tar.gz git clone https://github.com/n00bDooD/geng.git
Add script to generate man pages from markdown
#!/bin/bash
# Generate man pages from the markdown sources in this script's directory.
# Files named <page>.<section>.md are rendered with go-md2man into
# ./man<section>/<page>.<section>; anything else (e.g. README.md) is skipped.
# Pass -q to suppress the command trace.
set -e

# get into this script's directory
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"

[ "$1" = '-q' ] || {
	set -x
	pwd
}

for FILE in *.md; do
	base="$(basename "$FILE")"
	name="${base%.md}"
	num="${name##*.}"

	# Skip files that aren't of the format xxxx.N.md (like README.md).
	# When the name contains no dot, ${name##*.} leaves it unchanged, so
	# "$name" == "$num" catches that case.
	# (Replaced the deprecated, ambiguous `[ ... -o ... ]` with `[[ ... || ... ]]`.)
	if [[ -z "$num" || "$name" == "$num" ]]; then
		continue
	fi

	mkdir -p "./man${num}"
	go-md2man -in "$FILE" -out "./man${num}/${name}"
done
Add shell script to create tagged btrfs snapshots
#!/bin/bash
# Create a read-only, timestamped btrfs snapshot of a subvolume.
# Usage: snapshot.sh [subvolume]   (defaults to "@")
#
# The top-level btrfs volume on $ROOTDEV is mounted at a temporary
# directory, the snapshot is taken there, and everything is cleaned up.

# Log an informational message to stdout.
function broadcast.info {
    echo "$1"
}

# Log an error message to stderr.
function broadcast.error {
    echo "$1" >&2
}

# Abort with the given status if it is non-zero.
function check_retval {
    if [ "$1" -ne 0 ]; then
        broadcast.error "Command exited with status $1: exiting"
        exit "$1"
    fi
}

TIMESTAMP=$(date +"%Y%m%d-%H%M%S")
ROOTDEV="/dev/system/root"
SUBVOL="@"

# Optional first argument overrides the subvolume to snapshot.
if [ $# -gt 0 ] && [ -n "$1" ]
then
    SUBVOL="$1"
fi

# Fix: the original logged $tmpmnt *before* assigning it, so the message
# always showed an empty path. Create the directory first, then log it.
tmpmnt=$(mktemp -d)
check_retval $?
broadcast.info "Creating temporary directory $tmpmnt"

broadcast.info "Mounting top-level subvolume"
mount "$ROOTDEV" "$tmpmnt/"
check_retval $?

broadcast.info "Creating snapshot of subvolume $SUBVOL"
btrfs sub snap -r "$tmpmnt/$SUBVOL" "$tmpmnt/@$SUBVOL-$TIMESTAMP"
check_retval $?

broadcast.info "Unmounting top-level subvolume"
umount "$tmpmnt/"
check_retval $?

broadcast.info "Removing temporary directory $tmpmnt"
rmdir "$tmpmnt"
check_retval $?
Add script to retrieve data for Ernest
#!/bin/sh
## Copyright 2019 Eugenio Gianniti
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

# Collect per-query CSV data for Ernest: every CSV found under the given
# data directory is reduced to its first three and last three columns and
# merged into ./<query>/<basename>.csv, where <query> is the first path
# component below the data root.
root="${1?error: missing data directory}"

find "$root" -name '*.csv' | while IFS= read -r filename; do
  # Path relative to the data root; its first component names the query.
  relname="${filename#$root}"
  relname="${relname#/}"
  query="$(echo "$relname" | cut -d / -f 1)"
  base="$(basename "$filename")"

  dir="$query"
  mkdir -p "$dir"
  finalfile="$dir/$base"

  # Fix: the original tested existence with `ls "$dir" | grep -q "^$base$"`,
  # which both parses ls output and treats $base as a regex; a plain file
  # test is correct and safe.
  if [ -e "$finalfile" ]; then
    # Append to an existing aggregate. NOTE(review): this branch skips the
    # first *two* lines (tail -n +3) while the initial branch skips only the
    # header line -- presumably the second line duplicates data already
    # present; confirm against the input format.
    tmp="$dir/aux.csv"
    {
      cat "$finalfile"
      tail -n +3 "$filename" | \
        awk -F , '{ print $1, $2, $3, $(NF - 2), $(NF - 1), $NF }' OFS=,
    } > "$tmp"
    mv "$tmp" "$finalfile"
  else
    # First file for this query: keep the header line verbatim and reduce
    # every data row to the six columns of interest.
    awk -F , 'NR > 1 { print $1, $2, $3, $(NF - 2), $(NF - 1), $NF } NR == 1' OFS=, \
      "$filename" > "$finalfile"
  fi
done
Add openzwave installer for hassbian
#!/bin/bash # Run this script with sudo echo "Open Z-Wave Installer for Hassbian" echo "Copyright(c) 2016 Dale Higgs <https://gitter.im/dale3h>" echo echo "Running apt-get preparation" apt-get update apt-get upgrade apt-get install -y git make echo "Installing latest version of cython" pip3 install --upgrade cython echo "Creating source directory" mkdir -p /srv/homeassistant/src chown -R homeassistant:homeassistant /srv/homeassistant/src echo "Cloning python-openzwave" cd /srv/homeassistant/src git clone --branch v0.3.1 https://github.com/OpenZWave/python-openzwave.git echo "Building python-openzwave" chown homeassistant:homeassistant python-openzwave cd python-openzwave git checkout python3 make build make install echo "Creating libmicrohttpd directory" cd /srv/homeassistant/src mkdir libmicrohttpd chown homeassistant:homeassistant libmicrohttpd echo "Downloading libmicrohttpd-0.9.19" wget ftp://ftp.gnu.org/gnu/libmicrohttpd/libmicrohttpd-0.9.19.tar.gz chown homeassistant:homeassistant libmicrohttpd-0.9.19.tar.gz tar zxvf libmicrohttpd-0.9.19.tar.gz chown homeassistant:homeassistant libmicrohttpd-0.9.19 echo "Building libmicrohttpd-0.9.19" cd libmicrohttpd-0.9.19 ./configure make make install echo "Cloning open-zwave-control-panel" cd /srv/homeassistant/src git clone https://github.com/OpenZWave/open-zwave-control-panel.git chown -R homeassistant:homeassistant open-zwave-control-panel echo "Building open-zwave-control-panel" cd open-zwave-control-panel wget https://raw.githubusercontent.com/home-assistant/fabric-home-assistant/master/Makefile chown homeassistant:homeassistant Makefile make echo "Linking ozwcp config directory" ln -sd /srv/homeassistant/lib/python3.*/site-packages/libopenzwave-0.3.1-*-linux*.egg/config chown -R homeassistant:homeassistant /srv/homeassistant/src echo echo "Done!" echo echo "If you have issues with this script, please contact @dale3h on gitter.im" echo
Add shell script for metadata exporter
#!/bin/sh
# Usage: metadataexporter.sh -b <batchid> -n <roundtripnumber>
#
# Thin launcher for the standalone metadata exporter: resolve the install
# directory (following symlinks so the script may itself be symlinked),
# then run the Java main class with the bundled configuration, passing all
# command-line arguments straight through.
#
# Fix: $0 and the command substitutions are now quoted so the wrapper also
# works from an installation path containing spaces.
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")

java -classpath "$SCRIPT_DIR/../conf:$SCRIPT_DIR/../lib/*" \
 dk.statsbiblioteket.medieplatform.newspaper.metadataexporter.MetadataExporterStandalone -c "$SCRIPT_DIR/../conf/config.properties" "$@"
Add release note script to main repo
#!/bin/sh
# Collect release notes: append a one-line summary of every commit on
# master since the most recent tag, plus a submodule summary for each
# submodule, to commit_summary.txt.

# Fix: split the assignment from `export` so a failure of `git describe`
# is not masked by the always-zero exit status of `export VAR=$(...)`.
PREVIOUS_RELEASE_TAG=$(git describe --abbrev=0)
export PREVIOUS_RELEASE_TAG

git log "$PREVIOUS_RELEASE_TAG"...master --oneline --decorate >> commit_summary.txt
git submodule foreach --recursive git submodule summary "$PREVIOUS_RELEASE_TAG" >> commit_summary.txt
Add a script which can be added to cron
#!/bin/sh
# Cron entry point: refresh both data sources and regenerate output.html.
# Each step reports its own failure message but never aborts the later
# steps, so a single broken source does not block the rest of the update.
if ! python wikipedia.py; then echo "FAILED TO UPDATE WIKIPEDIA"; fi
if ! python ynmp.py; then echo "FAILED TO UPDATE YNMP"; fi
if ! python generate_output_html.py; then echo "FAILED TO GENERATE output.html"; fi
Add backup script for MySQL
#!/bin/bash
#
# Author: Josh Ziegler
# Date: 2020-01-31
# License: MIT
#
# This will backup all MySQL databases on this host. It also backs up each
# database to its own file. The former is more useful for cases when the
# database server dies entirely, while the latter is more useful when you need
# to restore a single database.
#
# - This assumes your login credentials are saved on the system:
#   - In the file $HOME/.my.cnf
#   - OR preferably encrypted in $HOME/.mylogin.cnf via `mysql_config_editor
#     set --user=bob --host=mydbserver --password` (requires MySQL >= 5.6)
# - This assumes all of your databases are InnoDB. If not, you cannot use this
#   method!

# Variables
# Fix: keep the dump command in an array instead of a word-split string so
# the option list survives quoting and can be safely extended.
DUMP_CMD=(mysqldump --single-transaction --routines --events --triggers --force --master-data=2)
# NOTE(review): FILE_PREFIX is never used below -- confirm before removing.
FILE_PREFIX="mysql-db-backup"
BACKUP_DIR="/home/ubuntu/backups/mysql/"

# DO NOT EDIT BELOW THIS LINE
mkdir -p "$BACKUP_DIR"

# Backup all databases into a single file
"${DUMP_CMD[@]}" --all-databases > "$BACKUP_DIR/all-databases.sql"

# Backup each database to its own file
for DB in $(mysql -e 'show databases' -s --skip-column-names); do
    if [[ $DB == "information_schema" || $DB == "mysql" || $DB == "performance_schema" || $DB == "sys" ]]; then
        continue; # Skip these meta tables. They already exist in the all-databases.sql
    fi
    "${DUMP_CMD[@]}" "$DB" > "$BACKUP_DIR/$DB.sql";
done
Add script to update package versions
#!/bin/bash set -ex # MAJOR.MINOR.gitrolling # MAJOR and MINOR are semver-like, they should indicate breaking changes and # significant new features. The gitrolling is neccesary to keep the version # different than whatever is released on PyPI. The gitrolling will be # substituted for a different value in the release pipeline. VERSION="9.0.gitrolling" # omitting vex and binaries since they don't have versions REPOS="angr-management angr-doc angr claripy cle pyvex archinfo angrop ailment" BASE_DIR=$(realpath $(dirname $0)/..) function version_to_tuple { awk -F '.' '{ printf "(%d, %d, \"%s\")\n", $1, $2, $3}' <<<"$1" } for repo in $REPOS; do pushd $BASE_DIR/$repo if [ "$repo" == "angr-doc" ]; then sed -i -e "s/version = u['\"][^'\"]*['\"]/version = u'$VERSION'/g" api-doc/source/conf.py sed -i -e "s/release = u['\"][^'\"]*['\"]/release = u'$VERSION'/g" api-doc/source/conf.py else sed -i -e "s/version=['\"][^'\"]*['\"]/version='$VERSION'/g" setup.py sed -i -e "s/^__version__ = .*/__version__ = $(version_to_tuple $VERSION)/g" */__init__.py for dep in $REPOS; do if [ "$dep" != "$repo" ]; then sed -i -e "s/'$dep\(\(==[^']*\)\?\)',\$/'$dep==$VERSION',/" setup.py fi done fi if ! git diff --exit-code &> /dev/null; then git add -A git commit --author "angr bot <angr-dev@asu.edu>" -m "[ci skip] Update version to $VERSION" git push fi popd done
Install Advanced Toolchain 9.0 release for RHEL LE
#!/bin/bash
# Install the Advance Toolchain 9.0 (runtime, devel, perf and mcore-libs)
# on RHEL 7 from the Unicamp FTP repository, and put its bin/sbin dirs on
# the PATH for all future login shells.

#Add repo
cat <<EOF>/etc/yum.repos.d/at9_0.repo
[at9.0]
name=Advance Toolchain Unicamp FTP
baseurl=ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/redhat/RHEL7
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b
EOF

yum install -y advance-toolchain-at9.0-runtime \
 advance-toolchain-at9.0-devel \
 advance-toolchain-at9.0-perf \
 advance-toolchain-at9.0-mcore-libs

# Fix: use single quotes so $PATH is expanded at login time by each user's
# shell. The original double-quoted echo baked this installer's PATH value
# into /etc/profile.d/at9.sh for every future session.
echo 'export PATH=/opt/at9.0/bin:/opt/at9.0/sbin:$PATH' >> /etc/profile.d/at9.sh
source /etc/profile.d/at9.sh

# Refresh the dynamic linker cache for the new toolchain libraries.
/opt/at9.0/sbin/ldconfig
Add wrapper script to use during development
#!/bin/bash #set -o nounset # (set -u) Treat unset variables as error and exit script #set -o errexit # (set -e) Exit if any command returns a non-zero status #set -o pipefail # Return non-zero status if any piped commands fail ############################################################################## # # This is just a wrapper script that sources the shmark.sh functions so that # those functions can be tested without having to source them into the shell # environment. The wrapper script also allows stricter testing by setting extra # options such as 'nounset' and 'errexit'. NOTE: Tests should be run both with # and without the 'errexit' and other options set above. Run with the options # set when writing and testing the functions. Then comment the options out to # simulate the way the functions are likely to be run when sourced into the # shell environment. # # NOTE: Shell scripts like this can't change directories for a shell, so to # fully test any functions that change directories, the functions file will # need to be sourced into the shell environment and tested directly from the # shell instead of this wrapper script. # # @date 2014-01-21 First version # @author Steve Wheeler # ############################################################################## PROGNAME="${0##*/}" . ./shmark.sh echo >&2 "DEBUG: ${PROGNAME}: running..." shmark "$@" # call the main function (all other functions are private)
Exclude .cask and .osx from bootstrap.
#!/bin/bash #cd "$(dirname "${BASH_SOURCE}")" #git pull function doIt() { rsync --exclude "init" --exclude ".git/" --exclude ".DS_Store" --exclude "bootstrap.sh" --exclude "Brewfile" --exclude "README.md" -av . ~ } if [ "${1}" == "--force" -o "${1}" == "-f" ]; then doIt else read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1 echo if [[ ${REPLY} =~ ^[Yy]$ ]]; then doIt fi fi unset doIt source ~/.bash_profile
#!/bin/bash
# Sync the dotfiles in this repository into $HOME with rsync, then reload
# the shell profile. Prompts for confirmation unless -f/--force is given.
# This version also excludes the .cask and .osx helper files from the sync.
#cd "$(dirname "${BASH_SOURCE}")"
#git pull
function doIt() {
	# Copy everything except repo plumbing, docs, and the .cask/.osx helpers.
	rsync --exclude "init" --exclude ".git/" --exclude ".DS_Store" --exclude "bootstrap.sh" --exclude "Brewfile" --exclude ".cask" --exclude ".osx" --exclude "README.md" -av . ~
}
# Fix: replaced the deprecated, ambiguous `[ ... -o ... ]` (with the
# non-POSIX `==`) by the bash-native `[[ ... || ... ]]` already used below.
if [[ "${1}" == "--force" || "${1}" == "-f" ]]; then
	doIt
else
	# Interactive confirmation: a single y/Y keypress proceeds.
	read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1
	echo
	if [[ ${REPLY} =~ ^[Yy]$ ]]; then
		doIt
	fi
fi
unset doIt
source ~/.bash_profile
Add setup for encrypted volume
#!/bin/bash
# Set up a 256 MiB LUKS-encrypted container backed by the file
# /root/tinfoil: format it, create an ext4 filesystem inside, verify it
# mounts, then unmount and close the mapping again.

sudo apt-get -y install cryptsetup

# Create the 256 MiB backing file from random data.
sudo dd if=/dev/urandom of=/root/tinfoil bs=1M count=256

# Initialise the LUKS container (prompts for confirmation and passphrase)
# and open it as /dev/mapper/tinfoil.
sudo cryptsetup -y -h sha256 luksFormat /root/tinfoil
sudo cryptsetup luksOpen /root/tinfoil tinfoil

# Fix: the original ran `mkfs.ext4 -j /dev/mapper/` with the mapping name
# missing, so no filesystem was ever created.
sudo mkfs.ext4 -j /dev/mapper/tinfoil

# Smoke-test that the volume mounts, then tear everything down.
sudo mkdir /mnt/tinfoil
sudo mount /dev/mapper/tinfoil /mnt/tinfoil
sudo umount /mnt/tinfoil
sudo cryptsetup luksClose tinfoil
Add in autoenv from @kennethreitz
#!/usr/bin/env bash echo "hi from autoenv" if [[ -n "${ZSH_VERSION}" ]] then __array_offset=0 else __array_offset=1 fi autoenv_init() { typeset target home _file typeset -a _files target=$1 home="$(dirname $HOME)" _files=( $( while [[ "$PWD" != "/" && "$PWD" != "$home" ]] do _file="$PWD/.env" if [[ -e "${_file}" ]] then echo "${_file}" fi builtin cd .. done ) ) _file=${#_files[@]} while (( _file > 0 )) do source "${_files[_file-__array_offset]}" : $(( _file -= 1 )) done } cd() { if builtin cd "$@" then echo "running autoenv_init" autoenv_init return 0 else echo "else?" return $? fi } echo "cd has been replaced"
Add test for properly exported branches with conflicts.
# Test that darcs-fastconvert correctly exports branches whose merge had
# conflicts: set up a master and two branches that all edit file b, merge
# them with a conflict resolved in master, export to git, and check that
# every branch/commit shows the expected content of b.
rm -rf master branch1 branch2 gitrepo

# Base repo: file a plus file b in its pre-merge state.
darcs init --repo master
cd master
echo a > a && darcs add a
echo 'before merges' > b && darcs add b
darcs rec -am 'Initial state'
cd ..
darcs get master branch1
darcs get master branch2

# Each branch records its own version of b and tags the shared merge id.
cd branch1
echo branch1 > b && darcs rec -am 'Add b branch1'
darcs tag -m 'darcs-fastconvert merge pre-source: foo_merge_id'
cd ../branch2
echo branch2 > b && darcs rec -am 'Add b branch2'
darcs tag -m 'darcs-fastconvert merge pre-source: foo_merge_id'

# Master records its version, pulls both branches (creating conflicts),
# reverts the conflict markers, and records the resolution.
cd ../master
echo master > b && darcs rec -am 'Add b master'
darcs tag -m 'darcs-fastconvert merge pre-target: foo_merge_id'
darcs pull -a ../branch1
darcs pull -a ../branch2
darcs rev -a
echo 'master resolution' > b && darcs rec -am 'Resolve b conflict in master'
darcs tag -m 'darcs-fastconvert merge post: foo_merge_id'
cd ..

# Export all three darcs repos into a fresh git repo.
git init gitrepo
darcs-fastconvert export master branch1 branch2 | (cd gitrepo && git fast-import)
cd gitrepo
git reset --hard

# Verify the content of b at each exported ref/ancestor.
[[ $(head b) == 'master resolution' ]]
git checkout branch1
[[ $(head b) == 'branch1' ]]
git checkout branch2
[[ $(head b) == 'branch2' ]]
git checkout master~1
[[ $(head b) == 'master' ]]
git checkout master~2
[[ $(head b) == 'before merges' ]]
Add bash script to populate sqlite db with TGRC and UniProt data.
#!/bin/bash
#
# Populate db with TGRC tomato mutants (phenotypes) and (alternative) gene names from UniProt.
#
# Prerequisites:
#  Download TGRC data at http://tgrc.ucdavis.edu/Data/Acc/Genes.aspx
#  Post-process downloaded 'data.csv' file.
#   dos2unix data.csv
#   cut -f 1,3,6,7 -d ',' data.csv | sort -u # columns: Gene, Locus Name, Phenoptype, Chromosome
#
#  Download tomato reference proteome in RDF/XML from UniProt
#   http://www.uniprot.org/proteomes/UP000004994
#  Import the graph into RDF store & query
#

# Abort immediately if any step fails so a half-built db is not left behind.
set -e

DB=tgrc-uniprot.db

# Rebuild the database from scratch on every run.
rm -f "$DB"
# Run the schema creation and the data-loading query ('.q' quits sqlite3
# after the -init script has executed).
sqlite3 "$DB" -init create_db.sql '.q'
sqlite3 "$DB" -init tgrc-uniprot_query.sql '.q'
Add utility script for setting up test environment
#!/bin/sh
# Export the OpenShift connection details used by the test suite for a
# local "oc cluster up"-style instance. Intended to be *sourced* so the
# variables land in the calling shell's environment.
export OPENSHIFT_USER=developer
# Pull the developer token out of the local oc config.
# (Replaced deprecated backticks with the nestable $(...) form.)
export OPENSHIFT_TOKEN=$(oc config view -o jsonpath='{.users[?(@.name == "developer/localhost:8443")].user.token}')
export OPENSHIFT_URL=https://localhost:8443
export OPENSHIFT_NAMESPACE=myproject