blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
34d4c339d0ac031b19ffc0ad2be34bf8c99d5d21 | Shell | methodus/RaspberryMatic | /buildroot-external/overlay/base-raspmatic/bin/createBackup.sh | UTF-8 | 2,056 | 3.765625 | 4 | [
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
#
# simple wrapper script to generate a CCU compatible sbk file
#
# Copyright (c) 2016-2018 Jens Maus <mail@jens-maus.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage:
# createBackup.sh <directory>
#
# Default output directory; overridden by a single positional arg or -o/--output.
BACKUPDIR=/usr/local/tmp
# /VERSION provides ${VERSION} used in the backup file name (ignored if absent).
source /VERSION 2>/dev/null
BACKUPFILE="$(hostname)-${VERSION}-$(date +%Y-%m-%d-%H%M).sbk"
# One argument: treat it as the output directory (legacy call style).
# Otherwise: parse -f/--file and -o/--output option pairs.
# NOTE(review): the script's shebang is /bin/sh but [[ ]] is a bashism;
# this only works where sh is bash-compatible -- confirm on target systems.
if [[ $# -eq 1 ]]
then
BACKUPDIR=$1
else
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-f|--file)
BACKUPFILE="$2"
shift # past argument
shift # past value
;;
-o|--output)
BACKUPDIR="$2"
shift # past argument
shift # past value
;;
*) # unknown option
shift # past argument
;;
esac
done
fi
# make sure BACKUPDIR exists
TMPDIR=$(mktemp -d -p ${BACKUPDIR})
# make sure ReGaHSS saves its current settings
# NOTE(review): `2>&1 >/dev/null` redirects stderr to the terminal, not to
# /dev/null; if the intent was to silence everything it should read
# `>/dev/null 2>&1` -- confirm intent before changing.
echo 'load tclrega.so; rega system.Save()' | tclsh 2>&1 >/dev/null
# create a gzipped tar of /usr/local
tar --owner=root --group=root --exclude=/usr/local/tmp --exclude=/usr/local/lost+found --exclude=${BACKUPDIR} --exclude-tag=.nobackup --one-file-system --ignore-failed-read -czf ${TMPDIR}/usr_local.tar.gz /usr/local 2>/dev/null
# sign the configuration with the current key
crypttool -s -t 1 <${TMPDIR}/usr_local.tar.gz >${TMPDIR}/signature
# store the current key index
crypttool -g -t 1 >${TMPDIR}/key_index
# store the firmware VERSION
cp /VERSION ${TMPDIR}/firmware_version
# create sbk file
tar -C ${TMPDIR} --owner=root --group=root -cf ${BACKUPDIR}/${BACKUPFILE} usr_local.tar.gz signature key_index firmware_version 2>/dev/null
# remove all temp files
rm -rf ${TMPDIR}
| true |
627600f9f9f0eeb0f2e2bdc59ce163da392f4f9d | Shell | sms0070/dotfiles | /scripts/co-creds | UTF-8 | 1,168 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Fetch the it-uaa client id/secret from the Cloud Foundry app's environment.
# Reads: CO_APP_NAME (optional app name override). Requires: cf, curl, jq.
function main {
echo "NOTE: script uses cf cli. Please ensure you are logged into right CPI system! "
check_deps
# Allow the app name to be overridden via the CO_APP_NAME environment variable.
if [[ -z $CO_APP_NAME ]]; then
coAppName="it-co"
else
coAppName="$CO_APP_NAME"
fi
echo "-Getting co app guid for coAppName: $coAppName..."
coAppGuid=$(cf app $coAppName --guid) || die "Can't get guid for $coAppName"
echo "coAppGuid: $coAppGuid"
echo "-Getting co app env..."
coEnv=$(cf curl /v3/apps/$coAppGuid/env)
# echo "-coEnv: $coEnv"
# Pick the xsuaa binding named "it-uaa" and print its credentials.
clientCreds=$(echo $coEnv | jq -r '.system_env_json.VCAP_SERVICES.xsuaa[] | select(.instance_name == "it-uaa") | .credentials')
echo "ClientCreds: $clientCreds"
clientid=$(echo $clientCreds | jq -r '.clientid')
echo "-clientid: $clientid"
# NOTE(review): the client secret is printed to stdout in clear text.
clientsecret=$(echo $clientCreds | jq -r '.clientsecret')
echo "-clientsecret: $clientsecret"
}
# Abort (via die) unless every CLI tool this script shells out to is installed.
function check_deps {
deps=(cf curl jq)
for dep in "${deps[@]}"; do
installed "${dep}" || die "Missing '${dep}'"
done
echo "-check_deps OK."
}
# Succeeds only when "${1}" resolves to an existing executable file on disk
# (shell builtins and aliases resolve to a non-path name and therefore fail).
function installed {
    cmd=$(command -v "${1}")
    if [[ -z "${cmd}" ]]; then
        return 1
    fi
    [[ -f "${cmd}" ]]
}
# Print a fatal error message on stderr and terminate the script.
function die {
    echo "Fatal: ${*}" >&2
    exit 1
}
main
| true |
4877fe178d8d37c2344acd685e8a8336ac29f612 | Shell | atnartur/docker-image | /docker-install.sh | UTF-8 | 640 | 2.578125 | 3 | [] | no_license | # Comands from Docker official instruction
# https://docs.docker.com/engine/installation/linux/debian/
# Install Docker CE on Debian following the official repository setup steps.
# Each step is chained with && so a failure stops the sequence.
apt-get update &&
apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common -y &&
# Trust Docker's package signing key, then register the stable apt repo.
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - &&
apt-key fingerprint 0EBFCD88 &&
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -cs) \
stable" &&
apt-get update &&
apt-get install docker-ce -y &&
docker -v &&
echo "" &&
echo "add current user to docker group: sudo usermod -a -G docker USERNAME"
| true |
1aa387d66a4341a3723502b3b58e5350c26819b0 | Shell | jrwren/lvmsnaprot | /lvmsnaprotate.bash | UTF-8 | 4,549 | 3.875 | 4 | [] | no_license | #!/bin/bash
#
# LVM snapshot rotate script
# Jay R. Wren <jwren@arbor.net>
#
# forked from mongolvmsnapback by
#
# Jean-Francois Theroux <failshell@gmail.com>
#
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# `: ${VAR:=default}` keeps any value exported by the caller, else assigns
# the default. VG defaults to the first volume group reported by vgs; LVS to
# LVs whose attr field does not start with 's' (presumably excluding
# snapshot volumes -- confirm against lvs attr semantics).
: ${ARCHIVE_DIR:=/archives}
: ${VG:=$(vgs --noheadings | awk '{print $1}')}
: ${LVS:=$(lvs --noheadings | awk '$3!~/^s/{print $1}')}
# archiving settings
# DOW: day of week (0 = Sunday); DOM: zero-padded day of month.
DOW=`date +%w`
DOM=`date +%d`
# TODO: support multiple daily, weekly, monthly
#KEEP_DAILY=7
#KEEP_WEEKLY=4
#KEEP_MONTHLY=1
# functions
# Report the outcome of the *previous* command: $? is inspected on entry,
# so get_status must be called immediately after the command it checks.
# On success prints "OK". When called as `get_status first`, a failure is
# tolerated (assumed first run) and the function returns 0. Any other
# failure triggers cleanup (unmount + leftover snapshot removal) and exit 1.
get_status () {
if [ $? == 0 ]
then
echo OK
else
[[ $1 == first ]] && { echo assuming first run ; return 0; }
echo ERROR
echo
echo [!] Something fatal happened. Aborting.
# Unmount if we're mounted
echo
echo -n "[-] Is ${ARCHIVE_DIR}/daily mounted: "
mount | grep ${ARCHIVE_DIR} > /dev/null 2>&1
if [ $? == 0 ]
then
echo Yes
echo -n "[-] Unmounting ${ARCHIVE_DIR}: "
cd / && umount ${ARCHIVE_DIR}
if [ $? == 0 ]
then
echo OK
else
echo ERROR
fi
else
echo No
fi
# If we have a leftover snapshot, remove it
echo
echo -n "[-] Do we have a leftover snapshot: "
if [ -L ${VOLUME}-snapshot ]
then
echo Yes
echo -n "[-] Removing snapshot: "
lvremove -f ${VOLUME}-snapshot > /dev/null 2>&1
if [ $? == 0 ]
then
echo OK
else
echo ERROR
fi
else
echo No
fi
echo
echo [-] `date`
echo [+] LVM snapshot rotate status: failure
exit 1
fi
}
echo [+] Starting LVM snapshot rotate
echo [-] `date`
echo
if [ -z "$VG" ]
then
echo "[!] Volume group is not set. Aborting."
exit 1
fi
[ -x /sbin/lvcreate ] || { echo "ERROR: Missing LVM tools. Aborting."; exit 1; }
# For each logical volume: recreate its daily read-only snapshot, mount it
# under ${ARCHIVE_DIR}/daily/$LV, and refresh the weekly/monthly snapshots
# on the first day of the week/month respectively.
for LV in $LVS; do
[ -d ${ARCHIVE_DIR}/daily/$LV ] || { echo "[!] Missing ${ARCHIVE_DIR}/daily/$LV. Creating it." && mkdir -p ${ARCHIVE_DIR}/daily/$LV; }
[ -d ${ARCHIVE_DIR}/weekly/$LV ] || { echo "[!] Missing ${ARCHIVE_DIR}/weekly/$LV. Creating it." && mkdir -p ${ARCHIVE_DIR}/weekly/$LV; }
[ -d ${ARCHIVE_DIR}/monthly/$LV ] || { echo "[!] Missing ${ARCHIVE_DIR}/monthly/$LV. Creating it." && mkdir -p ${ARCHIVE_DIR}/monthly/$LV && echo; }
if [ -z "$LV" ]
then
echo "[!] Logical volume is not set. Aborting."
exit 1
fi
echo [+] Pre flight tests complete
echo
VOLUME=/dev/${VG}/${LV}
# unmounting snapshot
# `get_status first` tolerates failure here: on the very first run there is
# no previous snapshot mounted.
echo -n "[-] Unmounting snapshot: "
umount ${ARCHIVE_DIR}/daily/$LV
get_status first
# remove snapshot
echo -n "[-] Removing snapshot: "
lvremove -f ${VOLUME}-snapshot >/dev/null
get_status first
# LVM snapshot
# -l 2%ORIGIN sizes the snapshot; -p r makes it read-only.
echo -n "[-] Creating LVM snapshot of ${VOLUME} as ${VOLUME}-snapshot: "
lvcreate -s "/dev/${VG}/${LV}" -n "${LV}-snapshot" -l "2%ORIGIN" -p r >/dev/null 2>&1
get_status
# mounting snapshot
echo -n "[-] Mounting snapshot under ${ARCHIVE_DIR}: "
mount ${VOLUME}-snapshot ${ARCHIVE_DIR}/daily/$LV
get_status
# Archiving
echo
echo [+] Archiving phase
# weekly
if [ ${DOW} == 0 ]
then
echo -n "[-] We're the first day of the week, archiving: "
umount $ARCHIVE_DIR/weekly/$LV
lvremove -f ${VOLUME}-snapshot-weekly >/dev/null
lvcreate -s "/dev/${VG}/${LV}" -n "${LV}-snapshot-weekly" -l "2%ORIGIN" -p r >/dev/null 2>&1
mount $VOLUME-snapshot-weekly $ARCHIVE_DIR/weekly/$LV
get_status
else
echo [-] Not the first day of the week. Skipping weekly archiving.
# First-run bootstrap: create the weekly snapshot if it does not exist yet.
if [[ ! -b /dev/${VG}/${LV}-snapshot-weekly ]];then
echo -n "[-] We're the first run, weekly archiving: "
lvremove -f ${VOLUME}-snapshot-weekly >/dev/null
lvcreate -s "/dev/${VG}/${LV}" -n "${LV}-snapshot-weekly" -l "2%ORIGIN" -p r >/dev/null 2>&1
mount $VOLUME-snapshot-weekly $ARCHIVE_DIR/weekly/$LV
get_status
fi
fi
# monthly
if [ ${DOM} == 01 ]
then
echo -n "[-] We're the first day of the month, archiving: "
umount $ARCHIVE_DIR/monthly/$LV
lvremove -f ${VOLUME}-snapshot-monthly >/dev/null
lvcreate -s "/dev/${VG}/${LV}" -n "${LV}-snapshot-monthly" -l "2%ORIGIN" -p r >/dev/null 2>&1
mount $VOLUME-snapshot-monthly $ARCHIVE_DIR/monthly/$LV
get_status
else
echo [-] Not the first day of the month. Skipping monthly archiving.
if [[ ! -b /dev/${VG}/${LV}-snapshot-monthly ]];then
echo -n "[-] We're the first run, monthly archiving: "
lvremove -f ${VOLUME}-snapshot-monthly >/dev/null
lvcreate -s "/dev/${VG}/${LV}" -n "${LV}-snapshot-monthly" -l "2%ORIGIN" -p r >/dev/null 2>&1
mount $VOLUME-snapshot-monthly $ARCHIVE_DIR/monthly/$LV
get_status
fi
fi
done
echo
echo [-] `date`
echo [+] LVM snapshot rotate status: success
| true |
8525d9551f262e00bd37a6dcf9667b7da52d9ffb | Shell | shilpa-jassal/LeadDashboard1 | /backend/django_react/env3/lib/python3.6/site-packages/tests/runtime_20_secrets.bats | UTF-8 | 468 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | load test_helper
# Bats test: verify `psec secrets set` stores a secret that can be read back.
# PSEC is expected in the environment (presumably exported by test_helper --
# confirm). `run` and `assert_output` are bats helpers.
export TEST_PASSWORD="mummy_unbaked_tabby_thespian"
setup() {
run $PSEC environments create --clone-from secrets 1>&2
}
teardown() {
rm -rf /tmp/.secrets/bats
}
@test "'psec secrets set jenkins_admin_password=$TEST_PASSWORD' sets variable properly" {
run $PSEC secrets set jenkins_admin_password=$TEST_PASSWORD
run $PSEC secrets show jenkins_admin_password --no-redact -f csv
assert_output --partial "$TEST_PASSWORD"
}
# vim: set ts=4 sw=4 tw=0 et :
| true |
a1c1c77ee45b9cf59becc539a15c37ebee298ec0 | Shell | bhawk123/identidock | /dock-build-tag-push.sh | UTF-8 | 512 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Build the identidock image, tag it with the given version plus "newest",
# and push both tags to Docker Hub. Usage: dock-build-tag-push.sh VERSION
echo "Docker Build, Tag, Push"
account="bhawk123"
repository="identidock"
# Require the image version tag as the first argument.
if [[ $# -eq 0 ]] ; then
    echo 'Missing Version'
    exit 1
fi
echo "Building image"
# Build from the current directory, bypassing the layer cache; expansions
# are quoted so an odd version string cannot word-split the tag.
docker build --no-cache -t "${account}/${repository}:${1}" .
echo "Tagging image"
docker tag "identidock" "${account}/${repository}:${1}"
docker tag "identidock" "${account}/${repository}:newest"
# Fix: the old message said "NOT Pushing image to Docker" even though the
# script pushes both tags right below -- the log now matches the behavior.
echo "Pushing image to Docker"
docker push "${account}/${repository}:${1}"
docker push "${account}/${repository}:newest"
echo "DONE"
| true |
d618a229dad221ccd8c429a7765a4e0f53659cdb | Shell | luzuojin/rssh | /rssh | UTF-8 | 410 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Resolve the directory containing this script (following a symlink if the
# script was invoked through one), then either install a symlink or delegate
# to rssh.py.
dir=$(readlink $0)
# readlink prints nothing when $0 is not a symlink.
if [ x$dir == x ] ; then
# NOTE(review): `dirname "$1"` uses the first *argument*; resolving the
# script's own directory would normally use "$0" -- confirm intent.
dir=$(cd $(dirname "$1") && pwd -P)
else
#startswith
# Relative symlink target: make it absolute before taking dirname.
if [[ $dir != /* ]] ; then
dir=$(cd $(dirname "$1") && pwd -P)/$dir
fi
dir=$(dirname $dir)
fi
if [ ! -z $1 ] && [ $1 == 'install' ] ; then
ln -s $dir/rssh /usr/local/bin/rssh
#elif [ $(python $dir/rssh.py test $1) != 'OK' ] ; then
# expect $dir/rssh.ex "$@"
else
python $dir/rssh.py "$@"
fi
| true |
db31b3893e09f9ed5130f9a1566288367e5ff312 | Shell | ingewortel/negative-selection-2020 | /figures/shared-scripts/check-setup.sh | UTF-8 | 5,009 | 3.6875 | 4 | [
"MIT"
] | permissive |
# Depending on system, check if basic tools are there.
# Verify every tool needed to build the figures (compiler toolchain, LaTeX,
# bc, OpenFST, Graphviz, librsvg, R, python3 + numpy/networkx); exits 1 on
# the first hard failure. NOTE(review): the $check* comparisons are unquoted;
# this is safe only while `wc -l` always prints exactly one word.
system=$(uname -s)
if [ $system == "Darwin" ] ; then
# This will open a prompt to install xcode CLT if it is not there, or otherwise
# this will do nothing.
xcode-select --install &> /dev/null
echo "** Xcode CLT : OK "
elif [ $system == "Linux" ] ; then
# NOTE(review): relies on `locate`; a stale or absent locate database would
# yield a false "not installed" here -- confirm.
checkEssentials=$( locate build-essential | wc -l )
if [ $checkEssentials -eq 0 ] ; then
echo "ERROR - Your computer does not appear to have build-essentials installed! Please install before continuing."
echo ""
echo " Try:"
echo " sudo apt-get install build-essential"
echo " or try the equivalent for your package manager."
exit 1
else
echo "** build-essentials : OK "
fi
else
echo "WARNING: Unknown system. Please ensure that you have basic command line tools (C compiler, make, ...) before continuing."
fi
# check latex
checkLatex=$(command -v pdflatex | wc -l )
checkLatexmk=$(command -v latexmk | wc -l )
if [[ $checkLatex == 0 || $checkLatexmk == 0 ]] ; then \
echo "ERROR - Your computer does not appear to have latex (or all required packages) installed!"
echo " Please install before continuing, and ensure you have the commands 'pdflatex', 'latexmk', and latex packages such as tikz installed."
echo ""
echo " On linux, try:"
echo " sudo apt-get install texlive"
echo " sudo apt-get install texlive-latex-extra"
echo " sudo apt-get install latexmk"
echo " On Mac OS X, try:"
echo " brew cask install mactex"
exit 1
else
echo "** Latex-etc : OK "
fi
# check bc on linux (mac should have it)
checkBc=$(command -v bc | wc -l )
if [ $checkBc != 1 ] ; then \
echo "ERROR - Your computer does not appear to have bc installed! Please install before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install bc"
echo " On Mac OS X, you should have bc by default. Please Google to find out what's wrong."
exit 1
else
echo "** bc : OK "
fi
# check openfst
checkOpenfst=$(command -v fstdifference | wc -l)
if [ $checkOpenfst != 1 ] ; then \
echo "ERROR - Your computer does not appear to have OpenFST installed! Please install before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install libfst-dev"
echo " sudo apt-get install libfst-tools"
echo " On Mac OS X, try:"
echo " brew install openfst"
echo " or visit http://www.openfst.org"
exit 1
else
echo "** OpenFST : OK "
fi
# check graphviz
checkGraphviz=$(command -v neato | wc -l)
if [ $checkGraphviz != 1 ] ; then \
echo "ERROR - Your computer does not appear to have Graphviz installed! Please install before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install graphviz"
echo " On Mac OS X, try:"
echo " brew install graphviz"
echo " or visit https://www.graphviz.org/download/"
exit 1
else
echo "** Graphviz : OK "
fi
# check rsvg-convert
checkRsvg=$(command -v rsvg-convert | wc -l)
if [ $checkRsvg != 1 ] ; then \
echo "ERROR - Your computer does not appear to have rsvg-convert installed! Please install before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install librsvg2-bin"
echo " On Mac OS X, try:"
echo " brew install librsvg"
echo " or Google how to install librsvg on your system."
exit 1
else
echo "** librsvg : OK "
fi
# Check if R is installed
Rinstall=$(command -v R | wc -l)
if [ $Rinstall != 1 ] ; then \
echo "ERROR - Your computer does not appear to have R installed! Please install R before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install R"
echo " On Mac OS X, try:"
echo " brew install R"
echo " or visit https://cloud.r-project.org/"
exit 1
else
echo "** R : OK "
fi
# Check if python 3 is installed
pythonCheck=$(command -v python3 | wc -l )
if [ $pythonCheck != 1 ] ; then \
echo "ERROR - Your computer does not appear to have python3 installed! Please install python3 before continuing."
echo ""
echo " On linux, try:"
echo " sudo apt-get install python3.6"
echo " On Mac OS X, try:"
echo " brew install python3"
echo " or visit https://www.python.org/downloads/"
exit 1
else
echo "** python3 : OK "
fi
# install python packages
# Interactive section: offers to pip-install numpy/networkx when missing.
checkPythonPackages=$(python3 -m pip list)
checkNumpy=$(echo $checkPythonPackages | grep numpy | wc -l)
if [ $checkNumpy != 1 ] ; then \
echo "You don't have the python3 Numpy packages. Do you wish to install it?"
select yn in "Yes" "No"; do
case $yn in
Yes ) python3 -m pip install numpy; break;;
No ) echo "ERROR - Please install numpy before continuing." ; exit;;
esac
done
else
echo "** - Numpy : OK "
fi
checkNetworkx=$(echo $checkPythonPackages | grep networkx | wc -l)
if [ $checkNetworkx != 1 ] ; then \
echo "You don't have the python3 networkx packages. Do you wish to install it?"
select yn in "Yes" "No"; do
case $yn in
Yes ) python3 -m pip install networkx; break;;
No ) echo "ERROR - Please install networkx before continuing." ;exit;;
esac
done
else
echo "** - Networkx : OK "
fi
echo "Setup OK!"
| true |
1cac3ba8f4dfd25dc7eb9d6f96ba5b28209d4e98 | Shell | petobens/dotfiles | /arch/bin/pinentry-wrapper | UTF-8 | 291 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Note: set the proper theme (adwaita) from the qt5ct app
# Launch pinentry-qt under the qt5ct platform theme, optionally prefixed by
# extra environment assignments supplied through PINENTRY_USER_DATA.
cmd="QT_QPA_PLATFORMTHEME=qt5ct /usr/bin/pinentry-qt"
if [[ -n "$PINENTRY_USER_DATA" ]]; then
    # Font aware launcher will set the correct value of the env variable
    cmd="$PINENTRY_USER_DATA $cmd"
fi
# Fix: forward the script's arguments quoted so ones containing spaces or
# glob characters survive the eval; the old `eval "$cmd $@"` word-split them.
eval "$cmd" '"$@"'
| true |
615600ac7183e34558569856c61b2a36de73d405 | Shell | pingcap/tidb | /dumpling/tests/_utils/file_should_exist | UTF-8 | 154 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
# Test helper: exit 1 with a timestamped message unless the file given as
# the first argument exists as a regular file.
set -eu
if [ ! -f "$1" ]; then
echo "[$(date)] File $1 not found." && exit 1
fi
| true |
29f3f2481e0187709dca382b5ccd78a141e4a8be | Shell | 844196/kraftwerk | /lib/library_sasairc.sh | UTF-8 | 798 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Author:
# sasairc (@sasairc2)
#
# License
# MIT
#
# Sleep for ${1} (seconds or, for usleep, microseconds) using the best
# available implementation; the chosen command is kept in the global SLEEP.
function _sleep() {
    # Bug fix: the original tested `if $(which tool >/dev/null 2>&1)`. With
    # all output silenced the command substitution expands to an *empty*
    # command, which the shell treats as true, so the first branch always
    # won and the fallbacks were unreachable. Test the lookup's exit status
    # directly instead.
    if command -v sleepenh > /dev/null 2>&1; then
        SLEEP='sleepenh'
    elif command -v usleep > /dev/null 2>&1; then
        SLEEP='usleep'
    else
        SLEEP='sleep'
    fi
    ${SLEEP} ${1} > /dev/null
}
# Build MARGIN_W: a run of spaces wide enough to horizontally centre content
# of width ${1} on the current terminal (half of the leftover columns).
function _margin_width() {
    local cols=$(tput cols)
    local width=$(expr \( ${cols} - ${1} \) / 2)
    local i
    MARGIN_W=''
    for i in $(seq 1 ${width}); do
        MARGIN_W="${MARGIN_W} "
    done
}
# Build MARGIN_H: blank lines that vertically centre content of height ${1}
# on the current terminal. Note the command substitution strips trailing
# newlines, so the final echo's newline does not survive into MARGIN_H.
function _margin_height() {
local lines=$(tput lines)
local height=$(expr \( ${lines} - ${1} \) / 2)
MARGIN_H=$(
for i in `seq 1 ${height}`; do
echo " "
done
)
}
# Prepare the screen for drawing: clear it, hide the cursor, and install
# traps so the cursor is restored on SIGINT or normal exit.
function _initscr() {
clear
trap 'tput cnorm; exit 1' SIGINT
trap 'tput cnorm; exit 0' EXIT
tput civis
}
# Restore the terminal: clear the screen and make the cursor visible again.
function _endscr() {
clear
tput cnorm
}
| true |
05a3a6e6008d4a6c6155375d10ca6e7a8c423c25 | Shell | NikeNano/pipelines | /components/release-in-place.sh | UTF-8 | 2,417 | 3.421875 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | #!/bin/bash
#
# Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script automated the process to update the component images.
# To run it, find a good release candidate commit SHA from ml-pipeline-test project,
# and provide a full github COMMIT SHA to the script. E.g.
# ./update-for-release.sh 2118baf752d3d30a8e43141165e13573b20d85b8
# The script copies the images from test to prod, and update the local code.
set -xe
images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
"ml-pipeline-kubeflow-tfjob"
"ml-pipeline-local-confusion-matrix"
"ml-pipeline-local-roc"
)
TAG_NAME=$1
FROM_GCR_PREFIX='gcr.io/ml-pipeline-test/'
TO_GCR_PREFIX='gcr.io/ml-pipeline/'
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd)"
if [ -z "$TAG_NAME" ]; then
echo "Usage: release.sh <tag-name>" >&2
exit 1
fi
# KFP repo root
pushd "$DIR/.."
# Update setup.py VERSION
sed -i.bak -e "s|VERSION =.\+'|VERSION = '${TAG_NAME}'|g" "components/gcp/container/component_sdk/python/setup.py"
# Updating components and samples.
# Accumulate one sed -e expression per image that rewrites any existing tag
# of that image to ${TAG_NAME}, then apply all of them in one pass per file.
BATCH_UPDATE=()
for image in "${images[@]}"
do
TARGET_IMAGE_BASE=${TO_GCR_PREFIX}${image}
TARGET_IMAGE=${TARGET_IMAGE_BASE}:${TAG_NAME}
BATCH_UPDATE+=("-e" "s|${TARGET_IMAGE_BASE}:\([a-zA-Z0-9_.-]\)\+|${TARGET_IMAGE}|g")
# Update the code
done
find components samples -type f | while read file; do
sed -i "${BATCH_UPDATE[@]}" "$file"
done
BATCH_UPDATE=()
# NOTE(review): this echo prints the just-emptied array (i.e. nothing);
# it looks like leftover debug output -- confirm before removing.
echo "${BATCH_UPDATE[@]}"
# Updating the samples to use the updated components
# COMPONENTS: the component files changed by the previous pass, used to
# repoint raw.githubusercontent.com URLs at the new tag.
COMPONENTS=($(git diff --name-only))
for component_file in ${COMPONENTS[@]}
do
echo $component_file
BATCH_UPDATE+=("-e" "s|(https://raw.githubusercontent.com/kubeflow/pipelines/)[^/]+(/$component_file)|\1${TAG_NAME}\2|g")
done
find components samples -type f | while read file; do
sed -i -r "${BATCH_UPDATE[@]}" "$file";
done
popd
| true |
64f678d7ac0916ceadd21f792c6eeca7e804efc7 | Shell | pavelpy/dotfiles-1 | /bash/.config/bash/plugins/pyenv.bash | UTF-8 | 302 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
# Bash plugin: initialise pyenv (and pyenv-virtualenv when present).
# pathmunge and _command_exists are helper functions presumably provided by
# the surrounding dotfiles framework -- confirm they are sourced first.
export PYENV_ROOT="$HOME/.pyenv"
pathmunge "$PYENV_ROOT/bin"
if _command_exists pyenv; then
eval "$(pyenv init - bash)"
fi
# Load pyenv virtualenv if the virtualenv plugin is installed.
if pyenv virtualenv-init - &> /dev/null; then
eval "$(pyenv virtualenv-init - bash)"
fi
| true |
a2f9a4c88a1e6f6c680f061bfbb6e851226779b0 | Shell | k1ck3r/vim | /pkg/grc-1.11.3/contrib/wrap-ccache | UTF-8 | 502 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# by Alexey Galakhov
# Wrapper that runs a compiler through ccache and colourises its output with
# grc when the corresponding stream is a terminal.
wd="`dirname $0`"
ccache="`which ccache`"
grc="`which grc`"
grconf="conf.gcc"
# Special case: when wrapping `configure`, skip ccache and use the
# configure-specific grc colouring rules.
if [ "`basename $1`" = "configure" ]; then
ccache=""
grconf="conf.configure"
fi
gropts=""
# Colourise only streams attached to a tty (checked via perl's isatty).
if perl -e 'use POSIX "isatty"; exit !isatty(1);'; then
gropts="$gropts --stdout"
fi
if perl -e 'use POSIX "isatty"; exit !isatty(2);'; then
gropts="$gropts --stderr"
fi
if [ ! -z "$grc" -a ! -z "$gropts" ]; then
grc="$grc -s -e -c $wd/$grconf"
else
grc=""
fi
# $grc and $ccache are deliberately unquoted: when empty they disappear,
# and $grc word-splits into the command plus its options.
exec $grc $ccache "$@"
| true |
807e75934ae15922a63971fd1b32f4d446d6c32e | Shell | enarciso/chef-ci-tools | /bin/chef-foodcritic-publisher.sh | UTF-8 | 1,282 | 3.703125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Do not fail to parse cookbooks because of the encoding
export LC_CTYPE=en_US.UTF-8
# Runs foodcritic over every cookbook changed between the two git revisions
# given as $1 and $2, emitting one JUnit XML report per cookbook.
if [ ! -d cookbooks ]; then
echo 'This script must be run from the root of the chef repository'
exit 1
fi
if [ ! -d junit_reports ]; then
mkdir -p junit_reports
fi
rm junit_reports/foodcritic-*.xml 2>/dev/null
PATH=${HOME}/bin:${PATH}
FOODCRITIC=${FOODCRITIC:-foodcritic}
if [ -s ${HOME}/.rvm/scripts/rvm ]
then
. "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
fi
# No changed cookbooks: write a dummy report so CI sees a test suite.
# NOTE(review): the unquoted backtick substitution inside `[ -z ... ]`
# breaks when more than one cookbook name is returned -- confirm.
if [ -z `git diff --name-only ${1} ${2} | awk '$1 ~ /^cookbooks/' | sed -e 's/cookbooks\///' | awk -F '[/]' '{print $1}' | uniq` ]; then
cat > junit_reports/foodcritic-dummy.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<testsuites>
<testsuite name="dummy" timestamp="">
<testcase classname="NO_TEST" name="dummy">
</testcase>
</testsuite>
</testsuites>
EOF
exit 0
fi
# NOTE(review): the backticks *execute* git checkout's stdout as a command;
# this only works because checkout prints nothing to stdout -- confirm, and
# presumably the backticks should simply be dropped.
`git checkout ${2}`
# Gets the cookbook names from the git diff
for cbname in `git diff --name-only ${1} ${2} | awk '$1 ~ /^cookbooks/' | sed -e 's/cookbooks\///' | awk -F '[/]' '{print $1}' | uniq`; do
echo "------ foodcritic checks: $cbname ------"
$FOODCRITIC cookbooks/$cbname | chef-ci-tools/bin/foodcritic2junit.pl --suite $cbname --out junit_reports/foodcritic-$cbname.xml
done
| true |
a4b76e60bd53cfc6d5a1c2f6b4f13930cd26f52f | Shell | danielprofili/quantum-chemistry-neuralnetwork | /quantum_chemistry/scripts/min_energy.sh | UTF-8 | 1,352 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# min_energy
# Finds the minimum energy in all input files.
#
# 25 January 2019
#home=$(pwd)
help_msg="USAGE: ./min_energy.sh RESULTS_DIR"
[ ! -z $1 ] && results_dir=$1 || { echo $help_msg; exit 1; }
# Cached result: print it and stop.
if [ -f $results_dir/min_energy ]; then
cat $results_dir/min_energy
else
#cd subs
min=0
# Scan every subdirectory f for f/f.out and track the lowest Total Energy.
for f in *
do
# f is the folder name for the molecule output
energy=$(grep "Total Energy" ${f}/${f}.out | grep -E -o "\-[0-9]+\.[0-9]+")
#echo $energy
# checks if the energy is empty
flag=$(echo $energy | wc -w)
#echo $flag
#echo ${#flag}
if [ $flag -ge 1 ]; then
#echo "flag passed"
# NOTE(review): ${home} is empty because its assignment above is commented
# out, so this resolves to /floating_comp.py -- confirm the intended path.
comp=$(python ${home}/floating_comp.py ${min} ${energy})
#if (( $(echo "$min > $energy" | bc -l) )); then
#echo $comp
if [ $comp -eq 1 ]; then
#if [ ${energy} > ${min} ]; then
min=$energy
#echo "new min=$energy"
#sleep 1
fi
fi
done
#cd $home
# NOTE(review): the cache is written to the *current* directory, while the
# lookup above reads $results_dir/min_energy -- the two only match when the
# script is run from inside RESULTS_DIR. TODO confirm.
echo $min > min_energy
echo $min
fi
| true |
a2efaf811f39b3a505b48d358961a1eee05a9b4b | Shell | Malaber/dotfiles | /apply.sh | UTF-8 | 1,042 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Install dotfiles: install packages, symlink config files into $HOME, set
# global git options, and record the run in $BASEDIR/LastRun.
SCRIPTPATH=$(readlink -f "$0")
#echo $SCRIPTPATH
BASEDIR=$(dirname "$SCRIPTPATH")
#echo $BASEDIR
# Official packages via pacman, AUR packages via yay; package lists live
# next to this script.
sudo pacman -S --needed $(cat $BASEDIR/packages_slim)
yay -S --needed $(cat $BASEDIR/packages)
ln -sf $BASEDIR/.bashrc ~/
ln -sf $BASEDIR/.profile ~/
ln -sf $BASEDIR/.aliases ~/
ln -sf $BASEDIR/.tmux.conf.local ~/
ln -sf $BASEDIR/.zshrc ~/
ln -sf $BASEDIR/.config/plasma-workspace/env/askpass.sh ~/.config/plasma-workspace/env/askpass.sh
git config --global core.excludesfile "$BASEDIR/.global_gitignore"
git config --global user.useConfigOnly true
git config --global alias.pushall '!git remote | xargs -L1 git push'
git config --global alias.oldest-ancestor '!bash -c '\''diff --old-line-format='' --new-line-format='' <(git rev-list --first-parent "${1:-master}") <(git rev-list --first-parent "${2:-HEAD}") | head -1'\'' -' # https://stackoverflow.com/a/4991675/10559526
sudo cp -f $BASEDIR/dschaedler.zsh-theme /usr/share/oh-my-zsh/themes/dschaedler.zsh-theme
# NOTE(review): bash's builtin echo prints "\n" literally (no -e flag), so
# the log line contains literal backslash-n sequences -- confirm intent.
echo "\nAPPLY\n" >> $BASEDIR/LastRun
date >> $BASEDIR/LastRun
| true |
ad3855f05d492915b619451a1461989c157ecab8 | Shell | PacoPacov/DyrjavenIzpit | /Operating Systems/bash_tasks/09_2015.sh | UTF-8 | 354 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# input 12 34 56
# Exam-style exercise script; reads positional args and stdin, writing
# intermediate results to file ff.
var=1
for i in 4 3 2 1
# `for j` with no list iterates over the script's positional parameters.
do for j
# Only runs while $i exceeds the argument count.
do if test $i -gt $#
then var=` expr $var \* $i `
echo $var $j >> ff
else continue
fi
done
done
# Prints all arguments once (break after the first iteration).
while true
do echo $*
break
done
# input 5 6
read k1 k2
# Loop while ff contains a line matching $k2 (grep also prints the matches).
while cat ff | grep $k2
do set $k1 $var
shift
echo $2
grep $i ff
# Unconditional exit: the loop body runs at most once.
exit
echo $1
done
wc -l < ff
echo END
| true |
4ff7172bb86452090a2db216e73cb58e6069408d | Shell | h2rd/.zsh | /aliases.zsh | UTF-8 | 617 | 2.765625 | 3 | [] | no_license | # Push and pop directories on directory stack
alias pu='pushd'
alias po='popd'
# Basic directory operations
alias ...='cd ../..'
alias -- -='cd -'
# Super user
alias _='sudo'
# Show history
alias history='fc -l 1'
# List directory contents
alias lsa='ls -lah'
alias l='ls -la'
alias ll='ls -l'
alias sl=ls # often screw this up
alias afind='ack-grep -il'
# Version control / tooling shortcuts
alias s='svn'
alias st='s st'
alias v='vagrant'
alias g='git'
alias clone='g clone'
# Reload the zsh configuration in the current shell
alias rc='. ~/.zshrc; echo "ZSH reloaded"'
# Serve the current directory over HTTP (python 2 module)
alias server='python -m SimpleHTTPServer'
alias pp='pjson'
alias vbr='sudo /Library/StartupItems/VirtualBox/VirtualBox restart'
| true |
f9c6572d66bbc140af8d14622f60810de3028dad | Shell | vmaks/fenics-mixed | /doc/src/make_latex.sh | UTF-8 | 1,377 | 3.421875 | 3 | [] | no_license | #!/bin/sh
# Compile fenics-mixed.do.txt to PDF via pdflatex.
# Make one version for screen reading and one for printing on paper
# (the difference is the way links are handled).
doconce spellcheck -d .dict4spell.txt fenics-mixed.do.txt
if [ $? -ne 0 ]; then
echo "Abort due to misspellings!"
exit 1
fi
rm -rf tmp_stripped
# Make sure publish database is up-to-date if refs.bib is the
# file that is really maintained
rm -f publish_references.pub
# The heredoc answers publish's two interactive prompts.
publish import refs.bib <<EOF
1
1
EOF
# Version 1: use anslistings.sty (from the FEniCS book) to
# typeset code
doconce format pdflatex fenics-mixed --device=screen --skip_inline_comments
if [ $? -ne 0 ]; then echo 'could not compile'; exit; fi
# Fix *local* anslistings.sty to drop line numbers
doconce subst -m '^(numbers=.+)' '%\g<1>' anslistings.sty
# Turn .p.tex to .tex
doconce ptex2tex fenics-mixed envir=ans:nt -DTODONOTES #-DLATEX_HEADING=traditional
# Run ordinary pdflatex
# pdflatex is run twice after bibtex to settle cross-references.
pdflatex fenics-mixed
bibtex fenics-mixed
pdflatex fenics-mixed
pdflatex fenics-mixed
cp fenics-mixed.pdf ../pub
# Make a version for printing (links appear with URLs in footnotes)
doconce format pdflatex fenics-mixed --device=paper --skip_inline_comments #-DLATEX_HEADING=traditional
doconce ptex2tex fenics-mixed envir=ans:nt -DTODONOTES
pdflatex fenics-mixed
pdflatex fenics-mixed
cp fenics-mixed.pdf ../pub/fenics-mixed-4paper.pdf
| true |
280dd5c8307bf73d3bc236cf32973887c28a4a0c | Shell | smsilva/kubernetes | /lab/velero/01-configure-aks-blob-storage.sh | UTF-8 | 2,320 | 2.65625 | 3 | [] | no_license | AZURE_BACKUP_SUBSCRIPTION_NAME="Azure subscription"
# Configure Azure blob storage and a service principal for Velero backups of
# an AKS cluster, then install Velero into the cluster.
AZURE_SUBSCRIPTION_ID=$(az account list --query="[?name=='${AZURE_BACKUP_SUBSCRIPTION_NAME?}'].id | [0]" -o tsv)
az account set -s ${AZURE_SUBSCRIPTION_ID?}
az group list --query '[].{ ResourceGroup: name, Location:location }'
AZURE_BACKUP_RESOURCE_GROUP="Velero_Backups"
AZURE_AKS_NODE_RESOURCE_GROUP=$(az aks show \
  --name silvios-dev-eastus2 \
  --resource-group silvios-dev-eastus2 \
  --output tsv \
  --query nodeResourceGroup)
az group create \
  --name ${AZURE_BACKUP_RESOURCE_GROUP?} \
  --location "eastus2"
# Storage account names must be globally unique: append a random uuid chunk.
AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
az storage account create \
  --name ${AZURE_STORAGE_ACCOUNT_ID?} \
  --resource-group ${AZURE_BACKUP_RESOURCE_GROUP?} \
  --sku Standard_GRS \
  --encryption-services blob \
  --https-only true \
  --kind BlobStorage \
  --access-tier Hot
BLOB_CONTAINER="velero"
az storage container create \
  --name ${BLOB_CONTAINER?} \
  --public-access off \
  --account-name ${AZURE_STORAGE_ACCOUNT_ID?}
AZURE_TENANT_ID=`az account list --query '[?isDefault].tenantId' -o tsv`
# NOTE(review): the same subscription scope is passed twice to --scopes;
# looks like a copy/paste duplicate -- confirm intent.
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac \
  --name "velero" \
  --role "Contributor" \
  --query 'password' -o tsv \
  --scopes /subscriptions/${AZURE_SUBSCRIPTION_ID?} /subscriptions/${AZURE_SUBSCRIPTION_ID?}`
AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
# NOTE(review): credentials-velero contains the service principal secret in
# clear text -- keep it out of version control.
cat << EOF > ./credentials-velero
AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
AZURE_TENANT_ID=${AZURE_TENANT_ID}
AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
AZURE_RESOURCE_GROUP=${AZURE_BACKUP_RESOURCE_GROUP}
AZURE_CLOUD_NAME=AzurePublicCloud
EOF
velero install \
  --provider azure \
  --plugins velero/velero-plugin-for-microsoft-azure:v1.2.0 \
  --bucket ${BLOB_CONTAINER?} \
  --secret-file ./credentials-velero \
  --use-restic \
  --backup-location-config resourceGroup=${AZURE_BACKUP_RESOURCE_GROUP?},storageAccount=${AZURE_STORAGE_ACCOUNT_ID?},subscriptionId=${AZURE_SUBSCRIPTION_ID?} \
  --snapshot-location-config resourceGroup=${AZURE_BACKUP_RESOURCE_GROUP?},subscriptionId=${AZURE_SUBSCRIPTION_ID?}
# Block until the velero deployment is Available (up to one hour).
kubectl \
  --namespace velero \
  wait deployment velero \
  --for condition=Available \
  --timeout 3600s
velero backup-location get
| true |
6cfff00e424c3e64d7fa58f901347ed4dca5f966 | Shell | delkyd/alfheim_linux-PKGBUILDS | /amanda/PKGBUILD | UTF-8 | 26,077 | 2.640625 | 3 | [] | no_license | # Maintainer: Chris Severance aur.severach aATt spamgourmet dott com
# Contributor: Phillip Smith <fukawi2@NO-SPAM.gmail.com>
# https://github.com/fukawi2/aur-packages
# The Basic and Remote configurations are included. Some folders have been changed to
# align with current methods.
# http://wiki.zmanda.com/index.php/GSWA/Build_a_Basic_Configuration
# Online guide for a single tape drive.
# http://www.resolveradiologic.com/blog/2017/01/16/how-to-set-up-amanda-to-backup-to-tape-in-ubuntu/
# Don't add your tape drive to the existing file. An upgrade will overwrite it and disappear.
# Add it to /etc/amanda/TestConfig/tapetypes and add an includefile to amanda.conf
# http://blog.learnadmin.com/2010/07/install-and-configure-amanda-backup.html
# More guides using the old fashioned configuration method
# http://fedoranews.org/ghenry/amanda/
# http://blog.secaserver.com/2012/09/centos-install-configure-amanda-backup-server/
# https://wiki.centos.org/HowTos/AmandaBackups
# https://www.novell.com/coolsolutions/feature/18050.html
# If your backup is not working, see if these demos work before reporting. You can also set some
# values back to legacy.
# If you want to run amanda as a different user and/or group, change these
# variables before running makepkg
# Do not use existing users or groups like daemon or nobody. They will be altered then removed.
# These can be changed between upgrades. You must recompile for the change to take effect.
# Changing while a backup is running will cause problems.
_amandauser='amandabackup' # formerly amanda, too likely to collide with a real Amanda
_amandagroup='amandabackup'
_amandauid='112'
_amandagid='112'
# Changing this requires a manual change to auth in amanda.conf
_opt_bsd='' # '' for none, 'tcp' or 'udp'. ssh is always available
# The am helper utilities assume that the home folder is the same on all computers
_amhome='/var/lib/amanda'
#_amhome='/var/amanda' # legacy
# Don't change any of the following settings between upgrades
_ametc='/etc/amanda' # changing this to /etc will destroy your /etc folder
#_amsecurity="${_ametc}/amanda-security.conf" # legacy, not compatible with amserverconfig
_amsecurity='/etc/amanda-security.conf'
_amdump='/dumps' # Always in /, or at least outside of any folder you would want to back up.
_amlibexec='/usr/lib'
#_amlibexec='/usr/libexec' # legacy, not liked by namcap, Arch does not use this folder
_amlog='/var/log/amanda'
# /var/cache/amanda is only mentioned in help files
### I AM ONLY THE PACKAGER, NOT THE DEVELOPER
### Please ask support questions about this software in one of:
### 1) The AUR comments; OR
### 2) Upstream forums/maillist etc; OR
### 3) The ArchLinux forums
### I do not always know enough about the software itself, or don't have the
### time to promptly respond to direct emails.
### If you have found a problem with the package/PKGBUILD (as opposed to
### the software) then please do email me or post an AUR comment.
set -u
pkgname='amanda'
#pkgver='3.3.9'
pkgver='3.5'
pkgrel='1'
pkgdesc='Advanced Maryland Automatic Network Disk Archiver network backup for Linux Windows clients, supports SSH, AES, GPG, encryption, tape, RAIT, mirror, changers, Amazon S3, ipv6, DVD, NDMP, VTL, advanced scripting'
arch=('i686' 'x86_64')
url='http://www.amanda.org'
license=('custom' 'GPL')
depends=('perl>=5.6.0' 'glib2>=2.2.0' 'gawk' 'tar>=1.12' 'gzip' 'curl>=7.10' 'openssl' 'openssh' 'cron' 'libarchive' 'readline' 'netcat' 'bash')
# perl-module-scandeps: cd pkg; scandeps.pl $(grep -lR '/usr/bin/perl' .) | sort -u | grep -v "^'Amanda::"
depends+=(
'perl-encode-locale'
'perl-io-socket-ssl'
'perl-json'
'perl-net-ssleay'
'perl-uri'
'perl-xml-namespacesupport'
'perl-xml-parser'
'perl-xml-sax'
'perl-xml-simple'
## from CentOS 3.3.9 install https://linoxide.com/tools/setup-centralized-backup-server-amanda-centos-7/
# I can't find any reference to the non builtins. Maybe for old versions.
#'perl-business-isbn'
#'perl-business-isbn-data'
##'perl-compress-raw-bzip2' # built in
##'perl-compress-raw-zlib' # built in
##'perl-data-dumper' # built in
##'perl-digest' # built in
##'perl-digest-md5' # built in
##'perl-file-listing' # built in
#'perl-html-parser'
#'perl-html-tagset'
#'perl-http-cookies'
#'perl-http-daemon'
#'perl-http-date'
#'perl-http-message'
#'perl-http-negotiate'
#?'perl-io-compress'
#'perl-io-html'
#?'perl-io-socket-ip'
#'perl-lwp-mediatypes'
#'perl-net-http'
#'perl-net-libidn'
#'perl-timedate'
#'perl-www-robotrules'
#'perl-xml-sax-base'
#'perl-libwww' # perl-libwww-perl
# From manual install
#'perl-extutils-embed' # developers only
)
optdepends=(
'sudo: run commands as amanda user'
'gnuplot: amplot pictures'
'dump: creating backups in dump format'
'samba: backing up Windows hosts'
'pigz: parallel gzip compression for dumptype'
'bzip2: better compression for dumptype'
'xz: better compression for dumptype'
'ncompress: LZW .Z compression'
'gnupg: amcrypt encryption'
'aespipe: amcrypt encryption'
'sharutils: uuencode for amcrypt encryption'
'mt-st-git: manual tape drive control for blocksize, testing, and restore' # some tape drive support is built in
'mtx-svn: advanced tape autoloader support' # some changer support is built in
'cups: printed reports'
'mailx: email notifications'
#'git: taper transfer' # does not seem to be current
#'svn: ' # lots of references but no action
'xfsdump: direct XFS backup with xfsdump xfsrestore' # This is why amanda needs to be in the disk group
'dump: direct EXT backup with dump restore' # see sendsize.c for the OS of various dump programs
)
# grep 'checking for' config.log | cut -d' ' -f2- | sort -u | grep -v '\.h$'
makedepends=('swig' 'grep' 'splint' 'gettext' 'sed') # swig is for developers only
backup=(
"${_amsecurity#/}"
"${_amhome#/}/.amandahosts"
"${_ametc#/}/MyConfig/amanda.conf" # Assume that many users will build from the demo configuration
"${_ametc#/}/MyConfig/disklist"
)
options=('!strip')
install="${pkgname}.install"
_tapetypes=('tapetypes.txt')
_verwatch=('http://www.amanda.org/download.php' '\([0-9\.]\+\)' 't')
_srcdir="${pkgname}-${pkgver}"
source=("https://prdownloads.sourceforge.net/amanda/amanda-${pkgver}.tar.gz" "xinetd.${pkgname}".{udp,tcp} "${_tapetypes[@]}")
sha256sums=('099eb36321b1360ebde6156fb1e75f3e0245520b6f886a0e8e0f31a7a6169be4'
'3db294c9d7c610e9c0d531dcc2725dbddf1213fad64f04bc7cf9b1b9c30e9803'
'46446a8dc4ee8ec39ed0a3e2636fb02a198565e8111abe8392c456da56a007ce'
'c368e7f9d6d1df703619476e0fcf06e841a7ec86a5a7b86dc499821fbb0a137e')
if [ ! -z "${_opt_bsd}" ]; then
depends+=('xinetd')
backup+=('etc/xinetd.d/amanda')
fi
# (deprecated) upstream sources require amanda user and group exist at build time
# We capture the chown commands so this is no longer needed.
# This requirement made it hard for noobs with AUR helpers to install.
_testAmandaUserGroup() {
  # (Deprecated -- unset immediately after definition.)  Verify that the
  # amanda group and user already exist on the build machine, printing the
  # command needed to create them otherwise.
  # Returns: 1 if the group is missing, 2 if the user is missing, 0 otherwise.
  # Fail unless the configured amanda group exists.
  if ! getent group "${_amandagroup}" > /dev/null ; then
    error "The amanda group must exist prior to building."
    error "Suggested command: groupadd -g ${_amandagid} ${_amandagroup}"
    return 1
  fi
  # Fail unless the configured amanda user exists.
  if ! getent passwd "${_amandauser}" > /dev/null ; then
    error "The amanda user must exist prior to building."
    error "Suggested command: useradd --system -u ${_amandauid} -g ${_amandagroup} -G storage,disk -m -d ${_amhome} -s /bin/bash -c 'Amanda Backup Daemon' ${_amandauser}"
    return 2
  fi
}
unset -f _testAmandaUserGroup
# We can't modify .install but we can stop and force the user to fix it.
_install_check() {
  # Sanity check: the .install script (which cannot read PKGBUILD variables)
  # must declare the same values for _amlibexec, _amsecurity and _amhome as
  # this PKGBUILD; otherwise stop so the maintainer can resynchronise them.
  local _ckvar
  local _ckline
  for _ckvar in _amlibexec _amsecurity _amhome; do
    # Reconstruct the exact "var='value'" line expected in ${install};
    # ${!_ckvar} is an indirect expansion of the variable named by _ckvar.
    _ckline="${_ckvar}='${!_ckvar}'"
    if ! grep -q "^${_ckline}"'$' "${startdir}/${install}"; then
      set +u
      msg "${install} must be fixed"
      echo "${_ckline}"
      # NOTE(review): 'false' presumably aborts the build because makepkg
      # runs these functions with errexit semantics -- confirm.
      false
    fi
  done
}
prepare() {
set -u
cd "${_srcdir}"
_install_check
if [ "${_amandauid}" -ge 1000 ]; then
echo "This package won't create UID >= 1000"
set +u
false
fi
# rm -r 'packaging' # cleaner path listings, crashes make
# grep -shroe '/[a-z][a-z/]*/' | grep -e 'etc\|usr\|var' | sort -u
# Get rid of sbin
sed -e 's:/sbin/:/bin/:g' -i 'example/amanda.conf.in' 'example/template.d/dumptypes'
# Lots of confusion where the amanda home folder is
if [ "${_amhome}" != '/var/lib/amanda' ]; then
local _amhfiles
readarray -t _amhfiles <<<"$(grep -slire 'LOCALSTATEDIR.*/lib/amanda')"
sed -e '/LOCALSTATEDIR/I s:/lib/amanda:'"${_amhome#/var}:g" -i "${_amhfiles[@]}"
readarray -t _amhfiles <<<"$(grep -sliFre '/var/lib/amanda')"
sed -e "s:/var/lib/amanda:${_amhome}:g" -i "${_amhfiles[@]}"
fi
local _hfiles
readarray -t _hfiles <<<"$(grep -slrFe '/var/amanda/')"
sed -e "s:/usr/local/var/amanda:${_amhome}:g" -e "s:/var/amanda:${_amhome}:g" -i "${_hfiles[@]}"
readarray -t _hfiles <<<"$(grep -slrFe '/usr/local/etc/amanda')"
sed -e "s:/usr/local/etc/amanda:${_ametc}:g" -i "${_hfiles[@]}"
sed -e '/chg-disk/ s:/@prefix@::g' -i 'example/template.d/amanda-harddisk.conf.in'
# Set the default auth to ssh if bsdtcp is not installed
if [ -z "${_opt_bsd}" ]; then
#cp -p 'server-src/amserverconfig.pl' 'server-src/amserverconfig.Arch.pl' # for diff comparison
sed -e '/print CONF/ s:bsdtcp:ssh:g' -i 'server-src/amserverconfig.pl'
#cp -p 'server-src/amaddclient.pl' 'server-src/amaddclient.Arch.pl'
sed -e '/^\$auth=/ s:bsdtcp:ssh:g' -i 'server-src/amaddclient.pl'
fi
# Amend the tapetype file. Mark each line to be written with a >. Add a blank line after each entry.
pushd "${srcdir}" > /dev/null
sed -e '1i # Do not modify this file. It is overwritten on every upgrade' -i "${_srcdir}/example/template.d/tapetypes"
echo -e "# Added by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR\n# https://aur.archlinux.org/\n" >> "${_srcdir}/example/template.d/tapetypes"
sed -n -e 's:^>\(.*\)$:\1:p' "${_tapetypes[@]}" >> "${_srcdir}/example/template.d/tapetypes"
popd > /dev/null
# Fix xinetd files. Our xinetd file is fixed in package()
sed -e "/^\s*user\s\+=/ s:amandabackup:${_amandauser}:g" \
-e "/^\s*group\s\+=/ s:disk:${_amandagroup}:g" \
-i 'example/xinetd.'*
# Various fixes
#cp -p 'example/template.d/advanced.conf.in' 'example/template.d/advanced.conf.in.Arch'
sed -e 's:@CONFIG_DIR/:@CONFIG_DIR@/:g' \
-e 's:DailySet1:@DEFAULT_CONFIG@:g' \
-e '/^netusage/ s:8000 :80000 :g' \
-i 'example/template.d/advanced.conf.in'
set +u
}
build() {
set -u
#_testAmandaUserGroup
cd "${_srcdir}"
_install_check
if [ ! -s 'Makefile' ]; then
local _opts=()
if [ ! -z "${_opt_bsd}" ]; then
_opts+=("--with-bsd${_opt_bsd}-security")
fi
# There are configure flags to install only the client or server, but I don't see any reason to.
# Amanda's handling of /etc is so broken that we must specify it 3 times and fix it in package()
MT='/usr/bin/mt-st' \
CFLAGS="${CFLAGS} -g -rdynamic" \
CXXFLAGS="${CXXFLAGS} -g -rdynamic" \
./configure "${_opts[@]}" \
--prefix='/usr' \
--sbindir='/usr/bin' \
--libexecdir="${_amlibexec}" \
--sysconfdir='/etc' \
--localstatedir='/var' \
--with-configdir="${_ametc}" \
--with-security-file="${_amsecurity}" \
--with-gnutar-listdir="${_amhome}/gnutar-lists" \
--mandir='/usr/share/man' \
--with-user="${_amandauser}" \
--with-group="${_amandagroup}" \
--with-ipv6 \
--with-ssh-security \
--with-amandates="${_amhome}/amandates" \
--with-tmpdir="/tmp/amandabackup-$$"
! grep -F $'/usr/var\n/usr/etc' 'config.log' || echo "{}"
fi
local _nproc="$(nproc)"; _nproc=$((_nproc>8?8:_nproc))
nice make -j "${_nproc}" # not using -s helps
set +u
}
package() {
set -u
#_testAmandaUserGroup
cd "${_srcdir}"
_install_check
# Obnoxious installer expects fakeroot. We'll capture chown & chmod commands to place them in .install
# A beneficial side affect is that improper permissions and owership will be fixed on every install/upgrade.
local _aminstall="${pkgdir}${_amlibexec}/amanda/amanda-install.sh"
install -Dpm755 <(cat << EOF
#!/bin/bash
echo " chown\$(printf ' "%s"' "\${@}")" >> '${_aminstall}'
EOF
) "${pkgdir}/chown"
# Capture suid chmod, passthru normal chmod
install -Dpm755 <(cat << EOF
#!/bin/bash
if [ "\$1" = '644' ] || [ "\$1" = '755' ]; then
# Pass through standard chmod
/usr/bin/chmod "\${@}"
else
echo " chmod\$(printf ' "%s"' "\${@}")" >> '${_aminstall}'
fi
EOF
) "${pkgdir}/chmod"
# Capture commands go into this script
install -Dpm744 <(cat << EOF
#!/bin/sh
# Created by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# This is run by the package installer which can't be modified
# to have all the current settings
set -e
set -u
# commands captured from Makefile
_fn_chown_makefile() {
EOF
) "${_aminstall}"
# Install the compiled output using the PATH to capture chown/chmod commands
PATH="${pkgdir}:${PATH}" \
make -j1 DESTDIR="${pkgdir}" install # not using -s helps
# This file is put in the wrong place
if [ ! -s "${pkgdir}${_amsecurity}" ]; then
mv "${pkgdir}/etc/amanda/${_amsecurity##*/}" "${pkgdir}${_amsecurity}"
fi
# Clean up capture and scripts
rm "${pkgdir}/chown" "${pkgdir}/chmod"
sed -e '# Trim pkgdir for .install' \
-e "s:${pkgdir}::g" \
-e '# Convert amanda group to var' \
-e 's/"root:'"${_amandagroup}"'"/"root:${_amandagroup}"/g' \
-i "${_aminstall}"
echo -e '}\n\n# Commands from PKGBUILD\n# must run before makefile above\n\n_fn_chown_PKGBUILD() {' >> "${_aminstall}"
# Create amanda log dir, not much used
install -dm0770 "${pkgdir}${_amlog}/"
echo ' chmod 770 "${_amlog}"' >> "${_aminstall}"
echo ' chown "root:${_amandagroup}" "${_amlog}"' >> "${_aminstall}"
# Create amanda home dir
install -dm0700 "${pkgdir}${_amhome}/"
echo ' chmod 700 "${_amhome}"' >> "${_aminstall}"
install -dm0700 "${pkgdir}${_amhome}/.ssh/"
echo ' chmod 700 "${_amhome}/.ssh/"' >> "${_aminstall}"
echo ' find "${_amhome}/.ssh" -type "f" -exec chmod 600 "{}" "+"' >> "${_aminstall}"
# Set permission on config folders
install -dm0755 "${pkgdir}${_ametc}"
install -dm0750 "${pkgdir}${_ametc}/MyConfig"
if [ "${_ametc}" = '/etc' ] || [ "${_ametc}" = '/etc/' ]; then
echo 'The folder specified for ${_ametc} will destroy the system! Cant build package!'
set +u
false
else
echo ' find "${_ametc}" -mindepth 1 -maxdepth 1 -type "d" -exec chown -R "${_amandauser}:${_amandagroup}" "{}" ";" -exec chmod 750 "{}" ";"' >> "${_aminstall}"
fi
echo ' chown "root:${_amandagroup}" "${_ametc}"' >> "${_aminstall}"
echo ' chown "root:root" "${_amsecurity}"' >> "${_aminstall}"
echo ' chmod 644 "${_amsecurity}"' >> "${_aminstall}"
ln -s '/usr/share/amanda/example/' "${pkgdir}${_ametc}/example"
ln -s '/usr/share/amanda/example/' "${pkgdir}${_amhome}/example"
ln -s '/usr/share/amanda/template.d/' "${pkgdir}${_ametc}/template.d"
ln -s '/usr/share/amanda/template.d/' "${pkgdir}${_amhome}/template.d"
# If amada-security.conf is placed in /etc/amanda then we must use
# a more restrictive permissions setup and a helper script to create
# the folder. amserverconfig doesn't work.
local _helpertext=''
if [ "${_amsecurity#/etc/amanda/}" != "${_amsecurity}" ]; then
echo ' chmod 755 "${_ametc}"' >> "${_aminstall}"
_helpertext=" with ${_ametc}/mkconfig.sh"
install -Dm755 <(cat << EOF
#!/bin/bash
# Created by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# This file is necessary because there are no permissions that will allow
# Amanda to backup and also allow Amanda to create folders here.
set -e
set -u
if [ "\$#" -eq 0 ] || [ "\${EUID}" -ne 0 ]; then
echo "Example: sudo ./mkconfig.sh MyConfig"
echo "This will create or fix the specified config folder with correct permissions"
echo "for the Amanda user"
else
while [ "\$#" -ne 0 ]; do
mkdir "${_ametc}/\$1" || :
chmod 750 "${_ametc}/\$1"
chown -R "${_amandauser}:${_amandagroup}" "${_ametc}/\$1"
shift
done
fi
EOF
) "${pkgdir}${_ametc}/mkconfig.sh"
else
echo ' chmod 775 "${_ametc}"' >> "${_aminstall}"
chmod 775 "${pkgdir}${_ametc}"
_helpertext=" with amserverconfig"
fi
# Install xinetd configuration
if [ ! -z "${_opt_bsd}" ]; then
install -Dpm0644 "${srcdir}/xinetd.amanda.${_opt_bsd}" "${pkgdir}/etc/xinetd.d/${pkgname}"
pushd "${srcdir}" >> /dev/null
local _xinetd
for _xinetd in "xinetd.${pkgname}".*; do
install -Dpm0644 "${_xinetd}" "${pkgdir}/etc/xinetd.d/${_xinetd#xinetd\.}"
done
popd > /dev/null
sed -e "/^\s*user\s\+=/ s:amanda:${_amandauser}:g" \
-e "/^\s*group\s\+=/ s:storage:${_amandagroup}:g" \
-e "/^\s*server\s\+=/ s:/usr/libexec:${_amlibexec}:g" \
-i "${pkgdir}/etc/xinetd.d/${pkgname}"*
fi
# Can't put ${pkgver} on this one
install -Dm0600 <(cat << EOT
# Created by ${pkgname} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# http://wiki.zmanda.com/index.php/How_To:Configure_bsdtcp_authentication
# This is where amanda works out what remote connections to allow in the format
# of <host> <username> <command>
#
# server1.example.com amanda amdump
EOT
) "${pkgdir}/${_amhome}/.amandahosts"
echo ' chmod 600 "${_amhome}/.amandahosts"' >> "${_aminstall}"
# create some of the runtime files amanda requires so they are created with
# the correct ownership and permissions.
install -Dm0664 /dev/null "${pkgdir}${_amhome}/amandates" # https://wiki.zmanda.com/index.php/Amanda:What_is_the_%27amandates%27_file_for%3F
#install -Dm0664 /dev/null "${pkgdir}${_amhome}/dumpdates"
install -dm755 "${pkgdir}${_amhome}/gnutar-lists"
echo ' chown -R "${_amandauser}:${_amandagroup}" "${_amhome}"' >> "${_aminstall}"
# Prevent non amanda users from running these and causing unexpected errors.
# These chmod must be run after the ones from makefile so they take precedence
set +u; msg 'Fixing executable permissions'; set -u
local _amps
pushd "${pkgdir}/usr/bin" > /dev/null
readarray -t _amps <<<"$(find -mindepth 1 -maxdepth 1 -type 'f' -perm /111)"
chmod 754 "${_amps[@]}"
popd > /dev/null
local _amprogs="${_amps[*]#\./}"
echo "${_amprogs}"
_amprogs="${_amprogs// /,}"
echo ' chown "root:${_amandagroup}" "/usr/bin"'"/{${_amprogs}}" >> "${_aminstall}"
echo ' chmod 754 "/usr/bin"'"/{${_amprogs}}" >> "${_aminstall}"
# Set up Wiki local demo to check with sudo -u 'amandabackup' amcheck 'MyConfig'
install -dm0750 "${pkgdir}${_amdump}"
install -dm0755 "${pkgdir}${_amhome}/holdings/MyConfig"
install -dm0755 "${pkgdir}${_amhome}/vtapes/MyConfig"/slot{1,2,3,4}
install -dm0755 "${pkgdir}${_ametc}/MyConfig"/{curinfo,index}
echo ' chown -R "${_amandauser}:${_amandagroup}" "${_amdump}"' >> "${_aminstall}"
# Using pkgver in files that are backed up would trigger .pacnew constantly
install -Dm0640 <(cat << EOF
# Created by ${pkgname} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# You can extend this to be your backup.
# It would be better to create another backup${_helpertext}
# and let this stay a demo.
# Local backup demo
# http://wiki.zmanda.com/index.php/GSWA/Build_a_Basic_Configuration
# The Wiki has old folder names. The folders here match what the amanda programs use.
# Examples: /usr/share/amanda/template.d/
org "MyConfig"
infofile "${_ametc}/MyConfig/curinfo"
logdir "${_ametc}/MyConfig"
indexdir "${_ametc}/MyConfig/index"
dumpuser "${_amandauser}"
tpchanger "chg-disk:${_amhome}/vtapes/MyConfig"
labelstr "MyData[0-9][0-9]"
autolabel "MyData%%" EMPTY VOLUME_ERROR
tapecycle 4
dumpcycle 3 days
amrecover_changer "changer"
tapetype "TEST-TAPE"
define tapetype TEST-TAPE {
length 100 mbytes
filemark 4 kbytes
}
define dumptype simple-gnutar-local {
auth "local"
compress none
program "GNUTAR"
}
holdingdisk hd1 {
directory "${_amhome}/holdings/MyConfig"
use 50 mbytes
chunksize 1 mbyte
}
# Remote backup demo
# This requires some ssh key management. See Wiki.
# http://wiki.zmanda.com/index.php/GSWA/Backing_Up_Other_Systems
# see disklist for the matching line to enable
#define dumptype simple-gnutar-remote {
# auth "ssh"
# ssh_keys "/etc/amanda/MyConfig/ssh-key"
# compress none
# program "GNUTAR"
#}
EOF
) "${pkgdir}${_ametc}/MyConfig/amanda.conf"
install -Dm0640 <(cat << EOF
# Created by ${pkgname} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# Local backup demo
# http://wiki.zmanda.com/index.php/GSWA/Build_a_Basic_Configuration
localhost /etc simple-gnutar-local
# Remote backup demo
# http://wiki.zmanda.com/index.php/GSWA/Backing_Up_Other_Systems
# see amanda.conf for the matching lines to enable
# euclid.amanda.org /etc simple-gnutar-remote
EOF
) "${pkgdir}${_ametc}/MyConfig/disklist"
# List of backup items for _aminstall
local _pacsavea=("${backup[@]/#/\/}")
_pacsavea=("${_pacsavea[@]/%/.pacsave}")
local _pacsave="$(printf " '%s'" "${_pacsavea[@]}")"
# Finish _aminstall
cat >> "${_aminstall}" << EOF
}
_fn_post_install() {
echo "Test the system with"
echo " sudo -u '\${_amandauser}' amcheck 'MyConfig'"
echo " su -c \"su \${_amandauser} -c 'amcheck MyConfig'\""
}
_fn_post_upins() {
if [ -z "\$(getent group "\${_amandagroup}")" ]; then
groupadd -g "\${_amandagid}" "\${_amandagroup}"
echo "amanda: Group \${_amandagroup} \${_amandagid} added"
fi
if [ -z "\$(getent passwd "\${_amandauser}")" ]; then
# Amanda is in the disk group so it can image drives using xfsrestore and other tools
useradd -u "\${_amandauid}" -g "\${_amandagroup}" -G "storage,disk" -m -d "\${_amhome}" -s '/bin/bash' -c 'Amanda Backup Daemon' "\${_amandauser}"
echo "amanda: User \${_amandauser} \${_amandauid} added"
fi
if ! grep -q '^[a-z]\+tar:' "\${_amsecurity}"; then
echo "amanda warning: no archivers are enabled in \${_amsecurity}"
fi
rm -f '/usr/bin/amanda-uninstall.sh'
}
_fn_pre_remove() {
set -u
local _uid
_uid="\$(id -u "\${_amandauser}")"
if [ \$? -eq 0 ]; then
if [ "\${_uid}" -lt 1000 ]; then
userdel "\${_amandauser}" || :
echo "amanda: User \${_amandauser} deleted"
# Mysteriously, userdel also removes the group making groupdel fail
groupdel "\${_amandagroup}" 2>/dev/null || :
echo "amanda: Group \${_amandagroup} deleted"
else
echo "uid of amanda is \${_uid}, >=1000, can't delete"
fi
fi
set +u
}
_fn_post_remove() {
local _cmd='#!/bin/sh
_true=""
if [ "\${EUID}" -ne 0 ]; then
echo "This must be run as root"
echo "You may want to look through the folders to be deleted for anything valuable"
_true="true"
else
echo "This is likely to destroy backup configurations, encryption keys, and ssh keys."
fi
_delself=1
'
rm -f '/usr/bin/amanda-uninstall.sh'
local _file
for _file in${_pacsave}; do
if [ -f "\${_file}" ]; then
echo -n "\${_cmd}" >> '/usr/bin/amanda-uninstall.sh'; _cmd=''
echo "echo rm -f '\${_file}'" >> '/usr/bin/amanda-uninstall.sh'
echo '\${_true} '"rm -i '\${_file}'" >> '/usr/bin/amanda-uninstall.sh'
echo "if [ -f '\${_file}' ]; then" >> '/usr/bin/amanda-uninstall.sh'
echo ' _delself=0' >> '/usr/bin/amanda-uninstall.sh'
echo 'fi' >> '/usr/bin/amanda-uninstall.sh'
fi
done
local _folder
for _folder in "\${_amhome}" "\${_ametc}" "\${_amdump}" "\${_amlog}"; do
if [ -d "\${_folder}" ]; then
echo -n "\${_cmd}" >> '/usr/bin/amanda-uninstall.sh'; _cmd=''
echo "echo rm -r '\${_folder}'" >> '/usr/bin/amanda-uninstall.sh'
echo '\${_true} '"rm -I -r '\${_folder}'" >> '/usr/bin/amanda-uninstall.sh'
echo "if [ -d '\${_folder}' ]; then" >> '/usr/bin/amanda-uninstall.sh'
echo ' _delself=0' >> '/usr/bin/amanda-uninstall.sh'
echo 'fi' >> '/usr/bin/amanda-uninstall.sh'
fi
done
if [ -s '/usr/bin/amanda-uninstall.sh' ]; then
chmod 755 '/usr/bin/amanda-uninstall.sh'
echo 'if [ "\${_delself}" -ne 0 ]; then' >> '/usr/bin/amanda-uninstall.sh'
echo ' \${_true} '"echo 'Amanda folders removed'" >> '/usr/bin/amanda-uninstall.sh'
echo ' \${_true} '"echo 'Amanda uninstaller removed'" >> '/usr/bin/amanda-uninstall.sh'
echo ' \${_true} rm -f "\$0"' >> '/usr/bin/amanda-uninstall.sh'
echo 'fi' >> '/usr/bin/amanda-uninstall.sh'
echo "amanda: use '/usr/bin/amanda-uninstall.sh' to remove all Amanda config files and folders"
fi
}
# Vars from PKGBUILD
# The .install sets a flag to bypass the functions and source the desired variables for upgrades
if [ ! -z "\${_GetOVars:-}" ]; then
# These variables are from before the upgrade
\${_GetOLocal:=declare} _Oamandauser='${_amandauser}'
\${_GetOLocal} _Oamandagroup='${_amandagroup}'
\${_GetOLocal} _Oamandauid='${_amandauid}'
\${_GetOLocal} _Oamandagid='${_amandagid}'
\${_GetOLocal} _Oamhome='${_amhome}'
\${_GetOLocal} _Oametc='${_ametc}'
\${_GetOLocal} _Oamdump='${_amdump}'
\${_GetOLocal} _Oamsecurity='${_amsecurity}'
\${_GetOLocal} _Oamlog='${_amlog}'
else
# These variables are from after the upgrade
\${_GetOLocal:=declare} _amandauser='${_amandauser}'
\${_GetOLocal} _amandagroup='${_amandagroup}'
\${_GetOLocal} _amandauid='${_amandauid}'
\${_GetOLocal} _amandagid='${_amandagid}'
\${_GetOLocal} _amhome='${_amhome}'
\${_GetOLocal} _ametc='${_ametc}'
\${_GetOLocal} _amdump='${_amdump}'
\${_GetOLocal} _amsecurity='${_amsecurity}'
\${_GetOLocal} _amlog='${_amlog}'
if [ -z "\${_GetVars:-}" ]; then
if [ "\$#" -eq 0 ]; then
echo 'The package installer runs this, not you!'
echo 'Nothing to see here, Move along!'
else
while [ "\$#" -gt 0 ]; do
_fn_\$1
shift
done
fi
fi
fi
EOF
# Install the licence
install -Dpm444 'COPYRIGHT' -t "${pkgdir}/usr/share/licences/${pkgname}/"
set +u
}
set +u
# vim:set ts=2 sw=2 et:
| true |
dd0084aad194dd3838507f061739ace0da00a5c2 | Shell | Rcomian/bunyan-rotating-file-stream | /test.sh | UTF-8 | 860 | 3.71875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Load nvm so Node versions can be switched per test run.
. ~/.nvm/nvm.sh
. ~/.bashrc

# Node major versions to test against by default.
declare -a VERSIONS=("14" "15" "16" "18")

# One argument: restrict to a single version, skip the stress test.
if [ $# -eq 1 ]
then
    VERSIONS=("$1")
    echo "Single version, no stress test"
fi

# Two arguments: single version plus an API key, enabling the stress test.
if [ $# -eq 2 ]
then
    VERSIONS=("$1")
    APIKEY=$2
    echo "Single version, running stress"
fi

echo "${VERSIONS[@]}"

for nodeversion in "${VERSIONS[@]}"
do
    nvm install "$nodeversion"

    # Fresh dependency install for each Node version.
    rm -Rf node_modules
    nvm exec "$nodeversion" npm install

    echo
    echo "Test functionality"
    # Check the command directly instead of testing $? afterwards.
    if time nvm exec "$nodeversion" node test/functionality
    then
        echo "Passed"
    else
        echo "Aborting"
        exit 1
    fi

    echo
    echo "Test performance"
    time nvm exec "$nodeversion" node test/performance

    if [ $# -eq 2 ]
    then
        echo
        echo "Stress test starting"
        nvm exec "$nodeversion" node test/stress "${APIKEY}"
    fi
done
| true |
6642883a7953b156dee5a377ffbebac522831807 | Shell | Andlon/sonetta-deploy | /copydeps.sh | UTF-8 | 1,129 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Collect the Qt runtime dependencies of Sonetta into its release tree.
# Abort on the first failed copy so a broken deploy is noticed immediately.
set -e

QT=qt5-install
SONETTA=sonetta/release

# Copy Qt libs (one loop instead of eight copy-pasted cp lines)
mkdir -p "$SONETTA/libs"
for lib in Quick Multimedia Qml Network Gui Core Svg DBus; do
    cp "$QT/lib/libQt5${lib}.so.5" "$SONETTA/libs/libQt5${lib}.so.5"
done

# Copy platform plugins
mkdir -p "$SONETTA/platforms"
cp "$QT/plugins/platforms/libqxcb.so" "$SONETTA/platforms/libqxcb.so"

# Copy Qt plugins
mkdir -p "$SONETTA/plugins"
for plugin in audio imageformats generic mediaservice platforminputcontexts; do
    cp -R "$QT/plugins/$plugin" "$SONETTA/plugins/"
done

# Copy Qt qml assets
mkdir -p "$SONETTA/quick"
cp -R "$QT"/qml/* "$SONETTA/quick"

# Copy libspotify
cp /usr/local/lib/libspotify.so.12 "$SONETTA/libs/libspotify.so.12"
| true |
00f25eeb73c93f5c37b891a718e16a411a1d5e41 | Shell | MMunibas/FittingWizardWeb | /scripts/charmm-ti/calc-deltaG-hydr | UTF-8 | 3,825 | 4.0625 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Initialize variables
bothdirections=0
nsteps=20000
lambda_step=0.1
direnv=("." ".")
currentfilename=
returnValue=
function showHelp
{
  # Print usage and option summary; the defaults shown reflect the current
  # values of the nsteps / lambda_step globals.
  cat << EOF
Usage:
$0 [-b] [-n N] [-l N] [-v dir] [-w dir]

 -b: include backward simulations
 -n: nsteps (default: $nsteps)
 -l: lambda window spacing (default: $lambda_step)
 -v: directory containing vacuum simulations
 -w: directory containing water simulations
EOF
}
function getFileName
{
  # Compose the name of one TI output file and store it in the global
  # 'currentfilename'.
  #   $1: environment index into direnv[] (0 = vacuum, 1 = water)
  #   $2: simulation type (vdw, pcsg or mtp)
  #   $3: direction tag (f = forward, b = backward)
  # Globals nsteps and lambda_step are folded into the file name as well.
  env="${direnv[$1]}"
  simtype="$2"
  filenamedirection="$3"
  printf -v currentfilename '%s/ti.%s.%s.%s.%s.out' \
    "$env" "$simtype" "$filenamedirection" "$nsteps" "$lambda_step"
  return
}
function existsOrDie
{
  # Abort the whole script unless the file named by the global
  # 'currentfilename' exists.
  # Fixes: the path is now quoted (the old unquoted test broke on paths
  # containing spaces), and the function returns 0 on success (the old
  # "[ ! -f ... ] && ..." chain returned 1 even when the file existed).
  if [ ! -f "$currentfilename" ]; then
    echo "Missing file $currentfilename"
    exit 1
  fi
}
# Read one TI free-energy contribution from a simulation output file.
#   $1: environment index (0 = vacuum, 1 = water)
#   $2: simulation type ("vdw", "pcsg" or "mtp")
#   $3: direction ("f" forward, "b" backward)
# Exits the script if the file is missing or did not terminate normally;
# otherwise stores the extracted value (5th field of the second-to-last
# line) in the global 'returnValue'.
function extractDataFromFile
{
  simtype=$2
  filenamedirection=$3
  getFileName $1 $simtype $filenamedirection
  existsOrDie
  # The last line of a good run must announce normal termination.
  tail -n 1 $currentfilename | grep "Normal termination" > /dev/null
  [ $? -eq 1 ] && \
    echo "File $currentfilename did not complete successfully" && \
    exit 1
  returnValue=`tail -n 2 $currentfilename | head -n 1 | awk '{print $5}'`
  return
}
# ---- Command-line parsing -------------------------------------------------
# NOTE(review): the optstring "h?:n:l:v:w:b" declares '?' as an option that
# takes an argument, while getopts also reports unknown options as '?'; the
# h|\?) arm therefore handles both -h and parse errors -- confirm intended.
OPTIND=1
while getopts "h?:n:l:v:w:b" opt
do
    case "$opt" in
    h|\?)
        showHelp
        exit 0
        ;;
    n)
        # number of MD steps per lambda window
        nsteps=$OPTARG
        echo "option nsteps: $nsteps"
        ;;
    l)
        # spacing between lambda windows
        lambda_step=$OPTARG
        echo "option lambda window spacing: $lambda_step"
        ;;
    v)
        # directory holding the vacuum-phase simulations (index 0)
        direnv[0]=$OPTARG
        echo "option vacuum directory: ${direnv[0]}"
        ;;
    w)
        # directory holding the water-phase simulations (index 1)
        direnv[1]=$OPTARG
        echo "option water directory: ${direnv[1]}"
        ;;
    b)
        # also evaluate backward simulations and average both directions
        bothdirections=1
        echo "option include backward simulations turned on"
        ;;
    esac
done
shift $((OPTIND-1)) # Shift off the options
# ---- Forward transformations ----------------------------------------------
# Accumulate deltaG(hydration) = deltaG(water) - deltaG(vacuum) for both
# electrostatic models: point charges (pc) and multipoles (mtp).  Vacuum
# contributions enter with a negative sign, water with a positive one; the
# vdw term is shared by both models.
deltaGpcf="0.0"
deltaGmtpf="0.0"
# Options for filename:
# Vacuum is 0; water is 1
# "vdw", "pcsg", "mtp"
# "f" or "b"
extractDataFromFile 0 "vdw" "f"
deltaGpcf=`echo "$deltaGpcf - $returnValue" | bc -l`
deltaGmtpf=`echo "$deltaGmtpf - $returnValue" | bc -l`
extractDataFromFile 0 "pcsg" "f"
deltaGpcf=`echo "$deltaGpcf - $returnValue" | bc -l`
extractDataFromFile 0 "mtp" "f"
deltaGmtpf=`echo "$deltaGmtpf - $returnValue" | bc -l`
extractDataFromFile 1 "vdw" "f"
deltaGpcf=`echo "$deltaGpcf + $returnValue" | bc -l`
deltaGmtpf=`echo "$deltaGmtpf + $returnValue" | bc -l`
extractDataFromFile 1 "pcsg" "f"
deltaGpcf=`echo "$deltaGpcf + $returnValue" | bc -l`
extractDataFromFile 1 "mtp" "f"
deltaGmtpf=`echo "$deltaGmtpf + $returnValue" | bc -l`
echo ""
# ---- Optional backward transformations ------------------------------------
# Signs are inverted with respect to the forward run; forward and backward
# estimates are then averaged and their spread reported as the error bar.
if [ $bothdirections -eq 1 ]; then
  deltaGpcb="0.0"
  deltaGmtpb="0.0"
  # Options for filename:
  # Vacuum is 0; water is 1
  # "vdw", "pcsg", "mtp"
  # "f" or "b"
  extractDataFromFile 0 "vdw" "b"
  deltaGpcb=`echo "$deltaGpcb + $returnValue" | bc -l`
  deltaGmtpb=`echo "$deltaGmtpb + $returnValue" | bc -l`
  extractDataFromFile 0 "pcsg" "b"
  deltaGpcb=`echo "$deltaGpcb + $returnValue" | bc -l`
  extractDataFromFile 0 "mtp" "b"
  deltaGmtpb=`echo "$deltaGmtpb + $returnValue" | bc -l`
  extractDataFromFile 1 "vdw" "b"
  deltaGpcb=`echo "$deltaGpcb - $returnValue" | bc -l`
  deltaGmtpb=`echo "$deltaGmtpb - $returnValue" | bc -l`
  extractDataFromFile 1 "pcsg" "b"
  deltaGpcb=`echo "$deltaGpcb - $returnValue" | bc -l`
  extractDataFromFile 1 "mtp" "b"
  deltaGmtpb=`echo "$deltaGmtpb - $returnValue" | bc -l`
  avgpc=`echo "($deltaGpcf+$deltaGpcb)*0.5" | bc -l`
  avgmtp=`echo "($deltaGmtpf+$deltaGmtpb)*0.5" | bc -l`
  # NOTE(review): only the second squared deviation is divided by 2 here;
  # for a two-sample spread one would expect (((f-avg)^2+(b-avg)^2)/2.)
  # inside the sqrt -- confirm the intended formula.
  stdpc=`echo "sqrt(($deltaGpcf-1.*$avgpc)^2+($deltaGpcb-1.*$avgpc)^2/2.)" | bc -l`
  stdmtp=`echo "sqrt(($deltaGmtpf-1.*$avgmtp)^2+($deltaGmtpb-1.*$avgmtp)^2/2.)" | bc -l`
  printf "PC : %7.4f +/- %7.4f kcal/mol\n" $avgpc $stdpc
  printf "MTP: %7.4f +/- %7.4f kcal/mol\n" $avgmtp $stdmtp
else
  printf "PC : %7.4f kcal/mol\n" $deltaGpcf
  printf "MTP: %7.4f kcal/mol\n" $deltaGmtpf
fi
ff1f9a786b97fc37af4a09cc13742c25a8763575 | Shell | ODEX-TOS/packages | /docbook-xsl/repos/extra-any/docbook-xsl.install | UTF-8 | 1,072 | 3.15625 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | _xmlcatalog() {
xmlcatalog --noout "$@" etc/xml/catalog
}
# Register the stylesheet directories in the system XML catalog so XSLT
# tools can resolve docbook.org / sourceforge URIs to the local files.
# $1 is the full pkgver-pkgrel string; ${1%-*} strips the -pkgrel suffix.
post_install() {
  [[ -f etc/xml/catalog ]] || _xmlcatalog --create
  local ver x new=${1%-*}
  # Map both the exact version and the floating "current" URIs.
  for ver in $new current; do
    for x in rewriteSystem rewriteURI; do
      # cdn.docbook.org uses "xsl" for the namespaced and "xsl-nons" for the
      # non-namespaced stylesheets; sourceforge uses "xsl-ns"/"xsl" instead.
      _xmlcatalog --add $x http://cdn.docbook.org/release/xsl/$ver \
        /usr/share/xml/docbook/xsl-stylesheets-$new
      _xmlcatalog --add $x http://cdn.docbook.org/release/xsl-nons/$ver \
        /usr/share/xml/docbook/xsl-stylesheets-$new-nons
      _xmlcatalog --add $x http://docbook.sourceforge.net/release/xsl-ns/$ver \
        /usr/share/xml/docbook/xsl-stylesheets-$new
      _xmlcatalog --add $x http://docbook.sourceforge.net/release/xsl/$ver \
        /usr/share/xml/docbook/xsl-stylesheets-$new-nons
    done
  done
}
post_upgrade() {
  # An upgrade is handled as: deregister the old version's catalog entries,
  # then register the new version's ($1 = new pkgver-rel, $2 = old).
  local _new="$1" _old="$2"
  post_remove "$_old"
  post_install "$_new"
}
# Deregister the catalog entries of the version being removed ($1).
post_remove() {
  local old=${1%-*}
  _xmlcatalog --del /usr/share/xml/docbook/xsl-stylesheets-$old
  # The separate non-namespaced directory was only introduced with package
  # release 1.79.2-5, so only deregister it for versions at or above that
  # (vercmp is pacman's version comparator).
  if (( $(vercmp $1 1.79.2-5) >= 0 )); then
    _xmlcatalog --del /usr/share/xml/docbook/xsl-stylesheets-$old-nons
  fi
}
# vim:set ft=sh sw=2 et:
| true |
8c992fc1e71d54571d20567b69343966fae449af | Shell | joietej/validator.js | /bin/build.sh | UTF-8 | 244 | 3.171875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/env bash
# Require a version argument; exit non-zero on misuse so callers (CI, make)
# can detect the failure, and send the usage hint to stderr.
if [[ -z "${1:-}" ]]
then
    echo "You must give a version number. eg: ./bin/build.sh 1.0.0" >&2
    exit 1
else
    echo "** building validator.min.js version " "$1"
    # Quote the version so an argument with spaces cannot split into
    # multiple minify arguments.
    ruby ./bin/minify validator.js validator.min.js "$1" --force
    echo "    done!"
fi
0230e3d244c71d5eb56c2d66ac121a404eb2554f | Shell | gremjua/jsonplaceholder-tests | /.husky/post-commit | UTF-8 | 164 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Source husky's runner helpers from the hook's own directory.
. "$(dirname "$0")/_/husky.sh"

# If a ".commit" marker file exists (presumably dropped by an earlier hook
# when the docs need regenerating -- confirm against the pre-commit hook),
# rebuild the docs and amend them into the commit that just happened.
# Removing the marker first prevents infinite recursion when the amend
# triggers this post-commit hook again; -C HEAD reuses the original commit
# message and --no-verify skips the verifying hooks for the amend.
if [ -e .commit ]
then
    rm .commit
    npm run docs
    git add docs
    git commit --amend -C HEAD --no-verify
fi
| true |
6a23e5b53cd54d3dd251119230e7c24f1907a8ea | Shell | AnthonyWlodarski/TLDP | /Bash-Prog-Intro-HOWTO/commandLineArguments.sh | UTF-8 | 179 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# $0 is the path of the currently executing script.
if [ -z "$1" ]; then
    clear
    echo "usage: $0 directory"
    exit
fi
# $1, $2, $N... refer to the arguments passed in; quote them so a directory
# name containing spaces or glob characters is passed to ls as one argument.
ls -al "$1"
| true |
dfe501f4062c6ce55d9023e90ee824e78bf2a2d2 | Shell | DVS-Lab/istart-data-archived | /code/run_fmriprep.sh | UTF-8 | 289 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Launch fmriprep for each subject, keeping at most NCORES jobs in flight.
# Loop-invariant settings are hoisted out of the loop; the pointless
# 'set -- $subrun; sub=$1' dance is replaced by iterating directly.
script=code/fmriprep.sh
NCORES=8  # maximum number of concurrent fmriprep jobs

for sub in "1240" "1242" "1243" "1245" "1247" "1248" "1249" "1253"; do
	# Throttle: block until fewer than NCORES instances of the script run
	# (pgrep -f matches full command lines, replacing ps|grep|grep|wc).
	while [ "$(pgrep -cf "$script")" -ge "$NCORES" ]; do
		sleep 1s
	done
	bash "$script" "$sub" &
	# Stagger launches so instances do not initialize simultaneously.
	sleep 5s
done
| true |
e0cbfb47c85e40534f05f0a68707348c0ccf5122 | Shell | HamletGhost/HomeScripts | /bin/devel/RotateFiles.sh | UTF-8 | 3,964 | 4.34375 | 4 | [] | no_license | #!/bin/bash
#
# Renames files with a numeric suffix
#
# Use with --help for usage instructions.
#
# Vesion history:
# 1.0 (petrillo@fnal.gov)
# first version
#
SCRIPTNAME="$(basename "$0")"
SCRIPTVERSION="1.0"
function help() {
  # Print the usage/help text to stdout.
  # FIX: corrected the user-facing typo "numbre" -> "number".
  cat <<-EOH
Renames files with a numeric suffix.
Usage:  ${SCRIPTNAME} [options] File [File ...]
The files are renamed into File.1, File.2, etc.
The original file names are printed on screen; this allows for example:
cp -a Original.txt "\$(${SCRIPTNAME} Backup.txt)"
Options:
-n Limit
if renaming goes beyond the limit, the volume with the highest number is
deleted
--padding=NCHAR , -p NCHAR
use 0-padded volume numbers with this padding
--quiet
do not print the original file name after renaming has happened
EOH
} # help()
isFlagSet() {
  # True when the variable named by $1 holds anything other than
  # an empty string or a string made only of zeros.
  local flag_name="$1"
  local stripped="${!flag_name//0}"
  [[ -n "$stripped" ]]
} # isFlagSet()
isFlagUnset() {
  # True when the variable named by $1 is empty or made only of zeros
  # (the logical negation of isFlagSet).
  local flag_name="$1"
  local stripped="${!flag_name//0}"
  [[ -z "$stripped" ]]
} # isFlagUnset()
# Print all arguments on stderr.
function STDERR() { echo "$*" >&2 ; }

# Print an error message on stderr (does not exit).
function ERROR() { STDERR "ERROR: $@" ; }

function FATAL() {
  # Print a fatal error message on stderr and exit with the given code.
  local Code="$1"
  shift
  STDERR "FATAL ERROR (${Code}): $*"
  exit $Code
} # FATAL()

function LASTFATAL() {
  # Abort with the previous command's exit code if it was non-zero.
  local Code="$?"
  # BUG FIX: the code and the message were concatenated ("$Code""$@"),
  # which handed FATAL a non-numeric exit code; pass them separately.
  [[ "$Code" != 0 ]] && FATAL "$Code" "$@"
} # LASTFATAL()
function PadNumber() {
  # Print $1 zero-padded to $2 characters; no padding when $2 is <= 0.
  local -i Number="$1"
  local -i Padding="$2"
  if [[ -n $Padding ]] && [[ $Padding -gt 0 ]]; then
    # BUG FIX: the '*' width consumed "$Number" as the field width and
    # left no argument for %d (printing only zeros); supply the width
    # (Padding) and then the value (Number).
    printf "%0*d" "$Padding" "$Number"
  else
    printf "%d" "$Number"
  fi
} # PadNumber()
function RotateFile() {
  # Rotate FilePath: FilePath.N -> FilePath.N+1 for every existing volume,
  # then FilePath -> FilePath.1. If Limit > 0 and the number of volumes
  # would exceed it, the highest-numbered volume is deleted first.
  # Prints the original file name on stdout unless BeQuiet is set.
  # Returns 2 when FilePath is not readable.
  local FilePath="$1"
  local -i Limit="$2"
  local -i Padding="$3"
  [[ -r "$FilePath" ]] || return 2
  # find the next free file
  local NextFree LastHeld
  local -i iFreeVolume=0
  while true ; do
    let ++iFreeVolume
    NextFree="${FilePath}.$(PadNumber "$iFreeVolume" "$Padding")"
    [[ -r "$NextFree" ]] || break
    # LastHeld tracks the highest-numbered volume that actually exists.
    LastHeld="$NextFree"
  done
  # if we have a limit and there are more files than this limit allows,
  # do not increase the number of files (i.e., delete the last one)
  if [[ -n "$Limit" ]] && [[ "$Limit" -gt 0 ]] && [[ $iFreeVolume -gt $Limit ]]; then
    rm -f "$LastHeld"
  fi
  # Shift volumes upward starting from the highest, so no rename
  # overwrites a volume that has not been moved yet.
  while [[ $iFreeVolume -ge 2 ]]; do
    local NewVolume="${FilePath}.$(PadNumber "$iFreeVolume" "$Padding")"
    local OldVolume="${FilePath}.$(PadNumber "$((--iFreeVolume))" "$Padding")"
    [[ -r "$OldVolume" ]] && mv "$OldVolume" "$NewVolume"
  done
  mv "$FilePath" "${FilePath}.$(PadNumber 1 "$Padding")"
  isFlagSet BeQuiet || echo "$FilePath"
} # RotateFile()
################################################################################
# --- option parsing -----------------------------------------------------
declare DoHelp=0 DoVersion=0 OnlyPrintEnvironment=0 NoLogDump=0
declare -i Limit=0
declare -i Padding=0

declare -i NoMoreOptions=0
declare -a Files
declare -i nFiles=0
# Walk the positional parameters by index so option values (-n N, -p N)
# can consume the following parameter.
for (( iParam = 1 ; iParam <= $# ; ++iParam )); do
  Param="${!iParam}"
  if ! isFlagSet NoMoreOptions && [[ "${Param:0:1}" == '-' ]]; then
    case "$Param" in
      ( '--help' | '-h' | '-?' ) DoHelp=1 ;;
      ( '--version' | '-V' ) DoVersion=1 ;;
      ( '--quiet' | '-q' ) BeQuiet=1 ;;
      ### behaviour options
      ( '-n' ) let ++iParam ; Limit="${!iParam}" ;;
      ( '-p' ) let ++iParam ; Padding="${!iParam}" ;;
      ( '--padding='* ) Padding="${Param#--*=}" ;;
      ### other stuff
      ( '-' | '--' )
        # Everything after '-'/'--' is treated as a file name.
        NoMoreOptions=1
        ;;
      ( * )
        FATAL 1 "Unrecognized script option #${iParam} - '${Param}'"
        ;;
    esac
  else
    # First non-option argument ends option parsing.
    NoMoreOptions=1
    Files[nFiles++]="$Param"
  fi
done

declare -i ExitCode
if isFlagSet DoVersion ; then
  echo "${SCRIPTNAME} version ${SCRIPTVERSION:-"unknown"}"
  : ${ExitCode:=0}
fi

# Show help when requested or when no files were given.
if isFlagSet DoHelp || [[ $nFiles -le 0 ]] ; then
  help
  # set the exit code (0 for help option, 1 for missing parameters)
  isFlagSet DoHelp
  { [[ -z "$ExitCode" ]] || [[ "$ExitCode" == 0 ]] ; } && ExitCode="$?"
fi

[[ -n "$ExitCode" ]] && exit $ExitCode

# --- rotate each requested file; exit code is the number of failures ----
declare -i nErrors=0
for File in "${Files[@]}" ; do
  RotateFile "$File" "$Limit" "$Padding"
  res=$?
  [[ $res != 0 ]] && let ++nErrors
done
exit $nErrors
| true |
002ccbd5e134d3d6f32bb5d482f17c588f71e85a | Shell | bonifak/raven | /tests/cluster_tests/test_qsubs.sh | UTF-8 | 5,971 | 3.125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
#This script is used on the INL cluster machine Falcon to test
# the cluster interface.

# Running totals of failed checks, reported at the end of the script.
num_fails=0
fails=''

# Load the RAVEN conda environment and remember the framework directory
# (used later to dump the PBS stdout/stderr files on failure).
pushd ../../framework
RAVEN_FRAMEWORK_DIR=$(pwd)
source ../scripts/establish_conda_env.sh --load
popd

# wait_lines GLOB COUNT NAME
# Wait (with increasing sleeps, to allow for shared-filesystem propagation)
# until the files matching GLOB number exactly COUNT; report PASS/FAIL for
# NAME and, on failure, dump the qsub stdout/stderr files.
wait_lines ()
{
  # Log the exit status of the command that ran just before this check.
  echo Return code: $?
  LS_DATA="$1"
  COUNT="$2"
  NAME="$3"
  sleep 2
  lines=`ls $LS_DATA | wc -l`
  if test $lines -ne $COUNT; then
    echo Lines not here yet, waiting longer.
    sleep 20 #Sleep in case this is just disk propagation
    lines=`ls $LS_DATA | wc -l`
  fi
  if test $lines -ne $COUNT; then
    echo Lines not here yet, waiting even longer.
    sleep 60 #Sleep in case this is just disk propagation
    lines=`ls $LS_DATA | wc -l`
  fi
  if test $lines -eq $COUNT; then
    echo PASS $NAME
  else
    echo FAIL $NAME
    fails=$fails', '$NAME
    num_fails=$(($num_fails+1))
    printf '\n\nStandard Error:\n'
    cat $RAVEN_FRAMEWORK_DIR/test_qsub.e* || echo No *.e* file found! Continuing ...
    printf '\n\nStandard Output:\n'
    cat $RAVEN_FRAMEWORK_DIR/test_qsub.o* || echo No *.o* file found! Continuing ...
  fi
  # Clean up the PBS output files so the next test starts fresh.
  rm $RAVEN_FRAMEWORK_DIR/test_qsub.[eo]* || echo Trying to remove *.o*, *.e* files but not found. Continuing ...
  echo ''
}
# Each section below follows the same pattern: remove stale outputs, run a
# RAVEN cluster test, then use wait_lines to verify the expected number of
# output files appears.
echo Current directory: `pwd`
echo Removing old databases...
rm -Rf DatabaseStorage/

rm -Rf FirstMQRun/

#REQUIREMENT_TEST R-IS-7
../../raven_framework test_mpiqsub_local.xml pbspro_mpi.xml cluster_runinfo_legacy.xml
wait_lines 'FirstMQRun/[1-6]/*test.csv' 6 mpiqsub
echo ''

rm -Rf FirstMNRun/

../../raven_framework test_mpiqsub_nosplit.xml cluster_runinfo_legacy.xml
wait_lines 'FirstMNRun/[1-6]/*.csv' 6 mpiqsub_nosplit
echo ''

rm -Rf FirstMLRun/

../../raven_framework test_mpiqsub_limitnode.xml cluster_runinfo_legacy.xml
wait_lines 'FirstMLRun/[1-6]/*.csv' 6 mpiqsub_limitnode
echo ''

rm -Rf FirstMRun/

echo ''
echo 'Running interactive MPI test ...'
# This test submits the job itself and blocks until it completes.
qsub -P moose -l select=6:ncpus=4:mpiprocs=1 -l walltime=10:00:00 -l place=free -W block=true ./run_mpi_test.sh
wait_lines 'FirstMRun/[1-6]/*test.csv' 6 mpi
echo ''

rm -Rf FirstPRun/

../../raven_framework test_pbs.xml cluster_runinfo_legacy.xml
wait_lines 'FirstPRun/[1-6]/*test.csv' 6 pbsdsh
echo ''

rm -Rf FirstMFRun/

../../raven_framework test_mpiqsub_flex.xml cluster_runinfo_legacy.xml
wait_lines 'FirstMFRun/[1-6]/*.csv' 6 mpiqsub_flex
echo ''

rm -Rf FirstMForcedRun/

../../raven_framework test_mpiqsub_forced.xml cluster_runinfo_legacy.xml
wait_lines 'FirstMForcedRun/[1-6]/*.csv' 6 mpiqsub_forced
echo ''

######################################
# test parallel for internal Objects #
######################################

# first test (external model in parallel)
cd InternalParallel/
rm -Rf InternalParallelExtModel/*.csv
#REQUIREMENT_TEST R-IS-8
../../../raven_framework test_internal_parallel_extModel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'InternalParallelExtModel/*.csv' 28 paralExtModel
cd ..
echo ''

# second test (ROM in parallel)
cd InternalParallel/
rm -Rf InternalParallelScikit/*.csv
#REQUIREMENT_TEST R-IS-9
../../../raven_framework test_internal_parallel_ROM_scikit.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'InternalParallelScikit/*.csv' 2 paralROM
cd ..
echo ''

# third test (PostProcessor in parallel)
cd InternalParallel/
rm -Rf InternalParallelPostProcessorLS/*.csv
../../../raven_framework test_internal_parallel_PP_LS.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'InternalParallelPostProcessorLS/*.csv' 4 parallelPP
cd ..
echo ''

# fourth test (Topology Picard in parallel)
cd InternalParallel/
rm -Rf InternalParallelMSR/*.csv
../../../raven_framework test_internal_MSR.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'InternalParallelMSR/*.csv' 1 parallelMSR
cd ..
echo ''

# fifth test (Ensemble Model Picard in parallel)
cd InternalParallel/
rm -Rf metaModelNonLinearParallel/*.png
../../../raven_framework test_ensemble_model_picard_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'metaModelNonLinearParallel/*.png' 3 parallelEnsemblePicard
cd ..
echo ''

# sixth test (Ensemble Model linear in parallel)
cd InternalParallel/
rm -Rf metaModelLinearParallel/*.png
../../../raven_framework test_ensemble_model_linear_internal_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'metaModelLinearParallel/*.png' 2 parallelEnsembleLinear
cd ..
echo ''

# seventh test (HybridModel Code in parallel)
cd InternalParallel/
rm -Rf hybridModelCode/*.csv
../../../raven_framework test_hybrid_model_code.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'hybridModelCode/*.csv' 1 parallelHybridModelCode
cd ..
echo ''

# eighth test (HybridModel External Model in parallel)
cd InternalParallel/
rm -Rf hybridModelExternal/*.csv
../../../raven_framework test_hybrid_model_external.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'hybridModelExternal/*.csv' 1 parallelHybridModelExternal
cd ..
echo ''

############################################
# test parallel for internal Objects ENDED #
############################################

################################
# other parallel objects tests #
################################

# Adaptive Sobol
cd AdaptiveSobol/
rm -Rf workdir/*
../../../raven_framework test_adapt_sobol_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'workdir/*.csv' 1 adaptiveSobol
cd ..
echo ''

# Raven-Running-Raven (RAVEN code interface)
cd RavenRunsRaven/raven_running_raven_internal_models/
rm -Rf FirstMRun DatabaseStorage *csv testPointSet_dump.xml
cd ..
../../../raven_framework test_raven_running_raven_int_models.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
wait_lines 'raven_running_raven_internal_models/testP*.csv' 17 ravenRunningRaven
cd ..
echo ''

# Summary: exit code is the number of failed checks.
if test $num_fails -eq 0; then
  echo ALL PASSED
else
  echo FAILED: $num_fails $fails
fi
exit $num_fails
| true |
42ab5176e792c6dc1b659f9e99a101e4b70ad15b | Shell | jmurga/docker-jbrowse | /input/automaticLoad.sh | UTF-8 | 7,972 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# set -e
# --- configuration ------------------------------------------------------
# Dataset identifiers and the chromosomes to load as reference sequences.
ORGANISM="dm6"
RAWDATA="dmel"
CHROMOSOMES="2R 2L 3R 3L X"
# NOTE(review): this first POPTEST assignment is immediately overwritten
# by the next line.
POPTEST="fst"
POPTEST="pi theta_watterson tajima"
WINDOWS="100kb"
# POPTEST="pi theta"
# Track colours per statistic and metapopulation labels per population.
declare -A COLORS=( ["pi"]="#065EE8" ["theta_watterson"]="#13B2FF" ["fst"]="#6565FB" ["recomb"]="#8D3762" ["RCC"]="#F6BAC4")
declare -A METAPOP=( ["RAL"]="AM" ["ZI"]="AFR" ["Europe"]="Europe")
# Fragments of jbrowse.conf built up as tracks are added below.
COLLAPSETRACKS="\n[trackSelector]\ncollapsedCategories="
DEFAULTTRACKS="\n[GENERAL]\ndefaultTracks=reference_sequence,"
###############################################
############### HTMLCONFIG ################
###############################################
# Expose the raw data directory inside JBrowse (first run only) and
# install the custom index page.
if [ ! -e ${JBROWSE_DATA}/${ORGANISM}/raw/dmel ]; then
  # ln -s ${DATA_DIR}/${RAWDATA}/ ${JBROWSE_DATA}/${ORGANISM}/raw/dmel;
  # ln -s ${DATA_DIR}/${RAWDATA}/ $JBROWSE_DATA/${ORGANISM}/raw/tracks;
  mkdir -p ${JBROWSE_DATA}/${ORGANISM}/raw/
  ln -s ${DATA_DIR}/${RAWDATA}/ ${JBROWSE}/files;
fi;

ln -sf ${DATA_DIR}/dmel.json ${JBROWSE_DATA}/${ORGANISM}/raw/;
rm ${JBROWSE}/index.html && ln -s ${DATA_DIR}/index.html ${JBROWSE}/;
###############################################
############### LOAD SEQ/ANNOTATIONS ################
###############################################
# Reference sequences: load one FASTA per chromosome.
for NCHR in ${CHROMOSOMES}
do
  echo ${NCHR}
  ${JBIN}/prepare-refseqs.pl --fasta ${DATA_DIR}/${RAWDATA}/refseq/Chr${NCHR}.fasta --out ${JBROWSE_DATA}/${ORGANISM} --key "reference_sequence" --trackLabel "reference_sequence" --trackConfig '{ "category": "ref_tracks"}'
done

# Load the feature database described by dmel.json.
${JBIN}/biodb-to-json.pl -v --conf ${DATA_DIR}/dmel.json --out $JBROWSE_DATA/${ORGANISM};

# Load every BED annotation file; gene tracks get a richer CanvasFeatures
# configuration with NCBI/FlyBase context-menu links.
# NOTE(review): iterating over `ls` output breaks on file names containing
# whitespace; acceptable here only if annotation names are controlled.
for ANN in `ls ${DATA_DIR}/dmel/annotations/`
do
  echo ${ANN}
  LABEL=$(echo ${ANN} | cut -d'.' -f1)
  # Gene tracks are also added to the browser's default track list.
  DEFAULTTRACKS="${DEFAULTTRACKS}${LABEL},"
  if [[ ${ANN} =~ 'genes' ]];then
    ${JBIN}/flatfile-to-json.pl --trackType CanvasFeatures --trackLabel ${LABEL} --autocomplete all --bed ${DATA_DIR}/dmel/annotations/${ANN} --key $LABEL --config '{ "category": "Reference tracks", "menuTemplate" : [{"iconClass" : "dijitIconTask","action" : "contentDialog","title" : "{type} {name}","label" : "View details"},{"iconClass" : "dijitIconFilter"},{"label" : "Search gene on NCBI","title" : "Search on NCBI {name}","iconClass" : "dijitIconDatabase","action": "newWindow","url" : "https://www.ncbi.nlm.nih.gov/gquery/?term={id}"},{"label" : "Search gene on flybase","title" : "Search on flybase {name}","iconClass" : "dijitIconFile","action": "newWindow","url" : "http://flybase.org/reports/{id}"}]}' --metadata '{"general_tracks":"Gene annotations"}' --out ${JBROWSE_DATA}/${ORGANISM}/
  else
    echo $ANN
    ${JBIN}/flatfile-to-json.pl --className "feature2" --arrowheadClass "null" --trackLabel ${LABEL} --autocomplete all --bed ${DATA_DIR}/dmel/annotations/${ANN} --key $LABEL --config '{ "category": "Reference tracks"}' --metadata '{"general_tracks":"annotations"}' --out ${JBROWSE_DATA}/${ORGANISM}/
  fi
done
###############################################
############### LOAD BY TEST ################
###############################################
# for TEST in ${POPTEST}
# do
# DESCRIPTION=$(fgrep $TEST ${DATA_DIR}/description.txt | cut -d'=' -f2)
# echo ${TEST}
# TRACKCOLOR=${COLORS[${TEST}]}
# if [[ ${TEST} == 'fst' ]];then
# SUBCAT="Variation/population_diferentiation"
# COLLAPSETRACKS="${COLLAPSETRACKS}${SUBCAT},"
# else
# SUBCAT='Variation/freq_nucleotide_variation'
# COLLAPSETRACKS="${COLLAPSETRACKS}${SUBCAT},"
# fi
# for TRACK in `ls ${JBROWSE}/files/${TEST}`
# do
# for W in $WINDOWS
# echo ${TRACK}
# LABEL=$(echo ${TRACK} | cut -d'.' -f1 | tr '_' ' ')
# POP=$(cut -d'_' -f1 <<< ${TRACK})
# ${JBIN}/add-bw-track.pl --category "${SUBCAT}" \
# --label "${LABEL}" \
# --key "${TRACK}" \
# --plot \
# --pos_color "${TRACKCOLOR}" \
# --bw_url ../../files/${TEST}/${TRACK} \
# --config '{"metadata":{ "population":'\"${POP}\"',"metapopulation":'\"${METAPOP[${POP}]}\"',"freq_nucleotide_variation":'\"${TEST}\"',"window_size":"10kb","description":'${DESCRIPTION}'}}' \
# --in ${JBROWSE_DATA}/${ORGANISM}/trackList.json \
# --out ${JBROWSE_DATA}/${ORGANISM}/trackList.json
# done
# done
###############################################
############### DEFAULT TRACKS ################
###############################################
# Add each BigWig reference track; recombination tracks get dedicated
# categories/colours, everything else falls into "ref_tracks".
# NOTE(review): iterating over `ls` output breaks on names with whitespace.
for TRACK in `ls ${DATA_DIR}/dmel/reftracks`
do
  echo ${TRACK}
  if [[ ${TRACK} =~ 'recomb' ]];then
    SUBCAT="Variation/Recombination"
    COLLAPSETRACKS="${COLLAPSETRACKS}${SUBCAT},"
    TRACKCOLOR=${COLORS[recomb]}
    LABEL="HR recomb cM/Mb (Comeron et al.)"
    DEFAULTTRACKS="${DEFAULTTRACKS}${LABEL},"
  elif [[ ${TRACK} =~ 'RRC' ]];then
    SUBCAT="Variation/Recombination"
    COLLAPSETRACKS="${COLLAPSETRACKS}${SUBCAT},"
    TRACKCOLOR=${COLORS[RCC]}
    LABEL="RRC cM/Mb (Fiston-Lavier et al.) 100kb"
    DEFAULTTRACKS="${DEFAULTTRACKS}${LABEL},"
  else
    SUBCAT="ref_tracks"
    TRACKCOLOR="#34A853"
    LABEL=$(echo ${TRACK} | cut -d'.' -f1)
  fi

  # Register the BigWig file as a plotted track in trackList.json.
  ${JBIN}/add-bw-track.pl --category ${SUBCAT} \
  --label "${LABEL}" \
  --key "${LABEL}"\
  --plot \
  --pos_color "${TRACKCOLOR}" \
  --bw_url ../../files/reftracks/${TRACK} \
  --in ${JBROWSE_DATA}/${ORGANISM}/trackList.json \
  --out ${JBROWSE_DATA}/${ORGANISM}/trackList.json
done
###############################################
############### JBROWSE CONF ################
###############################################
# --- JBrowse configuration ---------------------------------------------
# Register the dataset and its helper functions, then append the general
# settings, default-track list and collapsed categories to jbrowse.conf.
${JBIN}/add-json.pl '{ "dataset_id": "dmel", "include": [ "functions.conf" ] }' ${JBROWSE_DATA}/${ORGANISM}/trackList.json
cp ${DATA_DIR}/functions.conf ${JBROWSE_DATA}/${ORGANISM}/functions.conf
printf "\n[general]\ndataset_id = ${ORGANISM}\n" > ${JBROWSE_DATA}/${ORGANISM}/tracks.conf
printf "\n[GENERAL]\ndataRoot = data/${ORGANISM}\ninclude = {dataRoot}/trackList.json\ninclude += {dataRoot}/tracks.conf\n" >> ${JBROWSE}/jbrowse.conf
# Replace the trailing comma of the accumulated lists with a newline.
DEFAULTTRACKS=$(sed 's/.$/\\n/' <<< $DEFAULTTRACKS)
echo -e ${DEFAULTTRACKS} >> ${JBROWSE}/jbrowse.conf
COLLAPSETRACKS=$(sed 's/.$/\\n/' <<< $COLLAPSETRACKS)
printf ${COLLAPSETRACKS} >> ${JBROWSE}/jbrowse.conf
printf "\n[datasets.${ORGANISM}]\nurl = ?data=data/${ORGANISM}\nname = ${ORGANISM}\n" >> ${JBROWSE}/jbrowse.conf
###########PLUGINS
printf "\n[ plugins.HierarchicalCheckboxPlugin ]\nlocation = plugins/HierarchicalCheckboxPlugin\n" >> ${JBROWSE}/jbrowse.conf
printf "\n[ plugins.HideTrackLabels ]\nlocation = plugins/HideTrackLabels\n" >> ${JBROWSE}/jbrowse.conf
printf "\n[ plugins.bookmarks ]\nlocation = plugins/bookmarks\n" >> ${JBROWSE}/jbrowse.conf
# printf "\n[ plugins.DownloadFasta ]\nlocation = plugins/DownloadFasta\n" >> ${JBROWSE}/jbrowse.conf

# Build the search-name index for all loaded tracks.
${JBIN}/generate-names.pl --safeMode -v --out ${JBROWSE_DATA}/${ORGANISM};

# --- Apache configuration ----------------------------------------------
# Point the default vhost at the built site and enable gzip compression.
sed -i 's/html/html\/dest\//g' /etc/apache2/sites-available/000-default.conf
printf "<Directory /var/www/html/dest>\n\tOptions -Indexes\n\tAllowOverride All\n\t# Compress text, HTML, JavaScript, CSS, XML:\n\t\t<FilesMatch \"\.(htm?l|txt|css|js|php|pl)$\">\n\t\t\tSetOutputFilter DEFLATE\n\t</FilesMatch>\n</Directory>\n" >> /etc/apache2/sites-available/000-default.conf

# --- rebuild the JBrowse client ----------------------------------------
ln -s /data/minimalSetup.sh ${JBROWSE}/
cd ${JBROWSE} && bash minimalSetup.sh
# apachectl start
| true |
40aaedd9f5e4b5bdece3378905bff7371a505fd6 | Shell | FGPullen/shiny-couscous | /cluster/train_result.sh | UTF-8 | 636 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Run pageCluster.py in training mode for every combination of dataset,
# date, clustering algorithm and feature weighting.
command=python
class=pageCluster.py
train=train

declare -a algo_array=("dbscan")
declare -a feature_array=("log-tf-idf")
declare -a date_array=("July30")
declare -a data_array=("asp" "youtube" "douban" "rottentomatoes" "hupu" "stackexchange")

for data in "${data_array[@]}"; do
  for date in "${date_array[@]}"; do
    for algo in "${algo_array[@]}"; do
      for feature in "${feature_array[@]}"; do
        # Echo the exact invocation before running it.
        echo $command $class "$data" "$date" "$algo" "$feature" train
        $command $class "$data" "$date" "$algo" "$feature" train
      done
    done
  done
done
dea3e5bda5b7502bc09e515420e8358a2cdecb1c | Shell | Char0772/seniorproject | /Misc/ScanforAPs.sh | UTF-8 | 585 | 2.84375 | 3 | [] | no_license | sudo iwlist wlp6s0 scan > temp.txt # 1 scan into temp Text - Improved performance
cat temp.txt | grep -e Cell | awk '{print $5}' > tempBSSID.txt
cat temp.txt | grep -e ESSID | awk '{print $1}' | cut -d ":" -f2 | sed s/\"//g > tempESSID.txt
#echo
#echo
#cat tempESSID.txt | tr '\n' ' ' | awk '{print $1}'
#echo
#echo
#cat tempBSSID.txt | tr '\n' ' '
#echo
#len="wc -l tempESSID.txt | cut -d ' ' -f1 "
#maxline=eval $len
for i in $(seq 1 13)
do
cat tempESSID.txt | tr '\n' ' ' | awk -v i="$i" '{print $i}'
cat tempBSSID.txt | tr '\n' ' ' | awk -v i="$i" '{print $i}'
done
exit | true |
13083276163298bf1a1d8d870bffe89b20a5145d | Shell | achin/ofe-cartridge | /bin/control | UTF-8 | 761 | 3.28125 | 3 | [] | no_license | #!/bin/bash -e
source "$OPENSHIFT_CARTRIDGE_SDK_BASH"

# The cartridge's "running" state is represented by a marker file.
function start() {
  touch "$OPENSHIFT_OFE_DIR/run/status"
}

function stop() {
  rm "$OPENSHIFT_OFE_DIR/run/status"
}

function restart() {
  stop
  start
}

function status() {
  # BUG FIX: the original reported "Running" and then unconditionally fell
  # through to also report "Not running"; report exactly one state.
  if [ -f "$OPENSHIFT_OFE_DIR/run/status" ]; then
    client_result "Running"
  else
    client_result "Not running"
  fi
}

function post-deploy() {
  client_message "Adding OFE registration..."
}

# No-op handler for hooks this cartridge does not implement.
function noop () {
  return 0
}

case "$1" in
  start)       start ;;
  stop)        stop ;;
  restart)     restart ;;
  status)      status ;;
  post-deploy) post-deploy ;;
  reload)      noop ;;
  tidy)        noop ;;
  prebuild)    noop ;;
  build)       noop ;;
  deploy)      noop ;;
  *) exit 0
esac
| true |
75a95d32ad7e83cde564091c06b6670916b5b34a | Shell | smsouthard/Red-Team | /images/DeepLearningTutorials/data/download.sh | UTF-8 | 1,303 | 2.9375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Pick an available downloader, preferring wget over curl.
# FIX: use the POSIX 'command -v' instead of the non-standard 'which'.
if command -v wget >/dev/null 2>&1; then
    DL_CMD="wget --no-verbose -c"
elif command -v curl >/dev/null 2>&1; then
    DL_CMD="curl -C - -O"
else
    echo "You need wget or curl installed to download"
    exit 1
fi

# Fetch the tutorial datasets; some archives are unpacked in place.
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist_py3k.pkl.gz
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl.gz && gunzip imdb.pkl.gz
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/data/imdb.dict.pkl.gz && gunzip imdb.dict.pkl.gz
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/data/Nottingham.zip && unzip -u Nottingham.zip
$DL_CMD http://www.iro.umontreal.ca/~lisa/deep/midi.zip && unzip -u midi.zip -d ../code && echo "extracted Modified Python MIDI package (GPL)"
$DL_CMD http://lisaweb.iro.umontreal.ca/transfert/lisa/users/mesnilgr/atis/atis.fold0.pkl.gz
$DL_CMD http://lisaweb.iro.umontreal.ca/transfert/lisa/users/mesnilgr/atis/atis.fold1.pkl.gz
$DL_CMD http://lisaweb.iro.umontreal.ca/transfert/lisa/users/mesnilgr/atis/atis.fold2.pkl.gz
$DL_CMD http://lisaweb.iro.umontreal.ca/transfert/lisa/users/mesnilgr/atis/atis.fold3.pkl.gz
$DL_CMD http://lisaweb.iro.umontreal.ca/transfert/lisa/users/mesnilgr/atis/atis.fold4.pkl.gz
94aed50adb7be37fa1d01209d01e588e8b5df612 | Shell | rkcho317/2403 | /run.sh | UTF-8 | 792 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#Author: Rosa Cho
#Program name: Sum of Array Demo

# BUG FIX: the commas were literal, so 'rm' looked for files named
# '*.o,' and '*.lis,' (which never exist); separate the patterns with
# spaces and use -f so missing files are not an error.
rm -f *.o *.lis *.out

echo " " #Blank line
echo "Compile the C file main.c"
gcc -c -m64 -Wall -fno-pie -no-pie -o main.o main.c
echo "Compile the X86 file fill.asm"
nasm -f elf64 -o fill.o fill.asm
echo "Compile the X86 file sum.asm"
nasm -f elf64 -o sum.o sum.asm
echo "Assemble the X86 file control.asm"
nasm -f elf64 -o control.o control.asm
echo "Compile the C++ file display.cpp"
g++ -c -m64 -Wall -fno-pie -no-pie -o display.o display.cpp -std=c++14
echo "Link the 'O' files main.o, fill.o, sum.o, control.o, and display.o"
g++ -m64 -std=c++14 -fno-pie -no-pie -o cars.out main.o fill.o sum.o control.o display.o
echo "Run the program Sum of Array"
./cars.out
echo "This Bash script file will now terminate. Bye."
| true |
a005f574c06c1ba07c5751f54a7e52c12b0176b8 | Shell | studio-368/article | /deploy.sh | UTF-8 | 314 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Exit with nonzero status if anything fails
set -e

# Stage the built PDF in a fresh scratch directory.
mkdir out
cd out
cp ../paper.pdf .

# Create a throwaway repository and force-push its single commit to the
# gh-pages branch; GH_TOKEN/GH_REF are provided by the CI environment.
git init
git config user.name "Travis-CI"
git config user.email "travis@travis-ci.org"
git add .
git commit -m "Deploy to Github Pages"
# Output is silenced so the token never appears in the CI log.
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:gh-pages > /dev/null 2>&1
fdc741f3a4d6d7da5459dcf2591ff1a7dc604bc2 | Shell | GinjaNinja32/dotfiles | /scripts/screenshot | UTF-8 | 566 | 3.3125 | 3 | [] | no_license | #! /bin/bash
# Without arguments: take a screenshot (full screen if
# SCREENSHOT_FULL_SCREEN=1, otherwise interactive region select) and open
# an upload prompt. With an argument: upload that screenshot via scp and
# open its public URL.
if [[ "$1" == "" ]]; then
	name_format="%Y-%m-%d_%H-%M-%S.png"
	name="$(date +"$name_format")"
	# BUG FIX: build the scrot arguments as an array so the target path is
	# passed as one word even when $HOME contains spaces (the old unquoted
	# $ARGS string was word-split).
	if [[ $SCREENSHOT_FULL_SCREEN == 1 ]]; then
		args=("$HOME/Pictures/$name")
	else
		args=(-s "$HOME/Pictures/$name")
	fi
	if scrot "${args[@]}"; then
		if [[ $(hostname) == "bp-ubuntu" ]]; then
			"$(dirname "$0")/open-in-firefox.sh" "$HOME/Pictures/$name"
		else
			# Re-invoke this script in a floating terminal to do the upload.
			termite -t "i3:float" -e "$0 $name"
		fi
	fi
else
	name="$1"
	scp -S ssh "$HOME/Pictures/$name" "thanatos.gn32.uk:www/i/$name"
	xdg-open "https://gn32.uk/i/$name"
fi
| true |
d756f39bbb127e821acbff68e96de9e5171d3949 | Shell | ravitejawavity/kubernetes-www-redirect | /docker-nginx/docker-entrypoint.sh | UTF-8 | 223 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Replace environment variables in nginx.conf.tmpl.
# Only the listed variables are substituted so nginx's own $-variables in
# the template are left untouched.
VARS='$NGINX_TRUSTED_IP:$NGINX_PORT'
envsubst "$VARS" < /etc/nginx/nginx.conf.tmpl > /etc/nginx/nginx.conf

# Start nginx in the foreground so the container keeps running (exec makes
# nginx PID 1 and lets it receive signals directly).
exec nginx -g 'daemon off;'
| true |
b3fefd41168254350eaab0d870b9ab2380bd3ebe | Shell | cedricpineau/clojure_for_gedit | /install.sh | UTF-8 | 343 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# Install Clojure syntax highlighting for gedit (GtkSourceView) and
# register the Clojure MIME type. Falls back to the Gnome2 location when
# the Gnome3 directory is absent.
gtksourceview=/usr/share/gtksourceview-3.0
if [ ! -d "$gtksourceview" ]; then
  echo "Gnome3 not installed, using Gnome2"
  gtksourceview=/usr/share/gtksourceview-2.0
fi

sudo cp clojure.lang "$gtksourceview/language-specs/clojure.lang"
sudo cp clojure.xml /usr/share/mime/packages/clojure.xml
sudo update-mime-database /usr/share/mime
| true |
fa38c2142b30bc80752fa9d1998a609845b67e5e | Shell | JohnnyGOX17/configs | /dev_utils/git/git-rm-dos-whitespace | UTF-8 | 1,029 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# More aggressive removal of DOS line endings found in a repo
# Could be useful in git hook, replace find pattern based on filetypes

# Only act when invoked from inside a git work tree.
if [ "$(git rev-parse --is-inside-work-tree 2> /dev/null)" ]; then
  git_root="$(git rev-parse --show-toplevel)"
  # Operate from the repository root so the find patterns cover everything.
  pushd "$git_root" > /dev/null || exit
  # find files w/DOS line-endings and pipe to 'dos2unix' to convert to Unix EOLs
  find . -type f \( -iname \*.vhd -o -iname \*.v \) -exec file "{}" ";" | grep CRLF | sed 's/:.*//' | xargs dos2unix
  # replace any Posix-defined trailing whitespace chars from selected files
  find . -type f \( -iname \*.vhd -o -iname \*.v \) -exec sed --in-place 's/[[:space:]]\+$//' {} \+
  # for tracked files, check if we made any changes other than whitespace/EOL related
  # (diff ignores EOL/whitespace, so any output means real code changed)
  if [[ $(git diff --ignore-space-at-eol -b -w HEAD) ]]; then
    echo "Diff indicates actual code was changed beyond whitespace/EOL changes; run 'git diff' to verify any changes or revert back to previous"
  else
    echo "Success"
  fi
  popd > /dev/null || exit
fi
312a32efdce0243d5c532e416831e9583e3bb49e | Shell | system32bitt/scripts | /R210II-IPMITemp.sh | UTF-8 | 2,649 | 4 | 4 | [] | no_license | #!/bin/bash
# ----------------------------------------------------------------------------------
# Script for checking the temperature reported by the ambient temperature sensor,
# and if deemed too high send the raw IPMI command to enable dynamic fan control, as
# well as notifying the admin via discord webhook.
# Using crontab is recommended.
#
# Requires:
#	ipmitool – apt-get install ipmitool
#	lm-sensors - apt-get install lm-sensors
#	discord.sh - (on github, must specify file path below)
# ----------------------------------------------------------------------------------

# DISCORD.SH LOCATION
WEBHOOK=/root/crontab_scripts/dependencies/discord.sh

# IDLE SPEED
# Set this to a percentage in hexadecimal of what the idle fan speed should be.
IDLE=1D

# LOAD SPEED
# Set this to a percentage in hexadecimal of what the fan speed should be when under load.
LOAD=3C

# WARNING TEMPERATURE
# Change this to the temperature in celsius you are comfortable with.
WARNTEMP=55

# CRITICAL TEMPERATURE
# Set this for when the temperature is in a critical state and needs dynamic fan control.
CRITTEMP=70

# WEBHOOK URL
# Set this to the webhook url specified on Discord.
URL=

# Do not edit.
TEMP=$(sensors |grep Package |grep -Po '\d{2}' | head -1)

# Enable Manual Fan Control
ipmitool raw 0x30 0x30 0x01 0x00

# Critical Temperature Logic
# BUG FIX: '[[ $TEMP > $CRITTEMP ]]' compared the values as strings
# (lexicographically, so e.g. "9" > "70"); use an arithmetic comparison.
if (( TEMP > CRITTEMP ));
then
    # Notify
    # BUG FIX: 'systemctl-cat' was a typo for 'systemd-cat'.
    printf "Critical: Temperature has exceeded warning spec. Enabling dynamic control. ($TEMP C)" | systemd-cat -t R210II-IPMI-TEMP
    echo "Critical: Temperature has exceeded warning spec. Enabling dynamic control. ($TEMP C)"
    source $WEBHOOK --webhook-url=$URL --text "Critical: Temperature has exceeded warning spec. Enabling dynamic control. ($TEMP C)"
    # Set Dynamic Fan Control
    ipmitool raw 0x30 0x30 0x01 0x01
    exit 0
fi

# Warning Temperature Logic
if (( TEMP > WARNTEMP ));
then
    # Notify
    printf "Warning: Temperature is too high! Raising Speed.. ($TEMP C)" | systemd-cat -t R210II-IPMI-TEMP
    echo "Warning: Temperature is too high! Raising Speed.. ($TEMP C)"
    source $WEBHOOK --webhook-url=$URL --text "Warning: Temperature is too high! Raising Speed.. ($TEMP C)"
    # Set Load Fan Speed
    ipmitool raw 0x30 0x30 0x02 0xff 0x$LOAD >/dev/null 2>&1
else
    # Notify
    printf "Temperature OK ($TEMP C)" | systemd-cat -t R210II-IPMI-TEMP
    echo "Temperature OK ($TEMP C)"
    # Only uncomment this if you need consistent reporting (unlikely)
    #source $WEBHOOK --webhook-url=$URL --text "Temperature OK ($TEMP C)"
    # Set Idle Fan Speed
    ipmitool raw 0x30 0x30 0x02 0xff 0x$IDLE >/dev/null 2>&1
fi
141057de1a71161469f86d9e4e04eb507fddee95 | Shell | YAGER-Development/grafana-backup-tool | /restore_grafana.sh | UTF-8 | 630 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e

trap 'echo -ne "\n:::\n:::\tCaught signal, exiting at line $LINENO, while running :${BASH_COMMAND}:\n:::\n"; exit' SIGINT SIGQUIT

current_path=$(pwd)
archive_file="$1"
# Require an existing backup archive as the sole argument.
if [ ! -f "$archive_file" ]; then
    echo -e "Usage:"
    echo -e "\t$0 <archive_file>"
    echo -e "  e.g. $0 '_OUTPUT_/2019-05-13T11-04-33.tar.gz'"
    exit 1
fi

# Unpack into a PID-suffixed scratch directory and restore each object
# type in dependency order (folders before dashboards, etc.).
tmp_dir="/tmp/restore_grafana.$$"
mkdir -p "$tmp_dir"
tar -xzf "$archive_file" -C "$tmp_dir"

for j in folder datasource dashboard alert_channel
do
    # FIX (robustness): quote expansions and use 'read -r' so file paths
    # containing spaces or backslashes are handled correctly.
    find "$tmp_dir" -type f -name "*.${j}" | while IFS= read -r f
    do
        python "${current_path}/src/create_${j}.py" "${f}"
    done
done

rm -rf "$tmp_dir"
| true |
4148b3e2b51feb5c2c451c264925566e702c745f | Shell | shivatejapeddi/eui6dump | /system/etc/init.letv.wlog_start.sh | UTF-8 | 456 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/system/bin/sh
LOG_TAG="letv-wlog"
LOG_NAME="${0}:"

# Log an informational message via the Android log command.
logi ()
{
    /system/bin/log -t $LOG_TAG -p i "$LOG_NAME $@"
}

logi ""

# When invoked through the shell interpreter, the log path is the second
# argument; otherwise it is the first.
if [ $1 = "/system/bin/sh" ]; then
    log_path=$2
else
    log_path=$1
fi

logi $log_path

# Record the chosen path for other tools to pick up.
echo $log_path > /sdcard/logs/tmp_path

# start recording wlan firmware log and driver log
iwpriv wlan0 dl_loglevel 0
setprop ctl.startpar cnss_diag:"-u $log_path/"

# start recording package log
pktlogconf -s 10000000 -e -a cld
exit 0
d57f145e4a2cca452fefef8a74bfa9cbb36382b5 | Shell | warpme/minimyth2 | /script/meta/minimyth/files/source/rootfs/usr/bin/mm_watchdog | UTF-8 | 1,068 | 3.53125 | 4 | [] | no_license | #!/bin/sh
# Enforce a single running instance: pidof includes this process, so a
# word count other than 1 means another watchdog is already active.
pids=`/bin/pidof mm_watchdog`
instances=`/bin/echo ${pids} | /usr/bin/wc -w`
if [ ${instances} -ne 1 ] ; then
  /usr/bin/logger -t minimyth -p "local0.info" "[mm_watchdog] another instance is running ..."
  exit 1
fi

. /etc/rc.d/functions

# On exit (or common signals), kill any lingering '/bin/sleep' children
# this script spawned, found by matching our PID in the PPID column.
trap "_exit_" 0 1 2 3 15
_exit_()
{
  ps -o ppid,pid,args \
    | sed -e 's% *% %g' -e 's%^ %%' -e 's% $%%' \
    | grep "^$$ " \
    | grep '/bin/sleep [^ ]*$' \
    | cut -d ' ' -f 2 \
    | while read pid ; do
        kill $pid
      done
}

# Give the frontend time to come up before monitoring begins.
/bin/sleep 30
/usr/bin/logger -t minimyth -p "local0.info" "[mm_watchdog] start monitoring mythfrontend process..."
# Poll forever: if mythfrontend disappears, snapshot its logs (suffixed
# with our PID) and restart it, then back off before checking again.
while true ; do
  if [ -z "`/bin/pidof mythfrontend`" ] ; then
    /usr/bin/logger -t minimyth -p "local0.info" "[mm_watchdog] killed/trapped mythfrontend detected. Restarting it ..."
    cp -rf /var/log/mythfrontend.log /var/log/mythfrontend.fe-trap.$$
    cp -rf /var/log/messages /var/log/messages.fe-trap.$$
    /etc/rc.d/init.d/frontend start
    /bin/sleep 30
  else
    /bin/sleep 1
  fi
done
exit 0
3f66a27e561080b5d7b03f67cf455d819ee74403 | Shell | alexmacouchbase/couchbase-scripts | /search.sh | UTF-8 | 589 | 3.734375 | 4 | [] | no_license | #!/bin/sh
# Search for a term inside a named file across all collect-info zip
# archives in the current directory.
if [ $# -ne 2 ]
then
    echo "Usage $0 <file to search within collect info> <search term>"
    echo "ex: $0 couchbase.log swappiness"
    exit 1
fi

fileToSearch="$1"
searchTerm="$2"
echo "File to search: $fileToSearch"
echo "Search term: $searchTerm"

tmpLog="log-$(date +%s)"
# FIX: iterate the glob directly instead of parsing 'ls' output, and
# quote all expansions so odd file names are handled safely.
for file in *.zip
do
    [ -e "$file" ] || continue
    # The host name is the leading dash-separated token of the zip name.
    host=$(basename "$file" | cut -d '-' -f1)
    echo "Host: ${host}"
    # Locate the target file's path inside the archive.
    log="$(unzip -q -l "$file" | grep "$fileToSearch" | awk '{print $4;}' 2>/dev/null)"
    if [ $? -eq 0 ]
    then
        unzip -p "$file" "$log" > "$tmpLog"
        grep -i "$searchTerm" "$tmpLog"
    fi
done
# FIX: -f so cleanup does not error when no archive was processed.
rm -f "$tmpLog"
63a94451b42fb3be49ec8a51ba0c893284467ed8 | Shell | syoung/repo | /envars-dependent.sh | UTF-8 | 780 | 3.984375 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# PURPOSE:
# - CREATE THE FILE ~/.envars CONTAINING ENVIRONMENT VARIABLES
# - ADD LINE ". ~/envars" TO FILE ~/.bashrc TO LOAD THE ENVIRONMENT VARIABLES ON CONNECTION TO THE CONTAINER

APPNAME=BIOREPO

# Absolute directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

#APPDIR=$( echo ${PWD##*/} | tr a-z A-Z )
APPDIR=${APPNAME}_HOME

# Environment-variable commands recorded in the envars file and applied now.
# \$PATH / \$PERL5LIB stay literal here so they expand only at eval/source time.
declare -a COMMANDS=(
  "export $APPDIR=$DIR"
  "export PATH=$DIR/bin:\$PATH"
  "export PERL5LIB=$DIR/lib:\$PERL5LIB"
)

ENVARFILE=$DIR/.envars
# BUG FIX: plain 'echo' printed a literal "\n"; use printf for the newline.
printf '\nCreating envarfile: %s\n' "$ENVARFILE"
rm -fr "$ENVARFILE"

# BUG FIX: the original C-style loop ran one iteration past the end of
# the array, appending an empty line to the file and eval'ing "".
for COMMAND in "${COMMANDS[@]}"
do
  echo "$COMMAND"
  echo "$COMMAND" >> "$ENVARFILE"
  eval "$COMMAND"
done

# Source the envars file from ~/.bashrc exactly once.
if ! grep -q "$DIR/.envars" ~/.bashrc; then
  echo ". $DIR/.envars" >> ~/.bashrc
fi
b48ef2218f8f2c2c15cfad72e516b27a37a0c552 | Shell | gaurav-mishra1990/Rest-Api | /setup.sh | UTF-8 | 936 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Example file for setting up the environment variables.
# Set your application configurations in this file and source it.
# source setup.sh
# All values below are placeholders; adjust them for your deployment.

# Specify your environment {development, testing, staging, production}
export FLASK_ENV=development

# Specify your storage type {file_storage, database}
export STORAGE_TYPE=file_storage

# Specify the full absolute path of the file in case of file_storage
export LOG_FILE=application_logs.txt

# Specify the host ip address of the database in case of database storage
export DB_HOST_NAME=localhost
# Port to connect to the database
export DB_PORT=5432
# Name of the database to store logs
export DB_NAME=log
# User of the database
export DB_USER=db_user_name
# Password of the database user
export DB_PASSWORD=db_user_password

# Specify the host ip address of Elasticsearch server
export ES_HOST=localhost
# Specify the port to connect to Elasticsearch server
export ES_PORT=9200
4b317f1253d1c55ecab0bb221d15e3f4d5d5c0dd | Shell | CESNET/wayf | /scripts/bin/check_wayf_json.sh | UTF-8 | 217 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Run the WAYF JSON checker over every *js feed file in the published
# feed directory; exit 2 when the directory is missing.
P=/opt/getMD/var/pub/current/feed
if [ -d "$P" ]
then
    # Build "-f <file>" argument pairs as an array instead of parsing
    # `ls | sed`, so unusual file names cannot break the command line.
    args=()
    for f in "$P"/*js; do
        args+=(-f "$f")
    done
    /opt/getMD/bin/check_wayf_json.pl "${args[@]}"
else
    echo "directory $P does not exist"
    exit 2
fi
| true |
8da484231d8c848c1880941731b0da5018009808 | Shell | HossamAmer12/hevc_ffmpeg | /testBits.sh | UTF-8 | 2,402 | 2.875 | 3 | [] | no_license |
# Encode one raw YUV frame with ffmpeg/x265 at a given QP, then inspect the
# produced bitstream (per-frame sizes + total size in bytes).
# Test1:
# INPUT_FILE="./test_file/ILSVRC2012_val_00001000_504_336_RGB.yuv"
# OUTPUT_ENC_FILE="./test_file/1000_0.265"
# OUTPUT_DEC_FILE="./test_file/1000_0.yuv"
# QP=0

#Test2:
INPUT_FILE="/media/h2amer/MULTICOM102/103_HA/MULTICOM103/set_yuv/pics/1/ILSVRC2012_val_00001000_504_336_RGB.yuv"
OUTPUT_ENC_FILE="/media/h2amer/MULTICOM102/103_HA/MULTICOM103/set_yuv/Seq-265-ffmpeg/1/ILSVRC2012_val_00001000_504_336_RGB_0.265"
OUTPUT_DEC_FILE="/media/h2amer/MULTICOM102/103_HA/MULTICOM103/set_yuv/Seq-Recons-ffmpeg/1/ILSVRC2012_val_00001000_504_336_RGB_0.yuv"
QP=0

# ffmpeg -i $INPUT_FILE -c:v libx265 -crf 0 output.mp4

# 265
# ffmpeg -f rawvideo -pix_fmt yuv420p -s:v 504x336 -i $INPUT_FILE -c:v hevc -crf $QP -f hevc -preset ultrafast $OUTPUT_ENC_FILE

# Final 265 -- encode the raw 504x336 yuv420p frame to a raw HEVC stream.
ffmpeg -f rawvideo -pix_fmt yuv420p -s:v 504x336 -i $INPUT_FILE -c:v hevc -crf $QP -f hevc -preset ultrafast $OUTPUT_ENC_FILE
echo "ffmpeg -f rawvideo -pix_fmt yuv420p -s:v 504x336 -i $INPUT_FILE -c:v hevc -crf $QP -f hevc -preset ultrafast $OUTPUT_ENC_FILE"

# ffmpeg -f rawvideo -pix_fmt yuv420p -s:v 504x336-i /media/h2amer/MULTICOM102/103_HA/MULTICOM103/set_yuv/pics/1/ILSVRC2012_val_00001000_504_336_RGB.yuv -c:v hevc -crf 0 -f hevc -preset ultrafast /media/h2amer/MULTICOM102/103_HA/MULTICOM103/set_yuv/Seq-265-ffmpeg/1/ILSVRC2012_val_00001000_504_336_RGB_0.265

# yuv
# ffmpeg -f rawvideo -vcodec rawvideo -s 504x336 -pix_fmt yuv420p -i $INPUT_FILE -c:v libx265 -crf $QP -preset ultrafast $OUTPUT_DEC_FILE
# Final yuv (decoded reconstruction; currently disabled)
#ffmpeg -f rawvideo -vcodec rawvideo -s 504x336 -pix_fmt yuv420p -i $INPUT_FILE -c:v hevc -crf $QP -preset ultrafast $OUTPUT_DEC_FILE

# Inspect the result: helper script prints per-frame sizes, the ES browser
# dumps NAL structure.  NOTE(review): calc_frame_size.sh is given the
# *decoded* yuv path -- confirm that is intended.
./calc_frame_size.sh $OUTPUT_DEC_FILE
./hevcesbrowser_console_linux -i $OUTPUT_ENC_FILE

# frame_size=$(grep '^0x*' go.txt) # lines start with
# echo $frame_size
# rm go.txt

# Report the size of the encoded bitstream in bytes.
FILESIZE=$(stat -c%s "$OUTPUT_ENC_FILE")
echo "Total size of $OUTPUT_ENC_FILE = $FILESIZE bytes."

# ffmpeg -f rawvideo -vcodec rawvideo -s 504x336 -pix_fmt yuv420p -i $INPUT_FILE -c:v libx265 -crf $QP -preset medium -x265-params profile=main:level-idc=50:high-tier:vbv-bufsize=100000:vbv-maxrate=100000 $OUTPUT_DEC_FILE
#ffmpeg -f rawvideo -vcodec rawvideo -s 504x336 -pix_fmt yuv420p -i $INPUT_FILE -c:v libx265 -crf $QP -preset medium $OUTPUT_DEC_FILE1
#ffmpeg -f rawvideo -vcodec rawvideo -s 504x336 -pix_fmt yuv420p -i $INPUT_FILE -c:v libx265 -crf $QP -preset ultrafast $OUTPUT_DEC_FILE2
| true |
d19b7bf6ef927bf5915fef060341282b5f4a0bd7 | Shell | kbence/quick-tdd | /src/project/common.sh | UTF-8 | 1,564 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env false
declare -a SUPPORTED_LANGUAGES
# Return 0 if $1 equals any element of the array passed *by name* as $2
# (e.g. `in_array foo my_array[@]`), 1 otherwise.
function in_array() {
    local needle=$1; shift
    # Quote the indirect expansion so elements containing whitespace stay
    # intact; the original unquoted form word-split them.
    declare -a array=("${!1}"); shift

    local item
    for item in "${array[@]}"; do
        if [[ $item == $needle ]]; then
            return 0
        fi
    done

    return 1
}
# Print the given message on stdout and abort the script with status 1.
function die() {
    printf '%s\n' "$*"
    exit 1
}
# Create a fresh project from a language template.
# Arguments:
#   $1 - project name (prompted for interactively when empty)
#   $2 - language (prompted for, and validated against SUPPORTED_LANGUAGES)
# Globals: ROOT_DIR (project root), SUPPORTED_LANGUAGES (filled by
# init_languages), COLS (terminal width -- presumably set by the caller;
# TODO confirm).
function create_project() {
    local project_name=$1; shift
    local language=$1; shift
    local project_dir

    # Discover which language templates are usable on this platform.
    init_languages

    while [[ -z $project_name ]]; do
        read -p 'Project name: ' project_name
    done

    # Keep asking until a supported language is chosen.
    while [[ -z $language ]] || ! in_array "$language" SUPPORTED_LANGUAGES[@]; do
        echo "Supported languages on this platform:"
        echo ${SUPPORTED_LANGUAGES[@]} | column -c $(($COLS - 4)) | sed -e 's/^/    /'
        echo

        read -p "Language: " language
    done

    project_dir="$ROOT_DIR/projects/$project_name"

    # Recreate the project directory from the language template.
    rm -rf "$project_dir" || die "Couldn't remove directory '$project_dir'"
    mkdir -p "$project_dir" || die "Couldn't create directory '$project_dir'"
    cp -r "$ROOT_DIR/languages/$language/template"/* "$project_dir" || die "Project copy failed!"
}
# Delete every project directory directly under $ROOT_DIR/projects
# (plain files in that directory are left untouched).
function clean_projects() {
    # -exec ... + replaces `| xargs rm -rf`: it is safe for unusual file
    # names and never runs `rm -rf` with no arguments when the projects
    # directory is empty.
    find "$ROOT_DIR/projects" -mindepth 1 -maxdepth 1 -type d -exec rm -rf {} +
}
# Append one language name to the global SUPPORTED_LANGUAGES array.
function add_language() {
    SUPPORTED_LANGUAGES=("${SUPPORTED_LANGUAGES[@]}" "$1")
}
# Populate SUPPORTED_LANGUAGES with every language under $LANGUAGE_ROOT
# whose check script reports the toolchain as available on this machine.
function init_languages() {
    for lang_dir in "$LANGUAGE_ROOT"/*; do
        lang_name=$(basename "$lang_dir")

        # Each language ships a check.sh defining <lang>_available; run it
        # in a subshell so sourced definitions don't leak into this shell.
        if (. "$lang_dir/check.sh" && "${lang_name}"_available 2>/dev/null 1>&2); then
            add_language "$lang_name"
        fi
    done
}
| true |
e3350c7ee341436f9819965adb36ddeba362a8ed | Shell | openapphack/oah-shell | /src/bash/oah-common.sh | UTF-8 | 2,460 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#
# common internal function definitions
# TODO fix support version specific environments installation
# ignore version folder if missing ln the candidate folder as the current environment
#
# Fail (status 1) with the help text when no candidate name was supplied.
function __oah_check_candidate_present {
    [[ -n "$1" ]] && return 0

    echo -e "\nNo candidate provided."
    __oah_help
    return 1
}
# Fail (status 1) with the help text when no candidate version was supplied.
function __oah_check_version_present {
    [[ -n "$1" ]] && return 0

    echo -e "\nNo candidate version provided."
    __oah_help
    return 1
}
# Resolve the VERSION (and VERSION_VALID) globals for $CANDIDATE.
# Arguments:
#   $1 - requested version (may be empty -> use current/default)
#   $2 - non-empty to accept a server-"invalid" version anyway
# Offline: only locally installed versions (or the 'current' symlink) are
# allowed.  Online: the version is validated against $OAH_SERVICE.
# Returns 1 with a message when no usable version can be determined.
function __oah_determine_version {
    if [[ "${OAH_AVAILABLE}" == "false" && -n "$1" && -d "${OAH_DIR}/data/.envs/${CANDIDATE}/$1" ]]; then
        # Offline, explicit version that is installed locally.
        VERSION="$1"
    elif [[ "${OAH_AVAILABLE}" == "false" && -z "$1" && -L "${OAH_DIR}/data/.envs/${CANDIDATE}/current" ]]; then
        # Offline, no version given: derive it from the 'current' symlink.
        VERSION=$(readlink "${OAH_DIR}/data/.envs/${CANDIDATE}/current" | sed "s!${OAH_DIR}/data/.envs/${CANDIDATE}/!!g")
    elif [[ "${OAH_AVAILABLE}" == "false" && -n "$1" ]]; then
        echo "Stop! ${CANDIDATE} ${1} is not available in offline mode."
        return 1
    elif [[ "${OAH_AVAILABLE}" == "false" && -z "$1" ]]; then
        echo "${OFFLINE_MESSAGE}"
        return 1
    elif [[ "${OAH_AVAILABLE}" == "true" && -z "$1" ]]; then
        # Online, no version given: ask the service for the default.
        VERSION_VALID='valid'
        VERSION=$(curl -s "${OAH_SERVICE}/candidates/${CANDIDATE}/default")
    else
        # Online with an explicit version: validate it with the service,
        # but still accept versions that exist locally (link or dir).
        VERSION_VALID=$(curl -s "${OAH_SERVICE}/candidates/${CANDIDATE}/$1")
        if [[ "${VERSION_VALID}" == 'valid' || ( "${VERSION_VALID}" == 'invalid' && -n "$2" ) ]]; then
            VERSION="$1"
        elif [[ "${VERSION_VALID}" == 'invalid' && -h "${OAH_DIR}/data/.envs/${CANDIDATE}/$1" ]]; then
            VERSION="$1"
        elif [[ "${VERSION_VALID}" == 'invalid' && -d "${OAH_DIR}/data/.envs/${CANDIDATE}/$1" ]]; then
            VERSION="$1"
        else
            echo ""
            echo "Stop! $1 is not a valid ${CANDIDATE} version."
            return 1
        fi
    fi
}
# Fill in defaults for the offline/online flags and derive OAH_AVAILABLE:
# the service counts as available only when online and not forced offline.
function __oah_default_environment_variables {
    # Default each flag when it is unset or empty.
    : "${OAH_FORCE_OFFLINE:=false}"
    : "${OAH_ONLINE:=true}"

    if [[ "${OAH_ONLINE}" != "false" && "${OAH_FORCE_OFFLINE}" != "true" ]]; then
        OAH_AVAILABLE="true"
    else
        OAH_AVAILABLE="false"
    fi
}
# Point the candidate's 'current' symlink at the given version.
# Arguments: $1 - candidate name, $2 - version directory name.
function __oah_link_candidate_version {
    CANDIDATE="$1"
    VERSION="$2"

    # Change the 'current' symlink for the candidate, hence affecting all shells.
    if [ -L "${OAH_DIR}/data/.envs/${CANDIDATE}/current" ]; then
        unlink "${OAH_DIR}/data/.envs/${CANDIDATE}/current"
    fi
    ln -s "${OAH_DIR}/data/.envs/${CANDIDATE}/${VERSION}" "${OAH_DIR}/data/.envs/${CANDIDATE}/current"
}
| true |
73323fad8236f1d32c13ec7e4033c0cbb284e789 | Shell | mtchavez/mac-ansible | /script/test | UTF-8 | 308 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Sets up requirements to provision with ansible
#
#
# Clean display function
#
# usage:
# display "My thing to output"
#
# Print a message with the standard "----->" prefix.
function display() {
    printf '%s\n' "-----> $1"
}
# Provision this machine with the local ansible playbook, timing the run.
echo "Running ansible playbook to provision system..."
time ansible-playbook mac-osx.yml --diff
# vim: ft=sh:
| true |
5ebeb47696236b3e5eeedfe4046b3f252754e504 | Shell | abdul-git/aws-cli | /aws-cli/.env/set-formating | UTF-8 | 1,001 | 3.890625 | 4 | [] | no_license | #!/bin/bash
##########################
# Name : set_env
# Purpose: This script is used to control environment variables
# version : 1.0
#
# History:
#    1.0 - script created
#
##########################
VER=1.0

# Draw the given message inside a coloured ASCII box using tput.
function box_out() {
    # Character count of the message; wc -c includes the trailing newline
    # and `seq 0 N` emits N+1 dashes, so the box is a little wider than
    # the text -- presumably intentional padding (TODO confirm).
    input_char=$(echo "$@" | wc -c)
    line=$(for i in `seq 0 $input_char`; do printf "-"; done)
    # tput This should be the best option. what tput does is it will read the terminal info and render the correctly escaped ANSI code for you. code like \033[31m will break the readline library in some of the terminals.
    tput bold
    line="$(tput setaf 3)${line}"
    # Same-width run of spaces, used for the empty rows of the box.
    space=${line//-/ }
    echo " ${line}"
    printf '|' ; echo -n "$space" ; printf "%s\n" '|';
    printf '| ' ;tput setaf 4; echo -n "$@"; tput setaf 3 ; printf "%s\n" ' |';
    printf '|' ; echo -n "$space" ; printf "%s\n" '|';
    echo " ${line}"
    tput sgr 0
}
# Print the current date and time (thin wrapper around date(1)).
function get_time() {
    command date
}
# Emit two visually blank lines (each contains a single space).
function set_echo_blank() {
    printf ' \n \n'
}
| true |
651d07af2df4e8d893b8c87d6e93d34595040b10 | Shell | kaleidoescape/hlt-2018 | /install.sh | UTF-8 | 3,110 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Bootstrap the project: python3 virtualenv + pytorch, Dutch lemmatizer,
# MUSE, bilingual dictionaries, pre-trained vectors and (optionally)
# fastText vectors and Wikipedia comparable corpora.
get_fastText=0  # download Facebook fastText vectors (VERY time consuming)
get_wikipedia=1 # download Wikipedia comparable corpora (time consuming)

wd=$(pwd)
. ./dl_paths.sh # filepaths to install things to

echo "Activating python3 virtualenv."
if [ ! -d "$venv" ]; then
    virtualenv -p python3 env
    # NOTE(review): this also marks "." executable -- confirm intended.
    chmod +x . ./env/bin/activate
    . ./env/bin/activate
    echo "Installing pytorch to virtualenv in $venv"
    pip3 install http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp35-cp35m-linux_x86_64.whl
    pip3 install torchvision
else
    . "$venv/bin/activate"
fi

echo "Installing other requirements."
pip3 install -r requirements.txt

echo "Downloading Dutch lemmatizer."
mkdir -p "$cstlemma_dir"
cd "$cstlemma_dir"
wget -O makecstlemma.bash https://raw.githubusercontent.com/kuhumcst/cstlemma/master/doc/makecstlemma.bash
chmod +x ./makecstlemma.bash
./makecstlemma.bash
# Have to download this file after make
wget -O "$cstlemma_dir/flexrules.dutch" http://ada.sc.ku.dk/download/cstlemma/dutch/flexrules

echo "Downloading MUSE."
cd "$wd"
git clone git@github.com:facebookresearch/MUSE.git

# Have to download them after MUSE installation
echo "Downloading Dutch/Russian dictionaries."
cd "$wd"
mkdir -p "$dictionaries"
if [ ! -f "$dictionaries/en-nl.txt" ]; then
    echo "Downloading en-nl dictionary."
    wget -O "$dictionaries/en-nl.txt" https://s3.amazonaws.com/arrival/dictionaries/en-nl.txt
fi
if [ ! -f "$dictionaries/en-ru.txt" ]; then
    echo "Downloading en-ru dictionary."
    wget -O "$dictionaries/en-ru.txt" https://s3.amazonaws.com/arrival/dictionaries/en-ru.txt
fi

#TODO update this
if [ ! -f "$vectors_dir/nl_vectors.txt" ] || [ ! -f "$vectors_dir/ru_vectors.txt" ]; then
    echo "Downloading pre-trained word vectors."
    wget -O vectors.zip https://www.dropbox.com/s/nl7bwt5rnf0jhsz/vectors.zip?dl=1
    unzip vectors.zip
else
    echo "Pre-trained word vectors already exist: $vectors_dir"
fi

# BUG FIX: the original used `[ $get_fastText > 0 ]`, which is a shell
# redirection to a file named "0" and therefore always true; the numeric
# comparison operator is -gt.
if [ "$get_fastText" -gt 0 ] && [ -f "$vectors_dir/wiki.en.vec" ] && [ -f "$vectors_dir/wiki.nl.vec" ] && [ -f "$vectors_dir/wiki.ru.vec" ]; then
    echo "Found fastText vectors. Not re-downloading."
elif [ "$get_fastText" -gt 0 ]; then
    echo "Downloading fastText word embeddings. This will take a really long time and require 9GB of space!"
    if [ ! -f "$vectors_dir/wiki.en.vec" ]; then
        curl -Lo "$vectors_dir/wiki.en.vec" https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.en.vec
    fi
    if [ ! -f "$vectors_dir/wiki.nl.vec" ]; then
        curl -Lo "$vectors_dir/wiki.nl.vec" https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.nl.vec
    fi
    if [ ! -f "$vectors_dir/wiki.ru.vec" ]; then
        curl -Lo "$vectors_dir/wiki.ru.vec" https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.ru.vec
    fi
fi

echo "Cloning biwikibot."
git clone git@github.com:kaleidoescape/biwikibot.git

# Same `>` vs -gt fix as above.
if [ "$get_wikipedia" -gt 0 ] && [ ! -d "$wikipedia_data" ]; then
    echo "Downloading Wikipedia data."
    wget -O wikipedia_data.zip https://www.dropbox.com/s/a6qihkjp385d7zw/wikipedia_data.zip?dl=1
    unzip wikipedia_data.zip
fi

echo "Installation completed."
exit 0 #to exit the virtualenv subshell
| true |
5bdeb95dfa325da287a96fbac1a7d63d35830edd | Shell | jncraton/ubuntu-server-student-env | /addstudent | UTF-8 | 621 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Create a system account for each student named on the command line.
# Each account gets the default password "Password1", which is expired so
# the student must change it at first login; home dirs are chmod 751.
if [ -z "$1" ]; then
    echo
    echo "SYNOPSIS"
    echo "$0 [student]..."
    echo
    echo "DESCRIPTION"
    echo
    echo "Adds new students to the system."
    echo
    echo "This will create new users and home directories for each student. Accounts will be given a default password of Password1. Students will be prompted to change this password on first login."
    echo
    exit 1
fi

for student in "$@"
do
    echo "Adding $student..."
    # NOTE(review): Debian adduser documents the long form --gecos; the
    # single-dash -gecos is used here -- confirm it is accepted.
    sudo adduser -gecos "" --disabled-password --quiet $student
    echo ${student}:Password1 | sudo chpasswd
    # Force a password change on first login.
    sudo passwd --expire --quiet $student
    sudo chmod 751 /home/${student}
done
| true |
bf311ab067799257b4c6835777777f0dabd02889 | Shell | OctopusDeploy/Calamari | /source/Calamari.Common/Features/Scripting/Bash/Bootstrap.sh | UTF-8 | 7,848 | 4.375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Octopus Linux helper function script
# Version: 1.1.0
# -----------------------------------------------------------------------------
# First argument is the hex AES key used by decrypt_variable below; it is
# immediately registered for masking so it never appears in logs.
sensitiveVariableKey=$1
# -----------------------------------------------------------------------------
# Function to base64 encode a service message value
# Accepts 1 argument:
# string: the value to encode
# -----------------------------------------------------------------------------
# Base64-encode a service message value (single line, no trailing newline).
# Uses printf instead of `echo -n`: echo treats values like "-n" or "-e"
# as its own options and mis-encodes them.
function encode_servicemessagevalue
{
    printf '%s' "$1" | openssl enc -base64 -A
}
# -----------------------------------------------------------------------------
# Function to base64 decode a service message value
# Accepts 1 argument:
# string: the value to decode
# -----------------------------------------------------------------------------
# Base64-decode a service message value.
# Uses printf instead of `echo -n` so encoded values that look like echo
# options (e.g. "-n...") are passed through unchanged.
function decode_servicemessagevalue
{
    printf '%s' "$1" | openssl enc -base64 -A -d
}
# -----------------------------------------------------------------------------
# Functions to request server masking of sensitive values
# -----------------------------------------------------------------------------
# Ask the Octopus server to mask the given value in all captured output.
function __mask_sensitive_value
{
    # We want to write to both stdout and stderr due to a racecondition between stdout and stderr
    # causing error logs to not be masked if stderr event is handled first.
    echo "##octopus[mask value='$(encode_servicemessagevalue "$1")']"
    echo "##octopus[mask value='$(encode_servicemessagevalue "$1")']" >&2
}

# Never let the decryption key itself appear in logs.
__mask_sensitive_value $sensitiveVariableKey
# -----------------------------------------------------------------------------
# Function to decrypt a sensitive variable
# Accepts 2 arguments:
# string: the value to decrypt (base64 encoded)
# string: the decryption iv (hex)
# -----------------------------------------------------------------------------
# Decrypt a sensitive variable with AES-128-CBC using the script-level key.
# Arguments: $1 - ciphertext (base64), $2 - IV (hex).  Plaintext to stdout.
function decrypt_variable
{
    echo $1 | openssl enc -a -A -d -aes-128-cbc -nosalt -K $sensitiveVariableKey -iv $2
}
# ---------------------------------------------------------------------------
# Function for getting an octopus variable
# Accepts 1 argument:
# string: value of the name of the octopus variable
# ---------------------------------------------------------------------------
# Look up an Octopus variable by name.  The case body below is a template
# marker: the server injects one case arm per variable (keyed by the
# base64-encoded name) at deployment time; unknown names yield "".
function get_octopusvariable
{
    INPUT=$( encode_servicemessagevalue "$1" )

    case $INPUT in
        #### VariableDeclarations ####

        *)
            echo ""
        ;;
    esac
}
# ---------------------------------------------------------------------------
# Function for failing a step with an optional message
# Accepts 1 argument:
# string: reason for failing
# ---------------------------------------------------------------------------
# Fail the deployment step, optionally reporting $1 as the result message,
# then terminate the script with exit status 1.
function fail_step
{
    if [ ! -z "${1:-}" ]
    then
        echo "##octopus[resultMessage message='$(encode_servicemessagevalue "$1")']"
    fi

    exit 1;
}
# ---------------------------------------------------------------------------
# Function for setting an octopus variable
# Accepts 3 arguments:
# string: value of the name of the octopus variable
# string: value of the value of the octopus variable
# string: optional '-sensitive' to make variable sensitive
# ---------------------------------------------------------------------------
# Set an Octopus output variable via a service message.
# Arguments: $1 - name, $2 - value, $3 - optional '-sensitive' flag.
function set_octopusvariable
{
    MESSAGE="##octopus[setVariable"

    if [ -n "$1" ]
    then
        MESSAGE="$MESSAGE name='$(encode_servicemessagevalue "$1")'"
    fi

    if [ -n "$2" ]
    then
        MESSAGE="$MESSAGE value='$(encode_servicemessagevalue "$2")'"
    fi

    if [ ! -z "${3:-}" ] && [ "$3" = "-sensitive" ]
    then
        MESSAGE="$MESSAGE sensitive='$(encode_servicemessagevalue "True")'"
    fi

    MESSAGE="$MESSAGE]"

    # NOTE(review): $MESSAGE is unquoted here; values are base64 so this is
    # harmless in practice, but quoting would be safer.
    echo $MESSAGE
}
# -----------------------------------------------------------------------------
# Function to create a new octopus artifact
# Accepts 2 arguments:
# string: value of the path to the artifact
# string: value of the original file name of the artifact
# -----------------------------------------------------------------------------
# Register a file on disk as an Octopus deployment artifact.
# Arguments:
#   $1 - path of the artifact
#   $2 - optional display name (defaults to the file's basename)
function new_octopusartifact
{
    echo "Collecting $1 as an artifact..."

    if [ ! -e "$1" ]
    then
        # BUG FIX: the original interpolated "$(1)", which *executes* a
        # command named "1"; the intended expansion is the first argument.
        error_exit $PROGNAME $LINENO "\"$1\" does not exist." $E_FILE_NOT_FOUND
        exit $?
    fi

    pth=$1
    ofn=$2
    # Size in bytes, reported back to the server with the service message.
    len=$(wc -c < "$1")

    if [ -z "$ofn" ]
    then
        ofn=$(basename "$pth")
    fi

    echo "##octopus[stdout-verbose]"
    echo "Artifact $ofn will be collected from $pth after this step completes"
    echo "##octopus[stdout-default]"
    echo "##octopus[createArtifact path='$(encode_servicemessagevalue "$pth")' name='$(encode_servicemessagevalue "$ofn")' length='$(encode_servicemessagevalue $len)']"
}
# Ask the server to delete the deployment target named $1.
function remove-octopustarget {
    echo "##octopus[delete-target machine='$(encode_servicemessagevalue "$1")']"
}
# Register a new step-package deployment target.  Runs in a subshell (note
# the parentheses body) so option parsing cannot disturb the caller.
# Options: -n/--name, -t/--target-id, --inputs, --roles, --worker-pool
# (each takes a value), --update-if-existing (flag), -- ends options.
function new_octopustarget() (
    parameters=""

    while :
    do
        case "$1" in
            -n | --name)
                parameters="$parameters name='$(encode_servicemessagevalue "$2")'"
                shift 2
                ;;
            -t | --target-id)
                parameters="$parameters targetId='$(encode_servicemessagevalue "$2")'"
                shift 2
                ;;
            --inputs)
                parameters="$parameters inputs='$(encode_servicemessagevalue "$2")'"
                shift 2
                ;;
            --roles)
                parameters="$parameters octopusRoles='$(encode_servicemessagevalue "$2")'"
                shift 2
                ;;
            --worker-pool)
                parameters="$parameters octopusDefaultWorkerPoolIdOrName='$(encode_servicemessagevalue "$2")'"
                shift 2
                ;;
            --update-if-existing)
                parameters="$parameters updateIfExisting='$(encode_servicemessagevalue "true")'"
                shift
                ;;
            --) # End of all options.
                shift
                break
                ;;
            -*)
                echo "Error: Unknown option: $1" >&2
                exit 1
                ;;
            *) # No more options
                break
                ;;
        esac
    done

    echo "##octopus[createStepPackageTarget ${parameters}]"
)
# -----------------------------------------------------------------------------
# Function to update progress
# Accepts 2 arguments:
# int: percentage progress
# string: message to show
# -----------------------------------------------------------------------------
# Report step progress to the server.
# Arguments: $1 - percentage (0-100), $2 - message to show.
function update_progress
{
    echo "##octopus[progress percentage='$(encode_servicemessagevalue "$1")' message='$(encode_servicemessagevalue "$2")']"
}
# -----------------------------------------------------------------------------
# Functions write a messages as different levels
# -----------------------------------------------------------------------------
# Emit a message at the various Octopus output levels.  Each helper quotes
# "$1": the original unquoted `echo $1` word-split the message (collapsing
# internal whitespace) and glob-expanded characters like '*'.

# Verbose-level message.
function write_verbose
{
    echo "##octopus[stdout-verbose]"
    echo "$1"
    echo "##octopus[stdout-default]"
}

# Highlighted message.
function write_highlight
{
    echo "##octopus[stdout-highlight]"
    echo "$1"
    echo "##octopus[stdout-default]"
}

# "Waiting" status message.
function write_wait
{
    echo "##octopus[stdout-wait]"
    echo "$1"
    echo "##octopus[stdout-default]"
}

# Warning message.
function write_warning
{
    echo "##octopus[stdout-warning]"
    echo "$1"
    echo "##octopus[stdout-default]"
}
# -----------------------------------------------------------------------------
# Functions to write the environment information
# -----------------------------------------------------------------------------
# Print a verbose summary of the host environment (OS, user, host name,
# CPU count, directories, PID) unless the deployment variable
# Octopus.Action.Script.SuppressEnvironmentLogging is "True".
function log_environment_information
{
    suppressEnvironmentLogging=$(get_octopusvariable "Octopus.Action.Script.SuppressEnvironmentLogging")
    if [ "$suppressEnvironmentLogging" == "True" ]
    then
        return 0
    fi

    echo "##octopus[stdout-verbose]"
    echo "Bash Environment Information:"
    echo "  OperatingSystem: $(uname -a)"
    echo "  CurrentUser: $(whoami)"
    echo "  HostName: $(hostname)"
    echo "  ProcessorCount: $(getconf _NPROCESSORS_ONLN)"
    # Directory containing this bootstrap script.
    currentDirectory="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
    echo "  CurrentDirectory: $currentDirectory"
    # mktemp -u only prints a candidate name; dirname yields the temp dir.
    tempDirectory=$(dirname $(mktemp -u))
    echo "  TempDirectory: $tempDirectory"
    echo "  HostProcessID: $$"
    echo "##octopus[stdout-default]"
}
log_environment_information | true |
d50dac78a70676172edc3b207fb17fb19ee7f33c | Shell | dimapod/devops-demo | /infra/concourse/pipeline/monitoring/monitoring_provision.sh | UTF-8 | 703 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env bash
# Provision the monitoring EC2 host with ansible, pointing it at the
# application host.  IPs come from files fetched into *-ip-s3 directories.
# Prepare Base AMI ID
APPLICATION_IP=$(head -n 1 application-ip-s3/application-ip)
MONITORING_IP=$(head -n 1 monitoring-ip-s3/monitoring-ip)

echo "Application PI: ${APPLICATION_IP}"
echo "Monitoring PI: ${MONITORING_IP}"

# SSH key must not be world-readable or ssh refuses it.
chmod 400 monitoring-keys/dpo-monitoring.pem

# Prepare hosts: the ansible inventory is simply the monitoring IP file.
rm devops-infra/infra/monitoring/provision/ansible/hosts
cp monitoring-ip-s3/monitoring-ip devops-infra/infra/monitoring/provision/ansible/hosts

echo "Waiting 15s for EC2 to boot..."
sleep 15

# Provision
cd devops-infra/infra/monitoring/provision/ansible
ansible-playbook site.yml --private-key=../../../../../monitoring-keys/dpo-monitoring.pem --extra-vars "application_host=${APPLICATION_IP}"
| true |
85528d114526990b7ef19d4bfc1fb806e99e4539 | Shell | TabbedOut/PushSample | /libUAirship-latest/PushSample/checkConfig.sh | UTF-8 | 1,664 | 2.828125 | 3 | [
"MIT",
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Copyright 2009-2014 Urban Airship Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binaryform must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided withthe distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE URBAN AIRSHIP INC ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL URBAN AIRSHIP INC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Xcode build phase: copy the optional AirshipConfig.plist from the source
# root into the built app bundle when it exists; otherwise just report it.
echo "Checking for file $SRCROOT/AirshipConfig.plist..."
if [ ! -f "$SRCROOT/AirshipConfig.plist" ]; then
    echo "Did not find AirshipConfig.plist"
else
    echo "Found AirshipConfig.plist, and copying to $TARGET_BUILD_DIR/$EXECUTABLE_FOLDER_PATH"
    cp "$SRCROOT/AirshipConfig.plist" "$TARGET_BUILD_DIR/$EXECUTABLE_FOLDER_PATH/"
fi
| true |
056aff4d5991273d6ab046f1529da2147d854405 | Shell | pillaiuma/shell | /two2.sh | UTF-8 | 319 | 3.4375 | 3 | [] | no_license | #! bin/bash
# Read two positive integers and print the quotient of the larger divided
# by the smaller (via bc -l); zero or negative input is rejected, and
# equal inputs print a blank line (original behaviour preserved).
echo "Enter 2 numbers:"
read num1
read num2

# `-le 0` already covers `-eq 0`; the original's `-eq 0 -o -le 0` was
# redundant and used the obsolete `-o` test operator.
if [ "$num1" -le 0 ]
then
    echo "Invalid number"
elif [ "$num2" -le 0 ]
then
    echo "Invalid number"
elif [ "$num1" -gt "$num2" ]
then
    echo "($num1/$num2)"|bc -l
elif [ "$num2" -gt "$num1" ]
then
    echo "($num2/$num1)"|bc -l
else
    echo
fi
| true |
ed34dabad54defdbcd26942e850f49b5ab3db266 | Shell | tokenguardio/smartbugs | /utils/docker/smartbugs-ilf/scripts/entrypoint.sh | UTF-8 | 2,127 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Docker entrypoint: fuzz every contract in the Solidity file given as $1
# with ILF, merging per-contract vulnerability findings into /results.json.
echo Hello from entrypoint
echo $@

cd /go/src/ilf

# Reset the example truffle project so only the target contract remains.
rm -rf example/crowdsale/build
rm example/crowdsale/contracts/crowdsale.sol
rm example/crowdsale/contracts/Migrations.sol
rm example/crowdsale/transactions.json
rm example/crowdsale/migrations/2_deploy_contracts.js
cp "$1" example/crowdsale/contracts/

# Contract names and required solc version, parsed from the source file
# (ANTLR banner lines are filtered out).
contracts=$(python3 /workdir/scripts/printContractNames.py "$1" | grep -v ANTLR)
echo $contracts
solc_version=$(python3 /workdir/scripts/get_solc_version.py "$1" | grep -v ANTLR)
solc-select install $solc_version
solc-select use $solc_version
python3 /workdir/scripts/get_migration.py $solc_version >> /go/src/ilf/example/crowdsale/contracts/Migrations.sol

# Generate the truffle migration script: one artifacts.require per contract...
i=0
for c in $contracts; do
    echo "var c${i} = artifacts.require(\"${c}\");" >> example/crowdsale/migrations/2_deploy_contracts.js
    ((i=i+1))
done
# ...and a deployer that deploys each of them.
echo "module.exports = function(deployer) {" >> example/crowdsale/migrations/2_deploy_contracts.js
i=0
for c in $contracts; do
    echo "deployer.deploy(c${i});" >> example/crowdsale/migrations/2_deploy_contracts.js
    ((i=i+1))
done
echo "};" >> example/crowdsale/migrations/2_deploy_contracts.js
echo >> example/crowdsale/migrations/2_deploy_contracts.js
cat example/crowdsale/migrations/2_deploy_contracts.js

# Deploy the contracts against a local chain and record the transactions.
python3 /go/src/ilf/script/extract.py --proj /go/src/ilf/example/crowdsale/ --port 8545
cat example/crowdsale/transactions.json

i=0
rm -rf /results.json /new_results.json /old_results.json
touch /results.json
for c in $contracts; do
    echo Contract: $c
    # Fuzz this contract; findings land on the last line of results.txt.
    python3 -m ilf --limit 2000 --model ./model/ --fuzzer imitation --proj ./example/crowdsale/ --contract $c --log_to_file results.txt -v 1
    # Keep only the vulnerability keys (drop counters/coverage metrics).
    tail -1 results.txt | awk '{$1=""; $2=""; print $0}' | jq '(keys_unsorted[]) as $key | if $key!="tx_count" and $key!="num_contracts" and $key!="insn_coverage" and $key!="block_coverage" then {($key): .[$key]} else empty end' > /new_results.json
    # Merge this contract's findings into the running results object.
    if [ $i -gt 0 ]; then
        cp /results.json /old_results.json
        jq -s '.[0] * .[1]' /old_results.json /new_results.json > /results.json
    else
        cp /new_results.json /results.json
    fi
    ((i=i+1))
done
c30e3bdc7d0fc741a254e260dd00d1fd3cff2935 | Shell | webmat/git_remote_branch | /etc/grb-completion.bash | UTF-8 | 999 | 3.625 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | _grb()
{
local cur=${COMP_WORDS[COMP_CWORD]}
local verb=${COMP_WORDS[1]}
local position=${COMP_CWORD}
if [[ "$verb" == "explain" ]]; then
let "position = $position - 1"
fi
case "$position" in
1)
COMPREPLY=( $(compgen -W "help explain create new delete destroy kill remove rm rename rn mv move track follow grab fetch" -- $cur) )
;;
2)
COMPREPLY=( $(compgen -W "$(_grb_branch)" -- $cur))
;;
3)
COMPREPLY=( $(compgen -W "$(_grb_remotes)" -- $cur))
;;
esac
}
# List every known branch name: remote branches (with the remote prefix
# stripped, HEAD excluded) plus local branches, deduplicated.
_grb_branch()
{
    {
        git for-each-ref refs/remotes --format="%(refname:short)" |
        grep -v HEAD |
        sed 's/^.*\///g';
        git for-each-ref refs/heads --format="%(refname:short)";
    } | sort | uniq
}
# List every configured git remote name, derived from remote.<name>.url
# config keys (IFS is newline so each config line is one loop item).
_grb_remotes()
{
    local i IFS=$'\n'
    for i in $(git config --get-regexp 'remote\..*\.url' 2>/dev/null); do
        i="${i#remote.}"
        echo "${i/.url*/}"
    done
}

# Attach the completion function to the grb command.
complete -F _grb grb
| true |
8b6100b7293e6fc0160ea450ab1973142ee58e11 | Shell | themole-ti/compiler-test | /create_resourcedefs.sh | UTF-8 | 823 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Script to generate defines for resource locations in cartridge image
# Parameters
MAKEFILE=Makefile.res
MAPFILE=res.map
OUTFILE=$1

# First, generate map file via linker
make -f $MAKEFILE

# Create arrays of addresses and symbol names from the linker map.
# NOTE(review): the byte offsets (27-34 and 51-) assume a fixed map file
# layout -- confirm they match the linker in use.
LOCATIONS=($(cat $MAPFILE | grep _binary_resources | cut -b 27-34))
NAMES=($(cat $MAPFILE | grep _binary_resources | cut -b 51-))

# Output file header
echo "#ifndef RESOURCE_DEFS_H" > $OUTFILE
echo "#define RESOURCE_DEFS_H" >> $OUTFILE
echo "" >> $OUTFILE
echo "#define RES_START(a) _binary_resources_ ## a ##_dat_start" >> $OUTFILE
echo "#define RES_END(a) _binary_resources_ ## a ##_dat_end" >> $OUTFILE
echo "" >> $OUTFILE

# Output defines to file: one "#define NAME 0x<addr>l" per resource symbol.
for i in "${!NAMES[@]}"; do
    echo -e "#define " "${NAMES[i]} \t" 0x"${LOCATIONS[i]}"l >> $OUTFILE
done
echo "" >> $OUTFILE
echo "#endif" >> $OUTFILE
| true |
1b14f7650f1a732fa65622afafb55fa28218330e | Shell | Ribeiro05/fdp | /login.sh | UTF-8 | 951 | 3.15625 | 3 | [] | no_license | #!/bin/bash
clear

# FL ("fazer login"): prompt for a username and password via dialog(1)
# and launch MENUZAUM.sh on success; credentials are hard-coded below.
FL(){
USUARIO=admin
SENHA=admin
#-----------------------------------------------------------------------#
# Ask for the username.
USER=$(dialog --stdout \
--title 'Login' \
--inputbox "digite o seu usuário:" \
0 0)
if [ $USER == $USUARIO ]; then
# Username matched: ask for the password.
SE=$( dialog --stdout \
--title 'Login' \
--inputbox 'Digite a senha: ' \
0 0)
if [ $SE == $SENHA ]; then
bash MENUZAUM.sh
else
# Wrong password: show an info box.
dialog --stdout --infobox "Senha Login" 0 0
fi
fi
}
#________________________________________________________________________#
# SAIR ("exit"): show a brief "finishing" info box.
SAIR(){
EXIT=$( dialog --stdout \
--title 'Saindo' \
--infobox 'Finalizado...' \
0 0)
}
#-----------------------------------------------------------------------#
# Main menu: offer "log in" or "exit" and dispatch to FL or SAIR.
function DIG(){
MENU=$( dialog --stdout \
--title ' Acesso ao Projeto Omega ' \
--menu 'Escolha uma opção:' \
0 0 0 \
1 'Fazer login' \
2 'Sair')
case $MENU in
1) FL ;;
2) SAIR ;;
esac
}

# Entry point.
DIG
| true |
bb7fe92c678679141ed5d33a80f79ab9fd77740c | Shell | rahmiyildiz/slideDownloader | /slideDownloader | UTF-8 | 1,289 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Download every slide image of a slideshare presentation ($1) and convert
# them into a single PDF named after the page (or after $2 when given).
dir=$(pwd)
num=1
if [ -z $1 ]
then
echo "Usage 'slideDownloader es.slideshare.net/xxxxxxx/xxxxx [name]'"
else
# Work in a scratch directory under /tmp.
if [ -d /tmp/slidetemp ]
then
rm -R /tmp/slidetemp
fi
mkdir /tmp/slidetemp
cd /tmp/slidetemp
echo "Looking for slides..."
wget -q $1 # Download the web page to locate the slide images
mkdir images
cd images
# Count the data-full="..." attributes = number of slides.
totalSlides=$(cat ../$(ls -F .. | grep -v '/$') | grep -Po 'data-full=".*?"' | grep -Po '".*?"' | wc -l)
# The pipeline below extracts the image URLs to download.
for i in $(cat ../$(ls -F .. | grep -v '/$') | grep -Po 'data-full=".*?"' | grep -Po '".*?"' | tr '"' ' ')
do
curl -s $i > $num.jpg
echo -ne "Downloading $num of $totalSlides... \033[0K\r"
while [ $(wc -c <$num.jpg) -lt 10 ] # Retry while the download failed (file suspiciously small)
do
curl -s $i > $num.jpg
done
let num=$num+1
done
echo ""
echo "Converting to pdf..."
# Assemble the numbered jpgs (numeric sort) into a PDF in the start dir.
if [ -z $2 ]
then
convert $(ls -1 | sort -n) $dir/$(ls -F .. | grep -v '/$').pdf
else
convert $(ls -1 | sort -n) $dir/$2.pdf
fi
cd $dir
rm -R /tmp/slidetemp
fi
| true |
32eadcb24b4cf8489473e85e9a5dcf11d01d0da3 | Shell | jvshahid/emacs-config | /dotfiles/functions.sh | UTF-8 | 2,864 | 3.453125 | 3 | [] | no_license | function mount_optimus() {
if [ "$#" -ne 1 ]; then
echo "Usage: mount_optimus ip_address"
return 1;
fi
sudo mount -t cifs -o uid=jvshahid,gid=jvshahid,forceuid,user=,password=,rw,nounix,noperm //$1/LG-NETWORKFOLDER /media/optimus/
}
# Set CDPATH to my Documents directory
document=($HOME/codez $HOME/codez/gocodez/src/github.com/influxdb)
# Join the array elements with ':' (IFS drives the "${arr[*]}" join),
# then restore default word-splitting.
IFS=':'
document_with_colons="${document[*]}"
unset IFS
CDPATH=.:$document_with_colons
# disable terminal xon/xoff to be able to search forward
stty -ixon
# Make GitHub pull requests fetchable as origin/pr/<number> in this repo.
function pullify {
    git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
    git fetch origin
}
# Return 0 when the current shell process is zsh, 1 otherwise.
# Works by reading the command column of `ps` for this PID -- assumes the
# command name is the 4th whitespace-separated field (TODO confirm this
# holds for the local ps implementation).
function is_zsh {
    if [[ "x$(ps -p $$ | tail -n1 | awk '{print $4}')" == "xzsh" ]]; then
        return 0
    else
        return 1
    fi
}
# Convert milliseconds-since-epoch to a human-readable date; the result is
# printed and also copied via xclipc (presumably an xclip wrapper -- TODO
# confirm it exists on this machine).
function millis_to_date {
    if [ $# -ne 1 ]; then
        echo "Usage: millis_to_date <milliseconds since epoc>"
        return 1
    fi
    # Truncate to whole seconds for date(1).
    millis=$(echo "$1 / 1000" | bc)
    date -d @$millis
    echo -n $(date -d @$millis) | xclipc
}
# Convert a date string (YYYYMMDD, optionally with a time) to milliseconds
# since the epoch; the value is printed and also copied via xclipc.
function date_to_millis {
    if [ $# -ne 1 ]; then
        echo "Usage: date_to_millis <YYYYMMDD [HH:[MM:[SS]]]>"
        return 1
    fi
    seconds=$(date -d "$1" +"%s")
    # Seconds -> milliseconds by appending three zeros.
    echo "${seconds}000"
    echo -n "${seconds}000" | xclipc
}
# Re-print the first input line (the header) at the top of every
# screenful, so column headers stay visible when paging long output.
function print_header {
    screen_rows=$(tput lines)

    # BUG FIX: check read's status *before* echoing -- the original tested
    # $? after the echo, so it always saw echo's status (0) and EOF on the
    # header line was never detected.
    if ! read header; then
        echo "EOF reached while reading the header"
        return 1
    fi
    echo "$header"

    count=2
    while read line; do
        # Start a new "screen" by repeating the header.
        if [[ "$count" -eq "$screen_rows" ]]; then
            echo "$header"
            count=2
        fi
        echo "$line"
        count=$((count + 1))
    done
}
# Print the absolute path of the current git repository's root.
function git_root_dir {
    git rev-parse --show-toplevel
}

# cd to the root of the current git repository.
function repo_home {
    cd $(git_root_dir)
}
function get_lvc_data() {
# Fetch cached LVC values via the cache-loader-ruby helper script.
# $2 is a short product alias (expanded below) or a full product name;
# $1, $3 and $4 are forwarded to get_cached_values.rb as-is.
pushd $HOME/Documents/benchmark/cache-loader-ruby
# expand the short aliases to their full product names
case "$2" in
A_S_CDS) product="ATTRIBUTION_SENSITIVITIES_CDS_CURVES" ;;
A_S) product="ATTRIBUTION_SENSITIVITIES" ;;
A_S_IRS) product="ATTRIBUTION_SENSITIVITIES_IRS" ;;
A_R) product="ATTRIBUTION_REALTIME" ;;
A_R_IRS) product="ATTRIBUTION_REALTIME_IRS" ;;
A_R_CDS) product="ATTRIBUTION_REALTIME_CDS_CURVES" ;;
*) product=$2 ;;
esac
$HOME/Documents/benchmark/cache-loader-ruby/scripts/get_cached_values.rb $1 $product $3 $4
popd
}
function firefox32() {
# Run the 32-bit Firefox docker image, sharing the host X11 socket so the
# GUI can display; extra arguments ("$@") are passed through to `docker run`.
docker run --rm "$@" -v /tmp/.X11-unix/X0:/tmp/.X11-unix/X0 jvshahid/firefox32
}
function import_vpn() {
# Import an OpenVPN config into NetworkManager and normalise it:
# rename the connection to $1, keep it from becoming the default route,
# and set the VPN username.
# $1 - desired connection name, $2 - path to the .ovpn file
local name=$1
local file=$2
local current_name
# nmcli names the imported connection itself; parse that name back out of
# its "Connection '<name>' ... successfully" message so it can be renamed
current_name=$(nmcli c import type openvpn file "$file" | grep successfully | sed -E "s/Connection '(.*)'.*/\\1/")
# fail loudly instead of running `nmcli c m` with an empty name
[ -n "$current_name" ] || { echo "import_vpn: import failed for $file" >&2; return 1; }
nmcli c m "$current_name" connection.id "$name"
nmcli c m "$name" ipv4.never-default true
nmcli c m "$name" vpn.user-name jshahid
}
| true |
9f056f3118a6071a0fc860f6a4a5fb1a724dc399 | Shell | easyfmxu/mintreport | /fetch-transactions.sh | UTF-8 | 1,470 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Run from the script's own directory so config.sh and relative paths resolve.
cd "$(dirname "$0")"
COOKIEFILE="/tmp/cookie-mint.txt"
# When false, an existing cookie file is wiped and a fresh login is forced.
USEOLDCOOKIE=true
# config.sh is expected to define CSVDIR (and, per later use, USERNAME/PASSWORD).
source config.sh
if [ -z $CSVDIR ]; then
echo "Unable to load config file. Exiting."
exit 1
fi
if [ ! -d $CSVDIR ]; then
echo "CSV directory does not exist. Exiting."
exit 1
fi
# Timestamped output file, e.g. <CSVDIR>/2020-01-01_12-00-00.csv
CSVFILE="$CSVDIR/$(date +"%Y-%m-%d_%H-%M-%S").csv"
# Ensure the cookie jar exists; optionally clear it (see USEOLDCOOKIE above).
if [ -f $COOKIEFILE ]; then
if ! $USEOLDCOOKIE; then
echo "" > $COOKIEFILE
echo "Cleared cookie file."
fi
else
touch $COOKIEFILE
echo "Created cookie file."
fi
function checkLoggedIn {
# Probe the Mint overview page with the saved cookies and set the global
# ISLOGGEDIN to true/false. Heuristic: a logged-in response is assumed to
# produce more than 75 lines of verbose curl output (headers + body) -
# fragile, but there is no API here. Also reads/updates $COOKIEFILE and
# overwrites the global URL.
URL="https://wwws.mint.com/overview.event"
LINES=`curl -v -b $COOKIEFILE -c $COOKIEFILE $URL 2>&1 | wc -l`
if [ $LINES -gt 75 ]; then
echo "Currently logged in."
ISLOGGEDIN=true
else
echo "Not logged in."
ISLOGGEDIN=false
fi
}
# Probe current session; log in with the form POST if the cookies are stale.
checkLoggedIn
if ! $ISLOGGEDIN; then
echo "Submitting login form..."
URL="https://wwws.mint.com/loginUserSubmit.xevent"
# USERNAME/PASSWORD come from config.sh sourced above
DATA="username=$USERNAME&password=$PASSWORD&task=L"
curl -v -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "$DATA" -b $COOKIEFILE -c $COOKIEFILE $URL &>/dev/null
# re-probe to confirm the login actually took
checkLoggedIn
if ! $ISLOGGEDIN; then
echo "Error logging in. Exiting."
exit 1
fi
fi
# Optional first argument "loginonly": refresh the session and stop.
if [ "$1" = "loginonly" ]; then
echo "Login only flag detected. Exiting."
exit 0
fi
# Download the transactions CSV using the (now valid) session cookies.
echo "Fetching transactions..."
URL="https://wwws.mint.com/transactionDownload.event"
curl -b $COOKIEFILE -c $COOKIEFILE $URL 2>/dev/null > $CSVFILE
echo "Transactions saved to: $CSVFILE"
25d51df067c540a475c8c166ad2c66b2b13ef53f | Shell | labbots/google-drive-upload | /release/sh/gupload | UTF-8 | 88,771 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Marker for the inlined modules below; presumably checked when the same
# code is sourced as separate files - not referenced within this chunk.
SELF_SOURCE="true"
# allexport: every variable assigned from here on is exported to children.
set -a
_assert_regex(){
# Succeed iff string $2 matches extended regex $1 (both required).
pattern_assert_regex="${1:?Error: Missing pattern}"
printf "%s\n" "${2:?Missing string}" | grep -qE "$pattern_assert_regex"
}
_count(){
# Print the number of lines read from stdin.
wc -l
}
_epoch(){
# Print the current unix epoch time in seconds.
date +'%s'
}
_required_column_size(){
# Detect the terminal width into COLUMNS, trying bash, zsh, stty and tput
# in turn, and succeed only when it is wider than 45 columns (minimum the
# centered status output needs). The trailing ||: keeps the probe from
# failing the function when none of the tools are available.
COLUMNS="$({ command -v bash 1>|/dev/null&&bash -c 'shopt -s checkwinsize && (: && :); printf "%s\n" "${COLUMNS}" 2>&1';}||{ command -v zsh 1>|/dev/null&&zsh -c 'printf "%s\n" "${COLUMNS}"';}||{ command -v stty 1>|/dev/null&&_tmp="$(stty size)"&&printf "%s\n" "${_tmp##* }";}||{ command -v tput 1>|/dev/null&&tput cols;})"||:
[ "$((COLUMNS))" -gt 45 ]&&return 0
}
_set_value(){
# Export variable $2:
#   d | direct   - assign the literal value $3
#   i | indirect - assign the value of the variable whose NAME is in $3
# Any other mode returns 1.
case "${1:?}" in
d | direct) export "${2:?}=${3}" ;;
i | indirect) eval export "${2}"=\"\$"${3}"\" ;;
*) return 1 ;;
esac
}
# Percent-encode $1 for use in a URL. RFC 3986 unreserved characters
# (alnum plus - . _ ~) and the sub-delims ! * ' ( ) pass through verbatim.
# The body is a subshell "( ... )" so the locale overrides do not leak.
_url_encode()(
LC_ALL=C
LANG=C
# z[] maps each byte 1..125 to its numeric code; each input char is then
# emitted either verbatim or as %XX.
# Bug fix: the class used to read /[[:alnum:]]_.!~*\47()-]/ (stray ']'),
# a multi-character pattern a single char can never match, so EVERY
# character was percent-encoded.
# NOTE(review): bytes above 125 are not in the table and encode as %00 -
# pre-existing limitation for non-ASCII input.
awk 'BEGIN {while (y++ < 125) z[sprintf("%c", y)] = y
while (y = substr(ARGV[1], ++j, 1))
q = y ~ /[[:alnum:]_.!~*\47()-]/ ? q y : q sprintf("%%%02X", z[y])
print q}' "$1")
_auto_update(){
# Periodic self-update hook: when the script is installed system-wide
# (REPO, COMMAND_NAME, INSTALL_PATH, TYPE and TYPE_VALUE all non-empty),
# run _update once AUTO_UPDATE_INTERVAL seconds have elapsed since
# LAST_UPDATE_TIME, then persist the new check time into the installed
# script via _update_value. Always returns 0.
export COMMAND_NAME INSTALL_PATH TYPE TYPE_VALUE REPO LAST_UPDATE_TIME AUTO_UPDATE_INTERVAL
command -v "$COMMAND_NAME" 1>/dev/null&&if [ -n "${REPO:+${COMMAND_NAME:+${INSTALL_PATH:+${TYPE:+$TYPE_VALUE}}}}" ];then
current_time="$(_epoch)"
# interval elapsed -> fetch and run the installer in "update" mode
[ "$((LAST_UPDATE_TIME+AUTO_UPDATE_INTERVAL))" -lt "$(_epoch)" ]&&_update update
_update_value LAST_UPDATE_TIME "$current_time"
fi
return 0
}
_update(){
# Download and run the project's install.sh to update (default) or
# uninstall ($1 = "uninstall") the installed script.
# Refuses to run for a global install without root. Returns 0 even when
# the job ran; returns 1 only when the downloaded script is missing or
# fails a syntax check.
job_update="${1:-update}"
[ "${GLOBAL_INSTALL:-}" = true ]&&! [ "$(id -u)" = 0 ]&&printf "%s\n" "Error: Need root access to update."&&return 0
[ "$job_update" = uninstall ]&&job_uninstall="--uninstall"
_print_center "justify" "Fetching $job_update script.." "-"
repo_update="${REPO:-labbots/google-drive-upload}" type_value_update="${TYPE_VALUE:-latest}" cmd_update="${COMMAND_NAME:-gupload}" path_update="${INSTALL_PATH:-$HOME/.gdrive-downloader/bin}"
# for release installs, resolve "latest" (or a tag) to a concrete sha
{ [ "${TYPE:-}" != branch ]&&type_value_update="$(_get_latest_sha release "$type_value_update" "$repo_update")";}||:
if script_update="$(curl --compressed -Ls "https://github.com/$repo_update/raw/$type_value_update/install.sh")";then
_clear_line 1
# syntax-check the downloaded installer before executing it
printf "%s\n" "$script_update"|sh -n||{
printf "%s\n" "Install script downloaded but malformed, try again and if the issue persists open an issue on github."
return 1
}
printf "%s\n" "$script_update"|sh -s -- ${job_uninstall:-} --skip-internet-check --cmd "$cmd_update" --path "$path_update"
current_time="$(date +'%s')"
# on update (not uninstall) remember when we last self-updated
[ -z "$job_uninstall" ]&&_update_value LAST_UPDATE_TIME "$current_time"
else
_clear_line 1
"${QUIET:-_print_center}" "justify" "Error: Cannot download" " $job_update script." "=" 1>&2
return 1
fi
return 0
}
_update_value(){
# Persist a value into the installed script itself: rewrite
# ${INSTALL_PATH}/${COMMAND_NAME} so that line 2 becomes
#   <name>="<value>" # added values
# replacing any previous "# added values" line for that name.
#   $1 - variable name, $2 - value (may be empty)
command_path="${INSTALL_PATH:?}/${COMMAND_NAME:?}"
value_name="${1:?}" value="${2:-}"
# everything except the shebang (sed 1d) and the old injected line
script_without_value_and_shebang="$(grep -v "$value_name=\".*\".* # added values" -- "$command_path"|sed 1d)"
new_script="$(sed -n 1p -- "$command_path"
printf "%s\n" "$value_name=\"$value\" # added values"
printf "%s\n" "$script_without_value_and_shebang")"
# syntax-check before overwriting the installed file
printf "%s\n" "$new_script"|"${INSTALLATION:-bash}" -n||{
printf "%s\n" "Update downloaded but malformed, try again and if the issue persists open an issue on github."
return 1
}
chmod u+w -- "$command_path"&&printf "%s\n" "$new_script" >|"$command_path"&&chmod "a-w-r-x,${PERM_MODE:-u}+r+x" -- "$command_path"
return 0
}
_is_fd_open(){
# Verify that every file descriptor listed (space separated) in $1 is
# open; print an error and return 1 at the first closed one.
for fd_is_fd_open in ${1:?};do
{ true >&"$fd_is_fd_open";} 2<>/dev/null||{
printf "%s\n" "Error: fd $fd_is_fd_open not open."
return 1
}
done
}
_parser_add_help(){
# Append $1 (a free-form help blurb) to the accumulated --help output,
# preceded by the separator bar; stderr of the assignment is discarded.
_PARSER_ALL_HELP="$_PARSER_ALL_HELP
${__PARSER_BAR:-}
${1:-}" 2>|/dev/null
}
_parser_check_arguments(){
# Abort with a usage message when a flag got fewer arguments than it
# requires. Called from generated flag handlers as:
#   _parser_check_arguments <nargs> <flag> [remaining args...]
nargs_parser_check_arguments="$((${1:?_parser_check_arguments}))"
# args actually available to the flag (exclude <nargs> and the flag itself)
num_parser_check_arguments=$(($#-2))
[ "$num_parser_check_arguments" -lt "$nargs_parser_check_arguments" ]&&{
printf "%s\n" "${0##*/}: $2: flag requires $nargs_parser_check_arguments argument."
printf "\n%s\n" "Help:"
printf "%s\n" "$(_usage "$2")"
exit 1
}
return 0
}
_flag_exists(){
# Check whether $1 (with or without leading dashes) was registered via
# _parser_setup_flag; on success store the dash-less flag name into the
# variable named by $2, on failure return 1.
tmp_flag_exists="" option_flag_exists=""
_flag_help "${1:?}" tmp_flag_exists option_flag_exists
# an empty help text means the flag was never registered
[ -z "$tmp_flag_exists" ]&&return 1
_set_value d "${2:?}" "$option_flag_exists"
}
_flag_help(){
# Fetch the registered help text for flag $1.
#   $2 - variable receiving the help text (read from _parser__help_<flag>)
#   $3 - optional variable receiving the dash-less flag name
flag_flag_help=""
_trim "-" "${1:?_flag_help}" flag_flag_help
_set_value i "${2:?_flag_help}" "_parser__help_$flag_flag_help"
_set_value d "${3:-_}" "$flag_flag_help"
}
_parse_arguments(){
# Main driver of the embedded flag parser.
#   $1 - name of a function that registers all flags (e.g. _parser_setup_flags)
#   remaining args - the command line to parse
# Each registered flag "-x" dispatches to its generated _parser_process_x
# handler; non-flag words (and everything after "--") go to
# _parser_process_input.
__NEWLINE="
"
# decide whether stderr is a terminal that can render the separator bar
_parse_support_ansi_escapes(){
case "$TERM" in
xterm*|rxvt*|urxvt*|linux*|vt*|screen*){ [ -t 2 ]&&return 0;}||return 1;;
*):
esac
{ [ -t 2 ]&&return 0;}||return 1
}
# local copy of the terminal-width probe (>45 columns required)
_parser_required_column_size(){
COLUMNS="$({ command -v bash 1>|/dev/null&&bash -c 'shopt -s checkwinsize && (: && :); printf "%s\n" "${COLUMNS}" 2>&1';}||{ command -v zsh 1>|/dev/null&&zsh -c 'printf "%s\n" "${COLUMNS}"';}||{ command -v stty 1>|/dev/null&&_tmp="$(stty size)"&&printf "%s\n" "${_tmp##* }";}||{ command -v tput 1>|/dev/null&&tput cols;})"||:
[ "$((COLUMNS))" -gt 45 ]&&return 0
}
# build a full-width "_____" bar used to separate help sections
_parse_support_ansi_escapes&&_parser_required_column_size&&__PARSER_BAR="$(\
filler='' \
symbol='_'
i=1&&while [ "$i" -le "$COLUMNS" ];do
filler="$filler$symbol"&&i="$((i+1))"
done
printf "%s\n" "$filler")"
__PARSER_BAR="${__PARSER_BAR:+$__PARSER_BAR$__NEWLINE}"
# reset all parser state before registering flags
unset _PARSER_ALL_HELP _PARSER_ARGS_SHIFT _PARSER_PREPROCESS_FUNCTION
unset _PARSER_FLAGS _PARSER_CURRENT_FLAGS _PARSER_CURRENT_NARGS _PARSER_CURRENT_ARGS _PARSER_CURRENT_ARGS_TYPE
# run the flag-registration function given as $1
"${1:?_parse_arguments - 1: Missing funtion name to setup flags}"||return 1
shift 2>|/dev/null
# run the accumulated per-flag preprocess snippets (defaults, unsets, ...)
_parser_run_preprocess||return 1
while [ "$#" -gt 0 ];do
case "$1" in
''):;;
--)shift
# everything after "--" is treated as plain input
while [ "$#" -gt 0 ];do
_parser_process_input "$@"||return 1
shift
done
;;
-*)\
flag_parse_arguments=""
if _flag_exists "$1" flag_parse_arguments;then
"_parser_process_$flag_parse_arguments" "$@"||return 1
else
printf "%s\n\n" "${0##*/}: $1: Unknown option"
_short_help
fi
;;
*)_parser_process_input "$@"||return 1
esac
# handlers report extra args consumed via _PARSER_ARGS_SHIFT (_parser_shift)
_PARSER_ARGS_SHIFT="$((_PARSER_ARGS_SHIFT+1))"
shift "$_PARSER_ARGS_SHIFT"
_PARSER_ARGS_SHIFT="0"
done
return 0
}
_parser_setup_flag(){
# Begin a new flag definition; records state in the _PARSER_CURRENT_*
# globals consumed by the _parser_setup_flag_{help,preprocess,process}
# calls that follow.
#   $1 - space separated flag spellings (e.g. "-a --account")
#   $2 - number of arguments the flag takes
#   $3 - argument type: "optional"/"required" (empty when $2 is 0)
#   $4 - human readable argument placeholder for the help text
_PARSER_CURRENT_FLAGS="" tmp_parser_setup_flag=""
_PARSER_FLAGS="${1:?_parser_setup_flag}"
for f in $_PARSER_FLAGS;do
_trim "-" "$f" tmp_parser_setup_flag
_PARSER_CURRENT_FLAGS="$_PARSER_CURRENT_FLAGS $tmp_parser_setup_flag"
done
_PARSER_CURRENT_NARGS="${2:?_parser_setup_flag}"
_PARSER_CURRENT_ARGS_TYPE="$3"
_PARSER_CURRENT_ARGS="$4"
}
_parser_setup_flag_help(){
# Attach help text ($1) to the flag currently being defined. Builds a
# header like ' -a | --account "account name" [ Required ]', stores the
# combined text in _parser__help_<flag> for every spelling, and appends
# it to the global _PARSER_ALL_HELP (except for the pseudo-flag "input").
flags_parser_setup_flag_help="${_PARSER_CURRENT_FLAGS:?_parser_setup_flag_help}"
nargs_parser_setup_flag_help="${_PARSER_CURRENT_NARGS:?_parser_setup_flag_help}"
unset start_parser_setup_flag_help \
help_parser_setup_flag_help \
arg_parser_setup_flag_help \
all_parser_setup_flag_help
while IFS= read -r line <&4;do
help_parser_setup_flag_help="$help_parser_setup_flag_help
$line"
done 4<<EOF
${1:?_parser_setup_flag_help}
EOF
for f in ${_PARSER_FLAGS:?_parser_setup_flag_help};do
start_parser_setup_flag_help="${start_parser_setup_flag_help:+$start_parser_setup_flag_help | }$f"
done
if ! [ "$nargs_parser_setup_flag_help" = 0 ];then
arg_parser_setup_flag_help="\"${_PARSER_CURRENT_ARGS:?_parser_setup_flag_help}\""
if [ "$_PARSER_CURRENT_ARGS_TYPE" = optional ];then
arg_parser_setup_flag_help="$arg_parser_setup_flag_help [ Optional ]"
else
arg_parser_setup_flag_help="$arg_parser_setup_flag_help [ Required ]"
fi
fi
start_parser_setup_flag_help=" $start_parser_setup_flag_help $arg_parser_setup_flag_help"
all_setup_help_flag="$start_parser_setup_flag_help${__NEWLINE:?}$help_parser_setup_flag_help"
for f in $flags_parser_setup_flag_help;do
_set_value d "_parser__help_$f" "$all_setup_help_flag"
done
[ "$_PARSER_FLAGS" = input ]&&return 0
_PARSER_ALL_HELP="$_PARSER_ALL_HELP
${__PARSER_BAR:-}
$all_setup_help_flag" 2>|/dev/null
}
_parser_setup_flag_preprocess(){
# Collect shell code read from fd 4 (a heredoc at the call site) to be
# run once before parsing starts - typically unset/default initialisation.
# Accumulated into _PARSER_PREPROCESS_FUNCTION (see _parser_run_preprocess).
_is_fd_open 4||return 1
unset fn_parser_setup_flag_preprocess
while IFS= read -r line <&4;do
fn_parser_setup_flag_preprocess="$fn_parser_setup_flag_preprocess
$line"
done
_PARSER_PREPROCESS_FUNCTION="$_PARSER_PREPROCESS_FUNCTION
$fn_parser_setup_flag_preprocess"
}
_parser_setup_flag_process(){
# Generate the _parser_process_<flag> handler for every spelling of the
# current flag from shell code read on fd 4 (a heredoc at the call site).
# For flags with required arguments, an argument-count check is prepended.
_is_fd_open 4||return 1
unset fn_parser_setup_flag_process
if [ "${_PARSER_CURRENT_NARGS:?_parser_setup_flag_process}" -gt 0 ]&&! [ "$_PARSER_CURRENT_ARGS_TYPE" = optional ];then
fn_parser_setup_flag_process="_parser_check_arguments ${_PARSER_CURRENT_NARGS:?_parser_setup_flag_process} \"\${@}\""
fi
while IFS= read -r line <&4;do
fn_parser_setup_flag_process="$fn_parser_setup_flag_process
$line"
done
# define one handler per flag spelling via eval
for f in ${_PARSER_CURRENT_FLAGS:?_parser_setup_flag_process};do
eval "_parser_process_$f() { $fn_parser_setup_flag_process ; }"
done
}
_parser_run_preprocess(){
# Wrap all accumulated preprocess snippets into one function and run it
# (":" no-op when nothing was registered).
eval "_parser_preprocess_setup() { ${_PARSER_PREPROCESS_FUNCTION:-:} ; }"&&_parser_preprocess_setup
}
_parser_shift(){
# Used by flag handlers to report how many EXTRA positional arguments they
# consumed (default 1); _parse_arguments shifts this many plus one (for
# the flag itself) and then resets the counter to 0.
export _PARSER_ARGS_SHIFT="${1:-1}"
}
_short_help(){
# Print a one-line usage hint and exit (status 0).
printf "No valid arguments provided, use -h/--help flag to see usage.\n"
exit 0
}
_set_value(){
# Export variable $2 either with the literal value $3 (d/direct) or with
# the value of the variable NAMED by $3 (i/indirect); return 1 otherwise.
# NOTE(review): byte-for-byte duplicate of the _set_value defined earlier
# in this file (harmless - the later definition simply wins).
case "${1:?}" in
d|direct)export "${2:?}=$3";;
i|indirect)eval export "$2"=\"\$"$3"\";;
*)return 1
esac
}
_trim(){
# Delete every occurrence of character $1 from string $2.
# If $3 names a variable, store the result there (via _set_value);
# otherwise print it to stdout.
sep_trim="$1" input_trim="$2" out_var_trim="$3"
set -f # no globbing while the string is field-split
old_ifs_trim="$IFS"
# split on the separator, then join the fields with nothing (empty IFS)
IFS="$sep_trim"
set -- $input_trim
IFS=
if [ -n "$out_var_trim" ];then
_set_value d "$out_var_trim" "$*"
else
printf "%s" "$*"
fi
IFS="$old_ifs_trim"
set +f
}
_parser_setup_flags(){
_parser_add_help "The script can be used to upload file/directory to google drive.
Usage: ${0##*/} filename/foldername/file_id/file_link -c gdrive_folder_name
where filename/foldername is input file/folder and file_id/file_link is the accessible gdrive file link or id which will be uploaded without downloading.
Note: It’s not mandatory to use gdrive_folder_name | -c / -C / –create-dir flag.
gdrive_folder_name is the name of the folder on gdrive, where the input file/folder will be uploaded. If gdrive_folder_name is present on gdrive, then script will upload there, else will make a folder with that name.
Apart from basic usage, this script provides many flags for custom usecases, like parallel uploading, skipping upload of existing files, overwriting, etc.
Options:"
_parser_setup_flag "input" 0
_parser_setup_flag_help \
"Input files or drive ids to process."
_parser_setup_flag_preprocess 4<<'EOF'
unset TOTAL_ID_INPUTS TOTAL_FILE_INPUTS
EOF
_parser_setup_flag_process 4<<'EOF'
# set INPUT_FILE|ID_num to the input, where num is rank of input
case "${1}" in
*drive.google.com* | *docs.google.com*) _set_value d "INPUT_ID_$((TOTAL_ID_INPUTS += 1))" "$(_extract_id "${1}")" ;;
*)
[ -r "${1}" ] || {
{ "${QUIET:-_print_center}" 'normal' "[ Error: Invalid File - ${1} ]" "=" && printf "\n"; } 1>&2
return
}
_set_value d "INPUT_FILE_$((TOTAL_FILE_INPUTS += 1))" "${1}"
;;
esac
EOF
_parser_setup_flag "-a --account" 1 required "account name"
_parser_setup_flag_help \
"Use a different account than the default one.
To change the default account name, use this format, -a/--account default=account_name"
_parser_setup_flag_preprocess 4<<'EOF'
unset OAUTH_ENABLED ACCOUNT_NAME ACCOUNT_ONLY_RUN CUSTOM_ACCOUNT_NAME UPDATE_DEFAULT_ACCOUNT
EOF
_parser_setup_flag_process 4<<'EOF'
export OAUTH_ENABLED="true" CUSTOM_ACCOUNT_NAME="${2##default=}"
[ -z "${2##default=*}" ] && export UPDATE_DEFAULT_ACCOUNT="_update_config"
_parser_shift
EOF
_parser_setup_flag "-la --list-accounts" 0
_parser_setup_flag_help \
"Print all configured accounts in the config files."
_parser_setup_flag_preprocess 4<<'EOF'
unset LIST_ACCOUNTS
EOF
_parser_setup_flag_process 4<<'EOF'
export LIST_ACCOUNTS="true"
EOF
_parser_setup_flag "-ca --create-account" 1 required "account name"
_parser_setup_flag_help \
"To create a new account with the given name if does not already exists.
Note 1: Only for interactive terminal usage
Note 2: This flag is preferred over --account."
_parser_setup_flag_preprocess 4<<'EOF'
unset OAUTH_ENABLED NEW_ACCOUNT_NAME
EOF
_parser_setup_flag_process 4<<'EOF'
export OAUTH_ENABLED="true"
export NEW_ACCOUNT_NAME="${2}" && _parser_shift
EOF
_parser_setup_flag "-da --delete-account" 1 required "account name"
_parser_setup_flag_help \
"To delete an account information from config file."
_parser_setup_flag_preprocess 4<<'EOF'
unset DELETE_ACCOUNT_NAME
EOF
_parser_setup_flag_process 4<<'EOF'
export DELETE_ACCOUNT_NAME="${2}" && _parser_shift
EOF
_parser_setup_flag "-c -C --create-dir" 1 required "foldername"
_parser_setup_flag_help \
"Option to create directory on drive. Will print folder id.
If this option is used, then input files/folders are optional.
Also supports specifying sub folders, -c 'Folder1/folder2/test'.
Three folders will be created, test inside folder2, folder2 inside Folder1 and so on.
Input files and folders will be uploaded inside test folder."
_parser_setup_flag_preprocess 4<<'EOF'
unset FOLDERNAME
EOF
_parser_setup_flag_process 4<<'EOF'
export FOLDERNAME="${2}" && _parser_shift
EOF
_parser_setup_flag "-r --root-dir" 1 required "google folder id or folder url containing id"
_parser_setup_flag_help \
"Google folder ID/URL to which the file/directory is going to upload.
If you want to change the default value, then use this format, -r/--root-dir default=root_folder_id/root_folder_url"
_parser_setup_flag_preprocess 4<<'EOF'
unset ROOTDIR UPDATE_DEFAULT_ROOTDIR
EOF
_parser_setup_flag_process 4<<'EOF'
ROOTDIR="${2##default=}"
[ -z "${2##default=*}" ] && export UPDATE_DEFAULT_ROOTDIR="_update_config"
_parser_shift
EOF
_parser_setup_flag "-s --skip-subdirs" 0
_parser_setup_flag_help \
"Skip creation of sub folders and upload all files inside the INPUT folder/sub-folders in the INPUT folder.
Use this along with -p/--parallel option to speed up the uploads."
_parser_setup_flag_preprocess 4<<'EOF'
unset SKIP_SUBDIRS
EOF
_parser_setup_flag_process 4<<'EOF'
export SKIP_SUBDIRS="true"
EOF
_parser_setup_flag "-p --parallel" 1 required "no of files to parallely upload"
_parser_setup_flag_help \
"Upload multiple files in parallel, Max value = 10.
Note:
This command is only helpful if you are uploading many files which aren’t big enough to utilise your full bandwidth.
Using it otherwise will not speed up your upload and even error sometimes,
1 - 6 value is recommended, but can use upto 10. If errors with a high value, use smaller number. "
_parser_setup_flag_preprocess 4<<'EOF'
unset NO_OF_PARALLEL_JOBS PARALLEL_UPLOAD
EOF
_parser_setup_flag_process 4<<'EOF'
if [ "${2}" -gt 0 ] 2>| /dev/null 1>&2; then
export NO_OF_PARALLEL_JOBS="${2}"
else
printf "\nError: -p/--parallel accepts values between 1 to 10.\n"
return 1
fi
export PARALLEL_UPLOAD="parallel" && _parser_shift
EOF
_parser_setup_flag "-cl --clone" 1 required "gdrive id or link"
_parser_setup_flag_help \
"Upload a gdrive file without downloading."
_parser_setup_flag_preprocess 4<<'EOF'
unset TOTAL_ID_INPUTS
EOF
_parser_setup_flag_process 4<<'EOF'
# set INPUT_FILE|ID_num to the input, where num is rank of input
case "${1}" in
*drive.google.com* | *docs.google.com*) _set_value d "INPUT_ID_$((TOTAL_ID_INPUTS += 1))" "$(_extract_id "${1}")" ;;
esac
_parser_shift
EOF
_parser_setup_flag "-o --overwrite" 0
_parser_setup_flag_help \
"Overwrite the files with the same name, if present in the root folder/input folder, also works with recursive folders.
Note: If you use this flag along with -d/–skip-duplicates, the skip duplicates flag is preferred."
_parser_setup_flag_preprocess 4<<'EOF'
unset OVERWRITE UPLOAD_MODE
EOF
_parser_setup_flag_process 4<<'EOF'
export OVERWRITE="Overwrite" UPLOAD_MODE="update"
EOF
_parser_setup_flag "-d --skip-duplicates" 0
_parser_setup_flag_help \
"Do not upload the files with the same name and size, if already present in the root folder/input folder.
Also works with recursive folders."
_parser_setup_flag_preprocess 4<<'EOF'
unset SKIP_DUPLICATES UPLOAD_MODE
EOF
_parser_setup_flag_process 4<<'EOF'
export SKIP_DUPLICATES="Skip Existing" UPLOAD_MODE="update"
EOF
_parser_setup_flag "-cm --check-mode" 1 required "size or md5"
_parser_setup_flag_help \
"Additional flag for --overwrite and --skip-duplicates flag. Can be used to change check mode in those flags.
Available modes are 'size' and 'md5'."
_parser_setup_flag_preprocess 4<<'EOF'
unset CHECK_MODE
EOF
_parser_setup_flag_process 4<<'EOF'
case "${2}" in
size) export CHECK_MODE="2" && _parser_shift ;;
md5) export CHECK_MODE="3" && _parser_shift ;;
*) printf "\nError: -cm/--check-mode takes size and md5 as argument.\n" ;;
esac
EOF
_parser_setup_flag "-desc --description --description-all" 1 required "description of file"
_parser_setup_flag_help \
"Specify description for the given file. To use the respective metadata of a file, below is the format:
File name ( fullname ): %f | Size: %s | Mime Type: %m
Now to actually use it: --description 'Filename: %f, Size: %s, Mime: %m'
Note: For files inside folders, use --description-all flag."
_parser_setup_flag_preprocess 4<<'EOF'
unset DESCRIPTION DESCRIPTION_ALL
EOF
_parser_setup_flag_process 4<<'EOF'
[ "${1}" = "--description-all" ] && export DESCRIPTION_ALL="true"
export DESCRIPTION="${2}" && _parser_shift
EOF
_parser_setup_flag "-S --share" 1 required "email address"
_parser_setup_flag_help \
"Share the uploaded input file/folder, grant reader permission to provided email address OR
To everyone with the shareable link."
_parser_setup_flag_preprocess 4<<'EOF'
unset SHARE EMAIL_REGEX SHARE_EMAIL
EOF
_parser_setup_flag_process 4<<'EOF'
SHARE="_share_id"
EMAIL_REGEX="^(([A-Za-z0-9]+((\.|\-|\_|\+)?[A-Za-z0-9]?)*[A-Za-z0-9]+)|[A-Za-z0-9]+)@(([A-Za-z0-9]+)+((\.|\-|\_)?([A-Za-z0-9]+)+)*)+\.([A-Za-z]{2,})+$"
case "${2}" in
-* | '') : ;;
*)
if _assert_regex "${EMAIL_REGEX}" "${2}"; then
SHARE_EMAIL="${2}" && _parser_shift && export SHARE_EMAIL
fi
;;
esac
SHARE_ROLE="${SHARE_ROLE:-reader}"
EOF
_parser_setup_flag "-SM -sm --share-mode" 1 required "share mode - r/w/c"
_parser_setup_flag_help \
"Specify the share mode for sharing file.
Share modes are: r / reader - Read only permission.
: w / writer - Read and write permission.
: c / commenter - Comment only permission.
Note: This flag is independent of --share flag but when email is needed, then --share flag use is neccessary."
_parser_setup_flag_preprocess 4<<'EOF'
unset SHARE_ROLE SHARE
EOF
_parser_setup_flag_process 4<<'EOF'
case "${2}" in
r | read*) SHARE_ROLE="reader" ;;
w | write*) SHARE_ROLE="writer" ;;
c | comment*) SHARE_ROLE="commenter" ;;
*)
printf "%s\n" "Invalid share mode given ( ${2} ). Supported values are r or reader / w or writer / c or commenter." &&
exit 1
;;
esac
SHARE="_share_id"
_parser_shift
EOF
_parser_setup_flag "--speed" 1 required "speed"
_parser_setup_flag_help \
"Limit the download speed, supported formats: 1K, 1M and 1G."
_parser_setup_flag_preprocess 4<<'EOF'
unset CURL_SPEED
EOF
_parser_setup_flag_process 4<<'EOF'
_tmp_regex='^([0-9]+)([k,K]|[m,M]|[g,G])+$'
if _assert_regex "${_tmp_regex}" "${2}"; then
export CURL_SPEED="--limit-rate ${2}" && _parser_shift
else
printf "Error: Wrong speed limit format, supported formats: 1K , 1M and 1G\n" 1>&2
exit 1
fi
EOF
_parser_setup_flag "-i --save-info" 1 required "file where to save info"
_parser_setup_flag_help \
"Save uploaded files info to the given filename."
_parser_setup_flag_preprocess 4<<'EOF'
unset LOG_FILE_ID
EOF
_parser_setup_flag_process 4<<'EOF'
export LOG_FILE_ID="${2}" && _parser_shift
EOF
_parser_setup_flag "-z --config" 1 required "config path"
_parser_setup_flag_help \
'Override default config file with custom config file.
Default Config: ${HOME}/.googledrive.conf
If you want to change default value, then use this format -z/--config default=default=your_config_file_path.'
_parser_setup_flag_preprocess 4<<'EOF'
unset UPDATE_DEFAULT_CONFIG
_check_config() {
[ -z "${1##default=*}" ] && export UPDATE_DEFAULT_CONFIG="_update_config"
{ [ -r "${2}" ] && CONFIG="${2}"; } || {
printf "Error: Given config file (%s) doesn't exist/not readable,..\n" "${1}" 1>&2 && exit 1
}
return 0
}
EOF
_parser_setup_flag_process 4<<'EOF'
_check_config "${2}" "${2/default=/}"
_parser_shift
EOF
_parser_setup_flag "-q --quiet" 0
_parser_setup_flag_help \
"Supress the normal output, only show success/error upload messages for files, and one extra line at the beginning for folder showing no. of files and sub folders."
_parser_setup_flag_preprocess 4<<'EOF'
unset QUIET
EOF
_parser_setup_flag_process 4<<'EOF'
export QUIET="_print_center_quiet"
EOF
_parser_setup_flag "-R --retry" 1 required "num of retries"
_parser_setup_flag_help \
"Retry the file upload if it fails, postive integer as argument. Currently only for file uploads."
_parser_setup_flag_preprocess 4<<'EOF'
unset RETRY
EOF
_parser_setup_flag_process 4<<'EOF'
if [ "$((2))" -gt 0 ] 2>| /dev/null 1>&2; then
export RETRY="${2}" && _parser_shift
else
printf "Error: -R/--retry only takes positive integers as arguments, min = 1, max = infinity.\n"
exit 1
fi
EOF
_parser_setup_flag "-in --include" 1 required "pattern"
_parser_setup_flag_help \
"Only upload the files which contains the given pattern - Applicable for folder uploads.
e.g: ${0##*/} local_folder --include 1, will only include with files with pattern 1 in the name.
Regex can be used which works with grep -E command."
_parser_setup_flag_preprocess 4<<'EOF'
unset INCLUDE_FILES
EOF
_parser_setup_flag_process 4<<'EOF'
export INCLUDE_FILES="${INCLUDE_FILES:+${INCLUDE_FILES}|}${2}" && _parser_shift
EOF
_parser_setup_flag "-ex --exclude" 1 required "pattern"
_parser_setup_flag_help \
"Only download the files which does not contain the given pattern - Applicable for folder downloads.
e.g: ${0##*/} local_folder --exclude 1, will only include with files with pattern 1 not present in the name.
Regex can be used which works with grep -E command."
_parser_setup_flag_preprocess 4<<'EOF'
unset EXCLUDE_FILES
EOF
_parser_setup_flag_process 4<<'EOF'
export EXCLUDE_FILES="${EXCLUDE_FILES:+${EXCLUDE_FILES}|}${2}" && _parser_shift
EOF
_parser_setup_flag "--hide" 0
_parser_setup_flag_help \
"This flag will prevent the script to print sensitive information like root folder id and drivelink."
_parser_setup_flag_preprocess 4<<'EOF'
unset HIDE_INFO
EOF
_parser_setup_flag_process 4<<'EOF'
HIDE_INFO=":"
EOF
_parser_setup_flag "-v --verbose" 0
_parser_setup_flag_help \
"Display detailed message (only for non-parallel uploads)."
_parser_setup_flag_preprocess 4<<'EOF'
unset VERBOSE
EOF
_parser_setup_flag_process 4<<'EOF'
export VERBOSE="true"
EOF
_parser_setup_flag "-V --verbose-progress" 0
_parser_setup_flag_help \
"Display detailed message and detailed upload progress(only for non-parallel uploads)."
_parser_setup_flag_preprocess 4<<'EOF'
unset VERBOSE_PROGRESS
EOF
_parser_setup_flag_process 4<<'EOF'
export VERBOSE_PROGRESS="true"
EOF
_parser_setup_flag "--skip-internet-check" 0
_parser_setup_flag_help \
"Do not check for internet connection, recommended to use in sync jobs."
_parser_setup_flag_preprocess 4<<'EOF'
unset SKIP_INTERNET_CHECK
EOF
_parser_setup_flag_process 4<<'EOF'
export SKIP_INTERNET_CHECK=":"
EOF
_parser_setup_flag "-V --version --info" 0
_parser_setup_flag_help \
"Show detailed info, only if script is installed system wide."
_parser_setup_flag_preprocess 4<<'EOF'
###################################################
# Print info if installed
###################################################
_version_info() {
export COMMAND_NAME REPO INSTALL_PATH TYPE TYPE_VALUE
if command -v "${COMMAND_NAME}" 1> /dev/null && [ -n "${REPO:+${COMMAND_NAME:+${INSTALL_PATH:+${TYPE:+${TYPE_VALUE}}}}}" ]; then
for i in REPO INSTALL_PATH INSTALLATION TYPE TYPE_VALUE LATEST_INSTALLED_SHA CONFIG; do
value_version_info=""
_set_value i value_version_info "${i}"
printf "%s\n" "${i}=${value_version_info}"
done | sed -e "s/=/: /g"
else
printf "%s\n" "google-drive-upload is not installed system wide."
fi
exit 0
}
EOF
_parser_setup_flag_process 4<<'EOF'
_version_info
EOF
_parser_setup_flag "-D --debug" 0
_parser_setup_flag_help \
"Display script command trace."
_parser_setup_flag_preprocess 4<<'EOF'
unset DEBUG
EOF
_parser_setup_flag_process 4<<'EOF'
export DEBUG="true"
EOF
_parser_setup_flag "-h --help" 1 optional "flag name"
_parser_setup_flag_help \
"Print help for all flags and basic usage instructions.
To see help for a specific flag, --help flag_name ( with or without dashes )
Can also specify multiple flag names
e.g: ${0##*/} --help config list-accounts"
_parser_setup_flag_preprocess 4<<'EOF'
###################################################
# 1st arg - can be flag name
# if 1st arg given, print specific flag help
# otherwise print full help
###################################################
_usage() {
[ -n "${1}" ] && {
for flag_usage in "${@}"; do
help_usage_usage=""
_flag_help "${flag_usage}" help_usage_usage
if [ -z "${help_usage_usage}" ]; then
printf "%s\n" "Error: No help found for ${flag_usage}"
else
printf "%s\n%s\n%s\n" "${__PARSER_BAR}" "${help_usage_usage}" "${__PARSER_BAR}"
fi
done
exit 0
}
printf "%s\n" "${_PARSER_ALL_HELP}"
exit 0
}
EOF
_parser_setup_flag_process 4<<'EOF'
shift 1 && _usage "${@}"
EOF
[ "${GUPLOAD_INSTALLED_WITH:-}" = script ]&&{
_parser_setup_flag "-u --update" 0
_parser_setup_flag_help \
"Update the installed script in your system."
_parser_setup_flag_process 4<<'EOF'
_check_debug && _update && { exit 0 || exit 1; }
EOF
_parser_setup_flag "--uninstall" 0
_parser_setup_flag_help \
"Uninstall script, remove related files."
_parser_setup_flag_process 4<<'EOF'
_check_debug && _update uninstall && { exit 0 || exit 1; }
EOF
}
return 0
}
_account_name_valid(){
# Succeed iff $1 is a syntactically valid account name: one or more of
# A-Z a-z 0-9 _ and nothing else (empty input fails).
_assert_regex '^([A-Za-z0-9_])+$' "${1:-}"
}
_account_exists(){
# Succeed iff account $1 has a valid name AND all three credential
# variables (ACCOUNT_<name>_CLIENT_ID / _CLIENT_SECRET / _REFRESH_TOKEN)
# are non-empty in the currently loaded config.
name_account_exists="${1:-}" client_id_account_exists="" client_secret_account_exists="" refresh_token_account_exists=""
_account_name_valid "$name_account_exists"||return 1
_set_value indirect client_id_account_exists "ACCOUNT_${name_account_exists}_CLIENT_ID"
_set_value indirect client_secret_account_exists "ACCOUNT_${name_account_exists}_CLIENT_SECRET"
_set_value indirect refresh_token_account_exists "ACCOUNT_${name_account_exists}_REFRESH_TOKEN"
# nested ${a:+${b:+$c}} is empty when ANY of the three is empty
[ -z "${client_id_account_exists:+${client_secret_account_exists:+$refresh_token_account_exists}}" ]&&return 1
return 0
}
_all_accounts(){
# List every configured account found in CONFIG (lines matching
# ACCOUNT_<name>_CLIENT_ID), printing a numbered list and setting
# ACC_<n>_ACC variables; prints a notice when none are configured.
export CONFIG QUIET
{ _reload_config&&_handle_old_config;}||return 1
COUNT=0
while read -r account <&4&&[ -n "$account" ];do
_account_exists "$account"&&{ [ "$COUNT" = 0 ]&&"${QUIET:-_print_center}" "normal" " All available accounts. " "="||:;}&&printf "%b" "$((COUNT+=1)). $account \n"&&_set_value direct "ACC_${COUNT}_ACC" "$account"
done 4<<EOF
$(grep -oE '^ACCOUNT_.*_CLIENT_ID' -- "$CONFIG"|sed -e "s/ACCOUNT_//g" -e "s/_CLIENT_ID//g")
EOF
{ [ "$COUNT" -le 0 ]&&"${QUIET:-_print_center}" "normal" " No accounts configured yet. " "=" 1>&2;}||printf '\n'
return 0
}
_set_new_account_name(){
# Choose a name for a NEW account. $1 is the proposed name; when empty (or
# invalid/taken) the user is prompted interactively until a valid, unused
# name is given. On success exports NEW_ACCOUNT_NAME and ACCOUNT_NAME.
# Fails when prompting is needed but stdout is not a terminal.
export QUIET NEW_ACCOUNT_NAME
_reload_config||return 1
new_account_name_set_new_account_name="${1:-}"&&unset name_valid_set_new_account_name
[ -z "$new_account_name_set_new_account_name" ]&&{
_all_accounts 2>|/dev/null
"${QUIET:-_print_center}" "normal" " New account name: " "="
"${QUIET:-_print_center}" "normal" "Info: Account names can only contain alphabets / numbers / dashes." " "&&printf '\n'
}
until [ -n "$name_valid_set_new_account_name" ];do
if [ -n "$new_account_name_set_new_account_name" ];then
if _account_name_valid "$new_account_name_set_new_account_name";then
if _account_exists "$new_account_name_set_new_account_name";then
"${QUIET:-_print_center}" "normal" " Warning: Given account ( $new_account_name_set_new_account_name ) already exists, input different name. " "-" 1>&2
unset new_account_name_set_new_account_name&&continue
else
export new_account_name_set_new_account_name="$new_account_name_set_new_account_name" NEW_ACCOUNT_NAME="$new_account_name_set_new_account_name"&&name_valid_set_new_account_name="true"&&continue
fi
else
"${QUIET:-_print_center}" "normal" " Warning: Given account name ( $new_account_name_set_new_account_name ) invalid, input different name. " "-"
unset new_account_name_set_new_account_name&&continue
fi
else
[ -t 1 ]||{ "${QUIET:-_print_center}" "normal" " Error: Not running in an interactive terminal, cannot ask for new account name. " 1>&2&&return 1;}
printf -- "-> \033[?7l"
read -r new_account_name_set_new_account_name
printf '\033[?7h'
fi
_clear_line 1
done
"${QUIET:-_print_center}" "normal" " Given account name: $NEW_ACCOUNT_NAME " "="
export ACCOUNT_NAME="$NEW_ACCOUNT_NAME"
return 0
}
_delete_account(){
# Remove account $1 from CONFIG: strips all ACCOUNT_<name>_* credential
# lines and a matching DEFAULT_ACCOUNT entry, rewriting the file in place
# (temporarily made writable). Prints a warning when the account does not
# exist. Always returns 0 unless the file operations fail.
export CONFIG QUIET
{ _reload_config&&_handle_old_config;}||return 1
account_delete_account="${1:?Error: give account name}"&&unset regex_delete_account config_without_values_delete_account
if _account_exists "$account_delete_account";then
regex_delete_account="^ACCOUNT_${account_delete_account}_(CLIENT_ID=|CLIENT_SECRET=|REFRESH_TOKEN=|ROOT_FOLDER=|ROOT_FOLDER_NAME=|ACCESS_TOKEN=|ACCESS_TOKEN_EXPIRY=)|DEFAULT_ACCOUNT=\"$account_delete_account\""
config_without_values_delete_account="$(grep -vE "$regex_delete_account" -- "$CONFIG")"
chmod u+w -- "$CONFIG"||return 1
printf "%s\n" "$config_without_values_delete_account" >|"$CONFIG"||return 1
chmod "a-w-r-x,u+r" -- "$CONFIG"||return 1
"${QUIET:-_print_center}" "normal" " Successfully deleted account ( $account_delete_account ) from config. " "-"
else
"${QUIET:-_print_center}" "normal" " Error: Cannot delete account ( $account_delete_account ) from config. No such account exists " "-" 1>&2
fi
return 0
}
_handle_old_config(){
# Migrate a pre-multi-account config: when bare CLIENT_ID/CLIENT_SECRET/
# REFRESH_TOKEN variables are present, rewrite them as a named account
# ("default", or "default1", "default2", ... if taken), drop the old bare
# lines, and reload the config.
export CLIENT_ID CLIENT_SECRET REFRESH_TOKEN ROOT_FOLDER ROOT_FOLDER_NAME
[ -n "${CLIENT_ID:+${CLIENT_SECRET:+$REFRESH_TOKEN}}" ]&&{
# NOTE(review): this line ASSIGNS only the first name; the remaining
# words are executed as a command named regex_check_handle_old_config -
# looks unintended (missing `unset`?). Verify against upstream.
account_name_handle_old_config="default" regex_check_handle_old_config config_without_values_handle_old_config count_handle_old_config
until ! _account_exists "$account_name_handle_old_config";do
account_name_handle_old_config="$account_name_handle_old_config$((count_handle_old_config+=1))"
done
regex_check_handle_old_config="^(CLIENT_ID=|CLIENT_SECRET=|REFRESH_TOKEN=|ROOT_FOLDER=|ROOT_FOLDER_NAME=|ACCESS_TOKEN=|ACCESS_TOKEN_EXPIRY=)"
config_without_values_handle_old_config="$(grep -vE "$regex_check_handle_old_config" -- "$CONFIG")"
chmod u+w -- "$CONFIG"||return 1
printf "%s\n%s\n%s\n%s\n%s\n%s\n" \
"ACCOUNT_${account_name_handle_old_config}_CLIENT_ID=\"$CLIENT_ID\"" \
"ACCOUNT_${account_name_handle_old_config}_CLIENT_SECRET=\"$CLIENT_SECRET\"" \
"ACCOUNT_${account_name_handle_old_config}_REFRESH_TOKEN=\"$REFRESH_TOKEN\"" \
"ACCOUNT_${account_name_handle_old_config}_ROOT_FOLDER=\"$ROOT_FOLDER\"" \
"ACCOUNT_${account_name_handle_old_config}_ROOT_FOLDER_NAME=\"$ROOT_FOLDER_NAME\"" \
"$config_without_values_handle_old_config" >|"$CONFIG"||return 1
chmod "a-w-r-x,u+r" -- "$CONFIG"||return 1
_reload_config||return 1
}
return 0
}
_check_credentials(){
    # Resolve which account to use and validate its full credential chain.
    # Handles: explicit new account (NEW_ACCOUNT_NAME), explicit existing
    # account (CUSTOM_ACCOUNT_NAME), configured DEFAULT_ACCOUNT, or an
    # interactive/automatic choice among all configured accounts.
    export CONFIG CONFIG_INFO DEFAULT_ACCOUNT NEW_ACCOUNT_NAME CUSTOM_ACCOUNT_NAME QUIET COUNT
    { _reload_config&&_handle_old_config;}||return 1
    ACCOUNT_NAME="$DEFAULT_ACCOUNT"
    if [ -n "$NEW_ACCOUNT_NAME" ];then
        _set_new_account_name "$NEW_ACCOUNT_NAME"||return 1
        _check_account_credentials "$ACCOUNT_NAME"||return 1
    else
        if [ -n "$CUSTOM_ACCOUNT_NAME" ];then
            if _account_exists "$CUSTOM_ACCOUNT_NAME";then
                ACCOUNT_NAME="$CUSTOM_ACCOUNT_NAME"
            else
                "${QUIET:-_print_center}" "normal" " Error: No such account ( $CUSTOM_ACCOUNT_NAME ) exists. " "-"&&return 1
            fi
        elif [ -n "$DEFAULT_ACCOUNT" ];then
            # Default account vanished from config: clear it and remember to
            # persist whichever account gets chosen below.
            _account_exists "$DEFAULT_ACCOUNT"||{
                _update_config DEFAULT_ACCOUNT "" "$CONFIG"&&unset DEFAULT_ACCOUNT ACCOUNT_NAME&&UPDATE_DEFAULT_ACCOUNT="_update_config"
            }
        else
            UPDATE_DEFAULT_ACCOUNT="_update_config"
        fi
        if [ -z "$ACCOUNT_NAME" ];then
            # _all_accounts lists accounts and sets COUNT / ACC_<n>_ACC vars.
            if _all_accounts 2>|/dev/null&&[ "$COUNT" -gt 0 ];then
                if [ "$COUNT" -eq 1 ];then
                    _set_value indirect ACCOUNT_NAME "ACC_1_ACC"
                else
                    "${QUIET:-_print_center}" "normal" " Above accounts are configured, but default one not set. " "="
                    if [ -t 1 ];then
                        "${QUIET:-_print_center}" "normal" " Choose default account: " "-"
                        until [ -n "$ACCOUNT_NAME" ];do
                            printf -- "-> \033[?7l"
                            read -r account_name_check_credentials
                            printf '\033[?7h'
                            if [ "$account_name_check_credentials" -gt 0 ]&&[ "$account_name_check_credentials" -le "$COUNT" ];then
                                # Fix: use the number the user typed; the old
                                # "ACC_${COUNT}_ACC" always picked the last
                                # listed account regardless of the input.
                                _set_value indirect ACCOUNT_NAME "ACC_${account_name_check_credentials}_ACC"
                            else
                                _clear_line 1
                            fi
                        done
                    else
                        printf "%s\n" "Warning: Script is not running in a terminal, choosing first account as default."
                        _set_value indirect ACCOUNT_NAME "ACC_1_ACC"
                    fi
                fi
            else
                # No accounts configured at all: create one interactively.
                _set_new_account_name ""||return 1
                _check_account_credentials "$ACCOUNT_NAME"||return 1
            fi
        fi
        _check_account_credentials "$ACCOUNT_NAME"||return 1
    fi
    # Persist default account/config only when flagged earlier (":" otherwise).
    "${UPDATE_DEFAULT_ACCOUNT:-:}" DEFAULT_ACCOUNT "$ACCOUNT_NAME" "$CONFIG"
    "${UPDATE_DEFAULT_CONFIG:-:}" CONFIG "$CONFIG" "$CONFIG_INFO"
    [ -n "$CONTINUE_WITH_NO_INPUT" ]||_token_bg_service
    return 0
}
_check_account_credentials() {
    # Validate the complete credential chain of one account: client id,
    # client secret, refresh token, then a usable access token.
    #   $1 - account name (required)
    cac_name="${1:?Give account name}"
    _check_client ID "$cac_name" &&
        _check_client SECRET "$cac_name" &&
        _check_refresh_token "$cac_name" &&
        _check_access_token "$cac_name" check || return 1
    return 0
}
_check_client(){
    # Validate (and if needed interactively prompt for) the client ID or
    # client SECRET of an account, persisting new values via _update_config.
    #   $1 - "ID" or "SECRET" (required)
    #   $2 - account name (optional; empty means the old un-namespaced vars)
    export CONFIG QUIET
    type_check_client="CLIENT_${1:?Error: ID or SECRET}" account_name_check_client="${2:-}"
    unset type_value_check_client type_name_check_client valid_check_client client_check_client message_check_client regex_check_client
    # Client IDs have a fixed Google format; secrets are free-form tokens.
    if [ "$type_check_client" = "CLIENT_ID" ];then
        regex_check_client='[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com'
    else
        regex_check_client='[0-9A-Za-z_-]+'
    fi
    # e.g. ACCOUNT_<name>_CLIENT_ID, or plain CLIENT_ID for the old format.
    type_name_check_client="${account_name_check_client:+ACCOUNT_${account_name_check_client}_}$type_check_client"
    _set_value indirect type_value_check_client "$type_name_check_client"
    # Loop until a value exists AND matches the regex; re-prompt on bad input.
    # client_check_client is set once the value came from user input this run.
    until [ -n "$type_value_check_client" ]&&[ -n "$valid_check_client" ];do
        [ -n "$type_value_check_client" ]&&{
            if _assert_regex "$regex_check_client" "$type_value_check_client";then
                # Persist only values that were typed in during this run.
                [ -n "$client_check_client" ]&&{ _update_config "$type_name_check_client" "$type_value_check_client" "$CONFIG"||return 1;}
                valid_check_client="true"&&continue
            else
                { [ -n "$client_check_client" ]&&message_check_client="- Try again";}||message_check_client="in config ( $CONFIG )"
                # NOTE(review): the trailing bare "client" below looks like it
                # was meant to be "client_check_client" — confirm upstream.
                "${QUIET:-_print_center}" "normal" " Invalid Client $1 $message_check_client " "-"&&unset "$type_name_check_client" client
            fi
        }
        [ -z "$client_check_client" ]&&printf "\n"&&"${QUIET:-_print_center}" "normal" " Enter Client $1 " "-"
        [ -n "$client_check_client" ]&&_clear_line 1
        printf -- "-> "
        # Read straight into the dynamically named variable.
        read -r "${type_name_check_client?}"&&client_check_client=1
        _set_value indirect type_value_check_client "$type_name_check_client"
    done
    # Expose the value under both the namespaced and the generic name.
    _set_value direct "$type_name_check_client" "$type_value_check_client"
    _set_value direct "$type_check_client" "$type_value_check_client"
    return 0
}
_check_refresh_token(){
    # Ensure a valid OAuth refresh token exists for the account: take it from
    # config, or from manual input, or run the full browser authorization
    # flow (spinning up a tiny local http server to catch the redirect).
    #   $1 - account name (optional; empty means un-namespaced variables)
    export CLIENT_ID CLIENT_SECRET QUIET CONFIG CURL_PROGRESS SCOPE REDIRECT_URI TOKEN_URL
    [ -z "${CLIENT_ID:+$CLIENT_SECRET}" ]&&return 1
    account_name_check_refresh_token="${1:-}"
    # Google refresh tokens look like "1//....", authorization codes "4/....".
    refresh_token_regex='[0-9]//[0-9A-Za-z_-]+' authorization_code_regex='[0-9]/[0-9A-Za-z_-]+'
    _set_value direct refresh_token_name_check_refresh_token "${account_name_check_refresh_token:+ACCOUNT_${account_name_check_refresh_token}_}REFRESH_TOKEN"
    _set_value indirect refresh_token_value_check_refresh_token "${refresh_token_name_check_refresh_token:-}"
    # --oauth-refetch-refresh-token: discard the stored token on purpose.
    [ "${REFETCH_REFRESH_TOKEN:-false}" = "true" ]&&{
        unset refresh_token_value_check_refresh_token
    }
    [ -n "$refresh_token_value_check_refresh_token" ]&&{
        ! _assert_regex "$refresh_token_regex" "$refresh_token_value_check_refresh_token"&&"${QUIET:-_print_center}" "normal" " Error: Invalid Refresh token in config file, follow below steps.. " "-"&&unset refresh_token_value_check_refresh_token
    }
    [ -z "$refresh_token_value_check_refresh_token" ]&&{
        # First chance: let the user paste an already generated token.
        printf "\n"&&"${QUIET:-_print_center}" "normal" "If you have a refresh token generated, then type the token, else leave blank and press return key.." " "
        printf "\n"&&"${QUIET:-_print_center}" "normal" " Refresh Token " "-"&&printf -- "-> "
        read -r refresh_token_value_check_refresh_token
        if [ -n "$refresh_token_value_check_refresh_token" ];then
            "${QUIET:-_print_center}" "normal" " Checking refresh token.. " "-"
            if _assert_regex "$refresh_token_regex" "$refresh_token_value_check_refresh_token";then
                _set_value direct REFRESH_TOKEN "$refresh_token_value_check_refresh_token"
                { _check_access_token "$account_name_check_refresh_token" skip_check&&_update_config "$refresh_token_name_check_refresh_token" "$refresh_token_value_check_refresh_token" "$CONFIG"&&_clear_line 1;}||check_error_check_refresh_token=true
            else
                check_error_check_refresh_token=true
            fi
            [ -n "$check_error_check_refresh_token" ]&&"${QUIET:-_print_center}" "normal" " Error: Invalid Refresh token given, follow below steps to generate.. " "-"&&unset refresh_token_value_check_refresh_token
        else
            "${QUIET:-_print_center}" "normal" " No Refresh token given, follow below steps to generate.. " "-"&&unset refresh_token_value_check_refresh_token
        fi
        server_string_check_refresh_token='Now go back to command line..'
        # Probe ports 8080..8130 for a free one to receive the oauth redirect.
        server_port_check_refresh_token='8079'
        while :;do
            : "$((server_port_check_refresh_token+=1))"
            if [ "$server_port_check_refresh_token" -gt 8130 ];then
                "${QUIET:-_print_center}" "normal" "Error: No open ports found ( 8080 to 8130 )." "-"
                return 1
            fi
            { curl -Is "http://localhost:$server_port_check_refresh_token"&&continue;}||break
        done
        # Prefer python3's http.server; fall back to netcat.
        if command -v python 1>/dev/null&&python -V|grep -q 'Python 3';then
            python <<EOF 1>"$TMPFILE.code" 2>&1&
from http.server import BaseHTTPRequestHandler, HTTPServer
class handler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        if '/?code' in self.path:
            message = '$server_string_check_refresh_token'
            self.wfile.write(bytes(message, "utf8"))
with HTTPServer(('', $server_port_check_refresh_token), handler) as server:
    server.serve_forever()
EOF
            _tmp_server_pid="$!"
        elif command -v nc 1>/dev/null;then
            printf "%b" "HTTP/1.1 200 OK\nContent-Length: $(printf "%s" "$server_string_check_refresh_token"|wc -c)\n\n$server_string_check_refresh_token"|nc -l -p "$server_port_check_refresh_token" 1>"$TMPFILE.code" 2>&1&
            _tmp_server_pid="$!"
        else
            # Fix: reworded previously garbled message ("required to required").
            "${QUIET:-_print_center}" "normal" " Error: neither netcat (nc) nor python3 is installed. It is required to run a http server which is used in fetching authorization code. Install and proceed." "-"
            return 1
        fi
        code_challenge_check_refresh_token="$(_epoch)authorization_code"
        [ -z "$refresh_token_value_check_refresh_token" ]&&{
            printf "\n"&&"${QUIET:-_print_center}" "normal" "Visit the below URL, follow the instructions and then come back to commandline" " "
            URL="https://accounts.google.com/o/oauth2/auth?client_id=$CLIENT_ID&redirect_uri=$REDIRECT_URI%3A$server_port_check_refresh_token&scope=$SCOPE&response_type=code&code_challenge_method=plain&code_challenge=$code_challenge_check_refresh_token"
            printf "\n%s\n" "$URL"
            "${QUIET:-_print_center}" "normal" " Press enter if you have completed the process in browser" "-"
            read -r _
            kill "$_tmp_server_pid"
            # Fix: the error branch must trigger when grabbing the code fails
            # OR when the grabbed code is malformed; the old "! A && B" only
            # triggered when grep failed AND the (empty) code matched.
            if ! { authorization_code="$(grep -m1 'GET.*code.*HTTP/1.1' <"$TMPFILE.code"|sed -e 's/.*GET.*code=//' -e 's/\&.*//')"&&_assert_regex "$authorization_code_regex" "$authorization_code";};then
                "${QUIET:-_print_center}" "normal" " Code was not fetched properly , here is some info that maybe helpful.. " "-"
                "${QUIET:-_print_center}" "normal" " Code that was grabbed: $authorization_code " "-"
                printf "Output of http server:\n"
                cat "$TMPFILE.code"
                (rm -f "$TMPFILE.code"&)
                return 1
            fi
            (rm -f "$TMPFILE.code"&)
            # Exchange the authorization code for tokens.
            response_check_refresh_token="$(curl --compressed "$CURL_PROGRESS" -X POST \
                --data "code=$authorization_code&client_id=$CLIENT_ID&client_secret=$CLIENT_SECRET&redirect_uri=$REDIRECT_URI%3A$server_port_check_refresh_token&grant_type=authorization_code&code_verifier=$code_challenge_check_refresh_token" "$TOKEN_URL")"||:
            _clear_line 1 1>&2
            refresh_token_value_check_refresh_token="$(printf "%s\n" "$response_check_refresh_token"|_json_value refresh_token 1 1)"||{ printf "%s\n" "Error: Cannot fetch refresh token, make sure the authorization code was correct."&&printf "%s\n" "$response_check_refresh_token"&&return 1;}
            _set_value direct REFRESH_TOKEN "$refresh_token_value_check_refresh_token"
            { _check_access_token "$account_name_check_refresh_token" skip_check "$response_check_refresh_token"&&_update_config "$refresh_token_name_check_refresh_token" "$refresh_token_value_check_refresh_token" "$CONFIG";}||return 1
        }
        printf "\n"
    }
    # Expose the token under both the namespaced and the generic name.
    _set_value direct "$refresh_token_name_check_refresh_token" "$refresh_token_value_check_refresh_token"
    _set_value direct REFRESH_TOKEN "$refresh_token_value_check_refresh_token"
    return 0
}
_check_access_token(){
    # Ensure a valid, unexpired access token exists for the account,
    # refreshing it via the OAuth token endpoint when needed.
    #   $1 - account name (optional; empty means un-namespaced variables)
    #   $2 - "skip_check" forces an unconditional refresh
    #   $3 - optional pre-fetched token endpoint JSON response to reuse
    # Fix: TOKEN_URL added to the export list — it is used below and the
    # sibling _check_refresh_token already exports it.
    export CLIENT_ID CLIENT_SECRET REFRESH_TOKEN CONFIG QUIET TOKEN_URL
    [ -z "${CLIENT_ID:+${CLIENT_SECRET:+$REFRESH_TOKEN}}" ]&&return 1
    account_name_check_access_token="${1:-}" no_check_check_access_token="${2:-false}" response_json_check_access_token="${3:-}"
    unset token_name_check_access_token token_expiry_name_check_access_token token_value_check_access_token token_expiry_value_check_access_token response_check_access_token
    access_token_regex='ya29\.[0-9A-Za-z_-]+'
    token_name_check_access_token="${account_name_check_access_token:+ACCOUNT_${account_name_check_access_token}_}ACCESS_TOKEN"
    token_expiry_name_check_access_token="${token_name_check_access_token}_EXPIRY"
    _set_value indirect token_value_check_access_token "$token_name_check_access_token"
    _set_value indirect token_expiry_value_check_access_token "$token_expiry_name_check_access_token"
    # Refresh when forced, or token missing, expired, or malformed.
    [ "$no_check_check_access_token" = skip_check ]||[ -z "$token_value_check_access_token" ]||[ "${token_expiry_value_check_access_token:-0}" -lt "$(_epoch)" ]||! _assert_regex "$access_token_regex" "$token_value_check_access_token"&&{
        response_check_access_token="${response_json_check_access_token:-$(curl --compressed -s -X POST --data \
            "client_id=$CLIENT_ID&client_secret=$CLIENT_SECRET&refresh_token=$REFRESH_TOKEN&grant_type=refresh_token" "$TOKEN_URL")}"||:
        if token_value_check_access_token="$(printf "%s\n" "$response_check_access_token"|_json_value access_token 1 1)";then
            # Expiry = now + expires_in - 1 second of slack.
            token_expiry_value_check_access_token="$(($(_epoch)+$(printf "%s\n" "$response_check_access_token"|_json_value expires_in 1 1)-1))"
            _update_config "$token_name_check_access_token" "$token_value_check_access_token" "$CONFIG"||return 1
            _update_config "$token_expiry_name_check_access_token" "$token_expiry_value_check_access_token" "$CONFIG"||return 1
        else
            "${QUIET:-_print_center}" "justify" "Error: Something went wrong" ", printing error." "=" 1>&2
            printf "%s\n" "$response_check_access_token" 1>&2
            # Fix: send the hint to stderr like the two error lines above.
            printf "%s\n" "If refresh token has expired, then use --oauth-refetch-refresh-token to refetch refresh token, if the error is not clear make a issue on github repository." 1>&2
            return 1
        fi
    }
    # Expose under the generic names for the rest of the script.
    _set_value direct ACCESS_TOKEN "$token_value_check_access_token"
    _set_value direct ACCESS_TOKEN_EXPIRY "$token_expiry_value_check_access_token"
    _set_value direct INITIAL_ACCESS_TOKEN "$ACCESS_TOKEN"
    return 0
}
_reload_config() {
    # Re-read settings from $CONFIG; if the file is unreadable (or parsing
    # fails) just make sure it exists so later writes can succeed.
    export CONFIG
    if ! { [ -r "$CONFIG" ] && _parse_config "$CONFIG"; }; then
        printf "" >> "$CONFIG" || return 1
    fi
    return 0
}
_token_bg_service() {
    # Keep the access token fresh in the background for the lifetime of the
    # main process ($MAIN_PID); refreshed values are written to the scratch
    # file "${TMPFILE}_ACCESS_TOKEN" which the loop re-sources each round.
    export MAIN_PID ACCESS_TOKEN ACCESS_TOKEN_EXPIRY TMPFILE
    [ -n "$MAIN_PID" ] || return 0
    printf "%b\n" "ACCESS_TOKEN=\"$ACCESS_TOKEN\"\nACCESS_TOKEN_EXPIRY=\"$ACCESS_TOKEN_EXPIRY\"" >| "${TMPFILE}_ACCESS_TOKEN"
    {
        # Run until the main process disappears.
        while kill -0 "$MAIN_PID" 2>| /dev/null 1>&2; do
            . "${TMPFILE}_ACCESS_TOKEN"
            CURRENT_TIME="$(_epoch)"
            REMAINING_TOKEN_TIME="$((ACCESS_TOKEN_EXPIRY - CURRENT_TIME))"
            if [ "$REMAINING_TOKEN_TIME" -le 300 ]; then
                # Under 5 minutes left: refresh now, writing into the scratch
                # file instead of the real config.
                CONFIG="${TMPFILE}_ACCESS_TOKEN" _timeout 30 _check_access_token "" skip_check || :
            else
                # Sleep until 5 minutes before expiry.
                if [ "$REMAINING_TOKEN_TIME" -le 301 ]; then
                    TOKEN_PROCESS_TIME_TO_SLEEP=0
                else
                    TOKEN_PROCESS_TIME_TO_SLEEP="$((REMAINING_TOKEN_TIME - 300))"
                fi
                sleep "$TOKEN_PROCESS_TIME_TO_SLEEP"
            fi
            sleep 1
        done
    } &
    export ACCESS_TOKEN_SERVICE_PID="$!"
    return 0
}
_bytes_to_human() {
    # Convert a raw byte count ($1, default 0) into a human readable string
    # such as "1.46 KB" (two-digit fraction, 1024-based units).
    bth_value="$(printf "%.0f\n" "${1:-0}")"
    bth_power=0 bth_fraction='' bth_unit=''
    # Divide by 1024 until the value fits, keeping a two digit remainder.
    while [ "$bth_value" -gt 1024 ]; do
        bth_fraction="$(printf ".%02d" $((bth_value % 1024 * 100 / 1024)))"
        bth_value=$((bth_value / 1024))
        bth_power=$((bth_power + 1))
    done
    bth_index=0
    for bth_name in B KB MB GB TB PB EB YB ZB; do
        if [ "$bth_index" = "$bth_power" ]; then
            bth_unit="$bth_name"
            break
        fi
        bth_index=$((bth_index + 1))
    done
    printf "%s\n" "$bth_value$bth_fraction $bth_unit"
}
_check_debug(){
    # Configure the printing helpers (_print_center, _clear_line, _newline,
    # _move_cursor) once, based on DEBUG/QUIET and terminal capabilities.
    export DEBUG QUIET
    if [ -n "$DEBUG" ];then
        # Debug mode: trace every command and print plain, un-centered text.
        set -x&&PS4='-> '
        _print_center(){ { [ $# = 3 ]&&printf "%s\n" "$2";}||{ printf "%s%s\n" "$2" "$3";};}
        _clear_line(){ :;}&&_move_cursor(){ :;}&&_newline(){ :;}
    else
        if [ -z "$QUIET" ];then
            if _support_ansi_escapes;then
                # Terminal too narrow: fall back to plain bracketed output.
                if ! _required_column_size;then
                    _print_center(){ { [ $# = 3 ]&&printf "%s\n" "[ $2 ]";}||{ printf "%s\n" "[ $2$3 ]";};}
                fi
                export EXTRA_LOG="_print_center" CURL_PROGRESS="-#" CURL_PROGRESS_EXTRA="-#" SUPPORT_ANSI_ESCAPES="true"
            else
                # No ANSI support: plain output, no cursor movement.
                _print_center(){ { [ $# = 3 ]&&printf "%s\n" "[ $2 ]";}||{ printf "%s\n" "[ $2$3 ]";};}
                _clear_line(){ :;}&&_move_cursor(){ :;}
            fi
            _newline(){ printf "%b" "$1";}
        else
            # Quiet mode: suppress all decorative output.
            _print_center(){ :;}&&_clear_line(){ :;}&&_move_cursor(){ :;}&&_newline(){ :;}
        fi
        set +x
    fi
}
_check_internet() {
    # Connectivity probe: HEAD request to google.com, capped at 10 seconds.
    # Returns 1 (after printing an error) when the request fails.
    "${EXTRA_LOG:-}" "justify" "Checking Internet Connection.." "-"
    _timeout 10 curl -Is google.com --compressed || {
        _clear_line 1
        "${QUIET:-_print_center}" "justify" "Error: Internet connection" " not available." "="
        return 1
    }
    _clear_line 1
}
_clear_line() {
    # Move the cursor up $1 rows and erase the line it lands on
    # (ANSI CSI "cursor up" + "erase line" sequences).
    printf '\033[%sA\033[2K' "$1"
}
_dirname() {
    # POSIX dirname built purely from parameter expansions (no fork).
    dn_path="${1:-.}"
    # Drop any run of trailing slashes.
    dn_path="${dn_path%%"${dn_path##*[!/]}"}"
    # No slash left means the parent is the current directory.
    [ -n "${dn_path##*/*}" ] && dn_path=.
    # Cut the last path component, then trailing slashes again.
    dn_path="${dn_path%/*}"
    dn_path="${dn_path%%"${dn_path##*[!/]}"}"
    printf '%s\n' "${dn_path:-/}"
}
_display_time() {
    # Render a duration in seconds as "D days H hrs M minute(s) and S seconds",
    # omitting zero-valued leading units.
    dt_total="$1"
    dt_days="$((dt_total / 86400))"
    dt_hours="$((dt_total / 3600 % 24))"
    dt_mins="$((dt_total / 60 % 60))"
    dt_secs="$((dt_total % 60))"
    [ "$dt_days" -gt 0 ] && printf '%d days ' "$dt_days"
    [ "$dt_hours" -gt 0 ] && printf '%d hrs ' "$dt_hours"
    [ "$dt_mins" -gt 0 ] && printf '%d minute(s) ' "$dt_mins"
    # "and" only appears when some larger unit was printed.
    { [ "$dt_days" -gt 0 ] || [ "$dt_hours" -gt 0 ] || [ "$dt_mins" -gt 0 ]; } && printf 'and '
    printf '%d seconds\n' "$dt_secs"
}
_get_latest_sha(){
    # Scrape github.com (no API token needed) for the latest commit sha of a
    # branch or a release. Prints the sha, or nothing when it was not found.
    #   $1 - "branch" or "release" (defaults to $TYPE)
    #   $2 - branch/release name   (defaults to $TYPE_VALUE)
    #   $3 - "owner/repo"          (defaults to $REPO)
    export TYPE TYPE_VALUE REPO
    unset latest_sha_get_latest_sha raw_get_latest_sha
    # Each arm fetches a small page and greps the first commit link out of it;
    # the inner assignments run inside the outer command substitution.
    case "${1:-$TYPE}" in
        branch)\
            latest_sha_get_latest_sha="$(\
                raw_get_latest_sha="$(curl --compressed -s https://github.com/"${3:-$REPO}"/commits/"${2:-$TYPE_VALUE}".atom -r 0-2000)"
                _tmp="$(printf "%s\n" "$raw_get_latest_sha"|grep -o 'Commit\/.*<' -m1||:)"&&_tmp="${_tmp##*\/}"&&printf "%s\n" "${_tmp%%<*}")"
            ;;
        release)\
            latest_sha_get_latest_sha="$(\
                raw_get_latest_sha="$(curl -L --compressed -s https://github.com/"${3:-$REPO}"/releases/"${2:-$TYPE_VALUE}")"
                _tmp="$(printf "%s\n" "$raw_get_latest_sha"|grep '="/'"${3:-$REPO}""/commit" -m1||:)"&&_tmp="${_tmp##*commit\/}"&&printf "%s\n" "${_tmp%%\"*}")"
            ;;
        *):
    esac
    # %b so the trailing \n is only emitted when a sha was actually found.
    printf "%b" "${latest_sha_get_latest_sha:+$latest_sha_get_latest_sha\n}"
}
_json_escape() {
    # Escape a string for embedding into JSON.
    #   $1 - mode: "j" also escapes backslash, slash and double quote;
    #        any other mode only converts control characters
    #   $2 - input string (required)
    # Real newlines are always folded into literal "\n" sequences at the end.
    je_mode="${1:?Missing mode}" je_input="${2:?Provide Input}" je_output=""
    case "$je_mode" in
        j)
            je_output="$(printf "%s" "$je_input" | sed \
                -e "s|\\\|\\\\\\\|g" \
                -e "s|\/|\\\/|g" \
                -e 's/\"/\\\"/g' \
                -e "s/$(printf '\t')/\\t/g" \
                -e "s/$(printf '\r')/\\r/g" \
                -e "s/$(printf '\f')/\\f/g")"
            ;;
        *)
            je_output="$(printf "%s" "$je_input" | sed \
                -e "s/$(printf '\t')/\\t/g" \
                -e "s/$(printf '\r')/\\r/g" \
                -e "s/$(printf '\f')/\\f/g")"
            ;;
    esac
    # Join the lines with literal "\n" (awk sep trick), no trailing newline.
    je_output="$(printf "%s" "$je_output" | awk '{printf "%s%s",sep,$0; sep="\\n"} END{print ""}')"
    printf "%s" "$je_output"
}
_json_value() {
    # Crude JSON field extractor: print value(s) of key $1 from stdin.
    #   $2 - max matching lines for grep to scan (number), else unlimited
    #   $3 - which occurrence to print (number), or "all" for every one
    # Returns 1 when the key is not present at all.
    if [ "$2" -gt 0 ] 2>| /dev/null; then no_of_lines_json_value="$2"; fi
    if [ "$3" -gt 0 ] 2>| /dev/null; then
        num_json_value="$3"
    elif [ "$3" != all ]; then
        num_json_value=1
    fi
    # -m and its count must word-split, hence deliberately unquoted.
    _tmp="$(grep -o "\"$1\"\:.*" ${no_of_lines_json_value:+-m} $no_of_lines_json_value)" || return 1
    # Strip the key, surrounding quotes/commas and leading space, then print
    # the requested occurrence (a bare "p" prints them all).
    printf "%s\n" "$_tmp" | sed -e 's|.*"'"$1""\":||" -e 's/[",]*$//' -e 's/["]*$//' -e 's/[,]*$//' -e "s/^ //" -e 's/^"//' -n -e "$num_json_value"p || :
    return 0
}
_parse_config() {
    # Load a KEY=VALUE config file, exporting every pair it contains.
    #   $1 - config file path (required)
    #   $2 - when "true", also echo each key=value pair that was loaded
    pc_file="${1:?Error: Profile config file}"
    pc_print="${2:-false}"
    [ -r "$pc_file" ] || {
        printf "%s\n" "Error: Given config file ( $pc_file ) is not readable."
        return 1
    }
    while IFS='=' read -r pc_key pc_val; do
        # Skip blank keys/values and comment lines.
        { [ -n "$pc_key" ] && [ -n "$pc_val" ] && [ -n "${pc_key##\#*}" ]; } || continue
        # Trim surrounding whitespace from both halves.
        pc_key="${pc_key#"${pc_key%%[![:space:]]*}"}"
        pc_val="${pc_val#"${pc_val%%[![:space:]]*}"}"
        pc_key="${pc_key%"${pc_key##*[![:space:]]}"}"
        pc_val="${pc_val%"${pc_val##*[![:space:]]}"}"
        # Drop one matching pair of surrounding quotes, if present.
        case "$pc_val" in
            \"*\") pc_val="${pc_val#\"}" pc_val="${pc_val%\"}" ;;
            \'*\') pc_val="${pc_val#\'}" pc_val="${pc_val%\'}" ;;
            *) : ;;
        esac
        export "$pc_key=$pc_val" 2> /dev/null || printf "%s\n" "Warning: $pc_key is not a valid variable name."
        [ "$pc_print" = true ] && echo "$pc_key=$pc_val"
    done < "$pc_file"
    return 0
}
_print_center() {
    # Print a string centered on the terminal, padded with a symbol.
    # Forms:
    #   _print_center normal  <text> <symbol>           - text printed as-is
    #   _print_center justify <text> <symbol>           - "[ text ]", truncated
    #   _print_center justify <t1> <t2> <symbol>        - "[ t1t2 ]", each part
    #                                                     truncated separately
    [ $# -lt 3 ] && printf "Missing arguments\n" && return 1
    pcc_cols="${COLUMNS:-}"
    pcc_mode="$1" pcc_pad=""
    case "$pcc_mode" in
        normal)
            pcc_out="$2" && pcc_symbol="$3"
            ;;
        justify)
            if [ $# = 3 ]; then
                pcc_in1="$2" pcc_symbol="$3" pcc_limit="" pcc_out=""
                pcc_limit="$((pcc_cols - 5))"
                if [ "${#pcc_in1}" -gt "$pcc_limit" ]; then
                    pcc_out="[ $(printf "%.${pcc_limit}s\n" "$pcc_in1")..]"
                else
                    pcc_out="[ $pcc_in1 ]"
                fi
            else
                pcc_in1="$2" pcc_in2="$3" pcc_symbol="$4" pcc_limit="" pcc_tmp="" pcc_out=""
                # First part may use 47% of the width, second part 46%.
                pcc_limit="$((pcc_cols * 47 / 100))"
                if [ "${#pcc_in1}" -gt "$pcc_limit" ]; then
                    pcc_tmp=" $(printf "%.${pcc_limit}s\n" "$pcc_in1").."
                else
                    pcc_tmp=" $pcc_in1"
                fi
                pcc_limit="$((pcc_cols * 46 / 100))"
                if [ "${#pcc_in2}" -gt "$pcc_limit" ]; then
                    pcc_tmp="$pcc_tmp$(printf "%.${pcc_limit}s\n" "$pcc_in2").. "
                else
                    pcc_tmp="$pcc_tmp$pcc_in2 "
                fi
                pcc_out="[$pcc_tmp]"
            fi
            ;;
        *) return 1 ;;
    esac
    pcc_len="${#pcc_out}"
    # Too wide to pad: print as-is.
    [ "$pcc_len" -ge "$((pcc_cols - 1))" ] && {
        printf "%s\n" "$pcc_out" && return 0
    }
    pcc_pad_len="$(((pcc_cols - pcc_len) / 2))"
    pcc_i=1
    while [ "$pcc_i" -le "$pcc_pad_len" ]; do
        pcc_pad="$pcc_pad$pcc_symbol" && pcc_i="$((pcc_i + 1))"
    done
    printf "%s%s%s" "$pcc_pad" "$pcc_out" "$pcc_pad"
    # An odd leftover column gets one extra symbol on the right.
    [ "$(((pcc_cols - pcc_len) % 2))" -ne 0 ] && printf "%s" "$pcc_symbol"
    printf "\n"
    return 0
}
_print_center_quiet() {
    # Plain substitute for _print_center: no centering or padding, just the
    # text argument(s) — $2 alone for the 3-arg form, $2$3 otherwise.
    if [ $# = 3 ]; then
        printf "%s\n" "$2"
    else
        printf "%s%s\n" "$2" "$3"
    fi
}
_split() {
    # Print $1 one field per line, split on the delimiter characters in $2.
    set -f # no globbing while the string is word-split
    split_saved_ifs=$IFS
    IFS=$2
    # Intentional unquoted expansion: this performs the actual split.
    set -- $1
    printf '%s\n' "$@"
    IFS=$split_saved_ifs
    set +f
}
_support_ansi_escapes() {
    # Succeed only when stderr is a terminal whose $TERM is known to handle
    # ANSI escape sequences.
    unset sae_capable
    case "${TERM:-}" in
        xterm* | rxvt* | urxvt* | linux* | vt* | screen*) sae_capable="true" ;;
        *) : ;;
    esac
    [ -t 2 ] && [ -n "$sae_capable" ] && return 0
    return 1
}
_timeout() {
    # Run a command with a watchdog: SIGKILL it after $1 seconds.
    # Returns the command's exit status (or the kill's effect on wait).
    t_limit="${1:?Error: Specify Timeout}" && shift
    {
        "$@" &
        t_child="$!"
        trap -- "" TERM # shield this shell from stray TERM signals
        {
            sleep "$t_limit"
            kill -9 "$t_child"
        } &
        wait "$t_child"
    } 2>| /dev/null 1>&2
}
_update_config() {
    # Persist KEY="VALUE" in a config file, replacing any previous definition
    # of the same key, and leave the file read-only afterwards.
    #   $1 - key name, $2 - value, $3 - config file path
    [ $# -lt 3 ] && printf "Missing arguments\n" && return 1
    uc_key="$1" uc_value="$2" uc_file="$3"
    [ -f "$uc_file" ] || : >| "$uc_file" # make sure the file exists
    chmod u+w -- "$uc_file" || return 1
    # Keep everything except blank lines and old definitions of this key,
    # then append the fresh definition.
    printf "%s\n%s\n" "$(grep -v -e "^$" -e "^$uc_key=" -- "$uc_file" || :)" \
        "$uc_key=\"$uc_value\"" >| "$uc_file" || return 1
    chmod a-w-r-x,u+r -- "$uc_file" || return 1
    return 0
}
_check_existing_file(){
    # Query gdrive for a file with the given name inside a parent folder.
    # Prints the matching API JSON on success; returns 1 when no file exists
    # or when the optionally checked field does not have the expected value.
    #   $1 - filename (JSON-escaped by the caller)
    #   $2 - parent folder id
    #   $3 - optional extra field to compare (e.g. size, md5Checksum)
    #   $4 - expected value for that field
    export EXTRA_LOG CURL_PROGRESS_EXTRA API_URL API_VERSION
    [ $# -lt 2 ]&&printf "Missing arguments\n"&&return 1
    name_check_existing_file="$1" rootdir_check_existing_file="$2" mode_check_existing_file="$3" param_value_check_existing_file="$4"
    unset query_check_existing_file response_check_existing_file id_check_existing_file
    "$EXTRA_LOG" "justify" "Checking if file" " exists on gdrive.." "-" 1>&2
    # Only writable, non-trashed files count, so updates can overwrite them.
    query_check_existing_file="$(_url_encode "name=\"$name_check_existing_file\" and '$rootdir_check_existing_file' in parents and trashed=false and 'me' in writers")"
    response_check_existing_file="$(_api_request "$CURL_PROGRESS_EXTRA" \
        "$API_URL/drive/$API_VERSION/files?q=$query_check_existing_file&fields=files(id,name,mimeType${mode_check_existing_file:+,$mode_check_existing_file})&supportsAllDrives=true&includeItemsFromAllDrives=true"||:)"&&_clear_line 1 1>&2
    _clear_line 1 1>&2
    # No id in the response means no match (output is discarded, only the
    # exit status of _json_value matters here).
    printf "%s\n" "$response_check_existing_file"|_json_value id 1 1 2>|/dev/null 1>&2||return 1
    [ -n "$mode_check_existing_file" ]&&{
        [ "$(printf "%s\n" "$response_check_existing_file"|_json_value "$mode_check_existing_file" 1 1)" = "$param_value_check_existing_file" ]||return 1
    }
    printf "%s\n" "$response_check_existing_file"
    return 0
}
_clone_file(){
    # Server-side copy of an existing gdrive file into another folder.
    #   $1 - job mode ("update" overwrites an existing same-named file)
    #   $2 - source file id, $3 - destination folder id
    #   $4 - file name, $5 - size in bytes, $6 - md5 checksum
    export DESCRIPTION_FILE CHECK_MODE SKIP_DUPLICATES QUIET API_URL API_VERSION CURL_PROGRESS
    [ $# -lt 5 ]&&printf "Missing arguments\n"&&return 1
    job_clone_file="$1" file_id_clone_file="$2" file_root_id_clone_file="$3" name_clone_file="$4" size_clone_file="$5" md5_clone_file="$6"
    unset post_data_clone_file response_clone_file readable_size_clone_file description_clone_file&&STRING="Cloned"
    readable_size_clone_file="$(_bytes_to_human "$size_clone_file")"
    escaped_name_clone_file="$(_json_escape j "$name_clone_file")" print_name_clone_file="$(_json_escape p "$name_clone_file")"
    [ -n "$DESCRIPTION_FILE" ]&&{
        # Fix: the sed expressions carried a stray trailing "|" (an invalid
        # fourth delimiter that made sed reject the script), and the second
        # substitution repeated %f, which could never match after the first
        # replaced every %f — presumably %s (size) was intended.
        description_clone_file="$(printf "%s\n" "$DESCRIPTION_FILE"|sed -e "s|%f|$name_clone_file|g" -e "s|%s|$readable_size_clone_file|g")"
        description_clone_file="$(_json_escape j "$description_clone_file")"
    }
    post_data_clone_file="{\"parents\": [\"$file_root_id_clone_file\"]${description_clone_file:+,\"description\":\"$description_clone_file\"}}"
    _print_center "justify" "$print_name_clone_file " "| $readable_size_clone_file" "="
    if [ "$job_clone_file" = update ];then
        unset file_check_json_clone_file check_value_type_clone_file check_value_clone_file
        # Which attribute decides "same file": size (mode 2) or md5 (mode 3).
        case "$CHECK_MODE" in
            2)check_value_type_clone_file="size" check_value_clone_file="$size_clone_file";;
            3)check_value_type_clone_file="md5Checksum" check_value_clone_file="$md5_clone_file";;
            *):
        esac
        if file_check_json_clone_file="$(_check_existing_file "$escaped_name_clone_file" "$file_root_id_clone_file" "$check_value_type_clone_file" "$check_value_clone_file")";then
            if [ -n "$SKIP_DUPLICATES" ];then
                _collect_file_info "$file_check_json_clone_file" "$print_name_clone_file"||return 1
                _clear_line 1
                "${QUIET:-_print_center}" "justify" "$print_name_clone_file" " already exists." "="&&return 0
            else
                _print_center "justify" "Overwriting file.." "-"
                { _file_id_clone_file="$(printf "%s\n" "$file_check_json_clone_file"|_json_value id 1 1)"&&post_data_clone_file="$(_drive_info "$_file_id_clone_file" "parents,writersCanShare")";}||{ _error_logging_upload "$print_name_clone_file" "${post_data_clone_file:-$file_check_json_clone_file}"||return 1;}
                if [ "$_file_id_clone_file" != "$file_id_clone_file" ];then
                    # Remove the old copy before cloning the new one.
                    _api_request -s \
                        -X DELETE \
                        "$API_URL/drive/$API_VERSION/files/$_file_id_clone_file?supportsAllDrives=true&includeItemsFromAllDrives=true" 2>|/dev/null 1>&2||:
                    STRING="Updated"
                else
                    _collect_file_info "$file_check_json_clone_file" "$print_name_clone_file"||return 1
                fi
            fi
        else
            _print_center "justify" "Cloning file.." "-"
        fi
    else
        _print_center "justify" "Cloning file.." "-"
    fi
    # CURL_PROGRESS is intentionally unquoted: it may expand to several flags.
    response_clone_file="$(_api_request $CURL_PROGRESS \
        -X POST \
        -H "Content-Type: application/json; charset=UTF-8" \
        -d "$post_data_clone_file" \
        "$API_URL/drive/$API_VERSION/files/$file_id_clone_file/copy?supportsAllDrives=true&includeItemsFromAllDrives=true"||:)"
    for _ in 1 2 3;do _clear_line 1;done
    _collect_file_info "$response_clone_file" "$print_name_clone_file"||return 1
    "${QUIET:-_print_center}" "justify" "$print_name_clone_file " "| $readable_size_clone_file | $STRING" "="
    return 0
}
_create_directory(){
    # Print the id of a gdrive folder with the given name inside a parent
    # folder, creating the folder first when it does not already exist.
    #   $1 - directory name (only the last path component is used)
    #   $2 - parent folder id
    export EXTRA_LOG CURL_PROGRESS_EXTRA API_VERSION API_URL
    [ $# -lt 2 ]&&printf "Missing arguments\n"&&return 1
    dirname_create_directory="${1##*/}" rootdir_create_directory="$2"
    unset query_create_directory search_response_create_directory folder_id_create_directory
    escaped_dirname_create_directory="$(_json_escape j "$dirname_create_directory")"
    print_dirname_create_directory="$(_json_escape p "$dirname_create_directory")"
    "$EXTRA_LOG" "justify" "Creating GDRIVE DIR:" " $print_dirname_create_directory" "-" 1>&2
    # First look for an existing, non-trashed folder of the same name.
    query_create_directory="$(_url_encode "mimeType='application/vnd.google-apps.folder' and name=\"$escaped_dirname_create_directory\" and trashed=false and '$rootdir_create_directory' in parents")"
    search_response_create_directory="$(_api_request "$CURL_PROGRESS_EXTRA" \
        "$API_URL/drive/$API_VERSION/files?q=$query_create_directory&fields=files(id)&supportsAllDrives=true&includeItemsFromAllDrives=true"||:)"&&_clear_line 1 1>&2
    # Not found: create the folder.
    if ! folder_id_create_directory="$(printf "%s\n" "$search_response_create_directory"|_json_value id 1 1)";then
        unset create_folder_post_data_create_directory create_folder_response_create_directory
        create_folder_post_data_create_directory="{\"mimeType\": \"application/vnd.google-apps.folder\",\"name\": \"$escaped_dirname_create_directory\",\"parents\": [\"$rootdir_create_directory\"]}"
        create_folder_response_create_directory="$(_api_request "$CURL_PROGRESS_EXTRA" \
            -X POST \
            -H "Content-Type: application/json; charset=UTF-8" \
            -d "$create_folder_post_data_create_directory" \
            "$API_URL/drive/$API_VERSION/files?fields=id&supportsAllDrives=true&includeItemsFromAllDrives=true"||:)"&&_clear_line 1 1>&2
    fi
    _clear_line 1 1>&2
    # Print whichever id we ended up with; on failure dump the API response.
    { folder_id_create_directory="${folder_id_create_directory:-$(printf "%s\n" "$create_folder_response_create_directory"|_json_value id 1 1)}"&&printf "%s\n" "$folder_id_create_directory";}||{ printf "%s\n" "$create_folder_response_create_directory" 1>&2&&return 1;}
    return 0
}
_drive_info() {
    # Fetch selected metadata fields of a gdrive file/folder id.
    #   $1 - file/folder id
    #   $2 - comma separated list of fields to request
    # Prints the raw API JSON (with trailing newline) when non-empty.
    export EXTRA_LOG CURL_PROGRESS_EXTRA API_URL API_VERSION
    [ $# -lt 2 ] && printf "Missing arguments\n" && return 1
    di_id="$1" di_fields="$2"
    unset di_response
    "$EXTRA_LOG" "justify" "Fetching info.." "-" 1>&2
    di_response="$(_api_request "$CURL_PROGRESS_EXTRA" \
        "$API_URL/drive/$API_VERSION/files/$di_id?fields=$di_fields&supportsAllDrives=true&includeItemsFromAllDrives=true" || :)" && _clear_line 1 1>&2
    _clear_line 1 1>&2
    printf "%b" "${di_response:+$di_response\n}"
    return 0
}
_extract_id() {
    # Pull the bare file/folder id out of a Google Drive URL, or print the
    # input unchanged when it already looks like a plain id.
    [ $# = 0 ] && printf "Missing arguments\n" && return 1
    LC_ALL=C ei_id="$1"
    case "$ei_id" in
        *'drive.google.com'*'id='*)
            # ...open?id=<id>&... style links.
            _tmp="${ei_id##*id=}" && _tmp="${_tmp%%\?*}" && ei_id="${_tmp%%\&*}"
            ;;
        *'drive.google.com'*'file/d/'* | 'http'*'docs.google.com'*'/d/'*)
            # .../file/d/<id>/view and docs .../d/<id>/... style links.
            _tmp="${ei_id##*\/d\/}" && _tmp="${_tmp%%\/*}" && _tmp="${_tmp%%\?*}" && ei_id="${_tmp%%\&*}"
            ;;
        *'drive.google.com'*'drive'*'folders'*)
            # .../drive/folders/<id> style links.
            _tmp="${ei_id##*\/folders\/}" && _tmp="${_tmp%%\?*}" && ei_id="${_tmp%%\&*}"
            ;;
        *) : ;;
    esac
    printf "%b" "${ei_id:+$ei_id\n}"
}
_upload_file(){
# Upload (create or update) one local file to a drive folder, resuming an
# interrupted upload if a session file for it exists under INFO_PATH.
# Args: $1 - "create" or "update", $2 - local file path, $3 - parent folder id
export QUIET DESCRIPTION_FILE CHECK_MODE SKIP_DUPLICATES API_URL API_VERSION INFO_PATH
[ $# -lt 3 ]&&printf "Missing arguments\n"&&return 1
job_upload_file="$1" input_upload_file="$2" folder_id_upload_file="$3"
unset slug_upload_file inputname_upload_file extension_upload_file inputsize_upload_file readable_size_upload_file request_method_upload_file \
    url_upload_file postdata_upload_file uploadlink_upload_file upload_body_upload_file mime_type_upload_file description_upload_file \
    resume_args1_upload_file resume_args2_upload_file resume_args3_upload_file
slug_upload_file="${input_upload_file##*/}"
escaped_slug_upload_file="$(_json_escape j "$slug_upload_file")" print_slug_upload_file="$(_json_escape p "$slug_upload_file")"
inputname_upload_file="${slug_upload_file%.*}"
extension_upload_file="${slug_upload_file##*.}"
inputsize_upload_file="$(($(wc -c <"$input_upload_file")))"&&content_length_upload_file="$inputsize_upload_file"
readable_size_upload_file="$(_bytes_to_human "$inputsize_upload_file")"
# no extension in the name -> detect the mime type with file(1)/mimetype(1)
[ "$inputname_upload_file" = "$extension_upload_file" ]&&{
    mime_type_upload_file="$(file --brief --mime-type "$input_upload_file"||mimetype --output-format %m "$input_upload_file")" 2>|/dev/null||{
        "${QUIET:-_print_center}" "justify" "Error: file or mimetype command not found." "="&&printf "\n"
        exit 1
    }
}
[ -n "$DESCRIPTION_FILE" ]&&{
    # expand the description template; fix: the size placeholder is %s, the
    # second expression wrongly replaced %f again so %s was never substituted
    description_upload_file="$(printf "%s\n" "$DESCRIPTION_FILE"|sed -e "s|%f|$slug_upload_file|g" -e "s|%s|$readable_size_upload_file|g" -e "s|%m|$mime_type_upload_file|g")"
    description_upload_file="$(_json_escape j "$description_upload_file")"
}
_print_center "justify" "$print_slug_upload_file" " | $readable_size_upload_file" "="
# update mode: look for an existing remote file (optionally matching on size
# or md5 depending on CHECK_MODE) and PATCH it instead of creating a new one
[ "$job_upload_file" = update ]&&{
    unset file_check_json_upload_file check_value_upload_file
    case "$CHECK_MODE" in
        2)check_value_type_upload_file="size" check_value_upload_file="$inputsize_upload_file";;
        3)\
            check_value_type_upload_file="md5Checksum"
            check_value_upload_file="$(md5sum "$input_upload_file")"||{
                "${QUIET:-_print_center}" "justify" "Error: cannot calculate md5sum of given file." "=" 1>&2
                return 1
            }
            check_value_upload_file="${check_value_upload_file%% *}"
            ;;
        *):
    esac
    if file_check_json_upload_file="$(_check_existing_file "$escaped_slug_upload_file" "$folder_id_upload_file" "$check_value_type_upload_file" "$check_value_upload_file")";then
        if [ -n "$SKIP_DUPLICATES" ];then
            _collect_file_info "$file_check_json_upload_file" "$print_slug_upload_file"||return 1
            STRING="Skipped" _normal_logging_upload
            return 0
        else
            request_method_upload_file="PATCH"
            _file_id_upload_file="$(printf "%s\n" "$file_check_json_upload_file"|_json_value id 1 1)"||{ _error_logging_upload "$print_slug_upload_file" "$file_check_json_upload_file"||return 1;}
            url_upload_file="$API_URL/upload/drive/$API_VERSION/files/$_file_id_upload_file?uploadType=resumable&supportsAllDrives=true&includeItemsFromAllDrives=true"
            postdata_upload_file="{\"mimeType\": \"$mime_type_upload_file\",\"name\": \"$escaped_slug_upload_file\",\"addParents\": [\"$folder_id_upload_file\"]${description_upload_file:+,\"description\":\"$description_upload_file\"}}"
            STRING="Updated"
        fi
    else
        # nothing to update remotely, fall through to a normal create
        job_upload_file="create"
    fi
}
[ "$job_upload_file" = create ]&&{
    url_upload_file="$API_URL/upload/drive/$API_VERSION/files?uploadType=resumable&supportsAllDrives=true&includeItemsFromAllDrives=true"
    request_method_upload_file="POST"
    postdata_upload_file="{\"mimeType\": \"$mime_type_upload_file\",\"name\": \"$escaped_slug_upload_file\",\"parents\": [\"$folder_id_upload_file\"]${description_upload_file:+,\"description\":\"$description_upload_file\"}}"
    STRING="Uploaded"
}
# resumable-session bookkeeping file: name + folder id + size identify it
__file_upload_file="$INFO_PATH/${print_slug_upload_file}__::__${folder_id_upload_file}__::__$inputsize_upload_file"
if [ -r "$__file_upload_file" ];then
    uploadlink_upload_file="$(cat "$__file_upload_file"||:)"
    # probe the saved session: 308 -> resume, 4xx/000 -> restart, 200/201 -> done
    http_code_upload_file="$(curl --compressed -s -X PUT "$uploadlink_upload_file" -o /dev/null --write-out %"{http_code}")"||:
    case "$http_code_upload_file" in
        308)\
            uploaded_range_upload_file="$(\
                raw_upload_file="$(curl --compressed -s -X PUT \
                    -H "Content-Range: bytes */$content_length_upload_file" \
                    --url "$uploadlink_upload_file" --globoff -D -||:)"&&printf "%s\n" "${raw_upload_file##*[R,r]ange: bytes=0-}"|while
                read -r line
                do printf "%s\n" "${line%%"$(printf '\r')"}"&&break;done)"
            if [ "$uploaded_range_upload_file" -gt 0 ] 2>|/dev/null;then
                _print_center "justify" "Resuming interrupted upload.." "-"&&_newline "\n"
                content_range_upload_file="$(printf "bytes %s-%s/%s\n" "$((uploaded_range_upload_file+1))" "$((inputsize_upload_file-1))" "$inputsize_upload_file")"
                content_length_upload_file="$((inputsize_upload_file-$((uploaded_range_upload_file+1))))"
                resume_args1_upload_file='-s' resume_args2_upload_file='--http1.1' resume_args3_upload_file="Content-Range: $content_range_upload_file"
                _upload_file_from_uri _clear_line
                _collect_file_info "$upload_body_upload_file" "$print_slug_upload_file"||return 1
                _normal_logging_upload
                _remove_upload_session
            else
                _full_upload||return 1
            fi
            ;;
        4[0-9][0-9]|000)_full_upload||return 1
            ;;
        201|200)\
            upload_body_upload_file="$http_code_upload_file"
            _collect_file_info "$upload_body_upload_file" "$print_slug_upload_file"||return 1
            _normal_logging_upload
            _remove_upload_session
            ;;
        *):
    esac
else
    _full_upload||return 1
fi
return 0
}
_generate_upload_link(){
# Ask the drive api for a resumable upload session uri.
# Uses: request_method_upload_file, mime_type_upload_file,
#       inputsize_upload_file, postdata_upload_file, url_upload_file
# Sets: uploadlink_upload_file (value of the response Location header).
"${EXTRA_LOG:-}" "justify" "Generating upload link.." "-" 1>&2
uploadlink_upload_file="$(_api_request "${CURL_PROGRESS_EXTRA:-}" \
    -X "$request_method_upload_file" \
    -H "Content-Type: application/json; charset=UTF-8" \
    -H "X-Upload-Content-Type: $mime_type_upload_file" \
    -H "X-Upload-Content-Length: $inputsize_upload_file" \
    -d "$postdata_upload_file" \
    "$url_upload_file" \
    -D -||:)"&&_clear_line 1 1>&2
_clear_line 1 1>&2
# -D - dumped the response headers to stdout; extract the [Ll]ocation header
# value: first line only, trailing CR stripped
case "$uploadlink_upload_file" in
    *'ocation: '*'upload_id'*)uploadlink_upload_file="$(printf "%s\n" "${uploadlink_upload_file##*[L,l]ocation: }"|while read -r line;do printf "%s\n" "${line%%"$(printf '\r')"}"&&break;done)"&&return 0;;
    *)return 1
esac
return 0
}
_upload_file_from_uri(){
# PUT the actual file bytes to the resumable session uri.
# "$1" (optional) - command to run after clearing the progress lines.
# NOTE: CURL_PROGRESS / CURL_SPEED / resume_args* are deliberately left
# unquoted so empty values vanish instead of becoming empty arguments.
_print_center "justify" "Uploading.." "-"
upload_body_upload_file="$(_api_request ${CURL_PROGRESS:-} \
    -X PUT \
    -H "Content-Type: $mime_type_upload_file" \
    -H "Content-Length: $content_length_upload_file" \
    -H "Slug: $print_slug_upload_file" \
    -T "$input_upload_file" \
    -o- \
    --url "$uploadlink_upload_file" \
    --globoff \
    ${CURL_SPEED:-} ${resume_args1_upload_file:-} ${resume_args2_upload_file:-} \
    -H "$resume_args3_upload_file"||:)"
[ -z "${VERBOSE_PROGRESS:-}" ]&&for _ in 1 2;do _clear_line 1;done&&"${1:-:}"
return 0
}
_normal_logging_upload(){
# Print the final one-line status ("name | size | Uploaded/Updated/Skipped")
# for the file just processed.
if [ -z "${VERBOSE_PROGRESS:-}" ];then
    _clear_line 1
fi
"${QUIET:-_print_center}" "justify" "$slug_upload_file " "| $readable_size_upload_file | ${STRING:-}" "="
return 0
}
_log_upload_session(){
# Persist the resumable upload uri so an interrupted upload can be resumed
# later, but only for files large enough (> 1 MB) to be worth resuming.
if [ "$inputsize_upload_file" -gt 1000000 ];then
    printf "%s\n" "$uploadlink_upload_file" >|"$__file_upload_file"
fi
return 0
}
_remove_upload_session(){
# The upload finished (or was abandoned) - forget the saved session file.
rm -f "$__file_upload_file"
return 0
}
_full_upload(){
# Full upload flow: obtain a session uri, remember it for resuming, push the
# bytes, record the result, then drop the session file.
_generate_upload_link||{ _error_logging_upload "$print_slug_upload_file" "$uploadlink_upload_file"||return 1;}
_log_upload_session
_upload_file_from_uri
_collect_file_info "$upload_body_upload_file" "$print_slug_upload_file"||return 1
_normal_logging_upload
_remove_upload_session
return 0
}
_share_id(){
# Grant permission on a drive file/folder id.
# Args: $1 - file/folder id, $2 - role (defaults to "reader" when empty),
#       $3 - optional email; when given share with that user, else with anyone
[ $# -lt 2 ]&&printf "Missing arguments\n"&&return 1
# fix: the role read from $2 was immediately overwritten by a hard-coded
# "reader" assignment on the same line, so the caller's share role (e.g. the
# SHARE_ROLE flag) was silently ignored; default to reader only when unset
id_share_id="$1" role_share_id="${2:-reader}" share_email_share_id="$3" type_share_id="${share_email_share_id:+user}"
unset post_data_share_id response_share_id
"$EXTRA_LOG" "justify" "Sharing.." "-" 1>&2
post_data_share_id="{\"role\":\"$role_share_id\",\"type\":\"${type_share_id:-anyone}\"${share_email_share_id:+,\"emailAddress\":\"$share_email_share_id\"}}"
response_share_id="$(_api_request "$CURL_PROGRESS_EXTRA" \
    -X POST \
    -H "Content-Type: application/json; charset=UTF-8" \
    -d "$post_data_share_id" \
    "$API_URL/drive/$API_VERSION/files/$id_share_id/permissions?supportsAllDrives=true&includeItemsFromAllDrives=true"||:)"&&_clear_line 1 1>&2
_clear_line 1 1>&2
{ printf "%s\n" "$response_share_id"|_json_value id 1 1 2>|/dev/null 1>&2&&return 0;}||{ printf "%s\n" "Error: Cannot Share." 1>&2&&printf "%s\n" "$response_share_id" 1>&2&&return 1;}
}
_api_request(){
# curl wrapper that injects the oauth bearer token. The token file is
# re-sourced on every request because a background service keeps it fresh.
. "${TMPFILE:-}_ACCESS_TOKEN"
curl --compressed \
    -H "Authorization: Bearer ${ACCESS_TOKEN:-}" \
    "$@"
}
_collect_file_info(){
# Extract the uploaded file's id from an api response (into global FILE_ID)
# and, when LOG_FILE_ID names a writable log file, append a readable summary.
# Args: $1 - json response, $2 - printable file name (for error reporting)
json_collect_file_info="$1" info_collect_file_info=""
FILE_ID="$(printf "%s\n" "$json_collect_file_info"|_json_value id 1 1)"||{ _error_logging_upload "$2" "$json_collect_file_info"||return 1;}
# no log configured, or the target is a directory: nothing more to do
if [ -z "$LOG_FILE_ID" ]||[ -d "$LOG_FILE_ID" ];then
    return 0
fi
info_collect_file_info="Link: https://drive.google.com/open?id=$FILE_ID
Name: $(printf "%s\n" "$json_collect_file_info"|_json_value name 1 1||:)
ID: $FILE_ID
Type: $(printf "%s\n" "$json_collect_file_info"|_json_value mimeType 1 1||:)"
printf "%s\n\n" "$info_collect_file_info" >>"$LOG_FILE_ID"
return 0
}
_error_logging_upload(){
# Report a failed upload on stderr and always return 1.
# Args: $1 - printable file name, $2 - raw api response / error text
log_error_logging_upload="$2"
"${QUIET:-_print_center}" "justify" "Upload ERROR" ", ${1:-} not ${STRING:-uploaded}." "=" 1>&2
case "$log_error_logging_upload" in
    *'"message": "User rate limit exceeded."'*)
        # daily quota exhausted - retrying is pointless, disable retries
        printf "%s\n\n%s\n" "$log_error_logging_upload" \
            "Today's upload limit reached for this account. Use another account to upload or wait for tomorrow." \
            1>&2
        export RETRY=0
        ;;
    *)
        printf "%s\n" "$log_error_logging_upload" 1>&2
        ;;
esac
printf "\n\n\n" 1>&2
return 1
}
_get_rootdir_id(){
# Given a file path, look up in the DIRIDS mapping (built while creating the
# remote folder tree) the drive id of the directory containing that file.
# Each DIRIDS line looks like: <id>|:_//_:|<local dir>|:_//_:|
file_gen_final_list="${1:?Error: give filename}"
rootdir_gen_final_list="$(_dirname "$file_gen_final_list")"
temp_gen_final_list="$(printf "%s\n" "${DIRIDS:?Error: DIRIDS Missing}"|grep -F "|:_//_:|$rootdir_gen_final_list|:_//_:|"||:)"
# strip the "|:_//_:|<dir>|:_//_:|" suffix, leaving just the id
printf "%s\n" "${temp_gen_final_list%%"|:_//_:|$rootdir_gen_final_list|:_//_:|"}"
return 0
}
_upload_file_main(){
# Retry wrapper around _upload_file.
# Args: $1 - "parse" (derive parent id from DIRIDS) or "noparse",
#       $2 - file path, $3 - parent folder id (noparse mode only),
#       $4 - non-empty inside the parallel uploader: silence output and print
#            the file name on stdout (success) or stderr (failure) instead.
# Sets RETURN_STATUS: 1 = uploaded, 2 = failed.
[ $# -lt 2 ]&&printf "Missing arguments\n"&&return 1
file_upload_file_main="$2" sleep_upload_file_main=0
{ [ "$1" = parse ]&&dirid_upload_file_main="$(_get_rootdir_id "$file_upload_file_main")";}||dirid_upload_file_main="$3"
retry_upload_file_main="${RETRY:-0}"&&unset RETURN_STATUS
# loop until retries are exhausted AND a status was recorded (runs at least once)
until [ "$retry_upload_file_main" -le 0 ]&&[ -n "$RETURN_STATUS" ];do
    if [ -n "$4" ];then
        { _upload_file "${UPLOAD_MODE:-create}" "$file_upload_file_main" "$dirid_upload_file_main" 2>|/dev/null 1>&2&&RETURN_STATUS=1&&break;}||RETURN_STATUS=2
    else
        { _upload_file "${UPLOAD_MODE:-create}" "$file_upload_file_main" "$dirid_upload_file_main"&&RETURN_STATUS=1&&break;}||RETURN_STATUS=2
    fi
    # back off a little more on every attempt
    [ "$((retry_upload_file_main-=1))" -lt 1 ]&&sleep "$((sleep_upload_file_main+=1))"
    continue
done
[ -n "$4" ]&&{
    { [ "$RETURN_STATUS" = 1 ]&&printf "%s\n" "$file_upload_file_main";}||printf "%s\n" "$file_upload_file_main" 1>&2
}
return 0
}
_upload_folder(){
# Upload a newline separated list of files, sequentially ("normal") or via
# xargs -P ("parallel"). Sets SUCCESS_STATUS/ERROR_STATUS counters and
# SUCCESS_FILES/ERROR_FILES lists for the caller.
# Args: $1 - mode, $2 - "parse"/"noparse", $3 - file list, $4 - folder id
export VERBOSE VERBOSE_PROGRESS NO_OF_PARALLEL_JOBS TMPFILE NO_OF_FILES
[ $# -lt 3 ]&&printf "Missing arguments\n"&&return 1
mode_upload_folder="$1" PARSE_MODE="$2" files_upload_folder="$3" ID="${4:-}"
SUCCESS_STATUS=0 SUCCESS_FILES="" ERROR_STATUS=0 ERROR_FILES=""
case "$mode_upload_folder" in
    normal)[ "$PARSE_MODE" = parse ]&&_clear_line 1&&_newline "\n"
        while read -r file <&4;do
            _upload_file_main "$PARSE_MODE" "$file" "$ID"
            { [ "$RETURN_STATUS" = 1 ]&&: "$((SUCCESS_STATUS+=1))"&&SUCCESS_FILES="$(printf "%b\n" "${SUCCESS_STATUS:+$SUCCESS_STATUS\n}$file")";}||{ : "$((ERROR_STATUS+=1))"&&ERROR_FILES="$(printf "%b\n" "${ERROR_STATUS:+$ERROR_STATUS\n}$file")";}
            if [ -n "${VERBOSE:-$VERBOSE_PROGRESS}" ];then
                _print_center "justify" "Status: $SUCCESS_STATUS Uploaded" " | $ERROR_STATUS Failed" "="&&_newline "\n"
            else
                for _ in 1 2;do _clear_line 1;done
                _print_center "justify" "Status: $SUCCESS_STATUS Uploaded" " | $ERROR_STATUS Failed" "="
            fi
        done 4<<EOF
$(printf "%s\n" "$files_upload_folder")
EOF
        ;;
    parallel)\
        NO_OF_PARALLEL_JOBS_FINAL="$((NO_OF_PARALLEL_JOBS>NO_OF_FILES?NO_OF_FILES:NO_OF_PARALLEL_JOBS))"
        [ -f "$TMPFILE"SUCCESS ]&&rm "$TMPFILE"SUCCESS
        [ -f "$TMPFILE"ERROR ]&&rm "$TMPFILE"ERROR
        export PARSE_MODE ID
        (printf "%s\n" "$files_upload_folder"|xargs -P"$NO_OF_PARALLEL_JOBS_FINAL" -I "{}" -n 1 sh -c '
        eval "${SOURCE_UTILS}"
        _upload_file_main "${PARSE_MODE}" "{}" "${ID}" true
        ' 1>|"$TMPFILE"SUCCESS 2>|"$TMPFILE"ERROR)&
        pid="$!"
        # wait until the workers have produced some output
        # fix: the error file was spelled "ERORR", so only the success file
        # was ever actually awaited
        until [ -f "$TMPFILE"SUCCESS ]||[ -f "$TMPFILE"ERROR ];do sleep 0.5;done
        [ "$PARSE_MODE" = parse ]&&_clear_line 1
        _newline "\n"
        # poll the counters while the background xargs job is still alive
        until ! kill -0 "$pid" 2>|/dev/null 1>&2;do
            SUCCESS_STATUS="$(($(wc -l <"$TMPFILE"SUCCESS)))"
            ERROR_STATUS="$(($(wc -l <"$TMPFILE"ERROR)))"
            sleep 1
            [ "$((SUCCESS_STATUS+ERROR_STATUS))" != "$TOTAL" ]&&_clear_line 1&&"${QUIET:-_print_center}" "justify" "Status" ": $SUCCESS_STATUS Uploaded | $ERROR_STATUS Failed" "="
            TOTAL="$((SUCCESS_STATUS+ERROR_STATUS))"
        done
        SUCCESS_STATUS="$(($(wc -l <"$TMPFILE"SUCCESS)))" SUCCESS_FILES="$(cat "$TMPFILE"SUCCESS)"
        ERROR_STATUS="$(($(wc -l <"$TMPFILE"ERROR)))" ERROR_FILES="$(cat "$TMPFILE"ERROR)"
        export SUCCESS_FILES ERROR_FILES
        ;;
    *):
esac
return 0
}
_cleanup_config(){
# Remove expired access tokens (and their expiry entries) from the config
# file, then re-apply restrictive permissions on it.
config="${1:?Error: Missing config}"&&unset values_regex _tmp
! [ -f "$config" ]&&return 0
while read -r line <&4&&[ -n "$line" ];do
    # a line looks like: ACCOUNT_<name>_ACCESS_TOKEN_EXPIRY="1234567890"
    expiry_value_name="${line%%=*}"
    token_value_name="${expiry_value_name%%_EXPIRY}"
    _tmp="${line##*=}"&&_tmp="${_tmp%\"}"&&expiry="${_tmp#\"}"
    # already expired -> schedule both the token and its expiry for deletion
    [ "$expiry" -le "$(_epoch)" ]&&values_regex="${values_regex:+$values_regex|}$expiry_value_name=\".*\"|$token_value_name=\".*\""
done 4<<EOF
$(grep -F ACCESS_TOKEN_EXPIRY -- "$config"||:)
EOF
# rewrite the config without blank lines and without the expired entries
chmod u+w -- "$config"&&printf "%s\n" "$(grep -Ev "^\$${values_regex:+|$values_regex}" -- "$config")" >|"$config"&&chmod "a-w-r-x,u+r" -- "$config"
return 0
}
_setup_arguments(){
# Parse cli flags and initialise global configuration (api endpoints, config
# file location, curl verbosity) before any network activity happens.
[ $# = 0 ]&&printf "Missing arguments\n"&&return 1
unset CONTINUE_WITH_NO_INPUT
export CURL_PROGRESS="-s" EXTRA_LOG=":" CURL_PROGRESS_EXTRA="-s"
INFO_PATH="$HOME/.google-drive-upload" CONFIG_INFO="$INFO_PATH/google-drive-upload.configpath"
# CONFIG_INFO may point CONFIG at a user chosen config path
[ -f "$CONFIG_INFO" ]&&. "$CONFIG_INFO"
CONFIG="${CONFIG:-$HOME/.googledrive.conf}"
unset ROOT_FOLDER CLIENT_ID CLIENT_SECRET REFRESH_TOKEN ACCESS_TOKEN
export API_URL="https://www.googleapis.com"
export API_VERSION="v3" \
    SCOPE="$API_URL/auth/drive" \
    REDIRECT_URI="http%3A//localhost" \
    TOKEN_URL="https://accounts.google.com/o/oauth2/token"
_parse_arguments "_parser_setup_flags" "$@"||return 1
_check_debug
# verbose progress implies full curl output; quiet forces silent curl
[ -n "$VERBOSE_PROGRESS" ]&&unset VERBOSE&&export CURL_PROGRESS=""
[ -n "$QUIET" ]&&export CURL_PROGRESS="-s"
mkdir -p "$INFO_PATH"||return 1
[ -n "$DELETE_ACCOUNT_NAME" ]&&_delete_account "$DELETE_ACCOUNT_NAME"
[ -n "$LIST_ACCOUNTS" ]&&_all_accounts
# no file/id/folder input given: only account management operations remain
[ -z "${INPUT_FILE_1:-${INPUT_ID_1:-$FOLDERNAME}}" ]&&{
    [ -z "${DELETE_ACCOUNT_NAME:-${LIST_ACCOUNTS:-$NEW_ACCOUNT_NAME}}" ]&&_short_help
    [ -n "${DELETE_ACCOUNT_NAME:-${LIST_ACCOUNTS:-}}" ]&&exit 0
    [ -n "$NEW_ACCOUNT_NAME" ]&&CONTINUE_WITH_NO_INPUT="true"
}
# map the human readable upload mode to the numeric CHECK_MODE used later
[ -z "$CHECK_MODE" ]&&{
    case "${SKIP_DUPLICATES:-$OVERWRITE}" in
        "Overwrite")export CHECK_MODE="1";;
        "Skip Existing")export CHECK_MODE="2";;
        *):
    esac
}
return 0
}
_setup_traps(){
# Install exit/interrupt traps: persist a refreshed access token back to the
# config, stop the token refresh service and child processes, remove tmp
# files, and (on a clean exit) prune the config / self update.
export SUPPORT_ANSI_ESCAPES TMPFILE ACCESS_TOKEN ACCESS_TOKEN_EXPIRY INITIAL_ACCESS_TOKEN ACCOUNT_NAME CONFIG ACCESS_TOKEN_SERVICE_PID
_cleanup(){
    # restore cursor and line wrapping if we disabled them
    [ -n "$SUPPORT_ANSI_ESCAPES" ]&&printf "\033[?25h\033[?7h"
    {
        [ -f "${TMPFILE}_ACCESS_TOKEN" ]&&{
            . "${TMPFILE}_ACCESS_TOKEN"
            # only write the token back if the refresh service changed it
            [ "$INITIAL_ACCESS_TOKEN" = "$ACCESS_TOKEN" ]||{
                _update_config "ACCOUNT_${ACCOUNT_NAME}_ACCESS_TOKEN" "$ACCESS_TOKEN" "$CONFIG"
                _update_config "ACCOUNT_${ACCOUNT_NAME}_ACCESS_TOKEN_EXPIRY" "$ACCESS_TOKEN_EXPIRY" "$CONFIG"
            }
        }||: 1>|/dev/null
        # stop the background token refresh service and its children
        [ -n "$ACCESS_TOKEN_SERVICE_PID" ]&&{
            token_service_pids="$(ps --ppid="$ACCESS_TOKEN_SERVICE_PID" -o pid=)"
            kill "$ACCESS_TOKEN_SERVICE_PID"
        }||: 1>|/dev/null
        script_children_pids="$(ps --ppid="$MAIN_PID" -o pid=)"
        kill $token_service_pids $script_children_pids 1>|/dev/null
        rm -f "${TMPFILE:?}"*
        export abnormal_exit&&if [ -n "$abnormal_exit" ];then
            printf "\n\n%s\n" "Script exited manually."
            kill "${_SCRIPT_KILL_SIGNAL:--9}" -$$&
        else
            { _cleanup_config "$CONFIG"&&[ "${GUPLOAD_INSTALLED_WITH:-}" = script ]&&_auto_update;} 1>|/dev/null&
        fi
    } 2>|/dev/null||:
    return 0
}
trap 'abnormal_exit="1" ; exit' INT TERM
trap '_cleanup' EXIT
trap '' TSTP
export MAIN_PID="$$"
}
_setup_root_dir(){
# Determine and validate ROOT_FOLDER (the drive folder all uploads are
# relative to), caching its id and display name per account in the config.
export ROOTDIR ROOT_FOLDER ROOT_FOLDER_NAME QUIET ACCOUNT_NAME CONFIG UPDATE_DEFAULT_ROOTDIR
_check_root_id(){
    # resolve the user supplied id/url and verify it exists on drive;
    # "$1" is the config updater used to persist it (":" skips persisting)
    _setup_root_dir_json="$(_drive_info "$(_extract_id "$ROOT_FOLDER")" "id")"
    if ! rootid_setup_root_dir="$(printf "%s\n" "$_setup_root_dir_json"|_json_value id 1 1)";then
        if printf "%s\n" "$_setup_root_dir_json"|grep "File not found" -q;then
            "${QUIET:-_print_center}" "justify" "Given root folder" " ID/URL invalid." "=" 1>&2
        else
            printf "%s\n" "$_setup_root_dir_json" 1>&2
        fi
        return 1
    fi
    ROOT_FOLDER="$rootid_setup_root_dir"
    "${1:-:}" "ACCOUNT_${ACCOUNT_NAME}_ROOT_FOLDER" "$ROOT_FOLDER" "$CONFIG"||return 1
    return 0
}
_check_root_id_name(){
    # fetch the root folder's display name and optionally persist it
    ROOT_FOLDER_NAME="$(_drive_info "$(_extract_id "$ROOT_FOLDER")" "name"|_json_value name 1 1||:)"
    "${1:-:}" "ACCOUNT_${ACCOUNT_NAME}_ROOT_FOLDER_NAME" "$ROOT_FOLDER_NAME" "$CONFIG"||return 1
    return 0
}
# load any cached per-account values first
_set_value indirect ROOT_FOLDER "ACCOUNT_${ACCOUNT_NAME}_ROOT_FOLDER"
_set_value indirect ROOT_FOLDER_NAME "ACCOUNT_${ACCOUNT_NAME}_ROOT_FOLDER_NAME"
if [ -n "${ROOTDIR:-}" ];then
    # explicit --root-dir flag overrides whatever was cached
    ROOT_FOLDER="$ROOTDIR"&&{ _check_root_id "$UPDATE_DEFAULT_ROOTDIR"||return 1;}&&unset ROOT_FOLDER_NAME
elif [ -z "$ROOT_FOLDER" ];then
    # first run: prompt interactively on a terminal, else default to "root"
    { [ -t 1 ]&&"${QUIET:-_print_center}" "normal" "Enter root folder ID or URL, press enter for default ( root )" " "&&printf -- "-> "&&read -r ROOT_FOLDER&&[ -n "$ROOT_FOLDER" ]&&{ _check_root_id _update_config||return 1;};}||{
        ROOT_FOLDER="root"
        _update_config "ACCOUNT_${ACCOUNT_NAME}_ROOT_FOLDER" "$ROOT_FOLDER" "$CONFIG"||return 1
    }&&printf "\n\n"
elif [ -z "$ROOT_FOLDER_NAME" ];then
    _check_root_id_name _update_config||return 1
fi
[ -z "$ROOT_FOLDER_NAME" ]&&{ _check_root_id_name "$UPDATE_DEFAULT_ROOTDIR"||return 1;}
return 0
}
_setup_workspace(){
# Resolve the workspace folder (where uploads land): either the configured
# root folder, or the folder path given via FOLDERNAME, creating each "/"
# separated component in turn under the previous one.
export FOLDERNAME ROOT_FOLDER ROOT_FOLDER_NAME WORKSPACE_FOLDER_ID WORKSPACE_FOLDER_NAME
if [ -z "$FOLDERNAME" ];then
    WORKSPACE_FOLDER_ID="$ROOT_FOLDER"
    WORKSPACE_FOLDER_NAME="$ROOT_FOLDER_NAME"
else
    while read -r foldername <&4&&{ [ -n "$foldername" ]||continue;};do
        WORKSPACE_FOLDER_ID="$(_create_directory "$foldername" "${WORKSPACE_FOLDER_ID:-$ROOT_FOLDER}")"||{ printf "%s\n" "$WORKSPACE_FOLDER_ID" 1>&2&&return 1;}
        WORKSPACE_FOLDER_NAME="$(_drive_info "$WORKSPACE_FOLDER_ID" name|_json_value name 1 1)"||{ printf "%s\n" "$WORKSPACE_FOLDER_NAME" 1>&2&&return 1;}
    done 4<<EOF
$(_split "$FOLDERNAME" "/")
EOF
fi
return 0
}
_process_arguments(){
# Walk the collected INPUT_FILE_N and INPUT_ID_N values and upload each
# file, folder or existing drive id into the workspace folder.
export SHARE SHARE_ROLE SHARE_EMAIL HIDE_INFO QUIET SKIP_DUPLICATES OVERWRITE \
    WORKSPACE_FOLDER_ID SOURCE_UTILS EXTRA_LOG SKIP_SUBDIRS INCLUDE_FILES EXCLUDE_FILES \
    QUIET PARALLEL_UPLOAD VERBOSE VERBOSE_PROGRESS CHECK_MODE DESCRIPTION DESCRIPTION_ALL \
    UPLOAD_MODE HIDE_INFO
_share_and_print_link(){
    # optionally share the given id, then print its drive link
    "${SHARE:-:}" "${1:-}" "$SHARE_ROLE" "$SHARE_EMAIL"
    [ -z "$HIDE_INFO" ]&&{
        _print_center "justify" "DriveLink" "${SHARE:+ (SHARED[$(printf "%.1s" "$SHARE_ROLE")])}" "-"
        _support_ansi_escapes&&[ "$((COLUMNS))" -gt 45 ] 2>|/dev/null&&_print_center "normal" '^ ^ ^' ' '
        "${QUIET:-_print_center}" "normal" "https://drive.google.com/open?id=${1:-}" " "
    }
    return 0
}
_SEEN="" index_process_arguments=0
TOTAL_FILE_INPUTS="$((TOTAL_FILE_INPUTS<0?0:TOTAL_FILE_INPUTS))"
until [ "$index_process_arguments" -eq "$TOTAL_FILE_INPUTS" ];do
    input=""
    _set_value i input "INPUT_FILE_$((index_process_arguments+=1))"
    # skip inputs we already processed
    case "$_SEEN" in
        *"$input"*)continue;;
        *)_SEEN="$_SEEN$input"
    esac
    if [ -f "$input" ];then
        export DESCRIPTION_FILE="$DESCRIPTION"
        _print_center "justify" "Given Input" ": FILE" "="
        _print_center "justify" "Upload Method" ": ${SKIP_DUPLICATES:-${OVERWRITE:-Create}}" "="&&_newline "\n"
        _upload_file_main noparse "$input" "$WORKSPACE_FOLDER_ID"
        if [ "${RETURN_STATUS:-}" = 1 ];then
            _share_and_print_link "${FILE_ID:-}"
            printf "\n"
        else
            for _ in 1 2;do _clear_line 1;done&&continue
        fi
    elif [ -d "$input" ];then
        input="$(cd "$input"&&pwd)"||return 1
        unset EMPTY
        export DESCRIPTION_FILE="${DESCRIPTION_ALL+:$DESCRIPTION}"
        _print_center "justify" "Given Input" ": FOLDER" "-"
        _print_center "justify" "Upload Method" ": ${SKIP_DUPLICATES:-${OVERWRITE:-Create}}" "="&&_newline "\n"
        FOLDER_NAME="${input##*/}"&&"$EXTRA_LOG" "justify" "Folder: $FOLDER_NAME" "="
        NEXTROOTDIRID="$WORKSPACE_FOLDER_ID"
        "$EXTRA_LOG" "justify" "Processing folder.." "-"
        [ -z "$SKIP_SUBDIRS" ]&&"$EXTRA_LOG" "justify" "Indexing subfolders.." "-"
        DIRNAMES="$(find "$input" -type d -not -empty)"
        [ -n "$INCLUDE_FILES" ]&&_tmp_dirnames="$(printf "%s\n" "$DIRNAMES"|grep -E "$INCLUDE_FILES")"&&DIRNAMES="$_tmp_dirnames"
        # fix: exclude filter must grep against EXCLUDE_FILES (was INCLUDE_FILES)
        [ -n "$EXCLUDE_FILES" ]&&_tmp_dirnames="$(printf "%s\n" "$DIRNAMES"|grep -Ev "$EXCLUDE_FILES")"&&DIRNAMES="$_tmp_dirnames"
        NO_OF_FOLDERS="$(($(printf "%s\n" "$DIRNAMES"|wc -l)))"&&NO_OF_SUB_FOLDERS="$((NO_OF_FOLDERS-1))"
        [ -z "$SKIP_SUBDIRS" ]&&_clear_line 1
        [ "$NO_OF_SUB_FOLDERS" = 0 ]&&SKIP_SUBDIRS="true"
        "$EXTRA_LOG" "justify" "Indexing files.." "-"
        FILENAMES="$(find "$input" -type f)"
        # fix: include filter must grep against INCLUDE_FILES (was EXCLUDE_FILES)
        [ -n "$INCLUDE_FILES" ]&&_tmp_filenames="$(printf "%s\n" "$FILENAMES"|grep -E "$INCLUDE_FILES")"&&FILENAMES="$_tmp_filenames"
        [ -n "$EXCLUDE_FILES" ]&&_tmp_filenames="$(printf "%s\n" "$FILENAMES"|grep -Ev "$EXCLUDE_FILES")"&&FILENAMES="$_tmp_filenames"
        _clear_line 1
        if [ -n "$SKIP_SUBDIRS" ];then
            # flat upload: one remote folder, all files go straight into it
            if [ -n "$FILENAMES" ];then
                NO_OF_FILES="$(($(printf "%s\n" "$FILENAMES"|wc -l)))"
                for _ in 1 2;do _clear_line 1;done
                "${QUIET:-_print_center}" "justify" "Folder: $FOLDER_NAME " "| $NO_OF_FILES File(s)" "="&&printf "\n"
                "$EXTRA_LOG" "justify" "Creating folder.." "-"
                { ID="$(_create_directory "$input" "$NEXTROOTDIRID")"&&export ID;}||{ "${QUIET:-_print_center}" "normal" "Folder creation failed" "-"&&printf "%s\n\n\n" "$ID" 1>&2&&continue;}
                _clear_line 1&&DIRIDS="$ID"
                [ -z "${PARALLEL_UPLOAD:-${VERBOSE:-$VERBOSE_PROGRESS}}" ]&&_newline "\n"
                _upload_folder "${PARALLEL_UPLOAD:-normal}" noparse "$FILENAMES" "$ID"
                [ -n "${PARALLEL_UPLOAD:+${VERBOSE:-$VERBOSE_PROGRESS}}" ]&&_newline "\n\n"
            else
                for _ in 1 2;do _clear_line 1;done&&EMPTY=1
            fi
        else
            # recursive upload: mirror the local directory tree first
            if [ -n "$FILENAMES" ];then
                NO_OF_FILES="$(($(printf "%s\n" "$FILENAMES"|wc -l)))"
                for _ in 1 2;do _clear_line 1;done
                "${QUIET:-_print_center}" "justify" "$FOLDER_NAME " "| $((NO_OF_FILES)) File(s) | $((NO_OF_SUB_FOLDERS)) Sub-folders" "="
                _newline "\n"&&"$EXTRA_LOG" "justify" "Creating Folder(s).." "-"&&_newline "\n"
                unset status
                while read -r dir <&4&&{ [ -n "$dir" ]||continue;};do
                    # after the first folder, resolve the parent id from DIRIDS
                    [ -n "$status" ]&&__dir="$(_dirname "$dir")"&&__temp="$(printf "%s\n" "$DIRIDS"|grep -F "|:_//_:|$__dir|:_//_:|")"&&NEXTROOTDIRID="${__temp%%"|:_//_:|$__dir|:_//_:|"}"
                    NEWDIR="${dir##*/}"&&_print_center "justify" "Name: $NEWDIR" "-" 1>&2
                    ID="$(_create_directory "$NEWDIR" "$NEXTROOTDIRID")"||{ "${QUIET:-_print_center}" "normal" "Folder creation failed" "-"&&printf "%s\n\n\n" "$ID" 1>&2&&continue;}
                    DIRIDS="$(printf "%b%s|:_//_:|%s|:_//_:|\n" "${DIRIDS:+$DIRIDS\n}" "$ID" "$dir")"
                    for _ in 1 2;do _clear_line 1 1>&2;done
                    "$EXTRA_LOG" "justify" "Status" ": $((status+=1)) / $((NO_OF_FOLDERS))" "=" 1>&2
                done 4<<EOF
$(printf "%s\n" "$DIRNAMES")
EOF
                export DIRIDS
                _clear_line 1
                _upload_folder "${PARALLEL_UPLOAD:-normal}" parse "$FILENAMES"
                [ -n "${PARALLEL_UPLOAD:+${VERBOSE:-$VERBOSE_PROGRESS}}" ]&&_newline "\n\n"
            else
                for _ in 1 2 3;do _clear_line 1;done&&EMPTY=1
            fi
        fi
        export SUCCESS_STATUS ERROR_STATUS ERROR_FILES
        if [ "$EMPTY" != 1 ];then
            [ -z "${VERBOSE:-$VERBOSE_PROGRESS}" ]&&for _ in 1 2;do _clear_line 1;done
            # first line of DIRIDS holds the id of the top-level created folder
            FOLDER_ID="$(_tmp="$(printf "%s\n" "$DIRIDS"|while read -r line;do printf "%s\n" "$line"&&break;done)"&&printf "%s\n" "${_tmp%%"|:_//_:|"*}")"
            [ "$SUCCESS_STATUS" -gt 0 ]&&_share_and_print_link "$FOLDER_ID"
            _newline "\n"
            [ "$SUCCESS_STATUS" -gt 0 ]&&"${QUIET:-_print_center}" "justify" "Total Files " "Uploaded: $SUCCESS_STATUS" "="
            [ "$ERROR_STATUS" -gt 0 ]&&"${QUIET:-_print_center}" "justify" "Total Files " "Failed: $ERROR_STATUS" "="&&{
                if [ -t 1 ];then
                    # many failures: write them to a .failed log instead
                    { [ "$ERROR_STATUS" -le 25 ]&&printf "%s\n" "$ERROR_FILES";}||{
                        epoch_time="$(date +'%s')" log_file_name="${0##*/}_${FOLDER_NAME}_$epoch_time.failed"
                        i=0&&until ! [ -f "$log_file_name" ];do
                            : $((i+=1))&&log_file_name="${0##*/}_${FOLDER_NAME}_$((epoch_time+i)).failed"
                        done
                        printf "%s\n%s\n%s\n\n%s\n%s\n" \
                            "Folder name: $FOLDER_NAME | Folder ID: $FOLDER_ID" \
                            "Run this command to retry the failed uploads:" \
                            "    ${0##*/} --skip-duplicates \"$input\" --root-dir \"$NEXTROOTDIRID\" ${SKIP_SUBDIRS:+-s} ${PARALLEL_UPLOAD:+--parallel} ${PARALLEL_UPLOAD:+$NO_OF_PARALLEL_JOBS}" \
                            "Failed files:" \
                            "$ERROR_FILES" >>"$log_file_name"
                        printf "%s\n" "To see the failed files, open \"$log_file_name\""
                        printf "%s\n" "To retry the failed uploads only, use -d / --skip-duplicates flag. See log file for more help."
                    }
                else
                    printf "%s\n" "$ERROR_FILES"
                fi
            }
            printf "\n"
        else
            for _ in 1 2 3;do _clear_line 1;done
            "${QUIET:-_print_center}" 'justify' "Empty Folder" ": $FOLDER_NAME" "=" 1>&2
            printf "\n"
        fi
    fi
done
# second pass: clone existing drive ids given with -i/--id
_SEEN="" index_process_arguments=0
TOTAL_ID_INPUTS="$((TOTAL_ID_INPUTS<0?0:TOTAL_ID_INPUTS))"
until [ "$index_process_arguments" -eq "$TOTAL_ID_INPUTS" ];do
    gdrive_id=""
    # fix: pass the "i" (indirect) mode to _set_value like the INPUT_FILE_
    # loop above does; without it gdrive_id was never populated
    _set_value i gdrive_id "INPUT_ID_$((index_process_arguments+=1))"
    case "$_SEEN" in
        *"$gdrive_id"*)continue;;
        *)_SEEN="$_SEEN$gdrive_id"
    esac
    _print_center "justify" "Given Input" ": ID" "="
    "$EXTRA_LOG" "justify" "Checking if id exists.." "-"
    [ "$CHECK_MODE" = "md5Checksum" ]&&param="md5Checksum"
    json="$(_drive_info "$gdrive_id" "name,mimeType,size${param:+,$param}")"||:
    if ! printf "%s\n" "$json"|_json_value code 1 1 2>|/dev/null 1>&2;then
        type="$(printf "%s\n" "$json"|_json_value mimeType 1 1||:)"
        name="$(printf "%s\n" "$json"|_json_value name 1 1||:)"
        size="$(printf "%s\n" "$json"|_json_value size 1 1||:)"
        [ "$CHECK_MODE" = "md5Checksum" ]&&md5="$(printf "%s\n" "$json"|_json_value md5Checksum 1 1||:)"
        for _ in 1 2;do _clear_line 1;done
        case "$type" in
            *folder*)export DESCRIPTION_FILE="${DESCRIPTION_ALL+:$DESCRIPTION}"
                "${QUIET:-_print_center}" "justify" "Folder not supported." "=" 1>&2&&_newline "\n" 1>&2&&continue
                ;;
            *)export DESCRIPTION_FILE="$DESCRIPTION"
                _print_center "justify" "Given Input" ": File ID" "="
                _print_center "justify" "Upload Method" ": ${SKIP_DUPLICATES:-${OVERWRITE:-Create}}" "="&&_newline "\n"
                _clone_file "${UPLOAD_MODE:-create}" "$gdrive_id" "$WORKSPACE_FOLDER_ID" "$name" "$size" "$md5"||{ for _ in 1 2;do _clear_line 1;done&&continue;}
        esac
        _share_and_print_link "$FILE_ID"
        printf "\n"
    else
        _clear_line 1
        "${QUIET:-_print_center}" "justify" "File ID (${HIDE_INFO:-gdrive_id})" " invalid." "=" 1>&2
        printf "\n"
    fi
done
return 0
}
_main_helper(){
# Top level flow once utils are sourced: parse args, check connectivity, set
# up tmp files / traps / credentials / root dir / workspace, then process
# the inputs and report the elapsed time.
_setup_arguments "$@"||exit 1
"${SKIP_INTERNET_CHECK:-_check_internet}"||exit 1
# prefix for the token/session/status tmp files
TMPFILE="$(command -v mktemp 1>|/dev/null&&mktemp -u)"||TMPFILE="$(pwd)/.$(_t="$(_epoch)"&&printf "%s\n" "$((_t*_t))").tmpfile"
export TMPFILE
_setup_traps
"$EXTRA_LOG" "justify" "Checking credentials.." "-"
{ _check_credentials&&_clear_line 1;}||{ "${QUIET:-_print_center}" "normal" "[ Error: Credentials checking failed ]" "="&&exit 1;}
"${QUIET:-_print_center}" "normal" " Account: $ACCOUNT_NAME " "="
"$EXTRA_LOG" "justify" "Checking root dir.." "-"
{ _setup_root_dir&&_clear_line 1;}||{ "${QUIET:-_print_center}" "normal" "[ Error: Rootdir setup failed ]" "="&&exit 1;}
_print_center "justify" "Root dir properly configured." "="
# account-management-only invocation (e.g. --new-account with no inputs)
[ -n "$CONTINUE_WITH_NO_INPUT" ]&&exit 0
"$EXTRA_LOG" "justify" "Checking Workspace Folder.." "-"
{ _setup_workspace&&for _ in 1 2;do _clear_line 1;done;}||{ "${QUIET:-_print_center}" "normal" "[ Error: Workspace setup failed ]" "="&&exit 1;}
_print_center "justify" "Workspace Folder: $WORKSPACE_FOLDER_NAME" "="
"${HIDE_INFO:-_print_center}" "normal" " $WORKSPACE_FOLDER_ID " "-"&&_newline "\n"
START="$(_epoch)"
# hide the cursor while progress lines are being redrawn
[ -n "$SUPPORT_ANSI_ESCAPES" ]&&printf "\033[?25l"
_process_arguments
END="$(_epoch)"
DIFF="$((END-START))"
"${QUIET:-_print_center}" "normal" " Time Elapsed: ""$((DIFF/60))"" minute(s) and ""$((DIFF%60))"" seconds. " "="
}
# stop auto-exporting variables (counterpart to an earlier 'set -a')
set +a
main(){
# Entry point: source the util files (either from the repo layout, or from
# this very script when running the standalone release), then run the
# uploader.
[ $# = 0 ]&&{
    printf "No valid arguments provided, use -h/--help flag to see usage.\n"
    exit 0
}
export _SHELL="sh"
if [ -z "$SELF_SOURCE" ];then
    export UTILS_FOLDER="${UTILS_FOLDER:-$PWD}"
    export COMMON_PATH="$UTILS_FOLDER/common"
    # fix: chain upload-utils.sh with && like every other file, so a failed
    # source is not masked by upload-common.sh succeeding afterwards
    export SOURCE_UTILS=". '$UTILS_FOLDER/sh/common-utils.sh' &&
        . '$COMMON_PATH/parser.sh' &&
        . '$COMMON_PATH/flags.sh' &&
        . '$COMMON_PATH/auth-utils.sh' &&
        . '$COMMON_PATH/common-utils.sh' &&
        . '$COMMON_PATH/drive-utils.sh' &&
        . '$COMMON_PATH/upload-utils.sh' &&
        . '$COMMON_PATH/upload-common.sh'"
else
    SCRIPT_PATH="$(cd "$(_dirname "$0")"&&pwd)/${0##*\/}"&&export SCRIPT_PATH
    export SOURCE_UTILS="SOURCED_GUPLOAD=true . '$SCRIPT_PATH'"
fi
eval "$SOURCE_UTILS"||{ printf "Error: Unable to source util files.\n"&&exit 1;}
set -o noclobber
export _SCRIPT_KILL_SIGNAL="-9"
_main_helper "$@"||exit 1
}
# run main unless this file is being sourced only for its function definitions
{ [ -z "$SOURCED_GUPLOAD" ]&&main "$@";}||:
| true |
67798cddb5d0554c7112b0739339b2810f51303d | Shell | seeburger-ag/jbossts | /scripts/hudson/narayana.sh | UTF-8 | 4,864 | 3.1875 | 3 | [] | no_license | if [ -z "${WORKSPACE}" ]; then
echo "UNSET WORKSPACE"
exit -1;
fi
# FOR DEBUGGING SUBSEQUENT ISSUES
free -m
#Make sure no JBoss processes running
# NOTE(review): extracts PIDs by column position from ps output - fragile,
# assumes the default ps formatting; confirm before reusing elsewhere.
for i in `ps -eaf | grep java | grep "standalone*.xml" | grep -v grep | cut -c10-15`; do kill $i; done
#BUILD JBOSSTS
# extra script arguments ($@) are forwarded to ant
ant -Demma.enabled=false -Dpublican=false $@ jbossall
if [ "$?" != "0" ]; then
exit -1
fi
#BUILD JBOSS-AS
# clone the jbosstm fork and rebase the JBTM branch onto upstream 7.1-next
cd ${WORKSPACE}
rm -rf jboss-as
git clone git://github.com/jbosstm/jboss-as.git
if [ "$?" != "0" ]; then
exit -1
fi
cd jboss-as
git checkout -t origin/JBTM_EAP60x_MP
if [ "$?" != "0" ]; then
exit -1
fi
git remote add upstream git://github.com/jbossas/jboss-as.git
git pull --rebase --ff-only upstream 7.1-next
if [ "$?" != "0" ]; then
exit -1
fi
MAVEN_OPTS=-XX:MaxPermSize=256m ./build.sh clean install -DskipTests
if [ "$?" != "0" ]; then
exit -1
fi
#START JBOSS
# the build directory name carries the version; pick it up dynamically
JBOSS_VERSION=`ls -1 ${WORKSPACE}/jboss-as/build/target | grep jboss-as`
export JBOSS_HOME=${WORKSPACE}/jboss-as/build/target/${JBOSS_VERSION}
cp ${JBOSS_HOME}/docs/examples/configs/standalone-xts.xml ${JBOSS_HOME}/standalone/configuration
$JBOSS_HOME/bin/standalone.sh --server-config=standalone-xts.xml&
# NOTE(review): fixed sleep assumes the server is up within 10s - confirm
sleep 10
#RUN XTS AS INTEGRATION TESTS
cd ${WORKSPACE}/jboss-as/testsuite/integration/xts
mvn test -Pxts.integration.tests.profile
if [ "$?" != "0" ]; then
exit -1
fi
#RUN XTS UNIT TESTS
cd ${WORKSPACE}
cd XTS
if [ "$?" != "0" ]; then
# shut the server down before bailing out so the slave is left clean
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
ant -Dpublican=false -Dtesttype=tests-11 -Dsartype=sar-11 install
if [ "$?" != "0" ]; then
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
cp xts-install/tests/*ear $JBOSS_HOME/standalone/deployments/
if [ "$?" != "0" ]; then
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
# give the deployment scanner time to pick the ears up
sleep 10
export MYTESTIP_1=localhost
cd xts-install/tests
ant -f run-tests.xml tests-11
if [ "$?" != "0" ]; then
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
# Check output of Tests
# ant does not fail the build on test failures, so grep the junit xml for
# testsuite elements whose errors/failures attribute is non-zero
ERRORS=$(cat reports/TEST-* | grep "<testsuite" | grep -v errors=\"0\")
FAILURES=$(cat reports/TEST-* | grep "<testsuite" | grep -v failures=\"0\")
if [ "$ERRORS" != "" -o "$FAILURES" != "" ]; then
echo $FAILURES
echo $ERRORS
echo "Failure(s) and/or error(s) found in XTS unit and/or interop tests. See previous line"
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
#RUN INTEROP11 TESTS
cd ${WORKSPACE}
cd XTS
cp xts-install/interop-tests/interop11.war $JBOSS_HOME/standalone/deployments/
if [ "$?" != "0" ]; then
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
sleep 10
cd xts-install/interop-tests
mkdir reports
ant -f run-interop-tests.xml -Dserver.hostname=localhost wstx11-interop-tests
if [ "$?" != "0" ]; then
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
ERRORS=$(cat reports/Test-* | grep "<testsuite" | grep -v errors=\"0\")
FAILURES=$(cat reports/Test-* | grep "<testsuite" | grep -v failures=\"0\")
if [ "$ERRORS" != "" -o "$FAILURES" != "" ]; then
echo $ERRORS
echo $FAILURES
echo "Failure(s) and/or error(s) found in XTS unit and/or interop tests. See previous line"
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
exit -1
fi
#SHUTDOWN JBOSS
$JBOSS_HOME/bin/jboss-cli.sh --connect command=:shutdown
if [ "$?" != "0" ]; then
exit -1
fi
#REMOVE TEST WAR and EAR
rm -f $JBOSS_HOME/standalone/deployments/*war*
if [ "$?" != "0" ]; then
exit -1
fi
rm -f $JBOSS_HOME/standalone/deployments/*ear*
if [ "$?" != "0" ]; then
exit -1
fi
#RUN XTS CRASH RECOVERY TESTS
cd ${WORKSPACE}
cd XTS/sar/tests
ant
if [ "$?" != "0" ]; then
exit -1
fi
cd ${WORKSPACE}
cd XTS/sar/crash-recovery-tests
if [ "$?" != "0" ]; then
exit -1
fi
mvn clean test -Parq -Dorg.jboss.remoting-jmx.timeout=300
if [ "$?" != "0" ]; then
exit -1
fi
java -cp target/classes/ com.arjuna.qa.simplifylogs.SimplifyLogs ./target/log/ ./target/log-simplified
if [ "$?" != "0" ]; then
exit -1
fi
# Compile and run tx-bridge tests
cd ${WORKSPACE}/txbridge
if [ "$?" != "0" ]; then
exit -1
fi
ant dist
if [ "$?" != "0" ]; then
exit -1
fi
cd tests
if [ "$?" != "0" ]; then
exit -1
fi
ant enable-recovery-listener -Djboss.home=$JBOSS_HOME
if [ "$?" != "0" ]; then
exit -1
fi
ant test
if [ "$?" != "0" ]; then
exit -1
fi
#RUN QA TESTS
cd ${WORKSPACE}/qa
if [ "$?" != "0" ]; then
exit -1
fi
[ -z "${MFACTOR+x}" ] || sed -i TaskImpl.properties -e "s/COMMAND_LINE_12=-DCoreEnvironmentBean.timeoutFactor=[0-9]*/COMMAND_LINE_12=-DCoreEnvironmentBean.timeoutFactor=${MFACTOR}/"
sed -i TaskImpl.properties -e "s#^COMMAND_LINE_0=.*#COMMAND_LINE_0=${JAVA_HOME}/bin/java#"
if [ "$?" != "0" ]; then
exit -1
fi
ant -Ddriver.url=file:///home/hudson/dbdrivers get.drivers
if [ "$?" != "0" ]; then
exit -1
fi
ant -f run-tests.xml ci-tests
if [ "$?" != "0" ]; then
exit -1
fi
| true |
0af0e4dbf8603168dbc51844625edb080a9cf902 | Shell | dhenkel92/hcloud-kubernetes | /packer/provisioners/master/master.sh | UTF-8 | 3,679 | 2.609375 | 3 | [] | no_license | #! /bin/bash
set -ex
echo "Using K8s Version: $K8S_VERSION"
# General setup
apt-get update
apt-get dist-upgrade -y
# Setup executables: fetch the control-plane binaries for the requested
# release straight from the Kubernetes release bucket.
wget https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kube-apiserver -O /usr/bin/kube-apiserver
wget https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kube-controller-manager -O /usr/bin/kube-controller-manager
wget https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kube-scheduler -O /usr/bin/kube-scheduler
wget https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kubectl -O /usr/bin/kubectl
chmod +x /usr/bin/kube*
# Setup kube apiserver.
# NOTE: \${...} sequences below are escaped so the literal ${...} reaches the
# unit file, where systemd expands them from EnvironmentFile (k8s.env).
# The backslash-newline continuations inside these unquoted heredocs are
# consumed by the shell, so each ExecStart= is written as one long line,
# which systemd accepts.
cat <<EOF > /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=/var/lib/kubernetes/k8s.env
ExecStart=/usr/bin/kube-apiserver \
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=\${INTERNAL_IP} \
  --allow-privileged=true \
  --apiserver-count=3 \
  --authorization-mode=Node,RBAC \
  --bind-address=0.0.0.0 \
  --client-ca-file=/var/lib/kubernetes/ca.pem \
  --etcd-cafile=/var/lib/kubernetes/ca.pem \
  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \
  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \
  --etcd-servers=https://10.0.1.50:2379,https://10.0.1.51:2379,https://10.0.1.52:2379 \
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \
  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \
  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \
  --kubelet-https=true \
  --runtime-config api/all=true \
  --service-account-key-file=/var/lib/kubernetes/service-account.pem \
  --service-cluster-ip-range=\${CLUSTER_CIDR_RANGE} \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Setup kube controller manager (leader-elected; signs cluster certs with the
# CA material expected under /var/lib/kubernetes/).
cat <<EOF > /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=/var/lib/kubernetes/k8s.env
ExecStart=/usr/bin/kube-controller-manager \
  --allocate-node-cidrs=false \
  --cluster-cidr=\${CLUSTER_CIDR_RANGE} \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \
  --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --root-ca-file=/var/lib/kubernetes/ca.pem \
  --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \
  --service-cluster-ip-range=\${SERVICE_CLUSTER_CIDR_RANGE} \
  --use-service-account-credentials=true \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Setup kube scheduler (no EnvironmentFile needed; only kubeconfig paths).
cat <<EOF > /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/bin/kube-scheduler \
  --leader-elect=true \
  --kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig \
  --authentication-kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig \
  --authorization-kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
| true |
31d3fe6149bca1fecb4fb8cb56f0fb2b407abf15 | Shell | obsidiansystems/nixpkgs | /pkgs/development/dotnet-modules/python-language-server/updater.sh | UTF-8 | 1,137 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env nix-shell
#!nix-shell -i bash -p gnused jq common-updater-scripts nuget-to-nix dotnet-sdk_3 nix-prefetch-git
# Update script for python-language-server: prefetch the latest upstream
# revision, bump version/hash in the package, restore the NuGet packages
# and regenerate deps.nix.

set -eo pipefail

cd "$(dirname "${BASH_SOURCE[0]}")"
deps_file="$(realpath ./deps.nix)"

# Prefetch the upstream repo and pull version (commit date), hash and rev
# out of the prefetch JSON.
nix-prefetch-git https://github.com/microsoft/python-language-server --quiet > repo_info
new_version="$(jq -r ".date" < repo_info | cut -d"T" -f1)"
new_hash="$(jq -r ".sha256" < repo_info)"
new_rev="$(jq -r ".rev" < repo_info)"
rm repo_info

old_rev="$(sed -nE 's/\s*rev = "(.*)".*/\1/p' ./default.nix)"

if [[ $new_rev == $old_rev ]]; then
  echo "Already up to date!"
  exit 0
fi

pushd ../../../..
update-source-version python-language-server "$new_version" "$new_hash" --rev="$new_rev"
store_src="$(nix-build -A python-language-server.src --no-out-link)"
src="$(mktemp -d /tmp/pylang-server-src.XXX)"
cp -rT "$store_src" "$src"
chmod -R +w "$src"
# Remove the writable working copy on any exit from here on.
# BUG FIX: this was previously a broken three-line `trap '' ... '' EXIT`
# (Nix ''...'' string syntax leaked into bash), which never registered the
# cleanup handler and aborted the script with "command not found".
trap 'rm -r "$src"' EXIT

pushd "$src"

export DOTNET_NOLOGO=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1

mkdir ./nuget_pkgs
dotnet restore src/LanguageServer/Impl/Microsoft.Python.LanguageServer.csproj --packages ./nuget_pkgs

nuget-to-nix ./nuget_pkgs > "$deps_file"
| true |
41823e6cb1f499fa3b1a38023535db765dc65164 | Shell | openebs/maya | /hack/cstor_volume/test/delete.sh | UTF-8 | 577 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!bin/bash
# Tear down the OpenEBS test deployment: force-delete the PVC pod, remove the
# applied manifests, then drop every OpenEBS CRD.  Helper functions
# (kubectlDelete) and color variables come from util.sh.
source ./util.sh

echo -e ${GREEN}REMOVING OPENEBS${NC}

# Fall back to the default manifest names when the caller supplied nothing.
POOL_FILE="${POOL_FILE:-openebs-config.yaml}"
OP_FILE="${OP_FILE:-openebs-operator.yaml}"

kubectl delete po -n openebs -l "openebs.io/persistent-volume-claim=openebs-pvc" --force --grace-period=0
kubectlDelete "$POOL_FILE"
kubectlDelete "$OP_FILE"
kubectl delete crd castemplates.openebs.io cstorpools.openebs.io cstorvolumereplicas.openebs.io cstorvolumes.openebs.io disks.openebs.io runtasks.openebs.io storagepoolclaims.openebs.io storagepools.openebs.io
3650ae90780c70a8993094653340c23936d24627 | Shell | baizizai/shell_scripts | /functions/base.sh | UTF-8 | 683 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Abort with status 2 unless the script is being run by root.
function ifRoot() {
if [ "$UID" -ne 0 ]; then
echo -e "${RED_COLOR}当前用户为普通用户,需要切换root用户执行该脚本${RES}"
exit 2
fi
}
# Report the outcome of the previous command: on failure print $1 (red) and
# exit 1; on success print $2 (green).  Must be called immediately after the
# command being checked, since it reads that command's $?.
function result() {
local last_status=$?
if [ "$last_status" -eq 0 ]; then
echo -e "${GREEN_COLOR}$2${RES}"
else
echo -e "${RED_COLOR}$1${RES}"
exit 1
fi
}
# Simple install lock: if ./lock/$1.lock exists the component was already
# installed, so announce that and exit 3; otherwise announce the install start.
function lock() {
if [ ! -f "./lock/$1.lock" ]; then
echo -e "${GREEN_COLOR}$1开始安装${RES}"
else
echo -e "${RED_COLOR}$1已经安装过${RES}"
exit 3
fi
}
| true |
39e9a7453604a976f7a550c08f9200e216556a23 | Shell | weitzner/system-automation | /pip/update_pip.sh | UTF-8 | 684 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Check for outdated pip packages and mail a report when any are found.
# Must run as root (typically from cron).
if [ $(/usr/bin/id -u) -ne 0 ]; then
    echo "Must be run as super-user root!"
    exit 1
fi

# Edit this to point to "send_email.py"
EMAIL="/path/to/send_email.py"
if [ ! -f "$EMAIL" ]; then
    echo "\"$EMAIL\" not found! Check paths."
    exit 1
fi

# Edit this to point to your pip installation
# This can be found by executing "which pip"
PIP="/usr/local/bin/pip"
if [ ! -f "$PIP" ]; then
    echo "\"$PIP\" not found! Check paths."
    exit 1
fi

TMPOUTDATED=$(mktemp /tmp/pip.outdated.XXX)

# List outdated packages (ignoring bonjour-py) into the temp file.
# BUG FIX: the redirect previously went to the literal file "TMPOUTDATED"
# (missing "$"), so the mktemp file stayed empty and mail was never sent,
# and a stray "TMPOUTDATED" file was left in the working directory.
"$PIP" list --outdated | grep -v bonjour-py &> "$TMPOUTDATED"

# Mail the report only when at least one outdated package was found.
if [ -s "$TMPOUTDATED" ]; then
    "$EMAIL" -s "New pip updates available" -m "$(cat "$TMPOUTDATED")"
fi

rm -f "$TMPOUTDATED"
| true |
c92f605083f358ce15268f3bc226b2a7087c165c | Shell | plone/ploneorg.admin | /sysadmin/scripts/media-plone-org-bin-usersync | UTF-8 | 1,006 | 3.546875 | 4 | [] | no_license | #!/bin/bash
set -e
# Sync the signed DAV user list into the Postgres passwd table and the
# Subversion authz group file, then reload Apache.
userfile=/srv/antiloop.plone.org/http/dav/users.gpg
autzfile=/srv/svn.plone.org/etc/svn_auth
if [ ! -f $userfile ] ; then
    echo User file not preset - aborting >&2
    exit 1
fi
# Refuse to proceed unless the user file carries a valid PGP signature.
if ! gpg -q --verify $userfile ; then
    echo Unable to verify PGP signature >&2
    exit 2
fi
# Decrypt the user list; each line is "login password group1,group2,...".
tmp=$(tempfile)
cat $userfile | gpg -q > $tmp
# Build a SQL transaction that replaces the whole passwd table.
sql=$(tempfile)
cat <<EOF > $sql
BEGIN;
DELETE FROM passwd;
EOF
cat $tmp | while read user password groups ; do
    echo "INSERT INTO passwd (login,password) VALUES ('$user', '$password');" >> $sql
done
echo COMMIT >> $sql
# psql runs as the postgres user, so the file must be world-readable briefly.
chmod 644 $sql
sudo -u postgres psql -f $sql plone.org
chmod 600 $sql
# Rebuild the AUTOGROUPS section of the svn authz file: keep everything up to
# the BEGIN marker, regenerate one "group=user1,user2" line per project from
# the decrypted user list, then append everything from the END marker on.
sed -ne '0,/^### BEGIN AUTOGROUPS/p' $autzfile > $sql
for group in plone collective archetypes ; do
    users=$(sed -ne "s/^\([^ ]\+\) [^ ]\+ .*$group.*/\1/p" $tmp)
    echo $group=$(echo $users|sed -e 's/ /,/g') >> $sql
done
sed -ne '/^### END AUTOGROUPS/,$p' $autzfile >> $sql
# Keep a backup of the previous authz file before overwriting in place.
cp -a $autzfile ${autzfile}.old
cat $sql > $autzfile
/etc/init.d/apache2 reload
rm -f $sql $tmp
| true |
d330164e1f63109f4fd014ede66356618748d192 | Shell | CathyGyvv/greenWave-react | /checkFormat.sh | UTF-8 | 442 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# ./node_modules/.bin/standard --verbose | ./node_modules/.bin/snazzy
# Run JavaScript Standard Style (with automatic fixes) over the project
# sources and pretty-print the report through snazzy; the pipeline's exit
# status decides whether the commit may proceed.
./node_modules/.bin/standard 'assets/components/**/*.js' 'assets/components/**/*.jsx' 'assets/pages/**/*.js' 'assets/pages/**/*.jsx' 'assets/store/**/*.js' 'assets/store/**/*.jsx' --fix | ./node_modules/.bin/snazzy
if [[ $? -eq 0 ]]; then
    echo 'Nothing is error'
else
    echo 'JavaScript Standard Style errors were detected. Aborting commit.'
    exit 1
fi
e998b29dd434e2257fd6ea90dd1de8e0d643d923 | Shell | gre/morpion-solitaire | /export.sh | UTF-8 | 220 | 2.546875 | 3 | [
"MIT"
] | permissive | tarname="morpion-gaetan.renaudeau.tar.gz"
# Stage the project sources in a clean directory, archive it as $tarname,
# then remove the staging directory again.
directory="morpion-gaetan.renaudeau"
rm -rf -- "$directory"
mkdir -- "$directory"
cp -- *.c "$directory"
cp -- *.h "$directory"
cp -- Makefile "$directory"
tar cvzf "$tarname" "$directory"
rm -rf -- "$directory"
633c9f1070164a93a313deead2026ef09c5336ce | Shell | lerra/cloudflare-ipv6-ddns | /update-v6-cloudflare | UTF-8 | 2,957 | 3 | 3 | [] | no_license | #!/bin/bash
#based on https://github.com/billkit/cloudflare-DDNS/blob/bc67f68f09abcecb56ef1cbbb087bd993a7244d9/cloudflare-v6.sh but modified. Tested on ubuntu 18.04 & 20.04
# Dynamic-DNS updater: publishes this host's global IPv6 address to a
# Cloudflare AAAA record via the v4 API.
auth_email="user@mail.com"
auth_key="xxxxxxxxxxx"
## your cloudflare account key above
zone_name="domain.com"
record_name="record.domain.com"
rec_type=AAAA
interface=eth0
# Pick the first global (non link-local) IPv6 address on the interface above.
# BUG FIX: this previously ran `/sbin/ifconfig $eth0` with the undefined
# variable $eth0, so ifconfig listed every interface instead of the one
# configured in $interface.
content=$(/sbin/ifconfig "$interface" | /bin/grep "inet6" | /usr/bin/awk -F " " '{print $2}' | /usr/bin/awk '{print $1}'|/bin/grep -v fe80)
echo $(date)
echo "Checking $rec_type for $record_name"
# Resolve the zone id for the configured zone name.
zone_id=`curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$zone_name" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "content-Type: application/json" | \
  grep -Eo '"id":"[^"]*' | head -1|sed -n 's/"id":"*//p'`
echo "Zone ID: " $zone_id
# Resolve the record id for the AAAA record we manage.
record_id=`curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records?type=$rec_type&name=$record_name" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "content-Type: application/json" | \
  grep -Eo '"id":"[^"]*' | head -1 | sed -n 's/"id":"*//p'`
echo "Record ID: " $record_id
# Fetch the record's current content so we can skip no-op updates.
current_content=`curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "content-Type: application/json" | \
  grep -Eo '"content":"[^"]*' | head -1 | sed -n 's/"content":"*//p'`
echo "Current Content: " $current_content
echo "New Content: " $content
if [[ $current_content == $content ]]; then
    echo "Content not changed. Exiting."
    exit 0
else
    echo "Content Changed. Update Cloudflare."
fi
# Work around an update quirk: delete the record, re-query the id, then PUT.
echo "will delete and run agian as of bug"
delete_first=`curl -X DELETE "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "Content-Type: application/json"`
echo "Zone ID: " $zone_id
record_id=`curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records?type=$rec_type&name=$record_name" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "content-Type: application/json" | \
  grep -Eo '"id":"[^"]*' | head -1 | sed -n 's/"id":"*//p'`
echo "Record ID: " $record_id
# Push the new address.
update=`curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
  -H "X-Auth-Email: $auth_email" \
  -H "X-Auth-Key: $auth_key" \
  -H "content-Type: application/json" \
  -d "{\"id\":\"$zone_id\",\"type\":\"$rec_type\",\"name\":\"$record_name\",\"content\":\"$content\"}"`
if [[ $update == *"\"success\":false"* ]]; then
    # BUG FIX: removed a stray `echo "test"` debug line, and use echo -e so
    # the \n in the message is rendered instead of printed literally.
    echo -e "API UPDATE FAILED. DUMPING RESULTS:\n$update"
    exit 1
else
    # BUG FIX: this previously referenced the undefined $record_type;
    # report the record name that was updated.
    message="$record_name changed to: $content"
    echo "$message"
fi
651e6dc73b52b0583afd9ac2833df6ef8c8d1257 | Shell | xsamurai/steamlink | /docker/files/steamlink.sh | UTF-8 | 3,940 | 4.25 | 4 | [] | no_license | #!/bin/bash
#
# Script to launch the Steam Link app on Raspberry Pi
# Absolute directory containing this script; used to locate bundled files.
TOP=$(cd "$(dirname "$0")" && pwd)
# Show a dialog to the user.  $1 is a zenity style flag (--error, --warning,
# --info); the remaining arguments form the message text.  Falls back to an
# xterm (or plain terminal when $DISPLAY is empty) if zenity is unavailable.
function show_message()
{
    style=$1
    shift
    if ! zenity "$style" --no-wrap --text="$*" 2>/dev/null; then
        case "$style" in
        --error)
            title=$"Error"
            ;;
        --warning)
            title=$"Warning"
            ;;
        *)
            title=$"Note"
            ;;
        esac
        # Save the prompt in a temporary file because it can have newlines in it
        tmpfile="$(mktemp || echo "/tmp/steam_message.txt")"
        echo -e "$*" >"$tmpfile"
        if [ "$DISPLAY" = "" ]; then
            cat $tmpfile; echo -n 'Press enter to continue: '; read input
        else
            xterm -T "$title" -e "cat $tmpfile; echo -n 'Press enter to continue: '; read input"
        fi
        rm -f "$tmpfile"
    fi
}
# Check to make sure the hardware is capable of streaming at 1080p60
# Model code information from:
# https://www.raspberrypi.org/documentation/hardware/raspberrypi/revision-codes/README.md
if [ ! -f "$TOP/.ignore_cpuinfo" ]; then
    revision=$(cat /proc/cpuinfo | fgrep "Revision" | sed 's/.*: //')
    # Revision is hex; bits 12-15 encode the SoC model.
    revision=$((16#$revision))
    processor=$(($(($revision >> 12)) & 0xf)) # 0: BCM2835, 1: BCM2836, 2: BCM2837
    if [ $processor -lt 2 ]; then
        show_message --error $"You need to run on a Raspberry Pi 3 or newer - aborting."
        exit 1
    fi
fi
# Check to make sure the experimental OpenGL driver isn't enabled
if [ ! -f "$TOP/.ignore_kms" ]; then
    if egrep '^dtoverlay=vc4-kms-v3d' /boot/config.txt >/dev/null 2>&1; then
        show_message --error $"You have dtoverlay=vc4-kms-v3d in /boot/config.txt, which will cause a black screen when starting streaming - aborting.\nTry commenting out that line and rebooting."
        exit 1
    fi
fi
# Install any additional dependencies, as needed
if [ -z "${STEAMSCRIPT:-}" ]; then
    STEAMSCRIPT=/usr/bin/steamlink
fi
STEAMDEPS="$(dirname $STEAMSCRIPT)/steamlinkdeps"
if [ -f "$STEAMDEPS" -a -f "$TOP/steamlinkdeps.txt" ]; then
    "$STEAMDEPS" "$TOP/steamlinkdeps.txt"
fi
# Check to make sure the Steam Controller rules are installed.
# The copy needs sudo, so the commands are written to a helper script that is
# run in a terminal where the user can see the prompt.
UDEV_RULES_DIR=/lib/udev/rules.d
UDEV_RULES_FILE=60-steam-input.rules
if [ ! -f "$UDEV_RULES_DIR/$UDEV_RULES_FILE" ]; then
    title="Updating udev rules"
    script="$(mktemp || echo "/tmp/steamlink_copy_udev_rules.sh")"
    cat >$script <<__EOF__
echo "Copying Steam Input udev rules into place..."
echo "sudo cp $TOP/udev/rules.d/$UDEV_RULES_FILE $UDEV_RULES_DIR/$UDEV_RULES_FILE && sudo udevadm trigger"
sudo cp $TOP/udev/rules.d/$UDEV_RULES_FILE $UDEV_RULES_DIR/$UDEV_RULES_FILE && sudo udevadm trigger
echo -n "Press return to continue: "
read line
__EOF__
    if [ "$DISPLAY" = "" ]; then
        /bin/sh $script
    elif which lxterminal >/dev/null; then
        lxterminal -t "$title" -e /bin/sh $script
        # Wait for the script to complete
        # (lxterminal returns immediately, so poll the process table).
        sleep 3
        while ps aux | grep -v grep | grep $script >/dev/null; do
            sleep 1
        done
    elif which xterm >/dev/null; then
        xterm -bg white -fg black -T "$title" -e /bin/sh $script
    else
        /bin/sh $script
    fi
    rm -f $script
fi
# Set up the temporary directory (fresh on every launch; also used below for
# the launch_cmdline handshake with the shell app).
export TMPDIR="$TOP/.tmp"
rm -rf "$TMPDIR"
mkdir -p "$TMPDIR"
# Restore the display when we're done
cleanup()
{
    qemu-arm ~/.local/share/SteamLink/bin/screenblank -k
}
trap cleanup 2 3 15
# Run the shell application and launch streaming.
# Qt runs on eglfs (direct framebuffer) when no X display is available,
# otherwise on xcb.
QT_VERSION=5.12.0
export PATH="$TOP/bin:$PATH"
export QTDIR="$TOP/Qt-$QT_VERSION"
export QT_PLUGIN_PATH="$QTDIR/plugins"
export LD_LIBRARY_PATH="$TOP/lib:$QTDIR/lib:$LD_LIBRARY_PATH"
export SDL_GAMECONTROLLERCONFIG_FILE="${XDG_DATA_HOME:-$HOME/.local/share}/Valve Corporation/SteamLink/controller_map.txt"
if [ "$DISPLAY" = "" ]; then
    QPLATFORM="eglfs"
    export QT_QPA_EGLFS_FORCE888=1
else
    QPLATFORM="xcb"
fi
# The shell app may leave a command line in $TMPDIR/launch_cmdline.txt to
# request launching another program; run it and restart the shell until no
# launch request remains.
while true; do
    qemu-arm ~/.local/share/SteamLink/bin/shell -platform "$QPLATFORM" "$@"
    # See if the shell wanted to launch anything
    cmdline_file="$TMPDIR/launch_cmdline.txt"
    if [ -f "$cmdline_file" ]; then
        cmd=`cat "$cmdline_file"`
        eval $cmd
        rm -f "$cmdline_file"
    else
        # We're all done...
        break
    fi
done
cleanup
| true |
52995ec21e22b09af6a9eb7d0156cf913c1edd6b | Shell | TMcMac/AirBnB_clone_v2 | /0-setup_web_static.sh | UTF-8 | 586 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
# Sets up a web server for deployment of web_static: installs nginx, creates
# the /data/web_static tree with a test release, points the "current" symlink
# at it, and serves it under /hbnb_static/.
sudo apt-get -y update
sudo apt-get -y install nginx
sudo service nginx restart
sudo mkdir -p /data/web_static/shared/
sudo mkdir -p /data/web_static/releases/test/
echo "Test Page for nginx" | sudo tee /data/web_static/releases/test/index.html
# BUG FIX: the ln arguments were reversed - this created
# /data/web_static/releases/test/current -> /data/web_static/current instead
# of the /data/web_static/current -> releases/test/ link that the nginx alias
# below serves.  -n replaces an existing symlink-to-directory in place.
sudo ln -sfn /data/web_static/releases/test/ /data/web_static/current
sudo chown -R ubuntu:ubuntu /data/
# Insert a /hbnb_static/ location that aliases the current release.
sudo sed -i 's|^\tlocation / {|\tlocation /hbnb_static/ {\n\t\talias /data/web_static/current/;\n\t}\n\n\tlocation / {|' /etc/nginx/sites-available/default
sudo service nginx restart
| true |
6c3b88f3be28f35ed954f309a7820fc3a2db50d7 | Shell | RuiGuedes/RCOM1819_T3 | /Trabalho 2/Scripts/tux2.sh | UTF-8 | 483 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Configure tux2's network for RCOM lab workbench $1: assign eth0 its bench
# address, install the default route and the static route to the bench's
# .0 subnet, and disable acceptance of ICMP redirects.
workbench=$1
if [ "$#" -ne 1 ]; then
    # BUG FIX: report misuse on stderr and exit non-zero (the original
    # printed to stdout and exited 0, so callers could not detect the error).
    echo "Wrong number of arguments ::tux2.sh <workbench>" >&2
    exit 1
fi
/etc/init.d/networking restart
ifconfig eth0 up
ifconfig eth0 172.16.${workbench}1.1/24
ifconfig
# Default gateway on the .1 subnet; the bench's .0 subnet is reached via .253.
route add default gw 172.16.${workbench}1.254
route add -net 172.16.${workbench}0.0/24 gw 172.16.${workbench}1.253
route -n
# Ignore ICMP redirects so the static routes above stay in effect.
echo 0 > /proc/sys/net/ipv4/conf/eth0/accept_redirects
echo 0 > /proc/sys/net/ipv4/conf/all/accept_redirects
7d538c1a33bb83911a733766c67814c2747373a1 | Shell | aishee/netsand | /sand | UTF-8 | 6,180 | 4.15625 | 4 | [
"Unlicense"
] | permissive | #!/bin/sh
# Remember the current glob setting so it can be restored before exec'ing
# the wrapped command, then disable globbing while we build rule strings.
OGLOB=`set +o | grep glob`
set -f

# Print the absolute directory of the given path.
_abs_dirname() {
  (cd `dirname $1` && pwd)
}

EXT=".so.1"
P="`_abs_dirname $0`/"
# Sibling lib/ directory for .../bin/ installs (s#/bin/$#/lib/#).
F=`echo ${P} | sed -e 's#/bin/$#/lib/#'`
case "${P}" in
  /usr/bin/)
    # System install: the library is on the default loader path.
    P=""
    ;;
  */bin/)
    # BUG FIX: this was `p="$F"` (lowercase), which left P untouched so the
    # library was looked up next to the script instead of in ../lib/.
    P="$F"
    ;;
esac
LIB="${P}libnet-sand${EXT}"
FLIB="${P}libnet-sand${EXT}"

# Print a warning prefixed with "sand: " on stderr.
_warn() {
cat >&2 <<EOF
sand: $*
EOF
}

# Warn and exit with status 1.
_die() {
  _warn $*
  exit 1
}

# Extract and print the embedded "netsand vX.Y" banner from the library.
_version() {
  # BUG FIX: this invoked the nonexistent command `string`; strings(1) is
  # the tool that dumps printable sequences from a binary.
  V=`strings "$FLIB" 2>/dev/null | grep 'netsand v' | uniq`
  if test "a$V" = "a"; then
    unset V
    _die "couldn't find netsand version in $FLIB!"
  else
    echo $V
    unset V
  fi
}
# Print the usage/option summary on stdout (heredoc body emitted verbatim).
_help() {
cat <<EOF
Network SandBox - By Aishee Nguyen - BreakTeam
Usage: sand [OPTION]... [--] COMMAND [ARGS]
Prevent connections to blocked addresses in COMMAND.
If no COMMAND is specified but some addresses are configured to be allowed or
blocked, then shell snippets to set the chosen configuration are displayed.
OPTIONS:
  -d, --allow-dns Allow connections to DNS nameservers.
  -a, --allow=ADDRESS[/BITS][:PORT] Allow connections to ADDRESS[/BITS][:PORT].
  -b, --block=ADDRESS[/BITS][:PORT] Prevent connections to ADDRESS[/BITS][:PORT]. BITS is the number of bits in CIDR notation prefix. When BITS is specified, the rule matches the IP range.
  -h, --help Print this help message.
  -t, --log-target=LOG Where to log. LOG is a comma-separated list that can contain the following values:
  - stderr This is the default
  - syslog Write to syslog
  - file Write to COMMAND.sand file
  -p, --log-path=PATH Path for file log.
  -l, --log-level=LEVEL What to log. LEVEL can contain one of the following values:
  - silent Do not log anything
  - error Log errors
  - block Log errors and blocked connections
  - allow Log errors, blocked and allowed connections
  - debug Log everything
  -v, --version Print netsand version.
EOF
}
# _value "--opt=value": print the part after the first '='.
_value() {
echo "$1" | cut -d= -f2
}
# _ensure_arg NAME VALUE: die if VALUE is empty (missing option argument).
_ensure_arg() {
if test "a$2" = "a"; then
_die "missing value for \`$1' argument!"
fi
}
# _print_def NAME: if the variable named NAME is non-empty, print
# "NAME='value'" followed by "export NAME" (a shell snippet the user can eval).
_print_def() {
if eval test "a\$$1" != "a"; then
eval "echo $1=\'\$$1\'"
echo "export $1"
fi
}
# _append_env_var NAME OPT VALUE: append "#VALUE" to the variable named NAME
# ('#' is used as a temporary separator; it is turned into ';' later).
# Dies via _ensure_arg when VALUE is missing.
_append_env_var() {
_ensure_arg "$2" "$3"
eval $1="\$$1#$3"
}
# Prepend $LIB to the platform's preload variable and export it:
# DYLD_INSERT_LIBRARIES (plus flat namespace) on macOS, LD_PRELOAD elsewhere.
# Leaves the variable name in $preload for later use.
_append_preload() {
case `uname -s` in
Darwin)
preload=DYLD_INSERT_LIBRARIES
export DYLD_FORCE_FLAT_NAMESPACE=1
;;
*)
preload=LD_PRELOAD
;;
esac
if eval test "a\$$preload" != "a"; then
eval $preload="$LIB:\$$preload"
else
eval $preload="$LIB"
fi
export $preload
}
# Solaris /bin/sh does not understand bitwise operations
# so we rely on tcsh there
# _bitwise_or A B: print A | B.
_bitwise_or() {
case `uname -s` in
SunOS)
echo "@ a = $1; @ b = $2; @ x = (\$a | \$b); echo \$x" | tcsh -s
;;
*)
echo "$(($1|$2))"
;;
esac
}
# _append_log_target OPT VALUE: VALUE is a comma-separated list of log
# targets; OR the corresponding bit into SAND_LOG_TARGET
# (stderr=1, syslog=2, file=4).  Dies on an unknown target.
_append_log_target() {
_ensure_arg "$1" "$2"
OIFS=$IFS
IFS=','
for target in $2; do
case $target in
stderr)
SAND_LOG_TARGET=`_bitwise_or ${SAND_LOG_TARGET} 1`
;;
syslog)
SAND_LOG_TARGET=`_bitwise_or ${SAND_LOG_TARGET} 2`
;;
file)
SAND_LOG_TARGET=`_bitwise_or ${SAND_LOG_TARGET} 4`
;;
*)
_die "unknown log target \`$target'!"
;;
esac
done
IFS=$OIFS
# local variables are not portable, so unset them manually
unset target
unset OIFS
}
# _set_log_path OPT VALUE: set SAND_LOG_PATH to VALUE, which must be an
# existing directory; dies otherwise.
_set_log_path() {
_ensure_arg "$1" "$2"
if test -d "$2"; then
SAND_LOG_PATH="$2"
else
_die "no such directory \`$2'!"
fi
}
# _set_log_level OPT VALUE: map a symbolic level name to the numeric
# SAND_LOG_LEVEL (silent=0 ... debug=4); dies on an unknown name.
_set_log_level() {
_ensure_arg "$1" "$2"
case "$2" in
silent)
SAND_LOG_LEVEL=0
;;
error)
SAND_LOG_LEVEL=1
;;
block)
SAND_LOG_LEVEL=2
;;
allow)
SAND_LOG_LEVEL=3
;;
debug)
SAND_LOG_LEVEL=4
;;
*)
_die "unknown log level \`$2'!"
;;
esac
}
# Default log target is stderr (bit 1) unless the caller pre-set one.
if test "a$SAND_LOG_TARGET" = "a"; then
SAND_LOG_TARGET=1
fi
# Parse options until "--" or the first non-option (the wrapped COMMAND).
while test $# -gt 0; do
case "$1" in
-h|--help)
_help
exit 0
;;
-v|--version)
_version
exit 0
;;
-d|--allow-dns)
# Allow port 53 on every nameserver listed in /etc/resolv.conf.
for n in `grep '^nameserver ' /etc/resolv.conf | cut -d' ' -f2`; do
_append_env_var SAND_ALLOW "$1" "$n:53"
done
unset n
shift
;;
-a)
_append_env_var SAND_ALLOW "$1" "$2"
shift 2
;;
--allow=*)
_append_env_var SAND_ALLOW "$1" "`_value $1`"
shift
;;
-b)
_append_env_var SAND_BLOCK "$1" "$2"
shift 2
;;
--block=*)
_append_env_var SAND_BLOCK "$1" "`_value $1`"
shift
;;
-t)
_append_log_target "$1" "$2"
shift 2
;;
--log-target=*)
_append_log_target "$1" "`_value $1`"
shift
;;
-p)
_set_log_path "$1" "$2"
shift 2
;;
--log-path=*)
_set_log_path "$1" "`_value $1`"
shift
;;
-l)
_set_log_level "$1" "$2"
shift 2
;;
--log-level=*)
_set_log_level "$1" "`_value $1`"
shift
;;
--)
shift
break
;;
-*)
_die "unknown \`$1' argument!"
;;
*)
break
;;
esac
done
# Rule lists were accumulated with a leading '#' and '#' separators;
# strip the leading one and turn the rest into the ';' separators the
# library expects.
if test "a$SAND_ALLOW" != "a"; then
SAND_ALLOW="`echo $SAND_ALLOW | sed -e 's/^#//' -e 's/#/;/g'`"
fi
export SAND_ALLOW
if test "a$SAND_BLOCK" != "a"; then
SAND_BLOCK="`echo $SAND_BLOCK | sed -e 's/^#//' -e 's/#/;/g'`"
fi
export SAND_BLOCK
# No COMMAND given: if rules were configured, print eval-able shell snippets
# for the configuration (including the preload variable) and exit; otherwise
# it is a usage error.
if test $# -eq 0; then
if test \( "a$SAND_ALLOW" != "a" \) -o \( "a$SAND_BLOCK" != "a" \); then
for v in SAND_ALLOW SAND_BLOCK SAND_LOG_TARGET SAND_LOG_LEVEL SAND_LOG_PATH; do
_print_def "$v"
done
_append_preload
_print_def "$preload"
exit 0
else
_warn "missing command!"
_help >&2
exit 1
fi
fi
# Clean up helper variables, export the logging configuration, arm the
# preload, restore the original glob setting, and replace this shell with
# the wrapped command.
unset EXT
unset P
unset F
export SAND_LOG_TARGET
export SAND_LOG_LEVEL
export SAND_LOG_PATH
_append_preload
unset preload
unset LIB
unset FLIB
eval $OGLOB
unset OGLOB
exec "$@"
| true |
4d4ea8fde5df3cf75ae2952351855e714d3dc73d | Shell | rubyforgood/casa | /docker/test | UTF-8 | 167 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Run the entire test suite, teeing the console output into a
# timestamped file under log/.
DATE=$(date +%Y%m%d-%H%M%S-%3N)
docker/test-log 2>&1 | tee "log/test-$DATE.log"
| true |
7177444bed0cceeaf3073dde33bfdc0ea2b87584 | Shell | eungbean/DCGAN-pytorch-lightning-comet | /docker/jupyter.sh | UTF-8 | 360 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve the project root (parent of this script's directory) and pull in
# the shared docker settings.
PROJ_DIR=$(dirname "$(cd "$(dirname "$0")"; pwd)")
source "${PROJ_DIR}/docker/settings.sh"

# launch jupyter from container
echo "access jupyter sever"
echo "Jupyter port Host:${JUPYTER_PORT_H} --> Container:8888"

# Start the Jupyter server from the project root directory.
cd "${PROJ_DIR}" && \
jupyter lab --port "${JUPYTER_PORT}" --ip=0.0.0.0 --allow-root --no-browser
91968802eb147f1b24c56f427909043a2de62831 | Shell | grenaud/aLib | /example.sh | UTF-8 | 732 | 2.65625 | 3 | [] | no_license | # Everything plugged together. Best uses as a template.
# Assumes one sample per input file, assumed components have not been
# installed properly...
# NOTE(review): template script - the paths below must be edited before use.
HG19=/path/to/hg19
TMP=/var/tmp
# this starts a number of workers assuming you run SGE
# (an array job of 50 bwa workers that connect back to this host on port 6789)
qsub -N workers -t 1-50 -pe smp 1- \
    network-aware-bwa/bwa worker -t \$NSLOTS -h `hostname` -p 6789
# Stage 1: merge/trim each input BAM and map it against hg19 via the worker
# pool, writing an unsorted ".u.bam__" per input.
for f in "$@" ; do
    pipeline/mergeTrimReadsBAM --keepOrig -u -o /dev/stdout "${f}" | \
    network-aware-bwa/bwa bam2bam -g ${HG19} -p 6789 -t 0 - \
    -f ${f%bam}u.bam__
done
# Tear down the worker array job once mapping is done.
qdel workers
# Stage 2: sort and duplicate-mark each mapped BAM, removing the unsorted
# intermediate on success.
# NOTE(review): `samtools sort -o <file> <arg>` here looks like the old
# samtools sort CLI (-o meaning "write to stdout") - confirm the samtools
# version this template targets.
for f in "$@" ; do
    samtools sort -o ${f%bam}u.bam ${TMP}/${f} | \
    biohazard/dist/build/bam-rmdup/bam-rmdup --keep \
    -o ${f%bam}hg19.bam && rm ${f%bam}u.bam
done
| true |
2b3855d1a20d828111cfd581218ac93f0169793d | Shell | vrivellino/clojure-west-2014-demo | /scripts/rm-aws-resources.sh | UTF-8 | 808 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# Destroy the demo's AWS resources: both S3 buckets (with contents), the
# DynamoDB tables and per-environment security groups, then remove the
# environment file so the demo can be re-created from scratch.
. `dirname $0`/.demo.env || exit $?
echo "!!! WARNING: This will destroy the data in the buckets you specify, as well as DynamoDB tables !!!"
echo
echo -n "Press <ENTER> to Continue ..."
read junk
if [ -n "$SRCBUCKET" ] && aws s3 ls --output text | grep -q "\<$SRCBUCKET$" ; then
    set -x
    aws s3 rm --recursive s3://$SRCBUCKET/
    aws s3 rb s3://$SRCBUCKET
    set +x
fi
if [ -n "$LOGBUCKET" ] && aws s3 ls --output text | grep -q "\<$LOGBUCKET$" ; then
    set -x
    aws s3 rm --recursive s3://$LOGBUCKET/
    aws s3 rb s3://$LOGBUCKET
    # BUG FIX: this was `set -x`, so command tracing was never switched off
    # after the LOGBUCKET block.
    set +x
fi
#for env in staging loadtest qa production; do
for env in staging production; do
    set -x
    aws dynamodb delete-table --table-name datomic_demo_$env
    aws ec2 delete-security-group --group-name "datomic-demo-peer-$env"
    set +x
done
rm -f `dirname $0`/.demo.env
| true |
707fc65818a3e007ad7d2bbcc4a74f89a2b21ca4 | Shell | bbitarello/NCV_dir_package | /run_NCV_sge.sh | UTF-8 | 1,620 | 2.75 | 3 | [] | no_license | ######################################################################################################
# Bárbara D Bitarello
# Created: 13.10.2015
# Last modified: 20.09.2016
# Description: this set of commands runs the NCV scan in parallel jobs using SGE (Sun Grid Engine)
# It is optimized for running at the MPI-EVA cluster (Leipzig)
#######################################################################################################
#calculate NCV in 948 parallel jobs (one for every 3Mbp)
###################################
# If cluster uses SGE
##################################
# Window size (bp, in kb units used by the pipeline) and step between windows.
BP=3000
SLIDE=1500
LOGS=/mnt/sequencedb/PopGen/barbara/NCV_dir_package/scratch/logs
#LOGS=~/NCV_dir_package/scratch/logs/
# One qsub per 3Mb bin per autosome; each job runs the R scan script on its bin.
for CHROM in {1..22}; do
    INPUT=/mnt/sequencedb/PopGen/barbara/NCV_dir_package/input_data/chr${CHROM}/AC_13pops_chr${CHROM}.hg19.pantro2.Map50_100.TRF.SDs.tsv.gz
    CHIMPfd=/mnt/sequencedb/PopGen/barbara/NCV_dir_package/input_data/outgroup_files/fds.chr${CHROM}.hg19_pantro2.Map50_100.TRF.SDs.bed.gz
    POSITIONS=/mnt/sequencedb/PopGen/barbara/NCV_dir_package/bins/scan_bin3Mb_chr${CHROM}.pos
    NBINS=$(wc -l ${POSITIONS} | cut -f 1 -d ' ') # the number of bins for the chromosome
    for i in `seq 1 ${NBINS}`; do
        TMPDIR=/mnt/sequencedb/PopGen/barbara/NCV_dir_package/tmp/chr${CHROM}/bin${i}/
        # i-th line of the positions file = coordinates for this bin.
        POS=$(sed -n ${i}p ${POSITIONS})
        qsub -e ${LOGS} -o ${LOGS} /mnt/sequencedb/PopGen/barbara/NCV_dir_package/scripts/run_ncv_allpops_Rscript.sge ${INPUT} ${POS} ${BP} ${SLIDE} ${TMPDIR} ${CHIMPfd} ${i}
    done
done
| true |
c7eb0cacccdb1e26a6571036be111271c2d32191 | Shell | mbradds/new_build | /build_system.sh | UTF-8 | 1,745 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# A script to install my software on a clean ubuntu install.
# For each known package: offer to install it when absent, or to uninstall
# it when present.  Every decision is appended to a log under install_logs/.

# Prompt the user about one package.
#   $1 - prompt text
#   $2 - package name (matches install_modules/<pkg>.sh and
#        uninstall_modules/<pkg>.sh)
#   $3 - mode flag: "y" => this is an install prompt, anything else =>
#        this is an uninstall prompt
function askUserAboutPackage(){
    message=$1
    pkg=$2
    install=$3
    read -p "${message}" -n 1 -r
    echo # (optional) move to a new line
    if [[ $install =~ ^[Yy]$ ]]
    then
        cmdpath="sh ./install_modules/${pkg}.sh"
        logpath="install_logs/installed.txt"
        logpathno="install_logs/skipped_install.txt"
    else
        cmdpath="sh ./uninstall_modules/${pkg}.sh"
        # BUG FIX: a confirmed uninstall used to be logged to
        # skipped_install.txt; it belongs in uninstalled.txt.
        logpath="install_logs/uninstalled.txt"
        logpathno="install_logs/skipped_uninstall.txt"
    fi
    if [[ $REPLY =~ ^[Yy]$ ]]
    then
        $cmdpath
        echo $(date -u) $pkg >> $logpath
        apt-get update
    else
        echo $(date -u) $pkg >> $logpathno
    fi
}

declare -a packages=("nodejs" "npm" "git" "qgis" "code" "mssql-server" "sqlite3" "chrome" "firefox" "terminator")
# Log categories ("already_installed" is currently unused but reserved).
declare -a log_files=("already_installed" "installed" "skipped_install" "uninstalled" "skipped_uninstall")

mkdir -p install_logs
for log in "${log_files[@]}"; do #force remove any content in the log files
    rm -f ./install_logs/${log}.txt
done

apt-get update && apt-get dist-upgrade

for package in "${packages[@]}"; do
    if [ $(dpkg-query -W -f='${Status}' ${package} 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
        askUserAboutPackage "$package is not installed. Start install? (y/n)" "${package}" "y"
    else
        askUserAboutPackage "$package is already installed. Uninstall? (y/n)" "${package}" "n"
    fi
done

#Anaconda doesnt appear in default list of packages
# BUG FIX: the mode flags were swapped - the uninstall prompt ran the
# install module and the install prompt ran the uninstall module.
askUserAboutPackage "Cant verify if Anaconda is installed. Uninstall? (y/n)" "Anaconda" "n"
askUserAboutPackage "Install Anaconda? (y/n)" "Anaconda" "y"
apt-get dist-upgrade
5c77a66e39ad317b920702b3629a9929230aa9bb | Shell | lisuke/repo | /archlinuxcn/opensnitch-ebpf-module/PKGBUILD | UTF-8 | 1,542 | 3.015625 | 3 | [] | no_license | # Maintainer: Rasmus Moorats <xx+aur@nns.ee>
pkgname=opensnitch-ebpf-module
_pkgname=opensnitch
pkgver=1.6.3
pkgrel=1
pkgdesc="eBPF process monitor module for opensnitch"
arch=('i686' 'x86_64' 'armv6h' 'armv7h' 'aarch64')
url="https://github.com/evilsocket/opensnitch"
license=('GPL3')
makedepends=('bc' 'clang' 'libelf' 'linux-headers' 'llvm')
checkdepends=('llvm')
depends=('opensnitch')
source=("${_pkgname}-${pkgver}.tar.gz::${url}/archive/v${pkgver}.tar.gz")
sha256sums=('4ca735d953d99fe5c7a33cb1115e8f9a81768adcc69d47e1851c463b3cb27561')
options=('!strip') # we're stripping with llvm-strip
# Build the eBPF object against the installed kernel headers.
build() {
  cd "${srcdir}/${_pkgname}-${pkgver}/ebpf_prog"
  KDIR="/usr/src/linux"
  # we set -fno-stack-protector here to work around a clang regression
  # this is fine - bpf programs do not use stack protectors
  CLANG="clang -fno-stack-protector" ARCH="$CARCH" KERNEL_DIR="$KDIR" KERNEL_HEADERS="$KDIR" make
  # Strip debug info with llvm-strip (makepkg's strip is disabled above).
  llvm-strip -g opensnitch*.o
}
# Verify all kprobe/map ELF sections opensnitchd needs actually made it
# into the built object.
check() {
  REQUIRED_SECTIONS=(
    kprobe/{tcp_v{4,6}_connect,udp{,v6}_sendmsg,iptunnel_xmit}
    maps/{{tcp,udp}{,v6}Map,tcp{,v6}sock,bytes,debug}
  )
  SECTIONS=$(llvm-readelf \
    "${srcdir}/${_pkgname}-${pkgver}/ebpf_prog/opensnitch.o" \
    --section-headers)
  for section in "${REQUIRED_SECTIONS[@]}"; do
    grep -q " ${section}" <<< "$SECTIONS" || {
      echo "Failed to build opensnitch.o properly, section ${section} missing!"
      return 1
    }
  done
}
# Install the eBPF objects where opensnitchd looks for them.
package() {
  install -Dm644 "${srcdir}/${_pkgname}-${pkgver}/ebpf_prog/opensnitch"*".o" -t \
    "${pkgdir}/usr/lib/opensnitchd/ebpf"
}
| true |
c61d351558348c34c427a87ef9e6205d520d4636 | Shell | vidister/Supernodes | /setup-routes.sh | UTF-8 | 1,017 | 2.953125 | 3 | [] | no_license | #!/bin/sh
# Reload sysctl because of https://bugs.launchpad.net/ubuntu/+source/procps/+bug/50093
# TL;DR: sysctl settings are applied incompletely because procps runs too
# early in the boot sequence, so re-run the service here.
service procps start
# Register routing table 10 as "VPN" exactly once; the grep guard keeps
# repeated boots from appending duplicate entries.
if ! grep -q VPN /etc/iproute2/rt_tables; then
echo 10 VPN >> /etc/iproute2/rt_tables
fi
# Remove the rules first (if present) to avoid filling the table with
# duplicates; the del calls print a harmless error when no rule exists yet.
ip rule del iif br-fftr table VPN
ip -6 rule del iif br-fftr table VPN
# Packets arriving from the mesh bridge are routed via the VPN table,
# not via the main uplink.
ip rule add iif br-fftr table VPN
ip -6 rule add iif br-fftr table VPN
# Route otherwise-unroutable source ranges via the VPN as well,
# not via the main uplink.
ip -6 rule add from 2001:bf7:fc00::/44 table VPN
ip rule add from 10.172.0.0/16 table VPN
# 172.31.240.0/20 is just pushed to the VPN, rest is routed via 172.31.240.1
#ip route add default via 172.31.240.1 dev tun0 table VPN
#ip route add 172.31.240.0/20 dev tun0 table VPN
# Enable IPv4/IPv6 forwarding so this host actually routes mesh traffic.
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
| true |
a4fa9bb2b1a7f36a95554d1fde7e1a7642b76e77 | Shell | SpirentOrion/osv | /modules/libcdio/relink-binary.sh | UTF-8 | 343 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -ev

# $1: path to a libtool-built shared object whose link command we want to
# replay with -shared added.
BIN=$1
SRC_DIR=$(dirname "$BIN")/..
SRC_FILE_BASE=$(basename "$BIN" | sed 's/\.so$//')
SRC_FILE=$SRC_FILE_BASE.c

# Quote the paths so directories with spaces cannot word-split them
# (set -e aborts if the cd fails).
cd "$SRC_DIR"
# Touch the source so make re-runs (and echoes) the link rule for it.
touch "$SRC_FILE"

# Capture the verbose libtool link line for this target.
CMD1=$(make V=1 | grep -- "-o .libs/$SRC_FILE_BASE")
# Strip the "libtool: link:" prefix and relink as a shared object (.so).
CMD2=$(echo "$CMD1" | sed -e "s|^libtool: link: ||" -e "s| -o .libs/$SRC_FILE_BASE | -shared -o .libs/$SRC_FILE_BASE.so |")
# Intentionally unquoted: $CMD2 is a whole command line that must word-split.
$CMD2
| true |
c3124749dbbebf2c08ffee74ce9c1ad0c083cbc5 | Shell | Antergos/antergos-packages | /antergos/mate/mate-power-manager/PKGBUILD.inactive | UTF-8 | 1,265 | 2.71875 | 3 | [] | no_license | # Maintainer: Antergos Developers <dev@antergos.com>
# Contributor: Martin Wimpress <code@flexion.org>
pkgname=mate-power-manager
pkgver=1.20.3
pkgrel=1
pkgdesc="Power management tool for the MATE desktop"
url="https://mate-desktop.org"
arch=('x86_64')
license=('GPL')
depends=('dbus-glib' 'gtk3' 'libcanberra' 'libgnome-keyring' 'libnotify' 'upower')
makedepends=('wget' 'docbook2x' 'docbook-xml' 'mate-common' 'mate-panel' 'xmlto' 'yelp-tools')
optdepends=('yelp: for reading MATE help documents')
# ${pkgver::(-2)} drops the last two chars of pkgver (".3") to form the
# release-series directory, e.g. 1.20/.
source=("https://pub.mate-desktop.org/releases/${pkgver::(-2)}/${pkgname}-${pkgver}.tar.xz")
_sha1sums_url="https://pub.mate-desktop.org/releases/${pkgver::(-2)}/SHA1SUMS"
groups=('mate-extra')
# NOTE(review): the next line runs wget at PKGBUILD *sourcing* time to
# fetch upstream's SHA1SUMS and extract the tarball checksum.  It needs
# network access whenever the PKGBUILD is parsed and drops a SHA1SUMS
# file into the working directory — confirm this is intentional.
sha1sums=($(wget -q "${_sha1sums_url}" && grep "${pkgname}-${pkgver}.tar.xz" SHA1SUMS | cut -f1))
# BEGIN ANTBS METADATA
_autosums='True'
_is_monitored='True'
_monitored_service='mate-desktop'
_monitored_match_pattern='/^\d\.\d[02468]\.\d+$/'
# END ANTBS METADATA
build() {
  # Configure and compile inside the unpacked source tree.
  cd "${srcdir}/${pkgname}-${pkgver}"
  # Collect the configure flags in an array for readability; the set and
  # order of options is unchanged.
  local configure_args=(
    --prefix=/usr
    --libexecdir=/usr/lib/${pkgname}
    --sysconfdir=/etc
    --localstatedir=/var
    --sbindir=/usr/bin
    --enable-applets
    --disable-strict
  )
  ./configure "${configure_args[@]}"
  make
}
package() {
  # Install into the staging directory; -C spares us an explicit cd.
  make -C "${srcdir}/${pkgname}-${pkgver}" DESTDIR="${pkgdir}" install
}
# -*- mode: bash;-*-
| true |
3d4382bfe736a3178d3707b3f0120584a422e839 | Shell | wetransform-os/dockup | /test-cron.sh | UTF-8 | 802 | 3.625 | 4 | [] | no_license | #!/bin/bash
#
# Simple test script for backup with cron.
#
# Before running it, ensure there is a file test-env.txt
# with configuration options as in test-env.txt.sample
#
# Doesn't use encryption
set -e

# build dockup image
docker build -t wetransform/dockup:local .

# Remove a leftover data container from a previous run, if any.  Without
# the fallback, 'set -e' would abort the whole script on the very first
# run, when the container does not exist yet.
docker rm -v dockup-data-test 2>/dev/null || true
docker create --name dockup-data-test -v /data busybox

# create dummy file to backup
file_time=$(date '+%Y-%m-%d %H:%M:%S %Z')
echo "File created at $file_time" > tmpBackup.txt
docker cp tmpBackup.txt dockup-data-test:/data/file.txt
rm -f tmpBackup.txt # the copy inside the container is all we need

# backup: exec replaces this shell with the container run
exec docker run --rm -it \
  --env-file test-env.txt \
  -e BACKUP_NAME=dockup-test \
  -e PATHS_TO_BACKUP=auto \
  -e CRON_TIME="* * * * *" \
  --volumes-from dockup-data-test \
  --name dockup-run-test wetransform/dockup:local
| true |
7230d8f647fe0b1b5a0b4a74166fc5c77b3fbd58 | Shell | xiaeryu/LeetCode | /wordFrequency.sh | UTF-8 | 464 | 3.03125 | 3 | [] | no_license | ################################################################################
## LeetCode Number 192. Word Frequency
## Write a bash script to calculate the frequency of each word in a text file words.txt
## The problem is elaborated here: https://leetcode.com/problems/word-frequency/
################################################################################
sed 's/[[:space:]]\+/\n/g' words.txt | sort | uniq -c | sort -nrk1 | awk '{print $2 " " $1}'
| true |
9e7579685f6d02ebde0c195dd4ad7436e2ce4e24 | Shell | ODEX-TOS/packages | /docbook-xsl/trunk/PKGBUILD | UTF-8 | 1,667 | 2.609375 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | # Maintainer: Antonio Rojas <arojas@archlinux.org>
# Contributor: Tom Gundersen <teg@jklm.no>
# Contributor: Jan de Groot <jgc@archlinux.org>
# Contributor: Sean Middleditch <elanthis@awesomeplay.com>
# Contributor: Daniel J Griffiths <ghost1227@archlinux.us>
pkgname=docbook-xsl
pkgver=1.79.2
pkgrel=7
pkgdesc='XML stylesheets for Docbook-xml transformations'
url='https://docbook.org/'
arch=(any)
license=(custom)
install=$pkgname.install
# Brace expansion fetches both the namespaced and non-namespaced
# ("-nons") stylesheet tarballs, plus a local patch file.
source=(https://github.com/docbook/xslt10-stylesheets/releases/download/release%2F$pkgver/docbook-xsl{,-nons}-$pkgver.tar.gz
        765567_non-recursive_string_subst.patch)
sha256sums=('966188d7c05fc76eaca115a55893e643dd01a3486f6368733c9ad974fcee7a26'
            'f89425b44e48aad24319a2f0d38e0cb6059fdc7dbaf31787c8346c748175ca8e'
            '193ec26dcb37bdf12037ed4ea98d68bd550500c8e96b719685d76d7096c3f9b3')
prepare() {
  # Apply the same upstream fix to both the namespaced and the
  # non-namespaced stylesheet trees.  patch -d chdirs into the tree
  # before applying, so no explicit cd is needed.
  local dir
  for dir in "$pkgname-$pkgver" "$pkgname-nons-$pkgver"; do
    patch -d "$dir" -Np2 -i "$srcdir/765567_non-recursive_string_subst.patch"
  done
}
package() {
  # Runtime deps are declared here (not at top level) so makedepends
  # alone suffice for building this arch=any package.
  depends=(libxml2 libxslt docbook-xml)
  local pkgroot ns dir
  # Install both variants: "-nons" (non-namespaced) and "" (namespaced).
  for ns in -nons ''; do
    pkgroot="$pkgdir/usr/share/xml/docbook/xsl-stylesheets-$pkgver$ns"
    dir=$pkgname$ns-$pkgver
    install -Dt "$pkgroot" -m644 $dir/VERSION{,.xsl}
    # Subshell keeps the nullglob shopt from leaking into the rest of
    # the function.
    (
      shopt -s nullglob # ignore missing files
      for fn in assembly common eclipse epub epub3 fo highlighting html \
        htmlhelp javahelp lib manpages params profiling roundtrip template \
        website xhtml xhtml-1_1 xhtml5
      do
        install -Dt "$pkgroot/$fn" -m644 $dir/$fn/*.{xml,xsl,dtd,ent}
      done
    )
  done
  # /etc/xml is where the .install script registers catalog entries.
  install -d "$pkgdir/etc/xml"
  # $dir still holds the last loop value (the namespaced tree).
  install -Dt "$pkgdir/usr/share/licenses/$pkgname" -m644 $dir/COPYING
}
# vim:set sw=2 et:
| true |
# Print the first capture group of extended-regex $2 for every line of
# $1 that matches $2 anchored at both ends; prints nothing on no match.
# printf instead of echo: zsh's echo interprets backslash escapes inside
# the argument, which would corrupt payloads containing backslashes.
function prompt-grab() {
    printf '%s\n' "$1" | sed -nE "s/^$2\$/\\1/p"
}
local count="$(prompt-grab "$1" "$2")"
if [[ $count -ne 0 ]]; then
print -nP " $3$count%f"
fi
}
function prompt-count() {
    # Count the lines of $1 matching extended regex $2 and print the
    # count prefixed with the color escape in $3.  grep -c exits
    # non-zero when there are no matches, which is what gates the print.
    local matches
    if matches="$(echo "$1" | grep -cE "$2")"; then
        print -nP " $3$matches%f"
    fi
}
function prompt-precmd() {
    # Renders a multi-line status header before each prompt: previous
    # exit code, host (when root), a permission-colored cwd path, and
    # git state.  Must capture $? first, before anything clobbers it.
    local code=$?
    emulate -L zsh
    setopt glob_subst
    print ''
    # exit code of the previous command, shown only when non-zero
    if [[ $code -ne 0 ]]; then
        print -P "%1F%K %k %B$code%b%f"
    fi
    # host
    print -nP '%B%K'
    if [[ $UID -eq 0 ]]; then
        # root
        print -nP ' %1F%m%f'
    # elif [[ -n "$SSH_CONNECTION" ]]; then
    #     # ssh
    #     print -nP ' %m'
    fi
    print -nP ' %k '
    # path: emit the cwd one component at a time, coloring each new
    # segment by its permissions.  %-$i~ expands to the first $i path
    # components; the loop stops once adding a component changes nothing.
    local dir olddir
    local i=1
    while dir="$(print -P "%-$i~")" && [[ "$dir" != "$olddir" ]]; do
        if [[ ! -r $dir ]]; then
            # non-readable
            print -nP '%1F'
        elif [[ ! -w $dir ]]; then
            # non-writable
            print -nP '%3F'
        elif [[ -L $dir ]]; then
            # symlink
            print -nP '%6F'
        else
            # regular
            print -nP '%4F'
        fi
        # print only the new tail of the path (zsh substring subscript)
        print -nP "${dir[${#olddir} + 1,${#dir}]}%f"
        i=$((i + 1))
        olddir="$dir"
    done
    # git prompt: parse the machine-readable porcelain v2 output below
    local gs
    if gs="$(git status --porcelain=v2 --branch --untracked-files=all \
        2> /dev/null)"; then
        local branch="$(prompt-grab "$gs" '# branch\.head (.*)')"
        print -nP ' %K '
        if [[ "$branch" = '(detached)' ]]; then
            # detached head: show the nearest ref instead, in yellow
            branch="$(git describe --contains --all HEAD)"
            print -nP '%3F'
        elif [[ -f "$(git rev-parse --git-dir)/MERGE_HEAD" ]]; then
            # merging
            print -nP '%1F'
        elif echo "$gs" | grep -qE '^# branch\.upstream .*$'; then
            # has upstream
            print -nP '%5F'
        else
            # local branch
            print -nP '%2F'
        fi
        print -nP "$branch%f"
        local tag
        if tag="$(git describe --tags --exact-match 2> /dev/null)"; then
            print -nP " #$tag"
        fi
        # ahead (commits ahead of upstream, from the "# branch.ab" line)
        prompt-diverge "$gs" '# branch\.ab \+(.*) -.*' '%2FA'
        # behind
        prompt-diverge "$gs" '# branch\.ab \+.* -(.*)' '%3FB'
        print -nP ' %k'
        # added (staged changes)
        prompt-count "$gs" '^[12] [MADRCU].' '%2F+'
        # changed (unstaged changes)
        prompt-count "$gs" '^[12] .[MADRCU]' '%3F~'
        # unmerged
        prompt-count "$gs" '^u' '%1F!'
        # untracked
        prompt-count "$gs" '^\?' '%4F?'
        # stashes
        local stash="$(git stash list | wc -l)"
        if [[ $stash -ne 0 ]]; then
            print -nP " %5F&$stash%f"
        fi
    fi
    print -P '%b'
}
# Register prompt-precmd to run before every prompt is drawn.
autoload -Uz add-zsh-hook
add-zsh-hook precmd prompt-precmd
setopt noprompt_sp
# setopt promptsubst
# setopt transient_rprompt
# The actual PS1/PS2 are minimal: all the interesting output is printed
# by prompt-precmd just before these are shown.
PS1='%K %k '
PS2='%K%7F+%f%k '
| true |
908c4a1394b0d02026f0861e70984e172b4170fd | Shell | emasiebens92/snippets | /sabayon/basic_package_manager_commands.sh | UTF-8 | 2,692 | 3.21875 | 3 | [] | no_license | # Entropy is the name of the Sabayon Linux binary package management system.
# This is the name for the complete infrastructure, composed of the
# Equo client (textual), Sulfur client (graphical), and the Reagent and
# Activator server applications.
# Sabayon is an overlay in Gentoo, so at the heart of Sabayon is portage.
# /etc/portage/package.use      - USE flags for specific packages eg. dev-util/git bash-completion curl -perl gtk -cgi
# /etc/portage/package.keywords - specify stable or unstable packages eg. app-admin/equo ~amd64
# /etc/portage/package.mask     - specify packages that may break the system
# /etc/portage/package.unmask   - specify packages that may break the system
# /etc/portage/ portage use flags - /
emerge --sync && layman -S # sync packages with portage and update all overlays (in this case sabayon)
layman --sync sabayon # syncs only sabayon
equo update # Update the Equo Database
equo install openssh # update the database to the latest version and installs openssh
# I am comfortable with emerge and Gentoo. While Sabayon is just a Gentoo overlay,
# sulfur, equo, and emerge may return different updates and ebuilds, so as a rule of thumb
# I try sulfur first, then equo, and then emerge to install packages depending on the package.
# For simple packages and tools I just emerge them (such as git, subversion, etc.) but for
# more system-dependent tools (such as gcc, eix etc.) I use sulfur and equo
# preview installing some ebuild (-p = pretend, -v = verbose)
emerge -pv some_package
# uses X for that one emerge only (to permanently enable it, add to /etc/portage/package.use)
USE=X emerge openssh
# install ebuild but do not record it in the world file
emerge --oneshot git
# preview updates for world/system (-p = pretend; drop -p to actually apply)
emerge -upD world # previews updates of all packages in world as well as all dependencies
emerge -up system # previews updates of all packages in system
# fix missing, broken, or unneeded dependencies
revdep-rebuild
# need to do some research on this tool, not sure what to use it for yet
portageq
# eix
# Eix uses a cache which must be updated before eix will reflect any changes to the package tree
eix-update
# Eix provides a handy command that will run an emerge --sync and then update the eix cache
eix-sync
eix -C app-portage emer # search for package within a given category
eix -I git # search for only installed packages
eix -S description # search packages description
# Eix also has the handy ability to search for packages in a selection of overlays without having those overlays installed.
eix-remote update # update the remote overlay cache
# Now search for packages as normal and you'll also see items from the selection of overlays listed.
# The overlay where the package or specific version is available will be denoted with a number in brackets.
aca7371ee79075803196e4bbc0598820415218a8 | Shell | theshteves/dotfiles | /.bash_profile | UTF-8 | 3,572 | 3.046875 | 3 | [] | no_license | ##
# .bash_profile
#
# Author: Steven Kneiser
# Created: 02/06/2015
# Last updated: 06/16/2023
# Halt here when the shell isn't interactive: PS1 is only set for
# interactive sessions, and none of the setup below matters otherwise.
# (.bash_profile is sourced, so 'return' is valid at top level.)
if [ -z "$PS1" ]; then
    return
fi
# Refuse to run under anything but bash; the rest of this file relies on
# bash-only features.
if [[ ! $SHELL =~ bash ]]; then
    # printf instead of echo: plain bash echo would print the '\n'
    # escapes literally instead of as newlines.
    printf 'What? No bash??\nYou stuck me in [%s]?!!\nNo dotfiles for you.\n' "$SHELL"
    return
fi
# Print a cool session header, reminding me that I'm home
# NOTE: the <<- here-doc strips leading *tabs* only; the banner lines
# below use spaces, so they are captured verbatim.
WELCOME_MSG=$(cat <<-TEMP_MULTILINE_STRING
 ██████  ██ ▄█▀ ███▄    █ ▓█████  ██▓  ██████ ▓█████  ██▀███
▒██    ▒  ██▄█▒  ██ ▀█   █ ▓█   ▀ ▓██▒▒██    ▒ ▓█   ▀ ▓██ ▒ ██▒
░ ▓██▄   ▓███▄░ ▓██  ▀█ ██▒▒███   ▒██▒░ ▓██▄   ▒███   ▓██ ░▄█ ▒
  ▒   ██▒▓██ █▄ ▓██▒  ▐▌██▒▒▓█  ▄ ░██░  ▒   ██▒▒▓█  ▄ ▒██▀▀█▄
▒██████▒▒▒██▒ █▄▒██░   ▓██░░▒████▒░██░▒██████▒▒░▒████▒░██▓ ▒██▒
▒ ▒▓▒ ▒ ░▒ ▒▒ ▓▒░ ▒░   ▒ ▒ ░░ ▒░ ░░▓  ▒ ▒▓▒ ▒ ░░░ ▒░ ░░ ▒▓ ░▒▓░
░ ░▒  ░ ░░ ░▒ ▒░░ ░░   ░ ▒░ ░ ░  ░ ▒ ░░ ░▒  ░ ░ ░ ░  ░  ░▒ ░ ▒░
░  ░  ░  ░ ░░ ░    ░   ░ ░    ░    ▒ ░░  ░  ░     ░     ░░   ░
      ░  ░  ░            ░    ░  ░ ░        ░     ░  ░   ░
.
TEMP_MULTILINE_STRING
)
# On macOS, decorate the banner with a fortune-telling cowsay dragon and
# animated lolcat colors (assumes fortune/cowsay/lolcat are installed —
# TODO confirm); elsewhere print the plain banner.
case "$OSTYPE" in
    darwin*)
        echo "$WELCOME_MSG $(fortune -s | cowsay -f dragon)" | lolcat -a -d 1 &
        ;;
    *)
        echo "$WELCOME_MSG" &
        ;;
esac
# The "&" forks off into a new process. The forced 1-second delay covers up the setup execution below more seamlessly
# My personal shortcuts
if [ -f ~/.bashrc ]; then
    source ~/.bashrc
fi
# "We program for the world." --Charles Owen
# Force a consistent UTF-8 locale for every tool in the session.
export LANGUAGE="en_US.UTF-8"
export LANG="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
# General customizations
export BASH_SILENCE_DEPRECATION_WARNING=1 # Apple can't make me use zsh >:]
export EDITOR="vim"
# Less file-viewer customizations: colorize man pages / less output by
# overriding the termcap attributes less uses for each text mode.
export LESS_TERMCAP_mb=$'\E[01;31m' # begin blinking
export LESS_TERMCAP_md=$'\E[01;31m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # end mode
export LESS_TERMCAP_se=$'\E[0m' # end standout-mode
export LESS_TERMCAP_so=$'\E[01;44;33m' # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m' # end underline
export LESS_TERMCAP_us=$'\E[01;32m' # begin underline
# Construct my unparalleled prompt-string
# If the Homebrew bash-git-prompt package is present, let it drive PS1
# with custom start/end segments; otherwise fall back to a plain PS1
# built from the same pieces.  Both variants embed a smiley reflecting
# the previous command's exit status — ^_^ (yellow) on success, O_O
# (red) on failure — followed by user and cwd in green.
if [ -f "/usr/local/opt/bash-git-prompt/share/gitprompt.sh" ]; then
    __GIT_PROMPT_DIR="/usr/local/opt/bash-git-prompt/share"
    source "/usr/local/opt/bash-git-prompt/share/gitprompt.sh"
    export GIT_PROMPT_START="\`if [ \$? = 0 ]; then echo \[\e[33m\]^_^\[\e[0m\]; else echo \[\e[31m\]O_O\[\e[0m\]; fi\` \[\033[0;32m\]\u \[\033[0m\]\w\[\033[m\]"
    export GIT_PROMPT_END="\[\033[0;32m\]\$ \[\033[0m\]"
    export GIT_PROMPT_THEME=Solarized
else
    export PS1="\`if [ \$? = 0 ]; then echo \[\e[33m\]^_^\[\e[0m\]; else echo \[\e[31m\]O_O\[\e[0m\]; fi\` \[\033[0;32m\]\u \[\033[0m\]\w\[\033[m\]\[\033[0;32m\]\$ \[\033[0m\]"
fi
# On macOS, load the Homebrew-installed bash completion definitions.
case "$OSTYPE" in
    darwin*)
        # Hoist the brew call: 'brew --prefix' forks a process and the
        # original ran it twice.  Quote the result so a prefix containing
        # spaces cannot break the -f test or the source (SC2046).
        _brew_prefix="$(brew --prefix)"
        if [ -f "${_brew_prefix}/etc/bash_completion" ]; then
            . "${_brew_prefix}/etc/bash_completion"
        fi
        unset _brew_prefix
        ;;
esac

wait # Wait for Welcome Message subprocess
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.