blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81181f44e09aa4e1ad8ceee0ac69dde22a92f016 | Shell | msys2/MINGW-packages | /mingw-w64-emacs/PKGBUILD | UTF-8 | 4,025 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: Haroogan <Haroogan@gmail.com>
# Maintainer: Oscar Fuentes <ofv@wanadoo.es>
_enable_jit=$([[ "${MINGW_PREFIX}" =~ /clang.* ]] || echo yes)
_sanity_check=$([[ "${MINGW_PREFIX}" =~ /clang.* ]] || [[ "${MINGW_PREFIX}" =~ /ucrt.* ]] || echo yes)
_realname=emacs
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=29.1
pkgrel=2
pkgdesc="The extensible, customizable, self-documenting, real-time display editor (mingw-w64)"
url="https://www.gnu.org/software/${_realname}/"
license=('spdx:GPL-3.0')
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32')
depends=("${MINGW_PACKAGE_PREFIX}-universal-ctags"
$([[ "$_enable_jit" == "yes" ]] && echo "${MINGW_PACKAGE_PREFIX}-libgccjit")
"${MINGW_PACKAGE_PREFIX}-zlib"
"${MINGW_PACKAGE_PREFIX}-xpm-nox"
"${MINGW_PACKAGE_PREFIX}-freetype"
"${MINGW_PACKAGE_PREFIX}-harfbuzz"
"${MINGW_PACKAGE_PREFIX}-jansson"
"${MINGW_PACKAGE_PREFIX}-gnutls"
"${MINGW_PACKAGE_PREFIX}-tree-sitter"
"${MINGW_PACKAGE_PREFIX}-libwinpthread")
optdepends=("${MINGW_PACKAGE_PREFIX}-giflib"
"${MINGW_PACKAGE_PREFIX}-libjpeg-turbo"
"${MINGW_PACKAGE_PREFIX}-libpng"
"${MINGW_PACKAGE_PREFIX}-librsvg"
"${MINGW_PACKAGE_PREFIX}-libtiff"
"${MINGW_PACKAGE_PREFIX}-libxml2")
makedepends=("${MINGW_PACKAGE_PREFIX}-cc"
"${MINGW_PACKAGE_PREFIX}-autotools"
"texinfo"
"patch"
"git"
"${optdepends[@]}")
# Don't zip info files because the built-in info reader uses gzip to
# decompress them. gzip is not available as a mingw binary.
options=('strip' '!zipman')
source=("https://ftp.gnu.org/gnu/${_realname}/${_realname}-${pkgver}.tar.xz"{,.sig}
"001-ucrt.patch"
"002-clang-fixes.patch")
# source=("https://alpha.gnu.org/gnu/${_realname}/pretest/${_realname}-${pkgver}.tar.xz"{,.sig})
sha256sums=('d2f881a5cc231e2f5a03e86f4584b0438f83edd7598a09d24a21bd8d003e2e01'
'SKIP'
'e1347064ec30094e21679764f784fa7557738946485359041473e6e9d7f3c3dc'
'6f3a3260d8fd6c1fbeafd0611f604c46799005dc776af076bff1fd4d8a3b6304')
validpgpkeys=('28D3BED851FDF3AB57FEF93C233587A47C207910'
'17E90D521672C04631B1183EE78DAE0F3115E06B'
'E6C9029C363AD41D787A8EBB91C1262F01EB8D39'
'CEA1DE21AB108493CC9C65742E82323B8F4353EE')
prepare() {
cd "${_realname}-${pkgver}"
# Apply local fixes before configuring: UCRT runtime support and
# clang build fixes (see source=() above for the patch files).
patch -Np1 -i "${srcdir}/001-ucrt.patch"
patch -Np1 -i "${srcdir}/002-clang-fixes.patch"
# Regenerate the configure script, since the patches touch autoconf inputs.
./autogen.sh
}
build() {
[[ -d "build-${MSYSTEM}" ]] && rm -rf "build-${MSYSTEM}"
mkdir -p "build-${MSYSTEM}" && cd "build-${MSYSTEM}"
_extra_cfg=""
if [[ "$_enable_jit" == "yes" ]] ; then
_extra_cfg="$_extra_cfg --with-native-compilation"
fi
# Required for nanosleep with clang
export LDFLAGS="${LDFLAGS} -lpthread"
# -D_FORTIFY_SOURCE breaks build
CFLAGS=${CFLAGS//"-Wp,-D_FORTIFY_SOURCE=2"}
# -foptimize-sibling-calls breaks native compilation (GCC 13.1)
CFLAGS+=" -fno-optimize-sibling-calls"
../${_realname}-${pkgver}/configure \
--prefix="${MINGW_PREFIX}" \
--host="${MINGW_CHOST}" \
--build="${MINGW_CHOST}" \
--with-modules \
--without-dbus \
--without-compress-install \
--with-tree-sitter \
$_extra_cfg
# --without-compress-install is needed because we don't have gzip in
# the mingw binaries and it is also required by native compilation.
# 001-ucrt.patch breaks stdout, causing make sanity-check to fail
if [[ "$_sanity_check" == "yes" ]] ; then
make
else
make actual-all
fi
}
package() {
cd "build-${MSYSTEM}"
make DESTDIR="${pkgdir}" install
# Drop the ctags bundled with emacs; the package depends on
# universal-ctags instead, which would conflict on these paths.
rm -f "${pkgdir}${MINGW_PREFIX}/bin/ctags.exe"
rm -f "${pkgdir}${MINGW_PREFIX}/share/man/man1/ctags.1.gz"
local dir="${pkgdir}${MINGW_PREFIX}/share/${_realname}"
# Locate the versioned share directory (e.g. share/emacs/29.1/).
# NOTE(review): this parses `ls` output with grep, which is fragile if
# more than one entry matches the version pattern - confirm only the
# version directory can appear here.
dir="${dir}/$(ls -1 ${dir} | grep -E '([0-9]+\.[0-9]+)(\.[0-9]+)?')/src"
mkdir -p "${dir}"
# Ship the C/Obj-C sources so `find-function` can jump into C code.
cd "${srcdir}/${_realname}-${pkgver}/src"
cp *.c *.h *.m "${dir}"
}
# TODO:
# Patch `shell-file-name' default in the C source code similarly to
# `source-directory'.
| true |
8d3cc857c80ded0e7025738838c1e8f0bbdf9e58 | Shell | discoposse/devops-2.0 | /provisioners/scripts/centos/install_centos7_jenkins.sh | UTF-8 | 1,035 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/sh -eux
# install jenkins on centos linux 7.x.
# install jenkins platform. -----------------------------------------------------
# Register the stable Jenkins yum repository and its signing key, then install.
wget --no-verbose --output-document /etc/yum.repos.d/jenkins.repo http://pkg.jenkins.io/redhat-stable/jenkins.repo
rpm --import http://pkg.jenkins.io/redhat-stable/jenkins.io.key
yum -y install jenkins
# modify jenkins configuration. ------------------------------------------------
jenkinsconfig="/etc/sysconfig/jenkins"
# check if jenkins config file exists.
if [ -f "${jenkinsconfig}" ]; then
# Keep a pristine backup (preserving mode/ownership via -p) before editing.
cp -p ${jenkinsconfig} ${jenkinsconfig}.orig
# modify jenkins config file.
# Point JENKINS_JAVA_CMD at the locally installed JDK 8 ...
sed -i -e '/^JENKINS_JAVA_CMD/s/^.*$/JENKINS_JAVA_CMD="\/usr\/local\/java\/jdk180\/bin\/java"/' ${jenkinsconfig}
# ... and move the web UI from the default port 8080 to 9080.
sed -i -e '/^JENKINS_PORT/s/^.*$/JENKINS_PORT="9080"/' ${jenkinsconfig}
fi
# start and enable jenkins daemon. ---------------------------------------------
systemctl start jenkins
systemctl enable jenkins
# display network configuration. -----------------------------------------------
#ip addr show
| true |
5f7f7a33e57b48160a5d7c96251a4f9da12531ab | Shell | sibis-platform/sibispy | /cmds/run_all_tests | UTF-8 | 1,269 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
SRC_DIR=`dirname "$0"`
TEST_DIR=`cd $SRC_DIR; echo $PWD | rev | cut -d'/' -f2- | rev`/tests
CFG_FILE=~/.sibis-general-config.yml
LEGACY_TESTS=(test_utils.py test_config_file_parser.py test_redcap_to_casesdir.py test_redcap_locking_data.py test_redcap_compute_summary_scores.py)
MODERN_TESTS=(test_post_issues_to_github.py test_sibislogger.py test_xnat_util.py test_session_valuedictkey.py test_session.py test_sibis_svn_util.py test_sibis_email.py test_check_dti_gradients.py)
logFile=`mktemp`
for TEST in ${MODERN_TESTS[@]}; do
echo " "
echo " "
CMD="pytest -v --config-file=${CFG_FILE} ${TEST_DIR}/${TEST}"
echo "========================="
echo "==== $CMD"
echo "========================="
$CMD | tee -a $logFile
done
echo " "
echo " "
echo "========================="
echo "========================="
echo "========================="
echo "Failed pytests:"
grep FAILED $logFile
rm $logFile
echo "========================="
echo "========================="
echo "========================="
for TEST in ${LEGACY_TESTS[@]}; do
echo " "
echo " "
CMD="${TEST_DIR}/$TEST ${CFG_FILE}"
echo "========================="
echo "==== LEGACY ${TEST}"
echo "==== $CMD"
echo "========================="
$CMD
done
| true |
961b8c85fa43e8654f8cf7518c6fb448e7407688 | Shell | alisher-matkurbanov/simplepayments | /prestart.sh | UTF-8 | 187 | 2.640625 | 3 | [] | no_license | # prestart.sh
echo "Waiting for postgres connection"
# Poll until host "db" accepts TCP connections on the default PostgreSQL
# port (5432); nc -z only probes the port, it sends no data.
while ! nc -z db 5432; do
sleep 0.1
done
echo "PostgreSQL started"
# apply migrations
cd ./app
alembic upgrade head
# Replace this shell with the command passed to the script (the
# container's main process), so it receives signals directly.
exec "$@"
cc615eddaf38ea09ec685a19e42eb5d727e13f1e | Shell | NicholasShatokhin/xlockmore-for-13.04 | /5.31/xlockmore-5.31/debian/patches/06_novtlock.dpatch | UTF-8 | 1,134 | 2.640625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-mit-old-style",
"xlock",
"LicenseRef-scancode-other-permissive"
] | permissive | #! /bin/sh /usr/share/dpatch/dpatch-run
## 06_novtlock.dpatch by Michael Stone <mstone@debian.org>
##
## All lines beginning with `## DP:' are a description of the patch.
## DP: remove references to vtlock in man page
@DPATCH@
diff -urNad xlockmore-5.27~/xlock/xlock.man xlockmore-5.27/xlock/xlock.man
--- xlockmore-5.27~/xlock/xlock.man 2009-02-22 14:15:56.959932486 -0500
+++ xlockmore-5.31/xlock/xlock.man 2009-02-22 14:17:01.951932525 -0500
@@ -60,9 +60,6 @@
.BI -/+allowaccess
]
[
-.BI \-vtlock " modename"
-]
-[
.BI -/+nolock
]
[
@@ -1169,23 +1166,6 @@
.B xlock
is killed -KILL, the access control list is not lost.
.TP 5
-.BI \-vtlock \ modename
-This option is used on a XFree86 system to manage VT switching in
-[off|noswitch|switch|restore] mode.
-.TP 10
-.I off
-means no VT switch locking.
-.TP 10
-.I switch
-means VT switch locking + switching to xlock VT when activated.
-.TP 10
-.I restore
-means VT switch locking + switching to xlock VT when activated +
-switching back to previous VT when desactivated.
-.TP 10
-.I noswitch
-means VT switch locking only when xlock VT is active.
-.TP 5
.B -/+allowroot
The
.I allowroot
| true |
2f908a4a5b579795fb697876ccbd43c781577f75 | Shell | dballesteros7/ASL-2013 | /FTaB/tools/amazon_tools/initialize_machines.sh | UTF-8 | 554 | 2.859375 | 3 | [] | no_license | #!/bin/bash
DB_INSTANCE="ec2-54-194-13-218.eu-west-1.compute.amazonaws.com"
SERVER_A="ec2-54-194-35-0.eu-west-1.compute.amazonaws.com"
SERVER_B="ec2-54-194-22-131.eu-west-1.compute.amazonaws.com"
CLIENT_A="ec2-54-194-13-53.eu-west-1.compute.amazonaws.com"
# Mount the attached volume /dev/xvdf as ext3 under ~/instance_store on
# every host and hand ownership to ec2-user.  (The old comment here said
# "Delete everything on the servers", which did not match the command.)
for server in $DB_INSTANCE $SERVER_A $SERVER_B $CLIENT_A
do
echo $server
ssh -t $server "mkdir /home/ec2-user/instance_store; sudo mount -t ext3 /dev/xvdf /home/ec2-user/instance_store/;sudo chown -R ec2-user:ec2-user /home/ec2-user/instance_store"
done
| true |
2b1d3d0c0da0b92eb7c319cf939ea61d8743454d | Shell | Lunderberg/installation_scripts | /scripts/python.sh | UTF-8 | 1,274 | 3.875 | 4 | [] | no_license | #!/bin/bash
# sudo apt install libssl-dev libsqlite3-dev libreadline-dev libbz2-dev
set -e -o pipefail -u
#########################################################
################ CONFIGURATION SECTION ##################
#########################################################
SRC_DIR=/scratch/Programs/python-build
INST_DIR=/scratch/Programs/python
# The version to be installed.
# Should be of form X.Y.Z
VERSION=3.6.0
# Number of threads to be used when compiling
THREADS=10
#########################################################
################ FUNCTION_DEFINITIONS ###################
#########################################################
function initialize() {
# Create the build directory (and any missing parents) if needed.
mkdir -p "$SRC_DIR"
}
function download_tarball() {
    # Fetch and unpack the CPython source tarball into $SRC_DIR, then
    # enter the source tree.  The download/extract step is skipped when
    # the tree already exists, so the script can be re-run safely.
    cd "$SRC_DIR"
    if [ ! -d "Python-$VERSION" ]; then
        # Declare first, assign second, so a failing command substitution
        # is not masked by the `local` statement's own exit status.
        local url
        url="https://www.python.org/ftp/python/$VERSION/Python-$VERSION.tgz"
        wget "$url"
        tar -xzf "Python-$VERSION.tgz"
    fi
    cd "Python-$VERSION"
}
function configure() {
# CPPFLAGS=-g keeps debug symbols in the build.
# --enable-loadable-sqlite-extensions: allow sqlite3 extension loading.
# --enable-optimizations: PGO/LTO build (slower compile, faster python).
CPPFLAGS=-g ./configure \
--enable-loadable-sqlite-extensions \
--enable-optimizations \
--prefix="${INST_DIR}"
}
function compile() {
# Parallel build; $THREADS is set in the configuration section above.
make -j$THREADS
}
function install() {
# Install into $INST_DIR (the --prefix passed to configure).
make install
}
initialize
download_tarball
configure
compile
install
| true |
fe37f5dbef6323b684d6bbd915a050863a057c1b | Shell | puppetlabs/PIE_tools | /pds_setup/tasks/get_pe_and_install.sh | UTF-8 | 2,580 | 3.65625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #! /usr/bin/env bash
set -e
# shellcheck disable=SC2154
platform_tag=$PT_platform_tag
# shellcheck disable=SC2154
pe_version=$PT_version
# shellcheck disable=SC2154
pe_family=$PT_family
# shellcheck disable=SC2154
workdir=${PT_workdir:-/root}
if [ -z "$pe_version" ] && [ -z "$pe_family" ]; then
echo "Must set either version or family" >&2
exit 1
fi
cd "$workdir"
if [ -n "$pe_version" ]; then
pe_family=$(echo "$pe_version" | grep -oE '^[0-9]+\.[0-9]+')
fi
if [[ "$pe_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
release_version='true'
base_url="https://artifactory.delivery.puppetlabs.net/artifactory/generic_enterprise__local/archives/releases/${pe_version}"
elif [[ "$pe_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+$ ]]; then
release_version='true'
base_url="https://artifactory.delivery.puppetlabs.net/artifactory/generic_enterprise__local/archives/internal/${pe_family}"
else
pe_major="${pe_family%.*}"
# 2021 dev builds are actually all under 'main'
if [ "$pe_major" -eq 2021 ]; then
pe_branch='main'
fi
base_url="https://artifactory.delivery.puppetlabs.net/artifactory/generic_enterprise__local/${pe_branch:-$pe_family}/ci-ready"
fi
if [ -z "$pe_version" ]; then
pe_version=$(curl "${base_url}/LATEST")
fi
pe_dir="puppet-enterprise-${pe_version}-${platform_tag}"
if [ "$release_version" == 'true' ]; then
pe_tarball="${pe_dir}.tar.gz"
else
pe_tarball="${pe_dir}.tar"
fi
pe_tarball_url="${base_url}/${pe_tarball}"
wget_code=0
tar_code=0
set +e
if [ ! -f "${pe_tarball}" ]; then
wget -nv --quiet "${pe_tarball_url}"
wget_code=$?
fi
if [ ! -d "${pe_dir}" ]; then
tar -xf "${pe_tarball}"
tar_code=$?
fi
set -e
if [ "$wget_code" != 0 ] || [ "$tar_code" != 0 ]; then
echo "{
\"_error\": {
\"msg\": \"Failed either to wget or untar the PE tarball from ${pe_tarball_url}\",
\"kind\": \"enterprise_tasks/get_pe\",
\"details\": {
\"wget_exit_code\": \"${wget_code}\",
\"tar_exit_code\": \"${tar_code}\",
\"pe_tarball_url\": \"${pe_tarball_url}\",
\"pe_tarball\": \"${pe_tarball}\",
\"pe_dir\": \"${pe_dir}\"
}
}
}"
exit 1
fi
echo "{
\"workdir\":\"${workdir}\",
\"pe_dir\":\"${workdir}/${pe_dir}\",
\"pe_tarball\":\"${pe_tarball}\",
\"pe_tarball_url\":\"${pe_tarball_url}\",
\"pe_family\":\"${pe_family}\",
\"pe_version\":\"${pe_version}\"
}"
${workdir}/${pe_dir}/puppet-enterprise-installer -y -c ${workdir}/${pe_dir}/conf.d/pe.conf
echo "install complete"
puppet infra console_password --password puppetlabs
echo "admin password changed to 'puppetlabs'"
| true |
a09af7857dfbce9e9ff2a28e6ccd441b783a1c43 | Shell | mahilet/ImagineCupRepo2018 | /nsc-practicum-winter2018-api/Tests/test_workflow_endpoints.sh | UTF-8 | 3,000 | 3.125 | 3 | [] | no_license | #!/bin/bash
# set output file for test results and clear/init the file
test_results="results.txt"
> test_results
# set base server path for swagger mock api
swagger_server="https://virtserver.swaggerhub.com/kari_bullard/Cloud-Practicum/1.0.4/"
# set base server path for dev api
development_server="dev-server-someday"
# endpoint name
workflow_endpoint="workflow/"
# task endpoint calls
# --include outputs both headers and body of response. Some of the calls from
# POST workflow/
echo $"Test POST workflow/\n" >> ${test_results}
json_payload_post="{ \"id\": \"ECCD3A6ED4C54D2D-A28C9CDD28F6417E\", \"name\": \"Cloud\", \"description\": \"Onboard a Cloud Developer.\", \"tasks\": [ { \"id\": \"ECCD3A6ED4C54D2DA28C9CDD28F6417E\", \"name\": \"Setup Slack\", \"description\": \"Add user to UST Slack account.\", \"employeeInstructions\": \"ECCD3A6E-D4C5-4D2D-A28C-9CDD28F6417E.md\", \"managerInstructions\": \"ECCD3A6E-D4C5-4D2D-A28C-9CDD28F6417E.md\" } ]}"
curl --request POST --include --insecure --header "accept: application/json" --header "access-token: a thing" \
--header "Content-Type: application/json" --data "${json_payload_post}" \
"${swagger_server}${workflow_endpoint}" \
>> ${test_results}
echo $"\n" >> ${test_results}
# GET workflow/{id}
echo $"Test GET workflow/id\n" >> ${test_results}
# does GET by default, no need for --request
curl --include --insecure --header "accept: application/json" --header "access-token: a thing" \
"${swagger_server}${workflow_endpoint}1" \
>> ${test_results}
# curl --include --insecure --header "accept: application/json" --header "access-token: a thing" \
# "${swagger_server}${workflow_endpoint}2" \
# >> ${test_results}
echo $"\n" >> ${test_results}
# PUT workflow/{id}
echo $"Test PUT workflow/id \n" >> ${test_results}
json_payload_put="{ \"id\": \"ECCD3A6ED4C54D2D-A28C9CDD28F6417E\", \"name\": \"Cloud\", \"description\": \"Onboard a Cloud Developer.\", \"tasks\": [ { \"id\": \"ECCD3A6ED4C54D2DA28C9CDD28F6417E\", \"name\": \"Setup Slack\", \"description\": \"Add user to UST Slack account.\", \"employeeInstructions\": \"ECCD3A6E-D4C5-4D2D-A28C-9CDD28F6417E.md\", \"managerInstructions\": \"ECCD3A6E-D4C5-4D2D-A28C-9CDD28F6417E.md\" } ]}"
task_id="ECCD3A6ED4C54D2DA28C9CDD28F6417E"
curl --request PUT --include --insecure --header "accept: application/json" --header "access-token: a thing" \
--header "Content-Type: application/json" --data "${json_payload_post}" \
"${swagger_server}${workflow_endpoint}${task_id}" \
>> ${test_results}
echo $"\n" >> ${test_results}
# DELETE workflow/{id}
echo $"Test DELETE workflow/id \n" >> ${test_results}
curl --request DELETE --include --insecure --header "accept: application/json" --header "access-token: a thing" \
"${swagger_server}${workflow_endpoint}${task_id}" \
>> ${test_results}
echo $"\n" >> ${test_results}
# archive test results
timestamp=$(date +%s)
mv ./results.txt ./results/${timestamp}_workflow_test_results.txt
| true |
f7429a1ea377ebb7f1238306903c1ff9993a91b9 | Shell | lithind/Networking_Lab | /15shellpgm/equalornot.sh | UTF-8 | 153 | 3.375 | 3 | [] | no_license | echo "enter the number 1 :"
# Read two integers and report whether they are equal.  -r stops read
# from mangling backslashes in the input.
read -r a
echo "enter the number 2 :"
read -r b
# Quote the expansions so empty or whitespace-only input cannot break
# the test-expression syntax; -eq performs an integer comparison.
if [ "$a" -eq "$b" ]
then
echo "number is equal "
else
echo "number is not equal"
fi
| true |
3d6f695be9f8c87184f5f2fda8ec3757e6f379ea | Shell | delkyd/alfheim_linux-PKGBUILDS | /dott/PKGBUILD | UTF-8 | 1,039 | 2.609375 | 3 | [] | no_license | # Maintainer: enricostn <enricostn at gmail dot com>
pkgname=dott
pkgver="20161228"
pkgrel=1
epoch=
pkgdesc="Day of the Tentacle Remastered"
arch=('i686' 'x86_64')
url="http://dott.doublefine.com"
license=('unknown')
groups=('games')
depends_i686=('mesa-libgl' 'alsa-lib')
depends_x86_64=('lib32-mesa-libgl' 'lib32-alsa-lib' 'lib32-libudev0-shim')
makedepends=('tar')
checkdepends=()
optdepends=()
provides=()
conflicts=()
replaces=()
backup=()
options=()
install=
changelog="${pkgname}.changelog"
_archivename="DayOfTheTentacle_Linux_NoSteam_v1.4.1"
source=("local://${_archivename}.tar.gz"
"${pkgname}.desktop"
"${pkgname}.changelog")
noextract=("${_archivename}.tar.gz")
md5sums=('667b2a8a082702832242321515e55e70')
validpgpkeys=()
package() {
mkdir -p "${pkgdir}/opt/${pkgname}/"
tar zxvf "${source}" -C "${pkgdir}/opt/${pkgname}"
mkdir -p "${pkgdir}/usr/bin"
ln -s "/opt/${pkgname}/Dott" "${pkgdir}/usr/bin/${pkgname}"
install -D -m644 "${srcdir}/${pkgname}.desktop" "${pkgdir}/usr/share/applications/${pkgname}.desktop"
}
| true |
2d2fce1e1a21994d3cfda82116bab3e97307e55f | Shell | sonos/tract | /.travis/cost_model_task_build.sh | UTF-8 | 1,894 | 3.203125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] | permissive | #!/bin/sh
set -ex
ARCH=$1
ID=$2
case $ARCH in
aarch64)
MUSL_TRIPLE=aarch64-linux-musl
RUST_TRIPLE=aarch64-unknown-linux-musl
PLATFORM=aarch64-unknown-linux-musl
;;
armv7)
MUSL_TRIPLE=armv7l-linux-musleabihf
RUST_TRIPLE=armv7-unknown-linux-musleabihf
PLATFORM=armv7-unknown-linux-musl
;;
*)
exit "Can't build with musl for $ARCH"
;;
esac
rustup update
rustup target add $RUST_TRIPLE
#curl -s https://musl.cc/${MUSL_TRIPLE}-cross.tgz | tar zx
curl -s https://s3.amazonaws.com/tract-ci-builds/toolchains/${MUSL_TRIPLE}-cross.tgz | tar zx
MUSL_BIN=`pwd`/${MUSL_TRIPLE}-cross/bin
export PATH=$MUSL_BIN:$PATH
export TARGET_CC=$MUSL_BIN/${MUSL_TRIPLE}-gcc
RUST_TRIPLE_ENV=$(echo $RUST_TRIPLE | tr 'a-z-' 'A-Z_')
export CARGO_TARGET_${RUST_TRIPLE_ENV}_CC=$TARGET_CC
export CARGO_TARGET_${RUST_TRIPLE_ENV}_LINKER=$TARGET_CC
( cd linalg/cost_model ; cargo build --target $RUST_TRIPLE --release )
TASK_NAME=cost-model-dataset-$ID
mkdir $TASK_NAME
mv linalg/cost_model/target/${RUST_TRIPLE}/release/cost_model $TASK_NAME
echo "export TIMEOUT=$((86400*4))" > $TASK_NAME/vars
echo "#!/bin/sh" > $TASK_NAME/entrypoint.sh
echo "mkdir product" >> $TASK_NAME/entrypoint.sh
echo "./cost_model ds --size 10000 product/$TASK_NAME.txt" >> $TASK_NAME/entrypoint.sh
# echo "./cost_model ds --size 2000 -k 128 -n 16 product/$TASK_NAME-small-k-tiny-n.txt" >> $TASK_NAME/entrypoint.sh
# echo "./cost_model ds --size 5000 -m 1-512 -k 16,64,256 -n 1-20 product/$TASK_NAME-multiple-k-tiny-n.txt" >> $TASK_NAME/entrypoint.sh
# echo "./cost_model ds --size 1000 -m 1-512 -k 256,1024 -n 1-512 product/$TASK_NAME-bigmn" >> $TASK_NAME/entrypoint.sh
chmod +x $TASK_NAME/entrypoint.sh
tar czf $TASK_NAME.tgz $TASK_NAME
if [ -n "$AWS_ACCESS_KEY_ID" ]
then
aws s3 cp $TASK_NAME.tgz s3://tract-ci-builds/tasks/$PLATFORM/$TASK_NAME.tgz
fi
| true |
bb21de539de82e842bb44839e99be35d1fb77bdf | Shell | Ybrazel/Dotfiles | /rsync/setup.sh | UTF-8 | 411 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
# Python dependencies of the zrsync daemon.
pip install pexpect
pip install watchdog
yum install -y rsync
CONFIG_PATH="/etc/zrsync"
SCRIPT_PATH="/var/zrsync"
# First-run install: copy config and daemon script into place only when
# their directories do not exist yet (do not clobber an existing setup).
if [ ! -d $CONFIG_PATH ] ; then
mkdir $CONFIG_PATH
cp ./config.yml $CONFIG_PATH
fi
if [ ! -d $SCRIPT_PATH ] ; then
mkdir $SCRIPT_PATH
cp ./zrsync.py $SCRIPT_PATH
fi
# Register and start the systemd unit.
cp ./zrsync.service /usr/lib/systemd/system
systemctl enable zrsync
systemctl start zrsync
| true |
f81a9b38c3b56b74051670d28bdf5d41fa018018 | Shell | itsmesanju/testCookies | /checkLogPattern/logalert.sh | UTF-8 | 294 | 2.921875 | 3 | [] | no_license | mkdir -p /root/monitoring/
# Watch syslog for "MY PATTERN" and record the first hit per detection.
# -F (instead of -f) keeps following across log rotation; -n0 skips
# existing lines.  IFS= and -r preserve the raw line text exactly.
tail -Fn0 /var/log/messages | while IFS= read -r line
do
  # grep -q tests for the pattern directly in the condition, replacing
  # the old `grep ...; if [ $? = 0 ]` two-step.
  if echo "$line" | grep -q "MY PATTERN"
  then
    echo "Pattern detected at $(date) ">/root/monitoring/data.txt
    echo "$line" >>/root/monitoring/data.txt
    # More actions come here; send mail etc.
  fi
done
| true |
7fc1af0ec0208b7dd9f461b9eab9b89ed4f2fbf7 | Shell | arximboldi/dotfiles | /bash/.bash.d/history.bash | UTF-8 | 395 | 2.921875 | 3 | [] | no_license | #
# Make bash history work on multiple tabs and be very big.
# http://unix.stackexchange.com/a/48116
#
HISTSIZE=100000
HISTFILESIZE=$HISTSIZE
HISTCONTROL=ignorespace:ignoredups
bash_history_sync() {
# Append this shell's new entries to $HISTFILE ...
builtin history -a
# ... re-apply the size cap so the file is truncated consistently ...
HISTFILESIZE=$HISTSIZE
# ... then clear the in-memory list and reload the merged file, so
# every open tab sees the combined history.
builtin history -c
builtin history -r
}
# Wrapper over the `history` builtin: sync with the shared history file
# first, then forward all arguments to the real builtin.
history() {
bash_history_sync
builtin history "$@"
}
PROMPT_COMMAND=bash_history_sync
| true |
69ca5a110ff07eea4fa0eb25ee715c3c63c4461f | Shell | msdz/4K-IPTV | /iptv72.sh | UTF-8 | 1,655 | 2.96875 | 3 | [] | no_license | #!/bin/sh
#echo 欢迎使用本脚本
# NOTE: throughout this script, `ping 127.0.0.1 -c N > /dev/null` is used
# as a crude N-ish-second delay between user-facing messages.
ping 127.0.0.1 -c 2 > /dev/null
echo 本脚本只适用于merlin 7.2改版固件,其他固件请勿运行本脚本
ping 127.0.0.1 -c 2 > /dev/null
echo 仅支持7.1及以上固件
ping 127.0.0.1 -c 2 > /dev/null
echo 本提示保留5秒,如果您不是merlin改版固件请立刻按下Ctrl+C中止本脚本
ping 127.0.0.1 -c 10 > /dev/null
echo 本脚本适用于上海电信需要AB面认证的IPTV,其他地区请按实际情况修改脚本
ping 127.0.0.1 -c 2 > /dev/null
echo 使用前请确认光猫已经设置桥接
ping 127.0.0.1 -c 2 > /dev/null
echo 使用脚本前确认SS是工作状态或者是关闭状态,否则会导致脚本运行失败
ping 127.0.0.1 -c 2 > /dev/null
# Blank line
echo
# Progress message
echo 正在修改ss dnsmasq配置文件
# Change into the ss dnsmasq rules directory
cd /koolshare/ss/rules
# Remove the old config file
echo 正在删除旧文件
rm -rf dnsmasq.postconf
# Download the new config file
echo 正在下载新文件
wget -q --no-check-certificate https://raw.githubusercontent.com/msdz/4K-IPTV/master/dnsmasq.conf
# Rename the new config file into place
mv dnsmasq.conf dnsmasq.postconf
# Set permissions
chmod -R 0755 dnsmasq.postconf
# Done message
echo 成功
# Delay
ping 127.0.0.1 -c 2 > /dev/null
# Blank line
echo
# Progress message
echo 正在重启dnsmasq
# Restart dnsmasq
service restart_dnsmasq > /dev/null
# Done message
echo dnsmasq已重启
# Delay
ping 127.0.0.1 -c 2 > /dev/null
# Blank line
echo
# Return to the home directory
cd
# Success message
ping 127.0.0.1 -c 2 > /dev/null
echo 脚本运行完成,如果光猫已经设置完毕,您可以把IPTV接在Lan1-4任意接口
ping 127.0.0.1 -c 2 > /dev/null
| true |
d4b5c64204cf03f7455cece6024af540eb406d52 | Shell | YogSottot/bitrix-env-rpm | /SOURCES/bitrix-env/etc/ansible/roles/monitor/files/process_status_ | UTF-8 | 2,282 | 3.796875 | 4 | [] | no_license | #!/bin/sh
# -*- sh -*-
: << =cut
=head1 NAME
process_status - Plugin to monitor resource usage by processes.
=head1 ABOUT
This plugin requires munin-server version 1.2.5 or 1.3.3 (or higher).
This plugin is backwards compatible with the old processes-plugins found on
SunOS, Linux and *BSD (i.e. the history is preserved).
=head1 CONFIGURATION
list of process names that must be processed
This configuration snipplet is an example with the defaults:
[process_status]
env.warning 80
env.critical 90
=head1 AUTHOR
Copyright (C) 2014 ksh
=head1 LICENSE
GNU General Public License, version 2
=begin comment
no comment
=end comment
=head1 MAGIC MARKERS
=begin comment
These magic markers are used by munin-node-configure when installing
munin-node.
=end comment
#%# family=auto
#%# capabilities=autoconf
=cut
. /usr/share/munin/plugins/plugin.sh || exit 1
PROCESS=${0##*process_status_}
# select process performance value and return it to munin
# Print the summed CPU and memory usage of every process whose comm
# field matches $PROCESS, in munin "field.value" format.  Exits 2 when
# the process snapshot cannot be taken.
process_stats(){
    local cpun tmp pmem pcpu
    # Logical CPU count; pcpu is divided by it so the value stays within
    # 0-100% regardless of core count.  (grep reads the file directly
    # instead of the old `cat | grep`.)
    cpun=$(grep -c "^processor" /proc/cpuinfo)
    # Unpredictable temp file name instead of the old fixed path.
    tmp=$(mktemp /dev/shm/process_status_XXXXXX) || exit 2
    ps axo %mem,pcpu,comm,args > "$tmp" 2>&1 || { rm -f -- "$tmp"; exit 2; }
    pmem=$(grep -v grep "$tmp" | grep " $PROCESS " | \
        awk '{sum+=$1} END {printf "%.2f",sum}')
    pcpu=$(grep -v grep "$tmp" | grep " $PROCESS " | \
        awk -v cpu="$cpun" '{sum+=$2} END {printf "%.2f",sum/cpu}')
    echo "pcpu.value $pcpu"
    echo "pmem.value $pmem"
    rm -f -- "$tmp"
}
# graph information
# Emit the munin "config" output: two graph definitions (CPU and memory
# usage of $PROCESS), each with a single data field.
# NOTE(review): munin's multigraph directive is normally written as
# `multigraph <name>` (with a space); the underscore form printed here
# ("multigraph_<proc>_cpu") may not be parsed as a section header -
# confirm against a working munin-node.
process_graph(){
printf "multigraph_%s_cpu\n" "$PROCESS"
printf "graph_title CPU usage by %s\n" "$PROCESS"
printf "graph_vlabel %s\n" '%';
printf "graph_category processes\n\n";
printf "pcpu.label CPU %s\n" '%';
printf "pcpu.min 0\n";
printf "pcpu.draw LINE1\n";
printf "pcpu.info $PROCESS CPU Usage\n\n";
printf "multigraph_%s_mem\n" "$PROCESS"
printf "graph_title Memory usage by %s\n" "$PROCESS"
printf "graph_vlabel %s\n" '%';
printf "graph_category processes\n\n";
printf "pmem.label Mem %s\n" '%';
printf "pmem.min 0\n";
printf "pmem.draw LINE1\n";
printf "pmem.info $PROCESS Memory Usage\n\n";
}
case "$1" in
"config")
process_graph
;;
*)
process_stats
;;
esac
exit 0
| true |
6148b9f690e82d9bb774c424ecc3ca5e21a27272 | Shell | mpage/dotfiles | /bashrc | UTF-8 | 2,813 | 3 | 3 | [] | no_license | # For cowsay
export PATH="/usr/games:$PATH"
# Ruby
export PATH="$HOME/.rbenv/bin:/usr/local/heroku/bin:$PATH"
export PATH="/usr/local/bin:$PATH"
# Personal binaries
export PATH="$HOME/bin:$PATH"
# Haskell
export PATH="$HOME/bin/ghc/bin:$PATH"
export PATH="$HOME/.cabal/bin:$PATH"
# Homebrew
export PATH="/usr/local/bin:$PATH"
export PATH="/Users/mpage/.local/bin:$PATH"
# Golang
export GOPATH="$HOME/src/go"
export GOBIN="$GOPATH/bin"
export PATH="$GOBIN:$PATH"
set -o emacs
if [ -f /etc/environment ]; then
. /etc/environment
fi
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Prompt
PS1="> "
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
# Make Control-v paste, if in X and if xclip available - Josh Triplett
if [ -n "$DISPLAY" ] && [ -x /usr/bin/xclip ] ; then
# Work around a bash bug: \C-@ does not work in a key binding
bind '"\C-x\C-m": set-mark'
# The '#' characters ensure that kill commands have text to work on; if
# not, this binding would malfunction at the start or end of a line.
bind 'Control-v: "#\C-b\C-k#\C-x\C-?\"$(xclip -o -selection c)\"\e\C-e\C-x\C-m\C-a\C-y\C-?\C-e\C-y\ey\C-x\C-x\C-d"'
fi
export EDITOR="mg"
export CLICOLOR=1
export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
if [ -z "$NOCOWSAY" ]; then
cowsay -f ~/cows/ackcat.cow "`fortune`" | lolcat
fi
# OPAM configuration
. /Users/mpage/.opam/opam-init/init.sh >> /tmp/opam_init.out 2>> /tmp/opam_init.out
export SSH_AUTH_SOCK=/tmp/ssh-agent.sock
| true |
7d68614cedac68570de619844af430c51d6f38ac | Shell | moutainhigh/eis | /standard/script/install_front_current.sh | UTF-8 | 2,916 | 3.0625 | 3 | [] | no_license | #!/bin/sh
COPY_STANDARD_CONFIG=true;
SRCDIR=~/svn/v4/CURRENT
cd $SRCDIR
if [ ! $1 ]
then
echo "Usage: script install destination..."
exit 1
fi
export LC_ALL=en_US.UTF-8
DEST=$1
if [ ! -d $DEST ]
then
echo "Destination $DEST not exist.";
exit 2
fi
DEST_APP=$DEST/webapps/v4-front
cd standard
svn up
ant -q jar
if [ -d ../front ];then
cd ../front
svn up
fi
if [ -d ../gallery ];then
cd ../gallery
svn up
rm -f *.jar
ant -q jar
cp eis-v4-current-gallery.jar $DEST_APP/WEB-INF/lib/
cp src/bean-v4-gallery.xml $DEST_APP/WEB-INF/classes/
else
if [ -d ../front ];then
rm ../front/src/com/maicard/front/controller/jrsj/processor/JrsjZazhiNodeProcessorImpl.java
rm ../front/src/com/maicard/front/controller/jrsj/processor/JrsjCartNodeProcessorImpl.java
rm ../front/src/com/maicard/front/api/DownloadController.java
fi
fi
if [ -d ../wpt ];then
cd ../wpt
svn up
rm -f *.jar
ant -q jar
if [ ! -e eis-wpt.jar ];then
echo "WPT module ERROR, jar package NOT found!!!"
exit 3
fi
cp eis-wpt.jar $DEST_APP/WEB-INF/lib/
if [ -d WebContent/config/ ];then
mkdir -p $DEST_APP/WEB-INF/classes/config/
cp WebContent/config/*.xml $DEST_APP/WEB-INF/classes/config/
COPY_STANDARD_CONFIG=false;
fi
fi
if [ -d ../ec ];then
cd ../ec
svn up
rm -f *.jar
ant -q jar
cp eis-v4-current-ec.jar $DEST_APP/WEB-INF/lib/
fi
if [ -d ../o2o ];then
cd ../o2o
svn up
rm -f *.jar
ant -q jar
cp eis-v4-current-o2o.jar $DEST_APP/WEB-INF/lib/
fi
if [ -d ../game ];then
cd ../game
svn up
rm -f *.jar
ant -q clean
ant -q jar
if [ ! -e eis-game.jar ];then
echo "Game module ERROR, jar package NOT found!!!"
exit 3
fi
cp eis-game.jar $DEST_APP/WEB-INF/lib/
if [ -d WebContent/config/ ];then
mkdir -p $DEST_APP/WEB-INF/classes/config/
cp WebContent/config/*.xml $DEST_APP/WEB-INF/classes/config/
COPY_STANDARD_CONFIG=false;
fi
fi
if [ -d ../transaction-system ];then
cd ../transaction-system
svn up
rm -f *.jar
ant -q jar
cp eis-v4-current-ts.jar $DEST_APP/WEB-INF/lib/
fi
if [ -d ../front ];then
cd ../front
ant -q jar
cp eis-v4-current-front.jar $DEST_APP/WEB-INF/lib/
fi
cd ..
# cp standard/lib/* $DEST_APP/WEB-INF/lib/
cp standard/lib/mysql-connector-*.jar $DEST/lib/
cp standard/eis-v4-current-standard.jar $DEST_APP/WEB-INF/lib/
cp standard/src/*.xml $DEST_APP/WEB-INF/classes/
cp standard/src/*.xsd $DEST_APP/WEB-INF/classes/
cp standard/src/uuwise.properties $DEST_APP/WEB-INF/classes/
if [ ! -d $DEST_APP/WEB-INF/classes/config ];then
mkdir -p $DEST_APP/WEB-INF/classes/config
fi
if $COPY_STANDARD_CONFIG;then
echo "Copy standard config xml...";
rsync -au standard/config/my* $DEST_APP/WEB-INF/classes/config/ --exclude .svn --exclude .svn/*
fi
cd $DEST_APP
svn up
cd $DEST_APP/WEB-INF/lib/
svn up
rm -f $DEST_APP/WEB-INF/lib/servlet-api.jar
rm -f $DEST_APP/WEB-INF/lib/mysql-connector-*.jar
rm -f $DEST_APP/WEB-INF/lib/tomcat-jdbc.jar
export LC_ALL=zh_CN.GBK
| true |
74c92f4a800dd1168eb04e95b95930633e4d9e4c | Shell | Fesuszilard/Tododb_PSQL_BashScript | /todo.sh | UTF-8 | 676 | 3.1875 | 3 | [] | no_license | #!/bin/bash
. list.sh
. add.sh
. mark.sh
. delete.sh
main(){
	# Dispatch the first CLI argument to the matching handler sourced
	# from list.sh/add.sh/mark.sh/delete.sh.  An unknown or missing
	# command matches nothing and falls through silently, exactly like
	# the original if/elif chain.
	case "$1" in
		list-users)      list_users ;;
		list-todos)      list_todos ;;
		list-user-todos) list_user_todos "$2" ;;
		add-user)        add_user "$2" ;;
		add-todo)        add_todo "$2" "$3" ;;
		mark-todo)       mark-todo "$2" ;;
		unmark-todo)     unmark-todo "$2" ;;
		delete-todo)     delete-todo "$2" ;;
		delete-done)     delete-done ;;
	esac
}
# Run main only when the script is executed directly, not when sourced.
# NOTE(review): `2> /dev/null` hides *all* stderr from the handlers,
# including real failures - confirm this blanket suppression is intended.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]
then
main "$@" 2> /dev/null
fi
| true |
f4b7b654b76c29ad9e46e00a9c969b148cc5b22a | Shell | spwhitt/dotfiles | /home/.config/waybar/waybar-vpn.sh | UTF-8 | 465 | 2.828125 | 3 | [] | no_license | # VPN Status - is tun0 working?
# test -d /proc/sys/net/ipv4/conf/tun0
# Not set by waybar
export PATH="$PATH:/run/wrappers/bin:/run/current-system/sw/bin/"
# Killswitch Status
# -w 10: Wait up to 10 seconds on lock conflict
# -n: Don't attempt to resolve hostnames, etc. speeds up -L
# Report SAFE when the nixos-vpn-killswitch rule is present in the
# iptables OUTPUT chain, OPEN otherwise.  The three newline-separated
# printf fields are presumably text/tooltip/class for a waybar custom
# module - confirm against the waybar config that consumes this script.
if sudo iptables -w 10 -n -L OUTPUT | grep nixos-vpn-killswitch > /dev/null; then
printf "SAFE\nKillswitch Enabled\nsafe"
else
printf "OPEN\nKillswitch Disabled\nwarning"
fi
| true |
9eeb0cf32d109da66fcb3693f3dba8834a892857 | Shell | po-miyasaka/dotfiles-cloned | /bootstrap.sh | UTF-8 | 4,079 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# macOS workstation bootstrap: installs Homebrew, Xcode, Ansible, Ruby
# toolchain, shell/editor plugins and git configuration. Steps are
# strictly ordered (later steps depend on tools installed earlier).
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do
    sudo -n true
    sleep 60
    kill -0 "$$" || exit
done 2>/dev/null &
echo "Starting bootstrapping"
############ Homebrew ###############
# Check for Homebrew, install if we don't have it
# NOTE(review): the ruby-based Homebrew installer is the legacy method;
# upstream now ships a bash installer — confirm the URL still works.
if test ! "$(command -v brew)"; then
    echo "Installing homebrew..."
    ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
brew install mas # Install Mac App Store Homebrew integration
# If no CLI tools, then install
if test ! "$(command -v gcc)"; then
    sudo xcode-select --install
    sudo xcodebuild -license accept
fi
# If no xcode, then install
if ! [ -d "/Applications/Xcode.app" ]; then
    mas install 497799835 # Xcode ID
    if ! [ -d "/Applications/Xcode.app" ]; then
        echo "No Xcode installed"; exit 1
    fi
fi
# If no ansible, then install
if test ! "$(command -v ansible)"; then
    brew install ansible
fi
# Ansible
ansible-playbook ansible/main_playbook.yml --ask-become-pass
# TODO: Move the below, then xcode stuff to ansible playbooks
# Select directory and run brewfile
# The Brewfile is generated automatically through 'brew bundle dump'
mv ~/dotfiles ~/.dotfiles
cd ~/.dotfiles/ || { echo "Could not cd dotfiles"; exit 1; }
# Interactive yes/no loop: install Brewfile packages on request.
while true; do
    read -r -p "Do you want to install brew packages? " yn
    case $yn in
    [Yy]*)
        brew update
        brew upgrade
        echo "Installing packages..."
        brew bundle
        brew cleanup
        break
        ;;
    [Nn]*)
        echo "Skipping brew packages"
        break
        ;;
    *) echo "Please input yes or no" ;;
    esac
done
############ LLDB ###############
mkdir -p ~/.lldb
# Subshell keeps the clone from changing this script's working directory.
(cd ~/.lldb && git clone git@github.com:DerekSelander/LLDB.git)
############ Ruby ###############
# Setting for RBENV/Ruby
echo "Setting up Ruby. Please make sure to check that the version of bundler matches the version installed by rbenv"
eval "$(rbenv init -)"
echo "Installing Ruby versions"
rbenv install 2.5.1
rbenv install 2.6.0
# Installs bundler for system version of Ruby, may cause
# headaches and issues if the rbenv version doesn't have bundler installed
# XCPretty required for vim-xcode and nice for running xcodebuild in command line
sudo gem install bundler xcpretty
########## Other ################
# Zsh plugins
antibody bundle <~/.dotfiles/zsh/.zsh_plugins.txt >~/.zsh_plugins.sh
# Tmux plugins
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
# Dependencies for Deoplete on other Pythonic stuff
pip3 install -r pip-requirements.txt
# Golang dependencies
python3 go/install.py
# JS
npm install -g prettier
npm install -g remark-lint
# Make iTerm/Terminal "Last login:" message from Login utility stop appearing
touch ~/.hushlogin
# Git aliases
git config --global alias.unstage 'reset HEAD --'
git config --global alias.last 'log -1 --stat HEAD'
git config --global alias.shortlog 'log -4 --pretty --oneline'
git config --global alias.current 'rev-parse --abbrev-ref HEAD'
# amend w/ previous commit name
git config --global alias.amend 'commit --amend --no-edit'
git config --global core.excludesfile ~/.gitignore_global
# Set pager to delta, a nice Rust differ
git config --global core.pager "delta --dark"
# Set GHQ
git config --global ghq.root ~/dev
echo "Setting symlinks"
python3 make_symlinks.py
echo "Please update hosts file with contents of https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
# We installed the new shells, now we have to activate them
echo "Adding newly installed shells to the list of allowed shells"
# Prompts for password
sudo bash -c 'echo /usr/local/bin/bash >> /etc/shells'
sudo bash -c 'echo /usr/local/bin/zsh >> /etc/shells'
# Gives user choice for preferred shell
while true; do
    read -r -p "Do you want Zsh to be your default shell? " yn
    case $yn in
    [Yy]*)
        chsh -s /usr/local/bin/zsh
        echo "Bootstrapping complete"
        zsh
        break
        ;;
    [Nn]*)
        echo "Bootstrapping complete"
        exit
        ;;
    *) echo "Please input yes or no" ;;
    esac
done
| true |
eedc2abb8f55124c18b738b950e2dd8494e52da0 | Shell | Jacarte/HPC_2 | /stream/run_stream.sh | UTF-8 | 377 | 3.078125 | 3 | [] | no_license |
# Thread counts and OpenMP schedule kinds to benchmark.
thread_counts="1 2 4 8 12 16 20 24 28 32"
schedule_kinds="static guided dynamic"

# For each schedule kind, rebuild the STREAM benchmark at every thread
# count and emit the Copy bandwidth as a "data_<sched> = [n, bw, ...]"
# line suitable for pasting into a plotting script.
for sched in $schedule_kinds
do
    # SCHEDULE is consumed by the Makefile at compile time.
    export SCHEDULE="schedule($sched)"
    printf 'data_%s = [' "$sched"
    for nthreads in $thread_counts
    do
        export OMP_NUM_THREADS=$nthreads
        make clean > /dev/null 2>&1
        make stream.out > /dev/null 2>&1
        # Second column of the "Copy" row is the bandwidth figure.
        bw=$(./stream.out | grep Copy | awk '{ print $2 }')
        echo -n $nthreads, $bw,
    done
    echo "]"
done
00c5c1e29e4c4798a3091336564bf5ef3a55d009 | Shell | rufaswan/CX | /junk/AMY/cpp.sh | UTF-8 | 484 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Generate C++ header/source skeletons for each name given on the
# command line (e.g. `cpp.sh data-vmem` writes data-vmem.hpp/.cpp with a
# DATA_VMEM include guard). `###`/`@@@` are placeholders to fill in.
[ $# = 0 ] && exit
for i in "$@"; do
	# data-vmem => DATA_VMEM
	# The tr sets are quoted: unquoted [a-z]/[0-9A-Z] are shell globs and
	# would expand to matching single-character filenames in the cwd.
	up=$(printf "%s" "$i" | tr 'a-z' 'A-Z' | tr -c '0-9A-Z' '_' )

	# NOTE(review): the .hpp is overwritten (>) while the .cpp is appended
	# (>>), so re-running duplicates the .cpp stub — confirm that the
	# append is intentional before changing it.
	[ -f "$i".hpp ] || touch "$i".hpp
	cat << _HPP > "$i".hpp
#ifndef $up
#define $up
#include ""
namespace ###
{
	class @@@
	{
	public:
		@@@();
		~@@@();
	protected:
	private:
	};
}
#endif
_HPP
	[ -f "$i".cpp ] || touch "$i".cpp
	cat << _CPP >> "$i".cpp
#include "$i.hpp"
namespace ###
{
	@@@::@@@() {}
	@@@::~@@@() {}
}
_CPP
done
| true |
26cda24b35810d6464f89a99704efefbfcc019c7 | Shell | kenferrara/pbis-open | /build/mk-make | UTF-8 | 1,911 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Controlled only by:
#
# BUILD_DEBUG
# BUILD_MKOUT_ROOT
# BUILD_OS_TYPE

# Print the usage text and abort (usage always exits 1).
usage()
{
    cat <<EOF
usage: `basename $0` [options]

  Runs make in BUILD_MKOUT_ROOT/{debug,release}
  as determined by whether BUILD_DEBUG is defined.

  Unrecognized options are passed through to make.
  MAKE can be defined to override the default make.

  If defined, BUILD_OS_TYPE is used to figure out
  a default for -j flags.

  options:

    --help      This help.
    --mkhelp    Show make help (can use -- --help instead).
    --noj       Skip figuring out -j to pass into make.
    --          Pass remaining options through to make (used
                to escape any options listed above).

EOF
    exit 1
}

# Allow the caller to override the make binary.
if [ -z "${MAKE}" ]; then
    MAKE=make
fi

OPT_J=true

# Parse options. Anything unrecognized (and everything from -- on) is
# forwarded to make; the literal -- stays in "$@", which GNU make
# accepts as an end-of-options marker.
while true ; do
    case "$1" in
        --)
            break
            ;;
        --help)
            usage
            ;;
        --mkhelp)
            exec "${MAKE}" --help
            ;;
        --noj)
            OPT_J=false
            ;;
        *)
            break
            ;;
    esac
    shift
done

if [ -z "${BUILD_MKOUT_ROOT}" ]; then
    # Fixed: the message previously named a non-existent BUILD_MKROOT_ROOT.
    echo "You must define BUILD_MKOUT_ROOT (optionally by running buildenv)"
    exit 1
fi

# BUILD_DEBUG selects the debug output tree, otherwise release.
if [ -n "${BUILD_DEBUG}" ]; then
    TYPE=debug
else
    TYPE=release
fi

# Derive a -j<2*ncpu> default per platform unless --noj was given.
jflag=
if ${OPT_J} ; then
    if [ "${BUILD_OS_TYPE}" = 'linux' ]; then
        jflag="-j$((`cat /proc/cpuinfo | grep '^processor' | wc -l` * 2))"
    elif [ "${BUILD_OS_TYPE}" = 'solaris' ]; then
        num_procs=`/usr/sbin/psrinfo | /usr/bin/wc -l`
        if [ -n "${num_procs}" ]; then
            jflag="-j$((${num_procs} * 2))"
        fi
    elif [ "${BUILD_OS_TYPE}" = 'darwin' ]; then
        num_procs=`sysctl -n hw.ncpu`
        if [ -n "${num_procs}" ]; then
            jflag="-j$((${num_procs} * 2))"
        fi
    fi
fi

DIR="${BUILD_MKOUT_ROOT}/${TYPE}"
# Trace and fail fast for the final hand-off; jflag stays unquoted on
# purpose so an empty value disappears instead of becoming an empty arg.
set -x -e
cd "${DIR}"
exec "${MAKE}" ${jflag} "$@"
| true |
841568ec3832d9e288be9af7f0d3c83296676f1b | Shell | abhinaynag/bash | /regex-bash.txt | UTF-8 | 1,830 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Build the IDS triage report (out.txt) from the raw log in.txt in one
# grouped pass: a single redirect replaces the per-line ">>" appends,
# and grep reads in.txt directly instead of going through cat.
{
    echo -e "**BRIEF** \n \n"
    echo -e -n "* DateTime:  "
    # Newest timestamp: sort descending, keep the first match.
    grep -oP "(\d{2}\/\d{2}\/\d{2}\s\d{2}:\d{2}:\d{2})\s" "in.txt" | sort -r | grep -m1 ""
    echo -e -n "\n\n* Sensors:  "
    grep -oP "\sesm(\w+)?(\W+)?(\w+)?\s" in.txt | sort -u | tr '\n' ', '
    echo -e  "\n \n* Signatures:\n\n"
    grep -oP "((snort:\s\[1:\d+:\d+\]).*: \d{1}\])" in.txt | sort -u | sed "s/^/* /g"
    echo -e "\n \n* PCAP Attached:  "
    echo -e "\n \n**SOURCE**\n\n"
    echo -e -n "* Source IP(s):  "
    grep -oP "\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+\s->" in.txt | grep -oP "\s\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" | sort -u | tr '\n' ', '
    echo -e -n "\n \n* X-Forward-For : (Please Confirm with PCAP) "
    grep -oP "(\d{2}\/\d{2}\/\d{2,4})\s(\d{2}:\d{2}:\d{2})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" in.txt | grep -oP "(\s\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" | sort -u | tr '\n' ', '
    echo -e -n "\n\n* Port: Number of unique ports is: "
    grep -oP "\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+\s->" in.txt | grep -oP ":\d+\s" | sort -u | wc -l
    echo -e -n "\n* Hostname: "
    echo -e "\n\n\n**Destination**\n\n"
    echo -e -n "* IP(s): "
    grep -oP -- "->\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+" in.txt | grep -oP "\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" | sort -u | tr '\n' ', '
    echo -e -n "\n\n* Port: "
    grep -oP -- "->\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+" in.txt | grep -oP ":\d+" | sort -u | sed "s/://g" | tr '\n' ', '
    echo -e "\n\n* Hostname: "
    echo -e "\n\n\n **Add RAW data as a Nugget**"
} > out.txt
| true |
eac7ce8f883fc51fc9a63ed4056453ff7624c054 | Shell | FokhrulAlam/Linux | /Bash Shell Scripting/1.4/hello.sh | UTF-8 | 335 | 3.703125 | 4 | [] | no_license | #! /bin/bash
# Demo: copy every command line argument into an array.
arguments=("$@") # pass as many arguments as you want
# Quote the expansions so arguments are not re-split or glob-expanded
# (shellcheck SC2068/SC2086).
echo "$@"
echo "$#" # printing the number of arguments
# Another way: index the array directly.
#echo "${arguments[0]}" "${arguments[1]}"
# Another way without an array: plain positional parameters.
#echo "$1" "$2" "$3"
| true |
e3f24fdf748d9cc5845746ff4baade77268baf25 | Shell | yangheqingvip/FlinkSupport | /bin/flink-support | UTF-8 | 1,962 | 3.171875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Copyright [2020] [xiaozhennan1995@gmail.com]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Load the deployment environment (defines SUPPORT_* vars, JAVA_RUN and
# CLASS_PATH) from the directory above this script.
source "$(cd "`dirname "$0"`"/..; pwd)"/bin/support-env
echo "-------------------[Support App Env]-------------------"
echo "     SUPPORT_HOME:  "$SUPPORT_HOME
echo "     SUPPORT_CONF_DIR:  "$SUPPORT_CONF_DIR
echo "     SUPPORT_LIB_DIR:  "$SUPPORT_LIB_DIR
echo "     SUPPORT_EXTLIB_DIR:  "$SUPPORT_EXTLIB_DIR
echo "     SUPPORT_PLUGINS_DIR:  "$SUPPORT_PLUGINS_DIR
echo "     SUPPORT_LOG_DIR:  "$SUPPORT_LOG_DIR
echo "----------------------------------------------------------"
# Append logging properties to the java invocation assembled by
# support-env. NOTE(review): the full command line is built as a single
# string and relies on unquoted word-splitting at exec time — arguments
# containing spaces will break; confirm support-env guarantees none.
JAVA_RUN="$JAVA_RUN -Dlog.file=${SUPPORT_LOG_DIR}/flink-support.log -Dlog4j.configurationFile=${SUPPORT_CONF_DIR}/log4j-support.properties"
ENTER_CLASS_NAME=com.weiwan.support.launcher.SupportAppClient
RUN_CMD="$JAVA_RUN -classpath $CLASS_PATH $ENTER_CLASS_NAME"
# Normalize the sub-command ($1) to upper case; remaining args pass through.
CMD=$(echo "${1}" | tr 'a-z' 'A-Z')
ARGS=("${@:2}")
case $CMD in
    (RUN)
        RUN_CMD="$RUN_CMD -cmd run"
        ;;
    (STOP)
        RUN_CMD="$RUN_CMD -cmd stop"
        ;;
    (CANCEL)
        RUN_CMD="$RUN_CMD -cmd cancel"
        ;;
    (SAVEPOINT)
        RUN_CMD="$RUN_CMD -cmd savepoint"
        ;;
    (LIST)
        RUN_CMD="$RUN_CMD -cmd list"
        ;;
    (INIT)
        RUN_CMD="$RUN_CMD -m init"
        ;;
    (*)
        echo "Unknown service '${CMD}'. Usage: flink-support (run|stop|cancel|list|savepoint) [args]."
        exit 1
        ;;
esac
RUN_CMD="$RUN_CMD ${ARGS[@]}"
echo "Starting FlinkSupport ApplicationClient With CMD: $RUN_CMD ."
echo "Client Running pid: $$"
# exec replaces this shell with the JVM; the exit below only runs if
# exec itself fails.
exec $RUN_CMD
exit $?
| true |
4c6769be0d01c779f28b34843d696d51dc76c5df | Shell | breakthewall/restify | /scripts/functions | UTF-8 | 348 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Print the message surrounded by blank lines, in bold when the
# terminal supports it. The message is passed to printf as DATA, not as
# the format string: the previous `printf "$msg"` mangled any message
# containing % sequences (shellcheck SC2059).
function print() {
    local msg=$1
    tput bold
    printf '\n'
    # banner=$(printf '%*s\n' "$((COLUMNS-2))" '' | tr ' ' '-')
    # printf '|'"$banner"'\n'
    # printf '| '"$msg"'\n'
    # printf '|'"$banner"'\n'
    printf '%s\n' "$msg"
    printf '\n'
    tput sgr0
}
| true |
0ab23918e8d9e88bb085fd967c6043962a7f5a62 | Shell | afcarl/dco | /thesis/scripts/runme.sh | UTF-8 | 2,071 | 3.421875 | 3 | [] | no_license | #!/bin/sh
### parameters for the job ###
# (The "#$" lines below are Sun Grid Engine directives, not comments.)
# use current working directory, else defaults to home
#$ -cwd
# parallel environment: symmetric multiprocessing
# use all 4 cores on dblade machines
#$ -pe smp 4
# run for up to a day
#$ -l day
# use only machines in group dblade
#$ -q '*@@dblade'
# use 1-x number of machines
#$ -t 1-40
# mail me when job aborts, begins, exits, and suspends
#$ -m abes
host=`hostname -f`
my_machine="torch.cs.brown.edu"
# clear any old data
if [ -e /ltmp/jakelley/ ] ; then
    rm -rf /ltmp/jakelley/
fi
# set up install-dir
mkdir /ltmp/jakelley
# check exit status
if [ $? != 0 ] ; then
    echo "failed to created /ltmp/jakelley" > /home/jakelley/error."$host"
    exit -1
fi
chmod 0700 /ltmp/jakelley
# Stage a private Hadoop install on node-local scratch space.
cp -r /home/jakelley/hadoop/hadoop-secure/hadoop-install/ /ltmp/jakelley
# check exit status
if [ $? != 0 ] ; then
    echo "failed to install hadoop" > /home/jakelley/hadoop/benchmarking/error."$host"
    rm -rf /ltmp/jakelley
    exit -2
fi
# set up environment
export HADOOP_HOME="/ltmp/jakelley/hadoop-install"
export JAVA_HOME="/usr"
export HADOOP_CONF_DIR="$HADOOP_HOME/conf"
export PATH="$HADOOP_HOME/bin":$PATH
# start datanode and tasktracker as daemons
nohup $HADOOP_HOME/bin/hadoop --config $HADOOP_CONF_DIR datanode 2> /home/jakelley/hadoop/benchmarking/logs/"$host"-data.out &
nohup $HADOOP_HOME/bin/hadoop --config $HADOOP_CONF_DIR tasktracker 2> /home/jakelley/hadoop/benchmarking/logs/"$host"-task.out &
sleep 5
# sleep until both die, then exit
java_procs=`ps -u jakelley | grep "java"`
# if length of string is non-zero, there are some java processes
# NOTE(review): each loop iteration sleeps the full 23h before killing;
# the loop does not poll frequently — it is purely the grid-time
# fail-safe described below. Confirm that is the intended behavior.
while [ -n "$java_procs" ] ; do
    java_procs=`ps -u jakelley | grep "java"`
    # this is a fail-safe in case things are taking too long
    # kill the processes and then clear the data
    sleep 82800 # 23 hours, limit of 24 hours imposed by grid for the queue
    killall -9 -u jakelley java
    rm -rf /ltmp/jakelley
done
#clean up a bit, only if not on my machine
if [ "$host" != "$my_machine" ] ; then
    rm -rf /ltmp/jakelley
fi
echo "exiting"
| true |
84b6821f4104f6dc6330bda0ab8572ae662732c5 | Shell | mike-carey/dns-check | /dns-check | UTF-8 | 6,165 | 4.25 | 4 | [] | no_license | #!/usr/bin/env bash
# Script identity and defaults; set VERBOSE=true in the environment to
# enable log() output.
export __SCRIPT__=$(basename $0)
export __VERSION__=1.0.0
export __VERBOSE__=${VERBOSE:-false}
# External tools required at startup (checked by check_requirements).
# NOTE(review): bash cannot export arrays to child processes, so the
# `export` here only marks the name — the array stays shell-local.
export __REQUIREMENTS__=(dig nmap sed cat jq grep awk)
# Emit an informational line on stderr, but only when verbose logging
# was enabled via __VERBOSE__=true.
function log() {
    [[ "$__VERBOSE__" != "true" ]] || echo "[INFO] $@" >&2
}
# Print an error on stderr and terminate the shell with the given code.
# Usage: error CODE MESSAGE...
function error() {
    local rc=$(($1))
    shift
    echo "[ERROR] $@" >&2
    exit $rc
}
# Succeed iff $1 names a declared shell function (declare -F prints the
# name only; output is discarded, only the status matters).
function function_exists() {
    declare -f -F $1 > /dev/null
}
# Verify every external tool in __REQUIREMENTS__ resolves on PATH and
# abort via error() on the first one missing. The -e/-E/-T flags are
# relaxed so a failing `hash` probe cannot kill the script, then the
# original flag set is restored.
function check_requirements() {
    local saved_flags="$-"
    set +eET
    for cli in ${__REQUIREMENTS__[@]}; do
        if ! hash $cli; then
            error 255 "Missing required cli: $cli"
        fi
    done
    set "-$saved_flags"
}
check_requirements
# Entry point: parse global flags, then dispatch the sub-command to the
# matching dns-check.<cmd> function (default sub-command: check).
function dns-check() {
    local verbose=false
    # config_file is read by the sub-commands through bash dynamic scoping.
    local config_file=${DNS_CHECK_CONFIG:-~/.dns-check.json}
    local args=()
    while [[ -n "${1:-}" ]]; do
        case "$1" in
            --config )
                config_file="$2"
                shift
                ;;
            --verbose )
                verbose=true
                ;;
            --version )
                echo "$__SCRIPT__ $__VERSION__"
                return 0
                ;;
            -- )
                shift
                args+=("$@")
                break
                ;;
            * )
                args+=("$1")
                ;;
        esac
        shift
    done
    # NOTE(review): the unquoted expansion re-splits saved arguments on
    # whitespace (arguments containing spaces are lost); the :- guard
    # keeps `set -u` happy when the array is empty — confirm intended.
    set -- ${args[@]:-}
    command=${1:-}
    if [[ -z "$command" ]]; then
        command=check
    else
        shift
    fi
    # Resolve "<script>.<command>" to a function and forward the rest.
    if function_exists $__SCRIPT__.$command; then
        $__SCRIPT__.$command ${@:-}
        return $?
    fi
    error 1 "Unknown subcommand: $command" >&2
}
# Record HOST -> IPS... in the JSON config: appends the given IPs to the
# .hosts[HOST] array (creating file/array as needed) via jq.
# Uses $config_file from the dns-check() caller (dynamic scoping).
function dns-check.add() {
    if [[ $# -lt 2 ]]; then
        echo "Usage: $__SCRIPT__ add HOST IPS..." >&2
        return 2
    fi

    local host="$1"
    shift

    # Build a JSON array literal, e.g. ["1.2.3.4","5.6.7.8"].
    # NOTE(review): IPs are spliced into the jq program as text, not via
    # --arg; values containing quotes would break the filter — confirm
    # inputs are always bare IP strings.
    local ips=''
    for ip in $@; do
        ips="${ips}\"${ip}\","
    done
    ips="[${ips%,}]"

    log "Adding $host as $ips"

    # Write-to-temp-then-rename keeps the config update atomic.
    local tmp="$config_file".tmp
    trap "rm -f $tmp" EXIT
    (cat "$config_file" 2>/dev/null || echo "{}") | jq '.hosts["'"$host"'"] = (.hosts["'"$host"'"] // []) '"${ips}" > "$tmp"
    mv "$tmp" "$config_file"
}
# Compare the configured host->IP map against live DNS: flags forward
# lookups that return unknown IPs and configured IPs whose reverse
# lookup no longer names the host. Returns 3 when any issue was found.
function dns-check.check() {
    local issues=()
    for host in $((cat "$config_file" 2>/dev/null || echo "{}")| jq -rc '.hosts // {} | keys[]'); do
        ips=()
        for ip in $((cat "$config_file" 2>/dev/null || echo "{}") | jq -rc '.hosts // {} | .["'"$host"'"] // [] | .[]'); do
            ips+=("$ip")
        done
        # Check ips returned from digging are in the configuration
        # (the padded-space substring match tests array membership).
        for ip in $(dig +short $host); do
            if [[ " ${ips[@]} " =~ " $ip " ]]; then
                log "$host has an ip of $ip and it is already in the configuration"
            else
                log "$host has a new ip of $ip"
                issues+=("$host has a new ip of $ip")
            fi
        done
        # Check ips currently in the configuration reverse searched are the host
        for ip in ${ips[@]}; do
            if [[ "$(dig -x $ip +short)" == "$host." ]]; then
                log "$ip still points to $host"
            else
                log "$ip no longer points to $host"
                issues+=("$ip no longer points to $host")
            fi
        done
    done
    if [[ ${#issues[@]} -gt 0 ]]; then
        log "There were issues"
        for issue in "${issues[@]}"; do
            echo "$issue"
        done
        return 3
    fi
    log "There were no issues"
}
# Import hosts from Cloud Foundry security groups: walk the paginated
# /v2/security_groups API, expand every rule destination into concrete
# IPs, drop private ranges, reverse-resolve the rest and add them to
# the config via dns-check.add.
function dns-check.load() {
    # local tmp="$(mktemp -d)"
    # trap "rm -rf $tmp" EXIT
    local tmp=.tmp
    local js_file=$tmp/js
    local ip_file=$tmp/ips

    local next_page='/v2/security_groups'
    while [[ "$next_page" != null ]]; do
        cf curl $next_page > $js_file
        for resource in "$(cat $js_file | jq -rc '.resources[]')"; do
            for ip_range in $(echo "$resource" | jq -rc '.entity.rules[] | .destination'); do
                local origFlags="$-"
                set +eET
                log "Expanding: $ip_range"
                # Declare before assigning: `local x=$(cmd)` would return
                # local's status (always 0) and mask the expansion failure
                # (shellcheck SC2155), making the error branch below dead.
                local expansion
                expansion="$(dns-check.expand "$ip_range")"
                local status=$?
                set -$origFlags
                if [[ $status -ne 0 ]]; then
                    log "Expanding $ip_range failed, moving on"
                    continue
                fi
                for ip in $expansion; do
                    echo $ip >> $ip_file
                done
            done
        done
        next_page="$(cat $js_file | jq -r .next_url)"
        log "Moving onto next page: $next_page"
    done

    # Filter out ignored (private) ranges before resolving.
    local ips="$(cat $ip_file)"
    for ignored in $(dns-check.ignored); do
        ips="$(echo "$ips" | grep -ve "$ignored")"
    done

    for ip in $(echo "$ips" | sort | uniq); do
        # Resolve the dns
        dns=$(dig -x $ip +short)
        if [[ -z "$dns" ]]; then
            log "$ip did not resolve to a DNS entry, moving on"
            continue
        fi

        # add it
        log "Adding $dns $ip"
        dns-check.add $dns $ip
    done
}
###
# @description Expands an IP range
# Accepts a bare IP (echoed back), a last-octet range like 1.2.3.4-9,
# or CIDR notation; validated ranges are enumerated with `nmap -sL`.
# Exits via error() with codes 4-8 on invalid/unresolvable input.
##
function dns-check.expand() {
    local ip_range="$1"

    # Classify the input: a '/' means CIDR, a '-' means last-octet range,
    # otherwise a plain IP.
    local format=ip
    if [[ "${ip_range##*/}" != "$ip_range" ]]; then
        format=cidr
    elif [[ "${ip_range##*-}" != "$ip_range" ]]; then
        format=range
    fi

    case $format in
        ip )
            log "$ip_range is only an IP"
            echo $ip_range
            return 0
            ;;
        range )
            log "$ip_range is a range"
            local range_size=${ip_range##*.}
            local min=$((${range_size%%-*}))
            local max=$((${range_size##*-}))

            if [[ $max -le $min ]]; then
                error 5 "Invalid range: $ip_range"
            fi

            # NOTE(review): $min comes from $((...)) so it can never be
            # empty — this guard looks unreachable; confirm before removal.
            if [[ -z "$min" ]]; then
                error 6 "Invalid range: $ip_range"
            fi

            if [[ $max -gt 255 ]]; then
                error 7 "Invalid range: $ip_range"
            fi
            ;;
        cidr )
            log "$ip_range is a cidr"
            # Refuse blocks larger than /24 to bound the expansion size.
            local block_size=${ip_range##*/}
            if [[ "$block_size" -lt 24 ]]; then
                error 4 "$ip_range's block size is too large"
            fi
            ;;
    esac

    # List-scan (-sL) enumerates addresses without probing them; strip
    # the surrounding parentheses nmap puts around resolved names.
    local nmap="$(nmap -sL $ip_range 2>&1)"
    if grep -q 'Failed to resolve' <<< "$nmap"; then
        error 8 "$nmap"
    fi

    echo "$nmap" | awk '/Nmap scan report/{print $NF}' | sed 's/(\(.*\))/\1/g'
}
# Emit one grep pattern per line matching the RFC1918 private blocks
# (10/8, 172.16/12, 192.168/16) that load should never track.
function dns-check.ignored() {
    printf '%s\n' '^10\..*'
    for ((octet = 16; octet <= 31; octet++)); do
        printf '%s\n' "^172.${octet}\..*"
    done
    printf '%s\n' '^192.168\..*'
}
###
# @description Prints the available subcommands
# (every declared function named "<script>.<something>", one per line)
##
function dns-check.subcommands() {
    local pattern=".*$__SCRIPT__.\(.*\)"
    # sed -n 's//p' both filters and strips the prefix, so no separate
    # grep pass is needed.
    declare -f -F | sed -n "s/$pattern/\1/p"
}
# When sourced, export the sub-command functions for the caller's use;
# when executed directly, enable strict mode and dispatch the CLI args.
if [[ ${BASH_SOURCE[0]} != $0 ]]; then
    for fn in $(dns-check.subcommands); do
        export -f $__SCRIPT__.$fn
    done
else
    set -euo pipefail

    # ${@:-} keeps `set -u` happy when no arguments were given.
    dns-check "${@:-}"
    exit $?
fi
| true |
90a5e9c6ff80044280440b9444197f7eebaec63d | Shell | JustinHop/synfig-docker | /run.sh | UTF-8 | 529 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Launch the Synfig Studio container with GPU, audio and X11 pass-through.
set -x

# \/  be aware of this \/
# WARNING: `xhost +` disables X11 access control entirely — any client
# that can reach the X socket may read the display. Prefer
# `xhost +local:` (or an xauth cookie) if the container setup allows it.
xhost +

USER_UID=$(id -u)
USER_HOME=$HOME

TAG=justinhop/synfig:latest

# Expansions are quoted so paths with spaces survive word splitting.
docker run -it --rm \
    --runtime=nvidia \
    --gpus=all \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -e NVIDIA_DRIVER_CAPABILITIES=all \
    -e LD_LIBRARY_PATH=/synfig/lib \
    --volume="$USER_HOME/.config/synfig:/home/synfig/.config/synfig" \
    --volume="/run/user/${USER_UID}/pulse:/run/user/${USER_UID}/pulse" \
    --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \
    -e DISPLAY="$DISPLAY" \
    "$TAG" "$@"
| true |
f0aeee58abdd21888dcd4dc1bfec44ce2c04f051 | Shell | chiahsoon/go-scaffold | /hooks/pre-commit | UTF-8 | 316 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -ex

# Collect staged Go files (Added/Copied/Modified). mapfile keeps one
# path per array element; the previous scalar `"$gofiles"` passed the
# whole newline-joined list as a single argument, which broke gofmt,
# goimports and git add whenever more than one file was staged.
mapfile -t gofiles < <(git diff --cached --name-only --diff-filter=ACM | grep '\.go$' || true)

if [ "${#gofiles[@]}" -gt 0 ]; then
    # Format, fix imports, then re-stage the rewritten files.
    gofmt -s -w "${gofiles[@]}"
    goimports -w "${gofiles[@]}"
    git add "${gofiles[@]}"
fi

golangci-lint run
| true |
fa660e7c3377e58461ed5462cd2de470ac29c0bb | Shell | UofM-CEOS/remote_sensing | /odata-demo.sh | UTF-8 | 16,916 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#------------------------------------------------------------------------------#
# Demo script illustrating some examples using the OData interface #
# of the Data Hub Service (DHuS) #
#------------------------------------------------------------------------------#
# GAEL Systems 2014 #
#------------------------------------------------------------------------------#
# Change log #
# 2014-11-07 v1.0 First version listing collections, products, matching name #
# or ingestion date. Download of the Manifest file. #
# 2014-11-11 v1.1 Add search by AOI, product type and last <n> days. Get #
# Polarisation & relative orbit values, Download quick-look #
# or full product. #
# 2014-11-18 v1.2 Add case listing products from specific acquisition date #
# Date operations are now forced in UTC #
# Date operations supporting both Linux and Max OSX syntax #
# -V command line option added to display script version #
#------------------------------------------------------------------------------#
# Define default options and variables
VERSION="1.2"
# Data Hub Service endpoint and placeholder credentials (overridden by
# the -s/-u/-p command line options below).
DHUS_SERVER_URL="https://scihub.esa.int/dhus"
DHUS_USER="username"
DHUS_PASSWD="password"
# Output/diagnostic toggles (set by -j / -v).
JSON_OPT=false
VERBOSE=false
# Filled by get_field_values; consumed by show_numbered_list.
RESULT_LIST=""
# Print the command line usage summary on stdout.
function show_help()
{
    cat <<'EOF'
USAGE: odata-demo.sh [OPTIONS]... 
This script illustrates sample scenarios using the OData inteface of the Data Hub Service (DHuS).
OPTIONS are:
   -h, --help                display this help message
   -j, --json                use json output format for OData (default is xml)
   -p, --password=PASSWORD   use PASSWORD as password for the Data Hub
   -s, --server=SERVER       use SERVER as URL of the Data Hub Server
   -u, --user=NAME           use NAME as username for the Data Hub
   -v, --verbose             display curl command lines and results
   -V, --version             display the current version of the script
EOF
}
# Print the script name and current version.
function show_version()
{
    printf '%s\n' "odata-demo.sh $VERSION"
}
# Display the passed text between dashed banner lines; output is cut at
# 20 lines and flagged as truncated when longer.
function show_text()
{
    local banner="--------------------------------------------------"
    echo "$banner"
    echo "$1" | head -n 20
    if [ "$(echo "$1" | wc -l)" -gt 20 ]; then
        echo "[Truncated to 20 lines]..."
    fi
    echo "$banner"
}
# Return list of values for the passed field name from the result file depending on its json or xml format
# Reads $OUT_FILE (written by query_server) and stores the newline-
# separated values in the global RESULT_LIST.
function get_field_values()
{
    field_name="$1"
    if [ "$USE_JQ" = "true" ]
    then
        # JSON path: OData v1 wraps entries under .d.results; tr strips
        # the surrounding quotes jq emits.
        RESULT_LIST=$(jq ".d.results[].$field_name" "$OUT_FILE" | tr -d '"')
    else
        # XML path: local-name() matching sidesteps namespace prefixes.
        RESULT_LIST=$(cat "$OUT_FILE" | xmlstarlet sel -T -t -m "//*[local-name()='entry']//*[local-name()='$field_name']" -v '.' -n)
    fi
}
# Display a numbered listing of a newline-separated value list.
# Side effect: sets the global nb_items to the entry count (0 if empty).
function show_numbered_list()
{
    LIST="$1"
    if [ ! "$LIST" ]
    then
        nb_items=0
        echo "Result list is empty."
        return
    fi
    nb_items=$(echo "$LIST" | wc -l | tr -d ' ')
    echo "Result list has $nb_items item(s):"
    # Temporarily split on newlines only so entries may contain spaces.
    OLD_IFS=$IFS
    IFS=$'\n'
    n=0
    for entry in $LIST
    do
        n=$((n + 1))
        echo "  $n. $entry"
    done
    IFS=$OLD_IFS
}
# Query the server and return json or xml output, with optional verbose mode
# Args are URL JSON_FILTER XML_FILTER
# Writes the response to $OUT_FILE; $CURL_PREFIX is intentionally
# expanded unquoted so its embedded options word-split into arguments.
function query_server()
{
    # Get URL and filter space characters
    URL="${1// /%20}"
    # Version using JSON output and jq parsing
    if [ "$USE_JQ" = "true" ]
    then
        # Add "?" to URL if not yet present, or "&" with json format option
        if (echo $URL | grep "?" > /dev/null)
        then URL="${URL}&\$format=json"
        else URL="${URL}?\$format=json"
        fi
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        $CURL_PREFIX "$URL" > "$OUT_FILE"
        [ "$VERBOSE" = "true" ] && show_text "$(jq "." "$OUT_FILE")"
    # Version using XML output and xmlstarlet parsing
    else
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        $CURL_PREFIX "$URL" > "$OUT_FILE"
        [ "$VERBOSE" = "true" ] && show_text "$(xmlstarlet fo "$OUT_FILE")"
    fi
}
# Parse command line arguments
for arg in "$@"
do
    case "$arg" in
        -h | --help) show_help; exit 0 ;;
        -j | --json) JSON_OPT=true ;;
        -p=* | --password=*) DHUS_PASSWD="${arg#*=}" ;;
        -s=* | --server=*) DHUS_SERVER_URL="${arg#*=}" ;;
        -u=* | --user=*) DHUS_USER="${arg#*=}" ;;
        -v | --verbose) VERBOSE=true ;;
        -V | --version) show_version; exit 0 ;;
        *) echo "Invalid option: $arg" >&2; show_help; exit 1 ;;
    esac
done
# Set variables depending to optional command line arguments
ROOT_URL_ODATA="$DHUS_SERVER_URL/odata/v1"
ROOT_URL_SEARCH="$DHUS_SERVER_URL/search"
CURL_PREFIX="curl -gu $DHUS_USER:$DHUS_PASSWD"
# Check if needed commands are present (date (differs on Linux and OSX), curl, then jq or xmlstarlet)
# Bash quirk used below: `if $(cmd &>/dev/null)` expands to an empty
# command whose status is that of the last command substitution, so the
# condition reflects whether cmd itself succeeded.
USE_JQ=false
USE_XMLST=false
USE_DATEV=false
# BSD/macOS date accepts -v; use that to pick the date syntax later.
if $(date -v-1d &> /dev/null)
then USE_DATEV=true
fi
if ! $(type curl &> /dev/null)
then echo "Command \"curl\" is missing, please install it first!"; exit 1
fi
if [ "$JSON_OPT" = "true" ]
then
    if ! $(type jq &> /dev/null)
    then echo "Command \"jq\" is missing, please install it first!"; exit 1
    fi
    USE_JQ=true
    OUT_FILE="/tmp/result.json"
else
    if ! $(type xmlstarlet &> /dev/null)
    then echo "Command \"xmlstarlet\" is missing, please install it first!"; exit 1
    fi
    OUT_FILE="/tmp/result.xml"
fi
# Menu: Ask which scenario to start
# Interactive main loop: each case builds an OData (or /search) URL,
# fetches it through query_server/curl, and renders the result. Runs
# until the user picks "q"; user input is spliced into the URLs as-is.
while true
do
    echo ""
    echo "Choose a sample demo:"
    echo "  1. List the collections"
    echo "  2. List <n> products from a specified collection"
    echo "  3. List first 10 products matching part of product name"
    echo "  4. List first 10 products matching a specific ingestion date"
    echo "  5. List first 10 products matching a specific aquisition date"
    echo "  6. List first 10 products since last <n> days, by product type and intersecting an AOI"
    echo "  7. Get product id from product name"
    echo "  8. Get polarisation from a product id"
    echo "  9. Get relative orbit from a product id"
    echo " 10. Download Manifest file from a product id"
    echo " 11. Download quick-look from a product id"
    echo " 12. Download full product from its id"
    echo "  q. Quit"
    echo -n "Please enter the selected item number: "
    read answer
    case $answer in
    1) # List the collections
        # Build URL and query server to get result list
        query_server "${ROOT_URL_ODATA}/Collections"
        get_field_values "Name"
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    2) # List <n> products from a specified collection
        # Ask for a collection name and filter potential quotes
        echo -n "Please enter the name of a collection (e.g. one from step 1., default=none): "
        read colname; colname=${colname//\"/}
        # Ask for top and skip limiters
        echo -n "How many products to list [1-n], default=10: "
        read nbtop; [ -z "$nbtop" ] && nbtop=10
        echo -n "Starting from [0-p], default=0: "
        read nbskip; [ -z "$nbskip" ] && nbskip=0
        # Build URL and query server to get result list
        if [ -z "$colname" ]
        then query_server "${ROOT_URL_ODATA}/Products?\$skip=$nbskip&\$top=$nbtop"
        else query_server "${ROOT_URL_ODATA}/Collections('$colname')/Products?\$skip=$nbskip&\$top=$nbtop"
        fi
        get_field_values "Name"
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    3) # List first 10 products matching part of product name
        # Ask for a product name part and remove potential quotes
        echo -n "Please enter the name part to match (e.g. GRD, EW, 201410, default=SLC): "
        read namepart; namepart=${namepart//\"/}; [ -z "$namepart" ] && namepart="SLC"
        # Build URL and query server to get result list
        query_server "${ROOT_URL_ODATA}/Products?\$select=Id&\$filter=substringof('$namepart',Name)&\$top=10"
        get_field_values "Name"
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    4) # List first 10 products matching a specific ingestion date
        # Ask for ingestion date, default is today
        echo -n "Please enter the ingestion date (YYYYMMDD format, default today=$(date -u +%Y%m%d)): "
        read idate; [ -z "$idate" ] && idate=$(date -u +%Y%m%d)
        year=${idate:0:4}
        month=${idate:4:2}
        day=${idate:6:2}
        # Build URL and query server to get result list
        query_server "${ROOT_URL_ODATA}/Products?\$filter=year(IngestionDate)+eq+$year+and+month(IngestionDate)+eq+$month+and+day(IngestionDate)+eq+$day&\$top=10"
        get_field_values "Name"
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    5) # List first 10 products matching a specific aquisition date
        # Display a notice due to the current limitations
        echo "PLEASE NOTE: the current version is getting a list of 1000 products from the server and filters the results locally. Getting this list may take some time. Additional filter at server level will be available in future evolutions. Another way to do this is to use demo case 3. with the following date pattern YYYYMMDD, which is part of the product name."
        # Ask for acquisition date, default is yesterday, manage date command syntax for Linux or Mac OSX
        if [ "$USE_DATEV" = "true" ]
        then yesterday=$(date -u -v-1d +%Y%m%d)
        else yesterday=$(date -u -d 'yesterday' +%Y%m%d)
        fi
        echo -n "Please enter the acquisition date (YYYYMMDD format, default yesterday=$yesterday): "
        read acq_date; [ -z "$acq_date" ] && acq_date=$yesterday
        # Build URL and query server to get result list
        query_server "${ROOT_URL_ODATA}/Products?\$top=1000"
        # Compute the day's [start,stop] window in epoch milliseconds,
        # using BSD (-j -f) or GNU (-d) date syntax as detected earlier.
        if [ "$USE_DATEV" = "true" ]
        then
            millistart=$(date -u -j -f %Y%m%d-%H%M%S "$acq_date-000000" +%s000)
            millistop=$(expr $millistart \+ 86399999)
            acq_date2=$(date -u -j -f %Y%m%d "$acq_date" +%Y-%m-%d)
        else
            millistart=$(date -u -d "$acq_date" +%s000)
            millistop=$(date -u -d "$acq_date+1day-1sec" +%s999)
            acq_date2=$(date -u -d "$acq_date" +%Y-%m-%d)
        fi
        if [ "$USE_JQ" = "true" ]
        then
            RESULT_LIST=$(jq ".d.results | map(select(.ContentDate.Start >= \"/Date($millistart)/\" and .ContentDate.Start <= \"/Date($millistop)/\")) | .[].Name" "$OUT_FILE" | tr -d '"')
        else
            RESULT_LIST=$(xmlstarlet sel -T -t -m '//_:entry' -i "contains(.//d:ContentDate/d:Start/text(),\"$acq_date2\")" -c './_:title/text()' -n "$OUT_FILE")
        fi
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    6) # List first 10 products since last <n> days, by product type and intersecting an AOI
        # Display a notice due to the current use of /search
        echo "PLEASE NOTE: the current syntax is using the /search api instead of pure OData. Additional functions including geographical search will be also available via the OData API in future evolutions."
        # Ask for the query parameters
        echo -n "Please enter the number of days from today (default=1): "
        read lastdays; [ -z "$lastdays" ] && lastdays=1
        echo -n "Please enter the selected product type (e.g. SLC, default=GRD): "
        read ptype; [ -z "$ptype" ] && ptype="GRD"
        polygon_default="POLYGON((-15.0 47.0,5.5 47.0,5.5 60.0,-15.5 60.0,-15.50 47.0,-15.0 47.0))"
        echo -n "Please enter the AOI polygon, first and last points shall be the same. Defaults is $polygon_default: "
        read polygon; [ -z "$polygon" ] && polygon="$polygon_default"
        # Build query and replace blanks spaces by '+'
        query="ingestiondate:[NOW-${lastdays}DAYS TO NOW] AND producttype:${ptype} AND footprint:\"Intersects(${polygon})\""
        query_server "${ROOT_URL_SEARCH}?q=${query// /+}"
        if [ "$USE_JQ" = "true" ]
        then
            RESULT_LIST=$(jq ".feed.entry[].id" "$OUT_FILE" | tr -d '"')
        else
            RESULT_LIST=$(cat "$OUT_FILE" | xmlstarlet sel -T -t -m '//_:entry/_:id/text()' -v '.' -n)
        fi
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    7) # Get product id from product name
        # Ask for a product name and remove potential quotes
        echo -n "Please enter the name of the product (e.g. one from previous steps): "
        read prodname; prodname=${prodname//\"/}
        # Build URL and query server to get result list
        query_server "${ROOT_URL_ODATA}/Products?\$filter=Name+eq+'$prodname'"
        get_field_values "Id"
        # Display result list
        show_numbered_list "$RESULT_LIST"
        ;;
    8) # Get polarisation from a product id
        # Ask for a product id
        echo -n "Please enter the id of the product (e.g. one from step 5.): "
        read prodid; prodid=${prodid//\"/}
        # Build URL to get polarisation
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Attributes('Polarisation')/Value/\$value"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        value=$($CURL_PREFIX "$URL")
        show_text "Polarisation = $value"
        ;;
    9) # Get relative orbit from a product id
        # Ask for a product id
        echo -n "Please enter the id of the product (e.g. one from step 5.): "
        read prodid; prodid=${prodid//\"/}
        # Build URL to get relative orbit
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Attributes('Relative%20orbit%20(start)')/Value/\$value"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        value=$($CURL_PREFIX "$URL")
        show_text "Relative orbit (start) = $value"
        ;;
    10) # Download Manifest file from a product id
        # Ask for a product id
        echo -n "Please enter the id of the product (e.g. one from step 5.): "
        read prodid; prodid=${prodid//\"/}
        # Build URL to get product name
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Name/\$value"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        prodname=$($CURL_PREFIX "$URL")
        [ "$VERBOSE" = "true" ] && show_text "$prodname"
        # Build URL to get Manifest node
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Nodes('$prodname.SAFE')/Nodes('manifest.safe')/\$value"
        output_file="$prodname.manifest.safe"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX -o "$output_file" \"$URL\""
        $CURL_PREFIX -o "$output_file" "$URL" && echo "Manifest file saved as \"$output_file\""
        [ "$VERBOSE" = "true" ] && show_text "$(cat "$output_file")"
        ;;
    11) # Download quick-look from a product id
        # Ask for a product id
        echo -n "Please enter the id of the product (e.g. one from step 5.): "
        read prodid; prodid=${prodid//\"/}
        # Build URL to get product name
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Name/\$value"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX \"$URL\""
        prodname=$($CURL_PREFIX "$URL")
        [ "$VERBOSE" = "true" ] && show_text "$prodname"
        # Build URL to get quick-look
        URL="${ROOT_URL_ODATA}/Products('$prodid')/Nodes('$prodname.SAFE')/Nodes('preview')/Nodes('quick-look.png')/\$value"
        output_file="$prodname.quick-look.png"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX -o "$output_file" \"$URL\""
        $CURL_PREFIX -o "$output_file" "$URL" && echo "Quick-look file saved as \"$output_file\""
        ;;
    12) # Download full product from its id
        # Ask for a product id
        echo -n "Please enter the id of the product (e.g. one from step 5.): "
        read prodid; prodid=${prodid//\"/}
        # Build URL to get product
        URL="${ROOT_URL_ODATA}/Products('$prodid')/\$value"
        [ "$VERBOSE" = "true" ] && show_text "$CURL_PREFIX -JO \"$URL\""
        $CURL_PREFIX -JO "$URL"
        ;;
    q) echo "Bye."
       exit 0;;
    *) echo "Invalid selection \"$answer\"!" ;;
    esac
    #echo -n "Press ENTER to continue..."; read key
done
# Exit
exit 0
| true |
4030ad39ad6faf65d8b1165cbe4f1e72377a4ae9 | Shell | firedrakeproject/petsc | /src/contrib/style/checks/space-in-lists.sh | UTF-8 | 418 | 3.46875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Checks for compliance with
# Rule: 'No space before or after a , in lists'
# Steps:
# - strip off comments using GCC
# - find lines with ' ,' or ', '
#
for f in "$@"
do
  # Skip generated Fortran interface stubs and documentation sources.
  isvalid=$(echo "$f" | grep -v "/ftn-auto/\|src/docs/")
  if [ -n "$isvalid" ]; then
    #echo "$f"
    # Let the preprocessor strip comments, then flag any space before or
    # after a comma.  "$f" is quoted so paths with spaces survive.
    output=$(gcc -fpreprocessed -dD -E -w -x c++ "$f" | grep " ,\|, ")
    if [ -n "$output" ]; then echo "$f: $output"; fi
  fi
done
| true |
473170766cee049b70650fe513a45f4bd3edaa87 | Shell | sirkkalap/dotfiles-sirkkalap | /bin/status-git | UTF-8 | 173 | 3.078125 | 3 | [] | no_license | #!/bin/bash
proj=${1-~/proj}
repos=$(cat $proj/repos.lst)
opts=""
for r in $repos; do
(
echo $proj/$r
cd $proj/$r
git status $opts
)
done
| true |
31b7af604aca99d5d55878b2483376f4bbfdfda4 | Shell | kavita314/59d16e74-8ed5-4b1d-8935-ed1445e3c050 | /.score.sh | UTF-8 | 745 | 3.078125 | 3 | [] | no_license | #!/bin/sh
SCORE=0
mvn clean compile
mvn package | tee .pack.txt
TEST_1=$(grep -o -w -e "<packaging>war</packaging>" -e "maven-war-plugin" pom.xml | wc -l)
TEST_2=$(grep -o -w "Welcome to first maven war project" ./src/main/java/javaApp/Demo.java | wc -l)
TEST_3=$((grep -o -w "BUILD SUCCESS" .pack.txt ) | wc -l)
TEST_4=$((find ./target/*.war ) | wc -l)
if [ "$TEST_1" -ge 2 ]
then ((pass++))
else
((fail++))
fi;
if [ "$TEST_2" -eq 1 ]
then ((pass++))
else
((fail++))
fi;
if [ "$TEST_3" -eq 1 ]
then ((pass++))
else
((fail++))
fi;
if [ "$TEST_4" -eq 1 ]
then ((pass++))
else
((fail++))
fi;
echo "Total testcase: 4"
echo "Total testcase passed: $pass"
echo "Total testcase fail: $fail"
echo "total score: $(( ($pass * 100) / 4))" | true |
70167225dccd7740e28d9a199ef8c2afde40f1d3 | Shell | lnxbusdrvr/myScriptCollection | /access.geoip/access.geoip.script.sh | UTF-8 | 543 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Get one row (tail -n1) from Apache/Nginx's access.log
# print only first column (awk) from the file, which is ip-address.
# Tell ip-address from access.log to API's url
# Get result with using JSON's keywords (jq): city, region and country -infos
# Tells visitor city, region and country-code
accesslog="/var/log/nginx/access.log"
apiurl="http://ipinfo.io/"
# Take the client IP (first field) of the most recent access-log entry and
# ask the ipinfo.io API for its geolocation.  The for-loop naturally skips
# the lookup when the log is empty (tail|awk yields nothing).  Expansions
# are quoted so unusual paths/values cannot word-split or glob.
for ip in $(tail -n 1 "$accesslog" | awk '{print $1}')
do
    # Query the API for this IP and print selected fields from the JSON reply.
    curl -s "${apiurl}${ip}" | jq -r '.city, .region, .country, .timezone'
done
| true |
6009a7fc87f161d02691e0f0f84c0e7d508fb0f5 | Shell | horance-liu/rcde | /name.sh | UTF-8 | 115 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
# Build an identifier of the form <git-email with '@' replaced by '.'>-<current
# branch>-<basename of cwd>, e.g. "user.example.com-master-myrepo".
# Requires running inside a git work tree with user.email configured.
user_name=$(git config user.email | tr '@' '.')-$(git rev-parse --abbrev-ref HEAD)-${PWD##*/}
| true |
fbb1c21f2f732e4daac9ba088dd672fabbc9f33e | Shell | glegoux/browsing-history-shell | /test/test_suite | UTF-8 | 5,720 | 4.375 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
#
# test_suite
#
# Run test suite for a set of unit tests.
# Usage
usage() {
  # Print the help text to stdout.  The heredoc was opened with the
  # delimiter HELP_MSG but closed with EOF, which left it unterminated
  # (a fatal parse error); both ends now use HELP_MSG.
  /bin/cat <<- HELP_MSG
Usage: $(basename "$0") unittest_file
Run test suite for a set of unit tests.
Each unit test is runned in the order where you defined it in the file.
unittest_file:
file where you defined your set of unit tests in respecting
the appropriate format:
Implementation of following abstract functions at beginning of file:
- void init_test_suite(): executed before set of unit tests.
- void clean_test_suite(): executed after set of unit tests.
- void init_test(test_name): executed before each unit test.
- void clean_test(test_name): executed after each unit test.
A set of unit tests where each test should have this format:
test_<name> {
## stdout
# <optional line(s) with test stdout>
## stderr
# <optional line(s) with test stderr>
## exit status <exit_status>
## location <location>
<your test>
}
HELP_MSG
}
# Global variables
# UNIT_TEST_PATHNAME: absolute path of the file holding the unit tests.
# NOTE(review): "$1" is resolved here, at load time, before the argument
# checks at the bottom of the script run — confirm a missing argument is
# acceptable to realpath on the target platform.
declare -r UNIT_TEST_PATHNAME="$(realpath "$1")"
# Hook functions every unit-test file must implement (see usage).
declare -ar FUNCTIONS=(init_test_suite clean_test_suite init_test clean_test)
# Helper
die() {
  # Report a fatal error on stderr and abort the script with status 1.
  printf 'ERROR: %s !\n' "$1" >&2
  exit 1
}
check_functions() {
  # Verify that the sourced unit-test file implements every required hook
  # listed in FUNCTIONS; abort via die otherwise.
  local fn
  for fn in ${FUNCTIONS[*]}; do
    declare -F "$fn" > /dev/null || die "'$0', no implementation in $UNIT_TEST_PATHNAME for $fn"
  done
}
get_comments() {
  # Emit the '#' comment lines found inside the body of function $1 in the
  # unit-test file (leading whitespace stripped); fail when there are none.
  local comments
  comments=$(sed -n "/^$1() {$/,/^}$/p" "${UNIT_TEST_PATHNAME}" | grep '#' | sed "s/^[ ]*//")
  if [[ -z "$comments" ]]; then
    return 1
  fi
  echo "$comments"
  return 0
}
get_content() {
  # Print the '# '-prefixed body found between marker $1 and the next
  # '##' line inside the text $2; fail when the marker is absent.
  local marker="$1"
  local text="$2"
  printf '%s\n' "$text" | grep -q "$marker" || return 1
  printf '%s\n' "$text" \
    | sed -n "/$marker/,/##/p" \
    | sed 1d | sed '$d' | sed 's/^# //'
  return 0
}
get_line() {
  # Print whatever follows marker $1 on its line within $2; fail when no
  # line matches.
  local marker="$1"
  local text="$2"
  printf '%s\n' "$text" | grep -q "$marker" || return 1
  printf '%s\n' "$text" | grep "$marker" | sed "s/$marker//"
  return 0
}
# Convenience accessors over the comment block of a unit test.
get_stdout() {
  get_content "^## stdout" "$1"
}
get_stderr() {
  get_content "^## stderr" "$1"
}
get_exit_status() {
  get_line "^## exit status " "$1"
}
get_location() {
  get_line "^## location " "$1"
}
close_test_suite() {
  # Run the user-supplied suite teardown and print the overall verdict.
  # $1 (optional, default "false"): "true" when the latest unit test failed.
  local failed="${1-false}"
  echo "Cleaning test suite ..."
  clean_test_suite
  echo
  if "$failed"; then
    echo "--> Latest unit test KO."
  else
    echo "--> All unit tests OK."
  fi
}
close_test() {
  # Tear down a single unit test ($1), then close the whole suite.
  # $2 (optional, default "false"): "true" when that test failed.
  local test_name="$1"
  local failed="${2-false}"
  echo
  echo "Cleaning unit test: ${test_name} ..."
  clean_test "$test_name"
  echo
  close_test_suite "$failed"
}
main() {
  # Load the unit-test file into this shell and make sure it defines all
  # required hooks plus at least one test_* function.
  source "$UNIT_TEST_PATHNAME" \
    || die "impossible to import '$UNIT_TEST_PATHNAME' in bash environment"
  check_functions
  # Discover the unit tests: every defined function whose name starts test_.
  unit_tests=$(typeset -F | sed "s/declare -f //" | grep ^test_)
  [[ -z "${unit_tests}" ]] && die "no one unit test"
  echo -e "\nInitialize test suite ..."
  init_test_suite
  echo -e "... end\n"
  for unit_test in ${unit_tests}; do
    # If anything below exits non-zero (die included), run the failure
    # teardown for the current test before the script terminates.
    trap '[ $? -eq 0 ] || close_test "${unit_test}" true' EXIT
    echo "Initialize unit test: ${unit_test} ..."
    init_test "${unit_test}"
    echo
    echo "Run unit test: ${unit_test} ..."
    # Capture the test's stdout/stderr into throw-away files, then record
    # its exit status and the directory it left us in.
    stdout=$(tempfile -p "bhist" -s "_${unit_test}.stdout")
    stderr=$(tempfile -p "bhist" -s "_${unit_test}.stderr")
    "${unit_test}" > "${stdout}" 2> "${stderr}"
    exit_status=$?
    location="${PWD}"
    # The test's expectations live in its own comment block.
    comments=$(get_comments "${unit_test}")
    if [[ $? -ne 0 ]]; then
      die "no comment found !"
    fi
    # Compare actual vs expected stderr (only when "## stderr" is declared).
    expected_stderr=$(get_stderr "${comments}")
    if [[ $? -eq 0 ]]; then
      echo "--STDERR:"
      echo "- given stderr:"
      # Normalize bash's "file: line N:" prefixes so messages are portable.
      stderr=$(cat "${stderr}" | sed 's/^.*line [0-9]*: /bash: /')
      [[ -n "${stderr}" ]] && echo "${stderr}"
      echo "- expected stderr:"
      [[ -n "${expected_stderr}" ]] && echo "${expected_stderr}"
      if [[ "$(echo "${expected_stderr}")" == "$(echo "${stderr}")" ]]; then
        echo "--> stderr OK"
      else
        die "--> stderr KO"
      fi
    fi
    # Compare actual vs expected stdout (only when "## stdout" is declared).
    expected_stdout=$(get_stdout "${comments}")
    if [[ $? -eq 0 ]]; then
      echo "--STDOUT:"
      echo "- given stdout:"
      cat "$stdout"
      echo "- expected stdout:"
      [[ -n "${expected_stdout}" ]] && echo "${expected_stdout}"
      if [[ "$(echo -n "${expected_stdout}")" == "$(cat "${stdout}")" ]]; then
        echo "--> stdout OK"
      else
        die "--> stdout KO"
      fi
    fi
    # Compare exit status (only when "## exit status N" is declared).
    expected_exit_status=$(get_exit_status "${comments}")
    if [[ $? -eq 0 ]]; then
      echo "--EXIT STATUS:"
      echo "- given exit status: ${exit_status}"
      echo "- expected exit status: ${expected_exit_status}"
      if [[ "${expected_exit_status}" == "${exit_status}" ]]; then
        echo "--> exit status OK"
      else
        die "--> exit status KO"
      fi
    fi
    # Compare the final working directory (only when "## location" is declared).
    expected_location=$(get_location "${comments}")
    if [[ $? -eq 0 ]]; then
      echo "--LOCATION:"
      echo "- given location: ${location}"
      echo "- expected location: ${expected_location}"
      if [[ "${expected_location}" == "${location}" ]]; then
        echo "--> location OK"
      else
        die "--> location KO"
      fi
    fi
    clean_test "${unit_test}"
  done
  echo -e "\nClose test suite ..."
  close_test_suite
  echo -e "... end\n"
}
# Run unit tests
# Run the argument checks and the suite only when this file is executed
# directly, not when it is sourced.  The original compared ${FUNCNAME[0]}
# with "main", but FUNCNAME is empty at the top level of an executed bash
# script, so main() was never invoked; comparing BASH_SOURCE with $0 is
# the standard, working idiom.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  # Script arguments
  if [[ $# -ne 1 ]]; then
    echo "ERROR: '$(basename "$0")', require one argument !"
    exit 1
  else
    case "$1" in
      "-h"|"--help")
        usage
        exit 0
        ;;
      *)
        if [[ ! -f "$1" ]]; then
          echo "ERROR: '$(basename "$0")', incorrect argument '$1' !"
          exit 1
        fi
        ;;
    esac
  fi
  main
fi
| true |
1e3590406aa911d865dea4a3c662d597262217a6 | Shell | iamchiwon/sparta7 | /script.sh | UTF-8 | 486 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# "name:url" pairs; only the first ':' separates the two fields, so the
# URL may itself contain colons.
repos=( "홍길동:https://github.com/~~~~"
        "유산슬:https://github.com/~~~~~"
        "고길동:https://github.com/~~~~~" )
echo "=== Started ==="
# "${repos[@]}" keeps each entry intact even if a name contains spaces
# (the original unquoted ${repos[@]} would word-split them).
for repo in "${repos[@]}"; do
    NAME=${repo%%:*}   # text before the first ':'
    REPO=${repo#*:}    # everything after the first ':'
    if [ -d "$NAME" ]; then
        echo "== Pulling ${NAME}'s repository =="
        # Subshell plus && guard: a failed cd can never run git pull in
        # the wrong directory, and the cwd is restored automatically.
        ( cd "$NAME" && git pull )
    else
        echo "== Initializing ${NAME}'s repository =="
        git clone "$REPO" "$NAME"
    fi
done
echo "=== Finished ==="
7590fbd9df027da936b57df6a293a256504a90c2 | Shell | polarcat/yawm | /tools/run-menu | UTF-8 | 425 | 3.25 | 3 | [] | no_license | #!/bin/sh
# Build a "name<TAB>directory" index of every entry on PATH, offer it via
# the yawm start menu, and exec the selection in the background.
. $YAWM_HOME/lib/menu-utils
ifs_=$IFS                  # remember the default field separator
tmp_=$YAWM_HOME/tmp
out_=$tmp_/bin             # the menu index file
mkdir -p $tmp_
#printf "\a\t\n" > $out_
# Seed (and truncate) the index with a BEL+TAB header line.
printf "\a\t\n" > $out_
IFS=':'
# PATH components are ':'-separated; record each directory entry as
# "<name>\t<dir>".  NOTE(review): the file name is used as the printf
# format string, so names containing '%' or '\' would be mangled —
# presumably acceptable for PATH entries, but worth confirming.
for dir in $PATH; do
  ls -1 $dir | while read line; do
    printf "$line\t$dir\n" >> $out_
  done
done
IFS=$ifs_
# startmenu prints the chosen row; if fields beyond the directory are
# present ("extra"), they form a full command line to run instead of the
# bare command name.
startmenu -b -d -a $out_ | while read cmd path extra; do
  if [ -z "$extra" ]; then
    exec $cmd &
  else
    IFS=' '
    exec $extra &
  fi
  break
done
| true |
3afc5c63ab321e861c9f97d9142869ee72bd4cf3 | Shell | yosoyfunes/warp-engine | /.warp/setup/init/base.sh | UTF-8 | 3,238 | 3.984375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash +x
# Interactive project bootstrap: ask whether a private Docker registry is
# used, collect naming details if so, pick the main framework, and append
# everything to the sample environment file.  The warp_* helpers
# (question/message functions) are defined elsewhere in WARP.
# Loop until the user answers the private-registry question with y/Y/n/N.
while : ; do
    private_registry_mode=$( warp_question_ask_default "Do you want to configure this project with a private Docker Registry? $(warp_message_info [y/N]) " "N" )
    if [ "$private_registry_mode" = "Y" ] || [ "$private_registry_mode" = "y" ] || [ "$private_registry_mode" = "N" ] || [ "$private_registry_mode" = "n" ] ; then
        break
    else
        warp_message_warn "wrong answer, you must select between two options: $(warp_message_info [Y/n]) "
    fi
done
# Registry details are only collected when the answer was yes.
if [ "$private_registry_mode" = "Y" ] || [ "$private_registry_mode" = "y" ] ; then
    # Namespace: 2-12 alphanumerics, optionally a second hyphen-joined part.
    # NOTE(review): the warning text ("only letters and lowercase") does not
    # match the regex, which also accepts digits, uppercase and a hyphen.
    while : ; do
        namespace_name=$( warp_question_ask "Namespace name, it should be only letters or numbers with a maximum of 12 characters separated by hyphens, for example 'Starfleet': " )
        if [[ $namespace_name =~ ^[a-zA-Z0-9]{2,12}(-[a-zA-Z0-9]{2,12})?$ ]] ; then
            warp_message_info2 "The namespace name: $(warp_message_bold $namespace_name)"
            break
        else
            warp_message_warn "incorrect value, please enter only letters and lowercase\n"
        fi;
    done
    # Project name: same pattern as the namespace.
    while : ; do
        project_name=$( warp_question_ask "Project Name, it should be only letters or numbers with a maximum of 12 characters separated by hyphens, for example 'WARP Engine' should be 'warp-engine': " )
        if [[ $project_name =~ ^[a-zA-Z0-9]{2,12}(-[a-zA-Z0-9]{2,12})?$ ]] ; then
            warp_message_info2 "The project name is: $(warp_message_bold $project_name)"
            break
        else
            warp_message_warn "incorrect value, please enter only letters and lowercase\n"
        fi;
    done
    # Registry URL: any non-empty string is accepted.
    while : ; do
        docker_private_registry=$( warp_question_ask "Docker Private Registy URL: " )
        if [ ! -z "$docker_private_registry" ] ; then
            warp_message_info2 "The docker private registry url is: $(warp_message_bold $docker_private_registry)"
            break
        fi;
    done
fi;
# Main framework: loop until one of the four known values is entered
# (default m2); any other input re-prompts.
while : ; do
    framework=$( warp_question_ask_default "Select the main framework for this project. Possible values are $(warp_message_info [m1/m2/oro/php]): " "m2" )
    case $framework in
        'm1')
            break
            ;;
        'm2')
            break
            ;;
        'oro')
            break
            ;;
        'php')
            break
            ;;
        *)
            warp_message_info2 "Selected: $framework, the available options are m1, m2, oro, php"
            ;;
    esac
done
# Persist the collected answers plus fixed settings into the sample env
# file.  Variables left empty (registry skipped) are written as empty values.
echo "# Project configurations" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "NAMESPACE=${namespace_name}" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "PROJECT=${project_name}" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "DOCKER_PRIVATE_REGISTRY=${docker_private_registry}" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "FRAMEWORK=${framework}" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "# Docker configurations" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "COMPOSE_HTTP_TIMEOUT=$DOCKER_COMPOSE_HTTP_TIMEOUT" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "# VERSION Configuration" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "WARP_VERSION=$WARP_VERSION" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
368a12ac0ddc41e45e297c969c6868c2080c2792 | Shell | polubognonet/bashrepo | /firstscript.sh | UTF-8 | 466 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
# Demo script: branch on the first argument, count up from it, then show
# a filtered range and the positional parameters.
n=$1
echo "$n"
# Arithmetic comparison; an empty/missing argument evaluates as 0.
if (( n == 10 )); then
  echo "This is true, it's 10"
elif (( n == 9 )); then
  echo "it is not, it is 9"
else
  echo "it is some different number"
fi
# Count upward from the argument until it passes 10, printing each value.
while (( n <= 10 )); do
  n=$(( n + 1 ))
  echo "$n"
done
# Print only the values strictly between 3 and 7.
for (( i = 0; i < 10; i++ )); do
  if (( i > 3 )) && (( i < 7 )); then
    echo "$i"
  fi
done
# Echo the first two positional parameters and the argument count.
positional=("$@")
echo ${positional[0]}
echo ${positional[1]}
echo $#
| true |
49a97ffda70ba186a2fd455a28dbd0140e09132a | Shell | joepaw/desktop-setup | /.bash_aliases | UTF-8 | 788 | 2.71875 | 3 | [] | no_license | ## ls ## {{{
#alias ls='ls -hF --color=auto'
alias lr='ls -R' # recursive ls
alias ll='ls -l' # long listing
alias la='ll -A' # long listing, include dotfiles (except . and ..)
alias lx='ll -BX' # sort by extension
alias lz='ll -rS' # sort by size
alias lt='ll -rt' # sort by date
alias lm='la | more' # long listing, paged
alias tf='tail -f' # follow a file as it grows
# }}}
alias cls=' echo -ne "\033c"' # clear screen for real (it does not work in Terminology)
## New commands ## {{{
alias da='date "+%A, %B %d, %Y [%T]"' # verbose current date and time
alias du1='du --max-depth=1' # disk usage, one directory level deep
alias hist='history | grep' # requires an argument
alias openports='ss --all --numeric --processes --ipv4 --ipv6' # listening/open sockets
alias pgg='ps -Af | grep' # requires an argument
alias ..='cd ..'
alias ...='cd ../..'
alias websrv='python3 -m http.server 8080' # serve the cwd over HTTP on :8080
# }}}
| true |
e9f6d9e28d76353d3b02d3713d15ff7b1e8b0ea6 | Shell | volltin/mkdocs-demo | /deploy.sh | UTF-8 | 394 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# GitHub account / repository this documentation site deploys to.
USER=volltin
EMAIL=volltin@live.com
REPO=mkdocs-demo
# Abort on the first failing command from here on.
set -e
# Register an authenticated remote using the CI-provided token, then make
# the local gh-pages branch track the remote one.
git remote add gh-token "https://${GH_TOKEN}@github.com/${USER}/${REPO}.git"
git fetch gh-token && git fetch gh-token gh-pages:gh-pages
# Unless invoked as a "dry" run, set the CI committer identity.
if [ "$1" != "dry" ]; then
    git config user.name "Travis Builder"
    git config user.email "$EMAIL"
fi
mkdocs gh-deploy -v --clean --remote-name gh-token
| true |
3a863995091f715694cbab82cb5d0b62ae353693 | Shell | trscavo/bash-library | /bin/http_cache_check.bash | UTF-8 | 11,711 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#######################################################################
# Copyright 2018 Tom Scavo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################
#######################################################################
# Help message
#######################################################################
display_help () {
  # Emit the full help/man text to stdout; the heredoc expands the script's
  # configuration variables ($usage_string, env_vars, dir_paths, ...) inline.
  /bin/cat <<- HELP_MSG
This script checks to see if a previously cached HTTP resource
is up-to-date. By default, the script performs a lightweight
network operation intended to be run as a cron job.
$usage_string
The script takes a single command-line argument, which is the
absolute URL of an HTTP resource. Assuming the resource is
already cached, the script requests the resource via an HTTP
conditional (HEAD) request [RFC 7232]. The resource is up-to-date
if (and only if) the web server responds with 304 Not Modified.
If the server supports HTTP conditional requests (as indicated
by an ETag in the response header), a successful response will
be either 304 or 200. If the server responds with 304, the
script exits normally with exit code 0. If the server responds
with 200 (instead of 304), the script logs a warning and exits
with code 1, indicating that the cache is dirty and in need of
update. See below for details about exit codes.
Regardless of the exit status, this script produces no output
and, moreover, no cache write will occur under any circumstances.
Options:
-h Display this help message
-D Enable DEBUG logging
-W Enable WARN logging
-t Enable "Try Hard Mode"
-z Enable "Compressed Mode"
Option -h is mutually exclusive of all other options.
Options -D or -W enable DEBUG or WARN logging, respectively.
This temporarily overrides the LOG_LEVEL environment variable.
Option -t enables Try Hard Mode. If the server responds with 200,
but HTTP conditional requests are not supported (indicated by the
absence of an ETag in the response header), the script issues an
ordinary HTTP GET request for the resource. It then compares the
cached file with the file on the server byte-by-byte (using diff).
The script exits normally with exit code 0 if (and only if) the
two files are identical.
Compressed Mode (option -z) enables HTTP Compression by adding an
Accept-Encoding header to the request; that is, if option -z is
enabled, the client merely indicates its support for HTTP Compression
in the request. The server will indicate its support for HTTP
Compression (or not) in the response header.
Important! This implementation treats compressed and uncompressed
requests for the same resource as two distinct cachable resources.
EXIT CODES
The following exit codes are emitted by this script:
0: Cache is up-to-date
1: Cache is NOT up-to-date
2: Initialization failure
3: Unexpected failure
4: HTTP conditional requests not supported (no ETag)
5: Unexpected HTTP response (neither 304 nor 200)
To work around exit code 4, enable Try Hard Mode (option -t). In
any case, if the resource was not previously cached at the time
the script was called, the exit code is guaranteed to be nonzero.
ENVIRONMENT
The following environment variables are REQUIRED:
$( printf " %s\n" ${env_vars[*]} )
The optional LOG_LEVEL variable defaults to LOG_LEVEL=3.
The following directories will be used:
$( printf " %s\n" ${dir_paths[*]} )
The following log file will be used:
$( printf " %s\n" $LOG_FILE )
INSTALLATION
At least the following source library files MUST be installed
in LIB_DIR:
$( printf " %s\n" ${lib_filenames[*]} )
EXAMPLES
For some HTTP location:
\$ cget.bash \$url # prime the cache
\$ ${0##*/} \$url
\$ echo \$?
0 # the cache is up-to-date
When the resource on the server changes:
\$ ${0##*/} \$url
\$ echo \$?
1 # the cache is NOT up-to-date
\$ cget.bash \$url # update the cache
\$ ${0##*/} \$url
\$ echo \$?
0 # the cache is up-to-date
For some other HTTP location:
\$ cget.bash \$url2 # prime the cache
\$ ${0##*/} \$url2
\$ echo \$?
4 # HTTP conditional requests not supported
\$ ${0##*/} -t \$url2
\$ echo \$?
0 # the cache is up-to-date
HELP_MSG
}
#######################################################################
# Bootstrap
#######################################################################
script_name=${0##*/} # equivalent to basename $0
# required environment variables (arrays deliberately start at index 1)
env_vars[1]="LIB_DIR"
env_vars[2]="CACHE_DIR"
env_vars[3]="TMPDIR"
env_vars[4]="LOG_FILE"
# check environment variables: each must be set and non-empty.
# eval performs indirect expansion of the variable named in $env_var.
for env_var in ${env_vars[*]}; do
  eval "env_var_val=\${$env_var}"
  if [ -z "$env_var_val" ]; then
    echo "ERROR: $script_name requires env var $env_var" >&2
    exit 2
  fi
done
# required directories
dir_paths[1]="$LIB_DIR"
dir_paths[2]="$CACHE_DIR"
dir_paths[3]="$TMPDIR"
# check required directories (exit code 2 = initialization failure)
for dir_path in ${dir_paths[*]}; do
  if [ ! -d "$dir_path" ]; then
    echo "ERROR: $script_name: directory does not exist: $dir_path" >&2
    exit 2
  fi
done
# check the log file (it must already exist; this script never creates it)
if [ ! -f "$LOG_FILE" ]; then
  echo "ERROR: $script_name: file does not exist: $LOG_FILE" >&2
  exit 2
fi
# default to INFO logging
if [ -z "$LOG_LEVEL" ]; then
  LOG_LEVEL=3
fi
# library filenames sourced later from LIB_DIR
lib_filenames[1]=core_lib.bash
lib_filenames[2]=http_tools.bash
lib_filenames[3]=http_cache_tools.bash
# check lib files before anything else runs
for lib_filename in ${lib_filenames[*]}; do
  lib_file="${LIB_DIR%%/}/$lib_filename"
  if [ ! -f "$lib_file" ]; then
    echo "ERROR: $script_name: file does not exist: $lib_file" >&2
    exit 2
  fi
done
#######################################################################
# Process command-line options and arguments
#######################################################################
usage_string="Usage: $script_name [-hDWtz] HTTP_LOCATION"
# defaults
help_mode=false; try_hard_mode=false
# Leading ':' in the optstring enables silent error reporting, so the
# \? and : cases below produce the diagnostics.
while getopts ":hDWtz" opt; do
  case $opt in
    h)
      help_mode=true
      ;;
    D)
      LOG_LEVEL=4 # DEBUG
      ;;
    W)
      LOG_LEVEL=2 # WARN
      ;;
    t)
      try_hard_mode=true
      ;;
    z)
      # Accumulate "-z" to pass through to the http_* helper functions.
      compression_opt="$compression_opt -$opt"
      ;;
    \?)
      echo "ERROR: $script_name: Unrecognized option: -$OPTARG" >&2
      exit 2
      ;;
    :)
      echo "ERROR: $script_name: Option -$OPTARG requires an argument" >&2
      exit 2
      ;;
  esac
done
# -h short-circuits everything else.
if $help_mode; then
  display_help
  exit 0
fi
# check the number of remaining arguments (exactly one URL is required)
shift $(( OPTIND - 1 ))
if [ $# -ne 1 ]; then
  echo "ERROR: $script_name: wrong number of arguments: $# (1 required)" >&2
  exit 2
fi
location="$1"
#######################################################################
# Initialization
#######################################################################
# source lib files (only *.bash entries from the list checked earlier)
for lib_filename in ${lib_filenames[*]}; do
  [[ ! $lib_filename =~ \.bash$ ]] && continue
  lib_file="${LIB_DIR%%/}/$lib_filename"
  source "$lib_file"
  status_code=$?
  if [ $status_code -ne 0 ]; then
    echo "ERROR: $script_name failed ($status_code) to source lib file $lib_file" >&2
    exit 2
  fi
done
# create a temporary subdirectory unique to this process ($$ = PID)
tmp_dir="${TMPDIR%%/}/${script_name%%.*}_$$"
/bin/mkdir "$tmp_dir"
status_code=$?
if [ $status_code -ne 0 ]; then
  echo "ERROR: $script_name failed ($status_code) to create tmp dir $tmp_dir" >&2
  exit 2
fi
# special log messages bracketing every run in the log file
initial_log_message="$script_name BEGIN"
final_log_message="$script_name END"
#######################################################################
#
# Main processing
#
# 1. issue a conditional HEAD request
# 2. compute the HTTP response code
# 3. process the response code
#
#######################################################################
print_log_message -I "$initial_log_message"
# issue a conditional HEAD request (helpers come from the sourced libs;
# clean_up_and_exit removes tmp_dir, logs the final message, and exits)
http_conditional_head $compression_opt -d "$CACHE_DIR" -T "$tmp_dir" "$location" > /dev/null
status_code=$?
if [ $status_code -ne 0 ]; then
  print_log_message -E "$script_name: http_conditional_head failed ($status_code)"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
# sanity check: the HEAD helper must have left the response headers on disk
tmp_header_file="$tmp_dir/$( tmp_response_headers_filename )"
if [ ! -f "$tmp_header_file" ]; then
  print_log_message -E "$script_name unable to find header file $tmp_header_file"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
# compute the HTTP response code
response_code=$( get_response_code "$tmp_header_file" )
status_code=$?
if [ $status_code -ne 0 ]; then
  print_log_message -E "$script_name: get_response_code failed ($status_code)"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
# process terminal conditions:
#   304            -> cache up-to-date (exit 0)
#   200 with ETag  -> cache stale (exit 1)
#   200 w/o ETag   -> exit 4 unless -t (Try Hard Mode) was given
#   anything else  -> exit 5
if [ "$response_code" = 304 ]; then
  print_log_message -I "$script_name: cache is up-to-date for resource: $location"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 0
elif [ "$response_code" = 200 ]; then
  header_value=$( get_header_value "$tmp_header_file" ETag )
  if [ -n "$header_value" ]; then
    print_log_message -W "$script_name: cache is NOT up-to-date for resource: $location"
    clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 1
  elif ! $try_hard_mode; then
    print_log_message -E "$script_name: HTTP conditional request not supported for resource: $location"
    clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 4
  fi
else
  print_log_message -E "$script_name: unexpected HTTP response ($response_code) for resource: $location"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 5
fi
# there's only one way to get through the above if-block:
# HTTP conditional requests are not supported but try_hard_mode is enabled
# in which case, prepare to try harder by comparing files byte-by-byte
print_log_message -I "$script_name: HTTP conditional request not supported for resource: $location"
# determine the cached file path
cache_file_path=$( cache_response_body_file $compression_opt -d "$CACHE_DIR" "$location" )
status_code=$?
if [ $status_code -ne 0 ]; then
  print_log_message -E "$script_name: cache_response_body_file failed ($status_code) on location $location"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
print_log_message -I "$script_name using cached file $cache_file_path"
# GET the resource, do not write to cache (-x)
print_log_message -D "$script_name fetching HTTP resource $location"
http_get $compression_opt -x -d "$CACHE_DIR" -T "$tmp_dir" "$location" > /dev/null
status_code=$?
if [ $status_code -ne 0 ]; then
  print_log_message -E "$script_name: http_get failed ($status_code) on location $location"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
# sanity check: the GET helper must have left the response body on disk
http_file_path="$tmp_dir/$( tmp_response_body_filename )"
if [ ! -f "$http_file_path" ]; then
  print_log_message -E "$script_name unable to find response file $http_file_path"
  clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" 3
fi
# compute the diff and exit: diff's status (0 identical, 1 different)
# becomes this script's exit code
/usr/bin/diff -q "$cache_file_path" "$http_file_path" > /dev/null
clean_up_and_exit -d "$tmp_dir" -I "$final_log_message" $?
| true |
f7f49bc65523409c509e287cadcd29bbedefbefe | Shell | SuzhouBus/crawler | /crawl_buses_sz3.sh | UTF-8 | 1,424 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Resolve the directory containing this script, then lay out the data files.
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
basedir="$dir/data/sz/buses"
guid_file="$basedir/guids"
log_file="$basedir/logs"
# An optional first argument selects an alternative guid list.
if [ "$1" != "" ]; then
  guid_file="$basedir/$1"
fi
line_count=0
bus_count=0
start_time=`date +%s`
# Each guid-file entry is "<line> <guid> [<guid> ...]" (one guid per
# direction); comment and blank lines are skipped.  The whole loop runs in
# one subshell so the counters survive until the summary at the end.
grep -v '^#\|^$' "$guid_file" | (
  while read entry; do
    line=`echo "$entry" | cut -d ' ' -f 1`
    buses_file="$basedir/$line.buses"
    csv_file="$basedir/$line.csv"
    echo $line
    line_count=$((line_count + 1))
    direction_id=0
    for guid in `echo "$entry" | cut -d ' ' -f 2-`; do
      echo $guid
      # Fetch and parse the line page, then append today's rows (tagged
      # with date and direction) to the CSV, dropping empty bus columns.
      results=$(wget -q -O /dev/stdout "http://www.szjt.gov.cn/BusQuery/APTSLine.aspx?LineGuid=$guid" | "$dir/parse_sz_html.sh")
      echo "$results" | grep -v ',,$' | sed s/\$/,$(date +%Y-%m-%d),"$direction_id"/ >> "$csv_file"
      # Third CSV column is the bus identifier.
      buses=`echo "$results" | cut -d , -f 3 | grep -v '^$'`
      direction_id=$((direction_id + 1))
      # Be polite to the server between requests.
      sleep 0.9
      for bus in $buses; do
        bus_count=$((bus_count + 1))
        # grep -F does a substring match against the known-bus file; a bus
        # not seen before is recorded and logged.
        if ! grep -F "$bus" "$buses_file" > /dev/null; then
          echo "$bus" >> "$buses_file"
          echo New bus of "$line": "$bus"
          echo `date '+[%Y-%m-%d %H:%M:%S]'` New bus of "$line": "$bus" >> $log_file
        fi
      done
    done
  done
  end_time=`date +%s`
  echo `date '+[%Y-%m-%d %H:%M:%S]'` Processed $bus_count buses of $line_count lines from $guid_file in $((end_time - start_time)) seconds. >> $log_file
)
| true |
a0d350843469c5d8e3f898c8c464be522647ddad | Shell | yaseenlotfi/mydotfiles | /utils/file_utils.sh | UTF-8 | 2,758 | 4.25 | 4 | [] | no_license | #!/bin/sh
#############################
### Pretty Echo functions ###
#############################
# Courtesy of https://github.com/rkalis/dotfiles
coloredEcho() {
    # Print "$3 $1" in bold color $2, where $2 is either a numeric tput
    # color index (0-7) or a case-insensitive color name.
    local exp="$1"
    local color="$2"
    local arrow="$3"
    # Map a color name to its tput index.  The regex must be unquoted: in
    # bash, a quoted right-hand side of =~ is matched as a literal string,
    # which made numeric arguments fail this test and fall through to the
    # name lookup, coming out as white (7).
    if ! [[ $color =~ ^[0-9]$ ]] ; then
        case $(echo "$color" | tr '[:upper:]' '[:lower:]') in
            black) color=0 ;;
            red) color=1 ;;
            green) color=2 ;;
            yellow) color=3 ;;
            blue) color=4 ;;
            magenta) color=5 ;;
            cyan) color=6 ;;
            white|*) color=7 ;; # white or invalid color
        esac
    fi
    tput bold
    tput setaf "$color"
    echo "$arrow $exp"
    tput sgr0
}
# Top-level progress line (blue).
info() {
    coloredEcho "$1" blue "========>"
}
# Top-level success line (green).
success() {
    coloredEcho "$1" green "========>"
}
# Top-level error line (red).
error() {
    coloredEcho "$1" red "========>"
}
# Indented sub-step progress line (magenta).
substep_info() {
    coloredEcho "$1" magenta "===="
}
# Indented sub-step success line (cyan).
substep_success() {
    coloredEcho "$1" cyan "===="
}
# Indented sub-step error line (red).
substep_error() {
    coloredEcho "$1" red "===="
}
##############################
### File Utility functions ###
##############################
realpath() {
    # Portable stand-in for realpath(1): print the absolute physical
    # (symlink-resolved directory) path of $1.
    local parent leaf
    parent=$(cd "$(dirname "$1")"; pwd -P)
    leaf=$(basename "$1")
    printf '%s/%s\n' "$parent" "$leaf"
}
now() {
    # Current local timestamp, formatted as YYYYMMDD_HHMMSS.
    date '+%Y%m%d_%H%M%S'
}
backup() { # Backup given filepath in a specific directory
    # Moves $1 into ~/.dotfiles_bkps/<name>_bkps/<name>.<timestamp>,
    # creating the backup tree on first use.  Returns 1 on bad usage or a
    # missing source file; relies on the error/substep_info helpers above.
    # Only accept one argument: filepath (equivalent to "$#" -ne 1)
    if [[ "$#" -gt 1 || "$#" -lt 1 ]]; then
        error "Expecting only one argument!"
        return 1
    else
        local src_fp="$1"
    fi
    local src_fn=$(basename "$1")
    local bkproot="$HOME/.dotfiles_bkps"
    local bkpdir="$bkproot/${src_fn}_bkps"
    # Timestamped destination so repeated backups never overwrite each other.
    local bkp_fp="$bkpdir/${src_fn}.$(now)"
    # Check if backup directory exists as ~/.old_dotefiles/.dotfile_type
    if [[ ! -d "$bkpdir" ]]; then
        if [[ ! -d "$bkproot" ]]; then
            mkdir "$bkproot"
        fi
        mkdir "$bkpdir"
        substep_info "Backup directory created at $bkpdir"
    else
        substep_info "Backup directory exists: $bkpdir"
    fi
    # Move target file to backup directory
    if [[ -e "$src_fp" ]]; then
        mv "$src_fp" "$bkp_fp"
        substep_info "Archived: $src_fn to $bkp_fp"
    else
        error "Can't find source file for backup!"
        return 1
    fi
}
symlink() {
    # Create a symlink $2 -> $1, first archiving any existing file or
    # (possibly dangling) symlink at $2 via backup().
    # Expect two args: SOURCE filepath and DESTINATION filepath
    local SRC="$1"
    local DST="$2"
    substep_info "Source: $SRC"
    # Check if file or symlink exists, move it to a backup dir
    # (-h catches broken symlinks that -e would miss)
    if [ -e "$DST" ] || [ -h "$DST" ]; then
        backup "$DST"
    fi
    # Create symlink
    if ln -s "$SRC" "$DST"; then
        substep_success "Symlink: $DST to $SRC."
    else
        substep_error "Symlinking $DST to $SRC failed!"
    fi
}
| true |
d61ec367bfc3e8507bdbf70140bc04d7d36887ab | Shell | cyrinux/dotfiles | /.local/bin/notmuch-deduplicate | UTF-8 | 521 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Deduplicate notmuch messages matching the given query: every duplicate
# file of a message is replaced by a hard link to the first copy.
# NOTE(review): query="$@" flattens multiple arguments into one string —
# presumably fine for notmuch query syntax, but worth confirming.
query="$@"
# job: given one TAB-separated list of file paths for a single message,
# keep the first path and hard-link the rest to it.
function job {
    # NOTE(review): this echoes ${fn} before it is assigned below, so the
    # first call prints an empty line — looks like a misplaced debug echo.
    echo "${fn}"
    IFS=$'\t' read -r -a myArray < <(echo "$1")
    fn="${myArray[0]}"
    for duplicate in "${myArray[@]:1}"; do
        rm -rf "${duplicate}"
        ln "${fn}" "${duplicate}"
    done
}
export -f job
# Feed one TSV line per message (all its filenames) into up to four
# parallel job workers; pv shows progress against the total message count.
parallel -j4 job {} ::: < \
    <(notmuch show \
        --format=json \
        --entire-thread=false \
        --body=false \
        "${query}" |
        jq -n \
            --stream \
            'fromstream(1 | truncate_stream(inputs))' |
        jq -r '.. | .filename? //empty | @tsv') |
    pv -l -s $(notmuch count "${query}") >/dev/null
| true |
b1165edf779c65115cb195c4cdc98063118483c4 | Shell | desertedscorpion/angrybeta | /angrybeta.sh | UTF-8 | 310 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Install the deploy SSH key on first run, then sync /srv with its origin.
# Each if-block ends in a bare "true" so the trailing "&&" chain keeps
# going even when a branch was skipped or its last command set a status.
if [ ! -f /root/.ssh/id_rsa ]
then
    # Copy the private key into place and lock down its permissions.
    cp /var/private/id_rsa /root/.ssh/id_rsa &&
    chmod 0600 /root/.ssh/id_rsa
    true
fi &&
if [ -d /srv/.git ]
then
    # Fetch, rebase local work onto origin/master, and push the result.
    git -C /srv fetch origin master &&
    git -C /srv rebase origin/master &&
    git -C /srv push origin master &&
    true
fi &&
true
| true |
8088177a93a1a05ea1a38eb6e28c833eba7658fb | Shell | KazuakiM/dotfiles | /lib/Vagrant/modules/ftp.sh | UTF-8 | 3,373 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#--------------------------------
# FTP setting
#
# $ ftp -A ftp://vagrant:vagrant@192.168.1.1:2224
#
# @author KazuakiM
#--------------------------------
#variable {{{
# Every step's stdout and stderr are appended here for debugging.
log=/var/log/vagrantfile.log
#}}}
echo "FTP setting ..."
# Chroot home directory for FTP users (see local_root below).
mkdir -p /tmp/ftp
# One &&-chain: install vsftpd, enable and firewall it, then edit
# /etc/vsftpd/vsftpd.conf in place:
#   - disable anonymous logins, enable ASCII transfers
#   - chroot local users (allow_writeable_chroot for a writable root,
#     with the users in /etc/vsftpd/chroot_list exempted)
#   - listen on IPv4 only, disable passive mode, use local time
# Finally seed the chroot list with "vagrant" and restart the daemon.
yum -y --enablerepo=epel,remi install vsftpd >> $log 2>> $log && \
systemctl enable vsftpd.service >> $log 2>> $log && \
firewall-cmd --add-service=ftp --permanent >> $log 2>> $log && \
firewall-cmd --reload >> $log 2>> $log && \
sed -i 's/anonymous_enable=YES/anonymous_enable=NO/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/local_umask/#local_umask/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/#xferlog_file/xferlog_file/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/#ascii_upload_enable=YES/ascii_upload_enable=YES/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/#ascii_download_enable=YES/ascii_download_enable=YES/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/#chroot_local_user=YES/chroot_local_user=YES/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/#chroot_list_enable=YES/chroot_list_enable=YES/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's:#chroot_list_file=/etc/vsftpd/chroot_list:chroot_list_file=/etc/vsftpd/chroot_list:g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/listen=NO/listen=YES/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i 's/listen_ipv6=YES/listen_ipv6=NO/g' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i '$ a allow_writeable_chroot=YES' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i '$ a local_root=/tmp/ftp' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i '$ a ls_recurse_enable=YES' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i '$ a pasv_enable=NO' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
sed -i '$ a use_localtime=YES' /etc/vsftpd/vsftpd.conf >> $log 2>> $log && \
echo '' > /etc/vsftpd/chroot_list >> $log 2>> $log && \
sed -i '$ a vagrant' /etc/vsftpd/chroot_list >> $log 2>> $log && \
systemctl restart vsftpd.service >> $log 2>> $log
| true |
ee8fca75c354b439203e79b3cc28cffa8f5c9b8e | Shell | teej/py-lambda-builder | /build-lambda-dependency | UTF-8 | 413 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Build Python wheels for AWS Lambda inside a Docker builder container.
# Usage: build-lambda-dependency <python version> <pypi package>...
if [ "$#" -lt 2 ] ; then
    # BUG FIX: these usage lines were bare strings, so bash tried to
    # *execute* them as commands ("Usage:...: command not found");
    # print them to stderr instead.
    echo "Usage: $(basename $0) <python version> <pypi package> [<pypi package> ...]" >&2
    echo " Currently supported Python version: 2.7 and 3.6" >&2
    exit 1
fi
# Wheels land here; the directory is bind-mounted into the container.
WHEELHOUSE="$(pwd)/wheelhouse"
BUILDER_CONTAINER=gaqzi/lambda-builder
mkdir -p "${WHEELHOUSE}"
# First argument selects the builder image tag; the rest are pip specs.
PYTHON_VERSION=$1
shift
docker run \
    -v "${WHEELHOUSE}:/wheelhouse" \
    "${BUILDER_CONTAINER}:python-${PYTHON_VERSION}" "$@"
| true |
cca1d75f2092ebffab35074b3f02c7ad3c4a357c | Shell | chipster/chipster-openshift | /download-tools-bin-k3s.bash | UTF-8 | 2,571 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Abort on the first failed command; everything below is stateful
# cluster setup.
set -e
command -v yq >/dev/null 2>&1 || { echo >&2 "I require yq but it's not installed. Aborting."; exit 1; }
source scripts/utils.bash
# $1: tools-bin release to download, $2: size of the PVC to create.
tools_bin_version="$1"
tools_bin_size="$2"
if [ -z $tools_bin_version ]; then
  echo "Usage: bash download-tools-bin.bash TOOLS_BIN_VERSION TOOLS_BIN_SIZE"
  echo "Example: bash download-tools-bin.bash chipster-3.15.6 550Gi"
  echo ""
  echo "Create an OpenShift job for downloading the specified tools-bin version from the object storage and follow its output."
  echo ""
  exit 1
fi
# Refuse to clobber an existing tools-bin volume for this version.
pvc_name="tools-bin-$tools_bin_version"
if kubectl get pvc $pvc_name; then
  echo "$pvc_name exists already"
  exit 1
fi
# Create the destination PVC that will hold the tools-bin data.
cat <<EOF | kubectl apply -f -
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  annotations:
    volume.beta.kubernetes.io/storage-class: local-path
  name: ${pvc_name}
spec:
  accessModes:
  - "ReadWriteOnce"
  resources:
    requests:
      storage: ${tools_bin_size}
EOF
# Recreate the scratch PVC used for the downloaded archive; keep
# deleting until kubectl no longer finds it (deletion is asynchronous).
temp_pvc="${pvc_name}-temp"
while kubectl get pvc $temp_pvc; do
  kubectl delete pvc $temp_pvc
  sleep 1
done
cat <<EOF | kubectl apply -f -
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  annotations:
    volume.beta.kubernetes.io/storage-class: local-path
  name: ${temp_pvc}
spec:
  accessModes:
  - "ReadWriteOnce"
  resources:
    requests:
      storage: 400Gi
EOF
# Build the Job manifest: yq converts the YAML to JSON so jq can splice
# the download script's full text in as the container's bash -c command.
name=download-tools-bin-bash-job
cat <<EOF | yq e -o json - | jq .spec.template.spec.containers[0].command[2]="$(cat templates/jobs/download-tools-bin.bash | jq -s -R .)" | kubectl apply -f -
apiVersion: batch/v1
kind: Job
metadata:
  name: ${name}
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: ${name}
    spec:
      containers:
        - env:
          - name: TOOLS_BIN_VERSION
            value: ${tools_bin_version}
          name: ${name}
          image: docker-registry.rahti.csc.fi/chipster-images/base
          command: ["bash", "-c", ""]
          resources:
            limits:
              cpu: 2
              memory: 8Gi
            requests:
              cpu: 2
              memory: 8Gi
          volumeMounts:
          - mountPath: /mnt/tools
            name: tools-bin
          - mountPath: /mnt/temp
            name: temp
      volumes:
        - name: tools-bin
          persistentVolumeClaim:
            claimName: tools-bin-${tools_bin_version}
        - name: temp
          persistentVolumeClaim:
            claimName: ${temp_pvc}
          #emptyDir: {}
      restartPolicy: OnFailure
EOF
#TODO how to run this after the job has finished?
#oc delete pvc $temp_pvc
| true |
c41439fcd8e79e8466b20796643592c7d83a845c | Shell | mwaldegg/MaggoMirrorTools | /hdmi.sh | UTF-8 | 528 | 3.75 | 4 | [] | no_license | #!/bin/sh
# Query and toggle HDMI output on a Raspberry Pi via vcgencmd.
# Usage: hdmi.sh on|off|status

# Succeeds when the display is currently powered down.
display_is_off()
{
	/usr/bin/vcgencmd display_power | grep "display_power=0" >/dev/null
}

if [ "$1" = "off" ]; then
	/usr/bin/vcgencmd display_power 0
elif [ "$1" = "on" ]; then
	# Only touch the hardware when the display is actually off.
	if display_is_off
	then
		/usr/bin/vcgencmd display_power 1
	fi
elif [ "$1" = "status" ]; then
	if display_is_off
	then
		echo OFF
	else
		echo ON
	fi
else
	echo "Usage: $0 on|off|status" >&2
	exit 2
fi
exit 0
| true |
e969ab5544d85d37893cf50851efe273fae9b5a7 | Shell | desireall/summary | /record/linux shell--charge statistical/charge.sh | UTF-8 | 551 | 2.9375 | 3 | [] | no_license | #! /bin/bash
# Summarise in-game purchases found in gameserver.log: total order
# count, monthly-card (yueka) orders, per-tier diamond orders, and the
# resulting RMB total.  Assumes gameserver.log exists in the cwd.

# grep -c counts matching lines directly (replaces grep | wc -l).
chargeorderNum=$(grep -c "get charge order" gameserver.log)
echo 充值订单总数 : ${chargeorderNum}
total=0
# Monthly card: 30 RMB each.
yuka=$(grep "get charge order" gameserver.log | grep -c "ma62.yueka")
echo 充值月卡数量 : ${yuka}
total=$((total + yuka * 30))
# Diamond tiers in RMB tens: e.g. 6 -> the "60 diamonds / 6 RMB" pack,
# matched by product id "ma62.60zuanshi".
array=(
6
18
68
128
198
328
648
)
for data in "${array[@]}"
do
    azuan=$(grep "get charge order" gameserver.log | grep -c "ma62.${data}0zuanshi")
    echo 充值${data}0钻石订单数量 : ${azuan}
    # Arithmetic expansion replaces the legacy `let` builtin.
    total=$((total + azuan * data))
done
echo "总充值RMB: ${total}"
f67470e2ea64f38871727d259fee3d0aa29b4cc7 | Shell | xionghaihua1986/ansible | /keepalived/templates/check_haproxy.sh.j2 | UTF-8 | 177 | 2.921875 | 3 | [] | no_license | #!/usr/bin/bash
# keepalived health check: if haproxy is not running, try to start it;
# if that also fails, stop keepalived so the VIP fails over to a
# healthy node.
num=$(ps -C haproxy --no-header | wc -l)
if [ "$num" -eq 0 ]; then
    # `if ! cmd` replaces the fragile "$?" check on the next line.
    if ! /etc/init.d/haproxy start &>/dev/null; then
        systemctl stop keepalived
    fi
fi
| true |
97edc9e9b884e3dafbefd924d7d9e595d138a63d | Shell | polarcat/share | /desktop-goodies/xmenu | UTF-8 | 1,923 | 3.328125 | 3 | [] | no_license | #!/bin/sh
#
# Copyright (C) 2015, Aliaksei Katovich <aliaksei.katovich at gmail.com>
#
# Released under the GNU General Public License, version 2
# Prefer a lightweight GTK2 theme for the menu window when available.
theme=$HOME/.gtkrc-2.0-lite
if [ -f $theme ]; then
export GTK2_RC_FILES=$theme
fi
# Helper: grab the PID column (3rd field) of a `wmctrl -l -x -p` line.
getpid() { pid=$3; }
getpid $(wmctrl -l -x -p | grep -iE "yad.menu")
# Toggle behaviour: if a yad menu window is already open, kill it and
# exit instead of spawning a second instance.
if [ -n "$pid" ]; then
kill $pid
exit 0
fi
# yad list window used as the menu.  Columns: 1 command key (hidden,
# search target), 2 icon, 3 command to run (printed on selection),
# 4 name, 5 comment (expands to fill the row).
CMD="\
yad --title 'Applications' --class 'menu' --borders 4 \
--width=720 --height=450 \
--skip-taskbar --list --no-markup --no-buttons \
--no-headers --sort-by-name --center \
--search-column 1 --print-column 3 --expand-column 5 \
--hide-column 1 \
--column 1 --column 2:IMG --column 3 --column 4 --column 5 "
# Build one yad row per .desktop file.  The awk program collects each
# file's Icon/Name/Comment/Exec keys (the first occurrence wins for
# Name/Comment), strips %-style field codes and leading paths from
# Exec, flushes one quintuple of quoted arguments per file at ENDFILE,
# and finally execs yad with all rows appended.  yad prints the
# selected row's column 3 (the command) on stdout.
run()
{
awk -v cmd="$CMD" ' {
	split($0, data, "=");
	if (index(data[1], "Icon") == 1) {
		col2 = data[2];
	} else if (index(data[1], "Name") == 1) {
		if (col4 == NULL)
			col4 = data[2];
	} else if (index(data[1], "Comment") == 1) {
		if (col5 == NULL)
			col5 = data[2];
	} else if (index(data[1], "Exec") == 1) {
		col3 = data[2];
		split(col3, exec, " ");
		if (index(col3, "%") != 0)
			col3 = exec[1];
		if (index(exec[1], "/") != 1) {
			col1 = col3;
		} else {
			split(exec[1], base, "/");
			col1 = base[length(base)];
		}
	}
} ENDFILE {
	if (length(col1))
		args = sprintf("%s\"%s\" ", args, col1);
	else
		args = sprintf("%s\"\" ", args);
	if (length(col2))
		args = sprintf("%s\"%s\" ", args, col2);
	else
		args = sprintf("%s\"\" ", args);
	if (length(col3))
		args = sprintf("%s\"%s\" ", args, col3);
	else
		args = sprintf("%s\"\" ", args);
	if (length(col4))
		args = sprintf("%s\"%s\" ", args, col4);
	else
		args = sprintf("%s\"\" ", args);
	if (length(col5))
		args = sprintf("%s\"%s\" ", args, col5);
	else
		args = sprintf("%s\"\" ", args);
	col4 = col5 = NULL;
} END {
	cmd = sprintf("exec %s %s 2>/dev/null", cmd, args);
	system(cmd);
}' $(ls -1 /usr/share/applications/*.desktop)
}
# Run whatever entry the user picked (text before the first '|').
res=$(run | cut -f1 -d'|')
if [ -n "$res" ]; then
$res
fi
| true |
1a3be76e62738efd29a761b404a32c4d79065432 | Shell | pahoughton/ansible-bacula.sd | /tests/unittest.guest.bash | UTF-8 | 308 | 3 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# 2015-06-17 (cc) <paul4hough@gmail.com>
#
# Smoke-test the bacula storage daemon install: the config file must
# exist and the service must report itself running.  Prints one
# "<label>:pass|FAIL" line per check and exits non-zero on any FAIL.
status=0

# report LABEL RC -- print the outcome of a check and record failures.
report() {
	printf '%s' "$1:"
	if [ "$2" -eq 0 ]; then
		echo pass
	else
		echo FAIL
		status=1
	fi
}

[ -f /etc/bacula/bacula-sd.conf ]
report "exists /etc/bacula/bacula-sd.conf" $?

service bacula-sd status
report "running service bacual-sd" $?

exit $status
| true |
962049758e05d7e4dce1c10b46ca3116980ff28e | Shell | superuser-miguel/flatpaks | /com.google.chrome/resources/chrome | UTF-8 | 339 | 2.703125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# Keep chrome's process-singleton socket inside the sandbox runtime dir.
export TMPDIR=$XDG_RUNTIME_DIR/app/$FLATPAK_ID
# Optional user flags: every non-comment, non-blank line of
# chrome-flags.conf becomes a chrome command-line argument.
if [[ -f $XDG_CONFIG_HOME/chrome-flags.conf ]]; then
    # grep -E -v replaces the deprecated `egrep`, reading the file
    # directly instead of piping it through cat.
    CHROME_USER_FLAGS="$(grep -E -v -e '^#' -e '^$' "${XDG_CONFIG_HOME}/chrome-flags.conf")"
fi
# $CHROME_USER_FLAGS is intentionally unquoted so each whitespace-
# separated flag splits into its own argument.
exec zypak-wrapper.sh /app/extra/opt/google/chrome/google-chrome $CHROME_USER_FLAGS "$@"
| true |
e089a2bc35e0cf31d8e1ce2e9a17d8ff00a907ec | Shell | dejanr/dotfiles-retired | /i3blocks/battery | UTF-8 | 411 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
# i3blocks battery indicator: prints the long text and an empty short
# text.  Output: "N%+" while charging, "N%-" while discharging, "N%"
# when full, nothing when the battery helper reports nothing.

# Run the external `battery` helper once and parse both fields from the
# same snapshot (the original invoked it twice, so state and percentage
# could come from different readings).
battery_output=$(battery)
BATTERY_STATE=$(grep -wo "Full\|Charging\|Discharging" <<< "$battery_output")
BATTERY_POWER=$(grep -o "[0-9]\+" <<< "$battery_output")
# Threshold kept for i3blocks urgency handling (currently unused).
URGENT_VALUE=10

if [[ $BATTERY_STATE = "Charging" ]]; then
  echo "${BATTERY_POWER}%+"
  echo ""
elif [[ $BATTERY_STATE = "Discharging" ]]; then
  echo "${BATTERY_POWER}%-"
  echo ""
elif [[ $BATTERY_STATE = "" ]]; then
  echo ""
else
  echo "${BATTERY_POWER}%"
  echo ""
fi
| true |
bb5adfecc8795fcaf494b8feedc5541226996aab | Shell | 0110G/BashPractice | /TheWorldOfNumbers.sh | UTF-8 | 325 | 2.640625 | 3 | [] | no_license | #Link: https://www.hackerrank.com/challenges/bash-tutorials---the-world-of-numbers/problem?h_r=next-challenge&h_v=zen&h_r=next-challenge&h_v=zen&h_r=next-challenge&h_v=zen&h_r=next-challenge&h_v=zen
# Read two integers from stdin and print their sum, difference, product
# and (truncating) integer quotient, one per line.
read num1
read num2
# Arithmetic expansion replaces the legacy backtick `expr` calls (and
# needs no backslash-escaped '*').
echo $((num1 + num2))
echo $((num1 - num2))
echo $((num1 * num2))
echo $((num1 / num2))
| true |
88d7206fdf4dff633fc9a4ebd1471dad74116692 | Shell | alyssonajackson/scripts | /sitedown.sh | UTF-8 | 644 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# author: Alysson Ajackson
# date Ter Mar 4 20:08:41 BRT 2014
# Version 1:
# Disable an apache vhost, optionally delete its document root (with
# --del as the second argument), drop its /etc/hosts entry and restart
# apache.
sitename="$1"
# Nothing to do without a site name (silent exit 0, as historically).
test "$sitename" = "" && exit;
echo "shutting down: $sitename..."
a2dissite "$sitename"
#rm -f /etc/apache2/sites-available/$sitename
#rm -f /etc/apache2/sites-enabled/$sitename
echo "Site was removed: http://$sitename"
sitefolder="/var/www/sites/$sitename"
if test "$2" = "--del" && test -d "$sitefolder"
then
    # Options before operands ("rm dir -rf" only works with GNU rm);
    # ${sitefolder:?} aborts rather than ever expanding to an empty
    # path for rm -rf.
    rm -rf -- "${sitefolder:?}"
    echo "Directory was deleted: $sitefolder"
fi
# NOTE(review): $sitename is interpolated into the sed address, so a
# name containing '/' or regex metacharacters would corrupt the edit —
# confirm callers only pass plain hostnames.
sed --in-place "/^.*$sitename$/d" /etc/hosts
/etc/init.d/apache2 restart
| true |
8c4b9e70322d7d0579d2afa4d9afcae8b648698b | Shell | msavvop/simple_bash_scipts | /mycopy script/mycopy | UTF-8 | 1,894 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# mycopy SRC... DST
# cp-like helper that prompts before overwriting existing targets.
# With exactly two args: copy SRC to DST (file or directory).  With
# more: every argument but the last is copied into the final directory.
#check if parameters are less than 2
if [ "$#" -lt 2 ]; then
    echo "mycopy: use at least two parameters"
    exit 1
#check if parameters are more than 2
elif [ "$#" -gt 2 ]; then
    #find last parameter: after the loop, $i holds the final argument
    for i in "$@"; do
        :
    done
    lp=$i
    # The last parameter can also be found with the command
    # lp=`eval "echo \#$#"`
    # which can be used for up to 9 parameters
    # check if the last parameter is a directory
    if [ -d "$lp" ]; then
        # start copying the files to directory
        for i in "$@"; do
            if [ "$i" != "$lp" ]; then
                # check if source file exists
                if [ ! -f "$i" ]; then
                    echo "The file $i does not exist. Skipping...."
                    continue
                fi
                # check if target file exists
                target1="$lp/$i"
                if [ -f "$target1" ]; then
                    while true; do
                        echo -n " The file $target1 exists. Overwrite?Yy/Nn: "
                        read -r answer
                        case "$answer" in
                            # BUG FIX: both messages referenced the
                            # never-set variable $target; use $target1.
                            Y|y) cp "$i" "$lp"
                                 echo "file $target1 Overwriten"
                                 break;;
                            N|n) echo "File $target1 NOT overwritten"
                                 break;;
                            *) echo "Unacceptable answer.Please give Yes:(Yy) or No:(Nn)"
                        esac
                    done
                else
                    cp "$i" "$lp"
                    echo "File $i copied"
                fi
            fi
        done
    else
        echo "last parameter: $lp must be a directory"
        exit 1
    fi
else
    #exactly 2 parameters
    if [ ! -f "$1" ]; then
        echo "The file $1 does NOT exist"
        exit 1
    fi
    #define target file: copying into a directory keeps the source name
    if [ -d "$2" ]; then
        target="$2/$1"
    else
        target="$2"
    fi
    # check if target file exists
    if [ -f "$target" ]; then
        while true; do
            echo -n "The file $target exists. Overwrite? Yy/Nn: "
            read -r answer
            case "$answer" in
                Y|y) cp "$1" "$target"
                     echo "File $target overwriten"
                     break;;
                N|n) echo "File $target NOT overwritten"
                     break;;
                *) echo "Wrong Reply. Give Yes(Yy) or No(Nn)";;
            esac
        done
    else
        cp "$1" "$target"
        echo "file $1 copied"
    fi
fi
exit 0
| true |
eea7c52132b34923a2f18e07f5ad41c0d47c6b38 | Shell | yuyongpeng/install_shell | /mongodb/install_mongo.sh | UTF-8 | 2,461 | 3.1875 | 3 | [] | no_license | #!/bin/bash
#while getopts "v:bc" arg
#do
#    case $arg in
#        a)
#            # the argument is stored in $OPTARG
#            echo "a's arg:$OPTARG" ;;
#        v)
#            echo "b" ;;
#        c)
#            echo "c" ;;
#        ?)
#            # arg is "?" when the option is unrecognised
#            echo "unkonw argument" exit 1 ;;
#    esac
#done
# Install MongoDB 3.2 from the official yum repo, configure a three-node
# replica set (one arbiter) and create the admin/role users.
mongoRepoFile=/etc/yum.repos.d/mongodb-org-3.2.repo
mongoDbPath=/data/mongodb
replSetName=rs_push
# user with the "root" role
rootAsUser=root
rootAsPass=1q2w3e4r
# user with the "userAdminAnyDatabase" role
userAdminAnyDatabase_user1=user1
userAdminAnyDatabase_pass1=pass1
# user with the "readWriteAnyDatabase" role
readWriteAnyDatabase_user1=user1
readWriteAnyDatabase_pass1=pass1
# Write the yum repo definition ($releasever is escaped so yum, not this
# script, expands it).
touch $mongoRepoFile
cat > $mongoRepoFile <<END
[mongodb-org-3.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/\$releasever/mongodb-org/3.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc
END
yum install -y mongodb-org
mkdir -p ${mongoDbPath}
chown -R mongod.mongod ${mongoDbPath}
cp mongod.conf /etc/
# Escape the slashes in the data path so it can be used in a sed s///.
repl_mongoDbPath=${mongoDbPath//\//\\/}
echo $repl_mongoDbPath
sed -i "s/dataMongoPath/${repl_mongoDbPath}/g" /etc/mongod.conf
sed -i "s/rs_name/${replSetName}/g" /etc/mongod.conf
# Replica-set keyfile shared by the members.
keyFile=mongodb-keyfile
openssl rand -base64 90 > ${mongoDbPath}/${keyFile}
chown mongod.mongod ${mongoDbPath}/${keyFile}
/etc/init.d/mongod start
#service mongod restart
arbiter=10.11.27.95
master=10.11.82.245
master_pass=22
second=10.11.91.180
second_pass=11
PORT=27017
# Run on the master node only: initialise the replica set and create the
# privileged users.
# BUG FIX: the readWriteAnyDatabase role string below was missing its
# closing double quote, which made the whole createUser statement a
# mongo shell syntax error.
mongo <<END
config={_id:"${replSetName}",members:[{_id:0,host:"${arbiter}:${PORT}",arbiterOnly:true},{_id:1,host:"${master}:${PORT}",priority:1},{_id:2,host:"${second}:${PORT}",priority:2}]};
rs.initiate(config);
use admin;
db.createUser({user:"${rootAsUser}",pwd:"${rootAsPass}",roles:["root"]});
db.createUser({user:"${userAdminAnyDatabase_user1}",pwd:"${userAdminAnyDatabase_pass1}",roles:["userAdminAnyDatabase"]});
db.createUser({user:"${readWriteAnyDatabase_user1}",pwd:"${readWriteAnyDatabase_pass1}",roles:["readWriteAnyDatabase"]});
END
# Turn authentication on and restart with the new settings.
sed -i "s/# authorization/ authorization/g" /etc/mongod.conf
service mongod restart
# Smoke test: authenticated insert + read against the "test" database.
mongo -u ${readWriteAnyDatabase_user1} -p ${readWriteAnyDatabase_pass1} --authenticationDatabase admin <<END
use test;
db.testkk.insert({"id":"111"});
db.testkk.find();
END
| true |
deb915deb67e71b45cf57fd352c56ef202668634 | Shell | bboymega/dbdump | /dbdump.sh | UTF-8 | 318 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Dump one MySQL database to a timestamped gzip archive.
# Usage: dbdump.sh -s <db> -u <user> -p <password> -d <output dir>

# Without pipefail a failed mysqldump still exits 0 (gzip's status),
# silently leaving a truncated "successful" backup behind.
set -o pipefail

while getopts s:u:d:p: flag
do
    case "${flag}" in
        s) src=${OPTARG};;        # source database name
        u) username=${OPTARG};;   # MySQL user
        d) dst=${OPTARG};;        # destination directory
        p) password=${OPTARG};;   # NOTE(review): visible in `ps`; prefer ~/.my.cnf
    esac
done
DATE=$(date +"%Y-%m-%d-%H-%M-%S")
mysqldump -u "$username" -p"$password" "$src" | gzip > "${dst}/${src}-${DATE}.sql.gz"
| true |
7e88e119a8fca1b8f4df948bae6bdf6881be898b | Shell | caqg/linux-home | /cmd/Obsolete/ClearCASE/hasbranch.sh | UTF-8 | 355 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# List every element version on the given ClearCase branch type,
# searching recursively from the current directory.
# Usage: hasbranch.sh BRANCHTYPE
PATH=/usr/atria/bin:/usr/bin
export PATH

usage="`basename $0` branchtypename (on that branch, recursively down from .)"

# Exactly one argument is required (exit 1 otherwise); help flags print
# the usage and exit 0.
if [ $# -ne 1 ]; then
	echo $usage
	exit 1
fi
case $1 in
-h*|--h*|-?) echo $usage; exit 0;;
esac

#cleartool lstype -s -brtype $1 &&
exec cleartool find . -all -branch 'brtype('$1')' -print
#end hasbranch.sh
| true |
2e0b5fe04b01a5c3ab3964683f3e9d9d41c8e6f2 | Shell | jameshilliard/ethos-update | /updates/1.1.1/opt/ethos/sbin/ethos-postlogin | UTF-8 | 1,286 | 3.03125 | 3 | [] | no_license | #!/bin/bash
#/opt/ethos/bin/minertimer "Miner Startup - Up to 5 minutes" 5 &
# Run the user's hook script first, if present.
if [ -e "/home/ethos/custom.sh" ]; then
/home/ethos/custom.sh
fi
# Build a default IRC nickname "<randomword>-<panelid>" from the rig's
# panel id and a random dictionary word.
panelid=$(cat /var/run/panel.file)
rword=$(shuf -n 1 /opt/ethos/lib/english.txt)
# Seed the irssi config with that nickname unless the user pinned their
# own via the "persistent" marker file.
# NOTE(review): $rword/$panelid are substituted unescaped into the sed
# program inside eval; values containing '/' would break the command.
if [ ! -f /home/ethos/.irssi/persistent ]; then
cp /home/ethos/.irssi/base.config /home/ethos/.irssi/nickchange.conf
eval "sed -i.bak -e s/ETHOSNICK/$rword-$panelid/g /home/ethos/.irssi/nickchange.conf"
mv /home/ethos/.irssi/nickchange.conf /home/ethos/.irssi/config
else
echo "Not auto setting irssi nickname, persistent file present."
fi
# Same treatment for hexchat.
if [ ! -f /home/ethos/.config/hexchat/persistent ]; then
cp /home/ethos/.config/hexchat/base.conf /home/ethos/.config/hexchat/nickchange.conf
eval "sed -i.bak -e s/ETHOSNICK/$rword-$panelid/g /home/ethos/.config/hexchat/nickchange.conf"
mv /home/ethos/.config/hexchat/nickchange.conf /home/ethos/.config/hexchat/hexchat.conf
else
echo "Not auto setting hexchat nickname, persistent file present."
fi
# Regenerate the browser launcher with this rig's panel id baked in.
cp /opt/ethos/etc/browser.default /opt/ethos/bin/browser
eval "sed -i.bak -e s/ETHOSPANEL/$panelid/g /opt/ethos/bin/browser"
# Optionally lock the screen shortly after login when configured.
LOCKSCREEN=$(/opt/ethos/sbin/ethos-readconf lockscreen)
if [ ! -z "$LOCKSCREEN" ]; then
if [ "$LOCKSCREEN" = "enabled" ];then
sleep 10
/opt/ethos/bin/lock
fi
fi
| true |
ab91e128cf2914a55a68d5b2e3442c7156458835 | Shell | parevalo/CBookie | /scripts/report.sh | UTF-8 | 1,092 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# bash script to preprocess VIIRS data
# Input Arguments:
# -p searching pattern
# -t report period
# -i reporting lapse
# -n number of jobs
# -l line by line processing
# -R recursive
# --overwrite overwrite
# ori: origin
# des: destination
# default values
# Defaults; see the usage header above for the meaning of each flag.
pattern=carbon_r*.npz
t1=2000001
t2=2015365
njob=1
lapse=1
overwrite=''
recursive=''
line=''
# Parse flags; the first non-flag pair is taken as origin/destination
# and parsing stops there.
while [[ $# > 0 ]]; do
InArg="$1"
case $InArg in
-p)
pattern=$2
shift
;;
-n)
njob=$2
shift
;;
-t)
t1=$2
t2=$3
shift
shift
;;
-i)
lapse=$2
shift
;;
-l)
line='-l '
;;
-R)
recursive='-R '
;;
--overwrite)
overwrite='--overwrite '
;;
*)
ori=$1
des=$2
break
esac
shift
done
# Submit njob array-style SGE jobs; each job runs pyCBook.report on its
# own batch slice (-b $i $njob).  The quoted ';' survives into the qsub
# command line so the cd and the python call run as two commands.
echo 'Total jobs to submit is' $njob
for i in $(seq 1 $njob); do
echo 'Submitting job no.' $i 'out of' $njob
qsub -j y -N Report_$i -V -b y cd /projectnb/landsat/users/xjtang/documents/CBookie';' python -m pyCBook.report ${overwrite}${recursive}$line-p $pattern -i $lapse -t $t1 $t2 -b $i $njob $ori $des
done
| true |
a2af068994618501f66738937bf6232ab7bedde7 | Shell | ralucado/SPro | /test/lpcep.sh | UTF-8 | 1,040 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/sh -x
#
# Test slpcep program
#
# $Author: ggravier $
# $Date: 2007-01-19 17:35:24 +0100 (Fri, 19 Jan 2007) $
# $Revision: 132 $
#
prog=slpcep
echo "checking ${prog}:"
# The binary must have been built in the parent directory.
if test ! -x ../${prog}; then
echo "  you should probably build ${prog} before running test..."
exit 1
fi
mkdir -p log
#
# check usage
#
../${prog} --version > log/${prog}.stdout 2> log/${prog}.stderr
# diff log/${prog}.stdout ref/${prog}.usage > /dev/null 2> /dev/null
# $? below is the exit status of the --version run (the diff is
# commented out); the awk|tail expression is empty exactly when the
# stderr log has no lines, i.e. nothing was written to stderr.
if test $? -eq 0 -a x`awk '{print NR}' log/${prog}.stderr | tail -1` = x; then
echo "  build test passed"
else
echo "  build test failed (see test/log/${prog}.stderr for details)"
exit 1
fi
\rm -f log/*
#
# run test
#
# Process a known input and compare the output against the reference
# with scompare; same pass criterion as above (status 0, empty stderr).
../${prog} dat/know1.lin log/lpcep.1 > log/${prog}.stdout 2> log/${prog}.stderr
../scompare log/lpcep.1 ref/lpcep.1 > /dev/null 2> /dev/null
if test $? -eq 0 -a x`awk '{print NR}' log/${prog}.stderr | tail -1` = x; then
echo "  run test passed"
else
echo "  run test failed (see test/log/${prog}.stderr for details)"
exit 1
fi
\rm -f log/*
rmdir log
| true |
840f26de49938fe530a63a1be9cc2ad79c47914a | Shell | gattschardo/tox4j | /tools/travis/script | UTF-8 | 568 | 3.3125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# CI driver: run the sbt goals for the requested TEST_GOAL, then verify
# the working tree is still clean.
set -ex

# Style checks and compilation always run, so a compile error fails the
# build before anything else happens.
SBT_TASKS="scalastyle test:scalastyle checkstyle test:checkstyle test:compile"
# Cross-compiled targets cannot execute tests; only add them (plus
# coverage) when building for the host.
if [ "$TOX4J_TARGET" = "host" ]; then
	SBT_TASKS="coverage $SBT_TASKS test"
fi

if [ "$TEST_GOAL" = "correctness" ]; then
	tools/sbt $SBT_TASKS
elif [ "$TEST_GOAL" = "performance" ]; then
	#tools/sbt benchmark:benchmark
	# The bench suite is advisory (|| true); the upload must succeed.
	tools/sbt "testOnly *TravisBenchSuite" || true
	tools/sbt benchmark:upload
fi

# Fail if any step modified checked-in files (e.g. formatting).
git diff --exit-code
| true |
71db6c402ac9d4fe0324aa5e819d592ac5a91f5f | Shell | QubesOS/qubes-app-linux-split-gpg | /gpg-import-key | UTF-8 | 1,208 | 2.875 | 3 | [] | no_license | #!/bin/bash
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2014 Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# Import GPG key material into the split-gpg backend VM named by
# $QUBES_GPG_DOMAIN: key data comes from the files given as arguments,
# or from stdin when none are supplied.
if [ -z "$QUBES_GPG_DOMAIN" ]; then
    echo "ERROR: Destination domain not defined! Set it with QUBES_GPG_DOMAIN env variable." >&2
    exit 1
fi
if [ $# -gt 0 ]; then
    # cat the listed key files into the qubes.GpgImportKey RPC service.
    # The domain name is quoted so unusual names cannot word-split.
    exec /usr/lib/qubes/qrexec-client-vm "$QUBES_GPG_DOMAIN" qubes.GpgImportKey /bin/cat "$@"
else
    # No files: forward our own stdin (preserved by qrexec as fd
    # $SAVED_FD_0; the single quotes defer expansion to the inner shell).
    exec /usr/lib/qubes/qrexec-client-vm "$QUBES_GPG_DOMAIN" qubes.GpgImportKey /bin/sh -c 'cat /proc/self/fd/$SAVED_FD_0'
fi
| true |
07b959963e38bda3d3411788210ea5f860d44474 | Shell | Hives/dotfiles-old | /.scripts/wallpaper | UTF-8 | 380 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Set a random wallpaper with feh: the normal set when the darkmode
# marker file exists, the inverted set otherwise.
# filename=$(ls /home/hives/Pictures/nice\ stuff/brainstorm/wallpapers/*.png | sort -R | tail -1)
# feh --bg-fill "$filename"
darkmode="$HOME/.config/darkmode"
# Quoted so a HOME containing spaces cannot break the test.
if [ -e "$darkmode" ]
then
    feh --randomize --bg-fill /home/hives/Pictures/brainstorm/wallpapers/*.png
else
    feh --randomize --bg-fill /home/hives/Pictures/brainstorm/wallpapers/inverted/*.png
fi
| true |
2e3345fccc9db3e6fa5850f551c771e51bb4b890 | Shell | mattmeisinger/a4 | /test-hadoop-local.sh | UTF-8 | 769 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Compiles java code, jars it, and executes in hadoop. deletes output folder before
HADOOP_VERSION=1.1.2
HADOOP_HOME=~/hadoop-1.1.2

# compile java (-f: a missing bin/ on a fresh checkout is not an error;
# the original `rm -r` failed the very first run)
rm -rf java/MapReduce/bin
mkdir java/MapReduce/bin
javac -classpath ${HADOOP_HOME}/hadoop-core-${HADOOP_VERSION}.jar -d java/MapReduce/bin java/MapReduce/src/org/columbia/*.java

# jar the compiled classes; the subshell keeps the directory change
# local (replaces the original's three chained "cd .." calls)
rm -f java/MapReduce/EmailGrapher.jar
(
    cd java/MapReduce/bin || exit 1
    jar cvf EmailGrapher.jar ../bin .
)

# run hadoop on java jar
rm -rf test-output # remove existing output
${HADOOP_HOME}/bin/hadoop jar java/MapReduce/bin/EmailGrapher.jar org.columbia.ExtractEmailAddresses test-input test-output
6700d1598bb56900fe2b1bca8d8b4f7d50715587 | Shell | oyhel/inf9380 | /chipseq-slurm/ex2/single_getbedgraph.slurm | UTF-8 | 469 | 2.546875 | 3 | [] | no_license | #!/bin/bash
#SBATCH --job-name=getbedgraph
#SBATCH --account=ln0002k
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=3G
#SBATCH --time=01:00:00
# Cluster job environment (provides $SCRATCH and chkfile).
source /cluster/bin/jobsetup
module purge
module load samtools
module load bedtools
# Input: sorted bam file
input=$1
# Output: output name of bedGraph file
output=$2
# Register the output so the scheduler copies it back after the job.
chkfile $output
# Stage the bam and the chromosome-sizes table onto scratch storage.
cp $input $SCRATCH
cp ../data/sacisizes.txt $SCRATCH
# script
cd $SCRATCH
# Per-base genome coverage in bedGraph (-bg) form.
time genomeCoverageBed -ibam $input -bg -g sacisizes.txt > $output
a074a4f8aadef08801ae3143b14b8caa90f48bef | Shell | jfarrington/XRD | /Test_Scripts/XRD_OLK_TEST.sh | UTF-8 | 3,191 | 3.421875 | 3 | [] | no_license | #!bash.sh
#XRD_OLK_TEST: Record Leakage as a function of temperature. Write to a textfile (Detector_LEAK-year month day-hour min). Use bash.
#9/10/2013
#10/4/2013: Added measurement of OLK for all 640 pixels
proc_dly=2 #Delay between caput and caget commands for OLK
TEC_SENS_I=0 #Initial Sensor TEC current=TEC_SENS_I/100 (i.e 150=1.5A)
TEC_STEP_I=25 #Sensor TEC current increment=TEC_STEP_I/100 (i.e 150=1.5A)
#Leave TECs at the values below before endindg the script
TEC_SENS_FINAL=220
TEC_ASIC_FINAL=170
printf "Enter Detector number:\n"
read DETECTOR
printf "Enter acquisition time interval in seconds:\n"
read DELAY
caput det1.SHPT 4.0us
printf "Shaping time set to 4us\n"
caput det1.GAIN H
printf "Gain set to High\nStarting...\n"
caput det1:ao3 $(bc <<<"scale=2;0/100") #Set the sensor TEC current to 0A
caput det1:ao5 $(bc <<<"scale=2;100/100") #Setthe ASIC TEC current to 1.A
printf "TECs changed to start values. Waiting for temperature to stabilize for $DELAY sec\n"
filename1=`date "+XRD$DETECTOR-OLK_TEST_PVs-%y%m%d-%H%M"`
filename2=`date "+XRD$DETECTOR-OLK_TEST_OLK-%y%m%d-%H%M"`
#Create filenames
printf "XRD $DETECTOR\n">$filename1.txt
printf "ITERATION OLK0 OLK159 OLK319 OLK479 OLK639 AI1(Bias_V) AI2(RTD_Sensor) AI3(RTD_HOT) AI4(RTD_ASIC) AI5(CTRL_TEMP) AI6(Bias_I) AO3(Sensor_TEC) AO5(ASIC_TEC) START END\n">>$filename1.txt
printf "XRD $DETECTOR\n">$filename2.txt
printf "ITERATION OLK0-639 START END\n">>$filename2.txt
i=0
#Start measurement
while [ $TEC_SENS_I -le 250 ];
do
caput det1:ao3 $(bc <<<"scale=2;$TEC_SENS_I/100")
sleep $DELAY
STARTPV=$(date)
ITERATION=$(echo $i)
printf "\n$ITERATION $STARTPV\n"
caput det1.LOEN 0
sleep $proc_dly
OLK0=$(caget det1:ai0|awk '{print $2}')
caput det1.LOEN 159
sleep $proc_dly
OLK159=$(caget det1:ai0|awk '{print $2}')
caput det1.LOEN 319
sleep $proc_dly
OLK319=$(caget det1:ai0|awk '{print $2}')
caput det1.LOEN 639
sleep $proc_dly
OLK639=$(caget det1:ai0|awk '{print $2}')
AI1=$(caget det1:ai1|awk '{print $2}')
AI2=$(caget det1:ai2|awk '{print $2}')
AI3=$(caget det1:ai3|awk '{print $2}')
AI4=$(caget det1:ai4|awk '{print $2}')
AI5=$(caget det1:ai5|awk '{print $2}')
AI6=$(caget det1:ai6|awk '{print $2}')
AO3=$(caget det1:ao3|awk '{print $2}')
AO5=$(caget det1:ao5|awk '{print $2}')
ENDPV=$(date)
printf "$ITERATION $OLK0 $OLK159 $OLK319 $OLK479 $OLK639 $AI1 $AI2 $AI3 $AI4 $AI5 $AI6 $AO3 $AO5 $STARTPV $ENDPV \n">>$filename1.txt
#Get OLK
printf "$i " >>$filename2.txt
printf "\nOLK 0-639\n"
CHAN=0
STARTOLK=$(date)
while [ $CHAN -le 639 ]
do
caput det1.LOEN $CHAN
sleep $proc_dly
OLK=$(caget det1:ai0|awk '{print $2}')
printf "$OLK " >> $filename2.txt
CHAN=$(( $CHAN +1 ))
done
ENDOLK=$(date)
printf "$STARTOLK $ENDOLK\n" >> $filename2.txt
#Wait until next point
i=$(( $i +1 ))
TEC_SENS_I=$(($TEC_SENS_I+$TEC_STEP_I))
printf "$ENDOLK\n"
done
caput det1:ao3 $(bc <<<"scale=2;$TEC_SENS_FINAL/100") #Final sensor TEC current
caput det1:ao5 $(bc <<<"scale=2;$TEC_ASIC_FINAL/100") #Final ASIC TEC current
printf "TECs changed to final values. Waiting for temperature to stabilize for $DELAY sec\n"
sleep $DELAY
printf "END\n"
| true |
2a54b04c6c724b31af83e442cf14716764f06e6d | Shell | gsiems/go-marc21 | /cmd/test-build.sh | UTF-8 | 660 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build the four MARC command-line tools, then time each one against the
# sample records in ../git_ignore.

# build NAME -- announce and compile NAME.go into ./NAME
build() {
	echo "Building $1"
	go build "$1.go"
}

# announce NAME -- blank separator plus the "Testing" banner
announce() {
	echo ""
	echo "Testing $1"
}

for tool in marcdump xml2marc marc2xml marcsplit; do
	build "$tool"
done

announce marcdump
time ./marcdump ../git_ignore/malc-20180115.mrc > marcdump.out
announce xml2marc
time ./xml2marc ../git_ignore/collection.xml > xml2marc.out
announce marc2xml
time ./marc2xml ../git_ignore/malc-20180112.mrc > marc2xml.out
announce marcsplit
# Reset the split output directory: empty it if present, create if not.
[ -d split_out ] && rm split_out/*.mrc
[ -d split_out ] || mkdir split_out
time ./marcsplit -m ../git_ignore/malc-20180112.mrc -d split_out
| true |
04ed62f32b6655e29fe3997231ce3a3a05b33d5b | Shell | mitzerh/docker-apollo-php | /build.sh | UTF-8 | 319 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Build the docker image for a released git tag.
# Usage: build.sh [tag]   (defaults to "latest")
# docker cloud repo
REPO_ACCOUNT="mitzerh"
REPO_NAME="apollo-php"
REPO_TAG="latest"
if [ -n "$1" ]; then
    REPO_TAG="$1"
fi

# Only build when the requested tag really exists.  -F -x demands an
# exact whole-line, fixed-string match; the original unanchored grep
# also accepted any tag merely *containing* the string (and treated
# regex metacharacters specially).
TAG_OK=$(git tag | grep -F -x "$REPO_TAG")
if [ -n "${TAG_OK}" ]; then
    # Build from the tagged tree, then return to master.
    git checkout "$REPO_TAG"
    docker build -t "${REPO_ACCOUNT}/${REPO_NAME}:${REPO_TAG}" .
    git checkout master
fi
| true |
f64bdd37a8d4d1094e9fcd7aa24a54a582c70bab | Shell | YuxianMeng/gcn-lm | /hgt_scripts/wiki103/prepare_wiki103.sh | UTF-8 | 3,150 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
URLS=(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip"
)
FILES=(
"wikitext-103-v1.zip"
)
for ((i=0;i<${#URLS[@]};++i)); do
file=${FILES[i]}
if [ -f $file ]; then
echo "$file already exists, skipping download"
else
url=${URLS[i]}
wget "$url"
if [ -f $file ]; then
echo "$url successfully downloaded."
else
echo "$url not successfully downloaded."
exit -1
fi
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
elif [ ${file: -4} == ".zip" ]; then
unzip $file
fi
fi
done
# preprocess
TEXT=/userhome/yuxian/data/lm/wiki-103 # yunnao
TEXT=/data/nfsdata2/nlp_application/datasets/corpus/english/wikitext-103 # gpu11
fairseq-preprocess \
--only-source \
--trainpref $TEXT/wiki.train.tokens \
--validpref $TEXT/wiki.valid.tokens \
--testpref $TEXT/wiki.test.tokens \
--destdir /data/nfsdata2/nlp_application/datasets/corpus/english/wikitext-103/data-bin \
--workers 12
# train
#DATA_BIN="/userhome/yuxian/data/lm/wiki-103/data-bin"
#MODEL_DIR="/userhome/yuxian/train_logs/lm/wiki-103/fairseq_baseline"
DATA_BIN="/data/nfsdata2/nlp_application/datasets/corpus/english/wikitext-103/data-bin"
MODEL_DIR="/data/yuxian/train_logs/lm/wiki-103/fairseq_baseline"
mkdir -p $MODEL_DIR
LOG=$MODEL_DIR/log.txt
fairseq-train --task language_modeling \
$DATA_BIN \
--save-dir $MODEL_DIR \
--arch transformer_lm_wiki103 \
--max-update 286000 --max-lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \
--warmup-updates 16000 --warmup-init-lr 1e-07 --min-lr 1e-09 --optimizer nag --lr 0.0001 --clip-norm 0.1 \
--criterion adaptive_loss --max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 --fp16 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d \
>$LOG 2>&1 & tail -f $LOG
# eval: compute test-set perplexity with the best checkpoint.
DATA_BIN="/userhome/yuxian/data/lm/wiki-103/data-bin"
MODEL_DIR="/userhome/yuxian/train_logs/lm/wiki-103/fairseq_baseline"
#DATA_BIN="/data/nfsdata2/nlp_application/datasets/corpus/english/wikitext-103/data-bin"
#MODEL_DIR="/data/yuxian/train_logs/lm/wiki-103/urvashi"
LOG=$MODEL_DIR/ppl.txt
# Backgrounded on GPU 1; the log is followed in the foreground.
CUDA_VISIBLE_DEVICES=1 fairseq-eval-lm $DATA_BIN \
    --path $MODEL_DIR/checkpoint_best.pt \
    --sample-break-mode complete --max-tokens 3072 --tokens-per-sample 3072 \
    --context-window 2560 --softmax-batch 1024 \
    --gen-subset test \
    > $LOG 2>&1 & tail -f $LOG
# save features: dump kNN-LM datastore features for every split.
for subset in "test" "valid" "train"; do
    python eval_lm.py "$DATA_BIN" \
    --path "$MODEL_DIR/checkpoint_best.pt" \
    --sample-break-mode complete --max-tokens 3072 --tokens-per-sample 3072 \
    --context-window 2560 --softmax-batch 1024 \
    --gen-subset "$subset" \
    --dstore-mmap "$DATA_BIN" \
    --save-knnlm-dstore --dstore-fp16 --first 1000000000 # --first preserves dataset order
done
| true |
b1d8099abe3a03f9ec636b62b94a49cdba64e9ef | Shell | akan72/PiazzaTextualAnalysis | /piazzatextualanalysis/build.sh | UTF-8 | 797 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Abort unless both Piazza login environment variables are present.
for cred_var in PIAZZA_EMAIL PIAZZA_PASSWORD; do
    if [[ -z "${!cred_var}" ]]; then
        echo "${cred_var} is not set!"
        exit 1
    fi
done
# Create the data directory tree; -p makes reruns idempotent instead of
# erroring out when the directories already exist.
mkdir -p data data/posts data/dataframes

# Install Python package requirements
python -m pip install -r requirements.txt

# Run the pipeline only when no dataframes have been produced yet
# (data/dataframes is empty).
if [ ! "$(ls -A data/dataframes)" ]; then
    python pickle_posts.py
    python transform_posts.py
fi
# Run data analysis script and output plots
# TODO: Convert to Makefile | true |
9d1dc1e6a9a12f99389543af5dd3b15fa4be03eb | Shell | danangcrysnanto/phdthesis | /compile.sh | UTF-8 | 855 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
# compile project file
echo "Compiling PhD thesis"
echo "Compile online pdf"
# compile online pdf
pdflatex -jobname=main_online main
for comp in chapters/*tex
do
bibtex ${comp/.tex/}
done
pdflatex -jobname=main_online main
pdflatex -jobname=main_online main
# compile print pdf
echo "Compile print pdf"
sed '/twosidetrue/s/%//' main.tex > main_temp.tex
pdflatex -jobname=main_print main_temp
for comp in chapters/*tex
do
bibtex ${comp/.tex/}
done
pdflatex -jobname=main_print main_temp
pdflatex -jobname=main_print main_temp
# remove intermidaet file
echo "Cleaning temporary files"
rm main_temp.tex
rm *aux chapters/*bbl chapters/*blg chapters/*aux
for type in online print
do
rm main_${type}.lof main_${type}.log main_${type}.lot main_${type}.out main_${type}.toc
done
echo "Success!"
| true |
5de272a57a6fb38ed5848a61da2f21c3d3f32269 | Shell | onap/integration-devtool | /onap-offline/bash/tools/setup_nfs_mount.sh | UTF-8 | 1,231 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# COPYRIGHT NOTICE STARTS HERE
#
# Copyright 2018 © Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# COPYRIGHT NOTICE ENDS HERE
# Print usage help and terminate the script with status 1.
usage () {
    printf 'Usage:\n'
    printf '  ./%s nfs_master_ip\n' "$(basename "$0")"
    exit 1
}
# Require exactly one argument: the NFS master node's IP address.
if [ "$#" -ne 1 ]; then
    echo "Missing NFS master node"
    usage
fi

MASTER_IP=$1

#Install NFS common
#sudo apt-get update
#sudo apt-get install -y nfs-common

#Create NFS directory
sudo mkdir -p /dockerdata-nfs

#Mount the remote NFS directory to the local one
sudo mount "$MASTER_IP:/dockerdata-nfs" /dockerdata-nfs/

# Persist the mount across reboots.
echo "$MASTER_IP:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab
| true |
a6819a5673c26991d737e06ed07f8eff6586065e | Shell | moduda/pub | /tools/gam/gam-script-lib-default.sh | UTF-8 | 154 | 3.1875 | 3 | [] | no_license | #! /bin/bash
#
# Library for the scripts
#
# 15.jan.2016 ykim
#
DOMAIN="default.com"
# Print a usage line for the calling script; extra arguments are appended
# verbatim after the domain placeholder.
gen_usage () {
    printf 'Usage: %s [dom1|dom2|dom3] %s\n' "$(basename "$0")" "$*"
}
| true |
7e93e4931b2873a354fda021bbc969c307381bfe | Shell | zhangjyr/LambdaObjectstore | /evaluation/util.sh | UTF-8 | 2,309 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Directory of this script.
# NOTE(review): assigning to PWD shadows the shell's builtin working-directory
# variable; the functions below rely on this overridden value — confirm intent.
PWD=`dirname $0`
REDBENCH=$GOPATH/src/github.com/wangaoone/redbench
echo $PWD
# Set the timeout on every lambda store function ${name}0 .. ${name}63.
# Arguments: $1 - function name prefix; $2 - timeout in seconds.
function update_lambda_timeout() {
    local name=$1
    local timeout=$2
    local i
    echo "updating lambda store timeout"
    for i in {0..63}
    do
        aws lambda update-function-configuration --function-name "${name}${i}" --timeout "${timeout}"
    done
}
# Set the memory size on every lambda store function ${name}0 .. ${name}63.
# Arguments: $1 - function name prefix; $2 - memory size in MB.
function update_lambda_mem() {
    local name=$1
    local mem=$2
    local i
    echo "updating lambda store mem"
    for i in {0..63}
    do
        aws lambda update-function-configuration --function-name "${name}${i}" --memory-size "${mem}"
    done
}
# Launch the proxy server in the foreground with debug logging.
# Arguments: $1 - prefix passed to the proxy (lambda function name prefix).
function start_proxy() {
    local prefix=$1
    echo "running proxy server"
    GOMAXPROCS=36 go run "$PWD/../proxy/proxy.go" -debug=true -prefix="${prefix}"
}
# Run the redbench client against the local proxy.
# Arguments: $1=n $2=c $3=keymin $4=keymax $5=sz $6=d $7=p $8=op $9=file
function bench() {
    local n=$1 c=$2 keymin=$3 keymax=$4
    local sz=$5 d=$6 p=$7 op=$8 file=$9
    go run "$REDBENCH/bench.go" -addrlist localhost:6378 -n "$n" -c "$c" -keymin "$keymin" -keymax "$keymax" \
    -sz "$sz" -d "$d" -p "$p" -op "$op" -file "$file" -dec -i 1000
}
# Replay a recorded trace through the playback simulator.
# Arguments: $1=d $2=p $3=scale $4=cluster size $5=trace file
#            $6=extra flag (e.g. -compact); may be empty.
function playback() {
    D=$1
    P=$2
    SCALE=$3
    CLUSTER=$4
    FILE=$5
    COMPACT=$6
    # $COMPACT is deliberately unquoted: when empty it must disappear rather
    # than be passed as an empty argument.
    $REDBENCH/simulator/playback/playback -addrlist localhost:6378 -d $D -p $P -scalesz $SCALE -cluster $CLUSTER $COMPACT $FILE
}
# Same as playback() but in dry-run mode (no proxy traffic, lean output).
# Arguments: $1=d $2=p $3=scale $4=cluster size $5=trace file
#            $6=extra flag (e.g. -compact); may be empty.
function dryrun() {
    D=$1
    P=$2
    SCALE=$3
    CLUSTER=$4
    FILE=$5
    COMPACT=$6
    # $COMPACT is deliberately unquoted so an empty value contributes no arg.
    $REDBENCH/simulator/playback/playback -dryrun -lean -d $D -p $P -scalesz $SCALE -cluster $CLUSTER $COMPACT $FILE
}
| true |
df71ef7dc08cf1556cc9f5a194b6ccce48c4446e | Shell | nickharrismcr/love2d-defender | /tools/findallvi | UTF-8 | 132 | 2.734375 | 3 | [] | no_license | #! /usr/bin/env bash
# Open every *.lua file that contains pattern $1, one at a time, in vi.
# grep -l prints matching file names directly (the old grep|cut pipeline lost
# the file name when xargs happened to pass a single file); process
# substitution keeps vi's stdin attached to the terminal.
while IFS= read -r file; do
    echo "$file"
    vi "$file"
done < <(find * | grep 'lua$' | xargs grep -l -- "$1" 2>/dev/null | sort -u)
| true |
713fa1d9de680f348e42b8617c12e2aaac73c807 | Shell | vioshyvo/genome_test | /files/list_files_ukko.sh | UTF-8 | 73 | 2.6875 | 3 | [] | no_license | for f in $1/f*
do
id=$(basename $f)
echo $id "Ecol/$id"
done > $2
| true |
eaf90db940553ffc1423b6a1a2868b8133aa0051 | Shell | mysociety/misc-scripts | /bin/very-backup | UTF-8 | 1,474 | 3.875 | 4 | [] | no_license | #!/bin/sh
#
# very-backup:
# Backup from very to another host, using snapshots and rsync.
#
# TODO:
# - trap EXIT and clean up on error/abort
# - exclude /tmp, corefiles, etc.
#
# Copyright (c) 2005 UK Citizens Online Democracy. All rights reserved.
# Email: chris@mysociety.org; WWW: http://www.mysociety.org/
#
# $Id: very-backup,v 1.3 2005-02-12 01:32:19 chris Exp $
#
set -ex
# Write a prefixed diagnostic message to standard error.
stderr () {
    # Use "$*" (one word) rather than "$@" inside a string (SC2145).
    echo "very-backup: $*" 1>&2
}
# Emit a candidate temporary mountpoint path; the caller mkdirs it and retries
# on collision, so the name only needs to be unlikely to repeat.
tempdirname () {
    # $RANDOM is cheaper and less predictable than the old "ps | sum" hack.
    echo "/tmp/very-backup.$( date +%s ).${RANDOM}${RANDOM}.$$"
}
# Create a mountpoint for the snapshot. Creating these in /tmp is usually a
# bad idea, but the directory is used read-only; retry until mkdir wins the
# race (mkdir is atomic, so a collision just loops).
mountpoint=$( tempdirname )
while ! mkdir -m 0700 $mountpoint ; do
mountpoint=$( tempdirname )
done
# rsync transport: dedicated backup key for the remote host.
RSYNC_RSH='ssh -i /root/.ssh/id_dsa_backup_caesious'
export RSYNC_RSH
# For each filesystem: take a UFS snapshot, attach it to a memory disk,
# mount it read-only, rsync it to the backup host, then tear everything down.
for filesystem in / /usr /var /data1 ; do
    remote="/scratch/very-backups/$( echo $filesystem | sed 's#/#_#g' )"
    snap=$( echo "$filesystem/snapshot" | sed 's#//#/#g' )
    if [ -e $snap ] ; then
        stderr "$snap already exists; aborting"
        exit 1
    else
        mount -u -o snapshot $snap $filesystem
        md_unit=$( mdconfig -a -t vnode -f $snap -n )
        mount -o ro /dev/md$md_unit $mountpoint
        rsync -vaHrSz --delete $mountpoint/. caesious.beasts.org:$remote/.
        umount $mountpoint
        mdconfig -d -u $md_unit
        rm -f $snap # -f in case we're run from an interactive shell
    fi
done
rmdir $mountpoint
| true |
662aac34cc99def683371dae6fe267f84bf050e9 | Shell | marianod92/aws-gitops-terraform | /bin/common.sh | UTF-8 | 2,473 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# usage:
# ./ssh-key.sh create create the SSH key and upload to AWS EC2
# ./ssh-key.sh destroy remove the local SSH key and from AWS EC2
# ./ssh-key.sh display some informations
# Resolve the project root (the parent directory of this script) and make it
# the working directory, so all relative paths below are project-relative.
dir="$(cd "$(dirname "$0")/.."; pwd)"
cd "$dir"
# ./ssh-key.sh create
# ./ssh-key.sh destroy
# ./ssh-key.sh
# Print the first argument in underlined green, then the rest in yellow.
log() {
  local heading=$1
  shift
  printf '\033[1;4;32m%s\033[0m \033[1;33m%s\033[0m\n' "$heading" "$*"
}
# Print the first argument in underlined magenta, the rest in cyan, on stderr.
err() {
  local heading=$1
  shift
  printf '\033[1;4;35m%s\033[0m \033[1;36m%s\033[0m\n' "$heading" "$*" >&2
}
# Print the first argument underlined, the rest plain, followed by a blank line.
under() {
  local heading=$1
  shift
  printf '\033[0;4m%s\033[0m %s\n\n' "$heading" "$*"
}
# create() {
# echo create
# }
# destroy() {
# echo destroy
# }
# show() {
# echo show
# }
# if [[ $1 == 'create' ]]; then
# create
# elif [[ $1 == 'destroy' ]]; then
# destroy
# else
# show
# fi
# source settings.sh
# cd infra
#
# Create SSH keys + import public key to AWS
#
# if [[ ! -f $SSH_KEY.pub ]]
# then
# KEY=$(aws ec2 describe-key-pairs \
# --key-names $SSH_KEY \
# --query KeyPairs \
# --output text \
# 2>/dev/null)
# if [[ -n "$KEY" ]]
# then
# err abort "the $SSH_KEY key already exists"
# exit
# fi
# log create "$SSH_KEY.pem + $SSH_KEY.pub keys (without passphrase)"
# ssh-keygen \
# -q \
# -t rsa \
# -N '' \
# -f $SSH_KEY.pem
# mv $SSH_KEY.pem.pub $SSH_KEY.pub
# chmod 400 $SSH_KEY.{pem,pub}
# log import "$SSH_KEY.pub key to AWS EC2"
# aws ec2 import-key-pair \
# --key-name $SSH_KEY \
# --public-key-material \
# file://./$SSH_KEY.pub
# fi
# #
# #
# #
# # cd "$dir"
# # if [[ ! -f rand.txt ]]
# # then
# # RAND=$(mktemp --dry-run XXXX)
# # echo $RAND > rand.txt
# # fi
# BUCKET=$(aws s3api list-buckets \
# --query 'Buckets[].Name' \
# --output text | grep $S3_BUCKET)
# if [[ -z $BUCKET ]]
# then
# log create "$S3_BUCKET bucket"
# aws s3 mb s3://$S3_BUCKET
# fi
# # infra init (a modifier)
# terraform init \
# -input=false \
# -backend=true \
# -backend-config="region=eu-west-3" \
# -backend-config="bucket=$S3_BUCKET" \
# -backend-config="key=terraform"
# terraform init | true |
d13428a2ace49baa73dad0c3bf2b565433d0ee03 | Shell | knmkr/bio-docker-run-wrapper | /bin/bcftools | UTF-8 | 394 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Pick the container user: with rootful docker, run as the invoking user so
# files created in the mounted $HOME keep the right ownership; with rootless
# docker, "root" inside the container maps back to the invoking user.
if [ -w /var/run/docker.sock ]; then
    # rootful docker
    CONTAINER_USER=$(id -u):$(id -g)
else
    # rootless docker
    CONTAINER_USER=root
fi

# https://bioconda.github.io/recipes/bcftools/README.html
docker run -it --rm \
    -u "$CONTAINER_USER" \
    -e HOME="$HOME" \
    -e USER="$USER" \
    -v "$HOME":"$HOME" \
    -w "$PWD" \
    quay.io/biocontainers/bcftools:1.10.2--h4f4756c_2 bcftools "$@"
| true |
338de5da412e8984265c353c1d79bc83e3d25633 | Shell | sunyi00/lain-cli | /future_lain_cli/generate-kube-config.sh | UTF-8 | 1,643 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'

# API server URL, read straight from the local kubeconfig (UUOC removed).
server=$(grep -o -P "(?<=server: ).+" ~/.kube/config)
# The secret containing the default service account's token.
token_name=$(kubectl describe sa default | grep -o -P "(?<=Mountable secrets:).+" | awk '{$1=$1};1')
ca=$(kubectl get "secret/$token_name" -o jsonpath='{.data.ca\.crt}')
token=$(kubectl get "secret/$token_name" -o jsonpath='{.data.token}' | base64 --decode)
namespace=$(kubectl get "secret/$token_name" -o jsonpath='{.data.namespace}' | base64 --decode)
# Write a Role/RoleBinding granting the default service account the verbs the
# CI needs, apply it, then print a ready-to-use kubeconfig to stdout.
cat > rbac.yml << EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: default
rules:
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - list
      - get
  - apiGroups:
      - ""
      - "apps"
      - "batch"
      - "networking.k8s.io"
      - "extensions"
    resources:
      - jobs
      - secrets
      - services
      - deployments
      - pods
      - pods/log
      - pods/exec
      - replicasets
      - cronjobs
      - ingresses
    verbs:
      - list
      - get
      - create
      - patch
      - update
      - delete
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: default
subjects:
  - kind: ServiceAccount
    name: default
    namespace: default
EOF
# Apply silently; set -e aborts the script if kubectl fails.
kubectl apply -f rbac.yml > /dev/null
# Emit the generated kubeconfig; the caller captures stdout.
echo "---
apiVersion: v1
kind: Config
clusters:
  - name: default-cluster
    cluster:
      certificate-authority-data: ${ca}
      server: ${server}
contexts:
  - name: default-context
    context:
      cluster: default-cluster
      namespace: default
      user: default-user
current-context: default-context
users:
  - name: default-user
    user:
      token: ${token}
"
| true |
be63a755167d7465cc973b2823d41b93c052da68 | Shell | ytyou/yongtao | /elk/cmds/synonym/add-docs | UTF-8 | 598 | 2.8125 | 3 | [] | no_license | #!/bin/bash
. ./setenv
curl -s -XPUT $ES_URL/$INDEX/linux/1 -d '{"symptom":"top: command not found","solution":"Run yum install top to install the package"}'
curl -s -XPUT $ES_URL/$INDEX/linux/2 -d '{"symptom":"rm: cannot remove file: Permission denied","solution":"Use sudo or login as root"}'
curl -s -XPUT $ES_URL/$INDEX/linux/3 -d '{"symptom":"Why is my machine so slow?","solution":"Use top to identify process that uses the most CPU"}'
curl -s -XPUT $ES_URL/$INDEX/linux/4 -d '{"symptom":"My disk is full, what now?","solution":"Use the du command to find out who uses them"}'
echo
exit 0
| true |
154f7ad14b54b43341011bf0ec05af8bd77beeb4 | Shell | greenplum-db/GreenplumPython | /concourse/scripts/entry.sh | UTF-8 | 6,342 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Entry point for GPDB source & cluster related tasks.
# This script setup basic build/test environment including:
# - Create a gpadmin user
# - Copy all files from /tmp/build/xxx (concourse WORKDIR) to /home/gpadmin/ and chown to gpadmin
# - Some dependencies doesn't exist in build/test image.
# - Special setup for individual task which needs root permission.
# - At the end, call the task script with gpadmin permission.
#
# Simple rules:
# 1. Any root operations should happen in this script.
# 2. Task script only requires gpadmin permission.
# 3. Since everything has been copied to the /home/gpadmin directory, use absolute path as much as
# as possible in the task script, it will reduce the confusion when we fly into the concourse
# container.
# 4. Bash functions should be idempotent as much as possible to make fly hijack debugging easier.
# -e: exit on error, -x: trace commands.
# NOTE(review): in "set -eox" the 'o' flag expects an option name; with none
# following, bash just prints the current option settings. "-eux" or
# "set -eo pipefail -x" was probably intended — confirm.
set -eox
# NOTE(review): this check only warns; despite the message it does not exit.
if [[ ! ${PWD} =~ /tmp/build/[0-9a-z]* ]]; then
  echo "This script should always be started from concourse WORKDIR."
fi
# Internal utilty functions
# Detect the host distribution and echo "<name><major-version>"
# (e.g. rhel7, sles12, photon3, ubuntu18.04); exits 1 when unknown.
_determine_os() {
  local name version
  if [ -f /etc/redhat-release ]; then
    name="rhel"
    version=$(sed </etc/redhat-release 's/.*release *//' | cut -f1 -d.)
  elif [ -f /etc/SuSE-release ]; then
    name="sles"
    version=$(awk -F " *= *" '$1 == "VERSION" { print $2 }' /etc/SuSE-release)
  elif grep -q photon /etc/os-release ; then
    name="photon"
    version=$( awk -F " *= *" '$1 == "VERSION_ID" { print $2 }' /etc/os-release | cut -f1 -d.)
  elif grep -q ubuntu /etc/os-release ; then
    name="ubuntu"
    # ubuntu keeps the full VERSION_ID (quotes stripped), e.g. 18.04
    version=$(awk -F " *= *" '$1 == "VERSION_ID" { print $2 }' /etc/os-release | tr -d \")
  else
    echo "Could not determine operating system type" >/dev/stderr
    exit 1
  fi
  echo "${name}${version}"
}
# Cache the detected OS for the installers and case statements below.
OS_NAME=$(_determine_os)
# Concourse WORKDIR (/tmp/build/xxxxx). Avoid using it directly in task
# scripts: everything under it is symlinked into /home/gpadmin later, so use
# absolute /home/gpadmin paths instead.
export CONCOURSE_WORK_DIR=${PWD}
# Install small build-time extras missing from some base images: wget itself
# on rhel7, and the pip bootstrapper on rhel7/ubuntu. rhel8 needs nothing.
install_extra_build_dependencies() {
  case "$OS_NAME" in
  rhel7)
    yum install -y wget
    wget https://bootstrap.pypa.io/pip/get-pip.py
    ;;
  rhel8)
    ;;
  ubuntu*)
    wget https://bootstrap.pypa.io/pip/get-pip.py
    ;;
  *) ;;
  esac
}
# Dependency installers
# Ideally all dependencies should exist in the docker image. Use this script to install them only
# if it is more difficult to change it in the image side.
# Download the dependencies with concourse resources as much as possible, then we could benifit from
# concourse's resource cache system.
# Install the Python tooling (tox) with whatever python3.9 is available:
# the one on PATH, or the copy shipped inside GPHOME as a fallback.
install_dependencies() {
  unset PYTHONPATH
  unset PYTHONHOME
  local python_bin
  # 'command -v' is the portable lookup; '|| true' keeps set -e from
  # aborting when python3.9 is not on PATH (the old 'local x=$(which ...)'
  # masked the exit status by accident).
  python_bin=$(command -v python3.9 || true)
  if [[ -z "${python_bin}" ]]; then
    python_bin="${GPHOME}/ext/python3.9/bin/python3.9"
  fi
  "${python_bin}" -m ensurepip
  "${python_bin}" -m pip install tox
}
# Create gpadmin user and chown all files in the PWD. All files will be linked to /home/gpadmin.
# All of our work should be started from there.
# Create the gpadmin user (idempotent), then link all build inputs into
# /home/gpadmin and hand ownership of /tmp/build to gpadmin.
setup_gpadmin() {
  # Already created on a previous run — nothing to do. Note: grep -c also
  # prints the match count; its exit status is what drives the 'if'.
  if grep -c '^gpadmin:' /etc/passwd; then
    return
  fi
  # If the image has sshd, use gpdb's setup_gpadmin_user.bash (it also sets
  # up ssh keys). Otherwise create the user locally per-distro.
  if [ -f /etc/ssh/sshd_config ]; then
    pushd "${CONCOURSE_WORK_DIR}"
    local gpdb_concourse_dir="${CONCOURSE_WORK_DIR}/gpdb_src/concourse/scripts"
    "${gpdb_concourse_dir}/setup_gpadmin_user.bash"
    popd
  else
    # Below is copied from setup_gpadmin_user.bash
    groupadd supergroup
    case "$OS_NAME" in
    rhel*)
      /usr/sbin/useradd -G supergroup,tty gpadmin
      ;;
    ubuntu*)
      /usr/sbin/useradd -G supergroup,tty gpadmin -s /bin/bash
      ;;
    sles*)
      # create a default group gpadmin, and add user gpadmin to group gpadmin,
      # supergroup, tty
      /usr/sbin/useradd -U -G supergroup,tty gpadmin
      ;;
    photon*)
      /usr/sbin/useradd -U -G supergroup,tty,root gpadmin
      ;;
    *) echo "Unknown OS: $OS_NAME"; exit 1 ;;
    esac
    echo -e "password\npassword" | passwd gpadmin
  fi
  mkdir -p /home/gpadmin
  chown gpadmin:gpadmin /home/gpadmin
  chown -R gpadmin:gpadmin /tmp/build
  # Expose the concourse inputs under the stable /home/gpadmin path.
  ln -s "${CONCOURSE_WORK_DIR}"/* /home/gpadmin
}
# Unpack and install the plpython3 GPDB component from the bin_plpython3
# input (the versioned tarball found next to the install script).
function install_plpython3() {
  mkdir -p bin_plpython3/install_tmp
  pushd bin_plpython3/install_tmp
  find .. -maxdepth 1 -regex ".*-[0-9\.]*-.*\.tar\.gz" -exec tar xfv {} \;
  ./install_gpdb_component
  popd
}
# Extract the gpdb binary tarball, install plpython3 when python3.9 is not on
# PATH, then create and source a demo cluster.
function install_gpdb() {
  [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel
  tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/*.tar.gz -C /usr/local/greenplum-db-devel
  local python_bin=$(which "python3.9")
  if [[ -z $python_bin ]]; then
    GPHOME=/usr/local/greenplum-db-devel install_plpython3
  fi
  chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel
  # Start cluster
  source "/home/gpadmin/gpdb_src/concourse/scripts/common.bash"
  make_cluster
  source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh
}
# Append the GPDB environment setup to gpadmin's .bashrc so interactive and
# 'su -c "source ..."' sessions get the cluster environment.
function setup_gpadmin_bashrc() {
  {
    echo "source /usr/local/greenplum-db-devel/greenplum_path.sh"
    echo "source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh"
    echo "export OS_NAME=${OS_NAME}"
    echo "export PATH=\$PATH:${GPHOME}/ext/python3.9/bin"
    # psycopg2 needs Python.h
    echo "export CFLAGS=-I/usr/local/greenplum-db-devel/ext/python3.9/include/python3.9"
  } >> /home/gpadmin/.bashrc
}
# Setup common environment (order matters: gpadmin must exist before the
# gpdb install chowns files to it).
install_extra_build_dependencies
setup_gpadmin
install_gpdb
install_dependencies
setup_gpadmin_bashrc
# Dispatch to the per-task script as gpadmin. bashrc won't be read by 'su',
# so it is sourced explicitly.
# NOTE(review): under "set -u" a missing $1 would abort here with an
# "unbound variable" error rather than hit the *) branch — confirm intended.
case "$1" in
test)
  # To make fly debug easier
  su gpadmin -c \
    "source /home/gpadmin/.bashrc &&\
     /home/gpadmin/greenplumpython_src/concourse/scripts/test.sh"
  ;;
*)
  echo "Unknown target task $1"
  exit 1
  ;;
esac
4dbaef7636bfce337ea5b25d28e2b9e22f6aafd3 | Shell | jessefriedland/dotfiles | /lib/utils | UTF-8 | 2,087 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Header logging: each argument printed in white, framed by newlines.
e_header() {
    local color reset
    color=$(tput setaf 7)
    reset=$(tput sgr0)
    printf "\n${color}%s${reset}\n" "$@"
}
# Success logging: green check mark before each message argument.
e_success() {
    local ok_color reset
    ok_color=$(tput setaf 64)
    reset=$(tput sgr0)
    printf "${ok_color}✓ %s${reset}\n" "$@"
}
# Error logging: red "x" marker before each message argument.
e_error() {
    local bad_color reset
    bad_color=$(tput setaf 1)
    reset=$(tput sgr0)
    printf "${bad_color}x %s${reset}\n" "$@"
}
# Warning logging: amber "!" marker before each message argument.
e_warning() {
    local warn_color reset
    warn_color=$(tput setaf 136)
    reset=$(tput sgr0)
    printf "${warn_color}! %s${reset}\n" "$@"
}
# Ask for confirmation before proceeding: show a warning with the given
# message, then read a single keystroke into $REPLY (checked later via
# is_confirmed).
seek_confirmation() {
    printf "\n"
    e_warning "$@"
    read -p "Continue? (y/n) " -n 1
    printf "\n"
}
# True when the last prompt reply was exactly one 'y' or 'Y'.
is_confirmed() {
    [[ "$REPLY" == [Yy] ]]
}
# Test whether the current directory is inside a git work tree.
is_git_repo() {
    # Run git directly; the old $(...) wrapper executed git's (redirected,
    # hence empty) output and only propagated the status by accident.
    git rev-parse --is-inside-work-tree &> /dev/null
}
# Test whether a command exists on PATH.
# $1 - command name to test
type_exists() {
    # Quote both expansions: an unquoted path with spaces would break [ ].
    if [ -n "$(type -P "$1")" ]; then
        return 0
    fi
    return 1
}
# Test whether a Homebrew formula is already installed.
# $1 - formula name (may include options)
formula_exists() {
    # Run brew directly; the old $(...) wrapper executed brew's (redirected,
    # therefore empty) output as a command and only worked by accident.
    if brew list "$1" >/dev/null; then
        printf "%s already installed.\n" "$1"
        return 0
    fi
    e_warning "Missing formula: $1"
    return 1
}
# Check if xCode is present (gcc on PATH is used as the proxy); trigger the
# installer when it is missing.
check_xcode() {
    if type_exists 'gcc'; then
      e_success "xCode is installed"
    else
      e_warning "The XCode Command Line Tools must be installed first."
      install_xcode
    fi
}
# Install xCode Command Line Tools interactively (Mavericks / Darwin 13+
# only); older systems are told to download them manually.
install_xcode() {
    # figure out what version of OS X is running
    darwin_version=$(uname -r)
    # are you on Mavericks, Darwin kernel 13.0.0 or above
    if (( ${darwin_version%%.*} > 12 )); then
      e_header "Installing xCode Command Line Tools. Follow the prompt"
      xcode-select --install
      seek_confirmation "Is xCode done installing"
      # NOTE(review): both branches call check_xcode, so the confirmation
      # answer is ignored — presumably intentional (re-check either way).
      if is_confirmed; then
        check_xcode
      else
        check_xcode
      fi
    else
      printf " Download them from: https://developer.apple.com/downloads\n"
      printf " Then run: bash ~/.dotfiles/bin/dotfiles\n"
      exit 1
    fi
}
e066ac2d7757f2ad484700618db6a99c4639da7b | Shell | Smithsonian/sidora-devops | /usr/lib64/nagios/plugins/check_islandora_test_ingest.sh | UTF-8 | 5,185 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Connection and test-object configuration for the Islandora ingest probe.
FH="/usr/local/fedora"
USERNAME="username"
PASSWORD="password"
FEDORA_HOST="localhost"
SOLR_HOST="localhost"
PORT="8080"
PROT="http"
# PID of the throwaway Fedora object this check creates/deletes.
PID="nagios:check_all"
CURRENT_TIME=$(date +"%T")
# Label is time-stamped so each run's object is distinguishable.
LABEL_DEFAULT="NagiosCheck$CURRENT_TIME"
IMAGE_URL="http://si-islandora.si.edu/sites/all/themes/smithsonian-theme/logo.png"
DSID="NAGIOS"
DSID_MICRO="TN"
# Persisted attempt-counter state between Nagios runs.
TMP_FILE="/tmp/nagios_check_islandora_test_ingest.tmp" #/tmp/nagios_check_islandora_test_ingest.settings
TEXT_OUTPUT="Islandora test ingest"
# Nagios plugin exit codes
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
# Number of consecutive attempts before the check turns critical; Nagios polls
# every 5 minutes, so 6 attempts gives the message queue ~30 minutes.
NUM_ATTEMPT_FAIL=6 #default, nagios checks every 5 minutes. eg: 6 attempts would be 30 minutes waiting for messages to get through
# Default exit state; checks below may escalate it.
EXIT_CODE=$STATE_OK
# Re-initialize the state file: attempt counter 0 plus the current label.
function reset_file() {
    printf '%s\n' "0" "$LABEL" > "$TMP_FILE"
}
# Load the persisted attempt counter and label; bump the counter (capped at
# NUM_ATTEMPT_FAIL), or create the state file on the first run.
if [ -f $TMP_FILE ]; then
NAGIOS_STACK_NUM=`head -1 $TMP_FILE`
if [ "$NAGIOS_STACK_NUM" = "" ]; then
NAGIOS_STACK_NUM=0
fi
# Label lives on line 2; fall back to the default when missing/corrupt.
LABEL=`head -2 $TMP_FILE | tail -1`
if [[ "$LABEL" = "$NAGIOS_STACK_NUM" || "$LABEL" = "" ]]; then
LABEL=$LABEL_DEFAULT
fi
if [ $NAGIOS_STACK_NUM -lt $NUM_ATTEMPT_FAIL ]; then
NAGIOS_STACK_NUM=`expr $NAGIOS_STACK_NUM + 1`
cat <<EOF > $TMP_FILE
$NAGIOS_STACK_NUM
$LABEL
EOF
#  else
#    cat <<EOF > test_stack.settings
#$NAGIOS_STACK_NUM
#$LABEL
#EOF
fi
else
NAGIOS_STACK_NUM=1
LABEL=$LABEL_DEFAULT
cat <<EOF > $TMP_FILE
$NAGIOS_STACK_NUM
$LABEL
EOF
fi
# Ingest the test object (plus RELS-EXT and image datastream) into Fedora.
# Only runs on the first attempt and on the final attempt; exits CRITICAL
# immediately on any ingest failure.
function fedora_ingest_check() {
#ingest into fedora
if [[ $NAGIOS_STACK_NUM -eq 1 || $NAGIOS_STACK_NUM -eq $NUM_ATTEMPT_FAIL ]]; then
# The source image must be reachable before anything else.
image_check=`curl -s -o /dev/null -w "%{http_code}" $IMAGE_URL`
if [ "$image_check" != "200" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Failed to access image"
echo $TEXT_OUTPUT
exit $STATE_CRITICAL
fi
#first make sure pid nagios:check is deleted
delete=`curl -XDELETE -u"$USERNAME:$PASSWORD" "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID" 2> /dev/null`
#try add new object
ingest_object=`curl -XPOST -u"$USERNAME:$PASSWORD" "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID?label=$LABEL" 2> /dev/null`
ingest_relsext=`curl -s -w "%{http_code}" -u "$USERNAME:$PASSWORD" -H "Content-type:text/xml" -X POST --upload-file /usr/lib64/nagios/plugins/relsext.xml "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID/datastreams/RELS-EXT?mimeType=text/xml&controlGroup=X&dsLabel=RELSEXT" 2> /dev/null`
# Fedora echoes the PID back on a successful object create.
if [[ "$ingest_object" = "$PID" ]]; then
#add datastream to nagios:check to check microservices
ingest_datastream=`curl -s -o /dev/null -w "%{http_code}" -XPOST "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID/datastreams/$DSID?mimeType=image/jpeg&controlGroup=M&dsLabel=$LABEL&dsLocation=$IMAGE_URL" --data "" -u $USERNAME:$PASSWORD 2> /dev/null`
if [ "$ingest_datastream" != "201" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Datastream ingest failed"
echo $TEXT_OUTPUT
exit $STATE_CRITICAL
fi
else
TEXT_OUTPUT="$TEXT_OUTPUT; Object ingest failed"
echo $TEXT_OUTPUT
exit $STATE_CRITICAL
fi
fi
}
# Verify that the ingested object and its datastream exist (HTTP 200);
# exits CRITICAL otherwise.
function fedora_check() {
object_check=`curl -s -o /dev/null -w "%{http_code}" -u"$USERNAME:$PASSWORD" "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID"`
if [ "$object_check" != "200" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Ingested object can't be found"
echo $TEXT_OUTPUT
exit $STATE_CRITICAL
fi
datastream_check=`curl -s -o /dev/null -w "%{http_code}" -u"$USERNAME:$PASSWORD" "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID/datastreams/$DSID"`
if [ "$datastream_check" != "200" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Ingested datastream can't be found"
echo $TEXT_OUTPUT
exit $STATE_CRITICAL
fi
}
# The microservices derive a TN (thumbnail) datastream from the ingested
# image; its absence means the microservice pipeline is stuck. Sets the
# critical state rather than exiting so the caller can decide.
function micro_check() {
micro_check=`curl -s -o /dev/null -w "%{http_code}" -u"$USERNAME:$PASSWORD" "$PROT://$FEDORA_HOST:$PORT/fedora/objects/$PID/datastreams/$DSID_MICRO"`
if [ "$micro_check" != "200" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Microservices failed"
EXIT_CODE=$STATE_CRITICAL
fi
}
# Query Solr and verify the test object's label was indexed.
# Reads: SOLR_HOST, PID, LABEL. Writes: TEXT_OUTPUT, EXIT_CODE on failure.
function solr_check() {
solr_index=$(curl "http://$SOLR_HOST:8080/solr/select/?q=PID:%22$PID%22&indent=on&fl=DC.content.title_s&qt=standard&wt=json" 2> /dev/null)
# -F treats the label as a literal string (not a regex); quoting prevents
# word-splitting of the time-stamped label.
solr_label=$(echo "$solr_index" | grep -F -- "$LABEL")
if [ "$solr_label" = "" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Solr index failed"
EXIT_CODE=$STATE_CRITICAL
fi
}
# Run the checks, then translate the result into a Nagios state:
# UNKNOWN while retries remain (queue backlog), CRITICAL on the last attempt.
TEXT_OUTPUT="$TEXT_OUTPUT; Attempt: $NAGIOS_STACK_NUM of $NUM_ATTEMPT_FAIL"
#run checks
fedora_ingest_check
fedora_check
# Give the microservices time to generate the derivative datastream.
sleep 20
micro_check
#solr_check
# NOTE(review): NUM_ATTEMPT_FAIL below is missing its '$'; it still resolves
# because [[ -lt ]] evaluates bare names arithmetically in bash — confirm.
if [[ "$EXIT_CODE" = "$STATE_CRITICAL" && $NAGIOS_STACK_NUM -lt NUM_ATTEMPT_FAIL ]]; then
TEXT_OUTPUT="$TEXT_OUTPUT; JMS Message queue backlog (unknown state)"
EXIT_CODE=$STATE_UNKNOWN
fi
if [[ "$EXIT_CODE" = "$STATE_CRITICAL" && $NAGIOS_STACK_NUM -eq $NUM_ATTEMPT_FAIL ]]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Islandora test ingest failed after $NAGIOS_STACK_NUM of $NUM_ATTEMPT_FAIL attempts"
EXIT_CODE=$STATE_CRITICAL
fi
#reset the attempt counter to 0 if everything checks out ok
if [ "$EXIT_CODE" = "$STATE_OK" ]; then
TEXT_OUTPUT="$TEXT_OUTPUT; Islandora test ingest was successful"
reset_file
fi
echo $TEXT_OUTPUT
#exit with EXIT_CODE var
exit $EXIT_CODE
| true |
8295b7c1126b5f0c3545c5263ff92c1c37dbea73 | Shell | tonooo71/dotfiles | /.config/i3/script/set_volume.sh | UTF-8 | 837 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Adjust/toggle the default PulseAudio sink volume and show a notification.
# Usage: set_volume.sh {+|-|mute}
# NOTE(review): $volume/$mute captured here are never used afterwards (the
# fresh state is re-queried after each change); they look like dead code.
volume=$(pamixer --get-volume)
mute=$(pamixer --get-mute)

# Shared notification: "Mute: N%" while muted, otherwise "VOL: N%".
notify_volume_state() {
  if [ "$(pamixer --get-mute)" = true ]; then
    notify-send --urgency=low "Mute: $(pamixer --get-volume)%"
  else
    notify-send --urgency=low "VOL: $(pamixer --get-volume)%"
  fi
}

# Quoting "$1" avoids a test-syntax error when no argument is given.
if [ "$1" = + ]; then
  pactl set-sink-volume @DEFAULT_SINK@ +5%
  notify_volume_state
elif [ "$1" = - ]; then
  pactl set-sink-volume @DEFAULT_SINK@ -5%
  notify_volume_state
elif [ "$1" = mute ]; then
  pactl set-sink-mute @DEFAULT_SINK@ toggle
  if [ "$(pamixer --get-mute)" = true ]; then
    notify-send --urgency=low "Mute"
  else
    notify-send --urgency=low "VOL: $(pamixer --get-volume)%"
  fi
fi
| true |
1fd976f22bf8d02d845fb701348ae13ff81dff56 | Shell | devops-soko/bash_script_practice | /check_string_num_and_len/check_str_num_and_len.sh | UTF-8 | 302 | 3.375 | 3 | [] | no_license | #! /bin/bash
# Validate $1: prints its longest-line length, then "true" when the value is
# all digits AND exactly 4 or 6 characters long, else "false". Always exits 0.
# Quoting '[^0-9]' stops the shell from globbing it against files in the cwd.
num_check=$(printf '%s\n' "$1" | grep '[^0-9]')
len_check=$(printf '%s\n' "$1" | wc -L)
echo "$len_check"
if [[ -n $num_check ]]
then
        echo false
else
        if [ "$len_check" -eq 4 ] || [ "$len_check" -eq 6 ]
        then
                echo true
        else
                echo false
        fi
fi
exit 0
| true |
4bec18d3256a64b28a4b6787a1384e304605bfe2 | Shell | googleapis/google-cloud-cpp | /google/cloud/spanner/ci/lib/spanner_emulator.sh | UTF-8 | 5,978 | 3.71875 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Include guard, written so it is safe under "set -o nounset": initialize the
# counter to 0 only when unset, then return on every sourcing after the first.
test -n "${GOOGLE_CLOUD_SPANNER_CI_LIB_SPANNER_EMULATOR_SH__:-}" || declare -i GOOGLE_CLOUD_SPANNER_CI_LIB_SPANNER_EMULATOR_SH__=0
if ((GOOGLE_CLOUD_SPANNER_CI_LIB_SPANNER_EMULATOR_SH__++ != 0)); then
  return 0
fi # include guard
# io::log / io::log_red helpers.
source module /ci/lib/io.sh

# PIDs of the gRPC and HTTP emulators; set by spanner_emulator::start and
# used by spanner_emulator::kill. 0 means "not running".
SPANNER_EMULATOR_PID=0
SPANNER_HTTP_EMULATOR_PID=0
# Scan the emulator log for its "Server address:" line and echo the port it
# chose; polls for up to ~8 seconds and echoes "0" when it never appears.
function spanner_emulator::internal::read_emulator_port() {
  local -r log="$1"
  shift
  local -r marker=" : Server address: "
  local port="0"
  local attempt
  for attempt in 1 2 3 4 5 6 7 8; do
    if grep -q -s "${marker}" "${log}"; then
      # The port is whatever follows the last ':' on the matching line.
      port="$(grep "${marker}" "${log}" | awk -F: '{print $NF}')"
      break
    fi
    sleep 1
  done
  echo "${port}"
}
# Scan the gateway log for its "REST server listening" line and echo the port
# it chose; polls for up to ~8 seconds and echoes "0" when it never appears.
function spanner_emulator::internal::read_http_emulator_port() {
  local -r log="$1"
  shift
  local -r marker=": REST server listening at localhost:"
  local port="0"
  local attempt
  for attempt in 1 2 3 4 5 6 7 8; do
    if grep -q -s "${marker}" "${log}"; then
      # The port is whatever follows the last ':' on the matching line.
      port="$(grep "${marker}" "${log}" | awk -F: '{print $NF}')"
      break
    fi
    sleep 1
  done
  echo "${port}"
}
# Starts the Cloud Spanner emulator (gRPC) and its HTTP gateway in the
# background. On success exports SPANNER_EMULATOR_HOST and
# SPANNER_EMULATOR_REST_HOST with the host:port each one is listening on.
# Optional args: $1 gRPC port, $2 HTTP port, $3 gateway-internal gRPC port
# (all default to emulator-chosen/derived ports). Returns 1 on any failure,
# killing whatever was already launched.
function spanner_emulator::start() {
  io::log "Launching Cloud Spanner emulator in the background"
  if [[ -z "${CLOUD_SDK_LOCATION:-}" ]]; then
    echo 1>&2 "You must set CLOUD_SDK_LOCATION to find the emulator"
    return 1
  fi
  local emulator_port=0
  if [[ $# -ge 1 ]]; then
    emulator_port=$1
  fi
  # We cannot use `gcloud beta emulators spanner start` because there is no way
  # to kill the emulator at the end using that command.
  readonly SPANNER_EMULATOR_CMD="${CLOUD_SDK_LOCATION}/bin/cloud_spanner_emulator/emulator_main"
  if [[ ! -x "${SPANNER_EMULATOR_CMD}" ]]; then
    echo 1>&2 "The Cloud Spanner emulator does not seem to be installed, aborting"
    return 1
  fi
  # The tests typically run in a Docker container, where the ports are largely
  # free; when using in manual tests, you can set SPANNER_EMULATOR_HOST.
  rm -f emulator.log
  "${SPANNER_EMULATOR_CMD}" --host_port "localhost:${emulator_port}" >emulator.log 2>&1 </dev/null &
  SPANNER_EMULATOR_PID=$!
  # Port 0 lets the emulator pick; read back the port it actually chose.
  emulator_port="$(spanner_emulator::internal::read_emulator_port emulator.log)"
  if [[ "${emulator_port}" = "0" ]]; then
    io::log_red "Cannot determine Cloud Spanner emulator port." >&2
    cat emulator.log >&2
    spanner_emulator::kill
    return 1
  fi
  export SPANNER_EMULATOR_HOST="localhost:${emulator_port}"
  io::log "Spanner emulator started at ${SPANNER_EMULATOR_HOST}"
  # Repeat the process to launch the emulator with HTTP support. We launch a separate process
  # as existing tests fail if we try and use gateway_main for both gRPC and REST.
  readonly SPANNER_HTTP_EMULATOR_CMD="${CLOUD_SDK_LOCATION}/bin/cloud_spanner_emulator/gateway_main"
  if [[ ! -x "${SPANNER_HTTP_EMULATOR_CMD}" ]]; then
    echo 1>&2 "The Cloud Spanner HTTP emulator does not seem to be installed, aborting"
    return 1
  fi
  local http_emulator_port=$((emulator_port + 1))
  if [[ $# -ge 2 ]]; then
    http_emulator_port=$2
  fi
  local grpc_emulator_port=$((http_emulator_port + 1))
  if [[ $# -ge 3 ]]; then
    grpc_emulator_port=$3
  fi
  # The tests typically run in a Docker container, where the ports are largely
  # free; when using in manual tests, you can set SPANNER_EMULATOR_REST_HOST.
  rm -f http_emulator.log
  "${SPANNER_HTTP_EMULATOR_CMD}" --hostname "localhost" \
    --grpc_port "${grpc_emulator_port}" --http_port "${http_emulator_port}" \
    --copy_emulator_stdout=true >http_emulator.log 2>&1 </dev/null &
  SPANNER_HTTP_EMULATOR_PID=$!
  http_emulator_port="$(spanner_emulator::internal::read_http_emulator_port http_emulator.log)"
  if [[ "${http_emulator_port}" = "0" ]]; then
    io::log_red "Cannot determine Cloud Spanner HTTP emulator port." >&2
    cat http_emulator.log >&2
    spanner_emulator::kill
    return 1
  fi
  # Using https:// results in SSL errors.
  export SPANNER_EMULATOR_REST_HOST="http://localhost:${http_emulator_port}"
  io::log "Spanner HTTP emulator started at ${SPANNER_EMULATOR_REST_HOST}"
}
# Terminates one emulator process and waits for it to be reaped.
# Arguments:
#   $1 - human-readable name used in the progress message
#   $2 - PID to kill; 0 (or empty) means "not running", which is a no-op
# Outputs: progress markers to stdout ('-' if kill failed, '+' if wait failed).
function spanner_emulator::internal::kill_one() {
  local -r name="$1"
  local -r pid="${2:-0}"
  if (( pid > 0 )); then
    echo -n "Killing ${name} [${pid}] "
    kill "${pid}" || echo -n "-"
    wait "${pid}" >/dev/null 2>&1 || echo -n "+"
    echo -n "."
    echo " done."
  fi
}

# Kills the running emulators (gRPC and HTTP gateway), if any were started.
# Safe to call even if spanner_emulator::start never ran (PID vars default to
# 0 so this also works under `set -u`). Resets both PID globals to 0.
function spanner_emulator::kill() {
  spanner_emulator::internal::kill_one "Spanner Emulator" "${SPANNER_EMULATOR_PID:-0}"
  SPANNER_EMULATOR_PID=0
  spanner_emulator::internal::kill_one "Spanner HTTP Emulator" "${SPANNER_HTTP_EMULATOR_PID:-0}"
  SPANNER_HTTP_EMULATOR_PID=0
}
| true |
b82831cae8f3db5bd0e47947ebe25e1a54944054 | Shell | kimdepypere/userscripts | /installation/installscripts/installDirs | UTF-8 | 945 | 2.71875 | 3 | [] | no_license | #!/bin/bash
##########################################################################
## Author: Kim Depypere
## Date: 04/12/2016
## Description:
##   Install the personal directory structure under ~/Documents:
##   per-language project folders plus a Go workspace (src/pkg/bin)
##   containing the golangProjects repo, cloned over HTTPS and then
##   switched to its SSH remote so pushes authenticate with keys.
##
## NOTE(review): mkdir (without -p) fails when a directory already
## exists, and the bare cd calls are unchecked, so the script is not
## idempotent and later steps still run after earlier failures --
## confirm that is acceptable for a one-shot setup script.
##########################################################################
cd ~/
echo "Making documents folder"
mkdir ~/Documents
echo "Making projects folder"
mkdir ~/Documents/projects
echo "Making go folder"
mkdir ~/Documents/projects/golang
# Standard GOPATH workspace layout: src/ for sources, pkg/ and bin/ for output.
cd ~/Documents/projects/golang
mkdir {src,pkg,bin}
cd src
mkdir github.com
cd github.com
mkdir kimdepypere
cd kimdepypere
# Clone over HTTPS, then point 'origin' at the SSH URL for key-based pushes.
git clone https://github.com/kimdepypere/golangProjects
cd golangProjects
git remote set-url origin git@github.com:kimdepypere/golangProjects.git
cd ~/
echo "Making ruby folder"
mkdir ~/Documents/projects/ruby
echo "Making php folder"
mkdir ~/Documents/projects/php
echo "Making python folder"
mkdir ~/Documents/projects/python
echo "Making published folder"
mkdir ~/Documents/projects/published
| true |
0e1846cb6864d32cec02214f92dc02875354838c | Shell | tlmakinen/21cm-unet | /jupyter.sh | UTF-8 | 662 | 2.953125 | 3 | [] | no_license | #!/bin/bash
#SBATCH -N 1
#SBATCH -n 1
#SBATCH --time 02:00:00
#SBATCH --job-name jupyter-notebook
#SBATCH --output jupyter-notebook-%J.log
# SLURM batch script: launch a Jupyter server on a compute node and print
# the ssh-tunnel command needed to reach it from a local browser.

# get tunneling info
XDG_RUNTIME_DIR=""          # cleared so Jupyter falls back to a writable runtime dir
node=$(hostname -s)         # short hostname of the allocated compute node
user=$(whoami)
cluster="tigercpu"          # login host the user tunnels through
port=8889                   # NOTE(review): fixed port may collide if two jobs share a node -- confirm
# print tunneling instructions jupyter-log
echo -e "
Command to create ssh tunnel:
ssh -N -f -L ${port}:${node}:${port} ${user}@${cluster}.princeton.edu
Use a Browser on your local machine to go to:
localhost:${port} (prefix w/ https:// if using password)
"
# load modules or conda environments here
module load anaconda3
conda activate 21cm
# Run Jupyter
jupyter-lab --no-browser --port=${port} --ip=${node} | true |
b5b9d55983f380a8f33de4daaecf757144753de2 | Shell | fuzzy/old-cpkg | /lib.d/cpkg.sh | UTF-8 | 6,683 | 4.25 | 4 | [] | no_license |
# cpkg - top-level dispatcher for the cpkg package manager.
#
# Usage: cpkg <command> [args...]
# Globals read: CPKG associative array (PKG_DIR, OS_STAMP, GLOBAL, SESSION,
#   ENV_DIR, FREEZER, TMP_DIR, PKGSCRIPT, LNDIR, LOGFILE, CMD_BUILDER),
#   RND, PAGER.
# Helpers defined elsewhere in this library: log_info, log_error,
#   cpkg_in_use, cpkg_belongs_to.
# NOTE(review): mktemp is invoked with templates lacking a trailing XXXXXX
#   (e.g. /tmp/cpkg.${RND}); GNU mktemp rejects such templates -- verify
#   that ${RND} supplies a valid template on the target platform.
cpkg() {
  case "${1}" in
    help)
      # Print the usage banner (ANSI-colored headings).
      echo
      echo -e "\033[1;36mUsage\033[4;37m\033[1;37m:\033[0m"
      echo "cpkg <command> <...>"
      echo
      echo -e "\033[1;36mCommands\033[4;37m\033[1;37m:\033[0m"
      echo " list [pkg]                           List packages or versions of [pkg]"
      echo " use [global|(session)] <pkg>-<ver>   Use <pkg>-<ver>"
      echo " drop [global|(session)] <pkg>-<ver>  Stop using <pkg>-<ver>"
      echo " log                                  Shows the logfile for your session."
      echo " recycle                              Regenerate the session paths."
      echo " renv                                 Rebuild environment variables."
      echo " freeze [pkg]                         Freeze a package at a given state."
      echo " freezer <pkg>                        Show the contents of the freezer, or filtered by <pkg>."
      echo " unfreeze [pkg]                       Unfreeze a package from a given state."
      echo " remove [pkg]                         Remove <pkg>."
      echo " search [filter]                      Search through available pkgscripts for [filter]."
      echo " install [pkg]                        Install package denoted by [pkg]."
      echo
      ;;
    list)
      # List installed packages for this OS stamp, optionally filtered by $2.
      # Each entry is shown with the profile(s) it belongs to and the third
      # '__'-separated field of the directory name.
      echo -e "\033[1;36mPackages\033[4;37m\033[1;37m:\033[0m"
      for itm in $(ls ${CPKG[PKG_DIR]}/|grep "${CPKG[OS_STAMP]}${2}"); do
        echo -e "[$(cpkg_belongs_to ${itm})] $(echo ${itm}|gawk -F'__' '{print $3}')"
      done
      ;;
    use)
      # Activate a package in the global or session profile (default: session),
      # then regenerate the session tree. No-op if already in use.
      if [ $(cpkg_in_use ${3}) -eq 0 ]; then
        case "${2}" in
          global)
            log_info "Adding ${3} to the global profile."
            echo "${CPKG[OS_STAMP]}${3}" >> ${CPKG[GLOBAL]}/.packages
            cpkg recycle
            ;;
          session)
            log_info "Adding ${3} to the session profile."
            echo "${CPKG[OS_STAMP]}${3}" >> ${CPKG[SESSION]}/.packages
            cpkg recycle
            ;;
          *)
            # No scope given: treat ${2} as the package and default to session.
            cpkg ${1} session ${2}
            ;;
        esac
      fi
      ;;
    drop)
      # Deactivate a package from the chosen profile (or both), then recycle.
      case "${2}" in
        global)
          if [ $(cpkg_in_use ${3}) -eq 1 ]; then
            log_info "Dropping ${3} from the global profile."
            tempf=$(mktemp /tmp/cpkg.${RND})
            grep -v "${CPKG[OS_STAMP]}${3}" ${CPKG[GLOBAL]}/.packages >${tempf}
            mv ${tempf} ${CPKG[GLOBAL]}/.packages
            cpkg recycle
          fi
          ;;
        session)
          if [ $(cpkg_in_use ${3}) -eq 1 ]; then
            log_info "Dropping ${3} from the session profile."
            tempf=$(mktemp /tmp/cpkg.${RND})
            grep -v "${CPKG[OS_STAMP]}${3}" ${CPKG[SESSION]}/.packages >${tempf}
            mv ${tempf} ${CPKG[SESSION]}/.packages
            cpkg recycle
          fi
          ;;
        *)
          # NOTE(review): when no scope argument is given, ${3} is empty here,
          # so the grep -v pattern is just the OS stamp and this strips EVERY
          # stamped entry from both profiles -- confirm this is intended
          # (the 'use' arm recurses with a default scope instead).
          for i in ${CPKG[GLOBAL]}/.packages ${CPKG[SESSION]}/.packages; do
            tempf=$(mktemp /tmp/cpkg.${RND})
            grep -v "${CPKG[OS_STAMP]}${3}" ${i} >${tempf}
            mv ${tempf} ${i}
          done
          cpkg recycle
          ;;
      esac
      ;;
    log)
      # This needs to show the current sessions logfile
      # Prefer the user's $PAGER; fall back to less.
      if [ ! -z "${PAGER}" ]; then
        ${PAGER} ${CPKG[LOGFILE]}
      else
        less ${CPKG[LOGFILE]}
      fi
      ;;
    recycle)
      # Rebuild the session tree: preserve the session package list, replace
      # the session directory, re-link every active package (global + session)
      # into it via LNDIR, then refresh the environment.
      log_info "Rebuilding session paths: "
      tmp=$(mktemp /tmp/cpkg.${RND})
      cp ${CPKG[SESSION]}/.packages ${tmp}
      mv ${CPKG[SESSION]} ${CPKG[SESSION]}.old
      mkdir -p ${CPKG[SESSION]}
      cp ${tmp} ${CPKG[SESSION]}/.packages
      cat ${CPKG[GLOBAL]}/.packages ${CPKG[SESSION]}/.packages > ${tmp}
      for itm in $(cat ${tmp}); do
        # Third LNDIR argument is the package name with the OS stamp stripped.
        ${CPKG[LNDIR]} ${CPKG[PKG_DIR]}/${itm}/ ${CPKG[SESSION]}/ $(echo ${itm}|sed -e "s/${CPKG[OS_STAMP]}//g") >/dev/null
      done
      rm -f ${tmp} ; unset tmp
      cpkg renv
      log_info "Cleaning up"
      # $(which rm) deliberately bypasses any shell alias for rm.
      $(which rm) -rf ${CPKG[SESSION]}.old
      ;;
    renv)
      # Source every file in ENV_DIR to rebuild environment variables.
      log_info "Rebuilding user environment: "
      for itm in $(ls ${CPKG[ENV_DIR]}/); do
        . ${CPKG[ENV_DIR]}/${itm}
      done
      ;;
    freeze)
      # Snapshot an installed package into the freezer as a tarball named
      # <pkg>--<MMDDYYHHMM>.tgz.
      # Check to see that we have a $2
      if [ -z "${2}" ]; then
        log_error "You must provide a package name."
      # Then check to see if our package exists
      elif [ ! -d ${CPKG[PKG_DIR]}/${CPKG[OS_STAMP]}${2} ]; then
        log_error "You must provide a valid package name."
      else
        # Set a few things straight
        s_dir=${PWD}
        dstamp=$(date +%m%d%y%H%M)
        log_info "Copying ${2}"
        # Run into the pkgdir and sync to temp
        cd ${CPKG[PKG_DIR]}
        tar -cf- ${CPKG[OS_STAMP]}${2} | tar -C ${CPKG[TMP_DIR]}/ -xf-
        # Create the tarball
        log_info "Freezing ${2}"
        cd ${CPKG[TMP_DIR]}
        mv ${CPKG[OS_STAMP]}${2} ${2}--${dstamp}
        tar -czf ${CPKG[FREEZER]}/${2}--${dstamp}.tgz ${2}--${dstamp}/
        # and clean up after ourselves
        log_info "Cleaning up"
        rm -rf ${CPKG[TMP_DIR]}/${2}--${dstamp}
        cd ${s_dir}
      fi
      ;;
    freezer)
      # List frozen snapshots as "<pkg> <timestamp>", optionally filtered
      # by package name ${2}.
      echo -e "\033[1;36mThe freezer\033[4;37m\033[1;37m:\033[0m"
      if [ -z "${2}" ]; then
        for itm in $(ls ${CPKG[FREEZER]}/); do
          pkg=$(echo $(basename ${itm})|awk -F'--' '{print $1}')
          tme=$(echo $(basename ${itm})|awk -F'--' '{print $2}'|awk -F. '{print $1}')
          printf "\t%-20s %8s\n" ${pkg} ${tme}
        done
      else
        if [ ! -z "$(ls ${CPKG[FREEZER]}|grep ${2})" ]; then
          for itm in $(ls ${CPKG[FREEZER]}/*${2}*); do
            pkg=$(echo $(basename ${itm})|awk -F'--' '{print $1}')
            tme=$(echo $(basename ${itm})|awk -F'--' '{print $2}'|awk -F. '{print $1}')
            # NOTE(review): width is %13s here vs %8s in the unfiltered list
            # above -- confirm the alignment difference is intentional.
            printf "\t%-20s %13s\n" ${pkg} ${tme}
          done
        fi
      fi
      ;;
    unfreeze)
      # Restore a frozen snapshot <pkg> at timestamp <hash> back into
      # PKG_DIR as <OS_STAMP><pkg>-<hash>.
      if [ -z "${2}" ] || [ -z "${3}" ]; then
        log_error "You must supply a package, and a hash."
      else
        if [ ! -f ${CPKG[FREEZER]}/${2}--${3}.tgz ]; then
          log_error "The package and hash you specified are invalid."
        else
          log_info "Extracting iceball"
          tar -C ${CPKG[TMP_DIR]}/ -zxf ${CPKG[FREEZER]}/${2}--${3}.tgz
          mv ${CPKG[TMP_DIR]}/${2}--${3} ${CPKG[PKG_DIR]}/${CPKG[OS_STAMP]}${2}-${3}
        fi
      fi
      ;;
    remove)
      # Delete an installed package directory and rebuild the session tree.
      if [ -d ${CPKG[PKG_DIR]}/${CPKG[OS_STAMP]}${2} ] && [ ! -z "${2}" ]; then
        log_info "Removing ${2}"
        rm -rf ${CPKG[PKG_DIR]}/${CPKG[OS_STAMP]}${2}
        cpkg recycle
      fi
      ;;
    search)
      # List available pkgscripts (with .sh stripped), optionally filtered
      # by ${2}.
      echo -e "\033[1;36mPkgscripts\033[4;37m\033[1;37m:\033[0m"
      if [ ! -z "${2}" ]; then
        for itm in $(ls ${CPKG[PKGSCRIPT]}/|grep ${2}); do
          echo " #) ${itm}" | sed -e 's/\.sh//g'
        done
      else
        for itm in $(ls ${CPKG[PKGSCRIPT]}/); do
          echo " #) ${itm}" | sed -e 's/\.sh//g'
        done
      fi
      ;;
    install)
      # Build and install a package by handing its pkgscript to CMD_BUILDER.
      if [ ! -z "${2}" ]; then
        if [ ! -f ${CPKG[PKGSCRIPT]}/${2}.sh ]; then
          log_error "You must specify a valid pkgscript."
        else
          ${CPKG[CMD_BUILDER]} ${2}
        fi
      else
        log_error "You must specify a pkgscript."
      fi
      ;;
    *)
      # Unknown or missing command: show usage.
      cpkg help
      ;;
  esac
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.