blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
22ac4225a185a7f195d5e1985e2a9fb31e2eb032
|
Shell
|
darthsuogles/build_scripts
|
/libuuid/build_libuuid.sh
|
UTF-8
| 222
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build script for libuuid 1.0.3.
# Relies on helper functions defined by sibling scripts:
#   ../build_pkg.sh   : provides guess_build_pkg (fetch/extract/build a package)
#   ../gen_modules.sh : provides guess_print_modfile (emit an environment-modules file)
source ../build_pkg.sh
source ../gen_modules.sh
# Flag read by the sourced helpers to enable the libuuid build.
BUILD_LIBUUID=yes
guess_build_pkg libuuid "http://downloads.sourceforge.net/project/libuuid/libuuid-1.0.3.tar.gz"
# NOTE(review): ${libuuid_ver} is presumably set by guess_build_pkg — confirm.
guess_print_modfile libuuid ${libuuid_ver}
| true
|
3d710d9b0a9a368ad97e6f1e83ff24e345be2de3
|
Shell
|
gary083/GAN_Harmonized_with_HMMs
|
/src/train_GAN.sh
|
UTF-8
| 1,054
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Train a GAN acoustic model, decode its phoneme posteriors with a WFST,
# and evaluate the phone error rate (PER).
#
# Arguments:
#   $1 - training iteration number
# Environment (must be set by the caller — TODO confirm against driver script):
#   bnd_type, setting, DATA_PATH, jobs
iteration=$1
prefix="${bnd_type}_iter${iteration}_${setting}_gan"
# Train GAN and output phoneme posterior
cd GAN-based-model
python3 main.py --mode train --cuda_id 0 \
                --bnd_type "$bnd_type" --iteration "$iteration" \
                --setting "$setting" \
                --data_dir "$DATA_PATH" \
                --save_dir "$DATA_PATH/save/${prefix}" \
                --config "./config.yaml"
cd ../
# WFST decode the phoneme sequences
cd WFST-decoder
python3 scripts/decode.py --set_type test --lm_type "$setting" \
                          --data_path "$DATA_PATH" --prefix "$prefix" \
                          --jobs "$jobs"
python3 scripts/decode.py --set_type train --lm_type "$setting" \
                          --data_path "$DATA_PATH" --prefix "$prefix" \
                          --jobs "$jobs"
cd ../
# Evaluation
python3 eval_per.py --bnd_type "$bnd_type" --set_type test --lm_type "$setting" \
                    --data_path "$DATA_PATH" --prefix "$prefix" \
                    --file_name test_output.txt | tee "$DATA_PATH/result/${prefix}.log"
| true
|
d6ee4183e16b9f657a13a25ef6f738f50610ddfd
|
Shell
|
wesraph/dotfiles-1
|
/home/.shared_env.d/__.sh
|
UTF-8
| 647
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
###############################################################################
# __.sh
###############################################################################
###############################################################################
# Verbose Init
#
# Print the current file path when starting up with verbose output. This file's
# presence makes it possible to confirm that files in this directory are
# sourced.
###############################################################################
. "$HOME/.shared_verbose_init"
# Report this file's path: $BASH_SOURCE under bash, $0 under other shells,
# since BASH_SOURCE is a bash-only variable.
__verbose_init printf "Loading %s\n" \
  "$([[ -n $BASH_VERSION ]] && echo "$BASH_SOURCE" || echo "$0")"
| true
|
f888b37c648675b4406d2b301f8124914a717d6e
|
Shell
|
edwardt/HVLearn
|
/dfa_learner/build-tools/compile
|
UTF-8
| 525
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Compile the DFA learner Java modules and generate the JNI headers used by
# the native SSL/TLS shims, then build the enabled shim (gnutls).
# Abort on the first failure so javah never runs against a broken build.
set -e

PACKAGE=org.learner
SSL_ROOT=../ssl
CLASS_PATH=./core/target/classes:./learners/target/classes
HEADER_PATH=$SSL_ROOT/utils

# Build and install the Maven modules quietly.
mvn -q compile
mvn -q install

# Generate JNI headers for the two native bridge classes.
javah -classpath "$CLASS_PATH" -o "$HEADER_PATH/jni_initcert.h" -jni "$PACKAGE.core.CertificateTemplate"
javah -classpath "$CLASS_PATH" -o "$HEADER_PATH/jni_verifier.h" -jni "$PACKAGE.learners.JNIVerifier"

# Compile SSL/TLS libraries (only gnutls is currently enabled)
#make -C $SSL_ROOT/openssl
make -C "$SSL_ROOT/gnutls"
#make -C $SSL_ROOT/mbedtls
#make -C $SSL_ROOT/matrixssl
#make -C $SSL_ROOT/curl
| true
|
64baf592d9703727046a4194a37a87780fe02ab7
|
Shell
|
hanqqv/kafka-data-catalog
|
/environment/data/produce-test-products.sh
|
UTF-8
| 470
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Produce the test product dataset into the product-catalog Kafka topic,
# Avro-encoding values with the schema in schemas/product.avsc.
set -u -e

# Absolute directory containing this script (works regardless of CWD).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
echo "$DIR"

# Feed the JSON dataset straight into the producer via input redirection
# (replaces the original useless `cat file | ...`).
kafka-avro-console-producer \
    --topic global.bank.local.ops.products.product-catalog \
    --bootstrap-server broker:29092 \
    --property schema.registry.url=http://schema-registry:8081 \
    --property key.schema='{"type":"string"}' \
    --property value.schema="$(< "$DIR/schemas/product.avsc")" \
    --property parse.key=true \
    --property key.separator=":" \
    < "$DIR/products-test-dataset.json"
| true
|
b32225779fea81cadebf7edf4c3b0f781d8ea2a8
|
Shell
|
nalabelle/dotfiles
|
/bashrc.d/osx
|
UTF-8
| 335
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# macOS-specific interactive-shell setup; sourced from bashrc.d.
# Bail out early on non-macOS (return, not exit — this file is sourced).
if [[ "$OSTYPE" != "darwin"* ]]; then
  return
fi
# Stop Terminal.app from writing per-session ~/.bash_sessions state files.
touch ~/.bash_sessions_disable
# $BASH_VERSINFO expands to element 0 of the array, i.e. the major version;
# the :-99 default skips the warning if the variable is somehow unset.
if [[ "${BASH_VERSINFO:-99}" -lt 5 ]]; then
  >&2 printf "bash version is lower than you're expecting\n"
  >&2 printf "install the newer one, add it to /etc/shells, and then chsh -s to it\n"
  # Silence Apple's "zsh is now the default" banner on the stock old bash.
  export BASH_SILENCE_DEPRECATION_WARNING=1
fi
| true
|
94918f2d1761b2cfbfc0fa4d04f456b5f4d43849
|
Shell
|
arshadaleem66/test-swu-script
|
/test-swu.sh
|
UTF-8
| 2,461
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# SWUpdate compound-update test harness: tracks boot attempts in a persistent
# counter file and checks that kernel + rootfs were switched together (A/B).
DEV="sda1"                               # block device holding the .swu image
COUNT_FILE=/run/media/persistent/count   # persisted boot-attempt counter
SYSTEM_FILE=/run/media/persistent/system # persisted current system id (A/B)
OVERFLOW=6                               # max update attempts before giving up
KERNEL_A="itbImageA"                     # expected kernel file for slot A
KERNEL_B="itbImageB"                     # expected kernel file for slot B
ROOTFS_A="2"                             # expected rootfs partition for slot A
ROOTFS_B="3"                             # expected rootfs partition for slot B
#######################################
# Increment the boot-attempt count stored in $COUNT_FILE.
# Treats a missing/empty count as 0, writes the new value back,
# and echoes it (via read_count) so callers can capture it.
#######################################
increment_count()
{
	local i
	i=$(read_count)
	if [ -z "$i" ]; then
		i=0
	fi
	i=$((i + 1))
	echo "$i" > "$COUNT_FILE"
	read_count
}

#######################################
# Print the current boot-attempt count from $COUNT_FILE
# (empty output if the file is empty).
#######################################
read_count()
{
	cat "$COUNT_FILE"
}
#######################################
# Mount the USB stick, stream the compound .swu image into swupdate, then
# unmount. Progress is displayed by swupdate-progress in the background.
# NOTE(review): mounts /dev/sda1 literally rather than using $DEV — confirm.
#######################################
update() {
	/usr/bin/swupdate-progress -r -w &
	/bin/mount /dev/sda1 /mnt/swupdate
	swupdate-client -v /mnt/swupdate/compound-image.swu
	umount /mnt/swupdate
}
#######################################
# Print the currently-recorded system id ("A"/"B") from $SYSTEM_FILE.
#######################################
read_system_id () {
	cat "$SYSTEM_FILE"
}

#######################################
# Persist the given system id to $SYSTEM_FILE.
# Arguments: $1 - system id to record
#######################################
write_system_id () {
	echo "$1" > "$SYSTEM_FILE"
}
##############
# Tests if Rootfs and Kernel are updated successfully.
#
# Contains bootloader specific reads of bootloader environment.
#
# Called by: validate_system_update
#
# Takes: No arguments
#
# Outputs: "0" => Success, "1" => Failed
#
check_compound_update () {
	# Current kernel file and rootfs partition from the U-Boot environment.
	KERNEL_FILE=$(fw_printenv | grep ^kernelfile | sed 's/.*=//')
	ROOTFS_ID=$(fw_printenv | grep ^part | sed 's/.*=//')
	SYSTEM_ID=$(read_system_id)
	# The recorded system id must match BOTH the kernel image and the rootfs
	# partition of that slot for the compound update to count as successful.
	# (Deprecated `[ ... -a ... ]` replaced with chained POSIX tests.)
	if [ "$SYSTEM_ID" = "A" ] && [ "$KERNEL_FILE" = "$KERNEL_A" ] && [ "$ROOTFS_ID" = "$ROOTFS_A" ]; then
		echo "0"
	elif [ "$SYSTEM_ID" = "B" ] && [ "$KERNEL_FILE" = "$KERNEL_B" ] && [ "$ROOTFS_ID" = "$ROOTFS_B" ]; then
		echo "0"
	else
		echo "1"
	fi
}
############
# Flip the recorded system id (A <-> B) and verify the compound update.
# Outputs "0" on success, "1" on failure.
validate_system_update () {
	SYSTEM_ID=$(read_system_id)
	# Default to failure so the caller never captures an empty status.
	COMPOUND_UPDATE_STATUS="1"
	# if system_id is NULL || system_id is A
	if [ -z "$SYSTEM_ID" ] || [ "$SYSTEM_ID" = "A" ]; then
		write_system_id "B"
		COMPOUND_UPDATE_STATUS=$(check_compound_update)
	elif [ "$SYSTEM_ID" = "B" ]; then
		write_system_id "A"
		COMPOUND_UPDATE_STATUS=$(check_compound_update)
	else
		# Diagnostics go to stderr: callers capture stdout as the status, so
		# the original stdout echo corrupted the value stored in STATUS.
		echo "ERROR! Should never get here! Some unexpected thing written in system file!" >&2
	fi
	# return status to main
	echo "$COMPOUND_UPDATE_STATUS"
}
# start the test
## check overflow
## update in else
COUNT=$(read_count)
if [ -z "$COUNT" ]; then
	# First boot: no count recorded yet — start counting and kick off an update.
	COUNT=$(increment_count)
	echo "update() count =$COUNT"
	update
elif [ "$COUNT" -lt "$OVERFLOW" ] && [ -n "$COUNT" ]; then
	COUNT=$(increment_count)
	echo "update() count = $COUNT"
	STATUS=$(validate_system_update)
	if [ "$STATUS" -eq 0 ]; then
		echo "Compound Update Successful!"
	else
		echo "Compound Update Failed!"
		#TODO: Can define status 1 & 2, to indicate successful kernel/rootfs update but failed compound update
	fi
	#TODO: save_status
	update
else
	echo "Overflow exceeded"
fi
| true
|
5aa27669222e77fe17b375d5b89349fb74d4d316
|
Shell
|
dmarcoux/shell_script_templates
|
/env
|
UTF-8
| 525
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Setup environment for other scripts. Source this script in other scripts
# -e: Exit immediately if a command exits with a non-zero status
# -u: Exit on unset variables
# -x: Write to standard error a trace for each command after it expands the command and before it executes it
set -eux
# Check if ABC is installed, just in case people don't read the README
if ! type ABC > /dev/null 2>&1; then
  echo 'Install ABC before running scripts. Refer to the README'
  exit 1;
fi
# ENV variables in alphabetical order
SOME_ENV="variable"
| true
|
5416a48778ce104aefe0c5dcca84eae7e5493ac2
|
Shell
|
yuriy0/homedir
|
/path/retrack_lfs.sh
|
UTF-8
| 1,078
| 4.15625
| 4
|
[] |
no_license
|
#! bash
# NOTE(review): non-standard shebang — '#! bash' relies on a PATH lookup;
# '#!/usr/bin/env bash' would be the conventional portable form.
# This script tries to fix broken LFS files in a git repo
# This creates two new commits on the local copy and pushes the first
set -e # git should not fail
# Print usage plus the given error message to stderr, then exit non-zero.
# FIX: ${FUNCNAME} expanded to "usage" rather than the script name — use $0.
usage () {
	echo >&2 "$0 GIT_PATHSPEC LFS_TRACKING_INFO"
	echo >&2 "  If any arguments have spaces, enclose in single quotes"
	echo >&2 "$@"
	exit 1
}
# Emit a "## "-prefixed banner line for progress reporting.
br () {
	printf '## %s\n' "$*"
}
# Require exactly two arguments.
[ "$#" -eq 2 ] || usage "2 arguments required, $# provided"
pathspec="$1"       # git pathspec of the broken LFS files
lfs_tracking="$2"   # pattern handed to `git lfs track/untrack`
# Strip embedded double quotes for use inside the commit messages.
# NOTE(review): lfs_tracking is a scalar; the array-style expansion works but
# is unusual — confirm it was not meant to take multiple patterns.
lfs_tracking_nm=("${lfs_tracking[@]//\"/}")
quo=\"
# Step 1: stop tracking, remove from the index, commit and push the removal.
br "Untracking path spec..."
git lfs untrack "$lfs_tracking"
br "Staging for commit..."
git rm --cached "$pathspec"
git add .gitattributes
br "Committing removal..."
git commit -m "${quo}Remove broken LFS files ($lfs_tracking_nm)${quo}"
br "Pushing..."
git push
# Step 2: re-track and re-add so the files are stored as proper LFS pointers.
br "Re-tracking path spec..."
git lfs track "$lfs_tracking"
br "Staging for commit..."
git add .gitattributes "$pathspec"
br "Committing re-addition..."
git commit -m "${quo}Re-adding broken LFS Files ($lfs_tracking_nm)${quo}"
# The second commit is deliberately NOT pushed automatically.
br "Completed; verify that the state of the repo is correct, then push."
git log -n2
| true
|
6cbaee707bd569026949c27200a962e8ce7506f0
|
Shell
|
htelsiz/chainlink
|
/internal/bin/cldev
|
UTF-8
| 724
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Runs a Chainlink node preconfigured to communicate with smartcontract/devnet(parity).
# Steps:
# 0. Have docker installed and configured
# 1. ./devnet/devnet
# 2. cd solidity && truffle migrate --network devnet
# 3. ./cldevnet
export LOG_LEVEL=debug
export ROOT=./internal/devnet
export ETH_URL=ws://localhost:18546
export ETH_CHAIN_ID=17
# Embed the current git SHA into the binary for version reporting.
LDFLAGS="-X github.com/smartcontractkit/chainlink/store.Sha=$(git rev-parse HEAD)"
if [ "$#" == 0 ] || [ "$1" == "node" ]; then
  go run -ldflags "$LDFLAGS" \
    main.go node -d -p "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ"
elif [ "$1" == "clean" ]; then
  rm "$ROOT/db.bolt"
  rm "$ROOT/log.jsonl"
else
  # Forward all remaining arguments verbatim.
  # FIX: the unquoted $@ re-split arguments containing spaces.
  go run -ldflags "$LDFLAGS" main.go "$@"
fi
| true
|
cbbcd5fdf97338981c8034f30cbdb5e51f1a5778
|
Shell
|
markcmiller86/SAF
|
/tools/build_sierra_saf/build.janus2.dbg
|
UTF-8
| 2,786
| 3.15625
| 3
|
[
"LicenseRef-scancode-generic-export-compliance"
] |
no_license
|
#!/bin/bash
#
# This script is intended to work with or without build_tpl
#
# Cross-compilation toolchain for the Intel TFLOPS (Janus) target.
export TFLOPS_XDEV=/usr/local/x-comp/intel/tflop/current
export PATH=$TFLOPS_XDEV/tflops/bin.solaris:/usr/xpg4/bin:$PATH
#
# If you don't use build_tpl modify $DEST or set SRC_DIR externally. Build_tpl
# does it for you.
#
export DEST=/sierra/Release/saf/2.0.0
test $SRC_DIR && export DEST=${SRC_DIR}
export SOURCE=${DEST}/saf
export SIERRA_PLATFORM=janus2
export SIERRA_OPT_DBG=dbg
export SIERRA_INSTDIR=dbg_dp_${SIERRA_PLATFORM}
export HDF_VER=1.7.45
export CONFDIR=cfg_${SIERRA_PLATFORM}_${SIERRA_OPT_DBG}
export OPTION=install_$SIERRA_PLATFORM
export HDF_DIR=/sierra/Release/hdf5/${HDF_VER}/hdf5-tflops
export INSTDIR=$DEST/$OPTION/$SIERRA_OPT_DBG
# Create the install tree; "already exists" errors are deliberately ignored.
mkdir $DEST/include > /dev/null 2>&1
mkdir $DEST/lib > /dev/null 2>&1
mkdir ${DEST}/lib/${SIERRA_INSTDIR} > /dev/null 2>&1
mkdir ${DEST}/include/${SIERRA_INSTDIR} > /dev/null 2>&1
mkdir ${DEST}/include/${SIERRA_INSTDIR}/private > /dev/null 2>&1
mkdir $DEST/$OPTION > /dev/null 2>&1
mkdir ${INSTDIR} > /dev/null 2>&1
mkdir ${INSTDIR}/lib > /dev/null 2>&1
mkdir ${INSTDIR}/include > /dev/null 2>&1
# all=1 selects a full reconfigure+rebuild; unset it to only rerun "gmake install".
export all=1
if [ $all ]
then
    mkdir $DEST/$CONFDIR > /dev/null 2>&1
    cd $DEST/$CONFDIR
    rm -rf $DEST/$CONFDIR/*
else
    cd $DEST/$CONFDIR
fi
# Compiler/linker setup for the Cougar (Janus) cross build.
export LIBS=" -lz -lmpich -lnoop_stubs"
export CC="icc -cougar -DJANUS "
export CPP="icc -cougar -E "
# libz/1.1.3 is temporary; try removing it. Fix might be in place.
export CPPFLAGS="-g -I${TFLOPS_XDEV}/tflops/cougar/include.fixmpi -I${HDF_DIR}/include -I/sierra/Release/libz/1.1.3"
#export CPPFLAGS="-g -I${TFLOPS_XDEV}/tflops/cougar/include.fixmpi -I${HDF_DIR}/include"
# export LDFLAGS=" -L${TFLOPS_XDEV}/tflops/cougar/lib/puma -L${HDF_DIR}/lib/${SIERRA_INSTDIR} -L/sierra/Release/libz/1.1.3/lib/dp_janus2_opt -L${TFLOPS_XDEV}/tflops/lib/RW"
export LDFLAGS=" -L${TFLOPS_XDEV}/tflops/cougar/lib/puma -L${HDF_DIR}/lib -L/sierra/Release/libz/1.1.3/lib/dp_janus2_opt"
# Commands used by the build system to launch serial/parallel test runs.
export RUNSERIAL="yod -size 1"
export RUNPARALLEL="yod -size 3"
if [ $all ]
then
    $SOURCE/src/configure \
        --host=i386-intel-osf1 \
        --enable-parallel --disable-serial \
        --enable-static --disable-shared --without-zlib \
        --enable-debug --disable-funcstack --disable-production \
        --prefix=$INSTDIR \
        --without-python \
        --without-java | tee $OPTION.configure
    # Use the pre-generated JANUS-specific config header.
    cp ${SOURCE}/tools/SAFconfig-JANUS.h ${SOURCE}/src/safapi/lib
    cp ${SOURCE}/tools/SAFconfig-JANUS.h ${SOURCE}/src/safapi/lib/SAFconfig.h
    gmake | tee $OPTION.make
# gmake check | tee $OPTION.test
    gmake install
else
    gmake install
fi
# Publish the freshly-installed libraries and headers into the shared tree.
cp $INSTDIR/lib/*.* ${DEST}/lib/${SIERRA_INSTDIR}/.
cp $INSTDIR/include/*.* ${DEST}/include/${SIERRA_INSTDIR}/.
cp $INSTDIR/include/private/*.* ${DEST}/include/${SIERRA_INSTDIR}/private/.
| true
|
2f302273c3029f332a052aa5195b2a1f9d1cbf74
|
Shell
|
awslabs/amazon-ebs-autoscale
|
/install.sh
|
UTF-8
| 8,598
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright Amazon.com, Inc. or its affiliates.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Fail fast and trace every command (trace output helps debug cloud-init runs).
set -e
set -x
# Help text printed verbatim by the -h/--help option below.
USAGE=$(cat <<EOF
Install Amazon EBS Autoscale
$0 [options] [[-m] <mount-point>]
Options
-d, --initial-device DEVICE
Initial device to use for mountpoint - e.g. /dev/xvdba.
(Default: none - automatically create and attaches a volume)
If provided --initial-size is ignored.
-f, --file-system btrfs | lvm.ext4
Filesystem to use (default: btrfs).
Options are btrfs or lvm.ext4
-h, --help
Print help and exit.
-m, --mountpoint MOUNTPOINT
Mount point for autoscale volume (default: /scratch)
-t, --volume-type VOLUMETYPE
Volume type (default: gp3)
--volume-iops VOLUMEIOPS
Volume IOPS for gp3, io1, io2 (default: 3000)
--volume-throughput VOLUMETHOUGHPUT
Volume throughput for gp3 (default: 125)
--min-ebs-volume-size SIZE_GB
Mimimum size in GB of new volumes created by the instance.
(Default: 150)
--max-ebs-volume-size SIZE_GB
Maximum size in GB of new volumes created by the instance.
(Default: 1500)
--max-total-created-size SIZE_GB
Maximum total size in GB of all volumes created by the instance.
(Default: 8000)
--max-attached-volumes N
Maximum number of attached volumes. (Default: 16)
--initial-utilization-threshold N
Initial disk utilization treshold for scale-up. (Default: 50)
-s, --initial-size SIZE_GB
Initial size of the volume in GB. (Default: 200)
Only used if --initial-device is NOT specified.
-i, --imdsv2
Enable imdsv2 for instance metadata API requests.
EOF
)
# Default mount point; may be overridden by -m or a positional argument.
MOUNTPOINT=/scratch
# defaults to set into ebs-autoscale.json
SIZE=200
VOLUMETYPE=gp3
VOLUMEIOPS=3000
VOLUMETHOUGHPUT=125
MIN_EBS_VOLUME_SIZE=150
MAX_EBS_VOLUME_SIZE=1500
MAX_LOGICAL_VOLUME_SIZE=8000
MAX_ATTACHED_VOLUMES=16
INITIAL_UTILIZATION_THRESHOLD=50
DEVICE=""
FILE_SYSTEM=btrfs
# Directory containing this installer; shared helpers live next to it.
BASEDIR=$(dirname $0)
. ${BASEDIR}/shared/utils.sh
# parse options
# NOTE(review): `(( "$#" ))` is a bashism under the #!/bin/sh shebang — it
# works only where /bin/sh is bash/busybox; confirm the target platforms.
PARAMS=""
while (( "$#" )); do
  case "$1" in
    -s|--initial-size)
      SIZE=$2
      shift 2
      ;;
    -t|--volume-type)
      VOLUMETYPE=$2
      shift 2
      ;;
    --volume-iops)
      VOLUMEIOPS=$2
      shift 2
      ;;
    --volume-throughput)
      VOLUMETHOUGHPUT=$2
      shift 2
      ;;
    --min-ebs-volume-size)
      MIN_EBS_VOLUME_SIZE=$2
      shift 2
      ;;
    --max-ebs-volume-size)
      MAX_EBS_VOLUME_SIZE=$2
      shift 2
      ;;
    --max-total-created-size)
      MAX_LOGICAL_VOLUME_SIZE=$2
      shift 2
      ;;
    --max-attached-volumes)
      MAX_ATTACHED_VOLUMES=$2
      shift 2
      ;;
    --initial-utilization-threshold)
      INITIAL_UTILIZATION_THRESHOLD=$2
      shift 2
      ;;
    -d|--initial-device)
      DEVICE=$2
      shift 2
      ;;
    -f|--file-system)
      FILE_SYSTEM=$2
      shift 2
      ;;
    -m|--mountpoint)
      MOUNTPOINT=$2
      shift 2
      ;;
    -i|--imdsv2)
      IMDSV2="true"
      shift 1
      ;;
    -h|--help)
      echo "$USAGE"
      exit
      ;;
    --) # end parsing
      shift
      break
      ;;
    -*|--*=) # any other flag-like token is an error (error comes from utils.sh)
      error "unsupported argument $1"
      ;;
    *) # positional arguments
      PARAMS="$PARAMS $1"
      shift
      ;;
  esac
done
# Restore the collected positional arguments as $1, $2, ...
eval set -- "$PARAMS"
# initialize is provided by shared/utils.sh (sourced above).
initialize
# for backwards compatibility evaluate positional parameters like previous 2.0.x and 2.1.x releases
# this will be removed in the future
if [ ! -z "$PARAMS" ]; then
  MOUNTPOINT=$1
  if [ ! -z "$2" ]; then
    DEVICE=$2
  fi
fi
# Install executables
# make executables available on standard PATH
# NOTE(review): {bin,shared} brace expansion is a bashism under #!/bin/sh.
mkdir -p /usr/local/amazon-ebs-autoscale/{bin,shared}
cp ${BASEDIR}/bin/{create-ebs-volume,ebs-autoscale} /usr/local/amazon-ebs-autoscale/bin
chmod +x /usr/local/amazon-ebs-autoscale/bin/*
ln -sf /usr/local/amazon-ebs-autoscale/bin/* /usr/local/bin/
ln -sf /usr/local/amazon-ebs-autoscale/bin/* /usr/bin/

# copy shared assets
cp ${BASEDIR}/shared/utils.sh /usr/local/amazon-ebs-autoscale/shared

## Install configs
# install the logrotate config
cp ${BASEDIR}/config/ebs-autoscale.logrotate /etc/logrotate.d/ebs-autoscale

# install default config: substitute every %%TOKEN%% placeholder in a single
# sed invocation (the original spawned ten chained sed processes plus cat).
sed -e "s#%%MOUNTPOINT%%#${MOUNTPOINT}#" \
    -e "s#%%VOLUMETYPE%%#${VOLUMETYPE}#" \
    -e "s#%%VOLUMEIOPS%%#${VOLUMEIOPS}#" \
    -e "s#%%VOLUMETHOUGHPUT%%#${VOLUMETHOUGHPUT}#" \
    -e "s#%%FILESYSTEM%%#${FILE_SYSTEM}#" \
    -e "s#%%MINEBSVOLUMESIZE%%#${MIN_EBS_VOLUME_SIZE}#" \
    -e "s#%%MAXEBSVOLUMESIZE%%#${MAX_EBS_VOLUME_SIZE}#" \
    -e "s#%%MAXLOGICALVOLUMESIZE%%#${MAX_LOGICAL_VOLUME_SIZE}#" \
    -e "s#%%MAXATTACHEDVOLUMES%%#${MAX_ATTACHED_VOLUMES}#" \
    -e "s#%%INITIALUTILIZATIONTHRESHOLD%%#${INITIAL_UTILIZATION_THRESHOLD}#" \
    ${BASEDIR}/config/ebs-autoscale.json \
    > /etc/ebs-autoscale.json
## Create filesystem
# The mount point must be a directory; create it if absent.
if [ -e $MOUNTPOINT ] && ! [ -d $MOUNTPOINT ]; then
  echo "ERROR: $MOUNTPOINT exists but is not a directory."
  exit 1
elif ! [ -e $MOUNTPOINT ]; then
  mkdir -p $MOUNTPOINT
fi
# If a device is not given, or if the device is not valid
if [ -z "${DEVICE}" ] || [ ! -b "${DEVICE}" ]; then
  DEVICE=$(create-ebs-volume --size $SIZE --type $VOLUMETYPE)
fi
# create and mount the BTRFS filesystem
if [ "${FILE_SYSTEM}" = "btrfs" ]; then
  mkfs.btrfs -f -d single $DEVICE
  mount $DEVICE $MOUNTPOINT
  # add entry to fstab
  # allows non-root users to mount/unmount the filesystem
  echo -e "${DEVICE}\t${MOUNTPOINT}\tbtrfs\tdefaults\t0\t0" | tee -a /etc/fstab
elif [ "${FILE_SYSTEM}" = "lvm.ext4" ]; then
  # Volume-group/logical-volume names come from the installed JSON config
  # (get_config_value is provided by shared/utils.sh).
  VG=$(get_config_value .lvm.volume_group)
  LV=$(get_config_value .lvm.logical_volume)
  pvcreate $DEVICE
  vgcreate $VG $DEVICE
  lvcreate $VG -n $LV -l 100%VG
  mkfs.ext4 /dev/mapper/${VG}-${LV}
  mount /dev/mapper/${VG}-${LV} $MOUNTPOINT
  echo -e "/dev/mapper/${VG}-${LV}\t${MOUNTPOINT}\text4\tdefaults\t0\t0" | tee -a /etc/fstab
else
  echo "Unknown file system type: ${FILE_SYSTEM}"
  exit 1
fi
# World-writable with the sticky bit, like /tmp.
chmod 1777 ${MOUNTPOINT}
## Install service
# Register the autoscale daemon with whichever init system is present.
INIT_SYSTEM=$(detect_init_system 2>/dev/null)
case $INIT_SYSTEM in
  upstart|systemd)
    echo "$INIT_SYSTEM detected"
    cd ${BASEDIR}/service/$INIT_SYSTEM
    . ./install.sh
    ;;
  *)
    echo "Could not install EBS Autoscale - unsupported init system"
    exit 1
esac
cd ${BASEDIR}
| true
|
a01855d4edd5db3bc9a7ddecbd232170c9616c56
|
Shell
|
mlorenzo-stratio/scriptcellar
|
/monitoring/utils/update_nagios_ui.sh
|
UTF-8
| 1,525
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Scrape the Nagios "Production" hostgroup status page, rewrite the HTML, and
# publish it as a self-refreshing dashboard (nagiosdivision2.html).
cd /var/www/html/
# Fetch the status CGI page (basic-auth credentials are embedded here), drop
# the left-hand column, then sed: widen cells, add a 60s meta refresh, inject
# a _COSA_ marker (replaced below by a JS countdown widget), retitle the page.
wget --http-user nagiosadmin --http-passwd n4g10s 'http://10.150.20.15/nagios/cgi-bin/status.cgi?hostgroup=Production&style=detail&serviceprops=270346&hostprops=270346&sorttype=2&hoststatustypes=14&sortoption=3&limit=100&servicestatustypes=30&noheader=1' && \
grep -v "^<td valign=top align=left width=33%" status.cgi\?hostgroup\=Production* > .status && \
sed -i -e "s:</td><td valign=top align=center width=33%>:<td valign=top align=center width=100%>:" \
    -e "s:^<head>:<head>\n<meta http-equiv="refresh" content="60">:" \
    -e "s:<div id='pagelimit'>:_COSA_\n<div id='pagelimit'>:" \
    -e "s:Service Status Details For Host Group 'Production':INTERXION MONITOR:" .status && \
{
    backIFS=$IFS
    IFS=$'\n'
    # Emit the page line by line; the _COSA_ marker line becomes the widget.
    for i in $(cat .status) ; do
        if [ "$i" = "_COSA_" ]; then
            cat << EOM
<div align=center>
<form name=countdown class=c>
Refresh in <input type=text size=2 name=secs>
$(date "+%d-%m-%y %H:%M:%S")
</form>
</div>
<script>
<!--
//
var milisec=0
var seconds=60
document.countdown.secs.value='60'
function display(){
if (milisec<=0){
milisec=9
seconds-=1
}
if (seconds<=-1){
milisec=0
seconds+=1
}
else
milisec-=1
document.countdown.secs.value=seconds+"."+milisec+"s"
setTimeout("display()",100)
}
display()
-->
</script>
EOM
            continue
        else
            echo $i
        fi
    done
    IFS=$backIFS
} > /var/www/html/nagiosdivision2.html
# Clean up the scratch files.
rm -f .status status.cgi\?hostgroup\=Production*
| true
|
0b63a482d5b88e73166613d3a59a47a047d5fa8f
|
Shell
|
SecDorks/libfastbit
|
/.git-svn/post-merge
|
UTF-8
| 623
| 2.953125
| 3
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/bin/sh
#
# An example hook script that is called after a successful
# merge is made.
# restore svn configuration files
mkdir -p ./.git/logs/refs/remotes/origin/
#mkdir -p ./.git/refs/remotes/origin/
cp ./.git-svn/logs/refs/remotes/origin/trunk ./.git/logs/refs/remotes/origin/trunk
#cp ./.git-svn/refs/remotes/origin/trunk ./.git/refs/remotes/origin/trunk
cp -r ./.git-svn/svn ./.git/
# remove old svn configuration in config file
git config --remove-section svn-remote.svn 2> /dev/null
# restore svn configuration in config file
# Each line of svn-config is "key value"; the unquoted $line deliberately
# relies on word-splitting to pass the pair as two git-config arguments.
while read line; do
    git config --local $line
done < ./.git-svn/svn-config
exit 0
| true
|
1ccad232f94890de2d057b1eb7568cfc1e1450f3
|
Shell
|
snakka-hv/jfrog-cli-publisher
|
/entrypoint.sh
|
UTF-8
| 2,700
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# GitHub Action entrypoint: upload build artifacts to Artifactory with the
# JFrog CLI and optionally publish the build info.
# All INPUT_* variables are supplied by the Action runner environment.
set -e

echo "Authenticating using $INPUT_CREDENTIALS_TYPE";

# Authenticate to the server.
# FIX: POSIX 'test' only defines '='; the original used the bashism '=='
# with unquoted operands, which breaks under dash and on empty values.
if [ "$INPUT_CREDENTIALS_TYPE" = "username" ];
then
  sh -c "jfrog rt c action-server --interactive=false --url=$INPUT_URL --user=$INPUT_USER --password=$INPUT_PASSWORD"
elif [ "$INPUT_CREDENTIALS_TYPE" = "apikey" ];
then
  sh -c "jfrog rt c action-server --interactive=false --url=$INPUT_URL --apikey=$INPUT_APIKEY"
elif [ "$INPUT_CREDENTIALS_TYPE" = "accesstoken" ];
then
  sh -c "jfrog rt c action-server --interactive=false --url=$INPUT_URL --access-token=$INPUT_ACCESS_TOKEN"
fi
sh -c "jfrog rt use action-server"

# Set working directory if specified
if [ "$INPUT_WORKING_DIRECTORY" != '.' ];
then
  cd "$INPUT_WORKING_DIRECTORY"
fi

# INPUT_ARTIFACTFROM is a ';'-separated list of upload specs; process each.
rest=$INPUT_ARTIFACTFROM
while [ -n "$rest" ] ; do
  str=${rest%%;*} # Everything up to the first ';'
  # Trim up to the first ';' -- and handle final case, too.
  # (POSIX 'case' replaces the original bash-only ${rest/;/} substitution.)
  case "$rest" in
    *\;*) rest=${rest#*;} ;;
    *)    rest= ;;
  esac
  # Interpret backslash escapes and strip leading whitespace.
  # ('printf %b' matches the intent of the original 'echo -e' portably.)
  str="$(printf '%b\n' "${str}" | sed -e 's/^[[:space:]]*//')"
  if [ -z "$str" ]
  then
    echo "Extra semicolons detected. Safe Skip"
  else
    echo "+ \"$str\""
    echo "[Info] Uploading artifact: jfrog rt u $str $INPUT_ARTIFACTTO --build-name=$INPUT_BUILDNAME --build-number=$INPUT_BUILDNUMBER"
    outputUpload=$( sh -c "jfrog rt u $str $INPUT_ARTIFACTTO --build-name=$INPUT_BUILDNAME --build-number=$INPUT_BUILDNUMBER" )
    echo "$outputUpload" > "${HOME}/${GITHUB_ACTION}.log"
    echo "$outputUpload"
  fi
done

## Log command for info
#echo "[Info] Uploading artifact: jfrog rt u $INPUT_ARTIFACTFROM $INPUT_ARTIFACTTO --build-name=$INPUT_BUILDNAME --build-number=$INPUT_BUILDNUMBER"
## Capture output
#outputUpload=$( sh -c "jfrog rt u $INPUT_ARTIFACTFROM $INPUT_ARTIFACTTO --build-name=$INPUT_BUILDNAME --build-number=$INPUT_BUILDNUMBER" )
## Write for further analysis if needed
#echo "$outputUpload" > "${HOME}/${GITHUB_ACTION}.log"
## Write output to STDOUT
#echo "$outputUpload"

# Conditional build publish
if [ "$INPUT_PUBLISH" = "true" ];
then
  echo "[Info] Pushing build info: jfrog rt bce $INPUT_BUILDNAME $INPUT_BUILDNUMBER"
  outputPushInfo=$( sh -c "jfrog rt bce $INPUT_BUILDNAME $INPUT_BUILDNUMBER")
  echo "[Info] Pushing build artifacts: jfrog rt bp $INPUT_BUILDNAME $INPUT_BUILDNUMBER"
  outputPublish=$( sh -c "jfrog rt bp $INPUT_BUILDNAME $INPUT_BUILDNUMBER")
  # echo "[Info] Promoting build artifacts: jfrog rt bp $INPUT_BUILDNAME $INPUT_BUILDNUMBER"
  # outputPublish=$( sh -c "jfrog rt bpr $INPUT_BUILDNAME $INPUT_BUILDNUMBER lafayette_release")
  echo "$outputPushInfo" > "${HOME}/${GITHUB_ACTION}.log"
  echo "$outputPublish" > "${HOME}/${GITHUB_ACTION}.log"
  echo "$outputPushInfo"
  echo "$outputPublish"
fi
| true
|
d7f5ba876f9b317af859b715062515b44a4c95ef
|
Shell
|
naoto0822/dotfiles
|
/.zshrc
|
UTF-8
| 4,202
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
### Added by Zinit's installer
if [[ ! -f $HOME/.zinit/bin/zinit.zsh ]]; then
    print -P "%F{33}▓▒░ %F{220}Installing DHARMA Initiative Plugin Manager (zdharma/zinit)…%f"
    command mkdir -p "$HOME/.zinit" && command chmod g-rwX "$HOME/.zinit"
    command git clone https://github.com/zdharma/zinit "$HOME/.zinit/bin" && \
        print -P "%F{33}▓▒░ %F{34}Installation successful.%f%b" || \
        print -P "%F{160}▓▒░ The clone has failed.%f%b"
fi
source "$HOME/.zinit/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
# Load a few important annexes, without Turbo
# (this is currently required for annexes)
zinit light-mode for \
    zinit-zsh/z-a-patch-dl \
    zinit-zsh/z-a-as-monitor \
    zinit-zsh/z-a-bin-gem-node
### End of Zinit's installer chunk
### Added by Powerlevel10k
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block, everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
    source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
### End of Powerlevel10k
# Zsh options
export ZSH_DISABLE_COMPFIX=1
export ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=23'
export CLICOLOR=1
export LSCOLORS=gxfxcxdxbxegedabagacad
export LS_COLORS="di=36:ln=35:so=32:pi=33:ex=31:bd=34;46:cd=34;43:su=30;41:sg=30;46:tw=30;42:ow=30;43:"
# Completion setup.
# FIX: 'compaudi' was a typo for 'compaudit' (the completion-security audit).
autoload -U compaudit
autoload -U compinit
compinit
autoload -U colors
colors
setopt auto_list
setopt auto_menu
setopt list_packed
setopt list_types
setopt auto_cd
# Alias
alias tt="tmux"
# Function
# theme_default() {
# }
#
config_powerline_prompt() {
    # theme_default
}
# tmux session helpers: ttn NAME creates, tta NAME attaches, ttd NAME kills.
# "$1" is quoted so session names containing spaces survive intact.
ttn() {
    tt new -s "$1"
}
tta() {
    tt a -t "$1"
}
ttd() {
    tt kill-session -t "$1"
}
# Plugins
zinit light changyuheng/zsh-interactive-cd
# Defer autosuggestions until after the first prompt (turbo mode).
zinit ice wait'0' atload'_zsh_autosuggest_start' lucid
zinit light zsh-users/zsh-autosuggestions
# Oh-My-Zsh library snippets (loaded piecemeal instead of full OMZ).
zinit snippet OMZ::lib/bzr.zsh
zinit snippet OMZ::lib/clipboard.zsh
zinit snippet OMZ::lib/compfix.zsh
zinit snippet OMZ::lib/completion.zsh
zinit snippet OMZ::lib/correction.zsh
zinit snippet OMZ::lib/diagnostics.zsh
zinit snippet OMZ::lib/directories.zsh
zinit snippet OMZ::lib/functions.zsh
zinit snippet OMZ::lib/git.zsh
zinit snippet OMZ::lib/grep.zsh
zinit snippet OMZ::lib/history.zsh
zinit snippet OMZ::lib/key-bindings.zsh
zinit snippet OMZ::lib/misc.zsh
zinit snippet OMZ::lib/prompt_info_functions.zsh
zinit snippet OMZ::lib/spectrum.zsh
zinit snippet OMZ::lib/termsupport.zsh
zinit snippet OMZ::lib/theme-and-appearance.zsh
zinit snippet OMZ::plugins/git/git.plugin.zsh
zinit cdclear -q
zinit snippet OMZ::plugins/github/github.plugin.zsh
zinit snippet OMZ::plugins/colored-man-pages/colored-man-pages.plugin.zsh
zinit light zdharma/fast-syntax-highlighting
# Powerlevel10k theme; run config_powerline_prompt before it initializes.
zinit env-whitelist 'POWERLEVEL9K_*'
zinit ice atinit"config_powerline_prompt"; zinit light romkatv/powerlevel10k
# Override Alias
alias ls="lsd"
alias l="lsd -la"
alias ll="lsd -l"
alias ga="git add"
alias gpush="git push"
alias gpull="git pull"
alias gmg="git merge"
alias grebase="git rebase"
alias gd="git diff"
alias gc="git commit"
alias gout="git checkout"
# Override zsh option
zstyle ':completion:*:default' menu select=1
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
# peco command search
setopt hist_ignore_all_dups
# Fuzzy-search shell history with peco; bound to Ctrl-R below.
function peco_select_history() {
    local tac
    # GNU 'tac' reverses lines; fall back to BSD 'tail -r' when absent.
    if which tac > /dev/null; then
        tac="tac"
    else
        tac="tail -r"
    fi
    BUFFER=$(fc -l -n 1 | eval $tac | peco --query "$LBUFFER")
    CURSOR=$#BUFFER
    zle clear-screen
}
zle -N peco_select_history
bindkey '^r' peco_select_history
# peco ghq search
# Jump to a ghq-managed repository selected with peco; bound to Ctrl-G below.
function peco-src () {
    local selected_dir=$(ghq list -p | peco --query "$LBUFFER")
    if [ -n "$selected_dir" ]; then
        BUFFER="cd ${selected_dir}"
        zle accept-line
    fi
    zle clear-screen
}
zle -N peco-src
bindkey '^g' peco-src
### Added by Powerlevel10k
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
### End of Powerlevel10k
| true
|
787d85f530aa7f4bd184e958c6cb518ffaaaa105
|
Shell
|
liquid-sky/etcd-aws-cluster
|
/kubemaster-aws-proxy
|
UTF-8
| 1,507
| 3.765625
| 4
|
[] |
no_license
|
#! /bin/bash
# This script extracts the private ipv4 address from /etc/sysconfig/environment
# and uploads it to s3, where it will be downloaded and used by workers.
pkg="kubemaster-aws-proxy"
version="0.4"
environment_file="/etc/sysconfig/environment"
private_ipv4_file="/etc/sysconfig/private_ipv4"
# Get S3 bucket url. Either pass in as S3BUCKET (e.g. s3://my-coreos-cluster-clountinit)
# or generate one from AWS account name: s3://${account}-coreos-cluster-cloudinit.
# initial-cluster file will be uploaded as S3BUCKET/etcd/initial-cluster.
account=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq --raw-output .accountId)
if [[ ! $account ]]; then
    echo "$pkg: failed to get the aws account id."
    exit 4
fi
discovery_bucket=${S3BUCKET:-"s3://${account}-coreos-cluster-cloudinit"}
if [[ ! -f "$environment_file" ]]; then
    # FIX: the original printed "$$environment_file", where '$$' expands to
    # the shell PID instead of the file name.
    echo "$pkg: $environment_file doesn't exist."
    exit 4
else
    if private_ipv4=$(grep COREOS_PRIVATE_IPV4 "$environment_file" | cut -d'=' -f2) ;
    then
        echo "$private_ipv4" > "$private_ipv4_file"
    else
        echo "$pkg: $environment_file doesn't have COREOS_PRIVATE_IPV4."
        exit 4
    fi
fi
region=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq --raw-output .region)
# Create the bucket on first use.
if ! aws --region "$region" s3 ls "$discovery_bucket" > /dev/null 2>&1 ;
then
    aws --region "$region" s3 mb "$discovery_bucket"
fi
aws --region "$region" s3 cp "$private_ipv4_file" "$discovery_bucket/kubemaster/$(basename "$private_ipv4_file")"
| true
|
28fd956b18d4999b1d8c4bbb4ad4c8d94f88dd72
|
Shell
|
jsrgqinbin/docker-compose
|
/zookeeper_cluster/init_data_dir.sh
|
UTF-8
| 1,019
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Create the data/conf/log directory trees for the three ZooKeeper nodes used
# by the docker-compose cluster. Safe to re-run: mkdir -p is idempotent.
PRJ_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ >/dev/null && pwd )"

ZK1_DATA_PATH=${PRJ_PATH}/zk1/data
ZK1_CONF_PATH=${PRJ_PATH}/zk1/conf
ZK1_LOG_PATH=${PRJ_PATH}/zk1/log

ZK2_DATA_PATH=${PRJ_PATH}/zk2/data
ZK2_CONF_PATH=${PRJ_PATH}/zk2/conf
ZK2_LOG_PATH=${PRJ_PATH}/zk2/log

ZK3_DATA_PATH=${PRJ_PATH}/zk3/data
ZK3_CONF_PATH=${PRJ_PATH}/zk3/conf
ZK3_LOG_PATH=${PRJ_PATH}/zk3/log

# mkdir -p succeeds silently when a directory already exists, so the original
# per-directory "if [ ! -d ]" guards were unnecessary. This also fixes the
# copy/paste bug where a missing ZK3_LOG_PATH created ZK1_DATA_PATH instead.
mkdir -p "${ZK1_DATA_PATH}" "${ZK1_CONF_PATH}" "${ZK1_LOG_PATH}" \
         "${ZK2_DATA_PATH}" "${ZK2_CONF_PATH}" "${ZK2_LOG_PATH}" \
         "${ZK3_DATA_PATH}" "${ZK3_CONF_PATH}" "${ZK3_LOG_PATH}"

echo "done"
| true
|
723749c8448de9e2f10f652928edece9f41bc86c
|
Shell
|
agalue/alec-playground
|
/scripts/minion-centos7.sh
|
UTF-8
| 2,304
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# OpenNMS Minion
# Designed for CentOS/RHEL 7
# Author: Alejandro Galue <agalue@opennms.org>
# Positional arguments with defaults: repo name, OpenNMS server, Kafka broker.
ONMS_REPO_NAME="${1-stable}"
ONMS_SERVER="${2-192.168.205.1:8980}"
KAFKA_SERVER="${3-192.168.205.1:9092}"
# Install OpenNMS Minion packages
if ! rpm -qa | grep -q opennms-minion; then
    sudo yum install -y -q http://yum.opennms.org/repofiles/opennms-repo-$ONMS_REPO_NAME-rhel7.noarch.rpm
    sudo rpm --import /etc/yum.repos.d/opennms-repo-$ONMS_REPO_NAME-rhel7.gpg
    sudo yum install -y -q opennms-minion
fi
# Configure Minion
# Give the JVM half of physical RAM, capped at 8 GiB.
TOTAL_MEM_IN_MB=$(free -m | awk '/:/ {print $2;exit}')
MEM_IN_MB=$(expr $TOTAL_MEM_IN_MB / 2)
if [ "$MEM_IN_MB" -gt "8192" ]; then
    MEM_IN_MB="8192"
fi
sudo sed -r -i "/export JAVA_MIN_MEM/s/.*/export JAVA_MIN_MEM=${MEM_IN_MB}m/" /etc/sysconfig/minion
sudo sed -r -i "/export JAVA_MAX_MEM/s/.*/export JAVA_MAX_MEM=${MEM_IN_MB}m/" /etc/sysconfig/minion
sudo sed -r -i "/export JAVA_HOME/s/.*/export JAVA_HOME=\/usr\/lib\/jvm\/java/" /etc/sysconfig/minion
MINION_HOME=/opt/minion
MINION_ETC=$MINION_HOME/etc
# One-time configuration, guarded by a marker file created at the end.
if [ ! -f "$MINION_ETC/configured" ]; then
    # Track the default configuration in git before modifying it.
    cd $MINION_ETC
    sudo git init .
    sudo git add .
    sudo git commit -m "Default Minion configuration for repository $ONMS_REPO_NAME."
    cat <<EOF | sudo tee $MINION_ETC/featuresBoot.d/hawtio.boot
hawtio-offline
EOF
    # Swap the JMS-based IPC features for their Kafka equivalents.
    cat <<EOF | sudo tee $MINION_ETC/featuresBoot.d/kafka.boot
!minion-jms
!opennms-core-ipc-sink-camel
opennms-core-ipc-sink-kafka
!opennms-core-ipc-rpc-jms
opennms-core-ipc-rpc-kafka
EOF
    MINION_ID=$(hostname)
    cat <<EOF | sudo tee $MINION_ETC/org.opennms.minion.controller.cfg
id=$MINION_ID
location=Vagrant
http-url=http://$ONMS_SERVER/opennms
EOF
    # Allow Karaf SSH console connections from any interface.
    sed -r -i '/sshHost/s/127.0.0.1/0.0.0.0/' $MINION_ETC/org.apache.karaf.shell.cfg
    cat <<EOF | sudo tee $MINION_ETC/org.opennms.core.ipc.sink.kafka.cfg
bootstrap.servers=$KAFKA_SERVER
EOF
    cat <<EOF | sudo tee $MINION_ETC/org.opennms.core.ipc.rpc.kafka.cfg
bootstrap.servers=$KAFKA_SERVER
EOF
    # Non-privileged listener ports for SNMP traps and syslog.
    cat <<EOF | sudo tee $MINION_ETC/org.opennms.netmgt.trapd.cfg
trapd.listen.interface=0.0.0.0
trapd.listen.port=1162
EOF
    cat <<EOF | sudo tee $MINION_ETC/org.opennms.netmgt.syslog.cfg
syslog.listen.interface=0.0.0.0
syslog.listen.port=1514
EOF
    sudo systemctl enable minion
    sudo systemctl start minion
    sudo touch $MINION_ETC/configured
fi
echo "Done!"
| true
|
5db195286d1db359aaaefedb41240595978163e2
|
Shell
|
mshabunin/container-for-mediasdk
|
/scripts/test.sh
|
UTF-8
| 369
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the OpenCV VideoIO test binary from within the build container.
set -e
# The absolute paths below only exist inside the Docker image.
if [ ! -f /.dockerenv ] ; then
    echo "This script should be run in a Docker container"
    exit 1
fi
SRCDIR=/opencv
DATADIR=/opencv_extra/testdata
BUILDDIR=/build
export OPENCV_TEST_DATA_PATH=$DATADIR
# export OPENCV_VIDEOIO_DEBUG=1
# export OPENCV_LOG_LEVEL=INFO
# Silence libva driver messages during the test run.
export LIBVA_MESSAGING_LEVEL=0
pushd $BUILDDIR
./bin/opencv_test_videoio
popd
| true
|
fb50e7d57ac915b19ec32637ce805059d23f656e
|
Shell
|
jameshilliard/PythonCode
|
/AutomationTools/bin/2.0/common/switch_controller.sh
|
UTF-8
| 12,812
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#---------------------------------
# Name: Howard Yin
# Description:
# This script is used to call switch_controller
#
#--------------------------------
# History :
# DATE | REV | AUTH | INFO
#05 SEP 2012 | 1.0.0 | howard | Inital Version
REV="$0 version 1.0.0 (05 SEP 2012 )"
# print REV
echo "${REV}"
while [ $# -gt 0 ]
do
case "$1" in
-line)
line=$2
echo "change phyx line to ${line}"
shift 2
;;
-alloff)
alloff=1
echo "off all lines"
shift 1
;;
-power)
p_status=$2
echo "change DUT power status to ${p_status}"
shift 2
;;
-u)
u_status=$2
echo "change usb port status to ${u_status}"
shift 2
;;
-w)
w_status=$2
echo "change usb port status to ${w_status}"
shift 2
;;
-e)
e_status=$2
echo "change usb port status to ${e_status}"
shift 2
;;
-change_line)
change_line=$2
echo "change switch board to ${change_line}"
shift 2
;;
-test)
echo "test mode"
export U_PATH_TOOLS=/root/automation/tools/2.0
export U_PATH_TBIN=/root/automation/bin/2.0/common
export G_CURRENTLOG=/tmp
export U_CUSTOM_VLANAS=616
export U_CUSTOM_VLANAB=620
export G_HOST_IP1=192.168.100.121
export G_HOST_USR1=root
export G_HOST_PWD1=actiontec
export U_CUSTOM_WECB_IP=192.168.8.35
export U_CUSTOM_WECB_USR=root
export U_CUSTOM_WECB_PSW=admin
export U_CUSTOM_WECB_VER=1.0
shift 1
;;
*)
echo "bash $0 -line <line mode>"
exit 1
;;
esac
done
if [ "$U_CUSTOM_WECB_VER" == "2.0" ];then
echo "U_CUSTOM_WECB_VER=$U_CUSTOM_WECB_VER"
append_para="-P 23 -y telnet"
else
append_para=""
fi
#./switch_controller -h
#./switch_controller version 1.0.1 (28 Jun 2012)
#
#usage function!
#Usage:
#
# -m/--line-mode: ADSL or VDSL for WAN connection
# -B/--Bonding: 1 or 0, 1 means Bonding enable and 0 is disable for WAN connection
# -e/--Ethernet: 1 or 0, 1 means Ethernet connection ON
# -l/--line-index: switch index to operate, from 1 to 12, switch 1/2 is for WAN connection
# -u/--usb1: 1 or 0, set usb1 ON or OFF
# -w/--usb2: 1 or 0, set usb2 ON or OFF
# -p/--dut-power: 1 or 0, set dut power ON or OFF
# -a/--ac-power1: 1 or 0, set AC power1 ON or OFF
# -b/--ac-power2: 1 or 0, set AC power2 ON or OFF
# -c/--ac-power3: 1 or 0, set AC power3 ON or OFF
# -d/--ac-power4: 1 or 0, set AC power4 ON or OFF
# -D/--delay-time: set a duration,if delay_time > 0,then line1 or line2(specify by line-index) will OFF first, and ON after the duration;
# the same action for other switches(specify by line-index)
# -s/--serial-dev: serial_dev(ex. /dev/ttyS0) responding to switch controller in use
# -n/--off-all: off all line
# -v/--verbose: verbose
#-v U_CUSTOM_VLANAS = 616
#-v U_CUSTOM_VLANVS = 619
#-v U_CUSTOM_VLANAB = 614
#-v U_CUSTOM_VLANVB = 620
#-v U_CUSTOM_WECB_IP = 192.168.100.123
#-v U_CUSTOM_WECB_USR = root
#-v U_CUSTOM_WECB_PSW = admin
#-v U_CUSTOM_ALIAS_AST =
#-v U_CUSTOM_ALIAS_VST =
#-v U_CUSTOM_ALIAS_ABT =
#-v U_CUSTOM_ALIAS_VBT =
#-v U_CUSTOM_VLANAST =
#-v U_CUSTOM_VLANVST =
#-v U_CUSTOM_VLANABT =
#-v U_CUSTOM_VLANVBT =
#-v U_CUSTOM_NO_WECB = 1 no wecb , 0 wecb , undefined wecb
# uw_status
u_status(){
if [ "$u_status" == "1" ] ;then
switch_param="-u 1"
elif [ "$u_status" == "0" ] ;then
switch_param="-u 0"
else
echo "AT_ERROR : no supported"
exit 1
fi
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : changing usb1 status passed"
exit 0
else
echo "AT_ERROR : changing usb1 status FAILED"
exit 1
fi
fi
}
e_status(){
if [ "$e_status" == "1" ] ;then
switch_param="-e 1"
elif [ "$e_status" == "0" ] ;then
switch_param="-e 0"
else
echo "AT_ERROR : no supported"
exit 1
fi
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : changing eth status passed"
exit 0
else
echo "AT_ERROR : changing eth status FAILED"
exit 1
fi
fi
}
w_status(){
if [ "$w_status" == "1" ] ;then
switch_param="-w 1"
elif [ "$w_status" == "0" ] ;then
switch_param="-w 0"
else
echo "AT_ERROR : no supported"
exit 1
fi
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : changing usb2 status passed"
exit 0
else
echo "AT_ERROR : changing usb2 status FAILED"
exit 1
fi
fi
}
DUT_power_down(){
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller -p 0
else
rm -f $G_CURRENTLOG/WECB_DUTP0.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_DUTP0.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller -p 0" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_DUTP0.log
rc_DUTP0=$?
if [ $rc_DUTP0 -eq 0 ] ;then
echo "AT_INFO : DUT power down OK"
else
echo "AT_INFO : DUT power down FAILED"
exit 1
fi
fi
}
DUT_power_up(){
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller -p 1
else
rm -f $G_CURRENTLOG/WECB_DUTP1.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_DUTP1.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller -p 1" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_DUTP1.log
rc_DUTP1=$?
if [ $rc_DUTP1 -eq 0 ] ;then
echo "AT_INFO : DUT power on OK"
else
echo "AT_INFO : DUT power on FAILED"
exit 1
fi
fi
}
restart_WAN_server(){
rm -f $G_CURRENTLOG/RESTART_WANSERVER.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/RESTART_WANSERVER.log -d $G_HOST_IP1 -P 22 -u $G_HOST_USR1 -p $G_HOST_PWD1 \
-v "cd /root/START_SERVERS/;sed -i \"s/^VLAN_LIST.*/VLAN_LIST $VLAN_ID/g\" config_net.conf;./config_net.sh"
rc_wan_server=$?
if [ $rc_wan_server -eq 0 ] ;then
echo "AT_INFO : restart WAN server OK"
else
echo "AT_INFO : restart WAN server FAILED"
exit 1
fi
}
# offallline
offallline(){
if [ "x$U_CUSTOM_IS_MANUAL_SET_PHYSICAL_LINE" == "x" -o "$U_CUSTOM_IS_MANUAL_SET_PHYSICAL_LINE" == "1" ] ;then
echo "AT_INFO : no switch board using"
exit 0
fi
switch_param="-n"
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : off all lines OK"
exit 0
else
echo "AT_INFO : off all lines FAILED"
exit 1
fi
fi
}
change_line(){
if [ "$change_line" == "ab" ] ;then
switch_param="-m ADSL -B 1"
elif [ "$change_line" == "vb" ] ;then
switch_param="-m VDSL -B 1"
elif [ "$change_line" == "eth" ] ;then
switch_param="-e 1"
fi
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : switch phyx line OK"
exit 0
else
echo "AT_INFO : switch phyx line FAILED"
exit 1
fi
fi
}
switch_line(){
# as -- adsl single 1
# ab -- adsl bonding
# vs -- vdsl single 1
# vb -- vdsl bonding
# ast -- adsl single tagged 1
# abt -- adsl bonded tagged
# vst -- vdsl single tagged 1
# vbt -- vdsl bonded tagged
if [ "$line" == "as" ] ;then
VLAN_ID=$U_CUSTOM_VLANAS
switch_param="-m ADSL -B 0 -l 1"
elif [ "$line" == "ab" ] ;then
VLAN_ID=$U_CUSTOM_VLANAB
switch_param="-m ADSL -B 1"
elif [ "$line" == "vs" ] ;then
VLAN_ID=$U_CUSTOM_VLANVS
switch_param="-m VDSL -B 0 -l 1"
elif [ "$line" == "vb" ] ;then
VLAN_ID=$U_CUSTOM_VLANVB
switch_param="-m VDSL -B 1"
######## TAGGED #######
elif [ "$line" == "ast" ] ;then
VLAN_ID=$U_CUSTOM_VLANAST
switch_param="-m $U_CUSTOM_ALIAS_AST -B 0 -l 1"
elif [ "$line" == "abt" ] ;then
VLAN_ID=$U_CUSTOM_VLANABT
switch_param="-m $U_CUSTOM_ALIAS_ABT -B 1"
elif [ "$line" == "vst" ] ;then
VLAN_ID=$U_CUSTOM_VLANVST
switch_param="-m $U_CUSTOM_ALIAS_VST -B 0 -l 1"
elif [ "$line" == "vbt" ] ;then
VLAN_ID=$U_CUSTOM_VLANVBT
switch_param="-m $U_CUSTOM_ALIAS_VBT -B 1"
fi
if [ -z $VLAN_ID ] ;then
echo "AT_ERROR : must specify a VLAN ID to use"
exit 1
else
# to shut down DUT first
DUT_power_down
if [ "$U_CUSTOM_NO_WECB" == "1" ] ;then
echo "NO WECB using"
$U_PATH_TOOLS/common/switch_controller $switch_param
else
rm -f $G_CURRENTLOG/WECB_SWLINE.log
$U_PATH_TBIN/clicmd -o $G_CURRENTLOG/WECB_SWLINE.log -d $U_CUSTOM_WECB_IP -P 22 -u $U_CUSTOM_WECB_USR -p $U_CUSTOM_WECB_PSW \
-v "switch_controller $switch_param" ${append_para}
grep "last_cmd_return_code:0" $G_CURRENTLOG/WECB_SWLINE.log
rc_SWLINE=$?
if [ $rc_SWLINE -eq 0 ] ;then
echo "AT_INFO : switch phyx line OK"
else
echo "AT_INFO : switch phyx line FAILED"
exit 1
fi
fi
# to restart WAN servers
restart_WAN_server
# turn DUT on AGAIN
DUT_power_up
sleep 5m
exit 0
fi
}
if [ "x$change_line" != "x" ] ;then
change_line
fi
if [ "x$line" != "x" ] ;then
switch_line
fi
if [ "x$alloff" != "x" ] ;then
offallline
fi
# uw_status
if [ "x$u_status" != "x" ] ;then
u_status
fi
if [ "x$w_status" != "x" ] ;then
w_status
fi
if [ "x$e_status" != "x" ] ;then
e_status
fi
if [ "$p_status" == "0" ] ;then
DUT_power_down
fi
if [ "$p_status" == "1" ] ;then
DUT_power_up
fi
| true
|
428a691b876fccbbc8d2208f13611529482508dc
|
Shell
|
aallrd/dotfiles
|
/.functions
|
UTF-8
| 1,098
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
function fdnfgroup() {
fdnfgroup_usage() { echo "fdnfgroup <package>"; return 0; }
if [[ -z ${1+x} || ${1} == "" ]] ; then fdnfgroup_usage; return 1; fi
local pkg
pkg="${1}"
dnf groupinfo '*' | sed -n '/Group:/h;/'"${pkg}"'/{x;p;x;p}'
return 0
}
function ssh_remote_pubkey() {
local server user pubkey identity_file
server=${1:?missing remove server name parameter}
user=${2:-$USER}
pubkey="$(find "${HOME}"/.ssh/*.pub | tail -1)"
identity_file="${pubkey//.pub}"
echo "Configuring public key access for ${user}@${server}"
ssh-copy-id -i "${identity_file}" -o StrictHostKeyChecking=no "${user}@${server}"
cat <<EOF >> ~/.ssh/config
Host ${server}
Hostname ${server}
IdentityFile ${identity_file}
StrictHostKeyChecking no
EOF
test=$(ssh -o BatchMode=yes -o ConnectTimeout=3 "${user}@${server}" true 2>/dev/null && echo 0 || echo 1)
if [[ ${test} -eq 0 ]] ; then
echo "Public key access configured successfully."
return 0
else
echo "Failed to configure the public key access for ${user}@${server}."
return 1
fi
}
| true
|
a2cf917d88bab7e4ec90ddc29ced42572f37973c
|
Shell
|
agolotin/CS484
|
/CUDA/hello.sh
|
UTF-8
| 767
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/bash
#SBATCH --time=00:10:00 # walltime
#SBATCH --ntasks=6 # number of processor cores (i.e. tasks)
#SBATCH --nodes=1 # number of nodes
#SBATCH --gres=gpu:4
#SBATCH --mem-per-cpu=1024# memory per CPU core
#SBATCH -J "hello_test" # job name
#SBATCH --mail-user=artem.golotin@gmail.com
# Compatibility variables for PBS. Delete if not needed.
export PBS_NODEFILE=`/fslapps/fslutils/generate_pbs_nodefile`
export PBS_JOBID=$SLURM_JOB_ID
export PBS_O_WORKDIR="$SLURM_SUBMIT_DIR"
export PBS_QUEUE=batch
# Set the max number of threads to use for programs using OpenMP. Should be <= ppn. Does nothing if the program doesn't use OpenMP.
export OMP_NUM_THREADS=$SLURM_CPUS_ON_NODE
# LOAD MODULES, INSERT CODE, AND RUN YOUR PROGRAMS HERE
echo $CUDA_VISIBLE_DEVICES
./hello
| true
|
d872f6a8f1822c7fef383f3df314f11284bef9a9
|
Shell
|
Kongjh/server_used_script
|
/replace_chr_I.sh
|
UTF-8
| 1,227
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
str=("chrI" "chrII" "chrIII" "chrIV" "chrV" "chrVI" "chrVII" "chrVIII" "chrIX" "chrX" "chrXI" "chrXII" "chrXIII" "chrXIV" "chrXV" "chrXVI")
num=("chr1" "chr2" "chr3" "chr4" "chr5" "chr6" "chr7" "chr8" "chr9" "chr10" "chr11" "chr12" "chr13" "chr14" "chr15" "chr16")
for i in `seq 0 15`
do
# echo ${str[i]} ${num[i]}
# echo ${str[$i]} ${num[$i]}
## < lock the word, \ for 转义
# sed -i 's/\<'${str[$i]}'\>/'${num[$i]}'/g' /Dell/Dell4/kongjh/kozak_40s_ver2/numaddchrSaccharomyces_cerevisiae.R64-1-1.94.gtf
sed -i 's/\<'${str[$i]}'\>/'${num[$i]}'/g' /Dell/Dell4/kongjh/kozak_40s_ver2/numaddchr-all-0-16-mapped-ssu.bw.wig
sed -i 's/\<'${str[$i]}'\>/'${num[$i]}'/g' /Dell/Dell4/kongjh/kozak_40s_ver2/numaddchrsacCer3.chrom.sizes
# sed 's/${str[i]}/${num[i]}/' /Dell/Dell4/kongjh/kozak_40s_ver2/num${i}addchrSaccharomyces_cerevisiae.R64-1-1.94.gtf > num${i+1}addchrSaccharomyces_cerevisiae.R64-1-1.94.gtf
done
echo -e "\a"
# fin="num${i}addchrSaccharomyces_cerevisiae.R64-1-1.94.gtf"
# ii=$[${i}+1]
# fout="num${ii}addchrSaccharomyces_cerevisiae.R64-1-1.94.gtf"
# sed 's/`echo ${str[i]}`/`echo ${num[i]}`/' /Dell/Dell4/kongjh/kozak_40s_ver2/${fin} > /Dell/Dell4/kongjh/kozak_40s_ver2/${fout}
| true
|
369bb1d2916a6d214683192fddc3e489dc02131e
|
Shell
|
truemark-saltstack-formulas/oracle-formula
|
/localdev-local.sh
|
UTF-8
| 611
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This is a simple script to setup a folder structure for local development.
# To execute this script run the following:
# bash < <(curl -s https://raw.githubusercontent.com/truemark-saltstack-formulas/oracle-formula/master/localdev-local.sh)
set -uex
mkdir -p salt-oracle
cd salt-oracle
mkdir -p pillar
mkdir -p ext_pillar
mkdir -p salt
mkdir -p formulas
cd formulas
if [ ! -d oracle-formula ]; then
git clone git@github.com:truemark-saltstack-formulas/oracle-formula.git
fi
if [ ! -d proservices-formula ]; then
git clone git@github.com:truemark-saltstack-formulas/proservices-formula.git
fi
| true
|
b87c4728eb5ddaf9994e100e30e729c5d4aff1d0
|
Shell
|
MahdiMuhaddesi/open-pryv.io
|
/scripts/restore-attachments-native.sh
|
UTF-8
| 318
| 2.953125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# working dir fix
SCRIPT_FOLDER=$(cd $(dirname "$0"); pwd)
cd $SCRIPT_FOLDER/..
BACKUP_DIR=$(echo $1 | sed 's:/*$::')
BACKUP_DIR="${BACKUP_DIR}/"
export VAR_PRYV_FOLDER=$SCRIPT_FOLDER/../var-pryv
rsync --recursive --times --human-readable --verbose --perms $BACKUP_DIR ${VAR_PRYV_FOLDER}/attachment-files/
| true
|
e3aca24d4f3bca574c531446acea87f2ce15916d
|
Shell
|
Michael-Rhodes/DFIR_mjr2563_CSEC464
|
/getArtifacts.sh
|
UTF-8
| 6,292
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Michael Rhodes
#
# Description: gathers artifacts from linux operating systems and outputs them
# in a table or to a file in csv format. It also allows the option
# to send the csv file to an email address.
# Artifacts include:
# Time, OS version, hw specs, hostname, domain, users, startup,
# sched tasks, network, software, processes, drivers, files,
# aliases, setUID binaries, and tmp files.
#
# Table format:
# ---------------------------------------
# | Artifact type | subtype | result(s) |
# ---------------------------------------
# USAGE STATEMENT
usage() {
echo """
USAGE: getArtifacts.sh [-csv [filename]] [-r | -remoteHost [user@ip-address]] [-m | -mail [email]]
Example: gatherArtifacts.sh -csv out.csv -m foo@foo.org
"""
}
#check if root
if [[ $EUID -ne 0 ]]; then
echo "You need root privileges!"
exit 1
fi
# get arguments
while [ "$1" != "" ]; do
case $1 in
-csv ) shift
csv=$1
echo "Saving artifacts to" $csv
;;
-r | -remoteHost ) shift
remoteHost=$1
echo "Connecting to " $remoteHost
;;
-m | -mail ) shift
mail=$1
echo "Sending file to " $mail
;;
* ) usage
exit 1
esac
shift
done
add_to_table() {
if [[ ! -z $csv ]]; then
echo "$1,$2,$3" >> $csv
else
echo "$1 | $2 | $3"
fi
}
# time: current time, time zone of PC, PC uptime
add_to_table "time" "current" "`uptime | cut -d' ' -f2`"
add_to_table "time" "time zone" "`date +”%Z”`"
add_to_table "time" "uptime" "`uptime | cut -d ' ' -f 5 | tr -d ','`"
# OS version: numerical, typical name, kernel version
add_to_table "OS version" "numerical" "`uname -r`"
add_to_table "OS version" "typical name" "`cat /etc/*-release | grep -E "^ID="`"
add_to_table "OS version" "kernel vers." "`uname -v`"
# hardware: CPU brand and type, RAM, HDD (list HDDs, list mounted file systems)
add_to_table "hardware" "CPU" "`cat /proc/cpuinfo | grep "model name" | cut -d':' -f1 --complement`"
add_to_table "hardware" "RAM" "`cat /proc/meminfo | grep MemTotal | cut -d: -f2 | tr -d ' '`"
(lsblk -al | grep disk) | while read -r disk; do
add_to_table "hardware" "HDD" "$disk";
done
(df -h | tail -n +2) | while read -r fs; do
add_to_table "hardware" "filesystem" "$fs";
done
# hostname and domain
add_to_table "hostname" "N/A" "`hostname`"
add_to_table "domain" "N/A" "`domainname`"
# list of users: user/group id, include user login history
for line in $(cut -f-1,3,4,6,7 -d: /etc/passwd);do
user=$(echo $line | cut -f1 -d:)
add_to_table "Users" "$user" "$line"
lastlogin=$(last $user | grep $user)
if [[ ! -z $lastlogin ]]; then
while read -r log; do
add_to_table "Users" "$user-login" "$log"
done <<< "$lastlogin"
fi
done
# start at boot
while read -r file; do
add_to_table "StartAtBoot" "init.d" "$file"
done <<< "$(find '/etc/init.d/' -type f)"
while read -r file; do
add_to_table "StartAtBoot" "xdg" "$file"
done <<< "$(find '/etc/xdg/autostart/' -type f)"
# scheduled tasks
for user in $(cut -f1 -d: /etc/passwd);do
if [[ ! -z $user ]]; then
add_to_table "Scheduled tasks" "$user" "$(crontab -u $user -l 2>/dev/null)";
fi
done
# network: arp table, MAC addresses for interface(s), routing table, IP addresses
# DHCP server, DNS server, gateway(s), listening services (ipaddr,port,proto,service)
# established connections (remote IP,local/remote port,proto,timestamp,service),
# DNS cache
interfaces=$(ip a | grep -E '^[1-9][0-9]?:' | cut -d: -f2 | tr -d ' ')
while read -r intf; do
mac=$(cat /sys/class/net/$intf/address)
inet=$(ip a s $intf | grep -E '^\W*inet ' | cut -d' ' -f6 | tr '\n' ' ')
add_to_table "network" "intf: $intf" "MAC = $mac"
add_to_table "network" "intf: $intf" "IP = $inet"
done <<< "$interfaces"
add_to_table "network" "arp table" "`arp`"
while read -r a; do
add_to_table "network" "arp table entry" "$a"
done <<< "$(arp -n | tail -n+2)"
while read -r a; do
add_to_table "network" "routing table entry" "$a"
done <<< "$(ip r)"
servers=()
while read -r dhcp; do
servers+=("$(echo $dhcp | cut -d: -f4)")
done <<< "$(grep -R 'DHCPOFFER' /var/log/messages)"
add_to_table "network" "DHCP servers" "$servers"
add_to_table "network" "DNS server" "`grep nameserver /etc/resolv.conf`"
##TODO add to table??
add_to_table "network" "listening services" "`netstat -ltunp`"
##TODO add to table??
add_to_table "network" "established connections" "`netstat -anp | grep ESTAB`"
# network shares, printers, wifi access profiles
#TODO
# all installed software
mgr=$(which dpkg 2>/dev/null)
if [[ ! -z $mgr ]]; then
while read -r pkg; do
add_to_table "InstalledSoftware" "dpkg" "$pkg"
done <<< "$(dpkg --get-selections | grep -v deinstall | cut -d' ' -f1)"
fi
mgr=$(which rpm 2>/dev/null)
if [[ ! -z $mgr ]]; then
while read -r pkg; do
add_to_table "InstalledSoftware" "rpm" "$pkg"
done <<< "$($mgr -qa)"
fi
# process list
##TODO add to table??
add_to_table "processes" "N/A" "`ps aux`"
# driver list
while read -r mod; do
add_to_table "Kernel" "Module" "$mod"
done <<< "$(lsmod)"
# list of all files in Downloads and Documents for each user directory
documents="/Documents"
downloads="/Downloads"
while read line; do
homedir=`echo $line | cut -d: -f6`;
user=$(echo $line | cut -f1 -d:)
if [ -d "$homedir$documents" ]; then
while read -r file; do
add_to_table "Documents" "$user" "$file"
done <<< "$(find "$homedir$documents" -type f)"
fi
if [ -d "$homedir$downloads" ]; then
while read -r file; do
add_to_table "Downloads" "$user" "$file"
done <<< "$(find "$homedir$downloads" -type f)"
fi
done < /etc/passwd
#TODO won't loop over alias
#alias
while read -r line; do
echo "$line"
add_to_table "Aliases" "" "$(echo $line | cut -d' ' -f1 --complement)"
done <<< "$(alias)"
#setuid
while read -r file; do
add_to_table "SetUID" "/usr/bin" "$file"
done <<< "$(find '/usr/bin' -perm -4000)"
#tmp
while read -r file; do
add_to_table "tmp files" "tmp" "$file"
done <<< "$(ls -Al /tmp | tail -n+2)"
| true
|
13993c24ac07c2be18fc0267f2a9fa11cf874672
|
Shell
|
mathewmanueljm/rob_cam
|
/bin/prepare_release.sh
|
UTF-8
| 2,222
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
################################################################################
## Copyright (C) 2020 Alejandro Colomar Andrés ##
## SPDX-License-Identifier: GPL-2.0-only ##
################################################################################
##
## Prepare the repo for release
## ============================
##
## - Remove the files that shouldn't go into the release
## - Update version numbers
##
################################################################################
################################################################################
## functions ##
################################################################################
update_version()
{
local version=$1
sed "/--branch master/s/master/v${version}/" \
-i ./cam/Dockerfile
sed "/--branch master/s/master/v${version}/" \
-i ./rob/Dockerfile
sed "/--branch master/s/master/v${version}/" \
-i ./rob/robot/ur/Dockerfile
sed "/alejandrocolomar\/rob_cam:cam/s/:cam/:cam_${version}/" \
-i ./docker/docker-compose.yaml
sed "/alejandrocolomar\/rob_cam:rob/s/:rob/:rob_${version}/" \
-i ./docker/docker-compose.yaml
sed "/alejandrocolomar\/rob_cam:ur-sim/s/:ur-sim/:ur-sim_${version}/" \
-i ./docker/docker-compose.yaml
sed "/alejandrocolomar\/rob_cam:cam/s/:cam/:cam_${version}/" \
-i ./kubernetes/kube-compose.yaml
sed "/alejandrocolomar\/rob_cam:rob/s/:rob/:rob_${version}/" \
-i ./kubernetes/kube-compose.yaml
sed "/alejandrocolomar\/rob_cam:ur-sim/s/:ur-sim/:ur-sim_${version}/" \
-i ./kubernetes/kube-compose.yaml
}
################################################################################
## main ##
################################################################################
main()
{
local version=$1
update_version ${version}
}
################################################################################
## run ##
################################################################################
main $1
################################################################################
## end of file ##
################################################################################
| true
|
a7bd77d76438851504d5c047820d7c4f951e6b18
|
Shell
|
sahil-rao/deployment-sahil
|
/Compile-Server/usr/local/bin/xplaincompileserver
|
UTF-8
| 657
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
CLASSPATH="/usr/lib/baaz_compiler"
XPLAIN_HOME="/usr/lib/baaz_compiler"
# needed for execution
if [ ! -f ${XPLAIN_HOME}/navopt*.jar ]; then
echo "Missing Compiler Execution Jar: ${XPLAIN_HOME}/navopt*.jar"
exit 1;
fi
for f in ${XPLAIN_HOME}/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
HIVE_MODULES='/usr/lib/hive/lib'
for f in ${HIVE_MODULES}/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
java -Dcompiler.servicename=$SERVICE_NAME -Dcompiler.port=$COMPILER_PORT -Dnavopt.log.dir=/var/log/cloudera/navopt -Dlogback.configurationFile=$XPLAIN_HOME/logback.xml -classpath $CLASSPATH com.cloudera.navopt.compiler.QueryCompilerServer
| true
|
1fc8fe959938881fa6f48c3cd0a5448f44b35705
|
Shell
|
hyamamoto/is_client.dart
|
/test/run_dart.sh
|
UTF-8
| 304
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is written for drone.io CI
result=`content_shell --args --dump-render-tree test/test_client_dart.html 2>&1 | tee /dev/stdout | sed -n 2p | grep PASS`
if [ "$result" != "PASS" ]; then
echo "Dart Client Test Failed"
exit 1
else
echo "Dart Client Test Passed"
exit 0
fi
| true
|
4b8052f46f4456eb2b86418344e40bff7b3fd9e7
|
Shell
|
peterrrre/commonweal-on-blockchain
|
/BLOCKCHAIN/extended.sh
|
UTF-8
| 304
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
set -u
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
LINTALL=$(basename "${BASH_SOURCE[0]}")
for f in "${SCRIPTDIR}"/extended-lint-*.sh; do
if [ "$(basename "$f")" != "$LINTALL" ]; then
if ! "$f"; then
echo "^---- failure generated from $f"
exit 1
fi
fi
done
| true
|
34af3a9716151194481ed3a63713e790d176b57c
|
Shell
|
luyanda/PracticeStuff
|
/testing-tutorial03-mutator/src/main/java/fallout.sh
|
UTF-8
| 1,133
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/bash
# Create the mutants
java -cp /home/phillip/.m2/repository/com/github/javaparser/javaparser-core/3.5.14/javaparser-core-3.5.14.jar:. Mutator "$1"
# Get the class name
CLASS=$(echo $1 | cut -d . -f 1)
echo "Class name is $CLASS"
# Compile driver
javac Driver.java
# Hide the original file
mv "$1" _"$1"
# Count the number of mutants
TOTAL=$(find -type f -name "$CLASS*.java" | wc | awk '{print $2'})
echo "Total number of mutants generated: $TOTAL"
KILLED=0
# Operate on each mutant
for MUTANT in $(find -type f -name "$CLASS*.java");
do
echo "Testing mutant with name $MUTANT"
# Rename the mutant
mv "$MUTANT" "$CLASS".java
# Compile all that dank shit
javac "$CLASS".java
# Run that dank shit
java Driver
# Check the return value of that dank shit
if [[ "$?" -eq "1" ]]; then KILLED=$((KILLED + 1)); fi
done
# Log total number of killed mutants
echo "Total number of killed mutants: $KILLED"
# Log the ratio
echo "Score is"
echo "$KILLED / $TOTAL" | bc -l
# Restore original file
mv _"$1" "$1"
| true
|
ed2c0a4f76ba91b18e18757ef53e5f12e5ff16bd
|
Shell
|
vincevalenz/SecureFileManager
|
/secureFileManager.sh
|
UTF-8
| 4,479
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################
# |
# |
# Secure File Manager |
# |
# |
###############################################################
## If gpg is not installed, exit and let user know it is required
#
gpg --version >/dev/null || { echo "This program requires gpg"; exit 1; }
createDir() {
## Check if the SFS-GPG dir exists, if not, create it
#
if [ ! -d ~/Documents/SFS-GPG ]; then
mkdir ~/Documents/SFS-GPG
echo "Created ~/Documents/SFS-GPG"
sleep 2
fi
## After checking/creating, run the intro function
#
intro
}
createFile(){
## User creates filename that will be encrypted
#
echo "Enter a filename"
read temp
## If entry is null, return to main menu
#
if [ -z $temp ]; then
echo -e "\nEntry was null, Returning to main menu"
sleep 2
intro
fi
## If filename has a space, replpace it with an underscore
#
file=$(echo $temp | tr ' ' '_')
## Ask user to input a phrase twice
#
while [ -z "$data" ]; do
echo "Enter a string"
read data
done
echo "Repeat the string"
read repeatData
## Check both entries match
#
while [ $data != $repeatData ]; do
echo "Strings did not match. Enter again"
read repeatData
done
## Create file with filename and push the data to the file, then move to right dir
#
touch $file.txt; echo $data > $file.txt
mv $file.txt ~/Documents/SFS-GPG
## Have user enter a master password to secure the file
#
clear
echo -e "Enter a Master Password to secure\n"
gpg -c ~/Documents/SFS-GPG/"$file.txt"
shred ~/Documents/SFS-GPG/"$file.txt"
echo "Done!"
sleep 2
## Clear gpg-agent cache and run the intro() again when finished
#
pkill -SIGHUP gpg-agent
intro
}
viewFile(){
## List all files in the SFS-GPG dir so the user can choose which one to view
#
ls ~/Documents/SFS-GPG
## Make sure the user makes a valid coice
#
echo -e "\nChoose a file"
read file
while [ ! -e ~/Documents/SFS-GPG/$file* ]; do
echo -e "\nPlease choose an existing file"
read file
done
## Ensure $file isnt empty then decrypt the file. This will print the contents
# to the terminal, then wait for the user to exit.
#
if [ ! -z "$file" ]; then
gpg --decrypt ~/Documents/SFS-GPG/$file*
echo -e "\nPress any key to continue"; read ;
else
echo -e "\nEntry was null. Returning to main menu"
sleep 2
fi
clear; clear;
## Clear gpg-agent cache and run intro() after finishing
#
pkill -SIGHUP gpg-agent
intro
}
removeFile(){
## List files that can be deleted
#
ls ~/Documents/SFS-GPG/
## Make sure it is a valid filename
#
echo -e "\nEnter a filename"
read file
while [ ! -e ~/Documents/SFS-GPG/$file* ]; do
echo -e "\nPlease choose an existing file"
read file
done
## Ensure choice isnt null or else all files will be removed
#
if [ ! -z $file ]; then
rm ~/Documents/SFS-GPG/$file*
echo -e "\nRemoved!"
sleep 2
else
echo -e "\nEntry was null. Returning to main menu"
sleep 2
fi
intro
}
intro(){
## Main screen for the user to interact with
#
clear
echo "##########################################################################
# Secure File Storage #
# #
# Create a unique filename #
# Add some information #
# And encrypt it with GPG! #
# #
##########################################################################"
## Choices for user
#
echo -e "\n(1) View file"
echo "(2) Create a new file"
echo "(3) Remove a file"
echo "(4) Exit"
read choice
case $choice in
1) clear; viewFile ;;
2) clear; createFile ;;
3) clear; removeFile ;;
# exit and clear gpg cache if passwords are cached
4) pkill -SIGHUP gpg-agent; clear; exit 1 ;;
*) clear; intro ;;
esac
}
createDir
| true
|
2d51679d38b048b2964485d0ff9dd316099dd805
|
Shell
|
iamtew/jolla-scripts
|
/usb_tether.sh
|
UTF-8
| 950
| 3.46875
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#! /usr/bin/env bash
# -*- mode: shell; tab-width: 2; sublime-rulers: [100]; -*-
# vim: tabstop=2 cc=100
# Set up IP forwarding and iptables rules to route traffic over the RNDIS interface. After this is
# setup you need to add the default gateway to point to the Jolla. On Linux something like this
# will suffice:
# sudo ip route add default via <Jolla IP> dev <Jolla NIC device>
#
# Information gathered from
# http://elinux.org/Jolla#How_do_I_enable_USB_tethering.3F
script="$BASH_SOURCE"
main ()
{
echo "Setting up forwarding and iptables rules"
echo 1 > /proc/sys/net/ipv4/ip_forward
/sbin/iptables -t nat -A POSTROUTING -o rmnet0 -j MASQUERADE
/sbin/iptables -A FORWARD -i rmnet0 -o rndis0 -m state --state RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A FORWARD -i rndis0 -o rmnet0 -j ACCEPT
}
case "$(id -u)" in
0)
main
;;
*)
echo "Running as non-privileged user, trying through sudo"
sudo "$script"
;;
esac
exit 0
| true
|
7674bf3c224abeeed8213085a770e3c74f58085e
|
Shell
|
dhickin/epicsv4Training
|
/mybin/make_slides
|
UTF-8
| 265
| 2.578125
| 3
|
[] |
no_license
|
#! /bin/sh
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <presentation>"
exit 1
fi
ipython nbconvert ${1}.ipynb --to slides --reveal-prefix "http://cdn.jsdelivr.net/reveal.js/2.6.2"
cp ${1}.slides.html standalone
cp custom.css standalone
cp diamond.png standalone
| true
|
349c7ad117ace61e7862258f96d8644a3eaea544
|
Shell
|
conda-forge/gst-orc-feedstock
|
/recipe/build.sh
|
UTF-8
| 236
| 2.671875
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
BUILD_DIR="${SRC_DIR}/build"
# configure
meson setup \
${BUILD_DIR} \
${SRC_DIR} \
--prefix ${PREFIX} \
--libdir "lib"
pushd ${BUILD_DIR}
# build
ninja -j ${CPU_COUNT} all
# check
ninja test
# install
ninja install
| true
|
9df24ac91e0951455cf1ce2e849d503d13fa5ae0
|
Shell
|
mkienenb/crosslinux
|
/pkg-cfg/readline-6.3/bld.sh
|
UTF-8
| 4,706
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/${cl_bash}
# This file is part of the crosslinux software.
# The license which this software falls under is GPLv2 as follows:
#
# Copyright (C) 2014-2014 Douglas Jerome <djerome@crosslinux.org>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
# ******************************************************************************
# Definitions
# ******************************************************************************
PKG_URL="http://ftp.gnu.org/gnu/readline/"
PKG_ZIP="readline-6.3.tar.gz"
PKG_SUM=""
PKG_TAR="readline-6.3.tar"
PKG_DIR="readline-6.3"
# ******************************************************************************
# pkg_patch
# ******************************************************************************
pkg_patch() {
PKG_STATUS="patch error"
cd "${PKG_DIR}"
sed -e '/MV.*old/d' -i Makefile.in
sed -e '/OLDSUFF/c:' -i support/shlib-install
cd ..
PKG_STATUS=""
return 0
}
# ******************************************************************************
# pkg_configure
# ******************************************************************************
pkg_configure() {
PKG_STATUS="./configure error"
cd "${PKG_DIR}"
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_set"
PATH="${CONFIG_XTOOL_BIN_DIR}:${PATH}" \
AR="${CONFIG_XTOOL_NAME}-ar" \
AS="${CONFIG_XTOOL_NAME}-as --sysroot=${TARGET_SYSROOT_DIR}" \
CC="${CONFIG_XTOOL_NAME}-cc --sysroot=${TARGET_SYSROOT_DIR}" \
CXX="${CONFIG_XTOOL_NAME}-c++ --sysroot=${TARGET_SYSROOT_DIR}" \
LD="${CONFIG_XTOOL_NAME}-ld --sysroot=${TARGET_SYSROOT_DIR}" \
NM="${CONFIG_XTOOL_NAME}-nm" \
OBJCOPY="${CONFIG_XTOOL_NAME}-objcopy" \
RANLIB="${CONFIG_XTOOL_NAME}-ranlib" \
SIZE="${CONFIG_XTOOL_NAME}-size" \
STRIP="${CONFIG_XTOOL_NAME}-strip" \
CFLAGS="${CONFIG_CFLAGS}" \
bash_cv_wcwidth_broken='no' \
./configure \
--build=${MACHTYPE} \
--host=${CONFIG_XTOOL_NAME} \
--prefix=/usr \
--libdir=/lib || return 0
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_clr"
cd ..
PKG_STATUS=""
return 0
}
# ******************************************************************************
# pkg_make
# ******************************************************************************
pkg_make() {
PKG_STATUS="make error"
cd "${PKG_DIR}"
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_set"
NJOBS=1 # I think a multi-job build is not stable.
PATH="${CONFIG_XTOOL_BIN_DIR}:${PATH}" make \
--jobs=${NJOBS} \
CROSS_COMPILE=${CONFIG_XTOOL_NAME}- \
SHLIB_LIBS=-lncurses || return 0
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_clr"
cd ..
PKG_STATUS=""
return 0
}
# ******************************************************************************
# pkg_install
# ******************************************************************************
pkg_install() {
PKG_STATUS="install error"
cd "${PKG_DIR}"
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_set"
PATH="${CONFIG_XTOOL_BIN_DIR}:${PATH}" make \
CROSS_COMPILE=${CONFIG_XTOOL_NAME}- \
DESTDIR=${TARGET_SYSROOT_DIR} \
install || return 0
source "${CROSSLINUX_SCRIPT_DIR}/_xbt_env_clr"
# Put static libraries into /usr/lib and give them the correct permissions.
#
mv ${TARGET_SYSROOT_DIR}/lib/libhistory.a ${TARGET_SYSROOT_DIR}/usr/lib/
mv ${TARGET_SYSROOT_DIR}/lib/libreadline.a ${TARGET_SYSROOT_DIR}/usr/lib/
# Give the shared libraries the correct permissions and make links to them
# in /usr/lib.
#
chmod 755 ${TARGET_SYSROOT_DIR}/lib/libhistory.so.6.3
chmod 755 ${TARGET_SYSROOT_DIR}/lib/libreadline.so.6.3
rm -f ${TARGET_SYSROOT_DIR}/usr/lib/libhistory.so
rm -f ${TARGET_SYSROOT_DIR}/usr/lib/libreadline.so
ln -fs ../../lib/libhistory.so.6 ${TARGET_SYSROOT_DIR}/usr/lib/libhistory.so
ln -fs ../../lib/libreadline.so.6 ${TARGET_SYSROOT_DIR}/usr/lib/libreadline.so
cd ..
if [[ -d "rootfs/" ]]; then
find "rootfs/" ! -type d -exec touch {} \;
cp --archive --force rootfs/* "${TARGET_SYSROOT_DIR}"
fi
PKG_STATUS=""
return 0
}
# ******************************************************************************
# pkg_clean
# ******************************************************************************
pkg_clean() {
PKG_STATUS=""
return 0
}
# end of file
| true
|
c109902f3e644da94bfa47a9e95fa0bd038e1af3
|
Shell
|
OlaAronsson/nenRip
|
/src/nenRip.sh
|
UTF-8
| 3,434
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# -------- To Configure Mandatory --------
# Basic file root - where temporary files and wavs
# will be put (and removed at the end of execution)
FS_ROOT=/tmp/rips/
# Where my MP3s should eventually end up
MP3_ROOT=/media/KOZAK/mp3
# Setup if needed, empty if not to be
# used (syntax : host:port).
#
# Example :
#PROXY=localhost:3128
PROXY=
# -------- To Configure Optional --------
# cdrom speed (default it will be around 4x when grabbing..)
CDROM_SPEED=32
# mode cdparanoia or dagrab
MODE=DAGRAB
# MP3 kps Quality
KBS_NORMAL=192
KBS_HIGHQ=320
# set this to 1 in order to ask whether to move on
GO_ON_EVEN_IF_GRAB_FAILED=0
# width of runtime xterm
XTERMWIDTH=90
# Your Discogs API token
DISCOGSTOKEN=sGvVgNzyisfYBWkgctcqTVrWeKJLdCqXXxjQTqFc
# -------- Runtime environment --------
# PATH
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# UTF-8 support
export LANG=en_US.UTF-8
export LESSCHARSET=utf-8
export PERL_UTF8_LOCALE=1 PERL_UNICODE=AS
# grab the args
ARGS="$@"
# grab current fs location
HERE=`pwd`
# Meaning, until we detect that you're using a FAT-fs
# for saving your output, we will accept '?' and ':'
# in file names; is we detect ootherwise, these
# characters will simple be stripped from artist's
# and album names!
FATMODE=1
# ------------------ MAIN -------------------
main(){
BAIL=1
[ ! -d /usr/local/bin/nenrip-modules ] && installModules
getFunctions || BAIL=0
[ $BAIL -eq 0 ] && echo "Could not source modules" && exitCode
preFlow
if [ $INSTALL -eq 1 ] && [ $UNINSTALL -eq 1 ]; then
initFlow
thenewcd=`gotNewCd`
if [ $thenewcd -eq 1 ]; then
logit "Either no cd in drive or it was just ripped.."
else
while [ true ]; do
mainFlow
thenewcd=`gotNewCd`
[ $thenewcd -eq 1 ] && logit "Either no cd in drive or it was just ripped.." && break
done
fi
else
[ $INSTALL -eq 0 ] && runInstallation
[ $UNINSTALL -eq 0 ] && runUnInstall
fi
exitCode
cd $HERE
}
# ----------------- FLOWS ------------------
preFlow(){
chk && setupArgs
chk && basicSetup
}
initFlow(){
chk && init
chk && guessCdromDevice
chk && logit "Setting configured cdrom speed : $CDROM_SPEED"
chk && setcd -x $CDROM_SPEED $CDROMDEV
chk && echo
}
mainFlow(){
chk && getMetaData
chk && makeTempFolders
chk && getTrackData
chk && grabWav
chk && ejectCdromAndSaveCdId
if [ $FLAC -eq 1 ]; then
chk && mp3Encode
else
chk && flacEncode
fi
chk && cleanup
}
installModules(){
isInstall=1
echo "$ARGS" | grep "install" >/dev/null 2>&1 && isInstall=0
[ $isInstall -eq 1 ] && echo "You are missing the main modules - run the installation in order to get them!" && BAIL=0 && exit 1
[ `whoami` != "root" ] && echo "Son or girlie, you need to be root." && BAIL=0 && exit 1
echo
echo "Installing the nenRip modules"
echo
mkdir -p /usr/local/bin/nenrip-modules
cd /usr/local/bin/nenrip-modules
wget "http://thehole.black/nenRip/modules.tar.gz"
tar zxvf modules.tar.gz
chmod 777 *
echo "Done."
echo
}
getFunctions(){
. /usr/local/bin/nenrip-modules/install.sh || BAIL=0
. /usr/local/bin/nenrip-modules/metaData.sh || BAIL=0
. /usr/local/bin/nenrip-modules/cdrom.sh || BAIL=0
. /usr/local/bin/nenrip-modules/grabAndEncode.sh || BAIL=0
. /usr/local/bin/nenrip-modules/variousFunctions.sh || BAIL=0
. /usr/local/bin/nenrip-modules/usage.sh || BAIL=0
}
# Main
main
| true
|
820b61f2ab6b90299fda8d2f71321932e30ad09b
|
Shell
|
swmcc/talk-tmux-lightning
|
/start_development
|
UTF-8
| 1,362
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
BASE=`pwd`
PROJECT='tmux-talk'
tmux start-server
# new-session creates first window named 'console'
tmux new-session -d -s $PROJECT -n console
# split window 'h'orizontally (into two vertical panes)
tmux split-window -t $PROJECT:console -h
# select the left-most pane
tmux last-pane
# split this pane 'v'ertically (into two horizontal panes)
tmux split-window -t $PROJECT:console -v
# create a second window for 'logs'
tmux new-window -t $PROJECT:2 -n logs
# start a vim editor in the left-most vertical pane
tmux send-keys -t $PROJECT:console.2 "cd $BASE;" C-m
# widen the vim editor pane by 20 cells
tmux resize-pane -L -t $PROJECT:console.2 20
# run npm install and run the app
tmux send-keys -t $PROJECT:console.0 "cd $BASE; npm install; node plugin/multiplex" C-m
# start rails server
tmux send-keys -t $PROJECT:console.1 "cd $BASE;" C-m
# start logging
tmux send-keys -t $PROJECT:logs "cd $BASE;" C-m
# select the vim pane in the rails window
tmux select-window -t $PROJECT:console
tmux select-pane -t $PROJECT:console.2
# new-session creates a third window named 'dev'
tmux new-window -t $PROJECT:3 -n dev
tmux split-window -t $PROJECT:dev -h
tmux send-keys -t $PROJECT:dev.0 "cd $BASE; vim" C-m
tmux send-keys -t $PROJECT:dev.1 "cd $BASE; vim" C-m
# make the tmux session active
tmux attach-session -d -t $PROJECT
| true
|
8bfc032d005b6b0bd6a999cfc1a2207e83d8f06b
|
Shell
|
elentok/dotfiles
|
/extra/ulauncher
|
UTF-8
| 341
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
source $DOTF/framework
VERSION=5.3.0
if dotf-command-missing ulauncher; then
dotf-apt python3-distutils-extra python3-levenshtein python3-websocket
cd $TMP
curl -L -O https://github.com/Ulauncher/Ulauncher/releases/download/${VERSION}/ulauncher_${VERSION}_all.deb
sudo dpkg -i ulauncher_${VERSION}_all.deb
fi
| true
|
647fce503a903694eb2fbf5ae93c7e016cfe23ec
|
Shell
|
actuated/user-parse
|
/user-mod.sh
|
UTF-8
| 14,895
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# user-mod.sh
# 11/3/2015 by Ted R (http://github.com/actuated)
# Script to generate additions to possible usernames
# ie, f/m/l initials, appended numbers
# 1/1/2016 - Aesthetic change
varDateCreated="11/3/2015"
varDateLastMod="1/1/2016"
varTempRandom=$(( ( RANDOM % 9999 ) + 1 ))
varTempFile="temp-user-mod-$varTempRandom.txt"
if [ -f $varTempFile ]; then rm $varTempFile; fi
varInFile="null"
varOutFile="null"
varFormat="null"
varGMode="n"
varSMode="n"
varStatic="null"
varPos="4"
# Function for displaying help/usage information
function usage
{
echo
echo "====================[ username modifier - Ted R (github: actuated) ]====================="
echo
echo "Script used to add a string, numbers, or letters to possible usernames."
echo
echo "Created $varDateCreated, last modified $varDateLastMod."
echo
echo "=======================================[ syntax ]========================================"
echo
echo "./user-mod.sh -i [input file] [mode [parameter]] -p [position] [-o [outout file]]"
echo
echo
echo -e "-i [file] \t - Input file, must exist"
echo
echo -e "-s [string] \t - String mode"
echo -e "\t\t - 'String' will be added to each line"
echo
echo -e "-g [format] \t - Generator mode"
echo -e "\t\t - Add letters a-z or numbers to each line"
echo -e "\tabc \t - Format for letters a-z"
echo -e "\tn \t - Format for numbers 0-9"
echo -e "\t\t - Can be used 1-4 times (ex: -g n / -g nnnn)"
echo
echo -e "-p [value] \t - Position to insert new text"
echo -e "\t1\t - Add text to the beginning of each line"
echo -e "\t2\t - Add text to the middle of each line*"
echo -e "\t3\t - Add text to the end of each line"
echo
echo -e "-o [file] \t - Output file, must not exist"
echo
echo "========================================[ info ]========================================="
echo
echo "- For -p 1 and 3, each line of the input file will be read as an input string."
echo "- For -p 2, each line of the input file should contain two space-delimited substrings."
echo " - These substrings would appear on either side of the inserted string."
echo "- One mode, -s or -g, must be specified."
echo " - For -s [string], the specified string will be added to each input file string."
echo " - '~' can be used to insert your input file lines in between two substrings."
echo " - Ex: -s first~third"
echo " - '~' can only be used once."
echo " - For -g [format], letters or numbers will be generated to add to each string."
echo " - abc = letters a-z."
echo " - n = numbers 0-9, can be repeated 1-4 times (n, nn, nnn, nnnn)."
echo "- Position (-p) controls where the inserted or generated text will be added to the input."
echo " - 1 = prepended to each input string."
echo " - 2 = inserted in between two space-delimited input substrings."
echo " - 3 = appended to the end of each input string."
echo " - -p is not required when using '~' with -s."
echo "- An output file is required, given the bulk nature of this script."
echo "- If the script is stopped or fails, check for temp-user-mod-*.txt."
echo "- A line might be skipped if (1) if is blank, (2) it starts with a '#', or (3) you are"
echo " using -p 2 with lines that do not contain two space-delimited substrings."
echo
echo "Usage Scenarios:"
echo
echo "Employee IDs - You have a list of 'jsmith'-format possible usernames, but the target"
echo " adds a 4-digit employee ID to the end of each username."
echo " - Make your input file the list of jsmith-format names"
echo " - Run: ./user-mod.sh -i input.txt -g nnnn -p 3 -o output.txt"
echo " - Results: 'jsmith0000 - jsmith9999'"
echo
echo "Middle Initials - You have a list of first and last names, but the target uses the"
echo " username format john.x.smith."
echo " - Make your input file the list of space-delimited names (ex: john. .smith)"
echo " - Run: ./user-mod.sh -i input.txt -g abc -p 2 -o output.txt"
echo " - Results: 'john.a.smith - john.z.smith'"
echo
echo "Admin Accounts - You have a list of user 'jsmith' usernames, and the target has"
echo " separate 'admin-jsmith' accounts for privileged users."
echo " - Make your input file the list of jsmith-format names"
echo " - Run: ./user-mod.sh -i input.txt -s admin- -p 1 -o output.txt"
echo " - Results: 'admin-jsmith', 'admin-x', where x is any other line of the input"
echo
exit
}
function check_mask
{
if [ "$varGMode" = "y" ] && [ "$varFormat" != "abc" ] && [ "$varFormat" != "n" ] && [ "$varFormat" != "nn" ] && [ "$varFormat" != "nnn" ] && [ "$varFormat" != "nnnn" ] ; then
echo; echo "Error: Invalid format type supplied (abc, n, nn, nnn, or nnnn)."; usage; fi
}
# Check input for necessary length
#varTestInput=$(echo "$1 $2 $3 $4 $5 $6 $7 $8 $9" | awk '{print NF}')
#if [ "$varTestInput" != "8" ]; then
# echo
# echo "Error: Input appears to be incomplete or incorrect."
# usage
#fi
# Check for options and parameters
while [ "$1" != "" ]; do
case $1 in
-i ) shift
varInFile=$1
if [ "$varInFile" = "" ]; then varInFile="null"; fi # Flag for error if no file name was given
if [ ! -f "$varInFile" ]; then varInFile="existerror"; fi # Flag for error if input file does not exist
;;
-s ) shift
varSMode="y"
varStatic=$1
if [ "$varStatic" = "" ]; then varStatic="null"; fi # Flag for error if no static string was given
;;
-g ) shift
varGMode="y"
varFormat=$(echo "$1" | tr 'A-Z' 'a-z')
if [ "$varFormat" = "" ]; then varFormat="null"; fi # Flag for error if no generate format was given
;;
-p ) shift
varPos=$1
if [ "$varPos" != "1" ] && [ "$varPos" != "2" ] && [ "$varPos" != "3" ]; then varPos="4"; fi # Flag for error on invalid position
;;
-o ) shift
varOutFile=$1
if [ "$varOutFile" = "" ]; then varOutFile="null"; fi # Flag for error if no file name was given
if [ -f "$varOutFile" ]; then varOutFile="existerror"; fi # Flag for error if output file exists
;;
-h ) usage
exit
;;
* ) usage
exit 1
esac
shift
done
varCheckStringWrap=$(echo "$varStatic" | grep '\~')
if [ "$varCheckStringWrap" != "" ]; then varPos=0; fi
# Check parameters for errors
if [ "$varInFile" = "null" ]; then echo; echo "Error: Input file was not set."; usage; fi
if [ "$varInFile" = "existerror" ]; then echo; echo "Error: Input file does not exist."; usage; fi
if [ "$varOutFile" = "null" ]; then echo; echo "Error: Output was enabled but no file name was given."; usage; fi
if [ "$varOutFile" = "existerror" ]; then echo; echo "Error: Output file already exists."; usage; fi
if [ "$varPos" -gt "3" ]; then echo; echo "Error: Position (-p) was not set to 1, 2 or 3."; usage; fi
if [ "$varSMode" = "y" ] && [ "$varGMode" = "y" ]; then echo; echo "Error: Both -s and -g were provided."; usage; fi
if [ "$varSMode" = "n" ] && [ "$varGMode" = "n" ]; then echo; echo "Error: No mode (-s or -g) was provded."; usage; fi
if [ "$varSMode" = "y" ] && [ "$varStatic" = "null" ]; then echo; echo "Error: -s was used with no string provided."; usage; fi
if [ "$varGMode" = "y" ] && [ "$varFormat" = "null" ]; then echo; echo "Error: -g was used with no format provided (abc, n, nn, nnn, nnnn)."; usage; fi
check_mask
# Display parameters to user for confirmation before starting
echo
echo "====================[ username modifier - Ted R (github: actuated) ]====================="
echo
if [ "$varSMode" = "y" ]; then
if [ "$varPos" = "0" ]; then echo "Inserting lines from $varInFile into '$varStatic'."; fi
if [ "$varPos" = "1" ]; then echo "Prepending each line of $varInFile with '$varStatic'."; fi
if [ "$varPos" = "2" ]; then echo "Inserting '$varStatic' into each space-delimited line of $varInFile."; fi
if [ "$varPos" = "3" ]; then echo "Appending '$varStatic' to the end of each line of $varInFile."; fi
fi
if [ "$varGMode" = "y" ]; then
if [ "$varFormat" = "abc" ]; then
if [ "$varPos" = "1" ]; then echo "Generating letters a-z to prepend each line of $varInFile."; fi
if [ "$varPos" = "2" ]; then echo "Inserting letters a-z into each space-delimited line of $varInFile."; fi
if [ "$varPos" = "3" ]; then echo "Appending letters a-z to the end of each line of $varInFile."; fi
else
if [ "$varPos" = "1" ]; then echo "Generating numbers 0-9 ($varFormat) to prepend each line of $varInFile."; fi
if [ "$varPos" = "2" ]; then echo "Inserting numbers 0-9 ($varFormat) into each space-delimited line of $varInFile."; fi
if [ "$varPos" = "3" ]; then echo "Appending numbers 0-9 ($varFormat) to the end of each line of $varInFile."; fi
fi
fi
echo
echo "Output will be written to $varOutFile."
echo
read -p "Press Enter to continue..."
echo
echo "====================================[ modification ]====================================="
echo
# Process usernames to temp file
varCountLine=0
varCountCreated=0
varCountSkipped=0
while read -r varLine; do
varLineOut=""
varSkip="n"
varCheckComment=$(echo "$varLine" | grep '^\#')
varCheckFields=$(echo "$varLine" | awk '{print NF}')
# Check for line issues
if [ "$varLine" = "" ]; then varSkip="y"; let varCountSkipped=varCountSkipped+1; fi
if [ "$varSkip" = "n" ] && [ "$varCheckComment" != "" ]; then varSkip="y"; let varCountSkipped=varCountSkipped+1; fi
if [ "$varSkip" = "n" ] && [ "$varPos" = "2" ] && [ "$varCheckFields" != "2" ]; then varSkip="y"; let varCountSkipped=varCountSkipped+1; fi
if [ "$varSMode" = "y" ] && [ "$varSkip" = "n" ]; then
if [ "$varPos" = "0" ]; then
varCountWrap=$(echo "$varStatic" | awk -F '\~' '{print NF}')
if [ "$varCountWrap" = "2" ]; then
varLineA=$(echo "$varStatic" | awk -F '\~' '{print $1}')
varLineB=$(echo "$varStatic" | awk -F '\~' '{print $2}')
varLineOut="$varLineA$varLine$varLineB"
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
fi
fi
if [ "$varPos" = "1" ]; then
varLineOut="$varStatic$varLine"
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varStatic$varLineB"
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
fi
if [ "$varPos" = "3" ]; then
varLineOut="$varLine$varStatic"
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
fi
fi
if [ "$varGMode" = "y" ] && [ "$varSkip" = "n" ]; then
if [ "$varFormat" = "abc" ]; then
for varABC in {a..z}; do
if [ "$varPos" = "1" ]; then varLineOut="$varABC$varLine"; fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varABC$varLineB"
fi
if [ "$varPos" = "3" ]; then varLineOut="$varLine$varABC"; fi
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
done
elif [ "$varSkip" = "n" ] && [ "$varFormat" = "n" ]; then
for varN in {0..9}; do
if [ "$varPos" = "1" ]; then varLineOut="$varN$varLine"; fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varN$varLineB"
fi
if [ "$varPos" = "3" ]; then varLineOut="$varLine$varN"; fi
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
done
elif [ "$varSkip" = "n" ] && [ "$varFormat" = "nn" ]; then
for varN in {00..99}; do
if [ "$varPos" = "1" ]; then varLineOut="$varN$varLine"; fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varN$varLineB"
fi
if [ "$varPos" = "3" ]; then varLineOut="$varLine$varN"; fi
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
done
elif [ "$varSkip" = "n" ] && [ "$varFormat" = "nnn" ]; then
for varN in {000..999}; do
if [ "$varPos" = "1" ]; then varLineOut="$varN$varLine"; fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varN$varLineB"
fi
if [ "$varPos" = "3" ]; then varLineOut="$varLine$varN"; fi
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
done
elif [ "$varSkip" = "n" ] && [ "$varFormat" = "nnnn" ]; then
for varN in {0000..9999}; do
if [ "$varPos" = "1" ]; then varLineOut="$varN$varLine"; fi
if [ "$varPos" = "2" ]; then
varLineA=$(echo "$varLine" | awk '{print $1}')
varLineB=$(echo "$varLine" | awk '{print $2}')
varLineOut="$varLineA$varN$varLineB"
fi
if [ "$varPos" = "3" ]; then varLineOut="$varLine$varN"; fi
echo "$varLineOut" >> $varTempFile
echo -ne "Created $varLineOut... "\\r
let varCountCreated=varCountCreated+1
done
fi
fi
let varCountLine=varCountLine+1
done < $varInFile
let varLinesUsed=varCountLine-varCountSkipped
echo -ne "Created $varCountCreated from $varLinesUsed Lines ($varCountSkipped Skipped of $varCountLine Lines)."
echo; echo
read -p "Press Enter to display results..."
echo
# Display results
echo "=======================================[ output ]========================================"
echo
if [ -f $varTempFile ]; then
cat $varTempFile | sort | uniq > $varOutFile
varOutCount=$(wc -l < $varOutFile)
if [ "$varOutCount" -gt "10" ]; then
echo "Top 10 Lines of $varOutFile:"
echo
head $varOutFile
echo "... cont'd..."
else
cat $varOutFile
fi
if [ -f $varTempFile ]; then rm $varTempFile; fi
else
echo "No results..."
fi
echo
echo "========================================[ fin. ]========================================="
echo
| true
|
9106e18cdee9c826f3b0ce0c29d88e30ee74e479
|
Shell
|
skatterbug/code
|
/fade.sh
|
UTF-8
| 225
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
BRANCH="r-nfldev"
git checkout $BRANCH
git pull
team=$1
file=src/scss/modules/_variables.scss
sed "s/\(.*$team.*eliminated: \)\(false\|true\)\(.*\)/\1true\3/" $file > $file.temp
mv -f $file.temp $file
git diff
| true
|
1e73a1bd0ae59deb88e061834de479a5a8f4af17
|
Shell
|
SUSE/scf
|
/make/stratos/metrics/stop
|
UTF-8
| 466
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -o errexit -o nounset
: "${GIT_ROOT:=$(git rev-parse --show-toplevel)}"
source "${GIT_ROOT}/make/include/defaults"
source "${GIT_ROOT}/make/include/has_namespace"
source "${GIT_ROOT}/make/include/has_release"
if has_release "${STRATOS_METRICS_HELM_RELEASE}"; then
helm delete --purge "${STRATOS_METRICS_HELM_RELEASE}"
fi
if has_namespace "${STRATOS_METRICS_NAMESPACE}"; then
kubectl delete namespace "${STRATOS_METRICS_NAMESPACE}"
fi
| true
|
2773cea34597371ac8ecf3bcc4af61b3a300a8aa
|
Shell
|
biodranik/landing-hugo
|
/tools/build.sh
|
UTF-8
| 676
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Generates production version of static html site.
#
# Created by Alexander Borsuk <me@alex.bio> from Minsk, Belarus.
#
# Useful debug options:
# -e aborts if any command has failed.
# -u aborts on using unset variable.
# -x prints all executed commands.
# -o pipefail aborts if on any failed pipe operation.
set -euo pipefail
HUGO_BINARY=hugo
command -v $HUGO_BINARY >/dev/null 2>&1 || { echo >&2 "Please install the latest hugo binary from brew (MacOS) or from here: https://github.com/gohugoio/hugo/releases"; exit 1; }
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
$HUGO_BINARY -s "$SCRIPT_DIR/.." --cleanDestinationDir=true --environment production
| true
|
db7484121931c26fa0c2e084f87f2728e930e2c4
|
Shell
|
SoxSokko/Shell_Script
|
/xBoxScript/ver/update_ver.sh
|
UTF-8
| 756
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
source /data/xBoxScript/func/ver_func.sh
source /data/xBoxScript/func/run_func.sh
set -e
FullPath=$1
file=${FullPath##*/}
filename=${file%.*}
extension=${file##*.}
TargetTempDir="/data/ver/pkg_tmp"
NewFileName="$filename"_"$(date +%Y%m%d%k%M%S)"
if [ $# -lt 2 ]; then
PrintRunErr "Usage: too few paramter"
fi
if [ "$extension" == "zip" ]; then
unzip -o -d $TargetTempDir/$filename $1
else
PrintRunErr "file ext is not zip"
fi
if [ "$(ls -a $TargetTempDir/$filename)" == "" ]; then
PrintRunErr "$TargetTempDir/$filename dir is empty"
else
mv "$TargetTempDir/$filename" $TargetTempDir/$NewFileName
funcMVFolder $TargetTempDir/$NewFileName/ $2
rm -rf $TargetTempDir/$NewFileName/
chmod +x "$2"$3
fi
PrintRunInfo "update $filename ok"
| true
|
887ef8a1123e706b1c38c1f2e08f17f99f5bcb74
|
Shell
|
cha63506/oldshit
|
/client/Sepaq/sync.sh
|
UTF-8
| 4,729
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# vim: set ai et ts=~4 sw=4 ft=sh:
DATE=`/bin/date +%F`
message=/tmp/emailmessage.txt
function mailperl {
perl <<'EOF'
use Mail::Sender;
$data_file="/tmp/emailmessage.txt";
open(DATA, $data_file) || die("Could not open file!");
@raw_data=<DATA>;
close(DATA);
$sender = new Mail::Sender
{smtp => 'mail.sepaq.com', from => 'root@sepaq.com'};
$sender->MailMsg({to => "doyon4\@gmail.com",
$sender->MailMsg({to => "replication\@sepaq.com",
subject => "Syncronisation",
msg => "probleme avec
@raw_data",});
EOF
}
# TODO
# ajouter un sync du data live de ldap et mysql
# utiliser user sogo pour sync mysql
# gestion d'erreur, rapport ou msg d'erreur a replique@sepaq.com
# logging avec logger
#FAIT # prendre /opt/backups/sogo (backups individuels)
#FAIT # sync /drbd/opt/www /opt/www
#FAIT # sync /var/www/sogo-sepaq
# temps
# 8, 12, 18, 24
# list packages a reinstaller
# [FAIT]openldap, [FAIT-marchepas]imapproxy, [FAIT]cyrus, [FAIT]postfix
#exec Live backup
#ssh -i "/opt/replique/ssh/sally-cmd.key" mail.sepaq.com /opt/local/scripts/dump.sh
#sync WWW
logger $0 "start syncing /drbd/opt/www/"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/drbd/opt/www/ /opt/www
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /drbd/opt/www/"
echo "an error occured while syncing /drbd/opt/www/" >> $message
else
logger $0 "successful syncing of /drbd/opt/www/"
fi
logger $0 "start syncing /var/www/html/sogo-sepaq/"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/var/www/html/sogo-sepaq/ /var/www/html/sogo-sepaq
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /var/www/html/sogo-sepaq/"
echo "an error occured while syncing /var/www/html/sogo-sepaq/" >> $message
else
logger $0 "successful syncing of /var/www/html/sogo-sepaq/"
fi
logger $0 "stopping cyrus"
/etc/init.d/cyrus-imapd stop
#sync mail
logger $0 "start syncing /drbd/var/spool/"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/drbd/var/spool/ /data/var/spool
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /drbd/var/spool/"
echo "an error occured while syncing /drbd/var/spool/" >> $message
else
logger $0 "successful syncing of /drbd/var/spool/"
fi
#sync cyrus DB
logger $0 "start syncing /drbd/var/lib/imap/ (CYRUS-DB)"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/drbd/var/lib/imap/ /var/lib/imap
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /drbd/var/lib/imap/"
echo "an error occured while syncing /drbd/var/lib/imap/" >> $message
else
logger $0 "successful syncing of /drbd/var/lib/imap/"
fi
#sync ldap-mysql-sogo
logger $0 "start syncing /opt/backups/"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/opt/backups/ /opt/backups
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /opt/backups/"
echo "an error occured while syncing /opt/backups/" >> $message
else
logger $0 "successful syncing of /opt/backups/"
fi
bzip2 -d /opt/backups/ldap/${DATE}_ldap.ldif.bz2
/etc/init.d/ldap stop
rm -rf /var/lib/ldap/*
echo "set_cachesize 0 268435456 1" >> /var/lib/ldap/DB_CONFIG
echo "set_lg_regionmax 262144" >> /var/lib/ldap/DB_CONFIG
echo "set_lg_bsize 2097152" >> /var/lib/ldap/DB_CONFIG
# retrait des entrees pour la config master/master
sed -i 's/contextCSN: .*.001#000000//' /opt/backups/ldap/${DATE}_ldap.ldif
sed -i 's/contextCSN: .*.002#000000//' /opt/backups/ldap/${DATE}_ldap.ldif
slapadd -l /opt/backups/ldap/${DATE}_ldap.ldif
#slapadd -l /opt/backups/ldap/ldap-live.ldif
chown ldap:ldap /var/lib/ldap/*
/etc/init.d/ldap start
#restore mysql
bzip2 -d /opt/backups/mysql/${DATE}_sogo.sql.bz2
#mysql -u sogo -plwigtR0zFvscN4xz < /opt/backups/mysql/${DATE}_sogo.sql
mysql -u sogo -plwigtR0zFvscN4xz < /opt/backups/mysql/${DATE}_sogo.sql
#sync sogo
logger $0 "start syncing /var/spool/sogo/"
rsync -avz --delete -e "ssh -i /opt/replique/ssh/sally-replique-rsync.key" mail.sepaq.com:/var/spool/sogo/ /var/spool/sogo
RC=$?
if [ $RC -ne 0 ]; then
logger $0 "an error occured while syncing /var/spool/sogo/"
echo "an error occured while syncing /var/spool/sogo/" >> $message
else
logger $0 "successful syncing of /var/spool/sogo/"
fi
/etc/init.d/cyrus-imapd start
if [ ! -s $message ]; then
logger $0 "No problem while syncing"
else
mailperl;
fi
| true
|
0cb7a4b2d2fc25cb0c50a6d10c50dfbd356a6051
|
Shell
|
dlux/InstallScripts
|
/install_kanboard.sh
|
UTF-8
| 3,942
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ============================================================================
# Script installs and configure kanboard - SW to manage kanban projects
# Assume: Ubuntu distro. Mysql DB.
# ============================================================================
# Uncomment the following line to debug this script
# set -o xtrace

# ================== Processes Functions =====================================
# Absolute directory this script lives in (used to locate common_packages).
INSTALL_DIR=$(cd $(dirname "$0") && pwd)
# Default MySQL/kanboard password; override with --password.
_password="secrete9"
# Default kanboard release to install; override with --release.
_release="1.2.6"
# Shared helpers: EnsureRoot, SetLocale, PrintHelp, PrintError, HandleOptions,
# InstallApache/InstallMysql/InstallPhp, CustomizeApache, UnsetProxy, $_PROXY.
source $INSTALL_DIR/common_packages

# Print this script's usage text via the shared PrintHelp helper.
function _PrintHelp {
    installTxt="Install and configure kanboard"
    scriptName=$(basename "$0")
    opts="   --password | -p     Use given password when needed.\n"
    opts="$opts   --release | -r     Install given kanboard release. Default to 1.2.3"
    PrintHelp "${installTxt}" "${scriptName}" "${opts}"
}

# ================== Processes script options ================================
EnsureRoot
SetLocale /root
# Parse command-line flags; unknown flags are delegated to HandleOptions.
while [[ ${1} ]]; do
    case "${1}" in
        --password|-p)
            msg="Missing password."
            if [[ -z $2 ]]; then PrintError "${msg}"; else _password="${2}"; fi
            shift
            ;;
        --release|-r)
            msg="Missing release."
            if [[ -z $2 ]]; then PrintError "${msg}"; else _release="${2}"; fi
            shift
            ;;
        --help|-h)
            _PrintHelp
            ;;
        *)
            HandleOptions "$@"
            shift
    esac
    shift
done

# ================== Prerequisites ===========================================
# Install development tools
# ($_PROXY optionally prefixes commands with proxy environment settings.)
eval $_PROXY apt-get update
eval $_PROXY apt-get install -y wget curl unzip
eval $_PROXY apt-get install -y build-essential libapache2-mod-proxy-html libxml2-dev
# Apache, Mysql, Php
eval $_PROXY InstallApache
eval $_PROXY InstallMysql "${_password}"
eval $_PROXY InstallPhp

# ================== Installation & Configuration ============================
# Customize Apache Error pages
CustomizeApache
# Create kanboard mysql configuration
# NOTE(review): the password is visible on the mysql command line (ps output).
mysql -uroot -p"${_password}" <<MYSQL_SCRIPT
CREATE DATABASE kanboard;
CREATE USER 'kanboard'@'localhost' IDENTIFIED BY '$_password';
GRANT ALL PRIVILEGES ON kanboard . * TO 'kanboard'@'localhost';
FLUSH PRIVILEGES;
MYSQL_SCRIPT
# Get kanboard zip file
pushd /var/www/html
release_file="v${_release}.zip"
eval $_PROXY wget "https://github.com/kanboard/kanboard/archive/$release_file"
# Install kanboard
unzip $release_file
# The web server user must be able to write kanboard's data directory.
chown -R www-data:www-data "kanboard-${_release}"/data
rm $release_file
# Install Auth Github plugin
#pushd kanboard/plugins
#eval $_PROXY wget https://github.com/kanboard/plugin-github-auth/archive/v1.0.3.zip
#unzip v1.0.3.zip
#rm v1.0.3.zip
#popd
# Configure kanboard - users, mysql, dbinfo
pushd "kanboard-${_release}"
# Create php configuration
cp config.default.php config.php
# Switch debugging/logging on, disable mail, and point kanboard at MySQL.
sed -i "s/DEBUG', false/DEBUG', true/g" config.php
sed -i "s/LOG_DRIVER', ''/LOG_DRIVER', 'file'/g" config.php
sed -i "s/MAIL_CONFIGURATION', true/MAIL_CONFIGURATION', false/g" config.php
sed -i "s/sqlite'/mysql'/g" config.php
sed -i "s/DB_USERNAME', 'root/DB_USERNAME', 'kanboard/g" config.php
sed -i "s/DB_PASSWORD.*.'/DB_PASSWORD', '${_password}'/g" config.php
# These must be set once app is registered on git
# Further info https://developer.github.com/v3/guides/basics-of-authentication/
#echo "// Github client id (Copy it from your settings -> Applications -> Developer applications)" >> config.php
#echo "define('GITHUB_CLIENT_ID', 'YOUR_GITHUB_CLIENT_ID');" >> config.php
#echo "// Github client secret key (Copy it from your settings -> Applications -> Developer applications)" >> config.php
#echo "define('GITHUB_CLIENT_SECRET', 'YOUR_GITHUB_CLIENT_SECRET');" >> config.php
# Initialize DB by importing Kanboard MySql schema
mysql -uroot -p"${_password}" kanboard < app/Schema/Sql/mysql.sql
popd
popd
# Cleanup proxy
UnsetProxy $_ORIGINAL_PROXY
echo "Installation Completed Successfully"
echo "Goto http://localhost/kanboard-${_release}. U/P: admin/admin"
| true
|
0bccc1c663a8730b2fd062836c0e5da1a89073cc
|
Shell
|
rlminers/rick
|
/bin/logfile_cleanup.sh
|
UTF-8
| 2,150
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Purge Oracle trace/audit/log files and rotate alert logs on a RAC node.
FIND=/usr/bin/find
# Files last modified more than this many days ago are deleted.
PURGE_DAYS=1
# Short host name (domain part stripped).
HOSTNAME=`hostname | cut -d '.' -f 1`
# ASM instance name from /etc/oratab; its trailing digit is the RAC node number.
ASM_NAME=`cat /etc/oratab | grep -v "^#" | grep -v '^$' | grep ASM | cut -d":" -f1`
# NOTE(review): ${ASM_NAME: -1} is a bashism; /bin/sh must resolve to bash here.
RAC_NODE=`echo ${ASM_NAME: -1}`
if [ -z "$HOSTNAME" ]; then
  echo HOSTNAME not set. Exiting...
  exit 1
fi
echo "--------------------------------------------------------------------------------------------------------------"
echo " Purge trace, audit, trm, ..., Rotate alert logs."
echo " Host: $HOSTNAME"
echo " Date: `date`"
echo "--------------------------------------------------------------------------------------------------------------"
echo
#######################################
# Delete old Oracle trace/audit/log files beneath a directory.
# Globals:
#   FIND       - path to the find(1) binary
#   PURGE_DAYS - delete files last modified more than this many days ago
# Arguments:
#   $1 - directory to purge (silently skipped when it does not exist)
# Outputs:
#   Progress messages and an 'ls -l' listing of every file about to be removed.
#######################################
purge_dir ()
{
  echo "Start Purge Dir Function"
  DIR=$1
  echo "DIR = $DIR"
  # Each pattern is quoted so the shell never globs it against the current
  # working directory (the original unquoted $FILE_EXTS expansion did);
  # find(1) matches the pattern against the target directory instead.
  for FILE_EXT in 'log_*.xml' '*.trc' '*.trm' '*.aud'
  do
    if [ -d "$DIR" ]
    then
      echo "$FIND $DIR -name '$FILE_EXT' -mtime +${PURGE_DAYS} -exec ls -l {} \;"
      "$FIND" "$DIR" -name "$FILE_EXT" -mtime "+${PURGE_DAYS}" -exec ls -l {} \;
      "$FIND" "$DIR" -name "$FILE_EXT" -mtime "+${PURGE_DAYS}" -exec rm -f {} \;
    fi
  done
}
echo "###########################################"
echo "###########################################"
# Oracle homes and diagnostic destinations on this host.
GRID_HOME=/u01/app/12.1.0.2/grid
ORACLE_HOME=/u01/app/oracle/product/12.1.0.2/db_1
ORACLE_ADMIN=/u01/app/oracle/admin
DIAG_DEST=/u01/app/oracle/diag
# Purge each well-known trace/alert/audit location in turn.
echo "##### ASM ##### "
purge_dir "$DIAG_DEST/asm/+asm/${ASM_NAME}/trace"
echo "##### LISTENER ##### "
purge_dir "$DIAG_DEST/tnslsnr/${HOSTNAME}/listener/alert"
purge_dir "$DIAG_DEST/tnslsnr/${HOSTNAME}/listener_scan1/alert"
purge_dir "$DIAG_DEST/tnslsnr/${HOSTNAME}/listener_scan2/alert"
purge_dir "$DIAG_DEST/tnslsnr/${HOSTNAME}/listener_scan3/alert"
echo "##### DB ##### "
purge_dir "$DIAG_DEST/rdbms/orcl/ORCL/trace"
echo "##### AUDIT ##### "
purge_dir "$ORACLE_HOME/rdbms/audit"
purge_dir "$GRID_HOME/rdbms/audit"
# ####################################################################
# Rotate logs
# ####################################################################
# Per-node logrotate state/config files keep the RAC nodes independent.
/usr/sbin/logrotate -v -s /home/oracle/dba/etc/logrotate${RAC_NODE}.status /home/oracle/dba/etc/logrotate${RAC_NODE}.conf
exit 0
| true
|
8c08ac456f5ab338ea3c97421e67f727756c206a
|
Shell
|
malston/pks-azure
|
/scripts/deploy-director.sh
|
UTF-8
| 6,078
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure and deploy the Ops Manager BOSH director for PKS on Azure.
# Reads platform values from the terraform state of $ENVIRONMENT_NAME,
# exports them as director_* variables, then drives the `om` CLI.

set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail

__DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# ENVIRONMENT_NAME selects the terraform state file; it must match the
# environment_name variable in terraform.tfvars.
if [ -z "$ENVIRONMENT_NAME" ]; then
  echo "Must provide environment name ENVIRONMENT_NAME as environment variable"
  echo "Set this to the same value of environment_name var in terraform.tfvars"
  exit 1
fi

# Prompt for any missing Ops Manager credentials and persist them in .envrc
# so subsequent runs do not ask again.
if [[ -z "${OPSMAN_USER}" ]]; then
  echo "Enter a username for the opsman administrator account: "
  read -r OPSMAN_USER
  printf "\nexport OPSMAN_USER=%s" "${OPSMAN_USER}" >> "${__DIR}/../.envrc"
fi
if [[ -z "${OPSMAN_PASSWORD}" ]]; then
  echo "Enter a password for the opsman administrator account: "
  read -rs OPSMAN_PASSWORD
  printf "\nexport OPSMAN_PASSWORD=%s" "${OPSMAN_PASSWORD}" >> "${__DIR}/../.envrc"
fi
if [[ -z "${OPSMAN_DECRYPTION_PASSPHRASE}" ]]; then
  echo "Enter a decryption passphrase to unlock the opsman ui: "
  read -rs OPSMAN_DECRYPTION_PASSPHRASE
  printf "\nexport OPSMAN_DECRYPTION_PASSPHRASE=%s" "${OPSMAN_DECRYPTION_PASSPHRASE}" >> "${__DIR}/../.envrc"
fi
if [[ -z "${OPSMAN_SKIP_SSL_VALIDATION}" ]]; then
  echo "Disable ssl validation for Ops Manager [true/false]: "
  read -r OPSMAN_SKIP_SSL_VALIDATION
  printf "\nexport OPSMAN_SKIP_SSL_VALIDATION=%s" "${OPSMAN_SKIP_SSL_VALIDATION}" >> "${__DIR}/../.envrc"
fi

# shellcheck source=/dev/null
[[ -f "${__DIR}/../.envrc" ]] && \
  source "${__DIR}/../.envrc" || \
  (echo "${__DIR}/../.envrc not found" && exit 1)

export STATE_FILE=${__DIR}/../pcf/state/"$ENVIRONMENT_NAME"/terraform.tfstate
export director_iaas_configuration_environment_azurecloud="AzureCloud"

# Read one value from the terraform state; extra flags (e.g. -json) may be
# passed before the output name.
tf_output() {
  terraform output -state="${STATE_FILE}" "$@"
}

# export_tf_output <env-var> <terraform-output>
# Exports <env-var> set to the value of the given terraform output.
export_tf_output() {
  local value
  value="$(tf_output "$2")"
  export "$1=${value}"
}

export_tf_output director_bosh_root_storage_account bosh_root_storage_account
export_tf_output director_client_id client_id
export_tf_output director_client_secret client_secret
export_tf_output director_bosh_deployed_vms_security_group_name bosh_deployed_vms_security_group_name
export_tf_output director_resource_group_name pcf_resource_group_name
export_tf_output director_ops_manager_ssh_public_key ops_manager_ssh_public_key
export_tf_output director_ops_manager_ssh_private_key ops_manager_ssh_private_key
export_tf_output director_subscription_id subscription_id
export_tf_output director_tenant_id tenant_id
export_tf_output director_network_name network_name

# The three PCF networks each expose the same four subnet attributes.
for subnet in infrastructure pks services; do
  for attr in name cidr gateway range; do
    export_tf_output "director_${subnet}_subnet_${attr}" "${subnet}_subnet_${attr}"
  done
done

# DNS name servers come back as a JSON list; render them as "a, b, c".
director_env_dns_zone_name_servers="$(tf_output -json env_dns_zone_name_servers | jq -r .[] | tr '\n' ',' | sed -e 's/.,/, /g' -e 's/, $//')"
export director_env_dns_zone_name_servers

export director_pcf_pks_api_sg="pcf-pks-api-sg"
# director_pks_api_app_sec_group="$(terraform output -state="${STATE_FILE}" pks-api-app-sec-group)"
director_pks_api_app_sec_group="pcf-pks-api-app-sec-group"
export director_pks_api_app_sec_group
# director_pks_master_app_sec_group="$(terraform output -state="${STATE_FILE}" pks-master-app-sec-group)"
director_pks_master_app_sec_group="pcf-pks-master-app-sec-group"
export director_pks_master_app_sec_group
director_pks_lb="pcf-pks-lb"
export director_pks_lb

# shellcheck source=/dev/null
[[ -f "${__DIR}/set-om-creds.sh" ]] && \
  source "${__DIR}/set-om-creds.sh" || \
  (echo "set-om-creds.sh not found" && exit 1)

# Validate template
om interpolate --config "${__DIR}/../templates/director.yml" --vars-env=director

# Configure Ops Manager Authentication
om -t "$OM_TARGET" --skip-ssl-validation \
  configure-authentication \
  --decryption-passphrase "$OM_DECRYPTION_PASSPHRASE" \
  --username "$OM_USERNAME" \
  --password "$OM_PASSWORD"

# Configure Ops Manager Director
om -t "$OM_TARGET" --skip-ssl-validation \
  configure-director --config "${__DIR}/../templates/director.yml" --vars-env=director

# Deploy Ops Manager Director
om -t "$OM_TARGET" --skip-ssl-validation apply-changes --skip-deploy-products
| true
|
f73687234d7c50bf036127a52060cb7125f9ff9e
|
Shell
|
jander99/dotfiles
|
/.config/yadm/scripts/docker-cleanup.sh
|
UTF-8
| 684
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Remove dangling Docker volumes and images, user-created bridge networks and
# exited containers.  Individual failures are ignored so cleanup always runs
# through every stage.
echo "Cleaning up Docker Components"
# Nothing to do when the docker CLI is absent.
if ! command -v docker > /dev/null 2>&1; then
  echo "Docker not installed"
  exit 0
fi
# Run a command discarding all of its output.  Docker IDs never contain
# whitespace, so the deliberately unquoted substitutions below split into
# exactly one argument per ID.
quiet() { "$@" > /dev/null 2>&1; }
echo "Removing Volumes"
quiet docker volume rm $(docker volume ls -qf dangling=true)
echo "Removing Networks"
quiet docker network rm $(docker network ls | grep "bridge" | awk '/ / { print $1 }')
echo "Removing Images"
quiet docker rmi $(docker images --filter "dangling=true" -q --no-trunc)
quiet docker rmi $(docker images | grep "none" | awk '/ / { print $3 }')
echo "Removing Containers"
quiet docker rm $(docker ps -qa --no-trunc --filter "status=exited")
echo "Cleaning Complete"
| true
|
7a7a4db1f49b1d43e84b35f90e20826df9099df6
|
Shell
|
petronny/aur3-mirror
|
/haskell-base-unicode-symbols-git/PKGBUILD
|
UTF-8
| 1,722
| 3.0625
| 3
|
[] |
no_license
|
# Maintainer: Carlos Ruiz-Henestrosa <ruizh.cj+aur@gmail.com>
_hkgname=base-unicode-symbols
pkgname=haskell-${_hkgname}-git
pkgver=20120706
pkgrel=1
pkgdesc="Unicode alternatives for common functions and operators"
arch=('i686' 'x86_64')
url="http://hackage.haskell.org/package/$_hkgname"
license=('custom:BSD3')
depends=('ghc')
# BUG FIX: these previously expanded the undefined ${_hkname} (a typo),
# yielding the bogus value "haskell-"; the variable is named _hkgname.
provides=("haskell-${_hkgname}")
conflicts=("haskell-${_hkgname}")
install=$pkgname.install
_gitroot=git://github.com/roelvandijk/base-unicode-symbols.git
_gitname=$_hkgname

build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."
  # Reuse an existing checkout when possible, otherwise clone fresh.
  if [[ -d "$_gitname" ]]; then
    cd "$_gitname" && git pull origin
    msg "The local files are updated."
  else
    git clone "$_gitroot" "$_gitname"
  fi
  msg "GIT checkout done or server timeout"
  msg "Starting build..."
  # Build from a clean copy so local changes never leak into the package.
  rm -rf "$srcdir/$_gitname-build"
  git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"
  #
  # BUILD HERE
  #
  runhaskell Setup configure -O -p --enable-split-objs --enable-shared \
    --prefix=/usr --docdir="/usr/share/doc/$pkgname" \
    --libsubdir=\$compiler/site-local/\$pkgid
  runhaskell Setup build
  runhaskell Setup haddock
  runhaskell Setup register --gen-script
  runhaskell Setup unregister --gen-script
  # Force unregistration even when dependents exist.
  sed -i -r -e "s|ghc-pkg.*unregister[^ ]* |&'--force' |" unregister.sh
}

package() {
  cd "$srcdir/$_gitname-build"
  install -D -m744 register.sh "$pkgdir/usr/share/haskell/$pkgname/register.sh"
  install -m744 unregister.sh "$pkgdir/usr/share/haskell/$pkgname/unregister.sh"
  install -d -m755 "$pkgdir/usr/share/doc/ghc/html/libraries"
  ln -s "/usr/share/doc/$pkgname/html" "$pkgdir/usr/share/doc/ghc/html/libraries/$_hkgname"
  runhaskell Setup copy --destdir="$pkgdir"
}
# vim:set ts=2 sw=2 et:
| true
|
f77ac8d528dd75492e587e4b3e918b571707c829
|
Shell
|
fanboimsft/lfs-script
|
/misc/config_files/bash_profile
|
UTF-8
| 587
| 3.046875
| 3
|
[] |
no_license
|
# Begin ~/.bash_profile
# Written for Beyond Linux From Scratch
# by James Robertson <jameswrobertson@earthlink.net>
# updated by Bruce Dubbs <bdubbs@linuxfromscratch.org>
# Personal environment variables and startup programs.
# Personal aliases and functions should go in ~/.bashrc.  System wide
# environment variables and startup programs are in /etc/profile.
# System wide aliases and functions are in /etc/bashrc.
# Pull in interactive-shell configuration (aliases, functions).
# ($HOME is quoted so paths containing spaces do not word-split.)
if [ -f "$HOME/.bashrc" ] ; then
  source "$HOME/.bashrc"
fi
# Prepend the user's private bin directory to PATH when present.
# (pathprepend is provided by the system-wide profile scripts.)
if [ -d "$HOME/bin" ] ; then
  pathprepend "$HOME/bin"
fi
# Having . in the PATH is dangerous
#if [ $EUID -gt 99 ]; then
#  pathappend .
#fi
# End ~/.bash_profile
| true
|
1eadc11d2371b24a25a661ff33831caf405c6250
|
Shell
|
balajipothula/k8s
|
/setup-k8s-kubectl-redhat.sh
|
UTF-8
| 512
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Author      : BALAJI POTHULA <balaji.pothula@techie.com>,
# Date        : 20 June 2019,
# Description : Kubernetes kube setup on RHEL7.
# Please run this script as non root user (with out sudo)

# creating .kube directory for current user (non root user)
# ($HOME is quoted so paths containing spaces do not word-split)
mkdir -p "$HOME/.kube"
# copying kubernetes admin.conf to
# current user's .kube directory with name config.
# (-i prompts before overwriting an existing config)
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
# changing ownership of config file to current user.
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
| true
|
4ff6e5f1b4c9f757329fc7beba083b1ba56fb97d
|
Shell
|
hedlx/bbs
|
/travis/deploy.sh
|
UTF-8
| 2,237
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# deploy.sh - push things to the production server.
# This file can be executed by travis or locally.
# Usage: deploy.sh {rust|elm|clojure}
set -e
# Print a message and abort.
die() { echo "$*"; exit 1; }
ssh_opts="-o Port=359 -o User=bbs-backend -o StrictHostKeyChecking=no -o IdentityFile=travis/ssh-key"
# Decrypt the Travis-encrypted deploy key on first use.
if [ ! -f travis/ssh-key ];then
    openssl aes-256-cbc -K $encrypted_db197bbd43df_key -iv $encrypted_db197bbd43df_iv -in travis/ssh-key.enc -out travis/ssh-key -d
    chmod 600 travis/ssh-key
fi
eval "$(ssh-agent -s)"
# Stop the ssh-agent again on any exit path.
trap 'eval "$(ssh-agent -ks)"' EXIT
# $1 selects which component to deploy.
case "$1" in
    rust)
        # Pick the built backend binary (debug or release target dir).
        bin=(backend/target/*/backend)
        bin="${bin[0]}"
        [ -f "$bin" ] || die "No $bin"
        rm -rf tmp
        mkdir tmp
        cp -lr -t tmp \
$bin ./backend/migrations ./backend/im
        # Stream the payload to the server and run the remote install script
        # (quoted verbatim; it expands on the remote side).
        tar cz -C tmp . | ssh $ssh_opts hedlx.org '
set -e
rm -rf tmp/rust
mkdir -p tmp/rust
tar xzf - -C tmp/rust
sudo systemctl stop bbs-backend
mv tmp/rust/backend ./backend
fail=
~/.cargo/bin/diesel migration run \
--database-url \
postgres://bbs-backend@%2Fvar%2Frun%2Fpostgresql/bbs-staging \
--migration-dir tmp/rust/migrations || fail=1
rm -rf im
mv tmp/rust/im .
sudo systemctl start bbs-backend
sleep 1
if systemctl is-active --quiet bbs-backend
then echo Service is running
else echo Service is not running; exit 1
fi
if [ "$fail" ]
then echo "Migration failed"; exit 1
fi
'
        rm -rf tmp
        ;;
    elm)
        [ -f "front-elm/public/index.html" ] || die "No front-elm/public/index.html"
        # TODO move to ~/tmp/elm
        ssh -C $ssh_opts hedlx.org 'rm -rf front-elm.tmp'
        scp -C $ssh_opts -r front-elm/public hedlx.org:front-elm.tmp
        ssh -C $ssh_opts hedlx.org 'cd front-elm.tmp && rsync -av index.html main.js style.css manifest.json icons /srv/www/bbs/elm/ && echo moved'
        ;;
    clojure)
        [ -f "./front/resources/public/index.html" ] || die "No front/resources/public/index.html"
        # TODO move to ~/tmp/clojure
        ssh -C $ssh_opts hedlx.org 'rm -rf front-clj.tmp'
        mv front/resources/public/index.prod.html front/resources/public/index.html
        scp -C $ssh_opts -r front/resources/public hedlx.org:front-clj.tmp
        # TODO: atomic swap
        ssh -C $ssh_opts hedlx.org 'cd front-clj.tmp && rm -rf /srv/www/bbs/clj/* && mv * /srv/www/bbs/clj/ && echo moved'
        ;;
    *)
        echo Invalid parameters
        exit 1
esac
| true
|
aa268f44a0ec7fe341286a26d7b8bb9a8d5ea99c
|
Shell
|
aland-zhang/scriptisms
|
/misc/markdown-rsync
|
UTF-8
| 6,500
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Command-line
# Show usage and exit when called without arguments or with *-help.
[ $# -lt 1 -o "${1##*-}" == 'help' ] && cat << EOF && exit 1
USAGE: ${0##*/} [<option> ...] <source-dir> [<destination-dir>]
SYNOPSIS:
Synchronize the given source directory to the given destination directory,
replacing all Markdown (*.md) files with their HTML (*.html) equivalent.
This script is "thread-safe", meaning that it can be executed concurrently
with the same arguments in order to benefit from multiple CPUs/cores (for
very large directory structures).
OPTIONS:
-c, --copy
Copy the *.md files to the destination directory
(along the generated *.html files)
-i, --inplace
Synchronize files in the same "source" directory
-r, --recursive
Recurse into subdirectories
-t, --template <html-file>
Use the given file as HTML template, where the %{markdown} tag
shall be substituted with the actual markdown-to-HTML content
-m, --chmod <chmod-stanza>
Destination files permissions
-M, --chmod-dir <chmod-stanza>
Destination directories permissions
-o, --chown <chown-stanza>
Destination files owner
-O, --chown-dir <chown-stanza>
Destination directories owner
--force
(Re-)generate the *.html file even if it already exists
(do NOT use this option with multiple "threads")
AUTHOR:
Cedric Dufour - http://cedric.dufour.name
EOF
# Arguments
# Option state; empty string means "off"/"unset".
COPY=
INPLACE=
NOSUB='yes'
TEMPLATE=
CHMOD=
CHMOD_DIR=
CHOWN=
CHOWN_DIR=
FORCE=
SRC_DIR=
DST_DIR=
while [ -n "${1}" ]; do
  case "${1}" in
    '-c'|'--copy')
      COPY='yes'
      ;;
    '-i'|'--inplace')
      INPLACE='yes'
      ;;
    '-r'|'--recursive')
      NOSUB=
      ;;
    '-t'|'--template')
      [ -z "${2}" ] && echo "ERROR: Missing option argument (${1})" >&2 && exit 1
      shift; TEMPLATE="${1}"
      ;;
    '-m'|'--chmod')
      [ -z "${2}" ] && echo "ERROR: Missing option argument (${1})" >&2 && exit 1
      shift; CHMOD="${1}"
      ;;
    '-M'|'--chmod-dir')
      [ -z "${2}" ] && echo "ERROR: Missing option argument (${1})" >&2 && exit 1
      shift; CHMOD_DIR="${1}"
      ;;
    '-o'|'--chown')
      [ -z "${2}" ] && echo "ERROR: Missing option argument (${1})" >&2 && exit 1
      shift; CHOWN="${1}"
      ;;
    '-O'|'--chown-dir')
      [ -z "${2}" ] && echo "ERROR: Missing option argument (${1})" >&2 && exit 1
      shift; CHOWN_DIR="${1}"
      ;;
    '--force')
      FORCE='yes'
      ;;
    *)
      # First bare argument is the source, second the destination.
      if [ -z "${SRC_DIR}" ]; then SRC_DIR="${1}"
      elif [ -z "${DST_DIR}" ]; then DST_DIR="${1}"
      else echo "ERROR: Invalid (extra) argument (${1})" >&2 && exit 1
      fi
      ;;
  esac
  shift
done
# Check dependencies (and format support)
[ -z "$(which markdown)" ] && echo "ERROR[$$]: 'markdown' cannot be found" >&2 && exit 1
# Check directories and files
# Strip any trailing slash; --inplace defaults the destination to the source.
SRC_DIR=${SRC_DIR%%/}
DST_DIR=${DST_DIR%%/}
[ -n "${INPLACE}" -a -z "${DST_DIR}" ] && DST_DIR="${SRC_DIR}"
[ -n "${TEMPLATE}" -a ! -r "${TEMPLATE}" ] && echo "ERROR[$$]: Invalid/unreadable template file (${TEMPLATE})" >&2 && exit 1
[ ! -d "${SRC_DIR}" ] && echo "ERROR[$$]: Invalid/missing source directory (${SRC_DIR})" >&2 && exit 1
[ ! -r "${SRC_DIR}" ] && echo "ERROR[$$]: Unreadable source directory (${SRC_DIR})" >&2 && exit 1
[ ! -d "${DST_DIR}" ] && echo "ERROR[$$]: Invalid/missing destination directory (${DST_DIR})" >&2 && exit 1
[ ! -w "${DST_DIR}" ] && echo "ERROR[$$]: Unwritable destination directory (${DST_DIR})" >&2 && exit 1
# Lock function
DST_LOCK="${DST_DIR}/.${0##*/}.lock"

# Acquire the lock file atomically (via noclobber), retrying with a small
# random back-off for roughly 5 seconds before giving up.  Emits a waiting
# warning every 10 attempts and aborts the script once retries are exhausted.
function atomic_begin {
  local attempts=100
  while true; do
    attempts=$(( attempts - 1 ))
    if [ ${attempts} -le 0 ]; then
      echo "ERROR[$$]: Failed to acquire lock (${DST_LOCK})" >&2
      exit 1
    fi
    if [ $(( attempts % 10 )) -eq 0 ]; then
      echo "WARNING[$$]: Waiting for lock (${DST_LOCK})" >&2
    fi
    ( set -o noclobber; echo -n > "${DST_LOCK}" ) 2>/dev/null && break
    sleep 0.0$(( ${RANDOM} % 10 ))
  done
}

# Release the lock file.
function atomic_end {
  rm -f "${DST_LOCK}"
}
# Trap signals
# On interrupt, remove the lock and any half-written destination files.
trap 'echo "INTERRUPT[$$]: Cleaning and aborting" >&2; rm -f "${DST_LOCK}" "${p_dst}" "${p_dst_copy}"; exit 2' INT TERM
# Loop through files
echo "INFO[$$]: Looking for files to synchronize..."
# Enumerate regular files/symlinks; --recursive clears NOSUB (-maxdepth 1),
# --inplace limits the search to *.md files.  Newline IFS keeps paths with
# spaces intact (paths containing newlines are not supported).
IFS=$'\n'; for p_src in $(eval "find '${SRC_DIR}' ${NOSUB:+-maxdepth 1} \( -type f -o -type l \) -not -path '*/.git/*' ${INPLACE:+-name '*.md'}" | sort); do
  # Compute source parameters
  # A .nomarkdown marker in a directory excludes its files.
  d_src="$(dirname ${p_src})"
  [ -e "${d_src}/.nomarkdown" ] && continue
  # NOTE(review): e_src is assigned but never used.
  e_src="${p_src##*.}"
  # Compute destination parameters
  p_dst="${p_src}"
  p_dst="${DST_DIR}/${p_dst#${SRC_DIR}/}"
  d_dst="$(dirname ${p_dst})"
  if [ "${p_dst##*.}" == 'md' ]; then
    [ -n "${COPY}" ] && p_dst_copy="${p_dst}" || p_dst_copy=
    p_dst="${p_dst%.*}.html"
  fi
  # Check destination file
  # The lock makes the exists/newer check plus 'touch' claim atomic, so
  # concurrent instances never process the same file twice.
  atomic_begin
  if [ -z "${FORCE}" ]; then
    if [ -z "${INPLACE}" ]; then
      [ -e "${p_dst}" ] && atomic_end && continue
    else
      [ ! "${p_src}" -nt "${p_dst}" ] && atomic_end && continue
    fi
  fi
  if [ ! -d "${d_dst}" ]; then
    mkdir -p "${d_dst}"
    [ -n "${CHMOD_DIR}" ] && chmod ${CHMOD_DIR} "${d_dst}"
    [ -n "${CHOWN_DIR}" ] && chown ${CHOWN_DIR} "${d_dst}"
  fi
  [ ! -d "${d_dst}" ] && echo "WARNING[$$]: Failed to create destination directory (${d_dst}); skipping..." >&2 && atomic_end && continue
  [ ! -w "${d_dst}" ] && echo "WARNING[$$]: Unable to write to destination directory (${d_dst}): skipping..." >&2 && atomic_end && continue
  touch "${p_dst}"
  atomic_end
  # Permissions
  [ -n "${CHMOD}" ] && chmod ${CHMOD} "${p_dst}"
  [ -n "${CHOWN}" ] && chown ${CHOWN} "${p_dst}"
  if [ -n "${p_dst_copy}" ]; then
    touch "${p_dst_copy}"
    [ -n "${CHMOD}" ] && chmod ${CHMOD} "${p_dst_copy}"
    [ -n "${CHOWN}" ] && chown ${CHOWN} "${p_dst_copy}"
  fi
  # Synchronize
  echo "INFO[$$]: ${p_src} -> ${p_dst}"
  # ... markdown
  # Markdown files are converted to HTML (bare wrapper or via the template);
  # the sed rewrites intra-site href="...md" links to their .html twins.
  if [ "${p_src##*.}" == 'md' ]; then
    if [ -z "${TEMPLATE}" ]; then
      cat > "${p_dst}" << EOF
<!DOCTYPE html>
<HTML>
<BODY>
EOF
      markdown "${p_src}" \
        | sed 's,href="\([^:"]*/[^"]*\|[^/"]*\).md",href="\1.html",gi' \
        >> "${p_dst}"
      cat >> "${p_dst}" << EOF
</BODY>
</HTML>
EOF
    else
      markdown "${p_src}" \
        | sed -e '/%{markdown}/ {r /dev/stdin
; d}' "${TEMPLATE}" \
        | sed 's,href="\([^:"]*/[^"]*\|[^/"]*\).md",href="\1.html",gi' \
        > "${p_dst}"
    fi
    [ -n "${p_dst_copy}" ] && cat "${p_src}" > "${p_dst_copy}"
  else
    # Non-markdown files are copied verbatim.
    cat "${p_src}" > "${p_dst}"
  fi
done
# Done
echo "INFO[$$]: Done"
exit 0
| true
|
78cc612740edfd6764695f636664d1f20795f56b
|
Shell
|
syedjafar01/Syed
|
/ShellScripts/Assignment_1/script2.sh
|
UTF-8
| 218
| 3.796875
| 4
|
[] |
no_license
|
#checking file present or not, if present then delete or else archive the folder
# Usage: script2.sh <file-name> <directory>
#   Deletes <directory>/<file-name> when it exists; otherwise archives the
#   whole <directory> as <file-name>.tar.gz in the current working directory.
main() {
  if [ $# -lt 2 ]; then
    echo "usage: $0 <file-name> <directory>" >&2
    return 1
  fi
  # Quote all expansions so names with spaces work; '--' guards against
  # file names that start with '-'.
  if [ -f "$2/$1" ]; then
    rm -- "$2/$1"
    echo "file deleted"
  else
    echo "file not exist"
    tar -zcvf "$1.tar.gz" "$2"
    echo "archived"
  fi
}
main "$@"
| true
|
50aac993d4889f6a449ed411cc22a4dd06deb9bb
|
Shell
|
tmarques/chromebook
|
/spring-thermal.sh
|
UTF-8
| 2,045
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Step the cpufreq maximum frequency up/down once per second so the CPU
# temperature converges on TEMP_TARGET, with separate caps for battery/AC.

# Hard frequency caps in kHz, on battery vs. AC power.
MAX_FREQ_BAT=1600000
MAX_FREQ_ACP=1700000
# Target temperature in degrees Celsius.
TEMP_TARGET=75
# sysfs nodes for cpufreq, power-supply status and the thermal sensor.
NOD_SUBSYS='/sys/devices/system/cpu/cpu0/cpufreq/'
NOD_FREQMIN=$NOD_SUBSYS'/scaling_min_freq'
NOD_FREQMAX=$NOD_SUBSYS'/scaling_max_freq'
NOD_FREQ0=$NOD_SUBSYS'/scaling_cur_freq'
NOD_FREQS=$NOD_SUBSYS'/scaling_available_frequencies';
NOD_PS='/sys/class/power_supply/cros-ec-charger/online'
NOD_TEMP='/sys/class/thermal/thermal_zone0/temp'
# Available frequencies; assumed to be listed in descending order — TODO confirm.
FREQS=`cat $NOD_FREQS`

# One control iteration: read temperature/power state, pick the next
# frequency limit, apply it, and repaint the status display.
function work {
  tput cup 0 0;
  # Dynamic limited frequency
  FREQT=`cat $NOD_FREQMAX`
  FREQO=$FREQT;
  FREQ0=`cat $NOD_FREQ0`
  FREQ_UP=0;
  # Pick the cap according to the charger state (0 = on battery).
  if [ `cat $NOD_PS` -eq 0 ]; then
    MAX_FREQ=$MAX_FREQ_BAT;
  else
    MAX_FREQ=$MAX_FREQ_ACP;
  fi;
  # Sensor reports millidegrees; convert to whole degrees C.
  TEMP=`cat $NOD_TEMP`
  let TEMP=TEMP/1000
  # Too hot: step down to the next lower available frequency.
  if [ $TEMP -gt $TEMP_TARGET ]; then
    for FREQ in $FREQS; do
      if [ $FREQ -lt $FREQT ]; then
        FREQT=$FREQ
        break
      fi;
    done
  else
    if [ $FREQO -lt $MAX_FREQ ]; then
      FREQ_UP=1
    fi;
  fi
  # Don't want to clock up if we're already very
  # close to TEMP_TARGET.
  if [ $TEMP -gt $(( $TEMP_TARGET-2 )) ]; then
    FREQ_UP=0;
  fi
  # Step up: scan the frequency list backwards for the next higher step.
  if [ $FREQ_UP -eq 1 ]; then
    FREQS_TMP=( $FREQS )
    let I=${#FREQS_TMP[@]}-1
    while [ $I -ge 0 ]; do
      if [ ${FREQS_TMP[I]} -gt $FREQT ]; then
        FREQT=${FREQS_TMP[I]}
        break
      fi;
      let I=I-1
    done
  fi
  # Never exceed the power-source cap.
  if [ $FREQT -gt $MAX_FREQ ]; then
    FREQT=$MAX_FREQ;
  fi;
  # This is only allowed in really bad spots, because we kind of
  # actually want a minimally responsive system.
  if [ $FREQT -lt `cat $NOD_FREQMIN` ]; then
    echo -n $FREQT >> $NOD_FREQMIN;
  fi
  # Apply the new limit and log it (indented entries mark down-steps).
  if [ $FREQO -ne $FREQT ]; then
    echo -n $FREQT >> $NOD_FREQMAX;
    if [ $FREQ_UP -eq 1 ]; then
      echo ${FREQT} >> /tmp/cpufreq.log
    else
      echo "     ${FREQT}" >> /tmp/cpufreq.log
    fi;
  fi;
  # Display in MHz.
  let FREQ0=FREQ0/1000
  let FREQT=FREQT/1000
  printf "%4d/%4dMHz - %dC\n\n" ${FREQ0} ${FREQT} ${TEMP}
  free -m
  printf '\n'
  ectool powerinfo 2> /dev/null
}

# Main loop: one adjustment per second until interrupted.
tput clear;
while [ 1 ]; do
  work;
  sleep 1;
done
| true
|
27d90a5aa2a60061f58cbf3305673c443ffd06a3
|
Shell
|
gfd-dennou-club/Dennou-CCM
|
/exp/APESpinUpSolarDepExp/common/APESpinUpSolarDepExp_job_inc.sh
|
UTF-8
| 9,581
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#------------------------------------------------------------------------------------------
# Copyright (c) 2016-2016 Yuta Kawai. All rights reserved.
#-------------------------------------------------------------------------------------------
# * Dennou-CCM launcher script
# A shell script used to perform climate numerical experiments with a coupled model.
#
# The coupled system is composed of atmopheric general circulation model, ocean general
# circulation model, sea ice model and others. This script assumes that coupling 'Jcup'
# is used to couple their models.
#
# The coupled system is integrated temporally in the following steps.
# 1) First, the coupled model is integrated for a short period (ex. 6 months). After that,
# Some ruby scripts generate data files necessary to specify initial and boundary
# condition in next OGCM standalone run.
# 2) Ocean model (often run with sea ice model) alone is integrated for much longer period
# than that of 1) (ex. 10 years). After that, some ruby scripts generate data files necessary
# to specify initial condition of ocean in next coupled run.
# 3) Go back to step 1.
#
#********************************************************************************************
#--------------------------------------------------------------------------------------------
## Definition of some functions ##############################
# Create a directory if it does not exist yet; report either way.
# Arguments: $1 - path of the directory to create.
function create_dir() {
  # Fixes: make dirPath local (the original leaked it as a global) and
  # quote every expansion so paths with spaces work.
  local dirPath=$1
  if [ ! -e "$dirPath" ]; then
    echo "Create directory '${dirPath}' .."
    mkdir "$dirPath"
    # chown ykawai:ykawai $dirPath
    # chmod g+w $dirPath
  else
    echo "Skip mkdir operation because '${dirPath}' already exist."
  fi
}
### Main parts ##############################################
# Prepare directories to save output data.
# Stage the model executables and the standalone-ocean binary/libraries into
# the per-experiment working directories.
cp ${TOPDIR}/bin/atm_driver ${atm_wdir}
cp ${TOPDIR}/bin/ocn_driver ${ocn_wdir}
cp ${ocn_standalone_pedir}/${ocn_standalone_pename} ${ocn_wdir}/ocn_standalone
cp -r ${ocn_standalone_libdir} ${ocn_wdir}
echo "Create some directories to save data.."
# One output directory per cycle for coupled-ATM, coupled-OCN and
# standalone-OCN runs.
for ((n=1; n<=nCycle; n++)) ; do
create_dir "${atm_wdir}/cycle${n}-couple"
create_dir "${ocn_wdir}/cycle${n}-couple"
create_dir "${ocn_wdir}/cycle${n}-standalone"
done
cd $PBS_O_WORKDIR
#- Perform temporal integration of coupled system -------------------------------
# Model time (days) at which the first coupled segment restarts.
coupledRunRestartTime=$(((StartCycleNum-1)*coupledTimeIntrvPerCycle))
# Main integration loop: each cycle runs (1) a coupled ATM+OCN segment, then
# optionally (2) a long OGCM standalone segment whose final state seeds the
# next coupled segment (see the two sed-template branches below).
for ((n=StartCycleNum; n<=nCycle; n++)) ; do
######################################################################
# Run coupled model
######################################################################
atmDirPath="${atm_wdir}/cycle${n}-couple"
ocnDirPath="${ocn_wdir}/cycle${n}-couple"
ocnDirPath_standalone="${ocn_wdir}/cycle${n}-standalone"
# End of this coupled segment in model days.
coupledRunEndTime=$((coupledRunRestartTime + coupledTimeIntrvPerCycle))
echo "-- cycle=${n} (coupled AOGCM run) -- time range =${coupledRunRestartTime} - ${coupledRunEndTime} [day]"
echo "** Create configuration file for AGCM **"
# Build the sed program that fills the AGCM namelist template: each
# '#placeholder#' is replaced with this cycle's value; restart data comes
# from the previous cycle's coupled run.
sedArgs=`cat <<EOF
s!#restart_file_io_nml_InputFile#!${atm_wdir}/cycle$((n-1))-couple/rst.nc!g;
s!#restart_file_io_nml_IntValue#!730.0!g;
s!#timeset_nml_RestartTimeValue#!${coupledRunRestartTime}!g;
s!#timeset_nml_InitYear#!2000!g;
s!#timeset_nml_EndYear#!2000!g;
s!#timeset_nml_EndDay#!$((coupledRunEndTime+1))!g;
s!#gtool_historyauto_nml_IntValue#!146.0!g;
s!#rad_DennouAGCM_nml_RstInputFile#!${atm_wdir}/cycle$((n-1))-couple/rst_rad.nc!g;
s!#rad_DennouAGCM_nml_SolarConst#!${SolarConst}!g;
EOF
`
atm_nml=${atmDirPath}/${atm_nml_template##*/}
sed -e "${sedArgs}" ${atm_nml_template} > ${atm_nml}
echo "** Create configuration file for OGCM **"
OcnRestartInFile=""
SIceRestartInFile=""
# Same templating for the OGCM namelist (common part).
sedArgs=`cat << EOF
s!#gtool_historyauto_nml_IntValue#!146.0!g;
s!#gtool_historyauto_nml_OriginValue#!${coupledRunRestartTime}!g;
s!#gtool_historyauto_nml_TerminusValue#!${coupledRunEndTime}!g;
s!#OcnRestartFile_nml_InputFileName#!${OcnRestartInFile}!g;
s!#OcnRestartFile_nml_OutputFileName#!RestartOcnData.nc!g;
s!#OcnRestartFile_nml_IntValue#!730.0!g;
s!#SIceRestartFile_nml_InputFileName#!${SIceRestartInFile}!g;
s!#SIceRestartFile_nml_OutputFileName#!RestartSIceData.nc!g;
s!#SIceRestartFile_nml_IntValue#!730.0!g;
s!#TemporalInteg_nml_DelTimeHour#!${coupleODelTimeHour}!g;
s!#TemporalInteg_nml_RestartTimeVal#!${coupledRunRestartTime}!g;
s!#TemporalInteg_nml_InitYear#!2000!g;
s!#TemporalInteg_nml_EndYear#!2000!g; s!#TemporalInteg_nml_EndDay#!$((coupledRunEndTime+1))!g;
s!#BoundaryCondition_nml_ThermBCSurface#!PrescFlux!g;
s!#BoundaryCondition_nml_SaltBCSurface#!PrescFlux!g;
s!#Exp_APECoupleClimate_nml_RunCycle#!${n}!g;
s!#Exp_APECoupleClimate_nml_RunTypeName#!Coupled!g;
s!#Exp_APECoupleClimate_nml_SfcBCDataDir#!${ocn_wdir}/cycle$((n-1))-couple/!g;
s!#Exp_APECoupleClimate_nml_SfcBCMeanInitTime#!${coupledRunRestartTime}.0!g;
s!#Exp_APECoupleClimate_nml_SfcBCMeanEndTime#!${coupledRunRestartTime}.0!g;
EOF
`
sedArgs2=""
# Choose where the OGCM restart state comes from: the previous standalone
# segment (if standalone segments are enabled) or the previous coupled run.
# NOTE(review): this guard reads 'standaloneTimeIntrvPerCyc' while every
# other use spells 'standaloneTimeIntrvPerCycle'; if the short name is
# unset the test errors and falls through to the else branch -- confirm
# which spelling is intended.
if [ $standaloneTimeIntrvPerCyc -gt 0 ] ; then
sedArgs2=`cat << EOF
s!#Exp_APECoupleClimate_nml_RestartDataDir#!${ocn_wdir}/cycle$((n-1))-standalone/!g;
s!#Exp_APECoupleClimate_nml_RestartMeanInitTime#!$((standaloneTimeIntrvPerCycle))!g;
s!#Exp_APECoupleClimate_nml_RestartMeanEndTime#!${standaloneTimeIntrvPerCycle}.0!g;
EOF
`
else
sedArgs2=`cat << EOF
s!#Exp_APECoupleClimate_nml_RestartDataDir#!${ocn_wdir}/cycle$((n-1))-couple/!g;
s!#Exp_APECoupleClimate_nml_RestartMeanInitTime#!${coupledRunRestartTime}.0!g;
s!#Exp_APECoupleClimate_nml_RestartMeanEndTime#!${coupledRunRestartTime}.0!g;
EOF
`
fi
ocn_nml=${ocnDirPath}/${ocn_nml_template##*/}
sed -e "${sedArgs}" ${ocn_nml_template} | sed -e "${sedArgs2}" > ${ocn_nml}
#
if [ $n -eq $StartCycleNum ] && $coupledRunSkipSCyc ; then
echo "skip coupled run .."
else
echo "** Execute Dennou-OGCM ******************************"
cp ${EXPDIR}/DCCM_AtmT21.conf ${atmDirPath}/DCCM.conf
cp ${EXPDIR}/DCCM_AtmT21.conf ${ocnDirPath}/DCCM.conf
# Launch ATM and OCN as a single MPMD mpirun job (':' separates the two
# program groups); each side runs in its own working directory.
${MPIRUN} \
-wdir ${atmDirPath} -env OMP_NUM_THREADS ${atm_THREADS_NUM} \
-n ${atm_PE_NUM} ${atm_pe} -N=${atm_nml} : \
-wdir ${ocnDirPath} -env OMP_NUM_THREADS ${ocn_THREADS_NUM} \
-env LD_LIBRARY_PATH ${ocn_wdir}/lib \
-n ${ocn_PE_NUM} ${ocn_pe} --N=${ocn_nml} \
1> Stdout_couple_${exp_name} 2>Stderr_couple_${exp_name}
# NOTE(review): the message text ("Exit stauts is 0") contradicts the
# condition (non-zero status); likely a copy/paste slip in the string.
if [ $? -ne 0 ]; then
echo "Exit stauts is 0. Fail to run DCPCM. Exit.."; exit
fi
coupledRunEndTimeSec=`echo "$coupledRunEndTime*86400" | bc`
fi
coupledRunRestartTime=${coupledRunEndTime}
#########################################################################
# Run standalone ocean model with sea-ice model
########################################################################
# NOTE(review): same 'PerCyc' vs 'PerCycle' spelling question as above.
if [ $standaloneTimeIntrvPerCyc -gt 0 ] ; then
echo "-- cycle=${n} (OGCM stadalone run) -- ${standaloneTimeIntrvPerCycle} [day]"
# Standalone OGCM namelist: forced by time-mean surface fluxes taken from
# the tail of this cycle's coupled run.
sedArgs=`cat << EOF
s!#gtool_historyauto_nml_IntValue#!1825.0!g;
s!#gtool_historyauto_nml_OriginValue#!0.0!g;
s!#gtool_historyauto_nml_TerminusValue#!${standaloneTimeIntrvPerCycle}!g;
s!#OcnRestartFile_nml_InputFileName#!!g;
s!#OcnRestartFile_nml_OutputFileName#!RestartOcnData.nc!g;
s!#OcnRestartFile_nml_IntValue#!9125.0!g;
s!#SIceRestartFile_nml_InputFileName#!!g;
s!#SIceRestartFile_nml_OutputFileName#!RestartSIceData.nc!g;
s!#SIceRestartFile_nml_IntValue#!9125.0!g;
s!#TemporalInteg_nml_DelTimeHour#!${standaloneODelTimeHour}!g;
s!#TemporalInteg_nml_RestartTimeVal#!0.0!g;
s!#TemporalInteg_nml_InitYear#!2000!g;
s!#TemporalInteg_nml_EndYear#!$((2000 + standaloneTimeIntrvPerCycle/365))!g;
s!#TemporalInteg_nml_EndDay#!10!g;
s!#BoundaryCondition_nml_ThermBCSurface#!PrescFlux_Han1984!g;
s!#BoundaryCondition_nml_SaltBCSurface#!PrescFlux!g;
s!#Exp_APECoupleClimate_nml_RunCycle#!${n}!g;
s!#Exp_APECoupleClimate_nml_RunTypeName#!Standalone!g;
s!#Exp_APECoupleClimate_nml_SfcBCDataDir#!${ocn_wdir}/cycle$((n))-couple/!g;
s!#Exp_APECoupleClimate_nml_SfcBCMeanInitTime#!$((coupledRunEndTime - 438)).0!g;
s!#Exp_APECoupleClimate_nml_SfcBCMeanEndTime#!${coupledRunEndTime}.0!g;
s!#Exp_APECoupleClimate_nml_RestartDataDir#!${ocn_wdir}/cycle$((n))-couple/!g;
s!#Exp_APECoupleClimate_nml_RestartMeanInitTime#!${coupledRunEndTime}.0!g;
s!#Exp_APECoupleClimate_nml_RestartMeanEndTime#!${coupledRunEndTime}.0!g;
EOF
`
ocn_nml=${ocnDirPath_standalone}/${ocn_nml_template##*/}
sed -e "${sedArgs}" ${ocn_nml_template} > ${ocn_nml}
#
${MPIRUN} \
-wdir ${ocnDirPath_standalone} -env OMP_NUM_THREADS ${ocn_standalone_THREADS_NUM} \
-env LD_LIBRARY_PATH ${LD_LIBRARY_PATH} \
-n ${ocn_standalone_PE_NUM} \
${ocn_standalone_pe} --N=${ocn_nml} \
1> Stdout_standalone_${exp_name} 2>Stderr_standalone_${exp_name}
if [ $? -ne 0 ]; then
echo "Exit stauts is 0. Fail to run Dennou-OGCM(stand-alone mode). Exit.."; exit
fi
fi
done
| true
|
bb19df009c9cf0d105c0f28720e74e1cb206990e
|
Shell
|
wellington1993/packages-community
|
/manjaro-browser-settings/PKGBUILD
|
UTF-8
| 1,471
| 2.78125
| 3
|
[] |
no_license
|
# Maintainer: Ramon Buldó <rbuldo@gmail.com>
# Maintainer: Bernhard Landauer <oberon@manjaro.org
# Maintainer: Stefano Capitani <stefano@manjaro.org>
pkgname=manjaro-browser-settings
pkgver=20180220
pkgrel=1
pkgdesc="Manjaro Linux settings browser defaults"
arch=('i686' 'x86_64')
url="https://github.com/manjaro/$pkgname"
license=('GPL')
# Pinned upstream commit the source tarball is fetched from.
_gitcommit=f5c4c5d06b5256a92342c73c9069f659e02152cb
# Basename of the alpm hook/script files installed by package().
_hook=manjaro-browser-settings
conflicts=('manjaro-firefox-settings')
replaces=('manjaro-firefox-settings')
source=("$pkgname-$_gitcommit.tar.gz::$url/archive/$_gitcommit.tar.gz")
md5sums=('d6c0c31fa92511e0f6d4ac40b2080cb3')
pkgver() {
  # The package version is simply today's date in YYYYMMDD form.
  date '+%Y%m%d'
}
package() {
# Stage the browser default-settings files from the extracted source tree
# into $pkgdir.
cd $pkgname-$_gitcommit
mkdir -p $pkgdir/usr/lib/{chrome,chromium,{firefox,firefox-developer-edition,palemoon}/distribution}
cp chrome/* $pkgdir/usr/lib/chrome
cp chrome/* $pkgdir/usr/lib/chromium
cp palemoon/* $pkgdir/usr/lib/palemoon/distribution
install -dm755 $pkgdir/etc/skel/.config/qupzilla/profiles
cp -r qupzilla/* $pkgdir/etc/skel/.config/qupzilla/profiles
# On i686 the firefox directories are removed (per this guard).
[[ "$CARCH" = "i686" ]] && rm -r $pkgdir/usr/lib/firefox{,-developer-edition}
install -Dm644 firefox/distribution.ini $pkgdir/etc/manjaro-firefox.ini
#Hook
# Install pacman pre/post transaction hooks and their backing scripts.
install -Dm644 $_hook-pre.hook $pkgdir/usr/share/libalpm/hooks/$_hook-pre.hook
install -Dm644 $_hook-post.hook $pkgdir/usr/share/libalpm/hooks/$_hook-post.hook
install -Dm755 $_hook-pre.script $pkgdir/usr/share/libalpm/scripts/$_hook-pre
install -Dm755 $_hook-post.script $pkgdir/usr/share/libalpm/scripts/$_hook-post
}
| true
|
655b482bf94923fc27a8c099a41d68ca219014c8
|
Shell
|
juliendelplanque/PostgresV3
|
/app
|
UTF-8
| 2,647
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is inspired by st-exec.sh from http://stfx.eu/pharo-server/
# originally written by Sven Van Caekenberghe
function usage() {
# Print the help text (with $0 and ${script_home} expanded) and exit 1.
# NOTE(review): the heredoc delimiter is unquoted, so the backticks around
# `deploy.yml` below are executed as a command substitution -- probably
# unintended; confirm (quoting END would also stop $0/${script_home}
# expansion, so the fix needs escaping instead).
cat <<END
Usage: $0 <command>
manage a Smalltalk server.
You *must* provide install.st and start.st files right next to the image
file.
start and stop command takes an optional pid file. By the default, the
pid file will be '${script_home}/pharo.pid'.
Commands:
install run install.sh on the image and then quit.
start run the image with start.st in background
stop stop the server.
deploy deploy to the server using the `deploy.yml` ansible recipe
pid print the process id
END
exit 1
}
# Setup vars
script_home=$(dirname $0)
script_home=$(cd $script_home && pwd)   # absolute path to this script's directory
command=$1
image="$script_home/pharo.image"
pid_file=${2:-"$script_home/pharo.pid"}   # optional 2nd arg overrides the pid file
echo $pid_file
# Pick the Pharo VM binary for the host OS.
if [[ "$OSTYPE" == "linux-gnu" ]]; then
vm=pharo-vm-nox
elif [[ "$OSTYPE" == "darwin"* ]]; then
vm=/Applications/Pharo.app/Contents/MacOS/Pharo
fi
# echo Working directory $script_home
# Run the ansible deploy recipe against the inventory in ansible/hosts.ini.
function deploy() {
ansible-playbook -i ansible/hosts.ini ansible/deploy.yml
}
# Load install.st into the image (echoes the command first for visibility).
function install() {
echo $vm $image install.st
$vm $image install.st
}
function start() {
  # Launch the Pharo image with start.st detached in the background and
  # record its pid in $pid_file.
  # NOTE(review): $script is referenced here but never assigned in this file
  # chunk -- confirm where it is meant to be set.
  echo Starting $script in background
  if [ -e "$pid_file" ]; then
    rm -f "$pid_file"
  fi
  echo "$pid_file"
  echo $vm $image start.st
  # Fixes two defects in the original:
  #  - missing '&': nohup ran in the foreground, so the script blocked here
  #    and $! did not refer to the VM process at all;
  #  - '2>&1 >/dev/null' left stderr attached to the terminal; the order
  #    below silences both streams as intended.
  nohup $vm $image start.st >/dev/null 2>&1 &
  echo $! >"$pid_file"
}
function stop() {
# Stop the server: prefer the pid recorded in $pid_file; otherwise fall
# back to scanning the process list and killing every match.
echo Stopping $pid_file
if [ -e "$pid_file" ]; then
pid=`cat $pid_file`
echo Killing $pid
kill $pid
rm -f $pid_file
else
echo Pid file not found: $pid_file
echo Searching in process list for $script
# NOTE(review): $script is never assigned in this file chunk, which would
# make this grep pattern empty -- confirm.
pids=`ps ax | grep $script | grep -v grep | grep -v $0 | awk '{print $1}'`
if [ -z "$pids" ]; then
echo No pids found!
else
for p in $pids; do
# Skip the pid we may already have killed above.
if [ $p != "$pid" ]; then
echo Killing $p
kill $p
fi
done
fi
fi
}
function printpid() {
# Print the server pid from $pid_file, falling back to a process-list scan
# (mirrors the lookup logic in stop()).
if [ -e $pid_file ]; then
cat $pid_file
else
echo Pid file not found: $pid_file
echo Searching in process list for $script
pids=`ps ax | grep $script | grep -v grep | grep -v $0 | awk '{print $1}'`
if [ -z "$pids" ]; then
echo No pids found!
else
echo $pids
fi
fi
}
# Dispatch the first CLI argument to the matching action; anything else
# prints the usage text and exits non-zero.
case $command in
install)
install
;;
start)
start
;;
stop)
stop
;;
deploy)
deploy
;;
pid)
printpid
;;
*)
usage
;;
esac
| true
|
a58d810af4eec25eaeab31cea8366c2122795dab
|
Shell
|
williamswalsh/BASh-scripting-bible
|
/31_B_dollar.sh
|
UTF-8
| 138
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# $$ expands to the PID of the current shell, i.e. this script.
# Fix: '[[ true ]]' only tests that the literal string "true" is non-empty
# (always true); 'while true' runs the intended infinite loop explicitly.
while true; do
  echo "Process PID: $$"
  sleep 1
done
| true
|
58300711840bce6436804fcf33f9a5644a37d05e
|
Shell
|
gpdolotina/vagrant-gpdolotina-xenial64
|
/provisioning/move_mysql.sh
|
UTF-8
| 1,424
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
## NOTE/WARNING
##
## ONLY EXECUTE THIS SCRIPT FOR INITIAL PROVISIONING
## AND ONLY IF YOUR LOCAL MYSQL FOLDER DO NOT ANY
## DATABASES ALREADY CREATED
##
## References:
# https://www.digitalocean.com/community/tutorials/how-to-move-a-mysql-data-directory-to-a-new-location-on-ubuntu-16-04
# NOTE(review): this guard only runs the move when /var/mysql ALREADY
# exists, yet the body then mkdir's it; the intended condition may be
# '! -d /var/mysql' -- confirm before reuse.
if [ -d "/var/mysql" ]; then
echo -e "\n\n---------MOVING MYSQL datadir---------\n\n"
# create new datadir
mkdir -pv /var/mysql/
# stop mysql
sudo systemctl stop mysql && sudo systemctl status mysql
# copy files from the original mysql datadir to the new datadir
rsync -rtavzh /var/lib/mysql/ /var/mysql/
# make sure that the copy is the same as the original
sudo diff -r /var/mysql/ /var/lib/mysql/
# backup the original datadir
sudo mv /var/lib/mysql/ /var/lib/mysql.bak/
# change the datadir target in the conf files
# (rewrites every 'datadir ...' line to point at /var/mysql)
sudo sh -c "sed -i 's/datadir\(.*\)/datadir \= \/var\/mysql/g' /etc/mysql/mysql.conf.d/mysqld.cnf"
sudo sh -c "sed -i 's/datadir\(.*\)/datadir \= \/var\/mysql/g' /etc/mysql/my.cnf"
# modify apparmor settings
# NOTE(review): the alias line is appended on every run; re-running the
# script duplicates it in /etc/apparmor.d/tunables/alias.
sudo systemctl stop apparmor && sudo systemctl status apparmor
sudo sh -c "echo alias /var/lib/mysql \-\> /var/mysql, >> /etc/apparmor.d/tunables/alias"
sudo systemctl start apparmor && sudo systemctl status apparmor
# start mysql server
sudo systemctl start mysql && sudo systemctl status mysql
fi
| true
|
cfa4e04851a80379c6f2c6e09719777e2617a246
|
Shell
|
TheFoundation/duplicity-restore-external
|
/s3restore
|
UTF-8
| 864
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore a single file from a duplicity backup stored on S3.
# Usage: s3restore <date> <file> <restore-to>
# Export some ENV variables so you don't have to type anything
export AWS_ACCESS_KEY_ID="AWSKEYAWSKEY"
export AWS_SECRET_ACCESS_KEY="AWSSECRETKEYAWSSECRETKEY"
export PASSPHRASE="ENCRYPTIONPASSPHRASE"
# The S3 destination followed by bucket name
DEST="s3+http://my-bucket-name/folder"
# Your GPG key
#GPG_KEY=YOUR_GPG_KEY
##COMMENT OUT GPG_KEY AND UNCOMMENT "export PASSPHRASE" to enable backup just with password
#duplicity list-current-files --timeout=2400 --tempdir /tmp/dupltemp --num-retries=500 -t 6M ${DEST}
# Fix: a usage error now goes to stderr and exits non-zero (the original
# 'exit' returned 0, which hid the failure from callers).
if [ $# -lt 3 ]; then echo "Usage $0 <date> <file> <restore-to>" >&2; exit 1; fi
# Positional args are quoted so dates/paths with spaces survive.
duplicity -v9 --s3-use-new-style \
  --s3-european-buckets \
  --restore-time "$1" \
  --file-to-restore "$2" \
  ${DEST} "$3"
# Reset the ENV variables. Don't need them sitting around
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset PASSPHRASE
| true
|
9a55168ef4b806bf2aa6c8e473211dd0b67f361f
|
Shell
|
j3N0/arch-config
|
/.zshrc
|
UTF-8
| 375
| 2.640625
| 3
|
[] |
no_license
|
# oh-my-zsh installation directory (Arch system package location).
ZSH=/usr/share/oh-my-zsh/
export LANG=zh_CN.UTF-8
# Start/stop a local shadowsocks client daemon.
alias sstart='sudo sslocal -c ~/.shadowsocks.json -d start'
alias sstop='sudo sslocal -c ~/.shadowsocks.json -d stop'
ZSH_THEME="robbyrussell"
DISABLE_AUTO_UPDATE="true"
plugins=(
git
)
# Create the oh-my-zsh cache directory on first run, then load the framework.
ZSH_CACHE_DIR=$HOME/.cache/oh-my-zsh
if [[ ! -d $ZSH_CACHE_DIR ]]; then
mkdir $ZSH_CACHE_DIR
fi
source $ZSH/oh-my-zsh.sh
| true
|
cd20d4735d1a7dd155edb4e6667620f30119a8e0
|
Shell
|
castle/zsh-test-suite
|
/random_traffic.sh
|
UTF-8
| 448
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Drive a weighted random mix of simulated login-traffic scripts forever.
while true; do
  # Fixes two defects in the original:
  #  - '[[ $DICE < 60 ]]' is a LEXICOGRAPHIC string comparison inside [[ ]],
  #    so e.g. 9 compared greater than 60; (( )) compares numerically;
  #  - 'RANDOM %= 100' assigned the reduced value back into RANDOM,
  #    perturbing the generator; plain '%' leaves it alone.
  DICE=$(( RANDOM % 100 ))
  if (( DICE < 60 )); then
    zsh ./login_from_new_client.sh
  elif (( DICE < 90 )); then
    zsh ./login_from_new_region.sh
  elif (( DICE < 94 )); then
    zsh ./forgetful_user.sh
  elif (( DICE < 96 )); then
    zsh ./credential_stuffing.sh
  elif (( DICE < 98 )); then
    zsh ./brute_force.sh
  else
    zsh ./brute_force_botnet.sh
  fi
  sleep 60
done
| true
|
0faafc55f7d205a8f53ef8efaab88cd26ff2554c
|
Shell
|
rmoorewrs/tic-windows-remote-clients
|
/wrs-remote-clients-2.0.2/python-heatclient-1.11.0/tools/tox_install.sh
|
UTF-8
| 831
| 3.5
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Client constraint file contains this client version pin that is in conflict
# with installing the client from source. We should remove the version pin in
# the constraints file before applying it for from-source installation.
CONSTRAINTS_FILE=$1
shift 1
set -e
# Local copy of the (possibly remote) upper-constraints file.
localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
# Allow a bare filesystem path by turning it into a file:// URL for curl.
if [[ $CONSTRAINTS_FILE != http* ]]; then
CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE
fi
curl $CONSTRAINTS_FILE --insecure --progress-bar --output $localfile
pip install -c$localfile openstack-requirements
# This is the main purpose of the script: Allow local installation of
# the current repo. It is listed in constraints file and thus any
# install will be constrained and we need to unconstrain it.
edit-constraints $localfile -- $CLIENT_NAME
# Install the remaining arguments (the package under test) using the
# edited constraints file.
pip install -c$localfile -U $*
exit $?
| true
|
c6b9ccb295fc76cae2687e69f64b364a7216e07e
|
Shell
|
Patrik-Stas/indyjump
|
/scripts/indyjump-utils.sh
|
UTF-8
| 6,046
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# INDYJUMPSCRIPT
# Canonical library identifiers used throughout this helper file; filenames
# and validation below are all keyed on these names.
INDYJUMP_INDY="libindy"
INDYJUMP_VCX="libvcx"
INDYJUMP_NULLPAY="libnullpay"
INDYJUMP_PGWALLET="libindystrgpostgres"
# Emit ">> ERROR: <all args>" on stderr.
errcho(){ printf '%s\n' ">> ERROR: $*" >&2; }
function magentaEcho() {
  # Render $1 in bold magenta (ANSI), with no trailing newline.
  # %b interprets backslash escapes in the argument, matching 'echo -e'.
  printf '\033[1;95m%b\033[0m' "$1"
}
function greenEcho() {
  # Render $1 in bold green (ANSI), with no trailing newline.
  # %b interprets backslash escapes in the argument, matching 'echo -e'.
  printf '\033[1;32m%b\033[0m' "$1"
}
# Print an error message and abort the whole script with status 1.
function exitWithErrMsg() {
errcho "$1"
exit 1
}
# List the distinct provision tags found in the storage directory
# (stored filenames follow the '<tag>-<libname>.<ext>' pattern).
function listAvailableProvisions() {
ls "`getStoragePath`/"* | tr -s " " | awk -F"/" '{print $NF}' | awk -F"-" '{print $1}' | uniq
}
# Print the library names stored under one provision tag, space separated.
function getLibrariesForProvision() {
local PROVISION_TAG="$1"
validateProvisionName "$PROVISION_TAG"
ls "`getStoragePath`/"* | tr -s " " | awk -F"/" '{print $NF}' | grep "$PROVISION_TAG" | awk -F"-" '{print $2}' | awk -F"." '{print $1}' | tr '\r\n' ' '
}
# Echo "true"/"false": does LIBRARY's stored binary exist under PROVISION_TAG?
function isLibraryUnderProvision() {
local LIBRARY="$1"
validateLibName "$LIBRARY" || exit 1
local PROVISION_TAG="$2"
local LIBRARY_FILE_PATH=`getPathForManagedBinary "$LIBRARY" "$PROVISION_TAG"`
if [ ! -f "$LIBRARY_FILE_PATH" ]; then
echo "false"
else
echo "true"
fi;
}
# Print the stored binary path for (LIBRARY, PROVISION_TAG), aborting the
# script if the file does not exist.
function getProvisionLibraryPathIfExists() {
local LIBRARY="$1"
validateLibName "$LIBRARY" || exit 1
local PROVISION_TAG="$2"
validateProvisionName "$PROVISION_TAG"
local LIBRARY_FILE_PATH=`getPathForManagedBinary "$LIBRARY" "$PROVISION_TAG"`
if [ ! -f "$LIBRARY_FILE_PATH" ]; then
exitWithErrMsg "Library $LIBRARY doesn't exists in provision $PROVISION_TAG"
fi;
echo $LIBRARY_FILE_PATH
}
# Abort unless the given directory contains a Cargo.toml.
function validatePathIsCargoProject() {
SRC_BASE_PATH="$1"
if [ ! -f "$SRC_BASE_PATH/Cargo.toml" ]; then
exitWithErrMsg "Rust project file Cargo.toml was not found in directory $SRC_BASE_PATH"
fi;
}
# Abort unless a non-empty, dash-free provision tag is given (the dash is
# the field separator inside stored filenames, so it cannot appear in tags).
function validateProvisionName() {
local PROVISION_TAG="$1"
if [ -z "$PROVISION_TAG" ]; then
exitWithErrMsg "You have to specify provision tag."
fi
if [[ "$PROVISION_TAG" =~ .*-.* ]]; then
exitWithErrMsg "Provision name '${PROVISION_TAG}' is invalid. Dash character (-) is not allowed in provision name."
fi
}
# Abort unless the argument is one of the four managed library names
# (the INDYJUMP_* constants defined at the top of this file).
function validateLibName() {
local LIBNAME="$1"
case "$LIBNAME" in
"$INDYJUMP_INDY")
;;
"$INDYJUMP_VCX")
;;
"$INDYJUMP_NULLPAY")
;;
"$INDYJUMP_PGWALLET")
;;
*)
exitWithErrMsg "Got library name '${LIBNAME}' Valid names for libraries are: '$INDYJUMP_INDY' '$INDYJUMP_VCX' '$INDYJUMP_NULLPAY'"
;;
esac
}
function getActiveProvisionName() {
  # Print the provision tag that the active system symlink for LIBNAME
  # currently points at (symlink targets look like '<tag>-<libname>.<ext>').
  local LIBNAME="$1"
  validateLibName "$LIBNAME" || exit 1
  # Fix: the original grep pattern used "${getLibExtension}", a VARIABLE
  # expansion of the (normally unset) name 'getLibExtension'; every other
  # call site in this file uses the command substitution $(getLibExtension).
  ls -l `getSysLibsPath` | grep -v ^d | grep indyjump | grep "$LIBNAME\.$(getLibExtension)" | grep -o -e "->.*" | awk -F"/" '{print $NF}' | awk -F"-" '{print $1}'
}
function getSysLibsPath() {
  # Print the system-wide library directory for the host OS; abort on
  # anything other than Darwin or Linux.
  local osName
  osName="$(uname -s)"
  if [ "$osName" = "Darwin" ]; then
    echo "/usr/local/lib"
  elif [ "$osName" = "Linux" ]; then
    echo "/usr/lib"
  else
    exitWithErrMsg 'Unsupported OS.'
  fi
}
# Directory where indyjump keeps all managed library binaries.
function getStoragePath() {
local LIBSYSPATH=`getSysLibsPath` || exit 1
echo "$LIBSYSPATH/indyjump"
}
# Full path of the stored binary for (LIBNAME, TAG): <storage>/<TAG>-<file>.
function getPathForManagedBinary() {
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
local TAG="$2"
if [ -z "$TAG" ]; then
errcho "[error] getPathForManagedBinary >>> Function argument TAG was not passed."
exit 1
fi;
local LIB_FILENAME=`getLibraryFilename $LIBNAME` || exit 1
local STORAGE_DIR=`getStoragePath $LIBNAME` || exit 1
echo "$STORAGE_DIR/$TAG-$LIB_FILENAME"
}
# Path of the system-wide symlink that activates a provision for LIBNAME.
function getSymlinkPath() {
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
echo "`getSysLibsPath`/`getLibraryFilename $LIBNAME`"
}
# Validate src/dest paths for adopting a binary under indyjump management.
# NOTE(review): as written this only validates and warns -- no copy/move is
# performed; confirm whether the implementation lives elsewhere or is
# unfinished.
function manageIndyjumpBinary() {
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
local srcPath="$2"
if [ ! -f $srcPath ]; then
exitWithErrMsg "[error] manageIndyjumpBinary >>> srcPath was set to '$srcPath'. No such file exists."
fi
destinationPath="$3"
if [ -f $destinationPath ]; then
echo "[warn] manageIndyjumpBinary >>> destinationPath was set to '$destinationPath'. This file will be rewritten!"
fi
}
function validateBuildMode() {
  # Succeed for the two cargo profiles 'debug' and 'release'; abort the
  # script for anything else.
  local mode="$1"
  if [ "$mode" = "debug" ] || [ "$mode" = "release" ]; then
    return 0
  fi
  exitWithErrMsg "Invalid build mode '$mode'."
}
function getLibExtension() {
  # Print the shared-library filename extension for the host OS; abort on
  # anything other than Darwin or Linux.
  local osName
  osName="$(uname -s)"
  if [ "$osName" = "Darwin" ]; then
    echo 'dylib'
  elif [ "$osName" = "Linux" ]; then
    echo 'so'
  else
    exitWithErrMsg 'Unsupported OS.'
  fi
}
# Root source directory (inside $INDY_SDK_SRC) of the given library; the
# environment variable INDY_SDK_SRC must point at the indy-sdk checkout.
function getBasePath(){
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
if [ -z "$INDY_SDK_SRC" ]; then
exitWithErrMsg "getBasePath() >>> Exiting. Env variable 'INDY_SDK_SRC' is not set"
fi
case "$LIBNAME" in
"$INDYJUMP_INDY")
echo "$INDY_SDK_SRC/libindy"
;;
"$INDYJUMP_VCX")
echo "$INDY_SDK_SRC/vcx/libvcx"
;;
"$INDYJUMP_NULLPAY")
echo "$INDY_SDK_SRC/libnullpay"
;;
"$INDYJUMP_PGWALLET")
echo "$INDY_SDK_SRC/experimental/plugins/postgres_storage"
;;
esac
}
# Platform-specific shared-library filename for the given library.
function getLibraryFilename() {
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
case "$LIBNAME" in
"$INDYJUMP_INDY")
echo "libindy.`getLibExtension`"
;;
"$INDYJUMP_VCX")
echo "libvcx.`getLibExtension`"
;;
"$INDYJUMP_NULLPAY")
echo "libnullpay.`getLibExtension`"
;;
"$INDYJUMP_PGWALLET")
echo "libindystrgpostgres.`getLibExtension`"
;;
esac
}
# Gets full path of a library libindy|libvcx|libnullpay|pgwallet based on repo path and whether debug/release binary
# is being seeked for
function getFullPath() {
local LIBNAME="$1"
validateLibName "$LIBNAME" || exit 1
local BUILD_MODE=${2:-debug}
validateBuildMode "$BUILD_MODE" || exit 1
local basePath=`getBasePath "$LIBNAME"` || exit 1
local libraryFilename=`getLibraryFilename "$LIBNAME"` || exit 1
local buildPath="target/$BUILD_MODE"
echo "$basePath/$buildPath/$libraryFilename"
}
| true
|
9c2a9f1d2ec812b30b30ca67210beadb758434a8
|
Shell
|
UesleiJf/lpi-essentials
|
/MaterialEstudo/Scripts/ScriptApagar
|
UTF-8
| 508
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo script (user-facing messages are in Portuguese): locate a file whose
# name contains 'para_', re-wrap its text at 70 columns into NOVO_ARQUIVO,
# then delete the original and report the final exit status.
echo "O script já começou o trabalho sujo!"
echo "vamos pegar o arquivo listado abaixo..."
# List the target file case-insensitively; warn if no such file is listed.
ls | grep -i "para_" || echo "Não existe esse arquivo listado..."
echo "Vamos formatá-lo e jogar o resultado em um novo arquivo chamado"
echo "NOVO_ARQUIVO"
touch NOVO_ARQUIVO
# Re-wrap the text at 70 columns into the new file.
fmt -w 70 para_apagar > NOVO_ARQUIVO
echo "... e agora vamos apagá-lo!"
rm para_apagar && echo "Apagou tranquilo! Confira o novo arquivo" || echo "Deu erro, confira seu script"
echo "Acabou com o status" $?
exit 0
| true
|
d500f2fef01b15dd52e50642e881e604490ff1da
|
Shell
|
cchantep/ReactiveMongo
|
/.ci_scripts/integrationTests.sh
|
UTF-8
| 2,688
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Sanity-check a running mongod (plain/SSL/x509/replica-set profiles) with
# the mongo shell, print server facts, then hand off to the integration
# test runner. $1 is an env file providing MONGO_PROFILE, PRIMARY_HOST, etc.
ENV_FILE="$1"
source "$ENV_FILE"
export LD_LIBRARY_PATH
# Locate the mongod process owned by the current user; bail out if absent.
MONGOD_PID=`ps -o pid,comm -u $USER | grep 'mongod$' | awk '{ printf("%s\n", $1); }'`
if [ "x$MONGOD_PID" = "x" ]; then
echo "[ERROR] MongoDB process not found" > /dev/stderr
tail -n 100 /tmp/mongod.log
exit 1
fi
# Check MongoDB connection
SCRIPT_DIR=`dirname $0 | sed -e "s|^\./|$PWD/|"`
MONGOSHELL_OPTS=""
# prepare SSL options
if [ "$MONGO_PROFILE" = "invalid-ssl" ]; then
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --ssl --sslAllowInvalidCertificates"
fi
if [ "$MONGO_PROFILE" = "mutual-ssl" -o "$MONGO_PROFILE" = "x509" ]; then
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --ssl --sslAllowInvalidCertificates"
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --sslCAFile $SCRIPT_DIR/server-cert.pem"
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --sslPEMKeyFile $SCRIPT_DIR/client-cert.pem"
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --sslPEMKeyPassword $SSL_PASS"
fi
if [ "$MONGO_PROFILE" = "x509" ]; then
# x509 auth: the username is the client certificate's RFC2253 subject.
CLIENT_CERT_SUBJECT=`openssl x509 -in "$SCRIPT_DIR/client-cert.pem" -inform PEM -subject -nameopt RFC2253 | grep subject | awk '{sub("subject= ",""); print}'`
MONGOSHELL_OPTS="$MONGOSHELL_OPTS -u $CLIENT_CERT_SUBJECT"
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --authenticationMechanism=MONGODB-X509"
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --authenticationDatabase=\$external"
fi
# prepare common options
MONGOSHELL_OPTS="$MONGOSHELL_OPTS --eval"
# Smoke test: ask the shell for the db name and expect the literal 'FOO'.
MONGODB_NAME=`mongo "$PRIMARY_HOST/FOO" $MONGOSHELL_OPTS 'db.getName()' 2>/dev/null | tail -n 1`
if [ ! "x$MONGODB_NAME" = "xFOO" ]; then
echo -e -n "\n[ERROR] Fails to connect using the MongoShell: $PRIMARY_HOST ($MONGO_PROFILE); Retrying with $MONGOSHELL_OPTS ...\n"
mongo "$PRIMARY_HOST/FOO" $MONGOSHELL_OPTS 'db.getName()'
tail -n 100 /tmp/mongod.log
exit 2
fi
# Check MongoDB options
echo -n "- server version: "
mongo "$PRIMARY_HOST/FOO" $MONGOSHELL_OPTS 'var s=db.serverStatus();s.version' 2>/dev/null | tail -n 1
echo -n "- security: "
mongo "$PRIMARY_HOST/FOO" $MONGOSHELL_OPTS 'var s=db.serverStatus();var x=s["security"];(!x)?"_DISABLED_":x["SSLServerSubjectName"];' 2>/dev/null | tail -n 1
# The storage-engine report is skipped for the 2.6 profile (per this guard).
if [ ! "v$MONGO_VER" = "v2_6" ]; then
echo -n "- storage engine: "
mongo "$PRIMARY_HOST/FOO" $MONGOSHELL_OPTS 'var s=db.serverStatus();JSON.stringify(s["storageEngine"]);' 2>/dev/null | grep '"name"' | cut -d '"' -f 4
fi
# For the replica-set profile, initiate a single-member RS before testing.
if [ "$MONGO_PROFILE" = "rs" ]; then
mongo "$PRIMARY_HOST" $MONGOSHELL_OPTS "rs.initiate({\"_id\":\"testrs0\",\"version\":1,\"members\":[{\"_id\":0,\"host\":\"$PRIMARY_HOST\"}]});" || (
echo "[ERROR] Fails to setup the ReplicaSet" > /dev/stderr
false
)
fi
source "$SCRIPT_DIR/runIntegration.sh"
| true
|
406c8d252c64bc46be27ef014438bc51e53e8b4f
|
Shell
|
jgiovatto/letce2-emane-example
|
/distributed/exp-03/emane-spectrum-anayzer.sh
|
UTF-8
| 333
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash -
# Launch emane-spectrum-analyzer against <hostname>:8883 for the
# 2.390-2.410 GHz band, plotting waveforms for both radio subids.
# usage: ./emane-spectrum-analyzer.sh <hostname>
# (the original comment said 'container id'; the argument is a hostname)
if [ "$#" -ne 1 ]
then
  echo 'usage: emane-spectrum-analyzer.sh <hostname>' >&2
  exit 1
fi
# Quote "$1" so hostnames are passed through verbatim.
emane-spectrum-analyzer \
  "$1:8883" \
  --hz-min 2390000000 \
  --hz-max 2410000000 \
  --with-waveforms \
  --subid-name 1,radio-1 \
  --subid-name 2,radio-2
| true
|
7b0bde4e1e598e90d17d3a68360fd70629da0d6d
|
Shell
|
archlinux-lucjan/archlinux-packages
|
/mkinitcpio-git/PKGBUILD
|
UTF-8
| 1,479
| 2.546875
| 3
|
[] |
no_license
|
# Maintainer: Piotr Gorski <lucjan.lucjanov@gmail.com>
# Contributor: Christian Hesse <mail@eworm.de>
# Contributor: Dave Reisner <dreisner@archlinux.org> ([core] package)
# Contributor: Thomas Bächler <thomas@archlinux.org> ([core] package)
pkgname=mkinitcpio-git
pkgver=32.r25.g1862ed5
pkgrel=1
pkgdesc='Modular initramfs image creation utility - git checkout'
arch=('any')
url='https://gitlab.archlinux.org/archlinux/mkinitcpio/mkinitcpio'
license=('GPL')
depends=('awk' 'mkinitcpio-busybox>=1.19.4-2' 'kmod' 'util-linux>=2.23' 'libarchive' 'coreutils'
'bash' 'diffutils' 'findutils' 'grep' 'filesystem>=2011.10-1' 'zstd' 'systemd')
makedepends=('git' 'asciidoc')
optdepends=('gzip: Use gzip compression for the initramfs image'
'xz: Use lzma or xz compression for the initramfs image'
'bzip2: Use bzip2 compression for the initramfs image'
'lzop: Use lzo compression for the initramfs image'
'lz4: Use lz4 compression for the initramfs image'
'mkinitcpio-nfs-utils: Support for root filesystem on NFS')
provides=('mkinitcpio' 'initramfs')
conflicts=('mkinitcpio')
backup=('etc/mkinitcpio.conf')
# Primary source is upstream GitLab; the GitHub mirror is kept commented
# out as a fallback.
source=('git+https://gitlab.archlinux.org/archlinux/mkinitcpio/mkinitcpio')
#source=('git+https://github.com/archlinux/mkinitcpio.git')
sha256sums=('SKIP')
pkgver() {
# Derive the version from the latest git tag, commit count and short hash
# (e.g. '32.r25.g1862ed5').
cd mkinitcpio
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
# No patching is performed; kept as an explicit step entering the source dir.
cd mkinitcpio
}
check() {
# Run the upstream test suite.
make -C mkinitcpio check
}
package() {
# Install into the staging directory via the upstream Makefile.
make -C mkinitcpio DESTDIR="$pkgdir" install
}
| true
|
06c3de36ce8c82ee2e561373d191caed60692ee8
|
Shell
|
infeeeee/kimai2-cmd-argos
|
/kimai.1r.1m.sh
|
UTF-8
| 1,401
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# <bitbar.title>Kimai2-cmd</bitbar.title>
# <bitbar.version>v0.1</bitbar.version>
# <bitbar.author>infeeeee</bitbar.author>
# <bitbar.author.github>infeeeee</bitbar.author.github>
# <bitbar.desc>Client for kimai2 the open source self hosted timetracker</bitbar.desc>
# <bitbar.image>http://www.hosted-somewhere/pluginimage</bitbar.image>
# <bitbar.dependencies>kimai2-cmd</bitbar.dependencies>
# <bitbar.abouturl>https://github.com/infeeeee/kimai2-cmd-argos/</bitbar.abouturl>
# --------------------------------- Settings --------------------------------- #
# path to kimai:
kimaipath=kimai
# Images: Uncomment what you want to use
# Full color: (on mac change imagetype to 'image')
#kimailogo='iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAAGWB6gOAAAACXBIWXMAABYlAAAWJQFJUiTwAAANG0lEQVRYCY1YCXBW1RX+3vqv2QkhIWzighCKiKBoQJC6VOkoiu3UcUErttriUmuVcd/qTLXqKIoraq0zrRsuKCJLQZBgcMmwxRUhISEBspB//9/W79wQRHDUx7z/vbx377nf/c453zkPYN8RBEFd370mN+cvnhpEy30cYR2LuTUPdOr5fL4+Ugg8e8xyNBkbMeO1U0rVyAtWTAnsgVloPRHMG/l2r5VsNtt5/nuTgp5ET9BnF1ykgufL8kDjTTB73a/gWXl07MhBP+fNKdDLHNw9+mkUFFnQblz2x2B32RcIRQxUp8b2mlnfvDq4aOnpARe4Ti0vj8XWue/WYtqQM7F+12oMtIbi3tr50ORwXbfi7IUnt/XrH4dVkgdiDuwgjEyPhyMxHqdVnAfT9/35lmnCN/IwbA2PDloBFw6u+WI6Nu2pR8O2DVDL0Vow893JiBeHoId97kPDhPip+LhpHeZPfeXaXpT7fv3Av5PQNtH6WQe+MOUPPny2rmXFZectmYRRpaOxYXfDIs/3ZTcxwzDSmud5o+Ysu2hTj9UGMx5A4zQ/FyCbCDDvxJdRHq/QdB6bYHuwYgGOrRyPWIkJu9CAHdVxW901cBy3Wd/U1oAOpw0adzajbDYeGfw+9IindronT+umUa0X26VE1cspQcPzXBRoxXz2nQfN6tLBOKrwF9ie3YC67qVYmVkIN+PDJa5CsxyMkmeVic1tDcGitv9gS2YdyiIVaE/uRD6p4aWTl8GOWJoa1NXVpUdDMW/ux5djr9eF8SW1uLzmeuIxe3EITwcexFVD3vK8Bnmu2bhnQ/Bp24dBe6pVHqmD71WEHThPES4PyOkCUnbpnGUXYEjZ4fi06wPofDt5wJloyzajZes2JPNJaK6JkBc9/+uOxmB46QiZajCWfAWLRnJ1zSvtJxofgmsmYUU0mCGNdJI/Itc5yvMD+A5IDMnJanAyAdLJHBbNXCueP1F3XOfuvJOz//HJHXD1NMywTkOAGdNw+5FP4fGjFmNgWRXChTpCcY0O5RkOYIQCxCIRXL3kUnAna83AD2Y2tNdjytDTYBJBfc9SaIYJ3RLUEk8a5pYukGQA4wpPNd+NdbmVGFwwGANKh+HT7etlIISjG4+vnPzm3fU3oaAgAitqwHe5FfrWc7kyjT7VfSs+S61B4Bjw0twi329PbcXWzHacUnkmXMflcjyYnwuJ7JzzFk+hsRj58dQWJbl0QyOaAAHBeeTIybkMbF3xNK5oIm6YeC8yuazQ2Hskk8l4NBpN7OxuwQ31l8HTHZjcnmboGFt6Aj7ZsxaBR8OejTFF43DN6NvhwVvIOef22TjkmslkTk6lUq0C2c25gZPl1fGCdDot6rLgkAl8sB/RwS+Z6dOZJxeT5JGMwgTfL+H9fJ7tB4/9wb856ZXe+A0YzS0qqj9ndEuU7ztyXGTUwZP3I2JQxbhakoNwy5orsTmxAf2iZSiPVmJgdAhWtbwH3wP+NuYenDBwimjmM4yf2X0GdbnhQwnz5FedWzBjUS2lN4x4LIJwxIIdMhlfJsJRnmEb77e+hT8xjTj+cmbE032GFCI+6OxM7y65fPlMRGImKgur0BG0giGkAtEjSs3XVYr0N4dge+d29DeqMO/Uf0t6jGSgNuo0UkuIJdeuvAyWbTCiNXQFlKuQDkOli04tISrJPZ7tzjYVFjszTVjXtEoAfSY/Oi3+95OWdcj5WYwsH00UAY0F3JKlqosRoaGwGKFhxpUsZJk6TqiejHvqb5Y8CxFMf53CVPX8xvmMYODrxBa1Hd3SoVOB5w17H5eU/xVeOAtLENGIbNc3PHzKADV0HdlcVji+SpHdlNjG3PTIB/OAL0U3TEa05Oy48DTMH7Aax8Rr4bImyTudZ8CxRIOt3V8KT2coQ2XRfpg4cBpdXUVj
QjCLBa8UcVkNLrN0VsnNeGz4Upa0PO17OGnALzFp0DQk8t1CUVwZSlH5WJKwO92qJspkn0Im14AeM1mCViRewZVfT2UJDbH46fiwbTlWNy9HdXyYjNuis0JiMtFIsGmBxcTkZJERZjrzHq300pWtU/BG57MwvYhawGfyykKMdlTFq4lee8Ek48/Pqrlq1m63Hc25L/nSAxMfPj1zS9OFKp50ypbjUJ9Ike/4sBHBcRUT0WI0QaMDbM1+h04zf2/boVkRQk5l0qCsIa+TZXK0y9sJjcQSJBGLIeq1o1G3c1iy9S0sPHO1aNnrKo5oyM/nc9f/ZdydiGhRpYKjCyZSwEhyBijwyin0pDfrowSViAclyGWzeLB2ASzLQiQSOU8Zkp9wOPwgRerNJ6e8huOKT8RHzWs4EchzcpU5nLKr8R7YubeVWwvw+KSXMaK8BrlcLi7z5eAGvjsI83oifCCdSeGlr5/A2j3LYZsh9OT2YlB0KH532BUYUzoBvu53kuB+oRBLyY8dNPhcOp1RqqjUkSopaknVbKFKTv6xuT/5jrFSy/MZRnEzrz/3kLFP8zzpJxfYN+B7FB04iUbk3TU8b+UpzQ9cBunmPZ9hffsH+KJ7M3bldpLCKFuCDpKtIW6WIO9lUB6uwoiiURg/gL1b2VimrSq2rABBB43excGPkGIxechxyFPGuSj3izxnyOg96V14YdOjaNy7EUm/R+q3EhLJ/SK7GGPKjkdrervKxjICaeioQ8pJ9OopkyjnMEkYyrUV0zCrZg7KouUKBDf8Km8uoX6k1YN9P98DRDDXcsBD8u7Lji24r34uupxO2LaOyoJqjCitwcbueiTYrkn5tbjzwdHD2F7uYl4ZKLLK0JT6RmkOCMbzfAyNjkBleAgadq3HrmQ7+tkVuHnCfdjXWImQXU3Bf7QPlAJEGdHJyKsEM8NlK/rPj27DSmpNQTiu9BvUb4ONBCivotUaT+jUdSazop6vRNSIQemXyAvYvwWuz0Rn98KrJ1nPZPdcDalsClMrT8e1E26lO03pFF8lqN/QVm9HQ0AL+ODSHJPy5lVzQGnDoOLBbBt3oCXzDRson5pPg+wADekCDe6eeiUMiWSzV1cbDNjjS+iJ2gTUSKU6bK/A0yMYn1dpZKrImO+xIBHcXZMfhm2FBNQzTPHZ8lHzayJ7U6d2Pbb+frzb9Doi4Qh7WalsVGrLYLHmPVmRRlC+HWw+u2Xwk0gHKbyz93k0uvUUEAtGwGq2HwjZEmYo2CI60sIKO+GggAwlKT4O5TCP6UNn4srjrie7PsMqmG7yZxbZ0Vq6mvFBC3t8Co8u7qBtKWpyFdewjVbMSNMtSUMFxHC7Bn/ufz/dpaHH78Lq9Bv4X3ohErkukhnmCM5n+fHZMLCWS7wg4+wVbzIEWLrJzKody3D2Eb9FVXG1kDNLAI0UvncktqrO9Zh+o1kRCIIMtWa3IRF0CiIFUnXsBCjx03f4rF8sEYhphTgjdjHOil9KID42p+uwqONf2Oo20qUsb6oqBygLV6CisJoS0stcY/tGrv2tAJL4G2lSHJJiPGYVIpVL4fPOjXQTAdBdriENLQOX6/dBkOotBVau6rNw/5teiOSBZOoYYA3B4NBR2JlupmulvyDDZLIztwvdabJEFzp5qa0u1y5SkwkoaXLMEv513NDiI/gxMARN6W8RJkU6UcjJQkEZ4SIszrJL8Z5QTpdzEQYx/5kc7zOKN+XrsDjxIr8DNnN8iDEkgSwTJPv4I70eL67L9oOxlaVGDSsYjmFFhytAjOUl4rJ5rG6Xxe145cWjZuPOj25CzCzEYaWHE4CLzxMNMGlUGPKkaeIhqW4yiJPUo2VsTdZk3kImSMLkF74AlQ7Do0t6g7r3g0WCO6xFMDBWhRZW16SXJMgAF9ZcgagdlQrbQruPEb76kJnELFvGRLQ/bFmOhxvuUV+M0vaJyyS7VFfLgJYPG35hwdFzSkNMRnkgTNIhsgB/oJFN94DUZ0em
YsajiwRYqVWBZCaBP4y6DicNPIX/C0HKgKms1msVoH2gRhPhB4ye4t3pdtxVfwN28vM1bDMgqUPF4SKUsCtsz36rMk4JI4EFjBflE8WhwfgSMeQTMsX2BFHE0ZHezWcilNx8PofqyFDcMv5+lpH+4uourjuJYDYLjv2A5A+mncHv/wXsci52sg6+2rsFj2z8O3JBBmMrJrBsfKLcJCmrMdNUfWQGji2eqNzY0KU+jUkSY4VlQ2fM1JSMV+DqdvCDNTYcc0bPxWGFI2CFLWnLnrNDodlsrLmF3uN7gPoeJlPJAkM3HqVyXiJx4JDn9XvWYHHzq/iyZwuD2lUqbSo5MHBs2UQV8J/truPidAAj12FnYAYmDi88GmcNOh/jymth0b1SdqjKz3HzV8fjcZXhfevK9QcBHTiAn6FjSOntFLXptmVbql2mOzJuGt0svHvzXbzvUVOiTIZC/jdNsVWCCNsSaVw1xlwun3Movm8zxu4g+xsPtH/w/U8COngCu71KXTcmMI3Hc4Gj+X4QARfIOP6d4H0zbxt5recm6mOxWJu8+7nH/wFEitED4YtkxwAAAABJRU5ErkJggg=='
# Black with alpha:
#kimailogo='iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAAGWB6gOAAAACXBIWXMAABYlAAAWJQFJUiTwAAAFJ0lEQVRYCbWX3WseRRTG3w8FqQ2ClPpBpQgiSFoVpKK2ahCCYP24yWUoSEEoopWW3rcq+A9415ra25RUrwUpFEFbCyJIRQolEC1CTBSlVENIfH6759nMTnbNRuuB5z3fZ2Znz8zs2+uJVlZWYF/yU1AYrI5JWEC5xI9otWT6zSKxLw/Cuxh8Wfy2kHv3SJi2slbHFni/35+3/ogF8fGBRvtewsUw7iqGjym41sFwkmlbMWrY17FxWb7Fei5zkc3c1ygbas3R63nYw5Uxgk/I8J2wv3IghPMjiefDMRqcYbeE3MM4YiXlg8HgpvU04zUbg1NpDvmHMLC8P0smKaXVwXAw4O0UpCCL64Yn86nwvuuo4LeLn/LTXZXyUjjuDg4rFzWWgG7BcD8e0SFhNXyFIf/ZJcMSQYFr4pc1398T2zQFakUSw1QE8mATgulNCX4u2+AsH4OVfR1V/5LhvPBviGJPk/ieUO8wrCK93XkxAh9GbyHWjJgeu+ILYVI4IFSkQn7ZBBqvR8BOcXKgotCrFgpT/ed6+JxQ95Yaa1it/CdSGh+vKTOxvSyZ2fTT/b0Vo17zb0mgxbzpKcDA+Y52fLX/npfFj8WIJJlPuWXija8lp5KDZGNUjjZ2O2fpcaHaUJI3pLOKYHQek26+LNDdhU2cnhtNBpQqSgx3SnVw2jfIaVtciLiTtUcLZRjOWXHIWwL7o8IbGIOeFWcbMejJsFWLuyjDjcrY621L5FzcGYa7xClWnfv7whD+TTF2xJ/O+EnCN6E8aWMH7t5iVtuJR2h7FHyfE/QPRI8dx09wG+EzDrYE0Rpf4XOhScn5zFzEnNFNtAQ5NCtNWxVCzskF4GdyJ7qOGvYmO6AolPYJNhMFrltp4cTsx3daYMo0Wk47ckOi81gPChQqiMMbxfdRae32ywRmitDYIkekYDSlRdOLDpltA3kjl1qyaT+VhWITpaf6deNVhuFwyKnAU7DRS3KhmNlRWQngguQmTYnbgiOFwRaEfuRIbKBwnpaLBIqaI7OdniPGkN6NkoR9yjglzAkU7QJiOWr2uk7O5atTHmBdUXzkvCOwHOngfA++LewQ2ggfucSmub9IJ7d9ZTwBc8XyQs8JRaH4tlvXAPKbOBNGhScE5Daa0FXCd6IneFbylnXBnghcxFM5YRZDQrQ0Aw4TG+LjAoW5DZBz4kDPJzonm8d5q5YQE2HTVqsiOd1ntfhbqLANPSlWixaprrQpiTiXBY4XMClU15XkW0lc3NQGjMnY1T37ihTeF8ZDQifSIUKzzwr/dTVpcsZmDkWtGQxxDUjsRmrQ/AuVw+tDgX7aFMXnIZOaJpHPYhQ+BKCxBPxTaaSGCVEjBadoU5OPyP6CMCb4xuJyJbe4YL8OpfozJn1D2mhCcS8c27BQGXBNjAkVnw7vh0JzbRM6UcufiotK5jzaDHEnubFPkHivwNcBM6SgiWXdayXnmtCCVom++UDIz6Y8PNXZWSnxahn7R2F7cYRL4F3y8Y3jM+H/Jjc+n5KMydjPFIP6pJayW/hVWI3376QiLn4eSpWOclOdEa2uvz0WVYerpyRPKE5slv6MwKx5r1eFOwToQMkafzl1QRO9KON4ONhhNLB7Zkpy/XWnE7KsIBI/FjwxCvAqOWGbiMu37QJmVcmlRjqRrbEIMm+OHlM499ySkE5wScs+L9sV4ULgSpzgxKYTQJ8RdvuhzWWrUb+mNSj5U2h33acw/jTtEdgxDwisKPSHwE3OYXspwInemf4GnqxeegFEzTIAAAAASUVORK5CYII='
# White with alpha:
kimailogo='iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAAGWB6gOAAAACXBIWXMAABYlAAAWJQFJUiTwAAAFz0lEQVRYCaWYXYhVVRTH752bIOoUiGmJJVEIOkpBJZVaQzAEavni4zAQRRDSB/XYSx8PPfQa9DA25uuEU48ZlCFBOQyUIEoEYkyFMY5JKZXce0//395r7dl3z7E7Tgv+s77XXmefffbedxoNUbPZbFRV9Q1yIDMEWY5hYZ6IaQFeWVxK9Uic7QHzXrbAtsrd4hEbZJx0RXIsBSetiafb7c7RAcW2ZhEjA6JzCjhlxu2hc2tWttDbc8bJTMUZlXIBjAGZPiJ+GmXKA4yLVe0Yan+LobwC3Id9JSVY8FtynhH2JQeCOT+U44RAhSHjYtWqEGzGQXOkAuidTucvD5JerbKgAx5lOvZZyv9gBqb3omTIY/FXA51ud4NbMueg2wInUvSIQNbb7jR9hfhhf7ofpew1x1rjJMVJtSkYwCBstIAXxSt8tSTfduE6QUbnxWf0UH+4QXySAj1F3CDnhAUyrwcFWgKHBH8ut8FnBSiuawpJ+Uc4IdR2mRuJKSC1ehTjOwLPXgbwdudkh7YIwZ8XRZadOQvOc+JfC6PCmNlCkgr5y5Y50bOS8G8WRq1YMDwjAxSSC/5r8MRBgr+mI+aw6+//Eynh8crAUi8G2m8DxVdoE74Go17zlSKYTg4UNgow8FQ5UNKt6BMK8seSGJKcT/iSgTstSGZxpzrbL9OYsE34UzgufCD/b+L9ScN+zNCQreYZiaxuJ9bckHeVKrpBztUeKZ7Wjcn5sjhpcePePcXC8tbILclXhZ8EHnetAGFfaUCH3hBuFZ5X3jiGQFTVCOz/1wRfS+tc9jjnsm82323i0Nbgk7A7qFbEE27EFeuDwfki/vZCv0j5zgJ2Gk/BZcHM72tLpmo9CVB6FMmpSPBU1Re5raYwC/NNL5Qn57LVCiwcLTWFWBrfNgmRkzfFl/yZcElwKjeojhzxMI6LtSv9kDB4Mx0djWP6GJFrq+HbnPRCLxBUA5nCN5d8vWXCxkbMPgKOCEzYHiElmLypJtFjRhVzjxAfX5xjBgrnUZlY6orzQnAaOBZibGW/ZkYP8kMOPT/okFsC9vAhp4Gyj/ZTORkhP4pI8IXngzTa7bafcasXFbLOXlcixAHJSZqSJXNasKUw2LyglEXbWarre/gRSxALiXCIz+lxf4r/LLRQMkqepAK7hcOCn7AS+xKx48Iur1PycrzwJGUQuorw9bwqMB05nZbysrBJyKcwl/GRS2xOl6SQm8bt25CC2bmnhEDc7SQsWgCyeQPsCUPCgwKy20t+UBsztZw4YuLlMu8qnx0F8FROFyTkRVnSDNgq7A9I567JpQo5z0HmOl42Oiub00t5P774+GjTrEhO31lP8DIU1SobdJ3P0InZip+IrZcJ87TF2V7AqBCPq2U04imq4Q3knEOcmQOMCcVzVsLTQheLqPzW8yI9sjYRFvsFIc2m5J4Y9H6kGBY5RA97KXAMzY6BRQXlqrVpgZY3VDav9wXWU8rp1xB+1bqiHGiS6ww3sYZ+NJ6Bi4YzrMCwRCKWQ/KawNTMCHWLnMX/pDAs7FEM65gfrNA2Tl3uVdDtkTW+Mv6/mGZ8S6vVGtFg3xeFuIZ+Wdj8N95VZog7KnSvsC5IS/ijp6uLmpbxIaGpZrgUvlcXlNtUZ6P0u812nHd9h+A38lMMZGBad2W62wPXDMzr3bNu3hXKvakntqjBl5X7OSChn4X1vg9x++DyDX0upIT8aZYj57Uy2Rc+V0mIsR8L9X2nlmGH8LtQ2b3Ak1Jzct0n5HrfHhVfV2dQs+t3j8uKGUqFvCG4HEz9UQFiw+JfACsFfGPGexoyG7tueQHyuKfk45806Hxh5wXfDCck88NkgfKGXLbEj8QhkgGvMv+Z5APCOXxvdAAzq+R6HYnhnwprGO+mScn3C5xzLGDIC1/XtM9JPyucN
Jy1HZxYj5MYctmAd/hDOy8b6tti+RRq4k4V2Sk8LHDW3SX4/4XYY2YFNjq2gGnlXxRfMv0LzZwRDr1/DFIAAAAASUVORK5CYII='
# Image type
# On mac use 'image' for the colored icon, 'templateImage' for black icon
#imagetype=image
imagetype=templateImage
# -------------------------- Beginning of the script ------------------------- #
# Menu-bar line: currently-active entry summary plus the embedded logo.
# Quoting $kimaipath fixes breakage when the configured path contains spaces.
echo "$("${kimaipath}" -b list-active) $imagetype=$kimailogo imageHeight=16"
echo "---"
echo "Active measurements: (Click to stop)"
"$kimaipath" -a list-active
echo "---"
echo "Recent measurements:"
"$kimaipath" -a list-recent
echo "---"
echo "Open Kimai in browser | href='$("$kimaipath" url)'"
echo "Reload | refresh=true"
# ----------------------------- End of the script ---------------------------- #
| true
|
da5edf5146f6dce846743a0c8ef8637f65ab4168
|
Shell
|
watchdog-wms/watchdog-wms-modules
|
/bamstats/test_bamstats.sh
|
UTF-8
| 1,483
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Test driver for the bamstats Watchdog module: runs the wrapped script with
# bad and good argument sets and counts failures via the sourced test helpers.
SCRIPT_FOLDER=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $SCRIPT_FOLDER/../../core_lib/includeBasics.sh # include some basic functions
SCRIPT=$(readlink_own -m $SCRIPT_FOLDER/bamstats.sh)
GLOBAL_TEST_DATA=$SCRIPT_FOLDER/../../test_data
TEST_DATA_FOLDER=$SCRIPT_FOLDER/test_data
# incremented by testExitCode (sourced) on each failing test
FAILED_TESTS=0
TMP_OUT="/tmp"
# call with invalid parameter
testExitCode "/bin/bash $SCRIPT" "$EXIT_MISSING_ARGUMENTS" "Missing parameter test"
testExitCode "/bin/bash $SCRIPT -b $TEST_DATA_FOLDER/notExistingFile.super -o $TMP_OUT/bamstats1.test -r 100" "$EXIT_MISSING_INPUT_FILES" "Missing input file test"
testExitCode "/bin/bash $SCRIPT -b $GLOBAL_TEST_DATA/test.file.notReadable -o $TMP_OUT/bamstats2.test -r 100" "$EXIT_MISSING_INPUT_FILES" "Not readable input file test"
# real calls
testExitCode "/bin/bash $SCRIPT -b $GLOBAL_TEST_DATA/fastq/joined_bam/test_paired_new.bam -o $TMP_OUT/bamstats3.test -r 101 -a $GLOBAL_TEST_DATA/annotation/test.bed --paired" "$EXIT_OK" "Simple stats test" "$TMP_OUT/bamstats3.test/test_paired_new/idxstats.txt"
testExitCode "/bin/bash $SCRIPT -b $GLOBAL_TEST_DATA/fastq/joined_bam/test_paired_new.bam --disableAllDefault -o $TMP_OUT/bamstats4.test -r 101 -a $GLOBAL_TEST_DATA/annotation/test.bed --paired" "$EXIT_OK" "Disable all test" " " "$TMP_OUT/bamstats4.test/test_paired_new/idxstats.txt"
# delete all the temporary file
rm -f -r $TMP_OUT/bamstats*.test 2>&1 > /dev/null
# return the number of failed tests
exit $FAILED_TESTS
| true
|
a5bee70cf55129d63bc2a4d648a591e2f2c51992
|
Shell
|
mbogoevici/strimzi
|
/docker-images/kafka/scripts/kafka_run.sh
|
UTF-8
| 2,615
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: derive per-broker settings, generate a Kafka server
# config, and exec kafka-server-start.sh as PID 1.
# volume for saving Kafka server logs
export KAFKA_VOLUME="/var/lib/kafka/"
# base name for Kafka server data dir
export KAFKA_LOG_BASE_NAME="kafka-log"
export KAFKA_APP_LOGS_BASE_NAME="logs"
# Broker id = last dash-separated token of the hostname
# (e.g. "my-cluster-kafka-2" -> "2").
export KAFKA_BROKER_ID=$(hostname | awk -F'-' '{print $NF}')
echo "KAFKA_BROKER_ID=$KAFKA_BROKER_ID"
# create data dir
export KAFKA_LOG_DIRS=$KAFKA_VOLUME$KAFKA_LOG_BASE_NAME$KAFKA_BROKER_ID
echo "KAFKA_LOG_DIRS=$KAFKA_LOG_DIRS"
# Disable Kafka's GC logging (which logs to a file)...
export GC_LOG_ENABLED="false"
# ... but enable equivalent GC logging to stdout
export KAFKA_GC_LOG_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps"
if [ -z "$KAFKA_LOG_LEVEL" ]; then
  KAFKA_LOG_LEVEL="INFO"
fi
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
  export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/config/log4j.properties -Dkafka.root.logger.level=$KAFKA_LOG_LEVEL,CONSOLE"
fi
# enabling Prometheus JMX exporter as Java agent
if [ "$KAFKA_METRICS_ENABLED" = "true" ]; then
  export KAFKA_OPTS="-javaagent:/opt/prometheus/jmx_prometheus_javaagent.jar=9404:/opt/prometheus/config/config.yml"
fi
# We don't need LOG_DIR because we write no log files, but setting it to a
# directory avoids trying to create it (and logging a permission denied error)
export LOG_DIR="$KAFKA_HOME"
# Write the config file
# (unquoted heredoc: ${...} and $(...) below expand at container start)
cat > /tmp/strimzi.properties <<EOF
broker.id=${KAFKA_BROKER_ID}
# Listeners
listeners=CLIENT://:9092,REPLICATION://:9091
advertised.listeners=CLIENT://$(hostname -f):9092,REPLICATION://$(hostname -f):9091
listener.security.protocol.map=CLIENT:PLAINTEXT,REPLICATION:PLAINTEXT
inter.broker.listener.name=REPLICATION
# Zookeeper
zookeeper.connect=${KAFKA_ZOOKEEPER_CONNECT:-zookeeper:2181}
zookeeper.connection.timeout.ms=6000
# Logs
log.dirs=${KAFKA_LOG_DIRS}
num.partitions=1
num.recovery.threads.per.data.dir=1
default.replication.factor=${KAFKA_DEFAULT_REPLICATION_FACTOR:-1}
offsets.topic.replication.factor=${KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR:-3}
transaction.state.log.replication.factor=${KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR:-3}
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# Network
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# Other
group.initial.rebalance.delay.ms=0
EOF
echo "Starting Kafka with configuration:"
cat /tmp/strimzi.properties
echo ""
# starting Kafka server with final configuration
# exec: replace this shell so Kafka receives container signals directly
exec $KAFKA_HOME/bin/kafka-server-start.sh /tmp/strimzi.properties
| true
|
0d3ff921dae380e5661ff4a325ce9bdb169da4e5
|
Shell
|
rootid/v2.config
|
/bin/git_java_fmt.sh
|
UTF-8
| 146
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
# Reformat all added/modified Java files (per git) in place with clang-format.
#
# Fixes vs. the original:
#  - 'grep java' matched any path containing "java" (e.g. javadoc/, x.javascript);
#    a git pathspec of '*.java' matches only Java sources.
#  - Unquoted word-splitting broke on filenames with spaces; a read loop
#    handles each path as a single unit.
#  - '${updated_files[@]}' is bash array syntax, undefined under #!/bin/sh.
git diff --name-only --diff-filter=AM -- '*.java' |
while IFS= read -r f; do
  echo "$f"
  clang-format -i "$f"
done
| true
|
edaffb2e6b994ede3abda947b65943a403b0f48f
|
Shell
|
johnwickerson/netlistfuzz
|
/time.sh
|
UTF-8
| 152
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Time a run of check_abc_test.sh and report the elapsed time in milliseconds.
#
# BUG FIX: the original used 'date +%S%N' — %S is the seconds *within the
# current minute* (00-60), so any run crossing a minute boundary produced a
# wrapped or negative duration. %s (lowercase) is seconds since the epoch.
start=$(date +%s%N)  # epoch time in nanoseconds (GNU date)
sh check_abc_test.sh
end=$(date +%s%N)
cost=$(( (end - start) / 1000000 ))  # ns -> ms
echo "Time cost is $cost"
| true
|
7dc84425e2cdd9a77cb1201e611408794bbfcf66
|
Shell
|
Nurdilin/web_dev
|
/setup_env.sh
|
UTF-8
| 1,742
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provisioning script for a yum-based web host: Apache, Python 3, MySQL,
# Flask tooling, and convenience symlinks under /root.
#in order to connect as root every time
echo "sudo su -" >> /home/$(whoami)/.bashrc
# NOTE(review): 'sudo su -' launches an *interactive* root login shell here;
# everything below only runs (as the original user) after that shell exits.
# Confirm this is intended — it likely is not.
sudo su -
cp bashrc /root/.bashrc
cp inedit_secrets /root/.inedit_secrets
source /root/.bashrc
# NOTE(review): creates setup_env.log, but later lines append to setup.log —
# one of the two names is probably a typo.
touch setup_env.log
# Will automatically update
yum -y update
####################VARIOUS##########################
yum -y install git
yum -y install gcc
yum -y install telnet
######################git installations#################
git clone https://github.com/magicmonty/bash-git-prompt.git /root/.bash-git-prompt --depth=1
git clone git@github.com:Nurdilin/scripts-utilities.git
####################APACHE#############################
#sudo yum -y install -y httpd
yum -y install httpd24
yum -y install httpd24-devel.x86_64
#start apache
service httpd start
#configure the Apache web server to start at each system boot.
chkconfig httpd on
######################PYTHON#########################
yum -y install python34
ln -sfn /usr/bin/python3 /usr/bin/python
yum -y install python34-pip
pip install --upgrade pip
ln -sfn /usr/local/bin/pip /usr/bin/pip
pip install pymysql
pip install mysql-connector
pip install virtualenv
pip install Flask
pip install flask-mysql
echo "pip version $(pip --version)" >> setup.log
######################MYSQL#########################
yum -y remove 'mysql*'
yum -y install mysql-server
/etc/init.d/mysqld start
#/usr/libexec/mysql55/mysql_secure_installation
echo "mySql version $(mysql --version)" >> setup.log
######################ENV SET UP#####################
# Shortcut symlinks into /root for common Apache locations.
ln -sfn /var/www/html /root/files_apache
ln -sfn /etc/httpd/conf.d/ /root/conf_apache
ln -sfn /var/log/httpd/ /root/logs_apache
ln -sfn /root/inedit-flask /var/www/html/inedit-flask
ln -sfn /root/web_dev/inedit-flask/ /root/inedit-flask
| true
|
2874e5d4251543faf1c1c4a67d444141d5310464
|
Shell
|
codejianhongxie/sequoiadbutil
|
/mountDisk/mounthDisk.sh
|
UTF-8
| 3,650
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Print option help for the disk format/mount tool to stdout.
function usage() {
  cat <<'EOF'
Usage: Options
-t, --type=<type> filesystem type; when unspecified, ext4 is used
-h, --help display this help text and exit
-d, --disks=<disk> disk to format, separated by commas(,), such as: /dev/sdb,/dev/sdc
-p, --prepath=<path> disk to mount path prefix, default: "/sdbdata/data"
EOF
}
# Defaults: filesystem type and mount-point prefix (data01, data02, ...).
fileSystemType="ext4"
mountPathPre="/sdbdata/data"
disks=""
# Parse long/short options; each value option consumes two argv slots.
while [ "$#" -ge 1 ];
do
  key="$1"
  case "$key" in
    -h|--help)
      usage
      exit 0
      ;;
    -t|--type)
      fileSystemType="$2"
      shift
      shift
      ;;
    -d|--disks)
      disks="$2"
      shift
      shift
      ;;
    -p|--prepath)
      mountPathPre="$2"
      shift
      shift
      ;;
  esac
done
if [ -z "${disks}" ]; then
  echo "Error! please specified disk"
  usage
  # unspecified disk
  #diskArr=(`fdisk -l | grep "Disk /" | cut -f1 -d: | awk '{print $2}' | sed ':a;N;$!ba;s/\n/ /g'`)
else
  # split the comma-separated disk list into an array
  diskArr=(`echo "${disks//,/ }"`)
fi
diskIndex=1
for disk in ${diskArr[*]}
do
  # check whether disk is already mount
  diskMountInfo=`df | grep "${disk}" | awk '{print $NF}'`
  if [ ! -z "${diskMountInfo}" ]; then
    echo "${disk} is already format and mount to ${diskMountInfo}"
    continue
  fi
  echo "format ${disk}"
  # Parse "Disk /dev/sdX: <size> <unit>, ..." from fdisk output into
  # diskInfo[0]=size, diskInfo[1]=unit.
  diskInfo=(`fdisk ${disk} -l | grep "Disk ${disk}" | cut -f2 -d: | awk '{print $1,$2}' | cut -f1 -d\,`)
  diskSize=`echo ${diskInfo[0]} | awk -F. '{print $1}'`
  sizeUnit=${diskInfo[1]}
  # Disks >= 2048 GB need a GPT label (MBR cannot address > 2 TiB).
  if [ "${sizeUnit}" == "GB" -a ${diskSize} -ge 2048 ]; then
    parted -s $disk mklabel gpt
    parted -s $disk mkpart primary 0 100
    parted -s $disk print
    # NOTE(review): this branch formats here AND the "# format disk" block
    # below formats again — the disk is mkfs'd twice on this path. Confirm
    # whether this inner block can be removed.
    if [ "${fileSystemType}" == "xfs" ]; then
      mkfs.xfs -f ${disk}
    elif [ "${fileSystemType}" == "ext4" ]; then
      echo -e "y\n" | mkfs.ext4 ${disk}
    elif [ "${fileSystemType}" == "ext3" ]; then
      echo -e "y\n" | mkfs.ext3 ${disk}
    elif [ "${fileSystemType}" == "ext2" ]; then
      echo -e "y\n" | mkfs.ext2 ${disk}
    else
      echo "unable support this filesystem type: ${fileSystemType}"
      exit 1
    fi
  elif [ "${sizeUnit}" == "GB" -a ${diskSize} -lt 2048 ]; then
    # Smaller disks: create one primary MBR partition by scripting fdisk.
    # NOTE(review): the partition (${disk}1) is created but the mkfs/mount
    # below still target the whole disk ${disk} — verify intent.
    echo -e "n\np\n1\n\n\nw" | fdisk ${disk}
  else
    echo "${disk} size is less than 10 GB, don't format"
    continue
  fi
  # format disk
  if [ "${fileSystemType}" == "xfs" ]; then
    mkfs.xfs -f ${disk}
  elif [ "${fileSystemType}" == "ext4" ]; then
    echo -e "y\n" | mkfs.ext4 ${disk}
  elif [ "${fileSystemType}" == "ext3" ]; then
    echo -e "y\n" | mkfs.ext3 ${disk}
  elif [ "${fileSystemType}" == "ext2" ]; then
    echo -e "y\n" | mkfs.ext2 ${disk}
  else
    echo "unable support this filesystem type: ${fileSystemType}"
    exit 1
  fi
  # Zero-pad single-digit mount indices: data01..data09, then data10...
  if [ ${diskIndex} -lt 10 ]; then
    mountPath="${mountPathPre}0$diskIndex"
  else
    mountPath="${mountPathPre}${diskIndex}"
  fi
  if grep -qs "${mountPath}" /proc/mounts; then
    echo "${mountPath} is already mount, umount it"
    umount $mountPath
  fi
  echo "mount ${disk} to ${mountPath}"
  mount ${disk} ${mountPath}
  # Persist the mount in /etc/fstab unless an identical line exists.
  diskFstabConf="${disk} ${mountPath} ${fileSystemType} defaults 1 2"
  diskFstabCount=`cat /etc/fstab | grep "${diskFstabConf}" | wc -l`
  if [ "${diskFstabCount}" -lt 1 ]; then
    echo -e "${diskFstabConf}" >> /etc/fstab
  fi
  diskIndex=`expr $diskIndex + 1`
done
| true
|
6d6baf41808654ec4184746eaa5c9c9b253f37b8
|
Shell
|
ismip/ismip6-gris-results-processing
|
/Archive_sc/A4tools/scalar_05_batch_func.sh
|
UTF-8
| 2,101
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Calculate scalar values for a models/experiment
# Assembles per-variable NetCDF inputs into model.nc, runs the basin scalar
# calculation, and archives the scalars_{mm,rm,zm}_05.nc outputs.
if [[ $# -ne 8 ]]; then
  echo "Illegal number of parameters. Need 8 got $#"
  exit 2
fi
set -x
set -e
# parameters
outp=$1
lab=$2
model=$3
exp_res=$4
flg_GICmask=$5
flg_OBSmask=$6
res=$7
outpsc=$8
##############################
## wherever we are, use a unique directory to run in, created by meta
proc=${lab}_${model}_${exp_res}
cd ${proc}
apath=${outp}/${lab}/${model}/${exp_res}
# strip resolution suffix from exp
exp=${exp_res%???}
# input file name
# Merge each variable into model_pre.nc: -O overwrites on the first ncks
# call, -A appends on the rest; -3 forces netCDF-3 output.
anc=${apath}/lithk_GIS_${lab}_${model}_${exp}.nc
ncks -3 -O -v lithk ${anc} model_pre.nc
anc=${apath}/topg_GIS_${lab}_${model}_${exp}.nc
ncks -3 -A -v topg ${anc} model_pre.nc
anc=${apath}/sftflf_GIS_${lab}_${model}_${exp}.nc
ncks -3 -A -v sftflf ${anc} model_pre.nc
anc=${apath}/sftgif_GIS_${lab}_${model}_${exp}.nc
ncks -3 -A -v sftgif ${anc} model_pre.nc
anc=${apath}/sftgrf_GIS_${lab}_${model}_${exp}.nc
ncks -3 -A -v sftgrf ${anc} model_pre.nc
# acabf
anc=${apath}/acabf_GIS_${lab}_${model}_${exp}.nc
ncks -3 -A -v acabf ${anc} model_pre.nc
# set missing to zero like during interpolation
cdo -setmisstoc,0.0 model_pre.nc model.nc
# Add model params
ncks -3 -A ${outp}/${lab}/${model}/params.nc model.nc
### scalar calculations; expect model input in model.nc
../scalars_basin.sh $flg_GICmask $flg_OBSmask 05
# Make settings specific output paths
# Output prefix encodes the two mask flags, e.g. SC_GIC1_OBS0.
prefix=SC
# Remove GIC contribution?
if $flg_GICmask; then
  prefix=${prefix}_GIC1
else
  prefix=${prefix}_GIC0
fi
# Mask to observed?
if $flg_OBSmask; then
  prefix=${prefix}_OBS1
else
  prefix=${prefix}_OBS0
fi
destpath=${outpsc}/${prefix}/${lab}/${model}/${exp_res}
mkdir -p ${destpath}
### move output ./scalars_??_05.nc to Archive
[ -f ./scalars_mm_05.nc ] && /bin/mv ./scalars_mm_05.nc ${destpath}/scalars_mm_GIS_${lab}_${model}_${exp}.nc
[ -f ./scalars_rm_05.nc ] && /bin/mv ./scalars_rm_05.nc ${destpath}/scalars_rm_GIS_${lab}_${model}_${exp}.nc
[ -f ./scalars_zm_05.nc ] && /bin/mv ./scalars_zm_05.nc ${destpath}/scalars_zm_GIS_${lab}_${model}_${exp}.nc
# clean up
#cd ../
#/bin/rm -rf ${proc}
| true
|
1a0437de6859cbb3023056c96bb02562271af965
|
Shell
|
ivant/configs
|
/.zsh/auto/crx.zsh
|
UTF-8
| 1,871
| 4.1875
| 4
|
[] |
no_license
|
# Generate a new PKCS#8 RSA private key for a Chromium extension and print
# the derived public key (base64 DER) and extension id.
# The id is the first 32 hex chars of sha256(DER pubkey), mapped 0-9a-f -> a-p
# (Chromium's "mpdecimal" id alphabet).
crx-create-pem() {
  setopt err_return local_traps
  if [[ $# != 1 ]]; then
    echo "Usage: crx-create-pem <new pem path>" >&2
    return 1
  fi
  local pem="$1"
  # refuse to clobber an existing key
  if [[ -f "$pem" ]]; then
    echo "$pem already exists, aborting..." >&2
    return 1
  fi
  2> /dev/null openssl genrsa 2048 | openssl pkcs8 -topk8 -nocrypt -out "$pem"
  echo "Generated private key file '$pem'" >&2
  local pub_key="$(2> /dev/null openssl rsa -in "$pem" -pubout -outform DER | openssl base64 -A)"
  local extension_id="$(2> /dev/null openssl rsa -in "$pem" -pubout -outform DER | sha256sum | head -c32 | tr 0-9a-f a-p)"
  echo "Public key: $pub_key"
  echo "Extension id: $extension_id"
}
# Purpose: Pack a Chromium extension directory into crx format
# CRX2 layout: "Cr24" magic, version=2, little-endian pubkey/signature
# lengths, then DER pubkey, SHA1-RSA signature, and the zipped extension.
crx-make() {
  setopt err_return local_traps
  if [[ $# != 2 ]]; then
    echo "Usage: crx-make <extension dir> <pem path>" >&2
    return 1
  fi
  local dir="$1"
  local key="$2"
  local name="$(basename "$dir")"
  local crx="$name.crx"
  local pub="$name.pub"
  local sig="$name.sig"
  local zip="$name.zip"
  # clean up intermediates on any exit path
  trap "rm -f \"$pub\" \"$sig\" \"$zip\"" EXIT
  # zip up the crx dir
  local cwd="$(pwd -P)"
  (cd "$dir" && zip -qr -9 -X "$cwd/$zip" .)
  # signature
  openssl sha1 -sha1 -binary -sign "$key" < "$zip" > "$sig"
  # public key
  openssl rsa -pubout -outform DER < "$key" > "$pub" 2>/dev/null
  byte_swap () {
    # Take "abcdefgh" and return it as "ghefcdab"
    # (big-endian hex -> little-endian byte order for the CRX header)
    echo "${1:6:2}${1:4:2}${1:2:2}${1:0:2}"
  }
  local crmagic_hex="4372 3234" # Cr24
  local version_hex="0200 0000" # 2
  # 4-byte little-endian lengths of the pubkey and signature blobs
  local pub_len_hex=$(byte_swap $(printf '%08x\n' $(ls -l "$pub" | awk '{print $5}')))
  local sig_len_hex=$(byte_swap $(printf '%08x\n' $(ls -l "$sig" | awk '{print $5}')))
  (
    echo "$crmagic_hex $version_hex $pub_len_hex $sig_len_hex" | xxd -r -p
    cat "$pub" "$sig" "$zip"
  ) > "$crx"
  echo "Wrote $crx" >&2
  return 0
}
| true
|
7780f41b703b9ba4707802af913b006e47a0d8d0
|
Shell
|
NobodyXu/stacks
|
/clang/post_configure.sh
|
UTF-8
| 344
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Post-configure step: make ld.lld the system linker and clang/clang++ the
# default compilers (via env vars, so kernel-module builds are not broken).
#
# Fixes vs. the original:
#  - Options moved from the shebang into 'set' so they survive being run
#    as 'bash post_configure.sh'.
#  - 'which' replaced with the POSIX 'command -v'.
#  - Comment typo "ld.ldd" corrected.
set -ex

# Use ld.lld as default linker
sudo ln -s -f "$(command -v ld.lld)" /usr/bin/ld

# Use clang as default compiler
## Set env variables instead of with /usr/bin/cc, /usr/bin/c++ or /usr/bin/cpp to avoid breaking
## the compilation of kernel modules.
./append.sh 'export CC=clang' ~/.bashrc
./append.sh 'export CXX=clang++' ~/.bashrc
| true
|
6a5b31c415ceb9b4dc1c1f05dbd78c4b71c75894
|
Shell
|
hevertonrodrigues/dotfiles
|
/install.sh
|
UTF-8
| 2,120
| 3.234375
| 3
|
[] |
no_license
|
#! /bin/bash
# Dotfiles bootstrap: symlink everything in ~/.dotfiles/files into $HOME
# (backing up existing files), and on macOS install Homebrew, a package
# selection, and iTerm2.

NO_COLOR="\033[1;0m"
GREEN="\033[0;32m"
BLUE="\033[0;34m"
RED="\033[0;31m"
WHITE="\033[0;37m"
GRAY="\033[1;30m"

#apps instalados com homebrew
APPS=(wget git ack ant autoconf automake cask cowsay cscope ctags emacs mongodb mysql node openssl sqlite postgresql funcoeszz "macvim --with-lua --with-override-system-vim")

echo -ne "\n${GREEN}Initializing...\n\n"

echo -e "Installing Oh My ZSH"
#sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

echo -e "Creating backup + creating symlinks to new dotfiles..."
cd ~/.dotfiles/files
for file in *; do
  echo "~/.$file"
  # back up any existing non-empty dotfile before linking over it
  if [ -s ~/."$file" ]; then mv ~/."$file" ~/."$file".bkp; fi
  ln -s ~/.dotfiles/files/"$file" ~/."$file"
done

#custom mac preferences
if [[ "$OSTYPE" == "darwin"* ]]; then
  echo -e "${GRAY}Installing Homebrew..."
  #ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"
  /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"

  echo -e "Installing Homebrew applications"
  installApp(){
    brew install $*
  }

  echo -ne "${BLUE}You wanna install all apps ${RED}[y/N]: ${GREEN}"
  read -n1 all
  vAll=$(echo $all | tr '[:upper:]' '[:lower:]')
  echo -e "\n"
  for i in "${APPS[@]}"
  do
    if [ "$vAll" = "y" ]; then
      installApp $i
    else
      # per-package prompt
      viewText=$(echo $i | tr '[:lower:]' '[:upper:]')
      echo -ne "${BLUE}You wanna install:${WHITE} $viewText ${RED}[y/N]: ${GREEN}"
      read -n1 response
      echo -e "\n"
      value=$(echo $response | tr '[:upper:]' '[:lower:]')
      if [ "$value" = 'y' ]; then
        installApp $i
      fi
    fi
  done

  # Install iTerm2
  echo -e "${GRAY}Installing iTerm2..."
  wget https://iterm2.com/downloads/stable/iTerm2-3_1_5.zip -P ~/Downloads/
  # BUG FIX: the downloaded archive is iTerm2-3_1_5.zip, but the original
  # unzipped/removed iTerm2-3_1_15.zip, so the install never happened.
  unzip ~/Downloads/iTerm2-3_1_5.zip -d /Applications/
  rm -f ~/Downloads/iTerm2-3_1_5.zip

  #echo -e "${GRAY}Customizing AppleScript..."
  #echo $(sudo chmod +x ~/.dotfiles/files/macos)
  #echo $(sudo ~/.dotfiles/files/macos)
  #~/.dotfiles/.osx
fi

echo -ne "\n${GREEN}Done! :-)${NO_COLOR}\n\n"
| true
|
ef6b909eacb174ecb087ffb9bf06f4f02b73c763
|
Shell
|
salamaashoush/bashDBMS
|
/start.sh
|
UTF-8
| 3,908
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Entry point of the GUI (yad/zenity) bash DBMS. Defines the on-disk layout
# under ~/bash_dbms and sources the table/db/user modules.
set -x
ROOTDIR="$HOME/bash_dbms/"
# per-database directories live here
PARENTDIR="$ROOTDIR/databases"
USERSFILE="$ROOTDIR/users_file"
# scratch file holding the last command text shown in the editor
DBMSOPTFILE="$ROOTDIR/dbms_opt"
GPK=""
. ./tablemodule.sh
. ./dbmodule.sh
. ./usermodule.sh
commandHelp(){
  # Show the help page for the topic in "$2" (falling back to the general
  # page for unknown topics) inside a yad HTML dialog.
  local topic
  case "$2" in
    create|select|drop|delete|update|use|show)
      topic="$2"
      ;;
    *)
      topic="general"
      ;;
  esac
  cat "$ROOTDIR/help/${topic}.html" | yad --text-info --text="help" --html --width=400 --height=600
}
checkCommand() {
  # Dispatch one tokenized command ($1 = verb, $2 = object) to its handler.
  # Arguments are forwarded unquoted via $*, exactly like the original
  # sequential-if implementation, so word-splitting behavior is unchanged.
  case "$1" in
    create)
      generalCreate $*
      ;;
    use)
      useDB $*
      ;;
    check)
      checkPWD
      ;;
    show)
      show $*
      ;;
    drop)
      drop $*
      ;;
    insert)
      checkInsertStatment $*
      ;;
    select)
      if [[ "$2" == "all" ]]; then
        selectAllTable $*
      elif [[ "$2" == "row" ]]; then
        selectTableRow $*
      fi
      ;;
    delete)
      if [[ "$2" == "row" ]]; then
        deleteTableRow $*
      fi
      ;;
    update)
      if [[ "$2" == "row" ]]; then
        updateTableRow $*
      fi
      ;;
    help)
      commandHelp $*
      ;;
  esac
}
drop() {
  # "$2" chooses whether a single table or a whole database is dropped.
  case "$2" in
    table)    dropTable $* ;;
    database) dropDB $* ;;
  esac
}
checkPWD() {
  # Print the current working directory.
  # FIX: the original 'echo $(pwd)' is a useless echo of a command
  # substitution that also word-splits paths containing whitespace.
  pwd
}
show() {
  # "$2" selects the listing: tables of the current DB or all databases.
  case "$2" in
    tables)    showTable $* ;;
    databases) showDB $* ;;
  esac
}
generalCreate() {
  # "$2" chooses what gets created: a table or a new database.
  case "$2" in
    table)    createTable $* ;;
    database) newDB $* ;;
  esac
}
DBMSloop() {
  # Main REPL: fetch a command from the yad editor and dispatch it until
  # the user enters "exit" (or submits nothing at the start).
  #
  # BUG FIX: the original 'local COMMAND=getCommand' assigned the *literal
  # string* "getCommand" instead of running the function, so the empty-input
  # check could never trigger and the first dispatch was a no-op.
  local COMMAND
  COMMAND=$(getCommand)
  if [[ -z "$COMMAND" ]]; then
    exit
  fi
  while [[ "$COMMAND" != "exit" ]]; do
    checkCommand $COMMAND   # intentionally unquoted: tokens become $1 $2 ...
    COMMAND=$(getCommand)
  done
}
# Open the yad SQL editor preloaded with the previous command text, persist
# what the user typed, then print it one token per line for the dispatcher.
getCommand() {
  local INPUT=$(awk '{print $0}' $DBMSOPTFILE |yad --text-info --title="DBMS Commands" --text="current used dabtabase: $CURRENTDB" --editable --maximized --lang=sql)
  # remember the text so the next dialog starts from it
  echo $INPUT >$DBMSOPTFILE
  if [[ "$INPUT" = "" ]]
  then
    # NOTE(review): emits the sentinel string "1" on empty/cancelled input,
    # but callers compare against "" / "exit", so this sentinel is never
    # matched — confirm intended behavior.
    echo 1
  else
    # split on spaces, one token per output line
    echo $INPUT | awk 'BEGIN{FS = " "}{ for(i = 1; i <= NF; i++) { print $i; } }'
  fi
}
# Bootstrap: if the databases directory is missing this is a fresh install,
# so create the layout and a first user; otherwise go straight to login.
firstRun() {
  if [[ ! -e $PARENTDIR ]]
  then
    setupDBMS
    zenity --warning --text "no no not here create user before"
    newUSER
    DBMSloop
  else
    userLogin
  fi
}
# Interactive first-time setup: after confirmation, create the DBMS
# directory tree and its state files, then cd into the databases root.
setupDBMS() {
  zenity --info --text "Welcome home $LOGNAME "
  zenity --question --text "Are you sure you wish to proceed?"
  # zenity --question returns 0 on "Yes"
  if [[ $? -eq 0 ]]
  then
    if [[ ! -e $PARENTDIR ]]
    then
      mkdir -p $PARENTDIR
      touch $DBMSOPTFILE
      touch $USERSFILE
      cd "$PARENTDIR"
    fi
  else
    zenity --info --text "You just break my heart"
    exit 0
  fi
}
firstRun
| true
|
fe5e2f457af7c811c998baf378fcc39507642548
|
Shell
|
CognitiveScale/mantl
|
/tf_profile
|
UTF-8
| 7,177
| 3.765625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# This should be sourced from the top level of the ansible-cognitivescale repo to pull the values in correctly
# TOP_PID + the TERM trap let helper functions abort the whole sourced
# session from inside command substitutions/subshells via 'kill -s TERM'.
trap "exit 1" TERM
export TOP_PID=$$

# Directory/repo for terraform
SECURITY_SETUP=security-setup-cs
MANTL_CONFIG_DIR="../mantl-config"
TF_STATE_FILE='terraform.tfstate'
TF_SECURITY_FILE='security.yml'
TF_SSL_DIR='ssl'

# if you modify ssl/openssl.cfg or ssl/openssl_req.cfg commit and add branchname here
#MANTL_SSL_BRANCH='kbroughton/feature/1277-make-naming-consistent-hosts-route53'

usage="tfProfile [folder_name] [path_to_tf_file] \
folder_name is the directory name (not full path) holding the \
terraform config files. \
path_to_tf_file is the relpath to the .tf file \
eg. terraform/aws.sample.tf or terraform/gce.sample.tf"
function safe_rm() {
if [ -f "${1}" ] && [ ! -L "${1}" ];then
echo "ERROR: real file $1 found. Refusing to delete it for symlinking"
echo "Please remove the file or move it somewhere"
kill -s TERM $TOP_PID
elif [ -d "${1}" ] && [ ! -L "${1}" ];then
echo "ERROR: real directory $1 found. Refusing to delete it for symlinking"
echo "Please remove the directory or move it somewhere"
kill -s TERM $TOP_PID
else
echo "Removing symlink ${1}"
rm -irf "${1}"
fi
}
function tfApply() {
# To allow multiple deploy configurations create a sibling git repo to mantl
# and create folders per deployment. Organizing files into a hierarchy is allowed.
# To run with all the defaults, set MANTL_CONFIG_DIR='.'
#############################
# TF_CONFIG_DIR
#############################
if [ "x${1}x" != "xx" ];then
TF_CONFIG_DIR=$(find ${MANTL_CONFIG_DIR} -type d -name $1);
TF_CONFIG_BASENAME=$(basename $TF_CONFIG_DIR)
if [ ! -d "${TF_CONFIG_DIR}" ];then
echo TF_CONFIG_DIR is not defined
TF_CONFIG_DIR=''
EC2_INI=''
echo "Aborting"
kill -s TERM $TOP_PID
fi
echo "TF_CONFIG_DIR=${TF_CONFIG_DIR}"
echo "Using MANTL_CONFIG_DIR = ${MANTL_CONFIG_DIR}"
#############################
# .tf file
#############################
TF_PATH="${2:-$TF_PATH}"
if [ x"$TF_PATH"x != "xx" ];then
echo "Found TF_PATH $TF_PATH"
else
TF_PATH=$(find ${TF_CONFIG_DIR} -name "*.tf")
echo "Found TF_PATH=${TF_PATH} in ${TF_CONFIG_DIR}"
fi
# Move a .tf file into the pwd or use the existing one
if [ ! -f "${TF_PATH}" ];then
echo "TF_PATH ${TF_PATH} not specified, using working directory"
if [ $(ls *.tf | wc -l) -eq 0 ];then
echo "No .tf file found in pwd. Please specify one"
echo $usage
kill -s TERM $TOP_PID
else
echo "Using $(ls *.tf) as terraform .tf file"
fi
else
TF_FILE=$(basename ${TF_PATH})
safe_rm "${TF_FILE}"
ln -s "${TF_PATH}" "${TF_FILE}"
echo "Using ${TF_PATH} as .tf file"
fi
# Warn if we have more than one .tf file
#echo "ls *.tf | wc -l = $(ls *.tf | wc -l)"
if [ $(ls *.tf | wc -l ) -gt 1 ];then
echo "WARNING: Found more than 1 .tf file"
echo "Press ctrl C to abort"
sleep 3
echo "Continuing with multiple .tf files"
fi
#############################
# terraform.tfvars file
#############################
TF_VARS_PATH=$(find ${TF_CONFIG_DIR} -name "*.tfvars" -o -name "*.tfvars.json")
echo "TF_VARS_PATH = ${TF_VARS_PATH}"
if [ -e "${TF_VARS_PATH}" ];then
TF_VARS_FILE=$(basename ${TF_VARS_PATH})
echo "TF_VARS_FILE = ${TF_VARS_FILE}"
# if a real file exists don't overwrite it
if [ -f "${TF_VARS_FILE}" ] && [ ! -L "${TF_VARS_FILE}" ];then
echo "ERROR: ${TF_CONFIG_DIR}/${TF_VARS_FILE} exists"
echo "But a regular file ${TF_VARS_FILE} exists in the current directory"
echo "Refusing to delete the file. Please move it"
kill -s TERM $TOP_PID
else
echo "SYMLINK TFVARS FILE INTO PLACE"
safe_rm "terraform.tfvars"
safe_rm "terraform.tfvars.json"
ln -s "${TF_CONFIG_DIR}/${TF_VARS_FILE}" "${TF_VARS_FILE}"
echo "TF_VARS_PATH = ${TF_CONFIG_DIR}/${TF_VARS_FILE}"
fi
else
echo "TF_VARS_FILE [${TF_VARS_FILE}] not found in [${TF_CONFIG_DIR}]"
echo "Using defaults in .tf file"
fi
if [ $(ls *tfvars* | wc -l ) -gt 1 ];then
echo "WARNING: Found more than 1 tfvars file"
echo "$(ls *tfvars* )"
echo "Press ctrl C to abort"
sleep 3
echo "Continuing with multiple .tfvars files, precedence may be undefined"
fi
#############################
# security.yml file and ssl dir
#############################
TF_SECURITY_EXISTS="yes"
if [ -e "${TF_CONFIG_DIR}/${TF_SECURITY_FILE}" ];then
safe_rm "${TF_SECURITY_FILE}"
ln -s "${TF_CONFIG_DIR}/${TF_SECURITY_FILE}" "${TF_SECURITY_FILE}"
else
echo "TF_SECURITY_FILE [${TF_SECURITY_FILE}] not found in [${TF_CONFIG_DIR}]"
safe_rm "${TF_SECURITY_FILE}"
TF_SECURITY_EXISTS="no"
fi
# SSL dir may not exist if security is turned off
TF_SSL_EXISTS="yes"
if [ -e "${TF_CONFIG_DIR}/${TF_SSL_DIR}" ];then
# security-setup already ran. Remove existing links and add correct ones
safe_rm "${TF_SSL_DIR}"
ln -s "${TF_CONFIG_DIR}/${TF_SSL_DIR}" "${TF_SSL_DIR}"
else
echo "TF_SSL_DIR [${TF_SSL_DIR}] not found in [${TF_CONFIG_DIR}]"
echo "Running security-setup"
TF_SSL_EXISTS="no"
safe_rm "${TF_SSL_DIR}"
fi
if [ "${TF_SSL_EXISTS}" == 'no' ] || [ "${TF_SECURITY_FILE}" == 'no' ];then
echo "Removing directory ${TF_SSL_DIR}"
rm -rf ssl
echo "Checking out ssl config files"
#git checkout "${MANTL_SSL_BRANCH}" ssl
echo "Running security-setup"
./${SECURITY_SETUP}
if [ "${TF_SECURITY_EXISTS}" == 'no' ];then
mv security.yml "${TF_CONFIG_DIR}"
fi
if [ "${TF_SSL_EXISTS}" == 'no' ];then
mv ssl "${TF_CONFIG_DIR}"
fi
ln -s "${TF_CONFIG_DIR}/${TF_SSL_DIR}" "${TF_SSL_DIR}"
ln -s "${TF_CONFIG_DIR}/${TF_SECURITY_FILE}" "${TF_SECURITY_FILE}"
fi
# By now ssl should be a symlinked dir if it exists
#############################
# .tfstate file
#############################
safe_rm "${TF_STATE_FILE}"
if [ -e "${TF_CONFIG_DIR}/${TF_STATE_FILE}" ];then
ln -s "${TF_CONFIG_DIR}/${TF_STATE_FILE}" "${TF_STATE_FILE}"
echo "Using TF_STATE_FILE=${TF_CONFIG_DIR}/terraform.tfstate"
else
echo "TF_STATE_FILE [terraform.tfvars or terraform.tfvars.json] not found in [${TF_CONFIG_DIR}]"
echo "Running terraform get and terraform apply"
terraform get
fi
echo "Verify your config files..."
echo "You have 5 seconds to press ctrl C to abort"
sleep 5
terraform apply
fi
echo "Success!"
if [ -f "${1}" ] && [ ! -L "${1}" ];then
echo "moving ${TF_STATE_FILE} to ${TF_CONFIG_DIR}"
mv ${TF_STATE_FILE} ${TF_CONFIG_DIR}
echo "symlinking ${TF_STATE_FILE} back into pwd"
ln -s "${TF_CONFIG_DIR}/${TF_STATE_FILE}" "${TF_STATE_FILE}"
fi
}
tfApply ${1}
| true
|
7e6a262506079b6ba0ec43b19e891e281190215b
|
Shell
|
kaczmarj/container-workshop
|
/build_container.sh
|
UTF-8
| 883
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Name to give container
CONTNAME="$1"
# Path to definition file
DEFFILE="$2"
# if we are running on OpenMind, we need to use `vagrant`
. /etc/os-release
OS=$NAME
if [ "$OS" = "CentOS Linux" ]; then
# Setup variables to prevent dumps into /home/user
export VAGRANT_HOME="$PWD/.vagrant.d"
export VAGRANT_DOTFILE_PATH="$PWD/.vagrant"
export VAGRANT_SERVER_URL="https://app.vagrantup.com"
# Get vagrant file if needed
if [ ! -f "Vagrantfile" ]; then
vagrant init singularityware/singularity-2.4
fi
# initialize vagrant
vagrant up
VBoxManage setproperty machinefolder "$PWD"
vagrant ssh -c "cd /vagrant_data && export SINGULARITY_DISABLE_CACHE=True && \
sudo singularity build $CONTNAME $DEFFILE"
vagrant halt
else
export SINGULARITY_DISABLE_CACHE=True
sudo singularity build $CONTNAME $DEFFILE
fi
| true
|
730e45b176ffaed14678225413fa1d1f4d255113
|
Shell
|
DichromaticLB/wadu
|
/test/common/misc_commands.sh
|
UTF-8
| 2,306
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
if [ -z "$(./wadu -f ../common/empty.json 2>&1| grep 'Required image, arguments and text offset to intialize trace')" ]; then
error "didnt complain on empty configuration"
else
suc "empty configuration test"
fi
if [[ $(./wadu -e "PRINT(CONCAT('test'.'123'.0x4142434445464748).'');") != "test123HGFEDCBA" ]]; then
error "Failed concatenations"
else
suc "concatenations"
fi
cleanup
./wadu -e \
'FILE("out.temp","temp");' -e \
'DUMP("watchadoing","temp");'
if [ ! -f out.temp ] || [[ $(cat out.temp) != "watchadoing" ]]; then
error "opening stream"
else
suc "opening stream"
fi
if [[ $(./wadu -e "DUMP(MAPFROM('../common/ascii.txt'));") != \
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" ]]; then
error "file mapping"
else
suc "file mapping"
fi
if [[ $(./wadu -e "DUMP(MAPFROM('../common/ascii.txt',10,15));") != \
"klmno" ]]; then
error "file mapping slice"
else
suc "file mapping slice"
fi
if [[ $(./wadu -e "DUMP(MAPFROM('../common/ascii.txt',10,15).MAPFROM('../common/ascii.txt',40,150));") != \
"klmnoOPQRSTUVWXYZ0123456789" ]]; then
error "file mapping slice 2"
else
suc "file mapping slice 2"
fi
if [[ $( ./wadu -e '$p1=U8(20); PRINT(SCRIPT("../common/fib"));') != "1a6d" ]]; then
error "calling script"
else
suc "calling script"
fi
if [[ $(./wadu -e 'PRINT(LEN(U8(0))|LEN(U16(0))
<<8|LEN(U32(0))<<16|LEN(0)<<24|LEN("sixtencharacters")<<32);' )\
!= "0000001008040201" ]]; then
error "len checks"
else
suc "len checks"
fi
if [[ $( ./wadu -e 'if(MEMCMP("test","test"))PRINT("OK"."");' ) != "OK" ]]; then
error "MEMCMP1"
else
suc "MEMCMP1"
fi
if [[ $( ./wadu -e 'if(MEMCMP("test","tesx"))PRINT("OK"."");' ) == "OK" ]]; then
error "MEMCMP2"
else
suc "MEMCMP2"
fi
if [[ $( ./wadu -e 'if(MEMCMP("test","tesx",3))PRINT("OK"."");' ) != "OK" ]]; then
error "MEMCMP3"
else
suc "MEMCMP3"
fi
if [[ $( ./wadu -e 'if(MEMCMP("abcdabcdabcdabcd",PATTERN("abcd",4)))
PRINT("OK"."");' ) != "OK" ]]; then
error "Generating pattern"
else
suc "Generating pattern"
fi
if [[ $( ./wadu -e 'PRINT(SYSTEM("cat","call me redford")."");' ) != "call me redford" ]]; then
error "exev 1"
else
suc "execv 1"
fi
if [[ $( ./wadu -e 'PRINT(SYSTEM("pwd","","-P")."");' ) != "$(pwd -P)" ]]; then
error "exev 2"
else
suc "execv 2"
fi
| true
|
cef50475d8b77ece1e030035c56164a6a6065132
|
Shell
|
wnxn/QKE
|
/image/update-cni.sh
|
UTF-8
| 1,079
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright 2018 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPTPATH=$( cd $(dirname $0) ; pwd -P )
K8S_HOME=$(dirname "${SCRIPTPATH}")
echo "*************************"
echo "update cni"
echo "*************************"
source ${K8S_HOME}/version
# install cni through apt
apt install kubernetes-cni=0.7.5-00
CNI_VERSION=v0.6.0
pushd /tmp
wget -c https://pek3a.qingstor.com/k8s-qingcloud/k8s/tool/cni-amd64-${CNI_VERSION}.tgz
mkdir -p /opt/cni/bin
tar -zxvf cni-amd64-${CNI_VERSION}.tgz -C /opt/cni/bin
rm cni*.tgz
popd
| true
|
3049af3d4810a2f4fa592cfea08c8282dcbb8f70
|
Shell
|
WapStart/swordfishd
|
/daemon-src/misc/init.d/configure
|
UTF-8
| 990
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
function configure {
echo "Creating init.d script..."
echo "#!/bin/bash" > sw_init.d_script
echo "PROG="\"$1\" >> sw_init.d_script
echo "PROG_PATH="\"$2\" >> sw_init.d_script
echo "PROG_ARGS=\"-dc "$3"\"" >> sw_init.d_script
echo "PID_PATH=\""$4"\"" >> sw_init.d_script
`cat swordfish_init >> sw_init.d_script`
exit
}
echo "Wellcome to swordfish configure"
read -p "input binary file name: "
PROG=$REPLY
read -p "input installation path: "
PROG_PATH=$REPLY
read -p "input pid-file path:"
PID_PATH=$REPLY
read -p "input configuration full file path (swordfish.ini):"
CFG_PATH=$REPLY
##read -p "input pid-file path:"
##PID_PATH=$REPLY
echo $ARGS
echo "Swordfish will configure with params:"
echo "Program file name: " $PROG
echo "Save in dir: " $PROG_PATH
echo "Path to config file: " $CFG_PATH
read -p "Configure swordfish (y/n)?"
if [ "$REPLY" == "y" ]; then
configure $PROG $PROG_PATH $CFG_PATH $PID_PATH
else
echo "Try again. Bye"
fi
exit
| true
|
b0b4bb073a940d3215b5fd560a4c8514fd7012ec
|
Shell
|
jesyspa/toy-hisp
|
/run_tests.sh
|
UTF-8
| 471
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
readonly progname=$(basename $0)
readonly inputdir="$1"
print_usage() {
echo "usage: $progname inputdir" >&2
exit 1
}
if [[ -z $inputdir ]]; then
print_usage
fi
readonly now=$(date +%Y%m%d-%H-%M-%S)
readonly testdir="test-run-$now"
mkdir "$testdir"
for x in "$inputdir"/*.hisp; do
name=$(basename $x)
./run_one_test.sh "$inputdir" "$testdir" "${name%.hisp}"
if [[ $? -ne 0 ]]; then
echo "$name failed." >&2
fi
done
| true
|
c2e91fc2eb5ddca9c0194cb41d02900bd19452fc
|
Shell
|
diraulo/laptop-deprecated
|
/mac
|
UTF-8
| 4,061
| 3.640625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/sh
# Welcome to the thoughtbot laptop script!
# Be prepared to turn your laptop (or desktop, no haters here)
# into an awesome development machine.
fancy_echo() {
local fmt="$1"; shift
# shellcheck disable=SC2059
printf "\\n$fmt\\n" "$@"
}
append_to_zshrc() {
local text="$1" zshrc
local skip_new_line="${2:-0}"
if [ -w "$HOME/.zshrc.local" ]; then
zshrc="$HOME/.zshrc.local"
else
zshrc="$HOME/.zshrc"
fi
if ! grep -Fqs "$text" "$zshrc"; then
if [ "$skip_new_line" -eq 1 ]; then
printf "%s\\n" "$text" >> "$zshrc"
else
printf "\\n%s\\n" "$text" >> "$zshrc"
fi
fi
}
# shellcheck disable=SC2154
trap 'ret=$?; test $ret -ne 0 && printf "failed\n\n" >&2; exit $ret' EXIT
set -eux
if [ ! -d "$HOME/.bin/" ]; then
mkdir "$HOME/.bin"
fi
if [ ! -f "$HOME/.zshrc" ]; then
touch "$HOME/.zshrc"
fi
# Homebrew
# arm64 or x86_64
arch="$(uname -m)"
# Homebrew
if [ "$arch" = "arm64" ]; then
BREW="/opt/homebrew"
else
BREW="/usr/local"
fi
if [ ! -d "$BREW" ]; then
fancy_echo "Installing Homebrew ..."
sudo mkdir -p "$BREW"
sudo chflags norestricted "$BREW"
sudo chown -R "$LOGNAME:admin" "$BREW"
curl -L https://github.com/Homebrew/brew/tarball/master | tar xz --strip 1 -C "$BREW"
fi
export PATH="$BREW/bin:$PATH"
fancy_echo "Updating Homebrew formulae ..."
brew analytics off
brew update-reset
brew bundle --no-lock --file=- <<EOF
tap "heroku/brew"
tap "homebrew/services"
tap "thoughtbot/formulae"
tap "d12frosted/emacs-plus"
brew "asdf"
brew "coreutils"
brew "coreutils"
brew "emacs-plus"
brew "fd"
brew "fzf"
brew "gh"
brew "git"
brew "libyaml"
brew "openssl"
brew "rcm"
brew "reattach-to-user-namespace"
brew "ripgrep"
brew "shellcheck"
brew "tealdeer"
brew "the_silver_searcher"
brew "tmux"
brew "tree"
brew "universal-ctags/universal-ctags/universal-ctags", args: ["HEAD"]
brew "vim"
brew "watchman"
brew "yarn"
brew "zsh"
# Heroku
brew "heroku/brew/heroku"
brew "parity"
# Databases
brew "redis", restart_service: :changed
# Fonts
brew "font-fira-code"
brew "font-fira-code-nerd-font"
cask "gpg-suite"
EOF
brew upgrade
brew cleanup
# shellcheck disable=SC2016
append_to_zshrc 'export PATH="$HOME/.bin:$PATH"'
# zsh
update_shell() {
local shell_path;
shell_path="$(command -v zsh)"
fancy_echo "Changing your shell to zsh ..."
if ! grep "$shell_path" /etc/shells > /dev/null 2>&1 ; then
fancy_echo "Adding '$shell_path' to /etc/shells"
sudo sh -c "echo $shell_path >> /etc/shells"
fi
sudo chsh -s "$shell_path" "$USER"
}
case "$SHELL" in
*/zsh)
if [ "$(command -v zsh)" != '/usr/local/bin/zsh' ] ; then
update_shell
fi
;;
*)
update_shell
;;
esac
# Heroku Postgres
heroku plugins:install heroku-pg-extras
# asdf
export PATH="$BREW/opt/asdf/bin:$BREW/opt/asdf/shims:$PATH"
# ruby
fancy_echo "Installing Ruby ..."
if ! asdf plugin list | grep -Fq "ruby"; then
asdf plugin add ruby https://github.com/asdf-vm/asdf-ruby
fi
asdf plugin update ruby
asdf install ruby 2.7.4
asdf install ruby 3.0.2
# nodejs
fancy_echo "Installing nodejs ..."
if ! asdf plugin-list | grep -Fq "nodejs"; then
asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs
fi
asdf plugin update nodejs
asdf install nodejs 10.24.1
asdf install nodejs 14.17.6
asdf install nodejs 16.8.0
# erlang
fancy_echo "Installing erlang ..."
if ! asdf plugin-list | grep -Fq "erlang"; then
asdf plugin add erlang https://github.com/asdf-vm/asdf-erlang
fi
asdf plugin update erlang
asdf install erlang 24.0.5
# elixir
fancy_echo "Installing elixir ..."
if ! asdf plugin-list | grep -Fq "elixir"; then
asdf plugin add elixir https://github.com/asdf-vm/asdf-elixir
fi
asdf plugin update elixir
asdf install elixir 1.12.2-otp-24
# TODO: ruby gems
# gem update --system
# number_of_cores=$(sysctl -n hw.ncpu)
# bundle config --global jobs $((number_of_cores - 1))
# if [ -f "$HOME/.laptop.local" ]; then
# fancy_echo "Running your customizations from ~/.laptop.local ..."
# # shellcheck disable=SC1090
# . "$HOME/.laptop.local"
# fi
| true
|
076eae1e2a9ffd46518ae777a6957f446a2709ee
|
Shell
|
OSBI/saiku
|
/saiku-bi-platform-plugin-p7/src/main/plugin/saiku-shareMondrian.sh
|
UTF-8
| 1,510
| 3.625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# This script makes saiku use the same mondrian as pentaho. Useful to share cache and resources (including CDC) on pentaho >= 4.8
#
usage (){
echo
echo "Usage: saiku-shareMondrian.sh -w pentahoWebapPath]"
echo
echo "-w Pentaho webapp server path"
echo "-h This help screen"
echo
exit 1
}
[ $# -gt 1 ] || usage
WEBAPP_PATH='PATH'
ORIGINAL_CMDS=$@
while [ $# -gt 0 ]
do
case "$1" in
--) shift; break;;
-w) WEBAPP_PATH="$2"; shift;;
--) break;;
-*|-h) usage ;;
esac
shift
done
[ $WEBAPP_PATH = 'PATH' ] && usage
if [[ ! -d $WEBAPP_PATH/WEB-INF/lib ]]
then
echo "ERROR: Supplied webapp path doesn't look like a valid web application - missing WEB-INF/lib"
exit 1
fi
# Step 1: Change plugin.spring.xml
echo -n "Changing saiku configuration.... "
cp plugin.spring.xml plugin.spring.xml.bak
sed 's/\(.*datasourceResolverClass.*\)/<!-- \1 -->/; s/\(.*saikuDatasourceProcessor.*\)/<!-- \1 -->/; s/PentahoSecurityAwareConnectionManager" init-method="init"/PentahoSecurityAwareConnectionManager"/; s/ name="connectionPooling" value="true"/ name="connectionPooling" value="false"/;' plugin.spring.xml.bak > plugin.spring.xml
echo Done
# Step 2: Delete saiku's libs
echo -n "Deleting Saiku version of mondrian.jar and related dependencies.... "
rm -f lib/mondrian* lib/mondrian.olap4j* lib/eigenbase*
# Step 3: Copy jar to WEB-INF
echo -n "Copying olap-util.jar to WEB-INF/lib .... "
mv lib/saiku*olap-util* $WEBAPP_PATH/WEB-INF/lib/
echo Done
| true
|
d22efb56d4600e517713adc7b903b9e7ca6ee839
|
Shell
|
jhu-sheridan-libraries/nutch-compose
|
/nutch1/conf/build.sh
|
UTF-8
| 427
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -x
source /etc/profile.d/java.sh
source /etc/profile.d/ant.sh
source /etc/profile.d/nutch.sh
# config dir is copies to runtime/config on build
ELASTICSEARCH_ENDPOINT="localhost"
ELASTICSEARCH_PORT="9200"
envsubst < /opt/conf/nutch-site.xml.tmpl > $NUTCH_HOME/conf/nutch-site.xml
cp -f /opt/conf/regex-urlfilter.txt $NUTCH_HOME/conf/
cp -f /opt/conf/protocols.txt $NUTCH_HOME/conf/
cd /opt/nutch
ant
| true
|
6aac2d974b032fb5eec4797bf0d5871840613da1
|
Shell
|
ScriptHero/action-spicy-proton-generator
|
/entrypoint.sh
|
UTF-8
| 459
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -l
set -e
export branch_name="$1"
spicy_proton_name=$(ruby -rspicy-proton -rdigest/sha1 -e'
branch_name_hash = Digest::SHA1.hexdigest(ENV["branch_name"]).to_i(16)
words = Spicy::Proton.new
adjective = words.adjectives[branch_name_hash % words.adjectives.length]
noun = words.nouns[branch_name_hash % words.nouns.length]
puts "#{adjective}-#{noun}"
')
echo "::set-output name=result::${spicy_proton_name}"
| true
|
498a6e726fc3b78b75e66f297f6811b470824964
|
Shell
|
ibsd/ack
|
/bin/shell/lang/langmenu.sh
|
UTF-8
| 2,572
| 3.765625
| 4
|
[] |
no_license
|
export LANG_ROOT=$SHELL_ROOT/lang
LANG_VARS_FILE=$LANG_ROOT/vars.sh
LANG_GETS_FILE=$LANG_ROOT/gets.sh
USE_TIME=
function lang_menu()
{
echo "==================== LANG strings.xml Kit ===================="
echo -n "0.rechoose languages code [$LANG_CODE]"
if [ -n $LANG_CODE ] ; then echo " --> done!"; fi
echo -n "1.rechoose work dir [$LANG_WORK_DIR]"
if [ -n $LANG_WORK_DIR ] ; then echo " --> done!"; fi
echo "2.copy en strings.xml [$LANG_WORK_DIR/en]"
echo "3.copy $LANG_CODE strings.xml [$LANG_WORK_DIR/$LANG_CODE]"
echo "4.transform strings.xml to strings.csv [en]"
echo "5.transform strings.xml to strings.csv [$LANG_CODE]"
echo "6.gen diff [en-->$LANG_CODE]"
echo "7.remove unused module"
echo "8.merge single file"
echo "c.clean work dirs"
echo "t.use time [$USE_TIME]"
echo "q.quit"
echo -n ":"
local mtodo
read mtodo
case $mtodo in
0) echo "NOTE: languages code are en,cn,fr etc."
LANG_CODE=
lang_reload
;;
1) echo "create a new dir..."
LANG_WORK_DIR=
lang_reload
;;
2) echo "find values ..."
$USE_TIME lang_copy_strings
lang_load
;;
3) echo "find values-$LANG_CODE ..."
$USE_TIME lang_copy_strings $LANG_CODE
lang_load
;;
4) echo "transform strings.xml under $LANG_WORK_DIR/en ..."
$USE_TIME lang_strings_xml2csv en
lang_load
;;
5) echo "transform strings.xml under $LANG_WORK_DIR/$LANG_CODE ..."
$USE_TIME lang_strings_xml2csv $LANG_CODE
lang_load
;;
6) echo "gen diff in $LANG_WORK_DIR/en-$LANG_CODE"
lang_gen_diff $LANG_CODE
lang_load
;;
7) echo "remove unused in $LANG_WORK_DIR/en-$LANG_CODE"
lang_clean $LANG_CODE
lang_load
;;
8) echo "merge .csv in $LANG_WORK_DIR/en-$LANG_CODE"
lang_merge $LANG_CODE
lang_load
;;
c) echo "will remove all work dirs ..."
lang_rm_work_dir
lang_reload
;;
t) echo "use time on/off"
lang_switch_time
lang_load
;;
q) echo "Bye!"
;;
*) echo "[ERROR] Invaild input!"
lang_load
;;
esac
}
function lang_switch_time()
{
local tstate=
if [ "$USE_TIME" = "time" ] ; then
tstate='on'
fi
if [ "$USE_TIME" = "" ] ; then
tstate='off'
fi
if [ "$tstate" = "on" ] ; then
USE_TIME=
fi
if [ "$tstate" = "off" ] ; then
USE_TIME=time
fi
}
function lang_reload()
{
lang_uninit_var
lang_load
}
function lang_load()
{
source $LANG_VARS_FILE
source $LANG_GETS_FILE
lang_menu
}
lang_load
| true
|
2f482cd1d6deef616f0cd2db2e724ecd40c41c6d
|
Shell
|
PacBSD/abs
|
/devel/pac-ports/PKGBUILD
|
UTF-8
| 871
| 2.921875
| 3
|
[] |
no_license
|
#
pkgname=PacPorts
pkgver=20151231
pkgrel=1
arch=('any')
license=('GPLv2')
backup=('usr/local/etc/mkportpkg.conf')
url='http://github.com/Amzo/pac-ports'
makedepends=('git')
source=(git://github.com/Amzo/PacPorts)
md5sums=('SKIP')
install=pac-ports.install
pkgver() {
date '+%Y%m%d'
}
package() {
cd $pkgname
install -dm755 ${pkgdir}/usr/local/{etc,bin,share/pacports}
install -m755 mkportpkg ${pkgdir}/usr/local/bin
install -m644 mkportpkg.conf ${pkgdir}/usr/local/etc/
files=($(find . -type f -depth 3 -not -path "./.git/*"))
for _files in ${files[@]}; do
if [[ ! -d "${pkgdir}/usr/local/share/pacports/${_files%/*}" ]]; then
install -dm755 "${pkgdir}/usr/local/share/pacports/${_files%/*}"
else
install -m644 -o root -g wheel "${_files}" ${pkgdir}/usr/local/share/pacports/"${_files}"
fi
done
}
# vim:set sw=2 sts=2 et:
| true
|
100ae362871a3b514e940302e493111a20ca516d
|
Shell
|
dask/dask-gateway
|
/continuous_integration/docker/hadoop/files/scripts/setup-hadoop.sh
|
UTF-8
| 2,682
| 3.546875
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
set -ex
# Tweak hadoop configuration and permissions:
#
# - hadoop is unpacked with default configuration in etc/hadoop, we relocate
# that to /etc/hadoop/conf.empty.
#
mv /opt/hadoop/etc/hadoop /etc/hadoop/conf.empty
#
# - log4j.properties is a requirement to have in the hadoop configuration
# directory that we don't wan't to redefine, so we copy it from the default
# configuration to our configurations.
#
cp /etc/hadoop/conf.empty/log4j.properties /etc/hadoop/conf.simple/
cp /etc/hadoop/conf.empty/log4j.properties /etc/hadoop/conf.kerberos/
#
# - Create /opt/hadoop/logs directory with high group permissions to ensure it
# isn't created with narrow permissions later when running "hdfs namenode".
#
mkdir -p /opt/hadoop/logs
chmod g+w /opt/hadoop/logs
#
# - Create /var/tmp directory with permissions to ensure the hadoop group is
# propegated and have right to create new directories. Note that the hdfs user
# will later create /var/tmp/dfs but then get to own it even though it will be
# owned also by the hadoop group due to the 2xxx part of these permissions.
#
chown -R root:hadoop /var/tmp
chmod -R 2770 /var/tmp
#
# - Generate a key to authenticate web access during the brief time we use the
# /etc/hadoop/conf.simple configuration as part of building the docker image.
#
dd if=/dev/urandom bs=64 count=1 > /etc/hadoop/conf.simple/http-secret-file
chown root:hadoop /etc/hadoop/conf.simple/http-secret-file
chmod 440 /etc/hadoop/conf.simple/http-secret-file
#
# - Declare HDFS configuration to use temporarily, let /opt/hadoop/etc/hadoop
# point to /etc/hadoop/conf.simple.
#
alternatives --install /opt/hadoop/etc/hadoop hadoop-conf /etc/hadoop/conf.simple 50
alternatives --set hadoop-conf /etc/hadoop/conf.simple
# Initialize HDFS filesystem with content to test against
#
# 1. Delete all hdfs files and start with a clean slate.
#
sudo --preserve-env --user hdfs \
hdfs namenode -format -force
#
# 2. Add to hosts to resolve a domain name, /etc/hosts will be cleared when the
# container starts though, see https://stackoverflow.com/a/25613983. This
# container is supposed to start with "--hostname master.example.com".
#
echo "127.0.0.1 master.example.com" >> /etc/hosts
#
# 3. Start "hdfs namenode" and "hdfs datanode" but detach with "&" to continue
# doing other things.
#
sudo --preserve-env --user hdfs \
hdfs namenode &
sudo --preserve-env --user hdfs \
hdfs datanode &
#
# 4. Run a script to bootstrap the HDFS filesystem with content for testing.
#
sudo --preserve-env --user hdfs \
/scripts/init-hdfs.sh
#
# 5. Shut down started "hdfs namenode" and "hdfs datanode" processes.
#
pkill java
| true
|
fe47ce1ee309946d165606661db29b288e5b7196
|
Shell
|
padeoe/one-key-proxy
|
/my_proxy.sh
|
UTF-8
| 1,937
| 3.578125
| 4
|
[] |
no_license
|
# 读取命令行参数
if [ "$#" -ne 3 ]; then
echo "Usage: $0 DOMAIN PORT EMAIL" >&2
exit 1
fi
domain=$1
port=$2
email=$3
# 安装软件
apt-get update
apt-get install -y git apt-transport-https
# 生成ssl证书
git clone https://github.com/letsencrypt/letsencrypt /opt/letsencrypt
/opt/letsencrypt/letsencrypt-auto certonly --quiet --agree-tos --email $email --standalone -d $domain
# 安装erlang虚拟机
declare -a sources=("deb https://packages.erlang-solutions.com/ubuntu trusty contrib" "deb https://packages.erlang-solutions.com/ubuntu saucy contrib" "deb https://packages.erlang-solutions.com/ubuntu precise contrib")
for source in "${sources[@]}"
do
if grep -q "$source" /etc/apt/sources.list;then
echo $source
else
echo $source | tee --append /etc/apt/sources.list
fi
done
wget https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc
apt-key add erlang_solutions.asc
apt-get update
apt-get install -y esl-erlang
# 部署 ssl http 代理
wget https://sakura.njunova.com/file/https_proxy.tar.gz
tar -xzvf https_proxy.tar.gz
echo '#!/bin/sh
cd /root/https_proxy
erl -eval "server:start(100,1234,'$port',\"/etc/letsencrypt/live/'$domain'\")." -detached >/dev/null 2>&1'> https_proxy/start.sh
cd https_proxy
bash start.sh
# 检测是否启动成功
test_ps=`ps -ef|grep beam`
if [[ $test_ps == *"erlang"* ]]; then
echo "SSL HTTP Proxy Running now!"
else
echo "SSL HTTP Proxy Launch Failed, Exit!"
exit 1
fi
# 设置开机自动启动
https_proxy_dir=$(pwd)
script=$https_proxy_dir/start.sh
https_startup_crontab='@reboot '$USER' '$script''
if grep -q "$https_startup_crontab" /etc/crontab;then
echo "ssl http proxy has been ready for startup on boot now!"
else
echo '@reboot '$USER' '$script'' | tee --append /etc/crontab
echo "ssl http proxy can startup on boot now!"
fi
# 输出配置
echo 'All done! User list(username:password):'
cat auth
echo ""
| true
|
dbb7132694ed8579f1e1ad4b6a355c7866dd331c
|
Shell
|
sandyherho/bash-script_pribadi
|
/cek_root.sh
|
UTF-8
| 441
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Memastikan bahwa user yang mengeksekusi script ini root user atau bukan.
# Menampilkan UID
echo "UID kamu adalah ${UID}." # UID root selalu 0
# Menampilkan apakah user ini root user atau bukan
if [[ "${UID}" -eq 0 ]] # -lt lebih kecil, -gt lebih besar, -ne tidak sama
#perhatikan spasi 1 kali habis tanda kurung siku kedua
then
echo "Anda adalah root."
else
echo "Anda bukan root."
fi # menutup pernyataan if
| true
|
97cd30643f948091107a3b00fc4f57f45f4d539f
|
Shell
|
n8felton/DeployStudio
|
/DeployStudio Admin.app/Contents/Frameworks/DSCore.framework/Versions/A/Resources/Scripts/ds_partition.sh
|
UTF-8
| 4,089
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
echo "ds_partition.sh - v2.2 ("`date`")"
ARCH=`arch`
BOOT_DEVICE=`diskutil info / | grep "Device Node:" | sed s/Device\ Node://g | sed s/\ *//`
if [ "_${BOOT_DEVICE}" = "_" ]
then
echo "RuntimeAbortWorkflow: cannot get boot device"
echo "ds_partition.sh - end"
exit 1
fi
SLICE_INDEX=`expr ${#BOOT_DEVICE} - 1`
while [ ${SLICE_INDEX} -ge 10 ];
do
if [ "${BOOT_DEVICE:${SLICE_INDEX}:1}" = "s" ]
then
BOOT_DEVICE=${BOOT_DEVICE:0:${SLICE_INDEX}}
break
fi
SLICE_INDEX=`expr ${SLICE_INDEX} - 1`
done
echo "Boot device: "${BOOT_DEVICE}
DISK_NB=0
DEVICE_FOUND=0
# The internal disk is usualy associated to the device /dev/disk0.
# The following loop looks for the first ATA/SATA/SAS drive available (different from the boot drive).
# Change "-ATA-SATA-SAS-" to "-FireWire-Thunderbolt-" when restoring in Target Mode or with external firewire drives.
SUPPORTED_PROTOCOLS="-ATA-SATA-SAS-"
while [ ${DISK_NB} -le 20 ];
do
TARGET_DEVICE=/dev/disk${DISK_NB}
echo "Testing device: "${TARGET_DEVICE}
if [ "_${BOOT_DEVICE}" = "_${TARGET_DEVICE}" ]
then
echo " -> boot drive"
else
PROTOCOL=`diskutil info ${TARGET_DEVICE} | grep "Protocol:" | sed s/Protocol://g | sed s/\ *//`
if [ ! "_"`echo ${SUPPORTED_PROTOCOLS} | sed s/"-${PROTOCOL}-"//g` = "_${SUPPORTED_PROTOCOLS}" ]
then
RAID_MEMBER=`diskutil list ${TARGET_DEVICE} | grep "Apple_RAID"`
if [ -z "${RAID_MEMBER}" ]
then
DEVICE_FOUND=1
break
else
echo " -> RAID set member"
fi
else
echo " -> non ${SUPPORTED_PROTOCOLS} drive (protocol=${PROTOCOL})"
fi
fi
DISK_NB=`expr ${DISK_NB} + 1`
done
if [ "_${DEVICE_FOUND}" = "_0" ]
then
echo "RuntimeAbortWorkflow: no internal drive available found"
echo "ds_partition.sh - end"
exit 1
fi
# Display the final target device
echo "Target device: "${TARGET_DEVICE}
# Find out the disk size
DISK_SIZE_INFO=`diskutil info "${TARGET_DEVICE}" | grep "Total Size:" | sed s/Total\ Size://g`
DISK_SIZE=`echo ${DISK_SIZE_INFO} | awk '{print $1}' | sed -e 's/\..//'`
DISK_SIZE_UNIT=`echo ${DISK_SIZE_INFO} | awk '{print $2}'`
if [ "${DISK_SIZE_UNIT}" = "MB" ]
then
DISK_SIZE_IN_BYTES=`expr \( ${DISK_SIZE} + 1 \) \* 1048576`
elif [ "${DISK_SIZE_UNIT}" = "GB" ]
then
DISK_SIZE_IN_BYTES=`expr \( ${DISK_SIZE} + 1 \) \* 1048576 \* 1024`
elif [ "${DISK_SIZE_UNIT}" = "TB" ]
then
DISK_SIZE_IN_BYTES=`expr \( ${DISK_SIZE} + 1 \) \* 1048576 \* 1048576`
fi
echo "Disk size: "${DISK_SIZE_IN_BYTES}" bytes"
# Compute the partitions size
PARTITIONS_COUNT=2
P1_SIZE=`expr ${DISK_SIZE_IN_BYTES} \* 30 / 100`
P1_NAME="System"
P1_FORMAT="Journaled HFS+"
P2_SIZE=`expr ${DISK_SIZE_IN_BYTES} - ${P1_SIZE}`
P2_NAME="Data"
P2_FORMAT="Journaled HFS+"
echo "${P1_NAME} volume size set to: ${P1_SIZE} bytes"
echo "${P2_NAME} volume size set to: ${P2_SIZE} bytes"
echo "Total: "`expr ${P1_SIZE} + ${P2_SIZE}`" bytes"
# Partition the device
echo "Unmounting device "${TARGET_DEVICE}
OUTPUT=`diskutil unmountDisk force "${TARGET_DEVICE}" 2>&1`
if [ ${?} -ne 0 ] || [[ ! "${OUTPUT}" =~ "successful" ]]
then
echo "RuntimeAbortWorkflow: cannot unmount the device ${TARGET_DEVICE}"
echo "ds_partition.sh - end"
exit 1
fi
if [ "${ARCH}" = "i386" ]
then
PartitionMapOption=GPTFormat
else
PartitionMapOption=APMFormat
fi
echo "Partitioning disk "${TARGET_DEVICE}
diskutil partitionDisk $TARGET_DEVICE $PARTITIONS_COUNT ${PartitionMapOption} \
${P1_FORMAT} "${P1_NAME}" ${P1_SIZE}B \
${P2_FORMAT} "${P2_NAME}" ${P2_SIZE}B 2>&1
if [ ${?} -ne 0 ]
then
echo "RuntimeAbortWorkflow: cannot partition the device ${TARGET_DEVICE}"
echo "ds_partition.sh - end"
exit 1
fi
echo "Mounting all volumes of device "${TARGET_DEVICE}
diskutil mountDisk ${TARGET_DEVICE} 2>&1
if [ ${?} -ne 0 ]
then
echo "RuntimeAbortWorkflow: cannot mount the device ${TARGET_DEVICE}"
echo "ds_partition.sh - end"
exit 1
fi
echo "Give write access to all users on volume "${P2_NAME}
chown root:admin "/Volumes/${P2_NAME}" 2>&1
chmod 777 "/Volumes/${P2_NAME}" 2>&1
echo "ds_partition.sh - end"
exit 0
| true
|
d1b67c2848ecfb996417df179621f0a856589f2e
|
Shell
|
joek295/scripts
|
/dmenu/dbc
|
UTF-8
| 753
| 3.703125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# calculator based on bc and dmenu
# Based on djp's script on the Arch forums. Made sh compatible, and
# uses bc instead of python for greater power.
if [ -f "$HOME/.config/dmenurc" ]; then
. $HOME/.config/dmenurc
else
DMENU="dmenu -i -l 20"
fi
hist="$(xsel -o)"
(echo "$hist" | bc -l 2>/dev/null) || hist=False
calc=$(echo $hist | $DMENU -l 20 -p Calculate:)
prev="$calc"
while true; do
case $calc in
""|"exit")
break
;;
*)
prev="$calc"
hist="$prev\n$hist"
calc=$(echo "$calc; print \"\n$hist\"" | bc -l 2>/dev/null\
| $DMENU -l 20 -p "Calculate:")
;;
esac
done
echo "$prev" | bc -l 2>/dev/null | xsel -i
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.