blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4331ae3273b7152fc463086391d5b9654077694c
|
Shell
|
sfoolish/2000-3000-hours
|
/4_Shell/2_tools/squid_install.sh
|
UTF-8
| 1,741
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Refresh the APT package index and install the squid3 proxy package.
squid3_package_install() {
    apt-get update
    apt-get install -y squid3
}
# Back up the stock squid3 configuration (once) and write our own
# forwarding-proxy configuration listening on port 3128.
function squid3_config() {
# Only back up the first time; never clobber an existing backup.
if [[ ! -f /etc/squid3/squid.conf_bkp ]]; then
mv /etc/squid3/squid.conf /etc/squid3/squid.conf_bkp
fi
# NOTE(review): `http_access allow all` below makes this an open proxy —
# confirm that is intended before exposing port 3128 publicly.
cat > /etc/squid3/squid.conf << EOF
acl all src 0.0.0.0/0.0.0.0
acl SSL_ports port 443
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
acl wan dst 0.0.0.0/0.0.0.0
http_access wan
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
http_access allow all
http_port 3128
coredump_dir /var/spool/squid3
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880
refresh_pattern . 0 20% 4320
EOF
}
# Restart squid3 via the init system; if no squid3 process shows up in the
# process table afterwards, fall back to launching the daemon directly in
# the background with our config.
squid3_service_start() {
    service squid3 restart
    # Idiom fix: test the pipeline directly instead of inspecting $? on the
    # following line (which is fragile if any statement is inserted between).
    if ! ps aux | grep squid3 | grep -v grep > /dev/null; then
        /usr/sbin/squid3 -N -YC -f /etc/squid3/squid.conf > /dev/null 2>&1 &
    fi
}
# Entry point: install the package, write the config, start the service.
main() {
    squid3_package_install
    squid3_config
    squid3_service_start
}
main "$@"
| true
|
7de3e30532f75cd7b061241ed0a77575a6cfc5d7
|
Shell
|
aptira/cloudify-manager-install
|
/packaging/install.sh
|
UTF-8
| 538
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the post-install instructions for the Cloudify Manager CLI.
# Quoted heredoc: nothing inside is expanded; output matches the original
# echo byte for byte (leading blank line and trailing blank line included).
cat <<'EOF'

###########################################################################
Cloudify installer is ready!
To install Cloudify Manager, run:
cfy_manager install --private-ip <PRIVATE_IP> --public-ip <PUBLIC_IP>
(Use cfy_manager -h for a full list of options)
You can specify more installation settings in /etc/cloudify/config.yaml. If you
specify the public and private IP addresses in the config.yaml file, run:
cfy_manager install
###########################################################################

EOF
| true
|
0da5dd46eb753587da7f7ad70ebfe7a84b909f6f
|
Shell
|
ShuaiZhao/SDN-BenchMark-Shuai
|
/ryu/app/SDN-BenchMarkProject/scripts/SDN_benchmark_switch_remote_ops/remote_delete_ovs_flows.sh
|
UTF-8
| 469
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clear local benchmark logs, then delete all OpenFlow flows on each remote
# OVS switch over ssh.
USERNAME=szb53
# BUG FIX: the original line had a stray '.' after the closing quote
# ( ...149". ), which got appended to the value and turned the last host
# into the bogus address "128.104.159.149.".
Switches="128.104.159.147 128.104.159.148 128.104.159.149"
OVS_BR=ofpbr
# Protocol name spelled "OpenFlow13" per ovs-ofctl(8); the original said
# "Openflow13" — confirm on the target OVS version if it was relying on
# case-insensitive matching.
CMD_Delete_Flow="hostname; sudo ovs-ofctl -O OpenFlow13 del-flows ${OVS_BR}"
echo "Remove ICMP_LOG"
rm ./network-data2/ofp_icmp_log.db
echo "Remove IPERF_LOG"
rm ./network-data2/ofp_iperf_log.db
echo "Remove ICMP REROUTE"
rm ./network-data2/ofp_icmp_reroute_log.db
for ovs in ${Switches} ; do
    # Log in as ${USERNAME} (the variable was defined but unused before,
    # which strongly suggests this was the intent).
    ssh "${USERNAME}@${ovs}" "${CMD_Delete_Flow}"
    echo "Deleting flows"
    echo -e "\n"
done
| true
|
b659f128a509104ae0c58007285f0e22815f0e16
|
Shell
|
rahulyesantharao/ryunzip
|
/scripts/testfile.sh
|
UTF-8
| 1,405
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Restore or archive the test's working files, then exit.
#   $1 - test name (basename, no extension)
#   $2 - exit status: 0 = passed, non-zero = failed
cleanup() {
    local name=$1
    local status=$2
    if [ "$status" -eq 0 ]; then
        # Passed: archive the pristine copy and drop the unzipped output.
        mv "$name.1.txt" "passed/$name.txt"
        rm "$name.txt"
    else
        # Failed: put the pristine copy back in place of the output.
        rm -f "$name.txt"
        mv "$name.1.txt" "$name.txt"
    fi
    rm "$name.txt.gz"
    exit "$status"
}
# Parameters Parsing
filename=""
verbose=0
if [ $# -ne 1 ] && [ $# -ne 2 ]; then # check number of inputs
echo "usage: testfile.sh [-v] <test name>"
exit 1
fi
# parse out parameters
if [ $# -eq 1 ]; then # just filename
filename=$1
else # flag and filename
if [ "$1" != "-v" ]; then # check flag
echo -e "invalid flag: $1\nusage: testfile.sh [-v] <testname>"
exit 1
fi
filename=$2
verbose=1
fi
# make sure file exists
if [ ! -f "tests/$filename.txt" ]; then
echo "tests/$filename.txt does not exist"
exit 1
fi
# switch to the tests directory
cd tests/
# make a copy of the original (cleanup later restores or archives it)
cp "$filename.txt" "$filename.1.txt"
# zip the original (gzip replaces $filename.txt with $filename.txt.gz)
gzip "$filename.txt"
# unzip the original with the tool under test; -v when requested
echo "Unzipping $filename.txt.gz:"
if [ $verbose -eq 0 ]; then
../ryunzip "$filename.txt.gz"
else
../ryunzip -v "$filename.txt.gz"
fi
if [ $? -ne 0 ]; then # ryunzip failed
echo "Unzip Failed"
cleanup "$filename" 1
fi
echo "Unzip Succeeded!"
# check the differences
# diff exits non-zero when the round-tripped file differs from the copy
echo -e "\nDifferences:"
diff "$filename.txt" "$filename.1.txt"
if [ $? -ne 0 ]; then # differences exist
cleanup "$filename" 1
fi
# everything passed!
echo "None!"
cleanup "$filename" 0
| true
|
bbb1182c609c53e7cbe4c52195ec6342f95e7a6a
|
Shell
|
Payero/PhotoMerger
|
/bin/photomerger
|
UTF-8
| 524
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launcher for PhotoMerger: resolves this script's real location (following
# any chain of symlinks) so PM_HOME points at the install root, then runs
# the application jar.
# Getting the directory where is running
PRG="$0"
# NOTE(review): progname is computed but never used below.
progname=`basename "$0"`
saveddir=`pwd`
# need this for relative symlinks
dirname_prg=`dirname "$PRG"`
cd $dirname_prg
# Walk the symlink chain until PRG names the real script file.
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
# Extract the link target from the "... -> target" part of ls -ld output.
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
# Relative target: resolve it against the link's own directory.
PRG=`dirname "$PRG"`"/$link"
fi
done
# The install root is the parent of the directory holding the real script.
PM_HOME=`dirname "$PRG"`/..
cd $saveddir
#make it fully qualified
PM_HOME=`cd "$PM_HOME" && pwd`
java -jar ${PM_HOME}/lib/photo-merger.jar
| true
|
41fd782e133b90272f33a1a7099a50de48905e2a
|
Shell
|
tvkent/Grandiflora_recombination
|
/Scripts/vcf_to_ldhat.sh
|
UTF-8
| 523
| 2.609375
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Convert a VCF file to LDhat format (Tyler Kent, October 27, 2015).
# Run this from the base directory.
set -eu

# Input selection
id="scaf1_sub12.vcf.recode"
chr="scaffold_1"

# Derived paths (all under the project tree)
base="./Grandiflora_recombination"
data="${base}/Data/${id}.vcf.gz"
err="${base}/Results/Err/${id}_vcf_to_ldhat.err"
out="${base}/Results/${id}.ldhat"
temp="${base}/Results/temp"

# vcftools binary to use
vcfdir="/data/aplatts/data/apps/align/vcftools-0.1.14/bin/vcftools"

"${vcfdir}" --gzvcf "${data}" --ldhat-geno --chr "${chr}" --temp "${temp}" --out "${out}" 2> "${err}"
| true
|
9f6fe1ca65f93581e1038cf848c4092b5759bd73
|
Shell
|
aws/aws-ec2-instance-connect-config
|
/bin/integration-test/run_instance.sh
|
UTF-8
| 3,274
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# NOTE(review): the original header lines here ("Reads authorized keys
# blob $3 ...") described a different script; replaced with accurate docs.
# Attempts to launch an instance to the given specification
# Outputs instance ID on success or where it failed otherwise
# Usage: -t instance-type -r region -a ami -k key-pair -s subnet
#        -g security-group -n name-tag -o os-user -p private-key-file
while getopts ":t:r:a:k:s:g:n:o:p:" opt ; do
case "${opt}" in
t)
instance_type="${OPTARG}"
;;
r)
region="${OPTARG}"
;;
a)
ami_id="${OPTARG}"
;;
k)
key_name="${OPTARG}"
;;
s)
subnet_id="${OPTARG}"
;;
g)
security_group_id="${OPTARG}"
;;
n)
name_tag="${OPTARG}"
;;
o)
osuser="${OPTARG}"
;;
p)
private_key="${OPTARG}"
;;
*)
echo "Usage: $0 -t instance-type -r aws-region -a ami -k ec2-key-pair -s subnet -g security-group -n name-tag-value"
exit 1
;;
esac
done
# Launch; shutdown behavior "terminate" keeps failed instances from leaking.
launch_output=$(aws ec2 run-instances --region "${region}" --image-id "${ami_id}" --key-name "${key_name}" --security-group-ids "${security_group_id}" --subnet-id "${subnet_id}" --instance-initiated-shutdown-behavior "terminate" --instance-type "${instance_type}" --tag-specifications "[{\"ResourceType\":\"instance\",\"Tags\":[{\"Key\":\"Name\",\"Value\":\"${name_tag}\"}]}]")
launch_code=$?
if [ "${launch_code}" -ne 0 ] ; then
echo "Instance launch failed!"
exit "${launch_code}"
fi
# NOTE(review): grep/cut over the CLI's JSON output is fragile and assumes
# the default output format; jq would be more robust.
instance_id=$(echo "${launch_output}" | grep \"InstanceId\" | cut -d '"' -f 4)
running=0
try="0"
# Wait up to 5 minutes for the instance to come up, checking every 5 seconds
while [ $try -lt 60 ] ; do
aws ec2 describe-instances --instance-ids "${instance_id}" | grep "Name" | grep -q "running"
launch_code=$?
if [ "${launch_code}" -eq 0 ] ; then
# Setting try=60 terminates the polling loop on the next test.
try="60"
running=1
else
try=$((try+1))
sleep 5
fi
done
if [ $running -eq 0 ] ; then
echo "Timed out waiting for instance to enter 'running' state"
exit 1
fi
# Wait a bit extra to let sshd come up
ssh_try="0"
public_ip=$(aws ec2 describe-instances --instance-ids "${instance_id}" | grep "PublicIp" | cut -d '"' -f 4 | uniq)
# Poll ssh for up to 5 minutes (30 tries x 10 s).
while [ $ssh_try -lt 30 ] ; do
ssh -q -i "${private_key}" -o StrictHostKeyChecking=no "${osuser}@${public_ip}" exit 2>&1
launch_code="${?}"
if [ "${launch_code}" -eq 0 ] ; then
# Everything's ready
echo "${instance_id}"
exit 0
fi
ssh_try=$((ssh_try+1))
sleep 10
done
echo "Timed out waiting for sshd to start on instance (or keypair is misconfigured)"
exit 1
| true
|
78277555bd0cbc57728c740654eba2dc87eddd33
|
Shell
|
dt-demos/keptn-docker-tasks
|
/build.sh
|
UTF-8
| 365
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the docker image named by $1, tagged with the contents of ./version.
# usage: ./build.sh <image-name>
image=$1
tag=$(cat version)
# Quoting fix: the original unquoted `[ -z $image ]` breaks (or misbehaves)
# when the argument contains whitespace; same for the docker -t value.
if [ -z "$image" ]; then
  echo "ABORTING: Image is a required argument"
  echo "example usage: ./build.sh dtdemos/keptn-docker-tasks"
  exit 1
fi
echo "==============================================="
echo "build $image:$tag"
echo "==============================================="
docker build --force-rm -t "$image:$tag" .
| true
|
4466cbb367f6aec826105fb4cf82a8eddba1d582
|
Shell
|
Stepdan/ImageVis3D
|
/Scripts/nightly.sh
|
UTF-8
| 1,772
| 3.28125
| 3
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Nightly build: clean, qmake, compile, and package ImageVis3D, recording
# versions of the build into the "latest" file.
# NOTE(review): declared as /bin/sh but uses bashisms (`source`, `&>`);
# in practice this must run under bash.
source Scripts/util.sh
spec="linux-g++"
if test `uname` = "Darwin" ; then
spec="macx-g++"
fi
vcs_update
version
revision
# Skip the cleaning pass when invoked with --dirty.
if test "x$1" != "x--dirty" ; then
make clean &>/dev/null
# manual clean, just in case Qt's clean isn't good enough (it isn't.)
find . \( -iname \*.o -or -iname moc_\*.cpp -or -iname ui_\*.h \) -delete
fi
rm -fr Build/ImageVis3D.app
rm -f Build/ImageVis3D warnings
# Find qmake -- expect it in PATH, but the user can set QT_BIN to pick a
# specific Qt.
if test -n "${QT_BIN}" -a -x "${QT_BIN}/qmake" ; then
qmake="${QT_BIN}/qmake"
echo "QT_BIN set; using ${qmake} instead of `which qmake`"
else
qmake="qmake"
fi
# use qmake to generate makefiles, potentially in debug mode.
# R_TUVOK / R_IMAGEVIS3D are set by the `revision` helper sourced above.
D_TUVOK="-DTUVOK_SVN_VERSION=${R_TUVOK}"
D_IV3D="-DIV3D_SVN_VERSION=${R_IMAGEVIS3D}"
CF="-fno-strict-aliasing ${D_TUVOK} ${D_IV3D} -U_DEBUG -DNDEBUG"
CFG="release"
if test "x$1" = "x-debug"; then
CF="${CF} -Wextra -D_GLIBCXX_DEBUG -D_DEBUG -UNDEBUG -g"
CFG="debug"
fi
${qmake} \
QMAKE_CONFIG=${CFG} \
QMAKE_CFLAGS="${CF}" \
QMAKE_CXXFLAGS+="${CF}" \
QMAKE_LFLAGS="${CF} ${LDF} ${LDFLAGS}" \
-spec ${spec} \
-recursive
if test $? -ne 0 ; then
die "qmake failed."
fi
# Compiler warnings go to the "warnings" file; `try` aborts on failure.
make -j3 2> warnings
try make
# NOTE(review): tarball/zipfile are computed here but never used below.
tarball=$(nm_tarball)
zipfile=$(nm_zipfile)
if test `uname` = "Darwin" ; then
echo "Building app file ..."
try bash Scripts/mk_app.sh
elif test `uname` = "Linux" ; then
echo "Packaging ..."
try bash Scripts/mk_tarball.sh
fi
# Record the just-built versions/revisions for the download page.
rm -f latest
echo "${IV3D_MAJOR}.${IV3D_MINOR}.${IV3D_PATCH}" > latest
echo "${R_IMAGEVIS3D}" >> latest
echo "${TUVOK_MAJOR}.${TUVOK_MINOR}.${TUVOK_PATCH}" >> latest
echo "${R_TUVOK}" >> latest
if test -f warnings ; then
echo "Warnings:"
cat warnings
fi
| true
|
f89e927356698f0d3b12ce9cdee1623fed554163
|
Shell
|
pump-io/pump.io
|
/util/migrate-uploaddir.sh
|
UTF-8
| 2,538
| 4.21875
| 4
|
[
"Apache-2.0",
"OFL-1.1",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Migrate a pump.io config from the old `uploaddir` setting to the new
# `datadir` + `enableUploads` layout, moving the uploaded files on disk to
# match. Usage: migrate-uploaddir.sh /path/to/pump.io.json
function run() {
umask 077
# Check for a sed with \?
if ! [ $(echo abc | sed 's/b\?c//') = a ]; then
echo $0: your sed does not support '\?' 1>&2
exit 1
fi
# Sanity check the provided path
if [ -z ${1+x} ]; then
echo Please specify the location of your pump.io.json 1>&2
exit 1
elif [ -e "$1" ]; then
echo Using $1 as the location of \`pump.io.json\`.
JSONFILE="$1"
else
echo $0: $1: No such file or directory 1>&2
exit 1
fi
# Check for jq
warn_jq_install() {
echo \`jq\` was automatically installed\; you may want to remove it with APT 1>&2
}
if ! type jq > /dev/null; then
if [ -e /etc/os-release ]; then
# NOTE(review): /etc/os-release is tested for existence but never
# sourced, so $ID and $ID_LIKE are almost certainly unset here and the
# apt branch can never trigger — confirm and add `. /etc/os-release`
# if auto-install was intended.
if [ $ID = 'debian' ] || [ $ID_LIKE = 'debian' ]; then
echo Automatically installing dependency \`jq\`.
trap warn_jq_install EXIT
apt install jq
else
echo $0: \`jq\` not available and unable to automatically install it 1>&2
exit 1
fi
else
echo $0: \`jq\` not available and unable to automatically install it 1>&2
exit 1
fi
fi
# Check for `datadir` and `enableUploads` already being there
# Bug: this will presumably fail if either of these are explicitly set to null, but that's such an edge case, who cares
for i in datadir enableUploads; do
if ! [ $(jq '.'$i $JSONFILE) = null ]; then
echo $0: $JSONFILE: \`$i\` key already present 1>&2
exit 1
fi
done
# Make sure there's an `uploaddir` option
UPLOADDIR="$(jq -r '.uploaddir' "$JSONFILE")"
if [ $UPLOADDIR = null ]; then
echo $0: $JSONFILE: no \`uploaddir\` key \(did you already migrate?\) 1>&2
exit 1
else
echo Found \`uploaddir\` set to $UPLOADDIR.
fi
# Make a backup
if ! [ -e $JSONFILE.pre-datadir ]; then
cp $JSONFILE{,.pre-datadir}
else
echo $0: refusing to overwrite backup file $JSONFILE.pre-datadir
exit 1
fi
# Create the new file and move things into place
TMPFILE=$(mktemp)
if [ -z "$(echo $UPLOADDIR | grep 'uploads/\?$')" ]; then
# `uploaddir` does _not_ end in /uploads
# Shuffle the existing files into an uploads/ subdirectory via a temp dir.
TMPDIR=$(mktemp -d)
mv $UPLOADDIR/* $TMPDIR
mkdir $UPLOADDIR/uploads
mv $TMPDIR/* $UPLOADDIR/uploads
rmdir $TMPDIR
# Adjust the config to match the move we just did
jq '.datadir = .uploaddir' $JSONFILE | jq 'del(.uploaddir)' | jq '.enableUploads = true' > $TMPFILE
else
# `uploaddir` ends in /uploads
# datadir is uploaddir with the trailing /uploads component stripped.
DATADIR=$(echo $UPLOADDIR | sed "s;/uploads/\?;;")
echo $DATADIR
jq '.datadir = '\"$DATADIR\" $JSONFILE | jq 'del(.uploaddir)' | jq '.enableUploads = true' > $TMPFILE
fi
mv $TMPFILE $JSONFILE
}
run
| true
|
263b22dbc6eb34cc263f940efb096c865fae26c7
|
Shell
|
srirambtechit/linux
|
/shell-scripting-basics/concepts/smenu.sh
|
UTF-8
| 360
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Show a three-entry dialog(1) menu and run the chosen tool.
# BUG FIXES vs. original:
#  * the dialog command was split across lines with no '\' continuation,
#    so the menu entries were executed as a separate (bogus) command;
#  * `opt=$?` was taken after `cat`, capturing cat's status rather than
#    dialog's exit status;
#  * the case pattern `Date/time)` never matched the menu tag `Date/Time`.
dialog --backtitle "Linux Shell Script Tutorial " --title "Main\
Menu" --menu "Move using [UP] [DOWN], [Enter] to Select" 15 50 3 \
Date/Time "Shows Date and Time" Calendar "To see calendar" Editor "To start vi editor" 2> /tmp/menuitem.$$
opt=$?
menuitem=`cat /tmp/menuitem.$$`
case $menuitem in
Date/Time) date;;
Calendar) cal;;
Editor) vi;;
esac
| true
|
7a53e3c66cedc8e69bfe618d8614421202d754ff
|
Shell
|
Slysidus/plugins-archive
|
/package.sh
|
UTF-8
| 295
| 2.78125
| 3
|
[] |
no_license
|
# Rebuild and deploy the Core dependency first.
(cd ../Core && mvn deploy -DskipTests)
# Deploy to the local repo when pom.xml declares a <distributionManagement>
# section; otherwise just package.
if [[ -n "$(xpath -q -e project/distributionManagement/ pom.xml)" ]]; then
    command="deploy"
else
    command="package"
fi
mvn -Duser.name="Lightning" "$command"
| true
|
44df138b3c82225af855593223b523d492a0c8ec
|
Shell
|
nonameffh/docker-image
|
/php/7.4/bin/phpctl.install
|
UTF-8
| 1,119
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Build and install PHP from source: download/verify/extract via phpctl,
# configure and compile, strip binaries, register runtime shared-library
# deps with apk, then refresh pecl channels.
set -xe
export CFLAGS="$PHP_CFLAGS" \
CPPFLAGS="$PHP_CPPFLAGS" \
LDFLAGS="$PHP_LDFLAGS"
gnu_arch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"
## download, verify and extract sources
phpctl.source download && phpctl.source verify && phpctl.source extract
## build
# NOTE(review): uses dpkg-architecture (Debian tooling) above but apk and
# scanelf (Alpine tooling) below — confirm the base image provides both.
# The scanelf step collects every shared library the freshly installed
# binaries link against (excluding ones we just built into /usr/local/lib)
# and pins them as an apk virtual package so they survive image cleanup.
cd /usr/src/php \
&& ./configure \
--build="$gnu_arch" \
--with-config-file-path="$PHP_INI_DIR/" \
--with-config-file-scan-dir="$PHP_INI_DIR/conf.d" \
$PHP_INSTALL_ARGUMENTS \
$PHP_EXTRA_INSTALL_ARGUMENTS \
&& make -j "$(nproc)" \
&& make install \
&& { find /usr/local/bin /usr/local/sbin -type f -perm +0111 -exec strip --strip-all '{}' + || true; } \
&& make clean \
&& phpctl.source delete \
&& php_deps="$( \
scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \
| tr ',' '\n' \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)" \
&& apk add --no-cache --virtual .php-deps $php_deps \
&& pecl update-channels \
&& rm -rf /tmp/pear ~/.pearrc
| true
|
21a4448d552af893f81e96e9c5dfcdc698fe1dd4
|
Shell
|
amandasaurus/camarabuntu
|
/bin/extract-cd-contents.sh
|
UTF-8
| 974
| 4.25
| 4
|
[] |
no_license
|
#! /bin/bash
# Extract the contents of a CD .iso into a directory: loop-mount the image,
# copy everything out, then make the extracted copy writable.
# $1 - path to the .iso, $2 - destination directory
ISO=$1
DIR_NAME=$2
# Print usage and exit.
help() {
    printf '%s\n' "USAGE: $0 <path of .iso> <path to dir to extract to>"
    exit
}
# -h/--help as the first argument prints usage and exits.
case $1 in
"-h"|"--help")
help
esac
# Both positional arguments are required.
if [ ! "$ISO" -o ! "$DIR_NAME" ] ; then
help
fi
# Unmount and remove the temporary mount point, then exit.
#   $1 - exit status (defaults to 0)
# Robustness fix: the final bare `cleanup` call at the end of the script
# used to run `exit` with an empty argument, silently reusing whatever the
# previous command's status happened to be; default to 0 instead.
function cleanup {
    local status=${1:-0}
    sudo umount "${TMP_DIR}"
    rmdir "${TMP_DIR}"
    exit $status
}
# Unmount/remove the temp mount point if the user interrupts the copy.
trap cleanup SIGINT SIGTERM
if [ ! -d "${DIR_NAME}" ] ; then
mkdir -p "${DIR_NAME}" || exit $?
else
# Existing target is wiped; cp -rT below recreates it from the CD image.
rm -rf "${DIR_NAME}"
fi
TMP_DIR=$(mktemp -t -d cd-image-XXXXXX)
[ "$TMP_DIR" ] || exit 1
echo "Mounting the CD image now, you may need to enter your root password "
sudo mount -o loop "${ISO}" "${TMP_DIR}" || exit $?
echo " ... Done"
echo -n "CD mounted, starting file copy "
cp -rT "${TMP_DIR}" "${DIR_NAME}" || cleanup $?
echo " ... Done"
echo -n "Updating permissions on cd image "
# ISO9660 contents are read-only; make the extracted copy writable.
find "${DIR_NAME}" -exec chmod +w '{}' ';' # why not just 'chmod -R +w' ?
echo " ... Done"
echo "All Done"
cleanup
| true
|
05c6126be309797b51e3652f43dfeea7d6dafa6a
|
Shell
|
HubBucket-Team/mongo-c-driver
|
/.evergreen/abi-compliance-check.sh
|
UTF-8
| 1,812
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the working tree and the latest released tag into side-by-side
# install prefixes, then run abi-compliance-checker over both to detect
# ABI/API breaks. Drops abi-error.txt when the checker reports problems.
set -o xtrace # Write all commands first to stderr
set -o errexit
# create all needed directories
mkdir abi-compliance
mkdir abi-compliance/changes-install
mkdir abi-compliance/latest-release-install
mkdir abi-compliance/dumps
# build the current changes
export SKIP_TESTS=ON
# NOTE(review): the '-Og' after the closing quote-free value is a separate
# word of the flags string, so it is passed as its own configure argument
# rather than as part of CMAKE_C_FLAGS — confirm compile.sh expects that.
export EXTRA_CONFIGURE_FLAGS="-DCMAKE_INSTALL_PREFIX=./abi-compliance/changes-install -DCMAKE_C_FLAGS=-g -Og"
python build/calc_release_version.py > VERSION_CURRENT
python build/calc_release_version.py -p > VERSION_RELEASED
sh .evergreen/compile.sh
make install
# checkout the newest release
newest=`cat VERSION_RELEASED`
current=`cat VERSION_CURRENT`
git checkout tags/$newest -f
# build the newest release
export SKIP_TESTS=ON
export EXTRA_CONFIGURE_FLAGS="-DCMAKE_INSTALL_PREFIX=./abi-compliance/latest-release-install -DCMAKE_C_FLAGS=-g -Og"
sh .evergreen/compile.sh
make install
cd abi-compliance
# Assemble the two XML descriptor files consumed by abi-compliance-checker.
# The '\n' escapes are expanded by printf below; note the unquoted
# `printf $old_xml` relies on the XML containing no spaces or '%'.
old_xml="<version>$newest</version>\n"
old_xml="${old_xml}<headers>\n"
old_xml="${old_xml}$(pwd)/latest-release-install/include/libmongoc-1.0/mongoc/mongoc.h\n"
old_xml="${old_xml}$(pwd)/latest-release-install/include/libbson-1.0/bson/bson.h\n"
old_xml="${old_xml}</headers>\n"
old_xml="${old_xml}<libs>$(pwd)/latest-release-install/lib</libs>"
printf $old_xml > old.xml
new_xml="<version>$current</version>\n"
new_xml="${new_xml}<headers>\n"
new_xml="${new_xml}$(pwd)/changes-install/include/libmongoc-1.0/mongoc/mongoc.h\n"
new_xml="${new_xml}$(pwd)/changes-install/include/libbson-1.0/bson/bson.h\n"
new_xml="${new_xml}</headers>\n"
new_xml="${new_xml}<libs>$(pwd)/changes-install/lib</libs>"
printf $new_xml > new.xml
# check for abi compliance. Generates HTML Reports
# '|| result=$?' keeps errexit from aborting; result stays unset on success.
abi-compliance-checker -lib mongo-c-driver -old old.xml -new new.xml || result=$?
if [ -n "$result" ]; then
touch ./abi-error.txt
fi
| true
|
efbbc670ad689680273551862664c2bc3765ba3c
|
Shell
|
rostonn/ph
|
/startup.sh
|
UTF-8
| 180
| 2.890625
| 3
|
[] |
no_license
|
# Extract the device serial number from /proc/cpuinfo (present on e.g.
# Raspberry Pi; empty elsewhere) and emit it as a CommonJS module.
# Idiom fix: grep reads the file directly instead of a useless `cat |`.
SERIAL="$(grep Serial /proc/cpuinfo | cut -d ':' -f 2)"
SERIAL="module.exports='$SERIAL';"
#${string//substring/replacement} — strip every space character
SERIAL="${SERIAL// }"
# Quoted to keep the value intact regardless of its contents.
echo "$SERIAL" > serial.js
| true
|
522ccdc2852bf7de184d532cbf73e2b1d9bc8dbe
|
Shell
|
jcantrill/service-example
|
/.s2i/bin/assemble
|
UTF-8
| 194
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# s2i assemble script: fetch the service-example sources into the app root
# and install its npm dependencies.
APP_DIR=/opt/app-root/src
tmp=`mktemp -d`
# Shallow clone is enough — only the working tree is copied below.
git clone --depth 1 https://github.com/jcantrill/service-example.git $tmp
cp -r $tmp/* $APP_DIR
rm -rf $tmp
cd $APP_DIR
npm install
| true
|
e9094192d8c0bd7a0287ccae248869cf35b0ea19
|
Shell
|
Postwork/Integration
|
/scripts/script_master.sh
|
UTF-8
| 2,927
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive admin menu for Postwork: prompts the operator (in French) and
# dispatches to the per-feature helper scripts (tchat, DB, FQDN, mail,
# QR code, vhost, user management). Helpers are put on PATH by source.sh.
source /var/www/postwork/postwork.itinet.fr/scripts/source.sh
# Loop until the operator picks 0 (exit).
while :
do
echo "Bienvenue Sur le script master de Postwork.
1: Tchat
2: Base de données
3: FQDN
4: Mail
5: Qrcode
6: Vhost
7: Ajouter un utilisateur avec toutes les options
8: Ajouter un site à un utilisateur
9: Creer un utilisateur
0: Sortir "
# NOTE(review): cmd=$PPID is dead code — the value is immediately
# overwritten by the `read` on the next line.
cmd=$PPID
read cmd
case $cmd in
0 )
break
;;
1 )
read -p "Taper 1 pour creer 2 pour supprimer " var1
read -p "Nom d'utilisateur: " var2
# A password is only needed when creating (choice 1).
if [[ $var1 = 1 ]]; then
read -s -p "Mot de passe: " var3
fi
script_chat.sh $var1 $var2 $var3
;;
2)
read -p "Tapez 1 pour creer 2 pour supprimer " var1
read -p "Nom d'utilisateur: " var2
if [[ $var1 = 1 ]]; then
read -s -p "Mot de passe: " var3
fi
read -p "Nom de la base à créer: " var4
script_bdd.sh $var1 $var2 $var3 $var4
;;
3)
read -p "Tapez 1 pour creer 2 pour supprimer " var1
read -p "Nom d'utilisateur (ou nom de machine): " var2
read -p "Ip Public (facultatif): " var3
script_fqdn.sh $var1 $var2 $var3
;;
4)
read -p "Tapez 1 pour creer 2 pour supprimer " var1
read -p "Nom d'utilisateur: " var2
if [[ $var1 = 1 ]]; then
read -s -p "Mot de passe: " var3
fi
script_mail.sh $var1 $var2 $var3
;;
5)
read -p "Tapez ou stocker le QRcode 1 pour documentroot, 2 pour envoi par mail " var1
read -p "Nom d'utilisateur: " var2
read -p "Nom de Machine: " var3
script_qrcode.sh $var1 $var2 $var3
;;
6)
read -p "Tapez 1 pour creer, 2 pour supprimer, 3 pour activer, 4 pour desactiver " var1
read -p "Nom de machine: " var2
if [[ $var1 = 1 ]]; then
read -p "Documentroot: " var3
fi
script_vhost.sh $var1 $var2 $var3
;;
7)
read -p "Tapez 1 pour creer, 2 pour supprimer " var1
read -p "Identifiant/nom de machine: " var2
if [[ $var1 = 1 ]]; then
read -s -p "Mot de passe: " var3
fi
script_pwuser.sh $var1 $var2 $var3
;;
8)
read -p "Tapez 1 pour creer, 2 pour supprimer" var1
read -p "Nom d'utilisateur: " var2
if [[ $var1 = 1 ]]; then
read -p "Nom de machine: " var3
read -p "Documentroot: " var4
read -p "IP publique (optionnelle): " var5
fi
script_pwhost.sh $var1 $var2 $var3 $var4 $var5
;;
9)
read -p "Tapez 1 pour creer, 2 pour supprimer " var1
read -p "Identifiant/nom de machine: " var2
if [[ $var1 = 1 ]]; then
read -s -p "Mot de passe:" var3
fi
# NOTE(review): a password is read into var3 above but never passed on —
# likely a missing $var3 argument here; confirm against script_user.sh.
script_user.sh $var1 $var2
;;
esac
done
| true
|
24ab0cf7e8b90a8f8fdfd85fb44bf320410b218f
|
Shell
|
mjaglan/docker-kafka-zookeeper
|
/third-party/scripts/yahoo-kafka-manager.sh
|
UTF-8
| 558
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch Yahoo Kafka Manager in the background from $KAFKA_MANAGER_SERVICE
# and show process listings so the caller can confirm it is running.
# check $ZK_HOSTS variable
echo "ZK_HOSTS=$ZK_HOSTS"
# got to git repository
cd $KAFKA_MANAGER_SERVICE
# run the service (detached; output captured in kafka-manager-service.log)
nohup ./bin/kafka-manager -Dapplication.home=$(pwd) -Dconfig.file=conf/application.conf -Dhttp.port=9000 > kafka-manager-service.log 2>&1 &
echo "PID: $!"
# wait for few seconds until "logs/application.log" is generated
# NOTE(review): the '5s' suffix is a GNU coreutils extension to sleep.
sleep 5s
# List all JAVA PID - helps you cross-check if kafka-manager is still running.
jps -lm
# List PID of all running processes - helps you cross-check if kafka-manager is still running.
ps -ef
| true
|
9be5d524dae08734dc551b68d25aa9b76da18229
|
Shell
|
SLeviyang/Yoshida_OCR_Clustering
|
/Yoshida-GEO-ATACseq-process_workflow/yoshida-atacseq-workflow-steps/step3_bam2peaks/check_peaks.sh
|
UTF-8
| 372
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# List the contents of every per-cell directory under the peaks bucket into
# check_output_file.txt (one header line per cell dir, then its listing).
base_dir="s3://yoshida-atacseq/peaks/"
all_cell_dirs=( $(aws s3 ls $base_dir | awk '{print $2}') )
echo ${all_cell_dirs[@]}
if [ -f check_output_file.txt ]
then
rm check_output_file.txt
fi
touch check_output_file.txt
# BUG FIX: the original loop read `${all_cell_dirs[@]}:` — the stray ':'
# was appended to the final directory name, so the last cell directory was
# queried with a bogus "dir:" suffix and its listing came back empty.
for cell_dir in ${all_cell_dirs[@]}
do
echo $cell_dir
echo $cell_dir >> check_output_file.txt
aws s3 ls "$base_dir$cell_dir" >> check_output_file.txt
done
| true
|
56130b8a9ecd1c9eeac711fce3443837dbc1aa53
|
Shell
|
valencik/cssmuadm
|
/courses/csci3431/10-Create-CSCI3431-Users.sh
|
UTF-8
| 3,036
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Provision student accounts for the course: generate (or reuse) a
# credentials file, create/re-password users os00..os50, grant the
# instructor and marker membership of each student's group, and set up the
# course admin/common groups.
set -e
#Configuration
courseShortName=os
path=/home/course/$courseShortName
mkdir -p $path
minAccount=00
maxAccount=50
instructor="tami"
marker="marker"
# NOTE(review): numberSubmissions is unused in this script.
numberSubmissions=10
#Generate username/password file for usrpasswd command
if [ ! -f ./$courseShortName.usrpasswd ];
then
echo "Generating new $courseShortName.usrpasswd file"
read -p "Press [Enter] key to continue or Ctrl+C to cancel..."
for i in $(seq -w $minAccount $maxAccount); do
#Generate usernames and passwords and write them to a file
user=$courseShortName$i
# Random 8-char password, avoiding easily-confused characters (1lLiIo0).
pass=$(tr -dc A-Za-z0-9 < /dev/urandom | tr -d 1lLiIo0 | head -c 8)
#pass=$(grep -v "'" /usr/share/dict/words | shuf -n3 | xargs | tr -t " " "-")
#pass=$(apg -a1 -n1 -x8 -m8 -M NLC -c cl_seed -E 0oO1liLIsSzZxXcCvV)
echo "$user:$pass">>$courseShortName.usrpasswd
#Ensure users exist and passwords match file
if id -u $user >/dev/null 2>&1; then
echo "User $user already exists. Continuing will update password."
read -p "Press [Enter] key to continue or Ctrl+C to cancel..."
echo "$user:$pass" | chpasswd
else
#Create users in /home/course/$courseShortName with ./$courseShortName-skel contents
useradd --base-dir $path --create-home --skel ./$courseShortName-skel \
--shell /bin/bash --password $(openssl passwd $pass) $user
fi
done
else
echo "Found existing $courseShortName.usrpasswd file."
while IFS=: read -r user pass;
do
#Ensure users exist and passwords match file
if id -u $user >/dev/null 2>&1; then
echo "User $user already exists. Continuing will update password."
read -p "Press [Enter] key to continue or Ctrl+C to cancel..."
echo "$user:$pass" | chpasswd
else
#Create users in /home/course/$courseShortName with ./$courseShortName-skel contents
useradd --base-dir $path --create-home --skel ./$courseShortName-skel \
--shell /bin/bash --password $(openssl passwd $pass) $user
fi
done < "$courseShortName.usrpasswd"
fi
#Give special accounts ownership of student accounts
while IFS=: read -r user pass;
do
if [ "$user" != "$instructor" ] && [ "$user" != "$marker" ]; then
usermod -aG $user $marker
usermod -aG $user $instructor
fi
done < "$courseShortName.usrpasswd"
#Create admin group, this is used for locking files down
groupadd $courseShortName"admin"
usermod -aG $courseShortName"admin" $instructor
usermod -aG $courseShortName"admin" $marker
#Create common group, used for securing webpages from public
#Lets each user create dir (chmod 710) with file (chmod 770) for php to write
groupadd $courseShortName
while IFS=: read -r user pass;
do
if [ "$user" != "$instructor" ] && [ "$user" != "$marker" ]; then
usermod -aG $courseShortName $user
fi
done < "$courseShortName.usrpasswd"
#Create osbashrc file
cp os00-skel/.osbashrc $path/$instructor/.osbashrc
# NOTE(review): the group "os" is hard-coded here (and os00-skel above);
# presumably should be $courseShortName so a config change survives.
chown $instructor:os $path/$instructor/.osbashrc
chmod 750 $path/$instructor/.osbashrc
| true
|
a4a6abf351f1f15bdff123fcf4dc9d999026602e
|
Shell
|
plugn/dotfiles-1
|
/bashrc
|
UTF-8
| 2,106
| 3.453125
| 3
|
[] |
no_license
|
# Interactive-shell setup: only runs when $PS1 is set.
if [ -n "$PS1" ]; then
export LANG=en_US.UTF-8
export TERM=xterm-256color
export PATH=$HOME/.dotfiles/bin/:$HOME/.gem/ruby/2.0.0/bin:$PATH
export PASSBOX_LOCATION=$HOME/Dropbox/.passwords.gpg
# Source every helper snippet shipped with the dotfiles.
for file in $HOME/.dotfiles/includes/*.sh; do
[ -r "$file" ] && source "$file"
done
unset file
# Set SSH authentication socket location
# Use a stable symlink so long-lived screen sessions keep agent access
# after reconnect (the real agent socket path changes per ssh session).
SOCK="/tmp/ssh-agent-$USER-screen"
if test $SSH_AUTH_SOCK && [ $SSH_AUTH_SOCK != $SOCK ]
then
rm -f /tmp/ssh-agent-$USER-screen
ln -sf $SSH_AUTH_SOCK $SOCK
export SSH_AUTH_SOCK=$SOCK
fi
# Setup git completion for its alias
# NOTE(review): __git_complete must be provided by one of the sourced
# includes above, otherwise this line errors at shell startup.
__git_complete g __git_main
# http://www.gnu.org/software/bash/manual/html_node/The-Shopt-Builtin.html
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob
# Append to the Bash history file, rather than overwriting it
shopt -s histappend
# Autocorrect typos in path names when using `cd`
shopt -s cdspell
# Save all lines of a multiple-line command in the same history entry (allows easy re-editing of multi-line commands)
shopt -s cmdhist
# Don't autocomplete when accidentally pressing Tab on an empty line. (It takes forever and yields "Display all 15 gazillion possibilites?")
shopt -s no_empty_cmd_completion
# Enable some Bash 4 features when possible:
# * `autocd`, e.g. `**/qux` will enter `./foo/bar/baz/qux`
# * Recursive globbing, e.g. `echo **/*.txt`
# for option in autocd globstar; do
# shopt -s "$option" 2> /dev/null
# done
# Add tab completion for SSH hostnames based on ~/.ssh/config, ignoring wildcards
[ -e "$HOME/.ssh/config" ] && complete -o "default" -o "nospace" -W "$(grep "^Host" ~/.ssh/config | grep -v "[?*]" | awk '{print $2}')" scp sftp ssh
# Add `killall` tab completion for common apps
complete -o "nospace" -W "Contacts Calendar Dock Finder Mail Safari iTunes Terminal" killall
# If possible, add tab completion for many more commands
# [ -f /etc/bash_completion ] && source /etc/bash_completion
fi
| true
|
da63511c736d5c1b46343154e74cfdfddf2e34e1
|
Shell
|
shelvacu/mcfly
|
/dev.bash
|
UTF-8
| 522
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Compile mcfly, then replace this shell with an interactive bash wired up
# for local mcfly testing.
this_dir=$(cd "$(dirname "$0")" && pwd) || exit $?
# Force a fresh build of the mcfly binary.
rm -f target/debug/mcfly
rm -rf target/debug/deps/mcfly-*
cargo build
# Backtraces only carry line numbers when the deps/ binary is run directly.
HISTFILE=$HOME/.bash_history \
MCFLY_PATH=$(find target/debug/deps/mcfly-* -maxdepth 1 -type f | grep -v '\.d') \
RUST_BACKTRACE=full \
MCFLY_DEBUG=1 \
exec /bin/bash --init-file "$this_dir/mcfly.bash" -i
| true
|
e08990aaf01e4a85b9508bb203e36dd28becba6c
|
Shell
|
bnv2103/NGS
|
/WGS/refine_bam.sh
|
UTF-8
| 2,261
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -cwd
# Refine a per-chromosome BAM: remove PCR duplicates, recompute MD/NM tags
# with extended BAQ via samtools calmd, and index the result.
# Usage: refine_bam.sh <chr> <dir>
SAMTOOLS="/ifs/data/c2b2/ngs_lab/ngs/usr/bin/samtools"
REF="/ifs/data/c2b2/ngs_lab/ngs/resources/bwa_samtools_gatk_DB/human_g1k_v37.fasta"
chr=$1
DIR=$2"/"
$SAMTOOLS rmdup $DIR$chr.sorted.bam $DIR$chr.sorted.bam.noDup.bam
echo "Remove PCR Duplicates complete"
# rm $DIR$chr.sorted.bam
## Should add -C int : Coefficient to cap mapping quality of poorly mapped reads. See the pileup command for details. [0]
$SAMTOOLS calmd -rEAb $DIR$chr.sorted.bam.noDup.bam $REF > $DIR$chr.sorted.bam.noDup.bam.baq.bam
$SAMTOOLS index $DIR$chr.sorted.bam.noDup.bam.baq.bam
echo "Index Complete on bam.noDup.bam.baq.bam "
rm $DIR$chr.sorted.bam.noDup.bam
echo "Realigned using Samtools- CALMD with extended BAQ done"
# NOTE(review): the script ends at this `exit`; everything below is dead
# code — an unfinished Picard FixMate/metrics stage referencing variables
# (PICARD, OUTDIR, CHR, dir, myroot, setting, a, bam, i, FIXSTAT, GCbias,
# log, watch_pid) that are never defined in this file. Kept for reference.
exit
TEMP=$DIR$chr"_temp"
if [ ! -e $TEMP ];then
mkdir $TEMP
fi
JAVA="java -Xmx7g -Djava.io.tmpdir=$TEMP"
FIXMATE="$JAVA -jar ${PICARD}/FixMateInformation.jar"
$FIXMATE INPUT=$OUTDIR/$CHR.cleaned.bam OUTPUT=$OUTDIR/$CHR.fixed.bam SO=coordinate VALIDATION_STRINGENCY=SILENT
out=${DIR}/$chr.fixing.sh
echo '#!/bin/bash' > $out
echo 'uname -a' >> $out
echo "source $myroot/$setting" >> $out
echo "java -Xmx2g -Djava.io.tmpdir=${dir}/temp -jar $FIXMATE INPUT=${dir}/bam/$a/refine/$bam OUTPUT=${myroot}/bam/$a/refine/${i}.noDup.rl.sorted.fxmate.bam SO=coordinate VALIDATION_STRINGENCY=SILENT" >> $out
cmd="java -Xmx2g -Djava.io.tmpdir=${dir}/temp -jar $FIXSTAT INPUT=${dir}/bam/$a/refine/${i}.noDup.rl.sorted.fxmate.bam OUTPUT=${myroot}/bam/$a/refine/stat/${i}/${i}.noDup.rl.sorted.fxmate_details HISTOGRAM_FILE=${myroot}/bam/$a/refine/stat/${i}/${i}.noDup.rl.sorted.fxmate.hist.pdf REFERENCE_SEQUENCE=$REF"
echo $cmd >> $out
cmd="java -Xmx2g -Djava.io.tmpdir=${dir}/temp -jar $GCbias INPUT=${dir}/bam/$a/refine/${i}.noDup.rl.sorted.bam OUTPUT=${myroot}/bam/$a/refine/stat/${i}/${i}.noDup.rl.sorted.fxmate.gc.bias_detail CHART=${myroot}/bam/$a/refine/stat/${i}/${i}.gcBias.pdf REFERENCE_SEQUENCE=$REF"
echo $cmd >> $out
cmd="samtools index ${dir}/bam/$a/refine/${i}.noDup.rl.sorted.fxmate.bam"
echo $cmd >> $out
echo 'kill -USR2 $watch_pid; wait' >> $out
qsub -l mem=3G,time=2:: -o ${dir}/$log/fm-${i}.o -e ${dir}/$log/fm-${i}.e $out
| true
|
b2bbffbbaed16f3eacef66535ccdf26c790c9a4b
|
Shell
|
ddopson/github-migration-tools
|
/bulk-replace.sh
|
UTF-8
| 864
| 3.515625
| 4
|
[] |
no_license
|
# Rewrite ${OLD_GITHUB} -> ${NEW_GITHUB} across every origin branch of
# every repo checked out under the current directory, committing per branch
# and pushing all branches to the 'newrepo' remote. The outer `while true`
# re-sweeps the repos indefinitely.
# Pull in 'config.sh'
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
CURDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# config.sh must define OLD_GITHUB and NEW_GITHUB (used inside perl s///,
# so they must be regex-safe).
source "${CURDIR}/config.sh"
while true; do
for d in *; do
if [ ! -d $d ]; then
continue;
fi
echo
echo "Entering '$d' ..."
cd $d
git fetch origin
# Iterate every remote branch, stripping the 'remotes/origin/' prefix.
for b in $(git branch -a | grep origin | perl -pe 's#.*remotes/origin/([^ \n\t]+).*#$1#' | grep -v HEAD); do
# NOTE(review): 'git co' is a checkout alias — must exist in git config.
git co -f "$b"
CHANGE=
# ack lists files containing the old hostname; rewrite each in place.
for f in $(ack -l ${OLD_GITHUB} --all); do
perl -pe "s/${OLD_GITHUB}/${NEW_GITHUB}/g" -i "$f"
CHANGE=yes
git add "$f"
done
if [ ! -z "$CHANGE" ]; then
git commit -m "Bulk Migration: ${OLD_GITHUB} --> ${NEW_GITHUB}"
fi
done
git push --all newrepo
cd -
# Filter out noise (up-to-date messages and `cd -` path echoes).
done | egrep -v "Already up-to-date|$(pwd)"
done
| true
|
eb065e206d476e517f4ed59366bd085e6ebcb377
|
Shell
|
TheAmazingPT/passman
|
/src/add-password.bash
|
UTF-8
| 1,434
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
# Prompt the user (via dmenu) for an account name and a password, then
# store the pair in the `pass` password store.
# Depends on: dmenu, xclip, pass, and on generate_password / notification
# helper functions defined elsewhere in this project.
function add_password {
  local prompt account_name menu generated_password password_1 password_2 \
    pasted_password confirmed_pasted_password
  prompt="Enter Accountname: (service/username)"
  # `echo |` feeds dmenu an empty entry; the user types a free-form value.
  account_name=$(echo | dmenu -i -p "$prompt")
  if [[ -z "$account_name" ]]; then
    exit
  fi
  prompt="Enter Password:"
  # Escaped quotes keep each multi-word entry intact when `xargs -n 1`
  # below re-parses the list into one line per menu item.
  menu=(
    "\"Paste from Clipboard\""
    "\"Generate Password\""
  )
  password_1=$(echo "${menu[@]}"| xargs -n 1 | dmenu -i -p "$prompt")
  if [[ -z "$password_1" ]]; then
    exit
  fi
  case $password_1 in
    "Paste from Clipboard")
      pasted_password=$(xclip -o -selection clipboard)
      ;;
    "Generate Password")
      # Generated passwords need no re-typing; set both copies at once.
      generated_password=$(generate_password 32 1)
      password_1=$generated_password
      password_2=$generated_password
      ;;
  esac
  if [[ -n "$pasted_password" ]]; then
    # Show the pasted value back to the user so it can be confirmed/edited.
    prompt="Enter Password (pasted):"
    confirmed_pasted_password=$(echo "$pasted_password" | dmenu -i -p "$prompt")
    password_1=$confirmed_pasted_password
    password_2=$confirmed_pasted_password
  fi
  if [[ -z "$pasted_password" && -z "$password_2" ]]; then
    # Manually typed password: ask a second time to catch typos.
    prompt="Enter Password again:"
    password_2=$(echo | dmenu -i -p "$prompt")
  fi
  if [[ -z "$password_2" ]]; then
    exit
  fi
  if [[ "$password_1" == "$password_2" ]]; then
    echo "$password_1" | pass insert --echo "$account_name" 1>/dev/null
    notification "New password was created!" "$account_name"
  else
    notification "Passwords didn't match!"
  fi
}
| true
|
445affd40fe9d1dff7f16e4e9fc1bbb5883853f2
|
Shell
|
gabesullice/shape-factory
|
/watch.sh
|
UTF-8
| 257
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Watch ./src and ./tests and rerun build + tests on change.
# Any extra CLI arguments are forwarded to `npm run ava`.
main () {
  local testcmd=""
  if [[ $# -gt 0 ]]; then
    # $* (not $@) because the arguments are being embedded in one string.
    testcmd="/bin/bash -lc 'npm run build && npm run ava $*'"
  else
    testcmd="/bin/bash -lc 'npm run build && npm test'"
  fi
  notify -c "$testcmd" ./src ./tests
}

# Quoted "$@" so arguments with spaces reach main() unsplit
# (the original `main $@` word-split and glob-expanded them).
main "$@"
| true
|
f04f755c21968ed43568befe96fc5dd88824c03d
|
Shell
|
Evan-Jams/SEIR_Waverider_Classwork
|
/unit_1/w01d02/homework/first_bash_instructions.bash
|
UTF-8
| 2,226
| 3.078125
| 3
|
[
"CC-BY-NC-SA-4.0",
"CC-BY-NC-4.0",
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
# Copy this text into your `.bash` homework file. Copy and paste the code you successfully used into terminal below each prompt
# Open the **Terminal app**
# Create a new directory on your desktop called `galaxy_far_far_away` and enter it
# Create a directory called `death_star`
# and make the following files inside of it:
# darth_vader.txt
# princess_leia.txt
# storm_trooper.txt
# In `galaxy_far_far_away`, make a directory named `tatooine`
# and create the following files in it:
# luke.txt
# ben_kenobi.txt
# Inside of `tatooine` make a directory called `millenium_falcon`
# and in it create:
# han_solo.txt
# chewbaca.txt
# Rename `ben_kenobi.txt` to `obi_wan.txt`
# Copy `storm_trooper.txt` from `death_star` to `tatooine`
# Move `luke.txt` and `obi_wan.txt` to the `millenium_falcon`
# Move `millenium_falcon` out of `tatooine` and into `galaxy_far_far_away`
# Move `millenium_falcon` into `death_star`
# Move `princess_leia.txt` into the `millenium_falcon`
# Delete `obi_wan.txt`
# In `galaxy_far_far_away`, make a directory called `yavin_4`
# Move the `millenium_falcon` out of the `death_star` and into `yavin_4`
# Make a directory in `yavin_4` called `x_wing`
# Move `princess_leia.txt` to `yavin_4` and `luke.txt` to `x_wing`
# Move the `millenium_falcon` and `x_wing` out of `yavin_4` and into `galaxy_far_far_away`
# In `death_star`, create directories for `tie_fighter_1`, `tie_fighter_2` and `tie_fighter_3`
# Move `darth_vader.txt` into `tie_fighter_1`
# Make a copy of `storm_trooper.txt` in both `tie_fighter_2` and `tie_fighter_3`
# Move all of the `tie_fighters` out of the `death_star` and into `galaxy_far_far_away`
# Be careful with this command - cannot undo!
# Make sure you delete the right thing, or you could accidentally delete the contents of your computer (it has happened).
# This command will typically not ask you if you really want to delete. It will just delete so doublecheck you are deleting exactly what you mean to delete
# Remove `tie_fighters` 2 and 3.
# Touch a file in "**x_wing**" called "**the_force.txt**".
# Destroy the "**death_star**" and anyone inside of it.
# Return "**x_wing**" and the "**millenium_falcon**" to "**yavin_4**".
# Celebrate!
| true
|
5d546e8c5c13479cef426ebd84ae40f2226ca2fd
|
Shell
|
gisanfu/fast-change-dir
|
/study/backspace.sh
|
UTF-8
| 211
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: detect when the user types a literal backspace character.
# `echo -e` interprets \b as the backspace control character and \c
# suppresses the trailing newline, so $backspace holds exactly one char.
backspace=$(echo -e \\b\\c)
echo 'please input backspace'
# You can press Ctrl+H to type a literal backspace at the prompt.
# -r stops read from interpreting backslashes in the input.
read -r inputvar
# Both sides quoted: the original left $backspace unquoted, exposing it
# to word splitting and globbing.
if [ "$inputvar" == "$backspace" ]; then
    echo 'READ backspace!'
fi
| true
|
e94a3fefd2946ef2ebab8c42f47af2c9898afedb
|
Shell
|
mounikab13/test_project
|
/scripts/nanocount
|
UTF-8
| 1,518
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# NanoCount quantification pipeline for NA12878 direct-RNA nanopore reads.
# Fixes over the original: "basedir = ..." was a shell syntax error (spaces
# around '='), downloaded files were later referenced without their download
# directory, "$PATH" (the executable search path) was used where the data
# directory was intended, and the SAM name given to samtools did not match
# minimap2's output file.
basedir=/hpcnfs/home/ieo5306/projects/nanocount/data
# Download FASTQ file from consortium in github which is generated using latest version of guppy
wget -P "$basedir" http://s3.amazonaws.com/nanopore-human-wgs/rna/fastq/NA12878-DirectRNA_All_Guppy_3.2.6.fastq.gz
gunzip "$basedir/NA12878-DirectRNA_All_Guppy_3.2.6.fastq.gz"
# Download human genome fasta file
wget -P "$basedir" http://labshare.cshl.edu/shares/gingeraslab/www-data/dobin/STAR/STARgenomes/ENSEMBL/homo_sapiens/ENSEMBL.homo_sapiens.release-83/Homo_sapiens.GRCh38.dna.primary_assembly.fa
# Download corresponding gtf file from ftp ensembl
wget -P "$basedir" ftp://ftp.ensembl.org/pub/release-99/gtf/homo_sapiens/Homo_sapiens.GRCh38.99.gtf.gz
gunzip "$basedir/Homo_sapiens.GRCh38.99.gtf.gz"
# Convert gtf file to bed using bedparse (into the data dir, not "$PATH")
bedparse gtf2bed "$basedir/Homo_sapiens.GRCh38.99.gtf" > "$basedir/transcripts.bed"
# Build a per-transcript FASTA from the genome using bedtools getfasta
bedtools getfasta -fi "$basedir/Homo_sapiens.GRCh38.dna.primary_assembly.fa" -s -split -name -bed "$basedir/transcripts.bed" > "$basedir/rna.fa"
# Map reads against the transcriptome with direct-RNA parameters
minimap2 -a -x splice -k14 -uf "$basedir/rna.fa" "$basedir/NA12878-DirectRNA_All_Guppy_3.2.6.fastq" > "$basedir/rna.sam"
# Filter alignments (-F 2324) and convert to BAM.
# NOTE(review): the original passed a bare "-t" whose argument would have
# been "-F"; samtools' -t expects a name/length file, so it is dropped here
# and the input name is aligned with minimap2's output -- confirm intent.
samtools view -bh -F 2324 "$basedir/rna.sam" > "$basedir/rna.filt.bam"
# NanoCount produces per-transcript counts from the filtered alignments
NanoCount -i $basedir/rna.filt.bam -o $basedir/rna_count
| true
|
a6a41455a47d0d9ea0256ced4e21bc5ac016a741
|
Shell
|
Universefei/osm_dbo
|
/importScripts/importpgChinaOSM.sh
|
UTF-8
| 1,028
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Import an OSM extract of China into a PostGIS database (china_osm_orig).
# usage example
# $1 : host to import data to
# $2 : osm data path
# ->
# -> ./dumpChinaOSM.sh 192.168.3.137 ~/gisData/china-latest.osm.pbf
# ->
# Does the target database already exist on that host?
psql -h "$1" -p 5432 -U postgres -l | grep china_osm_orig &> /dev/null
if [[ $? != 0 ]]; then
    # Fresh install: create the database and enable PostGIS.
    psql -h "$1" -p 5432 -U postgres -c "create database china_osm_orig"
    psql -h "$1" -p 5432 -U postgres -d china_osm_orig -c "create extension postgis"
else
    # Database exists: confirm before re-using/overriding it.
    while true
    do
        read -r -p "Database china_osm_orig at $1 already exsit, override it? [y/n]" RESP
        case "$RESP" in
            Y|y)
                echo "will override china_osm_origin"
                psql -h "$1" -p 5432 -U postgres -d china_osm_orig -c "create extension postgis"
                break
                ;;
            N|n)
                exit
                ;;
            *)
                echo "input error"
                ;;
        esac
    done
fi
# Load the OSM data into the database.
osm2pgsql -H "$1" -P 5432 -U postgres -W -d china_osm_orig -c -l -C 800 "$2"
| true
|
aff8d39ebd63b8785482d394d306b7e1ac1be6fa
|
Shell
|
chuanwang/cloudgate
|
/bootstrap.sh
|
UTF-8
| 1,603
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#################################################################
#    Script to bootstrap the postgresql database                #
#################################################################

# directory where postgresql is installed. Usually it is /usr/bin
POSTGRESQL_DIR=$1
# database cluster data directory
DATA_DIR=/home/pgsql/data
SQL_SCRIPT_DIR=/home/pgsql/init_db.sql
# database log file
LOGFILE=/home/pgsql/logfile
CURRENT_DIR=$(pwd)

# The install dir argument is required; bail out with usage otherwise.
if [ "${POSTGRESQL_DIR}" == "" ]; then
    echo "Usage sudo $0 postgresql_install_dir(usually /usr/bin)"
    echo ""
    exit 1;
fi;

pass=$(perl -e 'print crypt("verycloud", "salt")')
# Create cloudgate user with password verycloud
useradd -p "$pass" cloudgate
mkdir -p "${DATA_DIR}"
chown -R cloudgate "${DATA_DIR}/../"
# init db file directory
su - cloudgate -c "${POSTGRESQL_DIR}/initdb -D ${DATA_DIR}"
# create log file
su - cloudgate -c "touch ${LOGFILE}"
# start postgresql server
su - cloudgate -c "${POSTGRESQL_DIR}/pg_ctl -D ${DATA_DIR} -l ${LOGFILE} start"
# wait for server start
sleep 5
# create verycloud database
su - cloudgate -c "${POSTGRESQL_DIR}/createdb verycloud"
# move sql script to pgsql directory so that user cloudgate can read it.
cp ./init_db.sql "${SQL_SCRIPT_DIR}"
chown cloudgate "${SQL_SCRIPT_DIR}"
# run sql script to populate tables into verycloud database
su - cloudgate -c "${POSTGRESQL_DIR}/psql verycloud < ${SQL_SCRIPT_DIR}"
########################################################
#                 End of script                        #
########################################################
| true
|
bf1655582a23da1c2133c9a4db6ddfd6a1ca96f6
|
Shell
|
stoneboy100200/daily_copy
|
/download_caffemodel.sh
|
UTF-8
| 446
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download and unpack the caffe model tarball from the internal FTP server.
read -r -p "Please input user name:" username
read -r -s -p "Please input password:" password
# Security fix: the original echoed the password back in clear text.
# Print a newline (read -s leaves the cursor mid-line) and the user only.
echo ""
echo "user=$username"
url=ftp://10.2.5.243:21/ftp/caffe_boost/caffe_mp.tar.bz2
CURRENT_DIR=$(dirname "$(readlink -f "$0")")
echo "downloading caffe model..."
# -c resume, -t 0 retry forever. NB: credentials on the command line are
# visible in `ps`; acceptable only on a trusted host.
wget -c -t 0 "$url" --ftp-user="$username" --ftp-password="$password" -O caffe_mp.tar.bz2
tar -xvjf caffe_mp.tar.bz2 -C "$CURRENT_DIR/.."
rm caffe_mp.tar.bz2
echo "done."
| true
|
a63a18cc0446d0c77c3294ce177a7c514ca8ef4d
|
Shell
|
ravija-maheshwari/Shell
|
/args.sh
|
UTF-8
| 200
| 3.15625
| 3
|
[] |
no_license
|
#! /bin/bash
# Demonstration of bash's special parameters inside a script.
# $0 expands to the path used to invoke the script.
echo My name is $0
# $$ is the PID of the current shell process.
echo My process number is $$
# $# is the number of positional arguments.
echo I have $# arguments
# "$*" joins all arguments into a single word (separated by IFS).
# NOTE(review): the "seperately"/"together" labels look swapped --
# "$*" is the joined form and "$@" the separate one.
echo My arguments seperately are "$*"
# "$@" expands each argument as its own word.
echo My arguments together are "$@" #"$@"
# $5 is the fifth positional argument.
echo My fifth argument is "'$5'"
| true
|
7db8065323e4ede4203f107c84fb15ad0abf0e28
|
Shell
|
shortthirdman/mongo-enterprise
|
/mongodb-enterprise.sh
|
UTF-8
| 721
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
# Build, smoke-test, and publish a MongoDB Enterprise Docker image.
## Set-Variable -Name "MONGODB_VERSION" -Value "4.2"
export MONGODB_VERSION=4.2
# Fetch the official docker-library Dockerfile and entrypoint for this version.
curl -O --remote-name-all https://raw.githubusercontent.com/docker-library/mongo/master/$MONGODB_VERSION/{Dockerfile,docker-entrypoint.sh}
## Set-Variable -Name "DOCKER_USERNAME" -Value "shortthirdman"
export DOCKER_USERNAME=shortthirdman
chmod 755 ./docker-entrypoint.sh
# Point the build at MongoDB's enterprise package/repository instead of community.
docker build --build-arg MONGO_PACKAGE=mongodb-enterprise --build-arg MONGO_REPO=repo.mongodb.com -t $DOCKER_USERNAME/mongo-enterprise:$MONGODB_VERSION .
docker run --name mongodb-ee -itd $DOCKER_USERNAME/mongo-enterprise:$MONGODB_VERSION
# Smoke test: print the server version from inside the running container.
docker exec -it mongodb-ee /usr/bin/mongo --eval "db.version()"
docker push $DOCKER_USERNAME/mongo-enterprise:$MONGODB_VERSION
| true
|
c10c25682c4991a288861e9161360f9cd22c2447
|
Shell
|
sergey-goncharenko/azure-intrust-sergey-template
|
/DSC/configureLinuxVM.sh
|
UTF-8
| 1,989
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#########################################################
# Script Name: configureLinuxVM.sh
# Author: Quest
# Version: 0.1
# Description:
#	This script configures InTrust Agent
#
# Note :
# This script has only been tested on RedHat 7
#########################################################

#---BEGIN VARIABLES---
AZ_ACCOUNT_NAME=''   # Azure private storage account name (SSH keys share)
AZ_ACCOUNT_PWD=''    # Azure private storage account key
AZ_SERVER_NAME=''    # InTrust server host name

# Print how the script must be invoked.
function usage()
{
    # Fix: the original message omitted the mandatory -i option.
    echo "INFO:"
    echo "Usage: configureLinuxVM.sh -a <account_name> -p <account_key> -i <intrust_server>"
}

# Log a message (optionally also ship it to a remote log collector).
function log()
{
    # If you want to enable this logging add a un-comment the line below and add your account id
    #curl -X POST -H "content-type:text/plain" --data-binary "${HOSTNAME} - $1" https://logs-01.loggly.com/inputs/<key>/tag/es-extension,${HOSTNAME}
    echo "$1"
}

#---PARSE AND VALIDATE PARAMETERS---
# Exactly three options with values are expected (-a, -p, -i => 6 argv words).
if [ $# -ne 6 ]; then
    log "ERROR:Wrong number of arguments specified. Parameters received $#. Terminating the script."
    usage
    exit 1
fi

while getopts :a:p:i: optname; do
    log "INFO:Option $optname set with value ${OPTARG}"
    case $optname in
        a) # Azure Private Storage Account Name- SSH Keys
            AZ_ACCOUNT_NAME=${OPTARG}
            ;;
        i) # Intrust Server name
            AZ_SERVER_NAME=${OPTARG}
            ;;
        p) # Azure Private Storage Account Key - SSH Keys
            AZ_ACCOUNT_PWD=${OPTARG}
            ;;
        \?) #Invalid option - show help
            log "ERROR:Option -${BOLD}$OPTARG${NORM} not allowed."
            usage
            exit 1
            ;;
    esac
done
#---PARSE AND VALIDATE PARAMETERS---

# Install the 32-bit prerequisites, pull the agent installer from the
# InTrust server's SMB share, install it, and register with the server.
function ConfigureInTrustAgent()
{
    yum install libuuid.i686 -y --setopt=protected_multilib=false
    yum install glibc.i686 -y --setopt=protected_multilib=false
    yum install samba-client -y
    smbget smb://${AZ_ACCOUNT_NAME}:${AZ_ACCOUNT_PWD}@${AZ_SERVER_NAME}/Agent/linux_intel/adcscm_package.linux_intel.sh
    mkdir /home/intrust
    ./adcscm_package.linux_intel.sh /home/intrust
    # Give the installer time to finish before registering the agent.
    sleep 5m
    /home/intrust/adcscm -add ${AZ_SERVER_NAME} 900 ${AZ_ACCOUNT_PWD}
}

ConfigureInTrustAgent
| true
|
1db1e4edf03e52015fb1c13c6d8e4cabe83bffe2
|
Shell
|
kgolyaev/kgutils
|
/python/installPythonUbuntuLocal.sh
|
UTF-8
| 1,720
| 3
| 3
|
[] |
no_license
|
#! /bin/bash
######################################################################
###
###  Install Python and its components on a clean installation of
###  Ubuntu 14.04 LTS (Trusty Tahr).
###
###  Requires sudo privileges. Tested a couple of times on a clean
###  Oracle VirtualBox VM; no guarantee it works for your use case.
###
######################################################################

### bring the existing system up to date first
sudo apt-get update
sudo apt-get upgrade -y -f

# Supporting system packages: databases, Java, compression/XML/ODBC/
# PostgreSQL development headers, curl, and sqlite.
system_packages=(
  mysql-server
  mysql-client
  default-jdk
  liblzma-dev
  libxml2-dev
  libiodbc2-dev
  libpq-dev
  curl
  libcurl4-openssl-dev
  sqlite
)
for pkg in "${system_packages[@]}"; do
  sudo apt-get install -y "$pkg"
done

# Python itself plus the scientific / NLP stack.
python_packages=(
  python-dev
  ipython
  ipython-notebook
  python-numpy
  python-matplotlib
  python-scipy
  python-pandas
  python-sympy
  python-rpy
  python-sklearn
  python-nltk
  python-requests
)
for pkg in "${python_packages[@]}"; do
  sudo apt-get install -y "$pkg"
done

# Installing numba takes a bit of extra work: it needs LLVM 3.3 and pip,
# plus a symlink so llvmpy can locate llvm-config.
sudo apt-get install -y llvm-3.3-dev
sudo apt-get install -y python-pip
sudo ln -s /usr/bin/llvm-config-3.3 /usr/bin/llvm-config
sudo LLVM_CONFIG_PATH=/usr/bin/llvm-config pip install llvmpy
sudo pip install numba
| true
|
bca012e8b6bddb4a4d02d932c769326006102f02
|
Shell
|
ClickHouse/ClickHouse
|
/packages/build
|
UTF-8
| 4,960
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Avoid dependency on locale
LC_ALL=C
# Normalize output directory
if [ -n "$OUTPUT_DIR" ]; then
  OUTPUT_DIR=$(realpath -m "$OUTPUT_DIR")
fi
# Work from the directory containing this script; ROOT_DIR is the git
# repository root (default location for produced artifacts).
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
cd "$CUR_DIR"
ROOT_DIR=$(readlink -f "$(git rev-parse --show-cdup)")
# Directory the package payload is assembled from.
PKG_ROOT='root'
DEB_ARCH=${DEB_ARCH:-amd64}
OUTPUT_DIR=${OUTPUT_DIR:-$ROOT_DIR}
[ -d "${OUTPUT_DIR}" ] || mkdir -p "${OUTPUT_DIR}"
SANITIZER=${SANITIZER:-""}
SOURCE=${SOURCE:-$PKG_ROOT}
HELP="${0} [--test] [--rpm] [-h|--help]
--test - adds '+test' prefix to version
--apk - build APK packages
--archlinux - build archlinux packages
--rpm - build RPM packages
--tgz - build tarball package
--deb - build deb package
--help - show this help and exit
Used envs:
DEB_ARCH='${DEB_ARCH}'
OUTPUT_DIR='${OUTPUT_DIR}' - where the artifact will be placed
SANITIZER='${SANITIZER}' - if any sanitizer is used, affects version string
SOURCE='${SOURCE}' - directory with sources tree
VERSION_STRING='${VERSION_STRING}' - the package version to overwrite
"
if [ -z "${VERSION_STRING}" ]; then
  # Get CLICKHOUSE_VERSION_STRING from the current git repo
  eval "$("$ROOT_DIR/tests/ci/version_helper.py" -e)"
else
  CLICKHOUSE_VERSION_STRING=${VERSION_STRING}
fi
export CLICKHOUSE_VERSION_STRING
# Consume leading --options; the first non-option argument stops the loop.
while [[ $1 == --* ]]
do
  case "$1" in
    --test )
      # Mark the produced version string as a test build.
      VERSION_POSTFIX+='+test'
      shift ;;
    --deb )
      MAKE_DEB=1
      shift ;;
    --apk )
      MAKE_APK=1
      shift ;;
    --archlinux )
      MAKE_ARCHLINUX=1
      shift ;;
    --rpm )
      MAKE_RPM=1
      shift ;;
    --tgz )
      # tgz packages are derived from the deb package (see deb2tgz below).
      MAKE_TGZ=1
      shift ;;
    --help )
      echo "$HELP"
      exit ;;
    * )
      echo "Unknown option $1"
      exit 2 ;;
  esac
done
# Convert an already-built .deb into a Slackware-style tarball: unpack the
# deb, synthesize an install/doinst.sh that copies the payload into place,
# then tar the result (with pigz when available) and write a sha512 sum.
function deb2tgz {
  local FILE PKG_NAME PKG_DIR PKG_PATH TARBALL
  FILE=$1
  # Package name = basename up to the first underscore of the deb file.
  PKG_NAME=${FILE##*/}; PKG_NAME=${PKG_NAME%%_*}
  PKG_DIR="$PKG_NAME-$CLICKHOUSE_VERSION_STRING"
  PKG_PATH="$OUTPUT_DIR/$PKG_NAME-$CLICKHOUSE_VERSION_STRING"
  TARBALL="$OUTPUT_DIR/$PKG_NAME-$CLICKHOUSE_VERSION_STRING-$DEB_ARCH.tgz"
  rm -rf "$PKG_PATH"
  # Extract the deb's filesystem tree plus its DEBIAN control directory.
  dpkg-deb -R "$FILE" "$PKG_PATH"
  mkdir -p "$PKG_PATH/install"
  # Quoted 'EOF': the embedded installer script is written verbatim.
  cat > "$PKG_PATH/install/doinst.sh" << 'EOF'
#!/bin/sh
set -e
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
for filepath in `find $SCRIPTPATH/.. -type f -or -type l | grep -v "\.\./install/"`; do
destpath=${filepath##$SCRIPTPATH/..}
mkdir -p $(dirname "$destpath")
cp -r "$filepath" "$destpath"
done
EOF
  chmod +x "$PKG_PATH/install/doinst.sh"
  if [ -f "$PKG_PATH/DEBIAN/postinst" ]; then
    # we don't need debconf source in doinst in any case
    tail +2 "$PKG_PATH/DEBIAN/postinst" | grep -v debconf/confmodule >> "$PKG_PATH/install/doinst.sh"
  fi
  rm -rf "$PKG_PATH/DEBIAN"
  # Prefer pigz (parallel gzip) when installed; fall back to plain gzip.
  if [ -f "/usr/bin/pigz" ]; then
    tar --use-compress-program=pigz -cf "$TARBALL" -C "$OUTPUT_DIR" "$PKG_DIR"
  else
    tar -czf "$TARBALL" -C "$OUTPUT_DIR" "$PKG_DIR"
  fi
  sha512sum "$TARBALL" > "$TARBALL".sha512
  rm -r "$PKG_PATH"
}
# Build options
# Append a sanitizer / debug marker to the version string so packages
# built with instrumentation are clearly distinguishable.
if [ -n "$SANITIZER" ]; then
  if [[ "$SANITIZER" == "address" ]]; then VERSION_POSTFIX+="+asan"
  elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan"
  elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan"
  elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan"
  else
    echo "Unknown value of SANITIZER variable: $SANITIZER"
    exit 3
  fi
elif [[ $BUILD_TYPE == 'debug' ]]; then
  VERSION_POSTFIX+="+debug"
fi
# Packages are always assembled from the PKG_ROOT directory; when a
# different SOURCE tree is requested, replace PKG_ROOT with a symlink to it.
# Fix: the original linked to "$PKG_SOURCE", a variable never defined in
# this script -- the link target must be "$PKG_ROOT".
if [[ "$PKG_ROOT" != "$SOURCE" ]]; then
  rm -rf "./$PKG_ROOT"
  ln -sf "$SOURCE" "$PKG_ROOT"
fi
CLICKHOUSE_VERSION_STRING+=$VERSION_POSTFIX
echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING"
# Build the requested package flavors for every clickhouse*.yaml nfpm config.
for config in clickhouse*.yaml; do
  if [ -n "$MAKE_DEB" ] || [ -n "$MAKE_TGZ" ]; then
    echo "Building deb package for $config"
    # Preserve package path
    # Duplicate stdout on fd 9 so `tee` streams nfpm's output to the
    # console while the command substitution captures it; the created
    # package path is then parsed out of the "created package:" line.
    exec 9>&1
    PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9)
    PKG_PATH=${PKG_PATH##*created package: }
    exec 9>&-
  fi
  if [ -n "$MAKE_APK" ]; then
    echo "Building apk package for $config"
    nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk
  fi
  if [ -n "$MAKE_ARCHLINUX" ]; then
    echo "Building archlinux package for $config"
    nfpm package --target "$OUTPUT_DIR" --config "$config" --packager archlinux
  fi
  if [ -n "$MAKE_RPM" ]; then
    echo "Building rpm package for $config"
    nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm
  fi
  if [ -n "$MAKE_TGZ" ]; then
    echo "Building tarball for $config"
    # Reuses the deb built above, hence --tgz implies the deb build step.
    deb2tgz "$PKG_PATH"
  fi
done
# vim: ts=4: sw=4: sts=4: expandtab
| true
|
b07ecf83e9182ef7f7c656c181baad601a7a0e4b
|
Shell
|
abcd567a/pfclient-linux-amd64
|
/Install-pfclient-init.d.sh
|
UTF-8
| 2,282
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the Planefinder client, install it with a SysV init script,
# and print post-install instructions for the web-interface signup.
# NOTE(review): this fetches the i386 binary although the repository name
# says amd64 -- confirm the target architecture.
BINARY_VERSION=pfclient_4.1.1_i386
RESOURCE_FOLDER=/usr/share/pfclient
sudo mkdir ${RESOURCE_FOLDER}
echo "Downloading i386 binary tarball " ${BINARY_VERSION}.tar.gz "from Planefinder.net"
sudo apt install wget
sudo wget -O ${RESOURCE_FOLDER}/${BINARY_VERSION}.tar.gz "http://client.planefinder.net/${BINARY_VERSION}.tar.gz"
sudo tar zxvf ${RESOURCE_FOLDER}/${BINARY_VERSION}.tar.gz -C ${RESOURCE_FOLDER}
sudo cp ${RESOURCE_FOLDER}/pfclient /usr/bin/pfclient
sudo chmod +x /usr/bin/pfclient
echo "Creating log folder ...."
sudo mkdir -p /var/log/pfclient
echo "downloading and installing config file pfclient-config.json"
sudo wget -O ${RESOURCE_FOLDER}/pfclient-config.json https://raw.githubusercontent.com/abcd567a/pfclient-linux-amd64/master/pfclient-config.json
sudo cp ${RESOURCE_FOLDER}/pfclient-config.json /etc/pfclient-config.json
sudo chmod 666 /etc/pfclient-config.json
# Install the SysV init script and register it for boot.
# NOTE(review): installation uses /etc/init.d yet the instructions below
# use systemctl -- works via systemd's sysv compatibility, but confirm.
echo "Downloading and installng init file pfclient"
sudo wget -O ${RESOURCE_FOLDER}/pfclient https://raw.githubusercontent.com/abcd567a/pfclient-linux-amd64/master/pfclient
sudo cp ${RESOURCE_FOLDER}/pfclient /etc/init.d/pfclient
sudo chmod +x /etc/init.d/pfclient
sudo update-rc.d -f pfclient defaults
sudo update-rc.d pfclient enable
sudo service pfclient start
# Post-install guidance (ANSI colors: 32=green, 31=red, 39=default).
echo " "
echo " "
echo -e "\e[32m INSTALLATION COMPLETED \e[39m"
echo -e "\e[32m=======================\e[39m"
echo -e "\e[32m PLEASE DO FOLLOWING:\e[39m"
echo -e "\e[32m=======================\e[39m"
echo -e "\e[32m SIGNUP:\e[39m"
echo -e "\e[32m In your browser, go to web interface at\e[39m"
echo -e "\e[39m http://$(ip route | grep -m1 -o -P 'src \K[0-9,.]*'):30053 \e[39m"
echo -e "\e[32m Fill necessary details to sign up / sign in\e[39m"
echo -e "\e[32m Use IP Address 127.0.0.1 and Port number 30005 when asked for these\e[39m"
echo -e "\e[31m If it fails to save settings when you hit button [Complete Configuration],\e[39m"
echo -e "\e[31m then restart pfclient by following command, and again hit [Complete Configuration] button\e[39m"
echo "      sudo systemctl restart pfclient "
echo " "
echo " "
echo -e "\e[32mTo see status\e[39m  sudo systemctl status pfclient"
echo -e "\e[32mTo restart\e[39m     sudo systemctl restart pfclient"
echo -e "\e[32mTo stop\e[39m        sudo systemctl stop pfclient"
| true
|
195b59c8ef142cc82f17b01315f494ce62728b9c
|
Shell
|
DrVache/Terminus
|
/TerminusQuest/Campus/Bethanie/CREMI/ENT_legendaire.sh
|
UTF-8
| 1,422
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Game quest step: the player must "create an ENT account" by typing their
# student number, and copy the ENT link into their memo file. The loop
# below emulates a tiny shell until both conditions are met.
echo "Renseignes ton numéro étudiant puis copie-colle le lien de l’ENT dans ton mémo.
PS : Le numéro ENT est contenu dans le fichier carte_etudiante."
# Student number is read from the player's inventory file.
numetu=$(<../A22/inventaire/carte_etudiante.txt)
#lienutile=$(<../A22/inventaire/lien_utile.txt)
lienent=$(<lien_ent.txt)
lienutile=""
res=false
# Create an empty memo file for the player to fill in.
cd ../A22/inventaire
touch lien_utile.txt
cd ../../CREMI
while [ true ]
do
  read cdutil
  case $cdutil in
    "ls") # maybe add the -l option?
      ls
      ;;
    "INVENTAIRE") # with a leading "$" as in the spec, nothing happens
      echo "Hé Jean2, ça va ?"
      ;;
    "jobs")
      jobs
      ;;
    *)
      # Fallback: emulate cd/cat, or accept the student number as
      # "account creation".
      if [ ${cdutil:0:2} = "cd" ]
      then
        cd ${cdutil:3}
      elif [ ${cdutil:0:3} = "cat" ]
      then
        cat ${cdutil:4}
      elif [ $cdutil = $numetu ]
      then
        echo "compte cree"
        res=true
      fi
      ;;
  esac
  # Quest completes once the memo matches the ENT link AND the account
  # was "created"; then hand control back to a fresh shell.
  lienutile=$(</net/cremi/tmaziere/Bureau/Terminus-master/TerminusQuest/Campus/Bethanie/A22/inventaire/lien_utile.txt)
  if [ "$lienutile" = "$lienent" ] && [ "$res" = "true" ]
  then
    echo "lien correcte ajouté"
    exec $SHELL
  fi
done
# add different chmod for new quest or directory
# add "exec $SHELL" at the end to go on current path on the main shell
| true
|
d634c07ef824c78e0b6714bd593dcd752303dee7
|
Shell
|
david-stratusee/backup
|
/bin/create_cnip.sh
|
UTF-8
| 2,253
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash -
#===============================================================================
#          FILE: a.sh
#         USAGE: ./a.sh
#        AUTHOR: dengwei, david@holonetsecurity.com
#       CREATED: 2015-09-15 10:51
#===============================================================================
set -o nounset                              # Treat unset variables as an error
# Shared helpers (provides error_echo used at the bottom of this script).
. ~/bin/tools.sh
# Convert a dotted-quad IPv4 address (e.g. 1.2.3.4) to its 32-bit
# integer value, printed on stdout.
# Fix: all temporaries are now local so nothing leaks into the caller's
# scope (the original left IP_ADDR/IP_LIST/IP_ARRAY/num as globals),
# and read -r prevents backslash mangling.
function ip2num()
{
    local ip_addr=$1
    local -a octets
    IFS=. read -r -a octets <<<"$ip_addr"
    echo $(( octets[0] << 24 | octets[1] << 16 | octets[2] << 8 | octets[3] ))
}
# Convert an address-block size (number of addresses, e.g. 256) into the
# corresponding dotted-quad netmask (e.g. 255.255.255.0), on stdout.
# Fix: temporaries are local (the original leaked l_smask/N/H1/H2/L1/L2
# into the caller's scope) and `let` is replaced with $(( )) arithmetic.
function mask2str()
{
    local count=$1
    # 32-bit complement of (count - 1) is the netmask for the block.
    local n=$(( 0xffffffff - (count - 1) ))
    echo "$(( (n >> 24) & 0xff )).$(( (n >> 16) & 0xff )).$(( (n >> 8) & 0xff )).$(( n & 0xff ))"
}
# Download the APNIC delegation list (if not cached), keep the Chinese IPv4
# allocations, merge numerically adjacent ranges, and print one
# "localnet <ip>/<mask>" line per merged block.
apnic_file="delegated-apnic-latest"
apnic_url="http://ftp.apnic.net/apnic/stats/apnic/${apnic_file}"
if [ ! -f ${apnic_file} ]; then
    wget -nv ${apnic_url}
    if [ $? -ne 0 ]; then
        exit 1
    fi
fi
# Records look like: apnic|CN|ipv4|<start-ip>|<count>|...
grep -i "|cn|" delegated-apnic-latest | grep apnic | grep ipv4 >/tmp/apnic_file
# State for the range-merging loop: start of the current merged block and
# the integer address one past its end.
last_sip=0
last_sip_str=""
last_smask=0
last_sip_dest=0
while read line; do
    sip_str=`echo $line | awk -F"|" '{print $4}'`
    smask=`echo $line | awk -F"|" '{print $5}'`
    sip=`ip2num $sip_str`
    # End of this range = start + address count.
    let sip_dest=sip+smask
    if [ $last_sip_dest -eq 0 ]; then
        # First record: start a new block.
        last_sip_dest=$sip_dest
        last_sip_str=$sip_str
        last_smask=$smask
        last_sip=$sip
    elif [ $last_sip_dest -eq $sip ]; then
        # This range starts exactly where the previous one ended: merge.
        last_sip_dest=$sip_dest
        let last_smask+=smask
    else
        # Gap: emit the finished block, then start a new one.
        echo "localnet "$last_sip_str/`mask2str $last_smask`
        # $last_sip $last_smask $last_sip_dest
        last_sip_dest=$sip_dest
        last_sip_str=$sip_str
        last_smask=$smask
        last_sip=$sip
    fi
done </tmp/apnic_file
# Flush the final pending block, if any.
if [ $last_sip_dest -ne 0 ]; then
    echo "localnet "$last_sip_str/`mask2str $last_smask`
    #$last_sip $last_smask $last_sip_dest
fi
rm -f /tmp/apnic_file
error_echo "rm -f delegated-apnic-latest"
| true
|
27e3895343e3e86be986645abba61fd4441ec253
|
Shell
|
evkuz/vm119
|
/script/check_os_version.sh
|
UTF-8
| 263
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether this host runs a supported CentOS/RHEL major release (6 or 7),
# parsed from the 4th word of /etc/redhat-release.
# Fixes: useless `cat`, deprecated `-o` inside `[ ]`, and an unquoted `-eq`
# comparison that errored out when the release field was not a number.
WHICH_RELEASE=$(cut -d ' ' -f4 /etc/redhat-release | cut -d '.' -f1)
if [ "$WHICH_RELEASE" = "7" ] || [ "$WHICH_RELEASE" = "6" ]; then
    echo "$HOSTNAME is under Centos $WHICH_RELEASE"
else
    echo "$HOSTNAME is under Unknown REDHAT version !!!"
fi
| true
|
0455b817525dfacfa8697afd2892e04cb9469b4f
|
Shell
|
sensor-dream/Fedora-post-Install
|
/preparation/network_ifaces
|
UTF-8
| 9,016
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/env bash
# -*- coding: utf-8 -*-
#
## @File : network_ifaces
## @Author : sensor-dream (SDBS)
## @Email : sensor-dream@sensor-dream.ru
## @Site : https://sensor-dream.ru
## @Date : 11.03.2019, 5:56:05
#
# Record this script's own path (read-only, exported)
#
declare -rx preparation_network_ifaces_script_source="$BASH_SOURCE"
if [[ -z "${main_run_script_path}" ]]; then
  declare -rx main_run_script_path="$(pwd)"
  echo "Скрипт запускается из ${main_run_script_path}"
fi
#
# Locate and source the shared library / common configuration (main.cfg):
# walk upward from the current directory until main.cfg is found or the
# filesystem root is reached (fatal if not found).
#
if [[ -z "${main_cfg_script_source}" ]]; then
  check=1
  while [[ "${check}" -eq 1 ]]; do
    if [[ ! -f 'main.cfg' ]]; then
      if [[ "$(pwd)" != "/" ]]; then
        cd ../
      else
        cd "${main_run_script_path}"
        check=0
        echo '\e[47;31m'"!!! Не найден общий файл конфигурации !!!\033[0m"
        tput sgr0
        exit 1
      fi
    else
      check=0
      if [[ -z "${main_cfg_script_source}" ]]; then
        declare -rx main_project_path="$(pwd)"
        . "${main_project_path}/main.cfg"
      fi
      cd "${main_run_script_path}"
    fi
  done
fi
echo_w "Подключаем $(readlink -m ${preparation_network_ifaces_script_source})"
## Configure subnets
# Ensure /etc/networks contains a "link-local" entry equal to ${link_local}
# (from main.cfg): append it when absent, rewrite it when different,
# leave it alone when already correct. Requires root (check_root).
function main_network_file_modify_linklocal() {
  check_root
  local linklocal=($(grep "link-local" "/etc/networks"))
  if [[ -z "${linklocal}" ]]; then
    echo "link-local ${link_local}" >>"/etc/networks"
  elif [[ "${linklocal[1]}" != "$link_local" ]]; then
    sed -i "s/link-local.*$/link-local ${link_local}/" "/etc/networks"
  else
    echo_m "link-local in networks file already modify and set ${link_local}"
    return 0
  fi
  echo_w "networks file modify ${link_local}"
}
# Same pattern as main_network_file_modify_linklocal, but for the
# "link-ipv6-loop" entry (${link_ipv6_loop} from main.cfg).
# NOTE(review): unlike its sibling, this function does not call
# check_root before writing /etc/networks -- confirm this is intended.
function main_network_file_modify_link_ipv6_loop() {
  local linkipv6loop=($(grep "link-ipv6-loop" "/etc/networks"))
  if [[ -z "${linkipv6loop}" ]]; then
    echo "link-ipv6-loop ${link_ipv6_loop}" >>"/etc/networks"
  elif [[ "${linkipv6loop[1]}" != "${link_ipv6_loop}" ]]; then
    sed -i "s/link-ipv6-loop.*$/link-ipv6-loop ${link_ipv6_loop}/" "/etc/networks"
  else
    echo_m "link-ipv6-loop in networks file already modify and set ${link_ipv6_loop}"
    return 0
  fi
  echo_w "networks file modify ${link_ipv6_loop}"
}
# Configure the primary interface's IPv4 settings through NetworkManager
# (nmcli): rename the connection to match the device, set a static
# address/gateway/DNS from main.cfg variables, disable IPv6 on it, then
# reconnect and verify connectivity with a ping before installing tools.
# Requires root.
function main_ip4_interface_configuration() {
  check_root
  # Copy the main.cfg globals into locals for readability.
  local deviceinterface="${device_interface}"
  local mainuser="${main_user}"
  local connectmethod="${connect_method}"
  local connectautoconnect="${connect_autoconnect}"
  local ignoreautodns="${ignore_auto_dns}"
  local ignoreautoroutes="${ignore_auto_routes}"
  local hostname="${host_name}"
  local ipdevice="${ip_device}"
  local ipmask="${ip_mask}"
  local ipgateway="${ip_gateway}"
  local ipdnslocal="${ip_dns_local}"
  local ipdns1="${ip_dns_1}"
  local ipdns2="${ip_dns_2}"
  local ipdns3="${ip_dns_3}"
  local mainpinghost="${main_ping_host}"
  # local real_name_connection="$(nmcli d show ${deviceinterface} | awk -F':[[:space:]]+' '/GENERAL.CONNECTION/ { print $2 }' )";
  # local real_name_connection="$(nmcli -t -f 'GENERAL.CONNECTION' dev show ${deviceinterface} | awk -F: '{print $2}')"
  local real_name_connection="$(nmcli -g 'GENERAL.CONNECTION' dev show ${deviceinterface})"
  local ifacename=''
  echo_w "Real connection name: ${real_name_connection}"
  # If the existing connection name differs from the device name,
  # modify that connection (and rename it to the device name below).
  if [[ "${real_name_connection}" != "${deviceinterface}" ]]; then
    ifacename="${real_name_connection}"
    echo_w "Modification connection.id: ${real_name_connection} -> ${deviceinterface}"
  else
    ifacename="${deviceinterface}"
  fi
  nmcli device disconnect "${deviceinterface}"
  # Pin the connection to the device's current MAC address.
  local macaddress=$(ifconfig "${deviceinterface}" | awk '/ether/ {print $2}')
  nmcli c mod "${ifacename}" connection.id "${deviceinterface}" connection.stable-id "${deviceinterface}" connection.interface-name "${deviceinterface}" connection.autoconnect "${connectautoconnect}" connection.permissions user:$mainuser ethernet.mac-address "${macaddress}" ethernet.cloned-mac-address "${macaddress}" ipv4.method "${connectmethod}" ipv4.ignore-auto-dns "${ignoreautodns}" ipv4.ignore-auto-routes "${ignoreautoroutes}" ipv4.dhcp-hostname "${hostname}" ipv4.addresses "${ipdevice}/${ipmask}" ipv4.gateway "${ipgateway}"
  nmcli c mod "${deviceinterface}" ipv6.addresses '' ipv6.method ignore
  # Prefer the local DNS server when one is configured.
  if [[ -n "${ipdnslocal}" ]]; then
    # nmcli c mod "$nameinterface" ipv4.dns "$ipdnslocal" +ipv4.dns "$ipdns1" +ipv4.dns "$ipdns2";
    nmcli c mod "${deviceinterface}" ipv4.dns "${ipdnslocal}"
  else
    nmcli c mod "${deviceinterface}" ipv4.dns "${ipdns1}" +ipv4.dns "${ipdns2}" +ipv4.dns "${ipdns3}"
  fi
  #cat<<EOF > "/etc/NetworkManager/dispatcher.d/del_search_resolv_con" | chmod +x '/etc/NetworkManager/dispatcher.d/del_search_resolv_conf';
  ##!/usr/bin/sh
  #[ "\$2" = "up" ] && \$(sed -i '/search/d' '/etc/resolv.conf');
  #exit 0
  #EOF
  # nmcli connection up "${deviceinterface}";
  nmcli device connect "${deviceinterface}"
  # nmcli connection show "${deviceinterface}";
  # tmp=<<<$(ping "$mainpinghost" -c 1 2> /dev/null);
  # One successful ping means the gateway works; install the base tools.
  if [[ -n "$(ping "${mainpinghost}" -c 1 2>/dev/null)" ]]; then
    install_packages \
      "mc" \
      "tmpwatch" \
      ;
    ## "NetworkManager-tui" \
    ## "NetworkManager-ppp" \
    ## "lshw" \
    ## "lookup" \
  else
    echo_w "Проблемы со shлюзом ???"
  fi
  echo_w "interface $deviceinterface configuration prepared"
}
# Configure IPv6 connectivity through a tunnel broker: delete any stale
# broker connection, optionally create a new sit tunnel to the broker
# (${brocker_*} variables from main.cfg), assign the local IPv6 pool
# address to the main interface, and reconnect. Requires root.
function ip6_brocker_interface_configuration() {
  check_root
  # Copy the main.cfg globals into locals for readability.
  local mainuser="${main_user}"
  local hostname="${host_name}"
  local deviceinterface="${device_interface}"
  local brockerconnectmethod="${brocker_connect_method}"
  local brockerconnecttype="${brocker_connect_type}"
  local brockeraddinterface="${brocker_add_interface}"
  local brockerupinterface="${brocker_up_interface}"
  local brockerdeviceinterface="${brocker_device_interface}"
  local brockerautoconnect="${brocker_autoconnect}"
  local brockerparentdevice="${brocker_parent_device}"
  local brockerlocalip="${brocker_local_ip}"
  local brockerignoreautodns="${brocker_ignore_auto_dns}"
  local brockerignoreautoroutes="${brocker_ignore_auto_routes}"
  local namebrocker="${name_brocker}"
  local brockerremoteip="${brocker_remote_ip}"
  local brockeripv6="${brocker_ipv6}"
  local ipv6mask="${ipv6_mask}"
  local ipv6maskpull="${ipv6_mask_pull}"
  local brockergwipv6="${brocker_gw_ipv6}"
  local brockernetworkpull="${brocker_network_pull}"
  local localipv6ofbrockernetworkpull="${local_ipv6_of_brocker_network_pull}"
  local ipv6dnslocal="${ipv6_dns_local}"
  local ipv6dns1="${ipv6_dns_1}"
  local ipv6dns2="${ipv6_dns_2}"
  local ipv6dns3="${ipv6_dns_3}"
  nmcli device disconnect "${deviceinterface}"
  # Remove any existing broker connection before re-creating it.
  if [[ -n "$(nmcli connection show "${brockerdeviceinterface}" 2>/dev/null)" ]]; then
    nmcli c mod "${deviceinterface}" ipv6.addresses '' ipv6.method ignore
    # nmcli device delete "${brockerdeviceinterface}";
    nmcli c delete "${brockerdeviceinterface}"
    # exit;
  fi
  if [[ ${brockeraddinterface} -eq 1 ]]; then
    # Create the sit tunnel to the broker endpoint with a static IPv6
    # address and gateway.
    nmcli c add type "${brockerconnecttype}" connection.permissions user:$mainuser \
      autoconnect "${brockerautoconnect}" save yes con-name "${brockerdeviceinterface}" ifname "${brockerdeviceinterface}" \
      mode sit remote "${brockerremoteip}" local "${brockerlocalip}" dev "${brockerparentdevice}" \
      ip6 "${brockeripv6}/${ipv6mask}" gw6 "${brockergwipv6}" ipv6.method "${brockerconnectmethod}" \
      ipv6.ignore-auto-dns "${brockerignoreautodns}" ipv6.ignore-auto-routes "${brockerignoreautoroutes}" ipv6.dhcp-hostname "${hostname}" ip-tunnel.ttl 255 ipv4.method disabled
    # Prefer the local IPv6 DNS server when one is configured.
    if [[ -n "${ipv6dnslocal}" ]]; then
      # nmcli c mod "$brockerdeviceinterface" ipv6.dns "$ipv6dnslocal" +ipv6.dns "$ipv6dns1" +ipv6.dns "$ipv6dns2";
      nmcli c mod "${brockerdeviceinterface}" ipv6.dns "${ipv6dnslocal}"
    else
      nmcli c mod "${brockerdeviceinterface}" ipv6.dns "${ipv6dns1}" +ipv6.dns "${ipv6dns2}" +ipv6.dns "${ipv6dns3}"
    fi
    # Give the main interface its address from the broker's routed pool.
    nmcli c mod "${deviceinterface}" ipv6.method "${brockerconnectmethod}" ipv6.ignore-auto-dns "${brockerignoreautodns}" ipv6.ignore-auto-routes "${brockerignoreautoroutes}" ipv6.dhcp-hostname "${hostname}" ipv6.addresses "${localipv6ofbrockernetworkpull}/${ipv6maskpull}" ## ipv6.gateway "$brockerGw6";
    nmcli device connect "${deviceinterface}"
    # Depending on config, either remove the tunnel again or just leave
    # it down after creation.
    if [[ ${brockerupinterface} -eq 1 ]]; then
      nmcli c delete "${brockerdeviceinterface}"
    elif [[ ${brockerupinterface} -eq 0 && ${brockerautoconnect} -eq 1 ]]; then
      nmcli device disconnect "${brockerdeviceinterface}"
    fi
  else
    nmcli device connect "${deviceinterface}"
  fi
}
# Entry point: apply the network-file modifications, then configure the
# IPv4 interface and the IPv6-broker tunnel via the helpers defined above.
function main_preparation_network_ifaces_script_source() {
  echo_m "Initialise main function"
  main_network_file_modify_linklocal
  main_network_file_modify_link_ipv6_loop
  main_ip4_interface_configuration
  ip6_brocker_interface_configuration
}
# Run immediately; script arguments are forwarded (currently unused by main).
main_preparation_network_ifaces_script_source "$@"
| true
|
b1bd1d408c9b3b6628c193d19fd0024570b6a92a
|
Shell
|
chen3feng/szl
|
/src/engine/language_tests/update
|
UTF-8
| 10,244
| 3.375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Run from the script's own directory so relative test paths resolve.
if [[ "$0" == */* ]] ; then
  cd "${0%/*}/"
fi
# Pull in $SZL, $PROTOCOL_COMPILER, etc.
source ./setpaths
# Need to be in Pacific timezone to pass some tests
TZ=PST8PDT; export TZ
RFLAG="" # -R: put stdout/stderr in $SZL_TMP, not testdata
NFLAG="" # -N: native code gen
UFLAG="" # -U: do not run optimizer
CFLAG="" # -C: test cloning
# Consume leading option flags; the first non-flag argument ends the loop
# and everything remaining is treated as test names.
while true; do
  case "$1" in
    -X)
      # specify a unique tag for file names to avoid collisions
      shift
      UNIQUER=$1
      ;;
    -R)
      RFLAG=$1
      ;;
    -N)
      NFLAG=$1
      ;;
    -NU|-UN)
      # Combined form: native codegen with the optimizer disabled.
      NFLAG=$1
      UFLAG=$1
      ;;
    -U)
      UFLAG=$1
      ;;
    -C)
      CFLAG=$1
      ;;
    -)
      # default flags. (can't use empty string; shell has trouble)
      ;;
    *)
      break # exits "while" loop
      ;;
  esac
  shift
done
# With no test names given: wipe all golden files and rerun every test;
# otherwise only the named tests are refreshed.
if test -z "$*"; then
  rm -f *.out *.err
  args=$(echo *.szl*)
else
  args=$*
fi
# Main loop: run szl over every requested test and regenerate its golden
# .out/.err files (or scratch copies under $SZL_TMP when -R was given).
for i in $args; do
  case $i in
    *szl)
      # Split "dir/base.szl" into its directory and basename parts.
      dir=$(echo $i | sed 's/\/.*//')
      base=$(echo $i | sed 's/\.szl.*//')
      if [ "$RFLAG" ]; then
        out="$SZL_TMP"/$USER-sawzall-out-$UNIQUER
        err="$SZL_TMP"/$USER-sawzall-err-$UNIQUER
      else
        out=$base.out
        err=$base.err
      fi
      flags="--logfile=/dev/null"
      if [ "$NFLAG" ]; then
        flags="$flags --native"
      else
        flags="$flags --nonative"
      fi
      if [ "$UFLAG" ]; then
        flags="$flags --nooptimize_sawzall_code"
      else
        flags="$flags --optimize_sawzall_code"
      fi
      if [ "$CFLAG" ]; then
        flags="$flags --test_function_cloning"
      fi
      # Suppress warnings about multiple inclusion caused by .proto files.
      flags="$flags --noshow_multiple_inclusion_warnings"
      export SZL_ARG=""
      sources=$i
      files=''
      if [ "$dir" == 'base' ]; then
        flags="$flags --print_source"
      fi
      # A separate flags variable that quotes flags differently because they
      # contain spaces.
      flags_with_spaces=()
      # Per-test flag overrides.
      case $i in
        base/assert.szl)
          flags="$flags --print_rewritten_source"
          ;;
        base/badindex.szl)
          flags="$flags -ignore_undefs"
          ;;
        base/badnew.szl)
          flags="$flags -ignore_undefs"
          ;;
        base/defleak.szl)
          flags="$flags -ignore_undefs"
          ;;
        base/constantfolding.szl|base/constantfolding[234].szl)
          flags="$flags -optimize_sawzall_code"
          ;;
        base/defness.szl)
          flags="$flags -optimize_sawzall_code"
          ;;
        base/defness-U.szl)
          flags="$flags -nooptimize_sawzall_code"
          ;;
        base/defness2.szl)
          flags="$flags -optimize_sawzall_code -noignore_undefs"
          flags="$flags --noremove_unreachable_functions"
          ;;
        base/defness3.szl)
          flags="$flags -optimize_sawzall_code -ignore_undefs"
          flags="$flags --noremove_unreachable_functions"
          ;;
        base/defness4.szl)
          flags="$flags -optimize_sawzall_code"
          flags="$flags --noremove_unreachable_functions"
          ;;
        base/intrinsic_good.szl|base/goodlit.szl|base/indexable.szl)
          flags="$flags -noexecute"
          ;;
        base/goodout.szl)
          flags="$flags --table_output=*"
          ;;
        base/grep.szl)
          files=base/passwd
          ;;
        base/fnesting.szl)
          flags="$flags -ignore_undefs"
          ;;
        base/limit_recursion-32.szl|base/limit_recursion-64.szl)
          # run all stack overflow tests in interpreted mode only
          flags="$flags --nonative"
          ;;
        base/stackoverflow-32.szl|base/stackoverflow-64.szl)
          flags="$flags --nonative --optimize_sawzall_code"
          ;;
        base/stackoverflow-U-32.szl|base/stackoverflow-U-64.szl)
          flags="$flags --nonative --nooptimize_sawzall_code"
          ;;
        base/include.szl)
          flags="$flags --show_multiple_inclusion_warnings"
          ;;
        base/include_b.szl)
          sources="base/include_dir/a.szl $sources"
          ;;
        base/line_directives.szl|base/line_directives3.szl)
          flags="$flags --noprint_source --print_raw_source"
          ;;
        base/print_tables.szl)
          flags="$flags --noprint_source --print_tables --test_backend_type_conversion"
          ;;
        base/proto_keyword_good.szl|base/proto_keyword_bad.szl)
          flags="$flags --print_proto_clauses"
          ;;
        base/proto6.szl)
          flags="$flags --print_input_proto_name
--print_referenced_tuple_field_names=<all>"
          ;;
        base/proto8.szl)
          flags="$flags --print_input_proto_name
--print_referenced_tuple_field_names=<input>"
          ;;
        base/printtree.szl)
          flags="$flags --print_source --print_tree --noexecute"
          ;;
        base/stackoverflow_b-32.szl)
          flags="$flags --stack_size=0 --nonative"
          ;;
        base/stackoverflow_c-32.szl)
          flags="$flags --stack_size=0 --nonative"
          ;;
        base/stackoverflow_d-32.szl)
          flags="$flags --stack_size=0 --nonative"
          ;;
        base/garbagecollection1.szl)
          files=base/garbagecollection1.in
          # set memory_limit last to override default setting
          flags="$flags --memory_limit=200"
          ;;
        base/garbagecollection[23].szl)
          # set memory_limit last to override default setting
          flags="$flags --memory_limit=200 --nonative"
          ;;
        base/valuepropagation.szl)
          flags="$flags --optimize_sawzall_code"
          ;;
        base/valuepropagation2.szl)
          flags="$flags --optimize_sawzall_code --ignore_undefs"
          ;;
        base/valuepropagation[35].szl)
          if [ ! "$RFLAG" ] ; then
            echo "Update $i using base/valuepropagation{3-4,5-6}.sh" 1>&2
            exit 1
          fi
          flags="$flags --noprint_source --optimize_sawzall_code"
          flags="$flags --noremove_unreachable_functions"
          ;;
        base/valuepropagation[46].szl)
          if [ ! "$RFLAG" ] ; then
            echo "Update $i using base/valuepropagation{3-4,5-6}.sh" 1>&2
            exit 1
          fi
          flags="$flags --noprint_source --optimize_sawzall_code --ignore_undefs"
          flags="$flags --noremove_unreachable_functions"
          ;;
        emitter/tables*.szl)
          flags="$flags --bootstrapsum_seed=bootsum --table_output=*"
          ;;
        intrinsics/resourcestats*.szl)
          # set memory_limit last to override default setting
          flags="$flags --memory_limit=-1" # prevent GC from going off in Alloc
          ;;
        fixed_crashes/crash7.szl)
          flags="$flags --memory_limit=50"
          ;;
        fixed_crashes/crash18.szl)
          export SZL_ARG="--pattern=グーグル"
          ;;
        fixed_crashes/crash21.szl)
          # verify that ELF file generation succeeds
          flags="$flags --gen_elf=/dev/null"
          ;;
        fixed_crashes/crash23.szl)
          flags="$flags --ignore_undefs"
          files=fixed_crashes/crash23.in
          ;;
        fixed_crashes/crash25.szl)
          # assert occurs without NDEBUG and --nonative
          flags="$flags --nonative"
          ;;
        type_declarations/outputType_good.szl)
          # print the source with table parameters folded
          flags="$flags --print_source"
          ;;
        type_declarations/outputType_bad_restrict.szl)
          flags="$flags --restrict"
          ;;
        unreachable/unused_fields.szl)
          # Check functions are removed and fields are unused.
          flags="$flags --remove_unreachable_functions --print_rewritten_source"
          flags="$flags --ignore_undefs"
          flags="$flags --print_referenced_tuple_field_names=<all>"
          ;;
        unreachable/*.szl)
          # Check functions are removed in rewritten source.
          flags="$flags --remove_unreachable_functions --print_rewritten_source"
          ;;
        *)
          ;;
      esac
      if ! "$SZL" --protocol_compiler="$PROTOCOL_COMPILER" \
        --protocol_compiler_plugin="$PROTOCOL_COMPILER_PLUGIN" \
        --protocol_compiler_temp="$PROTOCOL_COMPILER_TMP" \
        $flags $sources $files "${flags_with_spaces[@]}" > $out 2> $err; then
        # BUG FIX: this diagnostic previously went to stdout ('echo 2>& 1 ...'),
        # which pollutes captured output; send it to stderr instead.
        echo "szl failed on $i" >&2
      fi
      # Filter timestamps and filename/line numbers from error messages.
      # Also ignore <nil> entries in stack trace
      sed -e '/SSLAgentClientHelper/d
/protocol-compiler: warning: /d
/^ <nil>: /d
/^### COMMAND/d
/^WARNING: Logging before InitGoogle/d
/^\[WARNING .*Using weak random seed for poisson dice/d
/^W.*\] UncompressChunkOrAll: Error: -3 avail_out: 81/d
s|\(undefined value at \).*/\(intrinsics/runszl\)|\1\2|
s/^[IEWF][0-9]\{4\} [0-9:.]\{15\} .*] \(.*\)/\1/' $err > $err.tmp
      mv -f $err.tmp $err
      # Also ignore the exact path of the protocol-compiler and plugin
      sed -e 's/^### COMMAND: .*protoc /### COMMAND: protocol-compiler /
/^### /s/--plugin=.*protoc-gen-szl/--plugin=protoc-gen-szl/' \
        $out > $out.tmp
      if [[ "$i" == "emitter/tables_misc_good.szl" ]] ; then
        # Special case: output order varies, so keep output in sorted order.
        sort $out.tmp > $out
      else
        mv -f $out.tmp $out
      fi
      chmod g+w $out $err 2>/dev/null
      ;;
    *)
      # BUG FIX: usage message previously went to stdout ('echo 2>&1 ...');
      # usage errors belong on stderr.
      echo "usage: update foo.szl" >&2
      ;;
  esac
done
| true
|
7af7bbb52a8cc0a85d85206ae0191d81ff2e24ce
|
Shell
|
nancheal/docker-arachni
|
/scripts/startup.sh
|
UTF-8
| 3,835
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Arachni Web Bootstrap
# Print the ASCII "Arachni Scanner" banner.
# NOTE(review): the leading "[32m" / trailing "[0m" look like ANSI color
# sequences whose ESC byte may have been lost in transit — verify against
# the original file before relying on this transcription.
logo() {
cat <<FILE
[32m
▄▄▄· ▄▄▄ ▄▄▄· ▄▄· ▄ .▄ ▐ ▄ ▪ .▄▄ · ▄▄· ▄▄▄· ▐ ▄ ▐ ▄ ▄▄▄ .▄▄▄
▐█ ▀█ ▀▄ █·▐█ ▀█ ▐█ ▌▪██▪▐█•█▌▐███ ▐█ ▀. ▐█ ▌▪▐█ ▀█ •█▌▐█•█▌▐█▀▄.▀·▀▄ █·
▄█▀▀█ ▐▀▀▄ ▄█▀▀█ ██ ▄▄██▀▐█▐█▐▐▌▐█· ▄▀▀▀█▄██ ▄▄▄█▀▀█ ▐█▐▐▌▐█▐▐▌▐▀▀▪▄▐▀▀▄
▐█ ▪▐▌▐█•█▌▐█ ▪▐▌▐███▌██▌▐▀██▐█▌▐█▌ ▐█▄▪▐█▐███▌▐█ ▪▐▌██▐█▌██▐█▌▐█▄▄▌▐█•█▌
▀ ▀ .▀ ▀ ▀ ▀ ·▀▀▀ ▀▀▀ ·▀▀ █▪▀▀▀ ▀▀▀▀ ·▀▀▀ ▀ ▀ ▀▀ █▪▀▀ █▪ ▀▀▀ .▀ ▀
[0m
FILE
}
# Print the spider ASCII art shown at container start.
# NOTE(review): "[31m" / "[0m" appear to be ANSI color codes that may have
# lost their ESC byte in transit — verify against the original file.
picture() {
cat <<TEXT
[31m
 I III
 III IHNN NNCH
 IINHHHHHN HCHN IHNICN
 INNNNII NHNIICIHHI NHN NNIINNNNNNNNNII
 NHNII IHNICIHHI NHIINHNNNNNNNNNNHHHHHNI
 HN IHNH HHN HHIHHNNNNNNNNNHHHNNNHHHHHNNI
 IIHHNII IHNIHCHIIHHHHHHHHNNNNNHHHNNNNNNHIINNHCHHNI
 IINNHHNNHNNHHHHNNII CCHNCCNNHHHHHHCHNNNHHCNNHHHCCHHI INNHHNI
 IINNNNNI ICI IINNHHHNNNNHHCNHHNHHHHHNNHHCHHHHHCCCCCCHHHHHI HCI
 INNII ICI INNHHCCCCCCACHNHHHHHHCHHCCHHHNIII NHHN HN
 NHI IH INNHCCHHHCHNHCCHHCHHCHN IHCNI ICN
 NN NN NCHAACHHHHHHCCHHCHHHHIIIIIIIII IHCN NH
 IHN IHI HNHCCHAAHHCCCHNNNINHCHHHHHHHHNNII IHI HI
 IHI IHNIII HHHHHI IIINNNHHHCCCCHCCHHHHHNNII NHI IC
 INN III IHNIIIINNHHHCCCCCCCCCCHHHHNNNII HN HN
 NNNI NHHINNNHHCCCCCCCCCHHHHHNIIIII IHN NH
 NHHNHHHHCCCCCHHHHHNNIINCN NH IINHI
 ICHNNHHHHHHHHNNNIIIII IHN HN INNI
 HHIINNNNNNNII HN IH
 ICI IINH IHN
 NN IICI IHN
 HNI IHN IN
 NHN IH
 IHN NN
 IHN I
 INI
[0m
TEXT
}
# Generate a random 20-character alphanumeric password.
# Outputs: the password on stdout (no trailing characters beyond the newline).
genpass() {
  # Read directly from /dev/urandom (the original piped it through a
  # useless 'cat'); keep only [A-Za-z0-9] and emit the first 20-char line.
  # 'head -n 1' is the portable spelling of the obsolescent 'head -1'.
  tr -cd 'A-Za-z0-9' < /dev/urandom | fold -w 20 | head -n 1
}
# Initialise the Arachni database schema unless it is already in place.
# 'db:version' only succeeds once the schema exists, so a failure means
# we still need to run the setup task.
database() {
  if ! bin/arachni_web_task db:version >/dev/null 2>&1; then
    echo "Setting up database..."
    bin/arachni_web_task db:setup
  fi
}
# Rotate the default admin password to a freshly generated one and print
# the resulting credentials for the operator.
setpass() {
  echo "Setting up a new password..."
  pass=$(genpass)
  bin/arachni_web_change_password "admin@admin.admin" "$pass"
  printf '%s\n\n' "Your new credentials are as follows..."
  printf '%s\n' "Username: admin@admin.admin"
  printf '%s\n\n' "Password: $pass"
}
# Launch the Arachni web UI on all interfaces; this call blocks.
startup() {
  printf '\n%s\n\n' "Booting web server...."
  bin/arachni_web --host 0.0.0.0
}
# Boot sequence: ensure the DB schema exists, show the banners, rotate the
# admin password, then start the web server (blocks until shutdown).
database
picture
logo
setpass
startup
| true
|
57e0cfce55cee47a006a0ac9503a442858d228e0
|
Shell
|
n1amr/dotfiles
|
/bin/open-url
|
UTF-8
| 286
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Open each URL given on the command line with the desktop's default
# handler (xdg-open), detached from the terminal and silenced.
# NOTE(review): BROWSER is assigned but never used or exported below —
# presumably meant to influence the handler; confirm before removing.
BROWSER='firefox'
# Skip a leading '--' separator, if present; stop at the first URL.
while [ $# -gt 0 ]; do
  case "$1" in
    --) shift ; break ;;
    *) break ;;
  esac
  shift
done
# Launch detached so the shell returns immediately.
open () { xdg-open "$1" & }
# Same, but also discard xdg-open's stdout/stderr.
# NOTE(review): the trailing '&' double-backgrounds ('open' already
# backgrounds xdg-open) — harmless, but likely unintended.
open_with_redirect () { open "$@" > /dev/null 2>&1 & }
for url in "$@"; do
  open_with_redirect "$url"
done
| true
|
1524d68bcf313faa90a99db40d5b2d5d5f223df3
|
Shell
|
getsocial-rnd/neo4j-aws-ha-cluster
|
/docker-entrypoint.sh
|
UTF-8
| 22,076
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eu
# NOTE: -e/-u from the shebang only apply when the file is exec'd directly
# (as a container entrypoint), not when invoked via 'bash script.sh'.
# get more info from AWS environment:
# - instance im running on
# - my IP
# - AZ and region.
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id/)
INSTANCE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4/)
AZ=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone/)
# NOTE(review): the region is hard-coded even though AZ is fetched above;
# presumably it should be derived from AZ — confirm before deploying
# outside us-east-1.
REGION="us-east-1"
# these will be populated by cluster surrounding later in the code.
CLUSTER_IPS=""
SERVER_ID=""
COORDINATION_PORT=5001
BOLT_PORT=7687
ENVIRONMENT=""
# we use special tags for volumes, marked as data volumes.
# they hold Neo4j database and may be picked up by any node starting up in the respective
# availability zone.
DATA_TAG="${STACK_NAME}-data"
# number of iterations to try find any volume to use before creating a new one.
VOLUME_SEARCH_RETRIES=4
# number of iterations to wait for a volume to get attached
VOLUME_PROVISION_RETRIES=10
VOLUME_DEVICE="/dev/sdz"
DATA_ROOT="/neo4j"
FOUND_VOLUME=""
ATTACHED=false
DEVICE=""
BACKUP_NAME=neo4j-backup
# Refuse to run the enterprise edition unless the license agreement has
# been explicitly accepted via NEO4J_ACCEPT_LICENSE_AGREEMENT=yes.
# Community and other editions pass through untouched.
assert_enterprise_license() {
  # Nothing to verify for non-enterprise editions.
  if [ "$NEO4J_EDITION" != "enterprise" ]; then
    return 0
  fi
  # ':=' also assigns the "no" default, matching the original behaviour.
  if [ "${NEO4J_ACCEPT_LICENSE_AGREEMENT:=no}" == "yes" ]; then
    return 0
  fi
  echo >&2 "
In order to use Neo4j Enterprise Edition you must accept the license agreement.
(c) Network Engine for Objects in Lund AB. 2017. All Rights Reserved.
Use of this Software without a proper commercial license with Neo4j,
Inc. or its affiliates is prohibited.
Email inquiries can be directed to: licensing@neo4j.com
More information is also available at: https://neo4j.com/licensing/
To accept the license agreement set the environment variable
NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
To do this you can use the following docker argument:
--env=NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
"
  exit 1
}
# create fixed server IDs for nodes.
# we can't let ids be random or inappropriate, main reason is
# not letting slave-only node to be picked up for push synchronization
# in case there's only one active slave.
# So in case we have a 3 node cluster, layout is:
#
#
# [Master] <===== pull changes ==== [Slave-only]
# ||
# push changes
# ||
# \/
# [Slave]
#
# Derive a stable cluster server ID from the last two IPv4 octets of
# INSTANCE_IP: SERVER_ID = (oct3 << 8) + oct4. Slave-only nodes are pushed
# above 2^16 so the fixed_ascending push strategy never selects them.
make_server_id() {
  local tail_octets="${INSTANCE_IP#*.*.*}"
  local third="${tail_octets%%.*}"
  local fourth="${tail_octets#*.*}"
  SERVER_ID=$(( (third << 8) + fourth ))
  if [ "${NEO4J_ha_slave__only:-}" == "true" ] ; then
    # Guarantee a higher ID than any main-cluster node can have.
    SERVER_ID=$(( SERVER_ID + 65536 ))
  fi
  echo "Server ID generated: $SERVER_ID"
}
# Build CLUSTER_IPS ("ip:port,ip:port,...") from all running instances of
# this CloudFormation stack. Slave-only nodes are excluded from the initial
# hosts unless this very instance is the slave.
# Globals: reads ENVIRONMENT (via get_tags), STACK_NAME, REGION,
#          INSTANCE_ID, INSTANCE_IP, COORDINATION_PORT, SLAVE_MODE;
#          writes CLUSTER_IPS.
get_cluster_node_ips() {
  get_tags
  # NB: despite the original name 'ips', this query returns instance IDs.
  local instance_ids
  instance_ids=$(aws ec2 describe-instances \
    --filters Name=tag-key,Values=App Name=tag-value,Values=Neo4j \
    Name=tag-key,Values=Environment Name=tag-value,Values=$ENVIRONMENT \
    Name=tag-key,Values=aws:cloudformation:stack-name Name=tag-value,Values=$STACK_NAME \
    --region $REGION \
    --query Reservations[].Instances[].InstanceId \
    --output text \
  )
  local count=0
  local ID
  for ID in $instance_ids;
  do
    # BUG FIX: count was never incremented, so it stayed 0 and the
    # SLAVE_MODE=SINGLE branch below was unreachable.
    count=$((count + 1))
    local result
    result=$(aws ec2 describe-instances --instance-ids $ID --region $REGION \
      --query 'Reservations[].Instances[].[PrivateIpAddress,Tags[?Key==`SlaveOnly`][Value]]' \
      --output text \
    )
    local parts=($result)
    # Default to empty so a missing SlaveOnly tag does not trip 'set -u'.
    local IP="${parts[0]:-}"
    local slave_only="${parts[1]:-}"
    if [ "$IP" == "" ] ; then
      continue
    fi
    # slave will join himself, main cluster should not wait for slave-only node
    # to startup.
    if [ "$slave_only" == "true" ] ; then
      if [ "$ID" == "$INSTANCE_ID" ] ; then
        echo "I'm slave-only, i'll try to join main cluster"
      else
        echo "Slave-only instance $IP is excluded from initial hosts, it'll have to request invitation"
        continue
      fi
    fi
    if [ "${CLUSTER_IPS}" != "" ] ; then
      CLUSTER_IPS=$CLUSTER_IPS,
    fi
    CLUSTER_IPS=$CLUSTER_IPS$IP:$COORDINATION_PORT
  done
  echo "Instances in stack: $count"
  # NOTE(review): SLAVE_MODE must be set in the environment (shebang set -u).
  if [ "$count" -eq 2 ] && [ "${SLAVE_MODE}" == "SINGLE" ]; then
    CLUSTER_IPS=$INSTANCE_IP:$COORDINATION_PORT
    return
  fi
  echo "Fetched Neo4j cluster nodes: $CLUSTER_IPS"
}
# Read the 'Environment' tag of this EC2 instance (set via a CloudFormation
# parameter) into the ENVIRONMENT global; abort if the tag is missing.
get_tags() {
  echo "Instance tags: Reading..."
  ENVIRONMENT=$(aws ec2 describe-tags \
    --filters Name=resource-id,Values=${INSTANCE_ID} \
    --query "Tags[?Key=='Environment'][Value]" \
    --region ${REGION} \
    --output text \
  )
  if [ -z "$ENVIRONMENT" ] ; then
    echo "Error: tag 'Environment' is required to be set on cluster instances."
    exit 1
  fi
  echo "Instance tags: environment = '${ENVIRONMENT}', data volume tags = '${DATA_TAG}'"
}
# creates a new volume with a desired size/type for database deployment
# happens only if no free compatible volume has been found in the given availability zone.
# Globals: sets FOUND_VOLUME; reads AZ, REGION, NEO4J_EBS_TYPE,
#          NEO4J_EBS_SIZE, DATA_TAG, ENVIRONMENT, VOLUME_PROVISION_RETRIES.
create_volume() {
  echo "Initiating volume creation... (Type: ${NEO4J_EBS_TYPE}, Size: ${NEO4J_EBS_SIZE})."
  local tags="ResourceType=volume,Tags=[{Key=Name,Value=$DATA_TAG},{Key=Environment,Value=$ENVIRONMENT}]"
  # BUG FIX (SC2155): 'local state=$(aws …)' followed by 'local status=$?'
  # always yielded status 0 (the exit code of 'local'), so AWS failures went
  # undetected. Capture the status explicitly; '|| status=$?' also keeps the
  # shebang's 'set -e' from aborting before we can print a diagnostic.
  local state status=0
  state=$(aws ec2 create-volume \
    --availability-zone ${AZ} \
    --region ${REGION} \
    --volume-type ${NEO4J_EBS_TYPE} \
    --size ${NEO4J_EBS_SIZE} \
    --query [VolumeId,State] \
    --tag-specifications "${tags}" \
    --output=text \
  ) || status=$?
  local parts=($state)
  # ':-' defaults keep 'set -u' from killing us before the error message
  # when the AWS call returned nothing.
  FOUND_VOLUME="${parts[0]:-}"
  state="${parts[1]:-}"
  if [ "$status" -ne 0 ] || [ "$state" != "creating" ]; then
    echo "Fatal: Failed to create new volume. AWS status: $status, volume state: '$state'."
    exit 1
  fi
  echo "Creating volume $FOUND_VOLUME..."
  local created=false
  local i
  for ((i=0; i < VOLUME_PROVISION_RETRIES; i++))
  do
    # let it create
    sleep 2
    state=$(aws ec2 describe-volumes \
      --volume-ids=$FOUND_VOLUME \
      --region ${REGION} \
      --query=Volumes[].State \
      --output=text \
    )
    echo "Creation status: '$state'"
    if [ "$state" == "available" ]; then
      created=true
      break
    elif [ "$state" == "in-use" ]; then
      echo "Fatal: $FOUND_VOLUME already in-use."
      exit 1
    fi
  done
  if [ "$created" != true ]; then
    echo "Fatal: Failed to wait for $FOUND_VOLUME to be created."
    exit 1
  fi
}
# attach a volume with existing Neo4j database dir via AWS cli.
# Does several retries before abandoning; exits the script if the volume
# never reaches the "attached" state.
# Arguments: $1 - EBS volume id.
# Globals: sets ATTACHED, FOUND_VOLUME, DEVICE; reads INSTANCE_ID,
#          VOLUME_DEVICE, REGION, VOLUME_PROVISION_RETRIES.
attach_volume() {
  ATTACHED=false
  FOUND_VOLUME=""
  echo "Attaching a volume $1..."
  # BUG FIX (SC2155): 'local attach_state=$(aws …)' masked the aws exit
  # status ($? was the status of 'local', i.e. always 0). Capture it
  # explicitly; '|| status=$?' also tolerates a lost attachment race
  # without tripping the shebang's 'set -e'.
  local attach_state status=0
  attach_state=$(aws ec2 attach-volume \
    --volume-id $1 \
    --instance-id ${INSTANCE_ID} \
    --device ${VOLUME_DEVICE} \
    --query State \
    --region ${REGION} \
    --output text \
  ) || status=$?
  if [ "$status" -eq 0 ] && [ "$attach_state" == "attaching" ]; then
    # (typo "attechment" fixed in the message below)
    echo "Successfully started attachment of $1!"
    FOUND_VOLUME=$1
  else
    echo "Failed to attach $1, state = '$attach_state', continuing..."
  fi
  local i
  for ((i=0; i < VOLUME_PROVISION_RETRIES; i++))
  do
    # give it some time to attach
    sleep 2
    echo "Checking attachment status..."
    # '|| true' preserves the original tolerance of transient describe
    # failures (the masked status meant they never aborted the loop).
    local attach_info
    attach_info=$(aws ec2 describe-volumes \
      --volume-ids=$1 \
      --region ${REGION} \
      --query=Volumes[].[Attachments[].[InstanceId,State,Device]] \
      --output=text \
    ) || true
    echo "Attachment status: '$attach_info'"
    local parts=($attach_info)
    # ':-' defaults let the error paths below run instead of a 'set -u' crash
    # when the volume has no attachments yet.
    local inst_id="${parts[0]:-}"
    local state="${parts[1]:-}"
    local dev="${parts[2]:-}"
    if [ "$inst_id" != "$INSTANCE_ID" ] ; then
      echo "Error: $1 volume was expected to be attached to instance $INSTANCE_ID, is attached to $inst_id"
      exit 1
    fi
    if [ "$state" == "attached" ] ; then
      echo "Successfully attached the volume!"
      ATTACHED=true
      DEVICE=$dev
      break
    fi
  done
  if [ "$ATTACHED" != true ] ; then
    echo "Error: $1 volume never got attached to instance $INSTANCE_ID."
    exit 1
  fi
}
# Establish a database directory on a data volume.
# Tries to attach a free volume in given AZ. If unsuccessful after several
# retries, will create a new volume, format it to ext4 if it has been just
# created and mount it.
setup_data_volume() {
  echo "Trying to start using existing Neo4j EBS volume... Instance ID = $INSTANCE_ID"
  # fetch environment/name for given node.
  get_tags
  DEVICE=""
  local i
  for ((i=0; i < VOLUME_SEARCH_RETRIES; i++))
  do
    DEVICE=$(aws ec2 describe-volumes \
      --filters Name=tag-key,Values=Name Name=tag-value,Values=$DATA_TAG \
      Name=tag-key,Values=Environment Name=tag-value,Values=$ENVIRONMENT \
      Name=attachment.instance-id,Values=$INSTANCE_ID \
      --query Volumes[].Attachments[].Device \
      --region ${REGION} \
      --output text \
    )
    if [ "$DEVICE" != "" ] ; then
      echo "Found existing volume: $DEVICE"
      break
    else
      echo "No existing volume attached. Searching..."
    fi
    local volumes
    volumes=$(aws ec2 describe-volumes \
      --filters Name=tag-key,Values=Name Name=tag-value,Values=$DATA_TAG \
      Name=tag-key,Values=Environment Name=tag-value,Values=$ENVIRONMENT \
      Name=availability-zone,Values=$AZ \
      Name=status,Values=available \
      --query Volumes[].VolumeId \
      --region ${REGION} \
      --output text \
    )
    echo "Available volumes in $AZ: $volumes"
    # NOTE(review): attach_volume exits the whole script when an attachment
    # fails, so the retry branch below looks unreachable — confirm intent.
    local vol_id
    for vol_id in $volumes;
    do
      attach_volume $vol_id
    done
    if [ "$FOUND_VOLUME" != "" ] ; then
      break
    else
      # no luck in this round, try to find free volumes again after a timeout.
      echo "Failed to attach an available volume in this round."
      sleep 2
    fi
  done
  if [ "$FOUND_VOLUME" == "" ] && [ "$DEVICE" == "" ]; then
    echo "WARNING: Could not find an available EBS volume in $AZ availability zone, nor existing device, must create."
    create_volume
    attach_volume $FOUND_VOLUME
  fi
  # check if its formatted, if not then make it be ext4
  # BUG FIX (SC2155): 'local device_fmt=$(blkid …)' left $? == 0
  # unconditionally (status of 'local'), so the '-ne 0' test below could
  # never fire; capture blkid's real status instead.
  local device_fmt status=0
  device_fmt=$(blkid "$DEVICE") || status=$?
  echo "Block device ($DEVICE) (status $status): $device_fmt"
  if [ "$status" -ne 0 ] || [ "$device_fmt" == "" ]; then
    echo "New volume: formatting as ext4..."
    mkfs.ext4 "$DEVICE"
  fi
  # mount it
  echo "Mounting volume..."
  mkdir -p "$DATA_ROOT"
  mount "$DEVICE" "$DATA_ROOT"
  # make sure subdirs exist
  mkdir -p "$DATA_ROOT/data"
  mkdir -p "$DATA_ROOT/logs"
  ln -s "$DATA_ROOT/data" /var/lib/neo4j/data
  ln -s "$DATA_ROOT/logs" /var/lib/neo4j/logs
  echo "Mounting volume... Done."
}
# Dump all NEO4J_* environment variables as settings into neo4j.conf.
# Naming convention (see configure_neo4j): '_' -> '.', '__' -> '_'.
save_neo4j_configurations() {
  # list env variables with prefix NEO4J_ and create settings from them
  unset NEO4J_AUTH NEO4J_SHA256 NEO4J_TARBALL NEO4J_EBS_SIZE NEO4J_EBS_TYPE
  unset NEO4J_GUEST_AUTH
  for i in $( set | grep ^NEO4J_ | awk -F'=' '{print $1}' | sort -rn ); do
    # Translate the env-var name back into a dotted setting name.
    setting=$(echo ${i} | sed 's|^NEO4J_||' | sed 's|_|.|g' | sed 's|\.\.|_|g')
    # ${!i} is an indirect expansion: the value of the variable named by $i.
    value=$(echo ${!i})
    if [[ -n ${value} ]]; then
      if grep -q -F "${setting}=" /var/lib/neo4j/conf/neo4j.conf; then
        # Remove any lines containing the setting already
        # NOTE(review): the setting name is interpolated into a sed /regex/;
        # the dots match any character, so an over-broad delete is possible
        # for similarly named settings — confirm acceptable.
        sed --in-place "/${setting}=.*/d" /var/lib/neo4j/conf/neo4j.conf
      fi
      # Then always append setting to file
      echo "${setting}=${value}" >> /var/lib/neo4j/conf/neo4j.conf
    fi
  done
}
# Assemble the complete NEO4J_* setting environment for an HA cluster node.
# Uses the globals computed earlier (SERVER_ID, CLUSTER_IPS, ports), maps
# legacy env-var spellings to the current naming convention, wires optional
# bind-mounted directories, and handles NEO4J_AUTH. The resulting NEO4J_*
# variables are written to neo4j.conf by save_neo4j_configurations.
configure_neo4j() {
  # high availability cluster settings.
  NEO4J_dbms_mode=${NEO4J_dbms_mode:-HA}
  NEO4J_ha_server__id=${NEO4J_ha_serverId:-$SERVER_ID}
  NEO4J_ha_initial__hosts=${NEO4J_ha_initialHosts:-$CLUSTER_IPS}
  NEO4J_ha_pull__interval=${NEO4J_ha_pull__interval:-5s}
  NEO4J_ha_tx__push__factor=${NEO4J_ha_tx__push__factor:-1}
  NEO4J_ha_join__timeout=${NEO4J_ha_join__timeout:-2m}
  NEO4J_dbms_backup_address=${NEO4J_dbms_backup_address:-0.0.0.0:6362}
  NEO4J_dbms_allow__upgrade=${NEO4J_dbms_allow__upgrade:-false}
  NEO4J_apoc_export_file_enabled=true
  # using lucene index provider fixes disk leak in the 3.4.6 version
  NEO4J_dbms_index_default__schema__provider="lucene+native-2.0"
  # not configurable for now.
  NEO4J_ha_tx__push__strategy=fixed_ascending
  NEO4J_dbms_security_procedures_unrestricted=apoc.*
  # this allows master/slave health/status endpoints to be open for ELB
  # without basic auth.
  NEO4J_dbms_security_ha__status__auth__enabled=false
  # Env variable naming convention:
  # - prefix NEO4J_
  # - double underscore char '__' instead of single underscore '_' char in the setting name
  # - underscore char '_' instead of dot '.' char in the setting name
  # Example:
  # NEO4J_dbms_tx__log_rotation_retention_policy env variable to set
  # dbms.tx_log.rotation.retention_policy setting
  # Backward compatibility - map old hardcoded env variables into new naming convention
  : ${NEO4J_dbms_tx__log_rotation_retention__policy:=${NEO4J_dbms_txLog_rotation_retentionPolicy:-"100M size"}}
  : ${NEO4J_wrapper_java_additional:=${NEO4J_UDC_SOURCE:-"-Dneo4j.ext.udc.source=docker"}}
  # NOTE(review): both heap settings fall back to the same legacy variable
  # NEO4J_dbms_memory_heap_maxSize — confirm initial-size was intended.
  : ${NEO4J_dbms_memory_heap_initial__size:=${NEO4J_dbms_memory_heap_maxSize:-"512M"}}
  : ${NEO4J_dbms_memory_heap_max__size:=${NEO4J_dbms_memory_heap_maxSize:-"512M"}}
  : ${NEO4J_dbms_unmanaged__extension__classes:=${NEO4J_dbms_unmanagedExtensionClasses:-}}
  : ${NEO4J_dbms_allow__format__migration:=${NEO4J_dbms_allowFormatMigration:-}}
  : ${NEO4J_dbms_connectors_default__advertised__address:=${NEO4J_dbms_connectors_defaultAdvertisedAddress:-}}
  : ${NEO4J_causal__clustering_expected__core__cluster__size:=${NEO4J_causalClustering_expectedCoreClusterSize:-}}
  : ${NEO4J_causal__clustering_initial__discovery__members:=${NEO4J_causalClustering_initialDiscoveryMembers:-}}
  : ${NEO4J_causal__clustering_discovery__listen__address:=${NEO4J_causalClustering_discoveryListenAddress:-"0.0.0.0:5000"}}
  : ${NEO4J_causal__clustering_discovery__advertised__address:=${NEO4J_causalClustering_discoveryAdvertisedAddress:-"$(hostname):5000"}}
  : ${NEO4J_causal__clustering_transaction__listen__address:=${NEO4J_causalClustering_transactionListenAddress:-"0.0.0.0:6000"}}
  : ${NEO4J_causal__clustering_transaction__advertised__address:=${NEO4J_causalClustering_transactionAdvertisedAddress:-"$(hostname):6000"}}
  : ${NEO4J_causal__clustering_raft__listen__address:=${NEO4J_causalClustering_raftListenAddress:-"0.0.0.0:7000"}}
  : ${NEO4J_causal__clustering_raft__advertised__address:=${NEO4J_causalClustering_raftAdvertisedAddress:-"$(hostname):7000"}}
  # unset old hardcoded unsupported env variables
  # NOTE(review): NEO4J_dbms_memory_heap_maxSize appears twice in this list.
  unset NEO4J_dbms_txLog_rotation_retentionPolicy NEO4J_UDC_SOURCE \
    NEO4J_dbms_memory_heap_maxSize NEO4J_dbms_memory_heap_maxSize \
    NEO4J_dbms_unmanagedExtensionClasses NEO4J_dbms_allowFormatMigration \
    NEO4J_dbms_connectors_defaultAdvertisedAddress NEO4J_ha_serverId \
    NEO4J_ha_initialHosts NEO4J_causalClustering_expectedCoreClusterSize \
    NEO4J_causalClustering_initialDiscoveryMembers \
    NEO4J_causalClustering_discoveryListenAddress \
    NEO4J_causalClustering_discoveryAdvertisedAddress \
    NEO4J_causalClustering_transactionListenAddress \
    NEO4J_causalClustering_transactionAdvertisedAddress \
    NEO4J_causalClustering_raftListenAddress \
    NEO4J_causalClustering_raftAdvertisedAddress
  # Custom settings for dockerized neo4j
  : ${NEO4J_dbms_tx__log_rotation_retention_policy:=100M size}
  : ${NEO4J_dbms_memory_pagecache_size:=512M}
  : ${NEO4J_wrapper_java_additional:=-Dneo4j.ext.udc.source=docker}
  : ${NEO4J_dbms_memory_heap_initial__size:=512M}
  : ${NEO4J_dbms_memory_heap_max__size:=512M}
  : ${NEO4J_dbms_connectors_default__listen__address:=0.0.0.0}
  : ${NEO4J_dbms_connector_http_listen__address:=0.0.0.0:7474}
  : ${NEO4J_dbms_connector_https_listen__address:=0.0.0.0:7473}
  : ${NEO4J_dbms_connector_bolt_listen__address:=0.0.0.0:$BOLT_PORT}
  : ${NEO4J_ha_host_coordination:=$(hostname):$COORDINATION_PORT}
  : ${NEO4J_ha_host_data:=$(hostname):6001}
  : ${NEO4J_causal__clustering_discovery__listen__address:=0.0.0.0:5000}
  : ${NEO4J_causal__clustering_discovery__advertised__address:=$(hostname):5000}
  : ${NEO4J_causal__clustering_transaction__listen__address:=0.0.0.0:6000}
  : ${NEO4J_causal__clustering_transaction__advertised__address:=$(hostname):6000}
  : ${NEO4J_causal__clustering_raft__listen__address:=0.0.0.0:7000}
  : ${NEO4J_causal__clustering_raft__advertised__address:=$(hostname):7000}
  # Wire optional bind-mounted directories into the corresponding settings.
  if [ -d /conf ]; then
    find /conf -type f -exec cp {} conf \;
  fi
  if [ -d /ssl ]; then
    NEO4J_dbms_directories_certificates="/ssl"
  fi
  if [ -d /plugins ]; then
    NEO4J_dbms_directories_plugins="/plugins"
  fi
  if [ -d /logs ]; then
    NEO4J_dbms_directories_logs="/logs"
  fi
  if [ -d /import ]; then
    NEO4J_dbms_directories_import="/import"
  fi
  if [ -d /metrics ]; then
    NEO4J_dbms_directories_metrics="/metrics"
  fi
  # NEO4J_AUTH / NEO4J_GUEST_AUTH are "user/password" pairs.
  user=$(echo ${NEO4J_AUTH:-} | cut -d'/' -f1)
  password=$(echo ${NEO4J_AUTH:-} | cut -d'/' -f2)
  guest_user=$(echo ${NEO4J_GUEST_AUTH:-} | cut -d'/' -f1)
  guest_password=$(echo ${NEO4J_GUEST_AUTH:-} | cut -d'/' -f2)
  if [ "${NEO4J_AUTH:-}" == "none" ]; then
    NEO4J_dbms_security_auth__enabled=false
  elif [[ "${user}" == neo4j ]]; then
    if [ "${password}" == "neo4j" ]; then
      echo "Invalid value for password. It cannot be 'neo4j', which is the default."
      exit 1
    fi
    # Will exit with error if users already exist (and print a message explaining that)
    bin/neo4j-admin set-initial-password "${password}" || true
    # as soon as we get credentials, we can start waiting for BOLT protocol to warm it up
    # upon startup.
    echo "Scheduling init tasks..."
    NEO4J_USERNAME="${user}" NEO4J_PASSWORD="${password}" GUEST_USERNAME="${guest_user}" GUEST_PASSWORD="${guest_password}" bash /init_db.sh &
    echo "Scheduling init tasks: Done."
  elif [ -n "${NEO4J_AUTH:-}" ]; then
    echo "Invalid value for NEO4J_AUTH: '${NEO4J_AUTH}'"
    exit 1
  fi
}
# Source the optional extension hook, then replace this shell with Neo4j
# in console mode so it receives container signals directly.
run_neo4j() {
  if [ -f "${EXTENSION_SCRIPT:-}" ]; then
    . "${EXTENSION_SCRIPT}"
  fi
  exec /var/lib/neo4j/bin/neo4j console
}
# Download a database snapshot zip from S3 and restore it into graph.db.
# Requires SNAPSHOT_PATH ("bucket/key" of a zip that contains $BACKUP_NAME).
# Globals: sets BACKUP_DIR, BACKUP_PATH, S3_PATH (kept global as before).
restore_neo4j() {
  BACKUP_DIR=/tmp
  BACKUP_PATH="$BACKUP_DIR/_snapshot.zip"
  S3_PATH="s3://$SNAPSHOT_PATH"
  echo "Restore initiated. Source: $S3_PATH"
  # BUG FIX: under the shebang's 'set -e' a failed 'aws s3 cp' aborted the
  # script before the explanatory message could print; 'if !' keeps the
  # failure inside a condition so we control the diagnostics and exit code.
  if ! aws s3 cp "$S3_PATH" "$BACKUP_PATH"; then
    echo "Error: failed to copy snapshot $SNAPSHOT_PATH from S3"
    exit 1
  fi
  echo "Successfully copied snapshot $SNAPSHOT_PATH from S3!"
  unzip "$BACKUP_PATH" -d "$BACKUP_DIR"
  echo "Running restore..."
  # Same reasoning as above: keep the failure in a condition.
  if ! /var/lib/neo4j/bin/neo4j-admin restore --from="$BACKUP_DIR/$BACKUP_NAME" --database=graph.db --force; then
    echo "Error: failed to restore from snapshot."
    exit 1
  fi
}
# Entrypoint dispatch:
#   "neo4j"       -> configure this node and run the server (blocks)
#   "dump-config" -> copy the effective config into the /conf volume
#   "backup"      -> take a DB backup and ship it to S3
#   anything else -> exec verbatim
if [ "$1" == "neo4j" ]; then
  # make sure the client has agreed to license if it's an enterprise edition.
  # (only prompt for license agreement if command contains "neo4j" in it).
  assert_enterprise_license
  # create server ID, unique to this instance, based on it's private IP's last 2 octets.
  make_server_id
  # get all IPs of this autoscaling group to build ha.initial_hosts of the cluster.
  get_cluster_node_ips
  setup_data_volume
  configure_neo4j
  save_neo4j_configurations
  if [ -n "${SNAPSHOT_PATH:-}" ]; then
    restore_neo4j
  fi
  run_neo4j
elif [ "$1" == "dump-config" ]; then
  if [ -d /conf ]; then
    cp --recursive conf/* /conf
  else
    echo "You must provide a /conf volume"
    exit 1
  fi
elif [ "$1" == "backup" ]; then
  BACKUP_DIR=${BACKUP_DIR:-/tmp}
  # XXX: you may want to modify this script to just use the latest backup.
  # download latest backup if exist
  # LATEST_BACKUP=$(aws s3 ls s3://$AWS_BACKUP_BUCKET | tail -n 1 | awk '{print $4}')
  # if [ -n "$LATEST_BACKUP" ]; then
  #   echo "Getting latest backup file $LATEST_BACKUP from s3://$AWS_BACKUP_BUCKET"
  #   aws s3 cp s3://$AWS_BACKUP_BUCKET/$LATEST_BACKUP $BACKUP_DIR/
  #   echo "Unzipping backup content"
  #   unzip $BACKUP_DIR/$LATEST_BACKUP -d $BACKUP_DIR
  # fi
  # BUG FIX: '[ -z $BACKUP_FROM ]' crashed with "unbound variable" under the
  # shebang's 'set -u' whenever BACKUP_FROM was not exported; default it.
  if [ -z "${BACKUP_FROM:-}" ] || [ "${BACKUP_FROM:-}" == "this_instance" ]; then
    BACKUP_FROM=$INSTANCE_IP
  fi
  configure_neo4j
  save_neo4j_configurations
  echo "Creating Neo4j DB backup"
  /var/lib/neo4j/bin/neo4j-admin backup --backup-dir=$BACKUP_DIR/ --name=$BACKUP_NAME --from=$BACKUP_FROM
  BACKUP_FILE=$BACKUP_NAME-$(date +%s).zip
  echo "Zipping backup content in file $BACKUP_FILE"
  pushd "$BACKUP_DIR"
  zip -r "$BACKUP_FILE" "$BACKUP_NAME"
  # Upload file to the "/daily" dir if backup run at 00 hour
  if [ "$(date +%H)" == "00" ]; then
    aws s3 cp "$BACKUP_FILE" s3://$AWS_BACKUP_BUCKET/daily/
  else
    aws s3 cp "$BACKUP_FILE" s3://$AWS_BACKUP_BUCKET/hourly/
  fi
  rm -rf "$BACKUP_FILE"
else
  exec "$@"
fi
| true
|
2a1e6c31f45997012860ab89f220cbd71e120692
|
Shell
|
Kugumi/roger-skyline-1
|
/scrup.sh
|
UTF-8
| 258
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run apt-get update/upgrade and append their output, under a timestamped
# banner, to /var/log/update_script.log (intended for a root cron job).
# $(...) replaces the obsolescent backticks; printf replaces 'echo "\n..."',
# whose backslash handling varies between shells (dash expands \n, bash
# does not) — printf's \n expansion is specified by POSIX.
update_output=$(sudo apt-get update)
upgrade_output=$(sudo apt-get upgrade)
{
  printf '\n*******************************\n%s\n*******************************\n\n' "$(date)"
  printf '%s\n\n' "$update_output"
  printf '%s\n' "$upgrade_output"
} >> /var/log/update_script.log
| true
|
d448674dbbab6ea90314c5f85b18e7fee8eb72f9
|
Shell
|
diadara/dispatch
|
/release.sh
|
UTF-8
| 761
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Cross-compile dispatch with gox, then package each produced binary as a
# .zip (windows/darwin) or .tar.gz (linux/bsd) into $ReleaseDir.
# FIX: all path expansions are now quoted so a $GOPATH containing spaces
# cannot break mv/zip/tar.
set -e
set -o pipefail
shopt -s nullglob

Package=github.com/khlieng/dispatch
BuildDir=$GOPATH/src/$Package/build
ReleaseDir=$GOPATH/src/$Package/release
BinaryName=dispatch

mkdir -p "$BuildDir"
cd "$BuildDir"
rm -f dispatch*

gox -ldflags -w "$Package"

mkdir -p "$ReleaseDir"
cd "$ReleaseDir"
rm -f dispatch*

for f in "$BuildDir"/*
do
  # Archive name: the gox output name minus any .exe, plus the extension.
  zipname=$(basename "${f%".exe"}")
  if [[ $f == *"linux"* ]] || [[ $f == *"bsd"* ]]; then
    zipname=${zipname}.tar.gz
  else
    zipname=${zipname}.zip
  fi
  binbase=$BinaryName
  if [[ $f == *.exe ]]; then
    binbase=$binbase.exe
  fi
  bin=$BuildDir/$binbase
  # Temporarily rename so the archive contains a stable binary name,
  # then restore the original gox output name afterwards.
  mv "$f" "$bin"
  if [[ $zipname == *.zip ]]; then
    zip -j "$zipname" "$bin"
  else
    tar -cvzf "$zipname" -C "$BuildDir" "$binbase"
  fi
  mv "$bin" "$f"
done
| true
|
7919b625d4a6275a962efa12f54569f86ebc978b
|
Shell
|
1liujin/ChaosPlotter
|
/setup.sh
|
UTF-8
| 468
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Set up ChaosPlotter: optionally create and activate a virtualenv
# (default), or skip it with --no-venv, then install the dependencies.
set -e

# Print usage and abort; shared by every invalid-invocation path.
error() {
  echo "Invoke this script with no arguments or '--no-venv' only."
  exit 1
}

# At most one argument is accepted.
if (( $# > 1 )); then
  error
fi

case "${1:-}" in
  "")
    # Default mode: require virtualenv, create and activate one.
    command -v virtualenv >/dev/null 2>&1 || { echo >&2 "virtualenv is required but it's not installed. Aborting."; exit 1; }
    virtualenv venv
    source venv/bin/activate
    ;;
  --no-venv)
    # Install straight into the current environment.
    ;;
  *)
    error
    ;;
esac

pip3 install pyqt5 matplotlib
echo -e "----------\nSUCCESS!\n"
|
7bccdaeecc265197683ca15da903d61805a09a96
|
Shell
|
clippaxa/chbook
|
/full.sh
|
UTF-8
| 1,366
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# Build an Ubuntu Trusty chroot with debootstrap, bind the pseudo
# filesystems, copy the kernel build inputs in, run the build script
# inside the chroot, then tear the mounts down.
set -x
arch=amd64
target=trusty
chroot_dir=trusty_rootfs
# Install debootstrap on the host
# NOTE(review): the original comment said git is installed here too, but
# only debootstrap is — confirm whether git is actually needed.
sudo apt-get update
sudo apt-get install -y -f debootstrap
# Create chroot
mkdir ${chroot_dir}
sudo debootstrap --arch=${arch} --variant=buildd ${target} ${chroot_dir}
# Need to bind /dev, /dev/pts, /proc, and /sys before entering chroot
#sudo mount --bind /dev $chroot_dir/dev
sudo mount --bind /dev/pts $chroot_dir/dev/pts
sudo mount -t proc proc $chroot_dir/proc
sudo mount -t sysfs sys $chroot_dir/sys
# Copy necessary files to the chroot
apt_sources_file=sources.list
chroot_apt_sources_file=${chroot_dir}/etc/apt/sources.list
sudo cp ${apt_sources_file} ${chroot_apt_sources_file}
work_dir=/usr/src
chroot_work_dir=${chroot_dir}${work_dir}
sudo cp .config ${chroot_work_dir}
sudo cp kernel_cmdline_boot_from_sd.txt ${chroot_work_dir}
sudo cp kernel_cmdline_boot_from_ssd.txt ${chroot_work_dir}
sudo cp kernel_cmdline_boot_from_usb.txt ${chroot_work_dir}
sudo cp bootstub.efi ${chroot_work_dir}
sudo cp run_commands_in_chroot.sh ${chroot_work_dir}
# Run script in chroot
sudo chroot ${chroot_dir} /bin/bash -x ${work_dir}/run_commands_in_chroot.sh
# Need to unmount /dev, /dev/pts, /proc, and /sys after leaving chroot
sudo umount $chroot_dir/dev/pts
#sudo umount $chroot_dir/dev
sudo umount $chroot_dir/proc
sudo umount $chroot_dir/sys
set +x
|
2044ff006831407c9fc3add30386d8b8a0a59ef0
|
Shell
|
rmferrer/env_bootstrap
|
/setup.sh
|
UTF-8
| 3,675
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# SETUP_URL="https://bit.ly/rmferrer_env_bootstrap" && /bin/bash -c "$(curl -fsSL ${SETUP_URL} || wget ${SETUP_URL} -O - )";
function _install_brew() {
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
}
# Lowercased `uname -a`, used by the OS-detection checks elsewhere.
_uname() {
	uname -a | tr '[:upper:]' '[:lower:]'
}
function _pkg_install() {
UNAME=$(_uname)
if [[ $UNAME =~ "darwin" || $UNAME =~ "ubuntu" ]]; then
brew install "${@}" || brew upgrade "${@}"
elif [[ $UNAME =~ "raspberrypi" ]]; then
sudo apt-get -y install "${@}"
fi
}
# Run the given command line via eval; on any failure print a diagnostic
# (command, exit status) and abort the whole script with exit 1.
function _run_or_exit() {
	local cmd_line="${*}"
	local rc=0
	eval "${cmd_line}" || rc="${?}"
	if [[ ${rc} -ne 0 ]]; then
		printf "Failed running: ${cmd_line}\nStatus: ${rc}\nExiting..."
		exit 1
	fi
	return 0
}
function _install_package_manager() {
UNAME=$(_uname)
printf "Attempting to install package manager...\n\n"
printf "System detected: ${UNAME}\n\n\n"
if [[ ${UNAME} =~ "darwin" ]]; then
printf "Setting up macOS...\n\n"
_install_brew
brew update
elif [[ ${UNAME} =~ "ubuntu" || ${UNAME} =~ "debian" ]]; then
printf "Setting up ubuntu or debian...\n\n"
sudo apt-get -y install build-essential curl file git
_install_brew
eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
brew update
elif [[ ${UNAME} =~ "raspberrypi" ]]; then
printf "Setting up Raspberry Pi...\n\n"
sudo apt-get -y update
else
printf "System not recognized! uname -a: ${UNAME}\n\n"
return 1
fi
}
function _install_test_packages() {
_run_or_exit _pkg_install hello
}
function _install_base_packages() {
_run_or_exit _pkg_install git zsh
UNAME=$(_uname)
if [[ ${UNAME} =~ "darwin" ]]; then
_run_or_exit brew install --cask 1password-cli
else
printf "Please install one password and then type any key.\n\n"
printf "https://app-updates.agilebits.com/product_history/CLI\n\n\n"
read DUMMY
fi
# verify 1p
_run_or_exit op help > /dev/null
}
# Returns 0 iff an active 1Password CLI session exists (the probe command
# succeeds silently).
function _1p_logged_in() {
	op list templates > /dev/null 2>&1
}

# Interactively sign in to 1Password, retrying up to max_retries times.
# Prompts for domain, email and an optional account shorthand; final status
# comes from a last _1p_logged_in probe.
function _1p_login() {
	# FIX: use 'local -r' instead of a bare 'readonly' — the original made
	# max_retries a global readonly, so a second call to this function
	# failed when re-assigning it.
	local -r max_retries=3
	# Drop any stale CLI configuration so we start from a clean state.
	[[ -f "${HOME}/.op/config" ]] && rm "${HOME}/.op/config"
	local retries=0
	# FIX: the original used [[ ${retries} < ${max_retries} ]], which is a
	# lexicographic *string* comparison inside [[ ]]; use arithmetic.
	while ! _1p_logged_in && (( retries < max_retries )); do
		printf "Enter 1password domain: "
		read domain
		printf "Enter 1password email: "
		read email
		printf "Enter shorthand: "
		read shorthand
		retries=$((retries + 1))
		if [[ $shorthand ]]; then
			eval $(op signin ${domain} ${email} --shorthand ${shorthand});
		else
			eval $(op signin ${domain} ${email});
		fi
	done
	_1p_logged_in
}
function _install_chezmoi() {
UNAME=$(_uname)
if [[ ${UNAME} =~ "darwin" || ${UNAME} =~ "ubuntu" ]]; then
_pkg_install chezmoi
elif [[ ${UNAME} =~ "raspberrypi" ]]; then
sudo apt-get -y install golang && sudo apt-get -y upgrade golang && \
go get -u github.com/twpayne/chezmoi
fi
}
function _manage_dotfiles() {
CHEZMOI_DIR="${HOME}/.local/share/chezmoi"
if [[ -d "${CHEZMOI_DIR}" ]]; then
printf "Chezmoi dir exists. Delete it? [y/n]: "
read DELETE_CHEZMOI_DIR
if [[ ${DELETE_CHEZMOI_DIR} = 'y' ]]; then
rm -rf "${CHEZMOI_DIR}"
fi
fi
_run_or_exit _install_chezmoi
printf "Logging into 1password...\n\n"
_run_or_exit _1p_login
printf "Logged in successfully!\n\n"
printf "Enter OP Key Id: "
read KEY_ID
printf "Enter dotfiles repo uri: "
read DOTFILES_URI
ssh-agent bash -c "
ssh-add -D
ssh-add - <<< \"$(op get document ${KEY_ID})\"
chezmoi init \"${DOTFILES_URI}\" --apply;
op signout;
"
${SHELL}
}
function _main() {
_run_or_exit _install_package_manager
_install_test_packages
_install_base_packages
_manage_dotfiles
}
_main
| true
|
dc54d3dc4e2a152f02025ac1cfec06dd1eb19100
|
Shell
|
DeercoderPractice/shell
|
/abs/comment.sh
|
UTF-8
| 500
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Tutorial script: demonstrates where '#' starts a comment and where it does
# not (inside quotes, escaped, and in parameter/arithmetic expansion).
echo 'The # here doesnot begin a comment'
echo "The # here doesnot begin a commnet"
echo The \# here does not begin a comment
echo The # this is a comment
echo ${PATH#*:} # parameter substitution, not a comment
echo $((2#101011)) # base-2 numeric constant, not a comment
echo hello; echo there;

# NOTE(review): $filename is never assigned in this file, so these branches
# operate on an empty expansion — presumably intentional for the comment
# demo; confirm.
if [ -x "$filename" ]; then
#+
    echo "File $filename exists."; cp $filename $filename.bak
else #
    echo "File $filename not found."; touch $filename;
fi;
echo "File test complete."
# Thanks, S.C
| true
|
0feb675131aeccd3cd97ca77ce5cb857b7cd743a
|
Shell
|
narulkargunjan/lethril
|
/bin/lethril-twitterd
|
UTF-8
| 2,770
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
# Get Config File if passed as argument 2
DEFAULTS=$2
DEFAULTS=${DEFAULTS:-"/etc/default/lethril-twitterd"}
. $DEFAULTS
NAME="Lethril Twitter Daemon"
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_dev_null() {
if [ ! -c /dev/null ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 1 || true
fi
if ! run_by_init; then
log_action_msg "/dev/null is not a character device!"
fi
exit 1
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
stop_worker () {
  # Stop the daemon recorded in $PID_FILE via start-stop-daemon.  Extra
  # flags from the caller (e.g. --oknodo, --retry 5) are forwarded via $*.
  cmd="start-stop-daemon --stop \
    --verbose \
    $* \
    --pidfile $PID_FILE"
  if $cmd; then
    log_end_msg 0
    # Remove the stale pid file once the process is confirmed stopped.
    /bin/rm -f $PID_FILE
  else
    log_end_msg 1
  fi
}
start_worker () {
  # Launch the daemon in the background via start-stop-daemon, writing a pid
  # file so stop_worker/status can find it.  Extra flags arrive via $*.
  # FIX: the original dropped the line continuation after "--pidfile
  # $PID_FILE", leaving a raw newline inside the command string; it only
  # worked because $cmd is expanded unquoted.  Restored for consistency
  # with stop_worker.
  cmd="start-stop-daemon --start \
    --verbose --background \
    --oknodo \
    --make-pidfile \
    $* \
    --pidfile $PID_FILE \
    --exec $PYTHON $SCRIPT -- --config-file $CONFIG_FILE"
  if $cmd; then
    log_end_msg 0
  else
    log_end_msg 1
  fi
}
case "$1" in
start)
check_dev_null
log_daemon_msg "Starting...." "$NAME"
start_worker
;;
stop)
log_daemon_msg "Stopping...." "$NAME"
stop_worker --oknodo
;;
reload|force-reload)
echo "Use start+stop"
;;
restart)
log_daemon_msg "Restarting...." "$NAME"
stop_worker --oknodo --retry 5
check_dev_null log_end_msg
start_worker
;;
try-restart)
log_daemon_msg "Restarting....." "$NAME"
set +e
stop_worker --retry 5
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_dev_null log_end_msg
start_worker
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p $PID_FILE "$PYTHON $SCRIPT" "$NAME" && exit 0 || exit $?
;;
monitor_status)
if status_of_proc -p $PID_FILE "$PYTHON $SCRIPT" "$NAME"
then
echo "lethril-twitterd is up"
else
echo "lethril-twitterd server is down"
log_daemon_msg "Restarting...." "$NAME"
stop_worker --oknodo --retry 5
check_dev_null log_end_msg
start_worker
exit
fi
;;
*)
log_action_msg "Usage: $0 {start|stop|force-reload|restart|try-restart|status|monitor_status}"
exit 1
esac
exit 0
| true
|
e77236bbbedf761269e6aa4e55e0fd64b6d5bba0
|
Shell
|
Haxine/MingTools
|
/bin/run-jre.sh
|
UTF-8
| 1,087
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
cd "$(dirname "$0")" # enter the directory containing this script

BASE_DIR=.
APP_NAME="ming-tools"
APP_JAR="${BASE_DIR}/lib/ming-tools.jar"
JAVA="${BASE_DIR}/jre/bin/java"
JAVA_OPT=""
SPRING_BOOT_OPT="--spring.config.location=${BASE_DIR}/conf/application.yaml"

# Launch the application in the background, capturing its startup output.
${JAVA} -jar ${JAVA_OPT} ${APP_JAR} ${SPRING_BOOT_OPT} > ${APP_NAME}_start.out &

## Detect the operating system
pl=echo
# FIX: the original wrote [[ "$(uname)"=="Darwin" ]] with no spaces around
# ==, which tests a single non-empty string and is therefore always true —
# /bin/echo was used on every OS.  On macOS the builtin echo lacks -n
# support in sh mode, hence the explicit /bin/echo there.
if [[ "$(uname)" == "Darwin" ]]
then
    # Mac OS X
    pl=/bin/echo
fi

num=1
# Poll the startup log until the app reports success or a startup error.
# NOTE(review): if neither pattern ever appears this loop never ends —
# confirm a watchdog elsewhere, or add a timeout.
while [ -f ${APP_NAME}_start.out ]
do
    result=$(grep "Started application" ${APP_NAME}_start.out)
    result_err=$(grep "Caused by" ${APP_NAME}_start.out)
    if [[ "$result" != "" ]]
    then
        ${pl} ""
        # FIX: quote the grep results so matched lines are printed verbatim
        # instead of being word-split and whitespace-collapsed.
        ${pl} "${result}"
        ${pl} "应用启动完成!"
        break
    else
        if [ $num -eq 1 ]
        then
            ${pl} -n "正在启动.."
            ((num++))
        else
            ${pl} -n ".."
        fi
        sleep 1s
    fi
    if [[ "$result_err" != "" ]]
    then
        ${pl} ""
        ${pl} "${result_err}"
        ${pl} "应用启动失败!"
        break
    fi
done
# NOTE(review): the script always exits 1, even on successful startup — kept
# as-is since callers may rely on it, but this looks like a bug to confirm.
exit 1
| true
|
20732a706e5dd6d54c8e2e42106efb220deef22a
|
Shell
|
XGWang0/Suse_testsuite
|
/sets/qa_testset_automation/automation/qaset/run/acceptance-run.openqa
|
UTF-8
| 2,527
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper that derives TARGET_RELEASE, QASET_ROOT and SQ_TEST_RUN_SET when
# they are not provided, records the run set in the user config, and starts
# the qaset runner for openQA.

if [[ -z ${TARGET_RELEASE} ]] ; then
    echo "TARGET_RELEASE isn't defined!"
    echo "Try parse it from /etc/SuSE-release"
    if [[ -a /etc/os-release ]];then
        source /etc/os-release
        # FIX: the original used [[ ! ${NAME:0:3} -eq 'SLE' ]]; -eq is an
        # *arithmetic* comparison, so both sides evaluated to 0 and the
        # "not SLE" guard could never fire.  Use a string comparison.
        if [[ ${NAME:0:3} != 'SLE' ]];then
            echo "Isn't SUSE SLE system"
            exit 0
        fi
        TARGET_RELEASE="SLE${VERSION//-}"
    elif [[ -a /etc/SuSE-release ]] ; then
        # FIX: the original awk invocations named no input file and would
        # block forever reading stdin; read /etc/SuSE-release explicitly.
        V=$(awk '/VERSION/ {printf $3}' /etc/SuSE-release)
        P=$(awk '/PATCHLEVEL/ {printf $3}' /etc/SuSE-release)
        TARGET_RELEASE="SLE${V}"
        [[ ${P} == '0' ]] || TARGET_RELEASE="${TARGET_RELEASE}SP${P}"
    else
        echo "Isn't SUSE SLE system"
        exit 1
    fi
    echo "The TARGET_RELEASE is ${TARGET_RELEASE}"
fi

if [[ -z ${QASET_ROOT} ]];then
    echo "QASET_ROOT isn't SET !"
    # Probe the script's parent dir, then CWD, then CWD's parent for the
    # qaset layout (a 'qaset' file next to a 'qavm' directory).
    dirpart="$(dirname $0)"
    if [[ -f ${dirpart}/../qaset && -d ${dirpart}/../qavm ]];then
        pushd ${dirpart}/.. > /dev/null
        QASET_ROOT=${PWD}
        popd > /dev/null
    elif [[ -f ${PWD}/qaset && -d ${PWD}/qavm ]];then
        QASET_ROOT=${PWD}
    elif [[ -f ${PWD}/../qaset && -d ${PWD}/../qavm ]];then
        pushd .. > /dev/null
        QASET_ROOT=${PWD}
        popd > /dev/null
    else
        echo "Failed to get QASET_ROOT"
        exit 1
    fi
    echo "The QASET_ROOT is ${QASET_ROOT}"
fi

if [[ -z ${SQ_TEST_RUN_SET} ]]; then
    echo "SQ_TEST_RUN_SET isn't set"
    echo "Try my best to guess it"
    # Derive the run set from the script name, e.g. "acceptance-run.openqa"
    # -> "acceptance".
    arg0=$(basename $0)
    if [[ -z ${arg0} ]]; then
        echo "impossible"
        exit 1
    fi
    SQ_TEST_RUN_SET=${arg0%%-run.*}
    if [[ -z ${SQ_TEST_RUN_SET} ]] ; then
        echo "Failed to get SQ_TEST_RUN_SET"
        exit 1
    fi
    echo "The SQ_TEST_RUN_SET is ${SQ_TEST_RUN_SET}"
fi

# start reboot service default whenever what kind of test
case ${TARGET_RELEASE} in
    SLE11*) ${QASET_ROOT}/sysv-install;;
    *) systemctl enable qaperf.service;;
esac

SQ_SET_BIN=${QASET_ROOT}/qaset

# user configuration
# TODO : really is this user ?
SQ_USER_CONFIG_DIR=/root/qaset
SQ_USER_CONFIG_FILE=${SQ_USER_CONFIG_DIR}/config
mkdir -p ${SQ_USER_CONFIG_DIR}
if [[ ! -f ${SQ_USER_CONFIG_FILE} ]] ;then
    touch ${SQ_USER_CONFIG_FILE}
fi
# FIX: the original ran `grep -e -q 'pattern' file`, which makes grep treat
# "-q" as the pattern and the real pattern as a file name — the check always
# failed and duplicate SQ_TEST_RUN_SET lines were appended on every run.
if grep -q -e '^SQ_TEST_RUN_SET=' ${SQ_USER_CONFIG_FILE};then
    sed -i "/^SQ_TEST_RUN_SET=/s#^.*\$#SQ_TEST_RUN_SET=${SQ_TEST_RUN_SET}#" ${SQ_USER_CONFIG_FILE}
else
    echo "SQ_TEST_RUN_SET=${SQ_TEST_RUN_SET}" >> ${SQ_USER_CONFIG_FILE}
fi

export SQ_SET_CALL_BY_WRAP=YES
export SQ_CI_ENV="openqa"
${SQ_SET_BIN} -t ${TARGET_RELEASE} run
| true
|
044279cb1a28b3726eb418553de60c431d760c8c
|
Shell
|
roman-mueller/dotfiles
|
/bitbar/disapproval.sh
|
UTF-8
| 362
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# <bitbar.title>ಠ_ಠ</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author.github>roman-mueller</bitbar.author.github>
# <bitbar.desc>ಠ_ಠ</bitbar.desc>

# When invoked with "copy", place the glyph on the macOS clipboard and stop.
case "$1" in
  copy)
    printf '%s' "ಠ_ಠ" | LANG=en_US.UTF-8 pbcopy
    exit 0
    ;;
esac

# Default BitBar output: menu-bar text, separator, then a clickable item
# that re-invokes this script with the "copy" parameter.
echo 'ಠ_ಠ'
echo "---"
echo "Copy ಠ_ಠ | terminal=false bash='$0' param1=copy"
| true
|
1aece2174085c560b07ed327d4ccc56f03c38078
|
Shell
|
UCSB-CS-RACELab/eager-appscale
|
/Eager/install/setup_mysql.sh
|
UTF-8
| 592
| 3.578125
| 4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Install MySQL server (Debian/Ubuntu) if missing and set the root password
# to the value passed as $1.

echo "Setting Up MySQL for EAGER"
echo "=========================="

if command -v mysql > /dev/null; then
    echo "MySQL already installed. Assuming the root password is correctly set."
else
    export DEBIAN_FRONTEND=noninteractive
    echo "Installing MySQL..."
    apt-get -y install mysql-server
    echo "Waiting for MySQL server to start..."
    sleep 5
    echo "Setting password for MySQL user: root."
    # FIX: the original mixed `-e` with a stray here-doc
    # (`mysql -uroot -e <<EOSQL "UPDATE ..."`), which only worked by
    # accident (the here-doc body was empty).  Pass the SQL directly.
    # NOTE(review): the mysql.user Password column was removed in
    # MySQL 5.7+ — confirm the target distribution ships a pre-5.7 server,
    # otherwise use ALTER USER instead.
    mysql -uroot -e "UPDATE mysql.user SET Password=PASSWORD('$1') WHERE User='root'; FLUSH PRIVILEGES;"
    echo "Done setting MySQL root password."
fi
| true
|
44f8cad3b01946bfd0e9c53fe94483dcc2dbf93e
|
Shell
|
outtersg/guili
|
/_tinyproxy
|
UTF-8
| 4,771
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2006 Guillaume Outters
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
set -e
DelieS() { local s2 ; while [ -h "$s" ] ; do s2="`readlink "$s"`" ; case "$s2" in [^/]*) s2="`dirname "$s"`/$s2" ;; esac ; s="$s2" ; done ; } ; SCRIPTS() { local s="`command -v "$0"`" ; [ -x "$s" -o ! -x "$0" ] || s="$0" ; case "`basename "$s"`" in *.*) true ;; *sh) s="$1" ;; esac ; case "$s" in [^/]*) s="`pwd`/$s" ;; esac ; DelieS ; s="`dirname "$s"`" ; DelieS ; SCRIPTS="$s" ; } ; SCRIPTS
. "$SCRIPTS/util.sh"
# Historique des versions gérées
prerequis="tinyproxy"
auSecours()
{
moi="`basename "$0"`"
cat >&2 <<TERMINE
# Utilisation: $moi [-u <utilisateur>] -p <port> [-r <remplace>] <règle>*
-u <utilisateur>
Compte sous lequel tourner.
-p <port>
Numéro de port d'écoute.
-r <remplace>
Remplace un autre serveur (en plus de lui-même s'il se trouve)
<règle>
Si -, l'entrée standard est récupérée.
Les règles sont celles de tinyproxy: upstream, no upstream, Allow, Filter,
etc.
Attention, les chaînes de caractères doivent être entourées de guillemets.
TERMINE
exit 1
}
# Parse the command-line parameters (see auSecours for the option list).
# Sets: compte (account), port, seulement (allow-only filter),
# optionsServeur (flags forwarded to the server helper); accumulates
# tinyproxy rules in $TMP/$$/regles.
analyserParametres()
{
	vars="rien"
	compte=
	port=
	seulement=
	optionsServeur=
	> $TMP/$$/regles
	while [ $# -gt 0 ]
	do
		case "$1" in
			--seulement) shift ; seulement="$1" ;;
			-u) shift ; compte="$1" ;;
			-p) shift ; port="$1" ;;
			-r) optionsServeur="$optionsServeur $1 $2" ; shift ;;
			# "-" means: read additional rules from stdin.
			-) cat >> $TMP/$$/regles ;;
			*)
				if [ "$vars" = rien ]
				then
					echo "$1" >> $TMP/$$/regles
				else
					apAffecter "$1" $vars
				fi
				;;
		esac
		shift
	done
	# No account given: probe a list of likely daemon accounts and take the
	# first that exists; "personne" ("nobody") is a sentinel for "none found".
	if [ -z "$compte" ]
	then
		comptesEnvisages="web www daemon `id -u -n`"
		for compte in $comptesEnvisages personne
		do
			id "$compte" 2> /dev/null >&2 && break
		done
		if [ "$compte" = personne ]
		then
			echo "# Impossible de trouver un des comptes sous lequel tourner ($comptesEnvisages). Forcez un compte via l'option -u (il sera créé si nécessaire)." >&2
			exit 1
		fi
	fi
	# Both an account and a listen port are mandatory.
	[ ! -z "$compte" ] || auSecours
	[ -n "$port" ] || auSecours
	remplacerPrerequis "tinyproxy $argVersion"
}
IFS="$guili_sep"
tifs analyserParametres $guili_params_
prerequis
# Modifications
# Variables
eval "version=\"\$version_`echo "$logiciel" | cut -c 2-`\"" # On prend pour numéro de version installée celui de notre logiciel sous-jacent.
destiner
creerAmorceur()
{
local desttemp="$TMP/$$/init"
mkdir -p "$TMP/$$/init/etc/rc.d" "$TMP/$$/init/var/run" "$TMP/$$/init/var/log"
fpid="var/run/tinyproxy.pid"
ftrace="var/log/tinyproxy.log"
fconfig="etc/tproxy.conf"
foui="etc/tproxy.oui"
groupe="`id -g -n "$compte"`"
cat > "$TMP/$$/init/$fconfig" <<TERMINE
User $compte
Group $groupe
PidFile "$dest/$fpid"
LogFile "$dest/$ftrace"
TERMINE
# On ajoute aussi les valeurs par défaut de tous les paramètres qui n'auront pas été définis par l'appelant.
cat $TMP/$$/regles >> "$TMP/$$/init/$fconfig"
(
if [ ! -z "$seulement" ]
then
echo "FilterDefaultDeny Yes"
echo "Filter \"$dest/foui\""
echo "$seulement" > "$TMP/$$/init/$foui"
fi
cat <<TERMINE
Port $port
StartServers 3
MinSpareServers 3
MaxSpareServers 3
MaxClients 100
TERMINE
) | while read param contenu
do
grep -q "^$param[ ]" < "$TMP/$$/init/$fconfig" || echo "$param $contenu"
done >> "$TMP/$$/init/$fconfig"
serveur_patronVars "$desttemp" var/run var/log
mv "$desttemp/$fconfig" "$desttemp/$fconfig$PERSO_ORIGINAL"
iperso "$desttemp"
sudo cp -R "$TMP/$$/init/." "$dest"
serveur_chownVars
local bin=bin
pge $version 1.10 || bin=sbin
serveur -p "$dest/$fpid" $optionsServeur demon tinyproxy "$desttinyproxy/$bin/tinyproxy -c $dest/$fconfig"
}
echo "Auto-démarrage…" >&2
creerAmorceur
sutiliser -
| true
|
78f5b0d3ffbfbcc84ecb31cb8525a1f15bfd866a
|
Shell
|
pcdshub/engineering_tools
|
/scripts/imgr
|
UTF-8
| 2,200
| 3.921875
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Define usage here for faster return
usage()
{
cat << EOF
usage: imgr <IOCNAME> [--hutch HUTCH] <OPTION>
Control status of all IOCs running in a particular hutch from the command line.
Current hutch is used if not provided.
List of options:
imgr IOCNAME [--hutch HUTCH] --reboot soft
imgr IOCNAME [--hutch HUTCH] --reboot hard
imgr IOCNAME [--hutch HUTCH] --enable
imgr IOCNAME [--hutch HUTCH] --disable
imgr IOCNAME [--hutch HUTCH] --upgrade RELEASE_DIR
imgr IOCNAME [--hutch HUTCH] --move HOST
imgr IOCNAME [--hutch HUTCH] --move HOST:PORT
imgr [--hutch HUTCH] --list [--host HOST] [--enabled_only|--disabled_only]
EOF
}
# Require at least one argument; -h/--help prints usage and exits cleanly.
if [ $# -lt 1 ]; then
    echo 'Missing required arguments' >&2
    usage
    exit 1
elif [[ ($1 == "--help") || ($1 == "-h") ]]; then
    usage
    exit 0
fi

# Add engineering_tools to PATH for get_hutch_name
DIR=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")
PATH=$PATH:$DIR

# Search for hutch in args: scan the positional parameters for "--hutch"
# and take the word that follows it (indirect expansion ${!i}).
for (( i=1; i<=$#; i++)); do
    if [[ "${!i}" == "--hutch" ]]; then
        ARGHUTCH_INDEX=$((i+1))
        ARGHUTCH=${!ARGHUTCH_INDEX}
        break
    fi
done

# Hutch choice priority order:
# 1. --hutch arg
# 2. HUTCH environment variable
# 3. get_hutch_name
HUTCH=${ARGHUTCH:-$HUTCH}
HUTCH=${HUTCH:-$(get_hutch_name)}

# Exit if hutch is still unknown ("unknown_hutch" is get_hutch_name's
# sentinel value).
# TODO: Replace with check for actual hutch names
if [[ "$HUTCH" == "unknown_hutch" ]]; then
    echo "Unknown hutch, please use --hutch argument" >&2
    exit 1
fi
# Setup environment
if [ -x /etc/pathinit ]; then
source /etc/pathinit
else
export PSPKG_ROOT=/cds/group/pcds/pkg_mgr
export PYPS_ROOT=/cds/group/pcds/pyps
export IOC_ROOT=/cds/group/pcds/epics/ioc
export CAMRECORD_ROOT=/cds/group/pcds/controls/camrecord
export IOC_DATA=/cds/data/iocData
export IOC_COMMON=/cds/data/iocCommon
fi
PSPKG_OS=$(${PSPKG_ROOT}/etc/pspkg_os.sh)
export PSPKG_OS
if [ "$PSPKG_OS" == rhel5 ]; then
echo "IocManager 2.0.0 and higher does not run on RHEL5!" >&2
exit 1
fi
export QT_XKB_CONFIG_ROOT=/usr/share/X11/xkb
export PSPKG_RELEASE=controls-0.1.9
source $PSPKG_ROOT/etc/set_env.sh
/reg/g/pcds/pyps/config/"${HUTCH,,}"/iocmanager/imgr.py "$@" --hutch "${HUTCH,,}"
| true
|
5e445f7ae593943d23474a664df389c0e6099791
|
Shell
|
brycefrank/pyfor
|
/docs/update_docs.sh
|
UTF-8
| 228
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script autoregenerates pyfor's documentation for updates to master.
# FIX: quote the directory expansions so the script also works when the
# checkout path contains spaces.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
sphinx-apidoc -f -o api/ "$DIR/../pyfor" --separate
cd "$DIR"
make html
cd -
| true
|
8dbb4a490df7cbb0fba3b39a3d8581814d149dc5
|
Shell
|
olegartys/Mini2440-SDK
|
/lenny_sandbox/usr/lib/util-vserver/vserver-setup.functions
|
UTF-8
| 7,989
| 3.046875
| 3
|
[] |
no_license
|
# $Id: vserver-setup.functions 2517 2007-03-18 22:02:22Z dhozac $ --*- sh -*--
# Copyright (C) 2003,2004,2005,2006 Enrico Scholz <enrico.scholz@informatik.tu-chemnitz.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
SETUP_HOSTNAME=
SETUP_NETDEV=
SETUP_NETMASK=
SETUP_NETPREFIX=
SETUP_NETBCAST=
SETUP_LOCKFILE=
SETUP_CONFDIR=
SETUP_CONTEXT=
SETUP_INITSTYLE=
SETUP_CPUSET=
SETUP_CPUSETCPUS=
SETUP_CPUSETMEMS=
SETUP_CPUSETVIRT=
declare -a SETUP_INTERFACES=()
declare -a SETUP_FLAGS=()
declare -r SETUP_OPTIONS="confdir:,lockfile:,hostname:,netdev:,netmask:,netprefix:,netbcast:,interface:,flags:,context:,initstyle:,cpuset:,cpusetcpus:,cpusetmems:,cpusetvirt"
declare -r SETUP_HELPMSG=$"
--context ... the static context of the vserver [default: none; one will
be generated for you]
--confdir ... [default: $__CONFDIR/<name>]
--lockfile <filename>
... [default: $__RUNDIR/<name>]
--hostname <hostname>
--netdev <device>
--netbcast <broadcast>
--netmask <netmask>|--netprefix <prefixlen>
... sets the default netmask (a.b.c.d quadruple) or prefixlen
(length of the interface)
--interface [<name-suffix>=][<device>:]<ip>[/<mask|prefixlen>]
... declares an network-interface; this option can be specified
multiple times
--flags <flags>+
... sets comma-separated list of flags; possible flags are
lock: Prevent the vserver from setting new security context
sched: Merge scheduler priority of all processes in the
vserver so that it acts a like a single
one (kernel 2.4 only).
nproc: Limit the number of processes in the vserver
according to ulimit (instead of a per user limit,
this becomes a per vserver limit)
private: No other process can join this security context.
Even root
--cpuset <name>
... declares the CPUSET this vserver will run in [default: none]
--cpusetcpus <number[-number][:<exclusive>]>
... sets which cpus belong to the CPUSET,
exclusive is a flag (0|1) prohibiting any other cpuset from
using those cpus
--cpusetmems <number[-number][:<exclusive>]>
... sets which memory pools belong to the CPUSET,
exclusive is a flag (0|1) prohibiting any other cpuset from
using those memory pools
--cpusetvirt
... virtualize cpuset (guest will see only CPUs defined in cpuset)
Requires kernel patch from http://www.bullopensource.org/cpuset/
--initstyle <style>
... configures the initstyle (e.g. minit,sysv,plain)
"
# Dispatch one "--option value" pair into the corresponding SETUP_* global.
# Colon-separated cpuset values are split into value + exclusive flag, and
# comma-separated --flags are appended to the SETUP_FLAGS array.
# Returns 1 for an unrecognized option, 0 otherwise.
setup_setOption2()
{
    local option=$1 value=$2
    case "$option" in
	--context)	SETUP_CONTEXT=$value;;
	--confdir)	SETUP_CONFDIR=$value;;
	--lockfile)	SETUP_LOCKFILE=$value;;
	--hostname)	SETUP_HOSTNAME=$value;;
	--netdev)	SETUP_NETDEV=$value;;
	--netmask)	SETUP_NETMASK=$value;;
	--netprefix)	SETUP_NETPREFIX=$value;;
	--netbcast)	SETUP_NETBCAST=$value;;
	--interface)	SETUP_INTERFACES+=( "$value" );;
	--initstyle)	SETUP_INITSTYLE=$value;;
	--cpuset)	SETUP_CPUSET=$value;;
	--cpusetcpus)
			# split "<cpus>[:<exclusive>]" on ':'
			old_IFS=$IFS
			IFS=:
			set -- $value
			SETUP_CPUSETCPUS=$1
			SETUP_CPUSETCPUSEXCL=$2
			IFS=$old_IFS
			;;
	--cpusetmems)
			# split "<mems>[:<exclusive>]" on ':'
			old_IFS=$IFS
			IFS=:
			set -- $value
			SETUP_CPUSETMEMS=$1
			SETUP_CPUSETMEMSEXCL=$2
			IFS=$old_IFS
			;;
	--cpusetvirt)	SETUP_CPUSETVIRT=1;;
	--flags)
			# append the comma-separated flag list to SETUP_FLAGS
			old_IFS=$IFS
			IFS=,
			set -- $value
			SETUP_FLAGS+=( "$@" )
			IFS=$old_IFS
			;;
	*)		return 1;;
    esac
    return 0
}
# Write $1 (plus a trailing newline) into file $2, but only when $1 is
# non-empty; an empty value leaves the target untouched and returns 0.
_setup_writeSingleOption()
{
    if [ -n "$1" ]; then
	echo "$1" >"$2"
    fi
}
function _setup_writeInterface
{
local vdir=$1
local idx=$2
local tmp=$3
local name=${tmp%%=*}
test "$name" != "$tmp" || name=
tmp=${tmp##${name}=}
local dev=${tmp%%:*}
local nodev=
test "$dev" != "$tmp" || dev=
tmp=${tmp##${dev}:}
test "$dev" != "nodev" || {
dev=
nodev=1
}
local mask=${tmp##*/}
test "$mask" != "$tmp" || mask=
local ip=${tmp%%/${mask}}
local prefix=
test "${mask%%.*}" != "$mask" || {
prefix=$mask
mask=
}
d=$vdir/interfaces/$idx
mkdir "$d"
_setup_writeSingleOption "$name" $d/name
_setup_writeSingleOption "$dev" $d/dev
_setup_writeSingleOption "$ip" $d/ip
_setup_writeSingleOption "$mask" $d/mask
_setup_writeSingleOption "$prefix" $d/prefix
test -n "$dev" -o -n "$SETUP_NETDEV" || {
test -n "$nodev" || \
echo $"No device specified for interface '$idx'; setting 'nodev'" >&2
$_TOUCH $d/nodev
}
}
function setup_setDefaults
{
: ${SETUP_CONFDIR:=$__CONFDIR/$1}
: ${SETUP_LOCKFILE:=$__RUNDIR/$1}
findFile SETUP_FSTAB "$__CONFDIR"/.defaults/fstab "$__PKGLIBDEFAULTDIR"/fstab
}
# Allocate a static context id when none was given and dynamic contexts are
# not configured: read .defaults/context.next (or the packaged context.start
# seed) and store the incremented value back for the next guest.
function _setup_generateContext
{
    if test -z "$SETUP_CONTEXT" && test ! -e "$__CONFDIR/.defaults/context.dynamic"; then
	if test -e "$__CONFDIR/.defaults/context.next"; then
	    SETUP_CONTEXT=`$_CAT "$__CONFDIR/.defaults/context.next"`
	else
	    SETUP_CONTEXT=`$_CAT "$__PKGLIBDEFAULTDIR/context.start"`
	fi
	expr "$SETUP_CONTEXT" + 1 > "$__CONFDIR/.defaults/context.next"
    fi
}
function setup_writeOption
{
local name=$1
local cfgdir=${SETUP_CONFDIR:?}
local i
mkdir -p "$cfgdir"/interfaces "$cfgdir"/apps/init "$cfgdir"/uts "$cfgdir"/cpuset
_setup_generateContext
_setup_writeSingleOption "$name" "$cfgdir"/name
_setup_writeSingleOption "$SETUP_CONTEXT" "$cfgdir"/context
_setup_writeSingleOption "$SETUP_HOSTNAME" "$cfgdir"/uts/nodename
_setup_writeSingleOption "$SETUP_NETDEV" "$cfgdir"/interfaces/dev
_setup_writeSingleOption "$SETUP_NETMASK" "$cfgdir"/interfaces/mask
_setup_writeSingleOption "$SETUP_NETPREFIX" "$cfgdir"/interfaces/prefix
_setup_writeSingleOption "$SETUP_NETBCAST" "$cfgdir"/interfaces/bcast
_setup_writeSingleOption "$SETUP_INITSTYLE" "$cfgdir"/apps/init/style
_setup_writeSingleOption "$SETUP_CPUSET" "$cfgdir"/cpuset/name
_setup_writeSingleOption "$SETUP_CPUSETCPUS" "$cfgdir"/cpuset/cpus
_setup_writeSingleOption "$SETUP_CPUSETCPUSEXCL" "$cfgdir"/cpuset/cpus_exclusive
_setup_writeSingleOption "$SETUP_CPUSETMEMS" "$cfgdir"/cpuset/mems
_setup_writeSingleOption "$SETUP_CPUSETMEMSEXCL" "$cfgdir"/cpuset/mem_exclusive
_setup_writeSingleOption "$SETUP_CPUSETVIRT" "$cfgdir"/cpuset/virtualized
local idx=0
for i in "${SETUP_INTERFACES[@]}"; do
_setup_writeInterface "$cfgdir" $idx "$i"
let ++idx
done
test -z "$SETUP_FLAGS" || for i in "${SETUP_FLAGS[@]}"; do
echo "$i"
done >"$cfgdir"/cflags
ln -s "$SETUP_LOCKFILE" "$cfgdir"/run
}
function setup_writeInitialFstab
{
cat "${SETUP_FSTAB:?}" >"${SETUP_CONFDIR:?}"/fstab
}
function setup_test
{
SETUP_INTERFACES=()
setup_setOption2 --interface foo0=eth0:1.2.3.4/1
setup_setOption2 --interface foo1=eth0:1.2.3.4/255.255.248.0
setup_setOption2 --interface foo2=eth0:1.2.3.4
setup_setOption2 --interface foo3=1.2.3.4
setup_setOption2 --interface foo4=1.2.3.4/1
setup_setOption2 --interface eth0:1.2.3.4
setup_setOption2 --interface eth0:1.2.3.4/1
setup_setOption2 --interface 1.2.3.4
setup_setOption2 --interface 1.2.3.4/1
setup_writeOption xx
}
| true
|
334a4abf10558ba42d9c1ff19c1bea8f21037688
|
Shell
|
syranez/bash-oauth
|
/GmailOAuth.sh
|
UTF-8
| 5,113
| 3.03125
| 3
|
[
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2010, Yu-Jie Lin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Gmail non-public-API endpoints
GMAIL_UNREAD='https://mail.google.com/mail/feed/atom/'
GMAIL_REQUEST_TOKEN='https://www.google.com/accounts/OAuthGetRequestToken'
GMAIL_ACCESS_TOKEN='https://www.google.com/accounts/OAuthAuthorizeToken'
GMAIL_AUTHORIZE_TOKEN='https://www.google.com/accounts/OAuthGetAccessToken'
GMAIL_SCOPE='https://mail.google.com/mail/feed/atom/'
# Source OAuth.sh
OAuth_sh=$(which OAuth.sh)
(( $? != 0 )) && echo 'Unable to locate OAuth.sh! Make sure it is in searching PATH.' && exit 1
source "$OAuth_sh"
GMAIL_debug () {
  # Emit each argument as its own "[GMAIL][DEBUG][timestamp]" line on stdout;
  # a no-op unless GMAIL_DEBUG is set to a non-empty value.
  if [[ -z "$GMAIL_DEBUG" ]]; then
    return
  fi
  local stamp
  stamp=$(date +%FT%T.%N)
  local arg
  for arg in "$@"; do
    echo "[GMAIL][DEBUG][$stamp] $arg"
  done
}
GMAIL_nonce () {
  # Two concatenated $RANDOM draws — a pseudo-random nonce (not
  # cryptographically strong).
  printf '%s%s\n' "$RANDOM" "$RANDOM"
}
GMAIL_extract_value () {
  # $1 key name, $2 query string; prints the [a-zA-Z0-9-]* value that
  # follows each "key=" occurrence (one per line).
  grep -E -o "$1=[a-zA-Z0-9-]*" <<< "$2" | cut -d '=' -f 2
}
SM_extract_XML_value () {
  # $1 entity name, $2 XML string; prints the text content of each
  # <entity>...</entity> occurrence (naive — stops at the first '<').
  printf '%s' "$2" | grep -E -o "<$1>[^<]+" | sed -e "s/<$1>//"
}
GMAIL_init() {
  # Initialize shared OAuth state (version, signature method, basic
  # parameter list).  The original comment said "TwitterOAuth" — this code
  # was evidently adapted from the Twitter client; it serves Gmail here.
  # NOTE(review): expects oauth_consumer_key to be set by the caller before
  # this runs — confirm against the calling script.
  oauth_version='1.0'
  oauth_signature_method='HMAC-SHA1'
  oauth_basic_params=(
    $(OAuth_param 'oauth_consumer_key' "$oauth_consumer_key")
    $(OAuth_param 'oauth_signature_method' "$oauth_signature_method")
    $(OAuth_param 'oauth_version' "$oauth_version")
    )
}
GMAIL_access_token_helper () {
  # Guide the user through the OAuth 1.0 flow: fetch a request token, have
  # the user authorize it via browser PIN, then exchange it for an access
  # token.  On success GMAIL_ret holds (oauth_token oauth_token_secret).
  local resp PIN
  # Request Token
  local auth_header="$(_OAuth_authorization_header 'Authorization' '' "$oauth_consumer_key" "$oauth_consumer_secret" '' '' "$oauth_signature_method" "$oauth_version" "$(GMAIL_nonce)" "$(OAuth_timestamp)" 'POST' "$GMAIL_REQUEST_TOKEN", "$(OAuth_param_quote 'scope' "$GMAIL_SCOPE")")"
  resp=$(curl -v -d "scope=$GMAIL_SCOPE" -H "$auth_header" "$GMAIL_REQUEST_TOKEN")
  GMAIL_rval=$?
  echo "$resp"
  # FIX: the original tested $? here, but by then it held echo's status
  # (always 0), so curl failures were silently ignored.
  (( GMAIL_rval != 0 )) && return $GMAIL_rval

  local _oauth_token=$(GMAIL_extract_value 'oauth_token' "$resp")
  local _oauth_token_secret=$(GMAIL_extract_value 'oauth_token_secret' "$resp")

  echo 'Please go to the following link to get the PIN:'
  echo "  ${GMAIL_AUTHORIZE_TOKEN}?oauth_token=$_oauth_token"
  read -p 'PIN: ' PIN

  # Access Token
  local auth_header="$(_OAuth_authorization_header 'Authorization' '' "$oauth_consumer_key" "$oauth_consumer_secret" "$_oauth_token" "$_oauth_token_secret" "$oauth_signature_method" "$oauth_version" "$(OAuth_nonce)" "$(OAuth_timestamp)" 'POST' "$GMAIL_ACCESS_TOKEN" "$(OAuth_param 'oauth_verifier' "$PIN")"), $(OAuth_param_quote 'oauth_verifier' "$PIN")"
  resp=$(curl -s -d "" -H "$auth_header" "$GMAIL_ACCESS_TOKEN")
  GMAIL_rval=$?
  # FIX: same latent bug — the original checked $? after the assignment
  # statement above, which is always 0; test the saved curl status instead.
  (( GMAIL_rval != 0 )) && return $GMAIL_rval
  GMAIL_ret=(
    $(GMAIL_extract_value 'oauth_token' "$resp")
    $(GMAIL_extract_value 'oauth_token_secret' "$resp")
    )
}
# APIs
######
GMAIL_unread () {
  # Fetch the Gmail unread-mail Atom feed using OAuth.
  # FIX: the original referenced $GM_UNREAD / set GM_ret / GM_rval, but the
  # endpoint constant defined above is $GMAIL_UNREAD — the curl URL was
  # empty.  Results now go to GMAIL_ret / GMAIL_rval for consistency with
  # GMAIL_access_token_helper; the GM_* names are kept as aliases for
  # backward compatibility.
  local params=(
  )
  local auth_header=$(OAuth_authorization_header 'Authorization' '' '' '' 'GET' "$GMAIL_UNREAD" ${params[@]})
  GMAIL_ret=$(curl -s -H "$auth_header" "$GMAIL_UNREAD")
  GMAIL_rval=$?
  GM_ret=$GMAIL_ret
  GM_rval=$GMAIL_rval
  return $GMAIL_rval
}
| true
|
07efce580aa2a89d9127fa1b2fe748e92e818002
|
Shell
|
racket/pkg-index
|
/planet-compat/s3.sh
|
UTF-8
| 434
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/zsh
# Rebuild the planet-compat static cache and mirror it to the S3 bucket that
# serves planet-compats.racket-lang.org.  Requires s3cmd and ~/.s3cfg-plt.
set -e

. ~/.profile

# All settings may be overridden from the environment.
export PC_ROOT="${PC_ROOT:-~/Dev/scm/plt/extra-pkgs/pkg-index/planet-compat}"
export S3CFG_PLT="${S3CFG_PLT:-~/.s3cfg-plt}"
export S3_BUCKET="${S3_BUCKET:-planet-compats.racket-lang.org}"
export S3CMD="${S3CMD:-s3cmd}"

# Run from the script's own directory.
cd $(dirname $0)

# Compile the generators, regenerate the cache, then sync it up (deleting
# remote files that no longer exist locally).
raco make update.rkt static.rkt
racket update.rkt
racket static.rkt
${S3CMD} -c ${S3CFG_PLT} sync --recursive --delete-removed ${PC_ROOT}/cache/ s3://${S3_BUCKET}/
| true
|
5d830e0ab42b9e0954974bd604d8bc54cae72b23
|
Shell
|
qnib/plain-influxdb
|
/opt/qnib/influxdb/bin/start.sh
|
UTF-8
| 366
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Start influxd in the background, create any requested databases over its
# HTTP API, then move influxd back to the foreground so it remains the
# process the container supervisor watches.
set -m  # enable job control so 'fg' works in this non-interactive shell
influxd -pidfile /var/run/influxdb.pid -config /etc/influxdb/influxdb.conf ${INFLUXD_OPTS} &
sleep 3  # give the daemon a moment to open its HTTP port
if [ -n "${INFLUXDB_DATABASES}" ]; then
    # INFLUXDB_DATABASES is a comma-separated list, e.g. "db1,db2".
    for db in $(echo "${INFLUXDB_DATABASES}" | tr ',' ' '); do
        echo "[INFO] Create database ${db}"
        influx -host localhost -port 8086 -execute "create database ${db}"
    done
fi
fg
| true
|
6b9e11d27c9c60a762c9b6bb223553c53228ba50
|
Shell
|
fsmithred/scripts
|
/startfrisk.1
|
UTF-8
| 2,626
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# xfrisk launcher: starts a friskserver once, optionally spawns AI players,
# runs the xfrisk client, then offers to kill or keep the server.
logdir="$HOME"/.xfrisk
# while loop to give options for exiting:
# There's now an option to restart the game,
# in case you closed it by accident.
# There's also an option to exit the script but
# keep the server running. Not sure what good that is,
# but if you do that, and then run the script again, it restarts game.
# Killing the server also removes the pid file.
# Prompt in a loop until the user decides what to do with the running
# server. Reads the server PID from $logdir/frisk.pid; killing the server
# also removes the pid file so the next run starts a fresh server.
function get_out {
while true; do
printf " Kill the server?
(y or k) Exit and kill the server.
(n or q) Quit and keep server running.
(a or r) Play again.
"
read ans
case $ans in
[YyKk]*) echo "
Killing the server
"
kill $(cat "$logdir"/frisk.pid) ; rm "$logdir"/frisk.pid ; exit 0 ;;
[NnQq]*) echo "
Exiting.
Server is still running.
" ; exit 0 ;;
# Last arm: a single ';' (not ';;') is accepted by bash before 'esac'.
[AaRr]*) xfrisk localhost ;
esac
done
}
# make sure logdir exists
if ! [[ -d "$logdir" ]]
then
mkdir "$logdir"
fi
# If pid file exists (i.e. if server is running)
# then just start the game.
# else start the server, store the pid in a file, then start game.
if [[ -f "$logdir"/frisk.pid ]]
then
echo "pid file exists"
xfrisk localhost
get_out
else
# Launch the server in the background; '>&' sends stdout+stderr to the log.
friskserver >& "$logdir"/log-friskserver &
sleep 1
# $! still refers to friskserver here - 'sleep' ran in the foreground.
my_pid="$!"
echo $my_pid > "$logdir"/frisk.pid
echo "pid is $(cat "$logdir"/frisk.pid)"
echo
printf "\n\n\t You may choose one or more AI players, or
\t you may continue to the game to play with
\t humans, or you may hit ctrl-c to abort.
\t Remember to kill the friskserver when you are finished playing. \n
"
printf "
1) aiColson 4) aiColson_and_aiConway 7) All
2) aiConway 5) aiColson_and_aiDummy 8) Continue
3) aiDummy 6) aiConway_and_aiDummy
\t Enter a number: "
read ans
# Spawn the chosen AI client(s) in the background, each with its own log.
case $ans in
1) aiColson localhost >& "$logdir"/log-aiColson & ;;
2) aiConway localhost >& "$logdir"/log-aiConway & ;;
3) aiDummy localhost >& "$logdir"/log-aiDummy & ;;
4) aiColson localhost >& "$logdir"/log-aiColson &
aiConway localhost >& "$logdir"/log-aiConway & ;;
5) aiColson localhost >& "$logdir"/log-aiColson &
aiDummy localhost >& "$logdir"/log-aiDummy & ;;
6) aiConway localhost >& "$logdir"/log-aiConway &
aiDummy localhost >& "$logdir"/log-aiDummy & ;;
7) aiColson localhost >& "$logdir"/log-aiColson &
aiConway localhost >& "$logdir"/log-aiConway &
aiDummy localhost >& "$logdir"/log-aiDummy & ;;
8) ;;
esac
xfrisk localhost
get_out
fi
| true
|
f549845b843dff73cae8787e52e5be457ad4325f
|
Shell
|
amalgam-silver/eldk-switch
|
/eldk-switch.sh
|
UTF-8
| 9,128
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Call with "eval `eldk-switch.sh <arch, or board>`"
# e.g. put "eldk-switch() { eval `eldk-switch.sh $*`; }"
# in your .bashrc
#
# (C) 2007-2012 by Detlev Zundel, <dzu@denx.de> DENX Software Engineering GmbH
#
eldk_prefix=/opt/eldk-
rev=5.2
root_symlink=~/target-root
#######################################
# Print usage information to stderr and exit with status 1.
# Fix: the first echo read `...target>"1>&2` - without a space the `1` was
# part of the echoed word, so the usage line was printed with a stray
# trailing "1"; also corrected the "affact" typo.
#######################################
usage () {
    echo "usage: $(basename $0) [-v] [-m] [-r <release>] <board, cpu, eldkcc/target>" 1>&2
    echo " Switches to using the ELDK <release> for" 1>&2
    echo " <board>, <cpu> or <eldkcc>/<target>." 1>&2
    echo " -m will only affect a minimal amount of environment variables." 1>&2
    echo " $(basename $0) -l" 1>&2
    echo " Lists the installed ELDKs" 1>&2
    echo " $(basename $0) -q" 1>&2
    echo " Queries the currently used ELDK" 1>&2
    exit 1
}
# Append directory $1 to PATH unless PATH already contains it.
# Fixes: expansions are quoted (a PATH containing spaces no longer
# word-splits) and the membership test uses a fixed-string match
# (grep -F), so regex metacharacters in the path ('.', '-') can no
# longer cause false positives/negatives.
add_path () {
    if ! echo "$PATH" | grep -qF "$1"
    then
        PATH=$PATH:$1
    fi
}
# Remove every PATH component matching $1 (a grep pattern, as before).
# Fix: all expansions are quoted so a PATH or pattern containing spaces
# no longer word-splits or globs; the matching semantics are unchanged.
prune_path () {
    if echo "$PATH" | grep -q "$1"
    then
        # Split PATH one component per line, drop matches, re-join, and
        # strip the trailing ':' left by the final tr.
        PATH=$(echo "$PATH" | tr : "\n" | grep -v "$1" | tr "\n" : | sed 's/:$//')
    fi
}
# Unexpand sorted entries by common prefix elimination
# Compresses a SORTED word list into brace notation, e.g.
#   "arm_v5 arm_v7 ppc" -> "arm_{v5,v7} ppc".
# State machine: active=0 while searching for a shared prefix between the
# previous word ('last') and the current one; active=1 while emitting the
# comma-separated suffixes of an open "prefix{" group.
unexpand () {
	local prefix len lenm2 active last
	active=0
	len=1
	last=$1
	shift
	for item in $*
	do
	    if [ $active -eq 0 ]; then
		# Searching a prefix
		lenm2=-1
		# Grow the candidate prefix while both words agree and the
		# character two back isn't '_' (keeps prefixes on '_' edges).
		while [ "${last:$lenm2:1}" != "_" \
		    -a "${last:0:$len}" = "${item:0:$len}" ]; do
		    len=$(expr $len + 1)
		    lenm2=$(expr $len - 2)
		done
		if [ $len -eq 1 ]; then
		    # No common prefix at all: emit the previous word as-is.
		    echo -n "$last "
		    last=$item
		    continue
		fi
		# Open a brace group with the shared prefix and both suffixes.
		active=1
		len=$(expr $len - 1)
		prefix=${last:0:$len}
		echo -n "${prefix}{${last:$len},${item:$len}"
	    else
		# unxepanding prefixes
		last=$item
		if [ "$prefix" = "${item:0:$len}" ]; then
		    # Same prefix: append another suffix to the open group.
		    echo -n ",${item:$len}"
		else
		    # Prefix changed: close the group and restart the search.
		    active=0
		    len=1
		    echo -n "} "
		fi
	    fi
	done
	# Cleanup
	if [ $active -eq 0 ]; then
	    echo "$last"
	else
	    echo "}"
	fi
}
# Get a field from a "colon separated key value file" which looks like this:
#   Key 1: Value
#   Another Key: Yet another value
# Prints the value for key $2 from file $1 (first match only; sed quits
# after printing). Fix: the filename is quoted so paths containing
# spaces work; $2 is still interpolated into the sed program, so keys
# must not contain sed metacharacters (unchanged from before).
cskv_getfield () {
    local file=$1
    local key=$2
    sed -n "/^${key}:/ { s/${key}: *//; p; q }" "$file"
}
# Drop the final "/<component>" from a path (a poor man's dirname).
# Kept as sed rather than ${1%/*} on purpose: the sed pattern requires at
# least one non-slash character at the end, so a trailing "/" is preserved.
strip_last_path () {
    local p=$1
    echo $p | sed 's|/[^/]\+$||'
}
# Determine ELDK generation for an installation directory:
#   pre5    - top-level 'version' file present (ELDK < 5.0)
#   yocto   - per-target '*/version*' files present (ELDK 5.x / Yocto)
#   unknown - neither marker found
eldk_generation () {
    local root=$1
    if [ -r "${root}/version" ]; then
        echo "pre5"
        return
    fi
    if [ -n "$(ls "${root}"/*/version* 2>/dev/null)" ]; then
        echo "yocto"
    else
        echo "unknown"
    fi
}
# Get version information by looking at the version file in an ELDK installation
# $1 is a per-target directory (e.g. /opt/eldk-5.2/armv7); its parent is
# the installation root, whose generation decides where the version lives.
eldk_version () {
    eldk_root=$(strip_last_path $1)
    case $(eldk_generation ${eldk_root}) in
	pre5)
	    # First line of the root 'version' file, leading non-digits stripped.
	    sed -n '1 { s/^[^0-9]*\(.*\)$/\1/ ; p }' ${eldk_root}/version
	    ;;
	yocto)
	    # "Distro Version" key of the per-target version-* file.
	    cskv_getfield ${1}/version-* "Distro Version"
	    ;;
	*)
	    echo "unknown"
	    ;;
    esac
}
# Enumerate ELDK installations
# Globs "${1}*" (e.g. "/opt/eldk-*") and prints every directory that
# eldk_generation recognises as a real installation.
enum_eldk_roots () {
    for root in ${1}*
    do
	if [ "unknown" != "$(eldk_generation ${root})" ]
	then
	    echo $root
	fi
    done
}
# Enumerate installed arches
# For pre-5 installs, architectures are listed in the 'version' file;
# for Yocto installs, each subdirectory of the root is a target arch.
enum_eldk_arches () {
    case $(eldk_generation $1) in
	pre5)
	    # Get supported architectures by looking at the version
	    # file in an ELDK installation
	    # '2~1' is a GNU sed address: every line from line 2 onward.
	    sed -n '2~1 { s/^\(.*\):.*$/\1/ ; p }' ${1}/version
	    ;;
	yocto)
	    ls ${1}
	    ;;
    esac
}
# Iterate over all installed ELDK versions and show info
# All human-readable output goes to stderr (1>&2) because this script's
# stdout is reserved for shell commands meant to be eval'ed by the caller.
show_versions () {
    local dir
    local ver
    echo ",+--- Installed ELDK versions:" 1>&2
    for dir in $(enum_eldk_roots ${eldk_prefix/%-})
    do
	if [ ! -L $dir ]; then
	    # Real installation: report version plus a prefix-compressed
	    # list of its architectures (see unexpand).
	    set -- $(enum_eldk_arches $dir)
	    ver=$(eldk_version ${dir}/$1)
	    if [ "$ver" != "unknown" ]; then
		echo -en "eldk ${ver}: $dir " 1>&2
		unexpand $(enum_eldk_arches $dir | sort) 1>&2
	    fi
	else
	    # Symlinked installation: just show where it points.
	    set -- $(enum_eldk_arches $dir)
	    ver=$(eldk_version ${dir}/$1)
	    echo "eldk ${ver}: $dir -> $(readlink $dir)" 1>&2
	fi
    done
}
# Show currently used ELDK
# Derives the active installation from the first PATH component that lives
# under the ELDK prefix, then reports version, CROSS_COMPILE and ARCH.
query_version () {
    dir=$(echo $PATH | tr : "\n" | grep ${eldk_prefix/%-} | \
	head -1 | sed 's/\/bin//; s/\/usr\/bin//; s/\/sysroots.*$//';)
    ver=$(eldk_version $dir)
    if [ -n "$dir" ]; then
	echo "Currently using eldk ${ver} from ${dir}" 1>&2
	echo "CROSS_COMPILE=$CROSS_COMPILE" 1>&2
	[ -n "$ARCH" ] && echo "ARCH=$ARCH" 1>&2
    else
	echo "Environment is not setup to use an ELDK." 1>&2
    fi
}
# Check if ARCH setting needs to be changed for eldkcc provided as first parameter
# Returns 0 (change needed) when ARCH is unset or not among the
# colon-separated arches eldk-map reports for this eldkcc; 1 otherwise.
need_eldkcc_arch_change () {
    [ -z "$ARCH" ] && return 0
    if eldk-map eldkcc arch $1 | sed 's/:/\n/g' | grep -q "^${ARCH}$"; then
	return 1
    fi
    return 0
}
# Check if ARCH setting needs to be changed for target provided as first parameter
# Same logic as above but keyed on a Yocto target name.
need_target_arch_change () {
    [ -z "$ARCH" ] && return 0
    if eldk-map target arch $1 | sed 's/:/\n/g' | grep -q "^${ARCH}$"; then
	return 1
    fi
    return 0
}
# Most unusual usage of sort :)
# True (exit 0) when version $1 <= version $2: strip non-numeric noise,
# then let GNU 'sort --version-sort' order the two - $1 is <= $2 exactly
# when it sorts first. (first/second deliberately stay non-local, as before.)
version_lte () {
    first=$(echo $1 | sed 's/[^.0-9]//g')
    second=$(echo $2 | sed 's/[^.0-9.]//g')
    [ "$(printf '%s\n%s\n' "${first}" "${second}" | sort --version-sort | head -1)" == "${first}" ]
}
# Parse options (bash extension)
# Note: stdout carries eval-able shell commands for the caller
# (see the wrapper suggested in the file header); all messages go to stderr.
while getopts mlqr:v option
do
    case $option in
	m) minimal=1
	    ;;
	l) show_versions
	    exit 1
	    ;;
	q) query_version
	    exit 1
	    ;;
	r) rev=$OPTARG
	    ;;
	v) verbose=1
	    ;;
	*) usage
	    exit 1
	    ;;
    esac
done
shift $(( $OPTIND - 1 ))
# We expect exactly one required parameter
if [ $# -ne 1 ]
then
    usage
fi
# The two branches below implement the same board/cpu/alias lookup, but the
# legacy (<5.0) ELDK exports CROSS_COMPILE/DEPMOD directly, while the Yocto
# (>=5.0) ELDK sources the generated environment-setup-* file.
if version_lte $rev "4.2"
then
    # Before version 5.0 (legacy)
    # This is our "smart as a collie" lookup logic. We try to interpret
    # the argument as a board, as a cpu, as an alias and finally only as
    # the ELDK CROSS_COMPILE value.
    cpu=$(eldk-map board cpu $1 2>/dev/null)
    if [ -n "$cpu" ]
    then
	echo "[ $1 is using $cpu ]" 1>&2
	eldkcc=$(eldk-map cpu eldkcc $cpu 2>/dev/null)
    else
	eldkcc=$(eldk-map cpu eldkcc $1 2>/dev/null)
	if [ -z "$eldkcc" ]
	then
	    eldkcc=$(eldk-map alias eldkcc $1 2>/dev/null)
	    if [ -z "$eldkcc" ]
	    then
		if eldk-map eldkcc | grep -q "^${1}\$"
		then
		    eldkcc=$1
		else
		    echo "$(basename $0): don't know what $1 might be, giving up." 1>&2
		    exit 1
		fi
	    fi
	fi
    fi
    if [ -z "$eldkcc" ]
    then
	echo "Internal error" 1>&2
    else
	# Drop any previously-selected ELDK from PATH before adding this one.
	prune_path ${eldk_prefix/%-}
	if [ ! -x ${eldk_prefix}${rev}/usr/bin/${eldkcc}-gcc ]
	then
	    echo "$(basename $0): ELDK $rev for $eldkcc is not installed!" 1>&2
	    exit 1
	fi
	echo "Setup for ${eldkcc} (using ELDK $rev)" 1>&2
	add_path ${eldk_prefix}${rev}/bin
	add_path ${eldk_prefix}${rev}/usr/bin
	# Accumulate the ';'-separated command string that the caller evals.
	cmds="PATH=$PATH"
	cmds="$cmds ; export CROSS_COMPILE=${eldkcc}-"
	cmds="$cmds ; export DEPMOD=${eldk_prefix}${rev}/usr/bin/depmod.pl"
	if need_eldkcc_arch_change $eldkcc
	then
	    cmds="$cmds ; export ARCH=$(eldk-map eldkcc arch $eldkcc | sed 's/:.*$//g')"
	fi
	echo $cmds
	[ -n "$verbose" ] && echo $cmds | sed 's/ ; /\n/g' 1>&2
	# Keep the caller's target-root symlink pointing at this ELDK's rootfs.
	if [ -L $root_symlink ]
	then
	    rm $root_symlink
	    ln -s ${eldk_prefix}${rev}/${eldkcc} $root_symlink
	    echo "Adjusted $root_symlink pointing to $(readlink $root_symlink)" 1>&2
	fi
    fi
else
    # Post 5.0 (Yocto)
    # This is our "smart as a collie" lookup logic. We try to interpret
    # the argument as a board, as a cpu, as an alias and finally only as
    # the ELDK CROSS_COMPILE value.
    cpu=$(eldk-map board cpu $1 2>/dev/null)
    if [ -n "$cpu" ]
    then
	echo "[ $1 is using $cpu ]" 1>&2
	target=$(eldk-map cpu target $cpu 2>/dev/null)
    else
	target=$(eldk-map cpu target $1 2>/dev/null)
	if [ -z "$target" ]
	then
	    target=$(eldk-map alias target $1 2>/dev/null)
	    if [ -z "$target" ]
	    then
		if eldk-map target | grep -q "^${1}\$"
		then
		    target=$1
		else
		    echo "$(basename $0): don't know what $1 might be, giving up." 1>&2
		    exit 1
		fi
	    fi
	fi
    fi
    if [ -z "$target" ]
    then
	echo "Internal error" 1>&2
    else
	prune_path ${eldk_prefix/%-}
	config=$(ls ${eldk_prefix}${rev}/${target}/environment-setup-* 2>/dev/null)
	if [ ! -r "${config}" ]
	then
	    echo "$(basename $0): ELDK $rev for $target is not installed!" 1>&2
	    exit 1
	fi
	echo "Setup for ${target} (using ELDK $rev)" 1>&2
	# Use our pruned path to add the new path in our environment
	pathcmd=$(cat ${config} | grep " PATH=")
	eval $pathcmd
	cmds=$(cat ${config} | grep -v " PATH=" | sed 's/$/ ; /g')
	# We want to reference ${TARGET_PREFIX}, so evaluate it
	eval $(cat ${config} | grep "TARGET_PREFIX=")
	# Built minimal set of variables, i.e. PATH, CROSS_COMPILE and ARCH
	min_cmds="export PATH=$PATH ; export CROSS_COMPILE=${TARGET_PREFIX}"
#	cmds="$cmds ; export DEPMOD=${eldk_prefix}${rev}/usr/bin/depmod.pl"
	if need_target_arch_change $target
	then
	    min_cmds="$min_cmds ; export ARCH=$(eldk-map target arch $target | sed 's/:.*$//g')"
	fi
	# -m restricts the emitted commands to the minimal variable set.
	if [ -n "${minimal}" ]; then
	    cmds="$min_cmds"
	else
	    cmds="$min_cmds ; $cmds"
	fi
	echo $cmds
	[ -n "$verbose" ] && echo $cmds | sed 's/ ; /\n/g' 1>&2
	if [ -L $root_symlink ]
	then
	    rm $root_symlink
	    ln -s ${eldk_prefix}${rev}/${target}/rootfs $root_symlink
	    echo "Adjusted $root_symlink pointing to $(readlink $root_symlink)" 1>&2
	fi
    fi
fi
| true
|
ceac7fb85fb2162359755c24ae05c819eaa61f22
|
Shell
|
noone-fly/linuxshell
|
/autotaskBra/listener_debug.sh
|
UTF-8
| 1,041
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
#listener_debug.sh
# Watchdog: counts running DEBUG processes and tops the pool back up to 4
# workers, or kills everything if more than 4 are running.
# NOTE(review): `grep 'DEBUG'` will also match this pipeline's own grep
# process (no `grep -v grep`), so the count may be off by one - confirm.
rm autodebug 2>/dev/null
touch autodebug
# Record one line per matching PID; the line count below is the process count.
for PID in `ps -ef | grep 'DEBUG' | awk '{print $2}'`
do
	echo "pid : [$PID]" >> autodebug
done
process_num=`cat autodebug | wc -l`
if [ "$process_num" -gt "4" ];then
	rm autodebug 2>/dev/null
	./killprocess.sh 2>>logs/killProcessByTimeLog
	exit 0
fi
# Fewer than 4 workers: start as many as are missing, assigning channel IDs.
# NOTE(review): `2>>/dev/null 2>&1` first appends stderr to /dev/null and
# then immediately re-points stderr at stdout, leaving stdout unredirected -
# presumably `>>/dev/null 2>&1` was intended; confirm before changing.
if [ "$process_num" -lt "4" ];then
	case $process_num in
	3) echo "==3=="
		./assignChannelID_debug.sh 1 2>>/dev/null 2>&1
		./multipleTask.sh debug 1 2>>/dev/null 2>&1
	;;
	2) echo "==2=="
		./assignChannelID_debug.sh 2 2>>/dev/null 2>&1
		./multipleTask.sh debug 2 2>>/dev/null 2>&1
	;;
	1) echo "==1=="
		./assignChannelID_debug.sh 3 2>>/dev/null 2>&1
		./multipleTask.sh debug 3 2>>/dev/null 2>&1
	;;
	0) echo "==0=="
		./assignChannelID_debug.sh 4 2>>/dev/null 2>&1
		./multipleTask.sh debug 4 2>>/dev/null 2>&1
	;;
	*) echo "+++++++++++++++"
		exit 1
	;;
	esac
fi
| true
|
45449013a91ab6ea2de5d481dd5380d115fc6444
|
Shell
|
hbalp/callers
|
/bin/indent_jsonfiles.sh
|
UTF-8
| 454
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright (C) 2015 Thales Communication & Security
# - All Rights Reserved
# coded by Hugues Balp
# Pretty-print (indent) every *.json file under the directory $1 in place,
# using ydump, via a temporary copy of each file.
# WARNING: We assume here that only one input parameter is present and correspond to a valid directory.
dir=$1
echo "Try to indent all json files present in directory \"${dir}\""
# Fixes over the original `for json in $(find ...)` loop:
#  - find -print0 / read -d '' handles file names with spaces or newlines;
#  - all expansions are quoted;
#  - if ydump fails, the original file is no longer left truncated.
find "$dir" -name "*.json" -print0 |
while IFS= read -r -d '' json
do
    tmp="${json}.tmp"
    #echo "* indent json file: ${json}"
    cp "${json}" "${tmp}"
    if ydump "${tmp}" > "${json}"
    then
        rm "${tmp}"
    else
        # Restore the original content and stop: ydump truncated ${json}.
        mv "${tmp}" "${json}"
        echo "error: ydump failed on ${json}" >&2
        exit 1
    fi
done
| true
|
8c8057e38fe75e65a7655e8b1ff80e7a4e9530e8
|
Shell
|
alexharv074/encrypt_ami
|
/encrypt_ami.sh
|
UTF-8
| 6,825
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Print the command synopsis and exit 2. Called for -h or bad arguments.
usage() {
  echo "Usage: $0 [-h] SOURCE_IMAGE_ID \
IMAGE_NAME \
KMS_KEY_ID \
OS_TYPE \
[SUBNET_ID \
IAM_INSTANCE_PROFILE \
TAGS]"
  exit 1
}
# Emit the EC2 user-data for the temporary instance.
# $1 - OS type; only "windows" produces output (a PowerShell payload that
#      re-enables Sysprep/initialization for Server 2012 via Ec2Config or
#      Server 2016 via EC2Launch, so the image generalizes on next boot).
# For any other OS type nothing is printed (empty user-data).
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded.
build_user_data() {
  local os_type=$1
  [ "$os_type" != "windows" ] && return
  cat <<'EOF'
<powershell>
$Cmd = Get-WmiObject -Class Win32_OperatingSystem | ForEach-Object -MemberName Caption
$Get_OS = $Cmd -match '(\d+)'
if ($Get_OS) {
$Os_Type = $matches[1]
}
Write-Host "The operating system is $Os_Type"
if ($Os_Type -eq '2012') {
$EC2SettingsFile = "C:\\Program Files\\Amazon\\Ec2ConfigService\\Settings\\Config.xml"
$xml = [xml](get-content $EC2SettingsFile)
$xmlElement = $xml.get_DocumentElement()
$xmlElementToModify = $xmlElement.Plugins
$enableElements = "Ec2SetPassword", `
"Ec2SetComputerName", `
"Ec2HandleUserData", `
"Ec2DynamicBootVolumeSize"
$xmlElementToModify.Plugin | Where-Object {$enableElements -contains $_.name} | Foreach-Object {$_.State="Enabled"}
$xml.Save($EC2SettingsFile)
# Sysprep Configuration setting for win2k12
$EC2SettingsFile = "C:\\Program Files\\Amazon\\Ec2ConfigService\\Settings\\BundleConfig.xml"
$xml = [xml](get-content $EC2SettingsFile)
$xmlElement = $xml.get_DocumentElement()
foreach ($element in $xmlElement.Property) {
if ($element.Name -eq "AutoSysprep") {
$element.Value = "Yes"
}
}
$xml.Save($EC2SettingsFile)
} elseif ($Os_Type -eq '2016') {
# Changes are made to LaunchConfig.json file
$LaunchConfigFile = "C:\\ProgramData\\Amazon\\EC2-Windows\\Launch\\Config\\LaunchConfig.json"
$jsoncontent = Get-Content $LaunchConfigFile | ConvertFrom-Json
$jsoncontent.SetComputerName = 'true'
$jsoncontent | ConvertTo-Json | set-content $LaunchConfigFile
# This script schedules the instance to initialize during the next boot.
C:\ProgramData\Amazon\EC2-Windows\Launch\Scripts\InitializeInstance.ps1 -Schedule
# The EC2Launch service runs Sysprep, a Microsoft tool that enables creation of customized Windows AMI that can be reused
C:\ProgramData\Amazon\EC2-Windows\Launch\Scripts\SysprepInstance.ps1
} else {
Write-Host "Don't know what to do for OS type $Os_Type"
}
</powershell>
EOF
}
# Print the AWS account ID of the current credentials (via STS).
this_account() {
  aws sts get-caller-identity \
    --query Account --output text
}
# Print the AWS account ID that owns the given AMI.
# $1 - image ID.
account_of() {
  local image_id=$1
  aws ec2 describe-images --image-id "$image_id" \
    --query 'Images[].OwnerId' --output text
}
# Copy an AMI into an encrypted AMI using the given KMS key, wait until it
# is available, and record its ID in the file ./encrypted_image_id.
# $1 - source image ID; $2 - name for the new AMI; $3 - KMS key ID.
# NOTE(review): the source region is hard-coded to ap-southeast-2 - confirm.
copy_image() {
  local image_id=$1
  local name=$2
  local kms_key_id=$3
  local encrypted_image_id
  echo "Creating the AMI: $name"
  set -x
  encrypted_image_id=$(aws ec2 copy-image \
    --name "$name" \
    --source-image-id "$image_id" \
    --source-region ap-southeast-2 \
    --encrypted \
    --kms-key-id "$kms_key_id" \
    --query ImageId \
    --output text)
  set +x
  wait_for_image_state "$encrypted_image_id" 'available'
  echo "$encrypted_image_id" > encrypted_image_id
}
# Create an (unencrypted) AMI from a stopped instance, wait until it is
# available, and record its ID in the file ./unencrypted_image_id.
# $1 - instance ID; $2 - name for the new AMI.
create_image() {
  local instance_id=$1
  local name=$2
  local unencrypted_image_id
  echo "Creating the AMI: $name"
  set -x
  unencrypted_image_id=$(aws ec2 create-image --instance-id "$instance_id" \
    --name "$name" --query ImageId --output text)
  set +x
  wait_for_image_state "$unencrypted_image_id" 'available'
  echo "$unencrypted_image_id" > unencrypted_image_id
}
# Deregister (delete) the given AMI.
# $1 - image ID.
deregister_image() {
  local image_id=$1
  echo "Deregistering the AMI: $image_id"
  set -x
  aws ec2 deregister-image --image-id "$image_id"
  set +x
}
# Poll every 10s until the AMI reaches the desired state (e.g. 'available').
# $1 - image ID; $2 - desired state.
wait_for_image_state() {
  local image_id=$1
  local desired_state=$2
  local state
  echo "Waiting for AMI to become $desired_state..."
  while true ; do
    state=$(aws ec2 describe-images --image-id "$image_id" \
      --query 'Images[].State' --output text)
    [ "$state" == "$desired_state" ] && break
    echo "state: $state"
    sleep 10
  done
}
# Poll every 5s until the instance reaches the desired lifecycle state
# (running/stopped/terminated) and - optionally - the desired status-check
# result (e.g. 'ok'). When $3 is empty the second wait is skipped.
# $1 - instance ID; $2 - desired state; $3 - desired instance status (optional).
wait_for_instance_status() {
  local instance_id=$1
  local desired_state=$2
  local desired_status=$3
  local state
  local statu # $status is a built-in.
  echo "Waiting for instance ($instance_id) to become $desired_state..."
  while true ; do
    state=$(aws ec2 describe-instances --instance-ids "$instance_id" \
      --query 'Reservations[].Instances[].State.Name' --output text)
    [ "$state" == "$desired_state" ] && break
    echo "state: $state"
    sleep 5
  done
  [ -z "$desired_status" ] && return
  echo "Waiting for instance ($instance_id) to become $desired_status..."
  while true ; do
    statu=$(aws ec2 describe-instance-status --instance-ids "$instance_id" \
      --query 'InstanceStatuses[].InstanceStatus.Status' --output text)
    [ "$statu" == "$desired_status" ] && break
    echo "state: $statu"
    sleep 5
  done
}
# Launch a temporary instance from the source AMI (with the OS-specific
# user-data), wait until its status checks pass, and record the instance ID
# in the file ./instance_id.
# $1 - image ID; $2 - IAM instance profile name; $3 - subnet ID; $4 - OS type.
# NOTE(review): reads the global $tags (set at the bottom of the script)
# and hard-codes the c4.2xlarge instance type - confirm both are intended.
run_instance() {
  local image_id=$1
  local iam_instance_profile=$2
  local subnet_id=$3
  local os_type=$4
  local user_data
  local instance_id
  user_data=$(build_user_data "$os_type")
  echo "Launching a source AWS instance..."
  instance_id=$(aws ec2 run-instances --image-id "$image_id" \
    --instance-type 'c4.2xlarge' \
    --subnet-id "$subnet_id" \
    --iam-instance-profile "Name=$iam_instance_profile" \
    --user-data "$user_data" \
    --tag-specifications "ResourceType=instance,Tags=${tags}" \
    --query 'Instances[].InstanceId' \
    --output text)
  wait_for_instance_status "$instance_id" 'running' 'ok'
  echo "$instance_id" > instance_id
}
# Stop the instance and block until it reaches the 'stopped' state.
# $1 - instance ID.
stop_instance() {
  local instance_id=$1
  echo "Stopping the source AWS instance..."
  aws ec2 stop-instances --instance-ids "$instance_id" > /dev/null
  wait_for_instance_status "$instance_id" 'stopped'
}
# Terminate the instance and block until it reaches 'terminated'.
# $1 - instance ID.
terminate_instance() {
  local instance_id=$1
  echo "Terminating the source AWS instance..."
  aws ec2 terminate-instances --instance-ids "$instance_id" > /dev/null
  wait_for_instance_status "$instance_id" 'terminated'
}
# Remove the scratch files used to pass IDs between steps.
clean_up() {
  rm -f encrypted_image_id instance_id unencrypted_image_id
}
# Orchestrate the encryption workflow.
# Same-account AMIs can be copy-image'd directly with encryption enabled;
# foreign-account AMIs must first be re-created locally: launch an instance
# from the AMI, stop it, image it, then encrypt that intermediate image and
# deregister it. Intermediate IDs travel via scratch files (see clean_up).
main() {
  if [ "$(this_account)" == "$(account_of "$source_image_id")" ] ; then
    copy_image "$source_image_id" "$image_name" "$kms_key_id"
  else
    run_instance "$source_image_id" "$iam_instance_profile" "$subnet_id" "$os_type"
    instance_id=$(<instance_id)
    stop_instance "$instance_id"
    create_image "$instance_id" "${image_name}-unencrypted"
    unencrypted_image_id=$(<unencrypted_image_id)
    terminate_instance "$instance_id"
    copy_image "$unencrypted_image_id" "$image_name" "$kms_key_id"
    deregister_image "$unencrypted_image_id"
  fi
  echo "Encrypted AMI ID: $(<encrypted_image_id)"
  clean_up
}
# Positional arguments (see usage); the last three are optional and only
# needed for the foreign-account path.
source_image_id=$1
image_name=$2
kms_key_id=$3
os_type=$4
subnet_id=$5
iam_instance_profile=$6
tags=$7
if [ "$1" == "-h" ] ; then
  usage
fi
# Only run main when executed directly (not when sourced, e.g. for tests).
if [ "$0" == "${BASH_SOURCE[0]}" ] ; then
  main
fi
| true
|
6e05887c5117e1ab1ea576f29482918b5674fed9
|
Shell
|
rasoolianbehnam/ED
|
/cnit127/proj8x/run.sh
|
UTF-8
| 454
| 3.28125
| 3
|
[] |
no_license
|
# Build-and-run helper: runs a .py file with python, or compiles a .c file
# (32-bit, executable stack, no PIE / stack protector - deliberately
# weakened for exploitation exercises) and runs the resulting binary.
# $1 - source file name in the form <name>.<ext>.
fileName=$(echo $1 | cut -d'.' -f1)
extension=$(echo $1 | cut -d'.' -f2)
echo file name: $fileName
echo extension: $extension
# Quoted operands: with no/odd argument the original `[ $extension = 'py' ]`
# expanded to a malformed test and printed an error.
if [ "$extension" = 'py' ]; then
    echo python $1 | bash -x;
elif [ "$extension" = 'c' ]; then
    #echo gcc -g -static -m32 -z execstack -no-pie -fno-stack-protector -o $fileName.out $1 | bash -x
    echo gcc -g -m32 -z execstack -no-pie -fno-stack-protector -o $fileName.out $1 | bash -x
    # Bug fix: was `./${fileName.out}`, which is a bash "bad substitution"
    # error - the intended expansion is `${fileName}.out`.
    echo ./${fileName}.out | bash -x
fi
| true
|
715ef7c00625c12ace7ba7c7369f46e14090cea9
|
Shell
|
rperea14/drigoscripts
|
/drigo_Tractography/older/drigo_AFTER_Tractography/drigo_BD_str2diff_targetsStep2.sh
|
UTF-8
| 1,363
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
#Rodrigo Perea
#Goal: This script should be executed after PROBTRACKX2 has been executed
# It will generate copy the necessary files
# to create a str2diff tranformations of the paths and generate the paths
# in diffusion space.
SOURCE=$( pwd )
#Variables to initialize
TARGET=$1
LDIR="L_str2diff_TARGETS"
mkdir -p $SOURCE/$LDIR
# Process every subject directory matching 11969_*.
for DIR in $(ls -1d $SOURCE/11969_* ) ;
do
	DIRB=$(basename $DIR)
	#FROM STEP1
#	#Copying the neccesary files
#	echo "Copying in $DIR..."
#	mkdir -p $LDIR/$DIRB
#	cp $DIR/xfms/str2diff.mat $LDIR/$DIRB/
#	cp $DIR/L-Thalamus-Tract/waytotal $LDIR/$DIRB/L-waytotal
#	cp $DIR/L-Thalamus-Tract/target* $LDIR/$DIRB/
#	cp $DIR/R-Thalamus-Tract/waytotal $LDIR/$DIRB/R-waytotal
#	cp $DIR/R-Thalamus-Tract/target* $LDIR/$DIRB/
#	cp $DIR/nodif_brain.nii.gz $LDIR/$DIRB/
#	cp $DIR/FS_brain.nii.gz $LDIR/$DIRB/
	# WAYTOTAL and WAYR are assigned but not used below.
	WAYTOTAL=1
	WAYL=$(cat $DIR/waytotal-L)
	WAYR=$(cat $DIR/waytotal-R)
	echo "In $DIR ... "
	#Entering each directory ....
	cd $DIR
	# For each RIGHT-hemisphere target path image: normalize by waytotal,
	# warp structural->diffusion with the str2diff affine, threshold at
	# the 95th percentile.
	# NOTE(review): the R-hemisphere paths are divided by $WAYL (the LEFT
	# waytotal) - presumably $WAYR was intended; confirm before rerunning.
	for TAR in $(ls target_paths_R* ) ;
	do
		echo "In $TAR ... "
		fslmaths $TAR -div $WAYL norm_${TAR}
		flirt -in norm_${TAR} -ref nodif_brain.nii.gz -applyxfm -init str2diff.mat -out str2diff_norm_${TAR}
		fslmaths str2diff_norm_${TAR} -thrP 95 tr95_str2diff_norm_${TAR}
	done
	cd $SOURCE
	#leaving each directory....
done
| true
|
41f4e9b63a9b2dd9b2c4dd5729f55de34af7f8b3
|
Shell
|
thiagonerys/universal_kotlin
|
/resources/scripts/apple/xcode.sh
|
UTF-8
| 2,101
| 3.375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Xcode "Run Script" build phase: dispatches to the right Gradle task for
# the Kotlin/Native module named in $module (set by the Xcode project).
# This script is referenced in the following files:
# application/mobile/native/apple/ios/ios_x64_copying_framework/iosApp.xcodeproj/project.pbxproj
# application/mobile/native/apple/ios/ios_x64_framework/framework.xcodeproj/project.pbxproj
# application/mobile/native/apple/ios/ios_x64_with_framework/application.xcodeproj/project.pbxproj
# application/mobile/native/apple/ios/ios_x64_without_framework/application.xcodeproj/project.pbxproj
if [[ -z "$module" ]]; then
    printf "Module name not found. Check if the 'shellScript' entry is correctly configured in your 'project.pbxproj' file."
    exit 1
fi
export enabledModules="$module"
export task=":$module"
# Dump the environment for build-log debugging.
printf "Environment Variables:\n"
# env | sort
( set -o posix ; set ) | sort
echo "KOTLIN_TARGET: $KOTLIN_TARGET"
echo "KOTLIN_BUILD_TYPE: $KOTLIN_BUILD_TYPE"
echo "KOTLIN_NATIVE_BUILD_CAPABLE: $KOTLIN_NATIVE_BUILD_CAPABLE"
printf "TARGET_BUILD_DIR - Contents - Before\n"
printf "$TARGET_BUILD_DIR\n"
tree $TARGET_BUILD_DIR
#if [ "$module" = "application-mobile-native-apple-ios-ios_x64_without_framework" ]; then
#    echo ""
##    rm -f "$TARGET_BUILD_DIR/$EXECUTABLE_PATH"
#fi
# Module -> Gradle task mapping; the copying_framework variant forwards the
# Xcode build settings to Gradle as -P project properties.
if [ "$module" = "application-mobile-native-apple-ios-ios_x64_copying_framework" ]; then
    ./gradlew --stacktrace --warning-mode all \
        -p $SRCROOT/ \
        -Pconfiguration.build.dir=$CONFIGURATION_BUILD_DIR \
        -Pkotlin.build.type=$KOTLIN_BUILD_TYPE \
        -Pkotlin.target=$KOTLIN_TARGET \
        "$task:copyFramework"
elif [ "$module" = "application-mobile-native-apple-ios-ios_x64_without_framework" ]; then
    ./gradlew --stacktrace --warning-mode all "$task:copyApplication"
elif [ -z $KOTLIN_NATIVE_BUILD_CAPABLE ]; then
    ./gradlew --stacktrace --warning-mode all "$task:buildForXcode"
else
    printf "Uknown module '$module'. Not target action defined"
    exit 2
fi
#if [ "$module" = "application-mobile-native-apple-ios-ios_x64_without_framework" ]; then
#    mv "$TARGET_BUILD_DIR/$EXECUTABLE_NAME.kexe" "$TARGET_BUILD_DIR/$EXECUTABLE_NAME.app"
#fi
printf "TARGET_BUILD_DIR - Contents - After\n"
printf "$TARGET_BUILD_DIR\n"
tree $TARGET_BUILD_DIR
| true
|
80d9369480ffad3058cb665e1296e222d0da2a17
|
Shell
|
anastasiiaNG/variant_calling
|
/9_annovar.sh
|
UTF-8
| 984
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Annotate a sample's VCF with ANNOVAR against the hg19 and hg38 databases.
# $1 - sample name; expects <sample>_ann_b37.vcf in the current directory.
if [[ -z $1 ]]; then
    echo "First argument is empty! Type the name of the vcf file"
    exit
fi
samplename=$1
# NOTE(review): both runs read the b37-based VCF and write to the same
# -out prefix "$samplename", so the hg38 run will overwrite the hg19
# outputs - confirm this is intended.
annovar "$samplename"_ann_b37.vcf $ngstools/annovar/humandb/ -buildver hg19 -out "$samplename" -remove -protocol refGene,cytoBand,genomicSuperDups,esp6500siv2_all,1000g2015aug_all,1000g2015aug_eur,exac03,avsnp147,dbnsfp30a -operation g,r,r,f,f,f,f,f,f -nastring . -vcfinput
annovar "$samplename"_ann_b37.vcf $ngstools/annovar/humandb/ -buildver hg38 -out "$samplename" -remove -protocol refGene,cytoBand,genomicSuperDups,esp6500siv2_all,1000g2015aug_all,1000g2015aug_eur,exac03,avsnp147,dbnsfp30a -operation g,r,r,f,f,f,f,f,f -nastring . -vcfinput
#perl ~/ngs/tools/annovar/table_annovar.pl "$samplename"_ann.vcf $ngstools/annovar/humandb/ -buildver $ref_name -out $samplename -remove -protocol refGene,cytoBand,genomicSuperDups,esp6500siv2_all,1000g2015aug_all,1000g2015aug_eur,exac03,avsnp147,dbnsfp30a -operation g,r,r,f,f,f,f,f,f -nastring . -vcfinput
| true
|
f35a9c57b9cff54307ca8c1d183bcea8c18ab55a
|
Shell
|
tjcelaya/dotfiles
|
/consul_cluster.sh
|
UTF-8
| 556
| 2.84375
| 3
|
[] |
no_license
|
# Manage a throwaway Consul demo cluster in Docker.
#   init   - pull the progrium/consul image
#   start  - join node2..node4 to an already-running 'node1' container
#   stop   - stop all containers
#   status - list all containers
# Fix: the script was truncated - the usage echo was missing its closing
# quote and the case statement was never terminated with ';;' and 'esac',
# so the file did not parse.
case $1 in
  *init*)
    docker pull progrium/consul
    ;;
  *start*)
    # node1 must already exist; use its IP as the cluster join address.
    JOIN_IP="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' node1)"
    docker run -d --name node2 -h node2 progrium/consul -server -join $JOIN_IP
    docker run -d --name node3 -h node3 progrium/consul -server -join $JOIN_IP
    docker run -d -p 8400:8400 -p 8500:8500 -p 8600:53/udp --name node4 -h node4 progrium/consul -join $JOIN_IP
    docker ps -a
    ;;
  *stop*)
    docker stop $(docker ps -a -q)
    ;;
  *status*)
    docker ps -a
    ;;
  *)
    echo "Usage: consul_cluster.sh {init|start|stop|status}"
    ;;
esac
| true
|
855640a80a4895f90d554261d79969bc675eb298
|
Shell
|
KaneLab/GenotyperM
|
/GenotyperM_with_phasing.sh
|
UTF-8
| 5,627
| 3.734375
| 4
|
[] |
no_license
|
#This program turns a vcf table into a fasta file given a reference sequence
#Inputs are
# $1: multi fasta containing all references unwrapped
# $2: vcf output of an mpileup for one or multiple organisms versus a reference
# $3: relative or absolute filepath to the sorted.bam files. At present, the files must be in the format <sampleID>.sorted.bam
#get array of reference genes and length of array
refs=(`grep ">" $1 | tr -d ">"`)
refLen=${#refs[@]}
echo "Building genotypes for $refLen sequences, ${refs[*]}."
#Filter the .vcf
# Keep only max-quality (QUAL==999), non-N, biallelic (no ',' in ALT),
# non-INDEL records.
echo "Formatting input vcf."
grep -v "#" $2 | awk '$6 == 999 && $4 != "N" && $5 !~ ","' | grep -v "INDEL" > workingVcf
echo "Formatting completed successfully."
#Get the header info and format it to work with RefSeq2
# Sample names come from the #CHROM header line, columns 10+, with the
# ".sorted.bam" suffix stripped.
sampleArray=(`grep -v "##" $2 | head -n 1 | sed 's/ /\t/g' | sed 's/.sorted.bam//g' | cut -f10-`)
sampleLen=${#sampleArray[@]}
grep -v "##" $2 | head -n 1 | sed 's/ /\t/g' | cut -f 10- > header
sed -i 's/.sorted.bam//g' header
# 'transpose' is a sibling helper script in this project.
bash transpose header > sampleIDs; rm header
echo "Each sequence has $sampleLen individuals."
#Run bam parser for each sorted BAM in directory.
mkdir tmp_input
perl run_bam_parser.pl sampleIDs $3 workingVcf tmp_input
# Merge the per-sample parser outputs into the hapseq2 input files.
cat tmp_input/*.count.txt > count.txt
cat tmp_input/*.jump1.txt > jump1.txt
cat tmp_input/*.jump2.txt > jump2.txt
cat tmp_input/*.read.txt > read.txt
# Polymorphic site list: position, REF, ALT of each retained variant.
grep -v "#" $2 | awk '$6 == 999 && $4 != "N" && $5 !~ ","' | grep -v "INDEL" | cut -f 2,4,5 > sites.txt
#Run hapseq2 phasing program
hapseq2 --readCounts count.txt --polymorphicSites sites.txt --readHap jump1.txt --seqReadFile read.txt --seed 1 --readForwOpt 1 --mcmcHap 2 --mcmcScale 05 -o HapSeqOutput --seqError 0.01 --phase --geno --quality -r 20
rm count.txt; rm jump1.txt; rm jump2.txt; rm read.txt
#####Now reformat the output from hapseq2 to input into GenotyperM#####
echo "New stuff 1"
#Introduce delimitations into haplotypes so that we can transpose the rows into columns
# Column 3 of HapSeqOutput is the haplotype string; a space is inserted
# after every lowercase base so 'transpose' can treat bases as fields.
awk '{print $3}' HapSeqOutput | sed 's/[a-z]/& /g' > HaplotypesSpaced
bash transpose HaplotypesSpaced > HaplotypesSpaced_T
rm HaplotypesSpaced
#Tranpose name list into columns
#awk '{print $2}' HapSeqOutput > nameList
#bash transpose nameList > nameList_T
#rm nameList
echo "New stuff 2"
#Double each sample identifier, since each sample takes up 2 columns (1 column per chromosome) and transpose
sed 'h;:a;s/[^\n]\+/&/2;t;G;ba' sampleIDs > sampleIDs_D
bash transpose sampleIDs_D > sampleIDs_D_T
#rm sampleIDs_D
echo "New stuff 3"
#Concatenate name list and haplotype info
cat sampleIDs_D_T HaplotypesSpaced_T > preGenotyperFormat
#rm sampleIDs_D_T; rm HaplotypesSpaced_T
#Get positions list
# Columns 1,2 of the VCF: reference name and position for every record.
grep -v "##" $2 | cut -f 1,2 > posList
echo "New stuff 4"
#Get list of positions of variant sites from original .vcf file
cat posList preGenotyperFormat > preGenotyperFormat_S
#rm preGenotyperFormat; rm posList
echo "Last new stuff"
#Finally, we get the correct format for GenotyperM
# workingVcf is intentionally overwritten with the reformatted table here.
paste preGenotyperFormat_S HaplotypesSpaced_T > workingVcf
#rm preGenotyperFormat_S; rm HaplotypesSpaced_T
#reference loop
# For every reference sequence, build one L and one R multi-fasta holding
# each sample's two phased haplotype sequences.
for(( r=0; r<$refLen; r++ )); do
	#store current gene in working name
	cRef=${refs[$r]}
	#make new multifastas to write genotypes to
	> ${cRef}_L.fa
	> ${cRef}_R.fa
	#get reference in the form to genotype (supports single fasta reference for now)
	grep -A1 $cRef $1 | tail -n1 > tempRef.fa
	#only take rows that include that referece header from the fasta input
	grep $cRef workingVcf > preSnp
	#get positions of snps and snps into array and get lengths
	posArray=(`awk '{print $2}' preSnp`)
	posLength=${#posArray[@]}
	#get list of alternate nucleotides to replace the reference with
	# NOTE(review): altArray is populated but never used below - the
	# substituted bases come from the per-sample haplotype columns instead.
	altArray=(`awk '{print $3}' preSnp`)
	#make a genotyped sequence for LEFT and RIGHT columns of each sample
	for((s=0; s<$sampleLen; s++));do
		echo "Current iteration is $s out of $sampleLen"
		#get templates for overwriting with unique genotypes
		typeL=`cat tempRef.fa`
		typeR=`cat tempRef.fa`
		#get current sample
		currSample=${sampleArray[$s]}
		echo "currSample is $currSample"
		#echo "currsamp is $currSample"
		#find column to work on
		# Sample s occupies columns 2s+3 (left chrom) and 2s+4 (right),
		# after the two leading ref-name/position columns.
		currColumnL=$((2*$s + 3))
		currColumnR=$((2*$s + 4))
		#echo "$currColumnL and $currColumnR"
		snpArrayL=(`awk -v currColumn=$currColumnL '{print $currColumn}' preSnp`) #get LEFT snp column in vcf for current sample.
		snpArrayR=(`awk -v currColumn=$currColumnR '{print $currColumn}' preSnp`) #get RIGHT snp column in vcf for current sample.
		#echo "${snpArrayL[*]} and ${snpArrayR[*]}"
		#iteratively sub reference nucleotide for current snp at the current position
		for((i=0;i<$posLength;i++));do
			currentPos=${posArray[$i]}
			currentSNPL=${snpArrayL[$i]}
			currentSNPR=${snpArrayR[$i]}
			#echo "$currentSNPL $currentSNPR"
			#replace
			# sed 's/./X/N' replaces the N-th character of the sequence
			# (1-based), i.e. the base at the variant position.
			typeL=`echo $typeL | sed "s/./$currentSNPL/$currentPos"`
			typeR=`echo $typeR | sed "s/./$currentSNPR/$currentPos"`
		#end loop on SNP positions
		done
		#Create new file for the genotyped individual, use $3 command line arg (prefix) for header and file name
		#vcfName=`echo $cVcf | awk -F "\." '{print $1}'`
		echo ">$currSample" >> ${cRef}_L.fa; echo ">$currSample" >> ${cRef}_R.fa;
		echo $typeL >> ${cRef}_L.fa
		echo $typeR >> ${cRef}_R.fa
	done
	rm tempRef.fa
	rm preSnp
done
| true
|
424816dfb82c8601cc8537cd1a6b3896769daf03
|
Shell
|
famsedition/MISP-install-CentOS7
|
/misp-preparations.sh
|
UTF-8
| 830
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################################
#                 MISP CENTOS7 INSTALL SCRIPT                         #
#                                                                     #
# Revised from:                                                       #
#   https://misp.github.io/MISP/INSTALL.rhel7/                        #
#                                                                     #
#   > Must be run as root                                             #
#   > run this file first                                             #
#######################################################################
# Preparation step: update the system, enable the EPEL and SCL repos,
# install build tooling, create the 'misp' service account, then reboot
# so the updated kernel/packages take effect before the next script runs.
yum update -y
yum install epel-release -y
yum install centos-release-scl -y
yum install deltarpm -y
yum groupinstall "Development tools" -y
# -r: system account; -U: create a matching 'misp' group.
useradd -rU misp
reboot
| true
|
8dbf7dc6d62c557d81d22281b4bea47b8883a8e0
|
Shell
|
sanyecao2314/docker-nagios-1
|
/postfix.sh
|
UTF-8
| 269
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
### In postfix.sh (make sure this file is chmod +x):
# `/sbin/setuser xxxxx` runs the given command as the user `xxxxx`.
# If you omit that part, the command will be run as root.
# exec replaces this shell with the postfix master process (-d: stay in the
# foreground) so the supervisor tracks postfix directly; output is appended
# to the log file.
exec /usr/lib/postfix/master -d -c /etc/postfix >>/var/log/postfix.log 2>&1
| true
|
7a97b3efa135020ff32036410e1f6be53a20a293
|
Shell
|
larsla/oauth2_proxy
|
/entrypoint.sh
|
UTF-8
| 2,892
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Entrypoint for oauth2_proxy: build the command line for the selected
# OAuth provider (gitlab, github or google) and run the proxy.
#
# Required env:  CLIENT_ID, CLIENT_SECRET
# Optional env:  PROVIDER (default gitlab), COOKIE_SECRET (random default),
#                EMAIL_DOMAIN, UPSTREAM_URL, HTTP_ADDRESS, HTTPS_ADDRESS,
#                GITLAB_URL / GITHUB_URL (self-hosted instances),
#                GITHUB_ORG, GITHUB_TEAM, GOOGLE_ADMIN_EMAIL, GOOGLE_GROUP,
#                GOOGLE_ACCOUNT_JSON.

# Emit a random alphanumeric secret (length $1, default 32).
function generatesecret() {
  < /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-32};echo;
}

COOKIE_SECRET=${COOKIE_SECRET:-$(generatesecret)}
PROVIDER=${PROVIDER:-"gitlab"}
EMAIL_DOMAIN=${EMAIL_DOMAIN:-"*"}
UPSTREAM_URL=${UPSTREAM_URL:-"http://127.0.0.1:1234"}
HTTP_ADDRESS=${HTTP_ADDRESS:-"0.0.0.0:4180"}
HTTPS_ADDRESS=${HTTPS_ADDRESS:-":443"}

# Flags shared by every provider invocation (the original repeated these in
# all six branches; flag order is not significant to oauth2_proxy).
common_args=(
  -http-address="${HTTP_ADDRESS}"
  -https-address="${HTTPS_ADDRESS}"
  -cookie-secret="${COOKIE_SECRET}"
  -cookie-secure=true
  -client-id="${CLIENT_ID}"
  -client-secret="${CLIENT_SECRET}"
  -upstream="${UPSTREAM_URL}"
  -email-domain="${EMAIL_DOMAIN}"
)

case "$PROVIDER" in
  "gitlab")
    extra_args=()
    # Self-hosted GitLab: point the OAuth endpoints at the given base URL.
    if [ -n "${GITLAB_URL}" ]; then
      extra_args+=(
        -login-url="${GITLAB_URL}/oauth/authorize"
        -redeem-url="${GITLAB_URL}/oauth/token"
        -validate-url="${GITLAB_URL}/api/v3/user"
      )
    fi
    /go/bin/oauth2_proxy -provider="gitlab" "${common_args[@]}" "${extra_args[@]}"
    ;;
  "github")
    # Org/team restrictions are always passed (possibly empty), as before.
    extra_args=(
      -github-org="${GITHUB_ORG}"
      -github-team="${GITHUB_TEAM}"
    )
    # GitHub Enterprise: point the OAuth endpoints at the given base URL.
    if [ -n "${GITHUB_URL}" ]; then
      extra_args+=(
        -login-url="${GITHUB_URL}/login/oauth/authorize"
        -redeem-url="${GITHUB_URL}/login/oauth/access_token"
        -validate-url="${GITHUB_URL}/api/v3"
      )
    fi
    /go/bin/oauth2_proxy -provider="github" "${common_args[@]}" "${extra_args[@]}"
    ;;
  "google")
    /go/bin/oauth2_proxy -provider="google" "${common_args[@]}" \
      -google-admin-email="${GOOGLE_ADMIN_EMAIL}" \
      -google-group="${GOOGLE_GROUP}" \
      -google-service-account-json="${GOOGLE_ACCOUNT_JSON}"
    ;;
  *)
    echo "Not a known provider.."
    ;;
esac
| true
|
c1ea78fb046fa2ef85a5578cbf1b0d37a7e745ec
|
Shell
|
cutec-mac/promet-erp
|
/promet/setup/i386-linux/build_deb_7.sh
|
UTF-8
| 722
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the <Program>-ocr Debian package from a prepared build directory.
# Args: 1=widgetset 2=program 3=version 4=deb arch 5=fpc arch 6=date
#       7=build dir 8=temp dir
Program=$2
Widgetset=$1
Version=$3
Arch=$4
Archfpc=$5   # not used in this script; kept for caller/interface parity
Date=$6      # not used in this script; kept for caller/interface parity
BuildDir=$7
TmpDir=$8

# Start from a clean build tree.
sudo -S rm -rf "$BuildDir"

Subprogram=ocr

echo "copyright and changelog files..."
mkdir -p "$BuildDir/usr/share/doc/$Program-$Subprogram"
cp debian/copyright "$BuildDir/usr/share/doc/$Program-$Subprogram/copyright"

echo "creating control file..."
mkdir -p "$BuildDir/DEBIAN"
# NOTE(review): $DebSize is never assigned in this script, so DEBSIZE is
# substituted with an empty string — confirm whether a caller exports it.
sed -e "s/VERSION/$Version/g" \
    -e "s/ARCH/$Arch/g" \
    -e "s/DEBSIZE/$DebSize/g" \
    debian/control_ocr \
    > "$BuildDir/DEBIAN/control"

echo "building package..."
sudo -S dpkg-deb --build "$BuildDir"
cp "$TmpDir/software_build.deb" "../output/${Program}-${Subprogram}_${Version}_${Arch}-$Widgetset.deb"

echo "cleaning up..."
sudo -S rm -r "$BuildDir"
| true
|
9144f4b5b9c80126092b4cc40abb26af8abb93ec
|
Shell
|
tushargoyal02/bashScript
|
/untilLoop.sh
|
UTF-8
| 203
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Count down from 25, printing each value, and stop once it drops below 10.
count=25
while [ "$count" -ge 10 ]; do
  echo "$count"
  count=$((count - 1))
done
# a plain while loop with the original `until` condition would never run:
: 'while [ $count -lt 10 ]; do
echo "hello"
done '
| true
|
255880b19c061595c9cbbb0cd642c76d87f9e2b8
|
Shell
|
tchamabe1979/exareme
|
/Local-Deployment/portainer.sh
|
UTF-8
| 483
| 2.78125
| 3
|
[] |
permissive
|
#!/usr/bin/env bash
# Deploy Portainer as a Docker swarm service on a manager node.
# Required env: PORTAINER_DATA, PORTAINER_PORT, PORTAINER_IMAGE,
#               PORTAINER_VERSION.

# Ensure the data directory exists, aborting the script on failure.
# BUG FIX: the original used a subshell `( echo ...; exit 1 )`, whose `exit`
# only left the subshell, so the service was created even when mkdir failed.
test -d "${PORTAINER_DATA}" \
    || sudo mkdir -p "${PORTAINER_DATA}" \
    || { echo "Failed to create ${PORTAINER_DATA}" >&2; exit 1; }

sudo docker service create \
    --publish mode=host,target="${PORTAINER_PORT}",published=9000 \
    --constraint 'node.role == manager' \
    --detach=true --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
    --mount type=bind,src="${PORTAINER_DATA}",dst=/data \
    --name mip_portainer "${PORTAINER_IMAGE}${PORTAINER_VERSION}"
| true
|
dd3edc27cad84454f554a2ae027e41f7d4bb7f70
|
Shell
|
eschmar/postgresql-popcount
|
/helper/base.sh
|
UTF-8
| 1,090
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Benchmark helper: run a trivial SELECT repeatedly against MySQL or
# PostgreSQL and print one duration per trial.
#
# Options:
#   -t N    number of trials (default 10)
#   -d DB   database name (default bit_count; used by MySQL only)
#   -s SYS  "mysql" or "postgres" (default mysql)
#   -u      keep time units in the output (postgres only)

# ANSI color codes (unused here, kept for scripts that source this file).
RED='\033[0;31m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m'

trials=10
database="bit_count"
system="mysql"
units='false'

# BUG FIX: the getopts fallback called `error`, which was never defined.
error() {
  echo "$1" >&2
  exit 2
}

while getopts 't:d:s:u' flag; do
  case "${flag}" in
    t) trials=$OPTARG ;;
    d) database=$OPTARG ;;
    s) system=$OPTARG ;;
    u) units='true' ;;
    *) error "Unexpected option ${flag}" ;;
  esac
done

case $system in
  'mysql')
    # Profile an empty SELECT; extract the elapsed seconds from SHOW PROFILES.
    query="SET profiling = 1; SELECT ''; SHOW PROFILES;"
    for (( i=1; i<=$trials; i++ ))
    do
      echo $(mysql -u root -vvv "$database" -e "$query" | grep "| SELECT ''" | grep -Eo '[0-9][.][0-9]+')
    done
    ;;
  'postgres')
    # psql's \timing reports the elapsed time of the empty SELECT.
    for (( i=1; i<=$trials; i++ ))
    do
      case $units in
        'true')
          echo "\\timing on \\\\ SELECT '';" | psql | grep "Time:" | sed s/"Time: "//
          ;;
        'false')
          echo "\\timing on \\\\ SELECT '';" | psql | grep "Time:" | grep -Eo '[0-9]+([.][0-9]+)?'
          ;;
      esac
    done
    ;;
esac
| true
|
50409cfa43831339af05fdd27f7b827f653b34c3
|
Shell
|
mr-justin/apexaifbxxplore
|
/semplore/sortun.sh
|
UTF-8
| 167
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# External sort+dedup for a large numeric file given as $1:
# split into ~200MB line-aligned chunks, sort each numerically, merge.

split -C 200MB -- "$1" "$1.split"

for chunk in "$1".split*
do
  sort -n -T . < "$chunk" | uniq > "$chunk.sort"
  rm -f -- "$chunk"       # free disk as soon as the chunk is sorted
done

# BUG FIX: the merge pass previously ran without -n, merging numerically
# sorted chunks in lexicographic order and breaking the overall ordering.
sort -n -m "$1".split*.sort | uniq
rm -f -- "$1".split*.sort
| true
|
040b02051e88695667fd864de0eaa4e0163b185a
|
Shell
|
BhatnagarKshitij/MSU_MScIT
|
/IST/Ex4/Ex4.sh
|
UTF-8
| 810
| 2.921875
| 3
|
[] |
no_license
|
# Shell exercise: listing files, head/tail slicing and cut/paste/sort.
# NOTE(review): Questions 9a-9f assume a space-separated data.txt exists in
# the current directory -- confirm before running.
#Question1
# ls variants: one per line, by mtime, by size, recursive, reverse order.
ls -1
ls -t
ls -S
ls -R
ls -r
echo "--------------------------------------------------";
#Question
# Write the twelve month names, one per line, into file1.txt.
echo "January
February
March
April
May
June
July
August
September
October
November
December" > file1.txt
echo "--------------------------------------------------";
#Question2
# First 4 lines.
head -n4 file1.txt
echo "--------------------------------------------------";
#Question3
# Last 3 lines.
tail -n3 file1.txt
echo "--------------------------------------------------";
#Question4
# From line 4 through the end.
tail -n +4 file1.txt
#Question5
# Everything except the last 5 lines.
head -n -5 file1.txt
#Question6
# Lines 3-7 (skip first 2, then take 5) into file2.txt.
tail -n +3 file1.txt|head -n 5 >file2.txt
#Question9a
# Fields 1-2 of data.txt; NOTE: this overwrites the month list in file1.txt.
cut -d" " -f1,2 data.txt > file1.txt
#Question9b
# Fields 4-6 of data.txt.
cut -d" " -f4-6 data.txt >file2.txt
#Question9c
# Join the two extracts side by side (tab-separated).
paste file1.txt file2.txt
#Question9d
# Second field, sorted ascending.
cut -d" " -f2 file1.txt|sort
#Question9e
# Second field, sorted descending.
cut -d" " -f2 file1.txt|sort -r
#Question9f
|
719132ee51ea1777ce30f162e40e43fcf7197195
|
Shell
|
picarodias/neo-cpp-core
|
/packages/lib-neopt-core-js/export-api-methods.sh
|
UTF-8
| 1,771
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# functions with EXPORT_NEO3API prefix (in Neo:: explicit namespace) will be automatically exported
# we expect these functions to be string -> string, so if signature is not this, export manually on .cpp file
# Code generator: scans ../../src for EXPORT_NEO3API-prefixed functions and
# appends an extern "C" emscripten wrapper plus a JS cwrap binding for each
# into the .autogen.cpp / .autogen.js files.
echo "Listing EXPORT_NEO3API methods (remember to include Neo:: scope and auto-export only string->string methods)"
echo ""
(cd ../../src && grep -rh "EXPORT_NEO3API" . | grep "Neo::")
echo ""
BASEFILE=src/lib-neopt-core-js.cpp
OUTFILE=src/lib-neopt-core-js.autogen.cpp
#
BASEFILE_JS=src/index.js
OUTFILE_JS=src/index.autogen.js
#
# Regenerate both output files from scratch: warning banner + base content.
echo "Adding these to '$OUTFILE' file"
echo "// AUTOMATICALLY GENERATED FILE - DO NOT EDIT THIS" > $OUTFILE
echo "// AUTOMATICALLY GENERATED FILE - DO NOT EDIT THIS" > $OUTFILE_JS
cat $BASEFILE >> $OUTFILE
cat $BASEFILE_JS >> $OUTFILE_JS
#
# Each match is truncated at '(' so $func is the fully qualified name.
for func in `(cd ../../src && grep -rh "EXPORT_NEO3API" . | grep "Neo::" | cut -d '(' -f1 )`; do
#
func1=${func/Neo::/Neo3::}
func2=${func1/EXPORT_NEO3API_} # remove EXPORT_NEO3API_
#
# C-compatible symbol name: '::' becomes '_'.
func4=$(echo $func2 | sed "s/::/\_/g")
#echo $func4
# ======================
# writing to output file
#
echo "Exporting method '$func2' into C version '$func4'..."
#
# NOTE(review): the generated wrapper returns .c_str() of a temporary
# std::string, i.e. a dangling pointer -- confirm the callers copy the
# result before the temporary is destroyed, or fix the template here.
echo "" >> $OUTFILE
echo "// this method was automatically exported" >> $OUTFILE
echo "EMSCRIPTEN_KEEPALIVE" >> $OUTFILE
echo "extern \"C\" const char*" >> $OUTFILE
echo "$func4(const char* cs_in) {" >> $OUTFILE
echo "return $func(std::string(cs_in)).c_str();" >> $OUTFILE
echo "}" >> $OUTFILE
#
# js part
#
echo "" >> $OUTFILE_JS
echo "// this method was automatically imported" >> $OUTFILE_JS
echo "export const $func4 =" >> $OUTFILE_JS
echo "  mymodule.cwrap('$func4', 'string', ['string']);" >> $OUTFILE_JS
done
| true
|
f17a2c199b7f345738a1a62041b30fdd3de82afa
|
Shell
|
Eat1n/android101
|
/bin/gen_jks
|
UTF-8
| 237
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# (Re)generate the Android keystore under ../certs, relative to this script.
# keytool runs interactively and prompts for the key details and passwords.

# Work from the script's own directory so relative paths are stable.
cd "$(dirname "$0")" || exit 1
script_pwd=${PWD}

CERTS_DIR="${script_pwd}/../certs"
mkdir -p "${CERTS_DIR}"

# Remove any previous keystore so a fresh one is always generated.
rm -rf "${CERTS_DIR}/android.jks"
keytool -keystore "${CERTS_DIR}/android.jks" -genkey -alias ineat.in

ls -la "${CERTS_DIR}"
| true
|
110734444060d04dad878b2b118675c4111c47fa
|
Shell
|
lidel/dotfiles
|
/archive/bin/psi-mpd-tune.sh
|
UTF-8
| 273
| 2.53125
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
# Improved version of the original script from
# http://psi-im.org/wiki/Publish_Tune
# that takes advantage of MPD idle architecture
# Writes the currently playing track (title/artist/album/track/time, one
# field per line) to ~/.psi/tune, then blocks on `mpc idle` so the file is
# rewritten only when MPD reports a change. Runs forever; stop with Ctrl-C.
# NOTE(review): assumes ~/.psi exists -- the redirection fails otherwise.
while [ 1 ] ; do
mpc current --format "%title%
%artist%
%album%
%track%
%time%" > ~/.psi/tune
mpc idle > /dev/null
done
| true
|
191a3b72b072a699ea4f5c5c4e8e5bfd06c320b0
|
Shell
|
gquittet/dotfiles
|
/scripts/.local/bin/stopwatch_
|
UTF-8
| 686
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Terminal stopwatch: clears the screen and prints elapsed time as
# HH:MM:SS once per second until interrupted (Ctrl-C).
# NOTE(review): each tick is `sleep 1` plus loop work, so the display
# drifts slightly over long runs -- fine for a casual stopwatch.
hours=0
minutes=0
seconds=0
hoursString="0"
minutesString="0"
secondsString="0"
# Zero-pad a number to two digits and echo it.
function renderTime {
if [ $1 -lt 10 ]
then
echo "0$1"
else
echo "$1"
fi
}
while :
do
seconds=$(($seconds+1))
# Compute time
# Roll seconds into minutes, and minutes into hours, at 60.
if [ $seconds -ge 60 ]
then
seconds=0
minutes=$(($minutes+1))
fi
if [ $minutes -ge 60 ]
then
seconds=0
minutes=0
hours=$(($hours+1))
fi
# Render time
hoursString=$(renderTime $hours)
minutesString=$(renderTime $minutes)
secondsString=$(renderTime $seconds)
clear
echo "$hoursString:$minutesString:$secondsString"
sleep 1s
done
| true
|
03e1d6ccc278de2d48c17e7d56af99f6d2f6db27
|
Shell
|
minghao2016/md-scripts-1
|
/DES-IRC/des_irc.py
|
UTF-8
| 699
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# BUG FIX: this file is Python but carried a bash shebang (#!/bin/bash),
# so executing it directly would fail; shebang corrected.
"""Loop through the DES folders matched by a glob pattern (argv[1]) and
submit the Gromacs simulation job in each one via its md.sh script.

Folder names are expected to look like "<compound1>-<compound2><ratio>",
e.g. "chcl-urea12" -- compound2 is the first 3 chars after the dash and the
molar ratio is the remainder.
"""
from pathlib import Path
import os
import sys


def main() -> None:
    """Run ./md.sh in every directory matching the glob given as argv[1]."""
    if len(sys.argv) < 2:
        sys.exit("usage: des_irc.py <glob-pattern>")
    pattern = str(sys.argv[1])
    for entry in Path().glob(pattern):
        if os.path.isdir(entry):
            des = entry.stem.split('-')
            folder = os.path.normpath(entry)
            print(folder)
            compound1 = des[0]
            compound2 = des[1][0:3]
            molar_ratio = des[1][3:]
            molsA_ratio = des[1][3:4]
            # Each "A" unit of the ratio corresponds to 50 molecules
            # -- TODO confirm against md.sh's expectations.
            molsA = int(molsA_ratio) * 50
            print(compound1, compound2)
            # NOTE(review): shell-injection risk if folder names are
            # untrusted; acceptable for locally created simulation dirs.
            os.system(f"cd '{entry}'; ls; ./md.sh {compound1} {compound2} {molar_ratio} {molsA} 7.0 0.2;")
            print("\n")


if __name__ == "__main__":
    main()
| true
|
3c6338581209a9a2ef4c15d967b365aef86cb4fd
|
Shell
|
ThiruYadav/docker-splunk-1
|
/init.sh
|
UTF-8
| 1,127
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Container init for Splunk: relocate etc/ onto the persistent data volume,
# align file ownership with the volume's uid, start syslog-ng and Splunk,
# then block while splunkd runs so the container stays in the foreground.
set -e
# if an 'etc' directory exists within the data dir, symlink it to where 'etc' lives
if [[ -d /opt/splunk/var/lib/splunk/etc ]]; then
mv /opt/splunk/etc /opt/splunk/.etc.old
ln -s /opt/splunk/var/lib/splunk/etc /opt/splunk/etc
fi
# if the etc directory is new, initialize it with the default config
if [[ ! -e /opt/splunk/etc/splunk.version ]]; then
cp -a --no-clobber /opt/splunk/etc.orig/. /opt/splunk/etc/
fi
# move docker-image provided files in place, overwriting any conflicts
cp -a /opt/splunk/.etc-docker/. /opt/splunk/etc/
# Determine the uid that owns the mounted data (fall back to var/ when the
# splunk data dir does not exist yet).
if [[ -e "/opt/splunk/var/lib/splunk" ]]; then
uid="$(stat -L -c '%u' /opt/splunk/var/lib/splunk)"
else
uid="$(stat -L -c '%u' /opt/splunk/var)"
fi
# If the install tree or the 'splunk' account disagree with that uid,
# remap the account and re-own the whole tree so splunkd can write to it.
if [[ "$(stat -L -c '%u' /opt/splunk)" != "$uid" ]] || [[ "$(getent passwd splunk | awk -F: '{ print $3 }')" != "$uid" ]]; then
usermod -u "$uid" splunk
find /opt/splunk -mount -exec chown splunk {} +
fi
syslog-ng --no-caps
splunk start --accept-license --answer-yes --no-prompt
# Stop Splunk cleanly on container shutdown signals.
trap "splunk stop" EXIT INT TERM QUIT
# Block for as long as the first splunkd process is alive.
pid="$(pgrep -x splunkd | head -n 1)"
while kill -0 "$pid" &>/dev/null; do sleep 1; done
| true
|
a883524631c717e7e647136636b429f24afa491d
|
Shell
|
danwatford/trap0-tests
|
/trap_zero_test.sh
|
UTF-8
| 1,471
| 4.3125
| 4
|
[] |
no_license
|
# No hash-bang in this script. We intend it to be run as an argument to the shell to
# permit testing against multiple shells.
#
# trap_zero_test.sh
# Script to test which signals will cause trap zero to be executed for the shell
#
# For each signal under test:
# - launch a child process which has the task writing a string to a file upon execution
# of trap zero. The child process shall send itself the signal and then sleeps until
# timeout after approximately 3 seconds unless the signal handler causes an exit.
# - Observe whether trap zero is triggered by examining the content of the written file.
# Create the temporary file for child processes to write to. Clean up the file on exit.
TEMP_FILE=$(mktemp)
trap "rm $TEMP_FILE" 0
# Print a header
printf "Signal\tName\tTrap 0 Executed\n"
# Signals 1-15 (SIGHUP through SIGTERM).
for signal in $(seq 1 15); do
printf "$signal\t$(kill -l $signal)\t"
# Clear the temporary file ready for writing to by the child process.
: > $TEMP_FILE
# Launch the child process using a script customised to the signal under test.
# The here-doc delimiter is unquoted, so $TEMP_FILE and $signal are expanded
# by THIS shell before the child ($SHELL) ever sees the script; \$\$ is kept
# literal so the child kills itself, not this process.
# NOTE(review): `<<-` strips leading *tabs* only -- if this file is ever
# re-indented with spaces, the terminator will no longer be recognised.
$SHELL -s <<-EOSCRIPT
# Configured the trap
trap "echo Trap0 > $TEMP_FILE" 0
# Send the test signal.
kill -$signal \$\$
# Timeout the process in 3 seconds if not alredy killed.
sleep 3 &
wait
# Reset the trap if it hasn't already been fired.
trap 0
EOSCRIPT
# Observe whether trap 0 was fired in the child process.
result="No"
[ "$(cat $TEMP_FILE)" = "Trap0" ] && result="Yes"
echo $result
done
| true
|
dac64385ee6b509461730f2ddb392706f3f3cb4e
|
Shell
|
bmwiedemann/openSUSE
|
/packages/r/rdma-core/post_download.sh
|
UTF-8
| 1,029
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to be run after updating the spec file from a newer release
# Rewrites rdma-core.spec in place: toggles build options, injects the
# prebuilt-pandoc sources, and removes the pandoc build dependencies so the
# package can build inside the core rings without pandoc installed.
# Enable pyverbs by default
# (conditional on SLE > 12.4; older releases keep pyverbs off)
sed -i -e 's/^%define with_pyverbs .*$/%if 0%{?sle_version} > 120400\n%define with_pyverbs 1\n%else\n%define with_pyverbs 0\n%endif/' rdma-core.spec
# Disable static
sed -i -e 's/^%define with_static .*$/%define with_static 0/' rdma-core.spec
# Fixup pandoc
# To remove a build dependency to pandoc in the core rings, prebuild the pandoc
# tarball and patch the spec file
bash gen-pandoc.sh || exit 1
# Extra Source lines and the %setup snippet to splice into the spec file.
EXTRA_SOURCES="Source2: post_download.sh\nSource3: prebuilt-pandoc.tgz\nSource4: rdma-core-rpmlintrc\nSource5: gen-pandoc.sh\nSource6: get_build.py"
PANDOC_SETUP="#Extract prebuilt pandoc file in the buildlib directory\n(cd buildlib && tar xf %{S:3})"
# Insert the extra Source entries after Source1, drop the pandoc/docutils
# BuildRequires, and extract the prebuilt pandoc tarball right after %setup.
sed -i -e '/Source1:/a '"$EXTRA_SOURCES" rdma-core.spec
sed -i -e '/^BuildRequires: pandoc/d' rdma-core.spec
sed -i -e '/^BuildRequires: python3-docutils/d' rdma-core.spec
sed -i -e '/^%setup /a '"$PANDOC_SETUP" rdma-core.spec
| true
|
b853e0e219fdc8f3862dc23c137dfb0b26faf3ea
|
Shell
|
MoonHouseAaron/health-cloud-configuration
|
/orgInitAndData.sh
|
UTF-8
| 1,182
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialise a Health Cloud scratch org and load the sample data into it.

./orgInit.sh

# Scratch-org alias: first CLI argument, defaulting to "healthcloud" when
# the script is invoked without arguments.
SCRATCH_ORG_NAME=${1-healthcloud}
echo "Using ${SCRATCH_ORG_NAME} alias"

#SFDX DMU plugin: https://github.com/forcedotcom/SFDX-Data-Move-Utility/wiki
#Data Extract from existing org; if needed
#AUthenticate existing org for data extraction
#sfdx force:auth:web:login -a HCTrialOrg
#sfdx sfdmu:run --sourceusername HCTrialOrg --targetusername csvfile -p data/sfdmu/
#sfdx force:data:soql:query -u HCADK -q "Select Id,AccountId,ContactId from AccountContactRelation"

# Grant the custom data-load permission set to the running user.
sfdx force:user:permset:assign -n HC_DataLoad_Custom -u $SCRATCH_ORG_NAME
# Remove any pre-existing records before loading.
sfdx force:apex:execute -f config/cleanup.apex -u $SCRATCH_ORG_NAME
# Load the CSV data set via SFDMU.
sfdx sfdmu:run --sourceusername csvfile -p data/sfdmu/ --noprompt --targetusername $SCRATCH_ORG_NAME
# Post-load setup (sends the user a password reset email).
sfdx force:apex:execute -f config/setup.apex -u $SCRATCH_ORG_NAME

#delete [select id from Case];
#delete [select id from Contact];
#delete [select id from Account];
#delete [select id from HealthCloudGA__ContactContactRelation__c];
#delete [select id from AccountContactRelation];
| true
|
5d31999e9a46292e0b865c4b5ec0e8bd25d48c75
|
Shell
|
LeonSong1995/QTLEnrich
|
/src/concatenate_QTLEnrich_output_files.sh
|
UTF-8
| 781
| 3.734375
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Shell script to concatenate outputs of QTLEnrich into one file
# Author: Andrew Hamel
# Written in the lab of Ayellet Segre, Massachusetts Eye and Ear, Harvard Medical School, Boston, MA
# Date: 12 February 2020

##### Inputs #####
# directory: directory where QTLEnrich output files were generated
# concatenated_filename: designated name of concatenated QTLEnrich set, e.g. /path/to/file/concatenated_set.txt

# Note: depending on the presence of other files in directory,
# user may need to change *.txt to concatenate only relevant files
# Currently assumes directory consists of relevant files

directory=$1
concatenated_filename=$2

# Abort if the directory cannot be entered. (The original ignored cd
# failures, silently concatenating files from the current directory instead.)
cd -- "$directory" || { echo "cannot cd to $directory" >&2; exit 1; }

# Keep the header (line 1) of the first file only, then append the data
# rows of every other .txt file.
awk 'FNR==1 && NR!=1{next;}{print}' *.txt > "$concatenated_filename"
| true
|
0122ebc6a9e0e2afb7204da980b783c0673b20a9
|
Shell
|
sylashsu/dotfiles
|
/install.sh
|
UTF-8
| 970
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive workstation setup: configures git (optionally storing a GitHub
# access token in ~/.netrc) and installs vim with the Vundle plugin manager.
set -e

# Prompt for git identity; optionally write GitHub credentials to ~/.netrc.
function setup_git() {
echo 'Setting up git config...'
read -p 'Enter Github username: ' GIT_USER
git config --global user.name "$GIT_USER"
read -p 'Enter email: ' GIT_EMAIL
git config --global user.email $GIT_EMAIL
git config --global core.editor vim
echo 'Add GitHub access token ?:'
echo '(1) Yes'
echo '(2) No'
read -p 'Enter a number: ' SHELL_CHOICE
if [[ $SHELL_CHOICE == '1' ]] ; then
read -p 'Enter GitHub access token ' GIT_ACCESS_TOKEN
# ~/.netrc stores the token in plain text; keep its permissions tight.
echo "machine github.com login $GIT_USER password $GIT_ACCESS_TOKEN" > ~/.netrc
fi
}

# Install vim, Vundle and the plugins declared in the vimrc.
# NOTE(review): the symlink target 'vimrc' is relative to the current
# working directory, and ~/.vimrc is touched *after* being linked --
# verify this ordering is intentional.
function setup_vim() {
sudo apt-get install vim
sudo git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
sudo ln -s vimrc ~/.vimrc
sudo touch ~/.vimrc
sudo vim +PluginInstall +qall
}

echo "apt-get update..."
sudo apt-get update
echo "setup git.."
setup_git
echo "setup vim.."
setup_vim
echo "Done !"
| true
|
2a1c10b1cff905ec22849e2c47429a681528818b
|
Shell
|
Kot2Na/42-roger-skyline-1
|
/01-network-and-security/09-uppate_scheduled.sh
|
UTF-8
| 741
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a script that updates all the sources of package, then your packages
# and which logs the whole in a file named /var/log/update_script.log.
# Create a scheduled task for this script once a week at 4AM and every
# time the machine reboots.
# (English translation of the Russian duplicate of the task description:)
# Create a script that updates all package sources, then your packages,
# and logs everything to a file named /var/log/update_script.log.
# Schedule this script once a week at 4 AM and on every machine reboot.
# NOTE(review): this file contains only the task description -- the update
# script and the cron/@reboot entries are not implemented here.
| true
|
479b73137135b79d0918d42b14db7285a0690775
|
Shell
|
shu-ando/bin
|
/sshkeyput.sh
|
UTF-8
| 415
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate a local SSH keypair (no passphrase) and append its public key to
# the remote user's authorized_keys, deduplicating existing entries.
# Usage: sshkeyput.sh user host [port]

case "$#" in
  "2" ) port=22 ;;
  "3" ) port=$3 ;;
  * ) printf "Usage: sshkeyput.sh user host [port]\n" ; exit 1 ;;
esac

# -N "" creates the key without a passphrase; prompts if the file exists.
ssh-keygen -C "$USER@$HOSTNAME" -f "$HOME/.ssh/id_rsa" -N ""

# Merge our public key into the remote authorized_keys via a temp copy.
# NOTE(review): assumes the remote file already exists -- scp fails and the
# merge starts from an empty file otherwise.
temp_file=$(mktemp)
scp -p -P "$port" "$1@$2:~/.ssh/authorized_keys" "$temp_file"
cat "$HOME/.ssh/id_rsa.pub" >>"$temp_file"
sort -u "$temp_file" -o "$temp_file"
scp -p -P "$port" "$temp_file" "$1@$2:~/.ssh/authorized_keys"
rm -f "$temp_file"
| true
|
4339ef816b3afc4cbd02e193f34278331401d8b4
|
Shell
|
olsaki/chef-repo
|
/bootstrap
|
UTF-8
| 1,033
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap RVM + Ruby 1.9.3, bundler and berkshelf for this chef-repo.

BIN_RVM=$(which rvm)
if [ -z "$BIN_RVM" ]; then
  BIN_CURL=$(which curl)
  if [ -z "$BIN_CURL" ]; then
    echo "curl not found. Install curl to continue."
  else
    echo "Using $BIN_CURL"
  fi
  # Install RVM from get.rvm.io and load it into this shell.
  $BIN_CURL -L get.rvm.io | bash -s stable --auto
  source /etc/profile.d/rvm.sh
  BIN_RVM=$(which rvm)
  # NOTE(review): CURRENT_USER is never set in this script -- confirm the
  # caller exports it, otherwise usermod receives an empty argument.
  sudo usermod -a -G rvm $CURRENT_USER
  rvm --quiet-curl --summary requirements
  rvm --quiet-curl --summary install 1.9.3
  rvm --default use 1.9.3
  if [ -w "$HOME/.bash_profile" ]; then
    echo "source /etc/profile.d/rvm.sh" >> "$HOME/.bash_profile"
  elif [ -w "$HOME/.profile" ]; then
    # BUG FIX: this branch previously re-tested .bash_profile while writing
    # to .profile, so the .profile fallback could never trigger correctly.
    echo "source /etc/profile.d/rvm.sh" >> "$HOME/.profile"
  else
    echo "No writable profile file found. Make sure to 'source /etc/profile.d/rvm.sh' in your login profile to have RVM available."
  fi
fi

echo "Using $BIN_RVM as rvm"
echo "Using $(rvm current) as RVM environment"

gem install bundler --no-ri --no-rdoc
BIN_BUNDLE=$(which bundle)
echo "Using $BIN_BUNDLE as bundler"
bundle install
berks install --path cookbooks
| true
|
818203343d21d59538329b49308d647b67b24a32
|
Shell
|
prophetyang/namespace
|
/containerPID.sh
|
UTF-8
| 192
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print "<container-id> <pid>" for every running Docker container.
containers=$(docker ps | awk '{ if (FNR > 1) print $1 }')
# Intentionally unquoted: split the id list on whitespace.
for container in $containers; do
  # `docker top` line 1 is the header; field 2 of each data line is the PID.
  pid=$(docker top "$container" | awk '{ if (FNR>1) print $2}')
  echo "$container $pid"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.