blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5c10ed577ec2de95e94ca287dadb0dde675f42f7 | Shell | Kedarix/2021-04-12-alx-linux | /petle5.sh | UTF-8 | 86 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Print every multiple of 3 from 0 up to 40 (seq stops at 39, the last
# multiple within the bound). $(...) replaces the deprecated backtick
# form of command substitution.
for n in $(seq 0 3 40); do
	echo "Kolejna wielokrotność 3 to: $n"
done
| true |
65e31ac04dc4d6a37f63da226bb0b3b86f683664 | Shell | delkyd/alfheim_linux-PKGBUILDS | /nodejs-groovebasin/PKGBUILD | UTF-8 | 2,199 | 2.71875 | 3 | [] | no_license | # Maintainer: Christoph Gysin <christoph.gysin@gmail.com>
# Contributor: Jeremy "Ichimonji10" Audet <ichimonji10 at gmail dot com>
# Contributor: Andrew Kelley <superjoe30@gmail.com>
# Contributor: superjoe <superjoe30@gmail.com>
#
# makepkg warns "Package contains reference to $pkgdir". This is OK. See:
# https://github.com/andrewrk/groovebasin/issues/214
# Package metadata: upstream "groovebasin" published under a nodejs- prefix;
# _pkgname strips the prefix to recover the upstream project name.
pkgname=nodejs-groovebasin
_pkgname="${pkgname#nodejs-}"
pkgver=1.5.1
pkgrel=1
pkgdesc='Music player server with a web-based user interface inspired by Amarok 1.4'
arch=('i686' 'x86_64')
url='http://groovebasin.com/'
license=(MIT)
depends=(nodejs libgroove)
makedepends=(python2 npm)
# Preserve the runtime configuration file across package upgrades.
backup=('etc/groovebasin.json')
install=groovebasin.install
# Release tarball plus three local support files (wrapper, config, unit).
source=("https://github.com/andrewrk/groovebasin/archive/${pkgver}.tar.gz"
groovebasin
groovebasin.json
groovebasin.service)
# Checksums in the same order as the source array above.
sha256sums=('6414302c6c6109ccec5626780c6187511325d85c5ccf95e93adc52fa72769905'
'5169f64bbe305959d6c2c76f73b10c3a604586cb884c78e9b620e476f45132df'
'd4e6f06b601b16304199f61bce662ccc8e34842ddb0f8f688eae6e0be150e8df'
'fca2b5d94cef9e5b70936bdb47c4a69724050d657fe72f471f989dce933a1caa')
# makepkg build step: run the project's npm build inside the unpacked
# source tree, forcing node-gyp to use python2 (the build scripts are
# not python3-compatible).
build() {
cd "${srcdir}/${_pkgname}-${pkgver}"
npm \
--python=python2 \
run build
}
# makepkg packaging step: npm-install the built tree globally into
# $pkgdir, then lay out the wrapper script, license, config symlinks,
# certificate symlinks and the systemd unit.
package() {
npm \
--python=python2 \
install \
--user root \
--global \
--prefix "${pkgdir}/usr" \
"${srcdir}/${_pkgname}-${pkgver}"
install -Dm755 "${srcdir}/groovebasin" "${pkgdir}/usr/bin/${_pkgname}"
install -Dm644 "${srcdir}/${_pkgname}-${pkgver}/LICENSE" \
"${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
# NOTE(review): uid/gid 49 is presumably the dedicated service account
# created by groovebasin.install — verify against that script.
install -d -g 49 -o 49 "${pkgdir}/var/lib/${_pkgname}"
install -Dm644 "${srcdir}/${_pkgname}.json" "${pkgdir}/etc/${_pkgname}.json"
# The daemon reads config.json from its state dir; point it at /etc.
ln -sf "/etc/${_pkgname}.json" "${pkgdir}/var/lib/${_pkgname}/config.json"
install -d -g 49 -o 49 "${pkgdir}"/var/lib/groovebasin/certs
ln -sf /usr/lib/node_modules/groovebasin/certs/self-signed-key.pem \
"${pkgdir}"/var/lib/groovebasin/certs
ln -sf /usr/lib/node_modules/groovebasin/certs/self-signed-cert.pem \
"${pkgdir}"/var/lib/groovebasin/certs
install -Dm644 "${srcdir}"/groovebasin.service \
"${pkgdir}"/usr/lib/systemd/system/groovebasin.service
}
# vim:set ts=2 sw=2 et:
| true |
ba7f04b073a3f555e51a7e53dba7eacd5274f153 | Shell | jrayzero/SeqMirror | /.github/actions/build-precise/entrypoint.sh | UTF-8 | 2,294 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh -l
# CI entrypoint (Ubuntu precise container): install the toolchain,
# build the Seq compiler, smoke-test it, and package release archives.
set -e
# setup
cd /github/workspace
apt-get update
apt-get -y install build-essential sudo curl wget git zlib1g-dev libbz2-dev liblzma-dev python-software-properties apt-transport-https ca-certificates
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo add-apt-repository -y ppa:fkrull/deadsnakes
sudo apt-add-repository 'deb https://apt.llvm.org/precise/ llvm-toolchain-precise-4.0 main'
# workaround: https://github.com/skyportal/skyportal/commit/6e639e4b4af93323095b22bb3994ccc358a4b379
sudo rm -f /etc/apt/sources.list.d/mongodb*
sudo rm -f /etc/apt/sources.list.d/couchdb*
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 762E3157
sudo apt-get -q update
sudo apt-get -y install clang-4.0 clang++-4.0 python3.5 python3.5-dev
sudo ln -s /usr/bin/clang-4.0 /usr/bin/clang
sudo ln -s /usr/bin/clang++-4.0 /usr/bin/clang++
wget https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-Linux-x86_64.sh
sudo sh cmake-3.18.1-Linux-x86_64.sh --prefix=/usr --skip-license
wget -q -O - https://bootstrap.pypa.io/get-pip.py | sudo python3.5
export CC=clang
export CXX=clang++
# deps: build third-party dependencies only on a cold cache.
if [ ! -d ./deps ]; then
/bin/bash scripts/deps.sh 2;
fi
# env
export PYTHONPATH=$(pwd)/test/python
export SEQ_PATH=$(pwd)/stdlib
export SEQ_HTSLIB=$(pwd)/deps/lib/libhts.so
export SEQ_PYTHON=$(python3.5 test/python/find-python-library.py)
# build
mkdir build
ln -s $(pwd)/deps/lib/libomp.so $(pwd)/build/libomp.so
# NOTE(review): -DSEQ_DEP uses $(pwd)/../deps while deps live in
# $(pwd)/deps — confirm cmake resolves this relative to the build dir.
(cd build && cmake .. -DCMAKE_BUILD_TYPE=Release \
-DSEQ_DEP=$(pwd)/../deps \
-DCMAKE_C_COMPILER=${CC} \
-DCMAKE_CXX_COMPILER=${CXX})
cmake --build build --config Release -- VERBOSE=1
# test: run the unit-test binary and compile a hello-world program.
build/seqtest
build/seqc test/core/helloworld.seq
# package
export SEQ_BUILD_ARCHIVE=seq-$(uname -s | awk '{print tolower($0)}')-$(uname -m).tar.gz
export SEQ_DEP_ARCHIVE=seq-deps-linux.tar.bz2
mkdir -p seq-deploy/bin seq-deploy/lib/seq
cp build/seqc seq-deploy/bin/
cp build/libseq*.so seq-deploy/lib/seq/
cp deps/lib/libomp.so seq-deploy/lib/seq/
cp -r stdlib seq-deploy/lib/seq/
tar -czf ${SEQ_BUILD_ARCHIVE} seq-deploy
tar -cjf ${SEQ_DEP_ARCHIVE} deps
du -sh seq-deploy
du -sh deps
ls -lah ${SEQ_DEP_ARCHIVE}
| true |
4f16512c3551d166c4b354f384565280555f2c98 | Shell | koraa/cupdev.net | /package.d/preinstall.sh | UTF-8 | 394 | 2.734375 | 3 | [] | no_license | #! /bin/sh
# Resolve the script's own directory so the helpers can be sourced
# regardless of the caller's working directory.
dir="`dirname "$0"`"
. "$dir"/common.sh
. "$dir/lib/wgit.sh"
################### Dirs #############################
# NOTE(review): $libs and $meta are presumably defined by common.sh —
# verify before relying on them here.
mkdir -p "$libs" "$meta"
#################### GIT Install ####################
# These repos are cloned manually in order to create
# shallow clones which accellerates the installation
# process enormously
NOINSTALL=1 wgithub isagalaev/highlight.js highlight.js
| true |
b8a60870a148f8d7d10a369708f7a67d7f18e38d | Shell | lukeseawalker/aws-parallelcluster-cookbook | /system_tests/test_centos7.sh | UTF-8 | 439 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e

# Command to run inside the container; defaults to the systemd driver.
# ${1:-...} replaces the original '[ "$run_command" = "" ]' check.
run_command=${1:-/build/system_tests/systemd}

docker build . -t chef-base:centos7 -f system_tests/Dockerfile.centos7

# Force base_os to "centos7" in the chef attributes. sed reads the
# file directly — no need to pipe it through cat.
sed 's/\(.*base_os":\).*/\1 "centos7",/' system_tests/dna.json > /tmp/dna.json

# Run the configure step with the workspace and patched dna.json mounted.
docker run -ti \
  --rm=true \
  --name=chef_configure \
  -v "$PWD":/build \
  -v /tmp/dna.json:/etc/chef/dna.json \
  chef-base:centos7 \
  "$run_command"
| true |
3e40f13c2e4d2bf73a4b1218609f09408b148c85 | Shell | rebuy-de/partial-deployment-cleanup | /hack/deps.sh | UTF-8 | 264 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run from the repository root regardless of the invocation directory.
# Quoting protects against paths containing whitespace; abort if cd fails.
cd "$(dirname "$0")/.." || exit 1

set -ex

# Derive the version from git metadata, using '.' instead of '-' so the
# result is usable in artifact/package names.
export VERSION=$(git describe --always --dirty | tr '-' '.')
export BINARY_NAME=partial-deployment-cleanup-${VERSION}

# Put the vendored consul binary first on PATH for the wrapper scripts.
export PATH="$(readlink -f target/consul):${PATH}"

hack/consulw.sh version
hack/glidew.sh install
| true |
c7f688a636c8f1e16f8bd4363b95fd5be2d8a639 | Shell | Leandroafm21/aprimoramento_do_diceware | /extracao/dicionario/divextractor_sub.sh | UTF-8 | 471 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# For each letter a-z: copy the extractor into a per-letter workdir and run
# 50 extraction passes, collecting the output files into the shared
# 'compilado' directory; finally merge everything into final.txt.
for i in {a..z}; do
cp wordExtractor $i
cd $i
echo "Extraindo substantivos com a letra $i..."
for (( j=1; j<=50; j++ )); do
arq="$i-$j.txt"
./substantiveExtractor $j $arq
# NOTE(review): hard-coded absolute destination path — adjust per machine.
cp $arq /home/leandro/IC/new/wordSelector/extracao/substantivo/compilado
done
cd ..
done
cd compilado
echo "Juntando arquivos..."
# Concatenate the 50 per-letter files into one intermediate per letter.
for i in {a..z}; do
arq="intermed$i.txt"
cat $i-* > $arq
done
# Merge the 26 intermediates into the final word list.
cat intermed* > final.txt
cd ..
| true |
8765b0559595a8bcc21cc8aa3b6d5a2dd1c07c4d | Shell | mnulle/Reduduncy_Detection_Tool | /sweeper.sh | UTF-8 | 260 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Sweep the cache-expiry time from 0 to 360 seconds in 5-second steps.
for variable in {0..360..5}
do
	# Analyse the capture file given in $1 with the current expiry value,
	# appending the result to test-output.txt. "$1" is quoted so file
	# names containing spaces survive word-splitting.
	./analyse-file "$variable" "$1" >> test-output.txt
	# Show each result as it is produced.
	tail -1 test-output.txt
done
| true |
53e41f8c554ce76988f3cc131dcea7293f3e2f05 | Shell | OSSSP/cgc-monitor | /cgc-monitor/zk/monitorUtils/killMonitorWatcher | UTF-8 | 237 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Find the PID(s) of a running monitorRamWatcher. The '[m]' bracket trick
# keeps grep from matching its own process-table entry; 'grep -v tail'
# excludes anyone tailing the watcher's log.
PROC=$(ps aux | grep '[m]onitorRamWatcher' | grep -v tail | awk '{print $2}')
if [ -z "$PROC" ]; then
echo "no monitorRamWatcher running"
else
echo "kill monitorRamWatcher, running as $PROC"
# $PROC is deliberately unquoted: if several instances matched, each PID
# becomes a separate argument to kill.
sudo kill -9 $PROC
fi
| true |
17886001fd2650f31215adf1df993bd8c9d1e718 | Shell | sisamiwe/plugins | /stateengine/autblind_update.sh | UTF-8 | 2,554 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# Absolute directory containing this script (resolved via cd+pwd).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Full `uname -a` string, used below to detect macOS vs Linux.
SCRIPTSYSTEM=$(uname -a)
# Return success when $2 contains $1 as a substring.
# Edge cases mirror the original parameter-expansion idiom: an empty
# pattern matches anything (including the empty string), while a
# non-empty pattern never matches an empty string.
stringContain() {
    case "$2" in
        *"$1"*) return 0 ;;
        *) return 1 ;;
    esac
}
# Rewrite old plugin prefixes in all item YAML files (macOS/BSD sed).
# NOTE(review): '-i {}' makes BSD sed treat "{}" as a backup suffix,
# leaving *.yaml{} backup files behind — '-i ""' was probably intended;
# verify on a Mac before relying on this.
items_mac () {
echo "Updating item yaml on Mac OS X"
sudo sed -i {} 's/as_/se_/g' ../../items/*.yaml 2>&1
sudo sed -i {} 's/autostate_/stateengine_/g' ../../items/*.yaml 2>&1
sudo sed -i {} 's/autoblind/stateengine/g' ../../items/*.yaml 2>&1
}
# Same rewrites with GNU sed's in-place flag (no backup suffix).
items_linux () {
echo "Updating item yaml on Linux"
sudo sed -i 's/as_/se_/g' ../../items/*.yaml 2>&1
sudo sed -i 's/autostate_/stateengine_/g' ../../items/*.yaml 2>&1
sudo sed -i 's/autoblind/stateengine/g' ../../items/*.yaml 2>&1
}
update_items () {
    # Dispatch to the platform-specific item updater. "$SCRIPTSYSTEM"
    # (the full `uname -a` string) is quoted so stringContain receives it
    # as a single argument; the original passed it unquoted, relying on
    # word-splitting to hand over only the first word.
    if stringContain 'Darwin' "$SCRIPTSYSTEM"; then
        items_mac
    else
        items_linux
    fi
}
# Rewrite old plugin prefixes in all logic scripts (macOS/BSD sed).
# NOTE(review): same '-i {}' backup-suffix oddity as items_mac — verify.
logics_mac () {
echo "Updating logics on Mac OS X"
sudo sed -i {} 's/autostate_/stateengine_/g' ../../logics/*.py 2>&1
sudo sed -i {} 's/autoblind/stateengine/g' ../../logics/*.py 2>&1
}
# Same rewrites with GNU sed's in-place flag.
logics_linux () {
echo "Updating logics on Linux"
sudo sed -i 's/autostate_/stateengine_/g' ../../logics/*.py 2>&1
sudo sed -i 's/autoblind/stateengine/g' ../../logics/*.py 2>&1
}
update_logics () {
    # Dispatch to the platform-specific logics updater. Quoting
    # "$SCRIPTSYSTEM" passes the whole `uname -a` string as one argument
    # instead of depending on word-splitting (same fix as update_items).
    if stringContain 'Darwin' "$SCRIPTSYSTEM"; then
        logics_mac
    else
        logics_linux
    fi
}
# Rename cached value files so the plugin finds them under its new name:
# any cache file containing "autoblind" or "autostate" is moved to the
# same name with that part replaced by "stateengine".
rename_files () {
find ../../var/cache/ -type f -name '*autoblind*' -exec bash -c 'mv "$0" "${0/\autoblind/stateengine}"' {} \;
find ../../var/cache/ -type f -name '*autostate*' -exec bash -c 'mv "$0" "${0/\autostate/stateengine}"' {} \;
}
# Interactive driver: ask the user whether to run each of the three
# migration steps (items, logics, cache files) and dispatch accordingly.
cd $DIR
echo "Changed to directory: $DIR. Running on $SCRIPTSYSTEM"
echo "This script replaces as_* entries by se_* entries, autostate and autoblind entries by stateengine."
echo "Do you want to update all item yaml files?"
select rerun in "Update" "Skip"; do
case $rerun in
"Update" ) update_items;break;;
"Skip" ) echo "Skipping"; break;;
*) echo "Skipping"; break;;
esac
done
echo "Do you want to update your logics files and replace autostate/autoblind entries by stateengine?"
select rerun in "Update" "Skip"; do
case $rerun in
"Update" ) update_logics;break;;
"Skip" ) echo "Skipping"; break;;
*) echo "Skipping"; break;;
esac
done
echo ""
echo "This script renames all cache files including autoblind or autostate to stateengine"
echo "Do you want to rename cache files?"
select rerun in "Rename" "Skip"; do
case $rerun in
"Rename" ) rename_files;break;;
"Skip" ) echo "Skipping"; break;;
*) echo "Skipping"; break;;
esac
done
| true |
595ba5675bb71209bc7b24647cf3e0337b9b4ab1 | Shell | AtomGraph/LinkedDataHub | /scripts/create-document.sh | UTF-8 | 2,175 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
print_usage()
{
    # Emit the usage/help text on stdout. A single here-document replaces
    # the original sequence of printf calls; the unquoted delimiter lets
    # $0 expand to the invoked script name, exactly as before.
    cat <<EOF
Creates an RDF document.

Usage: cat data.ttl | $0 options TARGET_URI

Options:
 -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent
 -p, --cert-password CERT_PASSWORD Password of the WebID certificate
 --proxy PROXY_URL The host this request will be proxied through (optional)

 -t, --content-type MEDIA_TYPE Media type of the RDF body (e.g. text/turtle)
EOF
}
# Abort early when curl is not available.
hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. Aborting."; exit 1; }
# Manual option parsing: recognised flags are consumed; anything else is
# collected in 'unknown' and restored as positional arguments afterwards.
unknown=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-f|--cert-pem-file)
cert_pem_file="$2"
shift # past argument
shift # past value
;;
-p|--cert-password)
cert_password="$2"
shift # past argument
shift # past value
;;
--proxy)
proxy="$2"
shift # past argument
shift # past value
;;
-t|--content-type)
content_type="$2"
shift # past argument
shift # past value
;;
*) # unknown option
unknown+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${unknown[@]}" # restore args
# All three options plus exactly one positional argument are mandatory.
if [ -z "$cert_pem_file" ] ; then
print_usage
exit 1
fi
if [ -z "$cert_password" ] ; then
print_usage
exit 1
fi
if [ -z "$content_type" ] ; then
print_usage
exit 1
fi
if [ "$#" -ne 1 ]; then
print_usage
exit 1
fi
target="$1"
if [ -n "$proxy" ]; then
# rewrite target hostname to proxy hostname
# (cut fields 1-3 of a URL split on '/' yield "scheme://host")
target_host=$(echo "$target" | cut -d '/' -f 1,2,3)
proxy_host=$(echo "$proxy" | cut -d '/' -f 1,2,3)
target="${target/$target_host/$proxy_host}"
fi
# POST RDF document from stdin to the server and print Location URL
# (tr strips CRs from the headers; sed extracts the Location value).
cat - | curl -v -k -E "$cert_pem_file":"$cert_password" -d @- -H "Content-Type: ${content_type}" -H "Accept: text/turtle" "$target" -v -D - | tr -d '\r' | sed -En 's/^Location: (.*)/\1/p'
33be333d1d8f3d0b95ef906144a118283bc43d23 | Shell | centreon/centreon-plugins | /connectors/vmware/packaging/debian/centreon_vmware-init | UTF-8 | 2,973 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: centreon_vmware.pl
# Required-Start: $local_fs $network
# Required-Stop: $local_fs $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start:
# Should-Stop:
# Short-Description: Start daemon centreon_vmware.pl at boot
# Description:
### END INIT INFO
# SysV init-script configuration for the centreon-vmware connector daemon.
PKGNAME=centreon_vmware
DESC="centreon-vmware"
DAEMON=/usr/bin/centreon_vmware.pl
PIDFILE=/var/run/centreon/centreon_vmware.pid
FOLDER=/var/run/centreon/
if [ ! -d "$FOLDER" ]; then # Control will enter here if $DIRECTORY doesn't exist.
mkdir $FOLDER
fi
# Refuse to continue when the daemon binary is missing or not executable.
if [ ! -x "${DAEMON}" ]; then
echo "The program ${DAEMON} does not exists or is not executable"
exit 3
fi
# Include the default user configuration if exists
[ -r /etc/default/${PKGNAME} ] && . /etc/default/${PKGNAME}
# Load the VERBOSE setting and other rcS variables
[ -f /etc/init/vars.sh ] && . /etc/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions
# Startup is opt-in: both RUN_AT_STARTUP=YES and CENTREON_USER must be
# set in /etc/default/centreon_vmware.
if [ -z "${RUN_AT_STARTUP}" -o "${RUN_AT_STARTUP}" != "YES" ]; then
log_warning_msg "Not starting $PKGNAME, edit /etc/default/$PKGNAME to start it."
exit 0
fi
if [ -z "${CENTREON_USER}" ]; then
log_warning_msg "Not starting $PKGNAME, CENTREON_USER not set in /etc/default/$PKGNAME."
exit 0
fi
# Start the daemon via start-stop-daemon.
# Returns: 0 started, 1 the --test dry-run failed (e.g. already running),
# 2 the real start failed.
do_start()
{
# First pass with --test: only check whether starting would succeed.
start-stop-daemon --start --background --quiet --pidfile ${PIDFILE} --exec ${DAEMON} \
--chuid ${CENTREON_USER} --user ${CENTREON_USER} --test -- $OPTIONS
[ "$?" = "0" ] || return 1
# Second pass actually starts the daemon and writes the pidfile.
start-stop-daemon --start --background --quiet --pidfile ${PIDFILE} --exec ${DAEMON} \
--make-pidfile --chuid ${CENTREON_USER} --user ${CENTREON_USER} -- $OPTIONS
[ "$?" = "0" ] || return 2
return 0
}
# Stop the daemon: send TERM, wait up to 30s, then KILL and wait 5s more.
# Returns: 0 stopped and pidfile removed, 1 pidfile removal failed,
# 2 start-stop-daemon could not stop the process.
do_stop()
{
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --user ${CENTREON_USER} --pidfile ${PIDFILE}
[ "$?" = "2" ] && return 2
rm -rf ${PIDFILE}
[ "$?" = 0 ] && return 0 || return 1
}
# LSB action dispatch: start/stop/status/restart with the usual
# log_daemon_msg / log_end_msg bookkeeping (silenced when VERBOSE=no).
case "$1" in
start)
[ "${VERBOSE}" != "no" ] && log_daemon_msg "Starting ${DESC}" "${PKGNAME}"
do_start
case "$?" in
0|1) [ "${VERBOSE}" != "no" ] && log_end_msg 0 ;;
2) [ "${VERBOSE}" != "no" ] && log_end_msg 1 ;;
esac
;;
stop)
[ "${VERBOSE}" != no ] && log_daemon_msg "Stopping ${DESC}" "${PKGNAME}"
do_stop
case "$?" in
0|1) [ "${VERBOSE}" != no ] && log_end_msg 0 ;;
2) [ "${VERBOSE}" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc ${DAEMON} ${PKGNAME} -p ${PIDFILE}
;;
restart|force-reload)
[ "${VERBOSE}" != no ] && log_daemon_msg "Restarting ${DESC}" "${PKGNAME}"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;;
*) log_end_msg 1 ;;
esac
;;
*) log_end_msg 1 ;;
esac
;;
*)
# NOTE(review): ${SCRIPTNAME} is never set in this script, so the usage
# line prints an empty name — confirm whether it should be "$0".
echo "Usage: ${SCRIPTNAME} (start|stop|status|restart|force-reload)" >&2
exit 3
esac
7a0da80f2ca8012a57081e71e101ef358f91393f | Shell | Dick-And-Co/OJO | /ojo.sh | UTF-8 | 204 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Poll loop: every ~20s grab one frame from the fridge webcam
# (/dev/video1) and run the OJ-level estimator on it. Runs forever.
while true;
do echo "Getting Fridge Snapshot"
ffmpeg -y -f video4linux2 -i /dev/video1 -vframes 1 fridge.jpg
echo "Calculating OJ Percent"
python get_oj_percent.py fridge.jpg
sleep 20s
done
| true |
e8dde5c524e25f730bb977180061961eef4616a3 | Shell | 99002657/MINI_PROJECT_LINUX | /LINUX_PROJECT/release.sh | UTF-8 | 941 | 3.390625 | 3 | [] | no_license | #!/bin/bash
#
# Build a release zip in $HOME: collect sources/docs into a staging
# folder, compile the LaTeX reports into it, zip it, then clean up.
readonly NAME="cs5348_project_2"
readonly RELEASE_FOLDER="${HOME}/${NAME}"
readonly RELEASE_ZIP="${HOME}/${NAME}.zip"
# delete previous release zip
if [ -f "$RELEASE_ZIP" ]; then
rm "$RELEASE_ZIP"
fi
mkdir -p "$RELEASE_FOLDER"/src
# copy source files
cp theater_simulation/*.cc theater_simulation/*.h theater_simulation/CMakeLists.txt "$RELEASE_FOLDER"/src
# copy readme.txt
cp theater_simulation/readme.txt "$RELEASE_FOLDER"
# copy testcase
cp movies.txt "$RELEASE_FOLDER"
# compile summary.tex (pdflatex writes its output into the staging dir)
pushd summary
pdflatex -output-directory="$RELEASE_FOLDER" summary.tex
popd
# compile design.tex
pushd design
pdflatex -output-directory="$RELEASE_FOLDER" design.tex
popd
# clean auxiliary files
pushd "$RELEASE_FOLDER"
rm *.aux *.log
popd
# package all files (zip from $HOME so paths inside are relative)
pushd "${HOME}"
zip -r "$RELEASE_ZIP" "$NAME"/*
chmod 777 "$RELEASE_ZIP"
popd
# delete release folder
if [ -d "$RELEASE_FOLDER" ]; then
rm -rf "$RELEASE_FOLDER"
fi
| true |
2663bb776c3016afeb9c664e934f5808fab1f82e | Shell | michaellindahl/homestead-apache-customization | /serve-apache.sh | UTF-8 | 606 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
# Generate and enable an Apache virtual host.
# Arguments:
#   $1 - server name (also names the conf and log files)
#   $2 - document root
#   $3 - listen port (optional, defaults to 80)
#   $5 - directory for the access/error logs
# NOTE(review): $4 is unused — confirm against the callers.
#
# The Directory path is emitted with literal double quotes (\"$2\") so the
# generated config stays valid when the document root contains spaces; in
# the original the quotes were consumed by the surrounding shell string.
apacheblock="<VirtualHost $1:${3:-80}>
ServerName $1
DocumentRoot $2
<Directory \"$2\">
Require all granted
</Directory>
# Possible values include: debug, info, notice, warn, error, crit, alert, emerg.
LogLevel warn
ErrorLog $5/$1.error.log
CustomLog $5/$1.access.log combined
</VirtualHost>"

sudo mkdir --parents /etc/apache2/sites-available/
sudo mkdir --parents /etc/apache2/sites-enabled/

# Write the vhost and enable it by symlinking into sites-enabled.
echo "$apacheblock" > "/etc/apache2/sites-available/$1.conf"
ln -fs "/etc/apache2/sites-available/$1.conf" "/etc/apache2/sites-enabled/$1.conf"
| true |
72a3e7b0bcf5cde2177b36d3ea9c8de4f1ce7e00 | Shell | couchbase/build-infra | /terraform/gerrit/files/scripts/gerrit-get-secrets | UTF-8 | 1,309 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Fetch gerrit secrets from AWS SSM Parameter Store, in two passes: plain
# parameters first, then base64-encoded ones, decoding on write.
for encoding in none base64
do
# Get the names of the secrets we want from parameter store
secrets=$(aws ssm --region us-east-2 describe-parameters --parameter-filters "Key=tag:Consumer,Values=gerrit" "Key=tag:Encoding,Values=${encoding}" | jq -r ".Parameters[].Name")
target_dir="/home/ec2-user/"
for secret in ${secrets}
do
echo "Reading ${secret}"
# We're composing filenames from parameters like: gerrit__.ssh__config
# where each __ is replaced with a / to construct the path.
#
# The first component of this string specifies the application, whatever
# remains as the path.
#
# e.g:
# gerrit__.ssh__config = [/home/ec2-user/].ssh/config
secret_path="${target_dir}$(echo ${secret} | sed -e"s/__/\//g;s/^gerrit\///g")"
mkdir -p $(dirname ${secret_path})
param=$(aws ssm get-parameter --region us-east-2 --with-decryption --name ${secret} | jq -r ".Parameter.Value")
case ${encoding} in
none)
echo "${param}" > "${secret_path}"
;;
base64)
echo "${param}" | base64 --decode > "${secret_path}"
;;
esac
done
# Lock down the SSH material for the (uid 1000) service user.
chown -R 1000:1000 /home/ec2-user/.ssh
chmod 600 /home/ec2-user/.ssh/*
done
| true |
d28e84318e43dfcfdb6e60729685a1f4f31123fc | Shell | opatut/BukkitPlugins | /StopMobDrop/project_setup.sh | UTF-8 | 385 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Rename the plugin project to $1: patch plugin.yml and build.xml, then
# create a stub main class under the matching package directory.
# Every expansion of $NAME is quoted so the name reaches sed/mkdir as a
# single word (the original relied on unquoted expansions not splitting).
NAME=$1

sed -i 's/^name:.*$/name: '"$NAME"'/g' src/main/resources/plugin.yml
sed -i 's/^main:.*$/main: me.opatut.bukkit.'"$NAME"'.'"$NAME"'/g' src/main/resources/plugin.yml
sed -i 's/%PROJECT_NAME%/'"$NAME"'/g' build.xml

mkdir -p "src/main/java/me/opatut/bukkit/$NAME/"
echo "class $NAME {}" >> "src/main/java/me/opatut/bukkit/$NAME/$NAME.java"
echo "Project setup done."
| true |
3f1fed405a3d77980493d700903d0d4a0376b6fc | Shell | kapitsa2811/ganplan | /tileclient/ttof.sh | UTF-8 | 99 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
cat | tr -d '[],' | while read col row zoom
do
echo "${zoom}_${col}_${row}.png"
done
| true |
1d91937a745906d01ea6e4b45ba30704b5bb76c7 | Shell | grepableoutput/Penetration-Testing-Bash-Scripts- | /MSFShodanTargetsToTXTFile.sh | UTF-8 | 1,765 | 3.8125 | 4 | [] | no_license | #!/bin/bash
### Global variables ###
# NOTE(review): 'arg' is assigned but never read below — confirm intent.
arg=$#
API=$1
QUERY=$2
WORKSPACE=$3
PATHTOFILE=/tmp/resource.rc
# Captured once at startup: the status field of `service postgresql status`.
CHECK=$(service postgresql status | grep active | cut -d ":" -f 2)
OUTPUT=hostsPerLine.txt
# ANSI color escape sequences (interpreted later by `echo -e`).
GREEN="\033[32m"
NC="\033[0m"
RED="\033[31m"
YELLOW="\033[33m"
_banner()
{
    # Print the tool banner: a blank line, a boxed three-line header with
    # ANSI colors around the author credit, then a trailing blank line.
    # printf '%b' interprets the \033 escapes stored in $RED/$GREEN/$NC,
    # matching the behaviour of the original 'echo -e' calls.
    printf '\n'
    printf '%b\n' " +--------------------------------------+"
    printf '%b\n' " + MSFShodanTargetsToTXTFile +"
    printf '%b\n' " + $RED Author $NC:$GREEN R@mi@hmed $NC +"
    printf '%b\n' " +--------------------------------------+"
    printf '\n'
}
# Write a Metasploit resource script to $PATHTOFILE that creates/selects
# the workspace, runs the shodan_search module with the given API key and
# query, and exports the results as XML and CSV.
_CreateResourceFile()
{
echo "
workspace -a $WORKSPACE
workspace $WORKSPACE
use auxiliary/gather/shodan_search
set verbose true
set database true
set shodan_apikey $API
set query $QUERY
db_export -f xml $WORKSPACE.xml
hosts -o $WORKSPACE.csv
run
exit
" > $PATHTOFILE
}
# Ensure postgresql is running (required for Metasploit's database):
# if the captured status contains "inactive", try to start the service
# and bail out if it still is not up.
_CheckPostgresql()
{
## Enabling regex
# NOTE(review): extglob affects glob patterns, not [[ =~ ]] — likely
# unnecessary here; verify before removing.
shopt -s extglob
# The regex (in)active matches only the substring "inactive" — the
# parentheses merely group, they do not make "in" optional.
if [[ ${CHECK} =~ (in)active ]] ;
then
echo -e "$RED[!]$NC service postgresql down "
echo -e "$YELLOW[+]$NC starting postgresql "
service postgresql start
if [[ `service postgresql status | grep active | cut -d " " -f 6 | sed 's/(//g' | sed 's/)//g'` =~ exited ]] ;
then
echo -e "$GREEN[+]$NC started successfully "
else
echo -e "$RED[!]$NC couldn't start postgresql , quiting !!"
exit 1
fi
fi
}
# Launch msfconsole and have it replay the generated resource script.
_StartMetasploit()
{
msfconsole -x " resource $PATHTOFILE "
}
# Post-process the exported CSV: take the first column (host address),
# drop the header row, strip quotes, and write one host per line.
_main()
{
echo -e "$GREEN[+]$NC Generating a hosts per line file"
sleep 3
cat $WORKSPACE.csv | cut -d , -f 1 | awk '{ if (NR !=1 ) {print}}' | sed 's/"//g' > $OUTPUT
echo -e "$GREEN[+]$NC Done !!"
}
# Main flow: banner, generate the resource file, make sure postgresql is
# up, run Metasploit against it, then extract the host list.
_banner
_CreateResourceFile
echo -e "$GREEN[+]$NC configurations created successfully ==> /tmp/resource.rc"
_CheckPostgresql
_StartMetasploit
_main
| true |
1bf7e318d14196aeefc9096319ab5604587ec39f | Shell | whatsupcoders/flutter-desktop-embedding | /tools/build_flutter_assets | UTF-8 | 1,504 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the necessary Flutter commands to build the Flutter assets
# than need to be packaged in an embedding application.
# It should be called with one argument, which is the directory of the
# Flutter application to build.
readonly base_dir="$(dirname "$0")"
readonly flutter_dir="$("$base_dir/flutter_location")"
readonly flutter_binary="$flutter_dir/bin/flutter"
# To use a custom Flutter engine, uncomment the following variables, and set
# engine_src_path to the path on your machine to your Flutter engine tree's
# src/ directory (and build_type if your engine build is not debug).
#readonly engine_src_path="/path/to/engine/src"
#readonly build_type=host_debug_unopt
#readonly extra_flags=(--local-engine-src-path $engine_src_path --local-engine=$build_type)
cd "$1"
echo Running "$flutter_binary" ${extra_flags[*]} build bundle
exec "$flutter_binary" ${extra_flags[*]} build bundle
| true |
c78f37492675b7dcda3577d05d2ede8ad9cd25ea | Shell | tobiasmarschall/bioconda-recipes | /recipes/hts-nim-tools/build.sh | UTF-8 | 1,340 | 3.109375 | 3 | [] | no_license | #!/bin/bash
set -eu -o pipefail
# Build script based off of mosdepth build
# https://github.com/brentp/mosdepth/blob/master/scripts/install.sh
# c2nim needs development branch of nim, see notes below
export BRANCH=devel
export base=$(pwd)
# Install nim
git clone -b $BRANCH --depth 1 git://github.com/nim-lang/nim nim-$BRANCH/
cd nim-$BRANCH
git clone --depth 1 git://github.com/nim-lang/csources csources/
cd csources
sh build.sh
cd ..
rm -rf csources
bin/nim c koch
./koch boot -d:release
# Nimble package manager
./koch nimble
./bin/nimble refresh --nimbleDir:$base/.nimble
export PATH=$base/nim-$BRANCH/bin:$PATH
# Avoid c2nim build errors: https://github.com/nim-lang/c2nim/issues/115
# Need to build this from within the nim-devel folder for some unknown reason
./bin/nimble install --nimbleDir:$base/.nimble -y compiler@#head
cd $base
echo $PATH
set -x
echo $(which nimble)
echo $(pwd)
if [ ! -x hts-nim ]; then
cd $base
git clone --depth 1 https://github.com/brentp/hts-nim/
cd hts-nim
# Avoid c2nim build errors: https://github.com/nim-lang/c2nim/issues/115
nimble install --nimbleDir:$base/.nimble -y c2nim@#head
nimble --nimbleDir:$base/.nimble install -y
fi
set -x
cd $base
nimble --nimbleDir:$base/.nimble install -y
mkdir -p $PREFIX/bin
chmod a+x hts_nim_tools
cp hts_nim_tools $PREFIX/bin
| true |
9f27fb23171839592a18630c33ec3c54b3c0e309 | Shell | iambenmitchell/obs-installer-for-apple-silicon | /install.sh | UTF-8 | 5,851 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Interested in contributing? Thank you!
# The easiest way to read this is to start from the bottom and work upwards!
#
# User-overridable settings (environment wins over these defaults).
REMOVE_INSTALLATION_DIRS="${REMOVE_INSTALLATION_DIRS:-true}"
LOG_LEVEL="${LOG_LEVEL:-info}"
# Working directories and upstream repositories used by the build.
OBS_INSTALL_DIR="/tmp/obs"
OBS_DEPS_DIR="/tmp/obsdeps"
OBS_GIT_URI=https://github.com/obsproject/obs-studio.git
OBS_DEPS_GIT_URI=https://github.com/obsproject/obs-deps.git
# Name of the dmg cpack produces, and where the final image is delivered.
OBS_DMG_PATH=obs-studio-x64-27.0.1-2-g3cc4feb8d-modified.dmg
FINAL_OBS_DMG_PATH="$HOME/Downloads/$OBS_DMG_PATH"
SPEEX_DIR=/tmp/speexdsp
SPEEX_URI=https://github.com/xiph/speexdsp.git
_log() {
  # Emit "[<timestamp>] <LEVEL>: <message>" on stdout.
  # $1 - log level (upper-cased for display), $2 - message text.
  local level
  level=$(printf '%s' "$1" | tr '[:lower:]' '[:upper:]')
  printf '[%s] %s: %s\n' "$(date)" "$level" "$2"
}
# Clone a git repository unless its destination directory already exists.
# $1 - human-readable name (for log messages)
# $2 - destination directory
# $3 - git URI
# Calls fail() (and thus exits) when the clone does not succeed.
_fetch() {
name="$1"
dest="$2"
git_uri="$3"
if ! test -d "$dest"
then
log_info "Downloading [$name] from [$git_uri] to $dest. This might take a while; please be patient."
if ! git clone --recursive "$git_uri" "$dest"
then
fail "Unable to download [$name]."
fi
else
log_debug "[$name] repo already downloaded; skipping."
fi
}
this_is_not_an_m1_mac() {
  # Succeed (return 0) when this host is NOT an Apple-silicon Mac.
  # A machine counts as an M1 Mac only when the OS is Darwin AND the
  # processor reports "arm64" or "arm". The original chained the two
  # processor checks with ||, which is a tautology (a value cannot equal
  # both strings at once), so the function always returned true and the
  # installer refused to run even on M1 hardware.
  test "$(uname)" != "Darwin" && return 0
  local processor
  processor="$(uname -p)"
  test "$processor" != "arm64" && test "$processor" != "arm"
}
# Log $1 at debug level, but only when LOG_LEVEL (case-insensitively)
# is "debug" or "verbose".
log_debug() {
if test "$(echo "$LOG_LEVEL" | tr '[:upper:]' '[:lower:]')" == "debug" || \
test "$(echo "$LOG_LEVEL" | tr '[:upper:]' '[:lower:]')" == "verbose"
then
_log "debug" "$1"
fi
}

# Log $1 unconditionally at info level.
log_info() {
_log "info" "$1"
}
# Log $1 at warning level. The original passed "info" here (an apparent
# copy-paste of log_info), which made warnings indistinguishable from
# ordinary info messages in the output.
log_warning() {
  _log "warning" "$1"
}
# Log $1 at error level.
log_error() {
_log "error" "$1"
}

# Log $1 at fatal level (does not exit by itself; see fail below).
log_fatal() {
_log "fatal" "$1"
}

# Log $1 at fatal level and terminate the script with status 1.
fail() {
log_fatal "$1"
exit 1
}
# Succeed when the 'brew' command is available.
homebrew_installed() {
  log_debug "Checking for Homebrew"
  # 'command -v' is the POSIX-recommended existence check; unlike the
  # external 'which' it also sees shell functions/aliases and needs no
  # extra process.
  command -v brew &>/dev/null
}
# Install the Homebrew packages required to build OBS; exit on failure.
install_dependencies_or_fail() {
log_info "Installing build dependencies"
if ! brew install akeru-inc/tap/xcnotary cmake cmocka ffmpeg jack mbedtls qt@5 swig vlc
then
fail "Unable to install one or more OBS dependencies. See log above for more details."
fi
}

# Thin wrappers over _fetch for the three source trees we need.
download_obs_or_fail() {
_fetch "OBS" "$OBS_INSTALL_DIR" "$OBS_GIT_URI"
}

download_obs_deps_or_fail() {
_fetch "OBS Dependencies" "$OBS_DEPS_DIR" "$OBS_DEPS_GIT_URI"
}

fetch_speexdsp_source() {
_fetch "SpeexDSP" "$SPEEX_DIR" "$SPEEX_URI"
}
# Overlay every file under ./files onto the cloned OBS tree, mapping
# files/<path> -> $OBS_INSTALL_DIR/<path> (sed strips the files/ prefix).
copy_modified_files_into_cloned_repo() {
while read -r file
do
dest="$OBS_INSTALL_DIR/$(echo "$file" | sed 's#files/##')"
log_info "Copying [$file] into [$dest]"
cp "$file" "$dest"
done < <(find files -type f)
}
# Overlay every file under ./template onto the cloned OBS tree, creating
# missing destination directories first. Instructions/DS_Store files are
# excluded from the copy.
copy_templates_into_cloned_repo() {
# Inner helper: mkdir -p the parent directory of $1 when absent.
_create_folder_for_file_if_not_exist() {
file="$1"
dir_to_create="$(dirname "$file")"
if ! test -d "$dir_to_create"
then
log_info "Creating directory [$dir_to_create]"
mkdir -p "$dir_to_create"
fi
}
while read -r template
do
dest="$OBS_INSTALL_DIR/$(echo "$template" | sed 's#template/##')"
_create_folder_for_file_if_not_exist "$dest"
log_info "Copying template [$template] into [$dest]"
cp "$template" "$dest"
done < <(find template -type f | grep -Ev '(Instructions|DS_Store)')
}
# Configure and build OBS in $OBS_INSTALL_DIR/cmake; the trailing 'stat'
# verifies the obs binary was actually produced. Exits via fail() when
# any step of the subshell pipeline breaks.
build_obs_or_fail() {
pushd "$OBS_INSTALL_DIR/cmake"
if ! (
cmake -DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 -DDISABLE_PYTHON=ON \
-DCMAKE_PREFIX_PATH="/opt/homebrew/opt/qt@5" \
-DSPEEXDSP_INCLUDE_DIR="$SPEEX_DIR/include" \
-DSWIGDIR="$OBS_DEPS_DIR" \
-DDepsPath="$OBS_DEPS_DIR" .. &&
make &&
stat rundir/RelWithDebInfo/bin/obs 1>/dev/null
)
then
popd &>/dev/null
fail "Unable to build OBS; see above logs for more info."
fi
popd &>/dev/null
}
# Run cpack to produce the installer dmg, skipping the step entirely when
# the dmg already exists from a previous run.
package_obs_or_fail() {
if ! test -f "$OBS_INSTALL_DIR/cmake/$OBS_DMG_PATH"
then
log_info "Packaging OBS"
pushd "$OBS_INSTALL_DIR/cmake"
if ! ( cpack && test -f "$OBS_INSTALL_DIR/cmake/$OBS_DMG_PATH" )
then
popd &>/dev/null
fail "Unable to package OBS; see above logs for more info."
fi
popd &>/dev/null
fi
}
add_virtualcam_plugin() {
  # Convert the read-only installer image to a writeable copy, mount it,
  # copy the mac virtual-camera plugin into the app bundle, then unmount.
  log_info "Adding MacOS Virtual Camera plugin."
  if test -f "$OBS_INSTALL_DIR/obs.dmg"
  then
    rm "$OBS_INSTALL_DIR/obs.dmg"
  fi
  if ! hdiutil convert -format UDRW -o "$OBS_INSTALL_DIR/obs.dmg" \
      "$OBS_INSTALL_DIR/cmake/$OBS_DMG_PATH"
  then
    fail "Unable to create writeable OBS image; see logs for more."
  fi
  # Attach exactly once and parse both the device node and the mount
  # point from that single invocation's output; the original invoked
  # 'hdiutil attach' three separate times just to re-read the same info.
  local attach_info
  if ! attach_info=$(hdiutil attach "$OBS_INSTALL_DIR/obs.dmg")
  then
    fail "Unable to attach to writeable OBS image; see logs for more."
  fi
  local device mountpath
  device=$(echo "$attach_info" | tail -1 | awk '{print $1}')
  mountpath=$(echo "$attach_info" | tail -1 | awk '{print $3}')
  cp -r "$OBS_INSTALL_DIR/cmake/rundir/RelWithDebInfo/data/obs-mac-virtualcam.plugin" \
    "$mountpath/OBS.app/Contents/Resources/data"
  hdiutil detach "$device"
}
# Convert the writeable image (with the plugin added) back to a read-only
# dmg at its final location in ~/Downloads.
# NOTE(review): despite the _or_fail name the hdiutil status is not
# checked here — confirm whether that is intentional.
repackage_obs_or_fail() {
log_info "Re-packaging OBS with Virtual Camera added."
hdiutil convert -format UDRO -o "$FINAL_OBS_DMG_PATH" "$OBS_INSTALL_DIR/obs.dmg"
}
# Delete the three working trees unless the user opted out by setting
# REMOVE_INSTALLATION_DIRS to anything other than "true".
remove_data_directories() {
if test "$REMOVE_INSTALLATION_DIRS" == "true"
then
log_info "Cleaning up"
rm -rf "$OBS_INSTALL_DIR" &&
rm -rf "$OBS_DEPS_DIR" &&
rm -rf "$SPEEX_DIR"
else
log_info "Clean up skipped. You can find OBS sources at $OBS_INSTALL_DIR,
OBS dependencies sources at $OBS_DEPS_DIR, and Speex sources at
$SPEEX_DIR."
fi
}
# Main flow: guard on platform and Homebrew, then run the full pipeline
# (deps -> sources -> overlays -> build -> package -> plugin -> deliver).
if this_is_not_an_m1_mac
then
fail "This installer only works on Apple M1 Macs. \
For OBS 26.x, use Homebrew: 'brew install obs'. \
For OBS 27.x, build OBS from source using the mainstream instructions"
fi

if ! homebrew_installed
then
fail "Homebrew isn't installed. Please install it."
fi

install_dependencies_or_fail
download_obs_or_fail
download_obs_deps_or_fail
copy_modified_files_into_cloned_repo
copy_templates_into_cloned_repo
fetch_speexdsp_source
build_obs_or_fail
package_obs_or_fail
add_virtualcam_plugin
repackage_obs_or_fail
remove_data_directories
log_info "Installation succeeded! Move OBS into your Applications folder in the \
Finder window that pops up."
# Open the finished dmg in Finder for the user to drag into /Applications.
open "$FINAL_OBS_DMG_PATH"
| true |
455c2bd2004941fd047418af6e3a179a696664c6 | Shell | aur-archive/virtualbox-beta-bin | /PKGBUILD | UTF-8 | 5,733 | 2.59375 | 3 | [] | no_license | # Maintainer: 4679kun <admin@4679.us>
# Contributors: Det, The Ringmaster, Christian Berendt, Balwinder S "bsd" Dheeman, thotypous
# Based on virtualbox-bin and virtualbox-ext-oracle
pkgname=virtualbox-beta-bin
# NOTE(review): 'true &&' before the array assignment is presumably the
# common AUR split-package idiom (hides the array from the AUR metadata
# parser so only the first pkgname is indexed) — verify.
true && pkgname=("$pkgname" 'virtualbox-ext-oracle-beta')
pkgver=4.3.0_BETA3
_build=89194
pkgrel=1
arch=('i686' 'x86_64')
url='http://virtualbox.org'
license=('GPL2')
options=('!strip')
# Upstream uses x86/amd64 in its file names; map from makepkg's $CARCH.
_arch='x86'
[ "$CARCH" = 'x86_64' ] && _arch='amd64'
# Upstream .run installer and extension pack, plus local support files.
source=("http://download.virtualbox.org/virtualbox/$pkgver/VirtualBox-$pkgver-$_build-Linux_$_arch.run"
"http://download.virtualbox.org/virtualbox/$pkgver/Oracle_VM_VirtualBox_Extension_Pack-$pkgver-$_build.vbox-extpack"
'10-vboxdrv.rules'
'vboxdrv'
'vboxdrv.conf'
'vboxweb'
'vboxweb.conf'
'PUEL')
# Checksums in the same order as the source array above.
sha256sums=('a9e8791efefaaf17eefc1667a4bd4e6829875c03070e440fdc354aa86d104a4a'
'3bf5a11e540007c1c3100bdcfac7950f1f79d6803dba487e66400eb4cad71b34'
'69417a9e8855cab8e4878886abe138f559fd17ae487d4cd19c8a24974a8bbec2'
'578b63ab173cdcd9169d8aff00caf48668e46a6886bb90fd34a0fbe63e180a4e'
'a3961bdeac733264320ec3b7de49018cbdb7e26b726170370feac8b809bf0bdd'
'656905de981ffa24f6f921c920538854a235225053f44baedacc07b46ca0cf56'
'12dbba3b59991f2b68cddeeeda20236aeff63e11b7e2d1b08d9d6a82225f6651'
'50658c653cde4dc43ba73a64c72761d2e996fd8e360fc682aa2844e1ad5b045f')
package_virtualbox-beta-bin() {
pkgdesc="A powerful x86 virtualizer - Beta"
depends=('fontconfig' 'gcc' 'libgl' 'libidl2' 'libxcursor' 'libxinerama' 'libxmu' 'linux-headers' 'python2' 'sdl')
optdepends=('virtualbox-ext-oracle-beta: for Oracle extensions'
'dkms: for handling kernel modules with dkms')
provides=("virtualbox=$pkgver")
conflicts=('virtualbox' 'virtualbox-modules')
backup=('etc/vbox/vbox.cfg' 'etc/conf.d/vboxdrv' 'etc/conf.d/vboxweb')
install=$pkgname.install
_installdir=/opt/VirtualBox
msg2 "Unpacking the .run package"
echo yes | sh VirtualBox-$pkgver-$_build-Linux_$_arch.run --target . --nox11 --noexec &> /dev/null
msg2 "Creating required dirs"
mkdir -p "$pkgdir"/{$_installdir,etc/{vbox,{rc,conf}.d},lib/udev/rules.d,usr/{bin,src,share/{applications,doc/$pkgname,mime/packages,pixmaps}},var/run/VirtualBox}
msg2 "Extracting VirtualBox.tar.bz2"
cd "$pkgdir/$_installdir"
tar -xjf "$srcdir/VirtualBox.tar.bz2"
msg2 "Hardened build: Mark binaries suid root, create symlinks for working around
unsupported $ORIGIN/.. in VBoxC.so and make sure the
directory is only writable by the user"
chmod 4511 VirtualBox VBox{SDL,Headless,NetDHCP,NetAdpCtl}
for _lib in VBox{VMM,REM,RT,DDU,XPCOM}.so; do
ln -sf $_installdir/$_lib components/$_lib
done
chmod go-w .
msg2 "Patching 'vboxshell.py' for Python 2"
sed -i 's#/usr/bin/python#\02#' vboxshell.py
msg2 "Fixing VBox.sh according to Arch's initscripts"
sed -i -e 's,/etc/init.d/,/etc/rc.d/,g' VBox.sh
msg2 "Installing the scripts and confs"
cd "$srcdir"
install -m755 vbox{drv,web} "$pkgdir/etc/rc.d/"
install -m644 vboxdrv.conf "$pkgdir/etc/conf.d/vboxdrv"
install -m644 vboxweb.conf "$pkgdir/etc/conf.d/vboxweb"
msg2 "Installing the udev rules"
install -m644 10-vboxdrv.rules "$pkgdir/lib/udev/rules.d/"
ln -s "$_installdir/VBoxCreateUSBNode.sh" "$pkgdir/lib/udev/"
msg2 "Installing the SDK"
cd "$pkgdir/$_installdir/sdk/installer"
VBOX_INSTALL_PATH=$_installdir python2 vboxapisetup.py install --root "$pkgdir"
rm -r -f build
cd -
msg2 "Symlinking the launchers" # 2nd can fail, if fs not case sensitive
for _bin in VirtualBox VBox{Headless,Manage,SDL,SVC,Tunctl,NetAdpCtl} rdesktop-vrdp; do
ln -s $_installdir/$_bin "$pkgdir/usr/bin/$_bin"
ln -s $_installdir/$_bin "$pkgdir/usr/bin/${_bin,,}" &>/dev/null || :
done
msg2 "Symlinking the desktop icon, mime info, doc, module sources and .desktop files"
ln -s $_installdir/{VBox,icons/128x128/virtualbox}.png "$pkgdir/usr/share/pixmaps/"
ln -s $_installdir/virtualbox.desktop "$pkgdir/usr/share/applications/"
ln -s $_installdir/virtualbox.xml "$pkgdir/usr/share/mime/packages/"
ln -s $_installdir/VirtualBox.chm "$pkgdir/usr/share/doc/$pkgname/"
ln -s $_installdir/src/vboxhost "$pkgdir/usr/src/vboxhost-$pkgver"
msg2 "Symlinking the icons"
cd "$pkgdir/$_installdir/icons"
for _dir in *; do
cd "$_dir"
mkdir -p "$pkgdir/usr/share/icons/hicolor/$_dir/"{apps,mimetypes}
for _icon in *; do
if [[ "$_icon" = 'virtualbox.png' ]]; then
ln -s $_installdir/icons/$_dir/$_icon "$pkgdir/usr/share/icons/hicolor/$_dir/apps/"
else
ln -s $_installdir/icons/$_dir/$_icon "$pkgdir/usr/share/icons/hicolor/$_dir/mimetypes/"
fi
done
cd - >/dev/null
done
msg2 "Writing the configuration file"
echo "# VirtualBox installation directory
INSTALL_DIR=$_installdir
# VirtualBox version
INSTALL_VER=$pkgver
INSTALL_REV=$_build" >> "$pkgdir/etc/vbox/vbox.cfg"
}
package_virtualbox-ext-oracle-beta() {
pkgdesc="An extension pack for VirtualBox - Beta"
arch=('any')
depends=("virtualbox-beta-bin=$pkgver")
license=('custom:PUEL')
install="$pkgname.install"
msg2 "Installing the extension pack and license"
install -Dm644 Oracle_VM_VirtualBox_Extension_Pack-$pkgver-$_build.vbox-extpack "$pkgdir/usr/share/virtualbox/extensions/Oracle_VM_VirtualBox_Extension_Pack-$pkgver.vbox-extpack"
install -Dm644 PUEL "$pkgdir/usr/share/licenses/$pkgname/PUEL"
}
pkgdesc="A powerful x86 virtualizer - Beta + Extension Pack + Additions"
depends=('libidl2' 'libxcursor' 'libxinerama' 'libxslt' 'curl' 'linux-headers' 'python2')
license=('GPL2' 'custom:PUEL')
# vim:set ts=2 sw=2 ft=sh et:
| true |
0b0cc87b3cd89720597d7bb81993fb594cf769cd | Shell | builab/EM_preprocessing | /frame_to_stack_slurm.sh | UTF-8 | 843 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#SBATCH --ntasks=1
#SBATCH --partition=cpusmall
#SBATCH --error=error.log
#SBATCH --output=frames_to_stack.log
#SBATCH --job-name=frames_to_stack
#SBATCH --mem 1000
# Script to combine single image frames into stack
# Used for EPU 1.5
# HB 2018/04/30
# Base command to combine frames in to stack
# newstack -mode 1 abc_frames_n*.mrc abc_movie.mrcs
# Rot 90 degree to backward compatible
module load imod
inputDir='Frames'
outDir='Movies'
if [ ! -d "${outDir}" ]; then
mkdir ${outDir}
else
echo "${outDir} exists !!! Continue in 2s anyway ...";
sleep 2s
fi
for i in ${inputDir}/*n0.mrc;
do
foo=${i/_frames_n0.mrc}
foo=${foo#${inputDir}/}
echo "newstack -rotate -90 ${inputDir}/${foo}_frames_n*.mrc ${outDir}/${foo}_frames.mrcs";
newstack -rotate -90 ${inputDir}/${foo}_frames_n*.mrc ${outDir}/${foo}_frames.mrcs
done
| true |
3304d404a7c2485ff5bc505cce196231cbd1f91a | Shell | jsm/go-chi-framework-migrations | /deploy/deploy-to-eb.sh | UTF-8 | 1,032 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
set -ex
deploy-to-region () {
AWS_ACCOUNT_ID=0000000000000
ECR_REPO=$AWS_ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/migrations
EB_BUCKET=$AWS_ACCOUNT_ID-eb-$REGION-versions
docker tag $SHA1 $ECR_REPO:$VERSION
docker push $ECR_REPO:$VERSION
# Create new Elastic Beanstalk version
sed "s/<TAG>/$VERSION/" < eb/Dockerrun.aws.json.template > eb/Dockerrun.aws.json
(cd eb && zip -r ../$VERSION.zip .)
aws s3 cp $VERSION.zip s3://$EB_BUCKET/$APPLICATION_NAME/$VERSION.zip
aws --region $REGION elasticbeanstalk create-application-version --application-name $APPLICATION_NAME \
--version-label $VERSION --source-bundle S3Bucket=$EB_BUCKET,S3Key=$APPLICATION_NAME/$VERSION.zip
sleep 10
aws --region $REGION elasticbeanstalk update-environment --application-name $APPLICATION_NAME --environment-name $APPLICATION_NAME-dev --version-label $VERSION
}
SHA1=$1
VERSION=$2
APPLICATION_NAME=users
DOCKERRUN_FILE=$SHA1-Dockerrun.aws.json
# Deploy to us-east-1
REGION=us-east-1 deploy-to-region
| true |
d17fb13931bbf48a288ef5890a7f3d5a7c8237a9 | Shell | xiaoqi55125/bashscripts | /java/install.sh | UTF-8 | 547 | 3.09375 | 3 | [] | no_license | #! /bin/bash
echo "downloading and installing jdk7..."
wget -c http://download.oracle.com/otn-pub/java/jdk/7/jdk-7-linux-i586.tar.gz
echo "extracting and installing..."
sudo tar zxvf ./jdk-7-linux-i586.tar.gz -C /usr/lib/jvm
cd /usr/lib/jvm
sudo mv jdk1.7.0/ java-7-sun
profile=~/.bashrc
echo -e "\nexport JAVA_HOME=/usr/lib/jvm/java-7-sun">>$profile
echo -e "\nexport JRE_HOME=${JAVA_HOME}/jre">>$profile
echo -e "\nexport CLASSPATH=.:$(JAVA_HOME)/lib:$(JRE_HOME)/lib">>$profile
echo -e "\nexport PATH=$PATH:$(JAVA_HOME)/bin">>$profile | true |
f1c387b5c4ab93441f83da551c5af3a3edcd9f56 | Shell | korrapatiharathi/htaptestcases | /hpss_cancel.sh | UTF-8 | 1,610 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Archive and cancel. Do not keep a agent around so the request won't be
# performed.
# Load in the testing tools
. $(dirname $0)/hpss_framework.inc
# Prepare
#TESTPATH=`readlink -f $LHMSC_TEST_LUSTRE_MP/$LHMSC_TEST_LUSTRE_TESTPATH`
TESTPATH="$LHMSC_TEST_LUSTRE_MP/$LHMSC_TEST_LUSTRE_TESTPATH"
if [ -d "$TESTPATH" ] ; then
rm -rf "$TESTPATH"
fi
mkdir -p $TESTPATH
# Create the file, archive it
rm -f $TESTPATH/ls
cp /bin/ls $TESTPATH/
make_conf 4dm1cm.conf
# Start the manager
start_cmm
echo "Archiving"
lfs hsm_archive $TESTPATH/ls
echo "Archiving started"
# Wait for transfer to start for 10 seconds.
LIM=$((`date +%s`+10))
while :
do
sleep .5
[ `date +%s` -ge $LIM ] && error "archiving not started (timeout)"
get_status cmm1
grep -c archive $(log cmm1 status) | grep "^1$" && break
done
# Cancel
echo "Cancel"
lfs hsm_cancel $TESTPATH/ls
echo "Canceled"
# Wait until it is gone
LIM=$((`date +%s`+10))
while :
do
sleep .5
[ `date +%s` -ge $LIM ] && error "archiving not canceled (timeout)"
get_status cmm1
grep "archive.*0x" $(log cmm1 status) || break
done
get_stats cmm1
get_status cmm1
# Two cancel lines. One for the frontend, and one for the total of all
# frontends.
ecount 2 "\|\s+cancel\s+\|\s+1\s+\|\s+1\s+\|" $(log cmm1 stats)
grep "\b417\b.*\b417\b" $(log cmm1 status) || error "BAD"
# Check it's done
grep "returned to Lustre (op=archive, .*, a_ret=125, ret=0)" $(log cmm1) || error "expected op returned not found"
check_stats cmm1 || error "stat checks failed"
check_status cmm1 || error "status checks failed"
echo "Good"
exit 0
| true |
39313c1913b1ae46f460914f121f49ec2fa114a0 | Shell | molleweide/dorothy | /commands/fs-structure | UTF-8 | 522 | 3.703125 | 4 | [
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | #!/usr/bin/env bash
source "$DOROTHY/sources/strict.bash"
path="$(ask --question="Enter the path to output the structure of." --default="${1-}" --required)"
# ls
# -A, --almost-all do not list implied . and ..
# -l use a long listing format
# --time-style=TIME_STYLE time/date format with -l; see TIME_STYLE below
if test -d "$path"; then
cd "$path" || exit 1
ls -lA --time-style=+''
else
cd "$(dirname "$path")" || exit 1
ls -lA --time-style=+'' "$(basename "$path")"
fi
| true |
d0250a10ba279c5389a310c55ccf7bd964d76d98 | Shell | Doboo/btsync | /btsync.sh | WINDOWS-1252 | 780 | 3.0625 | 3 | [] | no_license | #sudo wget http://download-lb.utorrent.com/endpoint/btsync/os/linux-x64/track/stable -O /tmp/btsync_x64.tar.gz
function set32 {
wget https://download-cdn.getsync.com/stable/linux-i386/BitTorrent-Sync_i386.tar.gz -O /tmp/btsync_i386.tar.gz
cd /opt/btsync
tar xzf /tmp/btsync_i386.tar.gz
}
function set64 {
wget https://download-cdn.getsync.com/stable/linux-x64/BitTorrent-Sync_x64.tar.gz -O /tmp/btsync_x64.tar.gz
cd /opt/btsync
tar xzf /tmp/btsync_x64.tar.gz
}
mkdir /opt/btsync
mkdir /opt/btsync_transfer
if [ $(getconf WORD_BIT) = '32' ] && [ $(getconf LONG_BIT) = '64' ] ; then
set64
else
set32
fi
cp -a /root/btsync/btsync /etc/init.d/btsync
chmod +x /etc/init.d/btsync
cp -a /root/btsync/btsync.cfg /opt/btsync/btsync.cfg
#
/etc/init.d/btsync start | true |
20244cd64dd077de7a8176d9fd8eaf95639c4a7b | Shell | schlomo/wireshark-fritzbox | /fritzdump.sh | UTF-8 | 1,198 | 3.734375 | 4 | [] | no_license | #!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 <IP>"
exit 1
fi
WGET=/usr/local/bin/wget
FRITZ_IP=$1
FRITZ_USER=""
FRITZ_IFACE="1-lan"
SIDFILE="/tmp/fritz.sid"
if [ ! -f $SIDFILE ]; then
touch $SIDFILE
fi
SID=$(cat $SIDFILE)
NOTCONNECTED=$(curl -s "http://$FRITZ_IP/login_sid.lua?sid=$SID" | grep -c "0000000000000000")
if [ $NOTCONNECTED -gt 0 ]; then
read -s -p "Enter Router Password: " FRITZ_PWD
echo ""
CHALLENGE=$(curl -s http://$FRITZ_IP/login_sid.lua | grep -o "<Challenge>[a-z0-9]\{8\}" | cut -d'>' -f 2)
HASH=$(perl -MPOSIX -e '
use Digest::MD5 "md5_hex";
my $ch_pw = "$ARGV[0]-$ARGV[1]";
$ch_pw =~ s/(.)/$1 . chr(0)/eg;
my $md5 = lc(md5_hex($ch_pw));
print $md5;
' -- "$CHALLENGE" "$FRITZ_PWD")
curl -s "http://$FRITZ_IP/login_sid.lua" -d "response=$CHALLENGE-$HASH" -d 'username='${FRITZ_USER} | grep -o "<SID>[a-z0-9]\{16\}" | cut -d'>' -f 2 > $SIDFILE
fi
SID=$(cat $SIDFILE)
if [ "$SID" == "0000000000000000" ]; then
echo "Authentication error" 1>&2
exit 1
fi
echo "Capturing traffic.." 1>&2
$WGET -qO- http://$FRITZ_IP/cgi-bin/capture_notimeout?ifaceorminor=$FRITZ_IFACE\&snaplen=\&capture=Start\&sid=$SID | tshark -i -
| true |
c7f86b837df5b4fc76ab4d62817beb78ee3ea3f5 | Shell | sumeetgajjar/PDP-Utils | /zip-submission.sh | UTF-8 | 1,349 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
SUBMISSION_DIR="submissions"
SOURCE_CODE_DIR="src"
TEST_CODE_DIR="test"
RES_DIR="res"
PWD=`pwd`
CURRENT_DIR=`basename $PWD`
LOCK_FILE_ROOT_DIR="/tmp/submission-locks/"
HISTORY_FILE="${SUBMISSION_DIR}/history.csv"
if [ ! -d $LOCK_FILE_ROOT_DIR ]
then
echo "creating the lock root dir"
mkdir -p $LOCK_FILE_ROOT_DIR || exit 1
fi
(
if flock -xn 9 ;
then
if [ ! -d $SOURCE_CODE_DIR ]
then
echo "source code dir does not exist"
exit 1
fi
if [ ! -d $TEST_CODE_DIR ]
then
echo "test code dir does not exist"
exit 1
fi
if [ ! -d $SUBMISSION_DIR ]
then
echo "creating the submission dir"
mkdir $SUBMISSION_DIR
echo "submission dir created"
fi
if [ -f $HISTORY_FILE ]
then
SUBMISSION_COUNT=`tail -n1 $HISTORY_FILE | awk -F"," '{print $2+1}'`
else
SUBMISSION_COUNT=0
echo '"Submission Zip Creation Date","Submission Count"' >> $HISTORY_FILE
fi
SUBMISSION_FILE="${SUBMISSION_DIR}/submission-${SUBMISSION_COUNT}"
echo "creating zip for the submission"
zip -r $SUBMISSION_FILE $SOURCE_CODE_DIR $TEST_CODE_DIR $RES_DIR
echo "submission zip created : ${SUBMISSION_FILE}.zip"
SUBMISSION_HISTORY='"'`date`'",'${SUBMISSION_COUNT}
echo $SUBMISSION_HISTORY >> $HISTORY_FILE
else
echo "Cannot acquire lock"
exit 1
fi
)9>$LOCK_FILE_ROOT_DIR"$CURRENT_DIR.lock"
| true |
a03e8b4a6de88eb4c0d16e08373d389329175843 | Shell | up4/billing | /install.sh | UTF-8 | 7,304 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# linux user for ucrm docker containers
UCRM_USER="docker"
UCRM_ROOT="/var/lib/docker/compose/ucrm"
GITHUB_REPO="up4/billing"
POSTGRES_PASSWORD=$(cat /dev/urandom | tr -dc "a-zA-Z0-9" | fold -w 48 | head -n 1);
SECRET=$(cat /dev/urandom | tr -dc "a-zA-Z0-9" | fold -w 48 | head -n 1);
if [ -z "$INSTALL_CLOUD" ]; then INSTALL_CLOUD=false; fi
check_system() {
local lsb_dist
local dist_version
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='debian'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
lsb_dist='fedora'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
lsb_dist='oracleserver'
fi
if [ -z "$lsb_dist" ]; then
if [ -r /etc/centos-release ] || [ -r /etc/redhat-release ]; then
lsb_dist='centos'
fi
fi
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
case "$lsb_dist" in
ubuntu)
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
fi
;;
debian)
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
;;
*)
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
esac
if [ "$lsb_dist" = "ubuntu" ] && [ "$dist_version" != "xenial" ] || [ "$lsb_dist" = "debian" ] && [ "$dist_version" != "8" ]; then
echo "Unsupported distro."
echo "Supported was: Ubuntu Xenial and Debian 8."
echo $lsb_dist
echo $dist_version
exit 1
fi
}
install_docker() {
which docker > /dev/null 2>&1
if [ $? = 1 ]; then
echo "Download and install Docker"
curl -fsSL https://get.docker.com/ | sh
fi
which docker > /dev/null 2>&1
if [ $? = 1 ]; then
echo "Docker not installed. Please check previous logs. Aborting."
exit 1
fi
}
install_docker_compose() {
which docker-compose > /dev/null 2>&1
if [ $? = 1 ]; then
echo "Download and install Docker compose."
curl -L https://github.com/docker/compose/releases/download/1.7.1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
fi
which docker-compose > /dev/null 2>&1
if [ $? = 1 ]; then
echo "Docker compose not installed. Please check previous logs. Aborting."
exit 1
fi
}
create_user() {
if hash getent 2>/dev/null; then
if [ -z "$(getent passwd $UCRM_USER)" ]; then
echo "Creating user $UCRM_USER."
adduser --disabled-password --gecos "" "$UCRM_ROOT"
usermod -aG docker $UCRM_USER
fi
fi
if [ ! -d "$UCRM_ROOT" ]; then
echo "Creating directory $UCRM_ROOT."
mkdir -p "$UCRM_ROOT"
fi
}
download_docker_compose_files() {
if [ ! -f "$UCRM_ROOT"/docker-compose.yml ]; then
echo "Downloading docker compose files."
curl -o "$UCRM_ROOT"/docker-compose.yml https://raw.githubusercontent.com/$GITHUB_REPO/master/docker-compose.yml
curl -o "$UCRM_ROOT"/docker-compose.migrate.yml https://raw.githubusercontent.com/$GITHUB_REPO/master/docker-compose.migrate.yml
curl -o "$UCRM_ROOT"/docker-compose.env https://raw.githubusercontent.com/$GITHUB_REPO/master/docker-compose.env
echo "Replacing env in docker compose."
sed -i -e "s/POSTGRES_PASSWORD=ucrmdbpass1/POSTGRES_PASSWORD=$POSTGRES_PASSWORD/g" "$UCRM_ROOT"/docker-compose.env
sed -i -e "s/SECRET=changeThisSecretKey/SECRET=$SECRET/g" "$UCRM_ROOT"/docker-compose.env
change_ucrm_port
change_ucrm_suspend_port
enable_ssl
fi
}
change_ucrm_port() {
local PORT
while true; do
if [ "$INSTALL_CLOUD" = true ]; then
PORT=y
else
read -r -p "Do you want UCRM to be accessible on port 80? (Yes: recommended for most users, No: will set 8080 as default) [Y/n]: " PORT
fi
case $PORT in
[yY][eE][sS]|[yY])
sed -i -e "s/- 8080:80/- 80:80/g" "$UCRM_ROOT"/docker-compose.yml
sed -i -e "s/- 8443:443/- 443:443/g" "$UCRM_ROOT"/docker-compose.yml
echo "UCRM will start at 80 port."
echo "#used only in instalation" >> "$UCRM_ROOT"/docker-compose.env
echo "SERVER_PORT=80" >> "$UCRM_ROOT"/docker-compose.env
break;;
[nN][oO]|[nN])
echo "UCRM will start at 8080 port. If you will change it, edit your docker-compose.yml in $UCRM_USER home direcotry."
echo "#used only in instalation" >> "$UCRM_ROOT"/docker-compose.env
echo "SERVER_PORT=8080" >> "$UCRM_ROOT"/docker-compose.env
break;;
*)
;;
esac
done
}
change_ucrm_suspend_port() {
local PORT
while true; do
if [ "$INSTALL_CLOUD" = true ]; then
PORT=y
else
read -r -p "Do you want UCRM suspend page to be accessible on port 81? (Yes: recommended for most users, No: will set 8081 as default) [Y/n]: " PORT
fi
case $PORT in
[yY]*)
sed -i -e "s/- 8081:81/- 81:81/g" "$UCRM_ROOT"/docker-compose.yml
echo "UCRM suspend page will start at 81 port."
echo "#used only in instalation" >> "$UCRM_ROOT"/docker-compose.env
echo "SERVER_SUSPEND_PORT=81" >> "$UCRM_ROOT"/docker-compose.env
break;;
[nN]*)
echo "UCRM suspend page will start at 8081 port. If you will change it, edit your docker-compose.yml in $UCRM_USER home direcotry."
echo "#used only in instalation" >> "$UCRM_ROOT"/docker-compose.env
echo "SERVER_SUSPEND_PORT=8081" >> "$UCRM_ROOT"/docker-compose.env
break;;
*)
;;
esac
done
}
enable_ssl() {
local SSL
while true; do
if [ "$INSTALL_CLOUD" = true ]; then
SSL=y
else
read -r -p "Do you want to enable SSL? (You need to generate a certificate for yourself) [Y/n]: " SSL
fi
case $SSL in
[yY]*)
enable_server_name
change_ucrm_ssl_port
break;;
[nN]*)
echo "UCRM has disabled support for SSL."
break;;
*)
;;
esac
done
}
enable_server_name() {
local SERVER_NAME_LOCAL
if [ "$INSTALL_CLOUD" = true ]; then
if [ -f "$CLOUD_CONF" ]; then
cat "$CLOUD_CONF" >> "$UCRM_ROOT"/docker-compose.env
fi
else
read -r -p "Enter Server domain name for UCRM, for example ucrm.example.com: " SERVER_NAME_LOCAL
echo "SERVER_NAME=$SERVER_NAME_LOCAL" >> "$UCRM_ROOT"/docker-compose.env
fi
}
change_ucrm_ssl_port() {
local PORT
while true; do
if [ "$INSTALL_CLOUD" = true ]; then
PORT=y
else
read -r -p "Do you want UCRM SSL to be accessible on port 443? (Yes: recommended for most users, No: will set 8443 as default) [Y/n]: " PORT
fi
case $PORT in
[yY]*)
sed -i -e "s/- 8443:443/- 443:443/g" "$UCRM_ROOT"/docker-compose.yml
echo "UCRM SSL will start at 443 port."
break;;
[nN]*)
echo "UCRM SSL will start at 8443 port."
break;;
*)
;;
esac
done
}
download_docker_images() {
echo "Downloading docker images."
cd "$UCRM_ROOT" && /usr/local/bin/docker-compose pull
}
start_docker_images() {
echo "Starting docker images."
cd "$UCRM_ROOT" && \
/usr/local/bin/docker-compose -f docker-compose.yml -f docker-compose.migrate.yml run migrate_app && \
/usr/local/bin/docker-compose up -d && \
/usr/local/bin/docker-compose ps
}
check_system
install_docker
install_docker_compose
create_user
download_docker_compose_files
download_docker_images
start_docker_images
exit 0
| true |
19e3e3ee7527c1e3d02b6aa933912a7f55a27355 | Shell | net-snmp/net-snmp | /testing/fulltests/default/Sv3usmtraps | UTF-8 | 5,195 | 3.359375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause",
"MIT",
"MIT-CMU"
] | permissive | #!/bin/sh
#
# SNMPv3 dynamic trap config
#
# Input variables:
# NOTIFYTYPE trap|inform
# TRAPSESS yes|no
# DYNTRAP yes|no
#
SKIPIFNOT NETSNMP_SECMOD_USM
## Defaults
[ "x$NOTIFYTYPE" = "x" ] && NOTIFYTYPE="trap"
[ "x$TRAPSESS" = "x" ] && TRAPSESS="yes"
[ "x$DYNTRAP" = "x" ] && DYNTRAP="no"
case "$NOTIFYTYPE" in
"trap")
notifytype=1
trapsess_arg="";;
"inform")
notifytype=2
trapsess_arg="-Ci";;
*)
echo "Error: NOTIFYTYPE = $NOTIFYTYPE is not supported"
return 1;;
esac
# Arguments: host ($1) and port ($2)
ipv4_2_hex() {
printf "0x"
for i in $(echo "${1%:}" | tr '.' ' '); do
printf "%02x" "$i"
done
printf "%04x\\n" "$2"
}
# Arguments: host ($1) and port ($2)
ipv6_2_hex() {
# Convert [<address>]: into <address>
ipv6_host=$(echo "$1" | sed 's/^\[//;s/:$//;s/]$//;')
if echo "${ipv6_host}" | grep -q ::; then
no_of_colons=$(echo "${ipv6_host}" | tr -d -c ':' | awk '{ print length }')
case $no_of_colons in
7) ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:/') ;;
6) ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:0:/') ;;
5) ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:0:0:/') ;;
4) ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:0:0:0:/') ;;
3) ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:0:0:0:0:/') ;;
2) if [ "${ipv6_host#::}" != "${ipv6_host}" ]; then
ipv6_host=$(echo "${ipv6_host}" | sed 's/::/0:0:0:0:0:0:0:/')
else
ipv6_host=$(echo "${ipv6_host}" | sed 's/::/:0:0:0:0:0:0:/')
fi;;
esac
fi
printf "0x"
for i in $(echo "${ipv6_host}" | tr ':' ' '); do
printf "%04x" $((0x${i:-0}))
done
printf "%04x\\n" "$2"
}
settmib() {
CAPTURE "snmpset -t 3 -r 0 -d $TESTAUTHARGS $SNMP_TRANSPORT_SPEC:$SNMP_TEST_DEST$SNMP_SNMPD_PORT SNMP-TARGET-MIB::$1 $2 $3"
}
setnmib() {
CAPTURE "snmpset -t 3 -r 0 -d $TESTAUTHARGS $SNMP_TRANSPORT_SPEC:$SNMP_TEST_DEST$SNMP_SNMPD_PORT SNMP-NOTIFICATION-MIB::$1 $2 $3"
}
dyntrap_config() {
if [ "$SNMP_TRANSPORT_SPEC" = "udp" ]; then
tdomain="SNMPv2-TM::snmpUDPDomain"
targetaddr=$(ipv4_2_hex "$SNMP_TEST_DEST" "$SNMP_SNMPTRAPD_PORT")
elif [ "$SNMP_TRANSPORT_SPEC" = "udp6" ]; then
tdomain="SNMPv2-SMI::mib-2.100.1.2"
targetaddr=$(ipv6_2_hex "$SNMP_TEST_DEST" "$SNMP_SNMPTRAPD_PORT")
else
tdomain="NA"
targetaddr="NA"
fi
obj="'dynamictrap'"
link=dynamictrap
settmib "snmpTargetParamsRowStatus.$obj" = 5
settmib "snmpTargetParamsStorageType.$obj" = 3 # nonVolatile
settmib "snmpTargetParamsMPModel.$obj" = 3 # USM
settmib "snmpTargetParamsSecurityModel.$obj" = 3 # USM
settmib "snmpTargetParamsSecurityName.$obj" = "$TESTAUTHUSER"
settmib "snmpTargetParamsSecurityLevel.$obj" = 2 # authNoPriv
# snmpNotify
setnmib "snmpNotifyRowStatus.$obj" = 5
setnmib "snmpNotifyTag.$obj" = "$link"
setnmib "snmpNotifyType.$obj" = "$notifytype"
setnmib "snmpNotifyStorageType.$obj" = 3
# snmpTargetAddr
settmib "snmpTargetAddrRowStatus.$obj" = 5
settmib "snmpTargetAddrTimeout.$obj" = 1000
settmib "snmpTargetAddrRetryCount.$obj" = 5
settmib "snmpTargetAddrTagList.$obj" = "$link"
settmib "snmpTargetAddrParams.$obj" = "$link"
settmib "snmpTargetAddrStorageType.$obj" = 3
settmib "snmpTargetAddrTDomain.$obj" = "$tdomain"
settmib "snmpTargetAddrTAddress.$obj" x "$targetaddr"
# enable the trap
settmib "snmpTargetAddrRowStatus.$obj" = 1
settmib "snmpTargetParamsRowStatus.$obj" = 1
setnmib "snmpNotifyRowStatus.$obj" = 1
if [ "$SNMP_VERBOSE" -gt 0 ]; then
for table in SNMP-TARGET-MIB::snmpTargetAddrTable \
SNMP-TARGET-MIB::snmpTargetParamsTable \
SNMP-NOTIFICATION-MIB::snmpNotifyTable; do
CAPTURE "snmpwalk -v 3 -r 2 -t 10 $TESTAUTHARGS $SNMP_TRANSPORT_SPEC:$SNMP_TEST_DEST$SNMP_SNMPD_PORT ${table}"
done
fi
}
## common SNMPv3 USM config
CREATEUSERENGINEID=0x80001f88802b3d0e1111111111
. ./Sv3usmconfigbase
CONFIGTRAPD "[snmp] persistentDir $SNMP_TMP_PERSISTENTDIR"
CONFIGTRAPD "oldEngineID $CREATEUSERENGINEID"
CONFIGTRAPD "$CREATEAUTHUSER"
CONFIGTRAPD "authUser log $TESTAUTHUSER"
STARTTRAPD
CONFIGAGENT "[snmp] persistentdir $SNMP_TMP_PERSISTENTDIR"
CONFIGAGENT "oldEngineID $CREATEUSERENGINEID"
CONFIGAGENT "$CREATEAUTHUSER"
CONFIGAGENT "rwuser $TESTAUTHUSER auth"
if [ "x$TRAPSESS" = "xyes" ]; then
CONFIGAGENT "trapsess $trapsess_arg -e $CREATEUSERENGINEID $TESTAUTHARGS \
${SNMP_TRANSPORT_SPEC}:${SNMP_TEST_DEST}${SNMP_SNMPTRAPD_PORT}"
fi
#AGENT_FLAGS="$AGENT_FLAGS -Dtrap -Dsend_notifications -DsnmpTargetAddrEntry -Dtarget_sessions -DsnmpNotifyTable_data"
STARTAGENT
[ "x$DYNTRAP" = "xyes" ] && dyntrap_config
CAPTURE "snmpset -On -t 3 -r 0 -d $TESTAUTHARGS $SNMP_TRANSPORT_SPEC:$SNMP_TEST_DEST$SNMP_SNMPD_PORT .1.3.6.1.4.1.2021.254.8.0 i 1"
STOPAGENT
STOPTRAPD
NO_OF_TRAPS=0
[ "x$TRAPSESS" = "xyes" ] && NO_OF_TRAPS=$((NO_OF_TRAPS+1))
[ "x$DYNTRAP" = "xyes" ] && NO_OF_TRAPS=$((NO_OF_TRAPS+1))
CHECKTRAPDCOUNT $NO_OF_TRAPS "life the universe and everything"
FINISHED
| true |
e28c46272fb5e579a5319688defefb7e19b5e91b | Shell | jibundeyare/install-scripts | /mkwebsite.sh | UTF-8 | 7,494 | 4 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
function usage {
this=$(basename $0)
cat <<-EOT
Usage: $this [USERNAME] [PROJECTS_DIRECTORY] [VHOST_DIRECTORY] [DOMAIN] [VHOST_TEMPLATE]
This script configures a new website.
It configures the Apache2 virtual host and the PHP-FPM pool files.
Warning: this script will stop before doing anything if:
- the specified PROJECTS_DIRECTORY does not exist
- the specified VHOST_DIRECTORY already exists
- the specified VHOST_TEMPLATE file does not exist
USERNAME should be your username.
PROJECTS_DIRECTORY is the directory in which you'll store all your projects.
VHOST_DIRECTORY is the directory in which a particular project will be stored.
DOMAIN is the domain name you will be using in your web browser to access a particular project.
This value is also used to set the sender email domain when sending mails
VHOST_TEMPLATE is an optional parameter that specifies the Apache2 virtual host template you want to use for a particular project.
Possible values are: template-vhost.conf, template-vhost-symfony.conf, template-subdir.conf, template-subdir-symfony.conf
template-vhost*.conf files will create a new virtual host.
Creating a new virtual host is useful when you have a domain name and you want to separate your applications into different directories and memory space.
template-subdir*.conf files will create a new sub-directory.
Creating a new sub-directory is useful when you do not have a domain name but still want to separate applications into different directories and memory space.
Default value is "template-vhost.conf".
WARNING: when using the templates "template-subdir.conf" or "template-subdir-symfony.conf", the DOMAIN parameter will be ignored, so any value is valid.
Example 1: $this johndoe projects foo foo.local
This command will:
- make the website accessible from the url "http://foo.local"
- use the default "template-vhost.conf" for the VHOST_TEMPLATE value
- create the Apache2 virtual host file "/etc/apache2/sites-available/foo.conf"
- create the PHP-FPM pool file "/etc/php/X.Y/fpm/pool.d/foo.conf"
Example 2: $this johndoe projects foo foo.example.com
This command will:
- the same things as example 1
- but make the website accessible from the url "http://foo.example.com"
Example 3: $this johndoe projects foo foo.local template-vhost-symfony.conf
This command will:
- make the website accessible from the url "http://foo.local"
- use the template "template-vhost-symfony.conf" for the VHOST_TEMPLATE value
- create the Apache2 virtual host file "/etc/apache2/sites-available/example.conf"
- create the PHP-FPM pool file "/etc/php/X.Y/fpm/pool.d/example.conf"
Example 4: $this johndoe projects foo foo.example.com template-vhost-symfony.conf
This command will:
- the same things as example 3
- but make the website accessible from the url "http://foo.example.com"
Example 5: $this johndoe projects foo foo.local template-subdir.conf
This command will:
- ignore the domain "foo.local" parameter
- make the website accessible from the url "http://localhost/foo", "http://example.com/foo" or "http://1.2.3.4/foo" depending on wether you are on a local machine, a vps and if you have a domain name or not.
- use the template "template-subdir.conf" for the VHOST_TEMPLATE value
- create the Apache2 conf file "/etc/apache2/sites-available/foo.conf"
- create the PHP-FPM pool file "/etc/php/X.Y/fpm/pool.d/foo.conf"
Example 6: $this johndoe projects foo foo.local template-subdir-symfony.conf
This command will:
- ignore the domain "foo.local" parameter
- make the website accessible from the url "http://localhost/foo", "http://example.com/foo" or "http://1.2.3.4/foo" depending on wether you are on a local machine, a vps and if you have a domain name or not.
- use the template "template-subdir-symfony.conf" for the VHOST_TEMPLATE value
- create the Apache2 conf file "/etc/apache2/sites-available/example.conf"
- create the PHP-FPM pool file "/etc/php/X.Y/fpm/pool.d/example.conf"
EOT
}
if [ $# -lt 4 ]; then
usage
exit 1
else
# settings
username="$1"
projects_directory="$2"
vhost_directory="$3"
domain="$4"
grep -i $username /etc/passwd > /dev/null
if [ "$?" == "1" ]; then
echo "error: the username $username does not exist"
exit 1
fi
if [ $# -gt 4 ]; then
vhost_template="$5"
if [ ! -f $vhost_template ]; then
echo "error: the vhost template file '$vhost_template' does not exist"
exit 1
fi
else
vhost_template="template-vhost.conf"
fi
if [ "$vhost_template" == "template-subdir.conf" ] || [ "$vhost_template" == "template-subdir-symfony.conf" ]; then
domain="<ignored>"
fi
cat <<-EOT
USERNAME: $username
PROJECTS_DIRECTORY: $projects_directory
VHOST_DIRECTORY: $vhost_directory
DOMAIN: $domain
VHOST_TEMPLATE: $vhost_template
EOT
read -p "Press [y/Y] to confirm: " answer
echo ""
if [ "$answer" != "y" ] && [ "$answer" != "Y" ]; then
echo "canceled"
exit
fi
fi
# set phpX.Y version
php_version="8.2"
# check that the script is not run as root
current_id="$(id -nu)"
if [ "$current_id" == "root" ]; then
echo "error: this script should not be run as root"
exit 1
fi
# check that user is a sudoer
sudo_id=$(sudo id -nu)
if [ "$sudo_id" != "root" ]; then
echo "error: you must be a sudoer to use this script"
exit 1
fi
if [ ! -d /home/$username/$projects_directory ]; then
echo "error: the projects directory '/home/$username/$projects_directory' does not exist"
exit 1
fi
# create a dedicated php session directory
sudo mkdir /var/lib/php/sessions/$vhost_directory
# set appropriate rights (drwx-wx-wt) on the dedicated php sessions directory
sudo chmod 1733 /var/lib/php/sessions/$vhost_directory
# copy template-pool.conf to php fpm pool directory
sudo cp template-pool.conf /etc/php/$php_version/fpm/pool.d/$vhost_directory.conf
# edit file to match selected username and virtual host directory
sudo sed -i "s/{username}/$username/g" /etc/php/$php_version/fpm/pool.d/$vhost_directory.conf
sudo sed -i "s/{projects_directory}/$projects_directory/g" /etc/php/$php_version/fpm/pool.d/$vhost_directory.conf
sudo sed -i "s/{vhost_directory}/$vhost_directory/g" /etc/php/$php_version/fpm/pool.d/$vhost_directory.conf
sudo sed -i "s/{domain}/$domain/g" /etc/php/$php_version/fpm/pool.d/$vhost_directory.conf
# restart php fpm
sudo systemctl restart php$php_version-fpm.service
# copy template-vhost.conf to apache2 available virtual host directory
sudo cp $vhost_template /etc/apache2/sites-available/$vhost_directory.conf
# edit file to match selected username, projects directory, virtual host directory and local domain name
sudo sed -i "s/{username}/$username/g" /etc/apache2/sites-available/$vhost_directory.conf
sudo sed -i "s/{projects_directory}/$projects_directory/g" /etc/apache2/sites-available/$vhost_directory.conf
sudo sed -i "s/{vhost_directory}/$vhost_directory/g" /etc/apache2/sites-available/$vhost_directory.conf
sudo sed -i "s/{domain}/$domain/g" /etc/apache2/sites-available/$vhost_directory.conf
# enable virtual host
sudo a2ensite $vhost_directory.conf
# restart apache2
sudo systemctl reload apache2.service
# inform user
echo "INFO: apache2 has been reloaded"
echo ""
# warn user that he has to create the website directory by himself
echo "WARNING: This script does not create the website directory."
echo "To create the website directory, use this command:"
echo ""
echo " mkdir /home/$username/$projects_directory/$vhost_directory"
echo ""
| true |
c3f0cf4f6270a36498616bcc4586bb385806f5ad | Shell | ytnobody/covid19-surveryor | /test/url-map_test.sh | UTF-8 | 1,236 | 2.71875 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
set -e
. ./slack-bot/url-map.sh
# test get_title_by_url
res=`wget -q -O - https://kantei.go.jp`
title=`get_title_by_res "$res"`
echo actual $title
title_expect_result="首相官邸ホームページ"
echo expect $title_expect_result
if [ "$title" = "$title_expect_result" ]; then
echo "passed"
else
echo "failed"
exit 1
fi
# test get_title_by_url
res=`wget -q -O - https://www.city.funabashi.lg.jp/jigyou/shoukou/002/corona-jigyosha.html`
title=`get_title_by_res "$res"`
echo actual $title
title_expect_result="新型コロナウィルス感染症に関する中小企業者(農林漁業者を含む)・労働者への支援|船橋市公式ホームページ"
echo expect $title_expect_result
if [ "$title" = "$title_expect_result" ]; then
echo "passed"
else
echo "failed"
exit 1
fi
# test get_title_by_url
res=`wget -q -O - https://www.pref.oita.jp/soshiki/14040/sodanmadoguti1.html`
title=`get_title_by_res "$res"`
echo actual $title
title_expect_result="新型コロナウイルスの流行に伴う経営・金融相談窓口の開設について - 大分県ホームページ"
echo expect $title_expect_result
if [ "$title" = "$title_expect_result" ]; then
echo "passed"
else
echo "failed"
exit 1
fi
| true |
0bd8d5290fa552d458bdc1f56a797a5b060e8fad | Shell | dlandis/AngryPigs | /bashbeans | UTF-8 | 3,593 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# bashbeans -- minimal build/run wrapper for the AngryPigs Scala game.
# Hashes the sources together with this script and skips recompilation
# when nothing changed; builds through the fsc compile daemon; can then
# launch the game. Flags: -f force rebuild, -r run after build,
# -o pass -optimise to the compiler, -R run without ever compiling,
# --cygwin translate classpath/library paths with cygpath (Windows).
#todo
##.bashbeans settings (global ~/. and per project and per machine)
###folders, classpath/ext, platform info, compilers, etc.,
###cooperate with git (gitignore for machine specific and hashes)
###detect machine settings (os, which md5sum?, )
##support for java and clojure
###put compiler scripts into global ~/.
###javac + compiler jars vs. scalac,clojurec,...
###pass names without .ext, and let it figure it out?
##save last few hashes and mock me if I tried the exact same thing already :D
###also "compile by lookup"
##make a github repo
#toimprove
#search .bashbeanshash and shift out already compiled, instead of doing a pure compare
#links
## http://www.codecommit.com/blog/scala/joint-compilation-of-scala-and-java-sources
## http://code.google.com/p/simple-build-tool/
starttime="$(date +%s)"
#load settings
##@put to file, read lines and execute them
##@make bashbeansinit to generate such files
hashpath="../.bashbeanhash"
os="linux"
hashfunc="md5sum" #any will do, really, it only needs input as hashfunc <files>
projectname="Angry Pigs"
runname="AngryPigs"
# pathprocess converts paths for the JVM: 'echo' is a no-op on Linux,
# replaced with 'cygpath -wp' when --cygwin is given below.
pathprocess="echo"
#setuppath="../.bashbeansetup"
#if [ -e $setuppath ]; then
#	#lol="l"
#	./$setuppath
#fi
# remember the exact invocation so the script can re-exec itself later
originalcall="$*"
cd src
echo "$projectname"
#check if some stuff exists
classdir="../class"
if [ ! -d "$classdir" ]; then
	mkdir $classdir
fi
classdir="../class/AngryPigs"
if [ ! -d "$classdir" ]; then
	mkdir $classdir
fi
if [ ! -f "$hashpath" ];
then
	touch "$hashpath"
fi
#get arguments, process switches
#@do it with getopt(s)
if [ $# -gt 0 ]; then
	for i do
	case "$i" in #dont take the strings seriously - they're used as booleans ;)
		"-f") Force="m*a";shift;;
		"-r") Run="yes please";shift;;
		"-o") Optimise="-optimise";shift;;
		"-R") Run="yes please";Unforce="<- means don't ever compile";shift;;
		"--cygwin") pathprocess="cygpath -wp";shift;;
	esac
	done
	files="$@"
fi
if [ "$files" == "" ]; then
	files=" *.scala"
fi
#hash func
##@ getpath za $1?
# hash this script too: a changed build script invalidates the cache
fileshash="`$hashfunc ../$0 $files`" #must be compiled with same bashbeans script
#echo "$fileshash"
#build
builderror="0"
if [ "$Unforce" != "" ]; then
	echo "You told me not to recompile."
elif [ "$Force" == "" -a -e $hashpath -a "$fileshash" == "`cat $hashpath`" ]; then
	echo "I don't think I need to recompile. (use force if neccesary.)"
else
	echo "Compiling:"$files
	#fast scala compile (loads compile deamon)
	compileErr=$( { fsc $Optimise -cp "`$pathprocess ../lib/clojure.jar`" -d ../class -extdirs ../lib/lwjgl-2.7.1/jar/ $files ; } 2>&1 )
	builderror=$?
	if [ "$compileErr" != "" ]; then
		echo "$compileErr"
		# presumably the failed attempt started the fsc daemon, so a
		# single re-exec of the original command line should succeed
		if [ "$compileErr" == "Could not connect to compilation daemon." ]; then
			echo "OK, let's try this again..."
			echo ""
			cd ..
			echo "\$ $0 $originalcall"
			$0 $originalcall
			exit 0
		fi
	fi
	#write hashes
	# only remember the hash when the build succeeded, so a failed
	# build is retried on the next run
	if [ "$builderror" == "0" ]; then
		echo "$fileshash" > $hashpath
	fi
fi
buildtime="$(date +%s)"
elapsedtime="$(expr $buildtime - $starttime)"
echo "Done("$builderror") in ${elapsedtime}s."
#run it
if [ "$builderror" == "0" -a "$Run" != "" ]; then
	cd ..
	scala $Optimise -cp "`$pathprocess src/AngryPigs:class:class/AngryPigs:lib/lwjgl-2.7.1/jar/lwjgl.jar:lib/lwjgl-2.7.1/jar/lwjgl_util.jar:lib/clojure.jar`" -Djava.library.path="`$pathprocess lib/lwjgl-2.7.1/native/linux/:lib/lwjgl-2.7.1/native/macosx/:lib/lwjgl-2.7.1/native/windows/`" AngryPigs.Game
fi
| true |
d3916105c0f8639e2e169834f6eaff40371dad11 | Shell | Pra3t0r5/repo_cloner | /praetors_repo_cloner.sh | UTF-8 | 1,846 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Clone all repositories listed in repositories.conf from GitHub and/or
# Bitbucket, then optionally run extra scripts from ./customscripts.
# NOTE(review): needs root for 'apt'; passwords end up in the clone URL
# and are therefore visible in 'ps' output and shell history of child
# processes -- consider a credential helper instead.
ERR="None"
apt -y install figlet
figlet RepoCloner
echo "|-------------------< By Pra3t0r5 >------------------|"
echo "Ingresa usuario Github"
read -r github_username
echo "Ingresa contraseña Github"
read -rs github_password
echo "Ingresa usuario Bitbucket"
read -r bitbucket_username
echo "Ingresa contraseña Bitbucket"
read -rs bitbucket_password
read -p "Usar Scripts Adicionales? (Y/N) " -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]] ; then
	execute_aditionals="0"
else
	execute_aditionals="1"
fi
script_path=$(readlink -f -- "$0")
dir=${script_path%/*}
config=$dir"/repositories.conf"
custom_scripts=$dir/"customscripts"
while read -r A ; do
	# Reset the per-repo status. Bug fix: without this, one failed
	# clone made every later (successful) repository keep printing the
	# stale "ERROR CLONANDO" message.
	ERR="None"
	repo_url=$(echo "$A" | awk -F" " '{print $1}');
	repo_host=$(echo "$A" | awk -F/" " '{print $1}');
	repo_name="${repo_url##*/}"
	case "$repo_host" in
		*github.com*)
			username="${github_username}"
			password="${github_password}"
			host="github.com"
			git clone https://"${username}":"${password}"@"${host}"/"${username}"/"${repo_name}"
			;;
		*bitbucket.org*)
			username="${bitbucket_username}"
			password="${bitbucket_password}"
			host="bitbucket.org"
			file_path=$(echo ${repo_url} | cut -d@ -f2 | cut -d/ -f2- | cut -d? -f1)
			git clone https://"${username}":"${password}"@bitbucket.org/"${file_path}"
			;;
	esac
	# $? here is the exit status of the git clone in the matched branch
	if [ $? -ne 0 ] ; then ERR="ERROR CLONANDO '${repo_url##*/}'" ; fi
	if [ "$ERR" != "None" ] ; then
		echo "${ERR}"
	else
		echo "${repo_url##*/} CLONADO EXITOSAMENTE" ;
	fi
done < "$config";
if [ "${execute_aditionals}" = "1" ] ; then
	for each in "${custom_scripts}"/*.* ; do bash "$each" ; done
else
	echo "NO SE EJECUTARAN SCRIPTS ADICIONALES"
fi
echo "FINALIZADO"
exit
| true |
56c07e31d8a8e1146a6291046cd364049ff92b79 | Shell | snake-way/xcash-dpops | /scripts/firewall/firewall_shared_delegates_script.sh | UTF-8 | 4,970 | 2.921875 | 3 | [
"MIT",
"ISC"
] | permissive | #!/bin/sh
# iptables script for server
# if you changed any default ports change them in the firewall as well
# NOTE(review): ${SSH_PORT_NUMBER} and ${DEFAULT_NETWORK_DEVICE} must be
# exported in the environment before this script runs -- confirm the
# installer provides them. Rule order is significant throughout; the
# catch-all DROPs at the bottom must stay last.
# ACCEPT all packets at the top so each packet runs through the firewall rules, then DROP all INPUT and FORWARD if they dont use any of the firewall settings
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
# remove all existing IP tables
iptables -t nat -F
iptables -t mangle -F
iptables -t mangle -X
# (duplicate 'mangle -F' kept as in the original)
iptables -t mangle -F
iptables -t raw -F
iptables -t raw -X
iptables -F
iptables -X
# ip table prerouting data (this is where you want to block ddos attacks)
# Drop all invalid packets
iptables -t mangle -A PREROUTING -m conntrack --ctstate INVALID -j DROP
# Prevent syn flood
iptables -A INPUT -p tcp ! --syn -m state --state NEW -j DROP
iptables -t mangle -A PREROUTING -p tcp -m conntrack --ctstate NEW -m tcpmss ! --mss 536:65535 -j DROP
# drop TCP packets with illegal/unusual flag combinations (scan/flood fingerprints)
iptables -t mangle -A PREROUTING -p tcp --tcp-flags FIN,SYN,RST,PSH,ACK,URG NONE -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags FIN,SYN FIN,SYN -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags FIN,RST FIN,RST -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags FIN,ACK FIN -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ACK,URG URG -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ACK,FIN FIN -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ACK,PSH PSH -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ALL ALL -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ALL NONE -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ALL FIN,PSH,URG -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ALL SYN,FIN,PSH,URG -j DROP
iptables -t mangle -A PREROUTING -p tcp --tcp-flags ALL SYN,RST,ACK,FIN,URG -j DROP
# filter data for INPUT, FORWARD, and OUTPUT
# Accept any packets coming or going on localhost
iptables -I INPUT -i lo -j ACCEPT
# keep already established connections running
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# block ip spoofing. these are the ranges of local IP address.
# NOTE(review): 10.12.242.0/24 is whitelisted ahead of the RFC1918 drops --
# presumably the host's own private subnet; verify before reusing elsewhere.
iptables -A INPUT -s 10.12.242.0/24 -j ACCEPT
iptables -A INPUT -s 10.0.0.0/8 -j DROP
iptables -A INPUT -s 169.254.0.0/16 -j DROP
iptables -A INPUT -s 172.16.0.0/12 -j DROP
iptables -A INPUT -s 127.0.0.0/8 -j DROP
iptables -A INPUT -s 192.168.0.0/24 -j DROP
iptables -A INPUT -s 224.0.0.0/4 -j DROP
iptables -A INPUT -d 224.0.0.0/4 -j DROP
iptables -A INPUT -s 240.0.0.0/5 -j DROP
iptables -A INPUT -d 240.0.0.0/5 -j DROP
iptables -A INPUT -s 0.0.0.0/8 -j DROP
iptables -A INPUT -d 0.0.0.0/8 -j DROP
iptables -A INPUT -d 239.255.255.0/24 -j DROP
iptables -A INPUT -d 255.255.255.255 -j DROP
# block all traffic from ip address (iptables -A INPUT -s ipaddress -j DROP)
#unblock them using iptables -D INPUT -s ipaddress -j DROP
# Block different attacks
# block one computer from opening too many connections (100 simultaneous connections) if this gives trouble with post remove this or increase the limit
iptables -t filter -I INPUT -p tcp --syn --dport 80 -m connlimit --connlimit-above 100 --connlimit-mask 32 -j DROP
iptables -t filter -I INPUT -p tcp --syn --dport 18283 -m connlimit --connlimit-above 100 --connlimit-mask 32 -j DROP
# block port scans
# this will lock the IP out for 1 day
iptables -A INPUT -m recent --name portscan --rcheck --seconds 86400 -j DROP
iptables -A FORWARD -m recent --name portscan --rcheck --seconds 86400 -j DROP
iptables -A INPUT -m recent --name portscan --remove
iptables -A FORWARD -m recent --name portscan --remove
# touching any of these well-known service ports marks the source in the
# 'portscan' list (checked above on every later packet)
iptables -A INPUT -p tcp -m tcp -m multiport --destination-ports 21,25,110,135,139,143,445,1433,3306,3389 -m recent --name portscan --set -j DROP
iptables -A FORWARD -p tcp -m tcp -m multiport --destination-ports 21,25,110,135,139,143,445,1433,3306,3389 -m recent --name portscan --set -j DROP
# Accept specific packets
# Accept ICMP
iptables -A INPUT -p icmp -j ACCEPT
# Accept HTTP
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
# Accept XCASH
iptables -A INPUT -p tcp --dport 18280 -j ACCEPT
iptables -A INPUT -p tcp --dport 18281 -j ACCEPT
iptables -A INPUT -p tcp --dport 18283 -j ACCEPT
# Allow ssh (allow 100 login attempts in 1 hour from the same ip, if more than ban them for 1 hour)
iptables -A INPUT -p tcp -m tcp --dport ${SSH_PORT_NUMBER} -m state --state NEW -m recent --set --name DEFAULT --rsource
iptables -A INPUT -p tcp -m tcp --dport ${SSH_PORT_NUMBER} -m state --state NEW -m recent --update --seconds 3600 --hitcount 100 --name DEFAULT --rsource -j DROP
iptables -A INPUT -p tcp -m tcp --dport ${SSH_PORT_NUMBER} -j ACCEPT
# Redirect HTTP to port 18283
iptables -A PREROUTING -t nat -i ${DEFAULT_NETWORK_DEVICE} -p tcp --dport 80 -j REDIRECT --to-ports 18283
# DROP all INPUT and FORWARD packets if they have reached this point
iptables -A INPUT -j DROP
iptables -A FORWARD -j DROP
| true |
280feae1db297a57c1ce98c0ce704d9bfab21797 | Shell | EnBr55/dotfiles | /.zshrc | UTF-8 | 5,878 | 2.8125 | 3 | [] | no_license | # If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/home/ben/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="lambda"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git docker docker-compose zsh-syntax-highlighting history-substring-search)
source $ZSH/oh-my-zsh.sh
# vi-mode plugin sourced from the system plugin directory
source /usr/share/zsh/plugins/zsh-vi-mode/zsh-vi-mode.plugin.zsh
# removing delay for entering insert mode
KEYTIMEOUT=1
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
export LANG=en_AU.UTF-8
export EDITOR='vim'
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
#   export EDITOR='vim'
# else
#   export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
alias o="xdg-open"
# WARNING: re-runs the previous shell command with sudo -- review the
# history entry before using this.
alias fuck='sudo $(fc -ln -1)'
alias npmcode='code . && npm start'
alias i3config='vim ~/.config/i3/config'
# WARNING: destructive -- deletes everything under ~/Documents/temp/code
alias testing='cd ~/Documents/temp/code && rm -r ~/Documents/temp/code/*'
alias uni='cd ~/Documents/University/Courses/Units'
alias haguichi='sudo systemctl start logmein-hamachi.service && haguichi'
# bare-repo dotfiles management (git dir in ~/.dotfiles, work tree in ~)
alias dotfiles='/usr/bin/git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME'
# Open new terminal in same location
alias nt='urxvt &'
alias sqa='cd ~/Dropbox/Uni/SQA-Project/workbook'
alias cdp='cd ~/Uni/Units/INFO2222/info2222_project'
alias cdu='cd ~/Dropbox/Uni/Units'
alias timetable='feh ~/Dropbox/Uni/timetable.png &'
alias vpn='sudo openconnect vpn.sydney.edu.au'
alias qpy='bash ~/qpy.sh'
alias py='ipython --TerminalInteractiveShell.editing_mode=vi'
alias c='calcurse -D ~/Dropbox/Uni/calendar/calcurse'
alias jrl='~/jrl.sh'
alias cam='vlc v4l2://:input-slave=alsa://:v4l-vdev="/dev/video0"'
alias plan='vim ~/Dropbox/Uni/plan.md'
alias cdq='cd ~/Dropbox/Uni/Quantum'
alias qn='vim ~/Dropbox/Uni/Quantum/global_notes.md'
alias pi='pip install --user'
alias todo='~/todo.sh'
export BROWSER=firefox
#export PATH=/opt/anaconda/bin:$PATH
export TERMINFO=/usr/share/terminfo
export APT_HOME=/Applications/APT_v2.8.3
export PATH=$APT_HOME:$PATH
export APT_ARCH=LINUX
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
#
# Run the 'conda init' bootstrap (hook eval, with conda.sh / PATH
# fallbacks) and activate the named environment. The two functions
# below previously carried identical copies of this body.
# $1 - conda environment name
_conda_env() {
    __conda_setup="$('/opt/anaconda/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
    if [ $? -eq 0 ]; then
        eval "$__conda_setup"
    else
        if [ -f "/opt/anaconda/etc/profile.d/conda.sh" ]; then
            . "/opt/anaconda/etc/profile.d/conda.sh"
        else
            export PATH="/opt/anaconda/bin:$PATH"
        fi
    fi
    unset __conda_setup
    conda activate "$1"
}

# Activate the 'physics' environment.
phys() {
    _conda_env physics
}

# Activate the 'qutip-dev' environment.
qutip-dev() {
    _conda_env qutip-dev
}
# <<< conda initialize <<<

# dynamic-colors: terminal colour-scheme switcher. Restore the last
# chosen theme (its name is stored in ~/.theme) on shell startup.
export PATH="$HOME/.dynamic-colors/bin:$PATH"
source $HOME/.dynamic-colors/completions/dynamic-colors.zsh
alias theme='~/Scripts/theme.sh'
alias themes='dynamic-colors list'
dynamic-colors switch $(cat ~/.theme)
| true |
840dbde19dbb0be9c700d014766a92aeb55451fd | Shell | dzamoyoni/SheBash | /linuxPrac5/test3b.sh | UTF-8 | 348 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Removing a set trap
#
trap "echo 'Sorry... Ctrl-C is trapped.'" SIGINT
#
count=1
while [ $count -le 5 ]
do
    echo "Loop #$count"
    sleep 1
    # POSIX arithmetic expansion; the original's $[ ... ] form is
    # deprecated and undocumented in modern bash.
    count=$(( count + 1 ))
done
#
# Remove the trap
# 'trap - SIGINT' is the documented reset form; the original
# 'trap -- SIGINT' relied on trap's single-argument compatibility mode.
trap - SIGINT
echo "I just removed the trap"
#
count=1
while [ $count -le 5 ]
do
    echo "Second Loop #$count"
    sleep 1
    count=$(( count + 1 ))
done
#
| true |
767027fab5d6b96d1d8f7df789dca4aa110ea1dd | Shell | wangjuanmt/kb_script | /arch/util/install_polipo.sh | UTF-8 | 828 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
# Convert Shadowsocks into an HTTP proxy (127.0.0.1:8123)
sudo pacman -S --noconfirm --needed polipo
# Run polipo as non-root user
# (the commented steps below are kept for reference; per the notes they
# are only needed when the home directory is recreated)
# Create new config file from sample config (not required if not formatting home dir)
#cp /etc/polipo/config.sample $USER_HOME/.poliporc
# Create cache dir (not required if not formatting home dir)
#mkdir $USER_HOME/.polipo-cache
# Change cache folder to a writable location (not required if not formatting home dir)
#printf "\ndiskCacheRoot=\"~/.polipo-cache/\"" | tee -a $USER_HOME/.poliporc
# Set socksParentProxy (not required if not formatting home dir)
#printf "\nsocksParentProxy=127.0.0.1:1080" | tee -a $USER_HOME/.poliporc
#printf "\nsocksProxyType=socks5" | tee -a $USER_HOME/.poliporc
# Then we can start polipo as non-root user like:
#polipo -c ~/.poliporc
| true |
3fa6f8443f804fbcd140045cf4f2ac0867fb7587 | Shell | Krl1/SO2_lab | /lab_02_bash/zadanie_2.sh | UTF-8 | 651 | 3.484375 | 3 | [] | no_license | #! /bin/bash
# Zadanie 2
# Given: directory D (first script parameter) and a regular file P
# (second parameter). P contains a (possibly empty) list of names,
# one per line. Create empty regular files in D with those names.
# Files that already exist must not be destroyed.

# Bug fix: require BOTH parameters. The original only checked for zero
# arguments, so with one argument `cat $2` below would hang reading
# stdin.
if [ $# -lt 2 ]
then
	echo "Podano za malo parametrow"
	exit 1
fi
if [ ! -d "$1" ]
then
	echo "Nie podano katalogu"
	exit 1
fi
if [ ! -f "$2" ]
then
	echo "Nie podano pliku"
	exit 1
fi
# Read line by line so names containing whitespace survive intact
# (the original `for item in $(cat ...)` split them into pieces).
while IFS= read -r item
do
	if [ ! -f "$1/$item" ]
	then
		touch -- "$1/$item"
	fi
done < "$2"
| true |
c7f692537eb353e186881329f7d7d62e7c73c184 | Shell | fossabot/admin-scripts | /bareos/maintain-catalog.sh | UTF-8 | 489 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# perform post-backup catalog maintenance
CATALOG="BareosCatalog"
STATS_DAYS=14

# Fail fast when the Bareos console client is not installed.
hash bconsole 2>/dev/null || { echo >&2 "You need to install bareos-bconsole. Aborting."; exit 1; }

# Feed the maintenance commands to bconsole. Redirection order matters:
# stdout is discarded while stderr still reaches the terminal. Testing
# the command directly replaces the original's after-the-fact
# `if ! [ $? -eq 0 ]` double negative.
if ! bconsole <<END_OF_DATA 2>&1 >/dev/null
@output /dev/null
@output
use catalog=${CATALOG}
update stats days=${STATS_DAYS}
prune stats yes
.bvfs_update
quit
END_OF_DATA
then
    echo >&2 "ERROR: Failed running post-backup maintenence for catalog ${CATALOG}."
    exit 1
fi
exit 0
| true |
0e657192b7aa3f6a1eef438994ecfa157c3305d4 | Shell | Hogeyama/dotfiles | /roles/user_script/files/vifm-fzf | UTF-8 | 614 | 3.484375 | 3 | [] | no_license | #!/bin/bash
set -eu
set -o pipefail

# Default vifm commands: FCMD opens a file, DCMD enters a directory.
FCMD=goto
DCMD=cd

# Minimal option parsing (-c/--cmd, -d/--dcmd); unknown arguments are
# silently ignored, matching the original behaviour.
while [ $# -gt 0 ]; do
  case "$1" in
    -c|--cmd)
      FCMD="$2"
      shift
      ;;
    -d|--dcmd)
      DCMD="$2"
      shift
      ;;
    *)
      ;;
  esac
  shift
done

FD_OPTS=(
  --hidden
  --no-ignore
  --exclude .git
  --exclude .hie
  --exclude .direnv
  --exclude dist-newstyle
  --exclude .stack-work
)
FZF_OPTS=(
  --preview "vifm-viewer {}"
  --preview-window right:50%:noborder
)

# Bug fix: quote the inner substitution -- a selection containing
# spaces was word-split into several realpath arguments before.
TARGET=$(realpath "$(fd "${FD_OPTS[@]}" | fzf "${FZF_OPTS[@]}")")
# Emit the vifm command for the selected target.
if [[ -d "${TARGET}" ]]; then
  echo "${DCMD} '${TARGET}'"
else
  echo "${FCMD} '${TARGET}'"
fi
| true |
b74f4e80b294e4f4182f6be40f0f2ef705efcb49 | Shell | cuchy/TTools | /xTotem/parser_runner.sh | UTF-8 | 197 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
mkdir -p parser_out
echo "Running parser..."
# Iterate with a glob instead of parsing `ls` output so file names with
# whitespace are handled; the -e guard skips the literal pattern when
# parser_data is missing or empty (where `ls` just printed an error).
for path in parser_data/*; do
	[ -e "$path" ] || continue
	mrt=${path##*/}
	echo -n " parsing $mrt..."
	./bgpdump -vm "parser_data/$mrt" > "parser_out/$mrt.bgp"
done
| true |
7080c12bd4a59572f1606d19735b859e72507a29 | Shell | redox-os/ipcd | /redoxer.sh | UTF-8 | 332 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Paths inside the Redox test image: the installed ipcd daemon and the
# freshly cross-built binary.
old_path="file:/bin/ipcd"
new_path="target/x86_64-unknown-redox/debug/ipcd"
# First boot after a build: replace the system ipcd with the new build
# and reboot so the replacement daemon is the one that gets started.
# (After the mv the -e test fails, so the reboot happens only once.)
if [ -e "${new_path}" ]
then
    mv -v "${new_path}" "${old_path}"
    shutdown --reboot
fi
# Subsequent boot: run each example binary named on the command line,
# printing a "# name #" banner before each.
while [ "$#" != "0" ]
do
    example="$1"
    shift
    echo "# ${example} #"
    "target/x86_64-unknown-redox/debug/examples/${example}"
done
| true |
80648cdb6906e43b8ae925ed1a31b9796b93d5fa | Shell | WarpRat/NTI-320 | /snmp_install.sh | UTF-8 | 690 | 3.78125 | 4 | [] | no_license | #!/bin/bash
#
#A small script to check linux distro and install snmp
#
#Will be expanded to configure snmp for cacti monitoring in the future

#Find distro ID (grep reads the file directly; the original
#cat | grep | cut | sed pipeline spawned an extra process)
distro=$(grep '^ID=' /etc/os-release | cut -d '=' -f 2 | sed 's/"//g')
#Set distro package manager and package names based on ID
case $distro in
	centos|rhel)
		pkg_man=yum
		pkg_name="net-snmp net-snmp-utils"
		;;
	fedora)
		pkg_man=dnf
		pkg_name="net-snmp net-snmp-utils"
		;;
	ubuntu)
		pkg_man=apt-get
		pkg_name="snmp snmp-mibs-downloader"
		;;
	*)
		# trailing newline added so the message does not glue onto the prompt
		printf "Unrecognized version of Linux, sorry\n" >&2
		exit 1
		;;
esac
#Install the packages. $pkg_name is intentionally unquoted so it
#word-splits into the individual package arguments.
$pkg_man install -y $pkg_name
#Make sure it's running
systemctl restart snmpd
| true |
fb75db01e61e00df7a241bffb4a53237dc3f7527 | Shell | ONB-RD/dm2e-ontologies | /src/main/bash/dm2e-data.profile.sh | UTF-8 | 678 | 2.890625 | 3 | [] | no_license | #!/bin/bash
#-----------------------------------------------#
# Copy this file to your home directory and     #
# uncomment anything that you want to change    #
# (every setting below ships commented out)     #
#-----------------------------------------------#
## Set these to run from a different repository
# SPARQL_DIR=$(realpath "../resources/sparql-queries")
# DM2E_EDM_JAR=$(realpath "../../../target/dm2e-edm-jar-with-dependencies.jar")
# export EDM_VALIDATION_JAR=$(realpath "../../../../edm-validation/target/edm-validation-jar-with-dependencies.jar")
## BASE_DIR defines the base dir where data will be written to
## default is './dm2e2edm'
# BASE_DIR="/data/dm2e2edm-$(date "+%Y-%m-%d")"
# BASE_DIR=/tmp/dm2e2edm
| true |
a39b5b0a64af018821f045a04116419ee38cb9ef | Shell | ChrisPerkinsCap/ubuntu-rpi-3-set-up | /setup-static-ip/static_ip_setup_functions_print.sh | UTF-8 | 2,909 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## PRINT VARIABLES ##

# Shared formatter: prints "\n<label>: <value>\n" when the value is
# non-empty, otherwise "\n<empty message>\n". Values are passed as
# printf ARGUMENTS rather than spliced into the format string, so a
# value containing '%' or '\' can no longer corrupt the output (the
# original ten near-identical functions all expanded the variable
# inside the format string).
# $1 - label, $2 - current value, $3 - message when unset/empty
function print_var() {
  local label="$1"
  local value="$2"
  local empty_msg="$3"
  if [[ -z "${value}" ]]; then
    printf '\n%s\n' "${empty_msg}"
  else
    printf '\n%s: %s\n' "${label}" "${value}"
  fi
}

function print_dns_server_1() {
  print_var "DNS Server 1" "${dns_server_1}" "DNS Server 1 is not set"
}

function print_dns_server_2() {
  print_var "DNS Server 2" "${dns_server_2}" "DNS Server 2 not set"
}

function print_dns_server_list() {
  print_var "DNS Server list" "${dns_server_list}" "DNS Server list not set"
}

function print_gateway_interface() {
  print_var "Network Gateway Interface" "${gateway_interface}" "The Gateway Interface is not set"
}

function print_ip_address() {
  print_var "IP Address" "${ip_address}" "IP Address not set"
}

function print_netmask() {
  print_var "Netmask" "${netmask}" "Netmask not set"
}

function print_net_adapter() {
  print_var "Network Adapter" "${network_adapter}" "The Network Adapter is not set"
}

function print_net_conf_file() {
  print_var "Network Configuration File" "${network_conf_file}" "The Network Configuration is not set"
}

function print_net_conf_path() {
  print_var "Network Configuration Path" "${network_conf_path}" "The Network Configuration is not set"
}

function print_net_manager() {
  print_var "Network Manager" "${network_manager}" "The Network Manager is not set"
}

# Print every variable in the original order, with the same banners and
# one-second pacing between entries.
function print_all_vars() {
  echo " VARIABLES CURRENT VALUES"
  echo "|--------------------------------------------|"
  echo "|--------------------------------------------|"
  sleep 1
  print_dns_server_1
  sleep 1
  print_dns_server_2
  sleep 1
  print_dns_server_list
  sleep 1
  print_gateway_interface
  sleep 1
  print_ip_address
  sleep 1
  print_netmask
  sleep 1
  print_net_adapter
  sleep 1
  print_net_conf_file
  sleep 1
  print_net_conf_path
  sleep 1
  print_net_manager
  sleep 1
  echo " END VARIABLES CURRENT VALUES"
  echo "|--------------------------------------------|"
  echo "|--------------------------------------------|"
}
dd8b4e644fa1305d987f95afa1422baee1b206bb | Shell | jorrite/now-dyn-dns | /main.sh | UTF-8 | 1,136 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# DIR is not referenced below; presumably kept for configs that use it.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Source the config passed as $1 (expected to define DOMAIN, SUBDOMAIN
# and TOKEN). Quoted -- bug fix -- so a config path containing spaces
# still resolves to a single filename.
. "$1"
# Current public IP of this machine.
IP=$(dig +short myip.opendns.com @resolver1.opendns.com)
# Is there already an A record with this name AND this IP? If so, the
# DNS entry is current and nothing needs to change.
RESULT=$(curl -s "https://api.zeit.co/v2/domains/$DOMAIN/records" \
	-H "Authorization: Bearer $TOKEN" | \
	jq -r --arg IP "$IP" \
	--arg NAME "$SUBDOMAIN" \
	'.[][] | select(.name==$NAME and .value==$IP) | .id')
if [ -z "$RESULT" ]
then
	#check if there is another record and delete it later on...
	ID=$(curl -s "https://api.zeit.co/v2/domains/$DOMAIN/records" \
		-H "Authorization: Bearer $TOKEN" | \
		jq -r --arg IP "$IP" \
		--arg NAME "$SUBDOMAIN" \
		'.[][] | select(.name==$NAME) | .id')
	# create the record pointing at the current IP first ...
	curl -X POST "https://api.zeit.co/v2/domains/$DOMAIN/records" \
		-H "Authorization: Bearer $TOKEN" \
		-H "Content-Type: application/json" \
		-d '{
		"name": "'$SUBDOMAIN'",
		"type": "A",
		"value": "'$IP'"
		}'
	#deleting earlier found ID, if any
	if [[ ! -z "$ID" ]]
	then
		curl -X DELETE "https://api.zeit.co/v2/domains/$DOMAIN/records/$ID" \
			-H "Authorization: Bearer $TOKEN"
	fi
fi
73fe258e86da27f6c8f647f2e5a5e0a951e18d01 | Shell | Cj-bc/libfile | /libfile.sh | UTF-8 | 416 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# libfile -- file methods
#
# Copyright 2018 (c) Cj-bc
# This software is released under MIT License
#
# @(#) v1.0
#

# parse file line by line, put them into one array
# @param <string file_path> <array reference_for_result>
# Appends to the caller's array (nameref); globbing is disabled around
# the loop so lines are stored verbatim.
File::ParseToArray() {
    local file=$1
    local -n ret=$2
    set -f
    local line
    # '|| [[ -n $line ]]' keeps a final line that lacks a trailing
    # newline -- the original silently dropped it.
    while IFS= read -r line || [[ -n $line ]]; do
        ret+=("$line")
    done < "$file"
    set +f
}
| true |
beaa8e1742dd58d84046298395f9328e14b671a5 | Shell | LinuxStory/linuxstory.dockernize | /wordpress/entrypoint.sh | UTF-8 | 396 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# entrypoint.sh --server 127.0.0.1 --site-root /var/www
# With exactly four arguments ($2 = server name, $4 = site root),
# render the nginx vhost config from the m4 template.
if [ "4" = "$#" ]; then
    cp /config/nginx.conf /etc/nginx/nginx.conf
    m4 -DWP_SERVER_NAME="$2" -DWP_SITE_ROOT="$4" /config/wp-nginx.conf > /etc/nginx/sites-available/default
fi
service php7.4-fpm start
# nginx fastcgi cache directory, owned by the worker user
mkdir -p /fastcgi-cache
chown www-data /fastcgi-cache
chmod 755 /fastcgi-cache
service nginx start
# Hand PID 1 over to an interactive shell to keep the container alive.
exec /bin/bash
| true |
fdf7f7cfe4a1f0ea4d1aa036952c20828a375c67 | Shell | kubadawczynski/bitbucket_clonner | /getAllRepos.sh | UTF-8 | 825 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#Script to get all repositories under a user from remote bitbucket to private bitbucket server
LOGIN_CLOUD=$1
SERVER_TO=$2
PROJECT=$3

# Create an (empty) repository on the private Bitbucket Server.
# $1 - repository name
# NOTE(review): admin credentials are hard-coded and $1 is spliced
# straight into the JSON payload; move both to a safer mechanism.
createRepo(){
	curl -u admin_user:admin_pass -H "Content-Type: application/json" -X POST http://$SERVER_TO:7990/rest/api/1.0/projects/$PROJECT/repos -d '{ "name":"'$1'","scmId":"git","forkable":true }'
}

# Mirror every repository of cloud user $1 into the private server.
cloneRepo(){
	for repo_name in `curl -u ${1} https://api.bitbucket.org/1.0/users/${1} | awk -F':|}|,' '{ for (x=1;x<=NF;x++) if ($x~"\"slug\"") print $(x+1) }'| sed 's/"//g' `
	do
		echo "$repo_name"
		git clone --mirror ssh://git@bitbucket.org/${1}/$repo_name &>/dev/null
		# Bug fix: skip this repo when the clone failed. Without the
		# guard, the later 'cd ..' and 'rm -rf' ran from (and on) the
		# wrong directory.
		cd "$repo_name.git" || continue
		pwd
		createRepo "$repo_name"
		git push --mirror 'ssh://git@'$SERVER_TO':7999/'$PROJECT'/'$repo_name'.git'
		cd ..
		rm -rf -- "$repo_name.git"
	done
}
cloneRepo $LOGIN_CLOUD
| true |
e3f305364f4d10ef3ddfc349dc8c0f8447d5826f | Shell | swatgoss/UrukDroid-1.6 | /UrukDroid_1.6/usr/bin/tiwlan_check.sh | UTF-8 | 262 | 2.78125 | 3 | [] | no_license | #!/bin/sh
# tiwlan_check.sh : 25/01/10
# g.revaillot, revaillot@archos.com

# Seed the user-writable tiwlan.ini from the system template on first
# run, then make the wifi directory traversable and the config
# world-writable (expansions quoted; paths are constants today but the
# quoting keeps edits safe).
TIWLAN_TEMPLATE="/etc/tiwlan.ini"
TIWLAN_USER="/data/misc/wifi/tiwlan.ini"
if [ ! -f "$TIWLAN_USER" ] ; then
	cp "$TIWLAN_TEMPLATE" "$TIWLAN_USER"
fi
chmod a+rx /data/misc/wifi/
chmod 666 "$TIWLAN_USER"
| true |
5a32c0684f1ea1e77d2b2d54c53616334fbed99f | Shell | cuishao23/github2021 | /nms_starter/package.sh | UTF-8 | 2,331 | 2.765625 | 3 | [] | no_license | #!/bin/bash
export LC_ALL="en_US.UTF-8"
mkdir -p .files_temp/package
PACKAGE_PATH=$(pwd)/.files_temp/package
RELEASE_PATH=$(pwd)/../../../10-common/version/release/linux/nms
PYTHON_DEPOT_PATH=$(pwd)/../nms_python
# copy everything except the temp dir and this script into the package
ls | grep -v ".files_temp" | grep -v "package.sh" | xargs -i \cp -a {} .files_temp/package
# package the python3 modules
pythonpath=${PACKAGE_PATH}/starter_python3
export PYTHONPATH=${pythonpath}/lib/python3.5/site-packages
PIP=/opt/midware/python3/bin/pip3
# PIP_OPTION is intentionally unquoted at the call site so it splits
# into the two separate pip arguments.
PIP_OPTION="--prefix=${pythonpath} --find-links=${PYTHON_DEPOT_PATH}"
\cp -a ${PYTHON_DEPOT_PATH}/* .files_temp/
# Install order matters (wheels first, then the local source tree in
# .files_temp/, then the requests stack). The list replaces twenty
# copy-pasted `pip install` lines; entries are installed one by one so
# behaviour matches the original exactly.
PACKAGES=(
    .files_temp/redis-3.3.7-py2.py3-none-any.whl
    .files_temp/docutils-0.15.2-py3-none-any.whl
    .files_temp/setuptools-42.0.2-py2.py3-none-any.whl
    .files_temp/lockfile-0.12.2-py2.py3-none-any.whl
    .files_temp/python_daemon-2.2.4-py2.py3-none-any.whl
    .files_temp/pika-1.1.0-py2.py3-none-any.whl
    .files_temp/setproctitle-1.1.10.tar.gz
    .files_temp/SQLAlchemy-1.3.8.tar.gz
    .files_temp/PyMySQL-0.9.3-py2.py3-none-any.whl
    .files_temp/
    .files_temp/urllib3-1.25.7-py2.py3-none-any.whl
    .files_temp/certifi-2019.11.28-py2.py3-none-any.whl
    .files_temp/chardet-3.0.4-py2.py3-none-any.whl
    .files_temp/idna-2.8-py2.py3-none-any.whl
    .files_temp/requests-2.22.0-py2.py3-none-any.whl
    .files_temp/xlrd-1.2.0-py2.py3-none-any.whl
    .files_temp/decorator-4.4.2-py2.py3-none-any.whl
    .files_temp/py-1.8.1-py2.py3-none-any.whl
    .files_temp/retry-0.9.2-py2.py3-none-any.whl
    .files_temp/APScheduler-3.6.3-py2.py3-none-any.whl
)
for pkg in "${PACKAGES[@]}"; do
    ${PIP} install "${pkg}" ${PIP_OPTION}
done
# copy the Phytium (FT) psycopg2 dependency
\cp -a ${PYTHON_DEPOT_PATH}/ft/psycopg2 ${pythonpath}/lib/python3.5/site-packages/
# make starter.bin
cd .files_temp
makeself package/ ./nms_starter.bin "Installing nms starter..." ./install.sh
mv ./nms_starter.bin ${RELEASE_PATH}/
cd -
rm -rf .files_temp
4132617471303190cee2edf69e3c4012ad880ba5 | Shell | petronny/aur3-mirror | /i386-haiku-binutils-git/PKGBUILD | UTF-8 | 945 | 3.40625 | 3 | [] | no_license | pkgname=i386-haiku-binutils-git
pkgver=20120309
pkgrel=1
pkgdesc="A set of programs to assemble and manipulate binary and object files (Haiku, git repo)"
arch=('i686' 'x86_64')
url="http://haiku-os.org/development"
license=('GPL')
depends=('glibc')
makedepends=(git)
_gitroot=git://git.haiku-os.org/buildtools
_gitname=buildtools
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [[ -d "$_gitname" ]]
then
cd "$_gitname" && git pull origin
msg "The local files are updated."
else
git clone "$_gitroot" "$_gitname"
fi
msg "GIT checkout done or server timeout"
msg "Starting build..."
rm -rf "$srcdir/$_gitname-build"
cp -r "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build/binutils"
./configure --prefix=/usr --target=i386-haiku --host=$CHOST --build=$CHOST --disable-nls
make
make DESTDIR=$pkgdir install
rm -rf $pkgdir/usr/lib
rm -rf $pkgdir/usr/share/{info,man}
} | true |
56a8019b0953c5327c7caf84eac0203d928b0efd | Shell | petronny/aur3-mirror | /live-helper/PKGBUILD | UTF-8 | 698 | 3.34375 | 3 | [] | no_license | # Contributor: Mark Pustjens <pustjens@dds.nl>
pkgname=live-helper
pkgver=20100503
pkgrel=1
pkgdesc="Tool to create debian-based live cd/dvd/usb images."
arch=('i686' 'x86_64')
url="http://live.debian.net/"
license=('GPL')
depends=('debootstrap' 'bash')
# No static sources: the code is fetched straight from git inside build().
source=()
_gitroot="git://live.debian.net/git/live-helper.git"
_gitname="live-helper"
# makepkg entry point: clone (or update) the upstream git checkout, then run
# the upstream Makefile's install target straight into the package root.
# $srcdir, $pkgdir and the msg helper are provided by makepkg.
build() {
  cd ${srcdir}
  msg "Connecting to GIT server...."
  # Reuse an existing checkout if present, otherwise clone from scratch.
  if [ -d $_gitname ] ; then
    cd $_gitname && git pull origin
    msg "The local files are updated."
  else
    git clone $_gitroot $_gitname
  fi
  msg "GIT checkout done or server timeout"
  msg "Starting make..."
  #
  # BUILD HERE
  #
  cd ${srcdir}/${_gitname}
  make install DESTDIR=${pkgdir}
}
| true |
30b04b3846ff4977e6679b2fef1a4a7ae463e165 | Shell | uacode/improv-web | /.travis/unittest.sh | UTF-8 | 688 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# CI build script for the improv-web Laravel app: install PHP and NPM
# dependencies, build static assets, run the unit tests, and report
# coverage to Code Climate. Aborts on the first failing command (set -e).
set -e
cd src
# Create an empty .env so later steps do not fail on a missing file
# (presumably consumed by the Laravel configuration -- confirm).
touch .env
echo "Installing Composer dependencies..."
composer install --no-interaction
# Setup coverage reporter
curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
chmod +x ./cc-test-reporter
./cc-test-reporter before-build
# Travis, by default, has older version of NPM
# Upgrade it to a minimum acceptable version for us
npm i -g npm@6.7.0
npm ci
# Static assets are needed for Laravel Mix \ unittests that test for Blade
echo "Generating static assets"
npm run development
echo "Running unit tests..."
php vendor/bin/phpunit
# NOTE(review): under `set -e` a failing phpunit exits the script before this
# line, so after-build only ever runs with exit code 0 -- confirm intended.
./cc-test-reporter after-build -t clover --exit-code $?
| true |
5b63726d419debcb27fb0d38e4951ce594c3f73a | Shell | architus/architus | /rabbitmq/init.sh | UTF-8 | 1,717 | 4.625 | 5 | [
"MIT"
] | permissive | #!/bin/sh
log() {
    # Emit one diagnostic line on stderr in an easily grep-able format.
    # Usage:
    #   log [message]
    printf '%s\n' "*** init.sh: ${1-} ***" >&2
}
create_user() {
    # Repeatedly attempts to create the RabbitMQ user until it succeeds.
    # Globals:
    #   RABBITMQ_USER, RABBITMQ_PASSWORD - credentials for the new user
    # Usage:
    #   create_user
    initial_delay="5"
    backoff="5"
    # Give the freshly started server a head start before the first attempt.
    sleep "$initial_delay"
    log "Starting to configure RabbitMQ server."
    until try_create_user "$RABBITMQ_USER" "$RABBITMQ_PASSWORD"
    do
        log "Creating user failed; waiting $backoff seconds to try again."
        # Use the configured backoff; the original slept a hard-coded 5s,
        # contradicting the message above.
        sleep "$backoff"
    done
    log "User '$RABBITMQ_USER' with password '$RABBITMQ_PASSWORD' completed."
    log "Log in the WebUI at port 15672 (example: http://localhost:15672)"
}
try_create_user() {
    # Tries to create the RabbitMQ user once.
    # If successful, returns a zero exit code.
    # Otherwise, it should be retried.
    # Usage:
    #   try_create_user [user] [password]
    user="$1"
    password="$2"
    # Use `&&\` at the end of each command
    # to make the function's return code non-zero if any of them fail,
    # and also for them to short-circuit
    # (i.e. if an earlier one fails,
    # the function returns early without executing the later ones)
    #
    # BUGFIX: set_permissions must be given the USER name. The original
    # passed "$password" here, so rabbitmqctl tried to grant permissions to
    # a non-existent user named after the password, failed, and the caller's
    # retry loop could never terminate successfully.
    rabbitmqctl await_startup >/dev/null 2>&1 &&\
        rabbitmqctl add_user "$user" "$password" 2>/dev/null &&\
        rabbitmqctl set_user_tags "$user" administrator &&\
        rabbitmqctl set_permissions -p / "$user" ".*" ".*" ".*"
}
# $@ is used to pass arguments to the rabbitmq-server command.
# For example if you use it like this: docker run -d rabbitmq arg1 arg2,
# it will be as you run in the container rabbitmq-server arg1 arg2
log "Starting RabbitMQ server and waiting to configure."
# create_user runs in the background and polls until the server -- started in
# the foreground below, which keeps the container alive -- accepts commands.
(create_user & rabbitmq-server "$@")
| true |
8f365a4e9b4a9031804b8c0def05c2d8c71a7394 | Shell | necla-ml/gen-dnn | /regr.sh | UTF-8 | 9,367 | 3.578125 | 4 | [
"Apache-2.0",
"BSD-3-Clause",
"Intel"
] | permissive | #!/bin/bash
# Usage:
# regr.sh {FWD*|BWD_D|BWD_W*} ...other bench.sh args
#
#if [ $NLC_BASE ]; then # NEC SX Aurora VE
# VE_EXEC=ve_exec
#fi
BUILDDIR=build
if [ "`uname -m`" = "SX-ACE" ]; then BUILDDIR=build-sx; fi
echo "BUILDDIR: $BUILDDIR"
VE_EXEC=''
# if compiled and VE_EXEC existed, use that path
if [ -f "${BUILDDIR}/bash_help.inc" ]; then
# snarf some CMAKE variables
source "${BUILDDIR}/bash_help.inc"
#echo " From bash_help_inc, CMAKE_CROSSCOMPILING_EMULATOR is ${CMAKE_CROSSCOMPILING_EMULATOR}"
#echo " NECVE is ${NECVE}"
#echo " VE_EXEC is ${VE_EXEC}"
if [ "${VE_EXEC}" ]; then
if [ "${CMAKE_CROSSCOMPILING_EMULATOR}" ]; then
VE_EXEC="${CMAKE_CROSSCOMPILING_EMULATOR}"
if [ ! -x "${VE_EXEC}" ]; then
VE_EXEC='';
echo "cmake crosscompiling emulator ${CMAKE_CROSSCOMPILING_EMULATOR} [such as ve_exec] not available"
fi
fi
if [ "${VE_EXEC}" = "" -a "${NECVE}" ]; then
# otherwise check for VE_EXEC in PATH on this machine
for try in ve_exec /opt/nec/ve/bin/ve_exec; do
if { $try --version 2> /dev/null; } then
VE_EXEC="$try";
break;
fi
done
fi
if [ ! "${VE_EXEC}" ]; then VE_EXEC="echo Not-Running"; fi
fi
# In this script, we do need ve_exec, because we are running test
#executables directly, rather than via a 'make' target
echo "VE_EXEC : ${VE_EXEC}"
fi
ORIGINAL_CMD="$0 $*"
usage() {
echo " command: $ORIGINAL_CMD"
echo "$0 usage:"
echo " regr.sh [threads] {[<benchdnn_arg>|<batch_file>|FWD*|BWD_D|BWD_W*|ALEX|MINI] ...}"
head -n140 "$0" | grep "^[^#]*.)\ #"
awk '/getopts/{flag=1;next} /done/{flag=0} flag&&/^[^#]+\) #/; flag&&/^ *# /' $0
echo " Substitutions:"
echo " FWD|BWD_D|BWD_W* --> test_fwd_regrssion, ..."
echo " ALL --> shortish regression tests (all dirns)"
echo " ALEX --> a series of alexnet tests (all dirns)"
echo " <file> found? --> copy to build dir and use --batch=<file>"
echo " Examples: ./regr.sh 6 FWD # FWD 6 threads to FWD-t6-<host>.log"
echo " ./regr.sh FWD # FWD, default threads to FWD-<host>.log"
echo " ./regr.sh 6 --dir=BWD_WB conv_alexnet # 6 thread BWD_WB alexnet"
echo " ./regr.sh 6 --dir=BWD_W conv_regression_group # fast bug-hunting"
#./bench.sh -h
#exit 0
}
THREADS="$1"
re_nonneg='^[0-9]+$' # reg expr for zero or positive int
if ! [[ "$THREADS" =~ $re_nonneg ]]; then # non-digits? no threads argument
THREADS=""
else
shift # gobble the threads arg
fi
# Hack : NECVE does not support omp yet
# NEW [2108] NECVE support openmp ... (supposedly)
#if [ "${NECVE}" ]; then
# THREADS=1
#fi
if [ "$#" -lt 1 ]; then usage; exit; fi
ARGS=($*)
BASE=''
COPY="cp -uav"
if [ "`uname -m`" = "SX-ACE" ]; then COPY="cp -a"; fi
echo "COPY : $COPY"
batch_check() {
    # Resolve argument $1 to a benchdnn batch file. If it names a file in one
    # of the known input directories, copy it into the build tree and rewrite
    # the global BATCH to a "--batch=inputs/<name>" option; otherwise leave
    # BATCH as the literal argument and report it.
    # Globals read: COPY, BUILDDIR. Global written: BATCH (the "return value").
    BATCH="$1"
    # if batch is a file, copy to build dir and prepend --batch=
    for DIR in ./tests/benchdnn ./tests/benchdnn/inputs .; do
        #echo "file? ${DIR}/${BATCH}"
        if [ -f "${DIR}/${BATCH}" ]; then
            ${COPY} "${DIR}/${BATCH}" "./${BUILDDIR}/tests/benchdnn/inputs/${BATCH}"
            BATCH="--batch=inputs/${1}"
            return
        fi
    done
    echo "Not-a-file: ${BATCH}"
}
nopt=0
SXskip=''
if [ `uname -m` == "SX-ACE" ]; then SXskip='--skip-impl=sx2'; fi
for ((i=0; i<${#ARGS[*]}; ++i)); do
#echo "$i : ${ARGS[i]}"
xform="${ARGS[i]}"
chkfile=0
case $xform in
FWD|FWD_B|FWD_D) # mostly FWD_B and some with --merge=RELU
BASE="FWD"; xform="test_fwd_regression"; chkfile=1; ((++nopt))
;;
BWD_D) # backward data
BASE="BWD_D"; xform="test_bwd_d_regression"; chkfile=1; ((++nopt))
;;
BWD_W*) # backward weights (runs BWD_WB tests)
BASE="BWD_W"; xform="test_bwd_w_regression"; chkfile=1; ((++nopt))
;;
ALL) # test_conv_regression mix
BASE="${xform}"; xform="test_conv_regression"; chkfile=1; ((++nopt))
;;
ALEX|ALEXNET) # alexnet (mix)
BASE="ALEX";
xform="--dir=FWD_B --batch=inputs/conv_alexnet --dir=BWD_D --batch=inputs/conv_alexnet --dir=BWD_WB --batch=inputs/conv_alexnet"
((++nopt))
;;
A1) # alexnet conv1, mb=8
BASE="A1";
xform='g1mb8ic3ih227oc96oh55kh11sh4ph0n"mini:conv1"';
if [ "`uname -m`" = "SX-ACE" ]; then
xform='g1mb8ic3ih60oc32oh25kh11sh4ph0n"minisx:conv1"';
fi;
((++nopt))
;;
A3) # alexnet conv3, mb=8
BASE="A3";
xform='g1mb8ic256ih13oc384oh13kh3ph1n"mini:conv3"'
if [ "`uname -m`" = "SX-ACE" ]; then
xform='g1mb8ic32ih13oc48oh13kh3ph1n"minisx:conv3"'
fi;
((++nopt))
;;
minifwd*) # fast fwd minialex
BASE="minifwd"
xform="--dir=FWD_B ${SXskip} --batch=inputs/minialex"
((++nopt))
;;
minibwd_d*) # fast minialex
BASE="minibwd_d"
xform="--dir=BWD_D ${SXskip} --batch=inputs/minialex"
((++nopt))
;;
minibwd_w*) # fast minialex
BASE="minibwd_w"
xform="--dir=BWD_WB ${SXskip} --batch=inputs/minialex"
((++nopt))
;;
MINI) # low-minibatch alexnet
BASE="MINIALEX";
xform="--dir=FWD_B --batch=inputs/minialex --dir=BWD_D --batch=inputs/minialex --dir=BWD_WB --batch=inputs/minialex"
((++nopt))
;;
h|-h|--help) # help
usage
exit
;;
n*|-n*) # base name for logfile; ex. -nFOO (no space)
s="${xform#-}"; s="${s#n}"
if [ "$s" != "" ]; then BASE="$s"; xform=''; fi
;;
*) # other? arbitrary benchdnn args or test files ...
chkfile=1
((++nopt))
;;
esac
#echo "xform1 = ${xform}"
if [ $chkfile -ne 0 ]; then # search for batch file args, copying into build dir
batch_check "${xform}"
xform="${BATCH}"
fi
#echo "xform2 = ${xform}"
ARGS[$i]="${xform}"
done
if [ "${BASE}" == "" ]; then BASE='x'; fi
echo "THREADS': ${THREADS}"
echo "BASE : ${BASE}"
echo "ARGS : ${ARGS[@]}"
echo "nopt : $nopt"
#for ((i=0; i<${#ARGS[*]}; ++i)); do
# echo "$i : ${ARGS[i]}"
#done
# Alexnet: better to run as
# (cd "${BUILDDIR}" && make && cd tests/benchdnn && OMP_NUM_THREADS=6 ./benchdnn --mode=PT --dir=FWD_B --batch=inputs/conv_alexnet) 2>&1 | tee alex-fwd.log
# etc. to control which direction
# zero in on a particular failure (by hand):
# (cd "${BUILDDIR}" && make && cd tests/benchdnn && OMP_NUM_THREADS=6 ./benchdnn --mode=PT --dir=BWD_W --batch=inputs/conv_regression_group) >& x.log && { echo 'OK'; tail -n40 x.log | awk '/final stats/{f=1} /kbytes/{f=0} f==1{print $0;}'; } || { echo 'FAIL'; tail -n80 x.log; echo 'See x.log'; }
#
#(cd "${BUILDDIR}" && make && cd tests/benchdnn && /usr/bin/time -v ./benchdnn --mode=PT --batch=inputs/test_fwd_regression) 2>&1 | tee PT.log
HOSTNAME=`hostname -s` # SX-ACCE does not support --short
if [ "`uname -m`" = "SX-ACE" ]; then HOSTNAME='sx'; fi # I could not put spaces here, for SX-ACE (so no awk/sed)
if [ "${NECVE}" ]; then HOSTNAME='ve'; fi
LOGFILE="${BASE}-${HOSTNAME}.log"
if [ ! "$THREADS" = "" ]; then LOGFILE="${BASE}-t${THREADS}-${HOSTNAME}.log"; fi
echo "LOGFILE : $LOGFILE"
COLUMN="column -t"
if [ "`uname -m`" = "SX-ACE" ]; then COLUMN='cat'; fi # I could not put spaces here, for SX-ACE (so no awk/sed)
echo "COLUMN : $COLUMN"
# following does not exist on some systems
#TIME="/usr/bin/time -v"
#TIME="time"
TIME=""
if [ "`uname -m`" = "SX-ACE" ]; then TIME="time"; fi # just use the bash builtin
set +x
(
{
cd "$BUILDDIR" && { if [ ! "`uname -m`" = "SX-ACE" ]; then make; fi; } \
&& $COPY -r ../tests/benchdnn/inputs/* ./tests/benchdnn/inputs/ && \
{
if [ "$THREADS" = "" ]; then unset OMP_NUM_THREADS;
else THREADS="OMP_NUM_THREADS=$THREADS"; fi
echo "THREADS : $THREADS"
echo "cmd : $THREADS C_PROGINF=DETAIL ${TIME} $VE_EXEC ./benchdnn --mode=PT ${ARGS[@]}"
cd tests/benchdnn;
pwd
ls -l .
#ls -l inputs;
echo " `pwd` inputs:"
set +x
echo "COLUMN ... $COLUMN"
(cd inputs && ls -1) | awk '//{p=p " " $0;++n} n>=4{print p; p=""; n=0} END{print p}' | ${COLUMN}
echo "eval $THREADS C_PROGINF=DETAIL ${TIME} $VE_EXEC ./benchdnn --mode=PT ${ARGS[@]}"
eval $THREADS C_PROGINF=DETAIL ${TIME} $VE_EXEC ./benchdnn --mode=PT ${ARGS[@]}
}
} || { echo "Problems?"; false; }
) >& "$LOGFILE" \
&& { echo 'regr.sh OK'; tail -60 $LOGFILE | awk '/final stats/{f=1} /kbytes/ || /Sys Time/{f=0} f==1{print $0; next} /MFLOPS/ || /Concurr/ || /Ratio/'; } \
|| { echo "regr.sh FAIL"; tail -60 $LOGFILE | awk 'BEGIN{f=1} /kbytes/{f=0} f==1{print $0}'; echo "See LOGFILE = $LOGFILE"; exit 1; }
# Note: SX-ACE does not support tail -n40
# vim: set ts=4 sw=4 et :
| true |
e4530cacebb7c2e7f1ffb6065c5255f750fc59f4 | Shell | llnek/kirby | /bin/build | UTF-8 | 724 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Build the kirby compiler: compile the sources into $OUTDIR, run the test
# suite, and copy the artifacts into $DISTDIR only when all tests pass.
COMPILER="bin/boot.js"
TESTER="bin/test.js"
DISTDIR=dist
OUTDIR=out
# Make and clean the output dir (replacing a stray plain file if present).
if [ ! -d "$OUTDIR" ]; then
  if [ -e "$OUTDIR" ]; then
    rm "$OUTDIR"
  fi
  mkdir "$OUTDIR"
else
  rm "$OUTDIR"/*.js
fi
# Compile each source module.
for n in stdlib parser engine compiler main
do
  ${COMPILER} "src/czlab/kirby/${n}.ky" "$OUTDIR/${n}.js"
done
# Compile the test files.
${TESTER} test/test.ky
# Run the test cases; any line containing FAILED marks a failing case.
# NOTE(review): /tmp/kirby-test.out is a predictable path; mktemp would be
# safer on shared machines.
TESTOUT=/tmp/kirby-test.out
PASSED=0
node test/test.js > "$TESTOUT"
ERRCOUNT=$(grep -c FAILED "$TESTOUT")
if [ "$ERRCOUNT" != "0" ]; then
  cat "$TESTOUT"
  exit 1
else
  PASSED=1
fi
# Move artifacts to dist only after a green test run.
if [ "1" = "$PASSED" ]; then
  echo "Copying files for distribution!"
  rm "$DISTDIR"/*.js
  cp "$OUTDIR"/*.js "$DISTDIR"
fi
| true |
1bd22d1108d5c98a894b4521d73efd05dea35f2b | Shell | 13122310958/jitsi-videobridge | /resources/collect-dump-logs.sh | UTF-8 | 688 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Script that creates an archive in the current folder containing the heap
# and thread dump and the current log file of Jitsi Videobridge.
# 2>/dev/null: stay quiet when the pid file is absent (JVB not running).
PID=$(cat /var/run/jitsi-videobridge.pid 2>/dev/null)
if [ -n "$PID" ]; then
    # The pid file holds the wrapper's pid; the JVM is its child.
    PROC_PID=$(pgrep -P "$PID")
    echo "Jvb at pid $PROC_PID"
    STAMP=$(date +%Y-%m-%d-%H%M)
    THREADS_FILE="/tmp/stack-${STAMP}-${PROC_PID}.threads"
    HEAP_FILE="/tmp/heap-${STAMP}-${PROC_PID}.bin"
    # Run the dump tools as the jvb user so they may attach to the JVM.
    sudo -u jvb jstack "${PROC_PID}" > "${THREADS_FILE}"
    sudo -u jvb jmap -dump:live,format=b,file="${HEAP_FILE}" "${PROC_PID}"
    tar zcvf "jvb-dumps-${STAMP}-${PROC_PID}.tgz" "${THREADS_FILE}" "${HEAP_FILE}" /var/log/jitsi/jvb.log
    rm "${HEAP_FILE}" "${THREADS_FILE}"
else
    echo "JVB not running."
fi
| true |
0433e7ebf1c1722d55aabc2074b51b647eb3248d | Shell | plus3it/spel | /spel/scripts/virtualbox.sh | UTF-8 | 1,034 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Bail if we are not running inside VirtualBox.
if [[ "$(virt-what | head -1)" != "virtualbox" ]]; then
    exit 0
fi

# Install build dependencies needed to compile the guest-addition kernel
# modules; bzip2 is only added when it is not already installed.
echo "installing virtualbox guest addition dependencies"
VBOX_GUEST_DEPS=(kernel-devel kernel-headers gcc perl)
# NOTE: relies on bash expanding $? after the command substitution has run,
# so the test reflects the exit status of `rpm --quiet -q bzip2`.
test "$(rpm --quiet -q bzip2)$?" -eq 0 || VBOX_GUEST_DEPS+=(bzip2)
bash /tmp/retry.sh 5 yum -y install "${VBOX_GUEST_DEPS[@]}"
bash /tmp/retry.sh 5 yum -y install dkms make

# Point the guest-additions build at the running kernel's headers.
KERN_DIR=/lib/modules/$(uname -r)/build
export KERN_DIR

# Install VirtualBox Guest Additions
echo "installing virtualbox guest additions"
mkdir -p /mnt/virtualbox
mount -o loop /home/vagrant/VBoxGuest*.iso /mnt/virtualbox
# On failure, dump the installer log and abort. BUGFIX: the original used a
# subshell -- `|| (cat ... && exit 1)` -- whose `exit` only left the
# subshell, so the script kept going after a failed install.
sh /mnt/virtualbox/VBoxLinuxAdditions.run || { cat /var/log/vboxadd-setup.log; exit 1; }
ln -sf /opt/VBoxGuestAdditions-*/lib/VBoxGuestAdditions /usr/lib/VBoxGuestAdditions
umount /mnt/virtualbox
rm -rf /home/vagrant/VBoxGuest*.iso

# Remove deps
echo "removing virtualbox guest addition dependencies"
yum -y remove --setopt=clean_requirements_on_remove=1 "${VBOX_GUEST_DEPS[@]}"
| true |
a1a6fdc18fcfc238049d522027774bdb94cd0402 | Shell | buren/git-story | /src/git/git-unpushed | UTF-8 | 280 | 3.3125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# git-story subcommand: show the diff of commits not yet pushed upstream.
# GS_INIT_PATH names the git-story init helper; it presumably defines the
# __git-story-init and __gs-check-usage-param functions used below -- confirm.
source $GS_INIT_PATH
__git-story-init
# USAGE is presumably read by the sourced git-story helpers when printing
# help text -- confirm against __gs-check-usage-param.
USAGE="<args>
Show the diff of everything you haven't pushed yet."
main() {
  __gs-check-usage-param $1
  main-exec "$@"
}
# Diff HEAD against the identically named branch on origin.
main-exec() {
  branch=$(git rev-parse --abbrev-ref HEAD)
  git diff origin/$branch..HEAD
}
main "$@"
| true |
9e6164fcf550d8a3b3bb2d7d288c9514646262fb | Shell | munchi-selva/daikon | /preprocessing/scripts/get_source_files.sh | UTF-8 | 855 | 3.578125 | 4 | [] | no_license | #!/bin/bash
#
# Retrieves the scripts and data required as a starting point for working with
# daikon for MT FS 2019 exercise 5
#
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source ${SCRIPT_DIR}/get_dirs.sh
if [ ! -d $MOSES_REP_DIR ]
then
#
# Clone Mathias Müller's mosesdecoder fork
#
git clone https://github.com/bricksdont/moses-scripts $MOSES_REP_DIR
fi
if [ ! -d $DATA_DIR ]
then
mkdir -p $DATA_DIR
fi
#
# Download corpus files
#
pushd $DATA_DIR
corp_source=https://files.ifi.uzh.ch/cl/archiv/2019/mt19
for corpus in train dev
do
for lang in de en
do
corp_file=$corpus.$lang
if [ ! -f $corp_file ]
then
wget $corp_source/$corp_file
fi
done
done
test_file=test.de
if [ ! -f $test_file ]
then
wget $corp_source/$test_file
fi
popd
| true |
2f65e5f17c705c818fe658485f6bd165c7530a39 | Shell | ymmxl/a-bird | /centos6_squid_proxy_script | UTF-8 | 1,702 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
PROXY_USER=user
PROXY_PASS=password
PROXY_PORT=3128
# Clear the repository index caches
yum clean all
# Update the operating system
yum update -y
# Install httpd-tools to get htpasswd
yum install httpd-tools -y
# Install squid
yum install squid -y
# Create the htpasswd file
htpasswd -c -b /etc/squid/passwords $PROXY_USER $PROXY_PASS
# Backup the original squid config
cp /etc/squid/squid.conf /etc/squid/squid.conf.bak
# Set up the squid config
cat << EOF > /etc/squid/squid.conf
auth_param basic program /usr/lib64/squid/ncsa_auth /etc/squid/passwords
auth_param basic realm proxy
acl authenticated proxy_auth REQUIRED
http_access allow authenticated
forwarded_for delete
http_port 0.0.0.0:$PROXY_PORT
EOF
# Set squid to start on boot
chkconfig squid on
# Start squid
/etc/init.d/squid start
# Set up the iptables config
cat << EOF > /etc/sysconfig/iptables
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
#######################################################
# BEGIN CUSTOM RULES
#######################################################
# Allow SSH from anywhere
-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
# Allow squid access from anywhere
-A INPUT -m state --state NEW -m tcp -p tcp --dport $PROXY_PORT -j ACCEPT
#######################################################
# END CUSTOM RULES
#######################################################
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT
EOF
# Restart iptables
/etc/init.d/iptables restart
| true |
749b097c5129bb34ca02a6743bb798d034985d9e | Shell | uiuc-arc/Storm | /bugs/templates/stan1121/run.sh | UTF-8 | 1,331 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# first argument is the directory where everthing should be setup
# cd ../TestMin/bugs/stan1121
cd $1
echo $pwd
if [ ! -d "cmdstan" ]; then
wget https://github.com/stan-dev/cmdstan/releases/download/v2.5.0/cmdstan-2.5.0.tar.gz
tar -xf cmdstan-2.5.0.tar.gz
fi
cd cmdstan
make build
if [ ! -d "stan1121" ]; then
mkdir stan1121
fi
echo $pwd
# translate template to stan
here=`realpath ../`
echo $here
(cd ../../../../translators/ && ./teststan.py -o $here/stan1121.template && cp stan1121.stan $here/stan1121.stan && cp stan1121.data.R $here/stan1121.data.R)
if [ $? -ne 0 ]; then
echo "Translate failed"
echo "Failed"
exit 2
fi
diff ../stan1121.stan stan1121/stan1121.stan
diff ../stan1121.data.R stan1121/stan1121.data.R
#if [ $? -ne 0 ]; then
cp ../*.R ../*.stan stan1121/
#fi
pwd
echo "making..."
rm -f stan1121/stan1121
make stan1121/stan1121 > /dev/null >&2
cd ./stan1121/
if [ -z $2 ]
then
./stan1121 sample save_warmup=1 random seed=2205139738 data file=stan1121.data.R > stanout 2>&1
else
./stan1121 sample num_samples=$2 save_warmup=1 random seed=2205139738 data file=stan1121.data.R > stanout 2>&1
fi
nans=`grep -wi "not symmetric" stanout | wc -l`
if [ $nans -gt 0 ]; then
echo "Passed"
else
echo "Failed"
fi
#END=$(date +%N)
#DIFF=$(( ($END - $START)/1000 ))
#echo $DIFF >> stanout | true |
51d06ed5bf482ee8f5155b5d44b8752c3853e793 | Shell | ilventu/aur-mirror | /rabbitvcs-svn/PKGBUILD | UTF-8 | 1,282 | 2.703125 | 3 | [] | no_license | # Maintainer: H.Gökhan Sarı <hsa2@difuzyon.net>
pkgname=rabbitvcs-svn
pkgver=2933
pkgrel=1
pkgdesc="RabbitVCS is a set of graphical tools written to provide simple and straightforward access to the version control systems you use."
arch=('i686' 'x86_64')
url="http://code.google.com/p/rabbitvcs/"
license=('GPL')
groups=()
depends=('pygtk>=2.12' 'pygobject>=2.14' 'python-configobj>=4.4.0' 'pysvn-py2>=1.5.2' 'subversion>=1.4.6' 'git' 'python-dulwich>=0.6.0' 'meld' 'python-simplejson')
makedepends=('subversion')
provides=('rabbitvcs')
conflicts=('rabbitvcs')
install=rabbitvcs-svn.install
source=()
noextract=()
md5sums=() #generate with 'makepkg -g'
_svntrunk=http://rabbitvcs.googlecode.com/svn/trunk/
_svnmod=rabbitvcs
build() {
cd "$srcdir"
if [ -d $_svnmod/.svn ]; then
(cd $_svnmod && svn up -r $pkgver)
else
svn co $_svntrunk --config-dir ./ -r $pkgver $_svnmod
fi
msg "SVN checkout done or server timeout"
msg "Starting make..."
rm -rf "$srcdir/$_svnmod-build"
cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
cd "$srcdir/$_svnmod-build"
#
# BUILD
#
sed -i "s#env python#env python2#" setup.py
find . -name "*.py" | xargs grep -l sys.executable | xargs sed -i 's|sys\.executable|"/usr/bin/python2"|g'
python2 setup.py install --root=$pkgdir
}
| true |
d0a890e338401a081e1780e853b3603348ea2b0d | Shell | xshoji/unity-todo | /builder.sh | UTF-8 | 3,822 | 4.09375 | 4 | [] | no_license | #!/bin/bash
function usage()
{
cat << _EOT_
builder
------------
Builder for unity application.
Usage:
./$(basename "$0") --outputApplicationPath /path/to/app [ --platform android --version 2019.2.6f1 ]
Required:
-o, --outputApplicationPath /path/to/app : Output application path. ( the extension is not needed. )
Optional:
-p, --platform android : Platform. [ android | ios | mac | windows ] [ default: android ]
-v, --version 2019.2.6f1 : Unity version. [ default: 2019.2.6f1 ]
Helper options:
--help, --debug
_EOT_
[[ "${1+x}" != "" ]] && { exit "${1}"; }
exit 1
}
# Print $2 wrapped in the ANSI color named by $1 (red|green|yellow|blue),
# followed by a reset sequence; unknown names fall through with an empty
# color code.
function printColored() {
  local prefix="\033[0;"
  local code=""
  case "${1}" in
    "red")    code="31m" ;;
    "green")  code="32m" ;;
    "yellow") code="33m" ;;
    "blue")   code="34m" ;;
  esac
  printf "%b%b\033[0m" "${prefix}${code}" "${2}"
}
#------------------------------------------
# Preparation
#------------------------------------------
set -eu
# Parse parameters
for ARG in "$@"
do
SHIFT="true"
[[ "${ARG}" == "--debug" ]] && { shift 1; set -eux; SHIFT="false"; }
{ [[ "${ARG}" == "--outputApplicationPath" ]] || [[ "${ARG}" == "-o" ]]; } && { shift 1; OUTPUT_APPLICATION_PATH="${1}"; SHIFT="false"; }
{ [[ "${ARG}" == "--platform" ]] || [[ "${ARG}" == "-p" ]]; } && { shift 1; PLATFORM="${1}"; SHIFT="false"; }
{ [[ "${ARG}" == "--version" ]] || [[ "${ARG}" == "-v" ]]; } && { shift 1; VERSION="${1}"; SHIFT="false"; }
{ [[ "${ARG}" == "--help" ]] || [[ "${ARG}" == "-h" ]]; } && { shift 1; HELP="true"; SHIFT="false"; }
{ [[ "${SHIFT}" == "true" ]] && [[ "$#" -gt 0 ]]; } && { shift 1; }
done
[[ -n "${HELP+x}" ]] && { usage 0; }
# Check required parameters
[[ -z "${OUTPUT_APPLICATION_PATH+x}" ]] && { printColored yellow "[!] --outputApplicationPath is required.\n"; INVALID_STATE="true"; }
# Check invalid state and display usage
[[ -n "${INVALID_STATE+x}" ]] && { usage; }
# Initialize optional variables
[[ -z "${PLATFORM+x}" ]] && { PLATFORM="android"; }
[[ -z "${VERSION+x}" ]] && { VERSION="2019.2.6f1"; }
#------------------------------------------
# Main
#------------------------------------------
cat << __EOT__
[ Required parameters ]
outputApplicationPath: ${OUTPUT_APPLICATION_PATH}
[ Optional parameters ]
platform: ${PLATFORM}
version: ${VERSION}
__EOT__
SCRIPT_DIR="$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)"
APPLICATION_DIR="${SCRIPT_DIR}/unity-todo-app"
UNITY_CMD="/Applications/Unity/Hub/Editor/${VERSION}/Unity.app/Contents/MacOS/Unity"
BUILD_LOG="/tmp/build.log"
if [[ "${PLATFORM}" == "android" ]]; then
${UNITY_CMD} -batchmode -quit -logFile "${BUILD_LOG}" -projectPath "${APPLICATION_DIR}" -buildTarget android -executeMethod AppBuilder.buildForAndroid "${OUTPUT_APPLICATION_PATH}"
elif [[ "${PLATFORM}" == "ios" ]]; then
${UNITY_CMD} -batchmode -quit -logFile "${BUILD_LOG}" -projectPath "${APPLICATION_DIR}" -buildTarget ios -executeMethod AppBuilder.buildForIOS "${OUTPUT_APPLICATION_PATH}"
elif [[ "${PLATFORM}" == "mac" ]]; then
${UNITY_CMD} -batchmode -quit -logFile "${BUILD_LOG}" -projectPath "${APPLICATION_DIR}" -buildTarget mac -executeMethod AppBuilder.buildForMac "${OUTPUT_APPLICATION_PATH}"
elif [[ "${PLATFORM}" == "windows" ]]; then
${UNITY_CMD} -batchmode -quit -logFile "${BUILD_LOG}" -projectPath "${APPLICATION_DIR}" -buildTarget mac -executeMethod AppBuilder.buildForWindows "${OUTPUT_APPLICATION_PATH}"
else
printColored yellow "${PLATFORM} is Unknown."
exit 1
fi
# curl -sf ${STARTER_URL} |bash -s - \
# -n builder \
# -d "Builder for unity application." \
# -r outputApplicationPath,/path/to/app,"Output application path. ( the extension is not needed. )" \
# -o platform,android,"Platform. [ android | ios | mac | windows ] ",android \
# -o version,2019.2.6f1,"Unity version.",2019.2.6f1 \
# -s > /tmp/test.sh; open /tmp/test.sh
| true |
a2c7807448235d4d6ea0bd3dbea82ad2f47a3e10 | Shell | huangzulin/rclone-aria2 | /aria2/delete.sh | UTF-8 | 891 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#=================================================
#   Description: Delete files after Aria2 download error
#   License: MIT
#   Version: 1.8
#   Author: P3TERX
#   Blog: https://p3terx.com
#=================================================
# Invoked as an aria2 event hook; the arguments are presumably the aria2
# hook triple (gid, number of files, path of the first file) -- confirm
# against the aria2c --on-download-* documentation.
downloadpath='/root/Download' # Aria2 download directory
filepath=$3
# Path of the download relative to the download root.
rdp=${filepath#${downloadpath}/}
# Top-level entry (file or directory) of this download inside the root.
path=${downloadpath}/${rdp%%/*}
if [ $2 -eq 0 ]
    then
        # No files were downloaded: nothing to clean up.
        exit 0
elif [ "$path" = "$filepath" ] && [ $2 -eq 1 ]
    then
        # Single file stored directly in the download root: remove it and
        # its .aria2 control file.
        [ -e "$filepath".aria2 ] && rm -vf "$filepath".aria2
        rm -vf "$filepath"
        exit 0
elif [ "$path" != "$filepath" ] && [ $2 -gt 1 ]
    then
        # Multi-file download in its own directory: remove the whole tree.
        [ -e "$path".aria2 ] && rm -vf "$path".aria2
        rm -vrf "$path"
        exit 0
elif [ "$path" != "$filepath" ] && [ $2 -eq 1 ]
    then
        # Single file nested in a subdirectory: remove the file, then prune
        # any directories left empty under the download root.
        [ -e "$filepath".aria2 ] && rm -vf "$filepath".aria2
        rm -vf "$filepath"
        find "${downloadpath}" ! -path "${downloadpath}" -depth -type d -empty -exec rm -vrf {} \;
        exit 0
fi | true |
f4118bf29bf9d17eaa040a2acb1413ba8ad76071 | Shell | creptt/clustercode | /e2e/test1.bats | UTF-8 | 549 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# End-to-end test: applying the Blueprint manifest must result in a
# successful scan Job. Assertion and debug helpers come from the loaded
# libraries below.
load "lib/utils"
load "lib/detik"
load "lib/custom"
# detik configuration: which CLI to drive and which namespace to query.
DETIK_CLIENT_NAME="kubectl"
DETIK_CLIENT_NAMESPACE="clustercode-test1"
DEBUG_DETIK="true"
# Runs before each test: apply the manifest belonging to this test file.
setup() {
    reset_debug
    run kubectl apply -f debug/${TEST_FILE_ID}.yaml
    debug "$output"
}
@test "Given Blueprint, When scheduling scan job, Then Job should succeed" {
    # Poll up to 20 times, 5s apart, for the scan pod to reach Succeeded.
    try "at most 20 times every 5s to find 1 pod named 'test-blueprint-scan-job' with 'status' being 'Succeeded'"
}
# Runs after each test: remove the manifest again.
teardown() {
    run kubectl delete -f debug/${TEST_FILE_ID}.yaml
    debug "$output"
}
| true |
1ed6c39503180bd645e3b80b2bc14ae2edd4ac5a | Shell | columbia-it/screencasts | /batchvtt2transcript.sh | UTF-8 | 291 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Batch convert the (corrected) VTT transcripts to hyperlinked transcripts.
#
# Usage: batchvtt2transcript.sh FILE.vtt...
# For each FILE.vtt argument, writes FILE-transcript.html next to it.
if [ $# -eq 0 ]
then
    echo "Usage: $0 filename..." >&2
    # Nothing to do without arguments; exit non-zero so callers notice the
    # misuse (the original fell through and exited 0).
    exit 1
fi
for f in "$@"
do
    # Strip a literal ".vtt" suffix. The original sed pattern '.vtt$' left
    # the dot unescaped, so it also matched names such as "xvtt".
    b=${f%.vtt}
    vtt="$b.vtt"
    html="$b-transcript.html"
    python vtt2transcript.py "$vtt" "$html"
done
| true |
f16c4b459253a1759ac6bf7ba79a6e74b94d077c | Shell | mika314/gametube | /house_in_the_woods/build.sh | UTF-8 | 186 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build every sub-project with coddle, aborting on the first failure.
set -x
for bin in server client; do
    # Guard the cd so a missing directory does not run the build (and the
    # trailing `cd ..`) from the wrong place.
    cd "$bin" || exit 1
    echo Entering directory \`$bin\'
    coddle release || exit 1
    echo Leaving directory \`$bin\'
    cd ..
done
| true |
0500357dd23919417e16d28aec641c831a44ffac | Shell | Fethienv/django-sms-sender | /sms_sender/deployement/gunicorn_service | UTF-8 | 1,400 | 3.296875 | 3 | [] | no_license | #!/bin/bash
NAME="sms_sender" # Name of the application
DJANGODIR=/home/sendersm/public_html/sms_sender/ # Django project directory
SOCKFILE=127.0.0.1:8000 #/home/sendersm/public_html/sms_sender/run/gunicorn.sock # we will communicte using this unix socket
USER=root # the user to run as
GROUP=root # the group to run as
NUM_WORKERS=3 # how many worker processes should Gunicorn spawn
DJANGO_SETTINGS_MODULE=sms_sender.settings # which settings file should Django use
DJANGO_WSGI_MODULE=sms_sender.wsgi # WSGI module name
echo "Starting $NAME as `whoami`"
# Activate the virtual environment
cd $DJANGODIR
source ../venv/bin/activate
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DJANGODIR:$PYTHONPATH
# Create the run directory if it doesn't exist
# RUNDIR=$(dirname $SOCKFILE)
# test -d $RUNDIR || mkdir -p $RUNDIR
# Start your Django Unicorn
# Programs meant to be run under supervisor should not daemonize themselves (do not use --daemon)
exec /home/sendersm/public_html/venv/bin/gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $NUM_WORKERS \
--user=$USER --group=$GROUP \
--bind=$SOCKFILE \
--log-level=debug \
--log-file=- | true |
b793bd0d74618a565d382f93ed6a0a5686b209c1 | Shell | gbv/cocoda-services | /start.sh | UTF-8 | 953 | 3.96875 | 4 | [
"MIT",
"Unlicense"
] | permissive | #!/bin/bash -e
function usage {
    echo "Usage: $0 <name> | all"
    echo "Start or restart service in directory <name> with pm2"
}

# Refuse to run without a service name: previously a missing argument made
# "cd $1" silently change to $HOME and operate there.
if [ $# -lt 1 ]; then
    usage
    exit 1
fi

. utils.sh

echo "Start service $1"
cd "$1" || exit 1

ECOSYSTEM_EXAMPLE=ecosystem.example.json
ECOSYSTEM=ecosystem.config.json

if [[ -f $ECOSYSTEM ]]; then
    echo "Using ecosystem file $ECOSYSTEM without adjustments"
    pm2 reload "$ECOSYSTEM" || pm2 start "$ECOSYSTEM"
elif [[ -f $ECOSYSTEM_EXAMPLE ]]; then
    echo "Using and adjusting ecosystem file $ECOSYSTEM_EXAMPLE"
    # add or adjust service name
    SCRIPT="console.log(JSON.stringify(Object.assign(\
        JSON.parse(require('fs').readFileSync('$ECOSYSTEM_EXAMPLE')),{name:'$1'}),null,2))"
    node -e "$SCRIPT" > "$ECOSYSTEM"
    # reload or start
    pm2 reload "$ECOSYSTEM" || pm2 start "$ECOSYSTEM"
else
    NAME=$(basename "$1")
    echo "Assuming pm2 process name to be $NAME"
    pm2 reload "$NAME" || pm2 start "$NAME"
fi

# show status for the requested service only
pm2 list -m | awk "\$1==\"+---\" {P=\$2==\"$1\"} P {print}"
| true |
08c29b94ffed90f6868bf8ced8e59cf5c0a5b650 | Shell | sandor-juhasz/raspberry-pi-cluster | /bin/cl-admin | UTF-8 | 1,363 | 4.0625 | 4 | [] | no_license | #!/bin/bash
###########################################################################
#
# Text-based administration interface for the Raspberry Pi Cluster.
#
# Created by Sándor Juhász, 2017.
#
###########################################################################

# Ask for confirmation (default: No) and shut the cluster down via the
# external cl-shutdown helper.
shutdown-cmd () {
    whiptail --title "Shutdown" \
             --yesno \
             --defaultno \
             "Are you sure you want to shut down the cluster?" \
             8 60
    if [ $? -eq 0 ]; then
        cl-shutdown
    fi
}

# Ask for confirmation (default: No) and upgrade Raspbian on all nodes via
# the external cl-raspbian-upgrade helper.
raspbian-upgrade-cmd () {
    whiptail --title "Upgrade Raspbian OS-es" \
             --yesno \
             --defaultno \
             "Are you sure you want to upgrade all nodes?" \
             8 60
    if [ $? -eq 0 ]; then
        cl-raspbian-upgrade
    fi
}

# Main menu loop; the 3>&2 2>&1 1>&3 swap captures whiptail's menu choice
# (written to stderr) into $choice while the UI stays on the terminal.
status=0
while [ ${status} -eq 0 ]; do
    choice=`whiptail --title "Raspberry Pi Cluster Administration" \
                     --menu "Choose an option" 15 60 5 \
                     "Provision" "(Re)provisions the whole pi cluster." \
                     "Upgrade" "Upgrades the Raspbian OS on all nodes." \
                     "Shutdown" "Shuts down the cluster in an orderly manner." \
                     "Quit" "" 3>&2 2>&1 1>&3`
    option=$(echo $choice | tr '[:upper:]' '[:lower:]' | sed 's/ //g')
    # NOTE(review): the "Provision" menu entry has no matching case arm, so
    # selecting it is currently a no-op — confirm whether a cl-provision
    # handler is missing here.
    case "${option}" in
        quit)
            status=1
            exit
            ;;
        upgrade)
            raspbian-upgrade-cmd
            ;;
        shutdown)
            shutdown-cmd
            ;;
    esac
done
| true |
f39546de959b1ebf998d36e1884526998e32cf3f | Shell | shashwatx/shell_scripts | /archive/gnuplot_script1.sh | UTF-8 | 481 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Render one PNG per pre-sorted variance file: for every
# varianceWithSeedFiles/*.sorted file, plot (|CR| - |Seeds|), i.e.
# column2 - column1, against the seed count labels in column 1.
# Note: the glob only expands at the unquoted $FILES below; if no
# *.sorted file exists, the literal pattern is passed to gnuplot.
FILES=./varianceWithSeedFiles/*.sorted
for f in $FILES
do
# <<-EOF strips leading tabs (not spaces) from the here-document; the
# gnuplot commands below are flush-left, so nothing is stripped.
gnuplot <<-EOF
set xlabel "|Seeds|"
set ylabel "|CR| - |Seeds|"
set term png
set output "${f}.png"
set grid
set style line 1 lc rgb '#0060ad' lt 1 lw 2 pt 7 ps 1.5 # --- blue
set style line 2 lc rgb '#B23611' lt 1 lw 2 pt 7 ps 1.5 # --- red
set xtic rotate by -45 scale 0
plot "${f}" using (\$2-\$1):xticlabels(1) title "CR" with linespoints ls 1
EOF
done
| true |
2befd210af6e8ba174fc17617ffefcdef23a176e | Shell | cih9088/dotfiles | /script/environments/lua_env__stylua.sh | UTF-8 | 2,911 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
################################################################
# Installer metadata: THIS is this script's basename without extension
# (used for logging and dispatch), TARGET/GH identify the tool and its
# GitHub release repository.
THIS=$(basename "$0")
THIS=${THIS%.*}
TARGET=lua-env
GH="JohnnyMorganz/StyLua"

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# common.sh provides the shared helpers used below (log_title, log_error,
# the "++" runner, main_script, PREFIX, PLATFORM, ...).
. ${DIR}/../helpers/common.sh
################################################################

THIS_HL="${BOLD}${UNDERLINE}${THIS}${NC}"
THIS_CMD=stylua

log_title "Prepare for ${THIS_HL}"
################################################################

# List available stylua versions, newest first, via the shared
# gh_list_releases helper.
list_versions() {
  echo "$("${DIR}/../helpers/gh_list_releases" "${GH}")"
}
# Print the version of an installed command.
# $1 - command that supports `--version` and prints "<name> <version> ...";
#      the second whitespace-separated field is echoed.
version_func() {
  "$1" --version | awk '{print $2}'  # quote $1 so paths with spaces work
}
# Succeed (return 0) iff the requested version appears as a whole token in
# the newline/carriage-return separated list of available versions.
verify_version() {
  local wanted=$1
  local candidates
  candidates=$(echo "$2" | tr "\n\r" " ")
  # Pad with spaces so a membership test cannot match a substring of a
  # longer version string (e.g. "1.2" inside "1.2.3").
  case " ${candidates} " in
    *" ${wanted} "*) return 0 ;;
    *) return 1 ;;
  esac
}
# Install/remove/update stylua into the per-user prefix ($PREFIX/bin).
# $1 - command: install | remove | update (default: skip)
# $2 - version tag; empty or "latest" resolves to the newest release.
# The "++" prefix is a helper from common.sh (presumably logs and runs the
# command — TODO confirm).
setup_for_local() {
  local COMMAND="${1:-skip}"
  local VERSION="${2:-}"
  [[ -z "${VERSION}" || "${VERSION}" == "latest" ]] && VERSION="$(list_versions | head -n 1)"

  # remove (also the first phase of an update)
  if [[ "remove update" == *"${COMMAND}"* ]]; then
    if [ -f "${PREFIX}/bin/stylua" ]; then
      rm -rf "${PREFIX}/bin/stylua" || true
    else
      # updating something that is not installed is an error
      if [ "${COMMAND}" == "update" ]; then
        log_error "${THIS_HL} is not installed. Please install it before update it."
        exit 1
      fi
    fi
  fi

  # install: download the platform-specific release zip and copy the
  # binary into $PREFIX/bin
  if [[ "install update" == *"${COMMAND}"* ]]; then
    if [ ! -f "${PREFIX}/bin/stylua" ]; then
      if [[ ${PLATFORM} == "OSX" ]]; then
        ++ curl -LO "https://github.com/JohnnyMorganz/StyLua/releases/download/${VERSION}/stylua-macos.zip"
        ++ unzip stylua-macos.zip
      elif [[ ${PLATFORM} == "LINUX" ]]; then
        ++ curl -LO "https://github.com/JohnnyMorganz/StyLua/releases/download/${VERSION}/stylua-linux.zip"
        ++ unzip stylua-linux.zip
      fi

      ++ chmod +x stylua
      ++ cp stylua "${PREFIX}/bin"
    fi
  fi
}
# Install/remove/update stylua system-wide.
# On macOS this delegates to Homebrew; on Linux it downloads the latest
# release zip and copies the binary into /usr/local/bin via sudo.
# $1 - command: install | remove | update (default: skip)
setup_for_system() {
  local COMMAND="${1:-skip}"
  local VERSION="$(list_versions | head -n 1)"

  case "${PLATFORM}" in
    OSX)
      if [ "${COMMAND}" == "remove" ]; then
        brew list stylua >/dev/null 2>&1 && ++ brew uninstall stylua
      elif [ "${COMMAND}" == "install" ]; then
        brew list stylua >/dev/null 2>&1 || ++ brew install stylua
      elif [ "${COMMAND}" == "update" ]; then
        ++ brew upgrade stylua
      fi
      ;;
    LINUX)
      # remove first (also the first phase of an update)
      if [[ "remove update" == *"${COMMAND}"* ]]; then
        ++ sudo rm -f /usr/local/bin/stylua
      fi
      if [[ "install update" == *"${COMMAND}"* ]]; then
        ++ curl -LO "https://github.com/JohnnyMorganz/StyLua/releases/download/${VERSION}/stylua-linux.zip"
        ++ unzip stylua-linux.zip
        ++ chmod +x stylua
        ++ sudo mkdir -p /usr/local/bin
        ++ sudo cp stylua /usr/local/bin/
      fi
      ;;
  esac
}
# Hand control to the shared driver from helpers/common.sh, passing the
# local/system installers and the version-listing/verification callbacks.
main_script "${THIS}" \
  setup_for_local setup_for_system \
  list_versions verify_version version_func
| true |
e057e8379f0bcb69238d57d1f2d3c0735180e108 | Shell | pSub/configs | /zsh/dot-zshrc | UTF-8 | 3,500 | 2.90625 | 3 | [] | no_license | # Created by pSub for 4.3.10
# Declaration of config files to
# be used, which are located in $ZSHDIR
config_files=(alias
    bindkey
    functions
    style
    prompt
    zle
)

# Environment: editor/pager/browser defaults and history/dirstack settings.
export ZSHDIR=$HOME/.zsh
export ZSHFUN=$ZSHDIR/functions
export EDITOR="ec"
export PAGER="less -R"
export BROWSER=dwb
export PRESENTER=pdfpc
export REPORTTIME="10"
export HISTSIZE=1000
export SAVEHIST=1000
export HISTFILE=$HOME/.zshhistory
export COLORTERM=yes
export DIRSTACKSIZE=9
export DIRSTACKFILE=~/.zdirs

# MODULES
autoload -U compinit && compinit
autoload -U keeper && keeper
autoload -U colors && colors
autoload -U zmv
autoload -Uz vcs_info
autoload -U url-quote-magic

# Add directory with custom functions to FPATH
fpath=($fpath $ZSHFUN)

# Mark all functions in ZSHFUN for autoloading
for file in $ZSHFUN/*
do
  autoload -U $file:t
done

# Register zle widgets (the global-alias-* and *-line widgets are defined
# in the autoloaded function files above)
zle -N self-insert url-quote-magic
zle -N global-alias-space
zle -N global-alias-tilde
zle -N global-alias-dot
zle -N global-alias-dirstack
zle -N after-first-word
zle -N goto-directory
zle -N find-file
# completion widget backed by the generic completer
zle -C retract complete-word _generic

# IRC client like history
# http://zshwiki.org/home/zle/ircclientlikeinput
zle -N _fake-accept-line
zle -N down-line-or-history _down-or-fake-accept-line

# OPTIONS
# change directory without 'cd'
setopt autocd
# Push directories automatically
setopt autopushd
# Ignore duplicates on dictionary stack
setopt pushd_ignoredups
# Be quiet
setopt nobeep
setopt nohistbeep
setopt nolistbeep
# Maybe needed for prompt, I'm not sure
setopt prompt_subst
# $0 is the name of the function/script
setopt function_argzero
# No duplicate entries in history
setopt histignoredups
# Cool globbing stuff, see http://zsh.sourceforge.net/Intro/intro_2.html
setopt extendedglob
# Comments are allowed in the prompt, useful when pasting a shell script
setopt interactivecomments
# Do hashing
setopt hash_cmds
setopt hash_dirs
# Redirect output to multiple destinations (this is the default)
setopt multios

# COLORS
if [[ -f ~/.dircolors ]] {
    if [[ ${TERM} == screen* ]] {
       eval $( TERM=screen dircolors ~/.dircolors )
    } else {
       eval $( dircolors ~/.dircolors )
    }
} else {
    eval $( dircolors -b )
}

# Persistent directory stack by Christian Neukirchen
# <http://chneukirchen.org/blog/archive/2012/02/10-new-zsh-tricks-you-may-not-know.html>
if [[ -f $DIRSTACKFILE ]] && [[ $#dirstack -eq 0 ]]; then
  dirstack=( ${(f)"$(< $DIRSTACKFILE)"} )
  [[ -d $dirstack[1] ]] && cd $dirstack[1] && cd $OLDPWD
fi

# Save the dir stack on every directory change.
chpwd() {
  print -l $PWD ${(u)dirstack} >$DIRSTACKFILE
}

# Load config files
if [[ -d $ZSHDIR ]] {
    for config_file in $config_files
    do
      if [[ -f $ZSHDIR/$config_file.zsh ]] {
          source $ZSHDIR/$config_file.zsh
      }
    done
}
unset config_files

### ZNT's installer added snippet ###
fpath=( "$fpath[@]" "$HOME/.nix-profile/share/zsh/site-functions" )
autoload n-aliases n-cd n-env n-functions n-history n-kill n-list n-list-draw n-list-input n-options n-panelize n-help
autoload znt-usetty-wrapper znt-history-widget znt-cd-widget znt-kill-widget
alias naliases=n-aliases ncd=n-cd nenv=n-env nfunctions=n-functions nhistory=n-history
alias nkill=n-kill noptions=n-options npanelize=n-panelize nhelp=n-help
zle -N znt-history-widget
bindkey '^R' znt-history-widget
setopt AUTO_PUSHD HIST_IGNORE_DUPS PUSHD_IGNORE_DUPS
zstyle ':completion::complete:n-kill::bits' matcher 'r:|=** l:|=*'
### END ###
51f0b81a61050ae3229098111b128ccefcf1f8b7 | Shell | mmickan/puppet-vault | /files/vault-secret-ssh | UTF-8 | 4,267 | 4.15625 | 4 | [] | no_license | #!/bin/bash
#
# vault-secret-ssh
#
# Helper script to mount and configure Vault's SSH secret backend.
#
######################################################################
# Script configuraiton
######################################################################
version=0.1.0
app_id='A0DC8E55-8C4E-41FE-90D9-F222D68697E5'
user_id=`facter ec2_instance_id`
user_id=${user_id:-`sudo facter uuid`}
if [ "$user_id" == "Not Settable" ] || [ -z "$user_id" ]; then
if [ ! -e /etc/UUID ]; then
touch /etc/UUID
chown root:root /etc/UUID
chmod 0600 /etc/UUID
uuidgen > /etc/UUID
chmod 0400 /etc/UUID
fi
user_id=`cat /etc/UUID`
fi
vault_addr=${VAULT_ADDR:-https://127.0.0.1:8200}
declare -a names
unset http_proxy
unset https_proxy
######################################################################
# Functions
######################################################################
# Print usage information and exit with status 1.
usage(){
  cat <<EOF
Usage: $0 [<options>] -- <mountpoint> ...

--version         Show the version of this script
--help            Display this mess
--show-app-id     Output this application's app-id
--token <token>   Use <token> to authenticate to Vault.  If not provided,
                  VAULT_TOKEN environment variable will be used.  Prefix
                  with @ to read token from a file
--addr <address>  Connect to Vault server at <address>.  Uses VAULT_ADDR
                  environment variable as default, and falls back to
                  https://127.0.0.1:8200 if neither the parameter not the
                  environment variable are set
--app-id <id>     Use <id> to authenticate to App ID auth backend.  If not
                  specified, this script's default app-id is used
EOF
  exit 1
}

# Print the script version and exit successfully.
version(){
  cat <<EOF
vault-secret-ssh version $version
EOF
  exit 0
}

# Print the configured app-id (no trailing newline) and exit successfully.
show_app_id(){
  echo -n $app_id
  exit 0
}
# Strip a single leading "@" from $1 and print the result (no newline).
# Returns 0 if an "@" was removed, 1 otherwise; the caller uses this to
# decide whether the value names a file to read the token from.
strip_leading_at(){
  local input=$1
  # Parameter expansion replaces the old `echo -n $input | sed 's/^@//'`:
  # no subshell, and values containing spaces or glob characters are no
  # longer word-split/expanded by the unquoted echo.
  local output=${input#@}
  printf '%s' "$output"  # printf is safe even if the value starts with "-"
  [ "$input" != "$output" ]
}
# Colored status helpers: green progress lines, yellow warnings, red
# errors; fatal() additionally aborts the script with status 1.
# %b expands backslash escapes in the message, matching echo -e.
log(){ printf '\e[32m\e[1m--> %b...\e[0m\n' "$1"; }
warn(){ printf '\e[33m\e[1mWARNING: %b\e[0m\n' "$1"; }
error(){ printf '\e[31m\e[1mERROR: %b\e[0m\n' "$1"; }
fatal(){ printf '\e[31m\e[1mFATAL: %b\e[0m\n' "$1"; exit 1; }
######################################################################
# Script start
######################################################################

# Parse long/short options with util-linux getopt, then re-set "$@".
PARSED_OPTIONS=$(getopt -n "$0" -o hvst:a: --long "help,version,show-app-id,token:,addr:,app-id:" -- "$@")
if [ $? -ne 0 ]; then
  usage
fi
eval set -- "$PARSED_OPTIONS"
while true; do
  case "$1" in
    -h|--help)
      usage;;
    -v|--version)
      version;;
    -s|--show-app-id)
      show_app_id;;
    -t|--token)
      vault_token=$2
      shift 2;;
    --addr)
      vault_addr=$2
      shift 2;;
    -a|--app-id)
      app_id=$2
      shift 2;;
    --)
      shift
      break;;
    *)
      error "unknown option $1"
      usage;;
  esac
done
# remaining arguments are the mountpoints to configure
names=( $* )

# validate and mutate inputs
vault_token=`strip_leading_at "$vault_token"`
if [ "$?" -eq "0" ]; then
  # token had a leading "@"; read token from file
  vault_token=`cat $vault_token 2>/dev/null`
fi

if [ -z "$vault_token" -a -z "$VAULT_TOKEN" ]; then
  # log in to Vault via the App ID auth backend and extract the client
  # token from the JSON response
  vault_token=`curl \
    -s \
    -X POST \
    -d "{\"app_id\":\"${app_id}\",\"user_id\":\"${user_id}\"}" \
    ${vault_addr}/v1/auth/app-id/login \
    | jq -r '.["auth"]["client_token"]' 2>/dev/null`
  if [ -z "$vault_token" ]; then
    fatal 'Unable to authenticate to Vault'
  fi
  # flag token for cleanup (we created it, so we revoke it at the end)
  generated_token=1
fi

# set up the environment for the `vault` CLI calls below
if [ -n "$vault_token" ]; then
  export VAULT_TOKEN=$vault_token
fi
if [ -n "$vault_addr" ]; then
  export VAULT_ADDR=$vault_addr
fi

for name in ${names[@]}; do
  # ensure SSH secret backend is mounted, then re-check that the mount
  # actually appeared
  if ! vault mounts | grep -qs "^${name}/"; then
    log "Mounting SSH secret backend"
    vault mount -path="${name}" ssh
  fi
  if ! vault mounts | grep -qs "^${name}/"; then
    fatal "Unable to mount SSH secret backend"
  fi
done

if [ ${generated_token:-0} -eq 1 ]; then
  # clean up after ourself: revoke the token we generated above
  curl \
    -s \
    -X POST \
    -H "X-Vault-Token: ${vault_token}" \
    ${vault_addr}/v1/auth/token/revoke/${vault_token}
fi
| true |
8e53dbcee436f3218e5aaeb60e0f3fb210de13dd | Shell | tyfkda/yalp | /embed_examples/test.sh | UTF-8 | 814 | 3.59375 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
################################################################
# Test framework.
# Print "[ERROR] <message>" with the tag in bold red, then abort with
# exit status 1.
function error_exit() {
    printf '\033[1;31m[ERROR]\033[0;39m %s\n' "$1"
    exit 1
}
# Run one test binary and compare its output.
# $1 - path (relative to the current directory) of the executable to run
# $2 - expected stdout (trailing newlines ignored by $(...))
# Fails the whole suite via error_exit on a non-zero exit status or on an
# output mismatch; prints "ok" on success.
function run() {
    echo -n "Testing $1 ... "
    # quote $1 so test names containing spaces are passed as one word
    result=$(./"$1")
    code=$?
    if [ "$code" -ne 0 ]; then
        error_exit "exit status is not 0 [$code]"
    fi
    if [ "$result" != "$2" ]; then
        error_exit "$2 expected, but got '$result'"
    fi
    echo ok
}
################################################################
# Test cases: each line runs one sample binary from the build directory
# and checks its exact stdout (multi-line expectations are passed as one
# quoted argument).
run 001_run_string 'Hello, Yalp!'
run 002_register_c_func '1234321'
run 003_call_script_func '3'
run 004_use_binder "1234321
123
** Foo bar baz **"

################################################################
# All tests succeeded (error_exit would have aborted earlier otherwise).
echo -n -e "\033[1;32mVM-TEST ALL SUCCEEDED!\033[0;39m\n"
| true |
1af4c7f9e10706b23973161a6a1bf8b657b6b264 | Shell | zg2pro/analysis-production-randomized-texts | /src/script.sh | UTF-8 | 371 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Rebuild res.txt from every *.txt file in the current directory:
# lowercase the text, squeeze runs of spaces, split one word per line,
# drop any word containing a French accented character, rejoin everything
# onto one line, and strip punctuation. Output is appended without a
# trailing newline, so all inputs end up concatenated in res.txt.
rm -f res.txt
for fichier in *.txt
do
(tr [A-Z] [a-z] < $fichier | tr -s ' ' | tr ' ' '\n' | grep -v 'é' | grep -v 'è' | grep -v 'à' | grep -v 'â' | grep -v 'ä' | grep -v 'ê' | grep -v 'ë' | grep -v 'î' | grep -v 'ï' | grep -v 'ç' | grep -v 'û' | grep -v 'ü' | grep -v 'ö' | grep -v 'ô' | tr '\n' ' ' | tr -d ',?.;:/!') >> res.txt
done
| true |
83c1d86eda774003484a6091596783986a627cc3 | Shell | supercobra/bamzooka_docker | /build-bam-image.sh | UTF-8 | 1,254 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
NAME="bamzooka/bamzooka"

echo "+=======================+"
echo "| BUILDING DOCKER IMAGE |"
echo "+=======================+"

# Ask up front so the (long) build can run unattended afterwards.
# -r keeps backslashes in the answer literal.
echo -n "Do you want to push to Docker after the build? (y/n)"
read -r want_to_push
if [ "$want_to_push" == "y" ]; then
  echo "OK, will push image to docker.com"
  sleep 2
else
  echo "OK, just building then, no pushing anything..."
  sleep 2
fi

# Build the image: clone a fresh shallow checkout into the image context,
# strip logs/tmp, and tag with a timestamped version.
set -xe
APP=./image/base/bamzooka
rm -rf "$APP" # remove previous build if any
git clone --depth=1 git@github.com:metadot/metadot-workspace.git "$APP"
rm -f "$APP"/apps/bamzooka-backend/log/*
rm -rf "$APP"/apps/bamzooka-backend/tmp
VERSION=$(date +%Y%m%d.%H%M%S)
BASE_VERSION="2.0"
FULLNAME=$NAME:$BASE_VERSION.$VERSION
docker build image/base -t "$FULLNAME"
set +xe
rm -rf "$APP"

echo "+=======================+"
echo "| IMAGE BUILD DONE |"
echo "+=======================+"

if [ "$want_to_push" == "y" ]; then
  echo "Pushing image to docker.com"
  sleep 2
  set -xe
  ./push_docker.sh "$FULLNAME"
  set +xe
  echo "+"
  echo "|"
  echo "|"
  echo "|"
  echo "| IMAGE pushed to docker.com |"
  echo "| VERSION: $FULLNAME"
  echo "|"
  echo "+----------> ALL DONE!!!"
else
  echo "Not pushing image to docker.com"
  sleep 2
fi
# NOTE(review): this reminder message looks truncated — confirm what
# should be remembered to update here.
echo "REMEMBER to update the "
exit 0
| true |
039aba0791e7a4419903d38540d1299803467a8d | Shell | tanner00/anu | /build.sh | UTF-8 | 779 | 2.921875 | 3 | [] | no_license | BASE=~/Desktop/anu/
cd "$BASE" || exit 1

# mkdir -p: idempotent, no racy test-then-create
mkdir -p obuild

# Assemble every *.S with nasm and compile every *.c with the i686-elf
# cross compiler into obuild/.
for i in *.*; do
	# Skip non-regular files (e.g. directories whose name contains a
	# dot). The old `|| break` aborted the whole loop at the first such
	# entry, silently skipping every remaining source file.
	[ -f "$i" ] || continue
	without_extension="${i%%.*}"
	if [[ "$i" == *.S ]]
	then
		nasm -felf32 "$i" -o "$BASE/obuild/$without_extension.o"
	elif [[ "$i" == *.c ]]
	then
		~/Desktop/i686-elf-4.9.1-Linux-x86_64/bin/i686-elf-gcc -D"$1" -g -c "$i" -o "$BASE/obuild/$without_extension.o" -I"$BASE/include" -std=gnu99 -ffreestanding -O0 -Wall -Wextra
	fi
done

# Link everything into the kernel image and boot it in QEMU.
# $2 = display backend, $3 = memory size.
~/Desktop/i686-elf-4.9.1-Linux-x86_64/bin/i686-elf-gcc -T "$BASE/link.ld" -g -o "$BASE/obuild/os.bin" -ffreestanding -O0 -nostdlib "$BASE/obuild/"*.o -lgcc
qemu-system-i386 -serial stdio -kernel "$BASE/obuild/os.bin" -display "$2" -m "$3" # dumb gtk output (sdl doesn't work with qemu monitor)
1752e9377b66ae7c02e1df1f02147b9b31002971 | Shell | radiomalva/radiomalva-player | /bin/onSongChange.d/twitter.sh | UTF-8 | 1,041 | 3.4375 | 3 | [] | no_license | #! /bin/bash -eux
echo "$0" "$@"

f="$1"
## Only handle files under /media
grep -q "^/media/" <<< "$f" || exit
# Restrict to ogg/mp3 extensions
grep -q -e "ogg$" -e "mp3$" <<< "$f" || exit
# Skip station jingles (/Cuñas/)
! grep -q -e "/Cuñas/" <<< "$f"|| exit

# Tweet to the Radio Malva account — only for Podcast and Programas
grep -q -e "/Podcast/" -e "/Programas/" <<< "$f" || exit # Podcast and Programas only

# h = 4th path component (category), g = 5th (show/feed folder name).
h="$(echo $f| cut -d'/' -f4)"
# NOTE(review): the sed pattern is single-quoted, so $h is NOT expanded —
# this matches the literal regex ".$h" instead of stripping the category;
# confirm whether double quotes were intended here.
g="$(echo $f| sed 's/.$h//' | cut -d'/' -f5)"
case "$h" in
    Programas)
        k="http://radiomalvapodcast.wordpress.com"
        ;;
    Podcast)
        # Look up the episode title and channel link in gPodder's
        # database, then write the title into the file's ID3 tag.
        i="$(basename "$f")"
        j="$(sqlite3 ~/.config/gpodder/database.sqlite "select title FROM episodes WHERE filename='$i'")"
        TITLE="$(echo "$j"|sed "s/\’/'/g")"
        k="$(sqlite3 ~/.config/gpodder/database.sqlite "select link from channels where foldername='$g'")"
        eyeD3 --title="$TITLE" "$f"
        ;;
esac

# Compose the tweet body and hand it to the auto-tweet helper.
(
echo "Ahora sonando en Radio Malva: $g"
eyeD3 --rfc822 "$f" | grep Title | cut -d':' -f2 || true
echo "Escúchalo en el 104.9FM, en radiomalva.org o en podcast: $k"
) | ~/bin/autotwt.sh
| true |
bff694d37f5674b8ba7cfb6412918b94d0759e4d | Shell | adymitruk/pairup | /bin/pairup-kill-server | UTF-8 | 158 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Delete a rackspace server instance (stub: not implemented yet).
set -ex
# Pull in the shared helper functions that live next to this script.
source "${BASH_SOURCE%/*}/pairup-helper-functions"

echo "$0 not implemented yet" >&2
exit 1
| true |
a30102280b90c19e232fd09a70e9802a48dcfc15 | Shell | rockmenjack/TupleNet | /src/tests/test_simple_untunnel.sh | UTF-8 | 4,186 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#noparallel
. env_utils.sh
env_init ${0##*/} # 0##*/ is the filename
# this testing cannot run inside a container
skip_if_in_container

# Bring up a single simulated hypervisor running the tuplenet daemon as a
# gateway with untunneling enabled.
DISABLE_DUMMY=1
sim_create hv1 || exit_test
ENABLE_UNTUNNEL=1 ONDEMAND=0 GATEWAY=1 start_tuplenet_daemon hv1 172.20.11.1
wait_for_brint # waiting for building br-int bridge

# for supporting parallel testing, must generate uniq namespace name
LS_A=`random_short_str "LS-A"`
LS_B=`random_short_str "LS-B"`
out1=`random_short_str "out1"`
out2=`random_short_str "out2"`
net_namespace_add ${LS_A}
net_namespace_add ${LS_B}
net_namespace_add ${out1}
net_namespace_add ${out2}

# Topology: LS_A/LS_B behind LR-A; transit switches m1/m2 connect LR-A to
# the two edge routers, which SNAT traffic out to out1/out2.
tpctl ls add ${LS_A} || exit_test
tpctl ls add ${LS_B} || exit_test
tpctl lr add LR-A || exit_test
tpctl ls add m1 || exit_test
tpctl ls add m2 || exit_test
tpctl lr add edge1 hv1 || exit_test
tpctl lr add edge2 hv1 || exit_test
tpctl ls add ${out1} || exit_test
tpctl ls add ${out2} || exit_test

# link ${LS_A} to LR-A
tpctl lr link LR-A ${LS_A} 10.10.1.1/24 || exit_test
# link ${LS_B} to LR-A
tpctl lr link LR-A ${LS_B} 10.10.2.1/24 || exit_test
# link m1 to LR-A
tpctl lr link LR-A m1 100.10.10.1/24 || exit_test
# link m2 to LR-A
tpctl lr link LR-A m2 100.10.10.3/24 || exit_test
# link m1 to edge1
tpctl lr link edge1 m1 100.10.10.2/24 || exit_test
# link m2 to edge2
tpctl lr link edge2 m2 100.10.10.2/24 || exit_test
# link ${out1} to edge1
tpctl lr link edge1 ${out1} 172.20.11.19/24 || exit_test
# link ${out2} to edge2
tpctl lr link edge2 ${out2} 172.20.12.20/24 || exit_test

# set static routes on LR-A to dispatch traffic to m1,m2
tpctl lsr add LR-A lsr1 172.20.11.0/24 100.10.10.2 LR-A_to_m1 || exit_test
tpctl lsr add LR-A lsr2 172.20.12.0/24 100.10.10.2 LR-A_to_m2 || exit_test
# set static route on edge1
tpctl lsr add edge1 lsr3 10.10.0.0/16 100.10.10.1 edge1_to_m1 || exit_test
# set static route on edge2
tpctl lsr add edge2 lsr4 10.10.0.0/16 100.10.10.3 edge2_to_m2 || exit_test

# add snat on edge1, edge2
tpctl lnat add edge1 snat1_rule 10.10.0.0/16 snat 172.20.11.100 || exit_test
tpctl lnat add edge2 snat2_rule 10.10.0.0/16 snat 172.20.12.101 || exit_test

# Attach one port per namespace and register it as a logical switch port.
port_add ${LS_A} 10.10.1.2/24 00:00:06:08:08:01 10.10.1.1 || exit_test
port_add ${LS_B} 10.10.2.2/24 00:00:06:08:08:02 10.10.2.1 || exit_test
port_add ${out1} 172.20.11.16/24 00:00:06:08:08:03 172.20.11.1 || exit_test
port_add ${out2} 172.20.12.18/24 00:00:06:08:08:04 172.20.12.1 || exit_test

tpctl lsp add ${LS_A} ovsport-${LS_A} 10.10.1.2 00:00:06:08:08:01 || exit_test
tpctl lsp add ${LS_B} ovsport-${LS_B} 10.10.2.2 00:00:06:08:08:02 || exit_test
tpctl lsp add ${out1} ovsport-${out1} 172.20.11.16 00:00:06:08:08:03 || exit_test
tpctl lsp add ${out2} ovsport-${out2} 172.20.12.18 00:00:06:08:08:04 || exit_test
wait_for_flows_unchange # waiting for install flows

# End-to-end reachability checks across both edges and the transit nets.
ret="`ip netns exec ${LS_A} ping 172.20.11.16 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_A} ping 172.20.12.18 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_B} ping 172.20.11.16 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_B} ping 172.20.12.18 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_A} ping 100.10.10.1 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_A} ping 100.10.10.2 -c 1`"
verify_has_str "$ret" "1 received" || exit_test
ret="`ip netns exec ${LS_A} ping 100.10.10.3 -c 1`"
verify_has_str "$ret" "1 received" || exit_test

# Verify untunneled traffic to an unknown destination shows up on br-int.
tcpdump_file=`random_short_str "${OVS_RUNDIR}/tcpdump.pcap"`
tcpdump -i br-int icmp -nevvv -w $tcpdump_file &
tcpdump_pid=$!
on_tuplenet_exit "kill $tcpdump_pid 2>/dev/null"
sleep 2
ret="`ip netns exec ${LS_B} ping 192.168.30.10 -c 1`"
pmsg "$ret"
kill $tcpdump_pid 2>/dev/null
sleep 1 # tcpdump need sometime to dump packet into file
pkt_dump="`tcpdump -r $tcpdump_file -nnevvv`"
verify_has_str "$pkt_dump" "192.168.30.10" || exit_test

# delete m1 to test if it cause overlap deletion
(yes | tpctl ls del -r m1) || exit_test
wait_for_flows_unchange # waiting for install flows
ret="`ip netns exec ${LS_A} ping 100.10.10.3 -c 1`"
verify_has_str "$ret" "1 received" || exit_test

pass_test
| true |
a1780c26bf644dda055639f110a27bf60a04202c | Shell | MCSH/dot-files | /bin/pomodoro.sh | UTF-8 | 340 | 3.234375 | 3 | [] | no_license |
# Run one pomodoro: announce the session, sleep for the given number of
# minutes (default 45), then announce the end and ring the bell.
pomodor_wait(){
    M=${1:-45}  # minutes; default 45 when no/empty argument
    TIME=$((M * 60))
    notify-send "Pomodor session started for $M minutes"
    sleep "$TIME" && notify-send "Pomodor session ended!" && aplay /home/sajjad/bin/bell.wav
}
export -f pomodor_wait

# Pass the argument positionally instead of splicing it into the command
# string: the old `bash -c "pomodor_wait $1"` let a crafted argument
# inject arbitrary shell code into the detached session.
nohup bash -c 'pomodor_wait "$1"' _ "$1" > /dev/null 2>&1 &
| true |
701a2f4b680decda04b56fb75af481f14fc71b04 | Shell | jawaff/Youtube-Scripts | /youtube-video-download | UTF-8 | 268 | 3.03125 | 3 | [] | no_license | #!/bin/sh
# Require both arguments: $1 = library group (subdirectory under the Plex
# video folder), $2 = YouTube video or playlist URL.
if [ $# -lt 2 ]; then
    echo "youtube-video-download <Group> <YoutubeURL>"
    exit 1
fi
# Download best mp4 video + m4a audio (falling back to best available),
# merge into mp4, and name files after the video title.
youtube-dl --yes-playlist -f "bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio" --merge-output-format mp4 -o "/mnt/plex-hdd/videos/$1/%(title)s.%(ext)s" "$2"
| true |
16547a6e2b9201cd1085bc9aee7530c768d10f10 | Shell | mattlee821/000_GWAS_formatting | /scripts/convert_delimiter.sh | UTF-8 | 194 | 2.609375 | 3 | [] | no_license | #!/bin/bash
# Convert comma-separated to tab-separated (perl -i edits "file" in place;
# replace "file" with the actual GWAS file path)
perl -wnlpi -e 's/,/\t/g;' file

# Convert whitespace-separated to tab-separated: \s+ collapses each run of
# one or more whitespace characters (spaces or tabs) into a single tab
perl -wnlpi -e 's/\s+/\t/g;' file
| true |
0984250ee037fcd45038d4fab762c5ffc37dd541 | Shell | thanple/ThinkingInTechnology | /shes/cookbook.sh | UTF-8 | 1,206 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
rm -rf ./dir/*
find . -iname 'test*' | xargs -i cp {} ./dir1/

# Count lines per file.
# find -print0 emits '\0'-separated output; xargs -0 splits on '\0'.
echo "***** test 1 ******"
find ./ -type f -name "*.sh" -print0 | xargs -0 wc -l

# Extract parts of "sample": % strips from the right, # strips from the left.
echo "***** test 2 ******"
file_jpg="sample.jpg"
file_func_jpg="sample.func.easy.jpg"
echo ${file_jpg%.*}
echo ${file_func_jpg%.*}
echo ${file_func_jpg%%.*}
echo ${file_jpg#*.}
echo ${file_func_jpg##*.}

# echo -e enables escape sequences; read -p prompts interactively.
echo -e "***** test 3 ******\n *********"
#read -p "What woudld you like?" likes
echo ${likes}

# Parallel commands with &; wait is like Thread.join(), except these are
# parallel processes rather than threads.
echo "***** test 4 ******"
PIDARRAY=()
for file in ./*
do
md5sum $file &
PIDARRAY+=("$!")
done
wait ${PIDARRAY[@]}

#awk '{pattern + action}' {filenames}
echo "***** test awk ******"
awk -F":" '{ print $1 }' /etc/passwd
awk '{if(NR>=20 && NR<=30) print $1}' test.txt
echo 'I am Poe,my qq is 33794712' | awk -F '[, ]' '{print $3" "$7}'
echo 'I am Poe,my qq is 33794712' | awk 'BEGIN{FS="[, ]"} {print $3" "$7}'
ls -all |awk 'BEGIN {size=0;} {size=size+$5;} END{print "[end]size is ",size}'
ada1f90b35a877f5b631e6fbb2696da019db2c72 | Shell | boklm/upload-manager-conf | /scripts/copy-dist | UTF-8 | 204 | 2.734375 | 3 | [] | no_license | #!/bin/sh
set -e
# This is a Template Toolkit template: the [% ... %] directives are
# expanded by the upload manager before the script runs. destdir becomes
# <repodir>/<project>/<version>, shell-quoted by the template helper.
destdir=[% shell_quote(config('repodir') _ '/' _ u.project _ '/' _ u.version) %]
mkdir -p "$destdir"
# One mv per uploaded file, generated at template-expansion time.
[% FOREACH file IN u.files.keys %]
mv -f [% shell_quote(file) %] "$destdir"
[% END %]
| true |
6198f9b26982020896643c28591bf64a5d1f8111 | Shell | guenther-brunthaler/usr-local-bin-xworld-jv3gwuidf2ezyr5vbqavqtxyh | /html2ascii-with-aart-tables | UTF-8 | 2,653 | 4.1875 | 4 | [] | no_license | #! /bin/sh
# Print version/copyright information and exit successfully.
exit_version() {
	cat << ===; exit
$APP Version 2021.82

Copyright (c) 2021 Guenther Brunthaler. All rights reserved.

This script is free software.
Distribution is permitted under the terms of the GPLv3.
===
}

# Print usage help, then fall through into exit_version (which exits).
exit_help() {
	cat << ===; echo; exit_version
$APP - convert HTML to ASCII text (and any tables to ASCII-art)

Usage:

$APP [ <options> ... [--] ] < input.html > output.txt
$APP [ <options> ... [--] ] file1.html file2.html ...
$APP [ <options> ... [--] ] < file.html

The first usage variant lets $APP act like a filter.
The second variant creates *.txt files for all input files.
The third variant shows an ASCII-fied version of the file
in the "w3c" text-mode web browser, using pretty line-art
graphics characters rather than ASCII art for tables.

Supported options:

-w <columns>: Format output with that many columns (default: $default_width)
-f: Enforce overwriting of output files which already exist
-h: show this help and exit
-V: show version information and exit
===
}
APP=${0##*/}
set -e
# Report failure on any non-zero exit (trap 0 = EXIT in POSIX sh).
trap 'test $? = 0 || echo "\"$0\" failed!" >& 2' 0

default_width=72
width=$default_width
force=false
while getopts fw:hV opt
do
	case $opt in
		f) force=true;;
		w) width=$OPTARG;;
		h) exit_help;;
		V) exit_version;;
		*) false || exit
	esac
done
shift `expr $OPTIND - 1 || :`

# Verify all required external utilities are installed, printing the
# apt package name for any that are missing. In the list, "pkg:" entries
# set the package name for the utilities that follow; a bare ":" resets it
# so each utility defaults to a package of the same name.
all=true; # An *: entry must be first for initializing $pkg.
for need in icu-devtools: uconv : tidy w3m xmlstarlet
do
	case $need in
		*:) pkg=${need%:};;
		*)
			: ${pkg:="$need"}
			command -v -- "${need}" > /dev/null 2>& 1 || {
				echo
				echo "Required utility '$need' is missing!"
				echo "On some systems it can be installed with"
				echo "\$ sudo apt-get install $pkg"
				all=false
			} >& 2
			pkg=
	esac
done
$all || exit
# Transliterate German umlauts/eszett to ASCII via ICU's de-ASCII rules.
unumlaut() {
	uconv -x "::de-ASCII;"
}

# Core conversion pipeline: normalize HTML with tidy, force border=1 on
# every <table> (via a PYX round-trip with xmlstarlet) so w3m draws table
# frames, then render to US-ASCII text at $width columns.
filter() {
	tidy -asxhtml --quote-nbsp no --show-warnings no --show-info no \
		2> /dev/null \
	| unumlaut \
	| xmlstarlet pyx \
	| sed '/^Aborder/d; /^(table/!b; p; s/.*/Aborder 1/' \
	| xmlstarlet p2x \
	| w3m -cols "$width" -O US-ASCII -T text/html -graph
}

# Reduce a string to a safe ASCII identifier: transliterate, replace every
# non-alphanumeric run with "_", and trim leading/trailing underscores.
asciify() {
	unumlaut \
	| LC_ALL=C tr -c '[:alnum:]' _ \
	| sed 's/___*/_/g; s/^_//; s/_$//'
}

# Derive the output file name for $1: strip an .htm/.html-like suffix,
# asciify the stem, append ".txt"; result is returned in $result.
asciify_txt() {
	result=`printf '%s\n' "${1%.[Hh][Tt][Mm]*}" | asciify`.txt
}
# Main dispatch: with no arguments act as a stdin->stdout filter;
# otherwise first validate every input file (exists, and its output may
# be written), then convert each file to <stem>.txt.
case $# in
	0) filter;;
	*)
		for a
		do
			test -f "$a" || {
				echo "Input file '$a' does not exist!"
				false || exit
			}
			asciify_txt "$a"
			test ! -e "$result" && continue
			echo
			echo "Output file '$result' already exists!"
			if $force
			then
				echo "(Overwriting)"
			else
				echo "(Use -f to enforce overwriting.)"
				false || exit
			fi
		done >& 2
		# second pass: all checks passed, do the actual conversions
		for a
		do
			asciify_txt "$a"
			filter < "$a" > "$result"
		done
esac
| true |
2f0fb71709c8a35d6acb8c666503bd083fe5f846 | Shell | dokempf/dune-python-buildsystem-test | /testcases/testtools-case/install.sh | UTF-8 | 262 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

#
# Testing a dune-testtools workflow
#

# $DUNECONTROL_OPTS is expanded unquoted on purpose so it word-splits
# into the two separate options.
DUNECONTROL_OPTS="--builddir=$(pwd)/build/testtools-case --module=dune-testtools"
./dune-common/bin/dunecontrol $DUNECONTROL_OPTS all

# Build and run the dune-testtools test suite in the build tree.
pushd build/testtools-case/dune-testtools
make build_tests
ctest
| true |
bfba86bb22f7c10a3bba5abdf61c3526dace3f6a | Shell | NVIDIA/deepops | /workloads/examples/slurm/dask-rapids/files/run.sh | UTF-8 | 2,177 | 3.84375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
pythonscript="${SCRIPT_DIR}/sum.py"
# Default problem size (overridable with -x/-y).
XDIM=500000
YDIM=500000

# NOTE(review): usage is defined but never invoked below — selecting no
# flags silently does nothing; confirm whether it should be called then.
function usage {
    echo "usage: $pythonscript [-cgd]"
    echo "  -c  use CPU only, num sockets, num cores"
    echo "  -g  use local GPUs, number of GPUs"
    echo "  -d  use distributed dask"
    exit 1
}

# Split MKL threads evenly across GPUs on this node.
export MKL_NUM_THREADS=$(( $(nproc) / $(nvidia-smi -L | wc -l) ))

# Set MAX values from the local hardware inventory.
MAX_GPUS=$(nvidia-smi -L | wc -l)
MAX_CPU_SOCKETS=$(lscpu | grep Socket | awk '{print($NF)}')
MAX_CPU_CORES_PER_SOCKET=$(lscpu | grep -F "Core(s) per socket" | awk '{print($NF)}')
MAX_CPU_THREADS_PER_CORE=$(lscpu | grep Thread | awk '{print($NF)}')

DFLAG=0
CFLAG=0
GFLAG=0
# Parse options; -c/-g are clamped to what the hardware actually has.
while getopts ":c:g:x:y:d" opt; do
  case ${opt} in
    c)
      n_cores=${OPTARG}
      if [ "$n_cores" -gt $((MAX_CPU_CORES_PER_SOCKET * MAX_CPU_THREADS_PER_CORE)) ]; then
        n_cores=$((MAX_CPU_CORES_PER_SOCKET * MAX_CPU_THREADS_PER_CORE))
      fi
      CFLAG=1
      ;;
    g)
      n_gpus=${OPTARG}
      if [ "$n_gpus" -gt "$MAX_GPUS" ]; then
        n_gpus=$MAX_GPUS
      fi
      GFLAG=1
      ;;
    d)
      DFLAG=1
      ;;
    x)
      XDIM="${OPTARG}"
      ;;
    y)
      YDIM="${OPTARG}"
      ;;
    \?) echo "Usage: cmd [-c ncores] [-g ngpus] [-d]"
      ;;
    :)
      echo "Invalid option: $OPTARG requires an argument" 1>&2
      ;;
  esac
done
shift $((OPTIND -1))

# Dispatch to the python benchmark: distributed dask (GPU or CPU), or a
# single-node GPU-only / CPU-only run.
if [ $DFLAG == 1 ]; then
  if [ $GFLAG == 1 ] ; then
    python "$pythonscript" --use_distributed_dask --use_gpus_only --xdim="${XDIM}" --ydim="${YDIM}"
  elif [ $CFLAG == 1 ]; then
    python "$pythonscript" --use_distributed_dask --use_cpus_only --xdim="${XDIM}" --ydim="${YDIM}"
  fi
elif [ $GFLAG == 1 ]; then
  python "$pythonscript" --use_gpus_only --n_gpus="$n_gpus" --xdim="${XDIM}" --ydim="${YDIM}"
elif [ $CFLAG == 1 ]; then
  python "$pythonscript" --use_cpus_only --n_cpu_sockets="${MAX_CPU_SOCKETS}" --n_cpu_cores_per_socket="$n_cores" --xdim="${XDIM}" --ydim="${YDIM}"
fi
| true |
529639efec84913609eef4d5e20b5d5a57aae29a | Shell | FrancisMathew/COMP2101 | /bash/passwordguesser.sh | UTF-8 | 1,941 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# This script demonstrates testing to see if 2 strings are the same
# it also demonstrates using conditional execution
# TASK 1: Improve it by asking the user for a password guess instead of using inline literal data (i.e. TestString)
# TASK 2: Improve it by rewriting it to use the if command
#    The if command should test if they got it right, in which case it should tell them they got it right
#    The if command should tell them they got it wrong
# TASK 3: Improve it by giving them 5 tries to get it right before failing
# *** Do not use the exit command, and do not use a loop (e.g. while or for)
# (The deep nesting below is deliberate: the assignment forbids loops.)

#Task 1 and Task2 ==================================================================================================================
echo "Enter the password:"
read password
referencestring=password10
if [ $password = $referencestring ]
then
 echo "Your guess is correct"
else
 echo "Your guess is wrong"
fi

#Task 3=========================================================================
# Five attempts total: the first read plus four nested retries.
echo "Guess your password(NB:FIVE TRIES ONLY )"
read password
if [ $password != password10 ]
then
 echo "Guess your password for 2nd time"
 read password
 if [ $password != password10 ]
 then
  echo "Guess your password for 3rd time"
  read password
  if [ $password != password10 ]
  then
   echo "Guess your password for 4th time"
   read password
   if [ $password != password10 ]
   then
    echo "Guess your password for 5th time"
    read password
    if [ $password != password10 ]
    then
     echo "Full attempt are finished......Sorry"
    else
     echo "Password is correct.....Hurrah"
    fi
   else
    echo "Password is correct.....Hurrah"
   fi
  else
   echo "Password is correct.....Hurrah"
  fi
 else
  echo "Password is correct.....Hurrah"
 fi
else
 echo "Password is correct.....Hurrah"
fi
da270cd7348b207263fa1032e3cd90d41d85dd0d | Shell | nathanael-cho/configuration-files | /.bash_aliases | UTF-8 | 1,638 | 2.78125 | 3 | [] | no_license | ### Aliases
## Emacs — common misspellings still launch emacs, plus quick variants
alias emasc=emacs
alias emcas=emacs
alias emsca=emacs
alias emcs=emacs
alias enw='emacs -nw'
alias eq='emacs -nw -Q'
## Make — typo guards
alias mkae=make
alias mae=make
alias meka=make
## General Command-Line Help
# ag honours a user-level ignore file; agh also searches hidden files
alias ag='ag --mmap --path-to-ignore ~/.ignore'
alias agh='ag --hidden'
alias brewup='brew update; brew upgrade; brew cleanup; brew doctor'
# prefer the Homebrew curl over the system one
alias curl='/usr/local/opt/curl/bin/curl'
# dh: wipe and recreate the bash history file (\rm bypasses the rm alias below)
alias dh='history -cw; \rm ~/.bash_history; touch ~/.bash_history'
alias e='exit'
# macOS ships lldb rather than gdb
alias gdb='lldb'
alias gti='git'
# GNU coreutils ls (gls) for --color support on macOS
alias ls='gls -p --color=auto'
alias la='ls -a'
alias ll='ls -l'
# My exit: command that clears the session's history and then exits
alias me='history -cw; exit'
alias msql='mysql.server'
alias refresh='cd; reset; exec bash -l'
# discourage raw rm in favour of the trash utility
alias rm='echo You should be using trash'
alias sncdu='sudo ncdu'
# GNU versions of time/timeout from coreutils
alias time='gtime'
alias timeout='gtimeout'
alias ts='trash -s'
alias tsy='trash -sy'
## Virtual Machine Shenanigans (for Pi-Hole)
alias vs='VBoxManage startvm pi-hole --type headless'
alias ve='VBoxManage controlvm pi-hole poweroff'
alias sph='ssh pi-hole'
## Miscellaneous — filesystem cleanup helpers
alias cleanall='cleantilde; cleands'
alias cleands='sudo find / -name ".DS_Store" -depth -exec rm {} \;'
alias cleantilde='sudo find / -type f -name "*~" -exec rm -f "{}" \;'
alias textedit='open -a "textEdit"'
## Navigation shortcuts
alias cl='cd ~/Church\ Life/'
alias conf='cd ~/Development/configuration-files/'
alias dev='cd ~/Development/'
alias pe='cd ~/Development/go/src/project_euler/'
alias prof='cd ~/Professional/'
## Python — conda/virtualenv helpers
alias virtualenv='virtualenv --system-site-packages'
alias va='source activate'
alias da='conda deactivate'
| true |
f4a9f6168b81bc21be13e89496e8c90458e32bcb | Shell | andrewnikiforov/ft_services | /setup.sh | UTF-8 | 2,000 | 2.796875 | 3 | [] | no_license | #echo -ne "\033[32m""Enter your name: "
#read name
#echo "Hello $name, welcome to my FT_SERVICES"
#echo -e "\033[33m""=======Let's set up the project!=======\n"
#echo -e "\033[32m""=======At first start minikube in virtualbox. It start for same time. Keep calm and ask the questions=======\n""\033[37m"
# 1) Start a local Kubernetes cluster in VirtualBox.
minikube start --vm-driver=virtualbox --cpus 3 --memory=3000 --disk-size 10000MB
#echo -e "\033[32m""Yeah! Minikube started!\n"
#echo -ne "\033[31m""Press Enter if we can go to next part"
#read y
#echo -e "\033[33m""Let's lok for it's addons\n""\033[37m"
#minikube addons list
#echo -e "\033[33m""We need to enable Metallb! Let's do it!\n""\033[37m"
# 2) Enable the MetalLB load-balancer addon.
minikube addons enable metallb
#echo -ne "\033[31m""Press Enter if we can go to next part""\033[37m"
#read y
#minikube addons list
# 3) Point this shell's docker client at minikube's docker daemon, so the
#    images built below land inside the cluster, then pre-pull MetalLB images.
eval $(minikube docker-env)
docker pull metallb/speaker:v0.8.2
docker pull metallb/controller:v0.8.2
#echo -e "\033[33m""At now we will build docker image for nginx.\n""\033[37m"
#echo -ne "\033[31m""Press Enter for it""\033[37m"
#read y
# 4) Build one image per service from ./srcs (grafana/influxdb/ftps disabled).
docker build --no-cache -t nginx-image ./srcs/nginx/
docker build --no-cache -t php-image ./srcs/php/
docker build --no-cache -t wp-image ./srcs/wordpress/
docker build --no-cache -t mysql-image ./srcs/mysql/
#docker build --no-cache -t influxdb-image ./srcs/influxdb/
#docker build --no-cache -t grafana-image ./srcs/grafana/
#docker build --no-cache -t ftps-image ./srcs/ftps/
#echo -e "\033[33m""And now we creat and start Load Balancer pod and service with nginx\n""\033[37m"
#echo -ne "\033[31m""Press Enter for it""\033[37m"
#read y
#echo -e "\033[33m""At now we will build docker image for php.\n""\033[37m"
# 5) Apply the Kubernetes manifests for MetalLB config and each service.
kubectl apply -f ./srcs/metallb/configmap.yaml
kubectl apply -f ./srcs/nginx/nginx.yaml
kubectl apply -f ./srcs/php/php.yaml
kubectl apply -f ./srcs/wordpress/wp.yaml
kubectl apply -f ./srcs/mysql/mysql.yaml
#kubectl apply -f ./srcs/influxdb/influxdb.yaml
#kubectl apply -f ./srcs/grafana/grafana.yaml
#kubectl apply -f ./srcs/ftps/ftps.yaml
# 6) Open the Kubernetes dashboard in the browser (blocks until closed).
minikube dashboard
3f76421f13dd856166578c69ab5caebc54f690e9 | Shell | wolfbox/packages | /xorg-fonts-encodings/PKGBUILD | UTF-8 | 1,024 | 2.703125 | 3 | [] | no_license | # Maintainer: Andrew Aldridge <i80and@foxquill.com>
# Derived from Arch Linux
# PKGBUILD metadata for the X.org font encodings package.
pkgname=xorg-fonts-encodings
pkgver=1.0.4
pkgrel=1
pkgdesc="X.org font encoding files"
# Font data only — no architecture-specific binaries.
arch=('any')
url="http://xorg.freedesktop.org/"
license=('custom')
makedepends=('xorg-mkfontscale' 'xorg-util-macros' 'xorg-font-util')
groups=('xorg-fonts' 'xorg')
# !makeflags: disable parallel make for this build.
options=(!makeflags)
source=(${url}/releases/individual/font/encodings-${pkgver}.tar.bz2)
sha256sums=('ced6312988a45d23812c2ac708b4595f63fd7a49c4dcd9f66bdcd50d1057d539')
build() {
  # Configure and compile inside the extracted source tree.
  cd "$srcdir/encodings-$pkgver"
  ./configure --prefix=/usr
  make
}
package() {
  # Install the built encodings plus the license into the staging dir.
  cd "${srcdir}/encodings-${pkgver}"
  make DESTDIR="${pkgdir}" install
  install -Dm644 COPYING "${pkgdir}/usr/share/doc/${pkgname}/COPYING"
  # Regenerate the encodings files so they do not reference the
  # non-compressed versions. $pkgdir is quoted so the cd does not break
  # (and mkfontscale does not run in the wrong directory) when the build
  # path contains spaces.
  cd "${pkgdir}/usr/share/fonts/encodings/large"
  mkfontscale -b -s -l -n -r -p /usr/share/fonts/encodings/large -e . .
  cd "${pkgdir}/usr/share/fonts/encodings/"
  mkfontscale -b -s -l -n -r -p /usr/share/fonts/encodings -e . -e large .
}
| true |
bd3c23262ec45815966b96b35469a4a7480acaa5 | Shell | Detaysoft/jidesha | /firefox/make.sh | UTF-8 | 991 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Build the jidesha Firefox extension (jidesha.xpi) from the template
# sources, stamping in the deployment-specific domains and extension ID.

#space-separated list of domains
DOMAINS="meet.detaysoft.com"

# The ID of the extension. This is to be chosen for the particular deployment and
# is used to allow applications (e.g. jitsi-meet) to detect whether the
# extension they need is installed. The same ID should not be used for different
# deployments.
# See https://developer.mozilla.org/en-US/Add-ons/Install_Manifests for requirements
# for the format.
EXT_ID="meet.detaysoft@detaysoft.com"

# Chrome content root: the extension ID with '@' mapped to '.'.
# $() instead of backticks; printf instead of echo for arbitrary data.
CONTENT_ROOT=$(printf '%s' "$EXT_ID" | tr @ .)

# Two separate tests joined with || — the old `[ -z a -o -z b ]` form is
# deprecated and ambiguous per POSIX.
if [ -z "$DOMAINS" ] || [ -z "$EXT_ID" ]; then
    echo "Domains or extension ID not defined." 1>&2
    exit 1
fi

# Start from a clean build tree.
rm -rf target
rm -f jidesha.xpi

mkdir -p target/content
# One placeholder icon per configured domain. $DOMAINS is intentionally
# unquoted so it word-splits into the individual domains; the copy target
# is quoted in case a domain ever contains shell metacharacters.
for domain in $DOMAINS; do
    cp empty.png "target/content/${domain}.png"
done

# Substitute the deployment values into the template sources.
sed -e "s/JIDESHA_DOMAINS/$DOMAINS/" bootstrap.js > target/bootstrap.js
sed -e "s/JIDESHA_EXT_ID/$EXT_ID/" install.rdf > target/install.rdf
sed -e "s/CONTENT_ROOT/$CONTENT_ROOT/" chrome.manifest > target/chrome.manifest

# Package everything under target/ into the final XPI. && (not ;) so zip
# never runs against the wrong directory if the cd fails.
(cd target && zip -r ../jidesha.xpi *)
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.