blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
789a5f6e170d9d165a97063bd195069f8d77b3f4
|
Shell
|
CReinicke1992/assignment
|
/guessinggame.sh
|
UTF-8
| 615
| 3.921875
| 4
|
[] |
no_license
|
# File: guessinggame.sh
# Check the player's guess against the number of files in the current
# directory.  Uses name indirection so the CALLER's variables are updated:
#
# Arguments:
#   $1 - NAME of a counter variable; incremented when the guess is right
#   $2 - NAME of the variable holding the guess; re-read from stdin on a miss
function guess_func {
    # Count directory entries with a glob instead of parsing `ls -1 | wc -l`,
    # which miscounts filenames that contain embedded newlines.
    local entry num_files=0
    for entry in *; do
        # The -e/-L test also handles the empty-directory case, where the
        # unmatched glob is left as a literal '*'.
        [[ -e $entry || -L $entry ]] && num_files=$((num_files + 1))
    done
    # Inside [[ ... -eq ... ]], $2 (a variable NAME) is resolved arithmetically
    # to the caller's current guess value.
    if [[ $2 -eq $num_files ]]
    then
        echo "Congratulations! Your guess is correct."
        # Indirect increment of the caller's counter variable.
        let $1=$1+1
    elif [[ $2 -lt $num_files ]]
    then
        echo "Your guess is too low. Try again:"
        read $2   # read the next guess straight into the caller's variable
    elif [[ $2 -gt $num_files ]]
    then
        echo "Your guess is too high. Try again:"
        read $2
    fi
}
# Main game loop: keep calling guess_func (which re-reads the guess on a
# miss) until it bumps the counter, which happens only on a correct guess.
count=0
echo "How many files are in the current directory?"
read guess
while [[ $count -eq 0 ]]
do
guess_func count guess
done
| true
|
c551a0c10a1813255cbe3da73398e0a5178c8bcf
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/opentxs-cli-git/PKGBUILD
|
UTF-8
| 1,431
| 2.96875
| 3
|
[] |
no_license
|
# Maintainer: Andy Weidenbaum <archbaum@gmail.com>
# PKGBUILD metadata for the opentxs command-line interface, built from git HEAD.
pkgname=opentxs-cli-git
pkgver=20140911
pkgrel=1
pkgdesc="Open Transactions Command Line Interface"
arch=('i686' 'x86_64')
depends=('opentxs-git')
makedepends=('autoconf' 'automake' 'cmake' 'gcc' 'git' 'libtool' 'make' 'pkg-config')
url="https://github.com/Open-Transactions/opentxs-cli"
license=('custom')
# Checksum is SKIP because the source is a moving git branch, not a fixed tarball.
source=(git+https://github.com/Open-Transactions/opentxs-cli)
sha256sums=('SKIP')
provides=('opentxs-cli')
conflicts=('opentxs-cli')
# Package version = date of the newest commit, collapsed to YYYYMMDD.
pkgver() {
  cd "${pkgname%-git}"
  git log -1 --format='%cd' --date=short | tr -d '-'
}
# Configure and compile in an out-of-tree cmake build directory.
build() {
cd ${pkgname%-git}
msg 'Building...'
mkdir -p build && cd build
# The project's cmake scripts read this to locate the opentxs headers.
export opentxsIncludePath=/usr/include
cmake -DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=Release \
-DKEYRING_FLATFILE=ON \
-DWITH_TERMCOLORS=ON \
--debug-output ../
make
}
# Stage license, docs and the built binaries into $pkgdir, then strip
# VCS metadata from the staged tree.
package() {
cd ${pkgname%-git}
msg 'Installing license...'
install -Dm 644 LICENSE "$pkgdir/usr/share/licenses/opentxs-cli/LICENSE"
msg 'Installing docs...'
install -dm 755 "$pkgdir/usr/share/doc/opentxs-cli"
for _doc in ChangeLog README.md docs script; do
cp -dpr --no-preserve=ownership $_doc "$pkgdir/usr/share/doc/opentxs-cli"
done
msg 'Installing...'
make DESTDIR="$pkgdir" install -C build
msg 'Cleaning up pkgdir...'
find "$pkgdir" -type d -name .git -exec rm -r '{}' +
find "$pkgdir" -type f -name .gitignore -exec rm -r '{}' +
}
| true
|
51ee85769cfe75ac3f3707f48f9db2d9f11f2d78
|
Shell
|
Twicer/devopstraining
|
/day2/edit_shell_bash.sh
|
UTF-8
| 335
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rewrite the "games" account line in a copy of /etc/passwd so that its
# login shell becomes /bin/bash; every other line is copied unchanged.
sudo cp -f /etc/passwd /root/passwd_bash.copy
# The whole loop runs inside a root shell.  The \$ and \" escapes defer
# expansion to that inner shell rather than the current one.
sudo bash -c "cat /root/passwd_bash.copy | while read word;do
if [[ \$word == \"games\"* ]];then
echo \"games:x:12:100:games:/usr/games:/bin/bash\" >> passwd.copy.temp
else
echo \"\$word\" >> passwd.copy.temp
fi
done"
sudo mv -f passwd.copy.temp /root/passwd_bash.copy
| true
|
f0177aa6404122c08f276dfc10685ebbb40cde37
|
Shell
|
jbrowne6/fpExperiments
|
/multipleSystems/memUse/memTest.sh
|
UTF-8
| 6,560
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# memTest.sh - record the peak memory (maximum resident set size) used by
# several learners (fastRF variants, XGBoost, Ranger) while varying one
# experiment dimension at a time: cores, classes, observations, features.
# Every measurement appends one CSV row to memUse.txt:
#   dataset,maxRSS,testName,algorithm,cores,classes,samples,features
#
# Refactor note: the original repeated the same three nested-loop batteries
# verbatim in all four sections; they are factored into run_suite below,
# with identical commands and identical output rows.
nTimes=1

# peak_mem CMD... -> print the "Maximum resident set size" value reported
# by /usr/bin/time -v; the measured command's own stdout is discarded.
peak_mem() {
    /usr/bin/time -v "$@" 2>&1 >/dev/null | awk -F: '/Maximum resident/ {print $2}'
}

# run_suite CORES... -> one measurement pass over every learner for the
# current $testName/$nClass/$nSample/$nFeature, one run per core count.
run_suite() {
    local algname dataset numCores var
    for algname in "rfBase" "rerf" "binnedBase" "binnedBaseRerF"
    do
        for dataset in "svhn"
        do
            for numCores in "$@"
            do
                var=$(peak_mem Rscript fastRF.R $algname $dataset $numCores $nTimes $nClass $nSample $nFeature)
                echo "$dataset,$var,$testName,$algname,$numCores,$nClass,$nSample,$nFeature" >> memUse.txt
            done
        done
    done
    for dataset in "svhn"
    do
        for numCores in "$@"
        do
            var=$(peak_mem Rscript XGBoost.R $dataset $numCores $nTimes $nClass $nSample $nFeature)
            echo "$dataset,$var,$testName,XGBoost,$numCores,$nClass,$nSample,$nFeature" >> memUse.txt
        done
    done
    for dataset in "svhn"
    do
        for numCores in "$@"
        do
            var=$(peak_mem Rscript Ranger.R $dataset $numCores $nTimes $nClass $nSample $nFeature)
            echo "$dataset,$var,$testName,Ranger,$numCores,$nClass,$nSample,$nFeature" >> memUse.txt
        done
    done
    # R-RerF runs were disabled in the original (`if false`); kept disabled.
    if false
    then
        for dataset in "svhn"
        do
            for numCores in "$@"
            do
                var=$(peak_mem Rscript RerF.R $dataset $numCores $nTimes $nClass $nSample $nFeature)
                echo "$dataset,$var,$testName,R-RerF,$numCores,$nClass,$nSample,$nFeature" >> memUse.txt
            done
        done
    fi
}

# ---- vary the number of cores ---------------------------------------
echo "writing file"
Rscript createDS.R -1 -1 -1
echo "done writing"
nSample=60000
nFeature=1024
nClass=5
testName="cores"
for nTimesLoop in {1..5}
do
    run_suite 32 16 8 4 2 1
done

# ---- vary the number of classes -------------------------------------
testName="classes"
for nClass in 2 3 4 5 6 7 8 9 10
do
    echo "writing file"
    Rscript createDS.R $nClass -1 -1
    echo "done writing"
    nSample=60000
    nFeature=1024
    for nTimesLoop in {1..5}
    do
        run_suite 16
    done
done

# ---- vary the number of observations --------------------------------
testName="observations"
for nSample in 30000 60000 90000 120000 150000 180000
do
    echo "writing file"
    Rscript createDS.R -1 $nSample -1
    echo "done writing"
    nClass=5
    nFeature=1024
    for nTimesLoop in {1..5}
    do
        run_suite 16
    done
done

# ---- vary the number of features ------------------------------------
testName="features"
for nFeature in 250 500 1000 1500 2250 3072
do
    echo "writing file"
    Rscript createDS.R -1 -1 $nFeature
    echo "done writing"
    nClass=5
    nSample=60000
    for nTimesLoop in {1..5}
    do
        run_suite 16
    done
done
| true
|
79920ad30b0a397db2b0f05ac0306caf957db92b
|
Shell
|
benevolence-doctor/sunny_shell
|
/scripts/dnsstartstop.sh
|
UTF-8
| 842
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Start/stop/status wrapper for the dnsReflector daemon.
# Usage: dnsstartstop.sh {start|stop|status}
prog="dnsReflector"
RETVAL=0
# Launch one reflector per capture interface (eth1, eth2), both forwarding
# to eth0.  NOTE: RETVAL only reflects the launch of the SECOND nohup.
start() {
echo -n $"Starting $prog: "
nohup ./$prog -i eth1 -o eth0 >/dev/null 2>&1 &
nohup ./$prog -i eth2 -o eth0 >/dev/null 2>&1 &
RETVAL=$?
echo
return $RETVAL
}
# Stop every running instance by process name.
stop() {
echo "dnsReflector is stop ........................."
killall $prog
RETVAL=$?
echo
return $RETVAL
}
# Report whether any dnsReflector process is alive (ps + grep scan).
status() {
pid=`ps aux |grep dnsReflector|grep -v grep|awk '{print $2}'`
if [ -n "$pid" ];then
echo $"${prog} (pid "$pid") is running..."
else
echo $"${prog} is stop..."
fi
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|status}"
RETVAL=1
esac
exit $RETVAL
| true
|
0cebeb7f5a780d8aa61ce4a95ed926913a7330d9
|
Shell
|
NasreddinHodja/zinebar
|
/secondary_feeder.sh
|
UTF-8
| 278
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Feeder for the secondary-monitor status bar: composes one bar line per
# tick from the widget functions defined in feeder.sh plus this monitor's
# workspace indicator.
source ./feeder.sh
# Workspace indicator for the HDMI-0 output (rendered by workspaces.py).
workspaces() {
echo -e "$(./workspaces.py HDMI-0)"
}
# One full bar line: workspaces aligned left (%{l}), widgets right (%{r}).
bar_out() {
echo -e "%{l}$(workspaces)%{r}$(home) $(storage) $(ram) $(internet) $(volume) $(brightness) $(battery) $(clock)"
}
# Re-render roughly ten times per second.
while true; do
bar_out
sleep 0.1s
done
| true
|
5a5873a91a540b0c82f56925bef5cf2ef46ab2d0
|
Shell
|
logolive/eNutri
|
/parse-server/setup.sh
|
UTF-8
| 783
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set up MongoDB, generate the Parse Server config file, then build and
# start the Parse Server Docker container.
#
# Expected ENV:
# - eNutri_APP_ID
# - eNutri_MASTER_KEY
#
# Run relative to this script's own directory.  The cd target is quoted
# (bug fix: the unquoted $(dirname "$0") word-split on paths containing
# spaces) and a failed cd aborts instead of running in the wrong place.
cd "$(dirname "$0")" || exit 1
echo -e "Setting up MongoDB..."
mongodb/setup.sh | awk '{print "mongoDB:\t" $0}'
echo -e "Generating Parse Server config file"
cat > config.json << EOF
{
"appId": "$eNutri_APP_ID",
"masterKey": "$eNutri_MASTER_KEY",
"databaseURI": "mongodb://mongo/test",
"allowClientClassCreation": false
}
EOF
echo -e "Building custom Docker image for Parse Server"
docker build -t enutri_parse-server_img .
echo -e "Starting Parse Server"
docker run --name enutri_parse \
-v enutri_parse-cloud-code:/parse-server/cloud \
-v enutri_parse-config:/parse-server/config \
--link enutri_mongodb:mongo \
--restart always \
-d enutri_parse-server_img
echo -e "Cleanup"
# The config was baked into the image during `docker build`.
rm config.json
echo -e "\nDONE"
| true
|
d8deabc77eee3764e8d4ec3e6720a3d19006aba9
|
Shell
|
raskchanky/terrarium
|
/scripts/capture_logs.sh
|
UTF-8
| 181
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Snapshot the stdout log of every terrarium container into
# data/<container>_stdout.txt.
set -xeuo pipefail
while IFS= read -r name; do
  docker logs "${name}" > "data/${name}_stdout.txt"
done < <(docker ps --filter name=terrarium --format '{{.Names}}')
| true
|
a786ddecb7c22f2023bb895ec022ed7a9adb0f17
|
Shell
|
maltenorstroem/uibuildertools
|
/scripts/dummy.sh
|
UTF-8
| 311
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# A simple script as example
# Strict mode: abort on errors, on unset variables, and on pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# First CLI argument; with nounset the script stops here if it is missing.
FIRST_ARGUMENT="$1"
# Print the script's first argument verbatim.
# Bug fix: the expansion is now quoted — unquoted, the value was subject
# to word splitting (collapsing runs of whitespace) and glob expansion.
function dummy() {
    echo "$FIRST_ARGUMENT"
}
dummy
# Exit hook: run cleanup actions, then terminate successfully.
function handle_exit() {
    # Add cleanup code here
    printf '%s\n' "cleanup block"
    exit 0
}
trap handle_exit 0 SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM
| true
|
6fd4429f17ed364a1f6a62d7d3269117e70cfdc9
|
Shell
|
allscale/allscale_amdados
|
/api/allscale_api/scripts/dependencies/defaults.sh
|
UTF-8
| 1,079
| 3.203125
| 3
|
[] |
no_license
|
# default location for third-party installs; overridable via PREFIX or
# THIRD_PARTY_LIBS
export PREFIX="${PREFIX:-${THIRD_PARTY_LIBS:-$HOME/third_party_libs}}"
# default compile flags
export CFLAGS="-mtune=native -O3"
export CXXFLAGS="-mtune=native -O3"
# Bug fix: this was exported as "LDLAGS" — a typo no build tool reads —
# so the intended linker flags never took effect.
export LDFLAGS="-mtune=native -O3"
# override compiler
#GCC_PKG=$(get_property gcc PACKAGE)
#export CC="$PREFIX/$GCC_PKG/bin/gcc"
#export CXX="$PREFIX/$GCC_PKG/bin/g++"
#export PATH="$PREFIX/$GCC_PKG/bin:$PATH"
#export LD_LIBRARY_PATH="$PREFIX/$GCC_PKG/lib64"
# parallel build: default to one make job per CPU
export SLOTS="${SLOTS:-$(nproc)}"
# True iff the package's install stamp file exists under $PREFIX.
pkg_is_installed() {
  local stamp="$PREFIX/$PACKAGE/.installed"
  [[ -f "$stamp" ]]
}
# Fetch $URL (wget -nc skips files already downloaded) and, when
# $SHA256SUM is set, verify the downloaded $FILE against it.
pkg_download() {
wget -nc "$URL"
if [[ "$SHA256SUM" ]]; then
echo "$SHA256SUM $FILE" | sha256sum -c
fi
}
# Unpack the downloaded archive into the current directory.
pkg_extract() {
  local archive="$FILE"
  tar -xf "$archive"
}
# Apply all patches named $NAME-*.patch from the installer's patch
# directory, in sorted order; -N skips patches already applied.
pkg_prepare() {
find "$INSTALLER_DIR/patches" -name "$NAME-*.patch" | sort | xargs -r -L 1 patch -p1 -N -i
}
# Default configure step: autotools with a per-package install prefix.
pkg_configure() {
./configure --prefix="$PREFIX/$PACKAGE"
}
# Parallel build using $SLOTS make jobs.
pkg_build() {
make -j "$SLOTS"
}
# No-op test step; packages with a test suite override this.
pkg_check() {
true
}
pkg_install() {
make install
}
# Drop the stamp file that pkg_is_installed checks for.
pkg_install_done() {
touch "$PREFIX/$PACKAGE/.installed"
}
# Remove the extracted source tree and the downloaded archive.
pkg_cleanup() {
rm -rf "$PACKAGE" "$FILE"
}
| true
|
1dfd6751333d62afb41c5585405a7954f0275696
|
Shell
|
nobody2100/dotfiles
|
/install.sh
|
UTF-8
| 1,118
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
##################################################
##
## little install script for my configs...
## (warning: will delete original files)
##
##################################################
echo -e "This install script will modify the following configuration files in your home directory:\n\t.bashrc\n\t.bash_profile\n\t.emacs\n\t.screenrc"
path=$(pwd)
echo "the path to the dotfiles is $path"
# bashrc
#mv ~/.bashrc ~/.bashrc_bak
echo ". $path/bashrc" > ~/.bashrc
# decided to make this not so flexible
#echo ". $path/bash/env" > bashrc
#echo ". $path/bash/aliases" >> bashrc
#echo ". $path/bash/config" >> bashrc
# bash_profile: the same as bashrc
#mv ~/.bash_profile ~/.bash_profile_bak
echo -e "if [ -f ~/.bashrc ];\nthen\n\tsource ~/.bashrc\nfi" > ~/.bash_profile
# "$path" is quoted in every ln below (bug fix): unquoted, a checkout
# path containing spaces word-split the arguments.
# emacs
#mv ~/.emacs ~/.emacs_bak
ln -sfv "$path/emacs" ~/.emacs
# ssh config
ln -sfv "$path/ssh/config" ~/.ssh/config
# screenrc
#mv ~/.screenrc ~/.screenrc_bak
ln -sfv "$path/screenrc" ~/.screenrc
# nautilus scripts
ln -sfv "$path/nautilus/OpenTerminalHere" ~/.gnome2/nautilus-scripts/OpenTerminalHere
| true
|
f9a2a0d621597efda180a4345c91a2168619a067
|
Shell
|
ohyoungjooung2/u18kvk8s
|
/k8s/vagrant_install.sh
|
UTF-8
| 1,351
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Vagrant 2.2.5 (with sha256 verification) and the vagrant-libvirt
# plugin plus its build dependencies on Ubuntu 18.04.
if command -v curl >/dev/null
then
echo "curl already installed"
else
sudo apt install -y curl
fi
if [[ ! -e ./vagrant225.deb ]]
then
VFILE="https://releases.hashicorp.com/vagrant/2.2.5/vagrant_2.2.5_x86_64.deb"
curl -o vagrant225.deb $VFILE
else
echo "vagrant225.deb already downloaded"
fi
#sha256 check
#https://releases.hashicorp.com/vagrant/2.2.5/vagrant_2.2.5_SHA256SUMS
SH256="415f50b93235e761db284c761f6a8240a6ef6762ee3ec7ff869d2bccb1a1cdf7"
SH256FILE=$(/usr/bin/sha256sum vagrant225.deb | awk '{print $1}')
# Bug fix: this test used to be `[[ $SH256==$SHA256FILE ]]` — a single
# non-empty word (so it was ALWAYS true) that additionally referenced a
# misspelled variable name.  The checksum was never actually verified.
if [[ "$SH256" == "$SH256FILE" ]]
then
if [[ -e /usr/bin/vagrant ]]
then
VAGRANT_VCHK=$(/usr/bin/vagrant --version | awk '{print $2}')
echo "Vagrant 2.2.5 already installed"
else
sudo /usr/bin/dpkg -i vagrant225.deb
fi
else
echo "Something is wrong. It would be wrong sha256"
fi
#plugin install
#First dependencies install(https://github.com/vagrant-libvirt/vagrant-libvirt#installation)
sudo apt-get build-dep -y vagrant ruby-libvirt
sudo apt-get install -y qemu libvirt-bin ebtables dnsmasq-base
sudo apt-get install -y libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
#Plugin install last
/usr/bin/vagrant plugin install vagrant-libvirt
#Plugin check
if [[ $? -eq 0 ]]
then
/usr/bin/vagrant plugin list | grep libvirt
fi
sudo apt autoremove -y
exit 0
| true
|
38209e6074ac8d481549e442065930dcefce8a26
|
Shell
|
jmosbacher/straxen
|
/.github/scripts/pre_pyflakes.sh
|
UTF-8
| 413
| 2.75
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Pyflakes does not like the way we do __all__ += [].  This script
# comments out every line with such a signature in each of the straxen
# source directories.
start="$(pwd)"
echo $start
for subdir in straxen straxen/plugins straxen/analyses; do
  cd "$start/$subdir"
  # Prefix matching lines with '#' (idempotent: ^#* collapses repeats).
  sed -e '/__all__ +=/ s/^#*/#/' -i ./*.py
done
cd $start
echo "done"
| true
|
1c7f2df866e913ce9611f80f700d8f3fb3028a8b
|
Shell
|
ismogal/s2i-dotnetcore-ex
|
/.s2i/bin/assemble
|
UTF-8
| 526
| 3.0625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
set -e
# 'bower' and 'gulp' used to be installed by default by the dotnet-example template.
# Since .NET Core 2.1 these tools are no longer required by the ASP.NET Core templates.
# We manually install them here, to be able to remove them from the dotnet-example template.
# see https://github.com/redhat-developer/s2i-dotnetcore/issues/192.
echo "---> Installing npm tools..."
# Install under $HOME so the tools end up in the build user's tree.
pushd $HOME
npm install bower gulp
popd
# Delegate to running the s2i builder's
# assemble script.
# exec replaces this shell, so the builder script's exit status becomes ours.
exec ${STI_SCRIPTS_PATH}/assemble
| true
|
ab65d7b71ee6403e77b08bff52df9cd0f8047f38
|
Shell
|
18F/frstack
|
/ansible/roles/openidm/templates/openidm
|
UTF-8
| 1,498
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# chkconfig: 345 95 5
# description: start/stop openidm
# SysV init script for OpenIDM.  This is an Ansible template: the
# {{...}} placeholders are substituted at deploy time.
# clean up left over pid files if necessary
cleanupPidFile() {
if [ -f $OPENIDM_PID_FILE ]; then
rm -f "$OPENIDM_PID_FILE"
fi
trap - EXIT
exit
}
JAVA_BIN={{java_home}}/bin/java
OPENIDM_HOME={{install_root}}/openidm
OPENIDM_USER={{fr_user}}
OPENIDM_PID_FILE=$OPENIDM_HOME/.openidm.pid
OPENIDM_OPTS="-Xmx1024m -Dfile.encoding=UTF-8"
cd ${OPENIDM_HOME}
# Set JDK Logger config file if it is present and an override has not been issued
if [ -z "$LOGGING_CONFIG" ]; then
if [ -r "$OPENIDM_HOME"/conf/logging.properties ]; then
LOGGING_CONFIG="-Djava.util.logging.config.file=$OPENIDM_HOME/conf/logging.properties"
else
LOGGING_CONFIG="-Dnop"
fi
fi
CLASSPATH="$OPENIDM_HOME/bin/*:$OPENIDM_HOME/framework/*"
# Backgrounded JVM launch command.  Where it is used below, an
# `eval echo $!` is appended so the JVM's pid lands in the pid file.
START_CMD="nohup $JAVA_BIN $LOGGING_CONFIG $JAVA_OPTS $OPENIDM_OPTS \
-Djava.endorsed.dirs=$JAVA_ENDORSED_DIRS \
-classpath $CLASSPATH \
-Dopenidm.system.server.root=$OPENIDM_HOME \
-Djava.awt.headless=true \
org.forgerock.commons.launcher.Main -c $OPENIDM_HOME/bin/launcher.json > $OPENIDM_HOME/logs/server.out 2>&1 &"
case "${1}" in
start)
# Run as the service account; record the background pid.
su $OPENIDM_USER -c "$START_CMD eval echo \$\! > $OPENIDM_PID_FILE"
exit ${?}
;;
stop)
./shutdown.sh > /dev/null
exit ${?}
;;
restart)
./shutdown.sh > /dev/null
su $OPENIDM_USER -c "$START_CMD eval echo \$\! > $OPENIDM_PID_FILE"
exit ${?}
;;
*)
echo "Usage: openidm { start | stop | restart }"
exit 1
;;
esac
| true
|
1c3209e58666e97e400fbfd5e477f8b35b89a82b
|
Shell
|
Areed24/CrazyIvan
|
/scripts/linux/deb/build_deps.sh
|
UTF-8
| 2,362
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
#This script will attempt to build CrazyIvan dependencies
#Based on Ubuntu 14.04 LTS
#Not intended for use with other OS (should function correctly with Debian 7, untested)
printf "Creating Dependency Folder"
# All sources are cloned/unpacked under ./downloads; each section below
# is skipped when its marker directory already exists in /usr/local/include.
PRE=./downloads
RETURN=..
mkdir $PRE
printf "Calling apt-get update"
#Update the Ubuntu Server
apt-get -y update
apt-get install -y git libboost-all-dev
#Build & Install the Shared Service Library
if [ ! -d /usr/local/include/aossl ]; then
#Create the folder to clone into
mkdir $PRE/aossl
#Pull the code down
git clone https://github.com/AO-StreetArt/AOSharedServiceLibrary.git $PRE/aossl
#Build the dependencies for the shared service library
mkdir $PRE/aossl_deps
cp $PRE/aossl/scripts/deb/build_deps.sh $PRE/aossl_deps/
cd $PRE/aossl_deps
./build_deps.sh
cd ../$RETURN
#Build the shared service library
cd $PRE/aossl
make
make install
ldconfig
cd ../..
fi
#Determine if we Need RapidJSON
if [ ! -d /usr/local/include/rapidjson ]; then
printf "Cloning RapidJSON"
mkdir $PRE/rapidjson
#Get the RapidJSON Dependency
git clone https://github.com/miloyip/rapidjson.git $PRE/rapidjson
#Move the RapidJSON header files to the include path
cp -r $PRE/rapidjson/include/rapidjson/ /usr/local/include
fi
#Ensure we have access to the Protocol Buffer Interfaces
if [ ! -d /usr/local/include/dvs_interface ]; then
mkdir $PRE/interfaces/
git clone https://github.com/AO-StreetArt/DvsInterface.git $PRE/interfaces
cd $PRE/interfaces && make install
cd ../..
fi
# Install librdkafka
# NOTE(review): unlike the sections above, this downloads into the current
# directory rather than $PRE, and leaves the tarball behind.
if [ ! -d /usr/local/include/librdkafka ]; then
wget https://github.com/edenhill/librdkafka/archive/v0.11.3.tar.gz
tar -xvzf v0.11.3.tar.gz
cd librdkafka-0.11.3 && ./configure && make && make install
cd ..
fi
# Here we look to install cppkafka
if [ ! -d /usr/local/include/cppkafka ]; then
printf "Cloning CppKafka\n"
mkdir $PRE/cppkafka
#Get the CppKafka Dependency
git clone https://github.com/mfontanini/cppkafka.git $PRE/cppkafka
# Build and install
mkdir $PRE/cppkafka/build && cd $PRE/cppkafka/build && cmake .. && make && make install
fi
#Install python, pyzmq, protobuf, boost, and the protobuf compiler
apt-get install -y python-pip python-dev libprotobuf-dev protobuf-compiler libboost-all-dev
pip install pyzmq
printf "Finished installing dependencies"
| true
|
c82be842bc1eb9ef9efb44bd0a987f8955f9169d
|
Shell
|
MGX-metagenomics/tools
|
/patches/GraphValidate.template
|
UTF-8
| 1,401
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Validate a Conveyor workflow graph against the parameter file of one
# MGX job; stdout/stderr of the validator are captured next to the job.
. PREFIX/share/config/conveyor.config
conveyor_graph=$1
projName=$2
jobId=$3
jobConfig=/vol/mgx-data/${projName}/jobs/${jobId}
set -e
umask 0007
if [ ! -r ${CEBITEC_SETTINGS_FILE} ]; then
echo "Cannot read Conveyor settings file."
exit 1
fi
if [ ! -r ${conveyor_graph} ]; then
echo "Cannot read Conveyor workflow file."
exit 1
fi
if [ ! -r ${jobConfig} ]; then
echo "Cannot read Conveyor workflow parameter file."
exit 1
fi
# Clear locale/terminal/SMF variables inherited from the caller.
unset LC_MONETARY TERM LC_NUMERIC NLSPATH \
LC_COLLATE LC_TIME LC_MESSAGES LC_CTYPE \
SMF_FMRI SMF_METHOD SMF_ZONENAME SMF_RESTARTER \
XFILESEARCHPATH
cd /vol/mgx-data/${projName}/jobs || exit 1
jobName=validate-${projName/MGX_/}-${jobId}
rm -f ${jobConfig}.stdout ${jobConfig}.stderr
# Bug fix: with `set -e` in effect, a failing GraphRun aborted the script
# immediately, so the old `err=$?` check below was dead code and the
# diagnostic message could never be printed.  Capture the status via
# `|| err=$?` so the error path actually runs.
err=0
PREFIX/lib/Conveyor/GraphRun --validate ${conveyor_graph} ${jobConfig} \
> ${jobConfig}.stdout 2>${jobConfig}.stderr || err=$?
#sbatch --job-name=${jobName} \
# --no-requeue --wait --partition=chaos --quiet \
# --cpus-per-task=2 --mem-per-cpu=50G \
# --chdir=/vol/mgx-data/${projName}/jobs \
# --export=CEBITEC_SETTINGS_FILE,DOTNET_ROOT \
# -i /dev/null -o ${jobConfig}.stdout -e ${jobConfig}.stderr \
# --wrap="PREFIX/lib/Conveyor/GraphRun --validate ${conveyor_graph} ${jobConfig}"
if [ $err != 0 ]; then
echo "GraphValidate exited with return code ${err}."
exit ${err}
fi
| true
|
475c07555e868dc43955906b6a03c37b455fd7d5
|
Shell
|
freesense/poserver
|
/stop.sh
|
UTF-8
| 495
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop the poserver processes: remove the gevent/dbhub IPC socket files
# (names parsed from gyconfig.py when present) and kill the pid recorded
# in pid.pid.
if [ -f ./gyconfig.py ];
then
# Appears to extract the last path component of the ipc:// endpoint from
# the zmq_* settings in gyconfig.py — verify against that file's format.
gefil=`awk '/^zmq_gevent2worker/' ./gyconfig.py | awk '{print $3}' | awk 'BEGIN{FS="[:'\'']"}{print $3}'|awk -F'/' '{print $3}'`
dbhub=`awk '/^zmq_dbhub2proxy/' ./gyconfig.py | awk '{print $3}' | awk 'BEGIN{FS="[:'\'']"}{print $3}'|awk -F'/' '{print $3}'`
else
# Fall back to the default socket file names.
gefil='gevent.worker2'
dbhub='hub.dbproxy2'
fi
if [ -e $gefil ]; then
rm -fr $gefil
fi
if [ -e $dbhub ]; then
rm -fr $dbhub
fi
# Kill the recorded server pid if the pid file is writable.
if [ -w pid.pid ];
then
kill -9 `cat pid.pid`
rm pid.pid
fi
| true
|
5d706998987d8bd1ba94c36b6e6208f76da4abc1
|
Shell
|
bockbilbo/server-package-updates-to-mattemost-google-chat
|
/mattermost-script.sh
|
UTF-8
| 5,076
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script to notify Mattermost of latest package changes in Ubuntu or
# CentOS.
#
# Based on original work by Rick Harrison available at:
# https://github.com/fortybelowzero/server-package-updates-to-slack
#
# Setup:
# - Change values in the configuration section below
# - Add this as a cron-job - we run it every 15 minutes with this cron entry as root:
# */15 * * * * root /bin/bash /usr/local/bin/notify_updates > /dev/null 2>&1
#
# ==== CONFIGURATION =========================================================
# How often you are running this in cron (must match the same frequency. This string needs to be in the format unix date command can parse, eg:
# 1 hour
# 2 hours
# 15 minutes
FREQUENCY="15 minutes"
# Mattermost Hook Url to post the message to. Commented out here as I set it on the server as an environment variable, you could either do that or
# uncomment and add your own Mattermost API Hook url here:
# MM_HOOK_URL="https://mattermost.domain.gbl/hooks/XXXXXXXXXXXX"
# Other Mattermost config settings.
MM_CHANNEL_NAME="#server-updates"
MM_POST_THUMBNAIL_UBUNTU="https://assets.ubuntu.com/v1/29985a98-ubuntu-logo32.png"
MM_POST_THUMBNAIL_CENTOS="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b4/CentOS_logo.svg/500px-CentOS_logo.svg.png"
MM_POST_THUMBNAIL=$MM_POST_THUMBNAIL_UBUNTU
MM_POST_USERNAME="update-notifier"
MM_POST_USERNAME_ICON="https://icons-for-free.com/download-icon-refresh+reload+update+icon-1320191166843452904_512.png"
# Name of the server to use in the mattermost message title. By default below we're using the servers' own hostname, feel free to swap it to a
# string if theres something you'd rather use to identify the server instead.
SERVERNAME=$(hostname)
# ==== END OF CONFIGURATION =========================================================
# distro-finding - try to work out what linux flavour we're under.
# Currently this script support redhat/centos and ubuntu. Feel free to PR amends to include other distros.
# Hat-tip: https://askubuntu.com/a/459425
UNAME=$(uname | tr "[:upper:]" "[:lower:]")
# If Linux, try to determine specific distribution
if [ "$UNAME" == "linux" ]; then
# If available, use LSB to identify distribution
if [ -f /etc/lsb-release -o -d /etc/lsb-release.d ]; then
export DISTRO=$(lsb_release -i | cut -d: -f2 | sed s/'^\t'//)
# Otherwise, use release info file
else
export DISTRO=$(ls -d /etc/[A-Za-z]*[_-][rv]e[lr]* | grep -v "lsb" | cut -d'/' -f3 | cut -d'-' -f1 | cut -d'_' -f1)
fi
fi
# For everything else (or if above failed), just use generic identifier
[ "$DISTRO" == "" ] && export DISTRO=$UNAME
unset UNAME
# /distro-finding
# Epoch seconds at the start of the current reporting window.
LASTFREQUENCY=$(date -d "$FREQUENCY ago" +"%s")
NOWTIME=$(date -d 'NOW' +"%F")
# Matching package lines are collected in a temp file and posted in a
# single message at the end.
# --------------- DEAL WITH PACKAGES INSTALLED IF LINUX DISTRIBUTION IS REDHAT OR CENTOS ------------------
if [[ ${DISTRO,,} == *"redhat"* ]] || [[ ${DISTRO,,} == *"centos"* ]] ; then
MM_POST_THUMBNAIL=$MM_POST_THUMBNAIL_CENTOS
rpm -qa --last | head -30 | while read -a linearray ; do
PACKAGE=${linearray[0]}
DATETIMESTR="${linearray[1]} ${linearray[2]} ${linearray[3]} ${linearray[4]} ${linearray[5]} ${linearray[6]}"
INSTALLTIME=$(date --date="$DATETIMESTR" +"%s")
if [ "$INSTALLTIME" -ge "$LASTFREQUENCY" ]; then
# "\n" is written literally here; it becomes a newline escape inside the
# JSON payload sent to Mattermost below.
echo "$PACKAGE ($DATETIMESTR)\n" >> /tmp/package-updates-mattermost-announce.txt
fi
done
# --------------- DEAL WITH PACKAGES INSTALLED IF LINUX DISTRIBUTION IS UBUNTU ------------------
elif [[ ${DISTRO,,} == *"ubuntu"* ]] ; then
MM_POST_THUMBNAIL=$MM_POST_THUMBNAIL_UBUNTU
cat /var/log/dpkg.log | grep "\ installed\ " | tail -n 30 | while read -a linearray ; do
PACKAGE="${linearray[3]} ${linearray[4]} ${linearray[5]}"
DATETIMESTR="${linearray[0]} ${linearray[1]}"
INSTALLTIME=$(date --date="$DATETIMESTR" +"%s")
if [ "$INSTALLTIME" -ge "$LASTFREQUENCY" ]; then
echo "$PACKAGE ($DATETIMESTR)\n" >> /tmp/package-updates-mattermost-announce.txt
fi
done
# --------------- OTHER LINUX DISTROS ARE UNTESTED - ABORT. ------------------
else
echo "ERROR: Untested/unsupported linux distro - Centos/Redhat/Ubuntu currently supported, feel free to amend for other distros and submit a PR."
fi
# --------------- IF PACKAGED WERE INSTALLED (THERES A TEMPORARY FILE WITH THEM LISTED IN IT) THEN SEND A MATTERMOST NOTIFICATION. -------------
if [ -f /tmp/package-updates-mattermost-announce.txt ]; then
echo "$NOWTIME - notifying updates to mattermost..."
INSTALLATIONS=$(cat /tmp/package-updates-mattermost-announce.txt)
curl -X POST --data-urlencode 'payload={"channel": "'"$MM_CHANNEL_NAME"'", "username": "'"$MM_POST_USERNAME"'", "icon_url": "'"$MM_POST_USERNAME_ICON"'", "attachments": [ { "fallback": "'"$INSTALLATIONS"'", "color": "good", "title": "UPDATES APPLIED ON '"$SERVERNAME"'", "text": "Packages Updated:\n\n'"$INSTALLATIONS"'", "thumb_url": "'"$MM_POST_THUMBNAIL"'" } ] }' $MM_HOOK_URL
rm -f /tmp/package-updates-mattermost-announce.txt
fi
| true
|
6fceacae9467065b59a10e4010b94c37a4ea0ea4
|
Shell
|
zhangfeixiang/cocos-example
|
/girlGame-main/server/env/env_install.sh
|
UTF-8
| 2,249
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Game-server environment bootstrap: installs node, redis, mysql and
# rabbitmq on Ubuntu, or sets up the data disk and node on CentOS.
RDS_VER="6.0.8"
MYSQL_APT_CFG_VER="0.8.15-1"
ubuntu_env_setup() {
apt install curl -y
# node
curl -sL https://deb.nodesource.com/setup_lts.x | sudo -E bash -
apt install nodejs -y
npm -g i n
n lts
echo "Installing node done."
# redis (built from the tarball bundled under ./deps)
apt install gcc g++
cd deps
tar zxvf redis-6.0.8.tar.gz
cd redis-6.0.8
make && make install
make clean
echo "Installing redis done."
# mysql
dpkg -i mysql-apt-config_${MYSQL_APT_CFG_VER}_all.deb
apt update -y
apt install mysql-server -y
echo "Installing mysql done."
# rabbitmq (erlang runtime packages are required first)
curl -fsSL https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | sudo apt-key add -
apt install apt-transport-https
cp rabbitmq_configs/bintray.erlang.list /etc/apt/sources.list.d/
apt update -y
apt install -y erlang-base \
erlang-asn1 erlang-crypto erlang-eldap erlang-ftp erlang-inets \
erlang-mnesia erlang-os-mon erlang-parsetools erlang-public-key \
erlang-runtime-tools erlang-snmp erlang-ssl \
erlang-syntax-tools erlang-tftp erlang-tools erlang-xmerl
apt install rabbitmq-server -y --fix-missing
rabbitmq-plugins enable rabbitmq_management
cd ..
rm -Rf deps
echo "Setup game runtime environment done."
}
centos_env_setup() {
# disk: partition /dev/vdb, format ext4, mount persistently at /mnt/data
echo "n
p
1
w" | fdisk /dev/vdb && mkfs.ext4 /dev/vdb1
fdisk -l
mkdir -p /mnt/data
mount /dev/vdb1 /mnt/data
df -TH
echo "/dev/vdb1 /mnt/data ext4 defaults 0 0" >> /etc/fstab
cat /etc/fstab
echo "Installing disk done."
# node
curl -sL https://rpm.nodesource.com/setup_lts.x | bash - && yum install -y nodejs
npm i -g n
n lts
echo "Installing node done."
}
# Detect the distribution, then dispatch to the matching setup function.
if [[ -f /usr/bin/lsb_release ]]; then
OS=$(/usr/bin/lsb_release -a |grep Description |awk -F : '{print $2}' |sed 's/^[ \t]*//g')
else
OS=$(cat /etc/issue |sed -n '1p')
fi
Line="====================================="
echo -e "${Line}\nOS:\n${OS}\n${Line}"
if [[ $(echo ${OS} | grep "Ubuntu") != "" ]]
then
ubuntu_env_setup
elif [[ $(echo ${OS} | grep "CentOS") != "" ]]
then
centos_env_setup
else
echo "Unknown System"
fi
| true
|
c6de6192c4b19e6bc62a4e4c2968f75891784a23
|
Shell
|
nkigen/ubuild
|
/specs/scripts/cross_u-boot-tools.sh
|
UTF-8
| 813
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# ubuild spec script: cross-builds the u-boot host tools (mkimage) using
# helper functions sourced from the build/toolchain includes.
. build.include
. toolchain.include
# Prepare sources and apply build fixups.
src_prepare() {
build_src_prepare || return 1
cross_setup_environment || return 1
# fixup compilation, see Gentoo bug #429302
(
cd "${S}" || exit 1
sed -i -e "s:-g ::" tools/Makefile || exit 1
sed -i '/include.*config.h/d' tools/env/fw_env.[ch] || exit 1
ln -s ../include/image.h tools/ || exit 1
) || return 1
}
# No configure step is needed for the host tools.
src_configure() { :; }
# Build only the `tools-all` target out-of-tree; ARCH/ABI are cleared so
# the host toolchain is used for these binaries.
src_compile() {
cd "${BUILD_DIR}" || return 1
ARCH= ABI= bmake -C "${S}" O="${BUILD_DIR}" \
HOSTSTRIP=: CONFIG_ENV_OVERWRITE=y tools-all || return 1
}
# Install mkimage into the target toolchain's bin directory.
src_install() {
cd "${BUILD_DIR}" || return 1
mkdir -p "${TARGET_DIR}/${CROSS_PREFIX_DIR}/bin" || return 1
cp tools/mkimage "${TARGET_DIR}/${CROSS_PREFIX_DIR}/bin/mkimage" || return 1
}
main
| true
|
cf60f4babaf0a5ee1f5dd90395e9831cddc87839
|
Shell
|
gaocuo/hiboy-opt
|
/script/Sh25_softether.sh
|
UTF-8
| 5,268
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
#copyright by hiboy
# Router start/stop/watchdog script for the SoftEther VPN server.
# Loads every softether_* key from nvram into shell variables via eval.
source /etc/storage/script/init.sh
nvramshow=`nvram show | grep softether | awk '{print gensub(/=/,"='\''",1,$0)"'\'';";}'` && eval $nvramshow
# Default binary location when nvram does not provide one.
softether_path=${softether_path:-"/opt/softether/vpnserver"}
[ -z $softether_enable ] && softether_enable=0 && nvram set softether_enable=0
# If this script is not already running from /tmp/script, link it there so the
# rest of the system can invoke it as /tmp/script/_softether.
if [ ! -z "$(echo $scriptfilepath | grep -v "/tmp/script/" | grep softether)" ] ; then
mkdir -p /tmp/script
ln -sf $scriptfilepath /tmp/script/_softether
chmod 777 /tmp/script/_softether
fi
# Decide whether the service must be (re)started or stopped.
# Compares an md5 fingerprint of (enable flag + binary path + user script)
# against the one stored in nvram (softether_status); a mismatch means the
# configuration changed since the last run.
softether_check () {
SVC_PATH="$softether_path"
A_restart=`nvram get softether_status`
B_restart="$softether_enable$softether_path$(cat /etc/storage/softether_script.sh | grep -v '^#' | grep -v "^$")"
B_restart=`echo -n "$B_restart" | md5sum | sed s/[[:space:]]//g | sed s/-//g`
if [ "$A_restart" != "$B_restart" ] ; then
nvram set softether_status=$B_restart
needed_restart=1
else
needed_restart=0
fi
# Disabled but config changed: tear everything down and kill our own instances.
if [ "$softether_enable" != "1" ] && [ "$needed_restart" = "1" ] ; then
[ ! -z "$(ps - w | grep "$softether_path" | grep -v grep )" ] && logger -t "【softether】" "停止 softether"
softether_close
{ eval $(ps - w | grep "$scriptname" | grep -v grep | awk '{print "kill "$1;}'); exit 0; }
fi
if [ "$softether_enable" = "1" ] ; then
if [ "$needed_restart" = "1" ] ; then
softether_close
softether_start
else
# Enabled and unchanged: if the daemon died or the binary is missing,
# clear the status fingerprint and re-run ourselves with "start".
[ -z "$(ps - w | grep "$softether_path" | grep -v grep )" ] || [ ! -s "$softether_path" ] && nvram set softether_status=00 && { eval "$scriptfilepath start &"; exit 0; }
# Re-add the VPN firewall rules (udp 500/4500/1701) if they disappeared.
port=$(iptables -t filter -L INPUT -v -n --line-numbers | grep dpt:500 | cut -d " " -f 1 | sort -nr | wc -l)
if [ "$port" = 0 ] ; then
logger -t "【softether】" "允许 500、4500、1701 udp端口通过防火墙"
iptables -I INPUT -p udp --destination-port 500 -j ACCEPT
iptables -I INPUT -p udp --destination-port 4500 -j ACCEPT
iptables -I INPUT -p udp --destination-port 1701 -j ACCEPT
fi
fi
fi
}
# Watchdog loop: every 225 s, if the vpnserver process is gone or the binary
# is missing, re-invoke this script (which re-runs the checks) and exit.
softether_keep () {
logger -t "【softether】" "守护进程启动"
while true; do
NUM=`ps - w | grep "$softether_path" | grep -v grep |wc -l`
if [ "$NUM" -lt "1" ] || [ ! -s "$softether_path" ] ; then
logger -t "【softether】" "重新启动$NUM"
{ eval "$scriptfilepath &" ; exit 0; }
fi
sleep 225
done
}
# Stop everything: remove the VPN firewall rules, ask the server to stop,
# then forcibly kill any remaining vpnserver/script/watchdog processes.
softether_close () {
iptables -D INPUT -p udp --destination-port 500 -j ACCEPT
iptables -D INPUT -p udp --destination-port 4500 -j ACCEPT
iptables -D INPUT -p udp --destination-port 1701 -j ACCEPT
[ ! -z "$softether_path" ] && $softether_path stop
[ ! -z "$softether_path" ] && eval $(ps - w | grep "$softether_path" | grep -v grep | awk '{print "kill "$1;}')
killall vpnserver softether_script.sh
killall -9 vpnserver softether_script.sh
# Also kill the "keep" watchdog instance of this script.
eval $(ps - w | grep "$scriptname keep" | grep -v grep | awk '{print "kill "$1;}')
}
# Locate (or download) the vpnserver binary, launch the user start script,
# verify the daemon came up, open the firewall ports and spawn the watchdog.
# On failure it sleeps 10 s, resets the status fingerprint and retries by
# re-invoking this script.
softether_start () {
SVC_PATH="$softether_path"
if [ ! -s "$SVC_PATH" ] ; then
SVC_PATH="/opt/softether/vpnserver"
fi
# Binary missing: try mounting the opt partition first.
if [ ! -s "$SVC_PATH" ] ; then
logger -t "【softether】" "找不到 $SVC_PATH,安装 opt 程序"
/tmp/script/_mountopt start
fi
mkdir -p /opt/softether
# Still missing: download vpnserver, vpncmd and hamcore.se2.
if [ ! -s "$SVC_PATH" ] ; then
logger -t "【softether】" "找不到 $SVC_PATH 下载程序"
wgetcurl.sh /opt/softether/vpnserver "$hiboyfile/vpnserver"
chmod 755 "/opt/softether/vpnserver"
wgetcurl.sh /opt/softether/vpncmd "$hiboyfile/vpncmd"
chmod 755 "/opt/softether/vpncmd"
wgetcurl.sh /opt/softether/hamcore.se2 "$hiboyfile/hamcore.se2"
chmod 755 "/opt/softether/hamcore.se2"
else
logger -t "【softether】" "找到 $SVC_PATH"
fi
if [ ! -s "$SVC_PATH" ] ; then
logger -t "【softether】" "找不到 $SVC_PATH ,需要手动安装 $SVC_PATH"
logger -t "【softether】" "启动失败, 10 秒后自动尝试重新启动" && sleep 10 && { nvram set softether_status=00; eval "$scriptfilepath &"; exit 0; }
fi
# Record the resolved path and refresh the status fingerprint in nvram.
if [ -s "$SVC_PATH" ] ; then
nvram set softether_path="$SVC_PATH"
B_restart="$softether_enable$softether_path$(cat /etc/storage/softether_script.sh | grep -v '^#' | grep -v "^$")"
B_restart=`echo -n "$B_restart" | md5sum | sed s/[[:space:]]//g | sed s/-//g`
[ "$A_restart" != "$B_restart" ] && nvram set softether_status=$B_restart
fi
softether_path="$SVC_PATH"
logger -t "【softether】" "运行 softether_script"
$softether_path stop
/etc/storage/softether_script.sh &
sleep 2
[ ! -z "`pidof vpnserver`" ] && logger -t "【softether】" "启动成功"
[ -z "`pidof vpnserver`" ] && logger -t "【softether】" "启动失败, 注意检查hamcore.se2、vpncmd、vpnserver是否下载完整,10 秒后自动尝试重新启动" && sleep 10 && { nvram set softether_status=00; eval "$scriptfilepath &"; exit 0; }
logger -t "【softether】" "允许 500、4500、1701 udp端口通过防火墙"
iptables -I INPUT -p udp --destination-port 500 -j ACCEPT
iptables -I INPUT -p udp --destination-port 4500 -j ACCEPT
iptables -I INPUT -p udp --destination-port 1701 -j ACCEPT
initopt
eval "$scriptfilepath keep &"
}
# If /opt is a real (non-tmpfs) mount with an Entware-style init system,
# link this script into /opt/etc/init.d so it runs on boot.
initopt () {
optPath=`grep ' /opt ' /proc/mounts | grep tmpfs`
[ ! -z "$optPath" ] && return
if [ -s "/opt/etc/init.d/rc.func" ] ; then
ln -sf "$scriptfilepath" "/opt/etc/init.d/$scriptname"
fi
}
# Entry-point dispatch. $ACTION is provided by the sourced init.sh environment.
#   start : full stop then conditional (re)start
#   check : conditional (re)start only
#   stop  : tear down
#   keep  : run checks, then stay resident as the watchdog
case $ACTION in
start)
softether_close
softether_check
;;
check)
softether_check
;;
stop)
softether_close
;;
keep)
softether_check
softether_keep
;;
*)
softether_check
;;
esac
| true
|
e6210b40f6dc49a7ff06427de17e01eab45fccde
|
Shell
|
lidongcheng88/Remote-Terminal-Settings
|
/.zshrc
|
UTF-8
| 4,732
| 2.6875
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/root/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
# ZSH_THEME="robbyrussell"
# ZSH_THEME="random"
# ZSH_THEME="cloud"
ZSH_THEME="af-magic"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# plugins=(git)
# oh-my-zsh plugins to load.
plugins=(
autojump
cp # provides cpv: cp with a progress display
celery # celery command completion
extract # `x -r` extracts an archive and deletes it
git # git aliases
git-escape-magic # auto-escapes special characters such as ^
gitignore # gi [TEMPLATENAME] >> .gitignore
virtualenv # show the active virtualenv in the theme
zsh-autosuggestions # inline command suggestions
zsh_reload # provides `src` to reload the config
)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# exa
export PATH=/root/.cargo/bin:$PATH
# virtualenv
# NOTE(review): the backticks inside double quotes run `find` once, when this
# rc file is sourced — not when the alias is used. Confirm the eager
# expansion is intended before changing it.
alias act="source `find $(pwd) -name activate`"
alias va="source `find $(pwd) -name activate` && cd .."
alias mkv="md venv && cd venv && virtualenv"
# fuck
eval $(thefuck --alias)
# Prevent cp from silently overwriting existing files.
# BUG FIX: was `alias cp=cp -i`, which left -i outside the alias value, so
# the interactive flag was never applied.
alias cp='cp -i'
# Double-press Esc to prepend sudo to the current command line.
bindkey -s '\e\e' '\C-asudo \C-e'
# Initialize zplug.
source ~/.zplug/init.zsh
# zplug-managed plugins:
# highlight invalid commands in red
zplug "zsh-users/zsh-syntax-highlighting"
# Check whether all declared plugins are installed; prompt to install missing ones.
if ! zplug check --verbose; then
printf "Install? [y/N]: "
if read -q; then
echo; zplug install
fi
fi
# Load the zplug-managed plugins.
zplug load
| true
|
e2b98282120ee0e6c1621248ff50e5fc1d6be3aa
|
Shell
|
Nick-Getka/PKI-Project
|
/389Setup.sh
|
UTF-8
| 2,079
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#Setup and install of the 389-directory server on a new RHEL server
#Variables
# Answer file consumed by setup-ds-admin.pl; $$ makes the name unique per run.
INF=/tmp/ks-$$.inf
# Set the machine hostname, then capture the effective value.
hostname 389-Dir.localdomain
# BUG FIX: was `HOSTNAME==$(hostname)`, which assigned the literal string
# "=<hostname>" and corrupted every value derived from $HOSTNAME below
# (FullMachineName, ConfigDirectoryLdapURL, /etc/hosts entry).
HOSTNAME=$(hostname)
TYPE="LDAP_Dir_Server"
#installing necessary packages
# Registers the RHEL subscription and installs Java, EPEL, Xfce and the
# 389-ds packages.
# NOTE(review): $1/$2 are FUNCTION arguments (RHN username/password), but the
# function is invoked below with no arguments — confirm whether the script's
# own arguments were meant to be forwarded.
# NOTE(review): `yum install foo` looks like leftover debug/test input.
function package_install(){
subscription-manager register --username=$1 --password=$2
subscription-manager attach
yum install java-1.7.0-openjdk -y
yum install java-1.7.0-openjdk-devel -y
rpm -Uvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-7.noarch.rpm
yum install foo
yum groupinstall Xfce -y
yum install 389-ds-base openldap-clients -y
yum install 389-ds-* -y
yum install 389-* -y
yum upgrade -y
}
#Setting and Resolving the hostname
# Extracts the IPv4 address of eth0 and adds a hosts entry for $HOSTNAME.
# IP is intentionally not local: create_inf() reads it later.
# NOTE(review): assumes the primary interface is named eth0 — confirm on the
# target VM image.
function resolve_hostname(){
IP="$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)"
echo "$IP $HOSTNAME $TYPE">> /etc/hosts
}
#Allowing the 389-server ports
# Opens LDAP (389), LDAPS (636) and the 389 admin console (9830) in the dmz
# zone. (Function name keeps the original "ladp" spelling because it is
# called by name below.)
function open_ladp_ports(){
firewall-cmd --zone=dmz --add-port=389/tcp --permanent
firewall-cmd --zone=dmz --add-port=636/tcp --permanent
firewall-cmd --zone=dmz --add-port=9830/tcp --permanent
systemctl reload firewalld.service
}
#Creating user for server
# The dirsrv account is referenced as SuiteSpotUserID/Group in the .inf below.
function create_users(){
useradd dirsrv
}
#Setting up the Directory server with the default options
# Writes the silent-install answer file for setup-ds-admin.pl. The heredoc is
# unquoted on purpose so $INF, $HOSTNAME and $IP expand; $IP must have been
# set by resolve_hostname() first.
# NOTE(review): passwords are hardcoded ("practice1234") — fine for a lab,
# not for production.
function create_inf(){
cat > $INF <<EOF
[General]
FullMachineName= $HOSTNAME
SuiteSpotUserID= dirsrv
SuiteSpotGroup= dirsrv
AdminDomain= localdomain
ConfigDirectoryAdminID= admin
ConfigDirectoryAdminPwd= practice1234
ConfigDirectoryLdapURL= ldap://$HOSTNAME:389/o=NetscapeRoot
[slapd]
SlapdConfigForMC= Yes
UseExistingMC= 0
ServerPort= 389
ServerIdentifier= dir
Suffix= dc=example,dc=com
RootDN= cn=Directory Manager
RootDNPwd= practice1234
ds_bename=exampleDB
AddSampleEntries= No
[admin]
Port= 9830
ServerIpAddress= $IP
ServerAdminID= admin
ServerAdminPwd= practice1234
EOF
}
#Pre-setup functions
# Order matters: resolve_hostname must run before create_inf (sets $IP).
package_install
resolve_hostname
open_ladp_ports
create_users
create_inf
#Run Setup
# -s silent mode, -f answer file written above.
setup-ds-admin.pl -s -f $INF
#Start and Enable service
systemctl start dirsrv.target
systemctl enable dirsrv.target
#Clean-up
# Remove the answer file (it contains plaintext passwords).
rm -v $INF
| true
|
e51ea5745770320bb700fcff78f7ee2e07d98235
|
Shell
|
marabout2015/mmlspark
|
/tools/deployment/gpu-setup.sh
|
UTF-8
| 3,735
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
# Install the prerequisites for MMLSpark on a GPU VM
# Versions and download URLs for each component installed below.
INSTALL_DIR="/usr/local"
NVIDIA_VERSION="384.111"
NVIDIA_INSTALLER_URL="http://us.download.nvidia.com/XFree86/Linux-x86_64/$NVIDIA_VERSION"
NVIDIA_INSTALLER_URL+="/NVIDIA-Linux-x86_64-$NVIDIA_VERSION.run"
# Note: this is a custom build that includes the Parquet reader,
# all of these should change when it is part of CNTK
# (Probably also run "Scripts/install/linux/install-cntk.sh")
CNTK_VERSION="2.2"
CNTK_INSTALLER_URL="https://mmlspark.blob.core.windows.net/installers"
CNTK_INSTALLER_URL+="/cntk-$CNTK_VERSION-parquet-gpu.tgz"
CNTK_PATH="cntk-$CNTK_VERSION-parquet-gpu"
MPI_VERSION="1.10.3"
MPI_INSTALLER_URL="https://www.open-mpi.org/software/ompi/v1.10/downloads"
MPI_INSTALLER_URL+="/openmpi-$MPI_VERSION.tar.gz"
MPI_PATH="openmpi-$MPI_VERSION"
HADOOP_VERSION="2.8.1"
HADOOP_INSTALLER_URL="http://www-us.apache.org/dist/hadoop/common"
HADOOP_INSTALLER_URL+="/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz"
HADOOP_PATH="hadoop-$HADOOP_VERSION"
ZULU_DOWNLOAD_SITE="http://repos.azulsystems.com/ubuntu"
ZULU_PKG="zulu-8"
JAVA_HOME="/usr/lib/jvm/zulu-8-amd64"
# Print an error to stderr and abort.
# NOTE(review): `show error` is not defined in this file — if no sourced
# environment provides a `show` command, this was probably meant to be `echo`.
failwith() { show error "Error: $*" 1>&2; exit 1; }
# This installer only supports Ubuntu (apt, zulu repo, package names).
if [[ "$(lsb_release -i)" != "Distributor ID:"*"Ubuntu" ]]; then
failwith "This script is incompatible with non-Ubuntu machines"
fi
# Shared curl options: fail on HTTP errors, follow redirects, retry, and
# abort stalled transfers. Intentionally word-split where used.
CURL_FLAGS="-f --location --retry 20 --retry-max-time 60 --connect-timeout 120"
CURL_FLAGS="$CURL_FLAGS --speed-limit 10 --speed-time 120"
# Download a file and install it into $INSTALL_DIR.
# Arguments: url [installer-args...]
#   *.tgz / *.tar.gz -> extracted inside $INSTALL_DIR
#   *.sh / *.run     -> made executable and run with the remaining arguments
# Aborts via failwith on download, cd, extraction or installer errors.
install_url() { # url, inst_arg...
  local url="$1"; shift
  local file="${url##*/}"
  local tmp="/tmp/$file"
  local owd="$PWD"
  curl $CURL_FLAGS "$url" > "$tmp" || \
    failwith "error retrieving $url"
  # BUG FIX: guard the cd — without it a failed cd would extract or run the
  # installer in whatever directory we happened to be in.
  cd "$INSTALL_DIR" || failwith "could not cd to $INSTALL_DIR"
  case "$file" in
    ( *.tgz | *.tar.gz ) tar xzf "$tmp" || failwith "Could not extract $file" ;;
    ( *.sh | *.run ) chmod +x "$tmp"
                     "$tmp" "$@" || failwith "Errors while running $file" ;;
    ( * ) failwith "Internal error: unknown file extension: $file" ;;
  esac
  rm -f "$tmp"
  cd "$owd" || failwith "could not return to $owd"
}
# Install the given apt packages only if any of them is not already installed.
maybe_install() { # pkg...
dpkg-query -W "$@" > /dev/null 2>&1 || apt-get -qq install -y "$@"
}
# Append each given line to FILE unless the file already contains it verbatim
# (fixed-string match). Creates the file on the first append.
add_new_line() { # file, line...
  local target="$1"
  shift
  local entry
  for entry in "$@"; do
    if [[ -e "$target" ]] && grep -qF "$entry" "$target"; then
      continue  # already present — nothing to do
    fi
    printf "%s\n" "$entry" >> "$target"
  done
}
# Main install sequence: build tools, Zulu JDK, NVIDIA driver, the prebuilt
# CNTK bundle, Open MPI (from source) and a Hadoop binary distribution.
echo "## Installing prerequisites"
maybe_install "gcc" "g++" "make" "libjasper1" "libpng12-0"
echo "## Installing Zulu Java"
echo "Adding the \"Azul Systems\" key"
apt-key adv --keyserver "hkp://keyserver.ubuntu.com:80" --recv-keys "0x219BD9C9"
apt-add-repository "deb $ZULU_DOWNLOAD_SITE stable main"
apt update
maybe_install "$ZULU_PKG"
add_new_line "/etc/environment" "JAVA_HOME=$JAVA_HOME"
echo "## Installing NVIDIA driver"
# -s: run the NVIDIA .run installer silently.
install_url "$NVIDIA_INSTALLER_URL" -s
echo "## Installing prebuilt cntk with parquet reader"
install_url "$CNTK_INSTALLER_URL"
add_new_line "/etc/environment" \
"CNTK_HOME=$INSTALL_DIR/$CNTK_PATH" \
"PATH=$INSTALL_DIR/$CNTK_PATH/bin:$PATH"
# Make the bundled CNTK dependencies visible to the dynamic linker.
echo "$INSTALL_DIR/$CNTK_PATH/dependencies/lib" > "/etc/ld.so.conf.d/cntk.conf"
echo "## Installing MPI"
install_url "$MPI_INSTALLER_URL"
cd "$INSTALL_DIR/$MPI_PATH"
./configure --prefix="$INSTALL_DIR" && make -j all && make install || \
failwith "Error building MPI"
echo "## Installing Hadoop binary"
install_url "$HADOOP_INSTALLER_URL"
add_new_line "/etc/environment" "HADOOP_HOME=$INSTALL_DIR/$HADOOP_PATH"
# Escaped so CLASSPATH is computed at login time, not now.
add_new_line "/etc/profile" \
"export CLASSPATH=\$(\$HADOOP_HOME/bin/hadoop classpath --glob)"
echo "## Reloading the ld.so cache"
rm "/etc/ld.so.cache"
ldconfig
| true
|
e4225960731c5b9bd322bed9bb109a70b7a5fba1
|
Shell
|
gitter-badger/0system.0
|
/scripts/start.sh
|
UTF-8
| 1,782
| 3.53125
| 4
|
[
"0BSD",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Production launcher: validates required environment variables, activates
# NodeJS via nvm and starts the system's server.js. Relies on the
# bash.origin helpers (BO_*) sourced below.
if [ -z "$Z0_ROOT" ]; then
echo "ERROR: 'Z0_ROOT' environment variable not set!"
exit 1
fi
# Source https://github.com/bash-origin/bash.origin
. "$Z0_ROOT/lib/bash.origin/bash.origin"
function init {
eval BO_SELF_BASH_SOURCE="$BO_READ_SELF_BASH_SOURCE"
BO_deriveSelfDir ___TMP___ "$BO_SELF_BASH_SOURCE"
local __BO_DIR__="$___TMP___"
# NOTE: We purposely DO NOT include "$__BO_DIR__/activate.sh" as this script is to
# be used to start the system in a production environment which must provide
# the minimal config options ve defaulting to the workspace activation script.
function Start {
BO_format "$VERBOSE" "HEADER" "Starting system"
# ENVIRONMENT_TYPE defaults to production; everything else is mandatory.
if [ -z "$ENVIRONMENT_TYPE" ]; then
export ENVIRONMENT_TYPE="production"
fi
if [ -z "$PLATFORM_NAME" ]; then
echo "ERROR: 'PLATFORM_NAME' environment variable not set!"
exit 1
fi
if [ -z "$ENVIRONMENT_NAME" ]; then
echo "ERROR: 'ENVIRONMENT_NAME' environment variable not set!"
exit 1
fi
if [ -z "$PIO_PROFILE_KEY" ]; then
echo "ERROR: 'PIO_PROFILE_KEY' environment variable not set!"
exit 1
fi
if [ -z "$PIO_PROFILE_SECRET" ]; then
echo "ERROR: 'PIO_PROFILE_SECRET' environment variable not set!"
exit 1
fi
# Activate (installing if needed) the pinned NodeJS major version.
NODEJS_VERSION="4"
BO_log "$VERBOSE" "Activating NodeJS version '$NODEJS_VERSION' ..."
BO_ensure_nvm
if ! nvm use $NODEJS_VERSION; then
nvm install $NODEJS_VERSION
fi
BO_log "$VERBOSE" "PWD: $PWD"
BO_log "$VERBOSE" "Z0_ROOT: $Z0_ROOT"
# The deployed commit revision must have been recorded at build time.
if [ ! -e ".git.commit.rev" ]; then
echo "ERROR: '.git.commit.rev' file not found!"
exit 1
fi
export GIT_COMMIT_REV=`cat .git.commit.rev`
BO_log "$VERBOSE" "GIT_COMMIT_REV: $GIT_COMMIT_REV"
node "$Z0_ROOT/server.js"
BO_format "$VERBOSE" "FOOTER"
}
Start $@
}
init $@
| true
|
40765ad90a59cfa9b864e7591c65570cd33776f9
|
Shell
|
ilventu/aur-mirror
|
/quake-par/PKGBUILD
|
UTF-8
| 644
| 2.59375
| 3
|
[] |
no_license
|
# Contributor: Slash <demodevil5[at]yahoo[dot]com>
# PKGBUILD for the Quake PAK archiving utility (legacy makepkg style: uses
# $startdir/src and $startdir/pkg rather than $srcdir/$pkgdir).
pkgname=quake-par
pkgver=0.03.01
pkgrel=1
pkgdesc="Quake PAK archiving utility."
url="ftp://ibiblio.org/pub/linux/games/quake/"
license="GPL"
depends=('glibc')
makedepends=()
conflicts=()
replaces=()
backup=()
install=
source=("ftp://ibiblio.org/pub/linux/games/quake/par-$pkgver.tar.gz")
md5sums=('39a73b5b95b04067dfc9acb8ef6bc9ff')
build() {
cd $startdir/src/par-$pkgver
# Modify Destination Directory in Makefile
# Rewrites the /usr/local prefix so `make install` lands in the package dir.
/bin/sed -i "s:/usr/local:$startdir/pkg/usr:" Makefile.Linux
# Create Dirs
install -d $startdir/pkg/usr/{bin,man/man1}
./configure
make || return 1
make install
}
| true
|
360dce4182cadddcc2deac1f5cecfdf71c56d3a4
|
Shell
|
SaiNadh001/DockerWork
|
/DreamOrbit/mongodb/mongo.sh
|
UTF-8
| 304
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the dreamorbit/mongodb image, run it as mongo_container, wait for the
# server to come up, then restore a database dump into it.
echo "Creating dreamorbit/mongodb and mongo_container"
cd $DOCKER_HOME/DreamOrbit/mongodb
docker build -t dreamorbit/mongodb .
docker run -d -p 27017:27017 --name mongo_container dreamorbit/mongodb
# Fixed 5-minute wait for mongod to become ready before restoring.
sleep 5m
# NOTE(review): /data-base-folder and data-base-name look like placeholders,
# and mongorestore's long flag is conventionally written --db — verify.
docker exec mongo_container mongorestore /data-base-folder -db data-base-name
| true
|
51909151a648c456b676fdb22b3afc1ef8a3f57c
|
Shell
|
c3-amitsalunke/cassandra-operator
|
/buildenv/build-all
|
UTF-8
| 1,288
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build all cassandra-operator images, then optionally tag them for the dev
# or prod registry. Usage: build-all [dev|prod]
# at the point a makefile is probably the sensible choice
set -o errexit
set -o nounset
set -o pipefail
cd $(dirname "$BASH_SOURCE")/..
# Provides $REGISTRY (and, for prod, $TAG).
source ./buildenv/repo
make
# Tag the freshly built images for the dev registry ("latest-dev" tags).
function release_dev() {
docker tag cassandra-operator:latest "${REGISTRY}/cassandra-operator:latest-dev"
docker tag cassandra-sidecar:latest "${REGISTRY}/cassandra-sidecar:latest-dev"
docker tag cassandra-3.11.8:latest "${REGISTRY}/cassandra-3.11.8:latest-dev"
docker tag cassandra-4.0-beta2:latest "${REGISTRY}/cassandra-4.0-beta2:latest-dev"
}
# Tag the freshly built images for production (versioned "$TAG" + "latest").
function release_prod() {
docker tag cassandra-operator:latest "${REGISTRY}/cassandra-operator:${TAG}"
docker tag cassandra-operator:latest "${REGISTRY}/cassandra-operator:latest"
docker tag cassandra-sidecar:latest "${REGISTRY}/cassandra-sidecar:${TAG}"
docker tag cassandra-sidecar:latest "${REGISTRY}/cassandra-sidecar:latest"
docker tag cassandra-3.11.8:latest "${REGISTRY}/cassandra-3.11.8:${TAG}"
docker tag cassandra-3.11.8:latest "${REGISTRY}/cassandra-3.11.8:latest"
docker tag cassandra-4.0-beta2:latest "${REGISTRY}/cassandra-4.0-beta2:${TAG}"
docker tag cassandra-4.0-beta2:latest "${REGISTRY}/cassandra-4.0-beta2:latest"
}
# BUG FIX: default $1 to empty — with `set -o nounset`, the original "${1}"
# aborted with an unbound-variable error when the script was run with no
# argument (i.e. a plain build without tagging).
if [ "${1:-}" = "dev" ]; then
release_dev
elif [ "${1:-}" = "prod" ]; then
release_prod
fi
| true
|
11c4b796007c787564b1bc9eb6fa485f3c83ba97
|
Shell
|
ZenBrayn/openms-feature-detection-pipeline
|
/SCRIPTS/PIPELINE/02_rt_alignment.sh
|
UTF-8
| 848
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# OpenMS pipeline step: RT-align the per-sample featureXML files, then apply
# the resulting transformations to the centroided mzML files.
datadir="/input_data"
files=$datadir/*.mzML
# Space-separated file lists; passed UNQUOTED below on purpose so each name
# becomes a separate argument (assumes no spaces in file names).
featfiles=""
rtaoutfiles=""
trafooutfiles=""
# Derive per-sample input/output names from each mzML basename.
for f in $files
do
bn=$(basename $f)
fn="${bn%.*}"
featfn=${fn}_features.featureXML
rtafn=${fn}_features_rta.featureXML
trafofn=${fn}_rta.trafoXML
featfiles="$featfiles $featfn"
rtaoutfiles="$rtaoutfiles $rtafn"
trafooutfiles="$trafooutfiles $trafofn"
done
echo "Performing RT Alignment..."
MapAlignerPoseClustering -ini /INI/map_aligner_pose_clustering.ini \
-in $featfiles \
-out $rtaoutfiles \
-trafo_out $trafooutfiles
echo "Transforming maps..."
# Apply each sample's trafoXML to its centroided spectra.
for f in $files
do
bn=$(basename $f)
fn="${bn%.*}"
centfn=${fn}_centroided.mzML
trafofn=${fn}_rta.trafoXML
rtafn=${fn}_centroided_rta.mzML
MapRTTransformer -ini /INI/map_rt_transformer.ini \
-in $centfn \
-trafo_in $trafofn \
-out $rtafn
done
| true
|
d777cc7a6d5c1a798ba8e240a83869b20ae6c477
|
Shell
|
astroclark/osg_tools
|
/bwb/testsuite/simulated_noise/run.sh
|
UTF-8
| 2,097
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
###############################################################
# #
# Example BayesWave run on SIMULATED GAUSSIAN NOISE #
# #
# RUN TIME ~20 minutes #
# - Processor: 2 GHz Intel Core i7 #
# - Memory: 4 GB 1600 Mhz #
# #
###############################################################
###############################################################
# #
# NOTE: VERY aggressive settings for short wall time #
# DO NOT use this command line for real analyses #
# #
###############################################################
# Setup paths for LAL install
source /usr/local/etc/master_rc
source /Users/tyson/bayeswave/trunk/utils/setup_paths.sh
# Main BayesWave analysis
# Two-detector (H1/L1) run on LALSimAdLIGO simulated noise, 512 Hz,
# 4 s segment, fixed data seed for reproducibility.
bayeswave \
--ifo H1 --H1-flow 16 \
--ifo L1 --L1-flow 16 \
--H1-cache LALSimAdLIGO --H1-channel H1:LALSimAdLIGO \
--L1-cache LALSimAdLIGO --L1-channel L1:LALSimAdLIGO \
--srate 512 --seglen 4 \
--trigtime 900000000.00 --PSDstart 900000000 --PSDlength 1024 \
--dataseed 1234 --Nchain 5 --Niter 2000000 --bayesLine
# check that bayeswave exited normally
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
# BayesWave post processing
# Must use the same detector/data settings as the main run above.
bayeswave_post \
--ifo H1 --H1-flow 16 \
--ifo L1 --L1-flow 16 \
--H1-cache LALSimAdLIGO --H1-channel H1:LALSimAdLIGO \
--L1-cache LALSimAdLIGO --L1-channel L1:LALSimAdLIGO \
--srate 512 --seglen 4 \
--trigtime 900000000.00 --PSDstart 900000000 --PSDlength 1024 \
--bayesLine \
--dataseed 1234 \
--0noise
# Generate skymap
python /Users/tyson/bayeswave/trunk/postprocess/skymap/megasky.py
# Generate output webpage
python /Users/tyson/bayeswave/trunk/postprocess/megaplot.py
| true
|
6dc28bf1d0a4a47ea79d821f3d2b6983146bab3a
|
Shell
|
cuppajoeman/music-experimentation
|
/formatted/view.sh
|
UTF-8
| 86
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile a LilyPond source file and open the resulting PDF in zathura.
# Usage: view.sh FILE.ly
FILE=$1
# BUG FIX: strip only the LAST extension (%.*). The original %%.* removed
# everything after the FIRST dot, so "suite.v2.ly" became "suite" and
# "./x.ly" became "" — zathura then opened the wrong (or no) PDF.
FILE_NO_EXT="${FILE%.*}"
lilypond "$FILE" && zathura "$FILE_NO_EXT.pdf"
| true
|
85ca52658b6143e3cbad2a76ab7867584688cdb4
|
Shell
|
dobos/kurucz-atlas12
|
/scripts/run.sh
|
UTF-8
| 324
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Driver: generate the line lists once, then run synthe.sh for every input
# model in parallel (20 processes).
# Usage: run.sh PARAMS_FILE "model1 model2 ..."
PARAMS=$1
INPUT_MODELS=$2
echo "Sourcing params file"
# Provides $TEMPDIR (and whatever synthe.sh/lines.sh need).
source $PARAMS
LINEDIR=$(mktemp -d -p $TEMPDIR)
./scripts/lines.sh $PARAMS $LINEDIR
# Split the whitespace-separated model list onto one line each, then fan out.
echo -e $INPUT_MODELS | sed -e 's/\s\+/\n/g' \
| xargs --max-procs=20 --replace=% ./scripts/synthe.sh $PARAMS $LINEDIR %
echo "Removing temp directory"
rm -Rf $LINEDIR
| true
|
4df7d00fc36d542b52bbe7b107994989fe880b19
|
Shell
|
harttu/keras-bert-ner
|
/scripts/predict-s800.sh
|
UTF-8
| 739
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Predict using model trained on FiNER news data
# https://stackoverflow.com/a/246128
# Resolve the directory this script lives in (robust to symlinks/cwd).
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
set -euo pipefail
# Commented-out alternatives kept for switching datasets/models.
#datadir="$SCRIPTDIR/../data/finer-news"
#test_data="$datadir/test.tsv"
# Absolute cluster paths currently in use (S800 test set + model).
test_data="/scratch/project_2001426/harttu/july-2020/keras-bert-ner/scripts/../data/s800/conll//test.tsv"
ner_model_dir="/scratch/project_2001426/harttu/july-2020/keras-bert-ner/scripts/../ner-models/s800_eval_1"
#ner_model_dir="$SCRIPTDIR/../ner-models/finer-news-model"
output_file="$SCRIPTDIR/../finer-s800-predictions.tsv"
python "$SCRIPTDIR/../predict.py" \
--ner_model_dir "$ner_model_dir" \
--test_data "$test_data" \
--output_file "$output_file"
| true
|
d4a17b493fdb43ddfd4bccc049010a008b653936
|
Shell
|
acorg/acmacs-whocc
|
/bin/hidb-get-from-albertine
|
UTF-8
| 340
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Deprecation stub: this script has been replaced and now only prints a
# pointer to its successor and fails. The old rsync logic is kept below,
# commented out, for reference.
echo "OBSOLETE! Use ~/acmacs-data/hidb5-download" >&2
exit 1
# if [ "$HOSTNAME" != "i19" ]; then
# if [[ -z ${HIDB_V5} ]]; then
# echo HIDB_V5 is not set >&2
# exit 1
# fi
# rsync -av albertine:'/syn/eu/acmacs-data/hidb5.*.json.xz' albertine:'/syn/eu/acmacs-data/hidb5.*.hidb5b' "${HIDB_V5}"
# fi
| true
|
aa6a28940aa3eba5a5003876366859bb32ed30e5
|
Shell
|
imerali/puppet-mod_security
|
/files/scripts/Debian/mod_security_asl_update.sh
|
UTF-8
| 3,257
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Autoupdater for modsec rulesets.
#
# This script will attempt to update your rulefiles, and restart apache.
# If it apache does not start after changing rules, it will roll back to
# the old ruleset and restart apache again.
#
# Version: $Id: modsec.sh,v 2.0 2006/09/03 23:58:00 olei Exp $
# Based on a script by:
# URL: http://cs.evilnetwork.org/cycro
#
# Copyleft 2006, SkyHorse.Org, No Rights Reserved
# URL: http://www.skyhorse.org/web-server-administration/auto-update-modsecurity-rules-modsecsh/
# Debian/Ubuntu apache paths and the ASL rules directory we update in place.
APACHEINITD="/etc/init.d/apache2"
APACHECTL="/usr/sbin/apache2ctl"
APACHEPID="/var/run/apache2.pid"
MODSECPATH="/etc/apache2/modsecurity.d/asl"
##########################################################################
######### you probably don't need to change anything below here ##########
##########################################################################
# internal
# NOTE(review): PID is captured but never used below.
PID=`cat ${APACHEPID}`
UPDATED=0
#echo -n "Changing PWD: "
# NOTE(review): cd is unchecked — if MODSECPATH is missing, the update loop
# below runs in the wrong directory.
cd ${MODSECPATH}
#echo `pwd`
# generic by skyhorse
# updated by Puzzle ITC
# Whitespace-separated rule-file list (iterated unquoted on purpose).
# NOTE(review): "sql.txt" appears twice in this list.
listOfRules="20_asl_useragents.conf 60_asl_recons.conf domain-blacklist.txt malware-blacklist.txt 30_asl_antimalware.conf 98_asl_jitp.conf sql.txt 05_asl_exclude.conf 99_asl_exclude.conf domain-spam-whitelist.txt 05_asl_scanner.conf 99_asl_jitp.conf malware-blacklist-high.txt trusted-domains.txt 10_asl_antimalware.conf 40_asl_apache2-rules.conf Zour_excludes.conf malware-blacklist-local.txt whitelist.txt 10_asl_rules.conf 50_asl_rootkits.conf domain-blacklist-local.txt malware-blacklist-low.txt sql.txt"
baseUrl="http://downloads.prometheus-group.com/delayed/rules/modsec/"
# Download each rule file to <name>.1; install it if new, replace the live
# copy (keeping a .bak) if its md5 changed, otherwise discard the download.
# NOTE(review): wget's exit status is not checked — a failed download leaves
# an empty <name>.1 whose md5 differs, which would replace the live rule.
for theRule in $listOfRules ; do
#echo -n "Updating $theRule: "
/usr/bin/wget -t 30 -O ${theRule}.1 -q ${baseUrl}${theRule}
if [ ! -e ${theRule} ]; then
mv ${theRule}.1 ${theRule}
else
if [ `md5sum ${theRule} | cut -d " " -f1` != `md5sum ${theRule}.1 | cut -d " " -f1` ] ; then
/bin/mv ${theRule} ${theRule}.bak
/bin/mv ${theRule}.1 ${theRule}
UPDATED=`expr $UPDATED + 1`
#echo "ok."
else
#echo "allready up to date."
/bin/rm -f ${theRule}.1
fi
fi
done
# try restart
# If anything was updated: configtest + restart apache; if apache does not
# come back, restore every .bak (keeping the new files as .new) and restart
# again.
if [ "$UPDATED" -gt "0" ]; then
#echo -n "Restarting apache: "
$APACHECTL configtest
configtest=$?
if [ "$configtest" -eq "0" ]; then
$APACHEINITD restart
# did it work? wait 2s to let the apache start
sleep 2
$APACHEINITD status
configtest=$?
if [ "$configtest" -eq "0" ]; then
#echo "Apache restarted ok."
exit 0
fi
echo "error. Apache not running."
fi
#roll back everything
# NOTE(review): rolls back ALL rules, including ones that were never
# updated and therefore have no .bak — those mv's will fail.
for theRule in $listOfRules ; do
echo -n "Rolling back ${theRule}"
/bin/mv ${theRule} ${theRule}.new
/bin/mv ${theRule}.bak ${theRule}
echo "rolled back ok."
done
$APACHECTL configtest
configtest=$?
if [ "$configtest" -eq "0" ]; then
# try starting httpd again
$APACHEINITD restart
# did that fix the problem?
$APACHEINITD status
configtest=$?
if [ "$configtest" -eq "0" ]; then
echo "That did the trick."
exit 0
fi
else
echo "Fatal: Apache configtest is till failing, Server needs attention!"
fi
echo "Fatal: Apache still not running! Run $APACHEINITD configtest to find the error."
# NOTE(review): exit codes are 0-255; 999 is reported as 231 (999 mod 256).
exit 999
fi
| true
|
e224d8f89077b62f474ea03d76cf97a3a6293a58
|
Shell
|
zwfprogit/Shell
|
/toolsShell/mutiRepoGitManage.sh
|
UTF-8
| 1,040
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Manage a fixed set of GitHub repositories as a group.
#
# Usage:
#   mutiRepoGitManage.sh clone       clone every repository under $local_root
#   mutiRepoGitManage.sh <git args>  run `git <git args>` inside each repository
local_root=${PWD}/LinuxC
server="git@github.com"
port="zwfprogit"
project=""
allRepo=(0 1 2 3 4)
repo_name[0]="CClassicUsage"
repo_name[1]="CMakeLists"
repo_name[2]="CProgrammePractice"
repo_name[3]="Python"
repo_name[4]="Shell"
local_path[0]="CClassicUsage"
local_path[1]="CMakeLists"
local_path[2]="CProgrammePractice"
local_path[3]="Python"
local_path[4]="Shell"
# Ensure the root checkout directory exists and work from it.
mkdir -p "${local_root}"
cd "${local_root}" || exit 1
targetRepo=(${allRepo[*]})
# Clone every target repository and apply per-repo permission/EOL settings.
function gitclone()
{
for x in "${targetRepo[@]}"
do
git clone "${server}:${port}/${repo_name[${x}]}.git"
cd "${local_root}/${local_path[${x}]}" || continue
git config core.fileMode false
git config core.autocrlf false
git config core.safecrlf true
cd - >> /dev/null
done
}
# Run the given git command inside every target repository.
function gitdo()
{
for x in "${targetRepo[@]}"
do
cd "${local_root}/${local_path[${x}]}" || continue
# BUG FIX: was $repo_name[${x}], which expanded the first element plus a
# literal "[N]" — braces are required to index a bash array.
echo -e "-------->${project}/${repo_name[${x}]}<-----------"
echo git "$@"
git "$@"
cd - >> /dev/null
done
}
# BUG FIX: the original test was [ "$1"=="clone" ] — one non-empty word,
# which is always true, so gitdo was unreachable. Also default $1 so an
# argument-less invocation does not error.
if [ "${1:-}" = "clone" ]; then
gitclone
else
gitdo "$@"
fi
| true
|
763b13c99ace65e6cb87647f07b92ba6af92b0ed
|
Shell
|
xusxlinux/Document
|
/shell/配置文件/test_config.sh
|
UTF-8
| 279
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Shared configuration meant to be sourced, protected by a C-header-style
# include guard: the guard variable is named after this file's basename.
import=$(basename "${BASH_SOURCE[0]}" .sh)
# Already sourced once (guard variable set to 0)? Then stop here.
# (`return` only works when this file is sourced, not executed directly.)
if [[ $(eval echo '$'"${import}") == 0 ]]; then return; fi
# Mark this file as sourced.
eval "${import}=0"
LOG_DIR=/data/shell/log
LOG_LEVEL=INFO
# Shell-convention booleans: 0 is true/success.
readonly TRUE=0
readonly FALSE=1
readonly NONE=''
readonly NULL='null'
readonly EMPTY_LIST='[]'
| true
|
022678801b183ec31c0c56f4265bd7276a96751f
|
Shell
|
c-dilks/scalers12
|
/RUN_TEST
|
UTF-8
| 495
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Full analysis pipeline driver: run the pattern/accumulation/luminosity
# steps, merge ROOT outputs, then archive the results under tests/<label>.
if [ $# -ne 1 ]; then
echo "usage: $0 [test char]"
exit
fi
./bunch_kicker_pattern_selector
#./bunch_kicker
./accumulate
./rellum_all
./rellum_runs
# Block until the condor batch jobs submitted above finish.
./wait_for_condor
root -b -q sumTree.C
root -b -q combineAll.C
pushd matrix
pushd rootfiles
# Merge the per-run matrix files into one.
hadd -f all.root matxR*.root
# NOTE(review): only one popd for two pushd's — RunPatterns is executed from
# matrix/ (after popd from rootfiles/), and the script ends inside matrix/.
popd
./RunPatterns
# Archive this test's outputs under tests/<label>/.
mkdir -p tests/$1
cp -v fit_result.*.root tests/$1/
cp -v colour.*.root tests/$1/
cp -vr pats tests/$1/
cp -v ../counts.root tests/$1/
cp -v ../rtree.root tests/$1/
echo "test $1 complete"
| true
|
b679399151f99167fc184aa224ee574b3543003b
|
Shell
|
brontosaurusrex/singularity
|
/workers/worker1
|
UTF-8
| 987
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# set -x
# base input where subfolders will live, each one is named as script that will be called for encoding
# like "toX264crf"
rootin="$HOME/output/workers"
# output dir, all encodes from various encoders will come here with "find-unique-file-name" protection
rootout="$HOME/output"
# eternal loop
# Polls the worker inbox, picks one file, and validates that its parent
# directory names a known encoder. Marked "to be continued" — the actual
# encode/dispatch step is not implemented yet.
while :
do
# find oldest file in all $rootin subfolders http://mywiki.wooledge.org/BashFAQ/003 (3rd example)
# NOTE(review): sort -znr sorts timestamps DESCENDING, so `latest` is the
# NEWEST file — this contradicts the "oldest" comment above; confirm intent.
IFS= read -r -d '' latest \
< <(find "$rootin" -type f -printf '%T@ %p\0' | sort -znr)
latest=${latest#* } # remove timestamp + space
echo "$latest"
#valid folders/encoders check
encoderfromdir="$(basename "$(dirname "$latest")")"
echo "$encoderfromdir"
# Returns 0 iff $1 equals one of the remaining arguments.
containsElement () {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1
}
array=("toX264crf" "toDV")
containsElement "$encoderfromdir" "${array[@]}" && echo "valid encoder/subdir"
# to be continued
done
| true
|
e51af080f5a104364247493b88612f2078260928
|
Shell
|
devcamcar/mesos-boshrelease
|
/jobs/cassandra/templates/bin/cassandra_ctl.erb
|
UTF-8
| 1,765
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BOSH job control script (ERB template) for the cassandra-mesos process:
# start|stop the Apache Cassandra service under runit's chpst.
set -e # exit immediately if a simple command exits with a non-zero status
# Setup common env vars and folders
source /var/vcap/packages/bosh-helpers/ctl_setup.sh 'cassandra'
export CASSANDRA_PID_FILE=${CASSANDRA_PID_DIR}/cassandra.pid
# Set the hostname
hostname <%= index %>.<%= name %>.<%= spec.networks.methods(false).grep(/[^=]$/).first.to_s %>.<%= spec.deployment %>.<%= spec.dns_domain_name %>
case $1 in

  start)
    pid_guard ${CASSANDRA_PID_FILE} ${JOB_NAME}
    echo $$ > ${CASSANDRA_PID_FILE}

    # Increase the default number of open file descriptors
    ulimit -n 8192

    # Create Apache Cassandra user & group
    create_group ${CASSANDRA_GROUP}
    create_user ${CASSANDRA_USER} ${CASSANDRA_GROUP}

    # Set Apache Cassandra classpath from every bundled jar.
    # BUG FIX: accumulate into CASSANDRA_CLASSPATH — the original appended to
    # ${CLASSPATH}, overwriting the variable each iteration so only the last
    # jar ended up on the java -cp line.
    CASSANDRA_CLASSPATH=""
    for jar in /var/vcap/packages/cassandra/lib/*.jar; do
      CASSANDRA_CLASSPATH="${CASSANDRA_CLASSPATH}:$jar"
    done

    # Start Apache Cassandra service
    ### START HACK
    ### cassandra-mesos doesn't allow to set the conf dir, so cd'ing into cassandra job directory in order for
    ### cassandra-mesos to find the conf files
    cd ${JOB_DIR}
    ### END HACK
    exec chpst -u ${CASSANDRA_USER}:${CASSANDRA_GROUP} java \
         -Xmx"${CASSANDRA_JAVA_HEAP_SIZE}" \
         -Xms"${CASSANDRA_JAVA_HEAP_SIZE}" \
         -Dfile.encoding=UTF-8 \
         -Djava.io.tmpdir=${CASSANDRA_TMP_DIR} \
         -cp ${CASSANDRA_CLASSPATH} mesosphere.cassandra.Main \
         >>${CASSANDRA_LOG_DIR}/${OUTPUT_LABEL}.stdout.log \
         2>>${CASSANDRA_LOG_DIR}/${OUTPUT_LABEL}.stderr.log
    ;;

  stop)
    # Stop Apache Cassandra service
    kill_and_wait ${CASSANDRA_PID_FILE}
    ;;

  *)
    echo "Usage: $0 {start|stop}"
    exit 1
    ;;

esac
exit 0
| true
|
add1f6c08e5df2ee50e952f0a0d21fd308dd28c0
|
Shell
|
ztipnis/docker-wildduck
|
/install/wildduck.sh
|
UTF-8
| 346
| 3.078125
| 3
|
[] |
no_license
|
# Minimal init-style wrapper for the wildduck node server.
# Usage: wildduck.sh {start|stop}
case $1 in
  start)
    set -e
    cd /opt/wildduck
    # Launch detached in a subshell; stdout and stderr go to separate logs.
    (node server.js --config=/etc/wildduck/wildduck.toml > /var/log/wildduck.log 2>/var/log/wildduck_error.log)&
    # Record the background PID so `stop` can signal it later.
    echo $! > /var/run/wildduck.pid
    ;;
  stop)
    # $( ) instead of deprecated backticks; quoted so an odd/empty pidfile
    # cannot word-split into multiple kill arguments.
    kill "$(cat /var/run/wildduck.pid)" ;;
  *)
    echo "usage: wildduck.sh {start|stop}" ;;
esac
exit 0
| true
|
ef4ff2e22f7348f5f074cbe9becb7329a6091f6a
|
Shell
|
wrld3d/android-api
|
/update.platform.sh
|
UTF-8
| 1,013
| 3.703125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Fetches the eeGeo Android SDK package from S3, unpacks it, and moves it into
# the project's include destination, recording the platform version on stdout.
baseUrl="http://s3.amazonaws.com/eegeo-static/"
srcPackageName="sdk.package.android.cpp11.c++_static.tar.gz"
destPackageName="./sdk.package.tar.gz"
includeDestination="./sdk/src/main/cpp/libs/eegeo"
sdkDestination="sdk.package.android"

echo "Fetching eeGeo sdk..."
# Clean out any previous download/extraction.
rm -f "./$destPackageName"
rm -rf "$includeDestination"

# URL-encode '+' so curl requests the correct S3 key.
src_url=$(echo "$baseUrl$srcPackageName" | sed "s:+:%2B:g")
curl "$src_url" > "./$destPackageName"
statuscode=$?

if [ "$statuscode" -ne 0 ] ; then
    echo "Failed to download sdk package ${baseUrl}${srcPackageName}" >&2
    exit "$statuscode"
fi

# Ensure the parent directory of the destination exists.
if [ ! -d "$(dirname "$includeDestination")" ]; then
    mkdir -p "$(dirname "$includeDestination")"
fi

tar -zxvf "$destPackageName"

if [ $? -ne 0 ] ; then
    echo "Failed to unzip sdk package ${destPackageName}"
    exit 1
fi

rm -f "./$destPackageName"

platformVersion=$(cat "./$sdkDestination/version.txt")
echo "Platform version --> $platformVersion"
# Echo the mv for the build log, then perform it.
echo mv "./$sdkDestination" "$includeDestination"
mv "./$sdkDestination" "$includeDestination"
| true
|
99e79112d6a9c091700a5645dfa35919cf2e55d1
|
Shell
|
oucxlw/MVAE
|
/download.sh
|
UTF-8
| 1,962
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
# progressfilt: suppress wget's noisy dot-output preamble.
# Reads stdin one character at a time; stays silent until it has seen two
# consecutive CR/LF characters, then passes every further character through
# unchanged (this is where wget's progress bar starts).
progressfilt ()
{
    local passthrough=false ch streak=0 cr=$'\r' nl=$'\n'
    while IFS='' read -d '' -rn 1 ch
    do
        if $passthrough; then
            printf '%s' "$ch"
            continue
        fi
        case "$ch" in
            "$cr"|"$nl")
                streak=$((streak + 1))
                # two line-breaks in a row: start forwarding output
                if [ "$streak" -gt 1 ]; then
                    passthrough=true
                fi
                ;;
            *)
                streak=0
                ;;
        esac
    done
}
# Dispatch: download a dataset or pretrained-model archive by keyword.
FILE=$1

# download_and_unzip URL ZIP_FILE DEST_DIR
# Fetch URL to ZIP_FILE (creating DEST_DIR first), extract into DEST_DIR,
# and remove the archive. Factored out of four identical if/elif branches.
download_and_unzip () {
    local url=$1 zip_file=$2 dest_dir=$3
    mkdir -p "$dest_dir"
    wget --progress=bar:force "$url" -O "$zip_file" 2>&1 | progressfilt
    unzip -qq "$zip_file" -d "$dest_dir"
    rm "$zip_file"
}

# BUG FIX: "$FILE" is quoted below; the original unquoted [ $FILE == ... ]
# produced a "unary operator expected" error (instead of the help text) when
# the script was run with no argument.
if [ "$FILE" == "dataset-VCC" ]; then
    # VCC dataset including 4 speakers
    download_and_unzip http://www.kecl.ntt.co.jp/people/kameoka.hirokazu/data/mvae/vcc.zip ./data/vcc.zip ./data/
elif [ "$FILE" == "test-samples" ]; then
    # test samples for VCC dataset
    download_and_unzip http://www.kecl.ntt.co.jp/people/kameoka.hirokazu/data/mvae/test_input.zip ./data/test_input.zip ./data/
elif [ "$FILE" == "model-VCC" ]; then
    # pretrained model using VCC dataset
    download_and_unzip http://www.kecl.ntt.co.jp/people/kameoka.hirokazu/data/mvae/model-vcc.zip ./pretrained_model/vcc.zip ./pretrained_model/
elif [ "$FILE" == "model-WSJ0" ]; then
    # pretrained model using WSJ0 dataset
    download_and_unzip http://www.kecl.ntt.co.jp/people/kameoka.hirokazu/data/mvae/model-wsj0.zip ./pretrained_model/wsj0.zip ./pretrained_model/
else
    echo "Available arguments are dataset-VCC, test-samples, model-VCC, model-WSJ0."
    exit 1
fi
| true
|
afc215dad72d49396e09cad40553aab94dd430f7
|
Shell
|
bskaggs/link_text_topic_model
|
/src/main/scripts/run_link_lda
|
UTF-8
| 602
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pipeline: build gensim corpora from a Wikipedia bzip2 dump, run LDA over the
# link text, store the model in HDFS, and sort the resulting sequence file.
#raw bzip2 file is $1
#working dir is $2
#hdfs dir is $3
DIRNAME=$(cd `dirname $0` && pwd)
LIBDIR=$DIRNAME/../ruby
JAR=$LIBDIR/phoenix.jar
# Stage 1: build the gensim wiki corpus locally, streaming titles into HDFS.
PYTHONIOENCODING=utf_8 python -m gensim.corpora.wikicorpus $1 $2 | hadoop jar $JAR phoenix.StreamToHdfs $3/gensim_titles.txt
# Stage 2: run LDA and save the topic model as an HDFS map/sequence file.
PYTHONIOENCODING=utf_8 python $DIRNAME/lda.py $2 | hadoop jar $JAR phoenix.GensimLDAMapFileSaver $3/gensim_link_titles.txt $3/unsorted_link_lda.seq
# Stage 3: sort the sequence file with a MapReduce job.
hadoop jar $JAR phoenix.SequenceFileSorter -D mapreduce.input.fileinputformat.inputdir=$3/unsorted_link_lda.seq -D mapreduce.output.fileoutputformat.outputdir=$3/sorted_link_lda
| true
|
140c790de6272a8fc7bc64859ba9841736b15687
|
Shell
|
truhlikfredy/thinkpadScripts
|
/cpu_power_switch.sh
|
UTF-8
| 2,189
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#now have sandybridge i7-2760qm - 2.4ghz with 3.5ghz turbo
# Cycles the CPU through five power profiles (0 -> 1 -> 2 -> 3 -> 4 -> 0) on
# each invocation. The current profile is inferred from scaling_max_freq,
# governor, and no_turbo, all read once below, so exactly one transition block
# should fire per run.
under=2100000
# Snapshot of the current state (read once; the blocks below test these).
noturbo=`cat /sys/devices/system/cpu/intel_pstate/no_turbo`
status=`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor`
min=`cat /sys/bus/cpu/devices/cpu0/cpufreq/cpuinfo_min_freq`
max=`cat /sys/bus/cpu/devices/cpu0/cpufreq/cpuinfo_max_freq`
freq=`cat /sys/bus/cpu/devices/cpu0/cpufreq/scaling_max_freq`
# Cap the max scaling frequency of every CPU to $1 (kHz sysfs units).
function maxFreq {
echo "Setting max cpu clock to $1"
for cpu in `ls /sys/bus/cpu/devices/`
do
echo $cpu
echo $1 > /sys/bus/cpu/devices/$cpu/cpufreq/scaling_max_freq
done
}
# Set intel_pstate min percent ($1), max percent ($2), and no_turbo flag ($3).
function pState {
echo $1 | tee /sys/devices/system/cpu/intel_pstate/min_perf_pct
echo $2 | tee /sys/devices/system/cpu/intel_pstate/max_perf_pct
echo $3 | tee /sys/devices/system/cpu/intel_pstate/no_turbo
echo "pstate min=$1 max=$2, turboDisabled=$3"
}
# Apply scaling governor $1 to all CPUs.
function governor {
echo "$1" | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
}
# Write the pstate_snb debugfs setpoint (requires mounted debugfs).
function setpoint {
echo $1 | tee /sys/kernel/debug/pstate_snb/setpoint
}
# Brief on-screen popup announcing the new mode.
function msg {
xmessage -timeout 1 -nearmouse -center "$1"
}
cat /sys/bus/cpu/devices/cpu0/cpufreq/scaling_max_freq
echo Switching to mode:
#0 to 1
if [ "$freq" = "$min" ]; then
echo "0 -> 1"
governor powersave
setpoint 98
maxFreq $under
pState 22 65 1
msg 1-low-$under=1.4GHz
fi
#1 to 2
if [ "$freq" = "$under" ]; then
echo "1 -> 2"
governor powersave
setpoint 97
maxFreq $max
pState 22 90 1
msg 2-low-$max=2.1GHz
fi
#2 to 3
if [ "$freq" = "$max" ] && [ "$status" = "powersave" ] && [ "$noturbo" = "1" ]; then
echo "2 -> 3"
governor powersave
setpoint 97
maxFreq $max
pState 22 100 0
msg 3-medium-powersave-TURBO=3.5GHz
fi
#3 to 4
if [ "$status" = "powersave" ] && [ "$noturbo" = "0" ]; then
echo "3 -> 4"
governor performance
setpoint 97
maxFreq $max
pState 50 100 0
msg 4-high-performance
fi
#4 to 0
if [ "$status" = "performance" ]; then
echo "4 -> 0"
governor powersave
setpoint 98
maxFreq $min
pState 22 22 1
msg 0-ultra-low-$min=0.8GHz
fi
echo Max freq now:
cat /sys/bus/cpu/devices/cpu0/cpufreq/scaling_max_freq
| true
|
e332df6f944f1a62a83793f00dc90e9c71ca263b
|
Shell
|
monotonemonk/arch_svntogit_community-
|
/python2-tlslite/repos/community-any/PKGBUILD
|
UTF-8
| 1,308
| 2.640625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Timothy Redaelli <timothy.redaelli@gmail.com>
# Contributor: Andy Weidenbaum <archbaum@gmail.com>
# Arch Linux PKGBUILD for python2-tlslite (built by makepkg, which supplies
# $pkgdir and sources this file).

pkgname=python2-tlslite
pkgver=0.4.9
pkgrel=2
pkgdesc="SSL/TLS library in pure Python supporting RSA and SRP ciphersuites"
arch=('any')
depends=('python2')
makedepends=('python2-setuptools')
optdepends=('python2-crypto: fast RSA operations and fast ciphers'
'python2-gmpy: fast RSA and SRP operations'
'python2-m2crypto: fast RSA operations and fast ciphers'
'python2-tackpy: run an X.509 server using TACK')
url="http://trevp.net/tlslite"
license=('custom')
options=(!emptydirs)
source=(https://pypi.org/packages/source/t/${pkgname#python2-}/${pkgname#python2-}-$pkgver.tar.gz)
sha256sums=('9b9a487694c239efea8cec4454a99a56ee1ae1a5f3af0858ccf8029e2ac2d42d')

# Rewrite all python shebangs to python2 before building.
prepare(){
cd "${pkgname#python2-}-$pkgver"
find . -type f -print0 | xargs -0 sed -i 's#/usr/bin/python#/usr/bin/python2#g'
find . -type f -print0 | xargs -0 sed -i 's#/usr/bin/env python#/usr/bin/env python2#g'
}

build() {
cd "${pkgname#python2-}-$pkgver"
python2 setup.py build
}

# Install into $pkgdir and ship the custom license plus README.
package() {
cd "${pkgname#python2-}-$pkgver"
python2 setup.py install --root="$pkgdir" --optimize=1
install -Dm 644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
install -Dm 644 README "$pkgdir/usr/share/doc/$pkgname/README"
}
| true
|
c2f2e02d45956d5cb4e68240aae47b3d4320090e
|
Shell
|
dividat/playos
|
/base/self-update/recover-from-tmpfile
|
UTF-8
| 1,881
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash

# This script is designed to recover files written with glib's
# g_file_set_contents function. On journaling filesystems this function
# ensures atomic updates to the target file by writing to a temporary
# file and moving that to the target destination in a second step.
# On journal-less filesystems such as FAT, the moving may itself not be
# atomic and we can end up with an empty target file and a complete
# temporary file. This is the situation this script is designed to
# detect and recover from, by repeating the moving from temp file to
# target.

# Usage: recover-from-tmpfile FILE
FILE="$1"

# Get absolute path for given file name
TARGET="$(realpath --no-symlinks "$FILE")"

# Only act when the target is missing or empty (-s: exists and non-empty).
if ! [ -s "$TARGET" ]; then
  # We expect a random alnum suffix of "up to" 7 characters
  # (https://docs.gtk.org/glib/func.file_set_contents_full.html).
  # The ones actually observed were 6 characters long, and we want to
  # ignore files that don't seem likely to be tempfile copies.
  TMP_SUFFIX="\.\w{5,7}"

  PARENT="$(dirname "$TARGET")"

  # List temp files based off of the target's name, with newer files first
  # NOTE(review): the array assignment word-splits `ls` output, so candidate
  # paths containing whitespace would break; `--time=birth` is GNU-coreutils
  # specific. $? after the assignment is the exit status of the $(...)
  # pipeline (i.e. grep), per bash's rule for assignments with command
  # substitutions.
  CANDIDATES=($(ls -t --time=birth -d "$PARENT/"* | grep -E "$TARGET$TMP_SUFFIX"))
  GREP_EXIT="$?"

  if [ "$GREP_EXIT" -eq 0 ] && [ "${#CANDIDATES[@]}" -ge 1 ]; then
    # Use the first, i.e. newest alternative as replacement
    REPLACEMENT="${CANDIDATES[0]}"
    if [ -s "$REPLACEMENT" ]; then
      mv "$REPLACEMENT" "$FILE"
      echo "Detected missing or empty '$FILE' and replaced it with '$REPLACEMENT'."
    else
      # If the newest alternative is empty, we do not know what to do.
      # Do not touch any evidence and abort.
      echo "Both '$FILE' and recovery candidate '$REPLACEMENT' are empty. Aborting."
    fi
  else
    echo "The file '$FILE' seems empty, but no alternatives were found. Aborting."
  fi
else
  echo "The file '$FILE' seems OK. Nothing to do."
fi
| true
|
9c352549f0a47feb506c8438430c870a0a2e509e
|
Shell
|
notnoopci/yarpc-go
|
/scripts/cover.sh
|
UTF-8
| 1,567
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merged Go test coverage for the packages given as arguments.
# For each package this derives a -coverpkg list (in-repo deps minus vendored
# code and packages opting out via a .nocover file), runs `go test -race` for
# all packages in parallel, and merges the profiles with gocovmerge.
set -e

# The $GOPATH/src/<pkg> paths below require a single-directory GOPATH.
if echo "${GOPATH}" | grep : >/dev/null; then
  echo "error: GOPATH must be one directory, but has multiple directories separated by colons: ${GOPATH}" >&2
  exit 1
fi

# Tiny job pool: collect background PIDs, then wait on all, failing the
# script if any job failed.
start_waitpids() {
  WAITPIDS=
}

do_waitpid() {
  # BUG FIX: "$@" must be quoted; the original bare $@ re-split and re-globbed
  # the command's arguments (ShellCheck SC2068).
  "$@" &
  WAITPIDS="${WAITPIDS} $!"
}

reset_waitpids() {
  for waitpid in ${WAITPIDS}; do
    wait "${waitpid}" || exit 1
  done
  WAITPIDS=
}

COVER=cover
ROOT_PKG=go.uber.org/yarpc

# Start from a clean coverage output directory.
if [[ -d "$COVER" ]]; then
  rm -rf "$COVER"
fi
mkdir -p "$COVER"

# If a package directory has a .nocover file, don't count it when calculating
# coverage. Builds the body of a jq object literal of excluded packages.
filter=""
for pkg in "$@"; do
  if [[ -f "$GOPATH/src/$pkg/.nocover" ]]; then
    if [[ -n "$filter" ]]; then
      filter="$filter, "
    fi
    filter="$filter\"$pkg\": true"
  fi
done

i=0
start_waitpids
for pkg in "$@"; do
  # Skip packages that contain no _test.go files.
  if ! ls "${GOPATH}/src/${pkg}" | grep _test\.go$ >/dev/null; then
    continue
  fi
  i=$((i + 1))
  # Optional extra coverage packages, one per line, relative to $pkg.
  extracoverpkg=""
  if [[ -f "$GOPATH/src/$pkg/.extra-coverpkg" ]]; then
    extracoverpkg=$( \
      sed -e "s|^|$pkg/|g" < "$GOPATH/src/$pkg/.extra-coverpkg" \
      | tr '\n' ',')
  fi
  # In-repo dependencies of $pkg (plus itself), excluding vendored code and
  # .nocover packages, joined into a comma-separated -coverpkg value.
  coverpkg=$(go list -json "$pkg" | jq -r '
    .Deps
    | . + ["'"$pkg"'"]
    | map
      ( select(startswith("'"$ROOT_PKG"'"))
      | select(contains("/vendor/") | not)
      | select({'"$filter"'}[.] | not)
      )
    | join(",")
  ')
  if [[ -n "$extracoverpkg" ]]; then
    coverpkg="$extracoverpkg$coverpkg"
  fi
  args=""
  if [[ -n "$coverpkg" ]]; then
    args="-coverprofile $COVER/cover.${i}.out -coverpkg $coverpkg"
  fi
  # $args is intentionally left unquoted so it word-splits into flags.
  do_waitpid go test -race $args "$pkg"
done
reset_waitpids

gocovmerge "$COVER"/*.out > cover.out
| true
|
3fb1c5a854ee8fcfba11ba0f90a69206f9434857
|
Shell
|
bitwisetech/xmly
|
/aXmly
|
UTF-8
| 2,620
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Utility to auto-run xmly.py; creates OpenRadar procedures file and FGFS Sid/Star Rm routes according to given ICAO name
# Usage: aXmly <ICAO>    (pass -h to print the help line)
MNAM=${0##*/} && MNAM=${MNAM%%.*}
ICAO=${1##*/} && ICAO=${ICAO%%.*}
# NOTE(review): UNAM/DNAM are unused; UNAM holds the literal string "whoami",
# probably intended as $(whoami) — confirm before relying on either.
UNAM="whoami"
DNAM=${MNAM}
# BUG FIX: the original compared against "-h " (trailing space), so the help
# text could never be printed.
if [ "$1" = "-h" ]
then
echo "$MNAM Utility to auto-run xmly.py; creates OpenRadar procedures file and FGFS Sid/Star Rm routes according to given ICAO name "
fi
### Edit / Change these pathNames for your system
# subfolders with arg1 icaoName will be written with AIFP, KMLS, ORDR, RMV2, SCEN files
OUTPPATH="."
# path to FAA inppppppput data
DATAPATH="/data/ARDP"
#
# Create the per-ICAO output subfolders (deduplicated from six copy-pasted
# blocks; same directories, same order as before).
for SUB in AIFP ATPI KMLS LVLD RMV2 SCEN
do
TDIR="$ICAO/$SUB"
echo mkdir -p $TDIR
mkdir -p $TDIR
done
##
#cd "$ICAO"
##
echo `pwd`
# Each step builds one xmly.py command line, echoes it for the log, then runs
# it; $CALL is intentionally unquoted so it word-splits into arguments.
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g ORDR -o $ICAO/procedures.xml -k $ICAO/$ICAO-skel.txt "
echo $CALL
#read -n1 -r -p "$MNAM Rdy OK ( AnyKey or Ctl-C to exit)"
$CALL
# preserve already customized spec file else offer new one
if [ ! -f "$ICAO/$ICAO-spec.txt" ]; then
sort $ICAO/$ICAO-skel.txt > $ICAO/$ICAO-spec.txt
nedit $ICAO/$ICAO-spec.txt
#
fi
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g ORDR -o $ICAO/procedures.xml -s $ICAO/$ICAO-spec.txt "
echo $CALL
$CALL
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g ATPI -o $ICAO/$ICAO "
echo $CALL
$CALL
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g FGAI -o $ICAO/$ICAO-fgai.xml -s $ICAO/$ICAO-spec.txt "
echo $CALL
$CALL
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g KMLS -o $ICAO/$ICAO-klst.xml "
echo $CALL
$CALL
sort $ICAO/$ICAO-klst.xml > $ICAO/$ICAO-kmls.xml
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g FGLD -o $ICAO/$ICAO-fgld.xml -s $ICAO/$ICAO-spec.txt "
echo $CALL
$CALL
#
CALL="xmly.py -i $DATAPATH/STARDP.txt -f ARDP -n $ICAO -g RMV2 -o $ICAO/$ICAO-rmv2.xml "
echo $CALL
$CALL
#
#
echo "$MNAM $ICAO finis "
| true
|
2088659cb02f8b9ea4ccd1b2c38dfa4e9e2b9182
|
Shell
|
jongmin92/code-examples
|
/bash/run-command-n-times.sh
|
UTF-8
| 254
| 3.3125
| 3
|
[] |
no_license
|
# Four equivalent ways of repeating a command N times in bash.

## for loop - 1: brace expansion, run `date` ten times
for i in {1..10}; do
  date
done

## for loop - 2: print the numbers 1..10
for i in {1..10}; do
  printf '%s\n' "$i"
done

## for loop - 3: C-style counter, print 0..9
for (( n = 0; n < 10; n++ )); do
  printf '%s\n' "$n"
done

## while loop: count down from $END, printing the date each time
END=5
x=$END
while (( x > 0 )); do
  date
  x=$(( x - 1 ))
done
| true
|
f762237d24d0eab0232abb82f75aedf0cac70038
|
Shell
|
samwhelp/archcraft-adjustment-prototype
|
/project/archcraft-adjustment-prototype-ctrl/asset/usr/share/archcraft-adjustment-prototype-ctrl/bin/maintain-help.sh
|
UTF-8
| 805
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Prints the make-target help text for archcraft-adjustment-prototype-ctrl.

################################################################################
### Head: Init
##
# Resolve this script's directory, then source shared helpers from ../ext.
THE_BASE_DIR_PATH="$(cd -- "$(dirname -- "$0")" ; pwd)"
THE_BASE_DIR_PATH="$THE_BASE_DIR_PATH/../ext"
source "$THE_BASE_DIR_PATH/init.sh"
##
### Tail: Init
################################################################################


################################################################################
### Head: Main
##
# Emit the usage/help text verbatim.
__main__ () {

cat << EOF
Usage:
	$ make [action]

Example:

	$ make
	$ make help
	$ make install
	$ make remove
	$ make manjaro-prepare
	$ make ubuntu-prepare

Debug:

	$ export DEBUG_ADJUSTMENT_PROTOTYPE_CTRL=true

EOF

}

__main__ "$@"
##
### Tail: Main
################################################################################
| true
|
f949c87823b041883116b90f60f99aa4d91fd93f
|
Shell
|
LinMAD/SimpleGames
|
/SDL2/Tetris/conan.sh
|
UTF-8
| 673
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Conan helper for the SDL2 Tetris project: configure community remotes or
# install dependencies into the matching CMake build directory.
case "$1" in
add_remote)echo "Configure conan with community remotes to get needed dependencies"
conan remote add bincrafters https://api.bintray.com/conan/bincrafters/public-conan
;;
debug)echo "Installing dependencies for debug"
conan install . -s build_type=Debug --install-folder=cmake-build-debug
# NOTE(review): `wait` is a no-op here — conan ran in the foreground.
wait
;;
release)echo "Installing dependencies for release"
conan install . -s build_type=Release --install-folder=cmake-build-release
wait
;;
*)echo "Unknown arg, user: debug or release"
echo "---------------------------"
exit 1
;;
esac
exit 0
| true
|
abde242c1726c8e5237d3ffb7c6e4757ef5bfbaf
|
Shell
|
open-horizon/anax
|
/test/gov/sharedreg.sh
|
UTF-8
| 2,186
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Registers (or updates) this node with the local anax agent API and then
# registers the gpstest and location2 services, all via curl against $ANAX_API.
EMAIL="foo@goo.com"

echo "Calling node API"

# If a node already exists (the API returns an account id), only PATCH the
# id/token; otherwise POST a full new-device record.
curl -sS -H "Content-Type: application/json" "$ANAX_API/node" | jq -er '. | .account.id' > /dev/null
if [[ $? -eq 0 ]]; then
  read -d '' updatehzntoken <<EOF
{
  "account": {
    "id": "$USER"
  },
  "token": "$TOKEN"
}
EOF

  echo -e "\n[D] hzntoken payload: $updatehzntoken"

  echo "Setting device id and token into horizon API"

  echo "$updatehzntoken" | curl -sS -X PATCH -H "Content-Type: application/json" --data @- "$ANAX_API/node"

else

  read -d '' newhzndevice <<EOF
{
  "account": {
    "id": "$USER",
    "email": "$EMAIL"
  },
  "id": "$DEVICE_ID",
  "name": "$DEVICE_NAME",
  "token": "$TOKEN"
}
EOF

  echo -e "\n[D] hzndevice payload: $newhzndevice"

  echo "Updating horizon with out device id and token"

  echo "$newhzndevice" | curl -sS -X POST -H "Content-Type: application/json" --data @- "$ANAX_API/node"
fi

# Register the gpstest service with a free-form mapped attribute.
read -d '' gpstestservice <<EOF
{
  "sensor_url": "https://bluehorizon.network/documentation/gpstest-device-api",
  "sensor_name": "gpstest",
  "attributes": [
    {
      "id": "free form",
      "short_type": "mapped",
      "label": "Extra",
      "publishable": true,
      "mappings": {
        "foo": "goo",
        "new": "zoo"
      }
    }
  ]
}
EOF

echo -e "\n\n[D] gpstestservice payload: $gpstestservice"

echo "Registering gpstest service"

echo "$gpstestservice" | curl -sS -X POST -H "Content-Type: application/json" --data @- "$ANAX_API/microservice/config"

# Register the location2 service with the same attribute shape.
read -d '' location2service <<EOF
{
  "sensor_url": "https://bluehorizon.network/documentation/location2-device-api",
  "sensor_name": "location2",
  "attributes": [
    {
      "id": "free form",
      "short_type": "mapped",
      "label": "Extra",
      "publishable": true,
      "mappings": {
        "foo": "goo",
        "new": "zoo"
      }
    }
  ]
}
EOF

echo -e "\n\n[D] location2service payload: $location2service"

echo "Registering location2 service"

echo "$location2service" | curl -sS -X POST -H "Content-Type: application/json" --data @- "$ANAX_API/microservice/config"

# Dump everything registered so far for debugging.
echo -e "\n\n[D] all registered attributes:\n"
curl -sS -H "Content-Type: application/json" "$ANAX_API/attribute" | jq -r '.'
| true
|
b265e77ddafc6b80e4f51ced4d7dff5873798849
|
Shell
|
aheilmaier/iHAC
|
/ihac-qkviewadd
|
UTF-8
| 1,674
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#version: 2.0
#license: MIT
#author: Simon Kowallik <github simonkowallik.com>
#source: https://github.com/simonkowallik/iHAC
#
# Uploads one or more qkview files (or stdin) to F5 iHealth and prints the
# qkview id plus the server's result string for each upload.
IHACAUTH="$HOME/.ihac/auth.jar"
IHACPROXYFILE="$HOME/.ihac/IHACPROXY"
# Optional proxy: the stored value starts after "x " so that the unquoted
# expansion -i$IHACPROXY below becomes curl's combined "-ix <proxy>" options —
# presumably intentional; confirm before quoting it.
if [[ -f "$IHACPROXYFILE" ]]; then IHACPROXY="$(<$IHACPROXYFILE)"; fi
if [[ ! -z "$IHACPROXY" ]]; then IHACPROXY="x $IHACPROXY"; fi

# Print to stderr.
function echoerr() { echo "$@" 1>&2; }

command -v perl >/dev/null 2>&1 || { echoerr "Error: $(basename $0) requires perl but it is not installed."; exit 1; }
command -v curl >/dev/null 2>&1 || { echoerr "Error: $(basename $0) requires curl but it is not installed."; exit 1; }

# Arguments are qkview paths; with no args and piped stdin, upload "-" (stdin).
if [[ "$#" -gt 0 ]]
then
  QKVIEWS=("$@")
elif [[ ! -t 0 ]]
then
  QKVIEWS="-"
else
  echoerr "Error: no file specified"
  echoerr "Use: `basename $0` /path/file [/path/fileN]"
  echoerr "Use: cat /path/file | `basename $0`"
  exit 1
fi

for QKVIEW in "${QKVIEWS[@]}"; do
  if [[ -f "$QKVIEW" ]] || [[ "$QKVIEW" == "-" ]]; then
    # POST the qkview; the inline perl filter parses the HTTP response:
    # a 303 is success (it extracts the qkview id from the Location header and
    # the <result> text from the XML body); 30x without 303 means not
    # authenticated; 40x is reported with its status code.
    curl --progress-bar -i$IHACPROXY -H"Accept: application/vnd.f5.ihealth.api.v1.0" --user-agent "iHAC/2.0" \
    --cookie "$IHACAUTH" --cookie-jar "$IHACAUTH" \
    -F "qkview=@$QKVIEW" -F 'visible_in_gui=True' \
    -o - https://ihealth-api.f5.com/qkview-analyzer/api/qkviews \
    | perl -ne 's/\r\n//g;
        if(m|http/1.. 303|gi) {$ok=1} else {
          if(m|http/1.. 30\d|gi) {print STDERR "Error: not authenticated.\n";exit 1}
          if(m|http/1.. (40\d)|gi) {print STDERR "Error: received '$1' response from server.\n";exit 1}
        } if($ok && m/^location/gi) {if(m|.+/qkviews/(\d+)$|gi) {$qkid=$1}}
        if($ok && m/<\?xml/gi) {if(m|<result>(.+)</result>|gi) {print "$qkid $1\n"} exit 0}'
  else
    echoerr "Error: '$QKVIEW' not a file or does not exist."
  fi
done
| true
|
c624d05a0f4afb4930a34419d649cd0b6ebe2c20
|
Shell
|
vutung2311/stratos_kernel
|
/ramdisk/sbin/script/renice_tweak.sh
|
UTF-8
| 3,091
| 2.984375
| 3
|
[] |
no_license
|
#!/system/bin/sh
#=====================================================
#=====================================================
#========== AmazBoost by androidexpert35 =============
#=====================================================
#=====================================================
#======== Copyright (C) 2018 Antonio Cirielli ========
#=======================================================================#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
#======================================================================#
# Android tweak script: re-prioritizes (renice/ionice via busybox) kernel
# flush daemons, zygote, running services, and apps, logging each step.
# Start a fresh log file for this run.
LS=/sdcard/amazboost/LOGS/renice.log
if [ -e $LS ]; then
busybox rm $LS;
fi
busybox touch $LS;
busybox echo "# AmazBoost Engine" | busybox tee -a $LS;
busybox echo "" | busybox tee -a $LS;
busybox echo "# STARTING AmazBoost : $( date +"%m-%d-%Y %H:%M:%S" )" | busybox tee -a $LS;
busybox echo "" | busybox tee -a $LS;
# Lower I/O priority of root processes, excluding critical daemons.
for pid in `busybox ps | busybox awk '{ if ( $2 ~ /^root$/ && $4 !~ /netd$|rild$|zygote$|kswapd|vold|loop/ ) print $1 }'`; do
busybox ionice -c 2 -n 3 -p $pid 2>/dev/null
done;
busybox echo "Renice Swap/Loop... = Activated..." | busybox tee -a $LS;
# Demote pdflush writeback threads to idle I/O class.
for pid in $(busybox pgrep pdflush 2>/dev/null); do
busybox renice 7 $pid 2>/dev/null
busybox ionice -c 3 -n 7 -p $pid 2>/dev/null
done;
busybox echo "Renice PdFlush... = Activated..." | busybox tee -a $LS;
# Same for per-device flush- kernel threads.
for pid in $(busybox pgrep flush- 2>/dev/null); do
busybox renice 7 $pid 2>/dev/null
busybox ionice -c 3 -n 7 -p $pid 2>/dev/null
done;
busybox echo "Renice Flush... = Activated..." | busybox tee -a $LS;
# Boost zygote (app spawner) to real-time I/O and higher CPU priority.
pid=$(busybox pgrep zygote 2>/dev/null)
busybox renice -1 $pid 2>/dev/null
busybox ionice -c 1 -n 0 -p $pid 2>/dev/null
busybox echo "Renice Zygote... = Activated..." | busybox tee -a $LS;
# Demote background service processes reported by the activity manager.
for pid in `dumpsys activity services | busybox grep -i app=ProcessRecord | busybox awk '{ print $2 }' | busybox grep -v '/100[0-9]}' | busybox cut -d: -f1`; do
busybox renice 7 $pid 2>/dev/null
busybox ionice -c 2 -n 7 -p $pid 2>/dev/null
done;
busybox echo "Renice Dumpsys Activity Service = Activated..." | busybox tee -a $LS;
# Boost foreground app_ processes.
for pid in `busybox ps | busybox awk '{ if ( $2 ~ /^app_/) print $1 }'`; do
busybox renice -1 $pid 2>/dev/null
busybox ionice -c 1 -n 0 -p $pid 2>/dev/null
done;
busybox echo "Renice Apps... = Activated..." | busybox tee -a $LS;
busybox echo "" | busybox tee -a $LS;
busybox echo "Optimization Renice System = Activated..." | busybox tee -a $LS
busybox echo "" | busybox tee -a $LS;
busybox echo "# END AmazBoost : $( date +"%m-%d-%Y %H:%M:%S" ) " | busybox tee -a $LS
| true
|
8eca8cd4335a94e0ebd0c946e4a04d6160151fff
|
Shell
|
gmfricke/singularity-test
|
/mpitest/mpiring.pbs
|
UTF-8
| 1,558
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# PBS job script: runs the mpi-ring test binary across 2 nodes x 8 cores.
# The #PBS lines below are scheduler directives, not shell comments to edit.

# bash$ cd $PBS_O_WORKDIR
# bash$ bash ./singularity-test-ubuntu.pbs
#
# Interactive pbs sessions are useful for debugging, but are not
# the preferred way to run production jobs. Once problem is
# identified, do submit as normal.

# Processors per node (ppn), today is synonymous with cores:
# xena has 16 and wheeler has 8
#PBS -l nodes=2:ppn=8
#PBS -l walltime=48:00:00
#PBS -S /bin/bash
#PBS -j oe
#PBS -N singularity-test

# Load the environment modules system
## TODO add to .profile along with umask
# Toolchain: Intel compilers + OpenMPI + Singularity (cluster module names).
module load intel
module load openmpi-2.1.1-intel-17.0.4-gyecxys
module load singularity-2.4.1-intel-17.0.4-sjwoqj4

# The variable $PBS_NP is always equal to nodes x ppn as set via -l flag
# and the following information will be found in the .o file
echo -e "\nLaunching $PBS_NP mpi tasks across $PBS_NUM_NODES node(s)."
echo -e "The nodes are: $(cat $PBS_NODEFILE | uniq | tr '\n' ' ')\n\n"

# Change to the directory that this job was submitted from
# The variable is set to whatever directory you were in when qsub was called
cd $PBS_O_WORKDIR

echo -e "Starting job $(echo $PBS_JOBID | cut -d"." -f1) on $(date)\n"

# I have redirected output to out.log, which you will be able to check while running.
# Otherwise, STDOUT is stored in RAM both consuming RAM and hiding output until the end
# when the .o file is produced. Feel free to change the name of output to anything you like.
mpirun -n $PBS_NP -machinefile $PBS_NODEFILE $PBS_O_WORKDIR/mpi-ring

echo -e "Finishing job $(echo $PBS_JOBID | cut -d"." -f1) on $(date)"
| true
|
d36084836a03b81b655dcc31523c8991f38df085
|
Shell
|
crrlcx/ansible-jemalloc
|
/test/run.sh
|
UTF-8
| 414
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the test playbook, always tear the containers down afterwards, and
# finally report the test playbook's own exit status.

ansible-playbook -i hosts test.yml
# Capture the test status before any other command can overwrite $?.
test_status=$?

# Best-effort teardown: filter out ssh connection noise and ignore failures.
ansible-playbook -i hosts shut-down-containers.yml \
  | grep -v 'Failed to connect to the host via ssh' \
  || true

# Propagate the result of running the test.yml playbook.
exit "$test_status"
| true
|
830dabec383ea2a1f49a87b3d35620c820a5faf5
|
Shell
|
Ramachandraiah/power-scripting
|
/Shell/make-folder.sh
|
UTF-8
| 2,124
| 4.59375
| 5
|
[] |
no_license
|
#!/bin/bash
###############################################################################################
# Date : 28th May 2016 #
# Author : Ramachandraiah Nikadi #
# Languge : Shell Scripting #
# Script name : make-folder.sh #
# Description : Creates a given folder in the list of sub folders in the given base directory #
# usage : ./make-folder.sh <base_directory> <new_directory> #
###############################################################################################

# Check for number of input parameters and exit if they are not equals to 2
# Also Provide some usage information
if [ "$#" -ne 2 ]
then
  echo "INFO :: Please provide correct number of arguments"
  echo "INFO :: usage : ./make-folder.sh <base_directory> <new_directory>"
  exit 1
fi

# Getting folder names from the command line arguments
base_directory=$1
new_directory=$2

echo "Given base directory: \"$base_directory\""
echo "Given new directory: \"$new_directory\""

# Find the immediate (non-hidden) subdirectories of the base directory and
# create the new folder in each of them, only if it doesn't exist already.
# -maxdepth/-mindepth limit the search to direct children; the regex excludes
# hidden directories (see
# http://askubuntu.com/questions/266179/how-to-exclude-ignore-hidden-files-and-directories-in-a-wildcard-embedded-find).
# BUG FIX: the original `for D in $(find ...)` word-split on whitespace, so
# directory names containing spaces broke (and the unquoted mkdir likewise);
# NUL-delimited find output with a read loop handles arbitrary names.
while IFS= read -r -d '' D
do
  new_folder_full_path="$D/$new_directory"
  if [ -d "$new_folder_full_path" ]
  then
    echo "INFO :: folder \"$new_folder_full_path\" already exists"
  else
    echo "INFO :: folder \"$new_folder_full_path\" does't exists, Hence Creating"
    mkdir -- "$new_folder_full_path"
  fi
done < <(find "$base_directory" -maxdepth 1 -mindepth 1 \( ! -regex '.*/\..*' \) -type d -print0)
| true
|
79fcf343a0f33d5cc352ef8e57de2fdc19338e8e
|
Shell
|
leopiney/tscf
|
/evaluate_logs.sh
|
UTF-8
| 171
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Convert each evaluation log (indices 0-9) into a CSV using extract_logs.awk.
for idx in {0..9}; do
  awk -f extract_logs.awk "evaluations/evaluation_${idx}.log" > "evaluations/evaluation_${idx}.csv"
done
| true
|
4f035e54eb6c270dab502d81c0ae17d94b44c179
|
Shell
|
conda-forge/lmdb-feedstock
|
/recipe/build.sh
|
UTF-8
| 263
| 3.03125
| 3
|
[
"BSD-3-Clause",
"OLDAP-2.8",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# conda-build script for liblmdb: build, optionally run tests (skipped when
# cross-compiling), and install into the conda $PREFIX.
set -ex

cd libraries/liblmdb/

# Fall back to plain `ar` when the conda toolchain doesn't export $AR.
if [[ -z "${AR}" ]]; then
  AR=ar
fi

export DESTDIR=$PREFIX
make CC=$CC AR=$AR
# Target binaries can't execute on the build host when cross-compiling.
if [[ "${CONDA_BUILD_CROSS_COMPILATION:-0}" == "0" ]]; then
  make test
fi
make install

# delete static library
rm $PREFIX/lib/liblmdb.a
| true
|
fa3062080e42ca64bd847bd8e3658c2f203a30d3
|
Shell
|
ctx-core/monorepo
|
/bin/package-manifest-changeset.sh
|
UTF-8
| 1,014
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Builds/updates .changeset markdown files from "<package> <message>" lines.
# NOTE(review): the while-loop at the bottom reads from *stdin*; the caller is
# presumably expected to pipe the package/message lines in — confirm.
usage() { echo "Usage: $0 [-b] [-h]" 1>&2; exit 1; }

while getopts "b:h" o; do
    case "$o" in
        b)
            BUMP=$OPTARG
            ;;
        h)
            usage
            # NOTE(review): unreachable — usage() already exits.
            exit
            ;;
    esac
done
# Default bump level when -b is not given.
BUMP=${BUMP:-patch}
# NOTE(review): assigning to PWD shadows the shell's special variable.
PWD=$(pwd)
# All workspace packages, one "<name> <dir> ..." line each.
LIST="$(pnpm list -r --depth -1)"
BIN_DIR="$(dirname $0)"
# NOTE(review): $DIR is unset here, so this prints an empty line.
echo $DIR
while read line; do
    # Skip blank lines.
    if [ -z "$(echo "$line" | xargs)" ]; then
        continue
    fi
    # First word is the package name; the rest is the changeset message.
    PKG="$(echo "$line" | awk '{print $1}')"
    MSG="$(echo "$line" | awk '{$1=""; print $0}' | "$BIN_DIR/surrounding-trim.sh")"
    DIR="$(echo "$LIST" | grep "$PKG" | awk '{print $2}')"
    # Changeset filename derived from a checksum of the message, so the same
    # message always maps to the same file.
    CHANGESET_MD_PATH="$PWD/.changeset/$(cksum <<<$MSG | cut -f 1 -d ' ').md"
    FRONTMATTER=''
    # If the changeset already exists, keep its YAML frontmatter (the lines
    # between the two --- markers).
    if [ -f "$CHANGESET_MD_PATH" ]; then
        FRONTMATTER="$(perl -ne '/^---/ && $i++; !/^---/ && $i < 2 && print' "$CHANGESET_MD_PATH")"
    fi
    # Append this package's bump entry if not already listed.
    if [ -z "$(echo "$FRONTMATTER" | xargs | grep $PKG)" ]; then
        FRONTMATTER="$(
            echo "$FRONTMATTER"
            echo \"$PKG\": $BUMP
        )"
    fi
    FRONTMATTER="$(echo "$FRONTMATTER" | sed '/^$/d')"
    # Rewrite the changeset file with the merged frontmatter and message.
    cat <<EOF >"$CHANGESET_MD_PATH"
---
$(echo "$FRONTMATTER")
---

$MSG
EOF
done
| true
|
36810df8fea6dd97d89c0851c4df5c6e6c65191e
|
Shell
|
zlmone/scripts-and-guides
|
/_bin/update_readme.sh
|
UTF-8
| 1,786
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerates README.md: writes a fixed header, appends a table-of-contents
# bullet for every guide and every script directory, then stages the result.
# BUG FIX: the shebang was "#/bin/bash" (missing "!"), so the interpreter was
# never actually selected; a stray debug `echo "sdfsdf"` was also removed.

cat > README.md << "EOL"
# Embedded Machine Learning Scripts and Guides

This is a collection of scripts and guides by the <a href="https://embedded-machine-learning.github.io/webpage/">Christian Doppler Laboratory for Embedded Machine Learning</a>

We collect scripts and guides that help us in our everyday work to setup software and frameworks. This repository is also the source of an EML Toolbox that aims to easyily implement machine learning toolchains.

## Table of Contents

### Guides

EOL

# One bullet per guide document; the link text is the guide's first "#" heading.
for d in guides/*.md; do
  dirname=$(basename ${d%/})
  # remove numbers
  name_print=${dirname}
  # printf "\n\n## ${name_print//_/ }\n" >> README.md
  grep -m 1 "#" guides/${dirname}
  # NOTE(review): ${d} is a *.md file here, so ${d}/*.md never matches and this
  # inner loop runs exactly once with the literal pattern — confirm intent.
  for d2 in ${d}/*.md; do
    name=$(basename ${d2%.md})
    printf "* [" >> README.md
    grep -m 1 "#" guides/${dirname} | tr -d '\n' | tr -d '#'>> README.md
    printf "](./guides/${dirname})\n" >> README.md
  done
done

printf "\n\n### Scripts\n" >> README.md

# One bullet per script directory; the link text is the first "#" heading of
# that directory's README.md.
for d in scripts/*/; do
  dirname=$(basename ${d%/})
  # remove numbers
  name_print=${dirname}
  #printf "\n\n## ${name_print//_/ }\n" >> README.md
  for d2 in ${d}/*.md; do
    name=$(basename ${d2%.md})
    printf "* [" >> README.md
    grep -m 1 "#" scripts/${dirname}/README.md | tr -d '\n' | tr -d '#'>> README.md
    printf "](./scripts/${dirname}/README.md)\n" >> README.md
    #printf "* [${name//_/ }](./scripts/${dirname}/${name}.md/)\n" >> README.md
  done
done

#printf "\n\nTo automatically rebuild the README file on each commit, run "\""bin/activate_hook"\"" from inside the repo once." >> ${dir}/README.md

git add README.md
echo "README updated!"
cat README.md
git status
| true
|
0bf9775696853383531f9c8b40c39a79ea78a923
|
Shell
|
langest/dotfiles
|
/scripts/wallpaper.sh
|
UTF-8
| 527
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Set a random wallpaper with feh.
# With MONITOR_SETTINGS=external, pick one random landscape and one random
# portrait image (one per monitor); otherwise let feh choose recursively.
# Fixes vs. original: $(...) instead of backticks, and quoted paths so files
# or a $HOME containing spaces do not break the command lines.
if [[ $MONITOR_SETTINGS = "external" ]]; then
    # Oneliner that does the same thing
    # cat <(find $HOME/img/wps_portrait/ -type f -printf '"%p"\n' | sort -R | head -n1) <(find $HOME/img/wps/ -type f -printf '"%p"\n' | sort -R | head -n1) | xargs feh --bg-fill
    LANDSCAPE=$(find "$HOME/Pictures/wps/" -type f | sort -R | head -n1)
    PORTRAIT=$(find "$HOME/Pictures/wps_portrait/" -type f | sort -R | head -n1)
    feh --bg-fill "$LANDSCAPE" "$PORTRAIT"
    exit 0
fi
feh --randomize --recursive --bg-fill "$HOME/Pictures/wps"
| true
|
2d5cbd1d37e8b97ebd234dafca60f885ec612ee8
|
Shell
|
TheCoreMan/make-git-better-2
|
/levels/tests/test-tag-2.sh
|
UTF-8
| 539
| 2.890625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Level checker for the "tag-2" puzzle of this git learning game: passes when
# the tag pointing at HEAD's first parent shows the expected marker content.
source $(dirname $0)/tests-lib.sh
level_branch=individually-nonintroversive-chalcomancy
level_title=tag-2
test_log "testing level $level_title branch $level_branch"
# Start from a clean checkout of the level branch.
git checkout $level_branch
git clean -f -d
# PUT TEST CODE HERE, like git add + git commit
# Pipeline: take HEAD's parent hash (%P), find the tag ref that points at it
# (show-ref -d also lists dereferenced ^{} entries), strip the refs/tags/
# prefix and the ^{} marker, `git show` that tag, and match the expected text.
if [[ $(git show $(git show-ref --tags -d | grep ^$(git --no-pager log --pretty=%P -n 1) | sed -e 's,.* refs/tags/,,' -e 's/\^{}//')) == *"nine-botchy-remarker (final)"* ]]; then
# manual pass (no push)
exit 0
else
# manual fail (no push)
exit 1
fi
| true
|
1be71fae837f6e0f23156bf42e3e88175013ffd4
|
Shell
|
bhoobal/terraform-asCode
|
/createec2
|
UTF-8
| 1,487
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash -ex
# Create one or more EC2 instances via Terraform for a given application
# profile, then archive the resulting tfstate to S3 for governance.
# Positional arguments:
#   $1 USER  $2 ENVIRONMENT  $3 APP  $4 NAME  $5 TEAM
#   $6 DESCRIPTION  $7 POWER_ON_HOURS
# NOTE(review): the error text below also lists INSTANCE_TYPE as required,
# but no INSTANCE_TYPE argument is read in this script -- confirm against
# the Terraform profiles.
# This script will create one or more EC2 instances in either
# Expects numerous environment variable to be in place.
# Required values to be set:
# USER
# ENVIRONMENT
# APP
# COMPONENTS
# NAME
# TEAM
# INSTANCE_TYPE
USER=$1
ENVIRONMENT=$2
APP=$3
NAME=$4
TEAM=$5
DESCRIPTION=$6
POWER_ON_HOURS=$7
if [[ -z "${USER}" ]] || [[ -z "${ENVIRONMENT}" ]] || [[ -z "${APP}" ]] || [[ -z "${NAME}" ]] || [[ -z "${TEAM}" ]]; then
echo
echo "Parameter Error: USER, ENVIRONMENT, APP, NAME, TEAM and INSTANCE_TYPE are all required parameters."
echo
exit 1
fi
# When copying the tfstate to S3, delineate by static -vs- feature
# Long-lived environments (alpha/beta/prod) use their own tfvars file;
# anything else is treated as an ephemeral feature environment.
if [[ $(echo "$ENVIRONMENT" | tr A-Z a-z) =~ alpha|beta|prod ]]; then
S3DIR="static/${ENVIRONMENT}"
TFVARS="${ENVIRONMENT}.tfvars"
else
S3DIR="feature/${ENVIRONMENT}"
TFVARS="./Feature.tfvars"
fi
pushd profiles/${APP}
terraform init
terraform get
# Each profile contains environment-specific tfvars files for Alpha+
# Feature needs to use a reasonable set of defaults
terraform apply -var-file=../../Global.tfvars \
-var-file=${TFVARS} \
-var "name=${NAME}" \
-var "environment=${ENVIRONMENT}" \
-var "user=${USER}" \
-var "app=${APP}" \
-var "team=${TEAM}" \
-var "poh=${POWER_ON_HOURS}" \
-var "description=${DESCRIPTION}" \
--auto-approve
# ID=$(terraform show|grep 'id = i'|cut -f 2 -d '='|sed 's/ //')
# Archive the state file under a dated, unique key for audit purposes.
# NOTE(review): S3DIR is computed above but not used in this destination
# path -- confirm whether it was meant to appear in the S3 key.
aws s3 cp terraform.tfstate s3://support/governance/ec2/${USER}/$(date +%Y%m%d)/$(uuidgen)
| true
|
8f51f9adb33a3ee4719fe2e5af8d6d4200879c1b
|
Shell
|
propertybase/elk
|
/image/10_system_env.sh
|
UTF-8
| 841
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Register ELK components (Elasticsearch, Logstash, Kibana) as runit-style
# services under /etc/service. Each service gets a `down` file so it does not
# autostart, plus an executable `run` script that execs the daemon in the
# foreground with stderr merged into stdout.
# Install services
## Elasticsearch
mkdir /etc/service/elasticsearch
touch /etc/service/elasticsearch/down
cat > /etc/service/elasticsearch/run <<EOF
#!/bin/sh
exec 2>&1
exec /usr/local/elasticsearch/bin/elasticsearch "" "" "-Des.default.config=/etc/elasticsearch/elasticsearch.yml -Des.default.path.data=/data -Des.default.path.conf=/etc/elasticsearch"
EOF
chmod +x /etc/service/elasticsearch/run
## Logstash
mkdir /etc/service/logstash
touch /etc/service/logstash/down
cat > /etc/service/logstash/run <<EOF
#!/bin/sh
exec 2>&1
exec /usr/local/logstash/bin/logstash -f /etc/logstash.conf
EOF
chmod +x /etc/service/logstash/run
## Kibana
mkdir /etc/service/kibana
touch /etc/service/kibana/down
cat > /etc/service/kibana/run <<EOF
#!/bin/sh
exec 2>&1
exec /usr/local/kibana/bin/kibana
EOF
chmod +x /etc/service/kibana/run
| true
|
ed34dfe7ddf89db9ae696fcab6d54422963fedc4
|
Shell
|
NotoriousRebel/Mythic
|
/rabbitmq-docker/config_rabbit.sh
|
UTF-8
| 880
| 3.609375
| 4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# One-time RabbitMQ provisioning for Mythic: wait for the broker to become
# healthy, then create the mythic user, vhost, and permissions. A marker file
# ("/<script>.completed") makes the script idempotent across restarts.
# This script needs to be executed just once
if [ -f /$0.completed ] ; then
  echo "$0 `date` /$0.completed found, skipping run"
  exit 0
fi
# Wait for RabbitMQ startup
for (( ; ; )) ; do
  sleep 2
  rabbitmqctl -q node_health_check > /dev/null 2>&1
  if [ $? -eq 0 ] ; then
    echo "$0 `date` rabbitmq is now running"
    break
  else
    echo "$0 `date` waiting for rabbitmq startup"
  fi
done
# Execute RabbitMQ config commands here
# Create user
rabbitmqctl add_user mythic_user mythic_password
rabbitmqctl add_vhost mythic_vhost
rabbitmqctl set_user_tags mythic_user administrator
# Grant full configure/write/read permissions on the vhost.
rabbitmqctl set_permissions -p mythic_vhost mythic_user ".*" ".*" ".*"
echo "$0 `date` user mythic_user created"
# Create queue
#rabbitmqadmin declare queue name=QUEUE durable=true
#echo "$0 `date` queues created"
# Create mark so script is not ran again
touch /$0.completed
| true
|
102a8f4337582df3c6d31b82d9734c435f3d391e
|
Shell
|
xk2600/libsh
|
/src/package
|
UTF-8
| 879
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/printf %s: must be sourced from /bin/sh interpreter.\n
# vim:syntax=sh
# package require cli::debug
. "${CLILIB:?}/lib/cli/debug"
## require NAMESPACE
## sources file scope making it available in the interpreter
##
# Validate that each requested module exists on disk (foo::bar maps to
# $CLILIB/lib/foo/bar), accumulating the names in REQUIREMENTS.
# NOTE(review): the modules are only checked/recorded here, not sourced, and
# REQUIREMENTS is local to this function -- confirm where loading happens.
package::require() {
local REQUIREMENTS
local MODULE
set -e
while [ $# -gt 0 ]; do
MODULE="$1"
# Map the :: namespace separator to a directory separator for the lookup.
if [ -f "${CLILIB}/lib/`echo $MODULE | sed -r 's/::/\//g'`" ]; then
REQUIREMENTS="$REQUIREMENTS $1"
else
error "require: could not find lib %s" "$1"
fi
shift
done
set +e
}
debug func $LINENO function 'cli::command::context() created.'
# Entry point: dispatch "package <subcommand> ..."; only `require` exists.
package() {
SUBCOMMAND="${1:?}" ; shift
case $SUBCOMMAND in
require) package::require "$@" ;;
*) error 'unknown subcommand "%s" must be: require.' "$SUBCOMMAND" ;;
esac
}
debug func $LINENO function 'cli::command::context() created.'
| true
|
633697172d5dcf65e8d8b4bc26b639f84a9de7df
|
Shell
|
gutek9/scripts_tools
|
/k8s_scripts/ext_ips.sh
|
UTF-8
| 474
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Add an additional ("alias") IPv4 address to eth0 on a Debian-style system:
# append a new eth0:N stanza to /etc/network/interfaces and bring it up.
# Usage: sudo ./ext_ips.sh <ip-address>
# Fixes vs. original: `exit -1` replaced with the valid status 1, the useless
# `cat | grep` removed, and variable expansions quoted.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root or using sudo"
  exit 1
fi
IP=$1
NETMASK="255.255.252.0"
IFACES_FILE="/etc/network/interfaces"
# Find the highest existing alias index (eth0:N); the next stanza uses N+1.
LAST_IFACE=$(grep "auto eth0:" "$IFACES_FILE" | cut -d ":" -f2 | sort -nr | head -1)
NEXT_IFACE=$((LAST_IFACE+1))
echo "Adding $IP..."
echo -e "\nauto eth0:$NEXT_IFACE\niface eth0:$NEXT_IFACE inet static\n\taddress $IP\n\tnetmask $NETMASK" >> "$IFACES_FILE"
ifup "eth0:$NEXT_IFACE"
echo "Done."
| true
|
5d70b817c295dcb6eaabc4bb07f54e9360006110
|
Shell
|
wbgalvao/tracking_server
|
/scripts/cassandra-init.sh
|
UTF-8
| 440
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
# One-shot Cassandra bootstrap for the tracking server: in the background,
# retry the schema CQL until cqlsh can reach the node, then hand control to
# the stock container entrypoint. $KEYSPACE and $TABLE come from the
# container environment.
CQL="CREATE KEYSPACE IF NOT EXISTS $KEYSPACE WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;
CREATE TABLE IF NOT EXISTS $KEYSPACE.$TABLE (id uuid, username text, target text, description text, timestamp timestamp, PRIMARY KEY (id));"
# Retry in the background (note trailing &) so the server can start in the
# foreground via the exec below.
until echo $CQL | cqlsh; do
  echo "cqlsh: Cassandra is unavailable to initialize - will retry later"
  sleep 2
done &
exec /docker-entrypoint.sh "$@"
| true
|
65eb6783283b3b8b6fdb2d39b3657eff716ea2ac
|
Shell
|
artudi54/ShellRc
|
/deployment/configure/components/components.sh
|
UTF-8
| 401
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Install configuration for every component found under the given directory.
# A component is any subdirectory of $1 that provides an install.sh script;
# each such script is sourced in the current shell.
if [[ $# -ne 1 ]]; then
    echo "install: invalid number of arguments passed" 1>&2
    exit 1
fi
scan_dir="$1"
for component_dir in "$scan_dir"/*; do
    installer="$component_dir/install.sh"
    # Skip entries that do not ship an installer.
    [[ -f "$installer" ]] || continue
    echo "installing config for $(basename "$component_dir")"
    source "$installer"
done
| true
|
09234ca033ac54c78d91090a80a4bba231cf0ccb
|
Shell
|
tungtran3012/Linux2018
|
/W07/GiaiThua.sh
|
UTF-8
| 256
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# GiaiThua ("factorial"): print "N! = <value>" for a non-negative integer N.
# Fixes vs. original: the legacy `expr` arithmetic is replaced with $(( )),
# the broken message "$1 != $g" (which printed e.g. "4 != 24", reading as
# "not equal") is corrected to "N! = value" for all inputs, and negative
# input is rejected explicitly instead of silently printing nothing.
function GiaiThua()
{
    local n=$1
    if [ "$n" -lt 0 ]; then
        echo "GiaiThua: argument must be >= 0" >&2
        return 1
    fi
    # 0! and 1! fall out naturally: the loop body never runs.
    local result=1
    local i
    for ((i = 2; i <= n; i++)); do
        result=$((result * i))
    done
    echo "$n! = $result"
}
GiaiThua 4
| true
|
874235f3a3aeb66a29ce4ae391fb57b27c3fbccf
|
Shell
|
aparnaank/chaos-monkey-test-framework
|
/chaotic-patterns/instance-app-failures/instanceAppFailures.sh
|
UTF-8
| 807
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Chaos-testing helper for a WSO2 server instance.
#   -p <path> -o <op>   run <path>/bin/wso2server.sh <op> (start/stop/restart)
#   -k killSvr          kill -9 the running server JVM
while getopts k:p:o: option
do
case "${option}"
in
p) path=${OPTARG};;
k) killSvr=${OPTARG};;
o) operation=${OPTARG};;
esac
done
# Invoke the server control script with the requested operation.
function serverOperations(){
echo $operation
echo $path
sh $path/bin/wso2server.sh $operation
}
# Hard-kill the server JVM; jps reports the WSO2 process as "Bootstrap".
function serverKill(){
PID=`jps | grep Bootstrap | awk '{ print $1 }'`
kill -9 ${PID}
}
# NOTE(review): help_message is defined but never invoked -- confirm intended.
function help_message() {
echo ""
echo "Restarting server"
echo "./makeStress.sh -p [instance path] -t [duration N ]"
echo ""
echo ""
echo "Kill server"
echo "./makeStress.sh -k killSvr"
echo ""
exit
}
if [ "$killSvr" == "killSvr" ];
then
#log "INFO" "Stressing CPU tests running $CPU"
serverKill
fi
if [ "$path" != "" ] && [ "$operation" != "" ];
then
#log "INFO" "Stressing CPU tests running $CPU"
serverOperations
fi
| true
|
4e81d249cbd732e2e081d1628843640c7b14117b
|
Shell
|
jannewulf/dotfiles.global
|
/configs/profile
|
UTF-8
| 533
| 3.171875
| 3
|
[] |
no_license
|
# ~/.profile has the stuff NOT specifically related to bash, such as environment variables (PATH and friends)
# Anything that should be available to graphical applications OR to sh (or bash invoked as sh) MUST be in ~/.profile
# Anything that should be available only to login shells should go in ~/.profile
# https://superuser.com/a/789465
# Prepend the user's private bin directories to PATH when they exist.
if [ -d "$HOME/bin" ]; then
    PATH="$HOME/bin:$PATH"
fi
if [ -d "$HOME/.local/bin" ]; then
    PATH="$HOME/.local/bin:$PATH"
fi
# Source machine-local overrides last so they can win over the defaults above.
if [ -f ~/.profile.local ]; then
    . ~/.profile.local
fi
| true
|
e2309d645a32690c9c7995b4f62bd128a0e2766a
|
Shell
|
jake-stewart/funtime_projects
|
/2020/Bash/calculator.sh
|
UTF-8
| 586
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Simple interactive four-function calculator built on bc.
# Fixes vs. original:
#   - "# !/bin/bash" was not a valid shebang.
#   - `if choice = '5'` tried to execute a command named "choice"; it is now
#     a proper string comparison.
#   - `results= echo ... | bc` assigned an empty string and printed the bc
#     output instead of capturing it; command substitution is used now.
#   - `$a \* $b` sent a literal backslash to bc, which is a syntax error.
clear
echo "Choose one of the following."
echo ""
echo "1. Addition"
echo "2. Subtraction"
echo "3. Multiplication"
echo "4. Division"
echo ""
read -p "Choice: " choice
clear
if [ "$choice" = '5' ]; then
    exit 1
fi
read -p 'Enter your first number: ' a
read -p 'Enter your second number: ' b
clear
case $choice in
    1) results=$(echo "$a + $b" | bc) ;;
    2) results=$(echo "$a - $b" | bc) ;;
    3) results=$(echo "$a * $b" | bc) ;;
    4) results=$(echo "scale=2; $a / $b" | bc) ;;
    *) results="Choose valid number" ;;
esac
echo "Result : $results"
| true
|
ffe5adf89ec59de8082adcb651f25005028ab932
|
Shell
|
crosser/pdns-pipe-nmc
|
/build
|
UTF-8
| 754
| 3.140625
| 3
|
[
"Zlib"
] |
permissive
|
#!/bin/sh
# Build pdns-pipe-nmc with cabal and produce a versioned, GPG-signed binary
# named pdns-pipe-nmc.linux-glibc6.<arch>.<date>.git-<describe>.
file="pdns-pipe-nmc"
sfx="linux-glibc6.`uname -m`.`date +%Y-%m-%d`.git-`git describe`"
echo "Making $file.$sfx"
# tried this:
#ghc --make -static -optc-static -optl-shared-libgcc -optl-static \
#    -optl-pthread -pgml g++ "$file"
# but cannot make it "only glibc is shared". And with static glibc,
# getaddrinfo does not work on systems with different glibc.
#
# To build with ghc, run this:
#ghc --make -optP-include -optPdist/build/autogen/cabal_macros.h "$file"
# but it is still necessary to run `cabal configure` before to generate
# the file `dist/build/autogen/cabal_macros.h`.
cabal build
# Abort on any non-zero cabal exit status.
case $? in
0) ;;
*) echo build falied; exit 1 ;;
esac
# Rename the built binary to its versioned name and detach-sign it.
mv "dist/build/$file/$file" "$file.$sfx" || exit 1
gpg --detach-sign "$file.$sfx"
| true
|
5d1df6bc6b47c9a07dfcb9e6d952fdc6824c8a1f
|
Shell
|
gwindlord/uber-saber
|
/aosp_r35_r36.sh
|
UTF-8
| 1,872
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# patching android-5.1.1_r34 to android-5.1.1_r36
# taking changes from log http://www.androidpolice.com/android_aosp_changelogs/android-5.1.1_r34-to-android-5.1.1_r35-AOSP-changelog.html
# and results of own android-5.1.1_r35 to android-5.1.1_r36 investigation
# Usage: aosp_r35_r36.sh <path-to-local-aosp-checkout>
# For each affected project: (re)add the upstream AOSP remote, fetch it,
# cherry-pick the relevant fix commits, then remove the remote again.
LOCAL_REPO="$1"
if [[ "$#" != "1" ]]; then
echo "usage: $0 LOCAL_REPO" >&2
exit 1
fi
# errors on
set -e
# platform/build fixes
pushd "$LOCAL_REPO/build"
[ $(git remote | egrep \^aosp_build) ] && git remote rm aosp_build
git remote add aosp_build https://android.googlesource.com/platform/build
git fetch aosp_build
git cherry-pick fa1323c3b38495606fba31518e552faec530b199
git cherry-pick fafe789699316238ce8755f92ae0a1a9aa79c6ac
git cherry-pick c56338d6b3871b1cd0f261c4911d4a850c00d86d
git remote rm aosp_build
popd
# platform/frameworks/av fixes
pushd "$LOCAL_REPO/frameworks/av"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/av
git fetch aosp
git cherry-pick 6ad0c98cb9ae119156b264a7532b1e0cc701e0d8
git cherry-pick 5a6788730acfc6fd8f4a6ef89d2c376572a26b55
git cherry-pick fe84c20143f95ad1d4e0203a6a11abb772efdef0
git cherry-pick b862285d2ac905c2a4845335d6a68a55135f6260
git remote rm aosp
popd
# platform/frameworks/native fixes
pushd "$LOCAL_REPO/frameworks/native"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/native
git fetch aosp
git cherry-pick b8a86fe81c0da124d04630b9b3327482fef6220a
git cherry-pick d9b370cf4d8cbc5972023bd9dde2174d9d965191
git remote rm aosp
popd
# platform/frameworks/opt/telephony fixes
pushd "$LOCAL_REPO/frameworks/opt/telephony"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/opt/telephony
git fetch aosp
git cherry-pick 572af2dd8148fd6b24b1c8a0bf2ff769015ba2db
git remote rm aosp
popd
| true
|
b63e4292b9ac5e1487af7cf3a2e74e3431849572
|
Shell
|
evandropaes/dotfiles
|
/scripts/os/preferences/macos/finder.sh
|
UTF-8
| 4,505
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Apply macOS Finder preferences via `defaults` and PlistBuddy, then restart
# Finder (and the cfprefsd preferences cache) so the changes take effect.
# The second argument of each `execute` call is the user-facing description
# (in Portuguese) printed by the helper sourced below.
cd "$(dirname "${BASH_SOURCE[0]}")" \
    && . "../../utils.sh"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print_in_purple "\n   Finder\n\n"
execute "defaults write com.apple.frameworks.diskimages auto-open-ro-root -bool true && \
    defaults write com.apple.frameworks.diskimages auto-open-rw-root -bool true && \
        defaults write com.apple.finder OpenWindowForNewRemovableDisk -bool true" \
    "Abra automaticamente uma nova janela do Finder quando um volume é montado"
execute "defaults write com.apple.finder _FXShowPosixPathInTitle -bool true" \
    "Use o caminho POSIX completo como título da janela"
execute "defaults write com.apple.finder DisableAllAnimations -bool true" \
    "Desativar todas as animações"
execute "defaults write com.apple.finder WarnOnEmptyTrash -bool false" \
    "Desative o aviso antes de esvaziar o Lixo"
execute "defaults write com.apple.finder FXDefaultSearchScope -string 'SCcf'" \
    "Procurar no diretório atual por padrão"
execute "defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false" \
    "Desativar aviso ao alterar uma extensão de arquivo"
execute "defaults write com.apple.finder FXPreferredViewStyle -string 'Nlsv'" \
    "Use a vista de lista em todas as janelas do Finder por padrão"
execute "defaults write com.apple.finder NewWindowTarget -string 'PfDe' && \
    defaults write com.apple.finder NewWindowTargetPath -string 'file://$HOME/Desktop/'" \
    "Defina 'Desktop' como o local padrão para novas janelas do Finder"
execute "defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true && \
    defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true && \
        defaults write com.apple.finder ShowMountedServersOnDesktop -bool true && \
            defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true" \
    "Mostre ícones para discos rígidos, servidores e mídia removível na área de trabalho"
# execute "defaults write com.apple.finder ShowRecentTags -bool false" \
#     "Não mostra as tags atuais"
# execute "defaults write -g AppleShowAllExtensions -bool true" \
#     "Mostre as extensões de nome de arquivo"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:iconSize 72' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:iconSize 72' ~/Library/Preferences/com.apple.finder.plist" \
    "Defina o tamanho do ícone"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:gridSpacing 1' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:gridSpacing 1' ~/Library/Preferences/com.apple.finder.plist" \
    "Definir o tamanho do espaçamento da grade do ícone"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:textSize 13' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:textSize 13' ~/Library/Preferences/com.apple.finder.plist" \
    "Definir o tamanho do texto da etiqueta do ícone"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:labelOnBottom true' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:labelOnBottom true' ~/Library/Preferences/com.apple.finder.plist" \
    "Definir posição do rótulo do ícone"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:showItemInfo true' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:showItemInfo true' ~/Library/Preferences/com.apple.finder.plist" \
    "Mostrar item info"
execute "/usr/libexec/PlistBuddy -c 'Set :DesktopViewSettings:IconViewSettings:arrangeBy none' ~/Library/Preferences/com.apple.finder.plist && \
    /usr/libexec/PlistBuddy -c 'Set :StandardViewSettings:IconViewSettings:arrangeBy none' ~/Library/Preferences/com.apple.finder.plist" \
    "Definir o metódio de Sort"
killall "Finder" &> /dev/null
# Starting with Mac OS X Mavericks, preferences are cached, so for the
# settings applied via `PlistBuddy` to stick, the `cfprefsd` process also
# needs to be killed.
killall "cfprefsd" &> /dev/null
| true
|
6b2a8793b495f4a9a7937f2f88fbea2d0b560eda
|
Shell
|
libpcp/pcp
|
/test_coverage.sh
|
UTF-8
| 623
| 2.765625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Build libpcp with gcov instrumentation, run the test suite, and generate an
# lcov HTML coverage report under test_coverage/report/, then try to open it.
# Fixes vs. original: "#/bin/sh" was not a shebang at all (and the script
# uses the bash-only [[ ]] construct, so bash is the right interpreter),
# `rm test_coverage.tmp -rf` put options after the operand (non-portable),
# and `which` is replaced by the POSIX `command -v`.
rm -rf test_coverage.tmp
mkdir test_coverage.tmp
#[ -f configure.ac.orig ] || cp configure.ac configure.ac.orig
#sed -i 's/subdir-objects//' configure.ac
./autogen.sh
cd test_coverage.tmp
CPPFLAGS="-DNDEBUG -DPCP_MAX_LOG_LEVEL=5" CFLAGS="-O0 -g" ../configure --enable-gcov
make check
# This counter file would skew the report, so drop it before collection.
rm pcp_app/pcp-pcp_app.gcda
lcov -c --directory . --output-file info && genhtml -o report/ info && cd .. && rm -rf test_coverage && mv test_coverage.tmp test_coverage
URL=test_coverage/report/index.html
# Open the report: prefer $BROWSER, then fall back to a known opener.
[[ -x $BROWSER ]] && exec "$BROWSER" "$URL"
path=$(command -v xdg-open || command -v gnome-open || command -v open) && exec "$path" "$URL"
| true
|
34a71de2ea82ec05347e645f8186982a20fefb23
|
Shell
|
pavanyendluri588/ubuntu
|
/shell.sh
|
UTF-8
| 856
| 2.78125
| 3
|
[] |
no_license
|
# Convenience script: stage, inspect, commit, and push everything in the
# current repository to origin/master, printing progress banners along the way.
# NOTE(review): the commit message has a hard-coded date -- confirm whether it
# should be generated with $(date) instead.
echo "executing git add . command "
sudo git add .
echo "git add command is successfully executed"
echo "==================================================================================================="
echo "checking ststus..... by using git status command"
sudo git status
echo "git status command is successfully executed"
echo "==================================================================================================="
echo "creating upload"
sudo git commit -m "29/6/2021 upload"
echo "upload is successfully created"
echo "===================================================================================================="
echo "uploading into github"
sudo git push origin master
echo "#####################################################################################################"
echo "pushing is completed"
|
3e57d5bd89241829064ec300de8bb454fb12a919
|
Shell
|
cbsd/reggae
|
/scripts/service.sh
|
UTF-8
| 1,251
| 3.71875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Reggae registration service: listen on a UNIX socket for lines of the form
#   "register|unregister ipv4|ipv6 <ip> <fqdn>"
# and maintain the pf tables `reggae` (IPv4) and `reggae6` (IPv6) accordingly,
# flushing the FQDN from the local unbound DNS cache after each change.
# Usage: service.sh <directory to put socket in> <pid file>
set -e
SOCKET_DIR=${1}
PID_FILE=${2}
if [ -z "${SOCKET_DIR}" -o -z "${PID_FILE}" ]; then
	echo "Usage: $0 <directory to put socket in> <pid file>" >&2
	exit 1
fi
SOCKET="${SOCKET_DIR}/reggae.sock"
MYPID=""
# On any exit, stop the background listener and remove the socket.
cleanup() {
	pkill -P ${MYPID}
	rm -rf ${SOCKET}
}
trap cleanup HUP INT ABRT BUS TERM EXIT
# pf table helpers: add/remove an address from the v4/v6 whitelist tables.
register_v4() {
	pfctl -t reggae -T add $1
}
register_v6() {
	pfctl -t reggae6 -T add $1
}
unregister_v4() {
	pfctl -t reggae -T delete $1
}
unregister_v6() {
	pfctl -t reggae6 -T delete $1
}
# Main loop: nc keeps the socket open (-k) and feeds one request per line.
run() {
	rm -rf "${SOCKET}"
	/usr/bin/nc -k -l -U "${SOCKET}" | while read action inet ip fqdn; do
		if [ "${action}" = "register" ]; then
			if [ "${inet}" = "ipv4" ]; then
				register_v4 ${ip}
			elif [ "${inet}" = "ipv6" ]; then
				register_v6 ${ip}
			fi
		elif [ "${action}" = "unregister" ]; then
			if [ "${inet}" = "ipv4" ]; then
				unregister_v4 ${ip}
			elif [ "${inet}" = "ipv6" ]; then
				unregister_v6 ${ip}
			fi
		fi
		# Only flush DNS when an FQDN was actually supplied.
		first=$(echo ${fqdn} | cut -f 1 -d '.')
		if [ ! -z "${first}" ]; then
			/usr/sbin/local-unbound-control flush ${fqdn}
		fi
	done
}
run &
MYPID=$!
echo $$ >"${PID_FILE}"
# Give nc a moment to create the socket before adjusting its ownership.
sleep 0.3
chmod g+w "${SOCKET}"
chown root:216 "${SOCKET}"
wait
| true
|
165744490faa9f780ddbfc465ab6ea2656c38d84
|
Shell
|
Aweponken/dj-scripts
|
/scripts/rename_files_from_id3_tag.sh
|
UTF-8
| 1,730
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
# This script renames MP3 files to a name that matches a pattern.
# The pattern elements can be found here: https://eyed3.readthedocs.io/en/latest/plugins/display_plugin.html#pattern-elements
# Usage: ./rename_files_from_id3_tag.sh /Users/Me/Music/ "%title%_%artist%"
# Note: This script requires eyeD3 and grako, which can be install with: pip3 install eyeD3 grako
# Note2: eyeD3 requires python >= 3.6
# If less than two arguments supplied, display usage message
if [ $# -lt 2 ]; then
	echo -e "\nUsage: ./remove_files.sh <search_folder> <pattern> \n"
	exit 1
fi
# Verify that eyed3 and grako is installed
if ! command -v eyed3 &> /dev/null; then
	echo "eyed3 could not be found. Please install it with: pip3 install eyed3"
	exit 1
elif ! command -v grako &> /dev/null; then
	echo "grako could not be found. Please install it with: pip3 install grako"
	exit 1
fi
# Strip backslashes from the supplied folder path (e.g. escaped spaces).
SEARCH_FOLDER=$(echo "${1//\\}")
PATTERN=$2
EXPRESSION="*.mp3"
echo "Searching folder $SEARCH_FOLDER"
# -print0 / read -d '' keep filenames with spaces or newlines intact.
find "$SEARCH_FOLDER" -not -path '*/\.*' -name "$EXPRESSION" -print0 | while read -d $'\0' FILE
do
	echo "Renaming $FILE"
	DIR_OF_FILE=$(dirname "${FILE}")
	RAW_NEW_FILE_NAME=$(eyed3 --plugin display --pattern "$PATTERN" "$FILE") # Create file name based on the pattern
	NEW_FILE_NAME=$(echo "${RAW_NEW_FILE_NAME//\/}") # Remove any slashes in the new file name
	NEW_FILE_NAME="${NEW_FILE_NAME#"${NEW_FILE_NAME%%[![:space:]]*}"}" # Remove leading whitespace characters
	NEW_FILE_NAME="${NEW_FILE_NAME%"${NEW_FILE_NAME##*[![:space:]]}"}" # Remove trailing whitespace characters
	if ! mv "$FILE" "$DIR_OF_FILE/$NEW_FILE_NAME.mp3"; then
		echo -e "\nRename failed!\n"
		exit 1
	fi
done
echo -e "\nRename completed!\n"
| true
|
8bef54c81309ca4932586c31e8759070279541db
|
Shell
|
fullstackdev427/Project-tutorial
|
/scripts/install_httpd
|
UTF-8
| 763
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure Apache (httpd) to serve a Laravel app from
# /var/www/phplaravel/public, and grant the apache group read access to the
# ec2-user home directory plus write access to Laravel's runtime dirs.
# Fix vs. original: the third sed targeted "/etc/httpd.conf/httpd.conf",
# a non-existent path; it now edits /etc/httpd/conf/httpd.conf like the
# other two sed invocations.
# Install apache
yum -y install httpd-2.4.48
# Set file and folder permissions
chgrp -R apache /home/ec2-user/
find /home/ec2-user/ -type d -exec chmod g+rx {} +
find /home/ec2-user/ -type f -exec chmod g+r {} +
chown -R ec2-user /home/ec2-user/
find /home/ec2-user/ -type d -exec chmod u+rwx {} +
find /home/ec2-user/ -type f -exec chmod u+rw {} +
# Point Apache at the Laravel public directory, allow .htaccess overrides,
# and make index.php the directory index.
sed -i "s|AllowOverride None|AllowOverride All|g" /etc/httpd/conf/httpd.conf
sed -i "s|/var/www/html|/var/www/phplaravel/public|g" /etc/httpd/conf/httpd.conf
sed -i "s|index.html|index.php|g" /etc/httpd/conf/httpd.conf
cd /var/www/phplaravel
chown -R ec2-user:ec2-user /var/www/phplaravel
useradd -G ec2-user apache
# Laravel needs these writable by the web server at runtime.
chgrp -R apache storage bootstrap/cache
chmod -R ug+rwx storage bootstrap/cache
| true
|
aa0c0b7c4ebdcc6c992c3ecabfe4aa64ec9c8987
|
Shell
|
mayank64857/GitJenkins
|
/Jenkins.sh
|
UTF-8
| 346
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Print disk-usage information for each mounted filesystem, excluding the
# df header line, tmpfs, and cdrom entries.
echo "hello"
df -H | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }' | while read output;
do
  echo $output
  # usep = numeric use% of this filesystem, partition = its device name.
  usep=$(echo $output | awk '{ print $1}' | cut -d'%' -f1 )
  partition=$(echo $output | awk '{ print $2 }' )
  # NOTE(review): the message always says "/root" and never uses $partition --
  # confirm whether the device name was meant to appear here instead.
  echo "Disk Space is fine total $usep percent used in /root on $(hostname -i) as on $(date)"
done
| true
|
7a2a28684b9cb110918402ae485cef2d257f4c0d
|
Shell
|
misiyu/base_shell_demo
|
/example/4.sh
|
UTF-8
| 473
| 3.171875
| 3
|
[] |
no_license
|
pips=6
max=1000
throw=1
one=0
two=0
three=0
four=0
five=0
six=0
# Tally a single die roll; the argument is a zero-based face index (0-5).
count()
{
    case "$1" in
        0) one=$((one + 1)) ;;
        1) two=$((two + 1)) ;;
        2) three=$((three + 1)) ;;
        3) four=$((four + 1)) ;;
        4) five=$((five + 1)) ;;
        5) six=$((six + 1)) ;;
    esac
}
# Roll the die $max times, recording each outcome.
while [ "$throw" -le "$max" ]
do
    dice=$((RANDOM % pips))
    count $dice
    throw=$((throw + 1))
done
# Report how often each face came up.
echo "one=$one"
echo "two=$two"
echo "three=$three"
echo "four=$four"
echo "five=$five"
echo "six=$six"
| true
|
0f4074ebeedbade06ae00e4bc35a552fb589ab38
|
Shell
|
tanaka3622/bruby
|
/bash_to_ruby/if/ge.sh
|
UTF-8
| 133
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
var1=2
var2=2
# Report whether var2 >= var1 (the messages are in Japanese:
# "var2 is at least var1" / "var2 is smaller than var1").
if (( var2 >= var1 )); then
    echo "var2はvar1以上"
else
    echo "var2はvar1より小さい"
fi
| true
|
14c70ee02a7314e21785717905d7b14810655242
|
Shell
|
xet7/shails
|
/SHac/base.sh
|
UTF-8
| 939
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
# Redirect the browser to the URL built by url_for for the given arguments.
function base_controller::class.redirect_to() {
  local class=$1
  local url=$(url_for "$@")
  logger.debug "redirect to $url"
  CGI.redirect "$url"
}
# Dispatch a controller action and, unless something already rendered during
# this request, render the matching app/views/<controller>/<action>.ebash
# template exactly once.
function base_controller::class.render() {
  local class=$1
  local action
  shift
  local e
  # Parse ":action=<name>" style keyword arguments.
  for e; do
    case "$e" in
      :action=*)
        action=${e#:action=}
        ;;
    esac
  done
  logger.debug "call ${class}.${action}"
  ${class}.${action}
  # SHac_RENDER_DONE guards against rendering twice for one request.
  if [[ -n "$SHac_RENDER_DONE" ]]; then return; fi
  SHac_RENDER_DONE=1
  local file="app/views/${class%_controller}/${action}.ebash"
  if [[ -e "$file" ]]; then
    logger.debug "render file $file"
    controller::render_file "$file"
  fi
}
## Internal functions
# Emit the HTTP content-type header and evaluate the ebash template to stdout.
function controller::render_file() {
  local file=$1
  local tmpl
  CGI.header 'Content-type: text/html'
  ebash.new tmpl "$file"
  tmpl.result
  tmpl.delete
}
# make base_controller class
Object.extend base_controller
| true
|
d9d9fb9476e94a17632fe67f634865edd3171456
|
Shell
|
bestephe/res-sim
|
/run_missing.sh
|
UTF-8
| 695
| 3.890625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Re-run a list of missing/failed simulation commands: the input file holds
# one shell command per line; they are executed NUMPROC at a time under a
# soft virtual-memory ulimit.
###########################
# Parse arguments
###########################
usage()
{
    echo "usage: $0 missing.txt"
    exit 1
}
if [ $# -ne 1 ]
then
    usage
fi
###########################
# Clean exit
###########################
cleanup()
# example cleanup function
{
    kill $(jobs -p)
    return $?
}
control_c()
# run if user hits control-c
{
    echo -en "\n*** Ouch! Exiting ***\n"
    # NOTE(review): $? here is the status of the echo above, so this exits 0
    # on Ctrl-C -- confirm whether a non-zero status (e.g. 130) was intended.
    exit $?
}
# trap keyboard interrupt (control-c)
trap control_c SIGINT
# kill background jobs
trap cleanup EXIT
###########################
# Run the program
###########################
NUMPROC=4
ulimit -Sv 300000 # 3GB virtual memory limit
# Execute each line of the input file as its own bash command, NUMPROC of
# them in parallel.
cat $1 | xargs -I CMD -P $NUMPROC bash -c CMD
| true
|
7cba17c70bec18b7a8ff25e41bfe4e792888d93d
|
Shell
|
ssh352/events_stuff_main
|
/code/elements/scripts/make_release.sh
|
UTF-8
| 434
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build click_trader and package a versioned release tarball
# click_trader.<version>.tar.gz containing configs, protobuf definitions,
# scripts, and the built binary.
# Usage: make_release.sh <version>
# Fixes vs. original: the version argument is validated (previously an empty
# $1 silently produced "click_trader."), and all path expansions are quoted.
version=$1
if [ -z "$version" ]; then
    echo "usage: $0 <version>" >&2
    exit 1
fi
./clean
./make
root="click_trader.${version}"
mkdir "$root"
# Runtime configuration and protobuf files.
cp click_trader.param "$root"
cp cme_md_config.properties "$root"
cp field_mask "$root"
cp oms_config.properties "$root"
cp params.proto "$root"
cp params_pb2.py "$root"
cp user.proto "$root"
cp user_pb2.py "$root"
# Helper scripts and the built binary.
mkdir "$root/build"
mkdir "$root/scripts"
cp ./scripts/* "$root/scripts"
cp ./build/click_trader "$root/build"
# Archive and remove the staging directory.
tar -zcvf "$root.tar.gz" "$root"
rm -rf "$root"
| true
|
8e6ef0f272ee570f81436a33928e126b50060744
|
Shell
|
hewenhao2008/boy
|
/wiware/bin/addwhitemac.sh
|
UTF-8
| 640
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# $1 - the mac
# Whitelist a client MAC address for the captive portal: persist it in UCI
# config (wipark.user.white) and insert a NAT PREROUTING accept rule for it.
. /wiware/bin/wicore.sh
# Keep the argument only if it looks like a colon-separated MAC address.
mac=$(echo $1 |sed -r '/[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}/!d')
if [ "x$mac" != "x" ]; then
	# Skip MACs that are already on the whitelist.
	existed=$(uci get wipark.user.white 2>/dev/null|grep $mac|wc -l)
	if [ $existed -eq 0 ]; then
		# Create the wipark.user section on first use.
		if [ "x$(uci get wipark.user 2>/dev/null)" = "x" ]; then
			uci set wipark.user=wipark
		fi
		uci add_list wipark.user.white=$mac
		uci commit
		# Delete first so repeated runs do not stack duplicate rules.
		iptables -t nat -D PREROUTING -m mac --mac-source $mac -j ACCEPT 2>/dev/null
		iptables -t nat -I PREROUTING -m mac --mac-source $mac -j ACCEPT
		wipark_log "Added mac:$mac to white mac list"
	fi
fi
| true
|
b0f727b552d30b4822e27b5ec5c87b20e2718988
|
Shell
|
Marvin9/longago
|
/scripts/test.sh
|
UTF-8
| 256
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the Go test suite with UPLOAD_STORAGE pointing at a writable directory.
# In CI the storage path is forced to /tmp; locally it is read from .env.
# https://unix.stackexchange.com/a/452142
if [ "${CI+1}" ]
then
    echo "CI environment"
    # Fix: the original wrote `$UPLOAD_STORAGE="/tmp"`, which expands the
    # (empty) variable and tries to execute a command named `="/tmp"` instead
    # of performing an assignment.
    UPLOAD_STORAGE="/tmp"
else
    echo "Local environment"
    . ./.env
fi
echo $UPLOAD_STORAGE
echo "\n"
UPLOAD_STORAGE=$UPLOAD_STORAGE go test ./...
| true
|
b8088ce41ecbe02046aaa0813438da5ec47fa7cb
|
Shell
|
BellRampion/documentationWriting
|
/processWholeFolderFindMethodsPython.sh
|
UTF-8
| 1,070
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright 2019 by Bailie Livingston.
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this script. If not, see <https://www.gnu.org/licenses/>.
#**Script to run a command (or set of commands) against an entire folder's files
#NOTE: This script tries once if the folder is empty with a false filename
#Dir path must be hard-coded without quotes
# Runs ./findMethodsPython on every .py file in the hard-coded lifeloopweb
# helpers folder, writing results to findMethodsPythonOutput.txt.
for filename in /mnt/c/Users/SSIntern/Documents/lifeloopweb/lifeloopweb/helpers/*.py; do
    ./findMethodsPython "$filename" findMethodsPythonOutput.txt
done
| true
|
679e2b2b2fc6f406b08a99f6f0366babb6d063a1
|
Shell
|
daringway/trigger-workflow-and-wait
|
/entrypoint.sh
|
UTF-8
| 3,418
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
function usage_docs {
echo ""
echo "You can use this Github Action with:"
echo "- uses: convictional/trigger-workflow-and-wait"
echo " with:"
echo " owner: keithconvictional"
echo " repo: myrepo"
echo " github_token: \${{ secrets.GITHUB_PERSONAL_ACCESS_TOKEN }}"
}
# TODO - Add client_payload
function validate_args {
wait_interval=10
if [ "$INPUT_WAITING_INTERVAL" ]
then
wait_interval=$INPUT_WAITING_INTERVAL
fi
if [ -z "$INPUT_OWNER" ]
then
echo "Error: Owner is a required arugment."
usage_docs
exit 1
fi
if [ -z "$INPUT_REPO" ]
then
echo "Error: Repo is a required arugment."
usage_docs
exit 1
fi
if [ -z "$INPUT_GITHUB_TOKEN" ]
then
echo "Error: Github token is required. You can head over settings and"
echo "under developer, you can create a personal access tokens. The"
echo "token requires repo access."
usage_docs
exit 1
fi
event_type="ping"
if [ "$INPUT_EVENT_TYPE" ]
then
event_type=$INPUT_EVENT_TYPE
fi
client_payload="{}"
if [ "$INPUT_CLIENT_PAYLOAD" ]
then
client_payload=$INPUT_CLIENT_PAYLOAD
fi
ref="master"
if [ $INPUT_REF ]
then
ref=$INPUT_REF
fi
}
function trigger_workflow {
echo "https://api.github.com/repos/${INPUT_OWNER}/${INPUT_REPO}/dispatches"
curl --silent -X POST "https://api.github.com/repos/${INPUT_OWNER}/${INPUT_REPO}/dispatches" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer ${INPUT_GITHUB_TOKEN}" \
--data "{\"event_type\": \"${event_type}\", \"client_payload\": ${client_payload} }"
sleep $wait_interval
}
# Poll the checks API of the most recent check-run on $ref until its
# "conclusion" is no longer null, then post the result as a comment and
# exit 1 unless the conclusion is "success".
function wait_for_workflow_to_finish {
  # Find the id of the last build
  # NOTE(review): "first" check-run is assumed to be the one just
  # dispatched — a concurrent run on the same ref could be picked instead.
  last_run_id=$(curl --silent -X GET "https://api.github.com/repos/$INPUT_OWNER/$INPUT_REPO/commits/$ref/check-runs" \
    -H 'Accept: application/vnd.github.antiope-preview+json' \
    -H "Authorization: Bearer $INPUT_GITHUB_TOKEN" | jq '[.check_runs[].id] | first')
  echo "The job id is [$last_run_id]."
  last_run_html=$(curl --silent -X GET "https://api.github.com/repos/$INPUT_OWNER/$INPUT_REPO/commits/$ref/check-runs" \
    -H 'Accept: application/vnd.github.antiope-preview+json' \
    -H "Authorization: Bearer $INPUT_GITHUB_TOKEN" | jq '[.check_runs[].html_url] | first')
  echo "The dispatched action $last_run_html"
  echo ""
  # jq emits the literal string "null" while the run is still in progress.
  conclusion=$(curl --silent -X GET "https://api.github.com/repos/$INPUT_OWNER/$INPUT_REPO/check-runs/$last_run_id" -H 'Accept: application/vnd.github.antiope-preview+json' -H "Authorization: Bearer $INPUT_GITHUB_TOKEN" | jq '.conclusion')
  while [[ $conclusion == "null" ]]
  do
    sleep $wait_interval
    conclusion=$(curl --silent -X GET "https://api.github.com/repos/$INPUT_OWNER/$INPUT_REPO/check-runs/$last_run_id" -H 'Accept: application/vnd.github.antiope-preview+json' -H "Authorization: Bearer $INPUT_GITHUB_TOKEN" | jq '.conclusion')
    echo "Checking conclusion [$conclusion]"
  done
  echo "The dispatched action $last_run_html" > comment.txt
  echo "Status: $conclusion" >> comment.txt
  # NOTE(review): github_pr_comment is not defined in this script —
  # presumably supplied by the Action's runtime image; confirm.
  github_pr_comment comment.txt
  # jq output keeps its JSON quotes, hence the escaped "success" below.
  if [[ $conclusion == "\"success\"" ]]
  then
    echo "Yes, success"
  else
    # Alternative "failure"
    echo "Conclusion is not success, its [$conclusion]."
    exit 1
  fi
}
# Entry point: validate inputs, fire the dispatch, then poll until done.
main() {
  validate_args
  trigger_workflow
  wait_for_workflow_to_finish
}

main "$@"
| true
|
90c83beb63ca4a3f619f45289cbcfc83bf2336a8
|
Shell
|
tbrunz/installer_scripts
|
/src/installers/z-scripts/deprecated/install-ppa-hd5500.sh
|
UTF-8
| 1,540
| 3.46875
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#! /usr/bin/env bash
#
# ----------------------------------------------------------------------------
# Install the Intel HD5500 video driver from a PPA repository.
# ----------------------------------------------------------------------------
#
# Sources 'core-install.bash' for its helper functions (GetScriptName,
# GetOSversion, QualifySudo, PerformAppInstallation, InstallComplete).
INCLUDES="core-install.bash"
if [[ -f "${INCLUDES}" ]]; then source "${INCLUDES}"
else
    echo -n "$( basename "${0}" ): error: "
    echo    "Could not source the '${INCLUDES}' file ! "
    exit
fi
# NOTE(review): MAJOR/MINOR/DISTRO used below are presumably set by
# GetOSversion from core-install.bash — confirm.
GetScriptName "${0}"
GetOSversion
SET_NAME="Intel HD 5500 driver"
USAGE="
This package installs the latest driver for the Intel HD 5500 series chipset.
The Google Pixel (2015) laptop uses this chipset, which may have rendering
issues with the default driver, particularly regarding the mouse pointer,
when Ubuntu is built in a ChromeOS 'chroot' (using, e.g., 'crouton').
https://github.com/dnschneid/crouton/issues/1519
"
POST_INSTALL="
Note that in order for the new driver to take effect, you will need to
restart your 'chroot' after installation.
"
PACKAGE_SET="software-properties-common  python-software-properties
ppa-purge  "
REPO_NAME="${SET_NAME} (PPA)"
REPO_URL="https://download.01.org/gfx/ubuntu/${MAJOR}.${MINOR}/main"
REPO_GREP=".*01.org/gfx.*${DISTRO}"
# Add the PPA's GPG keys so that 'apt-get' won't complain:
#
QualifySudo
for KEYNUM in ilg ilg-2 ilg-3 ilg-4
do
    sudo wget --no-check-certificate \
        https://download.01.org/gfx/RPM-GPG-KEY-${KEYNUM} -O - | sudo apt-key add -
done
PerformAppInstallation "-r" "$@"
bash do-update-all.sh
InstallComplete
| true
|
a0d97e38190991e4f9d0ae6209dfc5341880bf95
|
Shell
|
ICRAR/bldr
|
/pkgs.orig/system/060-gpusd.sh
|
UTF-8
| 2,045
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
####################################################################################################
# import the BLDR system
####################################################################################################
source "bldr.sh"
####################################################################################################
# setup pkg definition and resource files
####################################################################################################
pkg_ctry="system"
pkg_name="gpusd"
pkg_default="1.4"
pkg_variants=("1.4")
pkg_info="Local and remote ZeroConf service discovery for GPU resources."
pkg_desc="Local and remote ZeroConf service discovery for GPU resources."
pkg_opts="cmake force-inplace-build"
pkg_uses="lunchbox"
pkg_reqs="lunchbox"
pkg_cflags=""
pkg_ldflags=""
pkg_cfg=""
# Directory name inside the extracted GitHub zipball (commit-suffixed).
pkg_cfg_path="Eyescale-gpusd-5b821a5"
####################################################################################################
# register each pkg version with bldr
####################################################################################################
# bldr_register_pkg is provided by the sourced bldr.sh.
for pkg_vers in ${pkg_variants[@]}
do
     pkg_file="$pkg_name-$pkg_vers.zip"
     pkg_urls="https://github.com/Eyescale/gpusd/zipball/$pkg_vers/$pkg_file"
     bldr_register_pkg                 \
          --category    "$pkg_ctry"    \
          --name        "$pkg_name"    \
          --version     "$pkg_vers"    \
          --default     "$pkg_default" \
          --info        "$pkg_info"    \
          --description "$pkg_desc"    \
          --file        "$pkg_file"    \
          --url         "$pkg_urls"    \
          --uses        "$pkg_uses"    \
          --requires    "$pkg_reqs"    \
          --options     "$pkg_opts"    \
          --cflags      "$pkg_cflags"  \
          --ldflags     "$pkg_ldflags" \
          --config      "$pkg_cfg"     \
          --config-path "$pkg_cfg_path"
done
####################################################################################################
####################################################################################################
| true
|
111502649b1c267d4f03d726de7228d174bfdf47
|
Shell
|
shesek/gdk
|
/tools/swig.sh
|
UTF-8
| 1,167
| 3.171875
| 3
|
[
"OpenSSL",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate the Java bindings (GDK.java + GDK.jar) from a SWIG interface.
#
# Positional arguments:
#   $1 - sed executable to use
#   $2 - output path for the generated JNI C wrapper
#   $3 - output directory for generated Java sources
#   $4 - SWIG interface (.i) input file
#   $5 - extra Java source fragment appended into GDK.java
#   $6 - wally JAR whose classes are merged into GDK.jar
#   $7 - Java language version (used for both -source and -target)
set -e
sed_exe=$1
result="$3/com/blockstream/libgreenaddress/GDK.java"
mkdir -p `dirname $result`
swig -java -noproxy -package com.blockstream.libgreenaddress -o $2 -outdir $3 $4
# Rename the generated GDKJNI class to GDK in the C wrapper.
$sed_exe -i 's/GDKJNI/GDK/g' $2
# Merge the constants and JNI interface into GDK.java
grep -v '^}$' $3/GDKJNI.java | $sed_exe 's/GDKJNI/GDK/g' >$result
grep 'public final static' $3/GDKConstants.java >>$result
cat $5 >>$result
echo '}' >>$result
# NOTE(review): both SOURCE and TARGET are taken from $7 — presumably
# intentional (same version for both); confirm.
JAVAC_SOURCE=$7
JAVAC_TARGET=$7
JAVAC_ARGS="-implicit:none -source $JAVAC_SOURCE -target $JAVAC_TARGET -sourcepath $3/com/blockstream/libgreenaddress/ $3/com/blockstream/libgreenaddress/GDK.java"
$JAVA_HOME/bin/javac $JAVAC_ARGS
# Unpack the wally JAR so its classes can be bundled into GDK.jar below.
tmp_wally_java_dir=`mktemp -d`
pushd . > /dev/null
cd $tmp_wally_java_dir
$JAVA_HOME/bin/jar xf $6
popd > /dev/null
$JAVA_HOME/bin/jar cf $3/GDK.jar -C $3 'com/blockstream/libgreenaddress/GDK$Obj.class' \
    -C $3 'com/blockstream/libgreenaddress/GDK$NotificationHandler.class' \
    -C $3 'com/blockstream/libgreenaddress/GDK$JSONConverter.class' \
    -C $3 'com/blockstream/libgreenaddress/GDK.class' \
    -C $tmp_wally_java_dir .
# Clean up
rm -f $3/*.java
rm -rf $tmp_wally_java_dir
| true
|
5d2adb5ae4a88b01aa7d15be474e2583ab260098
|
Shell
|
JamesYeoman/zsh-config
|
/modules/base/envs.zsh
|
UTF-8
| 962
| 3.296875
| 3
|
[] |
no_license
|
# zsh module: detect installed *env version managers (jenv, pyenv, ...)
# under $XDG_DATA_HOME, export their *_ROOT variables, and extend $path.
# XENV_COMMANDS collects the managers that need no PATH workaround.
XENV_COMMANDS=()

function exportEnvIfExists() {
  local pth="${XDG_DATA_HOME}/$1"
  if [[ -d "$pth" ]]; then
    # ${1:u} is zsh's uppercase modifier, e.g. "pyenv" -> PYENV_ROOT.
    eval "export ${1:u}_ROOT=\"${pth}\""
    # Avoid processing commands that have to use workarounds
    case $1 in
      goenv)
        # While goenv has moved $GOPATH/bin to the end of $PATH,
        # https://github.com/syndbg/goenv/issues/99#issuecomment-829783709
        # mentions that $GOENV_ROOT/shims is at the beginning of $PATH,
        # and it hasn't been fixed yet.
        ;;
      *)
        # zsh's $path array is tied to $PATH; += appends one element.
        path+="${pth}/bin"
        XENV_COMMANDS+="$1"
        ;;
    esac
  fi
}

exportEnvIfExists "jenv"
exportEnvIfExists "pyenv"
exportEnvIfExists "nodenv"
exportEnvIfExists "sbtenv"
exportEnvIfExists "scalaenv"
exportEnvIfExists "goenv"
export GOENV_GOPATH_PREFIX="${XDG_DATA_HOME}/go"
exportEnvIfExists "rbenv"
exportEnvIfExists "phpenv"

unset exportEnvIfExists
export XENV_COMMANDS
| true
|
5e23bcd66ecb26a818b8666514edbff294c523c4
|
Shell
|
fosskers/vagrant-docker
|
/bootstrap.sh
|
UTF-8
| 782
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioning script: installs a pinned Docker engine and
# docker-compose on Ubuntu. Runs as root.
apt-get update
# --- Docker Setup --- #
# https://docs.docker.com/engine/installation/linux/ubuntu/
apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
# Add docker repo
curl -fsSL https://yum.dockerproject.org/gpg | apt-key add -
apt-key fingerprint 58118E89F3A912897C070ADBF76221572C52609D
add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-$(lsb_release -cs) main"
apt-get update
# Pin the docker version
apt-get install -y docker-engine=1.12.6-0~ubuntu-xenial
# Allow non-root to use `docker`
usermod -aG docker ubuntu
# Docker Compose
# https://docs.docker.com/compose/install/
curl -L "https://github.com/docker/compose/releases/download/1.10.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
| true
|
5c3b8121fff036a5f552c34d6a42046d6b1155a8
|
Shell
|
DmitryYudin/vctest
|
/core/condor.sh
|
WINDOWS-1252
| 7,772
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2021 Dmitry Yudin. All rights reserved.
# Licensed under the Apache License, Version 2.0
#
#
# This script is for a sourcing, a seft-test is executed if running alone.
#
# Usage:
#
# . ./condor.sh
#
# # Set job name
# CONDOR_setBatchname $(date "+%Y.%m.%d-%H.%M.%S")
#
# for task in $tasks; do
#
# # Jump into a task-specific directory
# mkdir -p $dirOut/$task && cd $dirOut/$task
#
# # Create condor-task in push it into the queue
# CONDOR_makeTask "$executable" "$arguments" "$files" > task.sub
# CONDOR_submit task.sub > submit.log
#
# cd -
# done
#
# CONDOR_wait
#
set -eu

# CNDR_SELF_TEST is non-empty when the file is executed directly (rather
# than sourced); the self-test block at the bottom keys off it.
[[ "$(basename ${BASH_SOURCE-condor.sh})" == "$(basename $0)" ]] && CNDR_SELF_TEST=1 || CNDR_SELF_TEST=

# Name the current batch and write a helper "kill" script that removes
# every job of this batch via condor_rm. Must be called before
# CONDOR_submit/CONDOR_wait. Fails if condor is not installed.
CONDOR_setBatchname()
{
    if ! command -p -v condor_version >/dev/null; then
        echo "error: condor not found" >&2
        return 1
    fi
    export CNDR_batchname=$1; shift
    # '/' in the batch name is mapped to '_' for the killfile filename.
    export CNDR_killfile=$(pwd)/kill_session_${CNDR_batchname////_}.sh
    # The \\\" produces escaped quotes inside the generated script.
    echo "condor_rm -const JobBatchName==\\\"$CNDR_batchname\\\"" >$CNDR_killfile
    chmod 777 "$CNDR_killfile"
}
# Resource overrides for CONDOR_makeTask: each exports the matching
# CNDR_request_* variable consumed when the submit file is generated.
CONDOR_setCPU() {
    export CNDR_request_cpus="$1"
}

CONDOR_setDisk() {
    export CNDR_request_disk="$1"
}

CONDOR_setMemory() {
    export CNDR_request_memory="$1"
}
# Submit one task file to the queue under the current batch name.
# $1 - submit description file (e.g. produced by CONDOR_makeTask).
# Requires CONDOR_setBatchname to have been called first.
CONDOR_submit()
{
    local taskfile=$1
    [[ -z "$CNDR_batchname" ]] && echo "error: batch name not set" >&2 && return 1
    condor_submit -batch-name "$CNDR_batchname" -queue 1 -terse $taskfile
}
# Poll condor_q/condor_history until every job of the current batch has
# left the queue, printing a one-line progress counter. If any job ends
# with a non-success status, the remaining jobs are removed and the
# function returns 1. On success the killfile is deleted and 0 returned.
CONDOR_wait()
{
    # Count JobStatus codes (one per line in $1) and answer the query
    # named by $2 via $REPLY.
    condor_parse_status() {
        local data=$1 request=$2
        local sum_U=0 sum_R=0 sum_I=0 sum_X=0 sum_C=0 sum_H=0 sum_E=0
        local IFS=$'\n'
        for REPLY in $data; do
            case $REPLY in
                0) sum_U=$((sum_U + 1));; # Unexpanded
                1) sum_I=$((sum_I + 1));; # Idle
                2) sum_R=$((sum_R + 1));; # Running
                3) sum_X=$((sum_X + 1));; # Removed # remove from queue or killed (if running) with condor_rm
                4) sum_C=$((sum_C + 1));; # Completed
                5) sum_H=$((sum_H + 1));; # Held # job will not be scheduled to run until it is released (condor_hold, condor_release)
                6) sum_E=$((sum_E + 1));; # Submission_err
            esac
        done
        case $request in
            all) REPLY=$((sum_U + sum_R + sum_I + sum_X + sum_C + sum_H + sum_E));;
            idle) REPLY=$sum_I;;
            hold) REPLY=$sum_H;;
            run) REPLY=$sum_R;;
            complete) REPLY=$sum_C;;
            # fix: called an undefined 'error' helper and lacked ';;'
            *) echo "error: unrecognized CONDOR_STATUS=$request" >&2; return 1;;
        esac
    }
    # Format a duration in seconds as HH:MM:SS (answer via $REPLY).
    condor_getTimestamp() {
        local dt=$1; shift
        local hr=$(( dt/60/60 )) min=$(( (dt/60) % 60 )) sec=$(( dt % 60 ))
        [[ ${#min} == 1 ]] && min=0$min
        [[ ${#sec} == 1 ]] && sec=0$sec
        [[ ${#hr} == 1 ]] && hr=0$hr
        REPLY="$hr:$min:$sec"
    }

    local startSec=$SECONDS error_code=0
    while :; do
        # Queued jobs vs. finished jobs (history) for this batch.
        local exec_q=$(condor_q -format "%d\n" JobStatus -const "JobBatchName==\"$CNDR_batchname\"")
        local hist_q=$(condor_history -format "%d\n" JobStatus -const "JobBatchName==\"$CNDR_batchname\"")

        condor_parse_status "$exec_q" all;      local exec_tot=$REPLY
        condor_parse_status "$exec_q" idle;     local wait_idle=$REPLY
        condor_parse_status "$exec_q" hold;     local wait_hold=$REPLY
        condor_parse_status "$exec_q" run;      local wait_run=$REPLY
        condor_parse_status "$hist_q" all;      local done_tot=$REPLY
        condor_parse_status "$hist_q" complete; local done_ok=$REPLY

        local wait_tot=$((wait_idle + wait_hold + wait_run));
        local done_err=$((done_tot - done_ok))
        local tot=$((wait_tot + done_tot))
        local dt=$((SECONDS - startSec)) timestamp=
        condor_getTimestamp $dt; timestamp=$REPLY

        # Single-line progress, overwritten in place via '\r'.
        printf "%s[%4s/%4s]#%s|E=%s Idle=%-3s Hold=%-3s %s\r" \
            $timestamp $done_tot $tot $wait_run $done_err $wait_idle $wait_hold "$CNDR_batchname"

        [[ $done_err != 0 ]] && error_code=1 && break;
        [[ $wait_tot == 0 ]] && break;
        sleep 1s
    done
    echo ""
    rm -f "$CNDR_killfile"

    if [[ $error_code != 0 ]]; then
        # At least one job failed: remove whatever is still queued.
        local exec_q num_before num_after=0
        exec_q=$(condor_q -format "%d\n" JobStatus -const "JobBatchName==\"$CNDR_batchname\"")
        condor_parse_status "$exec_q" all; num_before=$REPLY
        if [[ $num_before != 0 ]]; then
            # fix: was "$JobBatchName", a variable that is never set
            # anywhere in this script — the batch name is CNDR_batchname.
            condor_rm -const "JobBatchName==\"$CNDR_batchname\"" 2>/dev/null || true
            exec_q=$(condor_q -format "%d\n" JobStatus -const "JobBatchName==\"$CNDR_batchname\"")
            condor_parse_status "$exec_q" all; num_after=$REPLY
        fi
        echo "Complete with errors, $((num_before - num_after)) jobs were killed, $num_after jobs in a queue marked for delete"
        return 1
    fi
}
CONDOR_makeTask() # exe args files [environment="name1=val1; ..."] [tag]
{
    # Print a vanilla-universe condor submit description for one job to
    # stdout.
    #   $1 - executable to transfer and run
    #   $2 - command-line arguments for the executable
    #   $3 - comma-separated input files to transfer
    #   $4 - (optional) environment string "name1=val1; name2=val2; ..."
    #   $5 - (optional) tag prefixed to the generated log file names
    # Resource requests honour CNDR_request_{cpus,disk,memory} when set
    # (see CONDOR_setCPU/Disk/Memory); defaults: 1 cpu, 3G disk, 500M mem.
    #
    # fix: the old shift-based parsing aborted under 'set -eu' when the
    # optional trailing arguments were omitted (as the in-file self-test
    # does); positional defaults below accept 3..5 arguments. It also
    # carried a dead 'prefix' assignment that was immediately overwritten.
    local executable=$1
    local arguments=$2
    local input_files=$3
    local environment=${4-}
    local tag=${5-}
    local prefix=${tag:+${tag}_}condor

    cat <<EOT
universe=vanilla
log=${prefix}_cluster.log
arguments=$arguments
environment=$environment
# input
transfer_input_files=$input_files
should_transfer_files=YES
when_to_transfer_output=ON_EXIT
# do not capture local environment
getenv=False
# exec
executable=$executable
transfer_executable=True
# stdin
input=/dev/null
stream_input=False
# stdout
output=${prefix}_stdout.log
stream_output=False
# stderr
error=${prefix}_stderr.log
stream_error=False
# resources
request_cpus=${CNDR_request_cpus:-1}
request_disk=${CNDR_request_disk:-3G}
request_memory=${CNDR_request_memory:-500M}
EOT
}
if [[ -n $CNDR_SELF_TEST ]]; then
    # Self-test: submit 8 small jobs that dump their sandbox/environment,
    # then wait for the batch. Runs only when this file is executed
    # directly (CNDR_SELF_TEST set at the top of the script).
    entrypoint()
    {
        # Silence pushd/popd chatter for the rest of this function.
        pushd() { command pushd "$@" >/dev/null; }
        popd() { command popd "$@" >/dev/null; }

        local timestamp=$(date "+%Y.%m.%d-%H.%M.%S")

        CONDOR_setBatchname msk-$timestamp

        mkdir -p tmp # consider as a test root
        pushd tmp

        # Generate the job script; quoted 'EOT' keeps $vars unexpanded.
        local executable=$(pwd)/task.sh
        echo "#!/bin/bash" >$executable
        cat<<-'EOT' >>$executable
	echo "----------------------------- pwd/PWD"
	echo $PWD
	pwd
	echo "----------------------------- Directory content:"
	ls .
	echo "----------------------------- Environment:"
	env
	echo "----------------------------- Environment passed:"
	echo "ENV[1]=$ENV1"
	echo "ENV[2]=$ENV2"
	echo "----------------------------- Arguments:"
	for i; do echo "arg: $i"; done
	echo "Hello, stderr!" >&2
	echo "Hello, output file!" >out.txt
	sleep 2s
	echo "----------------------------- $1:"
	cat $1
	echo "----------------------------- in1.txt:"
	cat in1.txt
	echo "----------------------------- in2.txt:"
	cat in2.txt
	EOT
        chmod 777 $executable

        mkdir -p vectors
        pushd vectors
        local dirVec=$(pwd)
        echo "Hello, I'm input-1!" >in1.txt
        echo "Hello, I'm input-2!" >in2.txt
        local files="$dirVec/in1.txt, $dirVec/in2.txt"
        popd

        local dirOut=$timestamp
        local id=0
        while [[ $id -lt 8 ]]; do
            local arguments="in1.txt arg1 arg2 $id"
            mkdir -p $dirOut/$id
            pushd $dirOut/$id
            # NOTE(review): CONDOR_makeTask's signature comment lists 4
            # required args but only 3 are passed here — under 'set -eu'
            # the original shift-based parsing aborts on this; confirm.
            CONDOR_makeTask "$executable" "$arguments" "$files" > task.sub
            CONDOR_submit task.sub > submit.log
            popd
            id=$((id + 1))
        done
        echo Submitted

        # Ensure the progress line is terminated even if CONDOR_wait fails.
        trap 'echo' EXIT
        CONDOR_wait
        trap - EXIT
    }
    entrypoint "$@"
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.