blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
021c5e77dbbc7513f8e6b29a4226779d8bfa5e23 | Shell | jianzuoyi/barley | /shell/assembly/Evaluation/Mapping/reads/aws-encode.sh | UTF-8 | 717 | 3.640625 | 4 | [] | no_license | #!/bin/bash
SCRIPT=$1
LINE=$2
INSTANCE_TYPE=$3
INSTANCE_PRICE=$4
SPOT_TEMPLATE_JSON=$5
SCRIPT_DIR=$(cd $(dirname $0);pwd)
WD=$(cd $(dirname $SCRIPT);pwd)
outdir=$(mktemp -d ${WD}/encode_$INSTANCE_TYPE.XXXXXX)
split -a 4 -d -l $LINE $SCRIPT encode_$INSTANCE_TYPE_
mv encode_$INSTANCE_TYPE_???? $outdir
find $outdir -type f | while read SH
do
UserData=$(base64 $SH | tr -d "\n")
cp $SPOT_TEMPLATE_JSON ${SH}.json
sed "s/USERDATA_XXXXXX/${UserData}/" ${SH}.json -i
sed "s/INSTANCETYPE_XXXXXX/${INSTANCE_TYPE}/" ${SH}.json -i
echo "aws ec2 request-spot-instances --spot-price $INSTANCE_PRICE --instance-count 1 --type one-time --launch-specification file://${SH}.json"
done > ${outdir}.spot.sh
| true |
f9220dd0c4ac90cccc801c31834fe6756a7d2f28 | Shell | drmingdrmer/cheatsheet | /sheets/awk/iterate-array-by-ordered-keys.sh | UTF-8 | 180 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
{
cat <<-END
a 1
c 3
b 2
END
} | awk '{
a[$1] = $2
}
END {
n = asorti(a, keys)
for (i=1; i<=n; i++) {
k = keys[i]
print k " " a[k]
}
}'
| true |
81714e2a877eb4a3544bf87d9ac382deef2c41b3 | Shell | copywang/Shell_Scripts | /w3school/10_caculate.sh | UTF-8 | 318 | 3.34375 | 3 | [] | no_license | #!/bin/bash
a=10
b=20
value=`expr ${a} + ${b}`
echo "${a}+${b} : ${value}"
value=`expr ${a} - ${b}`
echo "${value}"
value=`expr $a \* $b`
echo "${value}"
value=`expr $b / $a`
echo "${value}"
value=`expr $b % $a`
echo "${value}"
if [ $a == $b ]
then
echo "a = b"
fi
if [ $a != $b ]
then
echo "a != b"
fi
| true |
910a194cf434be2131c064405b23a9d19668fe21 | Shell | hodgesds/tokio-cassandra | /bin/integration-test.sh | UTF-8 | 2,778 | 3.453125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
cli=${1:?Please provide the commandline interface executable as first argument}
image=${2:?Please provide the image name for the cassandra database}
source "$(dirname $0)/../lib/utilities.sh"
set -eu
port=$CASSANDRA_PORT
host=$CASSANDRA_HOST_NAME
ip=$CASSANDRA_HOST_IP
set +u
ca_file_args="--ca-file ./etc/docker-cassandra/secrets/keystore.cer.pem"
con_ip_args="-h $ip --port $port"
con_host_args="-h $host --port $port"
trap stop-dependencies 0 1 2 5 15
#########################################################################
echo ">>>>>>>>>>>>>>>>>>>> TEST CONNECTION: PLAIN <<<<<<<<<<<<<"
#########################################################################
start-dependencies-plain $image
set -x
$cli $con_ip_args test-connection
$cli $con_host_args test-connection
$cli --desired-cql-version 3.0.0 $con_host_args test-connection
$cli --desired-cql-version 2.0.0 $con_host_args test-connection \
&& { echo "server cannot handle versions that are too low"; exit 6; }
$cli $con_ip_args --tls $ca_file_args test-connection \
&& { echo "should not connect if ip is set when using tls - verification must fail"; exit 1; }
$cli $con_host_args --tls $ca_file_args test-connection
$cli $con_host_args $ca_file_args test-connection \
|| { echo "should imply tls if CA-file is specified"; exit 2; }
$cli $con_host_args --tls test-connection \
&& { echo "should fail TLS hostname verification on self-signed cert by default"; exit 3; }
set +x
#########################################################################
echo ">>>>>>>>>>>>>>>>>>>> TEST CONNECTION: WITH-AUTHENTICATION <<<<<<<<"
#########################################################################
start-dependencies-auth $image
# YES - there is something async going on, so we have to give it even more time until
# it can accept properly authenticated connections
sleep 1
auth_args="-u cassandra -p cassandra"
set -x
$cli $auth_args $con_ip_args test-connection
$cli $auth_args $con_host_args $ca_file_args test-connection
set +x
#########################################################################
echo ">>>>>>>>>>>>>>>>>>>> TEST CONNECTION: WITH-CERTIFICATE <<<<<<<<"
#########################################################################
start-dependencies-cert $image
cert_args="--cert ./etc/docker-cassandra/secrets/keystore.p12:cassandra"
set -x
$cli $con_host_args --cert-type pkcs12 $cert_args $ca_file_args test-connection
$cli $con_host_args $cert_args $ca_file_args test-connection \
|| { echo "cert-type PK12 is defaulting to the one type we currently know"; exit 4; }
$cli $con_host_args $ca_file_args test-connection \
&& { echo "it doesnt work with without a certificate as server requires client cert"; exit 5; }
set +x
| true |
d7aed55cc983e8a6f3081675dd2a6953ee010f92 | Shell | BeBeMo/Linux-Security-Pro | /shell/zomble_kill.sh | UTF-8 | 330 | 3.328125 | 3 | [] | no_license | #!/bin/bash
#杀死僵尸进程
#sleep 2
num=$(ps -ef | grep defunct | grep -v grep | wc -l)
#如果个数为0
if [ $num -eq 0 ]
then
echo "系统中目前不存在僵尸进程"
#如果个数不为0
else
#ps -A -o stat,ppid,pid,cmd | grep -e '^[Zz]' | awk '{print $2}' | xargs kill -9
echo "已杀死 $num 个僵尸进程"
fi
| true |
2fbf7e8281e5c869e74069099e7179f4342b6599 | Shell | merodwin/ansible-role-ftp-backup | /templates/backup.sh.j2 | UTF-8 | 275 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
HOST='{{backup_host}}'
USER='{{backup_user}}'
PASS='{{backup_pass}}'
TARGETFOLDER='{{backup_dest_dir}}'
SOURCEFOLDER='{{backup_src_dir}}'
lftp -f "
open $HOST
user $USER $PASS
lcd $SOURCEFOLDER
mirror --reverse --delete --verbose $SOURCEFOLDER $TARGETFOLDER
bye
"
| true |
3282dcc068686d703003edd132a28ec13721251b | Shell | hashicorp/vagrant-installers | /package/common-setup | UTF-8 | 741 | 4.125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
function info() {
local msg_template="${1}\n"
local i=$(( ${#} - 1 ))
local msg_args=("${@:2:$i}")
printf "${msg_template}" "${msg_args[@]}" >&2
}
function error() {
local msg_template="ERROR: ${1}\n"
local i=$(( ${#} - 1 ))
local msg_args=("${@:2:$i}")
printf "${msg_template}" "${msg_args[@]}" >&2
exit 1
}
function file_directory() {
local path="${1?File path is required}"
local dir
if [[ "${path}" != *"/"* ]]; then
dir="."
else
dir="${path%/*}"
fi
if [ ! -d "${dir}" ]; then
mkdir -p "${dir}" || exit
fi
pushd "${dir}" > /dev/null || exit
dir="$(pwd)" || exit
popd > /dev/null || exit
printf "%s" "${dir}"
}
| true |
7292ca91adb72a15d4025817002d7b86b6570ca9 | Shell | yhaddad/Trivent | /Build | UTF-8 | 644 | 3.21875 | 3 | [] | no_license | #! /bin/sh
echo "============================================================"
echo "= Build Makefile for compilation ="
echo "============================================================"
echo " Move frome directory :"
echo ${PWD}
echo " to the directory :"
cd ${PWD}/build
echo ${PWD}
if [ -e ${PWD}/Makefile ]; then
echo " The Makefile is allrady exist .."
rm -rf *
cmake -C ${ILCSOFT}/ILCSoft.cmake ..
ls -lthr
else
echo " Create a new Makefile .. "
cmake -C ${ILCSOFT}/ILCSoft.cmake ..
ls -lthr
fi
echo "============================================================"
| true |
0b08b1940496802356e4ace8889c1d48d58a1d36 | Shell | skalogerakis/TUC_Theory_of_Computation | /MyDocs/FlexSTABLE/run | UTF-8 | 580 | 2.609375 | 3 | [] | no_license | #!/bin/bash
#Just a simple bash file to run everything at once
#TODO LOOKS LIKE PERMISSIONS ARE NOT NEEDED.CHECK!
#echo -e "\nGIVING PERMISSIONS"
#give permissions for everything read write execute
#chmod 755 ./Serial/Serial
#chmod 755 ./OpenMP_FineGrained/FineOMP
#chmod 755 ./Pthread_FineGrained/threadFine
echo -e "\nRe-Run everything on lex file"
flex mylexer.l #give name of .l file
gcc -o mylexer lex.yy.c -lfl
#./mylexer < ./MyDocs/FlexInput/example-3.test-1.in #give name of input file
./mylexer < myprog.tc #give name of input file
echo -e "\nProcedure finished."
| true |
8b2935c7f675a00e744175c71371338fb1787f4d | Shell | sbradley7777/dot.config | /bin/bin.redhat/logs-extract_logs.sh | UTF-8 | 352 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# Description: Extracts all the .gz files in the /var/log/ and /var/log/cluster
# directories in an extracted sosreport.
# Author: Shane Bradley(sbradley@redhat.com)
for i in *; do
# skip the "reports" directories if found.
if [ -d "$i/var/log" ]; then
for gzip_file in `find $i/var/log/ -name "*.gz"`; do
gunzip -d $gzip_file
done
fi
done
| true |
2858ba351a34a8df8e8a77a08921ad330a6d1627 | Shell | JohnOmernik/zetaextra | /mariadb/install.sh | UTF-8 | 2,562 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
###############
# $APP Specific
echo "The next step will walk through instance defaults for ${APP_ID}"
echo ""
SOME_COMMENTS="Port for MariaDB"
PORTSTR="CLUSTER:tcp:30306:${APP_ROLE}:${APP_ID}:$SOME_COMMENTS"
getport "CHKADD" "$SOME_COMMENTS" "$SERVICES_CONF" "$PORTSTR"
if [ "$CHKADD" != "" ]; then
getpstr "MYTYPE" "MYPROTOCOL" "APP_PORT" "MYROLE" "MYAPP_ID" "MYCOMMENTS" "$CHKADD"
APP_PORTSTR="$CHKADD"
else
@go.log FATAL "Failed to get Port for $APP_NAME $PSTR"
fi
bridgeports "APP_PORT_JSON" "3306" "$APP_PORTSTR"
haproxylabel "APP_HA_PROXY" "${APP_PORTSTR}"
read -e -p "Please enter the CPU shares to use with $APP_NAME: " -i "1.0" APP_CPU
echo ""
read -e -p "Please enter the Marathon Memory limit to use with $APP_NAME: " -i "2048" APP_MEM
echo ""
APP_MAR_FILE="${APP_HOME}/marathon.json"
APP_DATA_DIR="$APP_HOME/data"
APP_LOCK_DIR="$APP_HOME/lock"
APP_CRED_DIR="$APP_HOME/creds"
APP_LOG_DIR="$APP_HOME/logs"
APP_ENV_FILE="$CLUSTERMOUNT/zeta/kstore/env/env_${APP_ROLE}/${APP_NAME}_${APP_ID}.sh"
mkdir -p $APP_DATA_DIR
mkdir -p $APP_LOCK_DIR
mkdir -p $APP_CRED_DIR
mkdir -p $APP_LOG_DIR
sudo chown -R ${IUSER}:zeta${APP_ROLE}apps $APP_CRED_DIR
sudo chmod 770 $APP_CRED_DIR
sudo chown -R ${IUSER}:zeta${APP_ROLE}apps $APP_LOCK_DIR
sudo chmod 770 $APP_LOCK_DIR
sudo chown -R ${IUSER}:zeta${APP_ROLE}apps $APP_LOG_DIR
sudo chmod 770 $APP_LOG_DIR
cat > $APP_ENV_FILE << EOL1
#!/bin/bash
export ZETA_${APP_NAME}_${APP_ID}_PORT="${APP_PORT}"
EOL1
cat > $APP_MAR_FILE << EOL
{
"id": "${APP_MAR_ID}",
"cpus": $APP_CPU,
"mem": $APP_MEM,
"instances": 1,
"labels": {
$APP_HA_PROXY
"CONTAINERIZER":"Docker"
},
"container": {
"type": "DOCKER",
"docker": {
"image": "${APP_IMG}",
"network": "BRIDGE",
"portMappings": [
$APP_PORT_JSON
]
},
"volumes": [
{
"containerPath": "/var/lib/mysql",
"hostPath": "${APP_DATA_DIR}",
"mode": "RW"
},
{
"containerPath": "/lock",
"hostPath": "${APP_LOCK_DIR}",
"mode": "RW"
},
{
"containerPath": "/creds",
"hostPath": "${APP_CRED_DIR}",
"mode": "RW"
},
{
"containerPath": "/logs",
"hostPath": "${APP_LOG_DIR}",
"mode": "RW"
}
]
}
}
EOL
##########
# Provide instructions for next steps
echo ""
echo ""
echo "$APP_NAME instance ${APP_ID} installed at ${APP_HOME} and ready to go"
echo "To start please run: "
echo ""
echo "$ ./zeta package start ${APP_HOME}/$APP_ID.conf"
echo ""
| true |
5e1671cb8f362e3068e50324b069a22cf2d372a9 | Shell | jackyssion/inexistence | /00.Installation/install/install_libtorrent_rasterbar | UTF-8 | 10,382 | 3.515625 | 4 | [] | no_license | #!/bin/bash
#
# https://github.com/Aniverse/inexistence
# Author: Aniverse
#
script_update=2019.05.23
script_version=r10033
################################################################################################
usage_for_me() {
bash $local_packages/install/libtorrent_rasterbar -m deb2 --logbase $LogTimes
bash <(wget -qO- https://github.com/Aniverse/inexistence/raw/master/00.Installation/install/libtorrent_rasterbar) -m deb2
bash <(wget -qO- https://github.com/Aniverse/inexistence/raw/master/00.Installation/install/libtorrent_rasterbar) -b RC_1_2 ; }
################################################################################################ Get options
function show_usage() { echo " Invalid option $1
Usage:
-m Install mode, can only be specified as apt, deb, deb2, deb3 or source
-v Specifiy which version of libtorrent-rasterbar to be installed
-b Specifiy which branchn of libtorrent-rasterbar to be installed
--debug Enable debug mode
Note that installing specific version cause the installation to fail
or the version you installed is not compatible with the version of
your Deluge or qBittorrent
"
exit 1 ; }
OPTS=$(getopt -o m:v:b: -al debug,install-mode:,version:,branch:logbase: -- "$@")
[ ! $? = 0 ] && show_usage
eval set -- "$OPTS"
while [ -n "$1" ] ; do case "$1" in
-m | --install-mode ) mode="$2" ; shift 2 ;;
-v | --version ) version="$2" ; shift 2 ;;
-b | --branch ) branch="$2" ; shift 2 ;;
--logbase ) LogTimes="$2" ; shift 2 ;;
--debug ) debug=1 ; shift ;;
-- ) shift ; break ;;
esac ; done
################################################################################################
[[ -z $LogTimes ]] && LogTimes=/log/inexistence/single
OutputLOG=$LogTimes/install/libtorrent_rasterbar.txt # /dev/null
DebLocation=$LogTimes/deb
SCLocation=$LogTimes/source
LOCKLocation=$LogTimes/lock
source /etc/inexistence/00.Installation/install/function --output $OutputLOG
git_repo="https://github.com/arvidn/libtorrent"
################################################################################################
cancel() { echo -e "${normal}" ; rm -f $HOME/.lt.git.tag ; exit ; }
trap cancel SIGINT
[[ -z $mode ]] && [[ ! -z $version ]] && mode=source
[[ -z $mode ]] && [[ ! -z $branch ]] && mode=source
[[ -z $mode ]] && echo -e "\n${CW} Installation mode must be specified as apt, deb, deb2, deb3 or source${normal}\n" && cancel
[[ -z $MAXCPUS ]] && MAXCPUS=$(nproc)
# Transform version to branch
[[ $( echo $version | grep -Eo "[012]\.[0-9]+\.[0-9]+" ) ]] && branch=$( echo libtorrent-$version | sed "s/\./_/g" )
[[ $debug == 1 ]] && echo -e "version=$version, branch=$branch"
# Check if input branch exists
# [[ ! -z $branch ]] && [[ ! $( wget -qO- "https://github.com/arvidn/libtorrent" | grep data-name | cut -d '"' -f2 | grep -P "$branch\b" ) ]] && { echo -e "\n${CW} No such branch!\n" ; exit 1 ; }
[[ -n $branch ]] && {
rm -f $HOME/.lt.git.tag
git ls-remote --tags $git_repo | awk '{print $NF}' >> $HOME/.lt.git.tag
git ls-remote --heads $git_repo | awk '{print $NF}' >> $HOME/.lt.git.tag
grep $branch $HOME/.lt.git.tag -q || { echo -e "\n${CW} No such branch!${normal}\n" ; rm -f $HOME/.lt.git.tag ; exit 1 ; }
rm -f $HOME/.lt.git.tag ; }
# Use RC_1_1 if not specified
[[ -z $branch ]] && branch=RC_1_1
# Use 6.6.6 when version cannot be determind
[[ -n $branch ]] && [[ -z $version ]] && version=$( wget -qO- https://github.com/arvidn/libtorrent/raw/$branch/include/libtorrent/version.hpp | grep LIBTORRENT_VERSION | tail -n1 | grep -oE "[0-9.]+\"" | sed "s/.0\"//" )
[[ -n $branch ]] && [[ -z $version ]] && version=6.6.6
# Random number for marking different installations
[[ $mode == source ]] && RN=$(shuf -i 1-999 -n1)
rm -f /tmp/lt.1.lock /tmp/lt.2.lock /tmp/ltd.1.lock /tmp/ltd.2.lock
################################################################################################
# Install build dependencies for libtorrent-rasterbar
function install_lt_dependencies() {
apt_install \
build-essential pkg-config autoconf automake libtool git \
libboost-dev libboost-system-dev libboost-chrono-dev libboost-random-dev libssl-dev \
geoip-database libgeoip-dev \
libboost-python-dev \
zlib1g-dev >> $OutputLOG 2>&1 && touch /tmp/ltd.1.lock || touch /tmp/ltd.2.lock
[[ -f /tmp/ltd.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.dependencies.lock ; }
# Install from source codes
function install_lt_source() {
git clone --depth=1 -b $branch $git_repo libtorrent-$version-$RN >> $OutputLOG 2>&1
cd libtorrent-$version-$RN
# See here for details: https://github.com/qbittorrent/qBittorrent/issues/6383
# Patches from amefs (QuickBox Dev)
if version_ge $version 1.1 ; then
wget https://github.com/QuickBox/QB/raw/master/setup/sources/libtorrent-rasterbar-RC_1_1.patch >> $OutputLOG 2>&1
patch -p1 < libtorrent-rasterbar-RC_1_1.patch >> $OutputLOG 2>&1
elif version_ge $version 1.0 ; then
wget https://github.com/QuickBox/QB/raw/master/setup/sources/libtorrent-rasterbar-RC_1_0.patch >> $OutputLOG 2>&1
patch -p1 < libtorrent-rasterbar-RC_1_0.patch >> $OutputLOG 2>&1
else
sed -i "s/+ target_specific(),/+ target_specific() + ['-std=c++11'],/" bindings/python/setup.py || NoPatch=1
fi
mkdir -p doc-pak && echo "an efficient feature complete C++ bittorrent implementation" > description-pak
# ltversion=$(grep -oE "AC_INIT\(\[libtorrent-rasterbar\],.*" configure.ac | grep -oE "[0-9.]+" | head -1)
./autotool.sh >> $OutputLOG 2>&1
if [[ -z $NoPatch ]]; then
./configure --enable-python-binding --with-libiconv \
--disable-debug --enable-encryption --with-libgeoip=system CXXFLAGS=-std=c++11 >> $OutputLOG 2>&1 # For both Deluge and qBittorrent
else
./configure --enable-python-binding --with-libiconv \
--disable-debug --enable-encryption --with-libgeoip=system >> $OutputLOG 2>&1
fi
make -j$MAXCPUS >> $OutputLOG 2>&1
if [[ $CODENAME == buster ]]; then
make install >> $OutputLOG 2>&1 && touch /tmp/lt.1.lock || touch /tmp/lt.2.lock
else
# If we installed libtorrent-rasterbar with different package name via checkinstall formerly, checkinstall will get failed, so to make sure, make install again after checkinstall
checkinstall -y --pkgname=libtorrent-rasterbar --pkggroup libtorrent --pkgversion $version >> $OutputLOG 2>&1 &&
touch /tmp/lt.1.lock || {
make install >> $OutputLOG 2>&1 && touch /tmp/lt.1.lock || touch /tmp/lt.2.lock ; }
fi
cp -f libtorrent-rasterb*.deb $DebLocation
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.lock
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.source.$branch.lock ; }
# Install from pre-compiled deb package (libtorrent-rasterbar8, based on RC_1_0 #62c96797a06a024dc17a44931c19afe6f7bd7d6c with python-binding fix)
function install_lt8_deb() {
[[ $CODENAME == Buster ]] && { echo -e "Error: No libtorrent 1.0.11 deb for Buster, please ust libtorrent 1.1 or later." ; exit 1 ; }
wget -O lt.$CODENAME.1.0.11.deb https://raw.githubusercontent.com/Aniverse/inexistence/files/debian.package/libtorrent-rasterbar-1.0.11.$CODENAME.amd64.deb >> $OutputLOG 2>&1
dpkg -i lt.$CODENAME.1.0.11.deb >> $OutputLOG 2>&1 && touch /tmp/lt.1.lock || touch /tmp/lt.2.lock
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.lock
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.deb.lock ; }
# Install from pre-compiled deb package
# libtorrent-rasterbar9, based on RC_1_1 6f1250c6535730897909240ea0f4f2a81937d21a
# with python-binding fix (from amefs) and #5a48292aefd6ebffd5be6b237081ba2d978a2caa fix (Announce only once to private trackers, by Airium)
function install_lt9_deb() {
wget -O lt.$CODENAME.1.1.13.deb https://raw.githubusercontent.com/Aniverse/inexistence/files/debian.package/libtorrent-rasterbar-1.1.13.1.$CODENAME.amd64.cpp11.deb >> $OutputLOG 2>&1
dpkg -i lt.$CODENAME.1.1.13.deb >> $OutputLOG 2>&1 && touch /tmp/lt.1.lock || touch /tmp/lt.2.lock
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.lock
[[ -f /tmp/lt.1.lock ]] && touch $LOCKLocation/libtorrent-rasterbar.deb.lock ; }
mkdir -p $SCLocation $DebLocation $LOCKLocation $(dirname $OutputLOG)
cd $SCLocation
echo -e "${bold}${green}\n\n\n$(date "+%Y.%m.%d.%H.%M.%S") $RN\n\n\n${normal}" >> $OutputLOG
case $mode in
deb ) [[ ! -f $LOCKLocation/libtorrent-rasterbar.dependencies.lock ]] && {
echo -ne "Installing libtorrent-rasterbar build dependencies ..." | tee -a $OutputLOG
install_lt_dependencies & spinner $!
check_status ltd ; }
echo -ne "Installing libtorrent-rasterbar ${bold}${cyan}1.0.11${normal} from pre-compiled deb package ..." | tee -a $OutputLOG
install_lt8_deb & spinner $!
;;
deb2 ) [[ ! -f $LOCKLocation/libtorrent-rasterbar.dependencies.lock ]] && {
echo -ne "Installing libtorrent-rasterbar build dependencies ..." | tee -a $OutputLOG
install_lt_dependencies & spinner $!
check_status ltd ; }
echo -ne "Installing libtorrent-rasterbar ${bold}${cyan}1.1.13${normal} from pre-compiled deb package ..." | tee -a $OutputLOG
install_lt9_deb & spinner $!
;;
source ) [[ ! -f $LOCKLocation/libtorrent-rasterbar.dependencies.lock ]] && {
echo -ne "Installing libtorrent-rasterbar build dependencies ..." | tee -a $OutputLOG
install_lt_dependencies & spinner $!
check_status ltd ; }
if [[ ` echo $branch | grep -Eo "[012]_[0-9]_[0-9]+" ` ]]; then
echo -ne "Installing libtorrent-rasterbar ${bold}${cyan}$version${normal} from source codes ..." | tee -a $OutputLOG
else
echo -ne "Installing libtorrent-rasterbar ${bold}$branch branch (${cyan}$version${jiacu})${normal} from source codes ..." | tee -a $OutputLOG
fi
install_lt_source & spinner $!
;;
esac
cd ; ldconfig
check_status lt
| true |
90b980dd8a3899e9c9993a3357ccce8faca7288b | Shell | foxytrixy-com/foxtools | /src/scripts/git_branch.bash | UTF-8 | 125 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/bash -eu
branch_fullname=$(git symbolic-ref -q HEAD)
branch_name=${branch_fullname##refs/heads/}
echo "$branch_name"
| true |
050ff577905f135069bf22f439329e4b916bba84 | Shell | imattman/dotfiles | /darwin/install-fonts.sh | UTF-8 | 566 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# fail early
set -eou pipefail
if [ -z "$(xcode-select -p)" ]; then
echo "Command line tools not found"
sudo xcode-select --install
fi
if [ ! "$(command -v brew)" ]; then
echo "Homebrew not found"
open "https://brew.sh"
exit 1
fi
CASKS="homebrew/cask-fonts"
FONTS="
font-fira-code-nerd-font
font-go-mono-nerd-font
font-hack-nerd-font
font-inconsolata-nerd-font
font-meslo-lg-nerd-font
font-ubuntu-mono-nerd-font
font-ubuntu-nerd-font
"
for cask in $CASKS; do
brew tap $cask
done
for font in $FONTS; do
brew install --cask $font
done
| true |
e26147e45f24b8fa84d5ce466684880cba3e1999 | Shell | anizox/Animesh_personal | /whileshell.sh | UTF-8 | 190 | 3.03125 | 3 | [] | no_license | #SCRIPT TO PRINT "5,4,3,2,1" exactly using while loop
#!/usr/bin/perl
#!/bin/bash
no=$1
while test $no -ge 1
do
echo -en "$no"
if [ $no -gt 1 ]
then
echo -en ","
fi
((no=$no-1))
done
echo
| true |
c8313b2a7c97a71ac3bcab55eba2156459e748cd | Shell | wdubaiyu/Hackintosh-Dell-XPS-9380 | /ALCPlugFix/alc_fix/install双击自动安装(解决耳机无声)11.X.command | UTF-8 | 1,429 | 2.65625 | 3 | [] | no_license | #!/bin/bash
path=${0%/*}
username=$(whoami)
sudo mount -o nobrowse -t apfs /dev/disk1s5 /Users/$username/Downloads/
sudo cp -a "$path/ALCPlugFix" /Users/$username/Downloads/usr/bin
sudo chmod 755 /Users/$username/Downloads/usr/bin/ALCPlugFix
sudo chown root:wheel /Users/$username/Downloads/usr/bin/ALCPlugFix
sudo cp -a "$path/hda-verb" /Users/$username/Downloads/usr/bin
sudo chmod 755 /Users/$username/Downloads/usr/bin/hda-verb
sudo chown root:wheel /Users/$username/Downloads/usr/bin/hda-verb
sudo cp -a "$path/good.win.ALCPlugFix.plist" /Users/$username/Downloads/Library/LaunchAgents/
sudo chmod 644 /Users/$username/Downloads/Library/LaunchAgents/good.win.ALCPlugFix.plist
sudo chown root:wheel /Users/$username/Downloads/Library/LaunchAgents/good.win.ALCPlugFix.plist
sudo launchctl load /Users/$username/Downloads/Library/LaunchAgents/good.win.ALCPlugFix.plist
echo '安装ALCPlugFix守护进程完成!'
echo '重建缓存中,请稍候……'
sudo rm -rf /Users/$username/Downloads/System/Library/Caches/com.apple.kext.caches/Startup/kernelcache
sudo rm -rf /Users/$username/Downloads/System/Library/PrelinkedKernels/prelinkedkernel
sudo touch /Users/$username/Downloads/System/Library/Extensions/ && sudo kextcache -u /
sudo bless --folder /Users/$username/Downloads/System/Library/CoreServices --bootefi --create-snapshot
echo '安装程序结束,请重启电脑!!!'
bash read -p '按任何键退出' | true |
c75ca050b127b72946797e303365f4790d6ccaf6 | Shell | glatard/manymaths | /wrap/tests/run_tests.sh | UTF-8 | 269 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
LIBS="mpfr glibc-v2.31 intel-v2021.2.0 musl-v1.1.24-1 openlibm-v0.7.0 openlibm-v0.7.5"
for LIB in ${LIBS}
do
echo "Testing ${LIB}"
docker run -e LIB_NAME=${LIB} -v $PWD:$PWD -w $PWD --rm manymaths ./test > test-results/test-${LIB}.txt
done | true |
78b7f703d5d81c9461b58b6869e1fe71186a1fe6 | Shell | ansgardahlen/dc-nextcloud | /generate_config.sh | UTF-8 | 1,698 | 3.5625 | 4 | [] | no_license | #!/bin/bash
if [[ -f nextcloud.conf ]]; then
read -r -p "config file nextcloud.conf exists and will be overwritten, are you sure you want to contine? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
mv nextcloud.conf nextcloud.conf_backup
;;
*)
exit 1
;;
esac
fi
if [ -z "$PUBLIC_FQDN" ]; then
read -p "Hostname (FQDN): " -ei "nextcloud.example.org" PUBLIC_FQDN
fi
if [ -z "$ADMIN_MAIL" ]; then
read -p "Nextcloud admin Mail address: " -ei "mail@example.com" ADMIN_MAIL
fi
[[ -f /etc/timezone ]] && TZ=$(cat /etc/timezone)
if [ -z "$TZ" ]; then
read -p "Timezone: " -ei "Europe/Berlin" TZ
fi
DBNAME=nextcloud
DBUSER=nextcloud
DBPASS=$(</dev/urandom tr -dc A-Za-z0-9 | head -c 28)
HTTP_PORT=8888
cat << EOF > nextcloud.conf
# ------------------------------
# nextcloud web ui configuration
# ------------------------------
# example.org is _not_ a valid hostname, use a fqdn here.
PUBLIC_FQDN=${PUBLIC_FQDN}
# ------------------------------
# NEXTCLOUD admin user
# ------------------------------
NEXTCLOUD_ADMIN=nextcloudadmin
ADMIN_MAIL=${ADMIN_MAIL}
NEXTCLOUD_PASS=$(</dev/urandom tr -dc A-Za-z0-9 | head -c 28)
# ------------------------------
# SQL database configuration
# ------------------------------
DBNAME=${DBNAME}
DBUSER=${DBUSER}
# Please use long, random alphanumeric strings (A-Za-z0-9)
DBPASS=${DBPASS}
DBROOT=$(</dev/urandom tr -dc A-Za-z0-9 | head -c 28)
# ------------------------------
# Bindings
# ------------------------------
# You should use HTTPS, but in case of SSL offloaded reverse proxies:
HTTP_PORT=${HTTP_PORT}
HTTP_BIND=0.0.0.0
# Your timezone
TZ=${TZ}
# Fixed project name
COMPOSE_PROJECT_NAME=nextcloud
EOF
| true |
03dde920d247f91582bba245a808aff27904d3dc | Shell | marsnow/lavawhale | /.env.example | UTF-8 | 1,912 | 2.703125 | 3 | [
"MIT"
] | permissive | ### Docker compose files ##################################
# Select which docker-compose files to include.
COMPOSE_FILE=docker-compose.yml
# Define the prefix of container names.
COMPOSE_PROJECT_NAME=docker
### Paths #################################################
# Point to the path of your applications code on your host
APP_CODE_PATH_HOST=../
# Point to where the `APP_CODE_PATH_HOST` should be in the container
APP_CODE_PATH_CONTAINER=/var/www
# You may add flags to the path `:cached`, `:delegated`. When using Docker Sync add `:nocopy`
APP_CODE_CONTAINER_FLAG=:cached
### Drivers ################################################
# All volumes driver
VOLUMES_DRIVER=local
# All Networks driver
NETWORKS_DRIVER=bridge
### Docker Host IP ########################################
# Enter your Docker Host IP (will be appended to /etc/hosts). Default is `10.0.75.1`
DOCKER_HOST_IP=10.0.75.1
### Remote Interpreter ####################################
# Choose a Remote Interpreter entry matching name. Default is `laradock`
PHP_IDE_CONFIG=serverName=docker
### PHP_FPM ###############################################
PHP_INSTALL_MYSQLI=false
PHP_INSTALL_PHPREDIS=false
PHP_INSTALL_MEMCACHED=false
PHP_INSTALL_XDEBUG=false
PHP_INSTALL_IMAP=false
PHP_INSTALL_MONGO=false
PHP_INSTALL_MSSQL=false
PHP_INSTALL_SSH2=false
PHP_INSTALL_SOAP=false
PHP_INSTALL_XSL=false
PHP_INSTALL_BCMATH=false
PHP_INSTALL_IMAGE=false
PHP_INSTALL_EXIF=false
PHP_INSTALL_INTL=false
PHP_INSTALL_PGSQL=false
PHP_INSTALL_PCNTL=false
PHP_INSTALL_RDKAFKA=false
PHP_INSTALL_YAML=false
PHP_INSTALL_FFMPEG=false
PHP_COMPOSER_INSTALL=false
PHP_COMPOSER_INSTALL_DEV=false
### NGINX #################################################
NGINX_HOST_HTTP_PORT=80
NGINX_HOST_HTTPS_PORT=443
NGINX_HOST_LOG_PATH=./logs/nginx/
NGINX_SITES_PATH=./nginx/sites/
NGINX_PHP_FPM_PORT=9000
NGINX_PHP_SWOOLE_PORT=1215
NGINX_SSL_PATH=./nginx/ssl/ | true |
8f47003735d3c474839c0337ec5a45827094af52 | Shell | DzmitryShylovich/angular | /scripts/ci-lite/offline_compiler_test.sh | UTF-8 | 2,137 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -ex -o pipefail
# These ones can be `npm link`ed for fast development
LINKABLE_PKGS=(
$(pwd)/dist/packages-dist/{common,forms,core,compiler,compiler-cli,platform-{browser,server},platform-browser-dynamic,router}
$(pwd)/dist/tools/@angular/tsc-wrapped
)
PKGS=(
reflect-metadata@0.1.8
typescript@2.0.2
zone.js@0.6.25
rxjs@5.0.1
@types/{node@6.0.38,jasmine@2.2.33}
jasmine@2.4.1
webpack@2.1.0-beta.21
@angular2-material/{core,button}@2.0.0-alpha.8-1
)
TMPDIR=${TMPDIR:-.}
readonly TMP=$TMPDIR/e2e_test.$(date +%s)
mkdir -p $TMP
cp -R -v modules/@angular/compiler-cli/integrationtest/* $TMP
cp -R -v modules/benchmarks $TMP
# Try to use the same versions as angular, in particular, this will
# cause us to install the same rxjs version.
cp -v package.json $TMP
# run in subshell to avoid polluting cwd
(
cd $TMP
set -ex -o pipefail
npm install ${PKGS[*]}
# TODO(alexeagle): allow this to be npm link instead
npm install ${LINKABLE_PKGS[*]}
./node_modules/.bin/tsc --version
# Compile the compiler-cli third_party simulation.
# Use ngc-wrapped directly so we don't produce *.ngfactory.ts files!
# Compile the compiler-cli integration tests
# TODO(vicb): restore the test for .xtb
#./node_modules/.bin/ngc -p tsconfig-build.json --i18nFile=src/messages.fi.xtb --locale=fi --i18nFormat=xtb
# Generate the metadata for the third-party modules
node ./node_modules/@angular/tsc-wrapped/src/main -p third_party_src/tsconfig-build.json
./node_modules/.bin/ngc -p tsconfig-build.json --i18nFile=src/messages.fi.xlf --locale=fi --i18nFormat=xlf
./node_modules/.bin/ng-xi18n -p tsconfig-build.json --i18nFormat=xlf
./node_modules/.bin/ng-xi18n -p tsconfig-build.json --i18nFormat=xmb
node test/test_summaries.js
node test/test_ngtools_api.js
./node_modules/.bin/jasmine init
# Run compiler-cli integration tests in node
./node_modules/.bin/webpack ./webpack.config.js
./node_modules/.bin/jasmine ./all_spec.js
# Compile again with a differently named tsconfig file
mv tsconfig-build.json othername.json
./node_modules/.bin/ngc -p othername.json
)
| true |
e95081263248d3c1b847cd5b5a42a5bd03b2a345 | Shell | jbree/CollisionSimulator | /run-simulation.sh | UTF-8 | 223 | 3.09375 | 3 | [] | no_license | #! /usr/bin/env sh
if [[ -z $2 ]]
then
echo "Usage: ./run-simulation.sh SIM_EXE_PATH CONFIG_DIR"
exit 1
fi
for CONFIG in $2/*.siminput
do
$1 --input $CONFIG
echo
$1 --input $CONFIG --vcs
echo
done
| true |
bf161ba2f2864b2e33310ed6bfa07c6fcf5406a0 | Shell | gilbertosg/shells-bashLinux | /Shell_Practica_Final_3Particiones | UTF-8 | 187 | 2.84375 | 3 | [] | no_license | #!/bin/bash
echo Particiones Primarias:
echo -n 'Raíz(/): '
df -h | awk '/sda4/{print $2}'
echo -n 'Memoria Swap: '
cat /proc/meminfo | awk '/SwapTotal:/ { print $2/1048576 " GB"}'
| true |
40665437c70f4a52947862b73c260d0f7e3d7c37 | Shell | Chinggis6/TengeridHome | /.local/share/qutebrowser/userscripts/uselang | UTF-8 | 172 | 2.546875 | 3 | [] | no_license | #!/usr/bin/bash
# View tools and so in English
# chmod +x
# bind --force <ctrl-f1> spawn --userscript uselang
key="uselang=en"
echo "open $QUTE_URL?$key" >> $QUTE_FIFO
| true |
ed4555e13d802c1bad47113d1900cd41d3596d66 | Shell | matheushjs/projeto_bd | /scripts/developer/wannaCry.sh | UTF-8 | 790 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Usage: ./wannaCry.sh BEGIN_NUM_INSERTS END_NUM_INSERTS NUM_LOOPS
# Variáveis de Ambiente
# RANGES
BEGIN_NUM_INSERTS=${1:-1}
END_NUM_INSERTS=${2:-500}
NUM_LOOPS=${3:-100}
# FILENAMES
OUTPUT=errors.txt
CLEAN_OUTPUT=cleanErrors.txt
# PATHS
PROGRAM_PATH=./sqlFiles/run.sh
# Limpa o arquivo de output
echo -n > $OUTPUT
# Realiza um teste exaustivo
for i in `seq $BEGIN_NUM_INSERTS $END_NUM_INSERTS`;
do
for j in `seq 1 $NUM_LOOPS`;
do
echo -ne "Progress: [$i $j]\r"
$PROGRAM_PATH $i >> $OUTPUT
egrep "DELETE|ROLLBACK|CREATE|DROP|COMMIT|BEGIN|ALTER|INSERT" -v $OUTPUT > $CLEAN_OUTPUT
done
done
# Imprime os resultados
echo "Cleaned Outputs:"
cat $CLEAN_OUTPUT | sort | uniq -c | sort -k1 -n
echo "Original Results:"
cat $OUTPUT | sort | uniq -c | sort -k1 -n | true |
687a2d95c66ef0e7e5cfee82e1c793e5ff3af44c | Shell | hagnat/bash.d | /global-conf/30-git-global-ignore | UTF-8 | 544 | 3.578125 | 4 | [] | no_license | #!/bin/bash
tput el
echo -n "configuring global gitignore" $'\r'
GITIGNORE_GLOBAL=${HOME}/.gitignore_global
GITIGNORE_TEMPLATES=${HOME}/.bash.d/templates/gitignore/*.gitignore
touch ~/.gitignore_global
git config --global core.excludesfile ${GITIGNORE_GLOBAL}
for f in ${GITIGNORE_TEMPLATES}
do
message="# autogenerated by template ${f}"
grep "${message}" ${GITIGNORE_GLOBAL} >/dev/null
if [[ 1 == $? ]]; then
printf "\n\n${message}\n" >> ${GITIGNORE_GLOBAL}
cat $f >> ${GITIGNORE_GLOBAL}
fi
done
return;
| true |
e7719c395064505c8bb37bb158fe56d955058331 | Shell | bllewell/yugabyte-db | /yugabyted-ui/apiserver/scripts/openapi_bundle.sh | UTF-8 | 489 | 3.046875 | 3 | [
"OpenSSL",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
export NPM_BIN=`npm bin -g 2>/dev/null`
pushd ../conf/openapi
echo "Processing paths component in openapi ..."
pushd paths
rm -rf _index.yaml
if [[ $OSTYPE == darwin* ]]; then
yq eval-all '. as $item ireduce ({}; . * $item )' $(ls *.yaml) > _index.yaml
else
yq eval-all '. as $item ireduce ({}; . * $item )' $(ls -r *.yaml) > _index.yaml
fi
popd
echo "Running bundle on openapi spec ..."
$NPM_BIN/openapi bundle ./openapi.yaml --output ../openapi.yml
popd
| true |
35de8bd48761d9e04e4d6ac73eee59d1cbb807bd | Shell | nings/testbed | /script/test.sh | UTF-8 | 187 | 2.625 | 3 | [] | no_license | #!/bin/bash
COUNTER=0
while [ $COUNTER -lt 49 ]; do
# ./start_program_on_node.sh node-$COUNTER haggle --non-interactive
sudo xm destroy node-$COUNTER
let COUNTER=$COUNTER+1
done
| true |
d9cd56cfc39210cb25b1ba220359fb850086c363 | Shell | dcorderoch/git-hat-tricks-laboratorio-git | /.hooks/prepare-commit-msg | UTF-8 | 475 | 3.671875 | 4 | [] | no_license | #!/bin/bash
COMMIT_MSG_FILE=$1
COMMIT_SOURCE=$2
SHA1=$3
cd "$(git rev-parse --show-toplevel)"
# check commit message for JIRA-###
text=$(head -n 1 "$COMMIT_MSG_FILE" | grep '^JIRA-[0-9]\{3\}')
if [ "$text" = "" ]; then
exit 1
fi
# check new files for copyright notice
files=$(git diff --cached --diff-filter=ACMR --name-only | grep '\.\(cs\|java\|js\|py\|ts\)')
for f in "$files";
do
match=$(grep -Ei copyright "$f")
if [ "$match" = "" ]; then
exit 1
fi
done
| true |
18d3c069863192b21c5aa2517cf5b43a614dc1a6 | Shell | zucker-chen/libs | /scripts/unjffs2.sh | UTF-8 | 4,769 | 4.0625 | 4 | [] | no_license | #/bin/sh
# filename: unjffs2.sh
# last modified: 2017-6-21
#
# unzip mtd files, like: jffs2.
sh_args=$@
mtd_size=16384 # K
img_file=rootfs.jffs2 # jffs2...
img_type=jffs2 # file -b rootfs.jffs2 | cut -d ' ' -f2
uzip_mode=mtdram
mnt_dir=./mnt
out_dir=./out
usage()
{
printf "Usage: %s [-t <type>] [-m <mode>] [-o <dir>] [-s] [-h] <file>\n" "$(basename "$0")"
printf "\n"
printf "Options:\n"
printf " file input image file, like: rootfs.jffs2\n"
printf " -t <type> image type(jffs2...), default jffs2\n"
printf " -m <mode> uzip mode(mtdram or loop), default mtdram, tips: mtdram need enough ram capacity\n"
printf " -o <dir> output dir name, default ./out\n"
printf " -s assign use file size to mtd total size(K), default 16MB\n"
printf " -h print usage and exit\n"
}
opts_parse()
{
local s_parm=0
while getopts :t:m:o::sh option;
do
case "${option}" in
t)
img_type="${OPTARG}"
;;
s)
s_parm=1
;;
m)
uzip_mode="${OPTARG}"
;;
o)
out_dir="${OPTARG}"
;;
h)
usage
exit 0
;;
--)
;;
\?)
printf "Error: Invalid option: -%s\n" "${OPTARG}" >&2
usage
exit 1
;;
:)
printf "Error: Option -%s requires an argument\n" "${OPTARG}" >&2
usage
exit 1
;;
esac
done
shift $((OPTIND - 1))
if [ -z "$1" ]; then
printf "Error: Missing argument\n" >&2
usage
exit 1
fi
img_file="$1"
[ $s_parm -eq 1 ] && mtd_size=$(du -bk $img_file | awk '{print $1}')
echo "\
sh_args=$sh_args
mtd_size=$mtd_size
img_file=$img_file
img_type=$img_type
mnt_dir=$mnt_dir
out_dir=$out_dir\
" 1>/dev/null 2>&1
}
# root permission check, this shell will be executed again if no permission
# 0: successed, 1: failed
root_check()
{
if [ $(id -u) -ne 0 ]; then
echo "sorry, you must have super privilege!" >&2
#select choose in 'I have root passwd' 'I have sudo privilege' 'try again' 'ignore' 'aboart'
printf "Input your choose: 'root' or 'sudo' or 'try' or 'ignore' or 'aboart' ?\n>"
while read choose
do
case $choose in
*root*)
su -c "sh $0 $sh_args"
break
;;
*sudo*)
sudo sh $0 $sh_args # force
break
;;
try*)
eval sh $0 $sh_args
[ $? == 0 ] && break
;;
ignore)
return 0
;;
aboart)
break
;;
*)
echo "Invalid select, please try again!" >&2
continue
;;
esac
echo "Install cross tools failed!" >&2
done
return 1
fi
return 0
}
loop_mount()
{
# make a block device with image file
loop=$(losetup -f)
losetup $loop $img_file
# convert it to a mtd device
modprobe block2mtd block2mtd=$loop,65536 # 65536 = erasesize 64K, must equal erasesize of 'mkfs.jffs2 -e'
# modprobe mtdblock, create /dev/mtdblock0
modprobe mtdblock
# modprobe jffs2, support mount -t auto ...
#modprobe jffs2
# mount
[ ! -d $mnt_dir ] && mkdir $mnt_dir
mount -t $img_type -o ro /dev/mtdblock0 $mnt_dir
# copy dir
cp -raT $mnt_dir $out_dir
chmod 777 $out_dir
}
loop_unmount()
{
umount $mnt_dir
[ -d $mnt_dir ] && rm -r $mnt_dir
rmmod mtdblock
rmmod block2mtd
losetup -d $loop
}
mtdram_mount()
{
# modprobe mtdram, with mtd_size(unit KB), create /proc/mtd
modprobe mtdram total_size=$mtd_size
# modprobe mtdchar
#modprobe mtdchar
# write the image to /dev/mtd0
dd if=$img_file of=/dev/mtd0 1>/dev/null 2>&1
# modprobe mtdblock, create /dev/mtdblock0
modprobe mtdblock
# mount
[ ! -d $mnt_dir ] && mkdir $mnt_dir
mount -t $img_type -o ro /dev/mtdblock0 $mnt_dir
# copy dir
cp -raT $mnt_dir $out_dir
chmod 777 $out_dir
}
mtdram_umount()
{
umount $mnt_dir
[ -d $mnt_dir ] && rm -r $mnt_dir
rmmod mtdblock
#rmmod mtdchar
rmmod mtdram
}
main()
{
opts_parse $sh_args
root_check
[ $? -ne 0 ] && exit 0 # exit current shell if no permission
if [ $uzip_mode = "mtdram" ]; then
mtdram_mount
mtdram_umount
echo "<mtdram>: uzip $img_type file $img_file done, output to $out_dir."
elif [ $uzip_mode = "loop" ]; then
loop_mount
loop_unmount
echo "<loop>: uzip $img_type file $img_file done, output to $out_dir."
else
echo "Error uzip mode input, only support 'mtdram' or 'loop'"
fi
}
main
# tips:
# mtdram need enough ram capacity.
# loop need assign erasesize, must equal the erasesize of 'mkfs.jffs2 -e'
# try change unzip mode '-m mtdram|loop' if unzip failed
| true |
dc411b78e15912f672f309401c31d53881cb395b | Shell | mbelanabel/repositorio-scripts-alumnos | /36_JuanTonda/36_000_prueba_select_JuanT.sh | UTF-8 | 457 | 3.328125 | 3 | [] | no_license | # prueba05.sh
# jt. Octubre 2017
# pasamos por parametro ARCHIVO o DIRECTORIO;
# lo borramos (si/no)
# falta depurar si es archivo o directorio
# no se elimina igual
PS3="Estas seguro de borrarlo ? "
OPCIONES="si no fin"
x="$1"
op=0
select opt in $OPCIONES
do
echo "BORRAR $1"
if [ $opt = "si" ]; then
echo "eliminamos $1"
opt="fin"
elif [ $opt = "no" ]; then
echo "no eliminamos $1"
opt="fin"
fi
if [ $opt = "fin" ]; then
break
fi
done
| true |
3e4978c25f845372fd153119dbd0e44fbfb39033 | Shell | akos-sereg/chart-data-provider | /spec/test.sh | UTF-8 | 429 | 3 | 3 | [] | no_license | #!/bin/bash
cd ..
TEST_TIMEZONES=('Canada/Pacific' 'Pacific/Fiji' 'Pacific/Honolulu' 'Asia/Shanghai' 'Australia/Sydney' 'America/Havana' 'America/New_York'
'America/Los_Angeles' 'Asia/Dubai' 'Europe/Moscow' 'Europe/London' 'Europe/Budapest')
for timeZone in "${TEST_TIMEZONES[@]}"
do
echo $timeZone | sudo tee /etc/timezone
dpkg-reconfigure --frontend noninteractive tzdata
ntpdate ntp.ubuntu.com
jasmine
done
| true |
77fbf07597666b8ab5ee6db212b9ab4db226e3e3 | Shell | micahstubbs/blockbuilder-graph-search-index | /load-csv-graph-into-neo4j.sh | UTF-8 | 1,244 | 2.515625 | 3 | [] | no_license | # store some paths as variable
# NEO4J_HOME=/Users/m/Documents/Neo4j
GRAPH_DATABASE_DIR=data/databases/blockbuilder-graph-search.db
CSV_DATA_DIR=/Users/m/workspace/blockbuilder-graph-search-index/data/csv-graphs-for-neo4j
# build up paths to input csv files
# nodes source data
USERS=$CSV_DATA_DIR/users.csv
FUNCTIONS=$CSV_DATA_DIR/functions.csv
BLOCKS=$CSV_DATA_DIR/combined-blocks.csv
README_BLOCKS=$CSV_DATA_DIR/readme-links-blocks.csv
COLORS=$CSV_DATA_DIR/colors.csv
# relationships source data
USERS_BUILT_BLOCKS_RELATIONSHIPS=$CSV_DATA_DIR/user-built-block-relationships.csv
BLOCK_CALLS_FUNCTION_RELATIONSHIPS=$CSV_DATA_DIR/block-calls-function-relationships.csv
README_LINKS_RELATIONSHIPS=$CSV_DATA_DIR/readme-links-relationships.csv
BLOCK_USES_COLOR_RELATIONSHIPS=$CSV_DATA_DIR/block-uses-color-relationships.csv
cd $NEO4J_HOME
# mv $NEO4J_HOME/$GRAPH_DATABASE_DIR $NEO4J_HOME/$GRAPH_DATABASE_DIR.bak
./bin/neo4j-import --into $GRAPH_DATABASE_DIR --nodes $BLOCKS --nodes $USERS --nodes $FUNCTIONS --nodes $COLORS --relationships $README_LINKS_RELATIONSHIPS --relationships $USERS_BUILT_BLOCKS_RELATIONSHIPS --relationships $BLOCK_CALLS_FUNCTION_RELATIONSHIPS --relationships $BLOCK_USES_COLOR_RELATIONSHIPS --multiline-fields=true | true |
f3e9d983bf7d11911d2bb5ff6493e7f26c193889 | Shell | Agraphie/zversion | /scripts/ssh/dropbearCVE2012-0920.sh | UTF-8 | 592 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#Output file name: dropbear_cve_2012-0920
printf "Script name: $0\n"
printf "Input file: $1\n"
printf '%s\n' '----------------------------------------------'
printf "0.52 <= Dropbear Version <= 2011.54: `grep "dropbear" $1 | jq 'select(.Vendor =="dropbear" and .CanonicalVersion <= "2011005400000000" and .CanonicalVersion >= "0000005200000000" and .SoftwareVersion != "") | .SoftwareVersion' | wc -l`"
printf "\n"
printf "Total dropbear: `grep "dropbear" $1 | jq 'select(.Vendor =="dropbear") | .Vendor' | wc -l`\n"
printf '%s\n' '----------------------------------------------' | true |
042a7c95c484267dfe17f0a4edd574bea9fabc1d | Shell | robwithhair/dockerfiles | /postgres-backup-s3/integration-tests.sh | UTF-8 | 1,460 | 3.4375 | 3 | [
"MIT"
] | permissive | #! /bin/sh
set -e
RETRIES=60
until psql -h postgres -U user -c "select 1" > /dev/null 2>&1 || [ $RETRIES -eq 0 ]; do
echo "Waiting for postgres server, $((RETRIES--)) remaining attempts..."
sleep 3
done
OUT=$(psql -h postgres -U user -v ON_ERROR_STOP=1 -1 -f sample_sql_file.sql)
EXPECTED="SET
SET
SET
SET
SET
set_config
------------
(1 row)
SET
SET
SET
SET
CREATE EXTENSION
COMMENT
SET
SET
CREATE TABLE
ALTER TABLE
CREATE SEQUENCE
ALTER TABLE
ALTER SEQUENCE
ALTER TABLE
COPY 10
setval
--------
1
(1 row)
ALTER TABLE"
if [ "$OUT" != "$EXPECTED" ]; then
echo "OUT = '$OUT'"
echo "Not '$EXPECTED'"
exit 1
fi
pg_dump -h postgres -U user | tail -n +7 > sample_output.sql
DIFF=$(tail -n +7 sample_sql_file.sql | diff - sample_output.sql || true)
if [ "$DIFF" != "" ]; then
echo "Expected output from diff was ''"
echo "Actual output was '$DIFF'"
exit 1
fi
if [ "${S3_S3V4}" = "yes" ]; then
aws configure set default.s3.signature_version s3v4
fi
OUT=$(sh backup.sh)
EXPECTED="Creating dump of user database from postgres...
Uploading dump to flowmoco-s3-backup-test
SQL backup uploaded successfully"
if [ "$OUT" != "$EXPECTED" ]; then
echo "Output '$OUT' does not equal expected '$EXPECTED'"
exit 1
fi
# apt-get update && apt-get install -y wget
# wget "https://sample-videos.com/sql/Sample-SQL-File-10rows.sql"
# echo "Cleaning Up..."
# rm Sample-SQL-File-10rows.sql
echo "This worked ok"
| true |
6748653c2559e1a60c97a74139c4f079ec5fb1c0 | Shell | ElectricRCAircraftGuy/eRCaGuy_hello_world | /bash/ansi_text_format_lib.sh | UTF-8 | 8,915 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# This file is part of eRCaGuy_hello_world: https://github.com/ElectricRCAircraftGuy/eRCaGuy_hello_world
# GS
# Feb. 18 2023
# https://gabrielstaples.com/
# https://www.electricrcaircraftguy.com/
# A general purpose ANSI text formatting library in Bash. Easily make your terminal text bold, red,
# blue, blinking, inverted colors (highlighted), etc.
# - See all the possible codes on Wikipedia, starting here:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
#
# Status: done and works!
# I need to add more ANSI codes, but this is a very-well-done and functional library now! I am
# very pleased with it.
# keywords: text formatting in your terminal in Bash (or any language for that matter, C, C++,
# Python, Go, etc.)
# Check this script with:
#
## shellcheck bash/ansi_text_format_lib.sh
#
# Run command:
#
# bash/ansi_text_format_lib.sh
#
# Source (import) command to get access to any functions herein:
# [my answer] https://stackoverflow.com/a/62626515/4561887):
#
# . bash/ansi_text_format_lib.sh
# References:
# 1. ANSI color codes, formatting, or highlighting
# 1. ***** https://github.com/ElectricRCAircraftGuy/eRCaGuy_hello_world/blob/master/bash/ansi_color_codes.sh
# 2. *****+ "ANSI escape code" on Wikipedia:
# 1. https://en.wikipedia.org/wiki/ANSI_escape_code#3-bit_and_4-bit
# 2. https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
# 1. *****+ https://github.com/ElectricRCAircraftGuy/eRCaGuy_dotfiles/blob/master/home/.bash_aliases
# - The area at and above my `gs_git_show_branch_and_hash` func
# 1.
# Notes:
# 1. NB: WHEN USING `echo`, DON'T FORGET THE `-e` to escape the color codes! Ex: <======== REQUIRED TO GET THE FORMATTING WHEN USING `echo` ===========
# echo -e "some string with color codes in it"
# TODO (newest on bottom):
# 1. [ ] Take a whole day sometime and go through the Wikipedia article carefully
# (https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters),
# adding **all** formatting codes and possibilities to the list below.
ANSI_START="\e[" # start of an ANSI formatting sequence
#
# --------- ANSI numeric codes start ----------
# - these codes go **between** `ANSI_START` and `ANSI_END`
#
ANSI_BOLD=";1"
ANSI_FAINT=";2"
ANSI_ITALIC=";3"
ANSI_UNDERLINE=";4"
ANSI_SLOW_BLINK=";5"
ANSI_FAST_BLINK=";6" # (not widely supported--does NOT work in a Linux Ubuntu bash shell)
ANSI_INVERTED=";7" # inverted colors (ie: the text looks **highlighted**); see code 7 here:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
# This swaps foreground and background colors; "inconsistent emulation".
ANSI_HIGHLIGHTED="$ANSI_INVERTED" # alias
ANSI_CROSSED_OUT=";9"
ANSI_NORMAL_INTENSITY=";22" # neither bold nor faint
# Foreground (FG) colors: 30-97 (not continuous); TODO: ADD THE REST OF THE CODES!
# - See: https://en.wikipedia.org/wiki/ANSI_escape_code#3-bit_and_4-bit
ANSI_FG_BLK=";30" # foreground color black
ANSI_FG_RED=";31" # foreground color red
ANSI_FG_BLU=";34" # foreground color blue
ANSI_FG_BR_BLU=";94" # foreground color bright blue
ANSI_FG_BR_YLW=";93" # foreground color bright yellow
# Background (BG) colors: 40-107 (not continuous); TODO: ADD THE REST OF THE CODES!
# - See: https://en.wikipedia.org/wiki/ANSI_escape_code#3-bit_and_4-bit
ANSI_BG_BLK=";40" # background color black
ANSI_BG_RED=";41" # background color red
ANSI_BG_BLU=";44" # background color blue
ANSI_BG_BR_BLU=";104" # background color bright blue
ANSI_BG_BR_YLW=";103" # background color bright yellow
#
# --------- ANSI numeric codes end ------------
#
ANSI_END="m" # end of an ANSI formatting sequence
#
# turn OFF ANSI formatting; ie: disable all formatting by specifying `ANSI_START` and
# `ANSI_END` withOUT any formatting codes in between!
ANSI_OFF="${ANSI_START}${ANSI_END}"
# Let's use `f` for "format on" and `F` for "format off".
# Ex:
# format on; set whatever format codes you want between `ANSI_START` and `ANSI_END`
f="${ANSI_START}${ANSI_INVERTED}${ANSI_END}"
# format off; will always be this
F="${ANSI_OFF}"
# Set the `f` format variable by writing any and all passed-in ANSI numeric codes in between
# `ANSI_START` and `ANSI_END`.
#
# Usage:
# set_f ansi_format_codes...
#
# Prints to stdout:
# NA
#
# Example:
# set_f ANSI_BOLD ANSI_UNDERLINE ANSI_SLOW_BLINK ANSI_FG_BR_BLU
# echo -e "${f}This string is bold, underlined, blinking, bright blue.${F} This is not."
#
set_f() {
f="$(make_f_str "$@")"
}
# Make an ANSI format string. This is really useful when you want to make multiple `f` type format
# strings to be used in the same `echo -e` or print command.
#
# Usage:
# make_f_str ansi_format_codes...
#
# Prints to stdout:
# The formatted f string for you to manually store into your own format variable.
#
# Example:
# f1="$(make_f_str ANSI_ITALIC ANSI_FG_RED)" # italic red font
# f2="$(make_f_str ANSI_UNDERLINE ANSI_FG_BLU)" # underlined blue font
# echo -e "${f1}I am italic red.${F} ${f2}I am underlined blue.${F} I am normal."
#
make_f_str() {
format_str='${ANSI_START}'
for format_arg in "$@"; do
format_str="$(printf "%s\${%s}" "$format_str" "$format_arg")"
done
format_str="$format_str"'${ANSI_END}'
# now do variable substitution in that string to replace all `${variable_name}` parts with the
# value of those variables
format_str="$(eval echo "$format_str")"
echo "$format_str"
}
# A simple wrapper to wrap your text output you'd like to print with the currently-set `f` format
# string.
#
# Example Usage:
# # source this file
# . ./ansi_text_format_lib.sh
# # Set your format string
# set_f ANSI_BOLD ANSI_UNDERLINE ANSI_SLOW_BLINK ANSI_FG_BR_BLU
# # view the current format string
# echo "$f"
# # Now print something in that format
# ansi_echo "This library works very well! I am very pleased with its usage and format! :)"
#
ansi_echo() {
echo -e "${f}$@${F}"
}
# An alternative to (and probably better-named than) `ansi_echo`.
f_echo() {
ansi_echo "$@"
}
run_tests() {
echo "Running tests."
# Test 4 ways to use this library:
# 1. Do it manually with the ANSI codes directly.
echo -e "${ANSI_START}${ANSI_INVERTED}${ANSI_END}I am highlighted.${ANSI_OFF} I am normal."
# or
echo -e "${ANSI_START}${ANSI_INVERTED}${ANSI_END}I am highlighted.${F} I am normal."
# 2. Manually use the ANSI codes to create your own format f variables.
f="${ANSI_START}${ANSI_INVERTED}${ANSI_END}"
echo -e "${f}I am highlighted.${F} I am normal."
# 3. Call `set_f`, then use the `f` format variable.
set_f ANSI_BOLD ANSI_UNDERLINE ANSI_SLOW_BLINK ANSI_FG_BR_BLU
echo -e "${f}This string is bold, underlined, blinking, bright blue.${F} This is not."
# 4. Call `make_f_str` and then write its output to your own format variables.
f1="$(make_f_str ANSI_ITALIC ANSI_FG_RED)" # italic red font
f2="$(make_f_str ANSI_UNDERLINE ANSI_FG_BLU)" # underlined blue font
echo -e "${f1}I am italic red.${F} ${f2}I am underlined blue.${F} I am normal."
# Same as above. Apparently the `${F}` is NOT required between subsequent format settings
# unless you need to disable certain formatting, such as highlighting, background colors, or
# underlining, for instance, on the space characters between the formatted strings as well.
echo -e "${f1}I am italic red. ${f2}I am underlined blue.${F} I am normal."
}
main() {
echo "Running main."
run_tests
}
# Determine if the script is being sourced or executed (run).
# See:
# 1. "eRCaGuy_hello_world/bash/if__name__==__main___check_if_sourced_or_executed_best.sh"
# 1. My answer: https://stackoverflow.com/a/70662116/4561887
if [ "${BASH_SOURCE[0]}" = "$0" ]; then
# This script is being run.
__name__="__main__"
else
# This script is being sourced.
__name__="__source__"
fi
# Only run `main` if this script is being **run**, NOT sourced (imported).
# - See my answer: https://stackoverflow.com/a/70662116/4561887
if [ "$__name__" = "__main__" ]; then
main
fi
# SAMPLE OUTPUT:
#
# 1) WHEN RUN.
# - NB: run this yourself to see the pretty formatting!
#
# eRCaGuy_hello_world/bash$ ./ansi_text_format_lib.sh
# Running main.
# Running tests.
# I am highlighted. I am normal.
# I am highlighted. I am normal.
# I am highlighted. I am normal.
# This string is bold, underlined, blinking, bright blue. This is not.
# I am italic red. I am underlined blue. I am normal.
# I am italic red. I am underlined blue. I am normal.
#
#
# 2) WHEN SOURCED (no output)
| true |
36fdeff7da620f4573adbf77831c8d76ca217df8 | Shell | edf-hpc/slurm-llnl-misc-plugins | /check_node_health/check_node_health.sh | UTF-8 | 5,220 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#####################################################################
# This script will be executed on each compute node to verify
# that the node is working properly.
#####################################################################
# Copyright (C) 2013 EDF SA
# Contact:
# CCN - HPC <dsp-cspit-ccn-hpc@edf.fr>
# 1, Avenue du General de Gaulle
# 92140 Clamart
#
#Authors: Antonio J. Russo <antonio-externe.russo@edf.fr>
#This program is free software; you can redistribute in and/or
#modify it under the terms of the GNU General Public License,
#version 2, as published by the Free Software Foundation.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#On Calibre systems, the complete text of the GNU General
#Public License can be found in `/usr/share/common-licenses/GPL'.
#####################################################################
PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin"
DEFCFG=/etc/default/checknodehealth
STATUS=0
REASONS=""
NETWK=0
slurm_state()
{
STATE=$(sinfo --noheader --node=$(hostname -s) --format=%t)
case ${STATE} in
'alloc'|'idle'|'mix')
return 0
;;
*)
return 1
;;
esac
}
numbers_proc ()
{
PHYSICAL=$(grep -i "physical id" /proc/cpuinfo | sort -u | wc -l)
CPUCORES=$(grep -i "cpu cores" /proc/cpuinfo | sort -u | awk -F ": " '{print $2}')
REALPROC=$(grep -i "processor" /proc/cpuinfo | sort -u | wc -l)
let "NOHTPROC = PHYSICAL * CPUCORES"
if [ ${NOHTPROC} -ne ${REALPROC} ]
then
STATUS=1
MSG="Unexpected number of processors"
REASONS="${MSG}"
fi
}
mount_points ()
{
for volume in ${MOUNTS}
do
mountpoint -q ${volume}
if [ ${?} -ne 0 ]
then
STATUS=1
MSG="${volume} umounted"
REASONS="${REASONS:+$REASONS, }${MSG}"
else
USAGE=$(df -P | awk 'NR==1 {next} /\${volume}$/ { print $5 }')
if [ ${USAGE} > ${FSLIMIT} ]
then
STATUS=1
MSG="${volume} usage ${USAGE}%"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
fi
done
}
auth_ldap ()
{
PMUSERS=( $(members ${PMGROUP} 2> /dev/null) )
if [ ${#PMUSERS[@]} -gt 0 ]
then
IRD=$(shuf -i 0-${#PMUSERS[@]} -n 1)
RDUSER=${PMUSERS[${IRD}]}
else
RDUSER='userdoesnotexist'
fi
id ${RDUSER} &> /dev/null
if [ ${?} -ne 0 ]
then
STATUS=1
MSG="LDAP server is unavailable"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
}
pkgs_inst ()
{
PKGSINST=$(dpkg -l | grep ^ii | wc -l)
if [ ${PKGSINST} -lt ${NRPKGS} ]
then
STATUS=1
MSG="Unexpected number of packages"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
if [ $(uname -r) != ${KERNELVER} ]
then
STATUS=1
MSG="Unexpected kernel version"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
}
network_up ()
{
ping -c 1 ${2} &> /dev/null || ping -c 1 ${3} &> /dev/null
if [ ${?} -ne 0 ]
then
STATUS=1
NETWK=1
MSG="${1} is down"
REASONS="${REASONS:+$REASONS, }${MSG}"
else
if [ ${1} == "Infiniband" ]
then
# Get the rate of port 1 using ibstat.
# DISCLAIMER: Only the rate of port 1 is checked here.
# If you want to check the rate of another port, you
# have to modify the command below.
RATE=$(ibstat | grep 'Rate' | head -1 | cut -c 9-10 ; exit ${PIPESTATUS[0]})
if [ ${?} -ne 0 ]
then
STATUS=1
MSG="Unable to run ibstat to get current IB rate"
REASONS="${REASONS:+$REASONS, }${MSG}"
else
if [ ${RATE} -ne ${IBLIMIT} ]
then
STATUS=1
MSG="${1} rate is is less than ${IBLIMIT}"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
fi
fi
fi
}
ntp_sync ()
{
ntpq -p ${2} &> /dev/null
if [ ${?} -ne 0 ]
then
STATUS=1
MSG="NTP server is unavailable"
REASONS="${REASONS:+$REASONS, }${MSG}"
fi
}
check_node ()
{
numbers_proc
pkgs_inst
network_up "Ethernet" ${ETHMASTER} ${ETHBACKUP}
network_up "Infiniband" ${IBMASTER} ${IBBACKUP}
if [ ${NETWK} -eq 0 ]
then
mount_points
auth_ldap
ntp_sync
fi
if [[ ${STATUS} -eq 0 ]]
then
REASONS="Node OK"
fi
}
drain_node ()
{
if [[ ${STATUS} -eq 1 ]]
then
scontrol update NodeName=$(hostname -s) State=DRAIN Reason="${REASONS}"
fi
}
if [ -f ${DEFCFG} ]
then
source ${DEFCFG}
else
exit 1
fi
case ${1} in
--no-slurm)
check_node
echo ${REASONS}
;;
*)
if slurm_state
then
check_node
drain_node
fi
;;
esac
exit ${STATUS}
| true |
1366d101fbb43d28e1ed434ff3359bc4b870bf7a | Shell | hongson1981/cdr-plugin-folder-to-folder | /infra/packer/scripts/config/init-config.sh | UTF-8 | 2,946 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# defining vars
DEBIAN_FRONTEND=noninteractive
KERNEL_BOOT_LINE='net.ifnames=0 biosdevname=0'
# sudo without password prompt
echo "$USER ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$USER >/dev/null
# update packages
sudo apt update -y && sudo apt upgrade -y
# cloning vmware scripts repo
git clone --single-branch -b main https://github.com/k8-proxy/vmware-scripts.git ~/scripts
sudo apt update -y && sudo apt upgrade -y
sleep 10s
# install needed packages
sudo apt install -y telnet tcpdump open-vm-tools net-tools dialog curl git sed grep fail2ban
sudo systemctl enable fail2ban.service
sudo tee -a /etc/fail2ban/jail.d/sshd.conf << EOF > /dev/null
[sshd]
enabled = true
port = ssh
action = iptables-multiport
logpath = /var/log/auth.log
bantime = 10h
findtime = 10m
maxretry = 5
EOF
sudo systemctl restart fail2ban
# switching to predictable network interfaces naming
grep "$KERNEL_BOOT_LINE" /etc/default/grub >/dev/null || sudo sed -Ei "s/GRUB_CMDLINE_LINUX=\"(.*)\"/GRUB_CMDLINE_LINUX=\"\1 $KERNEL_BOOT_LINE\"/g" /etc/default/grub
# configure cloud-init
if [ -f /tmp/setup/env ] ; then
source /tmp/setup/env
fi
SSH_PASSWORD=${SSH_PASSWORD:-glasswall}
sudo sed -Ei "s|ssh_pwauth:(.*)|ssh_pwauth: true|g" /etc/cloud/cloud.cfg
sudo sed -Ei "s|lock_passwd:(.*)|lock_passwd: false|g" /etc/cloud/cloud.cfg
sudo yq w -i /etc/cloud/cloud.cfg system_info.default_user.plain_text_passwd $SSH_PASSWORD
sudo tee -a /etc/cloud/cloud.cfg <<EOF
preserve_hostname: true
EOF
# remove swap
sudo swapoff -a && sudo rm -f /swap.img && sudo sed -i '/swap.img/d' /etc/fstab && echo Swap removed
# update grub
sudo update-grub
# installing the wizard
sudo install -T ~/scripts/scripts/wizard/wizard.sh /usr/local/bin/wizard -m 0755
# installing initconfig ( for running wizard on reboot )
sudo cp -f ~/scripts/scripts/bootscript/initconfig.service /etc/systemd/system/initconfig.service
sudo install -T ~/scripts/scripts/bootscript/initconfig.sh /usr/local/bin/initconfig.sh -m 0755
sudo systemctl daemon-reload
# enable initconfig for the next reboot
sudo systemctl enable initconfig
# # install node exporter
# wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz -qO- | tar xz -C /tmp/
# sudo install -T /tmp/node_exporter-1.0.1.linux-amd64/node_exporter /usr/local/bin/node_exporter -m 0755
# # create node exporter user
# sudo useradd node_exporter -s /sbin/nologin
# # create node exporter service
# sudo cp ~/scripts/visualog/monitoring-scripts/node_exporter.service /etc/systemd/system/node_exporter.service
# sudo mkdir -p /etc/prometheus
# # install node exporter configuration
# sudo cp ~/scripts/visualog/monitoring-scripts/node_exporter.config /etc/prometheus/node_exporter.config
# sudo systemctl daemon-reload
# # start and enable node_exporter service
# sudo systemctl enable --now node_exporter
# remove vmware scripts directory
rm -rf ~/scripts/
| true |
d04f9d49a9124fcbe7a462228df8ff73ad79ff2e | Shell | adrukh/cfgov-refresh | /shell.sh | UTF-8 | 1,018 | 3.609375 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
docker ps > /dev/null 2>&1
if [ $? -ne 0 ]; then
# If the user hasn't eval'ed the docker-machine env, do it for them
if [ -z ${DOCKER_HOST} ] ||
[ -z ${DOCKER_CERT_PATH} ] ||
[ -z ${DOCKER_TLS_VERIFY} ] ||
[ -z ${DOCKER_MACHINE_NAME} ]; then
docker-machine status > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Can't find a working Docker -- please see our documentation:"
echo ""
echo "* Docker-based setup:"
echo " https://cfpb.github.io/cfgov-refresh/installation/#docker-compose-installation"
echo ""
echo "* Docker usage guide:"
echo " https://cfpb.github.io/cfgov-refresh/usage/#usage-docker"
echo ""
exit
else
eval $(docker-machine env)
fi
fi
fi
if [ -z "$*" ]; then
docker-compose exec python bash -c "source .env && bash"
else
docker-compose exec python bash -c "source .env && bash -c $*"
fi
| true |
9cdabc4537f570280166d545ce46e98c38a4845c | Shell | polycube-network/polycube | /scripts/setup_veth.sh | UTF-8 | 684 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | # This script is used for creating three pairs of veth interfaces
# (vethN <-> vethN_), an endpoint is put in a network namespace nsN
# and is configured with the 10.0.0.N IP.
# It is mainly used for developers for performing quick tests.
#! /bin/bash
set -x
for i in `seq 1 3`;
do
sudo ip netns del ns${i} > /dev/null 2>&1 # remove ns if already existed
sudo ip link del veth${i} > /dev/null 2>&1
sudo ip netns add ns${i}
sudo ip link add veth${i}_ type veth peer name veth${i}
sudo ip link set veth${i}_ netns ns${i}
sudo ip netns exec ns${i} ip link set dev veth${i}_ up
sudo ip link set dev veth${i} up
sudo ip netns exec ns${i} ifconfig veth${i}_ 10.0.0.${i}/24
done
| true |
153703b6674ab2ecb375394b5f707ce33703465d | Shell | skydive-project/skydive | /scripts/ci/run-compile-tests.sh | UTF-8 | 1,472 | 2.609375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
set -v
set -e
dir="$(dirname "$0")"
set -e
cd ${GOPATH}/src/github.com/skydive-project/skydive
# Compile all contribs
make contribs
# Compile with default options
make
make test.functionals.compile TAGS=${TAGS}
export CGO_CFLAGS="-I/usr/local/include/dpdk -O3 -g -std=gnu11 -m64 -pthread -march=native -DRTE_MACHINE_CPUFLAG_SSE -DRTE_MACHINE_CPUFLAG_SSE2 -DRTE_MACHINE_CPUFLAG_SSE3 -DRTE_MACHINE_CPUFLAG_SSSE3 -DRTE_MACHINE_CPUFLAG_SSE4_1 -DRTE_MACHINE_CPUFLAG_SSE4_2 -DRTE_MACHINE_CPUFLAG_PCLMULQDQ -DRTE_MACHINE_CPUFLAG_RDRAND -DRTE_MACHINE_CPUFLAG_FSGSBASE -DRTE_MACHINE_CPUFLAG_F16C -include rte_config.h -Wno-deprecated-declarations"
export CGO_LDFLAGS="-L/usr/local/lib"
# Compile with all build options supported enabled
make WITH_DPDK=true WITH_EBPF=true WITH_VPP=true WITH_EBPF_DOCKER_BUILDER=true WITH_K8S=true WITH_ISTIO=true \
WITH_HELM=true VERBOSE=true
# Compile Skydive for Windows
GOOS=windows GOARCH=amd64 go build github.com/skydive-project/skydive
# Compile Skydive for MacOS
GOOS=darwin GOARCH=amd64 go build github.com/skydive-project/skydive
# Compile profiling
make WITH_PROF=true VERBOSE=true
make clean
# Compile all tests
make test.functionals.compile TAGS=${TAGS} WITH_NEUTRON=true WITH_SELENIUM=true WITH_CDD=true \
WITH_SCALE=true WITH_EBPF=true WITH_EBPF_DOCKER_BUILDER=true WITH_VPP=true WITH_K8S=true \
WITH_ISTIO=true WITH_HELM=true
# Compile static
make static
# Check repository is clean
make check
| true |
01ad5aecdf247d5ce7015630940756350bff4265 | Shell | bmwiedemann/openSUSE | /packages/g/grub2/grub2-snapper-plugin.sh | UTF-8 | 7,589 | 3 | 3 | [] | no_license | #!/bin/sh
set -e
# Copyright (C) 2006,2007,2008,2009,2010 Free Software Foundation, Inc.
#
# GRUB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GRUB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GRUB. If not, see <http://www.gnu.org/licenses/>.
grub_mkconfig="/usr/sbin/grub2-mkconfig"
grub_mkrelpath="/usr/bin/grub2-mkrelpath"
grub_script_check="/usr/bin/grub2-script-check"
grub_setting="/etc/default/grub"
grub_cfg="/boot/grub2/grub.cfg"
grub_snapshot_cfg="/boot/grub2/snapshot_submenu.cfg"
snapper_snapshot_path="/.snapshots"
snapshot_submenu_name="grub-snapshot.cfg"
snapper_snapshots_cfg="${snapper_snapshot_path}/${snapshot_submenu_name}"
# add hotkeys for s390. (bnc#885668)
hotkey=
incr_hotkey()
{
[ -n "$hotkey" ] || return
expr $hotkey + 1
}
print_hotkey()
{
keys="123456789abdfgijklmnoprstuvwyz"
if [ -z "$hotkey" ]||[ $hotkey -eq 0 ]||[ $hotkey -gt 30 ]; then
return
fi
echo "--hotkey=$(expr substr $keys $hotkey 1)"
}
snapshot_submenu () {
s_dir="$1"
snapshot="${s_dir}/snapshot"
num="`basename $s_dir`"
# bnc#864842 Important snapshots are not marked as such in grub2 menu
# the format is "important distribution version (kernel_version, timestamp, pre/post)"
date=`xmllint --xpath '/snapshot/date/text()' "${s_dir}/info.xml" || echo ""`
date=`echo $date | sed 's/\(.*\) \(.*\):.*/\1T\2/'`
important=`xmllint --xpath "/snapshot/userdata[key='important']/value/text()" "${s_dir}/info.xml" 2>/dev/null || echo ""`
stype=`xmllint --xpath '/snapshot/type/text()' "${s_dir}/info.xml" || echo ""`
kernel_ver=`readlink ${snapshot}/boot/vmlinuz | sed -e 's/^vmlinuz-//' -e 's/-default$//'`
if [ -z "$kernel_ver" -a -L ${snapshot}/boot/image ]; then
kernel_ver=`readlink ${snapshot}/boot/image | sed -e 's/^image-//' -e 's/-default$//'`
fi
eval `cat ${snapshot}/etc/os-release`
# bsc#934252 - Replace SLES 12.1 with SLES12-SP1 for the list of snapshots
if test "${NAME}" = "SLES" -o "${NAME}" = "SLED"; then
VERSION=`echo ${VERSION} | sed -e 's!^\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)$!\1-SP\2!'`
fi
# FATE#318101
# Show user defined comments in grub2 menu for snapshots
# Use userdata tag "bootloader=[user defined text]"
full_desc=`xmllint --xpath "/snapshot/userdata[key='bootloader']/value/text()" "${s_dir}/info.xml" 2>/dev/null || echo ""`
test -z "$full_desc" && desc=`xmllint --xpath '/snapshot/description/text()' "${s_dir}/info.xml" 2>/dev/null || echo ""`
# FATE#317972
# If we have a post entry and the description field is empty,
# we should use the "Pre" number and add that description to the post entry.
if test -z "$full_desc" -a -z "$desc" -a "$stype" = "post"; then
pre_num=`xmllint --xpath '/snapshot/pre_num/text()' "${s_dir}/info.xml" 2>/dev/null || echo ""`
if test -n "$pre_num"; then
if test -f "${snapper_snapshot_path}/${pre_num}/info.xml" ; then
desc=`xmllint --xpath '/snapshot/description/text()' "${snapper_snapshot_path}/${pre_num}/info.xml" 2>/dev/null || echo ""`
fi
fi
fi
test "$important" = "yes" && important="*" || important=" "
test "$stype" = "single" && stype=""
test -z "$stype" || stype=",$stype"
test -z "$desc" || desc=",$desc"
test -z "$full_desc" && full_desc="$kernel_ver,$date$stype$desc"
if test "${NAME}" = "SLES" -o "${NAME}" = "SLED"; then
title="${important}${NAME}${VERSION} ($full_desc)"
else
title="${important}${NAME} ${VERSION} ($full_desc)"
fi
if test "$s390" = "1"; then
subvol="\$2"
else
subvol="\$3"
fi
cat <<EOF
if [ -f "${snapper_snapshot_path}/$num/snapshot/boot/grub2/grub.cfg" ]; then
snapshot_found=true
saved_subvol=\$btrfs_subvol
menuentry `print_hotkey` "$title" "${snapper_snapshot_path}/$num/snapshot" "`$grub_mkrelpath ${snapper_snapshot_path}/${num}/snapshot`" {
btrfs_subvol="$subvol"
extra_cmdline="rootflags=subvol=\$3"
export extra_cmdline
snapshot_num=$num
export snapshot_num
configfile "\$2/boot/grub2/grub.cfg"
btrfs_subvol=\$saved_subvol
}
fi
EOF
hotkey=`incr_hotkey`
return 0
}
snapper_snapshots_cfg_refresh () {
if [ ! -d "$snapper_snapshot_path" ]; then
return
fi
cs=
for s_dir in ${snapper_snapshot_path}/*; do
snapshot="${s_dir}/snapshot"
# list only read-only snapshot (bnc#878528)
if [ ! -d ${s_dir} -o -w "$snapshot" ]; then
continue
fi
if [ -r "${s_dir}/info.xml" -a -r "${s_dir}/snapshot/boot/grub2/grub.cfg" ]; then
cs="${s_dir}
${cs}"
else
# cleanup any grub-snapshot.cfg without associated snapshot info
snapper_cfg="${s_dir}/${snapshot_submenu_name}"
if [ -f "$snapper_cfg" ]; then
rm -f "$snapper_cfg"
rmdir "$s_dir" 2>/dev/null || true
fi
continue
fi
done
hk=""
[ -z "$hotkey" ] || hk="--hotkey=s"
for c in $(printf '%s' "${cs}" | sort -Vr); do
if ! snapshot_submenu "$c" > "${c}/${snapshot_submenu_name}"; then
rm -f "${c}/${snapshot_submenu_name}"
continue
fi
snapshot_cfg="${snapshot_cfg}
if [ -f \"$c/${snapshot_submenu_name}\" ]; then
source \"$c/${snapshot_submenu_name}\"
fi"
done
cat <<EOF >"${snapper_snapshots_cfg}.new"
if [ -z "\$extra_cmdline" ]; then
submenu $hk "Start bootloader from a read-only snapshot" {${snapshot_cfg}
if [ x\$snapshot_found != xtrue ]; then
submenu "Not Found" { true; }
fi
}
fi
EOF
if ${grub_script_check} "${snapper_snapshots_cfg}.new"; then
mv -f "${snapper_snapshots_cfg}.new" "${snapper_snapshots_cfg}"
fi
}
snapshot_submenu_clean () {
for s_dir in ${snapper_snapshot_path}/*; do
snapper_cfg="${s_dir}/${snapshot_submenu_name}"
if [ -f "$snapper_cfg" ]; then
rm -f "$snapper_cfg"
rmdir "$s_dir" 2>/dev/null || true
fi
done
if [ -f "${snapper_snapshot_path}/${snapshot_submenu_name}" ]; then
rm -f "${snapper_snapshot_path}/${snapshot_submenu_name}"
fi
}
set_grub_setting () {
name=$1
val=$2
if grep -q "$name" "$grub_setting"; then
sed -i -e "s!.*\($name\)=.*!\1=\"$val\"!" "$grub_setting"
else
echo "$name=\"$val\"" >> "$grub_setting"
fi
}
enable_grub_settings () {
set_grub_setting SUSE_BTRFS_SNAPSHOT_BOOTING "true"
}
disable_grub_settings () {
set_grub_setting SUSE_BTRFS_SNAPSHOT_BOOTING "false"
}
update_grub () {
"${grub_mkconfig}" -o "${grub_cfg}"
}
machine=`uname -m`
case "$machine" in
(s390|s390x)
hotkey=1
s390=1
;;
esac
cmdline="$0 $* hotkey='$hotkey'"
# Check the arguments.
while test $# -gt 0
do
option=$1
shift
case "$option" in
-e | --enable)
opt_enable=true
;;
-d | --disable)
opt_enable=false
;;
-r | --refresh)
opt_refresh=true
;;
-c | --clean)
opt_clean=true
;;
-*)
;;
esac
done
if [ "x${opt_enable}" = "xtrue" ]; then
#enable_grub_settings
#update_grub
snapper_snapshots_cfg_refresh
elif [ "x${opt_enable}" = "xfalse" ]; then
#disable_grub_settings
update_grub
snapshot_submenu_clean
fi
if [ x${opt_refresh} = "xtrue" ]; then
snapper_snapshots_cfg_refresh
fi
if [ x${opt_clean} = "xtrue" ]; then
snapshot_submenu_clean
fi
| true |
877a30e75ac862879bef2afb2078a31e474f7c84 | Shell | RobertsLab/sams-notebook | /sbatch_scripts/20190114_oly_busco_augustus.sh | UTF-8 | 4,666 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
## Job Name
#SBATCH --job-name=busco
## Allocation Definition
#SBATCH --account=coenv
#SBATCH --partition=coenv
## Resources
## Nodes
#SBATCH --nodes=2
## Walltime (days-hours:minutes:seconds format)
#SBATCH --time=15-00:00:00
## Memory per node
#SBATCH --mem=120G
##turn on e-mail notification
#SBATCH --mail-type=ALL
#SBATCH --mail-user=samwhite@uw.edu
## Specify the working directory for this job
#SBATCH --workdir=/gscratch/scrubbed/samwhite/outputs/20190114_oly_busco_augustus
# Load Python Mox module for Python module availability
module load intel-python3_2017
# Load Open MPI module for parallel, multi-node processing
module load icc_19-ompi_3.1.2
# SegFault fix?
export THREADS_DAEMON_MODEL=1
# Document programs in PATH (primarily for program version ID)
date >> system_path.log
echo "" >> system_path.log
echo "System PATH for $SLURM_JOB_ID" >> system_path.log
echo "" >> system_path.log
printf "%0.s-" {1..10} >> system_path.log
echo ${PATH} | tr : \\n >> system_path.log
## Establish variables for more readable code
wd=$(pwd)
bedtools=/gscratch/srlab/programs/bedtools-2.27.1/bin/bedtools
busco=/gscratch/srlab/programs/busco-v3/scripts/run_BUSCO.py
busco_db=/gscratch/srlab/sam/data/databases/BUSCO/eukaryota_odb9
busco_config_default=/gscratch/srlab/programs/busco-v3/config/config.ini.default
busco_config_ini=${wd}/config.ini
maker_dir=/gscratch/scrubbed/samwhite/outputs/20181127_oly_maker_genome_annotation
oly_genome=/gscratch/srlab/sam/data/O_lurida/oly_genome_assemblies/Olurida_v081/Olurida_v081.fa
oly_maker_gff=/gscratch/srlab/sam/data/O_lurida/oly_genome_assemblies/Olurida_v081/Olurida_v081.maker.all.noseqs.gff
blast_dir=/gscratch/srlab/programs/ncbi-blast-2.8.1+/bin/
augustus_bin=/gscratch/srlab/programs/Augustus-3.3.2/bin
augustus_scripts=/gscratch/srlab/programs/Augustus-3.3.2/scripts
augustus_config_dir=${wd}/augustus/config
augustus_orig_config_dir=/gscratch/srlab/programs/Augustus-3.3.2/config
hmm_dir=/gscratch/srlab/programs/hmmer-3.2.1/src/
# Export BUSCO config file location
export BUSCO_CONFIG_FILE="${busco_config_ini}"
# Export Augustus variable
export PATH="${augustus_bin}:$PATH"
export PATH="${augustus_scripts}:$PATH"
export AUGUSTUS_CONFIG_PATH="${augustus_config_dir}"
# Subset transcripts and include +/- 1000bp on each side.
## Reduces amount of data used for training - don't need crazy amounts to properly train gene models
awk -v OFS="\t" '{ if ($3 == "mRNA") print $1, $4, $5 }' ${oly_maker_gff} | \
awk -v OFS="\t" '{ if ($2 < 1000) print $1, "0", $3+1000; else print $1, $2-1000, $3+1000 }' | \
${bedtools} getfasta -fi ${oly_genome} \
-bed - \
-fo Olurida_v081.all.maker.transcripts1000.fasta
cp Olurida_v081.all.maker.transcripts1000.fasta ${maker_dir}
cp ${busco_config_default} ${busco_config_ini}
mkdir augustus
cp -pr ${augustus_orig_config_dir} ${augustus_config_dir}
# Edit BUSCO config file
## Set paths to various programs
### The use of the % symbol sets the delimiter sed uses for arguments.
### Normally, the delimiter that most examples use is a slash "/".
### But, we need to expand the variables into a full path with slashes, which screws up sed.
### Thus, the use of % symbol instead (it could be any character that is NOT present in the expanded variable; doesn't have to be "%").
sed -i "/^tblastn_path/ s%tblastn_path = /usr/bin/%path = ${blast_dir}%" "${busco_config_ini}"
sed -i "/^makeblastdb_path/ s%makeblastdb_path = /usr/bin/%path = ${blast_dir}%" "${busco_config_ini}"
sed -i "/^augustus_path/ s%augustus_path = /home/osboxes/BUSCOVM/augustus/augustus-3.2.2/bin/%path = ${augustus_bin}%" "${busco_config_ini}"
sed -i "/^etraining_path/ s%etraining_path = /home/osboxes/BUSCOVM/augustus/augustus-3.2.2/bin/%path = ${augustus_bin}%" "${busco_config_ini}"
sed -i "/^gff2gbSmallDNA_path/ s%gff2gbSmallDNA_path = /home/osboxes/BUSCOVM/augustus/augustus-3.2.2/scripts/%path = ${augustus_scripts}%" "${busco_config_ini}"
sed -i "/^new_species_path/ s%new_species_path = /home/osboxes/BUSCOVM/augustus/augustus-3.2.2/scripts/%path = ${augustus_scripts}%" "${busco_config_ini}"
sed -i "/^optimize_augustus_path/ s%optimize_augustus_path = /home/osboxes/BUSCOVM/augustus/augustus-3.2.2/scripts/%path = ${augustus_scripts}%" "${busco_config_ini}"
sed -i "/^hmmsearch_path/ s%hmmsearch_path = /home/osboxes/BUSCOVM/hmmer/hmmer-3.1b2-linux-intel-ia32/binaries/%path = ${hmm_dir}%" "${busco_config_ini}"
# Run BUSCO/Augustus training
${busco} \
--in Olurida_v081.all.maker.transcripts1000.fasta \
--out Olurida_maker_busco \
--lineage_path ${busco_db} \
--mode genome \
--cpu 56 \
--long \
--species human \
--tarzip \
--augustus_parameters='--progress=true'
| true |
eac8f268a83aa70891cc710d1e9a2e1e504d7d3a | Shell | FauxFaux/debian-control | /s/syncache/syncache_1.4-1_all/postinst | UTF-8 | 708 | 3.21875 | 3 | [] | no_license | #!/bin/sh
set -e
PIDDIR="/var/run/syncache-drb"
case "$1" in
configure)
# Create syncache system user
getent passwd syncache > /dev/null || \
adduser --quiet --system --home "$PIDDIR" --no-create-home \
--gecos 'SynCache DRb Server' --group syncache
;;
failed-upgrade|abort-upgrade|abort-remove|abort-deconfigure|in-favour|removing)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2;
exit 1;
;;
esac
# Automatically added by dh_installinit
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then
if [ -x "/etc/init.d/syncache" ]; then
update-rc.d syncache defaults >/dev/null
invoke-rc.d syncache start || exit $?
fi
fi
# End automatically added section
exit 0
| true |
d7489ed98920754d024cda006f5464fec4817eaa | Shell | karlis0/linux-enable-ir-emitter | /AUR/release/PKGBUILD | UTF-8 | 1,405 | 2.59375 | 3 | [
"MIT"
] | permissive | # Maintainer: Andrey Kolchenko <andrey@kolchenko.me>
# Co-Maintainer: Maxime Dirksen <dirksen.maxime@gmail.com>
# Contributor: Antoine Bertin <ant.bertin@gmail.com>
pkgname=linux-enable-ir-emitter
pkgver=2.1.0
pkgrel=1
epoch=1
pkgdesc="Enables infrared cameras that are not directly enabled out-of-the box."
url='https://github.com/EmixamPP/linux-enable-ir-emitter'
license=('MIT')
arch=('x86_64')
provides=(linux-enable-ir-emitter)
conflicts=(linux-enable-ir-emitter-git chicony-ir-toggle)
depends=(
'python'
'python-opencv'
'python-yaml'
'nano'
)
optdepends=(
'python-pyshark: full configuration setup support'
)
source=("https://github.com/EmixamPP/linux-enable-ir-emitter/archive/refs/tags/${pkgver}.tar.gz")
sha256sums=('SKIP')
build() {
cd "${srcdir}/${pkgname}-${pkgver}/sources"
make
}
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
install -Dm 644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}"
install -Dm 755 sources/enable-ir-emitter -t "${pkgdir}"/usr/lib/linux-enable-ir-emitter/
install -Dm 644 sources/config.yaml -t "${pkgdir}"/usr/lib/linux-enable-ir-emitter/
install -Dm 755 sources/*.py -t "${pkgdir}"/usr/lib/linux-enable-ir-emitter/
install -Dm 644 sources/linux-enable-ir-emitter.service -t "${pkgdir}"/usr/lib/systemd/system/
install -dm 755 ${pkgdir}/usr/bin/
ln -fs /usr/lib/linux-enable-ir-emitter/linux-enable-ir-emitter.py ${pkgdir}/usr/bin/linux-enable-ir-emitter
}
| true |
1bd63362f2fb8f03214a18247b435bbb2dd5bd65 | Shell | bernardn/dotfiles | /.bashrc.d/90ext.kube | UTF-8 | 1,272 | 3.0625 | 3 | [] | no_license | #!/bin/bash
### Load Kubernetes-related stuff
# Rancher Desktop
test -d ~/.rd/bin/ && export PATH=$PATH:~/.rd/bin/
which -s kubectl && echo -n " kube" && {
function kctx {
test -z "$1" || kubectl config use-context "$1" || kubectl config get-contexts;
kubectl config current-context;
}
function kls {
kubectl logs "$(select\-object pod)" "$@";
}
function kns {
_ns="$1"
test -z "$_ns" && _ns="$(k get ns|grep -v NAME|fzf|awk '{print $1}')";
test "$_ns" != "?" && kubectl config set-context --current --namespace="$_ns";
kubectl config view --minify --output 'jsonpath={..namespace}' ;
}
function knss {
kubectl config set-context --current --namespace="$(select\-object ns)" "$@";
}
function kpfws {
if test -z "$1"; then
echo "Please specify the port to be forwarded."
else
kubectl port-forward "$(select\-object pod)" "$1:$1";
fi
}
function select-object {
kubectl get "$1" | grep -v NAME | fzf | awk '{print $1}'
}
alias k=kubectl
alias kdp="kubectl delete pods"
alias kgp="kubectl get pods"
alias kgpa="kubectl get pods --all-namespaces"
alias kl="kubectl logs"
alias kgs="kubectl get services"
alias kgsa="kubectl get services --all-namespaces"
source <(kubectl completion bash)
}
| true |
515483f4bca49ecabb12247464860a5de7f76aa5 | Shell | SaiPulijala/aws_cli_play | /sample.sh | UTF-8 | 270 | 3 | 3 | [] | no_license | #!/bin/bash
count=3
while IFS=, read -r a b
do
if [[ $count != 0 ]] && [[ $count != 1 ]]
then
echo $a
echo $b
echo $count
sleep 3
fi
count=$((count-1))
done < ../aws_cli_play/containerinstancestoremove.txt
rm -fr ../aws_cli_play/containerinstancestoremove.txt | true |
9accb2e5c1ff90ac0d883fbebf9235bc43409eba | Shell | CrystalGamma/triton | /pkgs/stdenv/generic/setup.sh | UTF-8 | 22,155 | 4.3125 | 4 | [
"MIT"
] | permissive | set -e
set -o pipefail
trap "exitHandler" EXIT
################################ Hook handling #################################
# Run all hooks with the specified name in the order in which they
# were added, stopping if any fails (returns a non-zero exit
# code). The hooks for <hookName> are the shell function or variable
# <hookName>, and the values of the shell array ‘<hookName>Hooks’.
runHook() {
local hookName="$1"; shift
local var="$hookName"
if [[ "$hookName" =~ Hook$ ]]; then
var+='s'
else
var+='Hooks'
fi
eval "local -a dummy=(\"\${$var[@]}\")"
for hook in "_callImplicitHook 0 $hookName" "${dummy[@]}"; do
_eval "$hook" "$@"
done
return 0
}
# Run all hooks with the specified name, until one succeeds (returns a
# zero exit code). If none succeed, return a non-zero exit code.
runOneHook() {
local hookName="$1"; shift
local var="$hookName"
if [[ "$hookName" =~ Hook$ ]]; then
var+='s'
else
var+='Hooks'
fi
eval "local -a dummy=(\"\${$var[@]}\")"
for hook in "_callImplicitHook 1 $hookName" "${dummy[@]}"; do
if _eval "$hook" "$@"; then
return 0
fi
done
return 1
}
# Run the named hook, either by calling the function with that name or
# by evaluating the variable with that name. This allows convenient
# setting of hooks both from Nix expressions (as attributes /
# environment variables) and from shell scripts (as functions). If you
# want to allow multiple hooks, use runHook instead.
_callImplicitHook() {
local def="$1"
local hookName="$2"
case "$(type -t $hookName)" in
'function'|'alias'|'builtin') $hookName ;;
'file') source "$hookName" ;;
'keyword') : ;;
*)
if [ -z "${!hookName}" ]; then
return "$def"
else
eval "${!hookName}"
fi
;;
esac
}
# A function wrapper around ‘eval’ that ensures that ‘return’ inside
# hooks exits the hook, not the caller.
_eval() {
local code="$1"; shift
if [ "$(type -t $code)" = function ]; then
eval "$code \"\$@\""
else
eval "$code"
fi
}
################################### Logging ####################################
startNest() {
nestingLevel=$(( $nestingLevel + 1 ))
echo -en "\033[$1p"
}
stopNest() {
nestingLevel=$(( $nestingLevel - 1 ))
echo -en "\033[q"
}
header() {
startNest "$2"
echo "$1"
}
# Make sure that even when we exit abnormally, the original nesting
# level is properly restored.
closeNest() {
while [ $nestingLevel -gt 0 ]; do
stopNest
done
}
################################ Error handling ################################
exitHandler() {
exitCode=$?
set +e
closeNest
if [ -n "$showBuildStats" ]; then
times > "$NIX_BUILD_TOP/.times"
local -a times=($(cat "$NIX_BUILD_TOP/.times"))
# Print the following statistics:
# - user time for the shell
# - system time for the shell
# - user time for all child processes
# - system time for all child processes
echo "build time elapsed: " ${times[*]}
fi
if [ $exitCode != 0 ]; then
runHook 'failureHook'
# If the builder had a non-zero exit code and
# $succeedOnFailure is set, create the file
# ‘$out/nix-support/failed’ to signal failure, and exit
# normally. Otherwise, return the original exit code.
if [ -n "$succeedOnFailure" ]; then
echo "build failed with exit code $exitCode (ignored)"
mkdir -p "$out/nix-support"
printf "%s" "$exitCode" > "$out/nix-support/failed"
exit 0
fi
else
runHook 'exitHook'
fi
exit "$exitCode"
}
############################### Helper functions ###############################
arrayToDict() {
local tmp=(${!1})
declare -gA "$1"
eval "$1"='()'
local i=1
while [ "$i" -lt "${#tmp[@]}" ]; do
eval "$1[\"${tmp[$(( $i - 1 ))]}\"]"='"${tmp[$i]}"'
i=$(( $i + 2 ))
done
}
addToSearchPathWithCustomDelimiter() {
local delimiter="$1"
local varName="$2"
local dir="$3"
if [ -d "$dir" ]; then
eval export ${varName}=${!varName}${!varName:+$delimiter}${dir}
fi
}
addToSearchPath() {
addToSearchPathWithCustomDelimiter "${PATH_DELIMITER}" "$@"
}
######################## Textual substitution functions ########################
substitute() {
local input="$1"
local output="$2"
local -a params=("$@")
local n p pattern replacement varName content
# a slightly hacky way to keep newline at the end
content="$(cat "$input"; printf "%s" X)"
content="${content%X}"
for (( n = 2; n < ${#params[*]}; n += 1 )); do
p=${params[$n]}
if [ "$p" = '--replace' ]; then
pattern="${params[$((n + 1))]}"
replacement="${params[$((n + 2))]}"
n=$((n + 2))
fi
if [ "$p" = '--subst-var' ]; then
varName="${params[$((n + 1))]}"
pattern="@$varName@"
replacement="${!varName}"
n=$((n + 1))
fi
if [ "$p" = '--subst-var-by' ]; then
pattern="@${params[$((n + 1))]}@"
replacement="${params[$((n + 2))]}"
n=$((n + 2))
fi
content="${content//"$pattern"/$replacement}"
done
if [ -e "$output" ]; then
chmod +w "$output"
fi
printf "%s" "$content" > "$output"
}
substituteInPlace() {
local fileName="$1"; shift
substitute "$fileName" "$fileName" "$@"
}
substituteAll() {
local input="$1"
local output="$2"
# Select all environment variables
for envVar in $(env -0 | sed -z -n 's,^\([^=]*\).*,\1,p' | tr '\0' '\n'); do
if [ "$NIX_DEBUG" = "1" ]; then
echo "$envVar -> ${!envVar}"
fi
args="$args --subst-var $envVar"
done
substitute "$input" "$output" $args
}
substituteAllInPlace() {
local fileName="$1"; shift
substituteAll "$fileName" "$fileName" "$@"
}
################################################################################
# Recursively find all build inputs.
findInputs() {
local pkg="$1"
local var="$2"
local propagatedBuildInputsFile="$3"
case ${!var} in
*\ $pkg\ *)
return 0
;;
esac
eval $var="'${!var} $pkg '"
if ! [ -e "$pkg" ]; then
echo "build input $pkg does not exist" >&2
exit 1
fi
if [ -f "$pkg" ]; then
source "$pkg"
fi
if [ -d $1/bin ]; then
addToSearchPath _PATH $1/bin
fi
if [ -f "$pkg/nix-support/setup-hook" ]; then
source "$pkg/nix-support/setup-hook"
fi
if [ -f "$pkg/nix-support/$propagatedBuildInputsFile" ]; then
for i in $(cat "$pkg/nix-support/$propagatedBuildInputsFile"); do
findInputs "$i" $var $propagatedBuildInputsFile
done
fi
}
# Set the relevant environment variables to point to the build inputs
# found above.
_addToNativeEnv() {
local pkg="$1"
addToSearchPath '_PATH' "$1/bin"
# Run the package-specific hooks set by the setup-hook scripts.
runHook 'envHook' "$pkg"
}
_addToCrossEnv() {
local pkg="$1"
# Some programs put important build scripts (freetype-config and similar)
# into their crossDrv bin path. Intentionally these should go after
# the nativePkgs in PATH.
addToSearchPath '_PATH' "$1/bin"
# Run the package-specific hooks set by the setup-hook scripts.
runHook 'crossEnvHook' "$pkg"
}
############################### Generic builder ################################
# This function is useful for debugging broken Nix builds. It dumps
# all environment variables to a file `env-vars' in the build
# directory. If the build fails and the `-K' option is used, you can
# then go to the build directory and source in `env-vars' to reproduce
# the environment used for building.
dumpVars() {
if [ -n "${dumpEnvVars-true}" ]; then
export > "$NIX_BUILD_TOP/env-vars" || true
fi
}
# Utility function: return the base name of the given path, with the
# prefix `HASH-' removed, if present.
stripHash() {
strippedName="$(basename "$1")";
if echo "$strippedName" | grep -q '^[a-z0-9]\{32\}-'; then
strippedName=$(echo "$strippedName" | cut -c34-)
fi
}
_defaultUnpack() {
local fn="$1"
local ret="1"
if [ -d "$fn" ]; then
stripHash "$fn"
# We can't preserve hardlinks because they may have been
# introduced by store optimization, which might break things
# in the build.
cp -pr --reflink=auto "$fn" "$strippedName"
ret=0
else
case "$fn" in
*.tar.brotli | *.tar.bro | *.tar.br | *.tbr)
brotli -d < "$fn" | tar x && ret=0 || ret="$?"
;;
*.tar | *.tar.* | *.tgz | *.tbz2 | *.txz)
# GNU tar can automatically select the decompression method
# (info "(tar) gzip").
tar xf "$fn" && ret=0 || ret="$?"
;;
esac
fi
[ "$ret" -eq "0" ] || [ "$ret" -eq "141" ]
}
unpackFile() {
curSrc="$1"
header "unpacking source archive $curSrc" 3
if ! runOneHook 'unpackCmd' "$curSrc"; then
echo "do not know how to unpack source archive $curSrc"
exit 1
fi
stopNest
}
unpackPhase() {
runHook 'preUnpack'
if [ -z "$srcs" ]; then
if [ -z "$src" ]; then
echo 'variable $src or $srcs should point to the source'
exit 1
fi
srcs="$src"
fi
# To determine the source directory created by unpacking the
# source archives, we record the contents of the current
# directory, then look below which directory got added. Yeah,
# it's rather hacky.
local dirsBefore=''
for i in *; do
if [ -d "$i" ]; then
dirsBefore="$dirsBefore $i "
fi
done
# Unpack all source archives.
for i in $srcs; do
unpackFile "$i"
done
# Find the source directory.
if [ -n "$setSourceRoot" ]; then
runOneHook 'setSourceRoot'
elif [ -z "$srcRoot" ]; then
srcRoot=
for i in *; do
if [ -d "$i" ]; then
case $dirsBefore in
*\ $i\ *)
;;
*)
if [ -n "$srcRoot" ]; then
echo "unpacker produced multiple directories"
exit 1
fi
srcRoot="$i"
;;
esac
fi
done
fi
if [ -z "$srcRoot" ]; then
echo "unpacker appears to have produced no directories"
exit 1
fi
echo "source root is $srcRoot"
# By default, add write permission to the sources. This is often
# necessary when sources have been copied from other store
# locations.
if [ -n "${makeSourcesWritable-true}" ]; then
chmod -R u+w "$srcRoot"
fi
runHook 'postUnpack'
}
patchPhase() {
runHook 'prePatch'
for i in $patches; do
header "applying patch $i" '3'
local uncompress='cat'
case "$i" in
*.gz) uncompress='gzip -d' ;;
*.bz2) uncompress='bzip2 -d' ;;
*.xz) uncompress='xz -d' ;;
*.lzma) uncompress='lzma -d' ;;
esac
# "2>&1" is a hack to make patch fail if the decompressor fails (nonexistent patch, etc.)
$uncompress < "$i" 2>&1 | patch ${patchFlags:--p1}
stopNest
done
runHook 'postPatch'
}
libtoolFix() {
sed -i -e 's^eval sys_lib_.*search_path=.*^^' "$1"
}
configurePhase() {
runHook 'preConfigure'
if [ -z "$configureScript" -a -x ./configure ]; then
configureScript=./configure
fi
if [ -n "${fixLibtool-true}" ]; then
find . -iname "ltmain.sh" | while read i; do
echo "fixing libtool script $i"
libtoolFix "$i"
done
fi
if [ -n "${addPrefix-true}" ]; then
configureFlags="${prefixKey:---prefix=}$prefix $configureFlags"
fi
# Add --disable-dependency-tracking to speed up some builds.
if [ -n "${addDisableDepTrack-true}" ]; then
if grep -q dependency-tracking "$configureScript" 2>/dev/null; then
configureFlags="--disable-dependency-tracking $configureFlags"
fi
fi
# By default, disable static builds.
if [ -n "${disableStatic-true}" ]; then
if grep -q enable-static "$configureScript" 2>/dev/null; then
configureFlags="--disable-static $configureFlags"
fi
fi
if [ -n "$configureScript" ]; then
echo "configure flags: $configureFlags ${configureFlagsArray[@]}"
$configureScript $configureFlags "${configureFlagsArray[@]}"
else
echo "no configure script, doing nothing"
fi
runHook 'postConfigure'
}
commonMakeFlags() {
local phaseName
phaseName="$1"
local parallelVar
parallelVar="${phaseName}Parallel"
actualMakeFlags=()
if [ -n "$makefile" ]; then
actualMakeFlags+=('-f' "$makefile")
fi
if [ -n "${!parallelVar-true}" ]; then
actualMakeFlags+=("-j${NIX_BUILD_CORES}" "-l${NIX_BUILD_CORES}" "-O")
fi
actualMakeFlags+=("SHELL=$SHELL") # Needed for https://github.com/NixOS/nixpkgs/pull/1354#issuecomment-31260409
actualMakeFlags+=($makeFlags)
actualMakeFlags+=("${makeFlagsArray[@]}")
local flagsVar
flagsVar="${phaseName}Flags"
actualMakeFlags+=(${!flagsVar})
local arrayVar
arrayVar="${phaseName}FlagsArray[@]"
actualMakeFlags+=("${!arrayVar}")
}
printMakeFlags() {
local phaseName
phaseName="$1"
echo "$phaseName flags:"
local flag
for flag in "${actualMakeFlags[@]}"; do
echo " $flag"
done
}
buildPhase() {
runHook 'preBuild'
if [ -z "$makeFlags" ] && ! [ -n "$makefile" -o -e "Makefile" -o -e "makefile" -o -e "GNUmakefile" ]; then
echo "no Makefile, doing nothing"
else
local actualMakeFlags
commonMakeFlags 'build'
printMakeFlags 'build'
make "${actualMakeFlags[@]}"
fi
runHook 'postBuild'
}
checkPhase() {
runHook 'preCheck'
local actualMakeFlags
commonMakeFlags 'check'
actualMakeFlags+=(${checkFlags:-VERBOSE=y})
actualMakeFlags+=(${checkTarget:-check})
printMakeFlags 'check'
make "${actualMakeFlags[@]}"
runHook 'postCheck'
}
installPhase() {
runHook 'preInstall'
mkdir -p "$prefix"
local actualMakeFlags
commonMakeFlags 'install'
actualMakeFlags+=(${installTargets:-install})
printMakeFlags 'install'
make "${actualMakeFlags[@]}"
runHook 'postInstall'
}
# The fixup phase performs generic, package-independent stuff, like
# stripping binaries, running patchelf and setting
# propagated-build-inputs.
fixupPhase() {
# Make sure everything is writable so "strip" et al. work.
for output in $outputs; do
if [ -e "${!output}" ]; then
chmod -R u+w "${!output}"
fi
done
runHook 'preFixup'
# Apply fixup to each output.
local output
for output in $outputs; do
prefix=${!output} runHook 'fixupOutput'
done
if [ -n "$propagatedBuildInputs" ]; then
mkdir -p "$out/nix-support"
echo "$propagatedBuildInputs" > "$out/nix-support/propagated-build-inputs"
fi
if [ -n "$propagatedNativeBuildInputs" ]; then
mkdir -p "$out/nix-support"
echo "$propagatedNativeBuildInputs" > "$out/nix-support/propagated-native-build-inputs"
fi
if [ -n "$propagatedUserEnvPkgs" ]; then
mkdir -p "$out/nix-support"
echo "$propagatedUserEnvPkgs" > "$out/nix-support/propagated-user-env-packages"
fi
if [ -n "$setupHook" ]; then
mkdir -p "$out/nix-support"
substituteAll "$setupHook" "$out/nix-support/setup-hook"
fi
runHook 'postFixup'
}
# The fixup check phase performs generic, package-independent checks
# like making sure that we don't have any impure paths in the contents
# of the resulting files.
fixupCheckPhase() {
runHook 'preFixupCheck'
# Apply fixup checks to each output.
local output
for output in $outputs; do
prefix=${!output} runHook 'fixupCheckOutput'
done
runHook 'postFixupCheck'
}
installCheckPhase() {
runHook 'preInstallCheck'
local actualMakeFlags
commonMakeFlags 'installCheck'
actualMakeFlags+=(${installCheckTargets:-installcheck})
printMakeFlags 'installCheck'
make "${actualMakeFlags[@]}"
runHook 'postInstallCheck'
}
distPhase() {
runHook 'preDist'
local actualMakeFlags
commonMakeFlags 'dist'
actualMakeFlags+=(${distTargets:-dist})
printMakeFlags 'dist'
make "${actualMakeFlags[@]}"
if [ "${copyDist-1}" != "1" ]; then
mkdir -p "$out/tarballs"
# Note: don't quote $tarballs, since we explicitly permit
# wildcards in there.
cp -pvd ${tarballs:-*.tar.*} "$out/tarballs"
fi
runHook 'postDist'
}
showPhaseHeader() {
local phase="$1"
case "$phase" in
'unpackPhase') header 'unpacking sources' ;;
'patchPhase') header 'patching sources' ;;
'configurePhase') header 'configuring' ;;
'buildPhase') header 'building' ;;
'checkPhase') header 'running tests' ;;
'installPhase') header 'installing' ;;
'fixupPhase') header 'post-installation fixup' ;;
'fixupCheckPhase') header 'post-installation fixup checks' ;;
'installCheckPhase') header 'running install tests' ;;
*) header "$phase" ;;
esac
}
genericBuild() {
if [ -n "$buildCommand" ]; then
eval "$buildCommand"
return
fi
if [ -n "$phases" ]; then
phases=($phases)
else
phases=(
"${prePhases[@]}"
'unpackPhase'
'patchPhase'
"${preConfigurePhases[@]}"
'configurePhase'
"${preBuildPhases[@]}"
'buildPhase'
'checkPhase'
"${preInstallPhases[@]}"
'installPhase'
"${preFixupPhases[@]}"
'fixupPhase'
'fixupCheckPhase'
'installCheckPhase'
"${preDistPhases[@]}"
'distPhase'
"${postPhases[@]}"
)
fi
for curPhase in "${phases[@]}"; do
if [ "$curPhase" = 'buildPhase' -a -n "$dontBuild" ]; then continue; fi
if [ "$curPhase" = 'checkPhase' -a -z "$doCheck" ]; then continue; fi
if [ "$curPhase" = 'installPhase' -a -n "$dontInstall" ]; then continue; fi
if [ "$curPhase" = 'fixupPhase' -a -n "$dontFixup" ]; then continue; fi
if [ "$curPhase" = 'fixupCheckPhase' -a -n "$dontFixupCheck" ]; then continue; fi
if [ "$curPhase" = 'installCheckPhase' -a -z "$doInstallCheck" ]; then continue; fi
if [ "$curPhase" = 'distPhase' -a -z "$doDist" ]; then continue; fi
if [ -n "$tracePhases" ]; then
echo
echo "@ phase-started $out $curPhase"
fi
showPhaseHeader "$curPhase"
dumpVars
# Evaluate the variable named $curPhase if it exists, otherwise the
# function named $curPhase.
eval "${!curPhase:-$curPhase}"
if [ "$curPhase" = 'unpackPhase' ]; then
cd "${srcRoot:-.}"
fi
if [ -n "$tracePhases" ]; then
echo
echo "@ phase-succeeded $out $curPhase"
fi
stopNest
done
}
################################ Initialisation ################################
: ${outputs:=out}
# Array handling, we need to turn some variables into arrays
prePhases=($prePhases)
preConfigurePhases=($preConfigurePhases)
preBuildPhases=($preBuildPhases)
preInstallPhases=($preInstallPhases)
preFixupPhases=($preFixupPhases)
preDistPhases=($preDistPhases)
postPhases=($postPhases)
PATH_DELIMITER=':'
nestingLevel=0
# Set a temporary locale that should be used by everything
LOCALE_PREDEFINED=${LC_ALL:+1}
export LC_ALL
: ${LC_ALL:=C}
# Set a fallback default value for SOURCE_DATE_EPOCH, used by some
# build tools to provide a deterministic substitute for the "current"
# time. Note that 1 = 1970-01-01 00:00:01. We don't use 0 because it
# confuses some applications.
export SOURCE_DATE_EPOCH
: ${SOURCE_DATE_EPOCH:=1}
# Wildcard expansions that don't match should expand to an empty list.
# This ensures that, for instance, "for i in *; do ...; done" does the
# right thing.
shopt -s nullglob
# Set up the initial path.
PATH=
for i in $initialPath; do
if [ "$i" = / ]; then
i=
fi
addToSearchPath 'PATH' "$i/bin"
done
if [ "$NIX_DEBUG" = 1 ]; then
echo "initial path: $PATH"
fi
# Check that the pre-hook initialised SHELL.
if [ -z "$SHELL" ]; then
echo "SHELL not set"
exit 1
fi
BASH="$SHELL"
export CONFIG_SHELL="$SHELL"
# Set the TZ (timezone) environment variable, otherwise commands like
# `date' will complain (e.g., `Tue Mar 9 10:01:47 Local time zone must
# be set--see zic manual page 2004').
export TZ='UTC'
# Before doing anything else, state the build time
NIX_BUILD_START="$(date '+%s')"
# Execute the pre-hook.
if [ -z "$shell" ]; then
export shell=$SHELL
fi
runHook 'preHook'
# Allow the caller to augment buildInputs (it's not always possible to
# do this before the call to setup.sh, since the PATH is empty at that
# point; here we have a basic Unix environment).
runHook 'addInputsHook'
crossPkgs=''
for i in $buildInputs $defaultBuildInputs $propagatedBuildInputs; do
findInputs "$i" 'crossPkgs' 'propagated-build-inputs'
done
nativePkgs=''
for i in $nativeBuildInputs $defaultNativeBuildInputs $propagatedNativeBuildInputs; do
findInputs "$i" 'nativePkgs' 'propagated-native-build-inputs'
done
# We want to allow builders to apply setup-hooks to themselves
if [ "${selfApplySetupHook-0}" = "1" ]; then
source "$setupHook"
fi
for i in $nativePkgs; do
_addToNativeEnv "$i"
done
for i in $crossPkgs; do
_addToCrossEnv "$i"
done
# Add the output as an rpath.
if [ "$NIX_NO_SELF_RPATH" != 1 ]; then
export NIX_LDFLAGS="-rpath $out/lib $NIX_LDFLAGS"
if [ -n "$NIX_LIB64_IN_SELF_RPATH" ]; then
export NIX_LDFLAGS="-rpath $out/lib64 $NIX_LDFLAGS"
fi
if [ -n "$NIX_LIB32_IN_SELF_RPATH" ]; then
export NIX_LDFLAGS="-rpath $out/lib32 $NIX_LDFLAGS"
fi
fi
# Set the prefix. This is generally $out, but it can be overriden,
# for instance if we just want to perform a test build/install to a
# temporary location and write a build report to $out.
if [ -z "$prefix" ]; then
prefix="$out"
fi
if [ "$useTempPrefix" = 1 ]; then
prefix="$NIX_BUILD_TOP/tmp_prefix"
fi
PATH=$_PATH${_PATH:+:}$PATH
if [ "$NIX_DEBUG" = 1 ]; then
echo "final path: $PATH"
fi
# Make GNU Make produce nested output.
export NIX_INDENT_MAKE=1
# Normalize the NIX_BUILD_CORES variable. The value might be 0, which
# means that we're supposed to try and auto-detect the number of
# available CPU cores at run-time.
if [ -z "${NIX_BUILD_CORES//[^0-9]/}" ]; then
NIX_BUILD_CORES='1'
elif [ "$NIX_BUILD_CORES" -le 0 ]; then
NIX_BUILD_CORES=$(nproc 2>/dev/null || true)
if expr >/dev/null 2>&1 "$NIX_BUILD_CORES" : "^[0-9][0-9]*$"; then
:
else
NIX_BUILD_CORES='1'
fi
fi
export NIX_BUILD_CORES
unpackCmdHooks+=('_defaultUnpack')
# Execute the post-hooks.
runHook 'postHook'
# Execute the global user hook (defined through the Nixpkgs
# configuration option ‘stdenv.userHook’). This can be used to set
# global compiler optimisation flags, for instance.
runHook 'userHook'
dumpVars
| true |
d5f337961673c1de21f4f81ceef1985361fd44ad | Shell | vogonwocky/cPanel-PreInstall | /cpanel_preinstall.sh | UTF-8 | 1,994 | 3.390625 | 3 | [] | no_license | #!/bin/bash
############################
## cPanel Preinstall ##
## Version 1.0 ##
## By: Matthew Vetter ##
## cPanel, Inc. ##
############################
file="/etc/selinux/config"
if [ -f "$file" ] ; then
if `cat "$file" | grep "SELINUX=" | grep -q "enforcing"` ; then
sed -i '/^SELINUX=/s/\enforcing$/disabled/' "$file";
echo "SELINUX set from enforcing to disabled!";
cat /etc/selinux/config | grep "SELINUX=" | grep -v "# SELINUX";
elif [ -f "$file" ] ; then
if `cat "$file" | grep "SELINUX=" | grep -q "permissive"` ; then
sed -i '/^SELINUX=/s/\permissive$/disabled/' "$file";
echo "SELINUX set from permissive to disabled!";
cat /etc/selinux/config | grep "SELINUX=" | grep -v "# SELINUX";
elif [ -f "$file" ] ; then
if `cat "$file" | grep "#SELINUX="` ; then
sed -i 's/#SELINUX=.*/SELINUX=disabled/g' "$file";
echo "SELINUX set from commented out to disabled!";
cat /etc/selinux/config | grep "SELINUX=" | grep -v "# SELINUX";
else
echo "Nothing to fix! (SELINUX appears to be disabled already)"
cat /etc/selinux/config | grep "SELINUX=" | grep -v "# SELINUX";
fi
fi
fi
fi
echo "==========";
#Turn off Firewall
chkconfig iptables off;
service iptables stop;
echo "==========";
echo "Firewall Disabled and Turned Off!";
echo "==========";
#Remove Yum Groups
yum -y groupremove "FTP Server" "GNOME Desktop Environment" "KDE (K Desktop Environment)" "Mail Server or E-mail Server" "Mono" "Web Server" "X Window System";
echo "==========";
echo "Yum Groups Removed or Already Removed!";
echo "==========";
# Install Perl
yum -y install perl;
echo "==========";
echo "Perl Installed or Already Installed!";
echo "==========";
echo "Server is Ready to Reboot and Re-Install cPanel!";
yum install wget;
wget -N http://httpupdate.cpanel.net/latest;
sh latest;
echo "==========";
echo "cPanel Installed. Make sure to reboot the server to finish disabling SELINUX";
| true |
6736b9ea8fe1dd434463b61ddb9798deb7a1d579 | Shell | smidgedy/packetsquirrel-payloads | /payloads/library/remote-access/openvpn/payload.sh | UTF-8 | 1,232 | 3.84375 | 4 | [] | no_license | #!/bin/bash
#
# Title: OpenVPN
# Description: Create a connection to a VPN-connection to an OpenVPN-server. Optionally: Send traffic from the clients through said tunnel.
# Author: Hak5
# Version: 1.0
# Category: remote-access
# Target: Any
# Net Mode: BRIDGE, VPN
# Set to 1 to allow clients to use the VPN
FOR_CLIENTS=0
DNS_SERVER="8.8.8.8"
# Cheap hack to set the DNS server
function setdns() {
while true
do
[[ ! $(grep -q "$DNS_SERVER" /tmp/resolv.conf) ]] && {
echo -e "search lan\nnameserver $DNS_SERVER" > /tmp/resolv.conf
}
sleep 5
done
}
function start() {
LED SETUP
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Update factory default payload
cp ${DIR}/payload.sh /root/payloads/switch3/payload.sh
# Set NETMODE to BRIDGE and wait 3 seconds
# to ensure that things can settle
[[ "$FOR_CLIENTS" == "1" ]] && {
/usr/bin/NETMODE VPN
} || {
/usr/bin/NETMODE BRIDGE
}
sleep 3
# Make OpenVPN use the local configuration
uci set openvpn.vpn.config="${DIR}/config.ovpn"
uci commit
# Start the OpenVPN server in the background
/etc/init.d/openvpn start
# Start SSH Server
/etc/init.d/sshd start &
# Set DNS server
setdns &
LED ATTACK
}
# Start the payload
start &
| true |
f552c10af3d776df77e26b941bc34f61f843b35b | Shell | PsymonLi/sw | /nic/tools/clear_nic_config.sh | UTF-8 | 548 | 3.34375 | 3 | [] | no_license | #!/bin/sh
if [ "$1" == "remove-config" ]; then
dir_list="/sysconfig/config0/ /sysconfig/config1/ /update/ /data/"
elif [ "$1" == "factory-default" ]; then
dir_list="/sysconfig/config0/ /sysconfig/config1/ /update/ /data/ /obfl/"
else
echo "Invalid arg"
echo "usage: clear_nic_config.sh"
echo "action:"
echo "remove-config"
echo "factory-default"
exit
fi
for dir in $dir_list; do
echo "Removing content from $dir"
cd $dir && find . -name lost+found -prune -o -exec rm -rf '{}' ';'
done
#sync the disk
sync
| true |
f17a037b613112d00ef20f363635a0e83c972e17 | Shell | mikroskeem/qsm | /runtime/start_server.sh | UTF-8 | 1,134 | 3.21875 | 3 | [] | no_license | if (is_server_running); then
echo "ERROR: server is already running!"
exit 1
fi
systemd-run \
--scope \
--slice mcserver.slice \
-E basedir="${basedir}" \
tmux new-session -d -s mcserver "${basedir}/runtime/launch.sh"
# TODO: systemd-run sucks and is limited as fuck
# --property=ExecStop='/home/mark/mcserver/running_scripts/shutdown_server.sh' \
# --property=ExecStopPost='/home/mark/running_scripts/post_shutdown.sh' \
# wait until server is up
sleep 1
echo "waiting until server starts up"
mcserver_pid="$(get_pid)"
current_ts="$(date +%s)"
while true; do
if (is_process_running "${mcserver_pid}"); then
# check if log file is rotated first - some servers might start up slow
# TODO: this check is shit
if [ "$(stat -c %Y "${server_dir}/logs/latest.log")" -gt "${current_ts}" ] && (grep -q 'Done (.*)! For help, type "help" or "?"' "${server_dir}/logs/latest.log"); then
echo "server is running, exiting"
exit 0
fi
else
echo "server died unexpectedly!"
exit 1
fi
sleep 2
done
# vim: ft=sh: et: sw=4: sts=4
| true |
3dff87e1b15ab8ab8fe80957cd13a70fa705f80f | Shell | PraveenKumarRana/Device-Drivers-Lab | /Exercise3/CED17I019.sh | UTF-8 | 618 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# the output file
FILE=/Users/praveenkumarrana/Documents/Device_Drivers/Exercise3/Test/download.out
# the url to retrieve
URL=http://www.google.co.in
# write header information to the log file
start_date=`date`
echo "START-------------------------------------------------" >> $FILE
echo "" >> $FILE
# retrieve the web page using curl. time the process with the time command.
time (curl --connect-timeout 100 $URL) >> $FILE
# write additional footer information to the log file
echo "" >> $FILE
end_date=`date`
echo "STARTTIME: $start_date" >> $FILE
echo "END TIME: $end_date" >> $FILE
echo "" >> $FILE
| true |
e4f4fa7b35868bd430bbe057141d7931d54e75aa | Shell | ADN-DevTech/3dsMax-Python-HowTos | /installstartup.sh | UTF-8 | 345 | 3.265625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
set -e
script="$(dirname "$(readlink -f "$0")")"
source "$script/scripts/inst.sh"
# make sure we have 3ds Max in the current path
if [ ! -f ./3dsmax.exe ]
then
exiterr "This script must run in a 3ds Max installation directory."
fi
echo "Install pip if missing"
installpip
echo "Install pystartup"
installpystartup
| true |
3d4bb0fc594609b546939413c8c761525e71be35 | Shell | BioDynaMo/biodynamo | /util/git/archive_branch/list.sh | UTF-8 | 1,034 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & University of Surrey for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
# DESCRIPTION:
# Lists all archived branches. Synchronizes with origin first
# USAGE:
# list.sh
# EXAMPLE OUTPUT:
# 088d1..(shortened)..6a commit refs/archive/lukas/commutative-pair-operation
#
# archive branch name does not include 'refs/archive'
# -> archived branch name is 'lukas/commutative-pair-operation'
# download all references
git fetch origin +refs/archive/*:refs/archive/*
git for-each-ref refs/archive
| true |
f7c90046dbebb2e85382f4c03519872f0c0e6b9a | Shell | matthieu-fa/FA-QA_E2E_sandbox | /ruby_scripts/ingestion/compare_encrypted_cols.sh | UTF-8 | 719 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env bash
# Compare the decrypted columns from production and development databases. There are two:
# names_encrypted and account_number_encrypted.
for encrypted_column in 'names_encrypted' 'account_number_encrypted'
do
prod_column=${encrypted_column}.prod
devel_column=${encrypted_column}.devel
if [ ! -f ${prod_column} ] ; then echo "Test failure: decrypted table: ${prod_column} not found. Exiting"; exit 1 ; fi
if [ ! -f ${devel_column} ] ; then echo "Test failure: decrypted table: ${devel_column} not found. Exiting"; exit 1 ; fi
count=$(diff $prod_column $devel_column | wc -l)
if [$count -ne 0 ] ; then echo 'Test failure: Encrypted columns differ. Exiting'; exit 1; fi
done
| true |
18edf1a799050b90ed39c48dc750e0861bf39016 | Shell | RVMI/rvmi_workspace_setup | /scripts/install_dependencies.sh | UTF-8 | 1,601 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#Install software dependencies
echo "This script is intended to run in the root of your catkin workspace, e.g. ~/catkin_ws. Your workspace needs to be built at least once before."
# To get access to ros commands
if ! source $( pwd )/devel/setup.bash; then
echo "Please navigate to the root of your workspace!"
exit -1
fi
# Planner for SkiROS2
if roscd skiros2; then
./scripts/install_fd_task_planner.sh
fi
# Python Dependencies
if roscd skiros2; then
cd .. && python$ROS_PYTHON_VERSION -m pip install -r requirements.txt --user
fi
if roscd skills_sandbox; then
python$ROS_PYTHON_VERSION -m pip install -r requirements.txt --user
fi
if roscd vision; then
python$ROS_PYTHON_VERSION -m pip install -r requirements.txt --user
fi
if roscd low_level_logics; then
python$ROS_PYTHON_VERSION -m pip install -r requirements.txt --user
fi
# Install realsense drivers
# Get distribution environment variables
. /etc/lsb-release
export repo="http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo"
export repo_check="$repo $DISTRIB_CODENAME main"
export repo_add="$repo main"
if ! grep -q "^deb .*$repo_check" /etc/apt/sources.list /etc/apt/sources.list.d/*; then\
sudo apt-key adv --keyserver keys.gnupg.net --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE
sudo add-apt-repository "$repo_add" -u
else
echo "Realsense repo exists already."
fi
sudo apt update
sudo apt install librealsense2-dkms librealsense2-utils librealsense2-dev
| true |
04cc9a575e7a44b2f6866dc6ebea497bcc8f050e | Shell | derutaT/dotfiles | /.zshrc | UTF-8 | 12,286 | 2.734375 | 3 | [] | no_license | export LANG=ja_JP.UTF-8
export EDITOR='vim'
## 履歴の保存先
HISTFILE=$HOME/.zsh-history
## メモリに展開する履歴の数
HISTSIZE=100000
## 保存する履歴の数
SAVEHIST=100000
## Behavior of Storing History
setopt hist_ignore_dups # ignore duplication command history list
setopt share_history # share command history data
# History Search
autoload history-search-end
zle -N history-beginning-search-backward-end history-search-end
zle -N history-beginning-search-forward-end history-search-end
bindkey "^P" history-beginning-search-backward-end
bindkey "^N" history-beginning-search-forward-end
bindkey -e
# zsh-completions
if [ -e /usr/local/share/zsh-completions ]; then
fpath=(/usr/local/share/zsh-completions $fpath)
fi
## 色を使う
setopt prompt_subst
## ビープを鳴らさない
setopt nobeep
## 内部コマンド jobs の出力をデフォルトで jobs -l にする
setopt long_list_jobs
## 補完候補一覧でファイルの種別をマーク表示
setopt list_types
## サスペンド中のプロセスと同じコマンド名を実行した場合はリジューム
setopt auto_resume
## 補完候補を一覧表示
setopt auto_list
## 直前と同じコマンドをヒストリに追加しない
setopt hist_ignore_dups
## cd 時に自動で push
setopt auto_pushd
## 同じディレクトリを pushd しない
setopt pushd_ignore_dups
## ファイル名で #, ~, ^ の 3 文字を正規表現として扱う
setopt extended_glob
## TAB で順に補完候補を切り替える
setopt auto_menu
## zsh の開始, 終了時刻をヒストリファイルに書き込む
setopt extended_history
## =command を command のパス名に展開する
setopt equals
## --prefix=/usr などの = 以降も補完
setopt magic_equal_subst
## ヒストリを呼び出してから実行する間に一旦編集
setopt hist_verify
## ファイル名の展開で辞書順ではなく数値的にソート
setopt numeric_glob_sort
## 出力時8ビットを通す
setopt print_eight_bit
## 補完候補のカーソル選択を有効に
zstyle ':completion:*:default' menu select=1
## 補完候補の色づけ
eval $(gdircolors ~/.dircolors-solarized/dircolors.ansi-universal)
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
export ZLS_COLORS=$LS_COLORS
## ディレクトリ名だけで cd
setopt auto_cd
## カッコの対応などを自動的に補完
setopt auto_param_keys
## ディレクトリ名の補完で末尾の / を自動的に付加し、次の補完に備える
setopt auto_param_slash
## スペルチェック
setopt correct
## {a-c} を a b c に展開する機能を使えるようにする
setopt brace_ccl
## Ctrl+S/Ctrl+Q によるフロー制御を使わないようにする
setopt NO_flow_control
## コマンドラインの先頭がスペースで始まる場合ヒストリに追加しない
setopt hist_ignore_space
## コマンドラインでも # 以降をコメントと見なす
setopt interactive_comments
## ファイル名の展開でディレクトリにマッチした場合末尾に / を付加する
setopt mark_dirs
## history (fc -l) コマンドをヒストリリストから取り除く。
setopt hist_no_store
## 補完候補を詰めて表示
setopt list_packed
## 最後のスラッシュを自動的に削除しない
setopt noautoremoveslash
## ssh-agent
# 自動で ssh-agent を起こしたり、接続したりする
SSH_ENV=$HOME/.ssh-agent
function start_agent {
ssh-agent > $SSH_ENV
chmod 600 $SSH_ENV
. $SSH_ENV > /dev/null
# ssh-add
}
if [ -f $SSH_ENV ]; then
. $SSH_ENV > /dev/null
if ps ${SSH_AGENT_PID:-999999} | grep ssh-agent$ > /dev/null &&
test -S $SSH_AUTH_SOCK; then
# agent already running
else
start_agent;
fi
else
start_agent
fi
# vcs_info 設定
# プロンプトの表示を変更
RPROMPT=""
#export PS1="[%n@%m] %%"
export PS1="[%n@MBP]%~%% "
autoload -Uz vcs_info
autoload -Uz add-zsh-hook
autoload -Uz is-at-least
autoload -Uz colors
# 以下の3つのメッセージをエクスポートする
# $vcs_info_msg_0_ : 通常メッセージ用 (緑)
# $vcs_info_msg_1_ : 警告メッセージ用 (黄色)
# $vcs_info_msg_2_ : エラーメッセージ用 (赤)
zstyle ':vcs_info:*' max-exports 3
zstyle ':vcs_info:*' enable git svn hg bzr
# 標準のフォーマット(git 以外で使用)
# misc(%m) は通常は空文字列に置き換えられる
zstyle ':vcs_info:*' formats '(%s)-[%b]'
zstyle ':vcs_info:*' actionformats '(%s)-[%b]' '%m' '<!%a>'
zstyle ':vcs_info:(svn|bzr):*' branchformat '%b:r%r'
zstyle ':vcs_info:bzr:*' use-simple true
if is-at-least 4.3.10; then
# git 用のフォーマット
# git のときはステージしているかどうかを表示
zstyle ':vcs_info:git:*' formats '(%s)-[%b]' '%c%u %m'
zstyle ':vcs_info:git:*' actionformats '(%s)-[%b]' '%c%u %m' '<!%a>'
zstyle ':vcs_info:git:*' check-for-changes true
zstyle ':vcs_info:git:*' stagedstr "+" # %c で表示する文字列
zstyle ':vcs_info:git:*' unstagedstr "-" # %u で表示する文字列
fi
# hooks 設定
if is-at-least 4.3.11; then
# git のときはフック関数を設定する
# formats '(%s)-[%b]' '%c%u %m' , actionformats '(%s)-[%b]' '%c%u %m' '<!%a>'
# のメッセージを設定する直前のフック関数
# 今回の設定の場合はformat の時は2つ, actionformats の時は3つメッセージがあるので
# 各関数が最大3回呼び出される。
zstyle ':vcs_info:git+set-message:*' hooks \
git-hook-begin \
git-untracked \
git-push-status \
git-nomerge-branch \
git-stash-count
# フックの最初の関数
# git の作業コピーのあるディレクトリのみフック関数を呼び出すようにする
# (.git ディレクトリ内にいるときは呼び出さない)
# .git ディレクトリ内では git status --porcelain などがエラーになるため
function +vi-git-hook-begin() {
if [[ $(command git rev-parse --is-inside-work-tree 2> /dev/null) != 'true' ]]; then
# 0以外を返すとそれ以降のフック関数は呼び出されない
return 1
fi
return 0
}
# untracked ファイル表示
#
# untracked ファイル(バージョン管理されていないファイル)がある場合は
# unstaged (%u) に ? を表示
function +vi-git-untracked() {
# zstyle formats, actionformats の2番目のメッセージのみ対象にする
if [[ "$1" != "1" ]]; then
return 0
fi
if command git status --porcelain 2> /dev/null \
| awk '{print $1}' \
| command grep -F '??' > /dev/null 2>&1 ; then
# unstaged (%u) に追加
hook_com[unstaged]+='?'
fi
}
# push していないコミットの件数表示
#
# リモートリポジトリに push していないコミットの件数を
# pN という形式で misc (%m) に表示する
function +vi-git-push-status() {
# zstyle formats, actionformats の2番目のメッセージのみ対象にする
if [[ "$1" != "1" ]]; then
return 0
fi
if [[ "${hook_com[branch]}" != "master" ]]; then
# master ブランチでない場合は何もしない
return 0
fi
# push していないコミット数を取得する
local ahead
ahead=$(command git rev-list origin/master..master 2>/dev/null \
| wc -l \
| tr -d ' ')
if [[ "$ahead" -gt 0 ]]; then
# misc (%m) に追加
hook_com[misc]+="(p${ahead})"
fi
}
# マージしていない件数表示
#
# master 以外のブランチにいる場合に、
# 現在のブランチ上でまだ master にマージしていないコミットの件数を
# (mN) という形式で misc (%m) に表示
function +vi-git-nomerge-branch() {
# zstyle formats, actionformats の2番目のメッセージのみ対象にする
if [[ "$1" != "1" ]]; then
return 0
fi
if [[ "${hook_com[branch]}" == "master" ]]; then
# master ブランチの場合は何もしない
return 0
fi
local nomerged
nomerged=$(command git rev-list master..${hook_com[branch]} 2>/dev/null | wc -l | tr -d ' ')
if [[ "$nomerged" -gt 0 ]] ; then
# misc (%m) に追加
hook_com[misc]+="(m${nomerged})"
fi
}
# stash 件数表示
#
# stash している場合は :SN という形式で misc (%m) に表示
function +vi-git-stash-count() {
# zstyle formats, actionformats の2番目のメッセージのみ対象にする
if [[ "$1" != "1" ]]; then
return 0
fi
local stash
stash=$(command git stash list 2>/dev/null | wc -l | tr -d ' ')
if [[ "${stash}" -gt 0 ]]; then
# misc (%m) に追加
hook_com[misc]+=":S${stash}"
fi
}
fi
function _update_vcs_info_msg() {
local -a messages
local prompt
LANG=en_US.UTF-8 vcs_info
if [[ -z ${vcs_info_msg_0_} ]]; then
# vcs_info で何も取得していない場合はプロンプトを表示しない
prompt=""
else
# vcs_info で情報を取得した場合
# $vcs_info_msg_0_ , $vcs_info_msg_1_ , $vcs_info_msg_2_ を
# それぞれ緑、黄色、赤で表示する
[[ -n "$vcs_info_msg_0_" ]] && messages+=( "%F{green}${vcs_info_msg_0_}%f" )
[[ -n "$vcs_info_msg_1_" ]] && messages+=( "%F{yellow}${vcs_info_msg_1_}%f" )
[[ -n "$vcs_info_msg_2_" ]] && messages+=( "%F{red}${vcs_info_msg_2_}%f" )
# 間にスペースを入れて連結する
prompt="${(j: :)messages}"
fi
RPROMPT="$prompt"
}
add-zsh-hook precmd _update_vcs_info_msg
# autojump
if [ $commands[autojump] ]; then # check if autojump is installed
if [ -f $HOME/.autojump/etc/profile.d/autojump.zsh ]; then # manual user-local installation
. $HOME/.autojump/etc/profile.d/autojump.zsh
elif [ -f $HOME/.autojump/share/autojump/autojump.zsh ]; then # another manual user-local installation
. $HOME/.autojump/share/autojump/autojump.zsh
elif [ -f $HOME/.nix-profile/etc/profile.d/autojump.zsh ]; then # nix installation
. $HOME/.nix-profile/etc/profile.d/autojump.zsh
elif [ -f /usr/share/autojump/autojump.zsh ]; then # debian and ubuntu package
. /usr/share/autojump/autojump.zsh
elif [ -f /etc/profile.d/autojump.zsh ]; then # manual installation
. /etc/profile.d/autojump.zsh
elif [ -f /etc/profile.d/autojump.sh ]; then # gentoo installation
. /etc/profile.d/autojump.sh
elif [ -f /usr/local/share/autojump/autojump.zsh ]; then # freebsd installation
. /usr/local/share/autojump/autojump.zsh
elif [ -f /opt/local/etc/profile.d/autojump.zsh ]; then # mac os x with ports
. /opt/local/etc/profile.d/autojump.zsh
elif [ $commands[brew] -a -f `brew --prefix`/etc/autojump.zsh ]; then # mac os x with brew
. `brew --prefix`/etc/autojump.zsh
fi
fi
# direnv
eval "$(direnv hook zsh)"
# setup local settings
for file in ~/.local/*.sh
do . $file
done
# setup peco functions
for file in ~/.peco/*.sh
do . $file
done
# load alias settings
for file in ~/.alias/*
do . $file
done
# tabtab source for serverless package
# uninstall by removing these lines or running `tabtab uninstall serverless`
[[ -f /Users/hara_masaki/work/scsi/scsi-cb-policy/serverless/node_modules/tabtab/.completions/serverless.zsh ]] && . /Users/hara_masaki/work/scsi/scsi-cb-policy/serverless/node_modules/tabtab/.completions/serverless.zsh
# tabtab source for sls package
# uninstall by removing these lines or running `tabtab uninstall sls`
[[ -f /Users/hara_masaki/work/scsi/scsi-cb-policy/serverless/node_modules/tabtab/.completions/sls.zsh ]] && . /Users/hara_masaki/work/scsi/scsi-cb-policy/serverless/node_modules/tabtab/.completions/sls.zsh | true |
e16dd7fb1d5363fd0438b083eddf4389d9af8775 | Shell | emmett1/xpkg | /xpkg | UTF-8 | 12,632 | 3.671875 | 4 | [] | no_license | #!/bin/bash
isinstalled() {
if [ "$(awk -v p="$1" -v RS="" '$1==p' "$PKG_DB" | head -n1)" = "" ]; then
return 1
fi
return 0
}
needroot() {
if [ $UID != 0 ]; then
if [ "$#" -eq 0 ]; then
needroot "This operation"
else
echo "$* need root access!"
fi
exit 1
fi
}
deplist() {
# check currently process package for loop
if [ ${#CHECK[@]} -gt 0 ]; then
if [[ "$(echo ${CHECK[@]} | tr " " "\n" | grep -x $1)" == "$1" ]]; then
return 0
fi
fi
# add package to currently process
CHECK+=($1)
# check dependencies
for i in $(pkg_depends $1); do
if [ "$quick" = 1 ] && isinstalled $i; then
continue
else
if [[ $(echo ${DEP[@]} | tr " " "\n" | grep -x $i) = "" ]]; then
if ! pkg_path $i >/dev/null; then
MISSINGDEP+=("$i($1)")
else
deplist $i
fi
fi
fi
done
# add dependency to list checked dep
if [[ $(echo ${DEP[@]} | tr " " "\n" | grep -x $1) = "" ]]; then
DEP+=($1)
fi
# delete process package array
for i in "${!CHECK[@]}"; do
if [[ ${CHECK[i]} = "$1" ]]; then
unset 'CHECK[i]'
fi
done
}
pkg_build() {
needarg $@
while [ $1 ]; do
case $1 in
-i|-u) ;;
-*) PKGMK_CMD+=($1);;
*) pkg=$1;;
esac
shift
done
if pkg_path $pkg >/dev/null; then
cd $(pkg_path $pkg) &>/dev/null
pkgmk ${PKGMK_CMD[@]} || exit $?
cd - &>/dev/null
fi
}
pkg_install() {
needarg $@
while [ $1 ]; do
case $1 in
-i|-u) ;;
-nd) NODEP=1 ;;
-t) TESTMODE=1 ;;
-*) PKGMK_CMD+=($1);;
*) PKG+=($1);;
esac
shift
done
if [ "$TESTMODE" != 1 ]; then
needroot
fi
if [[ "${PKG[@]}" = "" ]]; then
echo "Please provide port name to install."
exit 1
fi
for p in ${PKG[@]}; do
if isinstalled $p; then
echo "Port '$p' is installed."
elif [ "$(pkg_path $p)" = "" ]; then
echo "Port '$p' not found."
else
PPKG+=($p)
fi
done
[ "${#PPKG[@]}" -eq 0 ] && return 0
if [ "$NODEP" = 1 ]; then
pkg=${PPKG[@]}
else
pkg=$(pkg_deplist -q ${PPKG[@]})
fi
error=0
for p in $pkg; do
if pkg_path $p >/dev/null; then
if [ "$TESTMODE" = 1 ]; then
echo "Installing $p..."
else
cd $(pkg_path $p) &>/dev/null
[ -e pre-install ] && sh pre-install
pkgmk ${PKGMK_CMD[@]} -i -d
if [ $? = 0 ]; then
pkg_done+=($p)
else
error=1
break
fi
[ -e post-install ] && sh post-install
cd - &>/dev/null
fi
else
if [ "$TESTMODE" = 1 ]; then
echo "Skipping $p..."
fi
fi
done
if [ ${#pkg_done[@]} -gt 0 ]; then
pkg_trigger ${pkg_done[@]}
fi
return $error
}
pkg_path() {
needarg $@
local pkg=$1
for c in $PORT; do
if [ -f $c/$pkg/Pkgfile ]; then
echo $c/$pkg
return 0
fi
done
return 1
}
pkg_search() {
needarg $@
local pattern=$1
[ "$pattern" ] || return 1
find $PORT -type d \
| awk -F '/' '{print $NF}' \
| grep $pattern \
| sort \
| uniq \
| while read line; do
isinstalled $line && echo -ne "[*]" || echo -ne "[ ]"
echo -ne " $line "
awk -F "=" '/^version=/{print $2}' $(pkg_path $line)/Pkgfile | head -n1
done
}
pkg_depends() {
needarg $@
local pkg=$1
ppath=$(pkg_path $pkg) || return 1
grep "^# Depends on[[:blank:]]*:" $ppath/Pkgfile \
| sed 's/^# Depends on[[:blank:]]*:[[:blank:]]*//' \
| tr ' ' '\n' \
| awk '!a[$0]++' \
| sed 's/,//'
}
pkg_deplist() {
needarg $@
while [ "$1" ]; do
case $1 in
-q) quick=1;;
-*) ;;
*) PKG+=($1);;
esac
shift
done
if [ "${#PKG[@]}" -gt 0 ]; then
for p in ${PKG[@]}; do
deplist $p
done
else
return 1
fi
if [ "$quick" = 1 ]; then
echo ${DEP[@]} | tr ' ' '\n'
else
for p in ${DEP[@]}; do
if isinstalled $p; then
echo "[*] $p"
else
echo "[-] $p"
fi
done
if [ "${#MISSINGDEP[@]}" -gt 0 ]; then
for m in ${MISSINGDEP[@]}; do
echo "Missing deps: $m" | sed 's/(/ (/'
done
fi
fi
}
pkg_cat() {
needarg $@
local pkg=$1
ppath=$(pkg_path $pkg) || return 1
cat $ppath/Pkgfile
}
pkg_info() {
needarg $@
local pkg=$1
ppath=$(pkg_path $pkg) || return 1
. $ppath/Pkgfile
desc=$(grep "^# Description[[:blank:]]*:" $ppath/Pkgfile | sed 's/^# Description[[:blank:]]*:[[:blank:]]*//')
url=$(grep "^# URL[[:blank:]]*:" $ppath/Pkgfile | sed 's/^# URL[[:blank:]]*:[[:blank:]]*//')
maint=$(grep "^# Maintainer[[:blank:]]*:" $ppath/Pkgfile | sed 's/^# Maintainer[[:blank:]]*:[[:blank:]]*//')
deps=$(pkg_depends $pkg | tr '\n' ' ')
echo -e "Name: $pkg"
echo -e "Path: $ppath"
echo -e "Version: $version"
echo -e "Release: $release"
echo -e "Description: $desc"
echo -e "URL: $url"
echo -e "Maintainer: $maint"
echo -e "Dependencies: $deps"
}
pkg_listorphan() {
local pkg dep
tmpallpkg=$(mktemp)
tmpalldep=$(mktemp)
for pkg in $(pkginfo -i | awk '{print $1}'); do
echo $pkg >> $tmpallpkg
dep="$dep $(pkg_depends $pkg)"
done
echo $dep | tr ' ' '\n' | sort | uniq > $tmpalldep
grep -xvF -f $tmpalldep $tmpallpkg
rm $tmpalldep $tmpallpkg
}
pkg_listport() {
grep -RE ^name=[0-9a-z]+ ${PORT[@]} | awk -F = '{print $2}' | sort | uniq
}
pkg_update() {
while [ "$1" ]; do
case $1 in
-t) TESTMODE=1;;
-nd) NODEP=1;;
-*) PKGMK_CMD+=($1);;
*) PKG+=($1)
esac
shift
done
if [ "$TESTMODE" != 1 ]; then
needroot
fi
for p in ${PKG[@]}; do
if ! isinstalled $p; then
echo "Port '$p' is not installed."
else
PPKG+=($p)
fi
done
if [ "${#PKG[@]}" -gt 0 ] || [ "${#PPKG[@]}" -gt 0 ]; then
UPKG=${PPKG[@]}
else
UPKG=$(pkg_diff -q)
fi
if [ "${#UPKG[@]}" -gt 0 ]; then
if [ "$NODEP" != 1 ]; then
upkg=$(pkg_deplist -q ${UPKG[@]})
else
upkg=${UPKG[@]}
fi
error=0
for p in $upkg; do
if pkg_path $p >/dev/null; then
if [ "$TESTMODE" = 1 ]; then
isinstalled $p && echo "Updating $p..." || echo "Installing $p..."
else
if isinstalled $p; then
cd $(pkg_path $p) &>/dev/null
pkgmk ${PKGMK_CMD[@]} -u -d
if [ $? = 0 ]; then
pkg_done+=($p)
else
error=$?
break
fi
cd - &>/dev/null
else
cd $(pkg_path $p) &>/dev/null
[ -e pre-install ] && sh pre-install
pkgmk ${PKGMK_CMD[@]} -i -d
if [ $? = 0 ]; then
pkg_done+=($p)
else
error=$?
break
fi
[ -e post-install ] && sh post-install
cd - &>/dev/null
fi
fi
else
if [ "$TESTMODE" = 1 ]; then
echo "Skipping $p..."
fi
fi
done
else
echo "Nothing to update."
fi
if [ ${#pkg_done[@]} -gt 0 ]; then
pkg_trigger ${pkg_done[@]}
fi
return $error
}
pkg_diff() {
while [ "$1" ]; do
case $1 in
-q|--quick) QUICK=1;;
esac
shift
done
pkginfo -i | while read -r pkg ver; do
. $(pkg_path $pkg)/Pkgfile 2>/dev/null
if [ "$version" ] && [ "$ver" != "${version}-${release}" ]; then
if [ "$QUICK" = 1 ]; then
echo "$pkg"
else
echo "$pkg $ver ==> $pkg ${version}-${release}"
fi
fi
unset version release ver
done
}
pkg_trigger() {
#needroot "Run trigger"
if [[ -z "$@" ]]; then
for i in trig_{1..12}; do
eval $i=1
done
else
pre_triggers $@
fi
post_triggers
}
post_triggers() {
if [ "$trig_11" = 1 ] && [ $(type -p fc-cache) ]; then
echo "trigger: Updating fontconfig cache..."
fc-cache -s
fi
if [ "$trig_10" = 1 ] && [ $(type -p gdk-pixbuf-query-loaders) ]; then
echo "trigger: Probing GDK-Pixbuf loader modules..."
gdk-pixbuf-query-loaders --update-cache
fi
if [ "$trig_9" = 1 ] && [ $(type -p gio-querymodules) ]; then
echo "trigger: Updating GIO module cache..."
gio-querymodules /usr/lib/gio/modules
fi
if [ "$trig_8" = 1 ] && [ $(type -p glib-compile-schemas) ]; then
echo "trigger: Compiling GSettings XML schema files..."
glib-compile-schemas /usr/share/glib-2.0/schemas
fi
if [ "$trig_7" = 1 ] && [ $(type -p gtk-query-immodules-2.0) ]; then
echo "trigger: Probing GTK2 input method modules..."
gtk-query-immodules-2.0 --update-cache
fi
if [ "$trig_6" = 1 ] && [ $(type -p gtk-query-immodules-3.0) ]; then
echo "trigger: Probing GTK3 input method modules..."
gtk-query-immodules-3.0 --update-cache
fi
if [ "$trig_5" = 1 ] && [ $(type -p gtk-update-icon-cache) ]; then
echo "trigger: Updating icon theme caches..."
for dir in /usr/share/icons/* ; do
if [[ -e $dir/index.theme ]]; then
gtk-update-icon-cache -q $dir &>/dev/null
else
rm -f $dir/icon-theme.cache
rmdir --ignore-fail-on-non-empty $dir
fi
done
fi
if [ "$trig_4" = 1 ] && [ $(type -p udevadm) ]; then
echo "trigger: Updating hardware database..."
udevadm hwdb --update
fi
if [ "$trig_3" = 1 ] && [ $(type -p mkfontdir) ] && [ $(type -p mkfontscale) ]; then
echo "trigger: Updating X fontdir indices..."
for dir in $(find /usr/share/fonts -maxdepth 1 -type d \( ! -path /usr/share/fonts -a ! -name X11 \)) /usr/share/fonts/X11/*; do
rm -f $dir/fonts.{scale,dir} $dir/.uuid
rmdir --ignore-fail-on-non-empty $dir
[[ -d $dir ]] || continue
mkfontdir $dir
mkfontscale $dir
done
fi
if [ "$trig_2" = 1 ] && [ $(type -p update-desktop-database) ]; then
echo "trigger: Updating desktop file MIME type cache..."
update-desktop-database --quiet
fi
if [ "$trig_1" = 1 ] && [ $(type -p update-mime-database) ]; then
echo "trigger: Updating the MIME type database..."
update-mime-database /usr/share/mime
fi
}
pre_triggers() {
local pkg
# mime db
if [ "$trig_1" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/mime/$)" ]; then
trig_1=1
break
fi
done
fi
# desktop db
if [ "$trig_2" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/applications/$)" ]; then
trig_2=1
break
fi
done
fi
# mkfontdir
if [ "$trig_3" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/fonts/[^/]*/$)" ]; then
trig_3=1
break
fi
done
fi
# hwdb
if [ "$trig_4" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^etc/udev/hwdb.d/$)" ]; then
trig_4=1
break
fi
done
fi
# desktop db
if [ "$trig_2" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/applications/$)" ]; then
trig_2=1
break
fi
done
fi
# mkfontdir
if [ "$trig_3" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/fonts/[^/]*/$)" ]; then
trig_3=1
break
fi
done
fi
# icon caches
if [ "$trig_5" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/icons/[^/]*/$)" ]; then
trig_5=1
break
fi
done
fi
# gtk3 immodules
if [ "$trig_6" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/lib/gtk-3.0/3.0.0/immodules/.*.so)" ]; then
trig_6=1
break
fi
done
fi
# gtk2 immodules
if [ "$trig_7" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/lib/gtk-2.0/2.10.0/immodules/.*.so)" ]; then
trig_7=1
break
fi
done
fi
# gsettings schema
if [ "$trig_8" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/glib-2.0/schemas/$)" ]; then
trig_8=1
break
fi
done
fi
# gio modules
if [ "$trig_9" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/lib/gio/modules/.*.so)" ]; then
trig_9=1
break
fi
done
fi
# gdk-pixbuf
if [ "$trig_10" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/lib/gdk-pixbuf-2.0/2.10.0/loaders/.*.so)" ]; then
trig_10=1
break
fi
done
fi
# font caches
if [ "$trig_11" != "1" ]; then
for pkg in $@; do
if isinstalled $pkg && [ "$(pkg_files $pkg | grep ^usr/share/fonts/[^/]*/$)" ]; then
trig_11=1
break
fi
done
fi
}
pkg_files() {
needarg $@
awk -v p="$1" -v RS="" '$1==p' "$PKG_DB" | tail +3
}
needarg() {
[ "$*" ] && return 0
echo "This mode need arguments."
exit 1
}
trap "exit 1" SIGHUP SIGINT SIGQUIT SIGTERM
REPO_FILE="/etc/xpkg.conf"
PKG_DB="/var/lib/pkg/db"
if [ ! -f "$REPO_FILE" ]; then
msgerr "repo file not exist. ($REPO_FILE)"
exit 1
else
while read -r item value junk; do
case $item in
""|"#"*) continue ;;
portdir) PORT+=" $value"
esac
done < "$REPO_FILE"
fi
if [ -z "$1" ]; then
echo "Run '$(basename $0) help' for more information."
exit 0
else
if [ "$(type -t pkg_$1)" = function ]; then
mode=$1
shift
else
echo "Invalid mode. ($1)"
exit 1
fi
fi
pkg_${mode} $@
exit $?
| true |
771c24b428405ec40add9b4b7940e136bc3c2f75 | Shell | italiangrid/org.glite.wms | /org.glite.wms.wmproxy-api-cpp/build_deb.sh | UTF-8 | 5,413 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
INITIALPWD=${PWD}
PKGVERSION=3.5.0
PKGAGE=1
PKGNAME=libglite-wms-wmproxy-api-cpp
PRJNAME=org.glite.wms.wmproxy-api-cpp
set -e
if [ "x$1" == "x-s" ]; then
mkdir -p SOURCES
tar --exclude .git --exclude debian --exclude build* -zcf ${PKGNAME}_${PKGVERSION}.orig.tar.gz org.glite.wms/${PRJNAME}
fi
mkdir -p BINARIES org.glite.wms/${PRJNAME}/debian/source
mkdir -p ${INITIALPWD}/STAGE
###########################################################################
#
# Control file
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/control
Source: ${PKGNAME}
Section: libs
Priority: optional
Maintainer: WMS Support <wms-support@cnaf.infn.it>
Build-Depends: debhelper (>= 8.0.0~), cmake, emi-pkgconfig-compat,
libglite-wms-utils-classad-dev , libglite-wms-utils-exception-dev, libclassad0-dev, libgridsite-dev, gsoap
Standards-Version: 3.5.0
Homepage: http://glite.cern.ch/
Package: ${PKGNAME}
Architecture: any
Depends: \${shlibs:Depends}, \${misc:Depends}
Description: WMProxy CPP API libraries
Package: ${PKGNAME}-dev
Section: libdevel
Architecture: any
Depends: ${PKGNAME} (= \${binary:Version}), libglite-wms-utils-classad-dev , libglite-wms-utils-exception-dev, libclassad0-dev, libgridsite-dev, gsoap, \${misc:Depends}
Description: WMProxy CPP API libraries, headers and pc files
Package: ${PKGNAME}-doc
Section: doc
Architecture: any
Depends: ${PKGNAME} (= \${binary:Version}), libglite-wms-utils-classad-dev , libglite-wms-utils-exception-dev, libclassad0-dev, libgridsite-dev, gsoap, \${misc:Depends}
Description: WMProxy CPP API documentation
EOF
###########################################################################
#
# Copyright file
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/copyright
Format-Specification: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=135
Name: ${PKGNAME}
Maintainer: WMS Support <wms-support@cnaf.infn.it>
Source: http://glite.cern.ch/
Files: *
Copyright (c) Members of the EGEE Collaboration. 2004.
See http://www.eu-egee.org/partners/ for details on the copyright
holders.
License: Apache
On Debian systems, the full text of the Apache License version 2 can be found
in the file /usr/share/common-licenses/Apache-2.0.
EOF
###########################################################################
#
# Installation files
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/${PKGNAME}.install
usr/lib/lib*.so.*
EOF
cat << EOF > org.glite.wms/${PRJNAME}/debian/${PKGNAME}-dev.install
usr/include/glite/wms/wmproxyapi/*
usr/lib/lib*.so
usr/lib/pkgconfig/wmproxy-api-cpp.pc
EOF
cat << EOF > org.glite.wms/${PRJNAME}/debian/${PKGNAME}-doc.install
usr/share/doc/*
EOF
###########################################################################
#
# Rule file
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/rules
#!/usr/bin/make -f
export DH_COMPAT=7
#export DH_VERBOSE=1
INSTALLDIR=\$(CURDIR)/debian/tmp
build-stamp:
touch build-stamp
build:build-stamp
#mkdir -p build && cd build && cmake -DCMAKE_INSTALL_PREFIX:string=\$(INSTALLDIR) -DPVER:string=${PKGVERSION} \$(CURDIR) && cd -
#mkdir -p build && cd build && cmake -DPREFIX:string=\$(INSTALLDIR) -DPVER:string=${PKGVERSION} \$(CURDIR) && cd -
cmake -DPREFIX:string=\$(INSTALLDIR)/usr -DPVER:string=${PKGVERSION} \$(CURDIR)
make
clean:
dh_testdir
dh_testroot
rm -rf build-stamp
rm -rf configure-stamp
rm -rf \$(INSTALLDIR)
dh_clean
find -iname '*cmake*' -not -name CMakeLists.txt -exec rm -rf {} \+
install: build
dh_testdir
dh_testroot
dh_prep
dh_installdirs
make install
cmake -DPREFIX:string=${INITIALPWD}/STAGE/usr -DPVER:string=${PKGVERSION} \$(CURDIR)
make install
sed 's|^prefix=.*|prefix=/usr|g' \$(INSTALLDIR)/usr/lib/pkgconfig/wmproxy-api-cpp.pc > \$(INSTALLDIR)/usr/lib/pkgconfig/wmproxy-api-cpp.pc.new
mv \$(INSTALLDIR)/usr/lib/pkgconfig/wmproxy-api-cpp.pc.new \$(INSTALLDIR)/usr/lib/pkgconfig/wmproxy-api-cpp.pc
binary-indep: build install
binary-arch: build install
dh_testdir
dh_testroot
dh_installdocs
dh_installman
dh_installchangelogs
dh_install
dh_link
dh_strip
dh_compress
dh_fixperms
dh_installdeb
dh_makeshlibs
dh_shlibdeps
dh_gencontrol
dh_md5sums
dh_builddeb --destdir=${PWD}/BINARIES
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install
EOF
###########################################################################
#
# Package format
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/source/format
3.0 (quilt)
EOF
###########################################################################
#
# Changelog
#
###########################################################################
cat << EOF > org.glite.wms/${PRJNAME}/debian/changelog
${PKGNAME} (${PKGVERSION}-${PKGAGE}) stable; urgency=low
* New major release
-- WMS Support <wms-support@cnaf.infn.it> Fri, 31 Aug 2012 00:00:00 +0000
EOF
if [ "x$1" == "x-s" ]; then
dpkg-source -i.* -b org.glite.wms/${PRJNAME}
mv ${PKGNAME}_${PKGVERSION}* SOURCES
fi
cd org.glite.wms/${PRJNAME}
fakeroot make -f debian/rules binary
rm -rf build debian build-stamp
cd -
| true |
ec89b9c184ccb382f54b8944177fc70c8d0c7f74 | Shell | samcom12/hpc-collab | /clusters/vx/common/provision/verify/slurm/3.slurm-acctmgr-set-default-acct | UTF-8 | 1,401 | 3.625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
## $Header: $
## Source:
## @file vxsched/verify/slurm/2.slurm-ctld-commands
## @brief ensure slurmctld commands are functional
VCLOAD=../../../provision/loader/shload.sh
if [ ! -f "${VCLOAD}" ] ; then
echo "${0}: missing: ${VCLOAD}"
exit 99
fi
source ${VCLOAD}
# if we're given an argument, append test output to it
declare -x OUT=${1:-""}
if [ -n "${OUT}" ] ; then
touch ${OUT} || exit 1
exec > >(tee -a "${OUT}") 2>&1
fi
SetFlags >/dev/null 2>&1
if [ -z "${USERADD}" ] ; then
ErrExit ${EX_CONFIG} "USERADD empty"
fi
if [ ! -d ${USERADD} ] ; then
ErrExit ${EX_CONFIG} "USERADD: ${USERADD} is not a directory"
fi
# slurm version 20 seems to require default account to be (re)set late in setup
users=$(echo $(ls ${USERADD} | grep -v root))
if [ -z "${users}" ] ; then
ErrExit ${EX_CONFIG} "users: list empty?"
fi
msg=""
for u in ${users}
do
if [ ! -d ${USERADD}/${u} ] ; then
continue
fi
msg="${msg} ${u}"
d=${USERADD}/${u}/slurm/acct/DefaultAccount
if [ -d ${d} ] ; then
defacct=$(ls ${USERADD}/${u}/slurm/acct/DefaultAccount)
if [ -n "${defacct}" ] ; then
echo sacctmgr -iQ update user ${u} where cluster=${CLUSTERNAME} set DefaultAccount=${defacct}
Rc ErrExit ${EX_SOFTWARE} "sacctmgr -iQ update user ${u} where cluster=${CLUSTERNAME} set DefaultAccount=${defacct}"
fi
fi
done
Verbose "${msg}"
trap '' 0
exit ${EX_OK}
| true |
02e07144202a96732e6a7d94d9fbfc2fd86311ed | Shell | stevalla/COVID-19-ItalyvsWorld | /scripts/update_datasets.sh | UTF-8 | 1,239 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env bash
folder="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Read the date of yesterday
var=`date +"%FORMAT_STRING"`
now=`date +"%m_%d_%Y"`
yesterday=`date -d "1 day ago" +"%Y-%m-%d"`
day=$(date -d "$yesterday" '+%d')
# Every 10 days download all the data to upgrade old data
if [[ $(( 10#$day % 10 )) -eq 0 ]]; then
echo "Reloading all the data"
rm "$folder"/../data/cleaned/*.csv
fi
# set pythonpath env variable
export PYTHONPATH="$folder"/covid_by_ste
declare -a datasets=("world" "italy" "usa")
echo "Downloading data"
for country in "${datasets[@]}"; do
# preprocessing data
echo "Scanning ${country}"
bash "$folder"/download_data.sh ${country}
done
echo "Preprocessing data"
python "$folder"/covid_by_ste/preprocessing.py
for country in "${datasets[@]}"; do
# Store history
for file in "$folder"/../data/"$country"/*.csv; do
filename=$(basename -- "$file")
f="${filename%.*}"
mv "$file" "$folder"/../data/history/"$country"/"$f"_"$yesterday".csv
done
# Cleaning
rm -r "$folder"/${country}_data
rm -r "$folder"/../data/"$country"
done
# Update readme date update
sed -i "s/\(LAST UPDATE:\).*\( 06:00 UTC-00\)/\1 ${yesterday}\2/g" README.md | true |
700a9ecc94387cd361033b7c20e9cd61b7bb4eb8 | Shell | scalar-labs/scalar-jepsen | /docker/node/init.sh | UTF-8 | 431 | 2.953125 | 3 | [] | no_license | #!/bin/sh
mkdir -p /var/run/sshd
sed -i "s/UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config
sed -i "s/PermitRootLogin without-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
# wait for updating the ssh key
while [ ! -f /keys/control_ready ]
do
sleep 1
done
mkdir -p ~/.ssh
cat /keys/id_rsa.pub >> ~/.ssh/authorized_keys
hostname=$(hostname)
touch /keys/${hostname}_ready
exec /usr/sbin/sshd -D
| true |
9190915e622bf5796b0fcbefb91742ca32c377fa | Shell | TheMengLab/Si-C | /analysis/structure_analysis/analysis/align/intermingle/do.sh | UTF-8 | 513 | 2.84375 | 3 | [] | no_license | #!/bin/bash
cp ../../../prepare/assignall10.dat .
for i in `seq 1 20`
do
echo -e ../align${i}.dat '\n' assignall10.dat '\n' 2 '\n' count${i}.dat | ./getintercount.o
done
ls count*.dat > filelist.dat
echo -e filelist.dat '\n' output.dat | ./listaverage.o
paste assignall10.dat output.dat | awk '{print $2,$4}' > temp.dat
rm intermingle.dat
for i in `seq 1 20`
do
grep ^$[i-1]" " temp.dat | awk '{print $2}' > chr${i}.dat
nl chr${i}.dat | awk '{if($2>4) SUM++} END {print SUM/$1}' >> intermingle.dat
done
| true |
739065c1ded2a47c71987c5324d5913c9f9176d3 | Shell | nonomal/speedtest | /speedtest.sh | UTF-8 | 50,260 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
about() {
echo ""
echo " ========================================================= "
echo " \ Speedtest https://bench.monster / "
echo " \ System info, Geekbench, I/O test and speedtest / "
echo " \ v1.6.0 2023-07-29 / "
echo " ========================================================= "
echo ""
}
cancel() {
echo ""
next;
echo " Abort ..."
echo " Cleanup ..."
cleanup;
echo " Done"
exit
}
trap cancel SIGINT
benchram="$HOME/tmpbenchram"
NULL="/dev/null"
# determine architecture of host
ARCH=$(uname -m)
if [[ $ARCH = *x86_64* ]]; then
# host is running a 64-bit kernel
ARCH="x64"
elif [[ $ARCH = *i?86* ]]; then
# host is running a 32-bit kernel
ARCH="x86"
else
# host is running a non-supported kernel
echo -e "Architecture not supported."
exit 1
fi
echostyle(){
if hash tput 2>$NULL; then
echo " $(tput setaf 6)$1$(tput sgr0)"
echo " $1" >> $log
else
echo " $1" | tee -a $log
fi
}
benchinit() {
# check release
if [ -f /etc/redhat-release ]; then
release="centos"
elif cat /etc/issue | grep -Eqi "debian"; then
release="debian"
elif cat /etc/issue | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then
release="centos"
elif cat /proc/version | grep -Eqi "debian"; then
release="debian"
elif cat /proc/version | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then
release="centos"
fi
# check OS
#if [ "${release}" == "centos" ]; then
# echo "Checking OS ... [ok]"
#else
# echo "Error: This script must be run on CentOS!"
# exit 1
#fi
#echo -ne "\e[1A"; echo -ne "\e[0K\r"
# check root
[[ $EUID -ne 0 ]] && echo -e "Error: This script must be run as root!" && exit 1
# check python
if [ ! -e '/usr/bin/python' ]; then
echo " Installing Python2 ..."
if [ "${release}" == "centos" ]; then
yum -y install python2 > /dev/null 2>&1
alternatives --set python /usr/bin/python2 > /dev/null 2>&1
else
apt-get -y install python > /dev/null 2>&1
fi
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
# check curl
if [ ! -e '/usr/bin/curl' ]; then
echo " Installing Curl ..."
if [ "${release}" == "centos" ]; then
yum -y install curl > /dev/null 2>&1
else
apt-get -y install curl > /dev/null 2>&1
fi
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
# check wget
if [ ! -e '/usr/bin/wget' ]; then
echo " Installing Wget ..."
if [ "${release}" == "centos" ]; then
yum -y install wget > /dev/null 2>&1
else
apt-get -y install wget > /dev/null 2>&1
fi
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
# check bzip2
if [ ! -e '/usr/bin/bzip2' ]; then
echo " Installing bzip2 ..."
if [ "${release}" == "centos" ]; then
yum -y install bzip2 > /dev/null 2>&1
else
apt-get -y install bzip2 > /dev/null 2>&1
fi
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
# check tar
if [ ! -e '/usr/bin/tar' ]; then
echo " Installing tar ..."
if [ "${release}" == "centos" ]; then
yum -y install tar > /dev/null 2>&1
else
apt-get -y install tar > /dev/null 2>&1
fi
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
# install speedtest-cli
if [ ! -e 'speedtest.py' ]; then
echo " Installing Speedtest-cli ..."
wget --no-check-certificate https://raw.githubusercontent.com/laset-com/speedtest-cli/master/speedtest.py > /dev/null 2>&1
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
chmod a+rx speedtest.py
# install tools.py
if [ ! -e 'tools.py' ]; then
echo " Installing tools.py ..."
wget --no-check-certificate https://raw.githubusercontent.com/laset-com/speedtest/master/tools.py > /dev/null 2>&1
echo -ne "\e[1A"; echo -ne "\e[0K\r"
fi
chmod a+rx tools.py
sleep 5
# start
start=$(date +%s)
}
get_opsy() {
[ -f /etc/redhat-release ] && awk '{print ($1,$3~/^[0-9]/?$3:$4)}' /etc/redhat-release && return
[ -f /etc/os-release ] && awk -F'[= "]' '/PRETTY_NAME/{print $3,$4,$5}' /etc/os-release && return
[ -f /etc/lsb-release ] && awk -F'[="]+' '/DESCRIPTION/{print $2}' /etc/lsb-release && return
}
next() {
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
}
next2() {
printf "%-57s\n" "-" | sed 's/\s/-/g'
}
delete() {
echo -ne "\e[1A"; echo -ne "\e[0K\r"
}
speed_test(){
if [[ $1 == '' ]]; then
temp=$(python speedtest.py --secure --share 2>&1)
is_down=$(echo "$temp" | grep 'Download')
result_speed=$(echo "$temp" | awk -F ' ' '/results/{print $3}')
if [[ ${is_down} ]]; then
local REDownload=$(echo "$temp" | awk -F ':' '/Download/{print $2}')
local reupload=$(echo "$temp" | awk -F ':' '/Upload/{print $2}')
local relatency=$(echo "$temp" | awk -F ':' '/Hosted/{print $2}')
temp=$(echo "$relatency" | awk -F '.' '{print $1}')
if [[ ${temp} -gt 50 ]]; then
relatency="*"${relatency}
fi
local nodeName=$2
temp=$(echo "${REDownload}" | awk -F ' ' '{print $1}')
if [[ $(awk -v num1=${temp} -v num2=0 'BEGIN{print(num1>num2)?"1":"0"}') -eq 1 ]]; then
printf "%-17s%-17s%-17s%-7s\n" " ${nodeName}" "${reupload}" "${REDownload}" "${relatency}" | tee -a $log
fi
else
local cerror="ERROR"
fi
else
temp=$(python speedtest.py --secure --server $1 --share 2>&1)
is_down=$(echo "$temp" | grep 'Download')
if [[ ${is_down} ]]; then
local REDownload=$(echo "$temp" | awk -F ':' '/Download/{print $2}')
local reupload=$(echo "$temp" | awk -F ':' '/Upload/{print $2}')
#local relatency=$(echo "$temp" | awk -F ':' '/Hosted/{print $2}')
local relatency=$(pingtest $3)
#temp=$(echo "$relatency" | awk -F '.' '{print $1}')
#if [[ ${temp} -gt 1000 ]]; then
#relatency=" - "
#fi
local nodeName=$2
temp=$(echo "${REDownload}" | awk -F ' ' '{print $1}')
if [[ $(awk -v num1=${temp} -v num2=0 'BEGIN{print(num1>num2)?"1":"0"}') -eq 1 ]]; then
printf "%-17s%-17s%-17s%-7s\n" " ${nodeName}" "${reupload}" "${REDownload}" "${relatency}" | tee -a $log
fi
else
local cerror="ERROR"
fi
fi
}
print_speedtest() {
echo "" | tee -a $log
echostyle "## Global Speedtest.net"
echo "" | tee -a $log
printf "%-32s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '30514' 'USA, New York (Optimum) ' 'http://speedgauge2.optonline.net'
speed_test '17384' 'USA, Chicago (Windstream) ' 'http://chicago02.speedtest.windstream.net'
speed_test '14238' 'USA, Dallas (Frontier) ' 'http://dallas.tx.speedtest.frontier.com'
speed_test '15781' 'USA, Miami (Sprint) ' 'http://ookla1.miaufl.sprintadp.net'
speed_test '18401' 'USA, Los Angeles (Windstream) ' 'http://la02.speedtest.windstream.net'
speed_test '26922' 'UK, London (toob Ltd) ' 'http://185.82.8.1'
speed_test '24215' 'France, Paris (Orange) ' 'http://178.21.176.100'
speed_test '20507' 'Germany, Berlin (DNS:NET) ' 'http://speedtest01.dns-net.de'
speed_test '21378' 'Spain, Madrid (MasMovil) ' 'http://speedtest-mad.masmovil.com'
speed_test '395' 'Italy, Rome (Unidata) ' 'http://speedtest2.unidata.it'
speed_test '10637' 'India, Mumbai (OneBroadband) ' 'http://in2net.in2cable.com'
speed_test '51914' 'Singapore (StarHub) ' 'http://co2dsvr03.speedtest.starhub.com'
speed_test '7139' 'Japan, Tsukuba (SoftEther) ' 'http://speedtest2.softether.co.jp'
speed_test '1267' 'Australia, Sydney (Optus) ' 'http://s1.speedtest.syd.optusnet.com.au'
speed_test '6591' 'RSA, Randburg (Cool Ideas) ' 'http://sp2.cisp.co.za'
speed_test '11488' 'Brazil, Sao Paulo (Criare) ' 'http://ookla.spcom.net.br'
rm -rf speedtest.py
}
print_speedtest_usa() {
echo "" | tee -a $log
echostyle "## USA Speedtest.net"
echo "" | tee -a $log
printf "%-33s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-76s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-76s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '30514' 'USA, New York (Optimum) ' 'http://speedgauge2.optonline.net'
speed_test '13429' 'USA, Boston (Starry, Inc.) ' 'http://speedtest-server.starry.com'
speed_test '15790' 'USA, Washington, DC (Sprint) ' 'http://ookla1.washdc.sprintadp.net'
speed_test '27833' 'USA, Charlotte, NC (Windstream)' 'http://charlotte02.speedtest.windstream.net'
speed_test '17387' 'USA, Atlanta (Windstream) ' 'http://atlanta02.speedtest.windstream.net'
speed_test '1779' 'USA, Miami (Comcast) ' 'http://68.86.199.101'
speed_test '15779' 'USA, Nashville (Sprint) ' 'http://ookla1.nsvltn.sprintadp.net'
speed_test '10152' 'USA, Indianapolis (CenturyLink)' 'http://indianapolis.speedtest.centurylink.net'
speed_test '10138' 'USA, Cleveland (CenturyLink) ' 'http://cleveland.speedtest.centurylink.net'
speed_test '17384' 'USA, Chicago (Windstream) ' 'http://chicago02.speedtest.windstream.net'
speed_test '4557' 'USA, St. Louis (Elite Fiber) ' 'http://speed.elitesystemsllc.com'
speed_test '2917' 'USA, Minneapolis (US Internet) ' 'http://speedtest.usiwireless.com'
speed_test '17709' 'USA, Kansas City (UPNfiber) ' 'http://speedtest.upnfiber.com'
speed_test '17751' 'USA, Oklahoma City (OneNet) ' 'http://okc-speedtest.onenet.net'
speed_test '14238' 'USA, Dallas (Frontier) ' 'http://dallas.tx.speedtest.frontier.com'
speed_test '11209' 'USA, San Antonio, TX (Sprint) ' 'http://ookla1.snantx.sprintadp.net'
speed_test '8862' 'USA, Denver (CenturyLink) ' 'http://denver.speedtest.centurylink.net'
speed_test '16869' 'USA, Albuquerque (Plateau Tel) ' 'http://speedtest4.plateautel.net'
speed_test '15783' 'USA, Phoenix (Sprint) ' 'http://ookla1.phnfaz.sprintadp.net'
speed_test '2206' 'USA, Salt Lake City (UTOPIA) ' 'http://speedtest2.utopiafiber.net'
speed_test '16446' 'USA, Las Vegas (CenturyLink) ' 'http://las-vegas2.speedtest.centurylink.net'
speed_test '18271' 'USA, Seattle (Bluespan) ' 'http://seattle.speedtest.bluespanwireless.com'
speed_test '17587' 'USA, San Francisco (Wiline) ' 'http://sfosfookla.wiline.com'
speed_test '18401' 'USA, Los Angeles (Windstream) ' 'http://la02.speedtest.windstream.net'
speed_test '980' 'USA, Anchorage (Alaska Com) ' 'http://speedtest.anc.acsalaska.net'
speed_test '24031' 'USA, Honolulu (Hawaiian Telcom)' 'http://htspeed.hawaiiantel.net'
rm -rf speedtest.py
}
print_speedtest_in() {
echo "" | tee -a $log
echostyle "## India Speedtest.net"
echo "" | tee -a $log
printf "%-33s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '7236' 'India, New Delhi (iForce) ' 'http://speed.iforcenetworks.co.in'
speed_test '10637' 'India, Mumbai (OneBroadband) ' 'http://in2net.in2cable.com'
speed_test '16086' 'India, Nagpur (optbb) ' 'http://speedtest.optbb.in'
speed_test '23244' 'India, Patna (Airtel) ' 'http://speedtestbhr1.airtel.in'
speed_test '15697' 'India, Kolkata (RailTel) ' 'http://kol.speedtest.rcil.gov.in'
speed_test '27524' 'India, Visakhapatnam (Alliance)' 'http://speedtestvtz.alliancebroadband.in'
speed_test '13785' 'India, Hyderabad (I-ON) ' 'http://testspeed.vainavi.net'
speed_test '10024' 'India, Madurai (Niss Broadband)' 'http://madurai.nissbroadband.com'
rm -rf speedtest.py
}
print_speedtest_europe() {
echo "" | tee -a $log
echostyle "## Europe Speedtest.net"
echo "" | tee -a $log
printf "%-34s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '26922' 'UK, London (toob Ltd) ' 'http://185.82.8.1'
speed_test '29076' 'Netherlands, Amsterdam (XS News)' 'http://speedtest.xsnews.nl'
speed_test '20507' 'Germany, Berlin (DNS:NET) ' 'http://speedtest01.dns-net.de'
speed_test '27345' 'Germany, Munich (InterNetX) ' 'http://speedtest.internetx.de'
speed_test '8751' 'Denmark, Copenhagen (Fiberby) ' 'http://speedtest.internetx.de'
speed_test '26852' 'Sweden, Stockholm (SUNET) ' 'http://fd.sunet.se'
speed_test '8018' 'Norway, Oslo (NextGenTel) ' 'http://sp2.nextgentel.no'
speed_test '24215' 'France, Paris (Orange) ' 'http://178.21.176.100'
speed_test '21378' 'Spain, Madrid (MasMovil) ' 'http://speedtest-mad.masmovil.com'
speed_test '395' 'Italy, Rome (Unidata) ' 'http://speedtest2.unidata.it'
speed_test '21975' 'Czechia, Prague (Nordic Telecom)' 'http://ookla.nordictelecom.cz'
speed_test '12390' 'Austria, Vienna (A1) ' 'http://speedtest.a1.net'
speed_test '7103' 'Poland, Warsaw (ISP Emitel) ' 'http://speedtest.emitel.pl'
speed_test '30813' 'Ukraine, Kyiv (KyivStar) ' 'http://srv01-okl-kv.kyivstar.ua'
speed_test '5834' 'Latvia, Riga (Bite) ' 'http://213.226.139.90'
speed_test '4290' 'Romania, Bucharest (iNES) ' 'http://speed.ines.ro'
speed_test '1727' 'Greece, Athens (GRNET) ' 'http://speed-test.gr-ix.gr'
speed_test '32575' 'Turkey, Urfa (Firatnet) ' 'http://firatspeedtest.com'
rm -rf speedtest.py
}
print_speedtest_asia() {
echo "" | tee -a $log
echostyle "## Asia Speedtest.net"
echo "" | tee -a $log
printf "%-34s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '16475' 'India, New Delhi (Weebo) ' 'http://sp1.weebo.in'
speed_test '10637' 'India, Mumbai (OneBroadband) ' 'http://in2net.in2cable.com'
speed_test '1131' 'Sri Lanka, Colombo (Telecom PLC)' 'http://speedtest2.sltnet.lk'
speed_test '4774' 'Pakistan, Islamabad (Telenor) ' 'http://speedtest1.telenor.com.pk'
speed_test '7147' 'Bangladesh, Dhaka (Skytel) ' 'http://sp1.cosmocom.net'
speed_test '14901' 'Bhutan, Thimphu (Bhutan Telecom)' 'http://speedtest.bt.bt'
speed_test '14062' 'Myanmar, Yangon (5BB Broadband) ' 'http://5bbbroadband.com'
speed_test '26845' 'Laos, Vientaine (Mangkone) ' 'http://speedtest.mangkone.com'
speed_test '13871' 'Thailand, Bangkok (CAT Telecom) ' 'http://catspeedtest.net'
speed_test '12545' 'Cambodia, Phnom Penh (Smart) ' 'http://speedtest.smart.com.kh'
speed_test '9903' 'Vietnam, Hanoi (Viettel) ' 'http://speedtestkv1b.viettel.vn'
speed_test '27261' 'Malaysia, Kuala Lumpur (Extreme)' 'http://kl-speedtest.ebb.my'
speed_test '51914' 'Singapore (StarHub) ' 'http://co2dsvr03.speedtest.starhub.com'
speed_test '17516' 'Indonesia, Jakarta (Desnet) ' 'http://speedtest.desnet.id'
speed_test '20273' 'Philippines, Manila (Globe Tel) ' 'http://119.92.238.90'
speed_test '28912' 'Hong Kong (fdcservers) ' 'http://lg-hkg.fdcservers.net'
speed_test '13506' 'Taiwan, Taipei (TAIFO) ' 'http://speedtest.taifo.com.tw'
speed_test '7139' 'Japan, Tsukuba (SoftEther) ' 'http://speedtest2.softether.co.jp'
rm -rf speedtest.py
}
print_speedtest_sa() {
echo "" | tee -a $log
echostyle "## South America Speedtest.net"
echo "" | tee -a $log
printf "%-37s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-80s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-80s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '11488' 'Brazil, Sao Paulo (Criare) ' 'http://ookla.spcom.net.br'
speed_test '11435' 'Brazil, Fortaleza (Netonda) ' 'http://speedtest.netonda.com.br'
speed_test '18126' 'Brazil, Manaus (Claro) ' 'http://spd7.claro.com.br'
speed_test '11683' 'Colombia, Bogota (Level 3) ' 'http://speedtest.globalcrossing.com.co'
speed_test '31043' 'Ecuador, Ambato (EXTREME) ' 'http://speed.extreme.net.ec'
speed_test '5272' 'Peru, Lima (Fiberluxperu) ' 'http://medidor.fiberluxperu.com'
speed_test '1053' 'Bolivia, La Paz (Nuevatel) ' 'http://speedtest.nuevatel.com'
speed_test '6776' 'Paraguay, Asuncion (TEISA) ' 'http://sp1.teisa.com.py'
speed_test '13065' 'Chile, Santiago (Netglobalis) ' 'http://speedtest.netglobalis.net'
speed_test '6825' 'Argentina, Buenos Aires(Telefonica)' 'http://speedtest2.gics.telefonica.com.ar'
speed_test '10315' 'Argentina, Cordoba (Personal) ' 'http://st1res.personal.com.ar'
speed_test '1546' 'Uruguay, Montevideo (Antel) ' 'http://speedtest.movistar.com.uy'
rm -rf speedtest.py
}
print_speedtest_au() {
echo "" | tee -a $log
echostyle "## Australia & New Zealand Speedtest.net"
echo "" | tee -a $log
printf "%-32s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '1267' 'Australia, Sydney (Optus) ' 'http://s1.speedtest.syd.optusnet.com.au'
speed_test '2225' 'Australia, Melbourne (Telstra)' 'http://mel1.speedtest.telstra.net'
speed_test '2604' 'Australia, Brisbane (Telstra) ' 'http://brs1.speedtest.telstra.net'
speed_test '16907' 'Australia, Adelaide (KernWiFi)' 'http://ookla.kernwifi.com.au'
speed_test '8976' 'Australia, Hobart (Optus) ' 'http://speedtest.tas.optusnet.com.au'
speed_test '22036' 'Australia, Darwin (Telstra) ' 'http://drw1.speedtest.telstra.net'
speed_test '2627' 'Australia, Perth (Telstra) ' 'http://per1.speedtest.telstra.net'
speed_test '2627' 'NZ, Auckland (MyRepublic) ' 'http://speedtest.myrepublic.co.nz'
speed_test '11326' 'NZ, Wellington (Spark) ' 'http://speedtest-wellington.spark.co.nz'
speed_test '4934' 'NZ, Christchurch (Vodafone) ' 'http://christchurch.speedtest.vodafone.co.nz'
rm -rf speedtest.py
}
print_speedtest_ukraine() {
echo "" | tee -a $log
echostyle "## Ukraine Speedtest.net"
echo "" | tee -a $log
printf "%-32s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '29112' 'Ukraine, Kyiv (Datagroup) ' 'http://speedtest.datagroup.ua'
speed_test '30813' 'Ukraine, Kyiv (KyivStar) ' 'http://srv01-okl-kv.kyivstar.ua'
speed_test '2518' 'Ukraine, Kyiv (Volia) ' 'http://speedtest2.volia.com'
speed_test '14887' 'Ukraine, Lviv (UARNet) ' 'http://speedtest.uar.net'
speed_test '29259' 'Ukraine, Lviv (KyivStar) ' 'http://srv01-okl-lvv.kyivstar.ua'
speed_test '2445' 'Ukraine, Lviv (KOMiTEX) ' 'http://speedtest.komitex.net'
speed_test '3022' 'Ukraine, Uzhgorod (TransCom) ' 'http://speedtest.tcom.uz.ua'
speed_test '19332' 'Ukraine, Chernivtsi (C.T.Net) ' 'http://speedtest.ctn.cv.ua'
speed_test '3861' 'Ukraine, Zhytomyr (DKS) ' 'http://speedtest1.dks.com.ua'
speed_test '8633' 'Ukraine, Cherkasy (McLaut) ' 'http://speedtest2.mclaut.com'
speed_test '20285' 'Ukraine, Kharkiv (Maxnet) ' 'http://speedtest.maxnet.ua'
speed_test '20953' 'Ukraine, Dnipro (Trifle) ' 'http://speedtest.trifle.net'
speed_test '2796' 'Ukraine, Odesa (Black Sea) ' 'http://speedtest.blacksea.net.ua'
speed_test '26725' 'Ukraine, Mariupol (CityLine) ' 'http://speedtest.cl.dn.ua'
speed_test '2581' 'Ukraine, Yalta (KNET) ' 'http://speedtest.knet-tele.com'
rm -rf speedtest.py
}
print_speedtest_lviv() {
echo "" | tee -a $log
echostyle "## Lviv Speedtest.net"
echo "" | tee -a $log
printf "%-26s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '14887' 'Ukraine, Lviv (UARNet) ' 'http://speedtest.uar.net'
speed_test '29259' 'Ukraine, Lviv (KyivStar)' 'http://srv01-okl-lvv.kyivstar.ua'
speed_test '2445' 'Ukraine, Lviv (KOMiTEX) ' 'http://speedtest.komitex.net'
speed_test '12786' 'Ukraine, Lviv (ASTRA) ' 'http://speedtest.astra.in.ua'
speed_test '1204' 'Ukraine, Lviv (Network) ' 'http://speedtest.network.lviv.ua'
speed_test '26293' 'Ukraine, Lviv (LinkCom) ' 'http://st.lc.lviv.ua'
speed_test '34751' 'Ukraine, Lviv (Wenet) ' 'http://vds.wenet.lviv.ua'
rm -rf speedtest.py
}
print_speedtest_meast() {
echo "" | tee -a $log
echostyle "## Middle East Speedtest.net"
echo "" | tee -a $log
printf "%-30s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '610' 'Cyprus, Limassol (PrimeTel) ' 'http://speedtest-node.prime-tel.com'
speed_test '2434' 'Israel, Haifa (013Netvision)' 'http://speed2.013.net'
speed_test '16139' 'Egypt, Cairo (Telecom Egypt)' 'http://speedtestob.orange.eg'
speed_test '12498' 'Lebanon, Tripoli (BItarNet) ' 'http://speedtest1.wavenet-lb.net'
speed_test '22129' 'UAE, Dubai (i3D) ' 'http://ae.ap.speedtest.i3d.net'
speed_test '24742' 'Qatar, Doha (Ooredoo) ' 'http://37.186.62.40'
speed_test '13610' 'SA, Riyadh (ITC) ' 'http://87.101.181.146'
speed_test '1912' 'Bahrain, Manama (Zain) ' 'http://62.209.25.182'
speed_test '18512' 'Iran, Tehran (MCI) ' 'http://rhaspd2.mci.ir'
rm -rf speedtest.py
}
print_speedtest_china() {
echo "" | tee -a $log
echostyle "## China Speedtest.net"
echo "" | tee -a $log
printf "%-32s%-17s%-17s%-7s\n" " Location" "Upload" "Download" "Ping" | tee -a $log
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '' 'Nearby '
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
speed_test '5396' 'Suzhou (China Telecom 5G) ' 'http://4gsuzhou1.speedtest.jsinfo.net'
speed_test '24447' 'ShangHai (China Unicom 5G) ' 'http://5g.shunicomtest.com'
speed_test '26331' 'Zhengzhou (Henan CMCC 5G) ' 'http://5ghenan.ha.chinamobile.com'
speed_test '29105' 'Xi"an (China Mobile 5G) ' 'http://122.77.240.140'
speed_test '4870' 'Changsha (China Unicom 5G) ' 'http://220.202.152.178'
speed_test '3633' 'Shanghai (China Telecom) ' 'http://speedtest1.online.sh.cn'
rm -rf speedtest.py
}
geekbench4() {
if [[ $ARCH = *x86* ]]; then # 32-bit
echo -e "\nGeekbench 5 cannot run on 32-bit architectures. Skipping the test"
else
echo "" | tee -a $log
echo -e " Performing Geekbench v4 CPU Benchmark test. Please wait..."
GEEKBENCH_PATH=$HOME/geekbench
mkdir -p $GEEKBENCH_PATH
curl -s http://cdn.geekbench.com/Geekbench-4.4.4-Linux.tar.gz | tar xz --strip-components=1 -C $GEEKBENCH_PATH &>/dev/null
GEEKBENCH_TEST=$($GEEKBENCH_PATH/geekbench4 2>/dev/null | grep "https://browser")
GEEKBENCH_URL=$(echo -e $GEEKBENCH_TEST | head -1)
GEEKBENCH_URL_CLAIM=$(echo $GEEKBENCH_URL | awk '{ print $2 }')
GEEKBENCH_URL=$(echo $GEEKBENCH_URL | awk '{ print $1 }')
sleep 20
GEEKBENCH_SCORES=$(curl -s $GEEKBENCH_URL | grep "span class='score'")
GEEKBENCH_SCORES_SINGLE=$(echo $GEEKBENCH_SCORES | awk -v FS="(>|<)" '{ print $3 }')
GEEKBENCH_SCORES_MULTI=$(echo $GEEKBENCH_SCORES | awk -v FS="(>|<)" '{ print $7 }')
if [[ $GEEKBENCH_SCORES_SINGLE -le 1700 ]]; then
grank="(POOR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 1700 && $GEEKBENCH_SCORES_SINGLE -le 2500 ]]; then
grank="(FAIR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 2500 && $GEEKBENCH_SCORES_SINGLE -le 3500 ]]; then
grank="(GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 3500 && $GEEKBENCH_SCORES_SINGLE -le 4500 ]]; then
grank="(VERY GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 4500 && $GEEKBENCH_SCORES_SINGLE -le 6000 ]]; then
grank="(EXCELLENT)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 6000 && $GEEKBENCH_SCORES_SINGLE -le 7000 ]]; then
grank="(THE BEAST)"
else
grank="(MONSTER)"
fi
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echostyle "## Geekbench v4 CPU Benchmark:"
echo "" | tee -a $log
echo -e " Single Core : $GEEKBENCH_SCORES_SINGLE $grank" | tee -a $log
echo -e " Multi Core : $GEEKBENCH_SCORES_MULTI" | tee -a $log
[ ! -z "$GEEKBENCH_URL_CLAIM" ] && echo -e "$GEEKBENCH_URL_CLAIM" >> geekbench_claim.url 2> /dev/null
echo "" | tee -a $log
echo -e " Cooling down..."
sleep 9
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echo -e " Ready to continue..."
sleep 3
echo -ne "\e[1A"; echo -ne "\033[0K\r"
fi
}
geekbench5() {
if [[ $ARCH = *x86* ]]; then # 32-bit
echo -e "\nGeekbench 5 cannot run on 32-bit architectures. Skipping the test"
else
echo "" | tee -a $log
echo -e " Performing Geekbench v5 CPU Benchmark test. Please wait..."
GEEKBENCH_PATH=$HOME/geekbench
mkdir -p $GEEKBENCH_PATH
curl -s http://cdn.geekbench.com/Geekbench-5.5.0-Linux.tar.gz | tar xz --strip-components=1 -C $GEEKBENCH_PATH &>/dev/null
GEEKBENCH_TEST=$($GEEKBENCH_PATH/geekbench5 2>/dev/null | grep "https://browser")
GEEKBENCH_URL=$(echo -e $GEEKBENCH_TEST | head -1)
GEEKBENCH_URL_CLAIM=$(echo $GEEKBENCH_URL | awk '{ print $2 }')
GEEKBENCH_URL=$(echo $GEEKBENCH_URL | awk '{ print $1 }')
sleep 20
GEEKBENCH_SCORES=$(curl -s $GEEKBENCH_URL | grep "div class='score'")
GEEKBENCH_SCORES_SINGLE=$(echo $GEEKBENCH_SCORES | awk -v FS="(>|<)" '{ print $3 }')
GEEKBENCH_SCORES_MULTI=$(echo $GEEKBENCH_SCORES | awk -v FS="(<|>)" '{ print $7 }')
if [[ $GEEKBENCH_SCORES_SINGLE -le 300 ]]; then
grank="(POOR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 300 && $GEEKBENCH_SCORES_SINGLE -le 500 ]]; then
grank="(FAIR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 500 && $GEEKBENCH_SCORES_SINGLE -le 700 ]]; then
grank="(GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 700 && $GEEKBENCH_SCORES_SINGLE -le 1000 ]]; then
grank="(VERY GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 1000 && $GEEKBENCH_SCORES_SINGLE -le 1500 ]]; then
grank="(EXCELLENT)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 1500 && $GEEKBENCH_SCORES_SINGLE -le 2000 ]]; then
grank="(THE BEAST)"
else
grank="(MONSTER)"
fi
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echostyle "## Geekbench v5 CPU Benchmark:"
echo "" | tee -a $log
echo -e " Single Core : $GEEKBENCH_SCORES_SINGLE $grank" | tee -a $log
echo -e " Multi Core : $GEEKBENCH_SCORES_MULTI" | tee -a $log
[ ! -z "$GEEKBENCH_URL_CLAIM" ] && echo -e "$GEEKBENCH_URL_CLAIM" >> geekbench_claim.url 2> /dev/null
echo "" | tee -a $log
echo -e " Cooling down..."
sleep 9
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echo -e " Ready to continue..."
sleep 3
echo -ne "\e[1A"; echo -ne "\033[0K\r"
fi
}
# Run the Geekbench 6 CPU benchmark: download the 6.1.0 Linux tarball into
# $HOME/geekbench, execute it, scrape the public browser.geekbench.com result
# page for the single/multi core scores, grade the single-core score, and
# append everything to $log.  Skipped on 32-bit hosts (no 32-bit GB6 binary).
# Globals read: ARCH, log.  Globals written: GEEKBENCH_URL(_CLAIM), grank, etc.
geekbench6() {
if [[ $ARCH = *x86* ]]; then # 32-bit
echo -e "\nGeekbench 6 cannot run on 32-bit architectures. Skipping the test"
else
echo "" | tee -a $log
echo -e " Performing Geekbench v6 CPU Benchmark test. Please wait..."
GEEKBENCH_PATH=$HOME/geekbench
mkdir -p $GEEKBENCH_PATH
curl -s https://cdn.geekbench.com/Geekbench-6.1.0-Linux.tar.gz | tar xz --strip-components=1 -C $GEEKBENCH_PATH &>/dev/null
# geekbench prints its result URLs on one line: field 1 is the public
# result page, field 2 is the "claim" URL for attaching it to an account.
GEEKBENCH_TEST=$($GEEKBENCH_PATH/geekbench6 2>/dev/null | grep "https://browser")
GEEKBENCH_URL=$(echo -e $GEEKBENCH_TEST | head -1)
GEEKBENCH_URL_CLAIM=$(echo $GEEKBENCH_URL | awk '{ print $2 }')
GEEKBENCH_URL=$(echo $GEEKBENCH_URL | awk '{ print $1 }')
# give browser.geekbench.com time to publish the result page before scraping
sleep 15
GEEKBENCH_SCORES=$(curl -s $GEEKBENCH_URL | grep "div class='score'")
GEEKBENCH_SCORES_SINGLE=$(echo $GEEKBENCH_SCORES | awk -v FS="(>|<)" '{ print $3 }')
GEEKBENCH_SCORES_MULTI=$(echo $GEEKBENCH_SCORES | awk -v FS="(<|>)" '{ print $7 }')
# map the single-core score onto a human-readable grade
if [[ $GEEKBENCH_SCORES_SINGLE -le 500 ]]; then
grank="(POOR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 500 && $GEEKBENCH_SCORES_SINGLE -le 700 ]]; then
grank="(FAIR)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 700 && $GEEKBENCH_SCORES_SINGLE -le 900 ]]; then
grank="(GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 900 && $GEEKBENCH_SCORES_SINGLE -le 1200 ]]; then
grank="(VERY GOOD)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 1200 && $GEEKBENCH_SCORES_SINGLE -le 1700 ]]; then
grank="(EXCELLENT)"
elif [[ $GEEKBENCH_SCORES_SINGLE -ge 1700 && $GEEKBENCH_SCORES_SINGLE -le 2200 ]]; then
grank="(THE BEAST)"
else
grank="(MONSTER)"
fi
# "\e[1A" + "\033[0K\r": cursor up one line and clear it, overwriting the
# "Please wait" status line in place.
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echostyle "## Geekbench v6 CPU Benchmark:"
echo "" | tee -a $log
echo -e " Single Core : $GEEKBENCH_SCORES_SINGLE $grank" | tee -a $log
echo -e " Multi Core : $GEEKBENCH_SCORES_MULTI" | tee -a $log
[ ! -z "$GEEKBENCH_URL_CLAIM" ] && echo -e "$GEEKBENCH_URL_CLAIM" >> geekbench_claim.url 2> /dev/null
echo "" | tee -a $log
# pause so the benchmark's thermal load does not skew the next test
echo -e " Cooling down..."
sleep 9
echo -ne "\e[1A"; echo -ne "\033[0K\r"
echo -e " Ready to continue..."
sleep 3
echo -ne "\e[1A"; echo -ne "\033[0K\r"
fi
}
# Sum a list of human-readable sizes (e.g. "10G 512M 1T") into gigabytes,
# printing the total with one decimal place.  "K" counts as 0, "M" is
# divided by 1024, "T" multiplied by 1024, "G" taken as-is; a bare "0"
# contributes nothing.
calc_disk() {
    local running=0
    local item numeric
    for item in "$@"; do
        if [ "$item" = "0" ]; then
            numeric=0
        else
            numeric=${item%?}   # value without the trailing unit letter
        fi
        case "${item: -1}" in
            K) item=0 ;;
            M) item=$( awk 'BEGIN{printf "%.1f", '$numeric' / 1024}' ) ;;
            T) item=$( awk 'BEGIN{printf "%.1f", '$numeric' * 1024}' ) ;;
            G) item=$numeric ;;
        esac
        running=$( awk 'BEGIN{printf "%.1f", '$running' + '$item'}' )
    done
    echo ${running}
}
# Best-effort SMART "Power_On_Hours" for the device backing the first mount
# whose options contain "data=ordered" (i.e. an ext3/ext4 filesystem), read
# from column 10 of smartctl's attribute table.  Prints nothing on failure.
# NOTE(review): assumes such a mount exists and that smartctl is installed
# (install_smart is called first by power_time_check) -- confirm on hosts
# using xfs/btrfs roots, where the device lookup yields an empty string.
power_time() {
result=$(smartctl -a $(result=$(cat /proc/mounts) && echo $(echo "$result" | awk '/data=ordered/{print $1}') | awk '{print $1}') 2>&1) && power_time=$(echo "$result" | awk '/Power_On/{print $10}') && echo "$power_time"
}
# Ensure the smartctl binary is present; when missing, install the
# smartmontools package with the distro's package manager (yum on CentOS,
# apt-get elsewhere, chosen via the global $release), silencing all output.
install_smart() {
    [ -e '/usr/sbin/smartctl' ] && return
    echo "Installing Smartctl ..."
    case "${release}" in
        centos)
            yum update > /dev/null 2>&1
            yum -y install smartmontools > /dev/null 2>&1
            ;;
        *)
            apt-get update > /dev/null 2>&1
            apt-get -y install smartmontools > /dev/null 2>&1
            ;;
    esac
}
# Report the public IP's ASN, organization and location via ipapi.co
# (one HTTP request per field, so no jq dependency) and append the
# formatted lines to $log.
ip_info(){
# no jq
country=$(curl -s https://ipapi.co/country_name/)
city=$(curl -s https://ipapi.co/city/)
asn=$(curl -s https://ipapi.co/asn/)
org=$(curl -s https://ipapi.co/org/)
countryCode=$(curl -s https://ipapi.co/country/)
region=$(curl -s https://ipapi.co/region/)
echo -e " ASN & ISP : $asn" | tee -a $log
echo -e " Organization : $org" | tee -a $log
echo -e " Location : $city, $country / $countryCode" | tee -a $log
echo -e " Region : $region" | tee -a $log
}
# Same report as ip_info but sourced from the bundled tools.py helper
# (ip-api.com style field names); deletes tools.py when done.
# NOTE(review): assumes tools.py was downloaded earlier (presumably by
# benchinit) into the current directory -- confirm call order.
ip_info4(){
isp=$(python tools.py geoip isp)
as_tmp=$(python tools.py geoip as)
# the "as" field is "ASxxxx Org Name"; keep only the AS number token
asn=$(echo $as_tmp | awk -F ' ' '{print $1}')
org=$(python tools.py geoip org)
country=$(python tools.py geoip country)
city=$(python tools.py geoip city)
countryCode=$(python tools.py geoip countryCode)
region=$(python tools.py geoip regionName)
echo -e " ASN & ISP : $asn, $isp" | tee -a $log
echo -e " Organization : $org" | tee -a $log
echo -e " Location : $city, $country / $countryCode" | tee -a $log
echo -e " Region : $region" | tee -a $log
rm -rf tools.py
}
# Print a two-line location/ISP summary (stdout only, not logged) using the
# same tools.py geoip helper as ip_info4; deletes tools.py when done.
machine_location(){
isp=$(python tools.py geoip isp)
as_tmp=$(python tools.py geoip as)
asn=$(echo $as_tmp | awk -F ' ' '{print $1}')
org=$(python tools.py geoip org)
country=$(python tools.py geoip country)
city=$(python tools.py geoip city)
countryCode=$(python tools.py geoip countryCode)
region=$(python tools.py geoip regionName)
echo -e " Machine location: $country, $city ($region)"
echo -e " ISP & ORG: $isp / $org"
rm -rf tools.py
}
# Detect the virtualization technology and store it in the global $virtual.
# Order matters: container markers (Docker/LXC/OpenVZ) are checked before
# hypervisor fingerprints taken from dmesg and DMI strings.
# NOTE(review): relies on globals $cname, $sys_manu, $sys_product, $sys_ver
# being populated elsewhere (cname is set by get_system_info; the sys_*
# variables are not set in this chunk -- confirm they exist upstream).
virt_check(){
if hash ifconfig 2>/dev/null; then
eth=$(ifconfig)
fi
# NOTE(review): the redirection here applies to the assignment statement,
# not to dmesg itself; dmesg errors would still reach stderr.
virtualx=$(dmesg) 2>/dev/null
if grep docker /proc/1/cgroup -qa; then
virtual="Docker"
elif grep lxc /proc/1/cgroup -qa; then
virtual="Lxc"
elif grep -qa container=lxc /proc/1/environ; then
virtual="Lxc"
elif [[ -f /proc/user_beancounters ]]; then
virtual="OpenVZ"
elif [[ "$virtualx" == *kvm-clock* ]]; then
virtual="KVM"
elif [[ "$cname" == *KVM* ]]; then
virtual="KVM"
elif [[ "$virtualx" == *"VMware Virtual Platform"* ]]; then
virtual="VMware"
elif [[ "$virtualx" == *"Parallels Software International"* ]]; then
virtual="Parallels"
elif [[ "$virtualx" == *VirtualBox* ]]; then
virtual="VirtualBox"
elif [[ -e /proc/xen ]]; then
virtual="Xen"
elif [[ "$sys_manu" == *"Microsoft Corporation"* ]]; then
if [[ "$sys_product" == *"Virtual Machine"* ]]; then
if [[ "$sys_ver" == *"7.0"* || "$sys_ver" == *"Hyper-V" ]]; then
virtual="Hyper-V"
else
virtual="Microsoft Virtual Machine"
fi
fi
else
virtual="Dedicated"
fi
}
# Print the drive's accumulated power-on hours, installing smartctl first
# when necessary (install_smart) and reading the value via power_time.
power_time_check(){
    printf "%s" " Power time of disk   : "
    install_smart
    ptime=$(power_time)
    printf "%s\n" "$ptime Hours"
}
# Print a dd block count (512K blocks) scaled to the free space of the
# current filesystem: 2048/1024/512/256 for >1024/512/256/128 MB free,
# or "1" when there is not enough room to run a disk test safely.
freedisk() {
	# check free space
	#spacename=$( df -m . | awk 'NR==2 {print $1}' )
	#spacenamelength=$(echo ${spacename} | awk '{print length($0)}')
	#if [[ $spacenamelength -gt 20 ]]; then
	#	freespace=$( df -m . | awk 'NR==3 {print $3}' )
	#else
	#	freespace=$( df -m . | awk 'NR==2 {print $4}' )
	#fi
	freespace=$( df -m . | awk 'NR==2 {print $4}' )
	if [[ $freespace == "" ]]; then
		# Some df builds wrap a long device name onto its own line, pushing
		# the numeric columns down to row 3.
		# FIX: was "$freespace=$(...)" -- the leading "$" turned this into a
		# command invocation ("1234=..."), so the fallback never assigned
		# anything and the function always fell through to printing "1".
		freespace=$( df -m . | awk 'NR==3 {print $3}' )
	fi
	if [[ $freespace -gt 1024 ]]; then
		printf "%s" $((1024*2))
	elif [[ $freespace -gt 512 ]]; then
		printf "%s" $((512*2))
	elif [[ $freespace -gt 256 ]]; then
		printf "%s" $((256*2))
	elif [[ $freespace -gt 128 ]]; then
		printf "%s" $((128*2))
	else
		printf "1"
	fi
}
# Pretty-print the hardware/OS summary collected by get_system_info,
# appending each line to $log, then draw a 75-dash separator.
print_system_info() {
echo -e " OS : $opsy ($lbit Bit)" | tee -a $log
echo -e " Virt/Kernel : $virtual / $kern" | tee -a $log
echo -e " CPU Model : $cname" | tee -a $log
echo -e " CPU Cores : $cores @ $freq MHz $arch $corescache Cache" | tee -a $log
echo -e " CPU Flags : $cpu_aes & $cpu_virt" | tee -a $log
echo -e " Load Average : $load" | tee -a $log
echo -e " Total Space : $hdd ($hddused ~$hddfree used)" | tee -a $log
echo -e " Total RAM : $tram MB ($uram MB + $bram MB Buff in use)" | tee -a $log
echo -e " Total SWAP : $swap MB ($uswap MB in use)" | tee -a $log
echo -e " Uptime : $up" | tee -a $log
#echo -e " TCP CC : $tcpctrl" | tee -a $log
# 75-character dashed separator: print 75 chars of padding, turn spaces to dashes
printf "%-75s\n" "-" | sed 's/\s/-/g' | tee -a $log
}
# Populate the global system-description variables consumed by
# print_system_info (CPU model/cores/frequency, AES/virt flags, RAM, swap,
# uptime, load, OS, kernel, total disk figures) and run virt_check.
get_system_info() {
cname=$( awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
cores=$( awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo )
freq=$( awk -F: '/cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
corescache=$( awk -F: '/cache size/ {cache=$2} END {print cache}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
cpu_aes=$(cat /proc/cpuinfo | grep aes)
[[ -z "$cpu_aes" ]] && cpu_aes="AES-NI Disabled" || cpu_aes="AES-NI Enabled"
cpu_virt=$(cat /proc/cpuinfo | grep 'vmx\|svm')
[[ -z "$cpu_virt" ]] && cpu_virt="VM-x/AMD-V Disabled" || cpu_virt="VM-x/AMD-V Enabled"
tram=$( free -m | awk '/Mem/ {print $2}' )
uram=$( free -m | awk '/Mem/ {print $3}' )
bram=$( free -m | awk '/Mem/ {print $6}' )
swap=$( free -m | awk '/Swap/ {print $2}' )
uswap=$( free -m | awk '/Swap/ {print $3}' )
# /proc/uptime field 1 is seconds since boot; format as "N days H:M"
up=$( awk '{a=$1/86400;b=($1%86400)/3600;c=($1%3600)/60} {printf("%d days %d:%d\n",a,b,c)}' /proc/uptime )
load=$( w | head -1 | awk -F'load average:' '{print $2}' | sed 's/^[ \t]*//;s/[ \t]*$//' )
opsy=$( get_opsy )
arch=$( uname -m )
lbit=$( getconf LONG_BIT )
kern=$( uname -r )
#ipv6=$( wget -qO- -t1 -T2 ipv6.icanhazip.com )
#disk_size1=($( LANG=C df -hPl | grep -wvE '\-|none|tmpfs|overlay|shm|udev|devtmpfs|by-uuid|chroot|Filesystem' | awk '{print $2}' ))
#disk_size2=($( LANG=C df -hPl | grep -wvE '\-|none|tmpfs|overlay|shm|udev|devtmpfs|by-uuid|chroot|Filesystem' | awk '{print $3}' ))
#disk_total_size=$( calc_disk ${disk_size1[@]} )
#disk_used_size=$( calc_disk ${disk_size2[@]} )
# totals across real filesystems only (the -t list excludes tmpfs etc.)
hdd=$(df -t simfs -t ext2 -t ext3 -t ext4 -t btrfs -t xfs -t vfat -t ntfs -t swap --total -h | grep total | awk '{ print $2 }')
hddused=$(df -t simfs -t ext2 -t ext3 -t ext4 -t btrfs -t xfs -t vfat -t ntfs -t swap --total -h | grep total | awk '{ print $3 }')
# NOTE(review): column $5 of "df --total" is the use percentage, so
# $hddfree actually holds "NN%" used, not free space -- confirm intent.
hddfree=$(df -t simfs -t ext2 -t ext3 -t ext4 -t btrfs -t xfs -t vfat -t ntfs -t swap --total -h | grep total | awk '{ print $5 }')
#tcp congestion control
#tcpctrl=$( sysctl net.ipv4.tcp_congestion_control | awk -F ' ' '{print $3}' )
#tmp=$(python tools.py disk 0)
#disk_total_size=$(echo $tmp | sed s/G//)
#tmp=$(python tools.py disk 1)
#disk_used_size=$(echo $tmp | sed s/G//)
virt_check
}
# Sequential write benchmark: dd $1 blocks of 512K zeros with fdatasync
# (so the rate reflects real disk writes), delete the scratch file, and
# print dd's final throughput field (e.g. "123 MB/s").
write_test() {
(LANG=C dd if=/dev/zero of=test_file_$$ bs=512K count=$1 conv=fdatasync && rm -f test_file_$$ ) 2>&1 | awk -F, '{io=$NF} END { print io}' | sed 's/^[ \t]*//;s/[ \t]*$//'
}
# Average three dd-style rate samples ("<value> <unit>") and print the mean
# in MB/s with one decimal place; samples reported in GB/s are scaled to
# MB/s first.  The intermediate ioraw*/ioall/ioavg globals are kept for
# compatibility with the original implementation.
averageio() {
    ioraw1=$( echo $1 | awk '{ print $1; exit }' )
    if [ "$(echo $1 | awk '{ print $2; exit }')" = "GB/s" ]; then
        ioraw1=$( awk "BEGIN{ print $ioraw1 * 1024 }" )
    fi
    ioraw2=$( echo $2 | awk '{ print $1; exit }' )
    if [ "$(echo $2 | awk '{ print $2; exit }')" = "GB/s" ]; then
        ioraw2=$( awk "BEGIN{ print $ioraw2 * 1024 }" )
    fi
    ioraw3=$( echo $3 | awk '{ print $1; exit }' )
    if [ "$(echo $3 | awk '{ print $2; exit }')" = "GB/s" ]; then
        ioraw3=$( awk "BEGIN{ print $ioraw3 * 1024 }" )
    fi
    ioall=$( awk "BEGIN{ print $ioraw1 + $ioraw2 + $ioraw3 }" )
    ioavg=$( awk "BEGIN{ printf \"%.1f\", $ioall / 3 }" )
    printf "%s" "$ioavg"
}
# Time piping $2 blocks of 512K zeros through tool $1 (bzip2 / sha256sum /
# md5sum) and print dd's throughput field, right-padded to 4 digits.
# Prints a notice instead when the tool is not installed.
# NOTE(review): $NULL is presumably a global set to /dev/null elsewhere in
# the script -- confirm it is defined before first use.
cpubench() {
if hash $1 2>$NULL; then
io=$( ( dd if=/dev/zero bs=512K count=$2 | $1 ) 2>&1 | grep 'copied' | awk -F, '{io=$NF} END {print io}' )
# rates without a decimal point are printed as-is; otherwise split on the
# point so the integer part can be %4i-aligned
if [[ $io != *"."* ]]; then
printf "%4i %s" "${io% *}" "${io##* }"
else
printf "%4i.%s" "${io%.*}" "${io#*.}"
fi
else
printf " %s not found on system." "$1"
fi
}
# CPU and RAM throughput section: sizes the workload from free disk space,
# runs cpubench for bzip2/sha256/md5, then mounts a tmpfs at $benchram and
# measures three write/read passes with dd, reporting the averages.
# Requires root (mount/umount).  Globals read: log, benchram, NULL.
iotest() {
echostyle "## IO Test"
echo "" | tee -a $log
# start testing
writemb=$(freedisk)
if [[ $writemb -gt 512 ]]; then
writemb_size="$(( writemb / 2 / 2 ))MB"
writemb_cpu="$(( writemb / 2 ))"
else
writemb_size="$writemb"MB
writemb_cpu=$writemb
fi
# CPU Speed test
echostyle "CPU Speed:"
echo " bzip2 :$( cpubench bzip2 $writemb_cpu )" | tee -a $log
echo " sha256 :$( cpubench sha256sum $writemb_cpu )" | tee -a $log
echo " md5sum :$( cpubench md5sum $writemb_cpu )" | tee -a $log
echo "" | tee -a $log
# RAM Speed test
# set ram allocation for mount
# NOTE(review): 'NR=1' is an assignment (true for every line), not the
# comparison 'NR==1'; it works here only because grep emits one line.
tram_mb="$( free -m | grep Mem | awk 'NR=1 {print $2}' )"
# NOTE(review): bare "tram_mb" inside [[ ]] is evaluated arithmetically,
# so this compares the variable's numeric value as intended.
if [[ tram_mb -gt 1900 ]]; then
sbram=1024M
sbcount=2048
else
sbram=$(( tram_mb / 2 ))M
sbcount=$tram_mb
fi
[[ -d $benchram ]] || mkdir $benchram
mount -t tmpfs -o size=$sbram tmpfs $benchram/
echostyle "RAM Speed:"
iow1=$( ( dd if=/dev/zero of=$benchram/zero bs=512K count=$sbcount ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
ior1=$( ( dd if=$benchram/zero of=$NULL bs=512K count=$sbcount; rm -f test ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
iow2=$( ( dd if=/dev/zero of=$benchram/zero bs=512K count=$sbcount ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
ior2=$( ( dd if=$benchram/zero of=$NULL bs=512K count=$sbcount; rm -f test ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
iow3=$( ( dd if=/dev/zero of=$benchram/zero bs=512K count=$sbcount ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
ior3=$( ( dd if=$benchram/zero of=$NULL bs=512K count=$sbcount; rm -f test ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
echo " Avg. write : $(averageio "$iow1" "$iow2" "$iow3") MB/s" | tee -a $log
echo " Avg. read : $(averageio "$ior1" "$ior2" "$ior3") MB/s" | tee -a $log
rm $benchram/zero
umount $benchram
rm -rf $benchram
echo "" | tee -a $log
# Disk test
#echostyle "Disk Speed:"
#if [[ $writemb != "1" ]]; then
# io=$( ( dd bs=512K count=$writemb if=/dev/zero of=test; rm -f test ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
# echo " I/O Speed :$io" | tee -a $log
# io=$( ( dd bs=512K count=$writemb if=/dev/zero of=test oflag=direct; rm -f test ) 2>&1 | awk -F, '{io=$NF} END { print io}' )
# echo " I/O Direct :$io" | tee -a $log
#else
# echo " Not enough space to test." | tee -a $log
#fi
#echo "" | tee -a $log
}
# Disk write section: run write_test three times with a size chosen by
# freedisk, log each pass, convert any GB/s results to MB/s, and log the
# three-run average.  Skipped with a notice when freedisk returned "1".
write_io() {
writemb=$(freedisk)
writemb_size="$(( writemb / 2 ))MB"
if [[ $writemb_size == "1024MB" ]]; then
writemb_size="1.0GB"
fi
if [[ $writemb != "1" ]]; then
echostyle "Disk Speed:"
echo -n " 1st run : " | tee -a $log
io1=$( write_test $writemb )
echo -e "$io1" | tee -a $log
echo -n " 2nd run : " | tee -a $log
io2=$( write_test $writemb )
echo -e "$io2" | tee -a $log
echo -n " 3rd run : " | tee -a $log
io3=$( write_test $writemb )
echo -e "$io3" | tee -a $log
# normalize each sample to MB/s before averaging
ioraw1=$( echo $io1 | awk 'NR==1 {print $1}' )
[ "`echo $io1 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw1=$( awk 'BEGIN{print '$ioraw1' * 1024}' )
ioraw2=$( echo $io2 | awk 'NR==1 {print $1}' )
[ "`echo $io2 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw2=$( awk 'BEGIN{print '$ioraw2' * 1024}' )
ioraw3=$( echo $io3 | awk 'NR==1 {print $1}' )
[ "`echo $io3 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw3=$( awk 'BEGIN{print '$ioraw3' * 1024}' )
ioall=$( awk 'BEGIN{print '$ioraw1' + '$ioraw2' + '$ioraw3'}' )
ioavg=$( awk 'BEGIN{printf "%.1f", '$ioall' / 3}' )
echo -e " -----------------------" | tee -a $log
echo -e " Average : $ioavg MB/s" | tee -a $log
else
echo -e " Not enough space!"
fi
}
# Footer: log elapsed wall time since the global $start epoch, a GMT
# timestamp, and where the log file was saved.
print_end_time() {
echo "" | tee -a $log
end=$(date +%s)
time=$(( $end - $start ))
if [[ $time -gt 60 ]]; then
min=$(expr $time / 60)
sec=$(expr $time % 60)
echo -ne " Finished in : ${min} min ${sec} sec"
else
echo -ne " Finished in : ${time} sec"
fi
#echo -ne "\n Current time : "
#echo $(date +%Y-%m-%d" "%H:%M:%S)
printf '\n'
utc_time=$(date -u '+%F %T')
echo " Timestamp : $utc_time GMT" | tee -a $log
#echo " Finished!"
echo " Saved in : $log"
echo "" | tee -a $log
}
# Banner: separator line plus region/version/usage header, logged to $log.
# Globals read: region_name (set by the bench_* entry points), log.
print_intro() {
printf "%-75s\n" "-" | sed 's/\s/-/g'
printf ' Region: %s https://bench.monster v.1.6.0 2023-07-29 \n' $region_name | tee -a $log
printf " Usage : curl -LsO bench.monster/speedtest.sh; bash speedtest.sh -%s\n" $region_name | tee -a $log
}
# Upload the (ANSI-stripped) log to a paste service chosen by $1
# ("ubuntu" | "haste" | "clbin") and print the share URL together with the
# speedtest and Geekbench result links.
sharetest() {
echo " Share results:"
echo " - $result_speed" | tee -a $log
log_preupload
case $1 in
'ubuntu')
share_link=$( curl -v --data-urlencode "content@$log_up" -d "poster=speedtest.sh" -d "syntax=text" "https://paste.ubuntu.com" 2>&1 | \
grep "Location" | awk '{print "https://paste.ubuntu.com"$3}' );;
'haste' )
share_link=$( curl -X POST -s -d "$(cat $log)" https://hastebin.com/documents | awk -F '"' '{print "https://hastebin.com/"$4}' );;
'clbin' )
share_link=$( curl -sF 'clbin=<-' https://clbin.com < $log );;
esac
# print result info
echo " - $GEEKBENCH_URL" | tee -a $log
echo " - $share_link"
echo ""
rm -f $log_up
}
# Produce $log_up: a copy of the main benchmark log with ANSI colour/erase
# escape sequences stripped, ready to be uploaded by sharetest.
log_preupload() {
	log_up="$HOME/speedtest_upload.log"
	true > "$log_up"
	# FIX: read the canonical log path ($log, set at startup) instead of a
	# cwd-relative "speedtest.log", and run the pipeline directly -- the old
	# code wrapped it in $(...), a useless command substitution that also hid
	# failures when the script was not run from $HOME.
	cat "$log" 2>&1 | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g" > "$log_up"
}
# Debug helper: query RIPE's REST database for the public IP (from ip.sb)
# and print the attribute jq extracts from the second result object.
# NOTE(review): requires jq; the second attribute is presumably the org /
# netname field of the RIPE object -- confirm against the API response.
get_ip_whois_org_name(){
#ip=$(curl -s ip.sb)
result=$(curl -s https://rest.db.ripe.net/search.json?query-string=$(curl -s ip.sb))
#org_name=$(echo $result | jq '.objects.object.[1].attributes.attribute.[1].value' | sed 's/\"//g')
org_name=$(echo $result | jq '.objects.object[1].attributes.attribute[1]' | sed 's/\"//g')
echo $org_name;
}
# One-packet ping to the host part of URL $1 and print the average RTT as
# "NNN.f ms" (integer part right-aligned to 3 chars), or "ping error!".
pingtest() {
# strip the scheme and any path, keeping only the host
local ping_link=$( echo ${1#*//} | cut -d"/" -f1 )
# rtt line is "rtt min/avg/max/mdev = a/b/c/d ms"; field 5 of "/" is avg
local ping_ms=$( ping -w 1 -c 1 -q $ping_link | grep 'rtt' | cut -d"/" -f5 )
# get download speed and print
if [[ $ping_ms == "" ]]; then
printf "ping error!"
else
printf "%3i.%s ms" "${ping_ms%.*}" "${ping_ms#*.}"
fi
}
# Remove every artifact the benchmark leaves in the working directory:
# dd scratch files, downloaded helper scripts, the claim-URL file, and the
# extracted Geekbench tree.
cleanup() {
    rm -f test_file_*
    local scrap
    for scrap in speedtest.py speedtest.sh tools.py ip_json.json geekbench_claim.url; do
        rm -f "$scrap"
    done
    rm -rf geekbench
}
# ---------------------------------------------------------------------------
# Region entry points.  Each variant runs the identical pipeline -- intro,
# init, system info, geoip, Geekbench 5, IO/disk tests, then the region's
# speedtest server list, timing footer, cleanup and a clbin share -- and
# differs only in $region_name and the print_speedtest_* variant called.
# ---------------------------------------------------------------------------
# Global/default server list.
bench_all(){
region_name="Global"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# United States servers.
usa_bench(){
region_name="USA"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_usa;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# India servers.
in_bench(){
region_name="India"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_in;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# European servers.
europe_bench(){
region_name="Europe"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_europe;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Asian servers.
asia_bench(){
region_name="Asia"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_asia;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Chinese servers.
china_bench(){
region_name="China"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_china;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# South American servers.
sa_bench(){
region_name="South-America"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_sa;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Australia / New Zealand servers.
au_bench(){
region_name="AU-NZ"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_au;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Ukrainian servers.
ukraine_bench(){
region_name="Ukraine"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_ukraine;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Lviv (city-level) servers.
lviv_bench(){
region_name="Lviv"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_lviv;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# Middle East servers.
meast_bench(){
region_name="Middle-East"
print_intro;
benchinit;
next;
get_system_info;
print_system_info;
ip_info4;
next;
geekbench5;
iotest;
write_io;
print_speedtest_meast;
next;
print_end_time;
cleanup;
sharetest clbin;
}
# ---------------------------------------------------------------------------
# Entry point: truncate the log, then dispatch on $1.  Aliases map many
# spellings (-us/--usa/USA, gb5/geek5, ...) onto the same action; with no or
# an unknown argument the full global benchmark runs.  A second "share"
# argument re-uploads the results after a normal run.
# ---------------------------------------------------------------------------
log="$HOME/speedtest.log"
true > $log
case $1 in
'info'|'-i'|'--i'|'-info'|'--info' )
about;sleep 3;next;get_system_info;print_system_info;;
'version'|'-v'|'--v'|'-version'|'--version')
next;about;next;;
'gb5'|'-gb5'|'--gb5'|'geek5'|'-geek5'|'--geek5' )
next;geekbench5;next;cleanup;;
'gb6'|'-gb6'|'--gb6'|'geek6'|'-geek6'|'--geek6' )
next;geekbench6;next;cleanup;;
# NOTE(review): geekbench4 is not defined in this part of the file --
# confirm it exists upstream, otherwise the 'gb' alias fails.
'gb'|'-gb'|'--gb'|'geek'|'-geek'|'--geek' )
next;geekbench4;next;cleanup;;
'io'|'-io'|'--io'|'ioping'|'-ioping'|'--ioping' )
next;iotest;write_io;next;;
'speed'|'-speed'|'--speed'|'-speedtest'|'--speedtest'|'-speedcheck'|'--speedcheck' )
about;benchinit;machine_location;print_speedtest;next;cleanup;;
'usas'|'-usas'|'uss'|'-uss'|'uspeed'|'-uspeed' )
about;benchinit;machine_location;print_speedtest_usa;next;cleanup;;
'eus'|'-eus'|'es'|'-es'|'espeed'|'-espeed' )
about;benchinit;machine_location;print_speedtest_europe;next;cleanup;;
'as'|'-as'|'aspeed'|'-aspeed' )
about;benchinit;machine_location;print_speedtest_asia;next;cleanup;;
'aus'|'-aus'|'auspeed'|'-auspeed' )
about;benchinit;machine_location;print_speedtest_au;next;cleanup;;
'sas'|'-sas'|'saspeed'|'-saspeed' )
about;benchinit;machine_location;print_speedtest_sa;next;cleanup;;
'mes'|'-mes'|'mespeed'|'-mespeed' )
about;benchinit;machine_location;print_speedtest_meast;next;cleanup;;
'ins'|'-ins'|'inspeed'|'-inspeed' )
about;benchinit;machine_location;print_speedtest_in;next;cleanup;;
'cns'|'-cns'|'cnspeed'|'-cnspeed' )
about;benchinit;machine_location;print_speedtest_china;next;cleanup;;
'uas'|'-uas'|'uaspeed'|'-uaspeed' )
about;benchinit;machine_location;print_speedtest_ukraine;next;cleanup;;
'lvivs'|'-lvivs' )
about;benchinit;machine_location;print_speedtest_lviv;next;cleanup;;
'ip'|'-ip'|'--ip'|'geoip'|'-geoip'|'--geoip' )
about;benchinit;next;ip_info4;next;cleanup;;
'bench'|'-a'|'--a'|'-all'|'--all'|'-bench'|'--bench'|'-Global' )
bench_all;;
'about'|'-about'|'--about' )
about;;
'usa'|'-usa'|'--usa'|'us'|'-us'|'--us'|'USA'|'-USA'|'--USA' )
usa_bench;;
'in'|'-india'|'--in'|'in'|'-in'|'IN'|'-IN'|'--IN' )
in_bench;;
'europe'|'-europe'|'--europe'|'eu'|'-eu'|'--eu'|'Europe'|'-Europe'|'--Europe' )
europe_bench;;
'asia'|'-asia'|'--asia'|'Asia'|'-Asia'|'--Asia' )
asia_bench;;
'china'|'-china'|'--china'|'mjj'|'-mjj'|'cn'|'-cn'|'--cn'|'China'|'-China'|'--China' )
china_bench;;
'au'|'-au'|'nz'|'-nz'|'AU'|'-AU'|'NZ'|'-NZ'|'-AU-NZ' )
au_bench;;
'sa'|'-sa'|'--sa'|'-South-America' )
sa_bench;;
'ukraine'|'-ukraine'|'--ukraine'|'ua'|'-ua'|'--ua'|'ukr'|'-ukr'|'--ukr'|'Ukraine'|'-Ukraine'|'--Ukraine' )
ukraine_bench;;
'lviv'|'-lviv'|'--lviv'|'-Lviv'|'--Lviv' )
lviv_bench;;
'M-East'|'-M-East'|'--M-East'|'-m-east'|'--m-east'|'-meast'|'--meast'|'-Middle-East'|'-me' )
meast_bench;;
'-s'|'--s'|'share'|'-share'|'--share' )
# run the full suite, then share to the service named by $2 (ubuntu default)
bench_all;
is_share="share"
if [[ $2 == "" ]]; then
sharetest ubuntu;
else
sharetest $2;
fi
;;
'debug'|'-d'|'--d'|'-debug'|'--debug' )
get_ip_whois_org_name;;
*)
bench_all;;
esac
# allow "speedtest.sh <region> share [service]" -- share as the 2nd arg,
# unless the share flag already ran above
if [[ ! $is_share == "share" ]]; then
case $2 in
'share'|'-s'|'--s'|'-share'|'--share' )
if [[ $3 == '' ]]; then
sharetest ubuntu;
else
sharetest $3;
fi
;;
esac
fi
| true |
301b937f3b7ecc6157e284a3dab45e1448399bbe | Shell | nkfilis/pot | /share/pot/promote.sh | UTF-8 | 1,495 | 3.671875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
promote-help()
{
echo "pot promote [-hv] -p potname"
echo ' -h print this help'
echo ' -v verbose'
echo ' -p potname : the pot name (mandatory)'
}
# Deprecated command: "zfs promote" a pot's clone datasets so they no longer
# depend on their origin snapshots.  Parses -h/-v/-p, validates the pot,
# requires root, refuses level-0 pots, promotes usr.local for level-1 pots
# and the custom dataset for all.
# NOTE(review): relies on helpers/globals defined elsewhere in pot
# (_error, _info, _is_pot, _is_uid0, _get_conf_var, EXIT, POT_ZFS_ROOT,
# _POT_VERBOSITY) -- this function cannot run standalone.
pot-promote()
{
	local _pname _origin _jdset
	_pname=
	args=$(getopt hvp: $*)
	if [ $? -ne 0 ]; then
		promote-help
		${EXIT} 1
	fi
	set -- $args
	while true; do
		case "$1" in
		-h)
			promote-help
			${EXIT} 0
			;;
		-v)
			_POT_VERBOSITY=$(( _POT_VERBOSITY + 1))
			shift
			;;
		-p)
			_pname=$2
			shift 2
			;;
		--)
			shift
			break
			;;
		esac
	done
	echo '#########################'
	echo '# promote is deprecated #'
	echo '#########################'
	if [ -z "$_pname" ]; then
		_error "-p is missing"
		promote-help
		${EXIT} 1
	fi
	if ! _is_pot "$_pname" ; then
		_error "$_pname is not a valid pot"
		promote-help
		${EXIT} 1
	fi
	_jdset=${POT_ZFS_ROOT}/jails/$_pname
	if ! _is_uid0 ; then
		${EXIT} 1
	fi
	# level 0 pots share the base directly; promoting them makes no sense
	if [ "$( _get_conf_var $_pname pot.level )" = "0" ]; then
		_error "The pot $_pname has level 0. Please promote the related base insted"
		${EXIT} 1
	fi
	# level 1 pots clone usr.local; promote it only when it still has an origin
	if [ "$( _get_conf_var $_pname pot.level )" = "1" ]; then
		_origin=$( zfs get -H origin $_jdset/usr.local | awk '{ print $3 }' )
		if [ "$_origin" != "-" ]; then
			_info "Promoting $_jdset/usr.local (origin $_origin)"
			zfs promote $_jdset/usr.local
		fi
	fi
	# every promotable pot has a custom dataset; "-" origin means already promoted
	_origin=$( zfs get -H origin $_jdset/custom | awk '{ print $3 }' )
	if [ "$_origin" != "-" ]; then
		_info "Promoting $_jdset/custom (origin $_origin)"
		zfs promote $_jdset/custom
	fi
}
| true |
a86ffdff8f2a990da8009a4da0837d88d894490f | Shell | thomasclc/testgit | /test_shell/test_ready_file_1820/rundmb | UTF-8 | 996 | 2.5625 | 3 | [] | no_license | #!/bin/sh
#set env
export LD_LIBRARY_PATH=/tmp/dmb/lib/
export CONFIG_PATH=/tmp/config/system
export DMB_DOWNLOAD_PATH=/root/dmb/download
export MG_CFG_PATH=/tmp/flashutils/res/minigui
export DMB_RES_PATH=/tmp/dmb/res
export DMB_WEB_PATH=/tmp/dmb/boa/www/cgi-bin
export DMB_WIRELESS_WIFI_PATH=/tmp/flashutils/wireless/wifi_network
export DMB_WIRELESS_3G_PATH=/tmp/flashutils/wireless/3g_network
#check sd card
umount /root
/tmp/dmb/program/runcheckdisk
#export TZ=UTC-08:00
if [ -f ${CONFIG_PATH}/TZ ]
then
TZ_INFO=`cat ${CONFIG_PATH}/TZ | awk '{print $1}'`
export TZ=${TZ_INFO}
echo ${TZ_INFO}
else
export TZ=UTC-08:00
fi
#start the boa
#killall -9 telnetd
cd /tmp/dmb/boa
./startboa
#set system time from rtc
cd /tmp/dmb/program
./rtc S
#load the driver
#killall -9 watchdog
./dmb_load_drv
#start the dmb main
#./dmb_main &
#save dmb_main pid
#DMB_MAIN_PID=$!
#start wardfork
#if [ -f wardfork ]
#then
# cp wardfork /tmp/
# cd /tmp
# ./wardfork ${DMB_MAIN_PID} &
#fi
| true |
7e5d9d82189cddf37f2d099d7d186d0a3ce939dd | Shell | deep-42-thought/archlinuxewe | /python-pyzbar/PKGBUILD | UTF-8 | 1,078 | 2.703125 | 3 | [] | no_license | # Contributor: Alex Zose <alexander[dot]zosimidis[at]gmail[dot]com>
# Contributor: Rafael Fontenelle <rafaelff@gnome.org>
# Maintainer: Erich Eckner <arch at eckner dot net>
pkgname=python-pyzbar
_pkgname=${pkgname#*-}
pkgver=0.1.8
pkgrel=1
pkgdesc="A ctypes-based wrapper around the zbar barcode reader"
arch=('any')
license=('MIT')
url="https://github.com/NaturalHistoryMuseum/$_pkgname/"
depends=('python-pillow' 'zbar')
makedepends=('python-setuptools')
source=("$_pkgname-$pkgver.tar.gz::$url/archive/v$pkgver.tar.gz")
sha256sums=('f51c82c2864f8e5a8d44f55853e027f8cbc592324b7afffa62100f2f9c54cbdb')
build() {
cd "$srcdir/$_pkgname-$pkgver"
python setup.py build
}
package() {
cd "$srcdir/$_pkgname-$pkgver"
python setup.py install --root="$pkgdir/" --optimize=1 --skip-build
install -Dm644 CHANGELOG.md "$pkgdir/usr/share/doc/$_pkgname/CHANGELOG.md"
install -Dm644 DEVELOPING.md "$pkgdir/usr/share/doc/$_pkgname/DEVELOPING.md"
install -Dm644 README.rst "$pkgdir/usr/share/doc/$_pkgname/README.rst"
install -Dm644 LICENSE.txt "$pkgdir/usr/share/licenses/$_pkgname/LICENSE.txt"
}
| true |
2b5688197672a69032822349117a1c696634c64c | Shell | artistech-inc/xcn-setup | /xcn/emane_scripts/set_location.sh | UTF-8 | 3,005 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Copyright (c) 2011-2018 Raytheon BBN Technologies Corp. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Raytheon BBN Technologies nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @author Will Dron <will.dron@raytheon.com>
# Locate the single running XCN instance under /tmp/xcn.<runid>, read its
# EMANE event-service multicast address/ports from the generated configs,
# then either (a) with 3 args, push a location event for node $1 at
# lat=$2 lon=$3, or (b) with no args, dump each local docker node's
# LocationEventInfoTable via emanesh.
dir=`ls -d /tmp/xcn.[0-9]*`
numdirs=`echo $dir | wc -w `
if [ $numdirs -ne 1 ]; then
  echo "ERROR: Found $numdirs running XCN instances.  This script only works with 1"
  exit 1
fi
runid=`echo $dir |awk -F '.' '{print $2}'`
numnodes=`ls -d ${dir}/n[0-9]*-[0-9]* |wc -w`
# eventservicegroup is "<mcast-ip>:<port>" inside eventservice.xml
info=`grep eventservicegroup ${dir}/emane_configs/eventservice.xml |grep -o '[0-9]*.[0-9]*.[0-9]*.[0-9]*:[0-9]*'`
mcaddr=`echo $info |awk -F ':' '{print $1}'`
eventport=`echo $info |awk -F ':' '{print $2}'`
controlport=`grep controlportendpoint $dir/emane_configs/platform1.xml | grep -o ":[0-9]*" |sed -e 's/://'`
if [ $# -eq 3 ]; then
  #emaneevent-location -i lxcbr.${runid} -g ${mcaddr} -p ${eventport} $1 latitude=$2 longitude=$3 altitude=0.0
  emaneevent-location -i xcn.${runid} -g ${mcaddr} -p ${eventport} $1 latitude=$2 longitude=$3 altitude=0.0
else
  # no coordinates given: report current locations of all local nodes
  #localnodes=`ps -ef |grep -o 'lxc-execute -f n[0-9][0-9]*-[0-9][0-9]*' |awk '{print $NF}'`
  localnodes=`docker container ls |grep n[0-9][0-9]*-[0-9][0-9]*|awk '{print $NF}'`
  for node in $localnodes; do
    #ipaddr=`grep ipv4 /tmp/xcn.${runid}/${node}/var/lxc.conf |awk -F '=' '{print $2}'`
    ipaddr=`docker container inspect ${node} |grep IPv4Address |awk -F '"' '{print $4}'`
    echo emanesh -p ${controlport} ${ipaddr} get table nems phy LocationEventInfoTable
    emanesh -p ${controlport} ${ipaddr} get table nems phy LocationEventInfoTable
  done
fi
| true |
b4672d623c0f962642bf520c94e609e5c8f73289 | Shell | audriusrudalevicius/dotfiles | /scripts/notes_dmenu | UTF-8 | 511 | 3.359375 | 3 | [] | no_license | #!/bin/zsh
# Script to lookup any kind of notes, listing by most often used ones
# requires dmenu compiled with XFT support for the font
source ${XDG_CONFIG_HOME:-$HOME/.config}/solarized/colors.ini
HIST="$HOME/.dmenu/notes_hist"
NOTE_PATH="$HOME/.notes"
FN="xft:Inconsolata:size=16"
[[ -d "$NOTE_PATH" ]] || {
mkdir $NOTE_PATH
}
NOTE=$(ls $NOTE_PATH | cat | dmenu -i -nb $base03 -nf $base0 -sb $base02 -sf $orange -fn "$FN" -p "Note:" -hf "$HIST")
if [ "$NOTE" ]; then
gvim "$NOTE_PATH/$NOTE"
fi
| true |
0e5709b44bbcd39b0bd099aea1fd60edaaf329f5 | Shell | mdchao2010/docs | /publish.sh | UTF-8 | 1,223 | 3.671875 | 4 | [] | no_license | #!/bin/bash
#defined default
TOMCAT_HOME="/usr/local/apache-tomcat-7.0.59"
TOMCAT_PORT=8080
PROJECT="$1"
#param validate
if [ "$2" != "" ]; then
TOMCAT_PORT=$2
fi
if [ "$3" != "" ]; then
TOMCAT_HOME="$3"
fi
#shutdown tomcat
TOMCAT_PROCESS=$(ps -aef | grep ${TOMCAT_HOME}/conf | grep -v grep |awk '{print $2}')
echo now process is $TOMCAT_PROCESS
kill $TOMCAT_PROCESS
echo kill tomcat process[$TOMCAT_PROCESS]
#check tomcat process
tomcat_pid=`/usr/sbin/lsof -n -P -t -i :$TOMCAT_PORT`
echo "current :" $tomcat_pid
while [ -n "$tomcat_pid" ]
do
sleep 5
tomcat_pid=`/usr/sbin/lsof -n -P -t -i :$TOMCAT_PORT`
echo "scan tomcat pid :" $tomcat_pid
done
#bak project
if [ -f ${TOMCAT_HOME}/webapps/${PROJECT}.war ]; then
BAK_DIR=${TOMCAT_HOME}/bak/$PROJECT/`date +%Y%m%d%H%M%S`
mkdir -p "$BAK_DIR"
mv ${TOMCAT_HOME}/webapps/$PROJECT.war "$BAK_DIR"/"$PROJECT"_`date +%Y%m%d%H%M%S`.war
echo "bak finished........"
fi
#remove previous ziped project
rm -rf ${TOMCAT_HOME}/webapps/${PROJECT}
#deploy to webapps dir
mv ${TOMCAT_HOME}"/"${PROJECT}.war ${TOMCAT_HOME}/webapps/$PROJECT.war
#start tomcat
"$TOMCAT_HOME"/bin/startup.sh
#operate log
echo "tail -f ${TOMCAT_HOME}/logs/catalina.out to see log info"
| true |
08119ce45c47c212478d82efa68994a75ea7e2a0 | Shell | gurubhai/OSautomate | /common_services.sh | UTF-8 | 1,329 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Check if openstack configuration file is present, if yes then source it.
if [[ -f osconfig.conf ]]
then
. osconfig.conf
else
echo "Configuration file not found. Please create osconfig.conf"
exit 1
fi
# Point apt at the Ubuntu Cloud Archive (precise-updates/folsom), install
# its signing keyring, then update and upgrade the system.
configure_cloud_archive(){
sudo rm -f /etc/apt/sources.list.d/cloud.list
sudo touch /etc/apt/sources.list.d/cloud.list
echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/folsom main" | sudo tee -a /etc/apt/sources.list.d/cloud.list
sudo apt-get -y install ubuntu-cloud-keyring
echo "Now updating and upgrading...Please have a cup coffee"
sudo apt-get -y update
sudo apt-get -y upgrade
}
# Install the packages every OpenStack node needs (VLAN/bridging tools,
# NTP, and the MySQL Python bindings).
install_basic_packages(){
echo "Now installing vlan bridge-utils ntp python-mysqldb"
sudo apt-get -y install vlan bridge-utils ntp python-mysqldb
}
# Persistently enable IPv4 forwarding (uncomment the line in
# /etc/sysctl.conf) and turn it on for the running kernel immediately.
enable_ip_forwarding() {
	# FIX: the sed script was quoted as /'s/.../g/' which expands to
	# "/s/.../g/" -- sed treats the leading "/s/" as an address and rejects
	# the rest as an unknown command, so the config file was never edited.
	sudo sed -i 's/^#net\.ipv4\.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
	sudo sysctl net.ipv4.ip_forward=1
}
# Make this host an NTP server for the cluster: after the ubuntu.com pool
# entry, add the local clock (127.127.1.0) as a stratum-10 fallback so
# other nodes can sync even without internet access, then restart ntpd.
configure_ntp(){
	echo "***************************"
	echo "***** CONFIGURING NTP *****"
	echo "***************************"
	# FIX: the sed script was quoted as /'s/.../g/' which expands to
	# "/s/.../g/" -- an invalid address/command pair that sed rejects, so
	# ntp.conf was never modified.
	sudo sed -i 's/server ntp.ubuntu.com/server ntp.ubuntu.com\nserver 127.127.1.0\nfudge 127.127.1.0 stratum 10/g' /etc/ntp.conf
	sudo service ntp restart
}
# Main
# Run the setup steps in dependency order: repos first, then packages,
# kernel forwarding, and finally NTP.
configure_cloud_archive
install_basic_packages
enable_ip_forwarding
configure_ntp
| true |
49a1a21756f58eeef33dac44df0902821405d6f3 | Shell | MW-autocat-script/fr-MW-autocat-script | /catscripts/Entertainment/Video_games/Role-playing_video_games/Final_Fantasy_series/FinalFantasy.sh | UTF-8 | 3,905 | 2.96875 | 3 | [] | no_license | #!/bin/bash
egrep -i 'Final(| )Fantasy(| )(1\b|I\b)|Final Fantasy series' newpages.txt >> FinalFantasyseries.txt #There isn't a category for the first game yet
egrep -i 'Final Fantasy (2|II\b)' newpages.txt >> FinalFantasy2.txt
egrep -i 'Final Fantasy (3|III\b)' newpages.txt >> FinalFantasy3.txt
egrep -i 'Final Fantasy (4|IV\b)|\bFF(| )IV\b' newpages.txt >> FinalFantasy4.txt
egrep -i 'Final Fantasy (5|\bV\b)|\bFF(| )V\b' newpages.txt >> FinalFantasy5.txt
egrep -i 'Final Fantasy (6|\bVI\b)|\bFF(| )VI\b' newpages.txt >> FinalFantasy6.txt
egrep -i 'Final(| )Fantasy(| )(7|\bVII\b)|\bFF(| )VII\b' newpages.txt | egrep -iv 'Before Crisis|Crisis Core|Dirge of Cerberus|Advent Children' >> FinalFantasy7.txt
egrep -i 'Advent(| )Children' newpages.txt >> AdventChildren.txt
egrep -i 'Final Fantasy (8|\bVIII\b)|\bFF(| )VIII\b' newpages.txt >> FinalFantasy8.txt
egrep -i 'Final Fantasy (9|\bIX\b)|\bFF(| )IX\b' newpages.txt >> FinalFantasy9.txt
egrep -i 'Final Fantasy (10|\bX\b)|\bFF(| )X\b' newpages.txt | egrep -iv 'Final Fantasy X-2|FF(| )X-2' >> FinalFantasy10.txt
egrep -i 'Final Fantasy (10-2|\bX-2\b)|\bFF(| )X-2\b' newpages.txt >> FinalFantasyX-2.txt
egrep -i 'Final Fantasy (11|\bXI\b)|\bFF(| )(XI|11)\b' newpages.txt >> FinalFantasy11.txt
egrep -i 'Final Fantasy (12|\bXII\b)|\bFF(| )(12|XII)\b' newpages.txt >> FinalFantasy12.txt
egrep -i 'Final(| )Fantasy(| )(13|\bXIII\b)|\bFF(| )(13|XIII\b)' newpages.txt >> FinalFantasy13.txt
FFSERIES=`stat --print=%s FinalFantasyseries.txt`
FF2=`stat --print=%s FinalFantasy2.txt`
FF3=`stat --print=%s FinalFantasy3.txt`
FF4=`stat --print=%s FinalFantasy4.txt`
FF5=`stat --print=%s FinalFantasy5.txt`
FF6=`stat --print=%s FinalFantasy6.txt`
FF7=`stat --print=%s FinalFantasy7.txt`
FF8=`stat --print=%s FinalFantasy8.txt`
FF9=`stat --print=%s FinalFantasy9.txt`
FF10=`stat --print=%s FinalFantasy10.txt`
FFX2=`stat --print=%s FinalFantasyX-2.txt`
FF11=`stat --print=%s FinalFantasy11.txt`
FF12=`stat --print=%s FinalFantasy12.txt`
FF13=`stat --print=%s FinalFantasy13.txt`
ADVENT=`stat --print=%s AdventChildren.txt`
# For each non-empty candidate file, export the file/category pair the
# external $CATEGORIZE command expects and invoke it.
if [ $FFSERIES -ne 0 ];
then
export CATFILE="FinalFantasyseries.txt"
export CATNAME="Final Fantasy series"
$CATEGORIZE
fi
if [ $FF2 -ne 0 ];
then
export CATFILE="FinalFantasy2.txt"
export CATNAME="Final Fantasy II"
$CATEGORIZE
fi
if [ $FF3 -ne 0 ];
then
export CATFILE="FinalFantasy3.txt"
export CATNAME="Final Fantasy III"
$CATEGORIZE
fi
if [ $FF4 -ne 0 ];
then
export CATFILE="FinalFantasy4.txt"
export CATNAME="Final Fantasy IV"
$CATEGORIZE
fi
if [ $FF5 -ne 0 ];
then
export CATFILE="FinalFantasy5.txt"
export CATNAME="Final Fantasy V"
$CATEGORIZE
fi
if [ $FF6 -ne 0 ];
then
export CATFILE="FinalFantasy6.txt"
export CATNAME="Final Fantasy VI"
$CATEGORIZE
fi
if [ $FF7 -ne 0 ];
then
export CATFILE="FinalFantasy7.txt"
export CATNAME="Final Fantasy VII"
$CATEGORIZE
fi
if [ $FF8 -ne 0 ];
then
 # Fixed copy-paste bug: this branch is guarded by the FF8 list size but
 # previously exported the Final Fantasy IX file and category name, so
 # FF8 pages were never categorized and FF9 pages were processed twice.
 export CATFILE="FinalFantasy8.txt"
 export CATNAME="Final Fantasy VIII"
 $CATEGORIZE
fi
# Same export-and-categorize pattern for the remaining games and the
# Advent Children film.
if [ $FF9 -ne 0 ];
then
 export CATFILE="FinalFantasy9.txt"
 export CATNAME="Final Fantasy IX"
 $CATEGORIZE
fi
if [ $FF10 -ne 0 ];
then
 export CATFILE="FinalFantasy10.txt"
 export CATNAME="Final Fantasy X"
 $CATEGORIZE
fi
if [ $FFX2 -ne 0 ];
then
 export CATFILE="FinalFantasyX-2.txt"
 export CATNAME="Final Fantasy X-2"
 $CATEGORIZE
fi
if [ $FF11 -ne 0 ];
then
 export CATFILE="FinalFantasy11.txt"
 export CATNAME="Final Fantasy XI"
 $CATEGORIZE
fi
if [ $FF12 -ne 0 ];
then
 export CATFILE="FinalFantasy12.txt"
 export CATNAME="Final Fantasy XII"
 $CATEGORIZE
fi
if [ $FF13 -ne 0 ];
then
 export CATFILE="FinalFantasy13.txt"
 export CATNAME="Final Fantasy XIII"
 $CATEGORIZE
fi
if [ $ADVENT -ne 0 ];
then
 export CATFILE="AdventChildren.txt"
 export CATNAME="Final Fantasy VII: Advent Children"
 $CATEGORIZE
fi
rm FinalFantasy*.txt #Screw it :)
rm AdventChildren.txt | true |
49883d268e14297efd818348171cd3b406eb3bfe | Shell | ThomasYeoLab/CBIG | /stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Code/CBIG_gwMRF_copy_fs_average.sh | UTF-8 | 639 | 2.5625 | 3 | [
"MIT"
] | permissive | ## Script copies basic freesurfer files
##Written by Alexander Schaefer and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
# For each FreeSurfer average subject, mirror the label files (cortex,
# medial-wall and aparc parcellations) and the core surface/curvature files
# from $FREESURFER_HOME into the project's local FreeSurfer tree.
for i in fsaverage fsaverage5 fsaverage6;
do mkdir ../FreeSurfer/$i/label/
mkdir ../FreeSurfer/$i/surf/
rsync -az $FREESURFER_HOME/subjects/${i}/label/*cortex.label ../FreeSurfer/$i/label/
rsync -az $FREESURFER_HOME/subjects/${i}/label/*Medial*.label ../FreeSurfer/$i/label/
rsync -az $FREESURFER_HOME/subjects/${i}/label/*aparc* ../FreeSurfer/$i/label/
# Copy each per-hemisphere surface/overlay type (*.white, *.orig, ...).
for j in white orig pial inflated curv sulc;
do
rsync -az $FREESURFER_HOME/subjects/${i}/surf/*${j} ../FreeSurfer/$i/surf/
done;
done;
| true |
34c52dd242af9b221c1d098afb8589d53568774e | Shell | ondevice/ondevice | /build/deb/debian/ondevice-daemon.preinst | UTF-8 | 1,475 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# preinst script for ponysay
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <new-preinst> `install'
# * <new-preinst> `install' <old-version>
# * <new-preinst> `upgrade' <old-version>
# * <old-preinst> `abort-upgrade' <new-version>
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
# dpkg calls preinst with the action as $1; only install/upgrade proceed to
# the provisioning steps below (see the summary comment above).
case "$1" in
    install|upgrade)
        # continue after the esac
    ;;

    abort-upgrade)
        exit 0
    ;;

    *)
        echo "preinst called with unknown argument \`$1'" >&2
        exit 1
    ;;
esac

# create user and set up home
# Idempotent: getent/id checks make re-runs on upgrade safe.
getent group ondevice >/dev/null 2>&1 || \
    groupadd --system ondevice
id ondevice >/dev/null 2>&1 || \
    useradd --system --home /var/lib/ondevice/ \
        --gid ondevice ondevice

# setup files and permissions
mkdir -p /var/lib/ondevice/
mkdir -p /var/log/ondevice/
chown -R ondevice:ondevice /var/lib/ondevice/ /var/log/ondevice/

#
# backwards compatibility fix (2017-01-28):
# update home directory and move old ondevice.conf
if getent passwd ondevice|cut -d: -f6|grep -q /usr/share/ondevice; then
    usermod --home /var/lib/ondevice/ ondevice
fi
if [ ! -e /var/lib/ondevice/ondevice.conf ]; then
    if [ -e /usr/share/ondevice/ondevice.conf ]; then
        mv /usr/share/ondevice/ondevice.conf /var/lib/ondevice/ondevice.conf
    fi
fi
# //end fix

# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#

exit 0
| true |
6a7fba75da5cdfff6965d335abd499357a1e2afe | Shell | Newercito/MgToLs | /.RansomWAREM/Ransomware.sh | UTF-8 | 854 | 3 | 3 | [
"MIT"
] | permissive | clear
echo "Creador de ransomware"
echo " "
echo "by NePtYx"
echo " "
echo "Instrucciones:"
echo "1)Coloca el nombre de tu ransomware"
echo "2)Coloca el mensaje de tu ransomware"
echo "3)Introduce tu nombre de cracker o hacker"
echo "4)Y listo ransomware creado"
echo "ADVERTENCIA:Si no introduces tus datos bien"
echo "los datos en el ransomware apareceran con signos de"
echo "interrogacion."
echo " "
echo "Introduce el nombre de tu ransomware:"
echo " "
read input
echo " "
echo "Introduce el mensaje que dira tu ransomware:"
echo " "
read input1
echo " "
echo "Introduce tu nombre de cracker o hacker:"
echo " "
read input2
echo " "
clear
echo "Creador de ransomware"
echo " "
echo "by NePtYx"
echo " "
echo "Generando $input1..."
mkdir Creados
cp 1 $input.bat
mv $input.bat Creados
echo " "
echo "$input creado exitosamente..."
echo "Saliendo..."
| true |
b8f27cd97f1ba306b46b4af2181c67e19c662a0f | Shell | xtecuan/myapi-sample | /sample-db/run.sh | UTF-8 | 1,207 | 3.03125 | 3 | [] | no_license | #!/bin/ash
source ./env.sh
export DOCKER_SHARES=/opt/docker_shares
export PGDATA=$DOCKER_SHARES/data
export PORT=5432
export PASS="Welcome123\$"
export CONTAINER=$CONTAINER
export IMAGE=$IMAGE
#NFS SHARE
export NFS_VOL_NAME=mypgdata
export NFS_LOCAL_MNT=/var/lib/postgresql/data/pgdata
export NFS_SERVER="192.168.0.100"
export NFS_SHARE=/pgdata
export NFS_OPTS=vers=3,soft
docker run --privileged=true --restart unless-stopped -d \
--name $CONTAINER \
-e POSTGRES_PASSWORD=$PASS \
-e PGDATA=/var/lib/postgresql/data \
-v $PGDATA:/var/lib/postgresql/data \
-p $PORT:5432 \
$IMAGE
#docker run --privileged=true --restart unless-stopped -d \
# --name $CONTAINER \
# -e POSTGRES_PASSWORD=$PASS \
# -e PGDATA=/var/lib/postgresql/data/pgdata \
# -v $NFS_VOL_NAME:$NFS_LOCAL_MNT \
# -p $PORT:5432 \
# $IMAGE
#docker run --privileged=true --restart unless-stopped -d \
#--name $CONTAINER \
#-e POSTGRES_PASSWORD=$PASS \
#-e PGDATA=/var/lib/postgresql/data/pgdata \
#--mount "src=$NFS_VOL_NAME,dst=$NFS_LOCAL_MNT,volume-opt=device=:$NFS_SHARE,\"volume-opt=o=addr=$NFS_SERVER,$NFS_OPTS\",type=volume,volume-driver=local,volume-opt=type=nfs" \
#-p $PORT:5432 \
#$IMAGE
| true |
01341f1dbd2abdad684ea2365e417340294c9734 | Shell | PraneshUlleri/Bash-Scripting | /labsheet1/q20b.sh | UTF-8 | 85 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Count the space characters in a file under the invoking user's home
# directory; $1 is the path relative to /home/<user> (must start with '/').
usr=$(whoami)
# Build the absolute path in one step; quote it everywhere so paths that
# contain spaces or glob characters are not word-split by the shell.
path="/home/${usr}$1"
# grep -o emits one line per matched space, so wc -l yields the space count.
grep -o " " "$path" | wc -l
| true |
a6a0e0b2362cccdc468bb5722affe01dde312199 | Shell | sevaho/archiving-system-school-project | /fileserver/scripts/diskSpaceChecker.cron | UTF-8 | 764 | 3.421875 | 3 | [] | no_license | #!/bin/sh
#cron each week
# Monitor root filesystem usage. At or above 95% usage, delete the oldest
# archived files until usage drops below 95%; between 90% and 95% only warn.
# The 95% branch must be tested FIRST: the original checked ">= 90" first,
# which made the purge branch unreachable (any value >= 95 is also >= 90).
df -H "/" | awk '{ print $5 " " $1 }' | grep "/" | while read output;
do
	echo $output
	usep=`df -H "/" | awk '{ print $5 " " $1 }' | grep "/" | cut -d " " -f1 | tr -d "%"`
	partition=$(echo $output | awk '{ print $2 }' )
	if [ $usep -ge 95 ]
	then
		while [ $usep -ge 95 ]
		do
			# Oldest file first: sort on the %T+ timestamp prefix.
			# NOTE: cut -f2 truncates paths containing spaces -- TODO confirm
			# the archive never contains such names.
			filetoremove=`find /home/vagrant/files -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d " " -f2`
			# Guard: stop instead of spinning forever once nothing is left.
			[ -n "$filetoremove" ] || break
			rm -rf "$filetoremove"
			usep=`df -H "/" | awk '{ print $5 " " $1 }' | grep "/" | cut -d " " -f1 | tr -d "%"`
		done
	elif [ $usep -ge 90 ]; then
		echo "Running out of space check $partition \ $output"
		# echo "Running out of space \"$partition ($usep%)\" on $(hostname) as on $(date)" |
		# mail -s "Alert: Almost out of disk space $usep%" you@somewhere.com
	fi
done
| true |
9d1879492774c0cf9c8993c6f6920bc55b4df4f0 | Shell | wqx081/mpr_mq | /third_party/epubtools/gen_mk.sh | UTF-8 | 610 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Author: wang qixiang (wangqx@mpreader.com)
# Collect C/C++ sources and headers under an input directory and print a
# Makefile template skeleton to stdout.
# Args: $1 - input directory (default "."), $2 - output makefile name
# (default "Makefile"). NOTE(review): OUTPUT_MF is assigned but never used
# below -- the template goes to stdout, not to the file; confirm intent.
if [ -z "$1" ]; then
    INPUT_DIR="."
else
    INPUT_DIR=$1
fi
if [ -z "$2" ]; then
    OUTPUT_MF="Makefile"
else
    OUTPUT_MF=$2
fi
#echo "$INPUT_DIR : $OUTPUT_MF"
echo "Starting."
# POSIX emacs-style -regex: alternation and grouping must be backslashed.
CPP_SOURCES=`find $INPUT_DIR -regex '.*\.\(cpp\|cc\)'`
C_SOURCES=`find $INPUT_DIR -regex '.*\.\(c\)'`
HEADERS=`find $INPUT_DIR -regex '.*\.\(h\|hpp\)'`
echo "$CPP_SOURCES"
echo "================="
echo "$C_SOURCES"
echo "================="
echo "$HEADERS"
echo "+++++++++++++"
#makefile template
cat << EOF
APP=@APP
CXXFLAGS=@CXXFLAGS
LIB_FILES=@LIB_FILES
EOF
echo "Done."
| true |
d1e4342c6de9eea1aa5c129f3d18f690c30c1e56 | Shell | katie-jones/nextcloud | /entrypoint-with-init.sh | UTF-8 | 2,624 | 3.25 | 3 | [] | no_license | #!/bin/sh
# Patch /entrypoint.sh
patch /entrypoint.sh << EOM
@@ -186,6 +186,40 @@
if [ "$try" -gt "$max_retries" ]; then
echo "Installing of nextcloud failed!"
exit 1
+ else
+ echo "Running post-install user initialization"
+
+ # Install and enable encryption apps.
+ run_as "php /var/www/html/occ app:enable encryption"
+ run_as "php /var/www/html/occ encryption:enable"
+ run_as "php /var/www/html/occ app:install end_to_end_encryption"
+
+ # Disable encrypted home storage.
+ run_as "php /var/www/html/occ config:app:set encryption encryptHomeStorage --value 0"
+
+ # Install and enable 2FA app.
+ run_as "php /var/www/html/occ app:install twofactor_totp"
+
+ # Enable external storage and create mounts.
+ run_as "php /var/www/html/occ app:enable files_external"
+ if [ -f /opt/nextcloud/custom-mounts.json ]; then
+ echo "Mounting custom external storage"
+ run_as "php /var/www/html/occ files_external:import /opt/nextcloud/custom-mounts.json" || echo "Custom mounts failed"
+ fi
+
+ # Auto-create users if provided.
+ for f in \$(seq 1 100); do
+ username_var="AUTOCREATE_USERNAME\$f"
+ password_var="AUTOCREATE_PASSWORD\$f"
+ username=\$(eval echo \\\${\$username_var:-})
+ password=\$(eval echo \\\${\$password_var:-})
+ if [ -n "\${username}" ] && [ -n "\${password}" ]; then
+ echo "Creating user \${username}"
+ export OC_PASS=\${password}
+ run_as "php /var/www/html/occ user:add --password-from-env --group=\"users\" \${username}"
+ fi
+ done
+ echo "Finished post-install user initialization"
fi
if [ -n "${NEXTCLOUD_TRUSTED_DOMAINS+x}" ]; then
echo "Setting trusted domains…"
EOM
# Run the (now patched) stock entrypoint, forwarding all original arguments.
# "$@" preserves each argument as one word; bare $@ would word-split any
# argument containing spaces.
/entrypoint.sh "$@"
| true |
96a443f8463d34278e09e52b7fc1889c7f173da4 | Shell | mildred/dops | /bin/do-passwd | UTF-8 | 2,040 | 4.03125 | 4 | [] | no_license | #!/bin/bash
: ${DOPS_BIN_DIR:="$(dirname "$(readlink -f "$0")")"}
: ${DOPS_DIR:="$(cd "$DOPS_BIN_DIR/.."; pwd)"}
. "$DOPS_DIR/dopsh_functions.sh"
dopsh-init "$0" "$@"
dopsh-parseopt "H:help command" "$@" || exit 1
has_user(){
cut -d: -f1 /etc/passwd | fgrep -x "$1" >/dev/null
}
has_group(){
cut -d: -f1 /etc/group | fgrep -x "$1" >/dev/null
}
declare_group(){
local op_group op_gid op
dopsh-parseopt "H:help -gid= group" "$@" || return 1
if ! has_group "$op_group"; then
if has groupadd; then
op=()
[[ -n "$op_gid" ]] && op+=(--gid "$op_gid")
( set -x; groupadd ${op[@]} "$op_group" )
return $?
else
fail "Cannot create group $op_group: Unknown platform"
fi
fi
}
declare_user(){
local op_uid op_name op_groups op_login_group op_shell op_home op_user
dopsh-parseopt "H:help -uid= -name= -groups= -login-group= -shell= -home= user" "${opts[@]}" || exit 1
: ${op_login_group:="$op_user"}
if has useradd && has usermod; then
local op
if ! has_user "$op_user"; then
op=(useradd --create-home -g "$op_login_group")
else
op=(usermod --move-home -g "$op_login_group")
fi
[ -n "$op_home" ] && op+=(--home "$op_home")
[ -n "$op_shell" ] && op+=(--shell "$op_shell")
[ -n "$op_uid" ] && op+=(--uid "$op_uid")
[ -n "$op_name" ] && op+=(--comment "$op_name")
[ -n "$op_name" ] && op+=(--comment "$op_name")
[ -n "$op_groups" ] && op+=(--groups "$op_groups")
set -e
declare_group "$op_login_group"
( set -x; "${op[@]}" "$op_user" )
else
fail "Cannot create user $op_user: Unknown platform"
fi
if [ -n "$op_home" ] && ! [ -e "$op_home" ]; then
( set -x; install -o "$op_user" -g "$op_login_group" -m 0711 -d "$op_home" )
fi
}
case $op_command in
declare-user)
declare_user "${opts[@]}"
exit $?
;;
declare-group)
declare_group "${opts[@]}"
exit $?
;;
*)
fail "Invalid command $op_command (must be one of: declare-user declare-group)"
;;
esac
| true |
f693a4fb8791c6b9ed646586d4f223e77a0f690a | Shell | zeus1292/kamikaze | /adding | UTF-8 | 141 | 2.953125 | 3 | [] | no_license | #!/bin/bash
#Author - Akshay Kumar
#This is a great comment
# Sum all numeric command-line arguments and print the total.
# Factored into a function so the logic is reusable/testable; the script's
# observable behavior (print the sum of its arguments) is unchanged.
sum_args() {
	local total=0
	local arg
	# Quote "$@" so each argument is treated as a single word.
	for arg in "$@"
	do
		total=$((total + arg))
	done
	echo "$total"
}
sum_args "$@"
| true |
d2e77bde06f5a19743d838e5549cc87ff1d3b803 | Shell | tfuruya/dotfiles | /.vscode/extensions/ms-vscode.csharp-1.7.0/bin/run | UTF-8 | 783 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve the directory this script lives in (symlink-safe via pwd -P).
BIN_DIR="$(cd "$(dirname "$0")" && pwd -P)"
# Pick the bundled mono binary matching the host OS and CPU architecture.
PLATFORM_SUFFIX=""
ARCH_SUFFIX=""
case `uname` in
    "Darwin") PLATFORM_SUFFIX=".osx" ;;
    "Linux")
        PLATFORM_SUFFIX=".linux"
        case `uname -m` in
            "x86" | "i386" | "i686") ARCH_SUFFIX="-x86" ;;
            "x86_64") ARCH_SUFFIX="-x86_64" ;;
        esac
        ;;
esac
MONO_CMD=${BIN_DIR}/mono${PLATFORM_SUFFIX}${ARCH_SUFFIX}
# If we don't have a mono binary from the platform, try a globally-installed one
if [ ! -e "${MONO_CMD}" ] ; then
    MONO_CMD="mono"
fi
# Point mono at the bundled assemblies and per-platform config, then run
# OmniSharp with the caller's arguments.
export MONO_PATH=${BIN_DIR}/framework:${BIN_DIR}/omnisharp
export MONO_CFG_DIR=${BIN_DIR}/etc
MONO_CMD="${MONO_CMD} --config ${BIN_DIR}/etc/config${PLATFORM_SUFFIX}"
${MONO_CMD} ${BIN_DIR}/omnisharp/OmniSharp.exe "$@"
| true |
2b3de851c54430cb228e4d4cf58d6cb5e3cf6273 | Shell | CustomROMs/android_vendor | /st-ericsson/validation/hardware/tat/tat/tatliqtuning/DthMountForIqt | UTF-8 | 905 | 2.9375 | 3 | [] | no_license | #!/bin/sh
case $1 in
start)
	# Restart the output task cleanly: kill any stale MmteOutputTsk first.
	if [ -n "`ps | grep MmteOutputTsk | grep -v grep`" ]
	then
		echo "remove the previous task MmteOutputTsk"
		pkill MmteOutputTsk
	fi
	# Launch sl3d in the background unless it is already running.
	if [ -z "`ps | grep sl3d | grep -v grep`" ]
	then
		echo "run sl3d"
		sl3d&
	fi
	sleep 8
	sl3d_detect=`ps | grep "/usr/bin/dth9pserver -s -p 1024" | grep -v grep`
	sl3d_count=4
	# Poll up to 4 more seconds for dth9pserver to appear.
	while [ -z "$sl3d_detect" ] && [ "$sl3d_count" != "0" ]
	do
		sleep 1
		sl3d_detect=`ps | grep "/usr/bin/dth9pserver -s -p 1024" | grep -v grep`
		echo -n "."
		sl3d_count=`expr $sl3d_count - 1`
	done
	# POSIX test uses '=' for string equality; '==' is a bashism that fails
	# under strict /bin/sh implementations (this script declares #!/bin/sh).
	if [ "$sl3d_count" = "0" ]
	then
		echo -e "\r\033[31m FAIL to run dth9pserver!!!\033[0m\n"
	else
		if [ -z "`mount | grep dth`" ]
		then
			echo "mount dth"
			mount -t 9p -oport=1024 127.0.0.1 /mnt/dth
		fi
		chmod +x /usr/bin/dthfilter
	fi
	;;
*)
	echo -e "FullAdc start <n>\n\r n: tempo in seconds\n"
	;;
esac
| true |
3d99ea8cf030ceab4c896a9d99b651e0c2bf3bd2 | Shell | s0la/orw | /scripts/offset_tiled_windows.sh | UTF-8 | 6,893 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#read x_border y_border x y <<< $(~/.orw/scripts/windowctl.sh -p | cut -d ' ' -f 1-4)
#read display display_x display_y width height <<< \
# $(~/.orw/scripts/get_display.sh $x $y | cut -d ' ' -f 1-5)
orw_config=~/.config/orw/config
#read border offset <<< \ $(awk '/^'$1'_(border|offset)/ { print $NF }' $orw_config | xargs)
#read {x,y}_border {x,y}_offset <<< \
# $(awk '/^[xy]_(border|offset)/ { print $NF }' $orw_config | xargs)
read {x,y}_border {x,y}_offset is_offset limiter_index <<< $(awk '
$1 ~ "^(([xy]_)?(border|offset))$" { print $NF }
$1 == "orientation" { print ($NF ~ "^h") ? 2 : 3 }' $orw_config | xargs)
y_border=$(((y_border - x_border / 2) * 2))
[[ $is_offset == true ]] && eval $(grep offset ~/.config/orw/offsets)
#[[ $is_offset == true ]] && offset=$(awk -F '=' '/'$1'_offset/ { print $NF }' ~/.config/orw/offsets)
current_desktop=$(xdotool get_desktop)
eval windows=( $(wmctrl -lG | awk '$NF != "DROPDOWN" {
w = "\"" $1 " " $3 " " $4 " " $5 " " $6 "\""
if($2 == '$current_desktop') cd = cd "\n" w
else od = od "\n" w
} END {
print substr(cd, 2)
print substr(od, 2)
}') )
#eval windows=( $(wmctrl -lG | awk '$NF != "DROPDOWN" { print "\"" $1, $3, $4, $5, $6 "\"" }') )
# Emit each record of the global `windows` array on its own line.
list_windows() {
	local entry
	for entry in "${windows[@]}"; do
		printf '%s\n' "$entry"
	done
}
offset_direction() {
[[ $1 == x ]] && index=3 || index=4
sign=${2%%[0-9]*}
value=${2#"$sign"}
#[[ $value ~= ',' ]] && multiple_values="${value//,/ }"
[[ $value =~ ',' ]] && values="${value//,/ }"
eval offset=\${$1_offset}
if [[ $sign ]]; then
[[ $sign == + ]] && opposite_sign=- || opposite_sign=+
else
echo -e "No sign specified, exiting..\nPlease prefix value with the sign next time!"
exit
fi
while read index properties; do
windows[index]="$properties"
done <<< $(while read display_start display_end limit_start limit_end top_offset bottom_offset; do
#list_windows | sort -nk $index,$index | awk '\
list_windows | awk '\
BEGIN {
i = '$index' - 1
li = '$limiter_index'
xb = '$x_border'
yb = '$y_border'
b = '$1'b
o = '$offset'
ls = '$limit_start'
le = '$limit_end'
to = '${top_offset:-0}'
bo = '${bottom_offset:-0}'
ds = '$display_start' + o + to
de = '$display_end' - o - bo
#system("~/.orw/scripts/notify.sh \"'"$values"'\"")
if("'"$values"'") split("'"$values"'", vs)
else v = "'$value'"
} {
$2 -= xb
$3 -= yb
ws = $i
we = ws + $(i + 2)
wls = $li
wle = wls + $(li + 2)
if(wls >= ls && wle <= le) {
if(v) {
if(ws == ds) {
$(i + 2) '$opposite_sign'= v
$i '$sign'= v
}
if(we + b == de) $(i + 2) '$opposite_sign'= v
} else {
for(vi in vs) {
cv = vs[vi]
#if(ws '$sign' cv == ds) {
# $(i + 2) '$opposite_sign'= v
# $i '$sign'= v
#}
if(ws '$opposite_sign' cv == ds) {
$i '$opposite_sign'= cv
$(i + 2) '$sign'= cv
}
if(we + b '$sign' cv == de) $(i + 2) '$sign'= cv
}
}
$2 += xb
$3 += yb
print NR - 1, $0
}
}'
#| while read index properties; do
# #wmctrl -ir $id -e 0,${props// /,} &
# #echo $1 $index $id ${props// /,}
# #echo $1 $index $start $dimension
# #echo "$properties"
# offset_windows[$index]="$properties"
# done
done <<< $(awk -F '[_ ]' '/^display_[0-9]+_(xy|size|offset)/ {
if($3 == "xy") {
de = $('$index' + 1)
le = $('$limiter_index' + 2)
} else if($3 == "size") {
de = de " " $('$index' + 1)
le = le " " $('$limiter_index' + 2)
} else {
if("'$1'" == "y") bo = $(NF - 1) " " $NF
print de, le, bo
}
}' $orw_config))
}
while getopts :x:y: direction; do
offset_direction $direction $OPTARG
done
#wmctrl -k on
for win in "${windows[@]}"; do
read id x y w h <<< "$win"
wmctrl -ir $id -e 0,$((x - x_border)),$((y - y_border)),$w,$h
done
#wmctrl -k off
exit
#sign=$2
#value=$3
#[[ $2 =~ ^[+-] ]] && sign=${2:0:1} value
[[ $1 == x ]] && index=3 || index=4
sign=${2%%[0-9]*}
value=${2#"$sign"}
if [[ $sign ]]; then
[[ $sign == + ]] && opposite_sign=- || opposite_sign=+
else
echo -e "No sign specified, exiting..\nPlease prefix value with the sign next time!"
exit
fi
offset_direction y
offset_direction x
exit
#list_windows | sort -nk $index,$index | awk '{ print $1, $2, $3, $4, $5 }'
#exit
#while read display display_start display_end; do
while read display_start display_end limit_start limit_end top_offset bottom_offset; do
#if [[ $1 == y ]]; then
# while read name position bar_x bar_y bar_widht bar_height adjustable_width frame; do
# if ((adjustable_width)); then
# read bar_width bar_height bar_x bar_y < ~/.config/orw/bar/geometries/$bar_name
# fi
# current_bar_height=$((bar_y + bar_height + frame))
# if ((position)); then
# ((current_bar_height > top_offset)) && top_offset=$current_bar_height
# else
# ((current_bar_height > bottom_offset)) && bottom_offset=$current_bar_height
# fi
# done <<< $(~/.orw/scripts/get_bar_info.sh $display)
#fi
#echo de $display_start $display_end
#echo mmp $min_point $max_point
#echo bo $top_offset $bottom_offset
#continue
list_windows | sort -nk $index,$index | awk '\
BEGIN {
v = '$value'
i = '$index' - 1
li = '$limiter_index'
xb = '$x_border'
yb = '$y_border'
myb = (yb - xb / 2) * 2
b = '$1'b
o = '$offset'
ls = '$limit_start'
le = '$limit_end'
to = '${top_offset:-0}'
bo = '${bottom_offset:-0}'
ds = '$display_start' + o + to
de = '$display_end' - o - bo
} {
$2 -= xb
$3 -= myb
ws = $i
we = ws + $(i + 2)
wls = $li
wle = wls + $(li + 2)
#if(ws >= ds && we <= de) {
if(wls >= ls && wle <= le) {
if(ws == ds) {
$(i + 2) '$opposite_sign'= v
$i '$sign'= v
}
if(we + b == de) $(i + 2) '$opposite_sign'= v
#system("~/.orw/scripts/notify.sh -t 22 \"" ws " " ds " " $0 "\"")
#system("wmctrl -ir " $1 " -e 0," $2 "," $3 "," $4 "," $5 " &")
#print $1, $2, $3, $4, $5
print
}
}' | while read id props; do
wmctrl -ir $id -e 0,${props// /,} &
done
done <<< $(awk -F '[_ ]' '/^display_[0-9]+_(xy|size|offset)/ {
if($3 == "xy") {
de = $('$index' + 1)
le = $('$limiter_index' + 2)
} else if($3 == "size") {
de = de " " $('$index' + 1)
le = le " " $('$limiter_index' + 2)
} else {
#if("'$1'" == "y") o = o " " $(NF - 1) " " $NF
if("'$1'" == "y") bo = $(NF - 1) " " $NF
print de, le, bo
}
#printf "\n"
#else if($3 == "size") e = $('$index' + 1)
#else print s, e, $(NF - 1), $NF
}' $orw_config)
#done <<< $(awk -F '[_ ]' '/^display_[0-9]+/ && $3 != "name" {
#done <<< $(awk -F '[_ ]' '/^display_[0-9]+_(xy|size)/ {
# if($3 == "xy") s = $('$index' + 1)
# else print $2, s, $('$index' + 1) }' $orw_config)
| true |
d30435dcea16584e4aabd5e3347bfd9da25a40b6 | Shell | ciora/Cristian_Buchhandel | /Buchandel/customization/execute.sh | UTF-8 | 2,143 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# Usage: execute.sh [WildFly mode] [configuration file]
#
# The default mode is 'standalone' and default configuration is based on the
# mode. It can be 'standalone.xml' or 'domain.xml'.
echo "=> Executing Customization script"
JBOSS_HOME=/opt/jboss/wildfly
JBOSS_CLI=$JBOSS_HOME/bin/jboss-cli.sh
# Positional args (see usage comment above): $1 = mode, $2 = config file.
JBOSS_MODE=${1:-"standalone"}
JBOSS_CONFIG=${2:-"$JBOSS_MODE.xml"}
# Diagnostic output: connection URL, host mappings and full environment.
echo "Connection URL:" $CONNECTION_URL
echo "=> Hosts"
cat /etc/hosts
echo
echo "=> Umgebungsvariablen (env)"
env
# Poll the management CLI once per second until the server reports the
# "running" state. The original wrapped the pipeline in backticks, which ran
# the (empty) command-substitution OUTPUT as a command instead of the
# pipeline itself; it only worked by accident of bash's exit-status rules.
function wait_for_server() {
  until $JBOSS_CLI -c ":read-attribute(name=server-state)" 2> /dev/null | grep -q running; do
    sleep 1
  done
}
echo "=> Starting WildFly server"
# Boot in the background so this script can configure it via the CLI below.
$JBOSS_HOME/bin/$JBOSS_MODE.sh -b 0.0.0.0 -c $JBOSS_CONFIG > /dev/null &
# $JBOSS_HOME/bin/$JBOSS_MODE.sh -b 0.0.0.0 -c $JBOSS_CONFIG &
echo "=> Waiting for the server to boot"
wait_for_server
# Configure the Derby JDBC driver module and the BookDb datasource in one
# CLI batch. The heredoc delimiter is unquoted, so bash performs expansions
# on the body; the JBoss expression ${jboss.server.base.dir} must therefore
# be backslash-escaped -- unescaped it is a bash "bad substitution" error
# that aborts the heredoc and the batch never reaches the CLI.
$JBOSS_CLI -c << EOF
batch
module add \
  --name=org.apache.derby \
  --resources=/opt/jboss/wildfly/customization/derbyclient.jar \
  --resource-delimiter=, \
  --dependencies=javax.api,javax.transaction.api
/subsystem=datasources/jdbc-driver=derby:add(driver-name=derby, \
  driver-module-name=org.apache.derby, \
  driver-class-name=org.apache.derby.jdbc.ClientDriver, \
  driver-datasource-class-name=org.apache.derby.jdbc.ClientDataSource, \
  driver-xa-datasource-class-name=org.apache.derby.jdbc.ClientXADataSource)
data-source add \
  --name=dbDS \
  --driver-name=derby \
  --connection-url=jdbc:derby:\${jboss.server.base.dir}/db;create=true \
  --jndi-name=java:jboss/datasources/BookDb \
  --user-name=app \
  --password=app
# Execute the batch
run-batch
EOF
# Deploy the WAR
echo "=> Deploy the WAR"
cp /opt/jboss/wildfly/customization/Buchandel.war $JBOSS_HOME/$JBOSS_MODE/deployments/Buchandel.war

echo "=> Shutting down WildFly"
# Domain mode needs the per-host shutdown operation; standalone has :shutdown.
if [ "$JBOSS_MODE" = "standalone" ]; then
    $JBOSS_CLI -c ":shutdown"
else
    $JBOSS_CLI -c "/host=*:shutdown"
fi

echo "=> Restarting WildFly"
# $JBOSS_HOME/bin/$JBOSS_MODE.sh -b 0.0.0.0 -bmanagement 0.0.0.0 -c $JBOSS_CONFIG > /dev/null
$JBOSS_HOME/bin/$JBOSS_MODE.sh -b 0.0.0.0 -bmanagement 0.0.0.0 -c $JBOSS_CONFIG
692172091927f6c071b153d3d61869ae50b30047 | Shell | progerjkd/linearalgebra | /benchmark.sh | UTF-8 | 647 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# run as: ./benchmark.sh | tee log
rm input/*\.memory
for INPUT in `ls -Sr input/*_sym* | grep -v memory`; do
echo -e "\nInput file: ${INPUT}"
time ./runHHSimetrica3 ${INPUT} &
./meminfo.sh "${INPUT}.memory" >/dev/null &
PID=`pgrep HH`
while [ $PID ]; do
sleep 1
PID=`pgrep HH`
done
echo -e "\nInput file: ${INPUT}"
sleep 1
done
for INPUT in `ls -Sr input/*_asym* | grep -v memory`; do
echo -e "\nInput file: ${INPUT}"
time ./runHHAssimetrica3 ${INPUT}
./meminfo.sh "${INPUT}.memory" >/dev/null &
PID=`pgrep HH`
while [ $PID ]; do
sleep 1
PID=`pgrep HH`
done
echo -e "\nInput file: ${INPUT}"
sleep 1
done
| true |
9b2f56aaef731f673160618ee177b5ffc78e816a | Shell | lindsaymarkward/app-twitter | /install.sh | UTF-8 | 464 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
APPNAME="app-twitter"
VERSION="0.1.0"
LOCATION="apps"
FILENAME=${APPNAME}.tar.gz
echo "This script will download, install and run ${APPNAME}"
#sudo with-rw bash
cd /sphere/user-autostart/${LOCATION}
eval wget https://github.com/lindsaymarkward/${APPNAME}/releases/download/${VERSION}/${FILENAME}
mkdir ${APPNAME}
tar -xf ${FILENAME} -C ${APPNAME}
rm ${FILENAME}
nservice -q ${APPNAME} start
echo "Done... ${APPNAME} installed - hopefully :-)" | true |
74dc8126342c0a3f16d5e3af424937da59f2e7d5 | Shell | DevOpsTechy/Bash-Monitoring-Scripts | /remote-mysql-check.sh | UTF-8 | 203 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Probe the local MySQL port; nc -z records the connect result in a log file.
nc -v -z localhost 3306 > /root/mysqlcheck 2>&1
# grep -c counts matching lines directly, replacing the cat | grep | wc -l
# pipeline with a single process (same count, including the zero case).
UP=$(grep -c succeeded /root/mysqlcheck)
if [ "$UP" -ne 1 ];
then
echo "Mysql Is UnReachable"
else
echo "MySQL is Reachable"
fi
| true |
c153c5f2befb1c0c5148bd19b0bd46cf54674d7d | Shell | bcowgill/bsac-linux-cfg | /bin/cfg/debug-githook/fsmonitor-watchman | UTF-8 | 2,431 | 3.640625 | 4 | [] | no_license | #!/bin/sh
PRE="`date` fsmonitor-watchman:"
LOG=~/githook.log
echo === $PRE $0 entered ========================= >> $LOG
echo $PRE HOSTNAME: $HOSTNAME TYPE: $MACHTYPE $HOSTTYPE $OSTYPE LOGNAME: $LOGNAME USER: $USER HOME: $HOME >> $LOG
echo $PRE DISPLAY: $DISPLAY TERM: $TERM LANG: $LANG TZ: $TZ PWD: $PWD >> $LOG
echo $PRE SHLVL: $SHLVL -: $- UID: $UID EUID: $EUID PPID: $PPID WINDOWID: $WINDOWID COL: $COLUMNS LINES: $LINES $BASH $BASH_VERSION >> $LOG
echo $PRE EDITOR: $EDITOR >> $LOG
echo $PRE PATH: $PATH >> $LOG
echo $PRE GIT_DIR: $GIT_DIR >> $LOG
echo $PRE GIT_EDITOR: $GIT_EDITOR >> $LOG
echo $PRE core.hooksPath: `git config core.hooksPath` >> $LOG
echo $PRE core.fsmonitor: `git config core.fsmonitor` >> $LOG
if echo $SSH_ASKPASS | grep git-gui > /dev/null; then
echo $PRE git gui: yes >> $LOG
else
echo $PRE git gui: no >> $LOG
fi
echo $PRE Args: $* >> $LOG
echo $PRE Arg1: $1 >> $LOG
echo $PRE Arg2: $2 >> $LOG
echo $PRE Arg3: $3 >> $LOG
echo $PRE Arg4: $4 >> $LOG
echo $PRE Arg5: $5 >> $LOG
echo $PRE Arg6: $6 >> $LOG
echo $PRE Arg7: $7 >> $LOG
echo $PRE Arg8: $8 >> $LOG
echo $PRE Arg9: $9 >> $LOG
git status >> $LOG
unset PRE
set >> $LOG
echo --------------------------------------------- >> $LOG
# https://git-scm.com/docs/githooks
#
# This hook is invoked when the configuration option core.fsmonitor is set to .git/hooks/fsmonitor-watchman. It takes two arguments, a version (currently 1) and the time in elapsed nanoseconds since midnight, January 1, 1970.
#
# The hook should output to stdout the list of all files in the working directory that may have changed since the requested time. The logic should be inclusive so that it does not miss any potential changes. The paths should be relative to the root of the working directory and be separated by a single NUL.
#
# It is OK to include files which have not actually changed. All changes including newly-created and deleted files should be included. When files are renamed, both the old and the new name should be included.
#
# Git will limit what files it checks for changes as well as which directories are checked for untracked files based on the path names given.
#
# An optimized way to tell git "all files have changed" is to return the filename /.
#
# The exit status determines whether git will use the data from the hook to limit its search. On error, it will fall back to verifying all files and folders.
VERSION="$1"
ELAPSED_NANOSECS="$2"
echo /
| true |
53a09a3c3fd550da99160eace63fad1061dd4050 | Shell | divein/letian_homestead | /letian/shell/initial-setup.sh | UTF-8 | 2,096 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# =============================================
# initial-setup.sh
#
# @author wangxinyi <divein@126.com>
# ==============================================
export DEBIAN_FRONTEND=noninteractive
VAGRANT_CORE_FOLDER=$(echo "$1")
OS=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" ID)
CODENAME=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" CODENAME)
RELEASE=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" RELEASE)
cat "${VAGRANT_CORE_FOLDER}/shell/ascii-art/self-promotion.txt"
printf "\n"
echo ""
if [[ ! -d '/.letian-stuff' ]]; then
mkdir '/.letian-stuff'
echo 'Created directory /.letian-stuff'
fi
touch '/.letian-stuff/vagrant-core-folder.txt'
echo "${VAGRANT_CORE_FOLDER}" > '/.letian-stuff/vagrant-core-folder.txt'
if [[ ! -f '/.letian-stuff/initial-setup-repo-update-11052014' ]]; then
if [ "${OS}" == 'debian' ] || [ "${OS}" == 'ubuntu' ]; then
echo 'Running datestamped initial-setup apt-get update'
apt-get update >/dev/null
touch '/.letian-stuff/initial-setup-repo-update-11052014'
echo 'Finished running datestamped initial-setup apt-get update'
fi
fi
if [[ -f '/.letian-stuff/initial-setup-base-packages' ]]; then
exit 0
fi
if [ "${OS}" == 'debian' ] || [ "${OS}" == 'ubuntu' ]; then
echo 'Running initial-setup apt-get update'
apt-get update >/dev/null
echo 'Finished running initial-setup apt-get update'
echo 'Installing curl'
apt-get -y install curl >/dev/null
echo 'Finished installing curl'
echo 'Installing git'
apt-get -y install git-core >/dev/null
echo 'Finished installing git'
if [[ "${CODENAME}" == 'lucid' || "${CODENAME}" == 'precise' ]]; then
echo 'Installing basic curl packages'
apt-get -y install libcurl3 libcurl4-gnutls-dev curl >/dev/null
echo 'Finished installing basic curl packages'
fi
echo 'Installing build-essential packages'
apt-get -y install build-essential >/dev/null
echo 'Finished installing build-essential packages'
fi
touch '/.letian-stuff/initial-setup-base-packages'
| true |
1ad66f8c56096cf691c7c67ecb63343658444200 | Shell | eaglet3d/GebToGrid | /selenium_grid/chrome/selenium.node.register | UTF-8 | 711 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#help selenium.node.register # Register a node on this machine with the selenium grid hub
HUB_1_TCP_ADDR=192.168.99.100:4433
SeleniumVersion=2.53.0
ChromeVersion=2.21
seleniumPath="/var"
Platform=LINUX
logDir="/var/log"
# Check and create logs directory
[ ! -d "${logDir}" ] && mkdir -p "${logDir}"
logPath="${logDir}/$(date +"%Y-%m-%dT%H%M%S%z").log"
xvfb-run --server-args=":99.0 -screen 0 2000x2000x16 -ac" \
cd /var
java -jar "${seleniumPath}/selenium-server-standalone-${SeleniumVersion}.jar" \
-role node -hub http://${HUB_1_TCP_ADDR}/grid/register \
-browser "browserName=chrome,version=${ChromeVersion},platform=${Platform}" \
-browserTimeout=300 \
-log "${logPath}"
| true |
6bc31f7a4e9b9ead792bac02ba6915f2acf63588 | Shell | spurin/storageos-redis-demo | /00.taint_other_nodes.sh | UTF-8 | 772 | 2.6875 | 3 | [] | no_license | # Colour escape codes
CYAN='\033[1;34m'
GREEN='\033[1;32m'
NC='\033[0m'
# Untaint node1, Taint node2 and 3
kubectl taint nodes storageos-k8s1 exclusive=true:NoSchedule- --overwrite &> /dev/null; echo "✅ ${GREEN}UnTainted storageos-k8s1 - ${CYAN}kubectl taint nodes storageos-k8s1 exclusive=true:NoSchedule- --overwrite${NC}"
kubectl taint nodes storageos-k8s2 exclusive=true:NoSchedule --overwrite &> /dev/null && echo "⚠️ ${GREEN}Tainted storageos-k8s2 - ${CYAN}kubectl taint nodes storageos-k8s2 exclusive=true:NoSchedule --overwrite${NC}"
kubectl taint nodes storageos-k8s3 exclusive=true:NoSchedule --overwrite &> /dev/null && echo "⚠️ ${GREEN}Tainted storageos-k8s3 - ${CYAN}kubectl taint nodes storageos-k8s3 exclusive=true:NoSchedule --overwrite${NC}"
| true |
92243079fe52d8e52439ae2b74152d81861aa1ea | Shell | apptio/kr8-config-skel | /bin/component_drift | UTF-8 | 400 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
cluster=$1
component=$2
if [ -z "$component" ]
then
echo "Usage: $0 cluster component"
exit 1
fi
if [ -z "$cluster" ]
then
echo "Usage: $0 cluster component"
exit 1
fi
bin/deployer diff $cluster $component &> /dev/null
rc=$?
if [ "$rc" -ne 0 ]
then
echo $component has differences. Run bin/deployer diff $cluster $component for more information
exit 1
else
exit 0
fi
| true |
7ab7e29f6ff1002723659e71df0c835cc51f7a8c | Shell | god14fei/d2sc | /examples/basic_monitor/execute.sh | UTF-8 | 691 | 3.546875 | 4 | [] | no_license | #!/bin/bash
function usage {
echo "$0 CPU-LIST NT-ID [-p PRINT] [-n NF-ID]"
echo "$0 3 0 --> core 3, NT ID 0"
echo "$0 3,7,9 1 --> cores 3,7, and 9 with NT ID 1"
echo "$0 -p 1000 -n 6 3,7,9 1 --> cores 3,7, and 9 with NT ID 1 and Print Rate of 1000 and instance ID 6"
exit 1
}
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
cpu=$1
nt=$2
shift 2
if [ -z $nt ]
then
usage
fi
while getopts ":p:n" opt; do
case $opt in
p) print="-p $OPTARG";;
n) inst="-n $OPTARG";;
\?) echo "Unknown option -$OPTARG" && usage
;;
esac
done
exec sudo $SCRIPTPATH/build/app/monitor -l $cpu -n 3 --proc-type=secondary -- -t $nt $inst -- $print
| true |
d6f2159d32c43f717f1a7a47aaee1f042febcb4f | Shell | dsyer/nix-config | /scripts/bootstrap.sh | UTF-8 | 1,222 | 3.5 | 4 | [] | no_license | #!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage $0 <hostname-or-ip>"
exit 1
fi
remote=$1
shift
if ! [ -z $1 ]; then
port="${@}"
cp_port=`echo $port | sed -e 's/-p/-P/'`
shift
fi
ssh $remote $port mkdir -m 700 -p .ssh
ssh $remote $port test -e ~/.ssh/id_rsa || scp $cp_port ~/.ssh/id_rsa* $remote:~/.ssh
# ssh-copy-id -i ~/.ssh/id_rsa.pub $port $remote
rsync -e "ssh $port" --filter=':- .gitignore' -a -P . $remote:~/nix-config
ssh $port -T $remote << EOF
test -e /swapfile || (grep -q /swapfile /etc/fstab && (sudo fallocate -l 1G /swapfile && sudo chmod 600 /swapfile && sudo mkswap /swapfile))
if [ -d /etc/nixos ]; then
test -e /etc/nixos/hardware-configuration.nix || sudo nixos-generate-config
test -e /etc/nixos/configuration.nix && sudo mv /etc/nixos/configuration.nix /tmp
# TODO: parameterize the ${machine}. This one works for a plain console (headless) server.
sudo ln -nfs ~/nix-config/nix/machines/console.nix /etc/nixos/configuration.nix
sudo nixos-rebuild switch
fi
test -e ~/.config/nixpkgs/config.nix || (mkdir -p ~/.config/nixpkgs; ln -s ~/nix-config/home/.config/nixpkgs/* ~/.config/nixpkgs)
nix-env -q | grep -q user-packages || nix-env -i user-packages
EOF
| true |
b1cd0f637577ccc7f49bf12acdb2040a98a2f8e9 | Shell | ICRAR/bldr | /pkgs/system/100-papi.sh | UTF-8 | 3,076 | 3.5 | 4 | [] | no_license | #!/bin/bash
####################################################################################################
# import the BLDR system
####################################################################################################
source "bldr.sh"
####################################################################################################
# setup pkg definition and resource files
####################################################################################################
pkg_ctry="system"
pkg_name="papi"
pkg_default="5.5.1"
pkg_variants=("5.5.1")
pkg_info="The Performance API (PAPI) project specifies a standard application programming interface (API) for accessing hardware performance counters available on most modern microprocessors."
pkg_desc="The Performance API (PAPI) project specifies a standard application programming interface
(API) for accessing hardware performance counters available on most modern microprocessors.
These counters exist as a small set of registers that count Events, occurrences of specific signals
related to the processor's function. Monitoring these events facilitates correlation between the
structure of source/object code and the efficiency of the mapping of that code to the underlying
architecture. This correlation has a variety of uses in performance analysis including hand tuning,
compiler optimization, debugging, benchmarking, monitoring and performance modeling. In addition,
it is hoped that this information will prove useful in the development of new compilation technology
as well as in steering architectural development towards alleviating commonly occurring bottlenecks
in high performance computing."
pkg_opts="configure "
pkg_opts+="force-serial-build "
pkg_uses=""
pkg_reqs=""
pkg_cflags=""
pkg_ldflags=""
pkg_cfg=""
pkg_cfg_path="src"
if [ $BLDR_SYSTEM_IS_OSX == true ]
then
pkg_cfg+="--with-OS=darwin "
fi
####################################################################################################
# register each pkg version with bldr
####################################################################################################
for pkg_vers in ${pkg_variants[@]}
do
pkg_file="$pkg_name-$pkg_vers.tar.gz"
pkg_urls="http://icl.utk.edu/projects/papi/downloads/$pkg_file"
bldr_register_pkg \
--category "$pkg_ctry" \
--name "$pkg_name" \
--version "$pkg_vers" \
--default "$pkg_default" \
--info "$pkg_info" \
--description "$pkg_desc" \
--file "$pkg_file" \
--url "$pkg_urls" \
--uses "$pkg_uses" \
--requires "$pkg_reqs" \
--options "$pkg_opts" \
--cflags "$pkg_cflags" \
--ldflags "$pkg_ldflags" \
--config "$pkg_cfg" \
--config-path "$pkg_cfg_path"
done
####################################################################################################
| true |
ebba77dd6d871e258df2bd549dd4ef59c740f1f2 | Shell | volcain-io/exercism.io | /bash/leap/leap.sh | UTF-8 | 315 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
main() {
year="$1"
if [[ "$#" -ne 1 || ${year} =~ [a-zA-Z] || ${year} =~ [\-?\d+\.\d*] ]]; then
echo "Usage: leap.sh <year>"
return 1
fi
if (( ${year}%4 == 0 && ${year}%100 > 0 || ${year}%400 == 0 )); then
echo "true"
else
echo "false"
fi
return 0
}
main "$@"
| true |
6bb6051d5fe939889a38821d18b16911f380c04f | Shell | arobirosa/areco-deployment-script-manager | /integration_tests_with_ant/1_init_on_oracle/runTest.stoppingContainer.sh | UTF-8 | 1,768 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -x
#TODO
# * Add volume for DB
# exit when any command fails
set -e
### keep track of the last executed command
##trap 'last_command=$current_command; current_command=$BASH_COMMAND' DEBUG
### echo an error message before exiting
##trap 'echo "\"${last_command}\" command filed with exit code $?."' EXIT
ARECO_CURRENT_TEST_FOLDER=`dirname "$(realpath '$0')"`;
ARECO_HYBRIS_DIR=$(realpath $ARECO_CURRENT_TEST_FOLDER/../../hybris);
ARECO_DB_DATA_FOLDER=$(realpath $ARECO_CURRENT_TEST_FOLDER/../docker-volumes/oracle-xe/);
export COMPOSE_TLS_VERSION=TLSv1_2;
[[ -f $ARECO_HYBRIS_DIR/bin/platform/hybrisserver.sh ]] || (echo "Please configure ARECO_HYBRIS_DIR with the directory where SAP commerce is located." && exit 1);
[[ -d $ARECO_DB_DATA_FOLDER ]] || (echo "I can't found the shared directory with the database data" && exit 2);
cp -v $ARECO_CURRENT_TEST_FOLDER/../dbdriver/*.jar $ARECO_HYBRIS_DIR/bin/platform/lib/dbdriver/;
echo "Configuring database connection and other properties";
export HYBRIS_OPT_CONFIG_DIR=$ARECO_CURRENT_TEST_FOLDER;
cd ../..;
. ./setantenv.sh;
echo "START TEST";
echo "Clearing the database and data folder"
rm -rf $ARECO_HYBRIS_DIR/data/*;
docker-compose --file $ARECO_CURRENT_TEST_FOLDER/docker-compose.yml down;
# rm -rf $ARECO_DB_DATA_FOLDER/*;
docker-compose --file $ARECO_CURRENT_TEST_FOLDER/docker-compose.yml up -d;
$ARECO_CURRENT_TEST_FOLDER/../utils/wait-for-it.sh --host=127.0.0.1 --port=9500 --timeout=600 -- echo "Waiting for the oracle database to be ready";
echo "Run all the tests on master tenant"
ant clean all yunitinit qa;
## Clear the database and data folder
#rm -rf $ARECO_HYBRIS_DIR/data;
# Run all the tests with on junit tenant:
#ant clean all initialize yunitinit qa
echo "TEST SUCCESS"
| true |
a3c1948d08a222bccbcc4c5ef28b1ec3e8969b6d | Shell | dankerizer/bersihkan.sh | /bersihkan.sh | UTF-8 | 243 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
BASEDIR=$(dirname "$0")
read -p "All node_modules in this directory $BASEDIR will be delete ? (Y/n) :" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
find . -name 'node_modules' -type d -prune -print -exec rm -rf '{}' \;
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.