blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bbfa4c8e752ed0def122cd175b685d622ad9e60d
|
Shell
|
jorgediaz-lr/liferay-faster-deploy
|
/tomcat/catalinastart
|
UTF-8
| 4,475
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Resolve the directory containing this script so sibling helpers
# (getport.py, setenv.sh, releaseinfo, ../javahome, ../database/copydriver)
# can be located regardless of the caller's working directory.
SCRIPT_FOLDER=$(dirname ${BASH_SOURCE[0]})
# One-time configuration of a fresh Tomcat before first start:
# pick a free HTTP port, rewrite server.xml, enable JPDA in startup.sh,
# install a JRebel-aware setenv.sh, and seed LIFERAY_HOME/tomcat-users.xml.
# Returns 1 when no usable port could be allocated.
# Requires CATALINA_HOME and LIFERAY_HOME to be set; leaves the current
# directory in ${CATALINA_HOME}/bin on success.
catalinaconf() {
# Identify a usable port (getport.py prints one, or nothing on failure)
local TOMCAT_PORT=$(python ${SCRIPT_FOLDER}/getport.py)
if [ "" == "${TOMCAT_PORT}" ]; then
return 1
fi
# NOTE(review): assumes the allocated port ends in "80" (e.g. 8080 -> "80"
# prefix becomes "80"-less); ports not ending in 80 pass through unchanged
# and the sed below would not rewrite anything -- confirm getport.py's contract.
export TOMCAT_PORT_PREFIX=$(echo "${TOMCAT_PORT}" | sed 's/80$//')
# Update the port in server.xml to the usable port, always starting from
# a pristine copy (server.copy.xml) so re-runs don't compound rewrites.
cd ${CATALINA_HOME}/conf
if [ -f ${CATALINA_HOME}/conf/server.copy.xml ]; then
cp -f server.copy.xml server.xml
else
cp -f server.xml server.copy.xml
fi
sed -i 's/\"80/\"'"$TOMCAT_PORT_PREFIX"'/' server.xml
sed -i 's/unpackWARs=\"true\"/unpackWARs=\"false\"/' server.xml
# Startup with JPDA by default (the first sed makes the second idempotent
# across repeated runs of this function).
cd ${CATALINA_HOME}/bin
chmod u+x *.sh
sed -i 's/ jpda start / start /g' startup.sh
sed -i 's/ start / jpda start /g' startup.sh
# Copy setenv.sh that knows how to adjust the default JPDA port
# and use more memory. Only replace it when absent or not JRebel-aware.
if [ ! -f setenv.sh ] || [[ "" == "$(grep jrebel setenv.sh)" ]]; then
echo 'Adding JRebel-aware setenv.sh'
cp ${SCRIPT_FOLDER}/setenv.sh .
fi
# Make sure we have a portal-ext.properties in LIFERAY_HOME
touch $LIFERAY_HOME/portal-ext.properties
# If the Tomcat manager webapp is present, give it a known tomcat/tomcat
# admin account so the manager UI/scripts work out of the box.
if [ -d ${CATALINA_HOME}/webapps/manager ]; then
echo '<?xml version="1.0" encoding="utf-8"?><tomcat-users><user username="tomcat" password="tomcat" roles="manager-gui,manager-script,manager-status,manager-jmx"/></tomcat-users>' > ${CATALINA_HOME}/conf/tomcat-users.xml
fi
}
# Locate a running Tomcat (Catalina Bootstrap) JVM and store its pid in
# the global CATALINA_PID; clears CATALINA_PID and wipes the Tomcat logs
# when no candidate process is found.
catalinapid() {
CATALINA_PID=
local IS_RUNNING=
for PROCESS_ID in $(jps | grep Bootstrap | cut -d' ' -f 1); do
IS_RUNNING=$(ps --no-headers $PROCESS_ID | grep $LIFERAY_VERSION)
# NOTE(review): this selects a Bootstrap process whose ps line does NOT
# contain $LIFERAY_VERSION -- that looks inverted relative to the intent
# of "find the Tomcat for this Liferay version"; confirm against callers.
if [ "" == "$IS_RUNNING" ]; then
CATALINA_PID=${PROCESS_ID}
return 0
fi
done
# Delete existing logs if nothing is running
rm -f ${CATALINA_HOME}/logs/*
}
# Start (or attach to) the Liferay Tomcat instance.
#
# $1: "init" forces a direct start even when $LIFERAY_HOME/start_cluster exists.
# Returns 1 if release info cannot be loaded or port configuration fails.
# Relies on sibling functions javahome/catalinapid/catalinaconf/notifyfinish
# and on CATALINA_HOME / LIFERAY_HOME being set in the environment.
catalinastart() {
	. ${SCRIPT_FOLDER}/releaseinfo

	if [ "" == "${RELEASE_ID}" ]; then
		return 1
	fi

	# Pick the JDK matching the release line.
	# (The unconditional "javahome 8" that used to precede this chain was
	# redundant: every branch below selects a JDK anyway.)
	if [[ $RELEASE_ID -lt 6210 ]]; then
		javahome 6
	elif [[ $RELEASE_ID -lt 7000 ]]; then
		javahome 7
	else
		javahome 8
	fi

	# Delegate to a cluster start script when one is present (unless "init").
	if [ "init" != "$1" ] && [ -f $LIFERAY_HOME/start_cluster ]; then
		$LIFERAY_HOME/start_cluster
		return 0
	fi

	# If the server is already running, we won't need to start it.
	# Check if the server is running (sets CATALINA_PID).
	catalinapid

	CATALINA_OUT=${CATALINA_HOME}/logs/catalina.out

	if [ "" == "${CATALINA_PID}" ]; then
		catalinaconf

		if [[ 0 -ne $? ]]; then
			return 1
		fi

		# Start from a clean osgi/state folder, because a stale one can
		# cause problems after redeploy.
		if [ -d $LIFERAY_HOME/osgi/state ]; then
			rm -rf $LIFERAY_HOME/osgi/state
		fi

		# Copy the database driver
		echo "Try to copy the database driver!"
		. ${SCRIPT_FOLDER}/../database/copydriver

		# Start Tomcat under JPDA, teeing both streams into catalina.out
		# while keeping them live on the console.
		notifyfinish &
		${CATALINA_HOME}/bin/catalina.sh jpda run > >(tee -a ${CATALINA_OUT}) 2> >(tee -a ${CATALINA_OUT} >&2)
	elif [ -f ${CATALINA_OUT} ]; then
		# Already running: just follow the existing log.
		notifyfinish &
		tail -f ${CATALINA_OUT}
	else
		echo Failed to start tomcat!
	fi
}
# Select a JDK by major version (e.g. "javahome 8") by sourcing the shared
# helper one folder up. "$@" is quoted so arguments are forwarded intact.
javahome() {
	. ${SCRIPT_FOLDER}/../javahome "$@"
}
# Background watcher: waits for Tomcat to come up (or die) and then pops a
# desktop notification via notify-send. No-ops when notify-send is absent.
# Reads the globals CATALINA_PID / CATALINA_OUT maintained by the caller.
notifyfinish() {
if [ "" == "$(which notify-send)" ]; then
return 0
fi
# Wait until catalinapid can see a running instance...
while [ "" == "${CATALINA_PID}" ]; do
sleep 1
catalinapid
done
# ...and until the log file exists...
while [ ! -f "${CATALINA_OUT}" ]; do
sleep 1
done
# ...then poll for the startup banner, bailing out if the JVM dies first.
while [ "" == "$(grep -F 'Server startup' ${CATALINA_OUT})" ]; do
if ps -p ${CATALINA_PID} > /dev/null; then
sleep 1
else
break
fi
done
notify-send -i utilities-terminal -u "critical" "$(echo -e "${CATALINA_HOME}\n./catalina.sh jpda run")"
}
# Build and install the Tomcat APR/native connector into
# ${CATALINA_HOME}/lib/apr. Silently skips on non-Linux systems (no
# apt/yum), when already built, or when apr-config is missing.
tcnative() {
# If this isn't Linux, don't try to compile it
if [ "" == "$(which apt 2> /dev/null)" ] && [ "" == "$(which yum 2> /dev/null)" ]; then
return 0
fi
# If we already generated tcnative, then we don't have to do anything.
if [ -d ${CATALINA_HOME}/lib/apr ]; then
return 0
fi
# This script isn't very smart yet, so just assume the default location
# of apr-config is somewhere in /usr/bin.
if [ ! -f /usr/bin/apr-config ]; then
return 0
fi
# Now untar tcnative and build it
cd ${CATALINA_HOME}/bin
tar -zxf tomcat-native.tar.gz
# Pick the extracted source folder (last match wins).
TOMCAT_NATIVE_FOLDER=
for file in $(ls -1 ${CATALINA_HOME}/bin | grep tomcat-native | grep src); do
TOMCAT_NATIVE_FOLDER=$file
done
if [ ! -d ${CATALINA_HOME}/bin/$TOMCAT_NATIVE_FOLDER ]; then
return 1
fi
# Layout differs between tcnative versions: jni/native vs native.
if [ -d $TOMCAT_NATIVE_FOLDER/jni/native ]; then
cd $TOMCAT_NATIVE_FOLDER/jni/native
else
cd $TOMCAT_NATIVE_FOLDER/native
fi
./configure --with-apr=/usr --prefix=${CATALINA_HOME}/lib/apr && make && make install
}
# Entry point: forward all script arguments intact (quoted to preserve
# word boundaries; the unquoted $@ would re-split arguments with spaces).
catalinastart "$@"
| true
|
8ec9597697b59e21075d3f30e93d4de638b3c4e3
|
Shell
|
davidcoheny/Linux
|
/peer_check.sh
|
UTF-8
| 3,766
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
## Runs "Virtual Machine Checklist" checks on RHEL 7 / CentOS 7 and displays
## the results in color. Each "#NN" prefix corresponds to the row number on
## the checklist XLS sheet.
## Usage (remote): ssh USER@SERVER "bash -s" < peer_check.sh
## This script is expected to run as root.
# NOTE(review): "sudo -i" spawns an *interactive* root shell and blocks until
# it exits; the commands below do NOT run inside it. Presumably the intent
# was to re-exec the script as root -- confirm.
sudo -i
## Number 12 on the sheet - Compute & Storage Allocated (CPU/Memory/Storage)
echo -e " \e[39m"
PROCESSORS=`cat /proc/cpuinfo | grep processor | wc -l`
MEM=`free -h | grep Mem | awk -F " " '{print $2}'`
# NOTE(review): assumes the root filesystem is /dev/mapper/centos-root;
# DISKSPACE will be empty on other layouts.
DISKSPACE=`df -h| grep /dev/mapper/centos-root | awk -F " " '{print $2}'`
echo -e "#12 \e[32m$PROCESSORS CPU | Memory $MEM | Disk Space $DISKSPACE "
echo -e " \e[39m"
## Number 16 on the sheet - Virtualization Tools Installed (VMWare Tools)
VM_TOOLS=`rpm -qa | grep open-vm-tools`
if [ -z "$VM_TOOLS" ]; then
echo -e "#16 \e[31m########################### Vm Tools - NOT Installed!!! #################################"
echo -e " \e[39m"
else echo -e "#16 \e[32mVm Tools - Installed"
echo -e " \e[39m"
fi
## Number 17 on the sheet - Virtual Machine Host/Domain Name Configured
HOSTNAME=`hostname`
echo " "
echo -e "#17 \e[32mHostname: $HOSTNAME"
echo -e " \e[39m"
echo " "
## Number 18 on the sheet - Virtual Machine IP Address(es) Configured
## WILL DISPLAY ONLY THE MAIN IP ADDRESS !!
# Uses the route to 8.8.8.8 to discover which source address the default
# route would use.
IPADDR=`/usr/sbin/ip route get 8.8.8.8 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}'`
echo -e "#18 \e[32mLocal IP Address: $IPADDR"
PUBLIC_IP=`curl https://ifconfig.me/ 2> /dev/null`
echo -e " \e[32mPublic IP Address $PUBLIC_IP"
echo -e " \e[39m"
echo " "
## Number 19 - Local Firewall Configured & #24 - Firewall Rules configured to allow Customer Admin (RDP, SSH, etc)
FIREWALL_SERVICES=`firewall-cmd --list-services`
FIREWALL_PORTS=`firewall-cmd --list-ports`
FIREWALL_RICH_RULES=`firewall-cmd --list-rich-rules`
echo -e "#19,#24 \e[32mFirewalld Rules: $FIREWALL_SERVICES $FIREWALL_PORTS $FIREWALL_RICH_RULES"
echo -e " \e[39m"
## Number 20 - display timezone
TIMEZONE=`timedatectl | grep "Time zone"`
echo -e "#20 \e[32m$TIMEZONE"
echo -e " \e[39m"
##Number 21 - Time Server/NTP Setting/Virtual Machine timesync Configured
NTPD=`systemctl status ntpd | grep Active | awk -F " " '{print $3}' | grep running 2> /dev/null`
CHRONYD=`systemctl status chronyd | grep Active | awk -F " " '{print $3}' | grep running 2> /dev/null`
## check if ntpd is running; if not, check if chronyd is running
if [ -z "$NTPD" ] && [ -z "$CHRONYD" ]; then
echo -e "#21 \e[31mTimesync Configured NOT Running !!!"
echo -e "\e[39m"
else echo -e "#21 \e[32mTimesync Configured and Running"
echo -e "\e[39m"
fi
##Number 22 - Customer user account Account Created & #30 - Local Secura account created
echo -e "#22,#30,#42 \e[32mUsers Accounts:"
# NOTE(review): the regex "100*" matches "10" followed by zero or more "0"s
# anywhere in "name uid"; presumably intended to select UIDs >= 1000 -- confirm.
awk -F ":" '{print $1,$3}' /etc/passwd | grep 100* | awk '{print $1}'
echo -e "\e[39m"
#Number 23 - Customer user account added to Administrators/Sudoers/etc, #31 -Local Secura account added to Administrators/sudoers/etc, #42 - Add svc_cwa Automate account as local admin
echo -e "#23,#31 \e[32mGroup $(getent group wheel)"
echo -e "\e[39m"
#Number 40 - Web Protect - ESET
ESET=`pgrep esets`
if [ -z "$ESET" ]; then
echo -e "#40 \e[31m########################## Eset Is NOT Running!!! ###############################################"
echo -e " \e[39m"
else echo -e "#40 \e[32mEset Is Running"
echo -e " \e[39m"
fi
#Number 41 - Install the Automate Agent
LTECHAGENT=`systemctl status ltechagent | grep Active | awk -F " " '{print $3}' | grep running 2> /dev/null`
if [ -z "$LTECHAGENT" ]; then
echo -e "#41 \e[31mltechagent NOT Running !!!"
echo -e "\e[39m"
else echo -e "#41 \e[32mltechagent Is Running"
echo -e "\e[39m"
fi
| true
|
847aaad5bde77634b5a9bb6af0729e995b199968
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/gamera-git/PKGBUILD
|
UTF-8
| 703
| 2.609375
| 3
|
[] |
no_license
|
# Maintainer: Carsten Feuls <archlinux@carstenfeuls.de>
# Arch Linux PKGBUILD for the development (git HEAD) build of Gamera.
pkgname=gamera-git
pkgver=3.4.2.r42.g51a0c59bd547
pkgrel=1
pkgdesc="Framework for building document analysis applications."
arch=('i686' 'x86_64' 'armv5h' 'armv6h' 'armv7h')
url="http://gamera.informatik.hsnr.de"
license=('GPL')
depends=('python2' 'libtiff' 'libpng' 'wxpython' )
source=('git+https://github.com/hsnr-gamera/gamera.git')
sha512sums=('SKIP')
# Derive the VCS version: "TAG.rCOMMITS.gHASH" when a tag is reachable,
# otherwise fall back to "rCOUNT.HASH".
pkgver() {
cd gamera
git describe --long --tags 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
cd gamera
python2 setup.py build
}
package() {
cd gamera
python2 setup.py install --root=${pkgdir} --prefix=/usr
}
| true
|
107fa0fcd356e502f2281a1e98bbb2d2a6d24dea
|
Shell
|
xia1990/LEYU_TECH
|
/编译脚本/quick_build.sh
|
UTF-8
| 20,792
| 2.75
| 3
|
[] |
no_license
|
# Global configuration for the quick build script. Must be run from the
# Android source tree root (WsRootDir is taken from the current directory).
# NOTE(review): RELEASEPATH is seeded from $1 here at top level and later
# overwritten by analyze_args -- confirm the top-level $1 path is still used.
WsRootDir=`pwd`
MY_NAME=`whoami`
CONFIGPATH=$WsRootDir/device/ginreen
ARM=arm64
KERNELCONFIGPATH=$WsRootDir/kernel-3.18/arch/$ARM/configs
CUSTOMPATH=$WsRootDir/device/ginreen
RELEASEPATH=$1
BUILDINFO=$WsRootDir/build-log/buildinfo
RELEASE_PARAM=all
LOG_PATH=$WsRootDir/build-log
BASE_PRJ=gr6750_66_r_n
Mode_Path=$WsRootDir/vendor/mediatek/proprietary/modem/gr6750_66_r_n_lwctg_mp5
CPUCORE=8
# Build knobs filled in by analyze_args:
PRODUCT=
VARIANT=
ACTION=
MODULE=
ORIGINAL=
COPYFILES=
# Remove preloader build outputs for $PRODUCT.
# With ORIGINAL=yes the work is delegated to the top-level "make clean-pl"
# target; otherwise the per-product output files are deleted directly.
clean_pl()
{
	if [ x$ORIGINAL == x"yes" ]; then
		rm $LOG_PATH/pl.log
		make clean-pl
		return $?
	fi
	OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
	PL_OUT_PATH=$OUT_PATH/obj/PRELOADER_OBJ
	rm -f $LOG_PATH/pl.log
	rm -f $OUT_PATH/preloader_$PRODUCT.bin
	rm -rf $PL_OUT_PATH
	result=$?
	return $result
}
# Build the preloader for $PRODUCT and copy the resulting .bin into the
# product output folder. With ORIGINAL=yes the top-level "make pl" is used.
build_pl()
{
if [ x$ORIGINAL == x"yes" ]; then
make -j$CPUCORE pl 2>&1 | tee $LOG_PATH/pl.log
return $?
else
OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
PL_OUT_PATH=$OUT_PATH/obj/PRELOADER_OBJ
cd vendor/mediatek/proprietary/bootable/bootloader/preloader
PRELOADER_OUT=$PL_OUT_PATH TARGET_PRODUCT=$PRODUCT ./build.sh 2>&1 | tee $LOG_PATH/pl.log
# NOTE(review): because of the pipe, $? here is tee's status, not
# build.sh's -- confirm whether build failures should propagate.
result=$?
cd -
cp $PL_OUT_PATH/bin/preloader_$PRODUCT.bin $OUT_PATH
return $result
fi
}
# Remove kernel build outputs for $PRODUCT.
# With ORIGINAL=yes the work is delegated to "make clean-kernel";
# otherwise boot.img and the kernel object tree are deleted directly.
clean_kernel()
{
	if [ x$ORIGINAL == x"yes" ]; then
		rm $LOG_PATH/k.log
		make clean-kernel
		return $?
	fi
	OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
	KERNEL_OUT_PATH=$OUT_PATH/obj/KERNEL_OBJ
	rm -f $LOG_PATH/k.log
	rm -f $OUT_PATH/boot.img
	rm -rf $KERNEL_OUT_PATH
	result=$?
	return $result
}
# Build the kernel out-of-tree and copy zImage-dtb into the product output.
# Chooses the (non-)debug defconfig from VARIANT. With ORIGINAL=yes the
# top-level "make kernel" target is used instead.
build_kernel()
{
if [ x$ORIGINAL == x"yes" ]; then
make -j$CPUCORE kernel 2>&1 | tee $LOG_PATH/k.log
return $?
else
cd kernel-3.18
if [ x$VARIANT == x"user" ] || [ x$VARIANT == x"userroot" ];then
defconfig_files=${PRODUCT}_defconfig
else
defconfig_files=${PRODUCT}_debug_defconfig
fi
KERNEL_OUT_PATH=../out/target/product/$PRODUCT/obj/KERNEL_OBJ
mkdir -p $KERNEL_OUT_PATH
# Single-pass "while" used as a break-on-error trampoline.
while [ 1 ]; do
make O=$KERNEL_OUT_PATH ARCH=$ARM ${defconfig_files}
result=$?; if [ x$result != x"0" ];then break; fi
#make -j$CPUCORE -k O=$KERNEL_OUT_PATH Image modules
# NOTE(review): with the pipe into tee, $? below reflects tee, not make.
make -j$CPUCORE O=$KERNEL_OUT_PATH ARCH=$ARM CROSS_COMPILE=$WsRootDir/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android- 2>&1 | tee $LOG_PATH/k.log
result=$?; if [ x$result != x"0" ];then break; fi
cp $KERNEL_OUT_PATH/arch/$ARM/boot/zImage-dtb ../out/target/product/$PRODUCT/kernel
break
done
cd -
return $result
fi
}
# Remove LK (little kernel bootloader) build outputs for $PRODUCT.
# With ORIGINAL=yes the work is delegated to "make clean-lk";
# otherwise lk.bin, logo.bin and the object tree are deleted directly.
clean_lk()
{
	if [ x$ORIGINAL == x"yes" ]; then
		rm $LOG_PATH/lk.log
		make clean-lk
		return $?
	fi
	OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
	LK_OUT_PATH=$OUT_PATH/obj/BOOTLOADER_OBJ
	rm -f $LOG_PATH/lk.log
	rm -f $OUT_PATH/lk.bin $OUT_PATH/logo.bin
	rm -rf $LK_OUT_PATH
	result=$?
	return $result
}
# Build LK (the bootloader) for $PRODUCT and copy lk.bin / logo.bin into
# the product output folder. With ORIGINAL=yes "make lk" is used instead.
build_lk()
{
if [ x$ORIGINAL == x"yes" ]; then
make -j$CPUCORE lk 2>&1 | tee $LOG_PATH/lk.log
return $?
else
OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
LK_OUT_PATH=$OUT_PATH/obj/BOOTLOADER_OBJ
mkdir -p $LK_OUT_PATH
cd vendor/mediatek/proprietary/bootable/bootloader/lk
export BOOTLOADER_OUT=$LK_OUT_PATH
export MTK_PUMP_EXPRESS_SUPPORT=yes
# NOTE(review): $? below is tee's status, not make's.
make -j$CPUCORE $PRODUCT 2>&1 | tee $LOG_PATH/lk.log
result=$?
cd -
cp $LK_OUT_PATH/build-$PRODUCT/lk.bin $OUT_PATH
cp $LK_OUT_PATH/build-$PRODUCT/logo.bin $OUT_PATH
return $result
fi
}
# Restore the source tree to a pristine state after user confirmation:
# discards all uncommitted changes and untracked files in every repo
# project, and deletes the build logs and the out/ tree. DESTRUCTIVE.
# Always terminates the script (exit 0).
revert_code()
{
echo -e "\033[33mIt's going to revert your code.\033[0m"
read -n1 -p "Are you sure? [Y/N]" answer
case $answer in
Y | y )
echo "";;
*)
echo -e "\n"
exit 0 ;;
esac
echo "Start revert Code...."
echo "repo forall -c \"git clean -df\""
repo forall -c "git clean -df"
echo "repo forall -c \"git co .\""
repo forall -c "git co ."
echo "rm -rf $LOG_PATH/*"
rm -rf $LOG_PATH/*
echo "rm -rf out"
rm -rf out
echo -e "\033[33mComplete revert code.\033[0m"
exit 0
}
# Parse the positional build keywords into the global knobs:
#   PRODUCT, VARIANT, ACTION, MODULE, RELEASE_PARAM, COPYFILES, ORIGINAL.
# $1 must be a known product name (exits 1 otherwise); $2..$5 may appear
# in any order. For each category the FIRST matching keyword wins -- the
# "already set -> continue" guards make later duplicates no-ops. Any word
# that matches nothing becomes MODULE (used by the mmm/mmma actions).
function analyze_args()
{
### set PRODUCT
PRODUCT=$1
case $PRODUCT in
UBT|U195|X100)
echo "PRODUCT=$PRODUCT"
RELEASEPATH=$PRODUCT
ORIGINAL=yes
;;
*)
echo "PRODUCT name error $PRODUCT!!!"
exit 1
;;
esac
command_array=($2 $3 $4 $5)
for command in ${command_array[*]}; do
### set VARIANT
if [ x$command == x"user" ] ;then
if [ x$VARIANT != x"" ];then continue; fi
VARIANT=user
elif [ x$command == x"debug" ] ;then
if [ x$VARIANT != x"" ];then continue; fi
VARIANT=userdebug
elif [ x$command == x"eng" ] ;then
if [ x$VARIANT != x"" ];then continue; fi
VARIANT=eng
elif [ x$command == x"userroot" ] ;then
if [ x$VARIANT != x"" ];then continue; fi
VARIANT=userroot
### set ACTION
elif [ x$command == x"r" ] || [ x$command == x"remake" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=remake
elif [ x$command == x"n" ] || [ x$command == x"new" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=new
elif [ x$command == x"c" ] || [ x$command == x"clean" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=clean
RELEASE_PARAM=none
elif [ x$command == x"m" ] || [ x$command == x"make" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=m
RELEASE_PARAM=none
elif [ x$command == x"revert" ] ;then
if [ x$ACTION != x"" ];then continue; fi
ACTION=revert
RELEASE_PARAM=none
elif [ x$command == x"mmma" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=mmma
RELEASE_PARAM=none
elif [ x$command == x"mmm" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=mmm
RELEASE_PARAM=none
elif [ x$command == x"api" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=update-api
RELEASE_PARAM=none
elif [ x$command == x"boot" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=bootimage
RELEASE_PARAM=boot
elif [ x$command == x"system" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=systemimage
RELEASE_PARAM=system
elif [ x$command == x"userdata" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=userdataimage
RELEASE_PARAM=userdata
elif [ x$command == x"boot-nodeps" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=bootimage-nodeps
RELEASE_PARAM=boot
elif [ x$command == x"snod" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=snod
RELEASE_PARAM=system
elif [ x$command == x"userdata-nodeps" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=userdataimage-nodeps
RELEASE_PARAM=userdata
elif [ x$command == x"ramdisk-nodeps" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=ramdisk-nodeps
RELEASE_PARAM=boot
elif [ x$command == x"recovery" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=recoveryimage
RELEASE_PARAM=recovery
elif [ x$command == x"cache" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=cacheimage
RELEASE_PARAM=none
elif [ x$command == x"otapackage" ] || [ x$command == x"ota" ] ;then
if [ x$ACTION != x"" ];then continue; fi
ACTION=otapackage
RELEASE_PARAM=ota
elif [ x$command == x"otadiff" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=otadiff
RELEASE_PARAM=none
elif [ x$command == x"cts" ];then
if [ x$ACTION != x"" ];then continue; fi
ACTION=cts
RELEASE_PARAM=none
### set ORIGINAL
#elif [ x$command == x"o" ];then
#if [ x$ORIGINAL != x"" ];then continue; fi
#ORIGINAL=yes
### set COPYFILES
elif [ x$command == x"fc" ];then
if [ x$COPYFILES != x"" ];then continue; fi
COPYFILES=yes
elif [ x$command == x"nc" ];then
if [ x$COPYFILES != x"" ];then continue; fi
COPYFILES=no
### set MODULE
elif [ x$command == x"pl" ];then
if [ x$MODULE != x"" ];then continue; fi
MODULE=pl
RELEASE_PARAM=pl
elif [ x$command == x"k" ] || [ x$command == x"kernel" ];then
if [ x$MODULE != x"" ];then continue; fi
MODULE=k
RELEASE_PARAM=boot
elif [ x$command == x"lk" ];then
if [ x$MODULE != x"" ];then continue; fi
MODULE=lk
RELEASE_PARAM=lk
#elif [ x$command == x"dr" ];then
#if [ x$MODULE != x"" ];then continue; fi
#MODULE=dr
#RELEASE_PARAM=system
else
if [ x$MODULE != x"" ];then continue; fi
MODULE=$command
fi
done
# Defaults when the keywords left a knob unset.
if [ x$VARIANT == x"" ];then VARIANT=eng; fi
#if [ x$ORIGINAL == x"" ];then ORIGINAL=no; fi
if [ x$ACTION == x"clean" ];then RELEASE_PARAM=none; fi
# Custom files are only copied for a full "new" build by default.
if [ x$COPYFILES == x"" ];then
if [ x$ACTION == x"new" ] && [ x$MODULE == x"" ];then
COPYFILES=yes;
else
COPYFILES=no;
fi
fi
}
# Top-level driver: parse arguments, verify build variant and disk space,
# optionally copy custom files, set up the Android build environment, and
# dispatch on ACTION (new/remake/clean, mmm/mmma/m, or image targets).
# On success it releases (and efuses) the built images.
function main()
{
##################################################################
#Check parameters
##################################################################
if [ ! -d $LOG_PATH ];then
mkdir $LOG_PATH
fi
analyze_args $1 $2 $3 $4 $5
if [ x$ACTION == x"revert" ];then
revert_code
fi
### Check VARIANT WHEN NOT NEW
Check_Variant
echo "PRODUCT=$PRODUCT VARIANT=$VARIANT ACTION=$ACTION MODULE=$MODULE COPYFILES=$COPYFILES ORIGINAL=$ORIGINAL"
echo "Log Path $LOG_PATH"
if [ x$ACTION == x"" ];then
echo -e "\033[31m !!!!!! No Such Action =$ACTION ======!!!! \033[0m"
exit 1
fi
##################################################################
#Prepare
##################################################################
Check_Space
# NOTE(review): CUSTOM_FILES_PATH is set but never used below -- confirm.
CUSTOM_FILES_PATH="./wind/custom_files"
#Lancelot -s
if [ x$COPYFILES == x"yes" ];then
remove_mode_files #xuweitao@wind-mobi.com 20180316
copy_custom_files
fi
build_Project_Config
#Lancelot -e
###################################################################
#Start build
###################################################################
echo "Build started `date +%Y%m%d_%H%M%S` ..."
echo;echo;echo;echo
source build/envsetup.sh
# userroot builds reuse the "user" lunch combo (rooting is applied via
# MTK_BUILD_ROOT below).
if [ x$VARIANT == x"userroot" ] ; then
lunch full_$PRODUCT-user
else
lunch full_$PRODUCT-$VARIANT
fi
##source mbldenv.sh
#lisonglin@wind-mobi.com source ./change_java.sh 1.7
OUT_PATH=$WsRootDir/out/target/product/$PRODUCT
case $ACTION in
new | remake | clean)
# M = run a build, C = run a clean; "new" does both.
M=false; C=false;
if [ x$ACTION == x"new" ];then M=true; C=true;
elif [ x$ACTION == x"remake" ];then
M=true;
find $OUT_PATH/ -name 'build.prop' -exec rm -rf {} \;
else C=true;
fi
case $MODULE in
pl)
if [ x$C == x"true" ];then clean_pl; result=$?; fi
if [ x$M == x"true" ];then build_pl; result=$?; fi
;;
k)
if [ x$C == x"true" ];then clean_kernel; result=$?; fi
if [ x$M == x"true" ];then
build_kernel; result=$?
if [ $result -eq 0 ];then make -j$CPUCORE bootimage-nodeps; result=$?; fi
fi
;;
lk)
if [ x$C == x"true" ];then clean_lk; result=$?; fi
if [ x$M == x"true" ];then build_lk; result=$?; fi
;;
*)
if [ x"$MODULE" == x"" ];then
# NOTE(review): "rm $LOG_PATH" targets the log *directory* without
# -r and will fail; likely intended "rm -rf $LOG_PATH/*" -- confirm.
if [ x$C == x"true" ];then make clean; rm $LOG_PATH; fi
if [ x$M == x"true" ];then
if [ x$VARIANT == x"userroot" ] ; then
echo "make userroot version"
make MTK_BUILD_ROOT=yes -j$CPUCORE 2>&1 | tee $LOG_PATH/build.log; result=$?;
else
#echo "lisonglin@wind-mobi.com make -j$CPUCORE 2>&1 | tee $LOG_PATH/build.log; result=$?;"
make -j$CPUCORE 2>&1 | tee $LOG_PATH/build.log; result=$?;
fi
fi
else
echo -e "\033[31m !!!!!! No Such module ==$MODULE !!!! \033[0m"
exit 1
fi
;;
esac
;;
mmma | mmm | m)
# NOTE(review): with the pipe into tee, result holds tee's status here.
$ACTION $MODULE 2>&1 | tee $LOG_PATH/$ACTION.log; result=$?
;;
update-api | bootimage | systemimage | recoveryimage | userdataimage | cacheimage | snod | bootimage-nodeps | userdataimage-nodeps | ramdisk-nodeps | otapackage | otadiff | cts) make -j$CPUCORE $ACTION 2>&1 | tee $LOG_PATH/$ACTION.log; result=$?
;;
esac
# Release: single-module apk copy for mmm/mmma/m, full version release
# (with efuse signing) otherwise.
if [ $result -eq 0 ] && [ x$ACTION == x"mmma" -o x$ACTION == x"mmm" -o x$ACTION == x"m" ];then
echo "Start to release module ...."
DIR=`echo $MODULE | sed -e 's/:.*//' -e 's:/$::'`
NAME=${DIR##*/}
TARGET=out/target/product/${PRODUCT}/obj/APPS/${NAME}_intermediates/package.apk
if [ -f $TARGET ];then
cp -f $TARGET /data/mine/test/MT6572/${MY_NAME}/${NAME}.apk
fi
elif [ $result -eq 0 ] && [ $RELEASE_PARAM != "none" ]; then
echo "Build completed `date +%Y%m%d_%H%M%S` ..."
echo "Start to release version ...."
echo " lisonglin@wind-mobi.com ${RELEASEPATH} ${RELEASE_PARAM}"
echo "Start to efuse version ...."
build_efuse
echo "finish to efuse version ...."
./release_version.sh ${RELEASEPATH} ${RELEASE_PARAM} efuse
fi
}
#xuweitao@wind-mobi.com 20180621 start
# Re-lunch the build combo and run the MediaTek sign-image script so the
# produced images are efuse-signed. userroot builds reuse the "user" combo.
function build_efuse()
{
	source build/envsetup.sh
	local lunch_variant=$VARIANT
	if [ x$VARIANT == x"userroot" ] ; then
		lunch_variant=user
	fi
	lunch full_$PRODUCT-$lunch_variant
	./vendor/mediatek/proprietary/scripts/sign-image/sign_image.sh;
	# Give the (asynchronous) signing step time to settle before release.
	sleep 25;
}
#xuweitao@wind-mobi.com 20180621 end
# Overlay the per-product custom files onto the tree via the wind helper,
# using BASE_PRJ as the template project.
function copy_custom_files()
{
echo "Start custom copy files..."
./wind/scripts/copyfiles.pl $PRODUCT $BASE_PRJ
echo "Copy custom files finish!"
}
#xuweitao@wind-mobi.com 20180316 start
function remove_mode_files()
{
echo "Start remove mode files..."
if [ -d $Mode_Path ];then
rm -rf $Mode_Path
fi
echo "Remove mode files finish!"
}
#xuweitao@wind-mobi.com 20180316 end
# Export every KEY=VALUE line from ./version into the environment, plus
# the release date and the wind-specific build variables derived from the
# globals CPUCORE / PRODUCT.
function export_Config
{
while read line; do
export $line
done < ./version
TIME=`date +%F`
export RELEASE_TIME=$TIME
export WIND_CPUCORES=$CPUCORE
export WIND_PROJECT_NAME_CUSTOM=$PRODUCT
export WIND_OPTR_NAME_CUSTOM=$PRODUCT
export KERNEL_VER=alcatel-kernel
}
# Generate the per-product project configuration (wind pjc.pl) and export
# the release environment variables.
function build_Project_Config()
{
./wind/scripts/pjc.pl $PRODUCT
export_Config
#lisonglin@wind-mobi.com start
#config_custom_audio_boot
#lisonglin@wind-mobi.com end
}
#lisonglin@wind-mobi.com start
# Copy per-product boot logo, boot animation/audio config and display
# header into the LK tree and frameworks sound config, with an optional
# per-customer ($CUSTPRJ) overlay on top of commonRes.
# NOTE(review): CUSTPRJ is never assigned in this script -- presumably it
# comes from the environment / sourced config; confirm before re-enabling
# this function (it is currently commented out in build_Project_Config).
function config_custom_audio_boot()
{
echo " XXXXXXXXX lisonglin@wind-mobi.com start config_custom_audio_boot "
# Extract the BOOT_LOGO folder name from the product's ProjectConfig.mk.
logoPath=`cat $WsRootDir/device/ginreen/$PRODUCT/ProjectConfig.mk | grep "BOOT_LOGO =" | awk -F = '{printf $2}' | sed s/[[:space:]]//g`
if [ x$logoPath = x"" ];then
echo " generate logo failed logoPath=$logoPath== is null"
else
echo "lisonglin@wind-mobi.com ===get project logo folder path success=====$logoPath======== "
fi
#copy prj overcopy folder
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/overcopy/* ./
if [ ! -d $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/ ];then
mkdir $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/
fi
if [ -d $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/LOGO/$logoPath ];then
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/LOGO/$logoPath $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/
fi
#copy boot anim and boot audio ,and system sounds config start
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/AllAudio.mk $WsRootDir/frameworks/base/data/sounds/AllAudio.mk
#copy boot logo if exist
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/LOGO/${logoPath}_kernel.bmp $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/LOGO/${logoPath}_uboot.bmp $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/
cp -a $WsRootDir/device/ginreen/$PRODUCT/commonRes/boot/LOGO/cust_display.h $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/target/$PRODUCT/include/target/
# Customer-specific overlay wins over commonRes when CUSTPRJ is set.
if [ x$CUSTPRJ != x"commonRes" ];then
echo " XXXXXXXXX lisonglin@wind-mobi.com start config_custom_audio_boot copy custom logo "
if [ -d $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/LOGO/$logoPath ];then
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/LOGO/$logoPath $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/
fi
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/overcopy/* ./
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/AllAudio.mk $WsRootDir/frameworks/base/data/sounds/AllAudio.mk
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/LOGO/${logoPath}_kernel.bmp $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/LOGO/${logoPath}_uboot.bmp $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/dev/logo/$logoPath/
cp -a $WsRootDir/device/ginreen/$PRODUCT/$CUSTPRJ/boot/LOGO/cust_display.h $WsRootDir/vendor/mediatek/proprietary/bootable/bootloader/lk/target/$PRODUCT/include/target/
fi
}
#lisonglin@wind-mobi.com end
# Abort the build unless the filesystem holding the workspace has more
# than 30G free (any terabyte-sized value passes unconditionally).
# NOTE(review): only "G" and "T" suffixes from `df -lh` are handled; an
# "M"-sized or fractional value (e.g. "1.5G") falls through to Space=0 /
# a non-integer compare -- confirm acceptable on the build hosts.
function Check_Space()
{
UserHome=`pwd`
Space=0
# First path component of the workspace, used to pick the df row.
Temp=`echo ${UserHome#*/}`
Temp=`echo ${Temp%%/*}`
ServerSpace=`df -lh $UserHome | grep "$Temp" | awk '{print $4}'`
if echo $ServerSpace | grep -q 'G'; then
Space=`echo ${ServerSpace%%G*}`
elif echo $ServerSpace | grep -q 'T';then
TSpace=1
fi
echo -e "\033[34m Log for Space $UserHome $ServerSpace $Space !!!\033[0m"
if [ x"$TSpace" != x"1" ] ;then
if [ "$Space" -le "30" ];then
echo -e "\033[31m No Space!! Please Check!! \033[0m"
exit 1
fi
fi
}
# Compare the requested VARIANT with ro.build.type in the existing
# out/ build.prop. On mismatch: for a "new" single-module (k/pl/lk)
# build, warn and give the user 9 seconds to abort; otherwise silently
# switch VARIANT to match the existing output tree. No-op when no
# build.prop exists yet or its type is unrecognized.
function Check_Variant()
{
buildProp=$WsRootDir/out/target/product/$PRODUCT/system/build.prop
if [ -f $buildProp ] ; then
buildType=`grep ro.build.type $buildProp | cut -d "=" -f 2`
if [ x$buildType != x"user" ] && [ x$buildType != x"userdebug" ] && [ x$buildType != x"eng" ] ; then return; fi
if [ x$VARIANT != x$buildType ]; then
if [ x$ACTION == x"new" ] ; then
if [ x$MODULE == x"k" ] || [ x$MODULE == x"pl" ] || [ x$MODULE == x"lk" ] ; then
echo -e "\033[35mCode build type is\033[0m \033[31m$buildType\033[35m, your input type is\033[0m \033[31m$VARIANT\033[0m"
echo -e "\033[35mIf not correct, Please enter \033[31mCtrl+C\033[35m to Stop!!!\033[0m"
for i in $(seq 9|tac); do
echo -e "\033[34m\aLeft seconds:(${i})\033[0m"
sleep 1
done
echo
fi
else
echo -e "\033[35mCode build type is\033[0m \033[31m$buildType\033[35m, your input type is\033[0m \033[31m$VARIANT\033[0m"
echo -e "\033[35mChange build type to \033[31m$buildType\033[0m"
echo
VARIANT=$buildType
fi
fi
else
return;
fi
}
# Entry point; only the first five arguments are honored (see analyze_args).
main $1 $2 $3 $4 $5
| true
|
132b0e3ada844ed24a9f8450a88c0ae9fabf7c9f
|
Shell
|
xPMo/i3wm-config
|
/LIB/i3blocks/playerctl.sh
|
UTF-8
| 1,607
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# i3blocks block: show the current playerctl track (pango markup),
# toggling play/pause on left click. Exits via set -e if playerctl fails.
set -e
# IFS is set to newline only; it is also reused below as a literal newline.
IFS=$'\n'
# Left click (BLOCK_BUTTON=1): toggle play/pause
[[ ${BLOCK_BUTTON:-0} = 1 ]] && playerctl play-pause
player_status=$(playerctl status)
# Escape the characters special in Pango markup (&, <, >) so arbitrary
# track metadata can be embedded safely in the block's markup output.
# The '&' substitution must run first so entity ampersands are not
# re-escaped. (The previous replacements had been reduced to no-ops --
# e.g. 's/&/\&/g' substitutes the match for itself -- after the entity
# names were stripped.)
pango_escape() {
	sed -e 's/&/\&amp;/g; s/</\&lt;/g; s/>/\&gt;/g'
}
# exit 0 lets the block text be cleared
# NOTE(review): $player_status is unquoted below; it is a single word for
# the known playerctl statuses, but quote if other players report phrases.
if [ -z $player_status ]; then exit 0; fi
if [ $player_status = Stopped ]; then
# full text, short text, then color (i3blocks protocol lines)
printf "⏹\\n⏹\\n#073642"
else
title="$(playerctl metadata title | pango_escape)"
artist="$(playerctl metadata artist | pango_escape)"
# Combined length decides whether the full text gets <small> wrapping.
length="${title:- }${artist:- }"
# small text if longer than 20 characters
[ ${#length} -gt 20 ] && p="$p<small>"
# Substring: if title>32, substring 0-31 with ellipsis
[ ${#title} -gt 32 ] && p="$p${title:0:31}‥ - " || p="$p$title - "
# Substring: if artist>20, substring 0-19 with ellipsis
[ ${#artist} -gt 20 ] && p="$p${artist:0:19}‥" || p="$p$artist"
[ ${#length} -gt 20 ] && echo "$p</small>" || echo $p
# Short text, always small
echo -n "<small>"
# Substring: if title>12, substring 0-11 with ellipsis
[ ${#title} -gt 12 ] && echo -n "${title:0:11}‥|" || echo -n "$title|"
# Substring: if artist>8, substring 0-7 with ellipsis
[ ${#artist} -gt 8 ] && echo -n "${artist:0:7}‥" || echo -n "$artist"
echo "</small>"
# Third protocol line: color the block yellow-ish when paused.
[ $player_status = Paused ] && echo -n "#cccc00"
fi
# Right click, get notification with info
# dunstify uses positive ids by default, so use a negative id here
# cheat and use $IFS as newline
[[ ${BLOCK_BUTTON:-0} = 3 ]] && dunstify --replace=-310 \
"$title" "by $artist${IFS}on $(playerctl metadata album)" \
--icon=$(playerctl metadata mpris:artUrl) --appname=playerctl
exit 0
| true
|
924b493cfa1a5bae33119e313e1a48db4eece320
|
Shell
|
fishyoo/develop
|
/tools/vscode/.bashrc
|
UTF-8
| 939
| 2.546875
| 3
|
[] |
no_license
|
# Navigation shortcuts.
# NOTE(review): the "." alias shadows the POSIX "." (source) builtin in
# interactive shells -- "." SCRIPT will cd instead of sourcing; confirm
# this is intended before keeping it.
alias ,='cd -';
alias .='cd ..';
alias ..='cd ../..';
alias ...='cd ../../..';
alias ....='cd ../../../..';
# ls variants (long, hidden, by extension/size/mtime).
alias l='ls -alF';
alias la='ls -la';
alias ll='ls -al --color=tty';
alias lx='ls -lhBX --color=auto';
alias lz='ls -lhrS --color=auto';
alias lt='ls -lhrt --color=auto';
# Colored grep family.
alias egrep='egrep --color=auto';
alias fgrep='fgrep --color=auto';
alias grep='grep --color=auto';
# Networking and make shortcuts.
alias ifa='ifconfig';
alias if1='ifconfig eth1';
alias mk='make -j16';
alias mc='make clean';
#git
alias gc='git commit -a -m';
alias gcam='git commit --amend -a -m';
alias gcamc='git commit --amend -a -C HEAD';
alias gc.='git commit -a -m "."';
alias gst='git status';
alias gps='git push';
alias gpl='git pull';
alias gck='git checkout';
alias gad='git add -A';
alias gl='git log --pretty=format:"%cd --- %s %C(auto)" -30 --date=format:%c';
alias gl5='git log --pretty=format:"%cd --- %s %C(auto)" -30 --date=format:%c -5';
| true
|
b7bcaf687013133a7d97aab26e8f86f1d0d8efb1
|
Shell
|
objectiveceo/devops
|
/bin/start-remote-image.sh
|
UTF-8
| 800
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Stop any running container for an image on a remote host, then start a
# fresh detached instance of the given image there.
#
# Usage: start-remote-image.sh <ssh-host> <org/image:version> [docker-run flags...]
set -euo pipefail

function main {
	local sshHost="${1:?Must provide an ssh host}"
	local containerName="${2:?Must provide an image name (org/image:version)}"
	shift
	shift
	# Remaining arguments are forwarded verbatim to `docker run`.
	local additionalFlags="$@"

	# Split org/image:version into its components.
	local org
	local image
	local version
	IFS=: read -r tmp version <<< "$containerName"
	IFS=/ read -r org image <<< "$tmp"

	# Find a running container for this image on the remote host.
	# Declaration and assignment are separated so a failing ssh is not
	# masked by `local` (which itself always returns 0 under set -e).
	local containerId
	containerId="$(ssh "$sshHost" "docker ps | grep ${image} | awk '{print \$1}'")"

	if [[ -n "$containerId" ]]; then
		echo "Stopping previous version of ${tmp}"
		ssh "$sshHost" "docker stop ${containerId}"
		echo "Stopped ${containerId} on ${sshHost}"
	fi

	local newContainerId
	newContainerId=$(ssh "$sshHost" "docker run --rm --detach ${additionalFlags} ${containerName}")
	echo "Started new instance of ${containerName} as ${newContainerId}"
}

main "$@"
| true
|
51b3f7bdb4e5bbb35cc83ae870b4297b07319c7c
|
Shell
|
arash1902/deepeda
|
/benchmarks/RitircBiereKauers-FMCAD17-data/experiments/3_mathematica_sparrc_notpromote/run.sh
|
UTF-8
| 232
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the sp-ar-rc benchmark for operand widths 8, 16, 32, 64.
# When invoked via a "submit.sh" name, each launch is submitted through
# SLURM's sbatch instead of being run directly.
i=8
case x$(basename "$0") in
xsubmit.sh) run=sbatch;;
*) run="";;
esac
echo "running jobs $run"
while [ $i -le 64 ]
do
echo launching $i
$run ./launch.sh ../../benchmarks/aoki/sp-ar-rc-$i.aig
# Double the width with POSIX arithmetic (the legacy `expr` fork is
# unnecessary; $(( )) is specified by POSIX sh).
i=$((2 * i))
done
| true
|
660d434f80dff6247a815352ac46309778393eb7
|
Shell
|
rebl0x3r/aut0_mak3r
|
/lib/random_mac.sh
|
UTF-8
| 908
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Assign a randomly generated MAC address to the interface named in $1.
# Requires privileges sufficient to run ifconfig.
export PURPLE="\e[01;35m"
export Z="\e[0m"
export BLUE="\e[01;34m"
# Print one random two-character hex octet read from /dev/urandom.
rand_octet() {
	tr -dc 'a-fA-F0-9' < /dev/urandom | fold -w 2 | head -n 1
}
if [ $# -eq 0 ]
then
echo "Please specify an interface:
$0 eth0
$0 wlan0
"
else
echo -e $PURPLE "Generate random mac address...."
a1=$(rand_octet)
# Clear the multicast (I/G) bit and set the locally-administered (U/L)
# bit in the first octet: the kernel rejects multicast MAC addresses
# (odd first octet), so a fully random first octet would make
# "ifconfig hw ether" fail about half the time.
a1=$(printf '%02x' $(( (0x$a1 & 0xfe) | 0x02 )))
a2=$(rand_octet)
a3=$(rand_octet)
a4=$(rand_octet)
a5=$(rand_octet)
a6=$(rand_octet)
ifconfig $1 down
ifconfig $1 hw ether $a1":"$a2":"$a3":"$a4":"$a5":"$a6
ifconfig $1 up
echo -e $BLUE "New mac address for ${PURPLE}$1:" $a1":"$a2":"$a3":"$a4":"$a5":"$a6
echo -e $Z
fi
exit
| true
|
96e429772b74d141ddc94269045e19bbd146c881
|
Shell
|
dreibh/planetlab-lxc-build
|
/lbuild-bridge.sh
|
UTF-8
| 4,647
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# taking this bridge-initialization code out of lbuild-initvm.sh
# so we can use it on our libvirt/lxc local infra
# there's something very similar in
# tests/system/template-qemu/qemu-bridge-init
# that the current code was actually based on, but
# nobody was ever bold enough to reconcile these two
# hard-wired default name for the public bridge device
DEFAULT_PUBLIC_BRIDGE=br0
##############################
# use /proc/net/dev instead of a hard-wired list
# Print the name of every physical-looking network interface, one per line.
# Parses /proc/net/dev and filters out loopback, bridge, libvirt, veth,
# tap and Xen vif devices so that only candidate uplink NICs remain.
function gather_interfaces () {
    python3 <<EOF
with open("/proc/net/dev") as feed:
    for line in feed:
        if ':' not in line: continue
        ifname = line.replace(" ","").split(":")[0]
        if ifname.startswith("lo"): continue
        if ifname.startswith("br"): continue
        if ifname.startswith("virbr"): continue
        if ifname.startswith("veth"): continue
        if ifname.startswith("tap"): continue
        if ifname.startswith("vif"): continue
        print(ifname)
EOF
}
# Echo the first interface reported by gather_interfaces that is in
# state UP; echo the sentinel "unknown" when none qualifies.
function discover_interface () {
    for candidate in $(gather_interfaces); do
        if ip link show $candidate | grep -qi 'state UP'; then
            echo $candidate
            return
        fi
    done
    # nothing was UP -- hand back a sentinel the caller can detect
    echo unknown
}
##############################
# Install the given package via yum unless the RPM database already has it.
function check_yum_installed () {
    package=$1; shift
    if ! rpm -q $package >& /dev/null; then
        yum -y install $package
    fi
}
# not used apparently
# Install the named yum package group when it is not already installed.
function check_yumgroup_installed () {
    group="$1"; shift
    if ! yum grouplist "$group" | grep -q Installed; then
        yum -y groupinstall "$group"
    fi
}
#################### bridge initialization
# Create and configure the public bridge named in $1 unless it already
# exists: enslave the active physical NIC, move the host's IP and default
# route onto the bridge (via dhclient), and relax the bridge netfilter
# sysctls.  Returns 0 on success; exits the script when no usable NIC is
# found.  Statement order matters here (the host briefly loses its route).
function create_bridge_if_needed() {
    # do not turn on verbosity
    # set -x
    public_bridge=$1; shift
    # already created ? - we're done
    ip addr show $public_bridge >& /dev/null && {
        echo "Bridge already set up - skipping create_bridge_if_needed"
        return 0
    }
    # find out the physical interface to bridge onto
    if_lan=$(discover_interface)
    ip addr show $if_lan &>/dev/null || {
        echo "Cannot use interface $if_lan - exiting"
        exit 1
    }
    #################### bridge initialization
    check_yum_installed bridge-utils
    # NOTE(review): $COMMAND is expected to be set by the caller/environment;
    # it is not defined in this file.
    echo "========== $COMMAND: entering create_bridge - beg"
    hostname
    uname -a
    ip addr show
    ip route
    echo "========== $COMMAND: entering create_bridge - end"
    # disable netfilter calls for bridge interface (they cause panic on 2.6.35 anyway)
    #
    # another option would be to accept the all forward packages for
    # bridged interface like: -A FORWARD -m physdev --physdev-is-bridged -j ACCEPT
    sysctl net.bridge.bridge-nf-call-iptables=0
    sysctl net.bridge.bridge-nf-call-ip6tables=0
    sysctl net.bridge.bridge-nf-call-arptables=0
    # Getting host IP/masklen (first IPv4 address on the physical NIC)
    address=$(ip addr show $if_lan | grep -v inet6 | grep inet | head --lines=1 | awk '{print $2;}')
    [ -z "$address" ] && { echo "ERROR: Could not determine IP address for $if_lan" ; exit 1 ; }
    broadcast=$(ip addr show $if_lan | grep -v inet6 | grep inet | head --lines=1 | awk '{print $4;}')
    [ -z "$broadcast" ] && echo "WARNING: Could not determine broadcast address for $if_lan"
    gateway=$(ip route show | grep default | awk '{print $3;}')
    [ -z "$gateway" ] && echo "WARNING: Could not determine gateway IP"
    # creating the bridge and enslaving the physical NIC to it
    echo "Creating public bridge interface $public_bridge"
    brctl addbr $public_bridge
    brctl addif $public_bridge $if_lan
    echo "Activating promiscuous mode if_lan=$if_lan"
    ip link set $if_lan up promisc on
    sleep 2
    # rely on dhcp to re assign IP..
    echo "Starting dhclient on $public_bridge"
    dhclient $public_bridge
    sleep 1
    # Reconfigure the routing table: default route must go via the bridge now
    echo "Configuring gateway=$gateway"
    ip route add default via $gateway dev $public_bridge
    ip route del default via $gateway dev $if_lan
    # at this point we have an extra route like e.g.
    ## ip route show
    #default via 138.96.112.250 dev br0
    #138.96.112.0/21 dev em1 proto kernel scope link src 138.96.112.57
    #138.96.112.0/21 dev br0 proto kernel scope link src 138.96.112.57
    #192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1
    # drop the duplicate link-scope route still attached to the physical NIC
    route_dest=$(ip route show | grep -v default | grep "dev $public_bridge" | awk '{print $1;}')
    ip route del $route_dest dev $if_lan
    echo "========== $COMMAND: exiting create_bridge - beg"
    ip addr show
    ip route show
    echo "========== $COMMAND: exiting create_bridge - end"
    # for safety
    sleep 3
    return 0
}
# Entry point: optional first argument names the bridge to create.
function main () {
    # ${1:-...} picks the default when no argument is given; the previous
    # test `[[ -n "$@" ]]` was unreliable -- "$@" is a word list, not a
    # single string, and the test misfired on an empty first argument.
    public_bridge="${1:-$DEFAULT_PUBLIC_BRIDGE}"
    create_bridge_if_needed "$public_bridge"
}

main "$@"
| true
|
905e6affa29c3b9ff2fd34639553b6005ed758ab
|
Shell
|
Gontxx/showboard
|
/launch.sh
|
UTF-8
| 599
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the frontend and deploy the dist/ bundle to the remote server via
# scp + ssh.  The echoed messages (in Chinese) prompt for the server
# password at each step.
echo -e "\033[32m"
echo "##############################################"
echo "# Auto-Launch System #"
echo "##############################################"
echo "pku2018!)#)"
echo "##############################################"
echo -e "\033[0m"
echo "当前上线服务器:39.106.6.6"
addr="root@39.106.6.6"
path="/root/web/"
# production build, then pack dist/ so only one file is uploaded
npm run build
tar -cf dist.tar dist
echo "输入服务器密码,上传dist文件夹"
scp -r dist.tar $addr:$path
echo "再次输入服务器密码,确认覆盖dist"
# unpack on the remote host, overwriting the previously deployed dist/
ssh -tt "$addr" "cd $path && tar -xvf dist.tar"
| true
|
6d8e475234f1bf26fa0e25ed62dd11c12e8461c3
|
Shell
|
ucsd-ccbb/cirrus-ngs
|
/src/cirrus_ngs/server/Pipelines/scripts/SmallRNASeq/bowtie2/miRNA_merge_counts.sh
|
UTF-8
| 1,301
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merge per-sample miRNA count files: download each sample's counts from
# S3, run the merge script over the workspace, and upload the combined
# miRNA.all.counts.txt back to S3.  All stdout/stderr is redirected to a
# per-step log file.
# NOTE(review): check_step_already_done, check_exit_status, $python,
# $miRNA_count and $JOB_NAME are not defined here -- they are presumably
# provided by the pipeline environment that invokes this script; verify.
project_name=$1
workflow=$2
file_suffix=$3 #extension of input file, does not include .gz if present in input
root_dir=$4
fastq_end1=$5
fastq_end2=$6
input_address=$7 #this is an s3 address e.g. s3://path/to/input/directory
output_address=$8 #this is an s3 address e.g. s3://path/to/output/directory
log_dir=$9
is_zipped=${10} #either "True" or "False", indicates whether input is gzipped
all_samples=${11} # a space separated string containing all the sample names
#logging
mkdir -p $log_dir
log_file=$log_dir/'miRNA_merge_counts.log'
exec 1>>$log_file
exec 2>>$log_file
status_file=$log_dir/'status.log'
touch $status_file
#prepare output directories
workspace=$root_dir/$project_name/$workflow/
mkdir -p $workspace
echo "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
date
echo "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
check_step_already_done $JOB_NAME $status_file
## Download counts files from s3
for file in $all_samples; do
    check_exit_status "aws s3 cp $input_address/$file/$file$file_suffix $workspace/$file/ --quiet" $JOB_NAME $status_file
done
## Call the merge count file
check_exit_status "$python $miRNA_count $workspace" $JOB_NAME $status_file
## Upload
aws s3 cp $workspace $output_address/ --exclude "*" --include "*miRNA.all.counts.txt*" --recursive --quiet
| true
|
1270eacb8a4ad1cb5fac0c0438a3902d98066060
|
Shell
|
pchandra/chef-bcpc
|
/vmware/vmware_create.sh
|
UTF-8
| 4,219
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# NOTE(review): -e in the shebang is lost when the script is invoked as
# "bash vmware_create.sh"; consider `set -e` in the body instead.
# bash imports -- VMRUN, VMDISK and CURL are expected to come from here
source ./vmware_env.sh
if [[ -z "$VMRUN" ]]; then
    echo "vmrun not found!" >&2
    echo " Please ensure VMWare is installed and vmrun is accessible." >&2
    exit 1
fi
if [[ -z "$VMDISK" ]]; then
    echo "vmware-vdiskmanager not found!" >&2
    echo " Please ensure VMWare is installed and vmware-vdiskmanager is accessible." >&2
    exit 1
fi
# optional proxy configuration, shared with the other bootstrap scripts
if [[ -f ../proxy_setup.sh ]]; then
    . ../proxy_setup.sh
fi
if [[ -z "$CURL" ]]; then
    echo "CURL is not defined"
    exit
fi
# Bootstrap VM Defaults (these need to be exported for Vagrant's Vagrantfile)
export BOOTSTRAP_VM_MEM=1536
export BOOTSTRAP_VM_CPUs=1
# Use this if you intend to make an apt-mirror in this VM (see the
# instructions on using an apt-mirror towards the end of bootstrap.md)
# -- Vagrant VMs do not use this size --
#BOOTSTRAP_VM_DRIVE_SIZE=120480
# Cluster VM Defaults
CLUSTER_VM_MEM=2560
CLUSTER_VM_CPUs=2
CLUSTER_VM_DRIVE_SIZE=20480
# absolute path of the directory holding this script
VBOX_DIR="`dirname ${BASH_SOURCE[0]}`"
P=`python -c "import os.path; print os.path.abspath(\"${VBOX_DIR}/\")"`
if ! hash vagrant 2> /dev/null ; then
    echo "This script requires Vagrant to be installed!"
    exit 1
fi
if [[ -z `vagrant plugin list | grep vagrant-vmware-fusion` ]]; then
    echo "This script requires vagrant-vmware-fusion to be installed!"
    echo "Try: "
    echo "$ vagrant plugin install vagrant-vmware-fusion"
    echo "$ vagrant plugin license /path/to/license-file"
    echo "See http://www.vagrantup.com/vmware for more info."
    echo "(The vagrant-vmware-fusion plugin is a commercial product.)"
    exit 1
fi
######################################################
# Fetch the precise64 VMware vagrant box into $P unless it is already
# cached there.
function download_VM_files {
    pushd $P
    box='precise64_vmware.box'
    [[ -f $box ]] || $CURL -L -o $box http://files.vagrantup.com/precise64_vmware.box
    popd
}
###################################################################
# Function to create the VMs via Vagrant.  Brings up the three bcpc VMs,
# then gives each one four extra 20GB vdisks by creating vmdk files and
# appending the matching scsi entries to the VM's .vmx file.
function create_VMs {
    pushd $P
    if [[ ! -f insecure_private_key ]]; then
        # Ensure that the private key has been created by running vagrant
        vagrant status
        cp $HOME/.vagrant.d/insecure_private_key .
    fi
    # You have to run this three times initially for each private network
    vagrant up
    # Create each VM vdisk
    for vm in 1 2 3; do
        VM_PATH=`ls -d .vagrant/machines/bcpc_vm$vm/vmware_fusion/*/`
        VMX_PATH=$VM_PATH/precise64.vmx
        if [ ! -f $VMX_PATH ]; then
            echo "Can't find bcpc-vm$vm in $VM_PATH"
            exit 1
        fi
        # offset for boot drive!
        for disk in 1 2 3 4; do
            VMDK_FILE=bcpc-vm$vm-$disk.vmdk
            VMDK_PATH=$VM_PATH/$VMDK_FILE
            if [ ! -f $VMDK_PATH ]; then
                # the VM must be halted before its vmx can be edited safely
                vagrant halt bcpc_vm$vm
                echo "Creating $VMDK_PATH"
                "$VMDISK" -c -s ${CLUSTER_VM_DRIVE_SIZE}MB -a ide -t 0 $VMDK_PATH
                # keep a backup of the vmx before each modification
                cp $VMX_PATH $VMX_PATH.orig-$disk
                cat >> $VMX_PATH <<EOF
scsi0:${disk}.present = "TRUE"
scsi0:${disk}.filename = "${VMDK_FILE}"
EOF
            fi
        done
        #vagrant up bcpc_vm$vm
    done
    popd
}
# Bootstrap the Chef server inside the bootstrap VM.
# $1 - chef environment name (default Test-Laptop-VMware)
# $2 - bootstrap VM IP (default 10.0.100.3)
function install_cluster {
    environment=${1-Test-Laptop-VMware}
    ip=${2-10.0.100.3}
    pushd $P
    # N.B. As of Aug 2013, grub-pc gets confused and wants to prompt re: 3-way
    # merge. Sigh.
    #vagrant ssh -c "sudo ucf -p /etc/default/grub"
    #vagrant ssh -c "sudo ucfr -p grub-pc /etc/default/grub"
    vagrant ssh -c "test -f /etc/default/grub.ucf-dist && sudo mv /etc/default/grub.ucf-dist /etc/default/grub" || true
    # Duplicate what d-i's apt-setup generators/50mirror does when set in preseed
    if [ -n "$http_proxy" ]; then
        if [ -z `vagrant ssh -c "grep Acquire::http::Proxy /etc/apt/apt.conf"` ]; then
            vagrant ssh -c "echo 'Acquire::http::Proxy \"$http_proxy\";' | sudo tee -a /etc/apt/apt.conf"
        fi
    fi
    vagrant ssh -c "sudo apt-get install rsync ed apparmor" || true
    popd
    echo "Bootstrap complete - setting up Chef server"
    echo "N.B. This may take approximately 30-45 minutes to complete."
    pushd ../
    ./bootstrap_chef.sh --vagrant-vmware $ip $environment
    popd
    ./vmware_cobbler.sh
}
# only execute functions if being run and not sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    download_VM_files
    create_VMs
    # "$@" preserves argument word boundaries; the previous $* re-split
    # quoted arguments on whitespace
    install_cluster "$@"
fi
| true
|
ff9e4bb8bdd9a7b3e1dfc9250d9568a5712318cb
|
Shell
|
qcastel/github-actions-maven-cmd
|
/mvn-action.sh
|
UTF-8
| 430
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run maven inside the CI container, after reporting whether the cached
# M2 repository was restored.
set -e
pwd
if [ -d "${M2_HOME_FOLDER}" ]; then
    # directory exists -> the CI cache was restored
    echo "INFO - M2 folder '${M2_HOME_FOLDER}' not empty. We therefore will beneficy from the CI cache";
    ls -l ${M2_HOME_FOLDER};
else
    echo "WARN - No M2 folder '${M2_HOME_FOLDER}' found. We therefore won't beneficy from the CI cache";
fi
echo "JAVA_HOME = $JAVA_HOME"
# NOTE(review): JAVA_HOME is force-overridden right after being echoed, so
# the line above prints the *previous* value -- confirm this is intended.
JAVA_HOME="/usr/java/openjdk-14/"
# Do the copyright verification.
# "$@" forwards the action's arguments verbatim; the previous $* re-split
# quoted arguments on whitespace and broke e.g. -Dprop="a b".
mvn -ntp "$@"
| true
|
76496d9e666c651040d437d40978aba9f7c2d373
|
Shell
|
bioinformatics-ua/BioASQ9b
|
/Baseline/setup.sh
|
UTF-8
| 884
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a fresh python3.6 virtualenv, install the project requirements,
# register the venv as a Jupyter kernel, and build+install the local
# "nir" library from _other_dependencies/nir.
# exit when any command fails
set -e
VENV_NAME="py-bioasq"
# remove the virtual env if already exists
if [ -d "$(pwd)/$VENV_NAME" ]; then
    echo "Removing alredy existing venv first"
    rm -r $VENV_NAME
fi
# PYTHON DEPENDENCIES
PYTHON=python3.6
echo "Creating a python environment ($VENV_NAME)"
$PYTHON -m venv $VENV_NAME
# from here on, use the interpreter/tools inside the new venv
PYTHON=$(pwd)/$VENV_NAME/bin/python
PIP=$(pwd)/$VENV_NAME/bin/pip
IPYTHON=$(pwd)/$VENV_NAME/bin/ipython
# update pip
echo "Updating pip"
$PYTHON -m pip install -U pip
echo "Installing python requirements"
$PIP install -r requirements.txt
# ADD to Jupyter
echo "Adding this kernel to the jupyter notebook"
$IPYTHON kernel install --name "$VENV_NAME" --user
echo "Manually install nir python library"
cd _other_dependencies/nir/
# rebuild the sdist from scratch
if [ -d "./dist" ]
then
    rm -r ./dist
fi
$PYTHON setup.py sdist
$PIP install ./dist/nir-0.0.1.tar.gz
cd ../../
| true
|
9a7d06eae7c0807b3c0854d9bdf9d33d27d9f093
|
Shell
|
podismine/pac2019
|
/scripts/T1processing/RR1_FSprocessing.sh
|
UTF-8
| 1,782
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# SLURM array job: run FreeSurfer recon-all on one subject (selected by
# SLURM_ARRAY_TASK_ID as a row of the training CSV), then export surface
# metrics at several smoothing levels / fsaverage resolutions and run the
# ENIGMA shape pipeline on the subcortical segmentation.
# CLUSTER OPTIONS
wd="working/directory"
fsdir="freeSurfer/directory"
echo ${SLURM_ARRAY_TASK_ID}.$(sed -n "${SLURM_ARRAY_TASK_ID}{p;q}" ${wd}/dataset/PAC2019_BrainAge_Training.csv)
# the CSV row for this array task; first comma-separated field is the subject ID
dat=$(sed -n "${SLURM_ARRAY_TASK_ID}{p;q}" ${wd}/dataset/PAC2019_BrainAge_Training.csv)
echo ${dat}
ID=$(echo ${dat} | cut -f 1 -d ',' )
echo ${ID}
mkdir -p ${wd}/FS/
mkdir -p ${wd}/FSOUTPUT/FSresults/${ID}/
mkdir -p ${wd}/FSOUTPUT/ENIGMAshapeResults/${ID}/
module load FreeSurfer
SUBJECTS_DIR=${wd}/FS
# full cortical reconstruction + qcache smoothing for this subject
recon-all -subject ${ID} -i ${wd}/dataset/raw/${ID}_raw.nii.gz -all -qcache
SUBJECTS_DIR=${fsdir}/subjects
for hemi in lh rh
do
    for moda in area thickness
    do
        # convert each smoothed fsaverage overlay to ascii
        for fwhm in 0 5 10 15 20 25
        do
            ${fsdir}/bin/mris_convert -c ${wd}/FS/${ID}/surf/${hemi}.${moda}.fwhm${fwhm}.fsaverage.mgh ${fsdir}/subjects/fsaverage/surf/${hemi}.orig ${wd}/FSOUTPUT/FSresults/${ID}/${hemi}.${moda}.fwhm${fwhm}.fsaverage.asc
        done
        # resample the unsmoothed overlay to the lower-resolution fsaverage meshes
        for fsav in fsaverage3 fsaverage4 fsaverage5 fsaverage6
        do
            ${fsdir}/bin/mri_surf2surf --s fsaverage --hemi ${hemi} --sval ${wd}/FS/${ID}/surf/${hemi}.${moda}.fwhm0.fsaverage.mgh --trgsubject ${fsav} --tval ${wd}/FS/${ID}/surf/${hemi}.${moda}.fwhm0.${fsav}.mgh
            ${fsdir}/bin/mris_convert -c ${wd}/FS/${ID}/surf/${hemi}.${moda}.fwhm0.${fsav}.mgh ${fsdir}/subjects/${fsav}/surf/${hemi}.orig ${wd}/FSOUTPUT/FSresults/${ID}/${hemi}.${moda}.fwhm0.${fsav}.asc
        done
    done
done
# ENIGMA shape analysis over the listed subcortical aseg labels
perl ${wd}/bin/ENIGMA_shape/MedialDemonsShared/bin/Medial_Demons_shared.pl ${wd}/FS/${ID}/mri/aseg.mgz 10 11 12 13 17 18 26 49 50 51 52 53 54 58 ${wd}/FS/${ID}/ENIGMA_shape/ ${wd}/bin/ENIGMA_shape/MedialDemonsShared ${fsdir}/bin
rsync -r ${wd}/FS/${ID}/ENIGMA_shape/* ${wd}/FSOUTPUT/ENIGMAshapeResults/${ID}/
rsync -r ${wd}/FS/${ID}/stats/* ${wd}/FSOUTPUT/FSresults/${ID}/
| true
|
14303a818bcaf669ee8bcc311d63965a7e63f389
|
Shell
|
packetfield/infra-core
|
/ansible/vars/encrypt-string.sh
|
UTF-8
| 359
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
####
# Usage example
####
# Run something like this:
#
# ./encrypt-string.sh some_secret_variable "the-actual-secret-goes-here" >> ./shared-secrets.yml
#
# it should add a secret string variable called {{some_secret_variable}} to shared-secrets.yml
KEY="$1"
VAL="$2"
# '%s' keeps the secret out of the printf *format* string: the previous
# `printf "$VAL"` corrupted any secret containing % or backslash sequences.
printf '%s' "$VAL" | ansible-vault encrypt_string --stdin-name "${KEY}"
| true
|
e83f8ae31f08aed7dd70f6de9581bab70e15af1b
|
Shell
|
wei-tang/OpenArkCompiler
|
/tools/setup_tools.sh
|
UTF-8
| 7,003
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved.
#
# Licensed under the Mulan Permissive Software License v2.
# You can use this software according to the terms and conditions of the MulanPSL - 2.0.
# You may obtain a copy of MulanPSL - 2.0 at:
#
# https://opensource.org/licenses/MulanPSL-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the MulanPSL - 2.0 for more details.
#
# Downloads and links every external toolchain the OpenArkCompiler build
# needs (clang/llvm, ninja, gn, linaro gcc + sysroot, d8, icu, qemu, ...).
# Each block is idempotent: it is skipped when its marker file already
# exists.  Pass "android" as $1 to also fetch the Android NDK/prebuilts.
set -e
if [ -z "$MAPLE_ROOT" ]; then
    echo "Please \"source build/envsetup.sh\" to setup environment"
    exit 1
fi
echo MAPLE_ROOT: $MAPLE_ROOT
android_env=$1
TOOLS=$MAPLE_ROOT/tools
ANDROID_VERSION=android-10.0.0_r35
ANDROID_SRCDIR=$MAPLE_ROOT/../android/$ANDROID_VERSION
ANDROID_DIR=$MAPLE_ROOT/android
# clang+llvm 10 toolchain
if [ ! -f $TOOLS/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang ]; then
    cd $TOOLS
    echo Start wget llvm-10.0.0 ...
    wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz
    echo unpacking clang+llvm ...
    tar xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz
    echo Downloaded clang+llvm.
fi
# Android-only prerequisites (NDK + prebuilt gcc/clang symlinks)
if [ "$android_env" == "android" ]; then
    if [ ! -f $TOOLS/android-ndk-r21/ndk-build ]; then
        cd $TOOLS
        wget https://dl.google.com/android/repository/android-ndk-r21d-linux-x86_64.zip
        echo unpacking android ndk ...
        unzip android-ndk-r21d-linux-x86_64.zip > /dev/null
        mv android-ndk-r21d android-ndk-r21
        echo Downloaded android ndk.
    fi
    if [ ! -L $TOOLS/gcc ]; then
        cd $TOOLS
        ln -s ../android/prebuilts/gcc .
        echo Linked gcc.
    fi
    if [ ! -L $TOOLS/clang-r353983c ]; then
        cd $TOOLS
        ln -s ../android/prebuilts/clang/host/linux-x86/clang-r353983c .
        echo Linked clang.
    fi
fi
if [ ! -f $TOOLS/ninja/ninja ]; then
    cd $TOOLS
    echo Start wget ninja ...
    mkdir -p ./ninja
    cd ./ninja || exit 3
    wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
    unzip ninja-linux.zip
    echo Downloaded ninja.
fi
if [ ! -f $TOOLS/gn/gn ]; then
    cd $TOOLS
    echo Start clone gn ...
    git clone --depth 1 https://gitee.com/xlnb/gn_binary.git gn
    chmod +x gn/gn
    echo Downloaded gn.
fi
# aarch64 cross compiler + matching glibc sysroot
if [ ! -f $TOOLS/gcc-linaro-7.5.0/bin/aarch64-linux-gnu-gcc ]; then
    cd $TOOLS
    echo Start wget gcc-linaro-7.5.0 ...
    wget https://releases.linaro.org/components/toolchain/binaries/latest-7/aarch64-linux-gnu/gcc-linaro-7.5.0-2019.12-i686_aarch64-linux-gnu.tar.xz
    echo unpacking gcc ...
    tar xf gcc-linaro-7.5.0-2019.12-i686_aarch64-linux-gnu.tar.xz
    mv gcc-linaro-7.5.0-2019.12-i686_aarch64-linux-gnu gcc-linaro-7.5.0
    echo Downloaded gcc aarch64 compiler.
fi
if [ ! -d $TOOLS/sysroot-glibc-linaro-2.25 ]; then
    cd $TOOLS
    echo Start wget sysroot-glibc-linaro-2.25 ...
    wget https://releases.linaro.org/components/toolchain/binaries/latest-7/aarch64-linux-gnu/sysroot-glibc-linaro-2.25-2019.12-aarch64-linux-gnu.tar.xz
    echo unpacking sysroot ...
    tar xf sysroot-glibc-linaro-2.25-2019.12-aarch64-linux-gnu.tar.xz
    mv sysroot-glibc-linaro-2.25-2019.12-aarch64-linux-gnu sysroot-glibc-linaro-2.25
    echo Downloaded aarch64 sysroot.
fi
if [ ! -f $MAPLE_ROOT/third_party/d8/lib/d8.jar ]; then
    cd $TOOLS
    echo Start clone d8 ...
    git clone --depth 1 https://gitee.com/xlnb/r8-d81513.git
    mkdir -p $MAPLE_ROOT/third_party/d8/lib
    cp -f r8-d81513/d8/lib/d8.jar $MAPLE_ROOT/third_party/d8/lib
    echo Downloaded d8.jar.
fi
if [ ! -d $MAPLE_ROOT/third_party/icu ]; then
    cd $TOOLS
    echo Start clone ICU4C ...
    git clone --depth 1 https://gitee.com/xlnb/icu4c.git
    mkdir -p $MAPLE_ROOT/third_party/icu
    cp -r icu4c/lib/ $MAPLE_ROOT/third_party/icu/
    echo Downloaded icu4c libs
fi
# download prebuilt andriod
if [ ! -d $ANDROID_DIR/out/target/product/generic_arm64 ]; then
    cd $TOOLS
    echo Start clone AOSP CORE LIB ...
    git clone --depth 1 https://gitee.com/xlnb/aosp_core_bin.git
    cp -r aosp_core_bin/android $MAPLE_ROOT/
    cp -r aosp_core_bin/libjava-core $MAPLE_ROOT/
    echo Downloaded AOSP CORE LIB
fi
if [ ! -f $MAPLE_ROOT/third_party/libdex/prebuilts/aarch64-linux-gnu/libz.so.1.2.8 ]; then
    cd $TOOLS
    echo Start wget libz ...
    wget http://ports.ubuntu.com/pool/main/z/zlib/zlib1g_1.2.8.dfsg-2ubuntu4_arm64.deb
    mkdir -p libz_extract
    dpkg --extract zlib1g_1.2.8.dfsg-2ubuntu4_arm64.deb libz_extract
    ZLIBDIR=$MAPLE_ROOT/third_party/libdex/prebuilts/aarch64-linux-gnu
    mkdir -p $ZLIBDIR
    cp -f libz_extract/lib/aarch64-linux-gnu/libz.so.1.2.8 $ZLIBDIR
    echo Downloaded libz.
fi
# install qemu-user 2.5.0
if [ ! -f $TOOLS/qemu/package/usr/bin/qemu-aarch64 ]; then
    cd $TOOLS
    echo Start wget qemu-user ...
    rm -rf qemu
    git clone --depth 1 https://gitee.com/hu-_-wen/qemu.git
    cd qemu
    mkdir -p package
    dpkg-deb -R qemu-user_2.5+dfsg-5ubuntu10.48_amd64.deb package
    echo Installed qemu-aarch64
fi
# clang2mpl
if [ ! -d $TOOLS/clang2mpl ]; then
    cd $TOOLS
    git clone --depth 1 https://gitee.com/openarkcompiler-incubator/clang2mpl.git
fi
# routinly updated to be compatible with maple
cd $TOOLS/clang2mpl
git clean -df
git checkout .
git checkout master
git pull
# NOTE(review): TOOL_BIN_PATH is expected from the sourced envsetup.sh
mkdir -p ${TOOL_BIN_PATH}
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang++ ${TOOL_BIN_PATH}/clang++
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang ${TOOL_BIN_PATH}/clang
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/llvm-ar ${TOOL_BIN_PATH}/llvm-ar
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/llvm-ranlib ${TOOL_BIN_PATH}/llvm-ranlib
ln -s -f ${MAPLE_ROOT}/tools/qemu/package/usr/bin/qemu-aarch64 ${TOOL_BIN_PATH}/qemu-aarch64
# prefer the system qemu when one is installed
if [ -f /usr/bin/qemu-aarch64 ];then
    ln -s -f /usr/bin/qemu-aarch64 ${TOOL_BIN_PATH}/qemu-aarch64
fi
mkdir -p ${MAPLE_ROOT}/testsuite/tools
mkdir -p ${MAPLE_ROOT}/testsuite/tools/bin
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang++ ${MAPLE_ROOT}/testsuite/tools/bin/clang++
ln -s -f ${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang ${MAPLE_ROOT}/testsuite/tools/bin/clang
ln -s -f ${MAPLE_ROOT}/tools/qemu/package/usr/bin/qemu-aarch64 ${MAPLE_ROOT}/testsuite/tools/bin/qemu-aarch64
ln -s -f ${MAPLE_ROOT}/tools/gcc-linaro-7.5.0/bin/aarch64-linux-gnu-gcc ${MAPLE_ROOT}/testsuite/tools/bin/aarch64-linux-gnu-gcc
ln -s -f ${MAPLE_ROOT}/tools/gcc-linaro-7.5.0 ${MAPLE_ROOT}/testsuite/tools/gcc-linaro-7.5.0
ln -s -f ${MAPLE_ROOT}/build/java2dex ${MAPLE_ROOT}/testsuite/tools/bin/java2dex
ln -s -f ${MAPLE_ROOT}/tools/r8-d81513/d8/lib ${MAPLE_ROOT}/testsuite/tools/lib
# ThirdParty repo: clone once, then keep it updated
if [ ! -d $MAPLE_ROOT/../ThirdParty ]; then
    cd $MAPLE_ROOT/../
    git clone --depth 1 https://gitee.com/openarkcompiler/ThirdParty.git
    cd -
else
    cd $MAPLE_ROOT/../ThirdParty
    git pull origin master
    cd -
fi
| true
|
c7aa18ebe260ab83361e6debfdeca013ca8ac00a
|
Shell
|
gianpietrodimatteo/dotfiles
|
/bin/gians_home
|
UTF-8
| 462
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Periodically update my ip
# crontab -e
# 0 */6 * * * /home/USERNAME/bin/gians_home >/dev/null 2>&1
# Registers this device's current LAN IP with the home service.
# NOTE(review): GIANS_DEVICE and GIANS_HOME must be set in the caller's
# environment; the POST followed by PUT of the same payload looks like a
# create-then-update pattern -- confirm the API actually needs both.
name="$GIANS_DEVICE"
# first address reported by hostname -I (the primary LAN IP)
ip="$(hostname -I | awk '{print $1}')"
appurl="$GIANS_HOME/address"
curl -sSd "{\"name\":\"$name\", \"address\":\"$ip\", \"user\":\"$USER\"}" -H "Content-Type: application/json" -X POST "$appurl"
curl -sSd "{\"name\":\"$name\", \"address\":\"$ip\", \"user\":\"$USER\"}" -H "Content-Type: application/json" -X PUT "$appurl"
| true
|
98b1102be7cd9b86e0a623504e29e5ade3a6a23c
|
Shell
|
jknightlab/Abu-Dhabi
|
/RNASeq/PaxGeneCohort/Scripts_for_mapping/MakeScriptsTophat2Mapping.sh
|
UTF-8
| 1,587
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate one tophat2 mapping script per sample listed in mapping.info.txt
# plus a run.submit_jobs_for_mapping.sh that qsubs them all.
##### define your paths
# 1) a directory of fastq files
#fastq_files="/well/jknight/AbuDhabiRNA/P170245"
# 2) project directory
#mkdir -p ./mapping/2.mapping/mappingscripts
#mkdir -p ./mapping/2.mapping/fastq
# make links all fastq files in $fastq_files directory to $PROJECT_DIR/mapping/2.mapping/ ;
#cmd="ln -s $fastq_files/WTCHG_341122_20110*fastq.gz ./mapping/2.mapping/fastq/"
#echo $cmd
if [[ ! -e "mappingscripts" ]]; then
    mkdir ./mappingscripts
fi
# master submit script, rebuilt from scratch on each run
rst="./run.submit_jobs_for_mapping.sh"
echo "" > $rst
###################################################
#
# Add your sample data file here with a full path
FILE="/well/jknight/AbuDhabiRNA/Katie/mapping/2.mapping/mapping.info.txt"
#
##################################################
# one line per sample: column 1 = sample name, column 2 = comma-separated
# fastq basenames (paired-end, _1/_2 suffixes added below)
while read -r line
do
    fastq_dir="$(echo "$line" | cut -f 1)"
    sample="$(echo "$line" | cut -f 1)" # choose a right column for sample name
    echo $sample
    fastq="$(echo "$line" | cut -f 2)"
    # expand "a,b" into "fastq/a_1.fastq.gz,fastq/b_1.fastq.gz" (and _2 for mates)
    fastq1="$(echo "$fastq" | sed -e 's/,/_1.fastq.gz,fastq\//g')"
    fastq2="$(echo "$fastq" | sed -e 's/,/_2.fastq.gz,fastq\//g')"
    fastq1=$fastq1"_1.fastq.gz"
    fastq2=$fastq2"_2.fastq.gz"
    fastq1="fastq/"$fastq1
    fastq2="fastq/"$fastq2
    echo $fastq1
    echo $fastq2
    exc_name="./mappingscripts/run.mapping.$sample.sh"
    cmd="sh 2.tophat2_mapping.sh $sample $fastq1 $fastq2"
    # NOTE(review): this *executes* 2.tophat2_mapping.sh and captures its
    # stdout into the per-sample script; if the intent was to write the
    # command line itself, this should be: echo "$cmd" > $exc_name. Confirm.
    $cmd > $exc_name
    echo "qsub "$exc_name >> $rst
done < $FILE
### move all scripts to your working directory
#mv ./mappingscripts/run.mapping* ./mapping/2.mapping/mappingscripts
#mv ./mappingscripts/run.submit_jobs_for_mapping.sh ./mapping/2.mapping
| true
|
7de871295e61ed04dbbcd2018a248692c388cbb3
|
Shell
|
ryanseys/dotfiles
|
/common/aliases
|
UTF-8
| 2,917
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# git-open to open the current branch in github
# Builds the GitHub "compare" URL for the current branch's upstream and
# opens it in the default browser (macOS `open`).
function git-open() {
    CURRENT_BRANCH=$(git branch --show-current)
    REMOTE_BRANCH=$(git config --get branch.$CURRENT_BRANCH.remote)
    # upstream ref with the "refs/heads/" prefix stripped
    MERGE_BRANCH=$(git config --get branch.$CURRENT_BRANCH.merge | cut -d '/' -f 3-)
    GIT_REMOTE_URL=$(git ls-remote --get-url $REMOTE_BRANCH)
    # drop a trailing ".git" so the URL is browsable
    URL_TO_OPEN=$(echo $GIT_REMOTE_URL | sed 's/.git$//g')
    open "$URL_TO_OPEN/compare/$MERGE_BRANCH?expand=1"
}
# gpr = git pull & rebase.
# checkouts the default branch, pulls it from origin, switches back to existing branch, and rebases
function gpr() {
    # default branch name derived from origin/HEAD (e.g. main or master)
    export DEFAULT_BRANCH=`git rev-parse --abbrev-ref origin/HEAD | sed 's/origin\///g'`
    git checkout $DEFAULT_BRANCH
    git pull
    # `checkout -` returns to the branch we started on
    git checkout -
    git rebase $DEFAULT_BRANCH
}
# fixup lets you choose a commit to fix up and it automatically applies the
# stages changes in git as a fixup commit and applies it by rebasing off
# the main / master branch automatically
# Usage: fixup-main
function fixup-main() {
    export STAGED_CHANGES=`git diff --name-only --cached`
    # `return` instead of `exit`: this function lives in a sourced aliases
    # file, so `exit` would terminate the user's interactive shell.
    [[ -z $STAGED_CHANGES ]] && echo "No staged changes to fixup. Exiting..." && return 1
    # pick a recent non-merge commit interactively via fzf
    export COMMIT_SHA=`git log -n 50 --pretty=format:'%h %s' --no-merges | fzf | cut -c -7`
    git commit --fixup $COMMIT_SHA
    export DEFAULT_BRANCH=`git rev-parse --abbrev-ref origin/HEAD | sed 's/origin\///g'`
    # non-interactive autosquash rebase onto the default branch
    GIT_SEQUENCE_EDITOR=true git rebase -i --autosquash $DEFAULT_BRANCH
}
# Kill (SIGKILL) every process listening on the given port.
function killport() {
    export PROCESSES_ON_PORT=$(lsof -ti:$1)
    if [ -n "$PROCESSES_ON_PORT" ]; then
        echo "Killing processes on port $1"
        kill -9 $PROCESSES_ON_PORT
    else
        echo "No processes on port $1"
    fi
}
# Add the *current* directory to the Finder sidebar.  Single quotes defer
# the $(pwd) expansion to invocation time; with the original double quotes
# the path was baked in once, when this file was sourced.
alias favme='mysides add "$(basename "$(pwd)")" "file://$(pwd)"'
# Add directory to sidebar favorites.
# Install with `brew install --cask mysides`
# Usage: `fav code file:///Users/ryanseys/code` to that dir to Mac sidebar.
alias fav="mysides add"
alias up="dev up"
alias dsl="bin/tapioca dsl"
alias style="bin/style --include-branch-commits"
# delete local branches already merged into master/main
alias gitclean='git branch --merged | egrep -v "(^\*|master|main)" | xargs git branch -d'
alias please="sudo"
alias aliases="$EDITOR ~/.aliases"
alias pull="git pull"
alias rt="ruby -Itest"
alias zshrc="$EDITOR ~/.zshrc"
alias resource="source ~/.zshrc"
alias gitconfig="$EDITOR ~/.gitconfig"
alias gitconfig_local="$EDITOR ~/.gitconfig_local"
alias gitaliases="$EDITOR ~/.gitconfig"
alias be="bundle exec"
alias tc="bin/typecheck"
alias fix="bin/typecheck && bin/style --include-branch-commits"
alias dotfiles="cd ~/dotfiles"
alias utc="$HOME/bin/convert_utc.rb"
alias dt="dev test --include-branch-commits"
alias dta="dev test -a"
alias vrt="VCR=all dev test"
alias back="cd -"
alias mine="/Applications/RubyMine.app/Contents/MacOS/rubymine"
# Build and run the app on my personal iPhone (SeysPhone)
alias runios="yarn react-native run-ios"
alias runiosdevice="yarn react-native run-ios --udid=00008120-000475463E78C01E"
|
a6952302a53efc71647e2eb7cb3915b2b7658a5e
|
Shell
|
88IO/dotfiles_old
|
/.zshrc
|
UTF-8
| 3,257
| 2.75
| 3
|
[] |
no_license
|
# Set up the prompt: coloured user@host:path with job count, a yellow
# continuation arrow, plus spelling-correction and secondary prompts.
autoload colors
colors
PROMPT="%{${fg[green]}%}%n@%m:%{${fg[yellow]}%}%~%{${fg[green]}%}%j%#
%{${fg[yellow]}%}↪ %{$reset_color%}"
PROMPT2="%{$fg[green]%}%_> %{$reset_color%}"
SPROMPT="%{$fg[red]%}correct: %R -> %r [nyae]? %{$reset_color%}"
setopt histignorealldups sharehistory
# Use emacs keybindings even if our EDITOR is set to vi
bindkey -e
# Keep 1000 lines of history within the shell and save it to ~/.zsh_history:
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=~/.zsh_history
# git branch in the right prompt, with staged (!) / unstaged (+) markers
autoload -Uz vcs_info
setopt prompt_subst
zstyle ':vcs_info:git:*' check-for-changes true
zstyle ':vcs_info:git:*' stagedstr "%F{yellow}!"
zstyle ':vcs_info:git:*' unstagedstr "%F{red}+"
zstyle ':vcs_info:*' formats "%F{green}%c%u[%b]%f"
zstyle ':vcs_info:*' actionformats '[%b|%a]'
# refresh the git info before every prompt
precmd () { vcs_info }
RPROMPT=$RPROMPT'${vcs_info_msg_0_}'
# Use modern completion system
autoload -Uz compinit
compinit
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _correct _approximate
zstyle ':completion:*' format 'Completing %d'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' menu select=2
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
zstyle ':completion:*' menu select=long
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
# misc shell options (no beep, compact lists, shared deduped history, ...)
setopt nolistbeep
setopt list_packed
setopt magic_equal_subst
setopt auto_pushd
setopt pushd_ignore_dups
setopt share_history
setopt hist_ignore_dups
setopt hist_ignore_all_dups
setopt append_history
setopt auto_param_slash
setopt mark_dirs
setopt list_types
setopt auto_menu
setopt auto_param_keys
setopt interactive_comments
setopt complete_in_word
setopt always_last_prompt
setopt print_eight_bit
setopt extended_glob
setopt globdots
# show a "(vim)" marker in the prompt when running inside a vim terminal
[[ -n "$VIMRUNTIME" ]] && \
    PROMPT="%{${fg[white]}${bg[blue]}%}(vim)%{${reset_color}%} $PROMPT"
export TERM=xterm-256color
autoload -U compinit
compinit
export LSCOLORS=exfxcxdxbxegedabagacad
export LS_COLORS="di=34:ln=35:so=32:pi=33:ex=31:bd=46;34:cd=43;34:su=41;30:sg=46;30:tw=42;30:ow=43;30"
alias ls="ls --color"
# prepend miniconda to PATH when it exists (N-/ drops missing dirs)
path=($HOME/miniconda3/bin(N-/) $path)
# cd that also lists the new directory (bound to `cd` via the alias below).
function cdls() {
    # quote the target: the original `cd $1` word-split paths containing
    # spaces; plain `cd` (go home) is kept for the no-argument case
    if [ $# -gt 0 ]; then
        cd "$1"
    else
        cd
    fi
    ls
}
# route plain `cd` through cdls so every directory change also lists it
alias cd="cdls"
# shortcuts into the per-language work directories
alias pycd="cdls ~/works/pyworks"
alias ccd="cdls ~/works/cworks"
alias jscd="cdls ~/works/jsworks"
alias rscd="cdls ~/works/rsworks"
alias dcd="cdls ~/works/dworks"
alias battery="cat /proc/acpi/battery/BAT0/info"
alias shutdown_="sudo shutdown -h now"
alias vi="vim"
alias em="emacs -nw"
alias ymd="date +%Y/%m/%d"
#export LANG="en_US.UTF-8"
zstyle ":completion:*" list-colors "${LS_COLORS}"
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="$HOME/.sdkman"
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
| true
|
e54d58e26f46557e833333123ca3096c709c9f61
|
Shell
|
fbuether/GoodNight2
|
/service/runPerformance.sh
|
UTF-8
| 539
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -xm
# Collect a .NET perfcollect trace around the performance test suite:
# install perfcollect on first use, start a collection in the background,
# run the tests, then bring the collector to the foreground and view the
# resulting trace.  -x traces commands, -m enables job control for `fg`.
sudo echo "requesting sudo privs, to properly launch sub-process..."
if [ ! -f perfcollect ]
then
    curl -OL http://aka.ms/perfcollect
    chmod +x ./perfcollect
    sudo ./perfcollect install
fi
make build
# start collection in the background; $! is the sudo wrapper's pid
sudo ./perfcollect collect testSession &
sudoprocid=$!
sleep 1
# the actual perfcollect process is sudo's child
echo `ps --ppid $sudoprocid -o pid=`
perfprocid=`ps --ppid $sudoprocid -o pid=`
echo $perfprocid
make test-perf
# foreground the collector so it can be stopped interactively (Ctrl-C)
fg
# sudo kill -INT $perfprocid
# wait $perfprocid
# sudo chown fbuether:fbuether testSession.trace.zip
sudo ./perfcollect view testSession
|
001210771ce86024e037be17b8f05883352c6198
|
Shell
|
azago-dev/docker-magento2-7.4
|
/updateUrl
|
UTF-8
| 498
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Point a dockerised Magento 2 installation at a new base URL.
# Usage: ./updateUrl <domain-without-scheme> [db-container-id]
url=$1
if [ "$2" != "" ]
then
    container=$2
else
    # default to the compose database container
    container=$(docker ps -qf "name=db")
fi
if [[ $url == *"http"* ]]; then
    echo "Error! Enter your URL without http, like: google.com"
    # non-zero status so calling scripts can detect the bad input
    # (previously this error path exited 0)
    exit 1;
fi
# rewrite both base URLs directly in the database, then flush/clean caches
docker exec -i "$container" mysql -umagento -pmagento magento <<< "UPDATE core_config_data SET value = 'http://$url/' WHERE path IN ('web/secure/base_url', 'web/unsecure/base_url')" &&
./magento c:f &&
./magento c:c &&
echo "Done! The url was updated! http://$url/"
|
8c56e13757af8969fda8621784b66fa3de209dda
|
Shell
|
f1lm/MS-2014-Boise-State-University
|
/Programming Languages/pub/ch8/excep.sh
|
UTF-8
| 220
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of exception-style error handling in shell: foo() "throws" by
# sending SIGUSR1 to the script's own PID ($$), and the trap "catches"
# it, printing "not ok" and aborting with status 1.
Error() {
    echo "not ok"
    exit 1;
}
trap "Error" SIGUSR1
foo() {
    # "throw" when called without the required argument
    if [ $# -lt 1 ] ; then
        kill -SIGUSR1 $$
    fi
    # much more code
    echo "foo() done"
}
foo "$@"
# much more code
echo "ok"
| true
|
2e9b2461b7c4f2b05a6c7b621c5e69fb84d4ee0b
|
Shell
|
olivierpierre/openflash
|
/cmake/check_params.sh
|
UTF-8
| 1,451
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# Cross-check Param.cpp: every parameter that is parsed (parseInt/Double/
# String/Bool plus a hand-maintained list) must also be declared via
# ADD_PARAM in getParamList().  Prints instructions for the first missing
# one, or "ALL OK".
# NOTE(review): the temp files use fixed names in the current directory
# and the error path exits with status 0 -- confirm both are intended.
if [ ! "$#" = "1" ]; then
    echo "Usage : $0 <Path to Param.cpp>"
    exit
fi
# collect every parameter name passed to a parseXXX("...") call
sed -rn "s/^.*parseInt\(\"(.*)\".*/\1/p" $1 > parsed.tmp123456
sed -rn "s/^.*parseDouble\(\"(.*)\".*/\1/p" $1 >> parsed.tmp123456
sed -rn "s/^.*parseString\(\"(.*)\".*/\1/p" $1 >> parsed.tmp123456
sed -rn "s/^.*parseBool\(\"(.*)\".*/\1/p" $1 >> parsed.tmp123456
# These are not parsed using parseXXX, lets add them by hand :
echo "ppc_models.performance_model.TON" >> parsed.tmp123456
echo "ppc_models.performance_model.TIN" >> parsed.tmp123456
echo "ppc_models.performance_model.IO" >> parsed.tmp123456
echo "ppc_models.performance_model.BERS" >> parsed.tmp123456
echo "ppc_models.performance_model.PTON" >> parsed.tmp123456
echo "ppc_models.performance_model.PTIN" >> parsed.tmp123456
echo "ppc_models.performance_model.PIO" >> parsed.tmp123456
echo "ppc_models.performance_model.PBERS" >> parsed.tmp123456
# collect every parameter name declared through ADD_PARAM("...")
sed -rn 's/.*ADD_PARAM\("([a-zA-Z0-9\_\.]+)", .*/\1/p' $1 > defined.tmp123456
while read line; do
    match=`grep $line defined.tmp123456`
    if [ "$match" = "" ]; then
        echo "Please define parameter \"$line\" in Param.cpp, in the static function \"getParamList()\", using that format :"
        echo "ADD_PARAM(\"$line\", \"<description>\", <TYPE>, <Optional ?>);"
        echo "(See the function for more info)"
        rm parsed.tmp123456
        rm defined.tmp123456
        exit
    fi
done < parsed.tmp123456
echo "ALL OK"
rm parsed.tmp123456
rm defined.tmp123456
|
9575907521191c28f0ef9538e39864fe2c5951ad
|
Shell
|
fish2000/Praxa
|
/Praximes/Library/venv-functions.sh
|
UTF-8
| 1,937
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Sourced inside an activated virtualenv: defines v* helper functions that
# run the env's tools (bpython/ipython/django/supervisor/...) with the
# project's settings, and decorates the bash prompt with the env name.
# NOTE(review): VIRTUAL_ENV, INSTANCE_* , IPYTHONDIR, DJANGO_*, SUPERVISOR*
# and HONCHO are expected to be exported by the environment that sources
# this file.
cd $VIRTUAL_ENV
echo "+ Setting up development environment bash functions ..."
# cd with HOME/CDPATH rebased onto the virtualenv
function vcd () {
    HOME=$VIRTUAL_ENV \
    CDPATH="${VIRTUAL_ENV}:${VIRTUAL_ENV}/local:${VIRTUAL_ENV}/var:/tmp:${CDPATH}" \
    eval "cd $@"
}
# interactive bpython shell
function vbp () {
    SIGNALQUEUE_RUNMODE=SQ_SYNC \
    ${INSTANCE_BIN}/bpython \
        --interactive \
        $INSTANCE_BPYTHON_SETTINGS
}
# interactive ipython shell with the project profile
function vip () {
    SIGNALQUEUE_RUNMODE=SQ_SYNC \
    ${INSTANCE_BIN}/ipython $@ \
        --ipython-dir=${IPYTHONDIR} \
        --profile-dir=${INSTANCE_IPYTHON_PROFILE} \
        --deep-reload \
        --no-confirm-exit \
        --autoindent \
        --color-info \
        --nosep \
        --pprint -i \
        $INSTANCE_BPYTHON_SETTINGS
}
# ipython notebook server for this env
function vnb () {
    SIGNALQUEUE_RUNMODE=SQ_SYNC \
    ${INSTANCE_BIN}/ipython notebook \
        --ipython-dir=${IPYTHONDIR} \
        --profile-dir=${INSTANCE_IPYTHON_PROFILE} \
        --notebook-dir=${INSTANCE_NOTEBOOKS} \
        --logfile=${INSTANCE_LOGS}/ipy-notebook.log \
        --gui=osx \
        --pylab=osx \
        --deep-reload \
        --autoindent \
        --color-info \
        --nosep \
        --pprint
}
# django-admin with the project's settings module
function vj () {
    DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE \
    eval "${DJANGO_ADMIN} $@"
}
# supervisord / supervisorctl wrappers
function vrun () {
    DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE \
    eval "${SUPERVISORD} $@"
}
function vsuper () {
    DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE \
    eval "${SUPERVISORCTL} $@"
}
# open the env in TextMate
function vmate () {
    DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE \
    eval "mate ${VIRTUAL_ENV} $@"
}
# honcho (foreman clone) process runner
function vforeman () {
    DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE \
    eval "${HONCHO} start $@"
}
# Bash prompt: prefix with the virtualenv's directory name
PS1="\[\033[01;33m\]($(basename ${VIRTUAL_ENV}))\[\033[00m\] ${_OLD_VIRTUAL_PS1}"
|
aac502fe98a4f15486ceabe53ff488634192a022
|
Shell
|
drzhnin/docker-ikev2-vpn-server
|
/run.sh
|
UTF-8
| 3,979
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint for the IKEv2 VPN server: enables forwarding and NAT,
# applies optional traffic shaping, obtains/links a Let's Encrypt certificate,
# writes the strongSwan configuration, then runs ipsec in the foreground.
# Environment: VPNHOST (public FQDN), LEEMAIL (Let's Encrypt e-mail),
# DNS_SERVERS (optional), SPEED_LIMIT (optional, Mbit/s),
# RADIUS_SERVER / RADIUS_SERVER_SECRET (optional).
VPNIPPOOL="10.15.1.0/24"
LEFT_ID=${VPNHOST}
# Enable IPv4/IPv6 forwarding and NDP proxying for routed VPN traffic.
sysctl net.ipv4.ip_forward=1
sysctl net.ipv6.conf.all.forwarding=1
sysctl net.ipv6.conf.eth0.proxy_ndp=1
# DNS servers pushed to clients (defaults to Cloudflare + Google).
if [ ! -z "$DNS_SERVERS" ] ; then
DNS=$DNS_SERVERS
else
DNS="1.1.1.1,8.8.8.8"
fi
# Optional rate limit on eth0 in Mbit/s (ingress police + egress tbf).
if [ ! -z "$SPEED_LIMIT" ] ; then
tc qdisc add dev eth0 handle 1: ingress
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip src 0.0.0.0/0 police rate ${SPEED_LIMIT}mbit burst 10k drop flowid :1
tc qdisc add dev eth0 root tbf rate ${SPEED_LIMIT}mbit latency 25ms burst 10k
fi
# Masquerade the VPN pool out of eth0; IPsec-policy traffic passes untouched.
iptables -t nat -A POSTROUTING -s ${VPNIPPOOL} -o eth0 -m policy --dir out --pol ipsec -j ACCEPT
iptables -t nat -A POSTROUTING -s ${VPNIPPOOL} -o eth0 -j MASQUERADE
iptables -L
# First run only: obtain a certificate for $VPNHOST and symlink cert/key/chain
# where strongSwan expects them.
if [[ ! -f "/usr/local/etc/ipsec.d/certs/fullchain.pem" && ! -f "/usr/local/etc/ipsec.d/private/privkey.pem" ]] ; then
certbot certonly --standalone --preferred-challenges http --agree-tos --no-eff-email --email ${LEEMAIL} -d ${VPNHOST}
ln -f -s /etc/letsencrypt/live/${VPNHOST}/cert.pem /usr/local/etc/ipsec.d/certs/cert.pem
ln -f -s /etc/letsencrypt/live/${VPNHOST}/privkey.pem /usr/local/etc/ipsec.d/private/privkey.pem
ln -f -s /etc/letsencrypt/live/${VPNHOST}/chain.pem /usr/local/etc/ipsec.d/cacerts/chain.pem
fi
# certbot defaults: larger key; reload ipsec after each renewal.
echo 'rsa-key-size = 4096
renew-hook = ipsec reload && ipsec secrets
' > /etc/letsencrypt/cli.ini
# Remove a stale charon pid file left by an unclean container stop.
rm -f /var/run/starter.charon.pid
# Regenerate ipsec.conf from scratch.
# NOTE(review): this only (re)writes the file when it already exists -- if
# the image ever ships without ipsec.conf it is never created. The guard
# looks inverted; confirm against the base image before changing.
if [ -f "/usr/local/etc/ipsec.conf" ]; then
rm /usr/local/etc/ipsec.conf
cat >> /usr/local/etc/ipsec.conf <<EOF
config setup
charondebug="ike 1, knl 1, cfg 1"
uniqueids=never
conn ikev2-vpn
auto=add
compress=no
type=tunnel
keyexchange=ikev2
ike=aes128-sha1-modp1024,aes128-sha1-modp1536,aes128-sha1-modp2048,aes128-sha256-ecp256,aes128-sha256-modp1024,aes128-sha256-modp1536,aes128-sha256-modp2048,aes256-aes128-sha256-sha1-modp2048-modp4096-modp1024,aes256-sha1-modp1024,aes256-sha256-modp1024,aes256-sha256-modp1536,aes256-sha256-modp2048,aes256-sha256-modp4096,aes256-sha384-ecp384,aes256-sha384-modp1024,aes256-sha384-modp1536,aes256-sha384-modp2048,aes256-sha384-modp4096,aes256gcm16-aes256gcm12-aes128gcm16-aes128gcm12-sha256-sha1-modp2048-modp4096-modp1024,3des-sha1-modp1024!
esp=aes128-aes256-sha1-sha256-modp2048-modp4096-modp1024,aes128-sha1,aes128-sha1-modp1024,aes128-sha1-modp1536,aes128-sha1-modp2048,aes128-sha256,aes128-sha256-ecp256,aes128-sha256-modp1024,aes128-sha256-modp1536,aes128-sha256-modp2048,aes128gcm12-aes128gcm16-aes256gcm12-aes256gcm16-modp2048-modp4096-modp1024,aes128gcm16,aes128gcm16-ecp256,aes256-sha1,aes256-sha256,aes256-sha256-modp1024,aes256-sha256-modp1536,aes256-sha256-modp2048,aes256-sha256-modp4096,aes256-sha384,aes256-sha384-ecp384,aes256-sha384-modp1024,aes256-sha384-modp1536,aes256-sha384-modp2048,aes256-sha384-modp4096,aes256gcm16,aes256gcm16-ecp384,3des-sha1!
fragmentation=yes
forceencaps=yes
dpdaction=clear
dpddelay=900s
rekey=no
left=%any
leftid=@$LEFT_ID
leftcert=cert.pem
leftsendcert=always
leftsubnet=0.0.0.0/0
right=%any
rightid=%any
rightauth=eap-mschapv2
rightsourceip=10.15.1.0/24
rightdns=$DNS
rightsendcert=never
eap_identity=%identity
EOF
fi
# Point strongSwan at the RSA private key (created once).
if [ ! -f "/usr/local/etc/ipsec.secrets" ]; then
cat > /usr/local/etc/ipsec.secrets <<EOF
: RSA privkey.pem
EOF
fi
# Optional RADIUS-backed EAP authentication and accounting.
if [[ ! -z "$RADIUS_SERVER" && ! -z "$RADIUS_SERVER_SECRET" ]]; then
rm /usr/local/etc/strongswan.d/charon/eap-radius.conf
cat >> /usr/local/etc/strongswan.d/charon/eap-radius.conf <<EOF
eap-radius {
accounting = yes
accounting_close_on_timeout = no
accounting_interval = 500
close_all_on_timeout = no
load = yes
nas_identifier = $VPNHOST
# Section to specify multiple RADIUS servers.
servers {
primary {
address = $RADIUS_SERVER
secret = $RADIUS_SERVER_SECRET
auth_port = 1812 # default
acct_port = 1813 # default
}
}
}
EOF
fi
sysctl -p
# Foreground so the container keeps running while ipsec is up.
ipsec start --nofork
| true
|
1238ede570a95f7fa29a0667a4cb50199f44a12b
|
Shell
|
timoast/ngs-tools
|
/download_from_sra.sh
|
UTF-8
| 3,559
| 4.15625
| 4
|
[] |
no_license
|
#! /bin/bash
# downloads data for all accessions in an input file
# sra files are moved to their own folders
# then converted to fastq and gzipped
# input can be a list of SRA runs (SRR) or SRA projects (SRP)
infile= cores=10 fastq=false outdir="."
usage="$(basename "$0") -- download data from NCBI SRA or ENA
-h show help and exit
-f [file] input file with a list of SRA accession names (runs or projects)
-p [cores] number of cores to use for fastq compression (default 10)
-q download fastq files from ENA (default NCBI SRA)
-o output directory path (default current directory)"
if [ $# -eq 0 ]; then
    printf "$usage\n"
    exit 1
fi
while getopts ":f:p:o:qh" opt; do
    case $opt in
        h) printf "$usage\n"
           exit
           ;;
        f) infile=$OPTARG
           ;;
        p) cores=$OPTARG
           ;;
        o) outdir=$OPTARG
           ;;
        q) fastq=true
           ;;
        :) printf "missing argument for -%s\n" "$OPTARG" >&2
           printf "$usage\n" >&2
           exit 1
           ;;
        \?) printf "illegal option: -%s\n" "$OPTARG" >&2
            printf "$usage\n" >&2
            exit 1
            ;;
    esac
done
shift $((OPTIND - 1))
# -z instead of the original bare [ ! $infile ]: robust when the value is
# empty or starts with '-'.
if [ -z "$infile" ]; then
    printf "missing argument for -f\n" >&2
    printf "$usage\n" >&2
    exit 1
fi
# first check if sratoolkit is installed
# only needed if fastq is false (downloading from SRA)
if ! command -v fastq-dump >/dev/null && [ "$fastq" = false ]; then
    printf "sratoolkit not found\n" >&2
    exit 1
fi
# check if pigz exists
# NOTE: warn only -- the ENA path never needs pigz; the NCBI path will fail
# at the compression step if it is genuinely missing.
if ! command -v pigz >/dev/null; then
    printf "pigz not found\n" >&2
fi
# BUG FIX: -p so an existing output directory is not an error (the original
# bare mkdir failed on reruns, and the ENA branch even re-ran it on every
# loop iteration). Created once, up front, for both branches.
mkdir -p "$outdir"
if [ "$fastq" = false ]; then
    # Download .sra files from NCBI, one accession (run or project) per line.
    while read -r acc; do
        [[ -z $acc ]] && continue
        printf "Downloading ${acc} from NCBI\n"
        prefix=${acc:0:6}
        sra=${acc:0:3}
        if [ "$sra" = "SRR" ]; then
            wget -r --no-parent --no-directories \
            "ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/${prefix}/${acc}/*"
        else
            wget -r --no-parent --no-directories \
            "ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/ByStudy/sra/SRP/${prefix}/${acc}/*"
        fi
    done < "$infile"
    # Convert each downloaded .sra to fastq in its own folder, then compress.
    for myfile in ./*.sra; do
        # skip the literal pattern when nothing was downloaded
        [ -e "$myfile" ] || continue
        # strip only the trailing .sra (the original's ${var//.sra/ } array
        # trick also mangled names containing ".sra" in the middle)
        fname=${myfile%.sra}
        mkdir -p "$fname"
        mv "$myfile" "$fname"
        cd "$fname" || continue
        printf "Converting to fastq\n"
        # $myfile still resolves here: it is a ./-relative name and the file
        # was just moved into the current directory.
        fastq-dump --split-files -v "$myfile"
        if [ -f "${fname}.fastq" ] || [ -f "${fname}_1.fastq" ]; then
            rm -f "$myfile"
            printf "Compressing files\n"
            nice pigz -p "$cores" *.fastq
        else
            printf "conversion of ${fname} to fastq failed\n" >&2
        fi
        cd ..
        mv "$fname" "$outdir"
    done
else
    # Downloading from the ENA
    while read -r acc; do
        # BUG FIX: skip blank lines BEFORE mkdir/cd -- the original created
        # and entered "${outdir}/" first and then continued without cd'ing
        # back, breaking every later relative path.
        [[ -z $acc ]] && continue
        mkdir -p "${outdir}/${acc}"
        cd "${outdir}/${acc}" || continue
        dl=false
        printf "Downloading ${acc} from EBI\n"
        prefix=${acc:0:6}
        sra=${acc:0:3}
        # first check if normal directory structure is present
        if [[ $(curl -s ftp://ftp.sra.ebi.ac.uk//vol1/fastq/${prefix}/${acc}/ | grep ${acc}) ]]; then
            wget -r --no-parent --no-directories \
            "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/${prefix}/${acc}/*"
            dl=true
        else
            # couldn't find it, look through all the subdirectories
            for c in $(seq -w 000 100); do
                if [[ $(curl -s ftp://ftp.sra.ebi.ac.uk//vol1/fastq/${prefix}/${c}/ | grep ${acc}) ]]; then
                    wget -r --no-parent --no-directories \
                    "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/${prefix}/${c}/${acc}/*"
                    dl=true
                fi
            done
            if [ "$dl" = false ]; then
                printf "Download failed for ${acc}\n" >&2
            fi
        fi
        # return to where we started for the next accession
        cd -
    done < "$infile"
fi
| true
|
11645b7b0502972fb472b33dbc3e9eb255f4abd1
|
Shell
|
kogn/scft
|
/script/schedule.sh
|
UTF-8
| 282
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# schedule.sh -- launch ./main up to <cycles> times, throttled so that no
# more than <max-tasks> instances of <process-name> are running at once.
#
# Usage: schedule.sh <process-name> <max-tasks> <cycles>
pro_name=$1
task_num=$2
cycle_num=$3

i=1
while (( i <= cycle_num )); do
	# pgrep counts matching processes without also counting the grep
	# stage itself, which the original "ps -A | grep | wc -l" could.
	pro_num=$(pgrep -c "$pro_name")
	# BUG FIX: the original condition read "$pro_num < <ask_num" -- a
	# runtime syntax error; the intended operand is $task_num.
	if (( pro_num < task_num )); then
		echo "$i"
		./main 2 2
		sleep 1s
		i=$((i+1))
	else
		echo 'sleeping 600s'
		sleep 600s
	fi
done
| true
|
540208d86c2872881ed19941d6fa2b6c5d33bd81
|
Shell
|
timoguin/shell-functions
|
/source-functions.sh
|
UTF-8
| 596
| 4.4375
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# =============================================================================
# Sources a list of functions passed into the script. Functions must have a
# matching directory with *.sh files.
# =============================================================================
# NB: $functions is deliberately left unquoted in the loop below -- each
# whitespace-separated argument names one function directory.
functions="$@"
echo "Sourcing functions: $functions"
for func in $functions; do
	if [ ! -d "$func" ]; then
		echo "Function not found: $func" 1>&2
	else
		# If the directory holds no *.sh files the literal pattern reaches
		# 'source', which then reports a (harmless) sourcing failure.
		for script in "$func"/*.sh; do
			. "$script" || echo "Sourcing script failed: $script" 1>&2
		done
		echo "Sourced function: $func"
	fi
done
| true
|
7a50aa736939fc0f6ac76df05a541aaa2d773993
|
Shell
|
wnielson/cement
|
/scripts/mkrelease.sh
|
UTF-8
| 719
| 3.546875
| 4
|
[
"MIT",
"Python-2.0"
] |
permissive
|
#!/bin/bash
# Build release artifacts for cement under ~/cement-<version>: one combined
# source tarball, per-package PyPI tarballs, and the Sphinx docs.
status=$(git status --porcelain)
# print(...) so the probe works under both Python 2 and 3 (the original
# py2-only "print expr" is a SyntaxError on python3).
version=$(python -c "from pkg_resources import get_distribution ; print(get_distribution('cement').version)")
# major.minor -- currently unused, kept for reference
short=$(echo "$version" | awk -F . {' print $1"."$2 '})
dir=~/cement-${version}
if [ "${status}" != "" ]; then
	echo
	echo "WARNING: not all changes committed"
fi
# -p: tolerate directories left behind by a previous (possibly aborted) run.
mkdir -p "${dir}" "${dir}/doc" "${dir}/downloads" "${dir}/pypi"
# all
git archive HEAD --prefix="cement-${version}/" | gzip > "${dir}/downloads/cement-${version}.tar.gz"
# individual
for i in cement cement.devtools cement.test; do
	pushd "src/$i" || exit 1
	git archive HEAD --prefix="${i}-${version}/" | gzip > "${dir}/pypi/${i}-${version}.tar.gz"
	popd
done
sphinx-build doc/source "${dir}/doc"
| true
|
de45388fee5d4fa0d28f9717b3dfe1f4073453c1
|
Shell
|
badgeek/DocsifyWebhook
|
/publish.sh
|
UTF-8
| 829
| 3.875
| 4
|
[] |
no_license
|
# Resolve the directory this script lives in and work from there, so the
# relative paths below (config.sh, source_git, publish_*.sh) behave the same
# regardless of the caller's working directory.
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd`
popd > /dev/null
cd $SCRIPTPATH
# Pull in GIT_SOURCE_REPO and any other publish settings.
. "./config.sh"
export GIT_SOURCE_FOLDER="source_git"
export GIT_DEPLOY_DIR="dist"
# Expand a template file: the template's $variables are substituted with the
# current shell's values by eval-ing it as a here-document.
# NOTE(review): eval executes anything embedded in the template ($(...)
# included) -- only use with trusted templates. _platforms appears unused.
function generate_package_template() #platforms
{
local _platforms=${1}
local _template=${2}
eval "cat <<EOF
$(<./${_template})
EOF
" 2> /dev/null
}
# Clone the documentation source on the first run; fast-forward it afterwards.
if [ -d "source_git/.git" ] #if directory exists
then
echo "DIRECTORY EXISTS"
cd $GIT_SOURCE_FOLDER
git pull
cd ..
else
git clone $GIT_SOURCE_REPO $GIT_SOURCE_FOLDER
fi
# Output flavour from argv[1]; anything other than "gitbook"/"docsify" leaves
# OUTPUT_TYPE unset, which the branch below treats as docsify.
if [ "${1}" == "gitbook" ]; then
OUTPUT_TYPE="gitbook"
elif [ "${1}" == "docsify" ]; then
OUTPUT_TYPE="docsify"
fi
if [ "${OUTPUT_TYPE}" == "gitbook" ]; then
echo "PUBLISH GITBOOK"
source "./publish_gitbook.sh"
else
echo "PUBLISH DOCSIFY"
source "./publish_docsify.sh"
fi
# Hand off to the deployment step.
./deploy.sh
| true
|
a10c14538b5a374afda61e49bf91059fe7cdc062
|
Shell
|
tetherless-world/whyis
|
/upgrade_py3.sh
|
UTF-8
| 917
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Upgrade a Whyis installation from Python 2 to Python 3: archive the old
# virtualenv, swap the Puppet modules for py3-capable versions, and re-run
# the install manifest. Final manual steps are printed at the end.
echo "Archiving Python 2 virtualenv to /apps/py2venv.tgz..."
pushd /apps/whyis
sudo tar cfz /apps/py2venv.tgz venv
sudo rm -rf venv
popd
echo "Done."
# BUG FIX: 'service' takes the unit name first, then the action; the
# original "service stop jetty8" addressed a service literally named "stop".
sudo service jetty8 stop
sudo /opt/puppetlabs/bin/puppet module uninstall stankevich-python
sudo /opt/puppetlabs/bin/puppet module install puppet-python
sudo /opt/puppetlabs/bin/puppet module install puppetlabs-vcsrepo
sudo /opt/puppetlabs/bin/puppet module install maestrodev-wget
sudo /opt/puppetlabs/bin/puppet module install puppetlabs-apt
sudo /opt/puppetlabs/bin/puppet module install richardc-datacat
sudo /opt/puppetlabs/bin/puppet module install puppetlabs-java
sudo /opt/puppetlabs/bin/puppet module install puppet-nodejs --version 7.0.1
sudo /opt/puppetlabs/bin/puppet apply puppet/manifests/install.pp
echo "To complete the upgrade, re-install your whyis app using 'pip install -e' into /apps/whyis/venv and restart apache2 and celeryd."
| true
|
585ca1d96c53f1ff1d03f3720ca9bdb69de83b3f
|
Shell
|
neam/dokku-host-provisioning
|
/shell-scripts-to-include/docker-enter.sh
|
UTF-8
| 358
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Uses nsenter to step into a running container
# usage:
#   ./docker-attach.sh CONTAINER_ID
# for docker 0.9+
# Ask the daemon for the container's init PID directly; this replaces the
# fragile backtick `inspect | grep | sed` pipeline and quotes the id.
export PID=$(docker inspect --format '{{.State.Pid}}' "$1")
set -x
nsenter --target "$PID" --mount --uts --ipc --net --pid
# for docker <0.9
# lxc-attach -n `sudo docker inspect $1 | grep '"ID"' | sed 's/[^0-9a-z]//g'` /bin/bash
| true
|
2221d78239731336aa07ddb6324e85b98cde44b4
|
Shell
|
kaihendry/pi.dabase.com
|
/newinfo.sh
|
UTF-8
| 635
| 3.03125
| 3
|
[] |
no_license
|
# newinfo.sh -- render an HTML status table from every host's */index.txt.
# The last line of each index.txt is word-split into positional fields:
#   "0 <epoch>"  -> host offline since <epoch>
#   otherwise    -> online; fields 3..9 mirror the header columns below
#   (field meanings inferred from the header row -- TODO confirm)
cat <<END
<table>
<thead>
<tr>
<th>Name</th>
<th>Port</th>
<th>Mac Address</th>
<th>Local IP</th>
<th>Origin IP</th>
<th>Origin port</th>
<th>Host</th>
<th>Host port</th>
<th>Notes</th>
</tr>
</thead>
<tbody>
END
for i in */index.txt
do
echo "<tr>"
echo "<td>$(dirname "$i")</td>"
# Deliberately unquoted: split the status line into $1..$9.
set -- $(tail -n1 "$i")
# BUG FIX: quote "$1" and use portable '=' -- the bare 'test $1 == 0'
# errored out on empty status lines and '==' is a bashism under test.
if test "$1" = 0
then
echo "<td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>offline since $(date --date="@$2")</td>"
else
echo "<td>$3</td><td>$4</td><td>$5</td><td>$6</td><td>$7</td><td>$8</td><td>$9</td><td>online since $(date --date="@$2")</td>"
fi
echo "</tr>"
done
cat <<END
</tbody>
</table>
END
| true
|
fb85b7a113db6c5a0b131fa5f41adef864d91c2a
|
Shell
|
BGforgeNet/BGforge-MLS-IElib
|
/docs/generate.sh
|
UTF-8
| 253
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the Jekyll docs site: convert the YAML data files into markdown
# pages, then run a production Jekyll build inside the site directory.
set -xeu -o pipefail

readonly site_dir="docs"
readonly pages_dir="_pages"
readonly data_dir="data"

cd "$site_dir"
mkdir -p "$pages_dir"

# yaml_to_md.py needs ruamel.yaml to parse the data files.
pip3 install ruamel.yaml
./yaml_to_md.py "$data_dir" "$pages_dir"

bundle install
JEKYLL_ENV=production bundle exec jekyll build
| true
|
706e26795a015510b5aba779b40997a4855ea0d7
|
Shell
|
samsalisbury/sous-demo
|
/set-version.sh
|
UTF-8
| 197
| 3.328125
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Stamp a new release version: write it to 'customstring', commit, tag, and
# push both the branch and the tag.
set -ex

if (( $# < 1 )); then
  echo "Usage: $0 <version>"
  exit 1
fi

version=$1

echo "$version" > customstring
git commit -am "Updating version to $version"
git tag "$version"
git push
git push --tags
| true
|
879ba1dfc0e20b62d6e4ede00525c0aca9e4c544
|
Shell
|
Royaladvisor26/automatic-romancescam-digger
|
/captions/im2txt/generate_caption.sh
|
UTF-8
| 1,171
| 2.875
| 3
|
[] |
no_license
|
# Build and run the im2txt inference binary to caption a single JPEG with a
# pre-trained Show-and-Tell checkpoint. Expects a bazel workspace in
# ./im2txt-workspace and model files under ~/im2txt/model.
cd im2txt-workspace
# Path to checkpoint file or a directory containing checkpoint files. Passing
# a directory will only work if there is also a file named 'checkpoint' which
# lists the available checkpoints in the directory. It will not work if you
# point to a directory with just a copy of a model checkpoint: in that case,
# you will need to pass the checkpoint path explicitly.
CHECKPOINT_PATH="${HOME}/im2txt/model/train/model-renamed.ckpt-2000000"
# Vocabulary file generated by the preprocessing script.
VOCAB_FILE="${HOME}/im2txt/model/word_counts2.txt"
# JPEG image file to caption.
#IMAGE_FILE="${HOME}/mscoco/raw-data/val2014/COCO_val2014_000000224477.jpg"
IMAGE_FILE="${HOME}/scam-images-by-type/scam/01287c03678985c73c99033ae64b359f.jpg"
# Build the inference binary.
bazel build -c opt im2txt/run_inference
# Ignore GPU devices (only necessary if your GPU is currently memory
# constrained, for example, by running the training script).
export CUDA_VISIBLE_DEVICES=""
# Run inference to generate captions.
bazel-bin/im2txt/run_inference \
--checkpoint_path=${CHECKPOINT_PATH} \
--vocab_file=${VOCAB_FILE} \
--input_files=${IMAGE_FILE}
cd ..
| true
|
3db7a9135e43a56e63939244f86bd5903cf6c307
|
Shell
|
dozent2018/IFA_LINUX_DEV
|
/locktest 2.sh
|
UTF-8
| 1,543
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# locktest.sh checks whether /tmp/locktest/lock exists; if not, it creates it.
# If it exists, it checks whether it may write to the file.
# When write permission is available:
#	drop write permission (-r--r--r--) to "hold" the lock,
#	append a message with PID, date and time to /tmp/locktest/common,
#	sleep briefly (2s), then restore (-rw-rw-rw-) to release.
# When write permission is not available:
#	read the holder's PID from /tmp/locktest/lock,
#	report "Datei gesperrt durch Prozess <PID>" (file locked by <PID>),
#	wait 5 seconds and try again.
# The whole thing runs in an endless loop. To test, start locktest.sh in
# 1-2 more terminals and watch /tmp/locktest/{lock,common} with tail -f.
#
# NOTE(review): chmod-based locking is inherently racy -- two processes can
# both pass the -w test before either removes write permission. Fine for the
# exercise, not for production (use flock/mkdir locking there).

# Release the lock when interrupted with Ctrl-C.
trap 'rm -f /tmp/locktest/lock' SIGINT

if ! [ -e /tmp/locktest/lock ]; then
	# BUG FIX: -p, because the directory may survive from an earlier run
	# even after the lock file itself was removed; plain mkdir then failed.
	mkdir -p /tmp/locktest
	touch /tmp/locktest/lock
	chmod ugo+w /tmp/locktest/lock
fi

while true; do
	if [ -w /tmp/locktest/lock ]; then
		# Acquire: record our PID, then drop everyone's write permission.
		echo $$ > /tmp/locktest/lock
		chmod ugo-w /tmp/locktest/lock
		echo $(date) $$ >> /tmp/locktest/common
		sleep 2
		# Release the lock for the other contenders.
		chmod ugo+w /tmp/locktest/lock
	else
		lock_pid=$(cat /tmp/locktest/lock)
		echo "Datei gesperrt durch Prozess $lock_pid"
		sleep 5
	fi
done
| true
|
eff65788f8e4032fdd55021ee4795216e8035b72
|
Shell
|
ookamiinc/Sporticon
|
/.circleci/deploy-website.sh
|
UTF-8
| 434
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Publish the built site (website/_site) onto the gh-pages branch.
# Requires USER_NAME and USER_EMAIL in the environment (CI secrets).
# BUG FIX: abort on the first failed step instead of pushing a half-built
# tree when e.g. checkout or pull fails.
set -e

git config user.name "$USER_NAME"
git config user.email "$USER_EMAIL"

git checkout gh-pages
git pull origin gh-pages

# Wipe everything at the repo root except the sources/metadata we keep.
# -mindepth 1 stops find from ever handing '.' itself to rm -rf.
find . -mindepth 1 -maxdepth 1 ! -name 'website' ! -name 'CNAME' ! -name '.git' ! -name '.gitignore' ! -name '.circleci' -exec rm -rf {} \;

mv website/_site/* .
rm -R website

git add -fA
git commit --allow-empty -m "Site build from $(git log master -1 --pretty=%h)"
git push origin gh-pages
echo "Deployed site successfully"
|
6130ce48c134982ec71d78a7d49e826ee8b6a658
|
Shell
|
dmnzs10/mk
|
/mk-updater.sh-2019010201
|
UTF-8
| 3,116
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Fan-out runner: fetch a command package from the DM database, then execute
# it over SSH on each Mikrotik authentication concentrator listed in the TI
# database, recording reachability and credential status back into DM.
# Usage: mk-updater.sh <package-name>
#
# NOTE(review): DB hosts/passwords are blank here and are passed as
# -p<password> on the mysql command line (visible in `ps`) -- consider
# ~/.my.cnf or --defaults-extra-file.
# NOTE(review): USERS/PASSW/PORTS below appear unused in this script.
USERS="admin avato noc"
PASSW="senha1 lib6shahco senha2"
PORTS="22 222"
DBTI_HOST=""
DBTI_USER=""
DBTI_PASS=""
DBTI_BASE="avisos_mikrotik"
PORT="222"
DBDM_HOST=""
DBDM_USER="bash_script"
DBDM_PASS=""
DBDM_BASE="hosts_discovery"
COMMAND_LINE=$1
# Query the DM database for the command line to execute on each host
DBDM_CMD=$(echo "SELECT command_line FROM commands WHERE package='${COMMAND_LINE}'" | mysql -N -h $DBDM_HOST -D $DBDM_BASE -u $DBDM_USER -p${DBDM_PASS})
# Query the TI database for the concentrator list (currently only the first
# row; the commented-out query below fetches all of them)
DBTI_CE=$(echo "SELECT server_ip FROM authentication_concentrators ORDER BY id ASC LIMIT 1" | mysql -N -h $DBTI_HOST -D $DBTI_BASE -u $DBTI_USER -p${DBTI_PASS})
#DBTI_CE=$(echo "SELECT server_ip FROM authentication_concentrators ORDER BY id ASC" | mysql -N -h $DBTI_HOST -D $DBTI_BASE -u $DBTI_USER -p${DBTI_PASS})
# Main loop over concentrators
for HOST in $DBTI_CE; do
SSH=0
CRED=""
PING=""
USER=$(echo "SELECT server_user FROM authentication_concentrators WHERE server_ip='${HOST}'" | mysql -N -h $DBTI_HOST -D $DBTI_BASE -u $DBTI_USER -p${DBTI_PASS})
PASS=$(echo "SELECT server_password FROM authentication_concentrators WHERE server_ip='${HOST}'" | mysql -N -h $DBTI_HOST -D $DBTI_BASE -u $DBTI_USER -p${DBTI_PASS})
# record the host in the DM database (errors ignored, e.g. duplicates)
echo "INSERT INTO host (ipv4,ipv4_var,username,passwd,port) VALUES (INET_ATON('${HOST}'),'${HOST}','${USER}','${PASS}','${PORT}')" | mysql -N -h $DBDM_HOST -D $DBDM_BASE -u $DBDM_USER -p${DBDM_PASS} >/dev/null 2>&1
# ICMP connectivity test
ping -q -i 0.2 -c 5 ${HOST} >/dev/null 2>&1
if [ "$?" = 0 ]; then
PING="UP"
# SSH connection test
# NOTE(review): SSH is set to 0 just above, so this guard is always true;
# looks vestigial -- confirm before removing.
if [ $SSH -eq 0 ]; then
sshpass -p ${PASS} ssh -p ${PORT} -o StrictHostKeyChecking=no ${USER}@${HOST} "/" >/dev/null 2>&1
#echo "$?"
# result of the connection test (the commented echo above does not clobber $?)
if [ $? -eq 0 ]; then
SSH="1"
CRED="OK"
echo "Passou com ${USER}@${HOST}:${PORT} - $PASS"
# record this execution round in the DM database
echo "INSERT INTO cmd_executions (commands_idcommands,host_ipv4,host_ipv4_var,date) VALUES ('1',INET_ATON('${HOST}'),'${HOST}',NOW()) " | mysql -N -h $DBDM_HOST -D $DBDM_BASE -u $DBDM_USER -p${DBDM_PASS} >/dev/null 2>&1
# (alternative) run a script inline: text between the EOFs, mind the tabs
#sshpass -p ${PASS} ssh -p ${PORT} -T -o StrictHostKeyChecking=no ${USER}@${HOST} <<-EOF
#	EOF
# run the script: the command line fetched from the DM database
sshpass -p ${PASS} ssh -p ${PORT} -T -o StrictHostKeyChecking=no ${USER}@${HOST} "${DBDM_CMD}"
#echo ""
else
echo " >>> Nao consegui conectar em ${USER}@${HOST}:${PORT} - $PASS - SSH code: $SSH"
CRED="FAIL"
fi # result of the connection test
# UPDATE the stored credential status
echo "UPDATE host SET cred_status='${CRED}',cred_date=NOW() WHERE ipv4=(INET_ATON('${HOST}'))" | mysql -N -h $DBDM_HOST -D $DBDM_BASE -u $DBDM_USER -p${DBDM_PASS} >/dev/null 2>&1
fi # SSH connection test
else # ICMP connectivity test
echo "Host is down!"
fi # end of the ICMP connectivity test
done # end of the host loop
| true
|
70f34df17aa100be70c7e6b8335366785bbdd9df
|
Shell
|
valebedu/vps-init
|
/vps-install.sh
|
UTF-8
| 863
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# ---------------------
# ----- FUNCTIONS -----
# ---------------------
# Print a message framed by same-length dashed lines, with a blank line
# above and below. When $2 == "1" the whole banner is rendered bold red.
#   $1 - message text
#   $2 - optional "1" to colorize (error style)
function log() {
	local msg=$1
	local prefix=""
	local suffix=""
	local line=""
	local i
	# Build an underline exactly as long as the message.
	for (( i = 0; i < ${#msg}; i++ )); do
		line+="-"
	done
	if [[ $2 == "1" ]]; then
		# BUG FIX: the original wrote "\e[31" (missing the terminating
		# 'm'), which garbled the output instead of turning it red.
		prefix="\e[1m\e[31m"
		suffix="\e[0m"
	fi
	echo -e ""
	echo -e "$prefix$line$suffix"
	echo -e "$prefix$msg$suffix"
	echo -e "$prefix$line$suffix"
	echo -e ""
}
# ----------------
# ----- MAIN -----
# ----------------

# Get configuration constants (REMOTE_ADDRESS etc.); abort if the file is
# missing, otherwise the scp/ssh below would target "root@:".
log "Get install configuration"
source "$(pwd)/vps-install.conf" || exit 1

# Copy setup files to remote
log "Copy files to: root@$REMOTE_ADDRESS:/vps"
scp -r "$(pwd)/vps" "root@$REMOTE_ADDRESS:/vps"

# Exec entrypoint on remote
log "Exec through SSH: root@$REMOTE_ADDRESS/vps/vps-entrypoint.sh"
ssh "root@$REMOTE_ADDRESS" "exec /vps/vps-entrypoint.sh"
| true
|
04c035b3a2d76f1d56bef86358e650ac7b24ad04
|
Shell
|
rainerilgen/CHIP-Files
|
/scripts/chip_ha_venv_install.sh
|
UTF-8
| 666
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# Install Home Assistant into a Python 3 virtualenv owned by user 'chip',
# plus the Mosquitto MQTT broker, and register a systemd unit for it.
# Must run as root.
echo "Installing Home Assistant and Mosquitto"
apt-get -y install python-pip python3-dev mosquitto
pip install --upgrade virtualenv
mkdir -p /srv/hass
chown chip /srv/hass
# BUG FIX: the original invoked a bare interactive 'su -s /bin/bash chip',
# so the virtualenv/pip lines after it only ran (as root) once that shell
# exited, and the stray 'exit' then aborted the whole script before the
# systemd unit was installed. Run the user-level steps via 'su -c' instead.
su -s /bin/bash chip -c '
	virtualenv -p python3 /srv/hass
	source /srv/hass/bin/activate
	pip3 install --upgrade homeassistant
'
# Register the systemd unit (we are root; the su -c wrapper the original
# used here was unnecessary). '>' rather than '>>' so re-running the
# installer does not append duplicate unit sections.
cat <<EOF > /etc/systemd/system/home-assistant@chip.service
[Unit]
Description=Home Assistant
After=network.target

[Service]
Type=simple
User=chip
ExecStart=/srv/hass/bin/hass
Restart=always

[Install]
WantedBy=multi-user.target
EOF
systemctl --system daemon-reload
systemctl enable home-assistant@chip
systemctl start home-assistant@chip
| true
|
45cc9a23cbc2cf554191899b0be237c5009f9061
|
Shell
|
BlackPepperSoftware/dockerfiles
|
/build.sh
|
UTF-8
| 706
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Build one image and tag it for the private registry.
#   $1 - image name (placed under the blackpepper/ namespace)
#   $2 - build context directory (may contain its own build.sh)
#   $3 - optional version tag applied in addition to :latest
build_container() {
	local NAME="blackpepper/$1"
	local DIR="$2"
	local VERSION="$3"

	echo "Building $NAME..."

	if [ -x "$DIR/build.sh" ]; then
		# Expected to create $NAME:latest.
		# BUG FIX: run in a subshell so the cd does not leak into the
		# caller -- the original left the working directory changed, which
		# broke every subsequent build_container call using a relative DIR.
		( cd "$DIR" && ./build.sh "$NAME" )
	else
		docker build -t "$NAME" "$DIR"
	fi

	docker tag "$NAME:latest" "registry.blackpepper.co.uk/$NAME:latest"

	if [ -n "$VERSION" ]; then
		# NOTE(review): 'docker tag -f' was removed in Docker 1.12; drop
		# the flag if this ever needs to run on a modern engine.
		docker tag -f "$NAME:latest" "$NAME:$VERSION"
		docker tag -f "$NAME:latest" "registry.blackpepper.co.uk/$NAME:$VERSION"
	fi
}

build_container java oracle-java8 oracle-java8
build_container idea idea 14.1.4
build_container eclipse eclipse mars-r1
build_container maven maven 3.3-jdk-8
build_container sshtun sshtun
| true
|
9f503355e02bc5c7989167ac109fcfbbd84a40c8
|
Shell
|
abyjacobajk/Snake-Ladder
|
/CheckPlaying_UC3.sh
|
UTF-8
| 608
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash -x
echo "Welcome To Snake And Ladder"

# Play options (constants)
NO_PLAY=0
LADDER=1
SNAKE=2

# Current square of the player
playerPosition=0

# Roll the die (1..6), pick a random play option (0..2), and move the
# player: LADDER adds the roll, SNAKE subtracts it, NO_PLAY leaves the
# position untouched. Updates the globals dieValue/playingOptions/
# playerPosition exactly like the original case-based version.
function setPlayerMoves()
{
	dieValue=$(( RANDOM % 6 + 1 ))
	playingOptions=$(( RANDOM % 3 ))
	if [ "$playingOptions" -eq "$LADDER" ]; then
		playerPosition=$(( playerPosition + dieValue ))
	elif [ "$playingOptions" -eq "$SNAKE" ]; then
		playerPosition=$(( playerPosition - dieValue ))
	else
		# NO_PLAY: nothing to do
		playerPosition=$playerPosition
	fi
}

# Start game
setPlayerMoves
| true
|
827b47ef0ac297b37f3632490d1deadb1d6caf95
|
Shell
|
g-koutsou/CDS-1
|
/Optim/Ex/Lapl-3D/sub-A.job
|
UTF-8
| 355
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH -J lapl-A
#SBATCH -N 1
#SBATCH -o A-%j.out
#SBATCH -e A-%j.err

# Sweep OpenMP thread counts for ./lapl on an L=128 lattice, then collect
# the lapl() timing line from every run into orig.txt.
module load openmpi

L=128
omps=(4 8 16 32 64 128)

# One run per thread count; raw output lands in orig-nomp<N>.txt.
for nthreads in "${omps[@]}"; do
    export OMP_NUM_THREADS=$nthreads
    srun numactl --membind 1 ./lapl $L > orig-nomp${nthreads}.txt
done

# Summary rows: "<threads> <lapl() timing line>" (unquoted on purpose so
# the timing line's whitespace collapses, matching the original output).
for nthreads in "${omps[@]}"; do
    echo $nthreads $(grep 'lapl()' orig-nomp${nthreads}.txt)
done > orig.txt
| true
|
c033d98cd76d175b094d56346852905518813b5c
|
Shell
|
japorized/bin
|
/rofi-netctl
|
UTF-8
| 3,506
| 3.984375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# This is just a very simple and naive interface to netctl
# It works for what I need for now
# It is desirable to be able to manage netctl-auto via systemd w/o rp
# See https://wiki.archlinux.org/index.php/Polkit#Allow_management_of_individual_systemd_units_by_regular_users
# This scripts requires the above to be implemented

# config
rofioption="-lines 10 -yoffset -20" # extra rofi flags, word-split where used
terminal="/usr/bin/termite" # terminal emulator used to run editors/wifi-menu
interface="wlp5s0" # wireless interface managed by netctl-auto
# Shared rofi invocation: dmenu mode, case-insensitive, our prompt and
# config flags; any extra arguments pass straight through to rofi.
_rofi() {
  # shellcheck disable=SC2086 -- $rofioption is a deliberately split flag string
  rofi $rofioption -dmenu -i -p "rofi-netctl" "$@"
}
# Present the netctl-auto profiles in rofi and switch to the selection.
# Any output from netctl-auto (e.g. an error) is forwarded to rofi -e.
switch() {
  local net menu=""
  # Collect profile names, stripping the active-profile marker and spaces.
  while IFS= read -r net; do
    if [ -z "$menu" ]; then
      menu=${net}
    else
      menu="${menu}|${net}"
    fi
  done < <(netctl-auto list | tr -d "\*" | tr -d "\ ")
  local picked
  picked=$(echo $menu | _rofi -sep "|" -mesg "Switch network to:")
  if [ -n "$picked" ]; then
    local out
    out=$(netctl-auto switch-to "${picked}")
    if [ -n "$out" ]; then
      echo $out | rofi -e
    fi
  fi
}
# Enable or disable a netctl-auto profile. $1 is the netctl-auto subcommand
# ("enable" or "disable"); the target profile is picked via rofi.
profilemgr() {
IFS=$'\n'
# One array element per profile, with the active marker and spaces stripped.
networks=($(netctl-auto list | tr -d "\*" | tr -d "\ "))
unset IFS
list=""
# Join the names with '|' for rofi's -sep.
for network in "${networks[@]}"; do
if [ -z "$list" ]; then
list=${network}
else
list="${list}|${network}"
fi
done
choice=$(echo $list | _rofi -sep "|" -mesg "${1} profile")
if [ -n "$choice" ]; then
# NOTE(review): $resp is captured but never displayed (switch() pipes its
# response into rofi -e) -- possibly an oversight; confirm before changing.
resp=$(netctl-auto ${1} "${choice}")
fi
}
# Pick one of the netctl profiles (netctl, not netctl-auto, so inactive
# profiles are included too) and open it in $EDITOR inside $terminal, via
# sudo since /etc/netctl is root-owned.
editprofile() {
  local profile menu=""
  while IFS= read -r profile; do
    if [ -z "$menu" ]; then
      menu=${profile}
    else
      menu="${menu}|${profile}"
    fi
  done < <(netctl list | tr -d "\*" | tr -d "\ ")
  local pick
  pick=$(echo $menu | _rofi -sep "|" -mesg "edit profile")
  if [ -n "$pick" ]; then
    ${terminal} -e "sudo ${EDITOR} /etc/netctl/${pick}"
  fi
}
# Create a new netctl profile: either interactively through wifi-menu, or by
# copying one of the example templates into /etc/netctl and opening it in
# $EDITOR for customisation.
addnetwork() {
IFS=$'\n'
# Template names from the examples directory (anything after ':' stripped).
samples=($(ls /etc/netctl/examples | sed 's/:.*//'))
unset IFS
list="use wifi-menu"
for network in "${samples[@]}"; do
list="${list}|${network}"
done
choice=$(echo $list | _rofi -sep "|" -mesg "add profile")
if [ -n "$choice" ]; then
if [ "$choice" = "use wifi-menu" ]; then
${terminal} -e "sudo wifi-menu"
else
# Copying a template requires a name for the new profile.
profilename=$(_rofi -mesg "Give a profile name")
if [ -n "$profilename" ]; then
# Copy the template, give visual feedback, then open it for editing.
${terminal} -e "sudo cp /etc/netctl/examples/${choice} /etc/netctl/${profilename} && echo 'copied' && sleep 5 && sudo ${EDITOR} /etc/netctl/${profilename}"
else
_rofi -e "You must give a profile name"
fi
fi
fi
}
# Restart/start/stop the per-interface netctl-auto systemd unit and pop a
# desktop notification when the action succeeds. Relies on the polkit rule
# referenced in the header to allow unit management without root.
servicecontrol() {
  local unit="netctl-auto@${interface}.service"
  local action
  action=$(echo "restart service|start service|stop service" | _rofi -sep "|")
  case $action in
    "restart service")
      systemctl restart "$unit" && \
        notify-send "netctl-auto" "connection service restarted"
      ;;
    "start service")
      systemctl start "$unit" && \
        notify-send "netctl-auto" "connection service started"
      ;;
    "stop service")
      systemctl stop "$unit" && \
        notify-send "netctl-auto" "connection service stopped"
      ;;
  esac
}
# Top-level menu: show the available actions in rofi and dispatch.
main() {
  local action
  action=$(_rofi -sep "|" <<< "switch network|enable profile|disable profile|edit profile|add network|service control")
  case $action in
    "switch network")  switch ;;
    "enable profile")  profilemgr enable ;;
    "disable profile") profilemgr disable ;;
    "edit profile")    editprofile ;;
    "add network")     addnetwork ;;
    "service control") servicecontrol ;;
  esac
}

main
| true
|
c92cba2ee00ac8d5443047ed7a1b92a6d793ddfe
|
Shell
|
badams/yii2-skeleton
|
/vagrant/provision.sh
|
UTF-8
| 5,909
| 2.96875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Vagrant provisioning script for an Ubuntu 14.04 (trusty) LAMP box.
# Positional arguments (supplied by the Vagrantfile):
#   $1 hostname          $2 server timezone   $3 PHP timezone
#   $4 MySQL root pass   $5 database name     $6 db user   $7 db password
HOSTNAME=$1
SERVER_TIMEZONE=$2
PHP_TIMEZONE=$3
MYSQL_ROOT_PASSWORD=$4
DB_NAME=$5
DB_USER=$6
DB_PASS=$7
#######################################
# Apt #
#######################################
# Prepend mirror-selection entries so apt picks a nearby trusty mirror,
# keeping the box's original sources after them.
echo 'deb mirror://mirrors.ubuntu.com/mirrors.txt trusty main restricted universe multiverse' > /tmp/sources
echo 'deb mirror://mirrors.ubuntu.com/mirrors.txt trusty-updates main restricted universe multiverse' >> /tmp/sources
echo 'deb mirror://mirrors.ubuntu.com/mirrors.txt trusty-backports main restricted universe multiverse' >> /tmp/sources
echo 'deb mirror://mirrors.ubuntu.com/mirrors.txt trusty-security main restricted universe multiverse' >> /tmp/sources
sudo cat /etc/apt/sources.list >> /tmp/sources
sudo cp /tmp/sources /etc/apt/sources.list
sudo rm /tmp/sources
sudo apt-get update --fix-missing
# Base tooling and build packages.
sudo apt-get install -y curl unzip git-core ack-grep software-properties-common build-essential
sudo apt-get install -y python-software-properties lynx libav-tools
#######################################
# Timezone/Locale #
#######################################
sudo ln -sf /usr/share/zoneinfo/${SERVER_TIMEZONE} /etc/localtime
sudo locale-gen C.UTF-8
export LANG=C.UTF-8
# Persist the locale for the vagrant user's interactive shells.
echo "export LANG=C.UTF-8" >> /home/vagrant/.bashrc
#######################################
# Apache #
#######################################
sudo apt-get install -y apache2 apache2-mpm-event
# Silence the "could not determine FQDN" warning and make the host resolvable.
echo "ServerName $HOSTNAME" >> /etc/apache2/apache2.conf
echo "127.0.0.1 $HOSTNAME" >> /etc/hosts
# Setup virtualhost
# Render the Apache virtual host configuration for the provisioned site.
# Fix: the access log previously went to "/<host>_access.log" (filesystem
# root) while the error log went to /var/log; both now land in /var/log.
VHOST=$(cat <<EOF
<VirtualHost *:80>
    ServerName $HOSTNAME
    DocumentRoot /vagrant/www/web
    <Directory />
        Options FollowSymLinks
        AllowOverride None
    </Directory>
    <Directory /vagrant/www/web/>
        Options Indexes FollowSymLinks MultiViews
        Require all granted
        AllowOverride All
    </Directory>
    ErrorLog /var/log/${HOSTNAME}_error.log
    LogLevel warn
    CustomLog /var/log/${HOSTNAME}_access.log combined
</VirtualHost>
EOF
)
# Install the rendered vhost as the default site and enable required modules.
echo "${VHOST}" > /etc/apache2/sites-enabled/000-default.conf
sudo a2enmod mpm_prefork rewrite headers ssl actions
#######################################
# PHP #
#######################################
sudo apt-get install -y php5-cli php5-mysql php5-curl php5-gd php5-mcrypt php5-memcached php5-intl php5-xdebug php5-apcu libapache2-mod-php5
# xdebug Config
# Locate the packaged ini/so with find rather than hard-coding paths.
cat > $(find /etc/php5 -name xdebug.ini) << EOF
zend_extension=$(find /usr/lib/php5 -name xdebug.so)
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.idekey = "vagrant"
xdebug.remote_port = 9000
xdebug.scream=0
xdebug.cli_color=1
xdebug.show_local_vars=1
xdebug.overload_var_dump = 0
EOF
# APCU Config
cat > $(find /etc/php5 -name apcu.ini) << EOF
extension=$(find /usr/lib/php5 -name apcu.so)
apc.rfc1867=on
apc.rfc1867_freq=0
EOF
php5enmod xdebug apcu mcrypt
# alter php.ini settings
# Apply dev-friendly overrides to both the Apache and CLI php.ini files.
for INIFILE in "/etc/php5/apache2/php.ini" "/etc/php5/cli/php.ini"
do
sudo sed -i "s/error_reporting = .*/error_reporting = E_ALL \& \~E_DEPRECATED \& \~E_STRICT/" $INIFILE
sudo sed -i "s/display_errors = .*/display_errors = On/" $INIFILE
sudo sed -i "s/max_input_time = .*/max_input_time = -1/" $INIFILE
sudo sed -i "s/upload_max_filesize = .*/upload_max_filesize = 500M/" $INIFILE
sudo sed -i "s/post_max_size = .*/post_max_size = 500M/" $INIFILE
# ${PHP_TIMEZONE/\//\\/} escapes the '/' in e.g. Europe/London for sed.
sudo sed -i "s/;date.timezone =.*/date.timezone = ${PHP_TIMEZONE/\//\\/}/" $INIFILE
done
sudo service apache2 restart
#######################################
# MySQL #
#######################################
# Pre-seed the root password so the install is non-interactive.
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password ${MYSQL_ROOT_PASSWORD}"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password ${MYSQL_ROOT_PASSWORD}"
sudo apt-get install -y mysql-server
# Listen on all interfaces so the host machine can connect to the VM's MySQL.
sed -i "s/bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/my.cnf
MYSQL=`which mysql`
# Allow root login from any host (dev box only — do not do this in production).
Q1="GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' WITH GRANT OPTION;"
Q2="FLUSH PRIVILEGES;"
SQL="${Q1}${Q2}"
$MYSQL -uroot -p$MYSQL_ROOT_PASSWORD -e "$SQL"
service mysql restart
#######################################
# Mail Catcher #
#######################################
sudo apt-get install -y libsqlite3-dev ruby1.9.3
gem install --no-rdoc mailcatcher
# Route PHP mail() through catchmail so outgoing mail is trapped locally.
echo "sendmail_path = /usr/bin/env catchmail -f mail@${HOSTNAME}" | sudo tee /etc/php5/mods-available/mailcatcher.ini
# Upstart job so mailcatcher survives reboots.
sudo tee /etc/init/mailcatcher.conf <<UPSTART
description "Mailcatcher"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec /usr/bin/env $(which mailcatcher) --foreground --http-ip=0.0.0.0
UPSTART
# Start Mailcatcher
sudo service mailcatcher start
sudo php5enmod mailcatcher
sudo service apache2 restart
#######################################
# Initialize database #
#######################################
Q1="CREATE DATABASE IF NOT EXISTS ${DB_NAME};"
Q2="CREATE USER '${DB_USER}'@'localhost' IDENTIFIED BY '${DB_PASS}';"
Q3="GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO ${DB_USER}@localhost;"
SQL="${Q1}${Q2}${Q3}"
MYSQL=`which mysql`
$MYSQL -uroot -p$MYSQL_ROOT_PASSWORD -e "$SQL"
#######################################
# Composer #
#######################################
# Test if Composer is installed
composer -v > /dev/null 2>&1
COMPOSER_IS_INSTALLED=$?
# True, if composer is not installed
if [[ $COMPOSER_IS_INSTALLED -ne 0 ]]; then
echo ">>> Installing Composer"
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
else
echo ">>> Updating Composer"
sudo composer self-update
fi
# Asset plugin is required by Yii2 for bower/npm package resolution.
composer global require "fxp/composer-asset-plugin:1.0.0"
cd /vagrant/www
composer install
cd /vagrant/www/web
# Front controller: delegate to the development bootstrap.
echo "<?php require_once 'index-development.php'; " > index.php
| true
|
3699b0be234e65a67081ae59e120ca97e4179b81
|
Shell
|
ABHISHEK-AMRUTE/Hello-world-1
|
/Bash/triangle.sh
|
UTF-8
| 267
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print a right-aligned star triangle; the number of rows is read from stdin.
printf 'input the repetition number : '
read repeat
for ((row = 1; row <= repeat; row++)); do
  # Leading padding: one space per remaining row aligns the triangle.
  for ((pad = 1; pad <= repeat - row; pad++)); do
    printf ' '
  done
  # One "* " per row number.
  for ((star = 1; star <= row; star++)); do
    printf '* '
    # awk 'BEGIN{printf "%c ", 178}'
  done
  # Blank line between rows (the original emitted "\n" plus echo's newline).
  printf '\n\n'
done
| true
|
ac6313fc7f7fd26409b0d2bc98bb43f51962dc99
|
Shell
|
GabrielNicolasAvellaneda/boundary-api-cli
|
/src/main/scripts/events/event-get
|
UTF-8
| 1,114
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###
### Copyright 2014-2015 Boundary, Inc.
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
# Fetch a single Boundary event by id and pretty-print the JSON with jq.
# Requires BOUNDARY_API_HOST plus either an org-scoped API token
# (BOUNDARY_ORG_ID set) or email/token basic-auth credentials.
Main() {
	if [ $# -ne 1 ]; then
		echo "usage: $(basename $0) <event id>"
		exit 1
	fi
	typeset -r EVENT_ID=$1
	# Org-scoped requests authenticate with the API token alone; the
	# legacy v1 route uses email:token basic auth.
	if [ -z "$BOUNDARY_ORG_ID" ]; then
		typeset -r REQUEST_URI="https://$BOUNDARY_API_HOST/v1/events/$EVENT_ID"
		typeset -r CREDENTIALS="$BOUNDARY_EMAIL:$BOUNDARY_API_TOKEN"
	else
		typeset -r REQUEST_URI="https://$BOUNDARY_API_HOST/$BOUNDARY_ORG_ID/events/$EVENT_ID"
		typeset -r CREDENTIALS="$BOUNDARY_API_TOKEN:"
	fi
	curl -s -X GET -u "$CREDENTIALS" "$REQUEST_URI" | jq '.'
}
Main "$@"
| true
|
3f90da4e3bc0e174f6d4002d30603d124d65f81e
|
Shell
|
alxshine/dotfiles
|
/zsh/zshrc
|
UTF-8
| 1,543
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/zsh
# Interactive zsh configuration: p10k prompt, completion, fzf integration.
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Lines configured by zsh-newuser-install
HISTFILE=~/.histfile
HISTSIZE=1000
SAVEHIST=1000
setopt extendedglob nomatch
# keybinds
# Make the Delete key delete forward instead of inserting '~'.
bindkey '^[[3~' delete-char
# vim binds
# bindkey -v
# emacs binds
bindkey -e
# Ctrl+Left / Ctrl+Right jump by word.
bindkey '^[[1;5D' emacs-backward-word
bindkey '^[[1;5C' emacs-forward-word
# End of lines configured by zsh-newuser-install
# The following lines were added by compinstall
zstyle :compinstall filename '~/config/zsh/zshrc'
autoload -Uz compinit zmv
compinit
# End of lines added by compinstall
# Load the Powerlevel10k theme and its saved configuration if present.
[[ ! -f ~/.config/zsh/powerlevel10k/powerlevel10k.zsh-theme ]] || source ~/.config/zsh/powerlevel10k/powerlevel10k.zsh-theme
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
# Source a file only when it exists; silently skip otherwise.
try_source () {
[[ ! -f "$1" ]] || source "$1"
}
try_source ~/.config/zsh/functions.zwc
try_source ~/.config/zsh/aliases.zwc
try_source ~/.config/zsh/custom.zwc
# fzf bindings
try_source /usr/share/fzf/key-bindings.zsh
try_source /usr/share/fzf/completion.zsh
export FZF_DEFAULT_COMMAND='fd'
# bindkey "^R" history-incremental-search-backward
# add .local to path
export PATH="$PATH:$HOME/.local/bin"
export XDG_CONFIG_HOME="$HOME/.config"
| true
|
22e2abaf3f38db9f027e90db5515071bf20548a0
|
Shell
|
wizzup/code-a-la-mode
|
/run.sh
|
UTF-8
| 872
| 3
| 3
|
[] |
no_license
|
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p openjdk11 python3
# TODO: reduce dependencies, move python dependency to python AI file

# Launch the code-a-la-mode referee jar with three bot commands
# (defaulting to the Python starter AI). The replay log is written to the
# caller's working directory so `run.sh` can be invoked from anywhere.

# log should save at script caller directory
CWD=$(pwd)
echo "cwd : $(pwd)"

# make use of script full path, this will allow running `run.sh` from anywhere
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# cd into script directory (quoted: paths may contain spaces — SC2086)
pushd "$DIR" >/dev/null
echo "cwd : $(pwd)"

# Bot commands: override via $1..$3, otherwise use the starter AI.
P1=${1:-"python ./src/test/starterkit/starterAI.py"}
P2=${2:-"python ./src/test/starterkit/starterAI.py"}
P3=${3:-"python ./src/test/starterkit/starterAI.py"}
echo "Bot#1 : $P1"
echo "Bot#2 : $P2"
echo "Bot#3 : $P3"

java -jar ./target/code-a-la-mode-1.0-SNAPSHOT-shaded.jar \
    -p1 "$P1" -p1name "bot#1" \
    -p2 "$P2" -p2name "bot#2" \
    -p3 "$P3" -p3name "bot#3" \
    -l "$CWD/runlog.txt" \
    -s

# cd back to caller directory
popd
| true
|
cafddddd6b714d4538a2e8bc9ebd7dfd602b166c
|
Shell
|
thinleyS/dotfiles
|
/.bashrc
|
UTF-8
| 567
| 2.859375
| 3
|
[] |
no_license
|
# ~/.bashrc
# Interactive bash configuration: colored aliases, a custom prompt, vi keys.
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
alias grep='grep --color=auto'
# Bash prompt
# Colors come from tput so they degrade gracefully on limited terminals.
orange=$(tput setaf 166)
yellow=$(tput setaf 228)
red=$(tput setaf 9)
white=$(tput setaf 15)
blue=$(tput setaf 12)
reset=$(tput sgr0)
# Prompt renders as "user at host in dir" with "$ " on the following line.
# \[...\] marks non-printing sequences so bash computes line width correctly.
PS1="\[${red}\]\u"; # username
PS1+="\[${white}\] at ";
PS1+="\[${yellow}\]\h"; # host
PS1+="\[${white}\] in ";
PS1+="\[${orange}\]\W"; # working directory
PS1+="\n";
PS1+="\[${white}\]\$ \[${reset}\]"; # `$` (and reset color)
export PS1;
# vi key bindings
set -o vi
| true
|
eff1defa5a3b175a94458dd96b93d8f6f4c997f7
|
Shell
|
boxenergie/script_Pi
|
/dashboard_install.sh
|
UTF-8
| 816
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the "boxenergie" dashboard on a Raspberry Pi: Node-RED + Node.js,
# the project flow/settings, required palette nodes, and boot-time autostart.
echo "Ce script va installer la boxenergie sur votre raspberry."
echo "Début d'installation de Node-Red et Node.js."
sleep 5
# Official Node-RED Pi installer; `yes Y` auto-confirms its prompts.
yes Y|bash <(curl -sL https://raw.githubusercontent.com/node-red/linux-installers/master/deb/update-nodejs-and-nodered)
echo "Fin d'installation de Node-Red et Node.js"
echo "Ajout du flow Node-Red de la boxenergie. "
# Drop the project flow, palette manifest and settings into ~/.node-red.
mv package.json ./.node-red
mv flows_raspberrypi.json ./.node-red
mv settings.js ./.node-red
echo "Installation des noeuds supplémentaires requis."
cd .node-red
npm install >> /dev/null
cd ..
echo "Activation du lancement automatique de Node-Red au démarrage de votre raspberry."
sudo systemctl enable nodered.service
# Start Node-RED now in the background; future boots use the systemd unit.
node-red-start >> /dev/null &
echo "Votre boxenergie est disponible à l'adresse suivante : 127.0.0.1:1880/ui depuis votre raspberry."
| true
|
48cab11e09cf76d78c8ea23a4b1f067aaaadd97f
|
Shell
|
Learning-jusuE404/BIOC281
|
/Classes/2/STARmale_2p_scr
|
UTF-8
| 2,021
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Second-pass STAR alignment of the "male" paired-end reads against the
# PARYMSK 100bp index, run in a scratch directory and copied back on success.
# Set reference genome, genome indexes, junction and annotation database directory paths
REF=$HOME/BIOC281/Classes/2/STARindex_2p/PARYMSK_100bp
FTQ=$HOME/BIOC281/Classes/2/reads
# Set parameters
# CmmnPrms: filtering/junction options shared across runs;
# AdtlPrms: sorted-BAM output plus transcriptome quantification.
CmmnPrms="--runThreadN 4 --outSJfilterReads Unique --outFilterType BySJout --outSAMunmapped Within \
--outSAMattributes NH HI AS nM NM MD jM jI XS MC ch --outFilterMultimapNmax 20 --outFilterMismatchNmax 999 --alignIntronMin 20 \
--outFilterMismatchNoverReadLmax 0.04 --alignIntronMax 1000000 --alignMatesGapMax 1000000 \
--alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --sjdbScore 1"
AdtlPrms="--outSAMtype BAM SortedByCoordinate --outBAMcompression 10 --limitBAMsortRAM 57000000000 \
--quantMode TranscriptomeSAM GeneCounts --quantTranscriptomeBAMcompression 10 --outSAMstrandField intronMotif"
# Set directories for run
export OWD=`pwd`
export SCR=$HOME/scratch_male_2p
export STR=$SCR/STARun
export RDIR=$STR/reads
# create scratch and read directory
mkdir -p $RDIR
## create STR scratch directory and subdirectories
mkdir -p $STR/male_2p/
# Copy processed reads to local scratch space "\$RDIR" and decompress-- can be used if skewer has already been used for 1p mapping.
cp -a $FTQ/male-*pair* $RDIR
cd $RDIR
gzip -d $RDIR/*.gz
## run 2nd pass STAR alignment
STAR $CmmnPrms $AdtlPrms --genomeDir $REF --outFileNamePrefix $STR/male_2p/male.2p. \
--readFilesIn $RDIR/male-trimmed-pair1.fastq $RDIR/male-trimmed-pair2.fastq
#
## Create index for remapped bam file
samtools index $STR/male_2p/male.2p.Aligned.sortedByCoord.out.bam
# delete reads in $SCR to save space. The reads are no longer needed.
rm -rf $RDIR/*
# compress files that are needed for downstream analysis
find $STR -type f \( -name "*.out" -o -name "*.tab" -o -name "*.sjdb" -o -name "*.results" \) | xargs gzip -9
# Cleanup and Copy back all important files
if [ ! -d $OWD/STAResults ];
then
mkdir -p $OWD/STAResults
fi
# Copy back important files
cp -a $STR/male_2p $OWD/STAResults/
# Remove scratch directory
rm -rf $SCR
exit 0
| true
|
1b8e770e079ab4ad1fe6f675b8b75aa1fe699ea7
|
Shell
|
limonhowlader/HackerRank---The-Linux-Shell-Solutions
|
/Arrays in Bash/Slice an Array.sh
|
UTF-8
| 289
| 3.109375
| 3
|
[] |
no_license
|
# Given a list of countries, each on a new line, read them into an array and
# display only the elements at positions 3 through 7 inclusive (0-indexed):
# ${arr[@]:offset:length} slices 5 elements starting at index 3.
input=$(cat)
countries=($input)
echo ${countries[@]:3:5}
| true
|
2913a777a858ca3eebeb290d4ebdfd9b21e52371
|
Shell
|
jason75023/LTMI
|
/SAN3/networkmanage.sh
|
UTF-8
| 1,705
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################################
## This script is to setup LTMI ACRS subnet ##
## Version 1.1 ##
## created by Jason Wang on April 30, 2015 ##
## Working tenant : SAN3 (ltmi-acrs-prod) ##
#######################################################################
# Creates one shared router, two networks/subnets, and wires them together
# via the OpenStack neutron CLI. Requires tenant credentials in the env.
#You cannot run source within script
#source /root/tenentaccess/iad1/iad1-ltm-id-proc-dev
# Fail fast on unset variables or any neutron command error.
set -o nounset
set -o errexit
LTMIACRS_RT="ACRS-Router"
LTMIACRS_SUB1="ACRSNW"
LTMIACRS_SUB2="ACRS_IntNW"
SUB1_IP="10.10.0.0/24"
SUB2_IP="192.168.0.0/24"
#create a router which shared for this tenant
neutron router-create $LTMIACRS_RT
#attach shared router to public network
neutron router-gateway-set $LTMIACRS_RT public
#create subnet to your specific project
neutron net-create $LTMIACRS_SUB1
neutron net-create $LTMIACRS_SUB2
#Define subnet IP range
neutron subnet-create $LTMIACRS_SUB1 $SUB1_IP --name $LTMIACRS_SUB1
neutron subnet-create $LTMIACRS_SUB2 $SUB2_IP --name $LTMIACRS_SUB2
#extract subnet ID info
# awk matches the subnet name in the table output and prints the id column.
LTMIACRS_SUB1_ID=$(neutron subnet-list | awk '/'$LTMIACRS_SUB1'/{print $2}')
LTMIACRS_SUB2_ID=$(neutron subnet-list | awk '/'$LTMIACRS_SUB2'/{print $2}')
#Hook shared router to subnet
neutron router-interface-add $LTMIACRS_RT $LTMIACRS_SUB1_ID
neutron router-interface-add $LTMIACRS_RT $LTMIACRS_SUB2_ID
| true
|
5f6b4f7ba0198be59bf344be709dad00acfce335
|
Shell
|
p0tr3c/TerraKVM
|
/tests/test_debug_container.sh
|
UTF-8
| 948
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Smoke test: bring the terrakvm docker-compose stack up, verify the
# container appears, tear it down, and verify it disappears.

PROJECT_DIR=$(realpath "$PWD/../")
TEST_DIR=$(realpath "$PWD")
TEST_NAME="$(basename "$0")"
TEST_NAME="${TEST_NAME%.*}"

# wait_for_container STATE LABEL
#   STATE "present": wait until `docker ps` lists the terrakvm container.
#   STATE "absent":  wait until it is gone.
# Polls once per second, at most 10 times; prints PASS/FAIL with LABEL and
# exits 1 on timeout. (Extracted to remove the duplicated retry loops.)
wait_for_container() {
    local state=$1 label=$2 retries=10 found
    while :
    do
        found=$(sudo docker ps -f name=terrakvm -q)
        if [ "$state" = "present" ] && [ ! -z "$found" ]; then
            break
        fi
        if [ "$state" = "absent" ] && [ -z "$found" ]; then
            break
        fi
        ((retries--))
        if [ $retries -eq 0 ]; then
            echo "FAIL: ${TEST_NAME}:debug:${label}"
            exit 1
        fi
        sleep 1
    done
    echo "PASS: ${TEST_NAME}:debug:${label}"
}

# Start the stack in the background; output goes to the per-test log.
cd "${PROJECT_DIR}" && \
sudo docker-compose --log-level ERROR up >> "${TEST_DIR}/${TEST_NAME}.log" 2>&1 &
wait_for_container present up

# Tear the stack down and wait for the container to vanish.
cd "${PROJECT_DIR}" && \
sudo docker-compose --log-level ERROR down >> "${TEST_DIR}/${TEST_NAME}.log" 2>&1 &
wait_for_container absent down
| true
|
1bc9422416575209c93d4b52316025c3f7fc7db7
|
Shell
|
macsx82/Singletons-bash
|
/ja_runner_par.sh
|
UTF-8
| 2,524
| 3.125
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# This is the runner file run by bsub
# Arguments: runner.sh filelist
# Environment variables: LSB_JOBINDEX
# LSF job-array dispatcher: each array element picks line $LSB_JOBINDEX from
# the given list file and invokes a worker script with the fields of that
# line. Modes: -s single column, -d two columns, -t three (joined by ':'),
# -q four, -l list-of-scripts.
#Example command:
#mkdir -p LOGS;size=`wc -l result.list|cut -f 1 -d " "`;bsub -J "p_check[1-${size}]" -o "LOGS/%J_p_check.%I.o" -M 5000 -R"select[mem>5000] rusage[mem=5000]" -q normal -- ~/Work/bash_scripts/ja_runner_par.sh ~/Work/bash_scripts/check_pvals_test.sh result.list
#mkdir -p LOGS;size=`wc -l match_file.list|cut -f 1 -d " "`;bsub -J "sh_dens[1-${size}]" -o "LOGS/%J_sh_dens.%I.o" -M 5000 -R"select[mem>5000] rusage[mem=5000]" -q normal -- ~/Work/bash_scripts/ja_runner_par.sh ~/Work/bash_scripts/sharingDensity.sh match_file.list map.list
echo "${@}"
while getopts ":dstql" opt; do
case $opt in
d)
# -d: list has two whitespace-separated columns; pass both to the script.
echo $opt
echo "Double column list mode triggered!" >&2
file=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $1}'`
file2=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $2}'`
echo ${file}
echo ${file2}
script=$2
$script ${file} ${file2} "${@:4}"
;;
t)
# -t: three columns, joined with ':' into one argument.
echo $opt
echo "Triple column list mode triggered!" >&2
file1=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $1}'`
file2=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $2}'`
file3=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $3}'`
echo ${file1}
echo ${file2}
echo ${file3}
file=${file1}\:${file2}\:${file3}
script=$2
$script ${file} "${@:4}"
;;
q)
# -q: four columns, passed as four separate arguments.
echo $opt
echo "Quadruple column list mode triggered!" >&2
file1=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $1}'`
file2=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $2}'`
file3=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $3}'`
file4=`sed -n "${LSB_JOBINDEX}p" $3 | awk '{print $4}'`
echo ${file1}
echo ${file2}
echo ${file3}
echo ${file4}
file=${file1}\:${file2}\:${file3}\:${file4}
script=$2
# $script ${file} "${@:4}"
$script ${file1} ${file2} ${file3} ${file4} "${@:4}"
;;
s)
# -s: single-column list; pass the line as the first argument.
echo $opt
echo "Single list mode triggered!!" >&2
file=`sed -n "${LSB_JOBINDEX}p" $3`
# echo ${file}
script=$2
# $script ${file} $4 $5 $6 $7 $8
$script ${file} "${@:4}"
;;
l)
# -l: the list itself contains script paths; run the selected one.
echo $opt
echo "Script list mode triggered!!" >&2
file=`sed -n "${LSB_JOBINDEX}p" $2`
# echo ${file}
script=$file
# $script ${file} $4 $5 $6 $7 $8
$script "${@:3}"
;;
*)
echo $opt
;;
esac
done
| true
|
9f3b09f9a0d5a084682a1275146d6688e93c3ad3
|
Shell
|
fishilico/home-files
|
/bin/podman-markdownlint
|
UTF-8
| 4,882
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2020-2023 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Run https://github.com/markdownlint/markdownlint in a podman container
#
# Creates a new image and a temporary container in order to run markdownlint,
# using a volume to mount the current working directory in the container.
set -e
# Build (or rebuild) the localhost/podman-markdownlint image: refresh the
# ruby:slim base, prune layers that became dangling, then layer the mdl gem.
build_container() {
    # Update the base image
    mapfile -t OLD_BASE_IMAGES < <(podman image list '--format={{.Id}}' docker.io/library/ruby:slim)
    podman pull docker.io/library/ruby:slim
    # Remove previous ruby:slim layers orphaned by the pull.
    for OLD_BASE_IMAGE in "${OLD_BASE_IMAGES[@]}" ; do
        if [ "$(podman image list '--format={{.Dangling}}' --filter "id=${OLD_BASE_IMAGE}")" = 'true' ] ; then
            echo "Removing old podman base image ${OLD_BASE_IMAGE}"
            podman image rm -- "${OLD_BASE_IMAGE}"
        fi
    done
    # Containerfile is fed via stdin; --no-cache forces a fresh gem install.
    podman build --no-cache -t localhost/podman-markdownlint -f /dev/stdin << EOF
FROM ruby:slim
RUN gem install mdl
EOF
}
# Option --update updates the container if it already exists
if [ $# -ge 1 ] && [ "$1" = '--update' ] ; then
    shift
    # Remember current image ids so we can prune those that become dangling.
    mapfile -t OLD_IMAGES < <(podman image list '--format={{.Id}}' localhost/podman-markdownlint)
    build_container
    for OLD_IMAGE in "${OLD_IMAGES[@]}" ; do
        if [ "$(podman image list '--format={{.Dangling}}' --filter "id=${OLD_IMAGE}")" = 'true' ] ; then
            echo "Removing old podman image ${OLD_IMAGE}"
            podman image rm -- "${OLD_IMAGE}"
        fi
    done
elif ! podman image exists localhost/podman-markdownlint ; then
    # First run: build the image once.
    build_container
fi
# Use SCMP_ACT_LOG to record the denied syscalls
# Allow-list seccomp profile for the mdl run: everything not listed below
# fails with ERRNO. Swap defaultAction to SCMP_ACT_LOG when debugging.
SECCOMP_PROFILE='
{
    "defaultAction": "SCMP_ACT_ERRNO",
    "syscalls": [
        {
            "names": [
                "access",
                "arch_prctl",
                "brk",
                "capget",
                "capset",
                "chdir",
                "clock_gettime",
                "close",
                "epoll_ctl",
                "epoll_pwait",
                "eventfd2",
                "execve",
                "exit_group",
                "fchown",
                "fcntl",
                "flock",
                "fstat",
                "fstatfs",
                "futex",
                "getcwd",
                "getdents64",
                "getegid",
                "geteuid",
                "getgid",
                "getpid",
                "getppid",
                "getrandom",
                "gettid",
                "getuid",
                "ioctl",
                "lseek",
                "lstat",
                "mmap",
                "mprotect",
                "munmap",
                "nanosleep",
                "newfstatat",
                "openat",
                "prctl",
                "pread64",
                "prlimit64",
                "pselect6",
                "read",
                "readlink",
                "rseq",
                "rt_sigaction",
                "rt_sigprocmask",
                "rt_sigreturn",
                "sched_getaffinity",
                "select",
                "setgid",
                "setgroups",
                "setresgid",
                "setresuid",
                "setuid",
                "set_robust_list",
                "set_tid_address",
                "sigaltstack",
                "stat",
                "sysinfo",
                "tgkill",
                "timer_create",
                "timer_delete",
                "timer_settime",
                "write",
                "writev"
            ],
            "action": "SCMP_ACT_ALLOW"
        }
    ]
}'
CURDIR="$(pwd)"
# Run mdl on the current directory, mounted at the same path inside the
# container, with networking disabled and the seccomp profile above applied
# via process substitution. All script arguments are forwarded to mdl.
exec podman run --rm \
    --net=none \
    --volume "$CURDIR:$CURDIR" \
    --workdir "$CURDIR" \
    --security-opt seccomp=<(printf %s "$SECCOMP_PROFILE") \
    -ti localhost/podman-markdownlint mdl "$@"
| true
|
7c22cbf1986e41c8e1eefeea8c524a33f74ab49f
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/mcabber-crew-hg-extended/PKGBUILD
|
UTF-8
| 1,896
| 2.640625
| 3
|
[] |
no_license
|
# PKGBUILD metadata for the patched mcabber-crew Mercurial build.
pkgname=mcabber-crew-hg-extended
pkgver=2062
pkgrel=2
pkgdesc="mcabber-crew jabber client with extended autocompletion and highlighting"
arch=('i686' 'x86_64')
# Fix: provides= was assigned twice; the second assignment silently replaced
# the first and dropped the 'mcabber=0.10.2' provision. Both are now merged.
provides=('mcabber=0.10.2' 'mcabber-crew-hg')
conflicts=(mcabber mcabber-lm-hg mcabber-hg mcabber-crew)
url="http://www.mcabber.com"
license=('GPL')
depends=('ncurses' 'glib2' 'openssl' 'gpgme' 'libotr' 'aspell' 'loudmouth-ossl')
makedepends=('pkgconfig' 'mercurial')
source=(
register_builtin_cat_export.patch
should-be-able-to-highlight-message-from-pre-msg-hook.patch
)
md5sums=('b857d5063795e599c1ac0b5bdaa7d232'
         '263f330d3344ce0847631c3aadf4a340')
_hgroot="https://bitbucket.org/McKael/mcabber-crew"
_hgrepo="mcabber-crew"
# Fetch the upstream Mercurial repository into $srcdir.
prepare() {
cd ${srcdir}
hg clone ${_hgroot}
}
# Apply the extra patches and build in a disposable copy of the source tree.
build() {
cp -a ${srcdir}/${_hgrepo}/mcabber ${srcdir}/mcabber-build || return 1
cd ${srcdir}/mcabber-build
patch -p2 < ../register_builtin_cat_export.patch
patch -p2 < ../should-be-able-to-highlight-message-from-pre-msg-hook.patch
./autogen.sh
./configure --with-libotr-inc-prefix=/usr/include/libotr3 --prefix=/usr --enable-hgcset \
--mandir=/usr/share/man \
--enable-aspell --enable-modules --enable-otr
make || return 1
}
# Install the built client plus example config, module HOWTO and dev headers.
package() {
cd ${srcdir}/mcabber-build
make DESTDIR=${pkgdir} install || return 1
mkdir -p ${pkgdir}/usr/share/mcabber/example
msg "Copying mcabberrc.example to usr/share/mcabber/example/mcabberrc"
cp -a ${srcdir}/mcabber-build/mcabberrc.example \
${pkgdir}/usr/share/mcabber/example/mcabberrc
mkdir -p ${pkgdir}/usr/share/mcabber/doc
install -D -m 0644 ${srcdir}/${_hgrepo}/mcabber/doc/HOWTO_modules.txt \
${pkgdir}/usr/share/mcabber/doc/HOWTO_modules.txt
mkdir -p ${pkgdir}/usr/include/mcabber/
install -D -m 0644 ${srcdir}/mcabber-build/mcabber/*.h \
${pkgdir}/usr/include/mcabber/ || return 1
rm -rf ${srcdir}/mcabber-build
}
| true
|
9a0b0698218ee44df5585a6b3c48fa7924e004a6
|
Shell
|
readex-eu/readex-scorep
|
/vendor/cubelib/test/test31/run_test_31.sh.in
|
UTF-8
| 2,440
| 3.484375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
##*************************************************************************##
##  CUBE        http://www.scalasca.org/                                   ##
##*************************************************************************##
##  Copyright (c) 1998-2016                                                ##
##  Forschungszentrum Juelich GmbH, Juelich Supercomputing Centre          ##
##                                                                         ##
##  Copyright (c) 2009-2015                                                ##
##  German Research School for Simulation Sciences GmbH,                   ##
##  Laboratory for Parallel Programming                                    ##
##                                                                         ##
##  This software may be modified and distributed under the terms of       ##
##  a BSD-style license.  See the COPYING file in the package base         ##
##  directory for details.                                                 ##
##*************************************************************************##

# Test 31: run cube_dump's histogram (gnuplot) output in two modes and diff
# the results against stored templates.

TEMPLATE_DIR="@abs_srcdir@"
TEMPLATE1_OUT="$TEMPLATE_DIR/template1.out"
TEMPLATE2_OUT="$TEMPLATE_DIR/template2.out"
EXT_CUBE_LOCATION="@CUBE_TEST_FILE_LOCATION@"

# Fall back to a local cube file when no external test-file location is set.
if test "x$EXT_CUBE_LOCATION" == "x"; then
    CUBEFILE=example_histogram_parameters.cubex
else
    CUBEFILE=$EXT_CUBE_LOCATION/example_histogram_parameters.cubex
fi

echo "##########################################################################"
echo "# Run test 31 : Check the support for histogramms "
echo "#"
echo "##########################################################################"
mkdir -p tests
# Skip silently when the cube file is unavailable.
# NOTE(review): -x checks *executable*; -r (readable) looks like the intended
# test for a data file — kept as-is to preserve existing skip behavior.
if ! test -x "$CUBEFILE"; then
    exit 0
fi
cd tests
echo "=====> Create file with histogramms ..."
#../cube_self_test3_4
result0=0
echo "=====> Print out the histogramms..."
../cube_dump -m time1,time2 -s gnuplot -z incl -t aggr "$CUBEFILE" &> test31.1.out
result1=$?
../cube_dump -m time1,time2 -s gnuplot -z incl "$CUBEFILE" &> test31.2.out
result2=$?
echo "=====> Compare result with saved template using diff..."
diff test31.1.out "$TEMPLATE1_OUT" &> test31.1.diff
result3=$?
diff test31.2.out "$TEMPLATE2_OUT" &> test31.2.diff
result4=$?
cd ..
if [[ $result0 -ne 0 || $result1 -ne 0 || $result2 -ne 0 || $result3 -ne 0 || $result4 -ne 0 ]];
then
    echo "Test FAILED. Inspect output above and tests/test31.1|2.diff for more details."
    # Fix: 'exit -1' is out of the 0-255 range (it wraps to 255); use 1.
    exit 1
fi
|
96d5f81e49554e1710dd54fe62aac339fc26577d
|
Shell
|
xphillyx/yui-modules
|
/bin/yui3doc
|
UTF-8
| 617
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fix: switched the shebang from /bin/sh to bash — this script uses [[ ]]
# and pushd/popd, which are bashisms and break under a strict POSIX sh.
# http://yui.github.com/yuidoc/syntax/index.html

# Optional first argument enables debug mode: keep old output, serve docs.
DEBUG=$1

parser_in_prefix=~/tools/yui-modules/src
generator_out=~/tools/jafl.github-yui3/yuidoc

# The version of your project to display within the documentation.
version=1.0.0

if [[ -z $DEBUG ]]; then
    rm -rf "$generator_out"
else
    server="--server 5000";
fi

pushd ~/tools/yui-modules/bin
rm -rf src
mkdir src
# Stage each module directory (minus build_tmp) for the yuidoc parser,
# skipping non-directories and the yuidoc output folder itself.
for f in ../src/*; do
    d=${f##*/}
    if [[ ! -d $f || "$d" == "yuidoc" ]]; then continue; fi
    d=${parser_in_prefix}/$d
    rm -rf "$d/build_tmp"
    cp -R "$d" src/
done
# $server is intentionally unquoted: empty it vanishes, otherwise it
# word-splits into "--server 5000".
yuidoc $server --project-version $version -o "$generator_out" .
rm -rf src
popd
| true
|
634afeb9ecf1d1d7bfcc26bbbce8a9ad48c097d7
|
Shell
|
Cadair/xmpp_chat
|
/slapd-configure/wait
|
UTF-8
| 261
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e

# Block until the LDAP service accepts TCP connections, polling once a second.
#host=$(env | grep _TCP_ADDR | cut -d = -f 2)
host=ldap
port=$(env | grep _TCP_PORT | cut -d = -f 2)

printf 'waiting for TCP connection to %s:%s...' "$host" "$port"
until nc -z $host $port 2>/dev/null
do
    echo "waiting"
    sleep 1
done
echo 'ok'
| true
|
9aa069058840d7002cb6919e7c207442e0e2a1bc
|
Shell
|
thiefaine/42_test
|
/get_next_line/test.sh
|
UTF-8
| 4,441
| 2.875
| 3
|
[] |
no_license
|
# **************************************************************************** #
# #
# ::: :::::::: #
# test.sh :+: :+: :+: #
# +:+ +:+ +:+ #
# By: kdezaphi <marvin@42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2014/11/19 09:24:30 by kdezaphi #+# #+# #
# Updated: 2014/11/26 18:02:20 by jpiazzal ### ########.fr #
# #
# **************************************************************************** #
#!/bin/zsh
# NOTE(review): this shebang is not on line 1 (the 42 header precedes it),
# so it has no effect; the script runs under whatever shell invokes it --
# confirm before moving it above the header.
# Build the get_next_line test binaries against libft.
make -C ../libft/ fclean
make -C ../libft/
gcc -Wall -Wextra -Werror -I ../libft/includes/ -I ../ -c ../get_next_line.c
gcc -Wall -Wextra -Werror -I ../libft/includes/ -I ../ -c btmain.c
gcc -Wall -Wextra -Werror -I ../libft/includes/ -I ../ -c itmain.c
gcc -Wall -Wextra -Werror -I ../libft/includes/ -I ../ -c atmain.c
gcc -Wall -Wextra -Werror -I ../libft/includes/ -I ../ -c etmain.c
gcc -o bttest_gnl get_next_line.o btmain.o -L ../libft/ -lft
gcc -o ittest_gnl get_next_line.o itmain.o -L ../libft/ -lft
gcc -o attest_gnl get_next_line.o atmain.o -L ../libft/ -lft
gcc -o ettest_gnl get_next_line.o etmain.o -L ../libft/ -lft
rm -f *.o
echo "\033[34m
*** TEST DE GET NEXT LINE ***
\033[0m"
# Generate random fixtures. FIX: use 'rm -f' so the very first run (when
# the files do not exist yet) does not print "No such file" errors.
openssl rand -base64 $((6)) > 81.txt
openssl rand -base64 $((6)) > 82.txt
openssl rand -base64 $((6)) >> 82.txt
n=$(($RANDOM%20+5))
i=0
rm -f 8.txt
while [ $i -ne $n ]; do openssl rand -base64 $((6)) >> 8.txt; i=$((i+1)); done
echo "$(openssl rand -base64 $((6)))\c" > 8s.txt
openssl rand -base64 $((12)) > 161.txt
openssl rand -base64 $((12)) > 162.txt
openssl rand -base64 $((12)) >> 162.txt
n=$(($RANDOM%20+5))
i=0
rm -f 16.txt
while [ $i -ne $n ]; do openssl rand -base64 $((12)) >> 16.txt; i=$((i+1)); done
echo "$(openssl rand -base64 $((12)))\c" > 16s.txt
openssl rand -base64 $((3)) > 41.txt
openssl rand -base64 $((3)) > 42.txt
openssl rand -base64 $((3)) >> 42.txt
n=$(($RANDOM%20+5))
i=0
rm -f 4.txt
while [ $i -ne $n ]; do openssl rand -base64 $((3)) >> 4.txt; i=$((i+1)); done
echo "$(openssl rand -base64 $((3)))\c" > 4s.txt
./bttest_gnl 81.txt 82.txt 8.txt
str="$(openssl rand -base64 $((6)))"
echo "\n-----Affichage d'une ligne de 8 caractere depuis l'entree standard-----"
echo $str | ./bttest_gnl
echo $str
str="$(openssl rand -base64 $((6)))\n$(openssl rand -base64 $((6)))"
echo "\n-----Affichage de deux lignes de 8 caractere depuis l'entree standard-----"
echo $str | ./bttest_gnl
echo $str
echo "\n-----Affichage de plusieurs lignes de 8 caractere depuis l'entree standard-----"
n=$(($RANDOM%19+5))
i=0
str="$(openssl rand -base64 $((6)))"
while [ $i -ne $n ]; do str="$str\n$(openssl rand -base64 $((6)))"; i=$((i+1)); done
echo $str | ./bttest_gnl
echo $str
./ittest_gnl 161.txt 162.txt 16.txt
str="$(openssl rand -base64 $((12)))"
echo "\n-----Affichage d'une ligne de 16 caractere depuis l'entree standard-----"
# NOTE(review): the 16- and 4-character stdin sections below pipe into
# ./bttest_gnl; ./ittest_gnl / ./attest_gnl may have been intended -- confirm.
echo $str | ./bttest_gnl
echo $str
str="$(openssl rand -base64 $((12)))\n$(openssl rand -base64 $((12)))"
echo "\n-----Affichage de deux lignes de 16 caractere depuis l'entree standard-----"
echo $str | ./bttest_gnl
echo $str
echo "\n-----Affichage de plusieurs lignes de 16 caractere depuis l'entree standard-----"
n=$(($RANDOM%19+5))
i=0
str="$(openssl rand -base64 $((12)))"
while [ $i -ne $n ]; do str="$str\n$(openssl rand -base64 $((12)))"; i=$((i+1)); done
echo $str | ./bttest_gnl
echo $str
./attest_gnl 41.txt 42.txt 4.txt 4s.txt 8s.txt 16s.txt
str="$(openssl rand -base64 $((3)))"
echo "\n-----Affichage d'une ligne de 4 caractere depuis l'entree standard-----"
echo $str | ./bttest_gnl
echo $str
str="$(openssl rand -base64 $((3)))\n$(openssl rand -base64 $((3)))"
echo "\n-----Affichage de deux lignes de 4 caractere depuis l'entree standard-----"
echo $str | ./bttest_gnl
echo $str
echo "\n-----Affichage de plusieurs lignes de 4 caractere depuis l'entree standard-----"
n=$(($RANDOM%19+5))
i=0
str="$(openssl rand -base64 $((3)))"
while [ $i -ne $n ]; do str="$str\n$(openssl rand -base64 $((3)))"; i=$((i+1)); done
echo $str | ./bttest_gnl
echo $str
./ettest_gnl
make -C ../libft/ fclean
| true
|
d3097f191fe93ffb553dcccd5962366863a3e60a
|
Shell
|
mrestuccio/dge
|
/3-tecnologia/1-sistemas/2-STP-Stack-de-Planificación/1-SGA-sistema-de-gestion-de-activos/3-entregables/v1/activador.sh
|
UTF-8
| 2,522
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/ksh
# Regenerate the SGA PHP CRUD pages: back up the current versions into
# ./BK, rewrite each one back into the working directory with a fixed set
# of UI feature flags switched on, and finally append the matching .cm?
# companion snippet to any page that has not been "completed" yet.
# Move every known generated page into the BK backup directory; errors for
# missing files are discarded.
mv -f actividad.php ./BK 2>/dev/null
mv -f area.php ./BK 2>/dev/null
mv -f bien.php ./BK 2>/dev/null
mv -f ejecuta.php ./BK 2>/dev/null
mv -f empresa.php ./BK 2>/dev/null
mv -f espacio.php ./BK 2>/dev/null
mv -f insumo.php ./BK 2>/dev/null
mv -f lectura.php ./BK 2>/dev/null
mv -f lugar.php ./BK 2>/dev/null
mv -f ocupa.php ./BK 2>/dev/null
mv -f origen.php ./BK 2>/dev/null
mv -f origen_lectura.php ./BK 2>/dev/null
mv -f periodicidad.php ./BK 2>/dev/null
mv -f persona.php ./BK 2>/dev/null
mv -f procedimiento.php ./BK 2>/dev/null
mv -f realiza.php ./BK 2>/dev/null
mv -f requiere.php ./BK 2>/dev/null
mv -f tarea.php ./BK 2>/dev/null
mv -f tarea_plan.php ./BK 2>/dev/null
mv -f tipo_bien.php ./BK 2>/dev/null
mv -f tipo_espacio.php ./BK 2>/dev/null
mv -f tipo_insumo.php ./BK 2>/dev/null
mv -f tipo_persona.php ./BK 2>/dev/null
cd BK
# Rewrite each backed-up page into the parent directory, flipping every
# Set*Available/Set*Enabled flag from false to true, renaming AddBand to
# AddBandToBegin, and forcing 100 rows per page.
for arch in *;do
echo "Procesando ${arch}"
sed 's/SetUseImagesForActions(false)/SetUseImagesForActions(true)/g' ${arch} | \
sed 's/SetHighlightRowAtHover(false)/SetHighlightRowAtHover(true)/g' | \
sed 's/SetShowPageList(false)/SetShowPageList(true)/g' | \
sed 's/SetExportToExcelAvailable(false)/SetExportToExcelAvailable(true)/g' | \
sed 's/SetExportToWordAvailable(false)/SetExportToWordAvailable(true)/g' | \
sed 's/SetExportToXmlAvailable(false)/SetExportToXmlAvailable(true)/g' | \
sed 's/SetExportToCsvAvailable(false)/SetExportToCsvAvailable(true)/g' | \
sed 's/SetExportToPdfAvailable(false)/SetExportToPdfAvailable(true)/g' | \
sed 's/SetPrinterFriendlyAvailable(false)/SetPrinterFriendlyAvailable(true)/g' | \
sed 's/SetSimpleSearchAvailable(false)/SetSimpleSearchAvailable(true)/g' | \
sed 's/SetAdvancedSearchAvailable(false)/SetAdvancedSearchAvailable(true)/g' | \
sed 's/SetFilterRowAvailable(false)/SetFilterRowAvailable(true)/g' | \
sed 's/SetVisualEffectsEnabled(false)/SetVisualEffectsEnabled(true)/g' | \
sed 's/SetShowTopPageNavigator(false)/SetShowTopPageNavigator(true)/g' | \
sed 's/SetShowBottomPageNavigator(false)/SetShowBottomPageNavigator(true)/g' | \
sed 's/SetAllowDeleteSelected(false)/SetAllowDeleteSelected(true)/g' | \
sed 's/AddBand(/AddBandToBegin(/g' | \
sed 's/SetRowsPerPage(.*)/SetRowsPerPage(100)/g' > ../${arch}
done
cd ..
echo "Completando archivos"
# Append each .cm? snippet to its .php page exactly once, using the
# presence of a closing '?>' tag as the "already completed" marker.
for arch_cmp in `ls -1 *.cm?`;do
arch_php=${arch_cmp%%.*}.php
grep "?>" $arch_php >/dev/null 2>&1
if [ $? -ne 0 ]; then
cat $arch_cmp >> $arch_php
else
echo "archivo $arch_php ya completado"
fi
done
echo "Proceso finalizado"
exit
| true
|
6a2c5e64f1bb6ac176cf41b1515ce504410270c0
|
Shell
|
argylelabcoat/asdf-lsd
|
/bin/install
|
UTF-8
| 405
| 3.3125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# asdf install hook for lsd: unpack the downloaded tarball into the
# plugin's bin directory.
set -e
# Check required ASDF environment variables. FIX: use a { } group so
# 'exit 1' terminates the script itself; in the original's ( ) subshell
# the exit only ended the subshell and relied on 'set -e' to propagate.
[ -n "$ASDF_INSTALL_PATH" ] || { echo 'Missing ASDF_INSTALL_PATH' >&2; exit 1; }
[ -n "$ASDF_DOWNLOAD_PATH" ] || { echo 'Missing ASDF_DOWNLOAD_PATH' >&2; exit 1; }
mkdir -p "${ASDF_INSTALL_PATH}/bin"
toolPath="${ASDF_INSTALL_PATH}/bin/lsd"
cd "${ASDF_DOWNLOAD_PATH}"
# --strip=1 drops the tarball's top-level directory.
tar xzf lsd.tar.gz --strip=1
mv ./lsd "${toolPath}"
chmod +x "${toolPath}"
| true
|
3af8f5891f7d78667e797bfc2f7d4c03cf1be27b
|
Shell
|
amanaplan/setup-macos
|
/scripts/homebrew-packages/casks/panic.sh
|
UTF-8
| 663
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install or upgrade the Transmit cask via Homebrew, with colored status
# output and a post-install sanity check on the Caskroom directory.
RESET="\033[0m"
BRAND="\033[1;34m"
INFO="\033[33m"
SUCCESS="\033[32m"
DANGER="\033[31m"
# Install Transmit
if [ ! -d /usr/local/Caskroom/transmit ]; then
  printf "${INFO}Installing Transmit...${RESET}\n"
  # BUG FIX: the original used '2</dev/null', which redirects *input* on
  # fd 2 from /dev/null; '2>/dev/null' actually discards stderr.
  brew cask install transmit 2>/dev/null >/dev/null
  if [ -d /usr/local/Caskroom/transmit ]; then
    printf "${SUCCESS}Transmit successfully installed.${RESET}\n\n"
  else
    printf "${DANGER}Transmit not installed.${RESET}\n\n"
  fi
else
  printf "${INFO}Upgrading Transmit...${RESET}\n"
  brew cask upgrade transmit 2>/dev/null >/dev/null
  printf "${SUCCESS}Transmit successfully upgraded.${RESET}\n\n"
fi
| true
|
459a33567dec754ee76cb998029351431b8a9584
|
Shell
|
datachand/lattice
|
/terraform/scripts/local/get-lattice-tar
|
UTF-8
| 835
| 3.796875
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# get-lattice-tar: resolve the lattice tarball argument, which may be
# either a local file path or a URL, into .terraform/lattice.tgz.
# Case 1: local file -- symlink its absolute path into place.
if [ -f "$1" ]; then
  LATTICE_TAR_PATH=$(cd "$(dirname "$1")" && pwd)/$(basename "$1")
  ln -sf "$LATTICE_TAR_PATH" .terraform/lattice.tgz
  exit 0
fi
# Case 2: URL (scheme://...). 'grep -qE' replaces the original
# [ `echo ... | grep -E ...` ] test -- same truth value, no word-splitting.
if echo "$1" | grep -qE '^[a-zA-Z0-9]+://.+'; then
  if [ -f ".terraform/lattice.tgz" ]; then
    # Reuse the cached tarball when the version embedded in it matches the
    # version encoded in the URL; otherwise fall through and re-download.
    URL_VERSION=$(echo "$1" | sed -E 's/https?:\/\/.+\/backend\/lattice-(v.+)\.tgz/\1/')
    LATTICE_TAR_VERSION=$(tar Oxzf .terraform/lattice.tgz lattice-build/common/LATTICE_VERSION)
    if [[ $URL_VERSION == $LATTICE_TAR_VERSION ]]; then
      exit 0
    else
      echo "WARNING: url version ($URL_VERSION) does not match cached lattice.tgz version ($LATTICE_TAR_VERSION)."
      echo "Overwriting cached file..."
    fi
  fi
  curl -sL -o .terraform/lattice.tgz "$1"
  exit 0
fi
echo "ERROR: get-lattice-tar requires a local file or valid URL as an argument"
exit 1
| true
|
3dce61436a88f8bc33d50d5f92a7873d90059527
|
Shell
|
amey-sam/cassandra
|
/replace_node_patch.sh
|
UTF-8
| 1,772
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Deal with containers scaling down and up,
# when they will scale up with the same IP as before,
# they will exit with an error because they need --replace-address while booting to take back their seat inside the cluster
# Seeds arrive as a comma-separated list; split them one per line.
seeds=$(echo $CASSANDRA_SEEDS | tr "," "\n")
# dont replace_address if node allready bootstraped
# (the data directory only exists after a successful bootstrap)
if [ ! -d "/var/lib/cassandra/data" ]; then
for seed in $seeds; do
echo "Trying to reach $seed"
ping -c 1 $seed >/dev/null 2>/dev/null
PingResult=$?
if [ "$PingResult" -eq 0 ]; then
# Skip the seed entry that refers to this very node.
if [ $CASSANDRA_BROADCAST_ADDRESS = $seed ];
then
echo "Current node match seed to evaluate, skip !"
continue
fi
echo "$seed found, connecting to database to check if current node needs --replace_address"
# Connect to seed to investigate node status
QUERY_RESPONSE=$(cqlsh $seed -e "select peer, host_id, rpc_address from system.peers where peer='$CASSANDRA_BROADCAST_ADDRESS';")
echo $QUERY_RESPONSE
NODE_FOUND=`echo $QUERY_RESPONSE | grep -c "1 rows"`
if [ $NODE_FOUND = 0 ]; then
echo "Current node IP NOT FOUND in cluster, node will bootstrap and join normally"
else
echo "Current node ip FOUND in cluster, node will bootstrap with replace_address option and then join the cluster"
# NOTE(review): this assignment only affects the current shell; it must
# be exported or the script sourced by the Cassandra launcher for the
# option to take effect -- confirm how this file is invoked.
JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address=$CASSANDRA_BROADCAST_ADDRESS"
fi
# One reachable seed is enough; stop probing.
break
elif [ "$PingResult" -eq 1 ]; then
echo "$seed not reachable, NEXT"
elif [ "$PingResult" -eq 2 ]; then
echo "$seed not reachable, service not activated yet, NEXT"
else
echo "Unknown status, NEXT"
fi
done
#else
# echo "node allready bootstraped try to boot normally"
fi
| true
|
b80116734f650f8bf5fd7b4cdf9005f5a734da98
|
Shell
|
cbaoth/dotfiles
|
/bin/image-concat
|
UTF-8
| 3,828
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# image-concat: Concatenate images creating a simple collage
# Author: Andreas Weyer <dev@cbaoth.de>
# Keywords: bash shell-script
declare -r _SCRIPT_PATH="$(cd "$(dirname "$0")"; pwd -P)"
declare -r _SCRIPT_FILE="$(basename "$0")"
# include commons lib
for f in {$_SCRIPT_PATH/,$_SCRIPT_PATH/lib,$HOME/lib}/commons.sh; do
  if [[ -f "$f" ]]; then
    source "$f"
    break
  fi
done
if ! command -v "cl::cmd_p" >& /dev/null; then
  printf "commons lib not found, exiting ..\n" >&2
  exit 1
fi
# set options
set -o errtrace
#set -o errexit
set -o pipefail
set -o nounset
(( ${DEBUG_LVL:-0} >= 2 )) && set -o xtrace
IFS=$'\t\n\0'
# traps
# BUG FIX: the final printf argument used to be c"${BASH_COMMAND:-N/A}",
# which prefixed the reported command with a stray 'c'.
trap '_rc=$?; \
  printf "ERROR(%s) in %s:%s\n -> %s\n -> %s\n" "${_rc}" \
    "${0:-N/A}" "${LINENO:-N/A}" "${FUNCNAME[@]:-N/A}" \
    "${BASH_COMMAND:-N/A}"; \
  _exit_clean $_rc' ERR
trap 'printf "\nINTERRUPT\n"; _exit_clean 1' SIGINT SIGTERM
#trap '_rc=$?; printf "EXIT(%s)\n" "$_rc"; _exit_clean $_rc' EXIT
# constants
declare -r _MAX=3840
# arguments
declare mode=default
declare tile=""
declare outfile=""
# Initialize the arrays explicitly so "${arr[@]}" is safe under
# 'set -o nounset' on bash versions before 4.4.
declare -a infiles=()
declare -a tempfiles=()
# clean exit (remove temp files)
_exit_clean() {
for f in "${tempfiles[@]}"; do
rm -f "${f}"
done
exit ${1:-0}
}
# parse arguments
_parse_args() {
while [[ -n "${1:-}" ]]; do
case $1 in
-[hwn])
mode="$1"
shift
;;
-x1)
tile="x1"
shift
;;
-o)
[[ -z "$2" ]] && cl::p_err "missing file name after -o" && return 1
outfile="$2"
shift 2
;;
*)
break
;;
esac
done
# at least two input files are required
if [[ -z "${2:-}" ]]; then
cl::p_usg "${_SCRIPT_FILE} [-w|-h|-n] [-x1] [-o OUTFILE] INFILE.."
exit 1
fi
# if no outfile name is provided generate one
if [[ -z "$outfile" ]]; then
outfile="$(cl::p_file_with_suffix "-concat$(date +%s)" "$1")"
fi
# create infiles array from remaining arguments
infiles=("$@")
}
# resize all infiles to temp file
_resize_infiles() {
local dim="$1"
for infile in "${infiles[@]}"; do
local outfile="$HOME/._image-concat_$(basename "${infile}")"
#cl::q_overwrite "${outfile}" || _exit_clean 1
tempfiles+=("${outfile}")
convert "${infile}" -resize ${dim}\> "${outfile}"
done
}
# concatenate images
_concat() {
cl::q_overwrite "${outfile}" || _exit_clean 1
local min_height="$(identify -format '%h\n' "${infiles[@]}" | sort -n | head -n 1)"
#min_height="$(echo -e "$min_height\n$((_MAX/$#))" | sort -n | head -n 1)"
local min_width="$(identify -format '%w\n' "${infiles[@]}" | sort -n | head -n 1)"
#min_width="$(echo -e "$min_width\n$((_MAX/$#))" | sort -n | head -n 1)"
local -a args=(-background black -mode Concatenate)
#args+=(-limit memory 100mb)
[[ "${tile}" = "x1" ]] && args+=(-tile x1)
#args+=(-gravity center) # default
case "${mode}" in
-n)
#args+=(-geometry +0+0)
convert -background black +append "${infiles[@]}" "${outfile}"
;;
-w)
#args+=(-geometry "$(printf "%s\n%s" "${min_width}" "${_MAX}" | sort -n | head -n 1)x")
_resize_infiles "$(printf "%s\n%s" "${min_width}" "${_MAX}" | sort -n | head -n 1)x"
convert -background black +append "${tempfiles[@]}" "${outfile}"
;;
-h)
#args+=(-geometry "x${min_height}")
_resize_infiles "x${min_height}"
convert -background black +append "${tempfiles[@]}" "${outfile}"
;;
*)
#args+=(-geometry "x${min_height}" -extent "${min_width}>x")
_resize_infiles "x${min_height}" -extent "${min_width}>x"
convert -background black +append "${tempfiles[@]}" "${outfile}"
;;
esac
#montage "${args[@]}" "${infiles[@]}" "${outfile}"
}
# Entry point: parse the command line into the globals, then build the
# collage; finally remove any temp files and exit 0.
_main() {
  _parse_args "$@"
  _concat
}
_main "$@"
_exit_clean 0
| true
|
a928abf3d0cb798f5a81b6df014fc8f0076fd5db
|
Shell
|
curry-lang/kics2
|
/scripts/kics2-makecgi.sh
|
UTF-8
| 7,883
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# Compile a Curry program (using the HTML library) into a cgi script
# Generates: a wrapper module exporting a unique main function, a compiled
# server executable (<cgi>.server), an optional ulimit wrapper script, and
# a small <cgi> shell script that forwards requests to the server via
# SubmitForm. Optionally emits JavaScript support code for WUIs.
KICS2BUILDDIR=`echo KICS2HOME must be defined here!`
KICS2INSTALLDIR=
# Define the main directory where KICS2 is installed:
if [ -d "$KICS2INSTALLDIR" ] ; then
  CURRYROOT=$KICS2INSTALLDIR
else
  CURRYROOT=$KICS2BUILDDIR
fi
# Standard suffix that will be added to the main script:
# ($$ makes the generated module name unique per invocation)
CGISUFFIX="_CGIMAIN_$$"
# Name of the main function in the main script (should not be in conflict
# with any exported name of the Curry program)
MAINCALL="main_cgi_9999_$$"
CPMEXEC="cypm exec"
# Defaults for all command-line options:
ERROR=
HELP=no
CURRYDOPTIONS=
CURRYOPTIONS=":set -time :set -interactive"
COMPACT=no
DEBUG=no
DEBUGFILE=
ULIMIT="-t 120"
MAIN=main
CGIFILE=
WUIJS=no
WUIMODULES=
SERVERTIMEOUT=
STANDALONE=no
LOADBALANCE="-loadbalance standard"
ARGS=
# Option parsing; non-option arguments are collected in ARGS.
while [ $# -gt 0 -a -z "$ERROR" ]; do
  case $1 in
    -help | -h | -\? ) HELP=yes ;;
    -D* ) CURRYDOPTIONS="$CURRYDOPTIONS $1" ;;
    -cpm ) echo 'Option "-cpm" deprecated.' ;;
    -cpmexec ) shift ; CPMEXEC=$1 ;;
    -compact ) COMPACT=yes ;;
    -debug ) DEBUG=yes ;;
    -debugfile ) shift ; DEBUGFILE=$1 ;;
    -servertimeout ) shift ; SERVERTIMEOUT="-servertimeout $1" ;;
    -multipleservers ) LOADBALANCE="-loadbalance multiple" ;; # backward compt.
    -loadbalance ) shift ; LOADBALANCE="-loadbalance $1" ;;
    -standalone ) STANDALONE=yes ;;
    -ulimit ) shift; ULIMIT=$1 ;;
    -wuijs ) WUIJS=yes ;;
    -wui ) shift; WUIMODULES="$WUIMODULES $1" ;;
    -m ) shift; MAIN=$1 ;;
    -o ) shift; CGIFILE=$1 ;;
    -* ) ERROR="Unknown option: $1" ;;
    * ) ARGS="$ARGS $1" ;; # collect non-option arguments
  esac
  shift
done
if test -n "$ARGS" ; then
  set $ARGS
fi
if [ $HELP = yes ] ; then
  set "1" ; shift # to show next usage message
fi
if test -n "$ERROR" ; then
  echo "ERROR: $ERROR"
  set "1" ; shift # to show next usage message
fi
# Exactly one program argument expected; anything else prints usage.
if [ $# != 1 -a $# != 3 ] ; then
  echo "USAGE: $0 [options] <curry>"
  echo
  echo "MAIN OPTIONS:"
  echo "-o <cgi>  : name of the file (with suffix .cgi) where the cgi program should"
  echo "            be stored (default: <curry>.cgi)."
  echo "-m <form> : Curry expression (of type IO HtmlForm) computing the HTML form"
  echo "            (default: main)."
  echo "<curry>   : name of the Curry program (without suffix) containing the script"
  echo
  echo "FURTHER OPTIONS:"
  echo '-Dname=val  : define kics2rc property "name" as "val"'
  echo "-cpmexec <c>: set the command to execute programs with the Curry Package"
  echo "              Manager (default: 'cypm exec')"
  echo "-compact    : reduce size of generated cgi program by deleting unused functions"
  echo "-debug      : include code for showing failures"
  echo "              (= PAKCS options '+printfail/+allfails')"
  echo "-debugfile f: include code for storing failure trace in file f"
  echo "              (= PAKCS options '+consfail file:f')"
  echo "-ulimit <l> : set 'ulimit <l>' when executing the cgi program"
  echo "              (default: '-t 120')"
  echo "-servertimeout <ms>: set the timeout for the cgi server process to"
  echo "              <ms> milliseconds (default: 7200000 / two hours)"
  echo "-loadbalance <t>: start new server process if load for one server is"
  echo "              high where <t> specifies the kind of high load."
  echo "              Current possible values for <t>:"
  echo "              no: no load balance"
  echo "              standard: some standard load balancing (default)"
  echo "              multiple: new server process for each initial call to"
  echo "              a cgi script (only reasonable with short timeout)"
  echo "-standalone : generate standalone script (i.e., copy programs"
  echo "              required from KiCS2 system to local directory)"
  echo "-wuijs      : generate JavaScript support code for WUIs"
  echo "-wui <mod>  : consider also imported module <mod> (that contains WUI"
  echo "              specifications) when generating JavaScript support code"
  exit 1
fi
# Try to locate WUI/JavaScript translator:
WUIJS_PREPROCESSOR=`which curry2js`
if [ ! -x "$WUIJS_PREPROCESSOR" ] ; then
  # try to set curry2js to the CPM standard location:
  WUIJS_PREPROCESSOR=$HOME/.cpm/bin/curry2js
  if [ ! -x "$WUIJS_PREPROCESSOR" ] ; then
    WUIJS_PREPROCESSOR=
  fi
fi
if [ -z "$WUIJS_PREPROCESSOR" -a $WUIJS = yes ] ; then
  echo "No support for JavaScript possible!"
  echo "Please install the Curry->JavaScript translator curry2js by:"
  echo "> cypm update && cypm install curry2js"
  exit 1
fi
# remove possible suffix:
PROG=`expr $1 : '\(.*\)\.lcurry' \| $1`
PROG=`expr $PROG : '\(.*\)\.curry' \| $PROG`
if test -z "$CGIFILE" ; then
  CGIFILE=$PROG.cgi
fi
MAINMOD=$PROG$CGISUFFIX
MAINCURRY=$MAINMOD.curry
# compute (relative) name of cgi program:
CGIDIR=`dirname $CGIFILE`
if [ $CGIDIR = "." ] ; then
  CGIPROG=$CGIFILE
else
  CGIPROG=`expr $CGIFILE : "$CGIDIR/\(.*\)"`
fi
# name of the server:
CGIFILEPATHNAME=`(cd $CGIDIR > /dev/null ; pwd)`
CGISERVERPROG=$CGIPROG.server
CGISERVEREXEC=$CGIFILEPATHNAME/$CGIPROG.server
CGISERVERCMD=$CGISERVEREXEC
if test -n "$ULIMIT" ; then
  CGISERVERCMD=$CGISERVEREXEC.sh # used if ulimit is present
fi
# unique key for this cgi script:
CGIKEY="$CGIFILEPATHNAME/$CGIPROG `date '+%m/%d/%y/%H/%M/%S'`"
# generate server program implementing the cgi script application:
# (a small wrapper module that imports the user program and exports the
# unique $MAINCALL entry point)
rm -f $MAINCURRY
echo "module $MAINMOD($MAINCALL) where" >> $MAINCURRY
echo "import $PROG" >> $MAINCURRY
echo "import HTML.Base" >> $MAINCURRY
echo "import HTML.CgiServer" >> $MAINCURRY
echo "$MAINCALL :: IO ()" >> $MAINCURRY
if [ $WUIJS = no ] ; then
  echo "$MAINCALL = runFormServerWithKey \"$CGIPROG\" \"$CGIKEY\" ($MAIN)" >> $MAINCURRY
else
  # WUI mode: also generate the JavaScript support file next to the cgi
  CGIBASE=`expr $CGIPROG : '\(.*\)\.cgi' \| $CGIPROG`
  JSFILE=$CGIBASE\_wui.js
  $CPMEXEC $WUIJS_PREPROCESSOR -wui -o $JSFILE $PROG $WUIMODULES
  if [ $? != 0 ] ; then
    rm -f $MAINCURRY
    exit $?
  fi
  chmod 644 $JSFILE
  if [ $CGIDIR != "." ] ; then
    mv $JSFILE $CGIDIR/$JSFILE
  fi
  echo "$MAINCALL = runFormServerWithKeyAndFormParams \"$CGIPROG\" \"$CGIKEY\" [FormJScript \"$JSFILE\",FormOnSubmit \"return submissionAllowed()\"] ($MAIN)" >> $MAINCURRY
fi
# compile main module:
echo "Generating saved state for initial expression: $MAIN"
$CPMEXEC $CURRYROOT/bin/curry --nocypm $CURRYDOPTIONS $CURRYOPTIONS :l $MAINMOD :save $MAINCALL :q
# now the file $MAINMOD should contain the executable computing the HTML form:
if test ! -f $MAINMOD ; then
  echo "Error occurred, generation aborted."
  $CURRYROOT/bin/cleancurry $MAINMOD
  rm -f $MAINMOD.curry
  exit 1
fi
# stop old server, if necessary:
if [ -f $CGISERVEREXEC ] ; then
  echo "Stop old version of the server '$CGISERVEREXEC'..."
  $CURRYROOT/currytools/www/Registry stopscript "$CGISERVEREXEC"
fi
SUBMITFORM="$CURRYROOT/currytools/www/SubmitForm"
# copy executable from the Curry system (if required):
if [ $STANDALONE = yes ] ; then
  cp -p "$SUBMITFORM" $CGIFILEPATHNAME/SubmitForm
  SUBMITFORM="./SubmitForm"
fi
# generate cgi script:
# (a thin shell wrapper that forwards the request to the server process)
rm -f $CGIFILE
echo "#!/bin/sh" >> $CGIFILE
if test -n "$LANG" ; then
  echo "LANG=$LANG" >> $CGIFILE
  echo "export LANG" >> $CGIFILE
fi
echo "$SUBMITFORM $SERVERTIMEOUT $LOADBALANCE \"$CGIPROG\" \"$CGIKEY\" \"$CGISERVERCMD\" 2>> $CGIFILE.log" >> $CGIFILE
chmod 755 $CGIFILE
# move compiled executable to final position:
mv $MAINMOD $CGISERVEREXEC
chmod 755 $CGISERVEREXEC
# optional wrapper that applies the ulimit before starting the server:
if test -n "$ULIMIT" ; then
  echo "#!/bin/sh" > $CGISERVERCMD
  echo "ulimit $ULIMIT" >> $CGISERVERCMD
  echo "exec $CGISERVEREXEC \"\$@\"" >> $CGISERVERCMD
  chmod 755 $CGISERVERCMD
fi
$CURRYROOT/bin/cleancurry $MAINMOD
rm -f $MAINMOD.curry
echo "`date`: cgi script compiled" > $CGIFILE.log
echo
echo "New files \"$CGIFILE[.server]\" with compiled cgi script generated."
| true
|
b335f87812747c21fd9c92f1226035f9bab1c709
|
Shell
|
ronnycoding/django-docker-seed
|
/src/codestyle.sh
|
UTF-8
| 418
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run the ivelum/codestyle linter in Docker against this source tree,
# forwarding any extra arguments to the codestyle command.
SRC_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}/" )" && pwd )"
# convert any windows path to linux style (c:/path, c:\path, /c/path => //c/path)
if [[ ! "$(uname -a)" == *"Linux"* ]] && [[ ! "$(uname -a)" == *"Darwin"* ]] ; then
    SRC_DIR=$(echo ${SRC_DIR} | sed 's/://g' | sed -r 's/\\/\//g' | sed -r 's/^[\/]*/\/\//')
fi
# FIX: "$@" instead of unquoted $* so arguments containing spaces are
# forwarded intact; the volume path is quoted for the same reason.
docker run -v "$SRC_DIR"/:/code/ --rm ivelum/codestyle:latest codestyle "$@"
| true
|
46ed1e83070dc3f7b78ca62488adb731dba00048
|
Shell
|
yandex/yandex-taxi-testsuite
|
/testsuite/databases/pgsql/scripts/service-postgresql
|
UTF-8
| 1,731
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Start/stop wrapper for a per-testsuite PostgreSQL instance.
# The sourced helpers provide die() and script_main(); find-pg.sh sets
# POSTGRESQL_BINPATH.
. $TESTSUITE_LIB_UTILS
. $(dirname $0)/find-pg.sh
# All three variables must be provided by the caller.
if [ "x$POSTGRESQL_CONFIGS_DIR" = "x" ]; then
    die "POSTGRESQL_CONFIGS_DIR must be set"
fi
if [ "x$POSTGRESQL_PORT" = "x" ]; then
    die "POSTGRESQL_PORT must be set"
fi
if [ "x$POSTGRESQL_TMPDIR" = "x" ]; then
    die "POSTGRESQL_TMPDIR must be set"
fi
# Use ramdisk for postgresql if available
RAMDISK=/mnt/ramdisk/$USER
if mkdir -p $RAMDISK 2> /dev/null; then
    POSTGRESQL_DATADIR=$RAMDISK/_postgresql${WORKER_SUFFIX_PATH}/data
else
    POSTGRESQL_DATADIR=$POSTGRESQL_TMPDIR/data
fi
POSTGRESQL_LOGSDIR=$POSTGRESQL_TMPDIR/logs
mkdir -p $POSTGRESQL_DATADIR
# Initialize a fresh cluster and start it on $POSTGRESQL_PORT.
# TCP only: unix_socket_directories is cleared deliberately.
start() {
    echo "Postgresql data directory: $POSTGRESQL_DATADIR"
    echo "Postgresql version: $($POSTGRESQL_BINPATH/pg_config --version)"
    mkdir -p $POSTGRESQL_TMPDIR
    mkdir -p $POSTGRESQL_LOGSDIR
    set -e
    $POSTGRESQL_BINPATH/initdb \
        --encoding=UTF-8 \
        --locale=en_US.UTF-8 \
        --lc-collate=C \
        --lc-ctype=C \
        -U testsuite \
        -D $POSTGRESQL_DATADIR
    cp $POSTGRESQL_CONFIGS_DIR/*.conf $POSTGRESQL_DATADIR
    # disable jit for PostgreSQL 12+
    if [ $($POSTGRESQL_BINPATH/pg_config --version | awk -F '[^0-9]+' '{ print $2 }') -gt 11 ]; then
        echo "jit = off" >> $POSTGRESQL_DATADIR/postgresql.conf
    fi
    $POSTGRESQL_BINPATH/pg_ctl \
        -s -D $POSTGRESQL_DATADIR -w start \
        -o "-c listen_addresses=*" \
        -o "-c port=$POSTGRESQL_PORT" \
        -o "-c unix_socket_directories=" \
        -o "-c log_directory=$POSTGRESQL_LOGSDIR"
}
# Stop the cluster immediately (no checkpoint wait) and wipe its data dir.
stop() {
    $POSTGRESQL_BINPATH/pg_ctl -s -D $POSTGRESQL_DATADIR \
        -m immediate stop
    rm -rf $POSTGRESQL_DATADIR
}
script_main "$@"
| true
|
6e61cf8e6577983cc091f601777ea28c2205c119
|
Shell
|
sasipalakizhi/keytool-certificate-chain-example
|
/src/02_genkeypair_root.sh
|
UTF-8
| 615
| 2.6875
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
set -o nounset
set -o errexit
# Generating key pair for the Root CA into its key store.
# The certificate is marked as a CA (BasicConstraints, pathlen 1)
# restricted to signing certificates and CRLs. All parameters come from
# ROOT_* environment variables; 'set -o nounset' aborts if any is unset.
# NOTE(review): the O= (organization) field is populated from
# $ROOT_ORGANIZATIONAL_UNIT, same as OU= -- this looks like a copy/paste
# slip; confirm whether a ROOT_ORGANIZATION variable was intended.
"$JAVA_HOME/bin/keytool" -genkeypair \
    -alias "$ROOT_ALIAS" \
    -dname "CN=$ROOT_COMMON_NAME, OU=$ROOT_ORGANIZATIONAL_UNIT, O=$ROOT_ORGANIZATIONAL_UNIT, L=$ROOT_LOCALITY, S=$ROOT_STATE, C=$ROOT_COUNTRY" \
    -ext BasicConstraints:critical=ca:true,pathlen:1 \
    -ext KeyUsage:critical=keyCertSign,cRLSign \
    -keyalg "$ROOT_KEYALG" \
    -keypass "$ROOT_KEYPASS" \
    -keysize "$ROOT_KEYSIZE" \
    -keystore "$ROOT_KEYSTORE" \
    -sigalg "$ROOT_SIGALG" \
    -storepass "$ROOT_STOREPASS" \
    -storetype "$ROOT_STORETYPE" \
    -v
| true
|
de4cc91b50fb7f1336224b8852cfe8e42c2160de
|
Shell
|
Adven27/Exam
|
/.utility/update-gh-pages.sh
|
UTF-8
| 588
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Publish the generated spec reports to the repository's gh-pages branch.
echo -e "Starting to update gh-pages\n"
cp -R example/build/reports/specs "$HOME/specs"
# FIX: guard the cd/clone steps. Without these checks a failed clone would
# let 'git rm -rf .' and the commits below run inside $HOME instead of the
# gh-pages checkout.
cd "$HOME" || exit 1
git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
git config --global user.name "${GITHUB_ACTOR}"
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/Adven27/Exam.git gh-pages > /dev/null || exit 1
cd gh-pages || exit 1
# Replace the entire branch content with the freshly generated reports.
git rm -rf .
git commit -m "${GITHUB_ACTOR} build ${GITHUB_RUN_NUMBER} remove old files"
cp -Rf "$HOME"/specs/* .
git add -f .
git commit -m "${GITHUB_ACTOR} build ${GITHUB_RUN_NUMBER} pushed to gh-pages"
git push -f origin gh-pages
echo -e "Update finished\n"
| true
|
03e5aa8494e15745674b9f9ccc690e35e29a7b97
|
Shell
|
prenaux/ham
|
/specs/toolsets/repos/setup-toolset.sh
|
UTF-8
| 1,766
| 3.484375
| 3
|
[
"Jam",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Sourced ham toolset setup for 'repos' (git/hg): exports the toolset
# variables, adds the platform binaries to PATH and appends version info
# to HAM_TOOLSET_VERSIONS. Uses 'return' (not 'exit') because it is
# sourced into the caller's shell.
# toolset
export HAM_TOOLSET=REPOS
export HAM_TOOLSET_NAME=repos
export HAM_TOOLSET_DIR="${HAM_HOME}/toolsets/repos"
# platform
case $HAM_OS in
    NT*)
        # Windows ships its own git/hg bundle; download it if missing.
        toolset_check_and_dl_ver repos nt-x86 v4 || return 1
        export REPOS_DIR="${HAM_TOOLSET_DIR}/nt-x86"
        export OPENSSL_CONF="${REPOS_DIR}/git/ssl/openssl.cnf"
        pathenv_add "${HAM_TOOLSET_DIR}"
        pathenv_add "${REPOS_DIR}/bin" after
        pathenv_add "${REPOS_DIR}/git/bin" after
        pathenv_add "${REPOS_DIR}/git/usr/bin" after
        pathenv_add "${REPOS_DIR}/hg" after
        # Try our best to disable that insanity, hands off my files keep them
        # as they are in the repo ffs.
        git config --global core.autocrlf false
        ;;
    OSX*)
        pathenv_add "${HAM_TOOLSET_DIR}"
        ;;
    LINUX*)
        pathenv_add "${HAM_TOOLSET_DIR}"
        ;;
    *)
        echo "E/Toolset: Unsupported host OS"
        return 1
        ;;
esac
# version check
VER="--- repos ------------------------"
if [ "$HAM_NO_VER_CHECK" != "1" ]; then
    VER="$VER
--- git ---
`git --version`"
    if [ $? != 0 ]; then
        echo "E/Can't get Git version."
        return 1
    fi
fi
export HAM_TOOLSET_VERSIONS="$HAM_TOOLSET_VERSIONS
$VER"
# Mercurial is optional: report its version when present, else a warning.
HG_PATH=`where_inpath hg || true`
if [ -e "$HG_PATH" ]; then
    VER="--- mercurial ---"
    if [ "$HAM_NO_VER_CHECK" != "1" ]; then
        VER="$VER
`hg --version`"
        if [ $? != 0 ]; then
            echo "E/Can't get Mercurial version."
            return 1
        fi
    fi
else
    VER="--- mercurial ---"
    if [ "$HAM_NO_VER_CHECK" != "1" ]; then
        VER="$VER
W/Mercurial is not installed or not accessible from the PATH !"
    fi
fi
export HAM_TOOLSET_VERSIONS="$HAM_TOOLSET_VERSIONS
$VER"
| true
|
99de1eddcedafeddcaf896446c078e6c3eeb424c
|
Shell
|
n1v0lg/fresco-logistic-regression-2
|
/run-single.sh
|
UTF-8
| 668
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
#test -f target/logistic-regression-jar-with-dependencies.jar || mvn package
#mvn package
echo "Started at $(date)"
# Run one party of the MPC logistic regression; extra arguments are
# forwarded to the jar. FIX: "$@" (quoted) instead of bare $@ so
# arguments containing spaces survive word-splitting.
run () {
    java \
        -jar target/logistic-regression-jar-with-dependencies.jar \
        -p1:localhost:8871 \
        -p2:localhost:8872 \
        --privacy-budget 0 \
        --sensitivity 0.001 \
        --unsafe-debug-log \
        "$@"
}
# NOTE(review): the usage text below calls $1 a "test set", but it is used
# here as a party index into the mtcars input files; breast_cancer is
# never referenced -- confirm intent.
main() {
    run "-i${1}" < "target/classes/mtcars_party$1.txt"
}
if [[ -z "$1" ]]; then
    echo "Usage: "
    echo "  $0 <test-set>"
    echo
    echo "Example: $0 mtcars"
    echo
    echo "Available test sets:"
    echo "  mtcars"
    echo "  breast_cancer"
    exit 1
fi
time main "$1"
| true
|
7b7eb653071b7ea3fe707e24ff328db03b583876
|
Shell
|
alpe/cctrl_ci_test
|
/ci_maven/constraints
|
UTF-8
| 168
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Abort with a diagnostic on stderr unless every required environment
# variable is set; the first missing one wins.
for required in APP_NAME CCTRL_HOST; do
  if [ -z "${!required}" ]; then
    echo "$required not set" >&2
    exit 1
  fi
done
| true
|
d5063d913c2bfc6c299fa6aac9c3f4519a2a560f
|
Shell
|
ucl-cssb/CIN_PDO
|
/script/dic/sub_dic_real_logdist_withbratio.sh
|
UTF-8
| 481
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
# Compute DIC values for replicates 1..100 of a dataset with a Julia
# helper and collect the results into one file under $odir.
pdir=./
odir=../../data/dic
msample=100
nsample=100
# Positional parameters: dataset name, cell count, epsilon, model type,
# s3 weight and mutation-only flag.
dataset=$1
ncell=$2
epsilon=$3
type=$4
s3_weight=$5
only_mut=$6
# NOTE(review): "espilon" is a typo baked into existing output filenames;
# left as-is so downstream consumers keep finding the files.
suffix="logs3_weight${s3_weight}_mut${only_mut}_espilon${epsilon}"
fout=$odir/dic_${type}_1-100_${suffix}_$dataset
# Truncate the output file before appending per-replicate results.
: > "$fout"
for i in {1..100..1}
do
  echo "$i"
  # $(...) replaces the original backticks: nestable and easier to read.
  dic=$(julia "$pdir/compute_DIC_${type}_logdist_withbratio.jl" "$dataset" "$ncell" "$epsilon" "$msample" "$nsample" "$s3_weight" "$only_mut")
  echo "$i $dataset $ncell $epsilon $dic" >> "$fout"
done
| true
|
ec17b9f3817320e3eeef59ae64e5507cc3f9e6ce
|
Shell
|
epidemics/epifor
|
/fetch_csse.sh
|
UTF-8
| 231
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Keep a local checkout of the CSSE COVID-19 data under data/: pull when
# it already exists, clone it otherwise.
set -eu

repo_dir=CSSE-COVID-19

cd data
if [ -e "$repo_dir" ]; then
    echo "Updating CSSE github"
    cd "$repo_dir"
    git pull
    cd ..
else
    echo "Cloning CSSE github"
    git clone https://github.com/CSSEGISandData/COVID-19 "$repo_dir"
fi
| true
|
da8884e76cdab0ac883e8fc443272c0c82f684e1
|
Shell
|
kakabisht/Sem_4
|
/OS-LAB/assignment_7/assignment7-2.sh
|
UTF-8
| 463
| 3.53125
| 4
|
[] |
no_license
|
# Read two integers and print their LCM and GCD.
echo "Welcome to lcm and and gcd"
echo "Enter the first number :"
read -r a
echo "Enter the second number : "
read -r b
# Order the pair so num >= den before running Euclid's algorithm.
if [ "$a" -gt "$b" ]
then
    num=$a
    den=$b
else
    num=$b
    den=$a
fi
# Euclid: replace (num, den) with (den, num % den) until the remainder is
# 0; den is then the GCD. FIX: the original bounded this with a for-loop
# whose counter was *decremented* ('i--'), so its own condition could
# never end the loop and it relied entirely on the 'break'; a plain
# remainder-driven while-loop is the correct form. 'expr' calls are
# replaced by shell arithmetic.
r=$(( num % den ))
while [ "$r" -ne 0 ]
do
    num=$den
    den=$r
    r=$(( num % den ))
done
gcd=$den
lcm=$(( a * b / gcd ))
echo " The LCM of $a and $b is : $lcm"
echo " The GCD of $a and $b is : $gcd"
| true
|
6c5b6708174c4de1e5748568e042e865ddd1d866
|
Shell
|
sharyanto/scripts
|
/show-bash-history
|
UTF-8
| 221
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# this is a useless script, but somehow i keep forgetting that there is the 'history' command
# Default the pager when $PAGER is unset or empty, then render the
# timestamp comment lines of ~/.bash_history as human-readable dates.
: "${PAGER:=less}"
perl -pe'$_=localtime($1)."|" if /^#(\d+)$/' ~/.bash_history | $PAGER
| true
|
d3780a77fe6a4a0b6ff69ba12c02fcb05d729e4e
|
Shell
|
deflaux/workbench
|
/api/db-cdr/generate-cdr/generate-private-cdr-counts.sh
|
UTF-8
| 2,783
| 4.0625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# This generates new BigQuery dataset for use in cloudsql by the workbench
# and dumps csvs of that dataset to import to cloudsql
# End product is:
# 0) Big query dataset for cdr version cdrYYYYMMDD
# 1) .csv of all the tables in a bucket
# Example usage, you need to provide a bunch of args
# ./project.rb generate-private-cdr-counts --bq-project all-of-us-ehr-dev --bq-dataset test_merge_dec26 \
# --workbench-project all-of-us-workbench-test --cdr-version 20180130 \
# --bucket all-of-us-workbench-cloudsql-create
set -xeuo pipefail
IFS=$'\n\t'
USAGE="./generate-cdr/generate-private-cdr-counts --bq-project <PROJECT> --bq-dataset <DATASET> --workbench-project <PROJECT>"
USAGE="$USAGE --bucket <BUCKET> --cdr-version=YYYYMMDD"
USAGE="$USAGE \n Data is generated from bq-project.bq-dataset and dumped to workbench-project.cdr<cdr-version>."
# Parse the long options; every one of the five is mandatory and checked
# individually below.
BQ_PROJECT=""
BQ_DATASET=""
WORKBENCH_PROJECT=""
CDR_VERSION=""
BUCKET=""
while [ $# -gt 0 ]; do
  echo "1 is $1"
  case "$1" in
    --bq-project) BQ_PROJECT=$2; shift 2;;
    --bq-dataset) BQ_DATASET=$2; shift 2;;
    --workbench-project) WORKBENCH_PROJECT=$2; shift 2;;
    --cdr-version) CDR_VERSION=$2; shift 2;;
    --bucket) BUCKET=$2; shift 2;;
    -- ) shift; echo -e "Usage: $USAGE"; break ;;
    * ) break ;;
  esac
done
if [ -z "${BQ_PROJECT}" ]
then
  echo -e "Usage: $USAGE"
  echo -e "Missing bq project name"
  exit 1
fi
if [ -z "${BQ_DATASET}" ]
then
  echo -e "Usage: $USAGE"
  echo -e "Missing bq_dataset name"
  exit 1
fi
if [ -z "${WORKBENCH_PROJECT}" ]
then
  echo -e "Usage: $USAGE"
  echo -e "Missing workbench_project name"
  exit 1
fi
if [ -z "${BUCKET}" ]
then
  echo -e "Usage: $USAGE"
  echo -e "Missing bucket name"
  exit 1
fi
#Check cdr_version is not empty
if [ -z "${CDR_VERSION}" ]
then
  echo -e "Usage: $USAGE"
  echo -e "Missing cdr version"
  exit 1
fi
# The output dataset is named after the CDR version.
WORKBENCH_DATASET=$CDR_VERSION
startDate=$(date)
echo $(date) " Starting generate-private-cdr-counts $startDate"
## Make workbench cdr count data
echo "Making BigQuery cdr dataset"
if ./generate-cdr/make-bq-data.sh --bq-project $BQ_PROJECT --bq-dataset $BQ_DATASET --output-project $WORKBENCH_PROJECT \
 --output-dataset $WORKBENCH_DATASET --cdr-version "$CDR_VERSION"
then
  echo "BigQuery cdr data generated"
else
  echo "FAILED To generate BigQuery data for cdr $CDR_VERSION"
  exit 1
fi
## Dump workbench cdr count data
echo "Dumping BigQuery cdr dataset to .csv"
if ./generate-cdr/make-bq-data-dump.sh --dataset $WORKBENCH_DATASET --project $WORKBENCH_PROJECT --bucket $BUCKET
then
  echo "Workbench cdr count data dumped"
else
  echo "FAILED to dump Workbench cdr count data"
  exit 1
fi
stopDate=$(date)
echo "Start $startDate Stop: $stopDate"
echo $(date) " Finished generate-private-cdr-counts "
| true
|
602716889baeca8962faecca89482a4410abf2fa
|
Shell
|
knowsuchagency/cs160b
|
/lab-iftest/what
|
UTF-8
| 317
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Report whether the given path is a symbolic link, missing, a regular
# file, a directory, or something else. Order matters: -L is checked
# before -e so dangling symlinks are still reported as links.
target=${1:?"need an argument"}
if [[ -L "$target" ]]; then
    echo "$target is a symbolic link"
elif [[ ! -e "$target" ]]; then
    echo "$target does not exist"
elif [[ -f "$target" ]]; then
    echo "$target is a regular file"
elif [[ -d "$target" ]]; then
    echo "$target is a directory"
else
    echo "$target is unknown"
fi
| true
|
867478bedcd281eeb4f516e40307c6f186fb9e1a
|
Shell
|
theverything/kp-web
|
/build/init.sh
|
UTF-8
| 1,080
| 2.5625
| 3
|
[] |
no_license
|
## Install the additional packages ##
# NOTE(review): no shebang and no `set -e` -- a failing step will not stop
# the provisioning run; presumably executed via `bash init.sh` by a VM
# bootstrap tool. TODO confirm.
sudo apt-get install -y python-software-properties nano git curl
## Install nodejs ##
# chris-lea PPA provides a newer node than the stock Ubuntu archive.
sudo add-apt-repository ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get install -y nodejs
# Some tools expect the binary to be named `node` rather than `nodejs`.
sudo ln -s /usr/bin/nodejs /usr/bin/node
## Install MongoDB ##
# Register MongoDB's signing key and apt repo, then install and start mongod.
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
sudo apt-get update
sudo apt-get install -y mongodb-org
sudo /etc/init.d/mongod start
## Install Ruby ##
sudo apt-add-repository ppa:brightbox/ruby-ng
sudo apt-get update
sudo apt-get install -y ruby
## Install npm packages ##
npm install -g naught gulp forever
## Install Gems ##
# sass is needed by the gulp build below; skip docs to speed up the install.
gem install sass --no-ri --no-rdoc
## Add ENV vars ##
# Append production environment settings to the login shell config.
echo $'\nexport NODE_ENV=production' \
$'\nexport NODE_HOST=0.0.0.0'\
$'\nexport NODE_PORT=80' >> ~/.bashrc
## Install the app ##
# Clone, install dependencies and build the app under /home/kp-web.
cd /home && git clone https://github.com/theverything/kp-web.git kp-web
cd /home/kp-web && \
npm install && \
gulp
| true
|
151290d27fa2884e9dd9bb92aea81cb728b40036
|
Shell
|
aws-quickstart/quickstart-microfocus-amc-es
|
/scripts/Setup-Fileshare.sh
|
UTF-8
| 734
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash -e
# Populate the Micro Focus Fileshare data directory (/FSdata) for the
# BankDemo sample and register the SYSAD/FSVIEW users.
# Usage: Setup-Fileshare <FSVIEWUserPassword>
#Log output
exec > >(tee /var/log/Setup-Fileshare.log|logger -t user-data -s 2>/dev/console) 2>&1
if [ "$#" -ne 1 ]
then
echo "Not Enough Arguments supplied."
echo "Setup-Fileshare <FSVIEWUserPassword>"
exit 1
fi
FSVIEWUserPassword=$1
export TERM="xterm"
shift
# cobsetenv sets the COBOL environment needed by the fs utility below.
source /opt/microfocus/EnterpriseDeveloper/bin/cobsetenv
cp -r /home/ec2-user/BankDemo_FS/System/catalog/data/* /FSdata
mkdir /FSdata/PRC
cp -r /home/ec2-user/BankDemo_FS/System/catalog/PRC/* /FSdata/PRC
mkdir /FSdata/CTLCARDS
cp -r /home/ec2-user/BankDemo_FS/System/catalog/CTLCARDS/* /FSdata/CTLCARDS
cp /tmp/fs.conf /FSdata
chmod 777 /FSdata/*
fs -pf /FSdata/pass.dat -u SYSAD -pw SYSAD
# Quote the password so values containing spaces or glob characters are
# passed to fs intact (the original expansion was unquoted).
fs -pf /FSdata/pass.dat -u FSVIEW -pw "$FSVIEWUserPassword"
| true
|
4540c278df6cf116b83b86d8411eea474f3b256e
|
Shell
|
hivesolutions/scudum
|
/scripts/build/tools/xz.sh
|
UTF-8
| 291
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Download, build and install xz into $PREFIX.
# VERSION may be overridden from the environment (defaults to 5.2.4).
VERSION=${VERSION-5.2.4}
set -e +h
wget --content-disposition "http://tukaani.org/xz/xz-$VERSION.tar.xz" "--output-document=xz-$VERSION.tar.xz"
# Quote all $VERSION/$PREFIX expansions (they were unquoted in the
# original, which breaks on values containing spaces or glob chars).
rm -rf "xz-$VERSION" && tar -Jxf "xz-$VERSION.tar.xz"
rm -f "xz-$VERSION.tar.xz"
cd "xz-$VERSION"
./configure --prefix="$PREFIX"
make && make install
| true
|
ed75fdaa95ccfba468dcbbf6f8b74466c89a84cc
|
Shell
|
Docheinstein/easyshare
|
/devscripts/make-mans.sh
|
UTF-8
| 527
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the easyshare man pages with Sphinx.
SCRIPT_DIR="$(dirname "$0")"
PROJECT_DIR="$SCRIPT_DIR/.."
# utils.sh provides command_exists, pip_module_exists, abort and echo_cyan.
. "$SCRIPT_DIR/utils.sh"
cd "$PROJECT_DIR" || exit
# Verify the three build-time dependencies before invoking sphinx-build.
if ! command_exists sphinx-build; then
abort "sphinx must be installed for build docs"
fi
if ! pip_module_exists sphinx_rtd_theme; then
abort "sphinx_rtd_theme must be installed for build docs"
fi
if ! pip_module_exists recommonmark; then
abort "recommonmark must be installed for build docs"
fi
echo_cyan "====== CREATING MANS ====="
# -M man: run the "man" builder; sources in docs/sphinx/src/mans,
# output written under docs/sphinx/build.
sphinx-build -M man docs/sphinx/src/mans docs/sphinx/build
| true
|
1a976ba074cb22fbf84f5e7629777ee9c56d91ad
|
Shell
|
UBC-ECE-Sasha/PIM-JSON
|
/sparser/sparser_dpu/scripts/eval_size_sub.sh
|
UTF-8
| 461
| 2.53125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# $1 filename $2 number of the queries $3 number of tasklets $4 number of DPUs $5 query name
# Each delete/insert pair below rewrites a fixed Makefile line in place,
# effectively setting the NR_TASKLETS / NR_DPUS / QUERY variables.
# NOTE(review): assumes those variables live on Makefile lines 4-6 exactly.
sed -i '4d' Makefile
sed -i "4 i NR_TASKLETS\ ?=\ $3" Makefile
sed -i '5d' Makefile
sed -i "5 i NR_DPUS\ ?=\ $4" Makefile
sed -i '6d' Makefile
sed -i "6 i QUERY\ ?=\ $5" Makefile
make clean
make
# Run the benchmark with a fixed sequence of query ids, appending results;
# the sleeps give the DPU hardware time to settle between runs.
# NOTE(review): $2 (number of queries) is never used -- TODO confirm intent.
./bench $1 15 >> eval_size.txt
sleep 10
./bench $1 13 >> eval_size.txt
sleep 10
./bench $1 16 >> eval_size.txt
sleep 10
./bench $1 13 >> eval_size.txt
sleep 10
| true
|
ef5468b264473544795164c5f8b5fb1977261855
|
Shell
|
SwintonStreet/CppCodingExamples
|
/sizeOf/runSizeOff
|
UTF-8
| 171
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile the sizeOf example with strict warnings, run it, then remove
# the binary. Using the compiler invocation itself as the `if` condition
# replaces the original non-portable `[ $? == 0 ]` check (`==` is a
# bashism inside `[`), with identical behavior.
if g++ --std=c++17 \
       -Wall \
       -Werror \
       -Wshadow \
       sizeOf.cc \
       -o sizeOf
then
    ./sizeOf
    rm sizeOf
fi
| true
|
64b47f45afa91db849199cd0a8e5a784712ec476
|
Shell
|
hossein-f/GxE_pipeline
|
/MESH_pipeline/MESH_QuASAR_master_split.sh
|
UTF-8
| 354
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
## warning: each partition of the data needs to be run in its own analysis folder
set -v
set -e
data=$1
outdir=../analysis
mkdir -p "$outdir"
# Drop the header row (any line containing "snp"), then emit two
# tab-separated lines per input row: columns 1-4 and columns 1,7-9.
# grep reads the file directly -- the original piped it through `less`,
# which merely acts like `cat` when its output is not a tty.
grep -v "snp" "$data" | awk '{print $1,$2,$3,$4"\n"$1,$7,$8,$9}' | tr " " "\t" > "$outdir"/allPlates_allSnps.txt
cp ../../../GxE_pipeline/MESH_pipeline/MESH/* "$outdir"
echo Processing complete
| true
|
7c5849c538b8090c1136712204deaaa3c9157eb8
|
Shell
|
paddycarey/kafka-docker
|
/scripts/start_kafka.sh
|
UTF-8
| 763
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entry point: patch the advertised listener addresses into
# server.properties, then hand off to supervisord.
set -e
# Set the advertised host value (if set)
if [ ! -z "$KAFKA_ADVERTISED_HOST" ]; then
echo "Setting advertised.listeners value to: PLAINTEXT://$KAFKA_ADVERTISED_HOST:9092 or SSL://$KAFKA_ADVERTISED_HOST:9093 or SASL_SSL://$KAFKA_ADVERTISED_HOST:9193"
# Replace each localhost listener (PLAINTEXT 9092, SASL_SSL 9193, SSL 9093)
# with the advertised host; sed -i.bak leaves .bak copies behind.
sed -i.bak "s/advertised.listeners=PLAINTEXT:\/\/localhost:9092/advertised.listeners=PLAINTEXT:\/\/$KAFKA_ADVERTISED_HOST:9092/g" /etc/kafka/server.properties
sed -i.bak "s/SASL_SSL:\/\/localhost:9193/SASL_SSL:\/\/$KAFKA_ADVERTISED_HOST:9193/g" /etc/kafka/server.properties
sed -i.bak "s/SSL:\/\/localhost:9093/SSL:\/\/$KAFKA_ADVERTISED_HOST:9093/g" /etc/kafka/server.properties
# Make the advertised hostname resolvable inside the container itself.
echo "127.0.0.1 $KAFKA_ADVERTISED_HOST" >> /etc/hosts
fi
# supervisord runs in the foreground and supervises the broker processes.
supervisord -c /etc/supervisord.conf
| true
|
995c777bfb0c2b4200116e24d3e7861dc3522ea3
|
Shell
|
google/santa
|
/Testing/integration/VM/make_ro_img.sh
|
UTF-8
| 585
| 3.9375
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is manually run to create a read-only disk image
# which is mounted into new VMs to help automate the setup process.
# (macOS only: relies on hdiutil and /Volumes auto-mounting.)
set -xeuo pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
if [ $# -ne 2 ]; then
echo "Usage: $0 image_path xcode_xip_path" >&2
exit 1
fi
IMG=$1
XCODE=$2
# NOTE(review): assumes hdiutil mounts the APFS volume named "RO" at
# /Volumes/RO -- this fails if a volume of that name is already mounted.
MOUNT_PATH="/Volumes/RO"
# 40G sparse APFS image, volume name RO.
hdiutil create -size 40G -fs APFS -volname RO "${IMG}"
hdiutil attach "${IMG}"
# Copy the Xcode installer plus the VM setup helpers into the image.
cp "${XCODE}" "${MOUNT_PATH}"
cp "${SCRIPT_DIR}"/{setup.sh,disclaim.c,bash_control.sh} "${MOUNT_PATH}"
hdiutil detach "${MOUNT_PATH}"
| true
|
7eee7075ca76f6ffca2e1ec317d2285aa30979dd
|
Shell
|
mftb/lena
|
/moisa/rodalab.sh
|
UTF-8
| 112
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run ./main.m on every .pgm image in the current directory, collecting
# the .pbm output of each run under out/<image>/.
rm -rf out
mkdir out
# Glob directly in the loop: the original stored the pattern in a
# variable, which expands to the literal string "*.pgm" when no file
# matches, producing a bogus out/*.pgm directory and a failing run.
for f in *.pgm
do
  # Skip the unexpanded literal pattern when there are no .pgm files.
  [ -e "$f" ] || continue
  mkdir "out/$f"
  ./main.m "$f"
  mv *.pbm "out/$f/"
done
| true
|
e09308b13c77d685fead4a3d42dc5ed3e8b1c326
|
Shell
|
rainskript/skriptimine
|
/praks10/yl2.sh
|
UTF-8
| 599
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Print the season for a user-supplied month number (prompts in Estonian).
echo -e "Tervist! Sisestage kuu nr: \c"
read kuu
# Months 1, 2 and 12 are winter.
# Fixed typo from the original: "-e1 12" -> "-eq 12"; the broken operator
# made the December comparison fail with a test(1) syntax error.
if test "$kuu" -eq 1 -o "$kuu" -eq 2 -o "$kuu" -eq 12
then
echo "Vastus"
sleep 1
echo "Praegu on talv!"
# Months 3 through 5 are spring.
elif test "$kuu" -ge 3 -a "$kuu" -le 5
then
echo "vastus"
sleep 1
echo "Kevad!"
# Months 6 through 8 are summer.
elif test "$kuu" -ge 6 -a "$kuu" -le 8
then
echo "Vastus"
sleep 1
echo "Suvi"
# Months 9 through 11: not handled as a season, report month not found.
# Fixed the original "test $kuu -ge -a $kuu -le 11", which was missing
# the lower-bound operand (9) and always errored out.
elif test "$kuu" -ge 9 -a "$kuu" -le 11
then echo "Masin otsib..."
sleep 2
echo "Sellist kuud ei tuvastatud"
fi
#
# end
|
763ebc4ba1474f467b1645760b3152e4714ae7b1
|
Shell
|
chris-olszewski/dotfiles
|
/scripts/pacback
|
UTF-8
| 679
| 4.1875
| 4
|
[] |
no_license
|
#!/usr/bin/bash
set -e
# Basic script to manage pacman package list backups.
# Backups are timestamped tarballs of /var/lib/pacman/local in $BACKUP_DIR.
BACKUP_DIR="$HOME/pacback"
ROOT_BACKUP="/backup.tar.bz2"
# Create a new timestamped backup tarball in $BACKUP_DIR and print its name.
function backup {
  mkdir -p "$BACKUP_DIR"
  pushd "$BACKUP_DIR" > /dev/null
  FILE="$(date -Iseconds).tar.bz2"
  # Quoted (was `echo $FILE`) so the name is printed verbatim.
  echo "$FILE"
  tar cjf "$BACKUP_DIR/$FILE" /var/lib/pacman/local
  popd > /dev/null
}
# Restore a backup tarball ($1) over the live pacman database.
# NOTE(review): overwrites /var/lib/pacman/local unconditionally -- the
# y/n confirmation mentioned in the original comment was never implemented.
function restore {
  # probably need to be root
  FILE=$1
  # prompt for y/n
  cp "$FILE" "$ROOT_BACKUP"
  pushd / > /dev/null
  tar xjvf "$ROOT_BACKUP"
  popd > /dev/null
}
SUBCMD=$1
case "$SUBCMD" in
  "backup") backup;;
  "restore") restore "$2";;
  "list") ls "$BACKUP_DIR";;
  *) echo "unknown command";;
esac
| true
|
c99af4d23fc828946a3d1a5b91c0608fcadac5af
|
Shell
|
hgxl64/mariadb-benchmarks
|
/sysbench-runs/series36/run.mysqld
|
UTF-8
| 864
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# usage: $0 <subdir in install-dir> <path to my.cnf>
#
# (w) Axel XL Schwenke for MariaDB Services AB
set -e
set -u
#set -x
# config.sh defines INST_DIR, DATADIR, SOCKET, CREATEDB and CPU_MASK_MYSQLD.
. config.sh
HERE=$(pwd)
SERVER=${1:?"no server given!"}
test -d ${INST_DIR}/${SERVER} || { echo " ${INST_DIR}/${SERVER} does not exist!"; exit; }
shift
CONFIG=${1:?"no config given!"}
test -s ${CONFIG} || { echo "${CONFIG} does not exist!"; exit; }
#create the datadir
# Recreate the datadir from scratch unless CREATEDB is "0".
if [ "$CREATEDB" != "0" ]
then
test -d ${DATADIR} && rm -rf ${DATADIR}
mkdir ${DATADIR}
${INST_DIR}/${SERVER}/bin/mysql_install_db --defaults-file=${CONFIG} --basedir=${INST_DIR}/${SERVER} --datadir=${DATADIR}
fi
#start the server
cd ${INST_DIR}/${SERVER}
# Pin mysqld to the configured CPU set and interleave memory across NUMA
# nodes for reproducible benchmark numbers; mysqld_safe blocks until the
# server shuts down, hence the "stopped" message afterwards.
numactl --interleave=all --physcpubind=${CPU_MASK_MYSQLD} \
./bin/mysqld_safe --defaults-file=${HERE}/${CONFIG} --datadir=${DATADIR} --socket=${SOCKET} --skip-networking
echo "${SERVER} stopped"
| true
|
20719da75ec0384a7e02606158bca4209a3d5fb9
|
Shell
|
darekem73/dotConfig
|
/_scripts/proc.sh
|
UTF-8
| 653
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Print a timestamp, CPU clock frequencies (from cpuinfo and sysfs) and
# temperatures from the thermal/hwmon sysfs trees.
echo "$(date)"
# Clock in MHz as reported by the kernel. grep reads the file directly
# (original used a useless `cat |`), and `grep -oE` replaces the
# deprecated `egrep -o` with identical output.
grep MHz /proc/cpuinfo | grep -oE "[0-9]+\.[0-9]+"
# Current scaling frequency per core, converted from kHz to MHz.
for p in 0 1 2 3; do
  (( f=$(cat /sys/devices/system/cpu/cpu$p/cpufreq/scaling_cur_freq) / 1000 ))
  echo $f
done
# NOTE(review): assumes cores 0-3 and thermal zones / hwmon devices 0-6
# exist; missing entries make cat print an error -- TODO confirm intended.
for h in 0 1 2 3 4 5 6; do
  (( temp=$(cat /sys/class/thermal/thermal_zone$h/temp) / 1000 ))
  for t in 1 2 3 4 5; do
    NAME=/sys/class/hwmon/hwmon$h/name
    # Fixed typo: the label path began with a double slash in the original.
    LABEL=/sys/class/hwmon/hwmon$h/temp$t"_label"
    FILE=/sys/class/hwmon/hwmon$h/temp$t"_input"
    # Substitutions deliberately left unquoted so a missing label collapses
    # to nothing instead of printing an extra empty field.
    [ -f "$FILE" ] && [ -f "$NAME" ] && echo $(cat $NAME) $([ -f $LABEL ] && cat $LABEL) $(cat $FILE) $temp
  done
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.