blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2cbac006a57fa8fb48a9e0f5a3e35af26bcff552
|
Shell
|
MoBoo/splunk_elastic_evaluation_pocs
|
/elastic/Datenanalyse/Scripted Fields/setup/03_create-scripted_field.sh
|
UTF-8
| 1,675
| 3.1875
| 3
|
[] |
no_license
|
# THIS DOES NOT WORK PROPERLY. IF THIS IS USED, KIBANA WILL OVERRIDE ALL FIELDS IN THE INDEX PATTERN. THEREFORE THIS INDEX PATTERN BECOMES USELESS.
# to make this work a manual refresh of the index-pattern field list is required. This can be done in "Stack Management -> Index Patterns -> http_access_logs* -> refresh field list"
# NOTE: the script is intentionally disabled -- the early "exit 0" below keeps
# the curl call from ever running. Remove it only once the field-list
# override problem described above is solved.
exit 0
#!/bin/bash
echo ">>> Create kibana index-pattern: http_access_logs_pattern with scripted field http.reponse.status_code_desc"
# Create the index pattern through the Kibana saved-objects API. The "fields"
# attribute embeds one painless scripted field that maps HTTP status codes
# (200/404/500) to human-readable descriptions, defaulting to "n/a".
curl -X POST "localhost:5601/api/saved_objects/index-pattern/http_access_logs_pattern" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '
{
"attributes": {
"title": "http_access_logs*",
"timeFieldName": "timestamp",
"fields": "[{\"name\":\"http.response.status_code_desc\",\"type\":\"string\",\"count\":1,\"scripted\":true,\"script\":\"String status_code = doc['http.response.status_code'].value;\\nif (status_code == \\\"200\\\") {\\n return \\\"Ok\\\";\\n}\\nelse if (status_code == \\\"404\\\") {\\n return \\\"Not Found\\\";\\n}\\nelse if (status_code == \\\"500\\\") {\\n return \\\"Internal Server Error\\\"\\n}\\nelse {\\n return \\\"n/a\\\"\\n}\",\"lang\":\"painless\",\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false}]"
}
}';echo
# This does not work so easy. See link on how to set this up. (https://discuss.elastic.co/t/api-for-creating-scripted-fields/139236/2)
# Basically you cannot just update the fields like this. First you need to get the fields, parse the json, append your field and send it all back.
# As of now there is only this solution, no way to update only a single fields. It is basically an overwrite of everything everytime.
| true
|
084ab1df4df190f6c7828fc19c058f6a3f9fa1fb
|
Shell
|
strekmann/vim
|
/setup.sh
|
UTF-8
| 761
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a vim environment: base16 shell colours, the Vundle plugin
# manager, the shared vimrc, plugins and fonts. Safe to re-run; steps whose
# targets already exist are skipped.
cd ~/
# Install base16-shell colour scripts (skipped when already present).
if [ ! -e .config/base16-shell ]; then
git clone https://github.com/chriskempson/base16-shell.git ~/.config/base16-shell
fi
# Install the Vundle plugin manager (skipped when already present).
if [ ! -e .vim/bundle/Vundle.vim ]; then
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
fi
# Fetch the shared vimrc unless ~/.vimrc is already a symlink.
if [ ! -L .vimrc ]; then
curl https://raw.githubusercontent.com/strekmann/vim/master/vimrc -o ~/.vimrc
fi
# Install/clean plugins non-interactively, then install the bundled fonts.
vim +BundleInstall +BundleClean +qall
~/.vim/bundle/fonts/install.sh
# Final manual step for the user. The here-doc delimiter is unquoted on
# purpose: $HOME expands now, while the escaped \$ variables print literally.
cat << EOF
Now, make sure to add these lines to your .zshrc or .bashrc:
BASE16_SHELL=$HOME/.config/base16-shell/
[ -n "\$PS1" ] && [ -s \$BASE16_SHELL/profile_helper.sh ] && eval "\$(\$BASE16_SHELL/profile_helper.sh)"
And then run base16_* (tab completion should work) to set color scheme
EOF
| true
|
344165502f4b1695b4eb4d0474f6de7d322d00aa
|
Shell
|
Eurus-Holmes/stanfordnlp
|
/scripts/config.sh
|
UTF-8
| 665
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Set environment variables for the training and testing of stanfordnlp modules.
# Set UDBASE to the location of CoNLL18 folder
# For details, see http://universaldependencies.org/conll18/data.html
# export UDBASE=/path/to/CoNLL18
# Set directories to store processed training/evaluation files
export DATA_ROOT=./data
# One data directory per pipeline module (tokenizer, multi-word-token
# expansion, lemmatizer, POS tagger, dependency parser).
export TOKENIZE_DATA_DIR=$DATA_ROOT/tokenize
export MWT_DATA_DIR=$DATA_ROOT/mwt
export LEMMA_DATA_DIR=$DATA_ROOT/lemma
export POS_DATA_DIR=$DATA_ROOT/pos
export DEPPARSE_DATA_DIR=$DATA_ROOT/depparse
# NOTE(review): "ete" presumably means end-to-end evaluation data -- confirm.
export ETE_DATA_DIR=$DATA_ROOT/ete
# Set directories to store external word vector data
export WORDVEC_DIR=./extern_data/word2vec
| true
|
8d69841e86142bbe51e350edb3269d16e8cc4a2c
|
Shell
|
zanmato1984/my-sh
|
/install.sh
|
UTF-8
| 2,392
| 3.609375
| 4
|
[] |
no_license
|
# Install my-sh: splice MY_SH_* environment exports and "source my-sh.sh"
# into the chosen shell's rc file, and symlink terminal/editor/tmux/ssh
# configuration for the chosen domain.
#
# Usage: install.sh <shell> <domain>
if [ $# != 2 ]; then
echo 'Usage:'
echo ''
echo 'install.sh shell domain'
echo ''
echo 'shell: could be bash or zsh.'
echo ''
echo 'domain: could be goron, gerudo, rito or zora.'
# FIX: usage errors now exit non-zero (was a bare "exit;", i.e. status 0).
exit 1
fi
SHELL=$1
DOMAIN=$2
# Pick the rc file and the "source <framework-env>" marker line that the
# sed commands below splice new lines around.
if [ $SHELL = "bash" ]; then
RC="$HOME/.bashrc"
SHELL_ENV='"$BASH_IT"'
elif [ $SHELL = "zsh" ]; then
RC="$HOME/.zshrc"
SHELL_ENV='$ZSH'
else
# FIX: previously an unrecognized shell left RC/SHELL_ENV unset and the
# sed calls below failed confusingly -- bail out early instead.
echo "Unsupported shell: $SHELL (must be bash or zsh)"
exit 1
fi
# Portability hack for newlines in sed replacements: the replacement text
# below writes a literal backslash followed by ${NEW_LINE}. BSD sed (Darwin)
# needs backslash + a real newline, so NEW_LINE is an actual newline there;
# GNU sed (Linux) understands the two-character escape "\n", so NEW_LINE is
# the letter "n".
if [ $(uname) = "Darwin" ]; then
NEW_LINE=$'\n'
elif [ $(uname) = "Linux" ]; then
NEW_LINE='n'
fi
PWD=`pwd`
# Some clean-up ahead.
sed -i -e "/MY_SH/d" $RC
sed -i -e "/my-sh.sh/d" $RC
## Set My-SH envs.
sed -i -e "s,source $SHELL_ENV,export MY_SH_HOME=$PWD\\${NEW_LINE}export MY_SH_SHELL=$SHELL\\${NEW_LINE}export MY_SH_DOMAIN=$DOMAIN\\${NEW_LINE}source $SHELL_ENV," $RC
# Source my-sh.sh which has prerequisites of shell plugins defined in domain.
sed -i -e "s,source $SHELL_ENV,source $PWD/my-sh.sh\\${NEW_LINE}source $SHELL_ENV," $RC
# Alacritty conf: OS-specific overrides plus the shared config.
if [ $(uname) = "Darwin" ]; then
ALACRITTY_OS_YML='alacritty.darwin.yml'
elif [ $(uname) = "Linux" ]; then
ALACRITTY_OS_YML='alacritty.linux.yml'
fi
ln -sf $PWD/alacritty/$ALACRITTY_OS_YML $HOME/.alacritty.os.yml
ln -sf $PWD/alacritty/alacritty.yml $HOME/.alacritty.yml
# Input rc.
ln -sf $PWD/input/inputrc $HOME/.inputrc
# Vim rc (shared by vim, IdeaVim and Vrapper).
ln -sf $PWD/vim/vimrc $HOME/.vimrc
ln -sf $PWD/vim/vimrc $HOME/.ideavimrc
ln -sf $PWD/vim/vimrc $HOME/.vrapperrc
# Tmux conf.
ln -sf $PWD/tmux/tmux.conf $HOME/.tmux.conf
# SSH conf (per-domain).
ln -sf $PWD/ssh/$DOMAIN/config $HOME/.ssh/config
# Link shell plugins: common plugins plus the domain-specific set.
if [ $SHELL = "bash" ]; then
for f in `ls bash-it/common`; do
ln -sf $PWD/bash-it/common/$f $BASH_IT/custom/$f
done
for f in `ls bash-it/$DOMAIN`; do
ln -sf $PWD/bash-it/$DOMAIN/$f $BASH_IT/custom/$f
done
elif [ $SHELL = "zsh" ]; then
rm -rf $ZSH/custom/plugins/common
ln -sf $PWD/oh-my-zsh/common $ZSH/custom/plugins/common
rm -rf $ZSH/custom/plugins/$DOMAIN
ln -sf $PWD/oh-my-zsh/$DOMAIN $ZSH/custom/plugins/$DOMAIN
fi
# Set theme.
if [ $SHELL = "bash" ]; then
echo 'Please set bash-it theme manually.'
elif [ $SHELL = "zsh" ]; then
sed -i -e "s,ZSH_THEME=\".*\",ZSH_THEME=\"random\"," $RC
fi
# Enable plugins.
if [ $SHELL = "bash" ]; then
echo 'Please enable bash-it plugin manually.'
elif [ $SHELL = "zsh" ]; then
sed -i -e "s,plugins=(.*),plugins=(aliases brew docker docker-compose git golang mvn common $DOMAIN)," $RC
fi
| true
|
c55f78e0ed02ff23c345334fd6bb67ba9a1355d2
|
Shell
|
osidney/openlitespeed-autoinstall
|
/menu/crontab/them-cronjob
|
UTF-8
| 1,958
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive helper: append a user-supplied cron entry to root's crontab,
# rejecting empty or duplicate entries, then return to the crontab menu.
# (User-facing prompts/messages are in Vietnamese and left untouched.)
. /home/echbayols.conf
# Working directory for the before/after snapshots; start from a clean slate.
if [ ! -d "/etc/echbay/crontab" ]; then
mkdir -p /etc/echbay/crontab
else
rm -rf /etc/echbay/crontab/*
fi
echo "========================================================================="
#echo "Xem cach tao lenh crontab tai: http://cronjob.hostingaz.vn"
#echo "-------------------------------------------------------------------------"
# Prompt for the cron line to add.
echo -n "Crontab ban muon them [ENTER]: "
read cronjob
# Empty input: complain and go back to the menu.
if [ "$cronjob" = "" ]; then
clear
echo "========================================================================="
echo "Ban nhap sai, vui long nhap chinh xac!"
/etc/echbay/menu/crontab/cronjob-menu
exit
fi
# Duplicate check against root's current crontab file.
if [ "$(grep "$cronjob" /var/spool/cron/root)" == "$cronjob" ]; then
clear
echo "========================================================================= "
echo "Crontab ban vua nhap da ton tai tren he thong!"
/etc/echbay/menu/crontab/cronjob-menu
exit
fi
# Snapshot the crontab, append the new entry, snapshot again.
crontab -l > /etc/echbay/crontab/test4.txt
(crontab -u root -l ; echo "$cronjob") | crontab -u root -
crontab -l > /etc/echbay/crontab/test5.txt
# Identical line counts before and after mean the append did not take effect.
if [ "$(wc -l /etc/echbay/crontab/test4.txt | awk '{print $1}')" == "$(wc -l /etc/echbay/crontab/test5.txt | awk '{print $1}')" ]; then
rm -rf /etc/echbay/crontab/*
clear
echo "========================================================================= "
echo "Them crontab that bai! Crontab cua ban co the khong chinh xac"
echo "-------------------------------------------------------------------------"
echo "Tao lenh crontab tai http://cronjob.hostingaz.vn"
/etc/echbay/menu/crontab/cronjob-menu
else
rm -rf /etc/echbay/crontab/*
clear
echo "========================================================================= "
echo "Ban da them thanh cong crontab vao he thong."
echo "-------------------------------------------------------------------------"
echo "List Crontab hien tai:"
echo "-------------------------------------------------------------------------"
crontab -l
/etc/echbay/menu/crontab/cronjob-menu
exit
fi
| true
|
26f234665832725a0ede24fb7ef6742f74dd63a6
|
Shell
|
allenouyangke/common_shell
|
/rsync/rsyncInstall.sh
|
UTF-8
| 4,131
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# -------------------------------------------------------------------------------
# Script_name : rsync_install.sh
# Revision : 1.0
# Date : 2018/08/11
# Author : AllenKe
# Email : allenouyangke@icloud.com
# Description : rsync安装部署配置
# -------------------------------------------------------------------------------
source /ops/com/global_vars.sh && source /ops/com/global_funcs.sh
# Write the rsyncd configuration and secrets files.
function RsyncConfigFile
{
# Write the daemon configuration (here-doc body contains no $ or ` that
# would expand, so the unquoted EOF delimiter is safe here).
cat > /etc/rsyncd/rsyncd.conf << EOF
pid file = /var/run/rsyncd.pid
port = 873
# address =
secrets file = /etc/rsyncd/rsyncd.secrets
uid = root
gid = root
use chroot = yes
read only = no
write only = no
# hosts allow =
# hosts deny = *
max connections = 5
motd file = /etc/rsyncd/rsyncd.motd
log file = /var/logs/rsyncd.log
transfer logging = yes
log format = %t %a %m %f %b
timeout = 300
[test]
path = /tmp/test
list = yes
ignore errors = yes
auth users = test
comment = some description about this moudle
exclude = test1/ test2/
hosts allow = *
EOF
# Write the secrets file (user:password pairs).
cat > /etc/rsyncd/rsyncd.secrets << EOF
test:test@168
EOF
}
# Create the rsyncd config directory/files and lock down the secrets file.
function RsyncConfig
{
# F_DIR / F_FILE / F_STATUS_MINI come from /ops/com/global_funcs.sh --
# presumably "create if missing" helpers and a step-result logger; confirm.
F_DIR "/etc/rsyncd"
F_FILE "/etc/rsyncd/rsyncd.conf"
F_FILE "/etc/rsyncd/rsyncd.motd"
F_FILE "/etc/rsyncd/rsyncd.secrets"
# rsync refuses world-readable secrets files.
chmod 0600 /etc/rsyncd/rsyncd.secrets
F_STATUS_MINI "设置rsyncd.secrets权限为0600"
RsyncConfigFile
}
# Install rsync directly via yum, then generate its configuration.
function RsyncYumInstall
{
# (Optional pre-check whether rsync is already installed:)
# rpm -qa|grep rsync
# FIX: the yum package is "rsync"; "rsync_install" is this script's own
# name, not a package, so the original command could never succeed.
yum install rsync -y
F_STATUS_MINI "Yum安装Rsync"
RsyncConfig
}
# Build and install rsync from source, then generate its configuration.
function RsyncSourceInstall
{
# Scrape the latest release tarball URL from rsync.samba.org.
RSYNCURL=$(curl -s "http://rsync.samba.org/" | sed 's/ /\n/g' | egrep "rsync-[0-9]\.[0-9]\.[0-9]\.tar.gz" | egrep -v asc | awk -F'"' '{print $2}')
# FIX: the original pipeline contained "| |" (an empty pipeline stage),
# which is a bash syntax error.
RSYNCVERSION=$(echo ${RSYNCURL} | awk -F'/' '{print $NF}')
wget -P ${PACKAGES_PATH} ${RSYNCURL}
F_STATUS_MINI "下载Rsync源码包"
tar zxvf ${PACKAGES_PATH}/${RSYNCVERSION} -C ${PACKAGES_PATH}
F_STATUS_MINI "解压${RSYNCVERSION}"
# NOTE(review): RSYNCVERSION still carries the ".tar.gz" suffix, so the
# configure/make paths below likely need the extracted directory name
# (without the suffix) instead -- verify before relying on this path.
sh ${PACKAGES_PATH}/${RSYNCVERSION}/configure --prefix=${INSTALL_PATH}/rsync
F_STATUS_MINI "配置Rsync"
make
F_STATUS_MINI "编译Rsync"
make install
F_STATUS_MINI "安装Rsync"
RsyncConfig
}
# Generate the rsync start/stop/status/restart control script.
function RsyncScript
{
# FIX: the here-doc delimiter is now quoted ('EOF') so that $(...), $pidfile,
# ${status1} and "case $1" are written *literally* into /etc/init.d/rsync and
# evaluated when the service script runs. Previously they expanded here, at
# generation time, producing a broken init script. Also fixed the
# "${statusw2}" typo (undefined variable) in the stop path to "${status2}".
cat > /etc/init.d/rsync << 'EOF'
status1=$(ps -ef | egrep "rsync --daemon.*rsyncd.conf" | grep -v 'grep')
pidfile="/var/run/rsyncd.pid"
start_rsync="rsync --daemon --config=/etc/rsyncd/rsyncd.conf"
function rsyncstart() {
if [ "${status1}X" == "X" ];then
rm -f $pidfile
${start_rsync}
status2=$(ps -ef | egrep "rsync --daemon.*rsyncd.conf" | grep -v 'grep')
if [ "${status2}X" != "X" ];then
echo "rsync service start.......OK"
fi
else
echo "rsync service is running !"
fi
}
function rsyncstop() {
if [ "${status1}X" != "X" ];then
kill -9 $(cat $pidfile)
status2=$(ps -ef | egrep "rsync --daemon.*rsyncd.conf" | grep -v 'grep')
if [ "${status2}X" == "X" ];then
echo "rsync service stop.......OK"
fi
else
echo "rsync service is not running !"
fi
}
function rsyncstatus() {
if [ "${status1}X" != "X" ];then
echo "rsync service is running !"
else
echo "rsync service is not running !"
fi
}
function rsyncrestart() {
if [ "${status1}X" == "X" ];then
echo "rsync service is not running..."
rsyncstart
else
rsyncstop
rsyncstart
fi
}
case $1 in
"start")
rsyncstart
;;
"stop")
rsyncstop
;;
"status")
rsyncstatus
;;
"restart")
rsyncrestart
;;
*)
echo
echo "Usage: $0 start|stop|restart|status"
echo
esac
EOF
}
| true
|
1e994c9db2d2a06ca99c9f17e31005fa7cb7f6e0
|
Shell
|
learning-vi/vi-files
|
/amaze-your-friends/docs/course-tar/vi/makevi
|
UTF-8
| 292
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Concatenate every lesson section into a single course file called "vi",
# then copy it one level up as vi.txt. An existing "vi" is kept as "vi.old".
#
if [ -f vi ]; then
  mv vi vi.old
fi
# Each section lives in its own directory; join its parts, append the
# result to ../vi, and remove the intermediate file.
for section in head 1 2 3 4 5 6; do
  cd "$section"
  cat "$section".* > "$section"
  cat "$section" >> ../vi
  rm "$section"
  cd ..
done
cp vi ../vi.txt # put at top level
| true
|
81b41d81f497566bbe32e9819db0eac339ccea3c
|
Shell
|
NimbleCo/mcc
|
/docker/base/build-tool/usr/lib/mcc/component.sh
|
UTF-8
| 3,670
| 3.453125
| 3
|
[] |
no_license
|
# Path helpers for component build locations. Each helper echoes a path
# derived from the MCC_COMPONENT_* environment variables for the given
# component name.

function mcc_component_get_manifest_dir() {
  local name="$1"
  echo "${MCC_COMPONENT_MANIFESTS_DIR}/${name}"
}

function mcc_component_get_iout_dir() {
  local name="$1"
  echo "${MCC_COMPONENT_IOUT_DIR}/${name}"
}

function mcc_component_get_iout_tmp_dir() {
  local name="$1"
  echo "$(mcc_component_get_iout_dir "$name")${MCC_COMPONENT_IOUT_TMP_DIR}"
}

function mcc_component_get_iout_rfs_dir() {
  local name="$1"
  echo "$(mcc_component_get_iout_dir "$name")${MCC_COMPONENT_IOUT_RFS_DIR}"
}

# Manifest dir *inside* the component root fs (rfs prefix + manifest path).
function mcc_component_get_iout_manifest_dir() {
  local name="$1"
  echo "$(mcc_component_get_iout_rfs_dir "$name")$(mcc_component_get_manifest_dir "$name")"
}
# Populate the IOUT_* globals used by the begin/finish/cleanup steps for the
# given component name. Intentionally NOT local: callers read these.
function mcc_component_iout_vars() {
  IOUT_COMPONENT_NAME="$1"
  # Directory layout.
  IOUT_DIR=$(mcc_component_get_iout_dir "$IOUT_COMPONENT_NAME")
  IOUT_TMP_DIR=$(mcc_component_get_iout_tmp_dir "$IOUT_COMPONENT_NAME")
  IOUT_RFS_DIR=$(mcc_component_get_iout_rfs_dir "$IOUT_COMPONENT_NAME")
  IOUT_MANIFEST_DIR=$(mcc_component_get_iout_manifest_dir "$IOUT_COMPONENT_NAME")
  # Manifest outputs.
  IOUT_METADATA_FILE="$IOUT_MANIFEST_DIR/metadata"
  # Filesystem snapshots (begin/finish) and their diffed result.
  IOUT_FS_LIST_BEGIN="$IOUT_TMP_DIR/fs-list-begin"
  IOUT_FS_LIST_FINISH="$IOUT_TMP_DIR/fs-list-finish"
  IOUT_FS_LIST_OUT="$IOUT_MANIFEST_DIR/filesystem"
  # Environment snapshots (begin/finish) and their diffed result.
  IOUT_RUNTIME_ENV_BEGIN="$IOUT_TMP_DIR/env-list-begin"
  IOUT_RUNTIME_ENV_FINISH="$IOUT_TMP_DIR/env-list-finish"
  IOUT_RUNTIME_ENV_OUT="$IOUT_MANIFEST_DIR/runtime-env"
}
# Remove the component's temporary snapshot directory. A missing/empty
# component name is a no-op (never "rm -rf" a path built from nothing).
function mcc_component_iout_cleanup() {
  local name="$1"
  if [[ -n "$name" ]]; then
    rm -rf "$(mcc_component_get_iout_tmp_dir "$name")"
  fi
}
# Begin a component build: create the output directory tree and snapshot the
# current filesystem listing and environment so mcc_component_iout_finish can
# diff against them later. $1 is the component name.
function mcc_component_iout_begin() {
mcc_component_iout_vars "$1"
mkdir -p \
"$IOUT_DIR" \
"$IOUT_TMP_DIR" \
"$IOUT_RFS_DIR" \
"$IOUT_MANIFEST_DIR"
# log_task presumably logs the message and runs the remaining arguments as a
# command line (with '>' redirection) -- defined elsewhere; confirm.
log_task "Write begin fs list: $IOUT_FS_LIST_BEGIN" mcc_fs_list '>' "$IOUT_FS_LIST_BEGIN"
log_task "Write begin env: $IOUT_RUNTIME_ENV_BEGIN" mcc_env_list '>' "$IOUT_RUNTIME_ENV_BEGIN"
log_file_contents "$IOUT_RUNTIME_ENV_BEGIN"
}
# Write the component metadata file (currently only the component name, as a
# sourceable shell assignment).
function mcc_component_write_metadata() {
  local name="$1"
  local metadata_file="$2"
  printf 'MCC_COMPONENT_NAME="%s"\n' "$name" > "$metadata_file"
}
# Finish a component build: diff the environment and filesystem against the
# snapshots taken by mcc_component_iout_begin, emit the component manifest
# (runtime-env, filesystem list, metadata), sync the new files into the
# component root fs, then drop the temporary snapshot directory.
function mcc_component_iout_finish() {
mcc_component_iout_vars "$1"
if [[ ! -d "$IOUT_DIR" ]] ; then
# FIX: the backticks around `mcc component-begin` were unescaped inside
# double quotes, so the error path *executed* "mcc component-begin" as a
# command substitution instead of printing the hint.
log_error "Could not find output intermediary data dir ($IOUT_DIR), did you forget to run \`mcc component-begin\`?"
exit 10
fi
log_task "Write finish env" mcc_env_list '>' "$IOUT_RUNTIME_ENV_FINISH"
log_file_contents "$IOUT_RUNTIME_ENV_FINISH"
# Runtime env = variables that appeared between begin and finish, exported.
log_task "Write runtime env" mcc_env_list_diff "$IOUT_RUNTIME_ENV_BEGIN" "$IOUT_RUNTIME_ENV_FINISH" '|' mcc_diff_filter_new '|' mcc_env_add_exports '>' "$IOUT_RUNTIME_ENV_OUT"
log_file_contents "$IOUT_RUNTIME_ENV_OUT"
# TODO: Need to check files that were present both before and after and compute checksums, and warn if changed.
log_task "Write finish fs list: $IOUT_FS_LIST_FINISH" mcc_fs_list '>' "$IOUT_FS_LIST_FINISH"
log_task "Write output fs list" mcc_fs_list_diff "$IOUT_FS_LIST_BEGIN" "$IOUT_FS_LIST_FINISH" '|' mcc_diff_filter_new '>' "$IOUT_FS_LIST_OUT"
log_file_contents "$IOUT_FS_LIST_OUT"
log_task "Write component root fs: $IOUT_RFS_DIR" mcc_fs_list_sync "$IOUT_FS_LIST_OUT" "$IOUT_RFS_DIR"
log_task "Write component metadata" mcc_component_write_metadata "$IOUT_COMPONENT_NAME" "$IOUT_METADATA_FILE"
log_file_contents "$IOUT_METADATA_FILE"
mcc_component_iout_cleanup "$IOUT_COMPONENT_NAME"
}
| true
|
1a590ee9b2131c4d654f3935760a0304b2ad6036
|
Shell
|
ninazu/android
|
/build.sh
|
UTF-8
| 1,637
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build, sign, install and launch a minimal Android app without Gradle,
# driving aapt/javac/dx/jarsigner/adb directly.
JAVA_HOME="/home/ninazu/JDK/jdk1.8.0_144"
ANDROID_HOME="/big/android-studio-sdk"
SDK_VERSION="28.0.1"
PACKAGE_PATH="com/example/testapp"
PACKAGE="com.example.testapp"
MAIN_CLASS="MainActivity"
DEV_HOME=$(dirname "${0}")
# Platform android.jar is keyed by the major build-tools version.
MAJOR_VERSION=$(echo $SDK_VERSION | cut -d. -f1)
AAPT_PATH=$ANDROID_HOME/build-tools/$SDK_VERSION/aapt
DX_PATH=$ANDROID_HOME/build-tools/$SDK_VERSION/dx
ANDROID_JAR=$ANDROID_HOME/platforms/android-$MAJOR_VERSION/android.jar
ADB=$ANDROID_HOME/platform-tools/adb
# Clean artifacts from previous builds.
rm $DEV_HOME/src/$PACKAGE_PATH/R.java
rm $DEV_HOME/AndroidTest.keystore
rm -rf $DEV_HOME/bin/*
rm -rf $DEV_HOME/obj/*
# Generate R.java from the resources.
$AAPT_PATH package -f -m -S $DEV_HOME/res -J $DEV_HOME/src -M $DEV_HOME/AndroidManifest.xml -I $ANDROID_JAR
# Compile Java sources, convert to dex, then package the unsigned APK.
$JAVA_HOME/bin/javac -d $DEV_HOME/obj -cp $ANDROID_JAR -sourcepath $DEV_HOME/src $DEV_HOME/src/$PACKAGE_PATH/*.java
$DX_PATH --dex --output=$DEV_HOME/bin/classes.dex $DEV_HOME/obj
$AAPT_PATH package -f -M $DEV_HOME/AndroidManifest.xml -S $DEV_HOME/res -I $ANDROID_JAR -F $DEV_HOME/bin/AndroidTest.unsigned.apk $DEV_HOME/bin
# Create a throwaway debug keystore and sign the APK with it.
$JAVA_HOME/bin/keytool -genkey -validity 10000 -dname "CN=AndroidDebug, O=Android, C=US" -keystore $DEV_HOME/AndroidTest.keystore -storepass android -keypass android -alias androiddebugkey -keyalg RSA -v -keysize 2048
$JAVA_HOME/bin/jarsigner -sigalg SHA1withRSA -digestalg SHA1 -keystore $DEV_HOME/AndroidTest.keystore -storepass android -keypass android -signedjar $DEV_HOME/bin/AndroidTest.signed.apk $DEV_HOME/bin/AndroidTest.unsigned.apk androiddebugkey
# Reinstall on the connected device and launch the main activity.
$ADB uninstall $PACKAGE
$ADB install $DEV_HOME/bin/AndroidTest.signed.apk
$ADB shell am start $PACKAGE/$PACKAGE.$MAIN_CLASS
| true
|
7a81332f4db07cc9d470b84d01bbd3a4140dfc45
|
Shell
|
rubixlai/DevOps-note
|
/xen/install.sh
|
UTF-8
| 2,027
| 2.6875
| 3
|
[] |
no_license
|
##CentOS-
# Xen-from-source install walkthrough for CentOS (part script, part notes).
# Dependency packages
yum install hmaccalc ncurses-devel zlib-devel openssl-devel python-devel bridge-utils libtool-ltdl iasl xorg-x11-drv-evdev xorg-x11-drv-fbdev xorg-x11-drv-i810-devel xorg-x11-drv-via-devel xorg-x11-proto-devel xorg-x11-server-sdk xorg-x11-xtrans-devel
# flex and bison (needed to build acpica)
yum install flex bison
# Install ACPI CA (https://acpica.org/downloads)
# Newer releases failed to build here; acpica-unix-20130823.tar.gz is the
# newest version known to work.
tar zxvf acpica-unix-20130823.tar.gz && cd acpica-unix-20130823
make
make install
# Install the Xen hypervisor and tools
# First install the dependencies (dev86, uuid, glib, yajl, git, texinfo):
#wget http://rdebath.nfshost.com/dev86/Dev86bin-0.16.19.tar.gz
#tar zxvf Dev86bin-0.16.19.tar.gz && cd usr
#cp lib/* /usr/lib
#cp bin/* /usr/bin
yum install dev86
yum install libuuid libuuid-devel
yum install glib2 glib2-devel
yum install yajl yajl-devel
yum install git
yum install texinfo
# The Xen build downloads sources via git -- keep the network available.
tar zxvf xen-4.3.1.tar.gz && cd xen-4.3.1
make xen tools stubdom
make install-xen install-tools install-stubdom
# Build a Linux kernel with Xen support
xz -d linux-3.11.8.tar.xz && tar xvf linux-3.11.8.tar && cd linux-3.11.8
make menuconfig
# FIX: the menuconfig selections below were bare text and were being executed
# as (failing) shell commands; they are notes for the interactive step above.
# Select the following options in menuconfig:
#   Processor type and features--> Linux guest support--> Xen guest support
#   Device Drivers-->Network device support-->Xen network device frontend driver/Xen backend network device
#   Device Drivers-->Block devices-->Xen virtual block device support/Xen block-device backend driver
#   Device Drivers-->Xen driver support
make
make modules
make modules_install
make install
depmod 3.11.8
# Edit the boot loader so the system boots via Xen
vi /etc/grub.conf
# FIX: the grub entry below is sample configuration, not shell commands --
# add it to /etc/grub.conf during the interactive edit above.
#   title CentOS6.0 (linux-3.11.8-xen)
#   kernel /xen.gz
#   module /vmlinuz-3.11.8 ro root=/dev/sda3
#   module /initramfs-3.11.8.img
# If /boot is not a separate partition: kernel /boot/xen.gz modules /boot/vmlinuz-3.11.8
# Reboot the system
reboot
| true
|
d323fae3eaee62bf0d47a09cfcc2499d18b88a40
|
Shell
|
ToBeDefined/LibTensorFlowForiOSSwift
|
/run.sh
|
UTF-8
| 2,992
| 3.265625
| 3
|
[] |
no_license
|
# Download TensorFlow 1.2.1, patch two kernels for the iOS slim build,
# compile the iOS static libraries, then open the Xcode project.
BASE_PATH=$(cd `dirname $0`; pwd)
TensorFlowZipName="tensorflow-1.2.1"
# Install Homebrew (comment this section out if already installed).
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
if [[ ! $? -eq 0 ]]; then
echo "install Homebrew error"
exit 1
fi
########################################################################################################################
# Download TensorFlow (comment out if already downloaded; the zip name must
# match the value of TensorFlowZipName).
curl -L -o $TensorFlowZipName.zip https://github.com/tensorflow/tensorflow/archive/v1.2.1.zip
if [[ ! $? -eq 0 ]]; then
echo "download tensorflow error"
exit 1
fi
########################################################################################################################
# Unzip (comment out if already extracted); the folder ends up named "tensorflow".
unzip $BASE_PATH/$TensorFlowZipName -d $BASE_PATH
if [[ ! $? -eq 0 ]]; then
echo "unzip or download tensorflow error"
exit 1
fi
# delete old tensorflow folder and rename the freshly extracted one
rm -rf $BASE_PATH/tensorflow
mv $TensorFlowZipName tensorflow
########################################################################################################################
# install xcode-select plus the autotools needed by the TF makefile build
xcode-select --install
brew install automake
if [[ ! $? -eq 0 ]]; then
echo "install automake error"
exit 1
fi
brew install libtool
if [[ ! $? -eq 0 ]]; then
echo "install libtool error"
exit 1
fi
########################################################################################################################
# Patch a TensorFlow kernel build error: register the int32 "Add" op for
# __ANDROID_TYPES_SLIM__ builds.
# add to file `/tensorflow/tensorflow/core/kernels/cwise_op_add_1.cc`
# line `22`
Kernel_Add_CC="${BASE_PATH}/tensorflow/tensorflow/core/kernels/cwise_op_add_1.cc"
Kernel_Add_CC_Code="\
#if defined(__ANDROID_TYPES_SLIM__)\\
REGISTER(BinaryOp, CPU, \"Add\", functor::add, int32);\\
#endif // __ANDROID_TYPES_SLIM__\\
\\"
echo "Kernel_Add_CC_Code is :\n"
echo $Kernel_Add_CC_Code
sed -i '' "22i\\
${Kernel_Add_CC_Code}
" $Kernel_Add_CC
# Same patch for the "Less" op.
# add to file `/tensorflow/tensorflow/core/kernels/cwise_op_less.cc`
# line `21`
Kernel_Less_CC="${BASE_PATH}/tensorflow/tensorflow/core/kernels/cwise_op_less.cc"
Kernel_Less_CC_Code="\
\\
#if defined(__ANDROID_TYPES_SLIM__)\\
REGISTER(BinaryOp, CPU, \"Less\", functor::less, int32);\\
#endif // __ANDROID_TYPES_SLIM__\\
\\"
echo "Kernel_Less_CC_Code is :\n"
echo $Kernel_Less_CC_Code
sed -i '' "21i\\
${Kernel_Less_CC_Code}
" $Kernel_Less_CC
########################################################################################################################
# Build TensorFlow for iOS (slow).
sh tensorflow/tensorflow/contrib/makefile/build_all_ios.sh
if [[ ! $? -eq 0 ]]; then
echo "compail tensorflow error"
exit 1
fi
########################################################################################################################
# Open the project in Xcode.
open -a "Xcode" LibTensorFlowForiOSSwift.xcodeproj
exit 0
| true
|
59bb178376966ec9b69a18409ceb038f48c65ea5
|
Shell
|
timonoko/Tokmannin-ESP8266-Wifi-topseli
|
/paska-welhon-wifi-serveri
|
UTF-8
| 420
| 3.03125
| 3
|
[] |
no_license
|
#! /bin/bash
# The lousy wifi server drops devices that have not been used for a while.
# This keeps every smart plug alive by blinking all of their blue LEDs in
# unison, forever: all off, wait a second, all on, wait a second.
cd /tmp
while true; do
  for state in off on; do
    # Plugs live at 192.168.0.51-54; pin 4 drives the blue LED.
    for x in 51 52 53 54; do
      wget -T 1 -t 1 "http://192.168.0.$x/4/$state"
      # wget saves the response body as "off"/"on" in /tmp; drop it.
      rm -f "$state"
    done
    sleep 1
  done
done
| true
|
0ffeaaec66ef6c2c4e3f41ad85dbe223bdc0a360
|
Shell
|
caichang01/louplus-linux
|
/old/09-auto-recovery-service-script/check_service.sh
|
UTF-8
| 234
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Check a service's status and restart it when it is stopped.
# Usage: check_service.sh <service-name>
service=$1
# "service ... status" exit codes: 0 = running, 1 = unknown service,
# 3 = stopped (LSB convention).
sudo service "$service" status &>/dev/null
status=$?
if [ $status -eq 0 ]; then
    echo "is Running"
elif [ $status -eq 1 ]; then
    echo "Error: Service Not Found" && exit 1
elif [ $status -eq 3 ]; then
    echo "Restarting"
    sudo service "$service" start
fi
| true
|
8fbe4e61e3f08c131bab0ae664c6dddbe27e5ac0
|
Shell
|
virus2121/aosp-builder
|
/sync
|
UTF-8
| 1,501
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sync the DerpFest 11 source tree into /tmp/rom, then fetch the Xiaomi
# sakura device/vendor/kernel/toolchain repositories and swap in patched
# CAF HAL forks.
mkdir -p /tmp/rom # Where to sync source
cd /tmp/rom
# Repo init command, that -device,-mips,-darwin,-notdefault part will save you more time and storage to sync, add more according to your rom and choice. Optimization is welcomed! Let's make it quit, and with depth=1 so that no unnecessary things.
repo init -q --no-repo-verify --depth=1 -u git://github.com/DerpFest-11/manifest.git -b 11 -g default,-device,-mips,-darwin,-notdefault
# Sync source with -q, no need unnecessary messages, you can remove -q if want! try with -j30 first, if fails, it will try again with -j8
repo sync -c --no-clone-bundle --no-tags --optimized-fetch --prune --force-sync -j 30 || repo sync -c --no-clone-bundle --no-tags --optimized-fetch --prune --force-sync -j 8
# Device tree, proprietary vendor blobs, kernel and a prebuilt clang.
git clone https://github.com/FrosT2k5/device_xiaomi_sakura device/xiaomi/sakura -b eleven --depth=1
git clone https://gitlab.com/FrosT2k5/vendor_xiaomi_sakura vendor/xiaomi --depth=1 -b wip
git clone https://github.com/SuperCosmicBeing/kernel_xiaomi_sleepy kernel/xiaomi/msm8953 --depth=1
git clone https://github.com/kdrag0n/proton-clang --depth=1 prebuilts/clang/host/linux-x86/clang-proton
# Replace the stock CAF wlan HAL with a patched fork.
cd hardware/qcom-caf/
rm -rf wlan
git clone https://github.com/SuperCosmicBeing/hardware_qcom_wlan wlan/ --depth=1
# Replace display/media/audio HALs for msm8996 with patched forks.
cd msm8996
rm -rf display media audio
git clone https://github.com/SuperCosmicBeing/display --depth=1
git clone https://github.com/SuperCosmicBeing/media --depth=1
git clone https://github.com/SuperCosmicBeing/audio --depth=1
| true
|
0fc878f0ade3830d2f54d41dbb70ad0697fad3b8
|
Shell
|
androdev4u/network
|
/salt/upstream/port-forwarding
|
UTF-8
| 269
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# if-up hook (Salt/Jinja template): installs one DNAT port-forwarding rule
# per configured entry when the templated upstream interface comes up.
export PATH=/sbin:/bin:/usr/sbin:/usr/bin
# $IFACE is provided at runtime by the ifupdown framework for the interface
# being brought up; only act on the configured upstream interface.
if [ "$IFACE" = "{{ interface }}" ]; then
{%- for fwd in ports %}
iptables -t nat -A PREROUTING -i {{ interface }} -p {{ fwd.proto }} --dport {{ fwd.port }} -j DNAT --to-destination {{ fwd.to }}
{%- endfor %}
fi
| true
|
7cef6c8a4fbe15c4b5bcfd9c7c7202645fd2d398
|
Shell
|
FJchen/ts150
|
/violate/1_create_table/hive_insert/INSERT_TODDC_SAACNACN_H.sh
|
UTF-8
| 717
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
######################################################
# Personal demand-deposit master table: zipper-table
# (slowly-changing chain) processing for TODDC_SAACNACN_H
# wuzhaohui@tienon.com
######################################################
# Source the shared base shell function library.
source /home/ap/dip_ts150/ts150_script/base.sh
# Log in to Hadoop.
hadoop_login
# Parse command-line arguments -- presumably sets ${log_date} used below.
logdate_arg $*
# Input dependency -- current-day data.
IN_CUR_HIVE=INN_TODDC_SAACNACN_H
IN_CUR_HDFS=
# Input dependency -- previous day's data (yesterday's zipper run succeeded).
IN_PRE_HIVE=CT_TODDC_SAACNACN_H
# Hive output table, used to check whether the script already completed.
OUT_CUR_HIVE=CT_TODDC_SAACNACN_H
# Run the insert. NOTE(review): nothing in this file calls run() -- it is
# presumably invoked by the base.sh framework after the vars above are set.
run()
{
beeline -f ./hive_insert/INSERT_TODDC_SAACNACN_H.sql --hivevar log_date=${log_date}
}
| true
|
871ed5539fb38c97682b707406806ed155a07568
|
Shell
|
georgejhunt/sketches
|
/phonehome
|
UTF-8
| 1,021
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# script to copy relevant information to the vpn server
# called once an hour by crontab
VPNSERVER=5.5.0.1
DEPLOYNAME=greenwave
# Exit the whole script (exit, not return) if the transfer marker file was
# already touched today; "-daystart -mtime 0" matches files modified since
# midnight.
function skip_ifrecent {
RECENT_CHECK='-daystart -mtime 0'
if [ `find ~/$DEPLOYNAME/xfer-done.txt $RECENT_CHECK 2>/dev/null` ];then
exit 0
fi
}
# Exit quietly when the VPN server does not answer pings.
function skip_ifoffline {
ping -c 3 -w 3 $VPNSERVER
if [ $? -ne 0 ]; then
exit 0
fi
}
# On OLPC XO hardware (detected via the mfg-data device-tree node), push the
# power logs to the server and dump the AC-power state.
function cpy_pwrlogs_ifxo {
if [ -f /proc/device-tree/mfg-data/MN ]; then
rsync -rp -e "ssh -i /root/.ssh/remote_site" /home/olpc/power-logs site@5.5.0.1:/home/site/$DEPLOYNAME
acpower -d -p > ~/$DEPLOYNAME/acpower.txt
fi
}
mkdir -p ~/$DEPLOYNAME
skip_ifoffline
skip_ifrecent
cpy_pwrlogs_ifxo
# copy data that should be uploaded to $DEPLOYNAME
vnstat -d > ~/$DEPLOYNAME/daily_vnstat.txt
rsync -rp /var/www/html/munin/ ~/$DEPLOYNAME/munin
date > ~/$DEPLOYNAME/last_update.txt
rsync -rp -e "ssh -i /root/.ssh/remote_site" ~/$DEPLOYNAME/* site@5.5.0.1:/home/site/$DEPLOYNAME/
# Mark a successful transfer so skip_ifrecent short-circuits for the rest of
# the day.
touch ~/$DEPLOYNAME/xfer-done.txt
| true
|
e7d37751fc8c01c438d6e743408816992ae94e0f
|
Shell
|
yoya/x.org
|
/X11R6/xc/util/memleak/find-rtns.sh
|
UTF-8
| 742
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# $XConsortium Exp $
#
# find-routines - convert leak tracer stack traces into file/lineno traces
# using a modified version of gdb-4.4
#
# Usage: find-routines <program-name> {leak-tracing-output-files}
#
TMP1=find-routine.tmp1
TMP=find-routine.tmp
# Clean up the temp files on exit.
trap "rm -f $TMP $TMP1" 0
OBJ=$1
shift
# Pull every unique hex return address out of the "return stack:" lines,
# feed each one to gdb as an "x/i <addr>" disassembly request, and keep the
# "<symbol+offset>:" lines gdb prints back.
grep 'return stack:' $* |
tr ' ' '\012' |
grep 0x | sort -u | sed 's;^;x/i ;' |
gdb $OBJ | grep '>:' |
sed 's/>.*$/>/' | sed 's/(gdb) //' > $TMP1
# Turn gdb's answers into a sed script mapping each raw address to its
# symbol (and file/line when available).
awk '/^"/ { printf("s;%s;%s line %s %s;\n", $4, $1, $3, $5) }
/^0/ { printf("s;%s;%s %s;\n", $1, $2, $1);}' $TMP1 > $TMP
# Re-emit each trace with one "routine <addr>" line per frame, then apply
# the address-to-symbol substitutions generated above.
awk '/return stack/ { printf ("return stack\n");
for (i = 3; i <= NF; i++)
printf ("\troutine %s\n", $i); }
/^[A-Z]/ { print }' $* |
sed -f $TMP
| true
|
59a66a49ced0e1726c116d680a5b03965dc69de8
|
Shell
|
meoso/WUDS
|
/run.sh
|
UTF-8
| 892
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# run WUDS
# -- uses first wireless interface if IFACE isn't set in config
if [ "$EUID" -ne 0 ]
then echo "Please run as root."
exit 1
fi
if [ ! -f config.py ]; then
echo 'Copying config.py.sample to config.py'
#copy with user, not root
sudo -u "$SUDO_USER" cp "config.py.sample" "config.py"
fi
# Read IFACE from config.py, stripping quotes and spaces from the value.
IFACE=$(grep IFACE config.py | cut -d'=' -f 2 | sed "s/['\" ]//g")
if [ "$IFACE" == "" ]; then
# Fall back to the first interface iwconfig reports as wireless (IEEE lines).
IFACE=$(iwconfig 2>&1 | grep IEEE | cut -d" " -f 1)
fi
# The interface must be down before its mode can be changed.
if ( ifconfig "$IFACE" down >/dev/null 2>&1 ) ; then
echo "$IFACE downed."
fi
# "mon" = monitor mode; only proceed (bring up, run core.py) if it took.
if ( iwconfig "$IFACE" mode mon) ; then
echo "$IFACE entered Monitor-Mode"
if ( ifconfig "$IFACE" up >/dev/null 2>&1) ; then
echo "$IFACE upped ; running core.py ; Ctrl-C once to stop."
python ./core.py
else
echo "Could not up $IFACE"
fi
fi
# Bring the interface back down after core.py exits.
if ( ifconfig "$IFACE" down >/dev/null 2>&1 ) ; then
echo "$IFACE downed."
fi
| true
|
5f7ff69aa2064f978d9652458afb815f87bd2e9c
|
Shell
|
thenightex/gitlab-ci-testing
|
/7-gitlab-docker-prod/runner-and-docker-host-osx.sh
|
UTF-8
| 676
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Setup Gitlab Runner, version 1
# Installs Homebrew, gitlab-runner, docker and docker-compose on macOS, then
# registers this machine as a shell-executor runner.
# INSTALL BREW
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
# INSTALL RUNNER
brew install gitlab-runner
brew services start gitlab-runner
# INSTALL DOCKER
brew install docker
# INSTALL DOCKER-COMPOSE
brew install docker-compose
# Register the runner. Replace the URL and REGISTRATION_TOKEN placeholders
# before running.
# NOTE(review): --detach and "--restart always" look like docker-run flags,
# not "gitlab-runner register" options -- verify against the runner docs.
gitlab-runner register \
--detach \
--restart always \
--non-interactive \
--url "https://gitlab.url.com/" \
--registration-token "REGISTRATION_TOKEN" \
--executor "shell" \
--name gitlab-runner-uschi \
--description "uschi-prod" \
--tag-list "uschi,shell,prod,docker-daemon" \
--run-untagged="false" \
--locked="false" \
--access-level="not_protected"
| true
|
2d2c5fd52fb4d704b97396b4ed83f0966236e865
|
Shell
|
debrando/gospel
|
/populate.sh
|
UTF-8
| 225
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
# populate.sh file.txt [heroku]
# POST every line of the given file as a url-encoded "msg" form field,
# either to the deployed heroku app or to a local dev server.
if [ "$2" == "heroku" ]; then
  addr="http://gospel99.herokuapp.com"
else
  addr="127.0.0.1:8088"
fi
while IFS= read -r line; do
  curl --data-urlencode "msg=$line" -X POST $addr/msg/
done < "$1"
| true
|
a894acf1a24907d4261bf6228795b83c92d960e5
|
Shell
|
sjoblomj/latexcompilescript
|
/latex.sh
|
UTF-8
| 2,256
| 4.25
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Latex compile script: runs pdflatex repeatedly until the produced PDF
# (with its metadata stripped) stops changing, then removes helper files.
if [ "$1" == "--help" ]; then
echo "Latex compile script 1.0, by Johan Sjöblom. The code is public domain."
echo "Usage:"
echo "$0 <filename> Compiles the given file"
echo "$0 <filename> -l Compiles loudly, eg prints pdflatex output"
exit 0
fi
# pdflatex will produce some "helper files". Remove them, if
# they are created:
EXTARRAY=('aux' 'log' 'toc' 'nav' 'out' 'snm' 'dvi')
# FIX: call dirname/basename directly (quoted) instead of building command
# strings and running them through eval -- the old form broke on paths
# containing spaces or shell metacharacters.
DIRECTORY=$(dirname "$1")
if [ "$DIRECTORY" == "" ]; then
while true; do
echo "Could not find directory to work in. Please enter the directory:"
read dir
if [ -d "$dir" ]; then
DIRECTORY="$dir"
break
fi
done
fi
cd "$DIRECTORY"
FILENAME=$(basename "$1")
# Check to make sure that the filename exists:
if [ ! -e "$FILENAME" ]; then
while true; do
echo "Could not find input *.tex-file in directory '$DIRECTORY'. Please enter the filename:"
read file
if [ -e "$file" ]; then
FILENAME="$file"
break
fi
done
fi
BASEFILENAME="${FILENAME%.*}"
COUNT=0
MD5=0
SUCCESS=0
echo -n "Compiling $FILENAME "
# Re-run pdflatex (at most 10 times) until the metadata-stripped PDF's
# md5sum is stable, i.e. cross-references and the TOC have converged.
while [ $COUNT -lt 10 ] && [ $SUCCESS -eq 0 ]; do
#date
echo -n "."
# If argument to script is -l, then we run the script loudly,
# eg, print out the output from pdflatex. Otherwise, we supress
# the output
if [ "$2" == "-l" ]; then
pdflatex "$FILENAME"
else
TMP=$(pdflatex "$FILENAME")
fi
# Strip metadata in pdf and get md5sum on that
TMP=$(pdftk "$BASEFILENAME.pdf" dump_data | \
sed -e 's/\(InfoValue:\)\s.*/\1\ /g' | \
pdftk "$BASEFILENAME.pdf" update_info - output - | md5sum)
# FIX: "let COUNT=COUNT+1" replaced with the standard arithmetic expansion.
COUNT=$((COUNT+1))
if [ "$MD5" == "$TMP" ]; then
SUCCESS=1
echo " done after $COUNT iterations."
else
MD5="$TMP"
fi
done
if [ $SUCCESS -eq 0 ]; then
echo " - Error! Could not consistently compile $FILENAME"
fi
# Remove the "helper files", if they exist:
# FIX: the redirections were "2>&1 >/dev/null", which still printed rm's
# errors to the terminal; ">/dev/null 2>&1" silences both streams.
for ext in "${EXTARRAY[@]}"; do
rm -f "$BASEFILENAME.$ext" >/dev/null 2>&1
done
rm -f texput.log >/dev/null 2>&1
rm -f missfont.log >/dev/null 2>&1
| true
|
e6684e40404e58e0f7ca557e2d1a526538aa8002
|
Shell
|
adiktofsugar/mail-server-dev
|
/install-smtp-server.sh
|
UTF-8
| 964
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Installs the dev SMTP server as a macOS launchd agent so it starts on
# boot, (re)loads the agent, and finally fires a test mail at it.
set -eu -o pipefail
project_root="$(cd "$(dirname "${BASH_SOURCE[0]}")"; pwd)"
usage="
install-smtp-server.sh
-h - this help
Installs smtp server as a service, so that the smtp server will start on boot, and starts it
Sends a test mail
"
while getopts ":h" opt; do
  case "$opt" in
    h) echo "$usage"; exit;;
  esac
done

# Fill in the plist template with this checkout's location and PATH.
contents="$(cat "$project_root/smtp-server-template.plist")"
contents="${contents//__WORKING_DIRECTORY__/$project_root}"
# Fix: the original wrote ${contents//__PATH__//$PATH}; the stray '/'
# belonged to the replacement text, so every __PATH__ placeholder was
# substituted with "/$PATH" (a bogus leading slash glued onto PATH).
contents="${contents//__PATH__/$PATH}"

plist_name="local.mail-server-dev"
plist_path="$HOME/Library/LaunchAgents/local.mail-server-dev.plist"
#first see if it's already loaded
if launchctl list | grep "$plist_name" >/dev/null 2>/dev/null; then
  # loaded, so try to stop it, and unload it
  launchctl stop "$plist_name"
  launchctl unload "$plist_path"
fi
echo "$contents" > "$plist_path"
launchctl load "$plist_path"
launchctl start "$plist_name"
"$project_root/install-smtp-server-message.js"
| true
|
69c76c802348bc12dec238bc681becd7759908dc
|
Shell
|
aya/infra
|
/ansible/roles/hosts/files/etc/profile.d/rc.sh
|
UTF-8
| 1,997
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
# /etc/profile.d/rc.sh -- for interactive shells, runs user-defined hook
# functions, taken first from files in ~/.rc.d (file name = function name,
# file content = arguments) and then from RC_* environment variables
# (var name = function name, value = arguments).
# verify that default functions are loaded
type force >/dev/null 2>&1 || . /etc/profile.d/rc_functions.sh 2>/dev/null
# test current shell flags
case $- in
# if we are in an interactive shell
*i*)
# load user defined stuffs from ~/.rc.d
for user_func in "${HOME}"/.rc.d/*; do
# read files only
[ -f "${user_func}" ] && func_name=$(basename "${user_func}") || continue
func_args=$(cat "${user_func}")
# at this stage, func_name can start with numbers to allow ordering function calls with file names starting with numbers
# func_name must start with a letter, remove all other characters at the beginning of func_name until a letter is found
while [ "${func_name}" != "" ] && [ "${func_name#[a-z]}" = "${func_name}" ]; do
# remove first char of func_name
func_name="${func_name#?}"
done
# call user function with args passed from the content of the user_func file
# (errors from unknown function names are deliberately discarded)
[ -n "${func_name}" ] && ${func_name} ${func_args} 2>/dev/null
done
# load user defined stuffs from RC_* env vars
# e.g. RC_foo="a b" calls "foo a b"; value "false" skips the call,
# value "true" calls the function with no arguments.
# NOTE(review): $'\n' is a bash/ksh-ism; confirm the target /bin/sh supports it.
IFS=$'\n'; for func_line in $(env 2>/dev/null |awk '$0 ~ /^RC_/ {print tolower(substr($0,4))}'); do
func_name="${func_line%%=*}"
func_args="${func_line#*=}"
[ "${func_args}" = "false" ] && continue
[ "${func_args}" = "true" ] && unset func_args
# at this stage, func_name can start with numbers to allow ordering function calls with file names starting with numbers
# func_name must start with a letter, remove all other characters at the beginning of func_name until a letter is found
while [ "${func_name}" != "" ] && [ "${func_name#[a-z]}" = "${func_name}" ]; do
# remove first char of func_name
func_name="${func_name#?}"
done
# call user function with args passed from the value of the env var
[ -n "${func_name}" ] && ${func_name} ${func_args} 2>/dev/null
done
unset IFS
;;
esac
| true
|
1b5dd642326a3a2bd607cb82fe088e2cf949d8fa
|
Shell
|
zebscripts/FunRun3-Bot
|
/funrun-ai.sh
|
UTF-8
| 12,969
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/system/bin/sh
# FunRun 3 bot: drives the game through Android's 'input'/'screencap'
# commands by sampling pixel colours from screenshots and reacting to them.
# Variables
DEVICEWIDTH=1920                # screen width in pixels; used to map (x,y) to a buffer offset
SCREENSHOTLOCATION="/storage/emulated/0/scripts/afk-arena/screen.dump"
STATE="unknown"                 # current screen/state as detected from pixel colours
RGB=000000                      # last sampled colour (lowercase hex, no '#')
# Arrays
ARRAYRGB=()                     # colours sampled by readRGB
ARRAYRGB2=()                    # colours sampled by readRGB2 (AI obstacle detection)
# Counts
SCREENCOUNT=0                   # screenshots taken so far (logging only)
RGBCOUNT=0                      # pixels sampled in the current scan
INGAMECOUNT=0                   # consecutive "lost" iterations before bailing to the lobby
# Parameters
PARAMX=1                        # index into "$@" of the next x coordinate
PARAMY=2                        # index into "$@" of the next y coordinate
# Coordinates
PIXELX=0
PIXELY=0
# Return values
OUTPUT=1                        # 1 = last detection matched a known state, 0 = unknown
# Default wait time for actions
# NOTE(review): this shadows the shell builtin 'wait'.
function wait() {
sleep 0.3
}
# Resets variables
# Restores the shared pixel-scan globals to their start-of-scan defaults:
# no samples read, argument cursor back at indices 1/2, success flag set.
resetVariables() {
    OUTPUT=1
    PARAMY=2
    PARAMX=1
    RGBCOUNT=0
}
# Takes a screenshot and saves it
# Dumps the raw framebuffer to SCREENSHOTLOCATION for later pixel sampling.
function takeScreenshot() {
screencap $SCREENSHOTLOCATION
SCREENCOUNT=$((SCREENCOUNT + 1))
echo "Screenshot #$SCREENCOUNT taken!"
}
# Switches between last app
function switchApp() {
input keyevent KEYCODE_APP_SWITCH
input keyevent KEYCODE_APP_SWITCH
echo "Switched apps"
}
# Gets pixel color
# Usage: readRGB <count> <x1> <y1> [<x2> <y2> ...]
# Samples <count> pixels from the last screenshot into ARRAYRGB as 6-digit
# hex strings, then resets the scan cursor globals via resetVariables.
function readRGB() {
ARRAYRGB=()
ARGS=("$@")
#echo "ARGS: ${ARGS[*]}"
while [ $RGBCOUNT -lt $1 ]; do
#echo "The counter is $RGBCOUNT"
PIXELX=${ARGS[$PARAMX]}
PIXELY=${ARGS[$PARAMY]}
#echo "X: $PIXELX"
#echo "Y: $PIXELY"
# RGB
# Each pixel is one 4-byte block (bs=4); the +3 presumably skips the
# 12-byte screencap header -- TODO confirm.
let offset=$DEVICEWIDTH*$PIXELY+$PIXELX+3
#echo $offset
RGB=$(dd if=$SCREENSHOTLOCATION bs=4 skip="$offset" count=1 2>/dev/null | hd)
# Keep characters 9-17 of the hexdump (the three colour bytes), drop spaces.
RGB=${RGB:9:9}
RGB="${RGB// /}"
#echo "RGB $RGBCOUNT: '$RGB'"
# Add RGB to array
ARRAYRGB[$RGBCOUNT]=$RGB
# Increment variables
let RGBCOUNT=RGBCOUNT+1
let PARAMX=PARAMX+2
let PARAMY=PARAMY+2
#echo "RGBCOUNT: $RGBCOUNT"
#echo "PARAMX: $PARAMX"
#echo "PARAMY: $PARAMY"
done
echo "ARRAYRGB: ${ARRAYRGB[*]}"
resetVariables
}
# Gets pixel color for AI
# Like readRGB but fills ARRAYRGB2. The second argument selects the sample
# bank: pass 1 to fill slots 0-4 (first frame) and 2 to fill slots 5-9
# (second frame), so the caller can compare two frames taken moments apart.
# NOTE(review): reads '/sdcard/Script-Images/screen.dump' rather than
# $SCREENSHOTLOCATION that takeScreenshot writes -- confirm these resolve
# to the same file on the target device.
function readRGB2() {
PARAMX=2
PARAMY=3
ARGS=("$@")
#echo "ARGS: ${ARGS[*]}"
if [ "$2" = 2 ]; then
let RGBCOUNT=RGBCOUNT-5
fi
while [ $RGBCOUNT -lt $1 ]; do
#echo "The counter is $RGBCOUNT"
PIXELX=${ARGS[$PARAMX]}
PIXELY=${ARGS[$PARAMY]}
#echo "X: $PIXELX"
#echo "Y: $PIXELY"
# RGB
let offset=$DEVICEWIDTH*$PIXELY+$PIXELX+3
#echo $offset
RGB=$(dd if='/sdcard/Script-Images/screen.dump' bs=4 skip="$offset" count=1 2>/dev/null | hd)
RGB=${RGB:9:9}
RGB="${RGB// /}"
# Shift the store index up by 5 for the second bank, then back again so the
# loop counter itself keeps walking the argument list.
if [ "$2" = 2 ]; then
let RGBCOUNT=RGBCOUNT+5
fi
# Add RGB to array
ARRAYRGB2[$RGBCOUNT]=$RGB
if [ "$2" = 2 ]; then
let RGBCOUNT=RGBCOUNT-5
fi
# Increment variables
let RGBCOUNT=RGBCOUNT+1
let PARAMX=PARAMX+2
let PARAMY=PARAMY+2
#echo "RGBCOUNT: $RGBCOUNT"
#echo "PARAMX: $PARAMX"
#echo "PARAMY: $PARAMY"
done
echo "ARRAYRGB2: ${ARRAYRGB2[*]}"
# Reset variables
PARAMX=2
PARAMY=3
if [ "$2" = "2" ]; then
RGBCOUNT=0
PARAMX=1
PARAMY=2
fi
}
# Checks where the player is in the game
# Maps the colours in ARRAYRGB onto a top-level state: 3 samples form the
# lobby signature, 2 samples distinguish the loading screen from the logo.
# Sets OUTPUT=0 when the colours match nothing known.
function checkRGBstate() {
# Lobby
if [ "${#ARRAYRGB[@]}" = "3" ]; then
if [ "${ARRAYRGB[0]}" = "fff9ef" ] && [ "${ARRAYRGB[1]}" = "fff9ef" ] && [ "${ARRAYRGB[2]}" = "c45129" ]; then
STATE="lobby"
else
STATE="unknown"
OUTPUT=0
fi
# loading, logo
elif [ "${#ARRAYRGB[@]}" = "2" ]; then
if [ "${ARRAYRGB[0]}" = "19ef27" ] && [ "${ARRAYRGB[1]}" = "f3d19e" ]; then
STATE="loading"
elif [ "${ARRAYRGB[0]}" = "dedbde" ] && [ "${ARRAYRGB[1]}" = "f79218" ]; then
STATE="logo"
else
STATE="unknown"
OUTPUT=0
fi
else
echo "Error: ARRAYRGB length is invalid."
ARRAYRGB=()
STATE="unknown"
fi
echo "Updated STATE to '$STATE'"
}
# Checks where in the lobby the player is
# The five ARRAYRGB slots correspond to the bottom-nav icons; the one whose
# sampled colour is the highlight value 442211 names the active tab. If no
# slot matches, the state falls back to "unknown".
checkRGBlobby() {
local tabs="leaderboard clan home shop vault"
local slot=0
local tab
STATE="unknown"
for tab in $tabs; do
if [ "${ARRAYRGB[$slot]}" = "442211" ]; then
STATE="lobby:$tab"
break
fi
slot=$((slot + 1))
done
echo "Updated STATE to '$STATE'"
}
# Checks where ingame the player is
# Translates ARRAYRGB colour signatures into an ingame:* sub-state.
# 3 samples identify the podium (or, if we were just on the podium, the end
# screen); 2 samples distinguish search-for-players, voting, countdown,
# start, running and finish. Sets OUTPUT=0 on no match.
function checkRGBingame() {
# podium
if [ "${#ARRAYRGB[@]}" = "3" ]; then
if [ "${ARRAYRGB[0]}" = "fffeff" ] && [ "${ARRAYRGB[1]}" = "fffdff" ] && [ "${ARRAYRGB[2]}" = "fff9ff" ]; then
STATE="ingame:podium"
elif [ "$STATE" = "ingame:podium" ]; then
STATE="ingame:end"
else
STATE="unknown"
OUTPUT=0
fi
# SFP, voting, countdown, start, running, finish
elif [ "${#ARRAYRGB[@]}" = "2" ]; then
if [ "${ARRAYRGB[0]}" = "93d6ff" ] && [ "${ARRAYRGB[1]}" = "2b9438" ]; then
STATE="ingame:sfp"
elif [ "${ARRAYRGB[0]}" = "c45129" ] && [ "${ARRAYRGB[1]}" = "c45129" ]; then
STATE="ingame:voting"
elif [ "${ARRAYRGB[0]}" = "ffffff" ] && [ "${ARRAYRGB[1]}" = "ffffff" ]; then
STATE="ingame:start"
elif [ "${ARRAYRGB[0]}" = "ffffff" ] && [ "${ARRAYRGB[1]}" = "e7d1c7" ]; then
STATE="ingame:countdown"
elif [ "${ARRAYRGB[0]}" = "e7d1c7" ] && [ "${ARRAYRGB[1]}" = "e7d1c7" ]; then
STATE="ingame:running"
OUTPUT=1
elif [ "$STATE" = "ingame:running" ] && [ "${ARRAYRGB[0]}" != "e7d1c7" ] && [ "${ARRAYRGB[1]}" != "e7d1c7" ]; then
# Transition out of "running" only makes sense if we were running before.
STATE="ingame:finish"
else
STATE="unknown"
OUTPUT=0
fi
else
echo "Error: ARRAYRGB length is invalid."
STATE="unknown"
fi
echo "Updated STATE to '$STATE'"
}
# All action* helpers tap hard-coded 1920x1080 screen coordinates via the
# Android 'input' command.
# Ingame: Jump
function actionUp() {
input tap 1760 950
echo "Action: Up"
}
# Ingame: Dodge
function actionDown() {
input tap 1500 950
echo "Action: Down"
}
# Ingame: Powerup
function actionPowerup() {
input tap 130 950
echo "Action: Powerup"
}
# Lobby: Play button
function actionPlay() {
input tap 1550 706
echo "Action: Play"
}
# Lobby: Play Menu
function actionPlayMenu() {
input tap 1470 780
echo "Action: PlayMenu"
}
# Lobby: Play Menu: Custom game
function actionPlayMenuCustom() {
input tap 1315 682
echo "Action: PlayMenuCustom"
}
# Lobby: Play Menu: Back Arrow
function actionPlayMenuArrow() {
input tap 1177 682
echo "Action: PlayMenuArrow"
}
# Creates a custom game on a random map
# Only valid from lobby:home; taps through the play menu, starts the game
# and hands control to the ingame state machine (loopIngame).
function createCustomGame() {
if [ "$STATE" = "lobby:home" ]; then
wait
actionPlayMenu
wait
actionPlayMenuCustom
wait
actionPlayMenuArrow
wait
actionPlay
STATE="unknown"
loopIngame
else
echo "Cant create Custom game, User isnt in lobby:home!"
fi
}
# Launch the AI
# Main play loop: while the race is running, compare two quick frames and
# jump/dodge when the pixels ahead stop changing (i.e. the runner is stuck).
# Gives up after 30 consecutive unidentified frames.
function startAi() {
echo "-- -- -- Launching AI -- -- --"
until [ "$STATE" = "ingame:podium" ]; do
resetVariables
takeScreenshot
tryIngameRunning
if [ "$OUTPUT" = 1 ]; then
aiTryObstacles
elif [ "$OUTPUT" = 0 ]; then
resetVariables
tryIngamePodium
if [ "$OUTPUT" = 0 ]; then
let INGAMECOUNT=INGAMECOUNT+1
if [ "$INGAMECOUNT" = 30 ]; then
# Exit function
return 1
fi
fi
fi
done
}
# Samples five pixels in a vertical strip ahead of the runner on two frames
# taken 0.5s apart (banks 0-4 and 5-9 of ARRAYRGB2), then reacts.
function aiTryObstacles() {
echo "Searching for obstacles..."
readRGB2 5 1 900 550 900 600 900 650 900 750 900 900
sleep 0.5
takeScreenshot
readRGB2 5 2 900 550 900 600 900 650 900 750 900 900
aiCheckPlayerStopped
}
# If corresponding samples from the two frames are identical the scenery has
# stopped scrolling (player blocked): jump or dodge depending on which rows
# are frozen.
function aiCheckPlayerStopped() {
if [ "${ARRAYRGB2[0]}" = "${ARRAYRGB2[5]}" ] && [ "${ARRAYRGB2[1]}" = "${ARRAYRGB2[6]}" ] && [ "${ARRAYRGB2[4]}" = "${ARRAYRGB2[9]}" ]; then
echo fodasse!
actionUp
actionUp
actionUp
elif [ "${ARRAYRGB2[1]}" = "${ARRAYRGB2[6]}" ] && [ "${ARRAYRGB2[2]}" = "${ARRAYRGB2[7]}" ]; then
actionDown
elif [ "${ARRAYRGB2[2]}" = "${ARRAYRGB2[7]}" ]; then
actionUp
elif [ "${ARRAYRGB2[3]}" = "${ARRAYRGB2[8]}" ]; then
actionUp
fi
}
# Each try* probe samples the pixel coordinates that uniquely identify one
# screen and lets checkRGBstate translate them into STATE/OUTPUT.
# Tries to find the logo
function tryLogo() {
echo "Searching for logo"
readRGB 2 700 435 860 440
checkRGBstate
}
# Tries to find the loading screen
function tryLoading() {
echo "Searching for loading"
readRGB 2 540 1025 500 1025
checkRGBstate
}
# Tries to find the lobby
function tryLobby() {
echo "Searching for lobby"
readRGB 3 54 511 50 695 180 985
checkRGBstate
}
# Until loop to set the STATE
# Outer state machine: keeps probing screenshots until the lobby is reached,
# then identifies the active lobby tab and starts a custom game. The loop
# condition ("null") never occurs, so exit happens from within the handlers.
function loopState() {
until [ "$STATE" = "null" ]; do
echo "STATE: $STATE"
case "$STATE" in
unknown)
# Probe the known screens in order; each try* resets OUTPUT on failure.
takeScreenshot
tryLogo
if [ "$OUTPUT" = "0" ]; then
resetVariables
tryLoading
if [ "$OUTPUT" = "0" ]; then
resetVariables
tryLobby
if [ "$OUTPUT" = "0" ]; then
resetVariables
echo "I have no fucking clue where you are"
fi
fi
fi
;;
logo)
takeScreenshot
tryLogo
;;
loading)
takeScreenshot
tryLoading
;;
lobby*)
# Sample the five bottom-nav icons to find the active tab.
takeScreenshot
readRGB 5 640 1050 835 1050 1045 1050 1245 1050 1450 1050
checkRGBlobby
createCustomGame
;;
*)
echo "Rip on the STATE switch case"
;;
esac
done
}
# Each tryIngame* probe samples the coordinates that identify one in-game
# screen and lets checkRGBingame translate the colours into STATE/OUTPUT.
# Tries to find ingame: searching for players
function tryIngameSFP() {
echo "Searching for ingame:sfp"
readRGB 2 930 200 970 895
checkRGBingame
}
# Tries to find ingame: voting
function tryIngameVoting() {
echo "Searching for ingame:voting"
readRGB 2 1600 800 1700 800
checkRGBingame
}
# Tries to find ingame: countdown
function tryIngameCountdown() {
echo "Searching for ingame:countdown"
readRGB 2 992 372 1505 970
checkRGBingame
}
# Tries to find ingame: go!
function tryIngameStart() {
echo "Searching for ingame:start"
readRGB 2 1114 338 810 378
checkRGBingame
}
# Tries to find ingame: running
function tryIngameRunning() {
echo "Searching for ingame:running"
readRGB 2 1505 970 1770 970
checkRGBingame
}
# Tries to find ingame: podium
function tryIngamePodium() {
echo "Searching for ingame:podium"
readRGB 3 580 720 280 720 845 720
checkRGBingame
}
# Until loop to set the STATE while ingame
# Inner state machine for one match: walks sfp -> voting -> countdown ->
# start (hands over to startAi) -> podium -> end, breaking back to the
# caller (and thus the lobby loop) after 30 unidentified frames or when the
# match ends.
function loopIngame() {
until [ "$STATE" = "lobby" ]; do
echo "STATE: $STATE"
case "$STATE" in
unknown)
echo "Searching for ingame:sfp"
takeScreenshot
tryIngameSFP
if [ "$OUTPUT" = "0" ]; then
resetVariables
tryIngameVoting
if [ "$OUTPUT" = "0" ]; then
resetVariables
tryIngameCountdown
if [ "$OUTPUT" = "0" ]; then
resetVariables
tryIngameStart
if [ "$OUTPUT" = "0" ]; then
resetVariables
echo "I have no fucking clue where you are"
let INGAMECOUNT=INGAMECOUNT+1
if [ "$INGAMECOUNT" = 30 ]; then
INGAMECOUNT=0
echo "-- -- -- Bringing you back to lobby -- -- --"
break
fi
fi
fi
fi
fi
;;
"ingame:sfp")
takeScreenshot
tryIngameSFP
;;
"ingame:voting")
takeScreenshot
tryIngameVoting
;;
"ingame:countdown")
takeScreenshot
tryIngameCountdown
;;
"ingame:start")
# The race is on: let the AI play until podium or give-up.
startAi
if [ "$INGAMECOUNT" = 30 ]; then
echo "-- -- -- Bringing you back to lobby -- -- --"
break
fi
;;
"ingame:podium")
takeScreenshot
tryIngamePodium
;;
"ingame:end")
STATE="unknown"
INGAMECOUNT=0
break
;;
*)
echo "Rip on the loopIngame switch case"
;;
esac
done
}
# Execute code after loading functions
# Kill any running instance, relaunch the game via monkey, then drive it
# from the lobby state machine until loopState returns.
am force-stop com.dirtybit.fra
#input keyevent 3
#input tap 940 775
monkey -p com.dirtybit.fra -c android.intent.category.LAUNCHER 1
loopState
echo "Script end"
exit
#switch
| true
|
ff3cbab9b2e43f5fc8df0715214f92ee9e4b8698
|
Shell
|
khoitnm/practice-spring-kafka-grpc
|
/docker/start.sh
|
UTF-8
| 230
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Determine this host's LAN IP so Kafka advertises a reachable address to
# clients, then bring up the compose stack.
# NOTE(review): the sed pipeline blanks the loopback address and the
# 172.*.0.1 docker bridge gateway, then prints any remaining inet address;
# on multi-homed hosts several addresses may be emitted -- verify.
export KAFKA_ADVERTISED_HOST_NAME=$(ip a | sed -En 's/127.0.0.1//;s/172.*.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')
echo "KAFKA_ADVERTISED_HOST_NAME: ${KAFKA_ADVERTISED_HOST_NAME}"
docker-compose up -d
| true
|
def5b50e901bfe7dc312d7f7e6ce12b552766963
|
Shell
|
gokulmaran/bridgelabz1
|
/Empispre.sh
|
UTF-8
| 216
| 3.40625
| 3
|
[] |
no_license
|
#! /bin/bash -x
#WAP to check employee is present or absent use random and if condition
# Draw 0 or 1 at random: 0 -> absent, 1 -> present.
a=$(( RANDOM%2 ))
if [ "$a" -eq 0 ]
then
    echo "employee is absent"
else
    # Fix: the original wrote 'else [ $a -eq 1 ]', which merely executed the
    # test as a throwaway command on the else path; a plain 'else' is what
    # was intended (a is always 0 or 1 here).
    echo "employee is present"
fi
| true
|
c3eca565e3066e2ca4a907e163c5f1bbc247998c
|
Shell
|
dejanzelic/business-casual
|
/bootstrap.sh
|
UTF-8
| 1,136
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provisions the business-casual app box: installs the headless-Chrome
# system libraries, Node 8 (if absent), npm dependencies, then starts the
# app under pm2 and redirects local port 80 to 8080.
#update and prepare dependencies
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install curl gconf-service libasound2 libatk1.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 ca-certificates fonts-liberation libappindicator1 libnss3 lsb-release xdg-utils wget -y
app_home="/vagrant"
# Install Node
if command -v node; then
  echo "Node is already installed"
else
  curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
  apt-get install -y nodejs
fi
# -p creates missing parents and keeps re-provisioning idempotent (the
# original bare mkdir errored when the directory already existed).
mkdir -p "$app_home/business_casual/screenshots"
if ! [ -L "$app_home/node_modules" ]; then
  cd "$app_home" && npm install
fi
#if in prod, run the application and redirect 80 to 8080
npm install -g pm2
pm2 start "$app_home/bin/www"
iptables -t nat -A OUTPUT -o lo -p tcp --dport 80 -j REDIRECT --to-port 8080
echo '127.0.0.1 business-casual.mysterious-hashes.net' >> /etc/hosts
| true
|
059a6f86383c976ea4b2fe644f0f86d45f9db3ed
|
Shell
|
8l/lfscript
|
/scripts/blfs-13994-unchecked/vsftpd
|
UTF-8
| 1,537
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# The instructions in this file are extracted from
# 'Beyond Linux From Scratch' (2014-08-22 / r13994) but are modified for use
# with LFScript 4 which installs the software to a fake root directory.
#
# Beyond Linux From Scratch is released under the MIT license.
# Copyright (C) 2001-2014, The BLFS Development Team
WGETLIST="https://security.appspot.com/downloads/vsftpd-3.0.2.tar.gz
http://www.linuxfromscratch.org/blfs/downloads/svn/blfs-bootscripts-20140810.tar.bz2"
MD5SUMLIST="8b00c749719089401315bd3c44dddbb2
179a6c22d0f7d2619cba4eb794fdc1cb"
###############################################
installation() { # INSTALLING SYSTEM SOFTWARE #
###############################################
install -v -d -m 0755 ${FAKEROOT}/usr/share/vsftpd/empty
install -v -d -m 0755 ${FAKEROOT}/home/ftp
groupadd -g 47 vsftpd
groupadd -g 45 ftp
# NOTE(review): -s records a shell path inside the fake root in the host's
# passwd database; confirm this is intended rather than plain /bin/false.
useradd -c "vsftpd User" -d /dev/null -g vsftpd -s ${FAKEROOT}/bin/false -u 47 vsftpd
useradd -c anonymous_user -d ${FAKEROOT}/home/ftp -g ftp -s ${FAKEROOT}/bin/false -u 45 ftp
# Disable libcap usage before building.
sed -i -e 's|#define VSF_SYSDEP_HAVE_LIBCAP|//&|' sysdeputil.c
make
# Fix: install into ${FAKEROOT} like every other destination in this
# script; the original wrote the binary, man pages and config straight to
# the host's /usr and /etc, defeating the fake-root packaging.
install -v -m 755 vsftpd ${FAKEROOT}/usr/sbin/vsftpd
install -v -m 644 vsftpd.8 ${FAKEROOT}/usr/share/man/man8
install -v -m 644 vsftpd.conf.5 ${FAKEROOT}/usr/share/man/man5
install -v -m 644 vsftpd.conf ${FAKEROOT}/etc
cat >> ${FAKEROOT}/etc/vsftpd.conf << "EOF"
background=YES
listen=YES
nopriv_user=vsftpd
secure_chroot_dir=/usr/share/vsftpd/empty
EOF
includeBootscript vsftpd
#################
} # END OF FILE #
#################
| true
|
0ef81b1142a486e209401750a4abe22efd2b74e8
|
Shell
|
deephyper/scalable-bo
|
/experiments/sc23-example/navalpropulsion/auto.sh
|
UTF-8
| 1,159
| 2.8125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Runs the naval-propulsion benchmark suite once per random seed (retrying a
# failed serie up to 5 times), then plots the aggregated results.
export NUM_WORKERS=8
export timeout=70
export DEEPHYPER_BENCHMARK_SIMULATE_RUN_TIME=1
export DEEPHYPER_BENCHMARK_PROP_REAL_RUN_TIME=0.01
export random_states=(1608637542 3421126067 4083286876 787846414 3143890026 3348747335 2571218620 2563451924 670094950 1914837113)

# One experiment serie: every solver variant, run sequentially.
exec_experiments () {
    ./dhb_navalpropulsion-CBO-DUMMY-UCB.sh
    ./dhb_navalpropulsion-CBO-RF-UCB.sh
    ./dhb_navalpropulsion-CBO-RF-UCB-SHA.sh
    ./dhb_navalpropulsion-DBO-RF-UCB.sh
    ./dhb_navalpropulsion-DBO-RF-UCB-SHA.sh
}

printf "Creating 'output' and 'figures' directories if not exist.\n"
mkdir -p output
mkdir -p figures

printf "Reconfiguring 'plot.yaml' file.\n"
# NOTE: 'sed -i '' ' is the BSD/macOS in-place form; GNU sed would treat ''
# as a file name.
sed -i '' "s_data-root: .*_data-root: $PWD/output_" plot.yaml
# Fix: the original command lacked the closing '_' delimiter, which makes
# sed abort with "unterminated `s' command" and leaves figures-root stale.
sed -i '' "s_figures-root: .*_figures-root: $PWD/figures_" plot.yaml

printf "Executing experiments.\n"
for random_state in ${random_states[@]}; do
    export random_state=$random_state;
    printf "Executing experiment serie with random_state: $random_state\n"
    # Retry a failing serie up to 5 times with a short backoff.
    for i in {1..5}; do
        exec_experiments && break || sleep 5;
    done
    sleep 1;
done
printf "Plotting results.\n"
python -m scalbo.plot --config plot.yaml
| true
|
e2771d952260f004b427bfb05f9c13ec71c95f0c
|
Shell
|
gmatheu/deb-packages
|
/dcevm/jre7/deb/DEBIAN/prerm
|
UTF-8
| 673
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Debian maintainer prerm script for the java-7-dcevm JRE package: removes
# the generated class-data-sharing archives and, on remove/deconfigure,
# withdraws the update-alternatives and binfmt registrations the package
# installed.
set -e
basedir=/usr/lib/jvm/java-7-dcevm-amd64
jre_tools='java keytool pack200 rmid rmiregistry unpack200 orbd servertool tnameserv'
# classes.jsa files are generated after install, so dpkg does not track
# them; delete them explicitly so the package directory can be removed.
rm -f $basedir/jre/lib/amd64/client/classes.jsa
rm -f $basedir/jre/lib/amd64/server/classes.jsa
if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
for i in $jre_tools; do
update-alternatives --remove $i $basedir/jre/bin/$i
done
if which update-binfmts >/dev/null; then
# try to remove and ignore the error
if [ -e /var/lib/binfmts/dcevm-7 ]; then
update-binfmts --package dcevm-7 \
--remove jar /usr/bin/jexec || true
fi
fi
update-alternatives --remove jexec $basedir/jre/lib/jexec
fi
| true
|
651d3d0e74f342672cc4cf9a74dad8953755453c
|
Shell
|
kurokawh/test
|
/sed/od.sh
|
UTF-8
| 301
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# $0 objname < in > out
#
# read binary data from standard input.
# then output C array definition to standard output.
# arg 1 is the name of array.
#
objname=${1:-objname}
# od prints every byte as a two-digit hex pair; the sed script then:
#   1i  - prepends the C array declaration,
#   s// - rewrites each pair "ab" as "0xab,",
#   $s  - strips the trailing comma on the last line,
#   $a  - appends the closing "};".
od -A n -v -t x1 | sed -e '1i\
const unsigned char '$objname'[] = {
s/\([0-9a-f][0-9a-f]\) */0x\1,/g
$s/,$//
$a\
};
'
| true
|
d04bdc54deb736a85fcdedd6d92996c754f3ddc0
|
Shell
|
shinji62/buildpacks-ci
|
/scripts/stacks/create-release-without-modifying-stacks
|
UTF-8
| 247
| 2.515625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e

# Build a cf release tarball (version 212.0.<epoch>) from ./cf-release
# without modifying the stacks, then mirror the release directory --
# generated artifacts included -- for downstream pipeline steps.
pushd cf-release
bundle config mirror.https://rubygems.org ${RUBYGEM_MIRROR}
bundle
bosh create release --force --with-tarball --name cf --version "212.0.$(date +"%s")"
popd
rsync -a cf-release/ cf-release-artifacts
| true
|
ce78935647fdbe2b9d2c8edf35adceff2c15f250
|
Shell
|
paulbdavis/doti3
|
/bin/external-ip
|
UTF-8
| 807
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Prints this machine's external IPv4 address, caching it in /tmp for 30
# minutes. Flags: -r drops the cache first; -f forces a fresh lookup
# without reading or updating the cache. Exits 33 with "0.0.0.0" when no
# plausible dotted-quad address could be determined.
cacheFile="/tmp/externalip-$USER"

if [[ "$1" = "-r" ]]
then
    rm -f "$cacheFile"
elif [[ -f "$cacheFile" ]]
then
    # Expire entries older than 30 minutes. Guarding on the file's
    # existence fixes the original's first-run failure, where 'date -r'
    # was invoked on a cache file that did not exist yet.
    modified=$(( $(date +%s) - $(date +%s -r "$cacheFile") ))
    if [[ $modified -gt 1800 ]]
    then
        rm "$cacheFile"
    fi
fi

if [[ ! -f "$cacheFile" ]]
then
    touch "$cacheFile"
fi

cachedIp=$(cat "$cacheFile")
if [[ -z "$cachedIp" ]] || [[ "$1" == "-f" ]]
then
    externalIp=$(curl -s checkip.dyndns.org|sed -e 's/.*Current IP Address: //' -e 's/<.*$//')
fi

# Without -f: persist a fresh lookup, or fall back to the cached value.
if [[ "$1" != "-f" ]]
then
    if [[ -z $cachedIp ]]
    then
        echo "$externalIp" > "$cacheFile"
    else
        externalIp=$cachedIp
    fi
fi

# Sanity-check the dotted-quad shape before printing (replaces the
# GNU-only 'expr match' used originally).
if [[ -z "$externalIp" ]] || ! [[ "$externalIp" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ]]
then
    echo "0.0.0.0"
    exit 33
fi
echo "$externalIp"
| true
|
3d8a2d3da00c0114f0f14ee8e0d696f60495d0f7
|
Shell
|
sarpu/Zooniverse
|
/upload_subjects/convert_2_mp3.sh
|
UTF-8
| 373
| 3.359375
| 3
|
[] |
no_license
|
# Reads the output folder from ../config.py, converts every WAV in its
# data-for-upload directory to MP3 with sox, and removes the source WAVs.
outfolder=`grep "outfolder" ../config.py | awk '{ gsub(".*=", "") ; system( "echo " $0) }'`
echo Converting WAV files to MP3 using SOX...
#CONVERT TO MP3 AND REMOVE WAVs
for filename in "$outfolder"/data-for-upload/*.wav;
do
    # Skip the literal unexpanded pattern when no WAV files match.
    [ -e "$filename" ] || continue
    echo "${filename%.*}";
    # Delete each WAV only after its conversion succeeded (the original
    # removed every WAV unconditionally in a second loop, losing data if
    # sox had failed), and quote paths so spaces survive.
    sox "${filename%.*}".wav "${filename%.*}".mp3 && rm -- "$filename"
done
| true
|
d02210c0ce21738b42061760c0ac28874c5b752f
|
Shell
|
AHETESHAMS/ShellScriptPrograms
|
/flipCoin.sh
|
UTF-8
| 137
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Simulates ten coin flips, printing "Head" or "Tail" once per flip.
head=1
flip=0
while [ "$flip" -lt 10 ]
do
    toss=$(( RANDOM%2 ))
    case "$toss" in
        "$head") echo "Head" ;;
        *)       echo "Tail" ;;
    esac
    flip=$(( flip + 1 ))
done
| true
|
bf28073ea7b03fae2db0b201c41cf8e55a06e78c
|
Shell
|
GoogleCloudPlatform/anthos-service-mesh-samples
|
/docs/mtls-egress-ingress/terraform/install_asm.sh
|
UTF-8
| 2,534
| 3.140625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START servicemesh_terraform_install_asm]
#!/bin/bash
# Downloads the ASM release for this OS, installs istio into the current
# cluster, publishes a revision-labelled istiod Service, and enables
# sidecar injection on the default namespace.
uname_out="$(uname -s)"
case "${uname_out}" in
    Linux*)  OS=linux-amd64;;
    Darwin*) OS=osx;;
    # Fix: the original had 'exit1;' (no space), which ran a nonexistent
    # command and let the script continue on unsupported platforms.
    *) echo "Oh snap! It seems ASM is not yet available for your OS: $uname_out"; exit 1;;
esac
ASM_SUFFIX=${ASM_VERSION}-${OS}

echo -e "Installing ASM for OS $uname_out into $TERRAFORM_ROOT"
echo "Downloading ASM installation files"
gsutil cp gs://gke-release/asm/istio-${ASM_SUFFIX}.tar.gz $TERRAFORM_ROOT/
echo "Done downloading"
echo "Unpacking download and preparing install"
tar xzf $TERRAFORM_ROOT/istio-${ASM_SUFFIX}.tar.gz

# Installing ASM
echo "Preparing istio installation"
cd istio-${ASM_VERSION}
kubectl create namespace istio-system

# Create webhook version
echo "Creating webhook for version asm-${ASM_REVISION}"
cat <<EOF > $TERRAFORM_ROOT/istiod-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: istiod
  namespace: istio-system
  labels:
    istio.io/rev: asm-${ASM_REVISION}
    app: istiod
    istio: pilot
    release: istio
spec:
  ports:
  - port: 15010
    name: grpc-xds # plaintext
    protocol: TCP
  - port: 15012
    name: https-dns # mTLS with k8s-signed cert
    protocol: TCP
  - port: 443
    name: https-webhook # validation and injection
    targetPort: 15017
    protocol: TCP
  - port: 15014
    name: http-monitoring # prometheus stats
    protocol: TCP
  selector:
    app: istiod
    istio.io/rev: asm-${ASM_REVISION}
EOF

# Run istioctl installation
echo "Installing istio into the cluster"
bin/istioctl install --set profile=asm-multicloud --set revision=asm-${ASM_REVISION} -f "$TERRAFORM_ROOT/../$TYPE/features.yaml" -y
kubectl apply -f $TERRAFORM_ROOT/istiod-service.yaml

# Inject sidecar proxies: drop the legacy istio-injection label and pin the
# namespace to this ASM revision instead.
kubectl label namespace default istio-injection- istio.io/rev=asm-${ASM_REVISION} --overwrite
echo "Done installing istio into the cluster"
# [END servicemesh_terraform_install_asm]
| true
|
ab60986c2907bede08c38aa4ef8a953b45584dbf
|
Shell
|
ashbryant/wp-local-docker
|
/bin/setup.sh
|
UTF-8
| 10,635
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Purpose: WordPress provisioning script
# Source: https://ashbryant.com
# Author: Ash
#
# NOTE:
# Seeing as I'm new to bash & I don't know when I will be back to it...
# "&& \" This lets you do something based on whether the previous command completed successfully. Seeing as most of this requires that process is why I have it here.
# Location of some variables needed for setup
# Pulls in output()/confirm_to_continue() helpers plus the DB, domain and
# admin-user variables used throughout the install below.
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.common.sh"
if [ -f "/var/www/html/wp-config.php" ];
then
output "Wordpress has already installed at \"$SOURCE_DIR\"." -i
output "Do you want to reinstall? [y/n] " -e && \
read REINSTALL
if [ "y" = "$REINSTALL" ]
then
wp db reset --yes
GLOBIGNORE='wp-cli.local.yml'
rm -rf *
output "Okay, all files have been removed. Try again." -s
else
output "Installation aborted." -e
exit 1
fi
else
confirm_to_continue "Are you sure want to install Wordpress to \"$SOURCE_DIR\" [Yn]? "
# Prepare_empty_dir "/var/www/html"
rm -f "/var/www/html/.gitkeep"
#check_empty_dir "/var/www/html" "Sorry, but \"$SOURCE_DIR\" is not empty, please backup your data before continue."
output "# Running install scripts..." -i
# Generate random 12 character password
WP_USER_PASSWORD=$(LC_CTYPE=C tr -dc A-Za-z0-9_\!\@\#\$\%\^\&\*\(\)-+= < /dev/urandom | head -c 12) && \
# Ask for the type of installation
output "Do you want a multisite installation? [y/n] " -w && \
read MULTISITE
# Install WordPress
# Download English Version of WordPress
wp core download --locale=en_GB && \
# Config the DB connection
wp core config --dbhost=mysql --dbname=$MYSQL_DATABASE --dbuser=$MYSQL_USER --dbpass=$MYSQL_PASSWORD && \
if [ "y" = "$MULTISITE" ]
then
wp core multisite-install --prompt
else
wp core install --url="$DOMAIN_NAME" --title="$BLOG_TITLE" --admin_user="$WP_USER" --admin_password="$WP_USER_PASSWORD" --admin_email="$WP_USER_EMAIL"
fi
# TODO: Copy password to clipboard, can't get it to work right now
# echo "Admin password = " $password | cat ~/.ssh/id_rsa.pub | pbcopy && \
# Set the blog description
wp option update blogdescription "$BLOG_DESCRIPTION" && \
# Set the time zone
wp option update timezone_string "Europe/London" && \
# Set Date format (21st August 2018)
wp option update date_format "jS F Y" && \
# Set time format (10:00 am)
wp option update time_format "g:i a" && \
# Set Create .htaccessfile and set pretty urls
#touch /var/www/html/.htaccess
#chmod 777 /var/www/html/.htaccess
wp rewrite structure '/%postname%/' --hard && \
wp rewrite flush --hard && \
# Update translations
wp language core update && \
# Ask to remove default content ?
output "Do you want to remove all of the default content? (aka a blank install) [y/n] " -w && \
read EMPTY_CONTENT
if [ "y" = "$EMPTY_CONTENT" ]
then
# Remove all posts, comments, and terms
wp site empty --yes && \
# Remove plugins and themes
wp plugin delete hello && \
wp plugin delete akismet && \
wp theme delete twentyfifteen && \
wp theme delete twentysixteen && \
# Remove widgets
wp widget delete recent-posts-2 && \
wp widget delete recent-comments-2 && \
wp widget delete archives-2 && \
wp widget delete search-2
wp widget delete categories-2 && \
wp widget delete meta-2
else
# Delete stock post
wp post delete 1 --force && \
# Trash sample page, and
wp post delete $(wp post list --post_type=page --posts_per_page=1 --post_status=publish --pagename="sample-page" --field=ID --format=ids) && \
# Create Home, About Us, Blog & Contact Us pages
wp post create --post_type=page --post_title='Home' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='About Us' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='Blog' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='Contact Us' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
# Set home page as front page
wp option update show_on_front 'page' && \
# Set home page to be the new page
wp option update page_on_front $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename=home --field=ID --format=ids) && \
# Set blog page to be the new page
wp option update page_for_posts $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename=blog --field=ID --format=ids) && \
# Create a navigation bar
wp menu create "Main Nav" && \
# Assign navigaiton to primary location
# wp menu location assign main-navigation primary && \
# Open the new website with Google Chrome
#/usr/bin/open -a "/Applications/Google Chrome.app" "http://$DOMAIN_NAME/wp-admin" && \
# Delete stock themes
wp theme delete twentyfifteen && \
wp theme delete twentysixteen && \
# Delete plugins
wp plugin delete akismet && \
wp plugin delete hello && \
output "Do you want to install WooCommerce [Yn]?" -w && \
read woocommerce
if [ "$woocommerce" = "y" ]
then
wp plugin install woocommerce --activate
# This is a WordPress plugin that adds several WP-CLI commands for generating fake WooCommerce data
# https://github.com/metorikhq/wc-cyclone
wp plugin install https://github.com/metorikhq/wc-cyclone/archive/master.zip --activate
# Create the WooCommerce pages
wp post create --post_type=page --post_title='Shop' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='Cart' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='Checkout' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='My Account' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
wp post create --post_type=page --post_title='Terms and conditions' --post_status=publish --post_author=$(wp user get $WP_USER --field=ID --format=ids) && \
# Set WooCommerce those up correctly in WC Settings > Advanced
wp option update woocommerce_shop_page_id $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename='shop' --field=ID --format=ids) && \
wp option update woocommerce_cart_page_id $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename='cart' --field=ID --format=ids) && \
wp option update woocommerce_checkout_page_id $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename='checkout' --field=ID --format=ids) && \
wp option update woocommerce_myaccount_page_id $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename='my-account' --field=ID --format=ids) && \
wp option update woocommerce_terms_page_id $(wp post list --post_type=page --post_status=publish --posts_per_page=1 --pagename='terms-and-conditions' --field=ID --format=ids) && \
# Install & activate the default WC theme
wp theme install storefront --activate
# Install & activate the most used WC plugins
wp plugin install woocommerce-sequential-order-numbers search-by-sku-for-woocommerce woocommerce-gateway-paypal-powered-by-braintree woocommerce-pdf-invoices woocommerce-google-analytics-integration --activate
else
output "Ok, I am not going to install WooCommerce" -e
fi
#Download paid for plugins, install & activate them, then remove the zip files (Thanks https://goo.gl/ktysp5)
wget -v -O acf-pro.zip "https://connect.advancedcustomfields.com/index.php?p=pro&a=download&k=" && \
wp plugin install acf-pro.zip --activate --allow-root && \
rm acf-pro.zip && \
wget -v -O gravityforms.zip "https://www.dropbox.com/s/2msjyeecr4294ml/gravityforms.zip" && \
wp plugin install gravityforms.zip --activate --allow-root && \
rm gravityforms.zip && \
wget -v -O wp-migrate-db-pro.zip "https://www.dropbox.com/s/15q870ho0csh8bf/wp-migrate-db-pro.zip" && \
wp plugin install wp-migrate-db-pro.zip --activate --allow-root && \
rm wp-migrate-db-pro.zip && \
# Update all plugins
wp plugin update --all && \
# Install plugins
wp plugin install wpcore && \
wp plugin install adminimize && \
wp plugin install antispam-bee && \
wp plugin install broken-link-checker && \
wp plugin install cookie-law-info && \
wp plugin install custom-post-type-ui && \
wp plugin install duplicate-post && \
wp plugin install eps-301-redirects && \
wp plugin install elasticpress && \
wp plugin install enable-media-replace && \
wp plugin install google-analytics-for-wordpress && \
wp plugin install wp-mail-smtp && \
wp plugin install wp-maintenance-mode && \
wp plugin install really-simple-ssl && \
wp plugin install regenerate-thumbnails && \
wp plugin install wp-smushit && \
wp plugin install swift-performance-lite && \
wp plugin install stream && \
wp plugin install user-switching && \
wp plugin install wordpress-seo && \
wp plugin install wp-helpers && \
# Activate plugins
wp plugin activate adminimize cookie-law-info duplicate-post enable-media-replace wordpress-seo wpcore wp-helpers
# Activate plugin in entire multisite network
# wp plugin activate hello --network
# Discourage search engines
# wp option update blog_public 0
fi
clear
echo "=================================================================================="
output " Wordpress is installed successfully." -s && \
echo "=================================================================================="
echo ""
echo " WordPress install complete. Your username/password is listed below."
echo ""
echo " Please add this to your hostfile: 127.0.0.1 $DOMAIN_NAME"
echo ""
echo " Login to $BLOG_TITLE at: http://$DOMAIN_NAME/wp-admin"
echo ""
echo " Username: $WP_USER"
echo " Password: $WP_USER_PASSWORD"
echo ""
echo " DATEBASE DETAILS"
echo " Database Name: $MYSQL_DATABASE"
echo " Database Username: $MYSQL_USER"
echo " Database Password: $MYSQL_PASSWORD"
echo ""
echo "=================================================================================="
touch /var/www/html/.gitkeep
fi
| true
|
49325b9a82f005c6d8cd583cfcbd78b9274c1bb3
|
Shell
|
rathores/Stock-Market-Analysis
|
/Stock Market Analysis Scripts/trading.sh
|
UTF-8
| 1,272
| 2.890625
| 3
|
[] |
no_license
|
# Interactive investment menu: list the supported companies, then run the
# matching per-company script chosen by the user.
clear
echo " "
echo "*********************************************************************************************************************************"
echo ""
echo " "
echo "FinCorp supports following company investment-"
echo "apple
apolo
ebay
facebook
ford
gail
glf
honda
Int
maruti
reliance
SBI
TCS
volkswagen
yesBank"
echo " "
echo ""
echo "Enter Company Name You Want To Invest in...:"
echo "Please Enter name in the same format as mentioned above"
# -r keeps backslashes in the typed name literal.
read -r name
# BUG FIX: only execute the helper when it actually exists; quoting the
# path stops word-splitting and running unintended commands for input
# containing spaces or shell metacharacters.
if [ -f "./$name.sh" ]; then
    "./$name.sh"
else
    echo "Unknown company: $name"
fi
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
echo "*************************************************************************************************************************************"
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
echo "Want to continue with Investment Guide"
echo "^^^^^^^^^^^^^^^^^^^^^^^^"
echo "Press Y for returning to Investment Guide"
echo "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
echo "Press Z for Going to Main Menu"
echo "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
read -r c
case $c in
    Y) ./invest.sh ;;
    y) ./invest.sh ;;
    z) ./mainscript.sh ;;
    Z) ./mainscript.sh ;;
esac
| true
|
f676f47d3b1ba3ded92b898630aeb526e7845f59
|
Shell
|
bhudgens/stardock
|
/bin/stardock-attach
|
UTF-8
| 837
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
### Usage:
### stardock-attach <service>
###
### Fuzzy match and make attaching to instances easier
export DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
source "$DIR/stardock-launcher"
die_on_error
run_as_root_or_die

# Make sure they pass 'something' to search for.
# (${service} is expected to be populated by the sourced stardock-launcher.)
[ -z "${service}" ] && echo Please pass the name of a container && exit 1

# Now figure out which container they want and attach.
# The pattern is quoted (and preceded by --) so fuzzy terms that contain
# spaces or start with '-' do not break grep.
echo "Connect to which instance:"
select container in $(docker container ls --format '{{.Names}}' | grep -- "${service}"); do
  # BUG FIX: the original '[ -n ${container} ]' is ALWAYS true when the
  # variable is unquoted; quote it so an invalid menu choice (empty
  # selection) is skipped instead of exec'ing into an empty name.
  # Users are used to logging into containers as ubuntu, but now that we
  # support other kinds of containers the ubuntu user might not exist,
  # so we fall back to plain ole bash.
  [ -n "${container}" ] && eval docker exec -it "${container}" '/bin/sh -c "su - ubuntu || /bin/bash"'
  break;
done
| true
|
8bf3f2e2ae2f7cf6363d47b970c566b045c68f30
|
Shell
|
pavelsg/aws-utils
|
/get-first-blk-id.sh
|
UTF-8
| 762
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Given a space-separated list of used /dev/sdX devices, print the first
# free device letter (Amazon appears to reserve 'a'..'e'; we start at 'f').

function print_help() {
  echo Parameters missing
  cat <<EOF
Usage: $0 <space-separated device list>
EOF
  exit 1
}

if [ "$1" == "" ]
then
  print_help
fi

if [ "$2" != "" ]
then
  >&2 echo Multiple parameters not supported.
  >&2 echo To pass multiple block devices use quotas
  exit 1
fi

# BUG FIX: the pattern is now anchored with ^...$ — previously it matched a
# valid-looking substring anywhere, so junk like "foo /dev/sda bar" passed
# validation and produced garbage letters downstream.
EXPECTED_LIST="^(/dev/sd[a-z] ?)+$"
if [[ ! $1 =~ ${EXPECTED_LIST} ]]
then
  >&2 echo Seems like wrong parameters passed.
  >&2 echo Expected in form of "/dev/sda /dev/sdb ..."
  exit 2
fi

# Collapse "/dev/sdf /dev/sdg" into the bare letter string "fg".
# (Safe to splice into the bracket expression below: the anchored regex
# above guarantees only lowercase letters remain.)
USED_LETTERS=$(echo "$1" | sed 's/\/dev\/sd//g' | sed 's/ //g')
# looks like Amazon reservs from 'a' to 'd'
ALPHABET="fghijklmnopqrstuvwxyz"
# Delete the used letters from the alphabet and keep the first survivor.
FIRST_FREE=$(echo "${ALPHABET}" | sed 's/['"${USED_LETTERS}"']//g' | sed 's/\(.\).*/\1/')
echo "${FIRST_FREE}"
| true
|
9c1fed315878ce9c5018e531cc9be7d3e2b10182
|
Shell
|
CyberSys/bifrost-build
|
/all/opt-apache-2.2.17-1/Fetch-source.sh
|
UTF-8
| 1,256
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the Apache httpd source tarball, trying each mirror in order until
# one delivers a file that matches the expected MD5 checksum. If every
# checksum-verified mirror fails, fall back to letting wget-finder locate
# the file itself by "name:checksum".
SRC=httpd-2.2.17.tar.bz2
DST=/var/spool/src/"${SRC}"
MD5=16eadc59ea6b38af33874d300973202e

if [ ! -s "${DST}" ]; then
  fetched=1
  for mirror in \
    http://apache.archive.sunet.se/dist/httpd \
    http://apache.localhost.net.ar/httpd \
    http://ftp-archive.freebsd.org/pub/FreeBSD-Archive/ports/distfiles/apache22 \
    http://pkgs.fedoraproject.org/repo/pkgs/httpd/httpd-2.2.17.tar.bz2/16eadc59ea6b38af33874d300973202e \
    http://apache.crihan.fr/httpd \
    http://ftp.lfs-matrix.net/pub/blfs/conglomeration/httpd \
    http://ftp.nsysu.edu.tw/FreeBSD/ports/distfiles/apache22 \
    http://ftp.stu.edu.tw/FreeBSD/distfiles/apache22
  do
    if ../../wget-finder --checksum "${MD5}" -O "${DST}" "${mirror}/${SRC}"; then
      fetched=0
      break
    fi
  done
  # Last resort, exactly as before: no explicit URL, no --checksum flag.
  [ "${fetched}" -eq 0 ] || ../../wget-finder -O "${DST}" "${SRC}:${MD5}"
fi
| true
|
68760f3c52b2511d1b7699db9f2f835db60bef89
|
Shell
|
jayvdb/gitMC
|
/amender.sh
|
UTF-8
| 766
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rewrite git history, re-attributing every commit whose author/committer
# e-mail equals $1 to the GitHub user $2 ("$2@users.noreply.github.com").
#
# Usage: ./amender.sh <wrong-email> <github-username>
#
# Don't run this script till you understand consequences of rebase.
# Everyone will have to reclone
#
# Note that "noreply@github.com" comes from making commits from the website.
# it doesn't seem to impact commit count?
#
# https://help.github.com/articles/changing-author-info/
# https://www.git-tower.com/learn/git/faq/change-author-name-email
#
# NOTE: the single quotes below are deliberately closed and reopened around
# "$1"/"$2" so the CALLER's arguments are baked into the env-filter script
# before git evaluates it; do not "fix" the quoting.
git filter-branch -f --env-filter '
WRONG_EMAIL='"$1"'
NEW_NAME='"$2"'
NEW_EMAIL="'"$2"'@users.noreply.github.com"
if [ "$GIT_COMMITTER_EMAIL" = "$WRONG_EMAIL" ]
then
export GIT_COMMITTER_NAME="$NEW_NAME"
export GIT_COMMITTER_EMAIL="$NEW_EMAIL"
fi
if [ "$GIT_AUTHOR_EMAIL" = "$WRONG_EMAIL" ]
then
export GIT_AUTHOR_NAME="$NEW_NAME"
export GIT_AUTHOR_EMAIL="$NEW_EMAIL"
fi
' --tag-name-filter cat -- --branches --tags
| true
|
ed6ea19f5af64902798545ecf66fadd0ed47fec0
|
Shell
|
gWOLF3/bitbox
|
/bitbox/src/ubuntu/install/bottle.sh
|
UTF-8
| 502
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision the Bottle browser on Ubuntu: build prerequisites, Node.js,
# npm, Electron, then clone and install Bottle itself.
# set -e: abort on the first failing step.
set -e
echo "Install Bottle Browser"
#install utils
apt-get update -y
apt-get install git-core curl build-essential openssl libssl-dev -y
apt-get clean
# install node
# NOTE(review): piping a remote script straight into sh trusts the remote
# host completely — acceptable only for a throwaway image build.
curl -L https://raw.githubusercontent.com/zeit/install-node/master/install.sh | sh
node -v
# install npm
curl -L https://npmjs.org/install.sh | sh
npm -v
# install electron
npm install -g electron
# finally, install bottle
git clone https://github.com/interplanaria/bottle.git && cd bottle
npm install
| true
|
a5e090b6c9b5adfa087157ffc0db887b2dfb7225
|
Shell
|
JacintoCC/swap1415
|
/Trabajo/siege.sh
|
UTF-8
| 2,026
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
function mediaAritmetica()
{
declare -a numeros=("${!1}")
local media=0
for item in ${numeros[@]}
do
media=`echo "scale=3; $media + $item" | bc -l`
done
media=`echo "scale=3; $media / ${#numeros[@]}" | bc -l`
echo "$media"
}
# Number of siege runs to average per destination.
PRUEBAS=10
# Map: test name -> target URL (currently a single HAProxy "source" farm).
declare -A URLS=( ["granja_haproxy_source"]="192.168.22.131/f.php" )
for destino in ${!URLS[@]}
do
# Per-run metric accumulators, reset for each destination.
availability=()
elapsedTime=()
responseTime=()
transactionRate=()
longestTransaction=()
# NOTE(review): 'salida' is assigned but never used below.
salida=""
# NOTE(review): $peticiones is never assigned anywhere in this script, so
# this message prints an empty request count — confirm its intended source.
echo -n "Probando con $peticiones peticiones en $destino..."
for (( prueba=1; prueba<=$PRUEBAS; prueba++ ))
do
# NOTE(review): the URL is hard-coded instead of using ${URLS[$destino]};
# adding more entries to URLS would still benchmark this one address.
siege -b -t120s -c 80 -v 192.168.22.131/f.php 2> ../Datos/$destino-salidaerror.dat
# Scrape each summary metric out of siege's stderr report: squeeze runs of
# spaces, then pick the numeric column.
availability+=(`cat ../Datos/$destino-salidaerror.dat | egrep "Availability:" | tr -s ' ' | cut -d" " -f2`)
elapsedTime+=(`cat ../Datos/$destino-salidaerror.dat | egrep "Elapsed time:" | tr -s ' ' | cut -d" " -f3`)
responseTime+=(`cat ../Datos/$destino-salidaerror.dat | egrep "Response time:" | tr -s ' ' | cut -d" " -f3`)
transactionRate+=(`cat ../Datos/$destino-salidaerror.dat | egrep "Transaction rate:" | tr -s ' ' | cut -d" " -f3`)
longestTransaction+=(`cat ../Datos/$destino-salidaerror.dat | egrep "Longest transaction:" | tr -s ' ' | cut -d" " -f3`)
echo $prueba
done
echo
# Average the collected runs and append one mean per metric to its data file.
media_availability=`mediaAritmetica availability[@]`
media_elapsedTime=`mediaAritmetica elapsedTime[@]`
media_responseTime=`mediaAritmetica responseTime[@]`
media_transactionRate=`mediaAritmetica transactionRate[@]`
media_longestTransaction=`mediaAritmetica longestTransaction[@]`
echo "$media_availability" >> ../Datos/$destino-media_availability.dat
echo "$media_elapsedTime" >> ../Datos/$destino-media_elapsedTime.dat
echo "$media_responseTime" >> ../Datos/$destino-media_responseTime.dat
echo "$media_transactionRate" >> ../Datos/$destino-media_transactionRate.dat
echo "$media_longestTransaction" >> ../Datos/$destino-media_longestTransaction.dat
done
| true
|
347b9ad2c01f6ad8bb3699033b8ba32ef53a7f2a
|
Shell
|
marcelloc/Unofficial-pfSense-packages
|
/pkg-postfix/files/usr/local/bin/mail_report.sh
|
UTF-8
| 1,425
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# mail_report.sh
#
# part of Unofficial packages for pfSense(R) softwate
# Copyright (c) 2019 Marcello Coutinho
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Summarise a Postfix log file ($1): list messages rejected by the deep
# tests, split into temporary and permanent rejects, then append a full
# pflogsumm report. Intermediate work files live in /tmp.
log=/tmp/today.log
log_partial=/tmp/today.partial.log
# Drop header-check warning/hold noise before any further processing.
grep -vE "(warning|hold): header " $1 > $log
echo "Message with hits on deep tests:"
# Split each reject line on whitespace; 'read' folds everything from the
# 13th field onward into $m, which carries the from=/to=/reason portion.
grep -iE "reject.*Service" $log |
while read a b c d e f g h i j k l m
do
# The sed expressions trim each fragment down to "from=...", "to=..." and
# the NOQUEUE reason respectively (the /i flag makes the match
# case-insensitive; trailing '>' brackets are stripped).
from=`echo $m| sed "s/.*from=./from=/i;s/, to=.*//;s/>//"`
to=`echo $m| sed "s/.*, to=./to=/i;s/, proto.*//;s/>//"`
error=`echo $m | sed "s/.*NOQUEUE:/NOQUEUE:/i;s/; from=.*//"`
echo $a $b $c $from $to $error
done > $log_partial
# Temporary (greylisting-style) rejects first...
grep "Service currently unavailable" $log_partial
echo ""
echo "Permanent Messages reject log:"
# ...then permanent rejects, timestamp columns dropped, de-duplicated with
# an occurrence count.
grep -v "Service currently unavailable" $log_partial | cut -d ' ' -f 4- |sort | uniq -c
echo ""
/usr/local/bin/pflogsumm $1
rm -f $log
rm -f $log_partial
| true
|
6ab2a8c6bde73457aee7845279fbda33a46f8abd
|
Shell
|
majestrate/livechan
|
/tools/addchan.sh
|
UTF-8
| 203
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# add channels manually
#
# Usage: addchan.sh <name> [<name> ...]
#
#cd $(dirname $0)

# BUG FIX: with zero arguments, "${vals:0:-1}" below raises a substring
# expansion error and feeds broken SQL to sqlite3 — fail early instead.
if [ "$#" -eq 0 ]; then
    echo "usage: $0 <channel name> [...]" >&2
    exit 1
fi

# Build '("a"),("b"),' — the trailing comma is stripped in the heredoc.
# "$@" (quoted) preserves names that contain spaces.
# NOTE(review): names are interpolated into SQL unescaped; do not feed
# untrusted input to this script.
vals=""
for arg in "$@" ; do
    vals="(\"$arg\"),$vals"
done

sqlite3 livechan.db <<EOF
INSERT INTO Channels(name) VALUES${vals:0:-1};
EOF
| true
|
b25a2940180076d8ffbc679536125a31f1118d27
|
Shell
|
0918nobita/m4
|
/test.bash
|
UTF-8
| 254
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash -eu
# Build with ninja, then run every m4 spec file directly under ./src and
# report success per spec.
# Options are set in the body too: shebang flags are lost when the script
# is invoked as "bash test.bash".
set -eu

ninja -v

# IFS= read -r: keep leading whitespace and literal backslashes in paths.
find ./src -mindepth 1 -maxdepth 1 -type f -name "*.spec.m4" | while IFS= read -r file
do
    # BUG FIX: "$file" is quoted so paths containing spaces survive.
    name=$(basename "$file" .spec.m4)
    echo "[${name}]"
    m4 -daeq -E -E -R ./src/base.m4f -I ./src "$file"
    echo -e " \033[1;32mSuccess\033[0m"
done
| true
|
018bc82140396dad0c245d5c4700680de25fe045
|
Shell
|
reezer/maplibre-gl-native
|
/platform/ios/platform/ios/scripts/deploy-to-cocoapods.sh
|
UTF-8
| 1,466
| 3.671875
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause",
"curl",
"ISC",
"BSL-1.0",
"JSON",
"BSD-2-Clause",
"IJG",
"Zlib",
"Apache-2.0",
"OpenSSL",
"LicenseRef-scancode-openssl",
"blessing",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-warranty-disclaimer",
"NCSA",
"Libpng"
] |
permissive
|
#!/usr/bin/env bash
# Publish the Mapbox iOS SDK podspecs (tagged CI builds only).
# This relies on either:
# 1. You being authenticated locally with CocoaPods trunk.
# 2. The `COCOAPODS_TRUNK_TOKEN` environment variable being set.

set -euo pipefail

# Bold cyan progress marker on stderr.
# FIX (SC2145): use $* instead of "$@" — the arguments are interpolated
# into one string; mixing the list expansion into a word is undefined
# style even though echo happens to join with spaces.
function step { >&2 echo -e "\033[1m\033[36m* $*\033[0m"; }
# Reset terminal colors on any exit path.
function finish { >&2 echo -en "\033[0m"; }
trap finish EXIT

CIRCLE_TAG=${CIRCLE_TAG:-""}

step "Pushing release to CocoaPods trunk…"

if [[ $CIRCLE_TAG ]]; then
    pod trunk push platform/ios/Mapbox-iOS-SDK.podspec --allow-warnings
else
    echo "Skipping push to CocoaPods trunk for untagged build"
fi

step "Pushing release/builds to Mapbox podspecs repo…"

# Register the private specs repo once; later runs find it already present.
if [[ -z $(pod repo list | grep -i mapbox-public) ]]; then
    pod repo add mapbox-public https://github.com/mapbox/pod-specs
else
    echo "Found existing mapbox-public podspecs repo"
fi

# Pushing a podspec commits to the specs repo, so CI machines need a git
# identity. NOTE(review): $CI is unset outside CI and trips `set -u` here —
# this script is evidently intended to run on CI only.
if [[ -z $(git config --global user.email) && $CI ]]; then
    echo "Setting machine user as git committer"
    git config --global user.email "MapboxCI@users.noreply.github.com"
fi

if [[ $CIRCLE_TAG ]]; then
    pod repo push mapbox-public platform/ios/Mapbox-iOS-SDK.podspec --allow-warnings
    pod repo push mapbox-public platform/ios/Mapbox-iOS-SDK-stripped.podspec --allow-warnings
else
    echo "Skipping push of release podspecs to mapbox-public for untagged build"
    # pod repo push mapbox-public platform/ios/Mapbox-iOS-SDK-snapshot-dynamic.podspec --allow-warnings
    echo "Skipping push of snapshot to mapbox-public until we have a way to update the versions in the snapshot podspec"
fi
| true
|
52e77f6b0d54ec6111e2c7a62c81cf144ff792b4
|
Shell
|
mahkhaled/cell_template
|
/scripts/rotate
|
UTF-8
| 1,888
| 4.65625
| 5
|
[] |
no_license
|
#!/bin/bash
# Keep only the newest N files matching a path/name prefix; delete the rest.
Usage="Usage: rotate -p[please provide the fulll path and the file naming convension] -l[Number of versions you want to leave] -h[To display this help]"
# Checking if there is no argument specified then print out Usage.
if [ "$#" -le "0" ]; then
echo $Usage
exit 1;
fi
# showopts function is responsible for getting options and switch between cases
showopts () {
while getopts ":hp:l:" optname
do
# In case -h option specified then the script will print Usage and exit with exit status 1
case "$optname" in
"h")
echo $Usage
exit 1
;;
# In case -p option specified then the script will store the path information into $folder variable
# (the value is later used as a GLOB PREFIX: "$folder*" must match the files to rotate)
"p")
folder=$OPTARG
;;
# In case -l option specified then the script will store number of versions you want to leave into $leave variable
"l")
leave=$OPTARG
;;
# In case any other option which not handle the script will exit with message Unknown option and exit status 1
"?")
echo "Unknown option $OPTARG"
exit 1
;;
":")
# Should not occur
echo "No argument value for option $OPTARG"
;;
*)
# Should not occur
echo "Unknown error while processing options"
;;
esac
done
return $OPTIND
}
# Execute the showopts function
showopts "$@"
# Print out Path and Versions information which provided by user
# NOTE(review): if -p or -l was omitted, $folder/$leave is unset here and
# the comparisons below misbehave — consider validating both.
echo -e "Path: $folder \nLeave: $leave"
# Count total number of files
# NOTE(review): parsing 'ls' output breaks on file names containing
# whitespace/newlines; fine for machine-generated backup names only.
counter=`ls -t ${folder} 2> /dev/null | wc -l`
# If the total number of files greater than the number of leaves (Which specified by user parameter) then leave newest files with that number and delete others
# NOTE(review): 'xargs -i' is a deprecated alias of -I{}, and '${folder}{}'
# assumes the ls output is relative to the same prefix — verify on targets.
if [ "$counter" -gt "$leave" ]; then
ls -t ${folder} | tail -n $(($counter - $leave)) | xargs -i rm -rf ${folder}{}
else
echo "Nothing to rotate $counter"
fi
| true
|
2bdef4bd14fccd5a97bad4e66a57c7716f65bf22
|
Shell
|
JuanGuerreroUP/NAS_SO
|
/eznas
|
UTF-8
| 993
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# eznas — command-line dispatcher for the EZNAS helper scripts that live
# in /opt/eznas. The first argument selects the action.
NASPath=$(cat /opt/eznas/mountrute.txt)
ObjectivePath=$(cat /opt/eznas/localroute.txt)
# NOTE(review): NASPath/ObjectivePath are read but never used below —
# possibly leftovers; confirm before removing.

if [[ $# -eq 0 ]]; then
    echo "comando incorrecto, pruebe usar eznas -h para ver las opciones disponibles"
    exit 1
fi

case $1 in
    '-s')   # initial setup
        /opt/eznas/setup.sh
        ;;
    '-p')   # set credentials (optionally user + password as $2 $3)
        if [[ $# -eq 3 ]]; then
            # BUG FIX: quote the arguments so values containing spaces
            # reach credentials.sh intact (SC2086).
            /opt/eznas/credentials.sh "$2" "$3"
        else
            /opt/eznas/credentials.sh
        fi
        ;;
    '-l')   # set the local route (optionally a path as $2)
        if [[ $# -eq 2 ]]; then
            /opt/eznas/localroute.sh "$2"
        else
            /opt/eznas/localroute.sh
        fi
        ;;
    '-c')   # run a backup
        /opt/eznas/01Respaldo.sh
        ;;
    '-m')   # open a shell in the configured local directory
        cd $(cat /opt/eznas/localroute.txt)
        $SHELL
        ;;
    '-h')   # show the man page
        man eznas | more
        ;;
    '-d')   # uninstall
        /opt/eznas/eznas_uninstallerlauncher.sh
        ;;
    '-g')   # edit the TOCP file
        nano /opt/eznas/TOCP.txt
        ;;
    *)
        echo "comando incorrecto, pruebe usar eznas -h para ver las opciones disponibles"
        exit 1
        ;;
esac
exit $?
| true
|
88391e81bc47fe18eb237a3f791b1be56cff146b
|
Shell
|
Grimeton/CodeServer-Image
|
/code-server/images/run/ubuntu/all/modules/CS_ENABLE_WHEEL.sh
|
UTF-8
| 4,771
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
#
# Copyright (c) 2020, <grimeton@gmx.net>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the software/distribution.
#
# 3. If we meet some day, and you think this stuff is worth it,
# you can buy me a beer in return, Grimeton.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Source-guard: this file is a library. "return" only succeeds when the
# file is being sourced; if it fails we were executed directly, so abort
# with a distinctive status.
if ! (return 0 2>/dev/null); then
echo "THIS IS A LIBRARY FILE AND SHOULD NOT BE CALLED DIRECTLY. '($(realpath "${0}"))'"
exit 254
fi
# Pull in the __variable_text helper used by the functions below.
__lib_require "base_variable"
function __isenabled_feature_wheel() {
    # Decide whether the 'wheel' feature is on. The build-time default
    # (__D_C_ENABLE_WHEEL) is resolved first, then the runtime variable
    # CS_ENABLE_WHEEL may override it. The outcome lands in
    # __SETTINGS[CS_ENABLE_WHEEL]: "1" = enabled, "" (empty) = disabled.
    declare __default=""
    if [[ -n ${__D_C_ENABLE_WHEEL+x} && "${__D_C_ENABLE_WHEEL}x" != "x" ]]; then
        if __variable_text __D_C_ENABLE_WHEEL 1; then
            __default="1"
        elif __variable_text __D_C_ENABLE_WHEEL 0; then
            __default=""
        fi
    fi
    __SETTINGS[CS_ENABLE_WHEEL]="${__default}"

    if [[ -n ${CS_ENABLE_WHEEL+x} && "${CS_ENABLE_WHEEL}x" != "x" ]]; then
        if __variable_text CS_ENABLE_WHEEL 1; then
            __SETTINGS[CS_ENABLE_WHEEL]="1"
        elif __variable_text CS_ENABLE_WHEEL 0; then
            __SETTINGS[CS_ENABLE_WHEEL]=""
        fi
    fi

    if [[ "${__SETTINGS[CS_ENABLE_WHEEL]}x" == "x" ]]; then
        __log i -- "(CS_ENABLE_WHEEL) Checking if we enable the 'wheel' feature... Disabled.\n"
    else
        __log i -- "(CS_ENABLE_WHEEL) Checking if we enable the 'wheel' feature... Enabled.\n"
    fi
    return 0
}
__init_function_register_always 150 __isenabled_feature_wheel
function __pre_feature_wheel() {
    # When the wheel feature is enabled: make sure the 'wheel' group
    # exists and enable passwordless 'su' for its members via pam_wheel.
    # Returns: 0 on success or when the feature is disabled,
    #          111 group creation failed, 121 sed patch failed,
    #          131 /etc/pam.d/su missing.
    if [[ -z ${__SETTINGS[CS_ENABLE_WHEEL]+x} ]]; then
        return 0
    elif [[ "${__SETTINGS[CS_ENABLE_WHEEL]}x" == "x" ]]; then
        return 0
    fi
    __log i -- "(CS_ENABLE_WHEEL) Testing if group 'wheel' exists...\n"
    if __group_name_exists "wheel"; then
        __log i -- "(CS_ENABLE_WHEEL) Group 'wheel' exists.\n"
    else
        __log i -- "(CS_ENABLE_WHEEL) Group 'wheel' does not exist... Attempting to create it.\n"
        if __group_add -r wheel; then
            __log i -- "(CS_ENABLE_WHEEL) Group 'wheel' created successfully.\n"
        else
            __log e -- "(CS_ENABLE_WHEEL) Could not create group 'wheel' ($?).\n"
            return 111
        fi
    fi
    __log i -- "(CS_ENABLE_WHEEL) Patching '/etc/pam.d/su'...\n"
    if [[ -f /etc/pam.d/su ]]; then
        # BUG FIX: this was "sed -iE ..." — GNU sed parses the "E" as the
        # -i BACKUP SUFFIX, leaving a stray '/etc/pam.d/suE' file and
        # treating the pattern as BRE. "-i -E" is what was intended.
        if sed -i -E 's/^#.*auth.*sufficient.*pam_wheel.so.*trust$/auth sufficient pam_wheel.so trust/g' /etc/pam.d/su; then
            __log i -- "(CS_ENABLE_WHEEL) Patching '/etc/pam.d/su' success.\n"
            return 0
        else
            __log i -- "(CS_ENABLE_WHEEL) Problems patching '/etc/pam.d/su' ($?).\n"
            return 121
        fi
    else
        __log e -- "(CS_ENABLE_WHEEL) '/etc/pam.d/su' does not exist.\n"
        return 131
    fi
    return 254
}
__init_function_register_always 250 __pre_feature_wheel
function __post_cs_enable_wheel() {
    # Add the configured user to the 'wheel' group, but only when both the
    # feature flag and a user name are present in __SETTINGS.
    # usermod's exit status is propagated to the caller.
    if [[ -z ${__SETTINGS[CS_ENABLE_WHEEL]:+x} ]] || [[ -z ${__SETTINGS[USER]:+x} ]]; then
        return 0
    fi
    usermod -G wheel -a "${__SETTINGS[USER]}"
}
__init_function_register_always 750 __post_cs_enable_wheel
function __psp_cs_enable_wheel() {
    # Record the final state of the wheel feature in the init summary.
    local __state
    if [[ -n ${__SETTINGS[CS_ENABLE_WHEEL]:+x} ]]; then
        __state="Enabled"
    else
        __state="Disabled"
    fi
    __init_results_add "CS_ENABLE_WHEEL" "${__state}"
    return 0
}
__init_function_register_always 1800 __psp_cs_enable_wheel
| true
|
a443a5ecdb4fdbcb11b97573025b1987ff05e061
|
Shell
|
akatrevorjay/shellshock-formula
|
/shellshock/compile_bash.sh
|
UTF-8
| 608
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
##
## Manually compile and install bash
## ~ Trevor Joynson (trevorj) <github@skywww.net>
##
# -o pipefail: a failed wget inside "wget | patch" must abort the build
# instead of silently feeding patch an empty stream.
set -e -o pipefail

WORKDIR="/usr/src/shellshock"
BASH_FTP_ROOT="http://ftp.gnu.org/gnu/bash"
BASH_VER="4.3"
BASH_PATCH_LEVEL="27"

[[ ! -d "$WORKDIR" ]] || rm -rf "$WORKDIR"
mkdir -p "$WORKDIR"; cd "$WORKDIR"

wget -q "$BASH_FTP_ROOT/bash-${BASH_VER}.tar.gz" -O - | tar zx
cd "bash-$BASH_VER"

# Download patchset
# BUG FIX: official bash patches are numbered from 001; starting seq at 0
# requested a nonexistent bash43-000 and relied on that failure being
# ignored (no pipefail). Start at 1.
for i in $(seq -f "%03g" 1 $BASH_PATCH_LEVEL); do
    wget -q "$BASH_FTP_ROOT/bash-${BASH_VER}-patches/bash${BASH_VER//.}-$i" -O - \
        | patch -p0 -s
done

# Build and install
./configure && make && make install
| true
|
3c4b69d5cc2bddc929a9f4ff6498a0c8e1333af6
|
Shell
|
openbox2008/openstack-pike-autoinstall
|
/openstack-flat-compute.sh
|
UTF-8
| 1,304
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
#-------------------------------
# install openstack flat network for compute
# by openbox2008
# 2018-06-08
#-------------------------------
# Run this script on the compute nodes 172.16.100.72~74.
# 1. Create the OVS bridge.
ovs-vsctl add-br br-eth1
# 2. Add the physical data NIC (em2) to the br-eth1 bridge.
ovs-vsctl add-port br-eth1 em2
# 3. Edit /etc/neutron/plugins/ml2/ml2_conf.ini (timestamped backup first).
cp -a /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak_$(date +%F_%T)
# (1) Update the [ml2_type_flat] section: declare the "physnet1" flat network.
#     The two '#' lines appended afterwards are just spacer comments inserted
#     right below the section header (each sed 'a\' prepends one more line).
sed -i '/^\[ml2_type_flat\]/a\flat_networks = physnet1' /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i '/^\[ml2_type_flat\]/a\#' /etc/neutron/plugins/ml2/ml2_conf.ini
sed -i '/^\[ml2_type_flat\]/a\#' /etc/neutron/plugins/ml2/ml2_conf.ini
# 4. Edit /etc/neutron/plugins/ml2/openvswitch_agent.ini (backup first).
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak_$(date +%F_%T)
# (1) Update the [ovs] section: map physnet1 to the br-eth1 virtual bridge.
sed -i '/^\[ovs\]/a\bridge_mappings = physnet1:br-eth1' /etc/neutron/plugins/ml2/openvswitch_agent.ini
sed -i '/^\[ovs\]/a\#' /etc/neutron/plugins/ml2/openvswitch_agent.ini
sed -i '/^\[ovs\]/a\#' /etc/neutron/plugins/ml2/openvswitch_agent.ini
# 5. Restart the agent so the new bridge mapping takes effect.
systemctl restart neutron-openvswitch-agent
| true
|
4fc45a20a3a5e43c1c6bdeb33adeb6c7e5161327
|
Shell
|
alxerg/txtlog
|
/txtlog
|
UTF-8
| 4,471
| 4.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Emit the current date in the entry-file format, YYYY-MM-DD.
today() {
  date '+%F'   # %F is shorthand for %Y-%m-%d
}
# Read newline-separated strings from stdin and print (without a trailing
# newline) the longest prefix common to all of them.
common_prefix() {
  # -r keeps backslashes in file names literal.
  read -r a; read -r b
  while [[ "$b" != "" ]]; do
    # sed greedy back-reference trick: \1 captures the longest string that
    # prefixes both lines.
    a=$(printf "%s\n%s\n" "$a" "$b" | sed -e 'N;s/^\(.*\).*\n\1.*$/\1/')
    read -r b
  done
  # BUG FIX: "$a" was previously used as the printf FORMAT string, so any
  # '%' or '\' in the data corrupted the output; print it as an argument.
  printf '%s' "$a"
}
# Print the top-level usage text.
# FIX: corrected the user-facing typo "nad" -> "and".
root_help() {
echo -e "
Usage: txtlog COMMAND
A tool to store notes and journal entries in text files
Commands:
  list    List log entries
  new     Create a new entry for today
  edit    Edit a previously written entry
  help    Show help messages
Run 'txtlog help COMMAND' for more information on a command.
"
}
# Print help for "txtlog list"; the examples embed ANSI color escapes
# (\e[...]), which "echo -e" expands.
list_help() {
echo -e "
Usage: txtlog list [FILTER]
List log entries, optionally with a filter. If no argument is provided, all
entries will be listed.
Examples:
$ txtlog list
\e[1;38;5;241m2017-\e[1;38;5;75m12-22\e[39;0m\e[1;38;5;241m-01\e[39;m A New Hope
\e[1;38;5;241m2018-\e[1;38;5;75m06-22\e[39;0m\e[1;38;5;241m-01\e[39;m The Empire Strikes Back
\e[1;38;5;241m2018-\e[1;38;5;75m07-23\e[39;0m\e[1;38;5;241m-01\e[39;m Return of the Jedi
$ txtlog list 2018-07
\e[1;38;5;241m2018-\e[1;38;5;75m07-23\e[39;0m\e[1;38;5;241m-01\e[39;m Return of the Jedi
"
}
# List entries whose file name starts with "$1" (a glob prefix) in the
# current directory. $2 is an optional string printed before each line
# (edit() uses it for indentation). File layout: YYYY-MM-DD-NN.txt; the
# first line of each file is shown as the title.
list() {
  local f year month day entry short
  for f in "$1"*; do
    [[ -e "$f" ]] || continue   # unmatched glob stays literal — skip it
    year=$(echo "$f" | cut -d '-' -f 1)
    month=$(echo "$f" | cut -d '-' -f 2)
    day=$(echo "$f" | cut -d '-' -f 3)
    entry=$(echo "$f" | cut -d '-' -f 4 | cut -d '.' -f 1)
    short=$(head -n 1 "$f")
    # BUG FIX: 10#… forces base-10 — printf %d otherwise treats the
    # zero-padded "08"/"09" (days, months, entry numbers) as invalid
    # octal and errors out for those values.
    printf "%s\e[1;38;5;241m%d-\e[1;38;5;75m%.2d-%.2d\e[39;0m\e[1;38;5;241m-%.2d\e[39;m %s\n" \
      "$2" "$((10#$year))" "$((10#$month))" "$((10#$day))" "$((10#$entry))" "$short"
  done
}
# Print help for "txtlog new".
new_help() {
echo -e "
Usage: txtlog new [TITLE]
Create a new entry. If a title is provided, txtlog will prefill the first line
of the created file, which is used as title. Your \$EDITOR will be used.
"
}
# Create the next entry file for today (YYYY-MM-DD-NN.txt, NN = first free
# two-digit index) and open it in $EDITOR. Any arguments are written as
# the file's first line, which list() shows as the title.
new() {
  local day index filename
  day=$(today)   # hoisted: the date was re-read on every loop pass
  index=1
  # Find the first index not already taken by one of today's entries.
  while true; do
    filename=$(printf "%s-%.2d.txt" "$day" "$index")
    if [[ -f "$filename" ]]; then
      index=$((index + 1))
    else
      break
    fi
  done
  if [[ "$1" != "" ]]; then
    echo "${@:1}" > "$filename"
  fi
  $EDITOR "$filename"
}
# Print help for "txtlog edit"; examples embed ANSI color escapes.
edit_help() {
echo -e "
Usage: txtlog edit [FILTER]
Edit a previously written entry. You can specify a filter to prompt you with a
list of entries to choose from. Alternatively, you may use 'all' as the
argument to edit to start editing the latest entry. If no arguments are
provided, you can choose from today's entries, if there are multiple.
Examples:
$ txtlog edit 2018-07
\e[1;38;5;241m2018-\e[1;38;5;75m07-22\e[39;0m\e[1;38;5;241m-01\e[39;m The Empire Strikes Back
\e[1;38;5;241m2018-\e[1;38;5;75m07-23\e[39;0m\e[1;38;5;241m-01\e[39;m Return of the Jedi
\e[1;38;5;241m2018-\e[1;38;5;75m07-23\e[39;0m\e[1;38;5;241m-02\e[39;m The Force Awakens
Filter: \e[37m2018-07-2█\e[39;m
"
}
# Open an existing entry in $EDITOR.
# Accepted arguments:
#   - an exact *.txt file name             -> open it directly
#   - "last"                               -> open the newest entry
#   - a date-prefix filter (default today) -> interactively narrow down
#     until exactly one file matches, then open it.
edit() {
# Direct file name: open it as-is.
if [[ ${1: -4} == ".txt" ]]; then
if [[ ! -f "$1" ]]; then
printf "no file named %s\n" "$1"
exit 1
fi
exec $EDITOR "$1"
fi
# "last": newest file by name order (names sort chronologically).
# NOTE(review): despite edit_help saying 'all', the code matches 'last'.
if [[ "$1" == "last" ]]; then
file="$(ls -r 2>/dev/null | head -n 1)"
if [[ "$file" == "" ]]; then
printf "zero entries to choose from\n"
exit 1
fi
exec $EDITOR "$file"
fi
# NOTE(review): hyphen_count is computed but never used below.
hyphen_count=$(echo "$1" | grep -o "-" | wc -l)
filter="$1"
if [[ "$filter" == "" ]]; then
filter=$(today)
fi
# Extend the filter to the longest prefix shared by all current matches,
# so the user only ever has to type the distinguishing part.
filter=$(ls ${filter}* 2>/dev/null | common_prefix)
# Interactive narrowing loop: 0 matches -> error, 1 match -> open,
# otherwise list candidates, append the user's input to the filter and
# retry.
while true; do
num_files=$(ls ${filter}* 2>/dev/null | wc -l)
case "$num_files" in
0)
printf "\e[31mzero matching entries\e[39m\n"
exit 1
;;
1)
exec $EDITOR ${filter}*
;;
*)
list "$filter" "  "
printf "Filter: \e[37m%s\e[39;0m" "$filter"
read entry
filter_new="${filter}${entry}"
num_files_new=$(ls ${filter_new}* 2>/dev/null | wc -l)
case "$num_files_new" in
0)
printf "\e[31mzero matching entries\e[39;0m\n\n"
;;
1)
exec $EDITOR ${filter_new}*
;;
*)
filter=$(ls ${filter_new}* 2>/dev/null | common_prefix)
;;
esac
esac
done
}
# Command dispatch: the first argument selects the action. Anything else
# (including "help") falls through to the inner case, where the SECOND
# argument picks which help text to show (e.g. "txtlog help list").
case "$1" in
list)
list "$2"
;;
new)
new "$2"
;;
edit)
edit "$2"
;;
*)
case "$2" in
list)
list_help
;;
new)
new_help
;;
edit)
edit_help
;;
*)
root_help
;;
esac
;;
esac
| true
|
fb58d32af481a43ef67562a252dce7efba527016
|
Shell
|
wzbbbb/bkp_home
|
/pj29_finding_missed_job_after_crash/ses_create.sh
|
UTF-8
| 389
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Split EXP_ses_shw.txt into per-session files: every section starts at a
# line containing "ITEMS"; section i is written to ses_output_i.txt.

# BUG FIX: `wc -l FILE` prints "count filename"; redirect stdin instead so
# we get a bare number usable in the arithmetic below.
total_line=$(wc -l < EXP_ses_shw.txt)

# Line numbers of every "ITEMS" section header.
s=$(grep -n ITEMS EXP_ses_shw.txt | cut -f1 -d":")
#ses_name=`grep -n ITEMS EXP_ses_shw.txt |cut -f3 -d":"`

# Collect the header line numbers into a 1-based array.
i=1
for n in $s ; do
    array[$i]=$n
    let i+=1
done

# Emit each section: from its own header down to 3 lines before the next
# header (the gap appears to hold separator lines).
i=1
for m in $s ; do
    let j=$i+1
    head_=${array[$i]}
    if [ -n "${array[$j]}" ]; then
        let tail_=${array[$j]}-3
    else
        # BUG FIX: the LAST section has no successor header; previously
        # tail_ became "-3" and sed rejected the range. Use end-of-file.
        tail_=$total_line
    fi
    sed -n ${head_},${tail_}p EXP_ses_shw.txt >ses_output_${i}.txt
    let i+=1
done
| true
|
70bff0a1ffa9dc567bbddc8f5af57651ec0ccd67
|
Shell
|
goncalotomas/FMKe
|
/scripts/config/change_db.sh
|
UTF-8
| 438
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Point FMKe at a different target data store by rewriting the
# "target_database" configuration parameter.
set -e

# Print the list of data stores FMKe ships drivers for.
show_supported_dbs()
{
    echo "FMKe supports the following data stores:"
    for value in "antidote" "antidote_norm" "riak" "riak_norm" "redis" ; do
        echo "-$value"
    done
}

if [ "$#" -lt 1 ]; then
    echo "Error: no data store name supplied"
    show_supported_dbs
    echo "error"
    exit 1
fi

TARGETDB=$1
# Quoted so an argument containing whitespace reaches set_param.sh intact
# (SC2086).
./scripts/config/set_param.sh "target_database" "$TARGETDB"
# Under `set -e` a failing set_param already aborts the script, so reaching
# this point implies success; the explicit check is kept for clarity.
if [[ $? -eq 0 ]]; then
    echo "success"
fi
| true
|
08f4baf47f047d3b6b5e728bf40d4cf8e82c74a2
|
Shell
|
oxygen-TW/Campus-Weather-Service
|
/Install_Tools/OpenWRT_LEDE/install.sh
|
UTF-8
| 744
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Interactive installer for the campus weather-station client on
# OpenWRT/LEDE: installs packages, fetches the detector driver, and writes
# an autorun line that streams MCU data to a file.
read -p "Which is your detector type?" type
# Base packages: git, python/pip, unzip, and USB-serial kernel modules for
# the MCU link (FTDI).
opkg update
opkg install git python python3 python-pip unzip kmod-usb-serial kmod-usb-serial-ftdi
pip install --upgrade pip
pip install pyserial
pip install pymysql
git clone https://github.com/oxygen-TW/Weather-Station.git
# NOTE(review): this download saves a file named "$type", but the next two
# commands reference "driver-$type" — one of the names looks wrong; confirm
# against the server's actual file naming before relying on this script.
wget http://weather.nhsh.tp.edu.tw/download/driver/$type
unzip driver-$type
cd driver-$type
mkdir /usr/WeatherServiceClient/
mkdir /usr/WeatherServiceClient/Data/
cp database.py Driver.py /usr/WeatherServiceClient/
read -p "Which port is connect to MCU?[usually /dev/ttyUSB0] " port
# NOTE(review): the ">" on the next write already truncates the file, so
# this "clear" step is redundant.
echo "" > /usr/WeatherServiceClient/autorun.sh #clear
echo "python /usr/WeatherServiceClient/Driver.py "$port" /usr/WeatherServiceClient/Data/data.txt" > /usr/WeatherServiceClient/autorun.sh
| true
|
308a5c9c55e533384e10467824468237b73ff575
|
Shell
|
NTTDATA-CLOUDHEDGE-POC/jx3-pipeline-catalog
|
/.lighthouse/jenkins-x/ci.sh
|
UTF-8
| 888
| 3.40625
| 3
|
[] |
permissive
|
#!/usr/bin/env bash
# Regenerate the Jenkins X pipeline for each test repository using the
# build packs from this catalog, then commit and push the result.
set -e

declare -a repos=(
    "nodedemo"
)

# TODO to test locally could be: $CURDIR/../../packs
PACKDIR=/workspace/source/packs
CURDIR=$(pwd)

export PROJECT_DIR=test-projects

rm -rf "$PROJECT_DIR"
mkdir -p "$PROJECT_DIR"

for r in "${repos[@]}"
do
    echo "upgrading repository https://github.com/jenkins-x-labs-bdd-tests/$r"

    # BUG FIX: return to the starting directory first — with more than one
    # repo in the list, the relative "cd $PROJECT_DIR" used to run from
    # inside the previous repo's checkout and fail under set -e.
    cd "$CURDIR"
    cd "$PROJECT_DIR"
    git clone "https://github.com/jenkins-x-labs-bdd-tests/$r.git"
    cd "$r"

    echo "removing old build pack"
    rm -rf .lighthouse/ jenkins-x.yml charts preview Dockerfile

    echo "recreating the pipeline... in dir $(pwd)"

    # lets regenerate the build pack...
    jx project import --no-dev-pr --dry-run --batch-mode --dir $(pwd) --pipeline-catalog-dir $PACKDIR

    # Best-effort commit/push: empty diffs or a read-only token must not
    # abort the remaining repositories.
    git add * || true
    git commit -a -m "chore: upgrade pipeline library" || true
    git push || true
    echo "updated the pipeline library for $r"
done
echo "finished"
| true
|
d0b23cf6487b9254a90cc80ec177c50d28b4d574
|
Shell
|
nekottyo/dotfiles
|
/.config/zsh/logging_tmux_pane.zsh
|
UTF-8
| 242
| 3.421875
| 3
|
[] |
no_license
|
# When the shell is running inside tmux, tee the current pane's output into
# a per-pane log under ~/.logs/tmux. rotatelogs rolls the file with a
# 86400-second (daily) rotation and a 540-minute offset.
if [[ -n "${TMUX}" ]]; then
  TMUX_LOG_PATH="${HOME}/.logs/tmux"
  # Create the log directory the first time logging kicks in.
  [[ -d "${TMUX_LOG_PATH}" ]] || mkdir -p "${TMUX_LOG_PATH}"
  # File name encodes timestamp plus tmux session(#S), window(#I), pane(#P).
  tmux pipe-pane "cat | rotatelogs ${TMUX_LOG_PATH}/%Y%m%d_%H-%M-%S_#S:#I.#P.log 86400 540"
fi
| true
|
e3c5d079e80ee992fc1ee43653b1a7af34aaf53e
|
Shell
|
mgree/modernish
|
/lib/modernish/cap/BUG_SELECTRPL.t
|
UTF-8
| 684
| 2.921875
| 3
|
[
"ISC",
"CC0-1.0"
] |
permissive
|
#! /shell/bug/test/for/moderni/sh
# See the file LICENSE in the main modernish directory for the licence.
# BUG_SELECTRPL: in a shell-native 'select' loop, input that is not a menu
# item is not stored in the REPLY variable as it should be.
#
# Bug found in mksh R50 2014/09/03.
# Known to be fixed as of mksh R50 2015/04/19.
# How it works: "ok" is piped into the command substitution, where 'select'
# reads it from stdin. "ok" is not one of the menu items 1/2/3, so a correct
# shell leaves it in REPLY; REPLY is pre-seeded with "newbug" to detect a
# shell that touches REPLY in some other unexpected way.
# FD juggling: 3>&2 preserves stderr on FD 3 for the internal-error message
# while 2>/dev/null silences the menu that 'select' prints to stderr.
thisshellhas --rw=select || return 1 # not applicable
echo ok | case $(REPLY=newbug; eval 'select r in 1 2 3; do break; done'; echo "$REPLY") in
( ok ) return 1 ;; # ok, no bug
( '' ) ;; # mksh R50 bug
( newbug ) # Undiscovered bug with REPLY in 'select'!
return 1 ;;
( * ) echo "BUG_SELECTRPL.t: Internal error" 1>&3
return 2 ;;
esac 3>&2 2>/dev/null
| true
|
d7e66b0334458bf91c5d693a94b3cb5f3bf4f1be
|
Shell
|
00h-i-r-a00/moby_simulator
|
/launchAllSimulations.sh
|
UTF-8
| 438
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Fan the Moby simulator out across the achtung cluster: on each server,
# kill any stale monitor process and relaunch run_simulations.py detached,
# logging to data/logs/<server>.nohup.
# NOTE(review): the here-document delimiter is unquoted, so $SHELL and
# $server expand on the LOCAL machine before being sent over ssh —
# apparently intended for $server (used in the remote log file name), but
# confirm $SHELL is meant to show the local shell.
# NOTE(review): '&!' is zsh's background-and-disown syntax; this assumes the
# remote login shell is zsh — a POSIX sh on the far end would reject it.
for server in achtung02 achtung03 achtung04 achtung05 achtung06 achtung07 achtung08 achtung09 achtung10 achtung11 achtung12 achtung13 achtung14 achtung15 achtung16 achtung17; do
ssh $server << EOF
echo "######################################################" $SHELL
hostname
pkill -f monitor_moby_simulator.sh
cd moby/moby_simulator
nohup ./run_simulations.py > data/logs/$server.nohup 2>&1 &!
EOF
done
| true
|
12d69c6358c4644b8f02c05b13f20950b5e66e63
|
Shell
|
Vman45/shell-1
|
/snippets/statusbar.sh
|
UTF-8
| 2,426
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# vim:fileencoding=utf-8:ft=sh:foldmethod=marker
# path: ~/coding/shell/snippets/statusbar.sh
# user: klassiker [mrdotx]
# github: https://github.com/mrdotx/shell
# date: 2019-11-09 21:59:58
# Gathers CPU/RAM/swap/disk/network/uptime figures and prints them as a
# single status-bar line. All byte figures come from 'free'/'df' in KiB and
# are scaled with bc; awk printf rounds to whole M / two-decimal G.
# combining commands for output
# First two digits of the millidegree reading, i.e. whole degrees C.
cputemp="$(cat /sys/class/thermal/thermal_zone0/temp | cut -c "1-2")ºC"
cores="$(grep -c "^processor" /proc/cpuinfo)"
# Sum of per-process %CPU from ps, averaged over the core count
# (${cores:-1} guards against an empty core count).
cpuusage="$(ps aux | awk 'BEGIN {sum=0} {sum+=$3}; END {print sum}')"
cpuusage="$((${cpuusage/\.*/} / ${cores:-1}))%"
# RAM: used = "used" column + buff/cache-adjacent column 5 of 'free'.
ramtotal="$(free | awk 'NR==2 { printf "%s",$2; }')"
ramtogether="$(free | awk 'NR==2 { printf "%s",$5; }')"
ram="$(free | awk 'NR==2 { printf "%s",$3; }')"
ram="$(bc <<<"scale=3;($ramtogether+$ram)/1024" | awk '{ printf("%.0f\n",$1) }')M"
ramtotalg="$(bc <<<"scale=3;$ramtotal/1024/1024" | awk '{ printf("%.2f\n",$1) }')G"
ramtotal="$(bc <<<"scale=3;$ramtotal/1024" | awk '{ printf("%.0f\n",$1) }')M"
ramusage="$(bc <<<"scale=3;$ram/($ramtotal/100)" | awk '{ printf("%.0f\n",$1) }')%"
# Swap: row 3 of 'free'; same scaling scheme as RAM.
swaptotal="$(free | awk 'NR==3 { printf "%s",$2; }')"
swap="$(free | awk 'NR==3 { printf "%s",$3; }')"
swap="$(bc <<<"scale=3;$swap/1024" | awk '{ printf("%.0f\n",$1) }')M"
swaptotalg="$(bc <<<"scale=3;$swaptotal/1024/1024" | awk '{ printf("%.2f\n",$1) }')G"
swaptotal="$(bc <<<"scale=3;$swaptotal/1024" | awk '{ printf("%.0f\n",$1) }')M"
swapusage="$(bc <<<"scale=3;$swap/($swaptotal/100)" | awk '{ printf("%.0f\n",$1) }')%"
# Root disk figures, hard-wired to /dev/sda2.
hddtotal="$(df /dev/sda2 | awk 'NR==2 { printf "%s",$2; }')"
hdd="$(df /dev/sda2 | awk 'NR==2 { printf "%s",$3; }')"
hdd="$(bc <<<"scale=3;$hdd/1024/1024" | awk '{ printf("%.2f\n",$1) }')G"
hddtotal="$(bc <<<"scale=3;$hddtotal/1024/1024" | awk '{ printf("%.2f\n",$1) }')G"
hddusage="$(df /dev/sda2 | awk 'NR==2 { printf "%s",$5; }')"
# Wireless: ESSID plus link quality out of 70 mapped to a percentage
# (interface name wlp1s0 is hard-wired).
wlan="$(iwgetid -r)"
wlansignal="$(iwconfig wlp1s0 | grep -i Link | cut -c "24-25")"
wlansignal="$(bc <<<"scale=3;$wlansignal/70*100" | awk '{ printf("%.0f\n",$1) }')%"
ip="$(ip -4 addr show wlp1s0 | grep -oP "(?<=inet ).*(?=/)")"
name="$(users)@$(hostname)"
clock="$(date '+%a, %e %B %G, %k:%M')"
# Strip plurals/commas from 'uptime -p' and abbreviate units to w/d/h/m.
uptime="$(uptime -p | sed 's/s//g; s/,//g; s/up //g; s/ week/w/g; s/ day/d/g; s/ hour/h/g; s/ minute/m/g')"
# combination
statusbar="cpu: $cputemp [$cpuusage] | ram: $ram/$ramtotalg [$ramusage] | swap: $swap/$swaptotalg [$swapusage] | hdd: $hdd/$hddtotal [$hddusage] | wlan: $wlan [$wlansignal] | ip: $ip | name: $name | uptime: $uptime | $clock"
# output
echo -e "$statusbar"
| true
|
f608c7fe4050aa7b523d6d1a85c6068e75c81f84
|
Shell
|
CAMOBAP/hackerrank
|
/golang/run_test.sh
|
UTF-8
| 1,424
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build each HackerRank Go challenge under src/ and run it against its
# test/inputNN.txt fixtures, diffing the output with test/outputNN.txt.
# With --list, print the available challenge directories and exit.

# Directories under src/ that are not challenges.
IGNORE_DIRS="common|golang.org|Makefile"

while true; do
    case $1 in
        # Bug fix: the grep pattern was single-quoted, so $IGNORE_DIRS never
        # expanded and the ignore list was not applied to --list output.
        --list) challenges=$(ls -1 src | grep -vwE "($IGNORE_DIRS)")
                echo "Challenges list:" $challenges
                exit 0;;
        *) break;;
    esac
done

# Per-test wall-clock limit, enforced via gtimeout (GNU coreutils timeout).
TEST_TIMEOUT=5s

# Default to every challenge when none are named on the command line.
challenges="$@"
if [ -z "$challenges" ]; then
    challenges=$(ls -1 src | grep -vwE "($IGNORE_DIRS)")
fi

for challenge in $challenges; do
    echo $challenge
    if [ -d "./src/$challenge" ]; then
        go install $challenge
        # Fixtures are numbered input00.txt .. input99.txt.
        for i in {0..99}; do
            input=$(printf "src/$challenge/test/input%02d.txt" $i)
            ref_output=$(printf "src/$challenge/test/output%02d.txt" $i)
            cal_output=$(printf "bin/${challenge}_output%02d.txt" $i)
            if [ -f "$input" ] ; then
                start_time=$(date +%s)
                echo "bin/$challenge < $input > $cal_output"
                OUTPUT_PATH=$cal_output gtimeout $TEST_TIMEOUT bin/$challenge < $input > $cal_output
                test_exitcode=$?
                end_time=$(date +%s)
                diff_time=$(echo "$end_time - $start_time" | bc)
                # gtimeout exits 124 when the command was killed on timeout.
                if [ $test_exitcode == 124 ]; then
                    test_status="timeout"
                else
                    diff $ref_output $cal_output > /dev/null
                    diff_exitcode=$?
                    if [ $diff_exitcode == 0 ]; then
                        test_status="success"
                    else
                        test_status="failure"
                    fi
                fi
                echo "[$test_status] $challenge / $i / $diff_time sec."
            fi
        done
    else
        echo "[error] there is no $challenge"
    fi
done
| true
|
f9f5e12db28a2676a7e8722d2af68ddd8d05e82f
|
Shell
|
stevengonsalvez/bash-functions
|
/gce-functions
|
UTF-8
| 1,809
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Shell function library for day-to-day Google Compute Engine chores
# (meant to be sourced). All listing helpers parse 'gcloud --format json'
# output with jq.
: ${DEBUG:=1}
# Print a debug line to stderr when DEBUG is non-empty (default: on).
debug() {
[[ "$DEBUG" ]] && echo "[DEBUG] $*" 1>&2
}
# Re-source this file to pick up edits.
gce_reload_functions() {
source $BASH_SOURCE
}
# Overwrite this file with the latest published version, then re-source it.
gce_upgrade_functions() {
curl -Lo $BASH_SOURCE j.mp/gce-functions
source $BASH_SOURCE
}
# Print all zone names, one per line.
gce_zones() {
gcloud compute zones list --format json|jq .[].name -r
}
# Print all disk names, one per line.
gce_list_disks() {
gcloud compute disks list --format json | jq ".[].name" -r
}
# Delete unattached disks across every zone.
gce_clean_unused_disks() {
for zone in $(gcloud compute zones list --format json|jq .[].name -r); do
gce_clean_unused_disks_in_zone $zone
done
}
# Delete (in parallel, one background job per instance) every instance whose
# name matches the given regexp prefix.
gce_delete_instances() {
declare desc="deletes instances matching a regexp"
declare nameRegexp=$1
: ${nameRegexp:? required}
gcloud compute instances list -r ${nameRegexp}.* --format json \
| jq '.[]|.name,.zone' -r | \
while read name && read zone; do
gcloud compute instances delete --quiet --zone $zone $name &
done
}
# Delete every disk in a zone that no instance references: build a
# |-joined list of attached disk sources, turn it into a grep alternation,
# and feed the disks NOT matching it to 'gcloud compute disks delete'.
gce_clean_unused_disks_in_zone() {
declare zone=$1
: ${zone:? required}
debug "[$zone] cleaning unsed disks"
local used=$(
gcloud compute instances list --zone $zone --format json \
| jq '[.[]|.disks[].source]|join("|")' -r \
)
local grepFilter=$(echo "$used"| sed "s:|:\\\\|:g")
gcloud compute disks list --zone $zone --format json \
| jq ".[].name" -r \
| grep -v "$grepFilter" \
| xargs --no-run-if-empty -t gcloud compute disks delete --zone $zone
}
# Plain 'gcloud' instance listing.
gce_list_running() {
gcloud compute instances list
}
# List instances whose name contains the given substring.
gce_list_by_regexp() {
gcloud compute instances list --regexp ".*$1.*"
}
# Tabular listing: name, zone, status, external (NAT) IP, internal IP.
gce_list_runnings_() {
gcloud compute instances list --format json \
|jq ".[]|.name, .zone, .status, .networkInterfaces[0].accessConfigs[0].natIP, .networkInterfaces[0].networkIP" -r \
|xargs -n 5 printf "%-40s | %-15s | %-10s | %-15s | %-15s \n"
}
| true
|
9311075eb0c6474e0b062baa1438e174b2b0314d
|
Shell
|
rsling/cow
|
/src/de/slurm/de16-smor.sh
|
UTF-8
| 474
| 2.953125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# FOR ARRAY JOB!!!
# Pass: dir (absolut corpus root, like "decow16"), LIST FILE, offset in file list
# SLURM array task: pick the input file for this task ID out of the list
# file (skipping the first $3-1 entries), then run cow16-smor on it,
# mirroring the 05divide/ path layout under 09smor/.
#SBATCH --mem=1G
#SBATCH --time=00:20:00
set -e
set -u
# Create true in file name from
# Line selection: tail applies the offset, sed picks the line matching this
# array task's index.
input="`cat ${2} | tail -n +${3} | sed -n ${SLURM_ARRAY_TASK_ID},${SLURM_ARRAY_TASK_ID}p`"
inf="${1}/05divide/${input}"
odn_smor="${1}/09smor/$(dirname ${input})"
ofn_smor="${1}/09smor/${input}"
# Ensure the output directory exists and remove any stale output first.
mkdir -p ${odn_smor}
\rm -f ${ofn_smor}
cow16-smor ${inf} ${ofn_smor}
| true
|
304db10eca3d52decebdfd04563e68bde830535e
|
Shell
|
johnellis1392/workspace
|
/aws/lambda/notes.sh
|
UTF-8
| 929
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Scratch notes / helpers for working with an AWS Lambda function:
# list functions, fetch the code bundle URL, download and re-upload it.

function_name=example-handler

# List all functions
aws lambda list-functions

# Get configuration information for function
aws lambda get-function --function-name ${function_name}

# Download zip file for a particular function
# Prints the pre-signed S3 URL of the function's code bundle.
get_function_by_name() {
  [[ $# != 1 ]] && echo "Wrong Arity" && return 1
  # Bug fix: the original line lacked the command substitution, so it ran
  # the non-existent command 'lambda' with function_name=aws as an
  # environment prefix instead of capturing the URL.
  function_name=$(aws lambda get-function --function-name ${function_name} | \
    python3 -c 'import json, sys; print(json.loads(sys.stdin.read())["Code"]["Location"])')
  echo ${function_name}
  # NOTE(review): the $1 demanded by the arity check is never used; the
  # function reads/overwrites the global function_name — confirm intent.
}

# Download and unzip a function
download_function() {
  local output_file=example-handler.zip
  curl $(get_function_by_name) >> ${output_file}
  unzip ${output_file}
}

# Zip the handler source and upload it as the function's new code.
push_function() {
  local zip_file=example-handler.zip
  local handler_file=lambda_function.py
  zip ${zip_file} ${handler_file}
  aws lambda update-function-code \
    --function-name ${function_name} \
    --zip-file fileb://${PWD}/${zip_file}
}
| true
|
d78aba936e3826973862efb6d2e9328e76b2e796
|
Shell
|
phoxelua/ar_sculpting
|
/make
|
UTF-8
| 257
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build or clean the Leap Motion sample: './make clean' removes the compiled
# classes; any other invocation compiles Sample.java against the Leap and
# json-simple jars and then runs the manual Makefile.
# Bug fix: $1 is now quoted with a default — previously, running with no
# argument made '[ $1 = "clean" ]' a malformed test ('[ = "clean" ]').
if [ "${1:-}" = "clean" ]; then
	echo "Cleaning..."
	cd src/ && rm *.class && make clean -f Makefile.man
else
	echo "Building..."
	cd src/ && javac -classpath "/usr/lib/Leap/LeapJava.jar:json-simple-1.1.1.jar" Sample.java && make -f Makefile.man
fi
| true
|
32c614183b82bf9459ae1521edcfb5ac5b1380ab
|
Shell
|
SendOutCards/jenkins-standalone
|
/jenkins-standalone.sh
|
UTF-8
| 2,415
| 3.3125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Download a pinned Jenkins LTS master plus a fixed plugin set, template the
# Mesos/ZooKeeper and logstash/Redis settings into the config files, and run
# the master in the foreground. Expects $HOST and $PORT in the environment.
set -e

# $JENKINS_VERSION should be an LTS release
JENKINS_VERSION="1.596.2"

# List of Jenkins plugins, in the format "${PLUGIN_NAME}/${PLUGIN_VERSION}"
JENKINS_PLUGINS=(
    "credentials/1.22"
    "email-ext/2.39"
    "git/2.3.5"
    "git-client/1.17.1"
    "greenballs/1.14"
    "slack/1.8"
    "job-dsl/1.29"
    "logstash/1.0.4"
    "metadata/1.1.0b"
    "mesos/0.7.0"
    "monitoring/1.55.0"
    "parameterized-trigger/2.25"
    "saferestart/0.3"
    "scm-api/0.2"
    "script-security/1.13"
    "ssh-credentials/1.10"
    "token-macro/1.10"
    "postbuildscript/0.17"
)

JENKINS_WAR_MIRROR="http://mirrors.jenkins-ci.org/war-stable"
JENKINS_PLUGINS_MIRROR="http://mirrors.jenkins-ci.org/plugins"

# Ensure we have an accessible wget.
# Bug fix: under 'set -e' the script died on the failing 'command -v' before
# the original '[[ $? != 0 ]]' check could ever print its message.
if ! command -v wget > /dev/null; then
    echo "Error: wget not found in \$PATH"
    echo
    exit 1
fi

# Accept ZooKeeper paths on the command line.
# Bug fix: '[[ ! $# > 3 ]]' compares strings lexicographically, which breaks
# for argument counts of 10 or more; use an arithmetic comparison instead.
if [[ $# -lt 4 ]]; then
    echo "Usage: $0 -z zk://10.132.188.212:2181[, ... ]/mesos -r redis.example.com"
    echo
    exit 1
fi

while [[ $# -gt 1 ]]; do
    key="$1"
    shift

    case $key in
        -z|--zookeeper)
            ZOOKEEPER_PATHS="$1"
            shift
            ;;
        -r|--redis-host)
            REDIS_HOST="$1"
            shift
            ;;
        *)
            echo "Unknown option: ${key}"
            exit 1
            ;;
    esac
done

# Jenkins WAR file (wget -nc skips the download if it already exists)
if [[ ! -f "jenkins.war" ]]; then
    wget -nc "${JENKINS_WAR_MIRROR}/${JENKINS_VERSION}/jenkins.war"
fi

# Jenkins plugins
[[ ! -d "plugins" ]] && mkdir "plugins"

for plugin in ${JENKINS_PLUGINS[@]}; do
    # Split "name/version" and build the mirror path name/version/name.hpi.
    IFS='/' read -a plugin_info <<< "${plugin}"
    plugin_path="${plugin_info[0]}/${plugin_info[1]}/${plugin_info[0]}.hpi"
    wget -nc -P plugins "${JENKINS_PLUGINS_MIRROR}/${plugin_path}"
done

# Jenkins config files: substitute the _MAGIC_* placeholders in place.
sed -i "s!_MAGIC_ZOOKEEPER_PATHS!${ZOOKEEPER_PATHS}!" config.xml
sed -i "s!_MAGIC_REDIS_HOST!${REDIS_HOST}!" jenkins.plugins.logstash.LogstashInstallation.xml
sed -i "s!_MAGIC_JENKINS_URL!http://${HOST}:${PORT}!" jenkins.model.JenkinsLocationConfiguration.xml

# Start the master
export JENKINS_HOME="$(pwd)"

java -jar jenkins.war \
    -Djava.awt.headless=true \
    --webroot=war \
    --httpPort=${PORT} \
    --ajp13Port=-1 \
    --httpListenAddress=0.0.0.0 \
    --ajp13ListenAddress=127.0.0.1 \
    --preferredClassLoader=java.net.URLClassLoader \
    --logfile=../jenkins.log
| true
|
f89c8c447ac00727f6a28b7e0bc112b2f0d59f70
|
Shell
|
skywind3000/docker
|
/mysql/8.0/mysql.sh
|
UTF-8
| 5,927
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: mysql
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Should-Start: $network $named $time
# Should-Stop: $network $named $time
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start and stop the mysql database server daemon
# Description: Controls the main MySQL database server daemon "mysqld"
# and its wrapper script "mysqld_safe".
### END INIT INFO
#
set -e
set -u
${DEBIAN_SCRIPT_DEBUG:+ set -v -x}
# Nothing to do if the server binary is not installed.
test -x /usr/sbin/mysqld || exit 0
. /lib/lsb/init-functions
SELF=$(cd $(dirname $0); pwd -P)/$(basename $0)
# mysqladmin invocation using the Debian maintenance credentials.
MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf"
# priority can be overridden and "-s" adds output to stderr
ERR_LOGGER="logger -p daemon.err -t /etc/init.d/mysql -i"
if [ -f /etc/default/mysql ]; then
. /etc/default/mysql
fi
# Also source default/mariadb in case the installation was upgraded from
# packages originally installed from MySQL.org repositories, which have
# had support for reading /etc/default/mariadb since March 2016.
if [ -f /etc/default/mariadb ]; then
. /etc/default/mariadb
fi
# Safeguard (relative paths, core dumps..)
cd /
umask 077
# mysqladmin likes to read /root/.my.cnf. This is usually not what I want
# as many admins e.g. only store a password without a username there and
# so break my scripts.
export HOME=/etc/mysql/
## Fetch a particular option from mysql's invocation.
#
# Usage: void mysqld_get_param option
mysqld_get_param() {
/usr/sbin/mysqld --print-defaults \
| tr " " "\n" \
| grep -- "--$1" \
| tail -n 1 \
| cut -d= -f2
}
## Do some sanity checks before even trying to start mysqld.
sanity_checks() {
# check for config file
if [ ! -r /etc/mysql/my.cnf ]; then
log_warning_msg "$0: WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz"
echo "WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" | $ERR_LOGGER
fi
# check for diskspace shortage
# (awk exits non-zero — i.e. "OK" for the if — when >4096 blocks are free)
datadir=`mysqld_get_param datadir`
if LC_ALL=C BLOCKSIZE= df --portability $datadir/. | tail -n 1 | awk '{ exit ($4>4096) }'; then
log_failure_msg "$0: ERROR: The partition with $datadir is too full!"
echo "ERROR: The partition with $datadir is too full!" | $ERR_LOGGER
exit 1
fi
}
## Checks if there is a server running and if so if it is accessible.
#
# check_alive insists on a pingable server
# check_dead also fails if there is a lost mysqld in the process list
#
# Usage: boolean mysqld_status [check_alive|check_dead] [warn|nowarn]
mysqld_status () {
ping_output=`$MYADMIN ping 2>&1`; ping_alive=$(( ! $? ))
ps_alive=0
pidfile=`mysqld_get_param pid-file`
if [ -f "$pidfile" ] && ps `cat $pidfile` >/dev/null 2>&1; then ps_alive=1; fi
if [ "$1" = "check_alive" -a $ping_alive = 1 ] ||
[ "$1" = "check_dead" -a $ping_alive = 0 -a $ps_alive = 0 ]; then
return 0 # EXIT_SUCCESS
else
if [ "$2" = "warn" ]; then
echo -e "$ps_alive processes alive and '$MYADMIN ping' resulted in\n$ping_output\n" | $ERR_LOGGER -p daemon.debug
fi
return 1 # EXIT_FAILURE
fi
}
#
# main()
#
case "${1:-''}" in
'start')
sanity_checks;
# Start daemon
log_daemon_msg "Starting MySQL database server" "mysqld"
if mysqld_status check_alive nowarn; then
log_progress_msg "already running"
log_end_msg 0
else
# Could be removed during boot
test -e /var/run/mysqld || install -m 755 -o mysql -g root -d /var/run/mysqld
# Start MySQL!
# '2>&1 >/dev/null': only stderr reaches the logger; stdout is discarded.
/usr/bin/mysqld_safe "${@:2}" 2>&1 >/dev/null | $ERR_LOGGER &
# Poll up to MYSQLD_STARTUP_TIMEOUT (default 30) seconds for liveness.
for i in $(seq 1 "${MYSQLD_STARTUP_TIMEOUT:-30}"); do
sleep 1
if mysqld_status check_alive nowarn ; then break; fi
log_progress_msg "."
done
if mysqld_status check_alive warn; then
log_end_msg 0
# Now start mysqlcheck or whatever the admin wants.
# output=$(/etc/mysql/debian-start)
# NOTE(review): the debian-start hook is commented out here and output is
# hardcoded empty, so the post-start action below never fires — confirm
# this is intentional for this container image.
output=""
if [ -n "$output" ]; then
log_action_msg "$output"
fi
else
log_end_msg 1
log_failure_msg "Please take a look at the syslog"
fi
fi
;;
'stop')
# * As a passwordless mysqladmin (e.g. via ~/.my.cnf) must be possible
# at least for cron, we can rely on it here, too. (although we have
# to specify it explicit as e.g. sudo environments points to the normal
# users home and not /root)
log_daemon_msg "Stopping MySQL database server" "mysqld"
if ! mysqld_status check_dead nowarn; then
set +e
shutdown_out=`$MYADMIN shutdown 2>&1`; r=$?
set -e
if [ "$r" -ne 0 ]; then
# Graceful shutdown failed: escalate to SIGTERM, wait up to 600s,
# then SIGKILL as a last resort.
log_end_msg 1
[ "$VERBOSE" != "no" ] && log_failure_msg "Error: $shutdown_out"
log_daemon_msg "Killing MySQL database server by signal" "mysqld"
killall -15 mysqld
server_down=
for i in `seq 1 600`; do
sleep 1
if mysqld_status check_dead nowarn; then server_down=1; break; fi
done
if test -z "$server_down"; then killall -9 mysqld; fi
fi
fi
if ! mysqld_status check_dead warn; then
log_end_msg 1
log_failure_msg "Please stop MySQL manually and read /usr/share/doc/mariadb-server-10.1/README.Debian.gz!"
exit -1
else
log_end_msg 0
fi
;;
'restart')
set +e; $SELF stop; set -e
shift
$SELF start "${@}"
;;
'reload'|'force-reload')
log_daemon_msg "Reloading MySQL database server" "mysqld"
$MYADMIN reload
log_end_msg 0
;;
'status')
if mysqld_status check_alive nowarn; then
log_action_msg "$($MYADMIN version)"
else
log_action_msg "MySQL is stopped."
exit 3
fi
;;
'bootstrap')
# Bootstrap the cluster, start the first node
# that initiates the cluster
log_daemon_msg "Bootstrapping the cluster" "mysqld"
$SELF start "${@:2}" --wsrep-new-cluster
;;
*)
echo "Usage: $SELF start|stop|restart|reload|force-reload|status|bootstrap"
exit 1
;;
esac
| true
|
93becab56e182c17de4e0594d2aa7f58d7531220
|
Shell
|
icyphox/axiom
|
/interact/axiom-select
|
UTF-8
| 234
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Select the active axiom instance and record it in ~/.axiom/profile.json
# as {"instance": NAME}. With no argument, pick interactively from the
# DigitalOcean droplet list via fzf; otherwise use $1 directly.
if [ -z "$1" ]
then
	ans=$(doctl compute droplet list | awk '{ print $2 }' | fzf)
	# Bug fix: 'jq' with no filter prints its usage and writes nothing to
	# the redirect target; the identity filter '.' is required to actually
	# pretty-print the JSON into the profile file.
	echo "{\"instance\":\"$ans\"}" | jq . > ~/.axiom/profile.json
else
	echo "{\"instance\":\"$1\"}" | jq . > ~/.axiom/profile.json
fi
| true
|
b5cd50c0a159c9efa65a96928ab2d3707da99835
|
Shell
|
aeternity/terraform-aws-api-gateway
|
/test/health-check.sh
|
UTF-8
| 1,415
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test every public endpoint of a freshly deployed API gateway whose
# FQDN is read from 'terraform output'. Any failed check aborts the script
# (set -e); curl retries up to 10 times on connection refusal.
# NOTE(review): the last two echo messages contain typos ("Channes",
# "WebScoket") — worth fixing upstream; left as-is here.
set -Eeuo pipefail

API_ADDR=$(terraform output -json |jq -r '."api_gate_fqdn"."value"')

echo "Checking" $API_ADDR
# Basic health check endpoint
curl -sSf -o /dev/null --retry 10 --retry-connrefused https://${API_ADDR}/healthz

echo "Checking HTTP -> HTTPS redirect"
# HTTP -> HTTPS redirect
curl -sSf -L -o /dev/null --retry 10 --retry-connrefused http://${API_ADDR}/v2/status

echo "Checking External API"
# External API
curl -sSf -o /dev/null --retry 10 --retry-connrefused https://${API_ADDR}/v2/status

echo "Checking Middleware API"
# Middleware API
curl -sSf -o /dev/null --retry 10 --retry-connrefused https://${API_ADDR}/mdw/status

echo "Checking Internal API (dry-run)"
# Internal API (dry-run)
# POST an empty dry-run and require a 200 (no -f: status checked manually).
EXT_STATUS=$(curl -sS -o /dev/null --retry 10 --retry-connrefused \
    -X POST -H 'Content-type: application/json' -d '{"txs": []}' \
    -w "%{http_code}" \
    https://${API_ADDR}/v2/debug/transactions/dry-run)
[ $EXT_STATUS -eq 200 ]

echo "Checking State Channes WebScoket API"
# State Channels WebSocket API
# 426 Upgrade Required is the expected answer for a plain-HTTP probe of a
# WebSocket endpoint.
WS_STATUS=$(curl -sS -o /dev/null --retry 10 --retry-connrefused \
    -w "%{http_code}" \
    https://${API_ADDR}/channel?role=initiator)
[ $WS_STATUS -eq 426 ]

echo "Checking Middleware WebSocket API"
# Middleware WebSocket API
WS_STATUS=$(curl -sS -I -o /dev/null --retry 10 --retry-connrefused \
    -w "%{http_code}" \
    https://${API_ADDR}/mdw/websocket)
[ $WS_STATUS -eq 426 ]
| true
|
bf1d129f0b827ad87eec43560f6f7745161b9fa0
|
Shell
|
wesleysan7os/sysadmin
|
/roteiro03/script8.sh
|
UTF-8
| 196
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether the path in $1 exists as a regular file and/or a directory
# (messages intentionally in Portuguese, unchanged).
# Bug fix: $1 is now quoted with a default. Unquoted, a missing argument
# collapsed the test to '[ -f ]', which is TRUE (one-argument string test),
# so the script wrongly claimed the file was found.
if [ -f "${1:-}" ]; then
	echo "Arquivo encontrado"
else
	echo "Arquivo nao encontrado"
fi
if [ -d "${1:-}" ]; then
	echo "Diretorio existe"
else
	echo "Diretorio nao existe ou nao encontrado"
fi
| true
|
14380aea106b3cf002dd506b494c773545154b0c
|
Shell
|
pbespechnyi/os-diy-jetty
|
/start.sh
|
UTF-8
| 332
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# OpenShift DIY cartridge launcher: point Jetty's webapps at the repo's
# deployments directory, then start Jetty detached on the cartridge's
# IP/port, recording its PID for later shutdown.
cd ${OPENSHIFT_DATA_DIR}jetty
# Replace any existing webapps entry (symlink or real directory) with a
# symlink into the deployed repository.
if [ -L webapps ]; then
rm webapps
else
rm -rf webapps
fi
ln -s ${OPENSHIFT_REPO_DIR}deployments webapps
CMD="java -Xms384m -Xmx412m -jar start.jar -Djetty.host=$OPENSHIFT_DIY_IP -Djetty.port=$OPENSHIFT_DIY_PORT"
# Run in the background, survive logout, log to server.log, save the PID.
nohup $CMD > $OPENSHIFT_LOG_DIR/server.log 2>&1 &
echo $! > jetty.pid
| true
|
d7440ad4690d976b83ecb9177199c065e019b5a6
|
Shell
|
z7n/shell_scripts
|
/dotfiles_setup.sh
|
UTF-8
| 111
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copy dotfiles to home directory: dotfiles/NAME -> $HOME/.NAME
# Bug fixes: (1) tilde expansion never happens on the result of a parameter
# expansion, so the original '${f/dotfiles\//~/.}' produced a literal path
# starting with '~'; (2) iterate the glob directly instead of parsing 'ls'.
for f in dotfiles/*; do
	[ -e "$f" ] || continue   # skip the literal pattern when dir is empty
	cp "$f" "$HOME/.${f#dotfiles/}"
done
| true
|
8c012330ab5aa80aa25c7dd8c015c53eb00df543
|
Shell
|
hoernsten/lxd
|
/modules/create
|
UTF-8
| 4,771
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
##################################
##### Create a new container #####
##################################
# Interactively gather an LXD container spec (name, image, disk/memory/CPU
# limits, profile, pool), validate each answer, create the container with
# 'lxc init', apply the limits via 'lxc config', and start it.
# NOTE(review): the unquoted [ -z $var ] tests throughout rely on 'read'
# never returning whitespace-containing answers — confirm acceptable.
# Prompt for container configuration details
read -p "Container name [default=random]: " container
read -p "Container image [default=ubuntu:20.04]: " image
read -p "Container disk quota (e.g. 500MB or 10GB) [default=none]: " quota
read -p "Container disk priority (1-10) [default=5]: " disk
read -p "Container memory limit (e.g. 512MB or 4GB) [default=512MB]: " memory
read -p "Container CPU core count limit (1-$(nproc)) [default=1]: " cpu
read -p "Container CPU priority (1-10) [default=5]: " priority
read -p "Container profile [default=default]: " profile
read -p "Hard or soft memory limit (hard/soft) [default=soft]: " enforce
read -p "Allow swap (yes/no) [default=yes]: " swap
read -p "Autostart container at boot time (yes/no) [default=no]: " autostart
read -p "Enter storage pool [default=default]: " pool
# Revert to the default quota if no input is provided
if [ -z $quota ]; then
echo "No quota selected"
# Check if the value provided is a valid number that does not start with "0" and ends with either "MB" or "GB"
elif [[ ! $quota =~ ^([1-9]{1}|[1-9][0-9]{1,})(MB|GB)$ ]]; then
echo "Error: Invalid quota"
exit
fi
# Revert to the default disk priority level if no input is provided
# NOTE(review): this branch sets 'priority' (the CPU priority variable),
# not 'disk', and the disk priority is never applied via 'lxc config'
# below — looks like a copy/paste slip; confirm upstream.
if [ -z $disk ]; then
priority=5
# Check if the value provided is within the 1-10 range
elif [[ ! $disk =~ ^([1-9]|10)$ ]]; then
echo "Error: Invalid disk priority value"
exit
fi
# Revert to the default memory limit if no input is provided
if [ -z $memory ]; then
memory=512MB
# Check if the value provided is valid
elif [[ ! $memory =~ ^[0-9]{1,}(MB|GB)$ ]]; then
echo "Error: Invalid memory limit"
exit
fi
# Revert to a soft memory limit unless a hard limit is specified
if [[ $enforce != "hard" || ! $enforce ]]; then
enforce=soft
fi
# Revert to the default value unless otherwise specified
if [[ $swap != "no" || ! $swap ]]; then
swap=true
else
swap=false
fi
# Revert to the default cpu count if no input is provided
if [ -z $cpu ]; then
cpu=1
# Check if the value provided is a valid number that does not start with "0"
elif [[ $cpu =~ ^([1-9]{1}|[1-9][0-9]{1,})$ ]]; then
# Check if there are enough cores available
if (( $cpu <= $(nproc --all) )); then
:
else
echo "Error: Not enough cores"
exit
fi
else
echo "Error: Invalid CPU core value"
exit
fi
# Revert to the default CPU priority level if no input is provided
if [ -z $priority ]; then
priority=5
# Check if the value provided is within the 1-10 range
elif [[ ! $priority =~ ^([1-9]|10)$ ]]; then
echo "Error: Invalid CPU priority value"
exit
fi
# Revert to the default autostart option if autostart is not desired
if [[ $autostart == "yes" ]]; then
autostart=true
else
autostart=false
fi
# Generate a randomized container name if none was provided
if [ -z $container ]; then
container=$(cat /dev/urandom | tr -dc 'a-z' | fold -w 12 | head -n 1)
fi
# Check if the container already exists
# NOTE(review): grep -oq matches substrings, so a name that is contained in
# another container's name will also match — confirm acceptable.
if lxc list --columns="n" | grep -oq $container; then
echo "Error: Container $container already exists"
exit
fi
# Revert to the default pool if no input is provided
if [ -z $pool ]; then
pool=default
fi
# Check if the storage pool exists
if lxc storage show $pool 2>&1 | grep -oq 'Error: not found'; then
echo "Error: Storage pool does not exist"
exit
fi
# Revert to the default profile if no input is provided
if [ -z $profile ]; then
profile=default
fi
# Check if the container profile exists
if lxc profile show $profile 2>&1 | grep -oq 'Error: not found'; then
echo "Error: Container profile does not exist"
exit
fi
# Revert to default image if no input is provided
if [ -z $image ]; then
image=ubuntu:20.04
fi
# Check if the image exists locally and create the container
if lxc image list local:$image --columns="l" | grep -q $image; then
lxc init -p $profile local:$image $container
else
echo "Error: Image could not be found"
exit
fi
# Apply resource constraints
if lxc list --columns="n" | grep -oq $container; then
lxc config set $container limits.memory $memory
lxc config set $container limits.memory.enforce $enforce
lxc config set $container limits.memory.swap $swap
lxc config set $container limits.cpu $cpu
lxc config set $container limits.cpu.priority $priority
lxc config set $container boot.autostart $autostart
if [ ! -z $quota ]; then
lxc config device add $container root disk path=/ pool=$pool size=$quota 1>/dev/null
fi
else
echo "Error: Container creation failed"
exit
fi
# Start the container
echo "Starting $container"
lxc start $container
| true
|
6445ba0f385c2e87abc13c282488aebc3fef0406
|
Shell
|
y2kbugger/bash_config
|
/.bashrc
|
UTF-8
| 2,845
| 3.28125
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# Interactive bash configuration shared between Linux and Windows (MSYS):
# eternal history, virtualenv-guarded pip, per-platform prompt and aliases.
# check if on windows
# (an MSYS-style /c drive mount is taken as the Windows marker)
if [ -d /c ]; then
win=true
else
win=false
fi
# If not running interactively, don't do anything
# old ways:
# [[ "$-" in *i* ]] && return
# [[ "${-#*i}" != "$-" ]] && return
# more robust interactivity checker in case "$-" is set to more than just "i".
# See
# https://www.gnu.org/software/bash/manual/html_node/Is-this-Shell-Interactive_003f.html
# and http://unix.stackexchange.com/questions/129231/what-does-i-mean
[[ $- != *i* ]] && return
# make prompt short on android
case ":$PATH:" in
*com.termux*) PS1='[@pix2 \W]\$ ';;
*) PS1='[\u@\h \W]
\$ ';;
esac
# Allow symlinks, requires enabling developer mode in windows 10
if [ ${win} = true ]; then
export MSYS=winsymlinks:nativestrict
fi
# don't let pip work without using virtualenvs
if [ ${win} = false ]; then
export PIP_REQUIRE_VIRTUALENV=true
fi
# allow to overcome the above with pipdo !!
# todo check for conda env??? instead of win check???
# pipdo: run a pip command with the virtualenv requirement lifted, but only
# permit installs that use --user; a bare 'install' is refused with a hint.
function pipdo {
case "$@" in
*install*--user*)
PIP_REQUIRE_VIRTUALENV=false $@
;;
*install*)
echo 'Remember to `pip install --user`'
;;
*)
PIP_REQUIRE_VIRTUALENV=false $@
;;
esac
}
# preserving bash history across multiple ttys
# (append on every prompt; -1 sizes mean "unlimited")
shopt -s histappend
PROMPT_COMMAND="history -a;$PROMPT_COMMAND"
HISTFILESIZE=-1
HISTSIZE=-1
HISTFILE=~/.bash_eternal_history
# blacklist some commands from history
HISTCONTROL=ignoredups
HISTIGNORE=$'[ \t]*' # starting with tab or spac
HISTIGNORE+=':&' # bg procs
HISTIGNORE+=':[fb]g'
HISTIGNORE+=':exit'
HISTIGNORE+=':q'
HISTIGNORE+=':ls'
# Stop git's repo discovery from walking above $HOME.
GIT_CEILING_DIRECTORIES=$HOME
# aliases
if [ -r ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
if [ -r ~/.bash_aliases_win ]; then
. ~/.bash_aliases_win
fi
if [ -r ~/.bash_aliases_home ]; then
. ~/.bash_aliases_home
fi
# colors for ls
if [ -r ~/.dir_colors ]; then
eval "$(dircolors --bourne-shell ~/.dir_colors)"
fi
# enable layered glob echo ~/.vim/**/*.vim
shopt -s globstar
# git completion and cute PS1 DECO
if [ ${win} = true ]; then
# . /etc/profile.d/git-prompt.sh
# code below is a modified version of the script above
PS1='\[\033]0;\w\007\]' # set window title y2k
PS1="$PS1"'\n' # new line
PS1="$PS1"'\[\033[32m\]' # change to green
PS1="$PS1"'\u@\h ' # user@host<space>
PS1="$PS1"'\[\033[33m\]' # change to brownish yellow
PS1="$PS1"'\w' # current working directory
PS1="$PS1"'\[\033[0m\]' # reset color
PS1="$PS1"'\n' # new line
PS1="$PS1"'$ ' # prompt: always $
MSYS2_PS1="$PS1" # for detection by MSYS2 SDK's bash.basrc
fi
if [ ${win} = true ]; then
eval "$('/c/ProgramData/CooperConda/Scripts/conda.exe' 'shell.bash' 'hook')"
fi
| true
|
89845d25559a0bcab4db7fc30d019b8d47b504fe
|
Shell
|
mtecer/consul
|
/terraform/openstack/templates/bootstrap-ansible.sh.tpl
|
UTF-8
| 1,700
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Terraform template (rendered before execution): ${...} is interpolated by
# Terraform, while $${...} escapes to a literal shell ${...}. Bootstraps an
# Ansible control host: SSH key/config, git, the consul playbook repo, and
# the ansible packages from EPEL.
ANSIBLE_USER="${ansible_user}"
ANSIBLE_HOME="${ansible_home}"
GITREPO="${consul_ansible_repo}"
source /etc/profile.d/proxy.sh
# Install the Terraform-provided private key and a permissive SSH config
# (heredoc bodies are file contents and must stay exactly as-is).
cat << HERE > $${ANSIBLE_HOME}/.ssh/id_rsa
${ssh_private_key}
HERE
cat << HERE > $${ANSIBLE_HOME}/.ssh/config
Host *
ServerAliveInterval 60
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
User $${ANSIBLE_USER}
IdentityFile $${ANSIBLE_HOME}/.ssh/id_rsa
HERE
chown $${ANSIBLE_USER}.$${ANSIBLE_USER} $${ANSIBLE_HOME}/.ssh/id_rsa
chown $${ANSIBLE_USER}.$${ANSIBLE_USER} $${ANSIBLE_HOME}/.ssh/config
chmod 0400 $${ANSIBLE_HOME}/.ssh/id_rsa
chmod 0400 $${ANSIBLE_HOME}/.ssh/config
# Idempotent setup: each step checks whether its work is already done.
if ! rpm -q --quiet git; then
echo "Installing git"
yum -y install git
else
echo "git is already installed"
fi
if [ ! -d $${ANSIBLE_HOME}/ansible ]; then
echo "Configuring ansible"
git clone $${GITREPO} $${ANSIBLE_HOME}/ansible
else
echo "Updating git repo in /ansible"
cd $${ANSIBLE_HOME}/ansible && git pull
fi
if [ ! -d /ansible ]; then
ln -s $${ANSIBLE_HOME}/ansible/ansible /ansible
chown -R $${ANSIBLE_USER}.$${ANSIBLE_USER} $${ANSIBLE_HOME}/ansible
fi
if ! rpm -q --quiet epel-release ansible; then
echo "Installing ansible"
rpm --import https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
yum -y install epel-release
yum -y install ansible net-tools bind-utils
else
echo "Ansible is already installed"
fi
# ln -f -s /home/opc/hosts /ansible/environments/dev/hosts
# cd /ansible
# ansible-galaxy install -r /ansible/requirements.yaml --roles-path /ansible/roles
# # ansible -m ping all
# # ansible-playbook playbook.yaml
# # consul members
# # consul operator raft -list-peers
| true
|
5a4caf40f67c74afac1dfd9cb462be01382ae517
|
Shell
|
ClementBonnefont/PGE2019
|
/Corbeau/tiago_pge/src/pal_navigation_sm/scripts/cp_pose_to_home.sh
|
UTF-8
| 462
| 3.953125
| 4
|
[] |
no_license
|
#! /bin/sh
#
# Copies the pose to $HOME/.pal; creates the target folder if it doesn't exist.

TARGET=$HOME/.pal

# Ensure target directory exists; abort with status 3 if it can't be made.
if [ ! -d "$TARGET" ]; then
    echo "Warning: Target path $TARGET doesn't exist. Creating it."
    # Folded the 'mkdir; if [ $? -ne 0 ]' pattern into a direct check and
    # quoted $TARGET so a $HOME with spaces can't split the path.
    if ! mkdir -p "$TARGET"; then
        echo "Error: Target path $TARGET couldn't be created."
        exit 3
    fi
fi

# Copy pose into the target folder
cp "$(rospack find pal_navigation_sm)/config/pose.yaml" "$TARGET"

echo "Done."
| true
|
bd68a22e717658f283fca174e8db8e82b8c4122f
|
Shell
|
jljox/dotfiles
|
/bin/fetch_git_repos.sh
|
UTF-8
| 177
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run 'git fetch' in every git repository found below the current directory.
# Bug fix: the original word-split the unquoted output of 'find', breaking
# on repository paths containing whitespace; use NUL-delimited find output.
wd=$(pwd)
while IFS= read -r -d '' gitdir; do
	dir=$(dirname "$gitdir")
	echo "cd into $dir"
	cd "$dir" && git fetch
	cd "$wd"
done < <(find . -type d -name .git -print0)
| true
|
b56e40166e74911d8e81f4c86cdf803137b472f5
|
Shell
|
zybreak/nodyn
|
/support/loop.sh
|
UTF-8
| 452
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Run the maven task 100 times in a row (to shake out flaky cluster tests),
# appending all output to ./log.
#MVN=/usr/local/bin/mvn
if [ "$MVN" = '' ] ; then
MVN=mvn
fi

OUTFILE=./tmp/build
TASK="install"

# command line interpolation is broken, I think
# OPTS='-Dtest.pattern="**/netPauseSpec.js"'

if [ ! -e ./tmp ] ; then
mkdir tmp
fi

# Bug fix: '{1..100}' is bash brace expansion; under /bin/sh (dash) it is a
# single literal word and the loop ran only once. Use a POSIX counter.
i=1
while [ "$i" -le 100 ]
do
#${MVN} ${TASK} ${OPTS} > ${OUTFILE}-${i}.log 2>&1
#echo ${MVN} ${TASK} -Dtest.pattern="**/clusterSpec.js"
${MVN} ${TASK} -Dtest.pattern="**/clusterSpec.js" 2>&1 | tee -a log
i=$((i + 1))
done
| true
|
3289bf5c07bffa10dc2bad35fdf517d3f1634ed8
|
Shell
|
patmorin/bin
|
/texbundle
|
UTF-8
| 969
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Bundle the files used by pdflatex into a single file
# Builds $1.tgz and $1.zip containing the .tex source, the produced PDF,
# all *.bib files, and every file from \listfiles that lives in the cwd.
# NOTE(review): -e in the shebang is lost if invoked as `bash texbundle`.
r=${1%.tex}
if [[ ! -e $r.tex ]]; then
echo "File \"$r.tex\" does not exist - quitting!"
echo "Usage: $0 <texfile>"
exit -1
fi
# pdflatex the document and extract the file list from the log file
# The mawk program is a small state machine: recording starts at the
# "*File List*" marker, stops at the "***..." terminator, and the first
# field of every line in between (skipping the marker line itself, via
# record++ >= 2) is a file name.
latexmk -pdf "$r"
fl=`mawk 'BEGIN {record=0;}
/^ *\*File List\* *$/ {record=1;}
/ *\*\*\*+ *$/ {record=0;}
{ if (record && record++ >= 2) { print $1; } }' $r.log | sort | uniq`
echo "File List is $fl"
echo "END FILE LIST"
# take only files in the current directory
for f in $fl; do
if [[ -e ./$f ]]; then
fp="$fp $f"
fi
done
# Always include any bibliography databases alongside the source.
for f in *.bib; do
echo "Adding bibtex file $f"
fp="$f $fp"
done
fp="$r.tex $fp"
fp="$r.pdf $fp"
echo "!!!!NOTE: Your latex file must include the \listfiles command!!!!"
# make an archive of everything
echo "Creating TGZ archive..."
tar czvf "$r.tgz" $fp | sed 's/^ */ /'
echo "Creating ZIP archive..."
zip "$r.zip" $fp | sed 's/^ */ /'
| true
|
0015f2deee238b18c9378b31b2a9d96f6ba84e2e
|
Shell
|
KoalaV2/faster-ducky-encoder
|
/encoder
|
UTF-8
| 790
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# TEMPLATE script: mounts a USB Rubber Ducky and encodes a payload with
# duckencoder.  Every <"..."> token below is a placeholder the user must
# replace with a real path before running; as written, the shell would
# treat them as input redirections from literally-named files.
# NOTE(review): the last duckencoder argument (`-o <"usuall usb mounting
# directory"/inject.bin`) is missing its closing `>` -- the script cannot
# run until the placeholders are filled in.
#Asks if there are any other storage devices plugged in since if there was the rubber ducky would not be able to get mounted on /dev/sdb1
echo "Do you have any other storage deviced plugged in expect the rubber ducky?"
read storage
# NOTE(review): $storage is unquoted; an empty or multi-word answer makes
# this test error out instead of falling through to the else branch.
if [ $storage == "no" ];then
udisksctl mount -b /dev/sdb1
# Report success/failure based on udisksctl's exit status.
case $? in
0) echo "Rubber ducky mounted properly!";;
*) echo "Rubber ducky did not monunt properly! :/" && exit;;
esac
echo 'What is the file you want to encode?'
ls <"where you have all your scripts">
read file
java -jar <"directory for duckencoder"> -i <"directory for your folder where you keep you have all your ducky scripts">/$file -o <"usuall usb mounting directory"/inject.bin
umount /dev/sdb1
else
echo 'Unplug it and run the script again, or do it manually!'
fi
| true
|
30afc643a271ae8a1ab334ec48994e940e26e4ce
|
Shell
|
joseaguardia/descargas2020_auto
|
/info_themoviedb.sh
|
UTF-8
| 1,411
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Given a series name ($1) and season number ($2), prints on one line:
# full name, episode count, poster URL and poster file name (space-separated),
# using TheMovieDB API v3 (responses cached in api_buscar.log).
SERIE=$1
TEMPORADA=$2
APIKEY="1234567890ABCDEFGHIJKLMNO"
POSTER_SIZE=500 #Values: 92, 54, 185, 342, 500, 780
POSTER_URL_BASE="http://image.tmdb.org/t/p/w$POSTER_SIZE/"

# Get the series ID:
# If the name could return more than one result, broaden the search terms
# separated with %20.  Just in case, only the first result is kept.
# The response is split on commas (tr) so each JSON field sits on its own
# line for the grep/cut extraction below.
curl -s --request GET --url "https://api.themoviedb.org/3/search/tv?query=$SERIE&season_number=$TEMPORADA&language=es-ES&api_key=$APIKEY" --header 'content-type: application/json' --data '{}' | tr ',' "\n" > /opt/smartmirrorPI/descargas2020/api_buscar.log
ID=`cat /opt/smartmirrorPI/descargas2020/api_buscar.log | grep '"id":' | head -1 | cut -d ':' -f2`
# Name with parentheses removed and whitespace replaced by underscores.
NOMBRE=`cat /opt/smartmirrorPI/descargas2020/api_buscar.log | grep '"name":' | head -1 | cut -d ':' -f2 | tr -d '(' | tr -d ')' | tr [:space:] '_' `
POSTER=`cat /opt/smartmirrorPI/descargas2020/api_buscar.log | grep "poster_path" | head -1 | cut -d':' -f2 | tr -d '"' | cut -d'/' -f2 | tr -d '}' | tr -d ']'`
# Number of episodes: count occurrences of "episode_number" in the season JSON.
EPISODIOS=`curl -s --request GET --url "https://api.themoviedb.org/3/tv/$ID/season/$2?language=es-ES&api_key=$APIKEY" --header 'content-type: application/json' --data '{}' | grep -o episode_number | wc -l`
echo "$NOMBRE $EPISODIOS ${POSTER_URL_BASE}${POSTER} ${POSTER}"
| true
|
dda5b062606413c1eac79eaad33db64340d562e1
|
Shell
|
joshuaeveleth/places-website
|
/dbTools/refreshDevDb.sh
|
UTF-8
| 1,046
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Refresh the dev Places databases from production: back up both dev and
# prod, drop/recreate the dev databases, then restore prod dumps into dev.
# Must be run as the `postgres` OS user (checked below); backups are written
# to ./backups/ stamped with the current date-time.
USER=`whoami`
api_db=dev_places_api
pgs_db=dev_places_pgs
backupDate="$(date +'%Y%m%d%H%M')"
PROD_api_db=places_api
PROD_pgs_db=places_pgs
if [ $USER = "postgres" ]; then
echo "**** backing up the dev databases ****"
# -Fc = custom format, required for pg_restore below.
pg_dump -Fc "$api_db" > "./backups/api_backup_$backupDate"
pg_dump -Fc "$pgs_db" > "./backups/pgs_backup_$backupDate"
echo "**** backup up the production databases ****"
pg_dump -Fc "$PROD_api_db" > "./backups/PROD_api_backup_$backupDate"
pg_dump -Fc "$PROD_pgs_db" > "./backups/PROD_pgs_backup_$backupDate"
echo "**** Dropping and recreating the dev databases ****"
psql -c "DROP DATABASE IF EXISTS $api_db;"
psql -c "CREATE DATABASE $api_db;"
psql -c "DROP DATABASE IF EXISTS $pgs_db;"
psql -c "CREATE DATABASE $pgs_db;"
echo "**** Loading the former prod db into dev ****"
pg_restore -d $api_db "./backups/PROD_api_backup_$backupDate"
pg_restore -d $pgs_db "./backups/PROD_pgs_backup_$backupDate"
echo "**** Done! ***"
else
echo You must run this from the postgres users
fi
| true
|
430590a75832be88444d5ec52a5948bea3ccf9e3
|
Shell
|
yast/aytests-tests
|
/aytests/kdump_default.sh
|
UTF-8
| 282
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-install verification for an AutoYaST kdump profile: with set -e,
# either failing grep aborts the script before "AUTOYAST OK" is printed.
set -e -x
# Taking proposed values if <crash_kernel>, <general> have not been defined in AutoYaST configuration file. (bnc#997448)
# A numeric crashkernel= parameter must have been written to the bootloader.
grep "crashkernel=[0-9]" /boot/grub2/grub.cfg
grep "<KDUMP_KEEP_OLD_DUMPS>5</KDUMP_KEEP_OLD_DUMPS>" /root/autoinst.xml
echo "AUTOYAST OK"
| true
|
3b75f2749071c890308be16044af75e3154d5bb8
|
Shell
|
jrzingel/galactic-builder
|
/update_servers.sh
|
UTF-8
| 665
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Release helper: pull latest, commit everything with a
# "Release Alpha vMAJOR.MINOR" message, and push to all deployment remotes.
# Edit NUM1/NUM2 below to bump the version before running.
echo "Updating Remotes..."
#git remote
git pull origin # Pull origin to stay up to date
PHR="Release Alpha v"
NUM1=6 # Set new main version number here
NUM2=2 # And set version minor number here
VER=$NUM1.$NUM2
MSG=$PHR$VER
#sed -e "206s/$NUM1.$TMP/$NUM1.$NUM2/" templates/index.html # Update the new version num
#mv templates/index_tmp.html templates/index.html
echo $MSG
git commit --all --message "$MSG" # Commit all the changed code
git push origin # Update bitbucket
git push heroku # Update heroku
git push git # Update git
#git push AAmazon # Update amazon server (legacy)
#git push AHeroku # Update amazon / heroku sever (legacy)
| true
|
dba90ff17e0c5a0621a51eb8e998828432851069
|
Shell
|
B-Tsai/case_autorun
|
/case_autorun.sh
|
UTF-8
| 1,537
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Job babysitter: submits a remote simulation job, then polls it in a loop,
# re-submitting on unexpected exits (up to 5 consecutive retries) and
# downloading / reconstructing result data while the job runs.
#
# Helper functions (job_submit, job_check, data_download, data_remove,
# data_remove_last, data_reconstructOpenFOAM, of240) and the variables
# job_stat / end_time_step come from the sourced files below.
source ./modules/job.sh
source ./settings.sh
`of240`

# Submit the job for the first time
echo "First time submitting the job ..."
job_submit $$
sleep 5

retry_times=0
latest_time_step_remote=0
while [ "$retry_times" -le 5 ]
do
    # Start checking job status (job_check sets $job_stat).
    echo "Check job status ..."
    job_check $$
    # If job does no exit, then do more checks
    if [[ $job_stat == 0 ]] ; then
        # Check if all the simulation has been done
        if [[ $end_time_step != $latest_time_step_remote ]] ; then
            # If retry times more than 4 times, remove the last time step data (might be broken)
            if [[ $retry_times == 4 ]] ; then
                data_remove_last $$
            fi
            echo "Job does not in queue, re-sumbiting ..."
            job_submit $$
            retry_times=$((retry_times+1))
            sleep 5
        else
            # if reach the end time step, end the loop
            break
        fi
    else
        echo "Job is in queue, wait 60 sec ..."
        sleep 60
        retry_times=0
        # Download data from remote host to local host
        echo "Downloading data from remote host ..."
        data_download $$
        # Remove data on remote host
        echo "Removing data from remote host ..."
        data_remove $$
        # Reconstruct OpenFOAM data
        data_reconstructOpenFOAM $$
    fi
done

# BUGFIX: the original used [[ $retry_times > 5 ]], which is a *string*
# (lexicographic) comparison inside [[ ]]; use an arithmetic test instead.
if (( retry_times > 5 )) ; then
    echo "Retry more than 5 times, process stops!"
else
    echo "Job has finished!"
fi
| true
|
13261ae637ef1a1b25585edf54ca5ffd93dfcf24
|
Shell
|
tyru/gittools
|
/git-push-backup
|
UTF-8
| 590
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# Push a fixed branch of every repository under {repos_dir} to a backup remote.

dir=
remote="backup"
branch="master"

# Print usage and exit non-zero.
usage() {
    cat <<EOM
git-push-backup - Push branch given remote
NOTE: now I guess this is totally useless
because 'git push --all' does the job.
Usage: git-push-backup {repos_dir} [{remote} [{branch}]]
EOM
    exit 1
}

# Positionally assign dir, remote, branch from the argument list; later
# arguments are optional and keep their defaults above when absent.
for varname in dir remote branch; do
    if [ $# -ne 0 ]; then
        eval "$varname=\"$1\""
        shift
    else
        break
    fi
done

[ -z "$dir" ] && usage

cd "$dir" || exit $?
# BUGFIX: iterate over directories with a glob instead of parsing `ls`
# output, which breaks on names containing whitespace.
for repo in */; do
    # TODO: log forced-update?
    (cd "$repo" && git push -f "$remote" "$branch" >/dev/null 2>&1)
done
| true
|
11ceb9dbdfd67346ba6a755320893272b606d88a
|
Shell
|
mklement0/awf
|
/test/finds root of all installed workflows
|
UTF-8
| 928
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Test: `awf which '/'` must report Alfred's real workflow root, and that
# root must contain at least one installed workflow folder.
# ---
# IMPORTANT: Use the following statement at the TOP OF EVERY TEST SCRIPT
#            to ensure that this package's 'bin/' subfolder is added to the path so that
#            this package's CLIs can be invoked by their mere filename in the rest
#            of the script.
# ---
PATH=${PWD%%/test*}/bin:$PATH

# Helper function for error reporting: message to stderr, exit 1.
die() { (( $# > 0 )) && echo "ERROR: $*" >&2; exit 1; }

# True if the path is a directory or a symlink (to anything).
exists() {
  [[ -d "$1" || -L "$1" ]]
}

  # NOTE: Unlike with the other tests, we do use Alfred's actual folder containing
  #       the installed workflows here, as a sanity check that awf finds the
  #       true location (in the other tests, this location is overriden with
  #       a test location).

wfRootDir=$(awf which '/') || die "Failed to determine Alfred 2's root folder of all installed workflows."

# The glob expands to the first matching workflow dir, if any.
exists "$wfRootDir/user.workflow."* || die "No workflows found in '$wfRootDir'"

exit 0
| true
|
9c29d0769ac5347d894bcc371248c28aeb595999
|
Shell
|
liuxuanhai/CGI_Web
|
/trunk/src/server/token_server/dist/monitor.sh
|
UTF-8
| 211
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Watchdog: restart hoosho.token_server via its control script whenever no
# matching process is found in the process table (intended for cron).
CTL=/home/dev/hoosho/server/token_server/ctl_hoosho_token_server.sh
SERVER_BIN=hoosho.token_server

# Count processes whose command column matches the server binary.
# grep -c replaces the grep|wc -l round trip; -- protects the pattern and
# the quotes keep the variable from word-splitting.
pn=$(ps ax | awk '{print $5}' | grep -c -- "$SERVER_BIN")

if [ "$pn" -eq 0 ] ; then
    "$CTL" start
fi
| true
|
73417821cf9051058c069a65e41b3e69ed2872dc
|
Shell
|
gpfreitas/gpfports
|
/bashreduce/install_bashreduce.sh
|
UTF-8
| 312
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Clone bashreduce into $PREFIX/src, point its Makefile at $PREFIX/bin
# instead of /usr/local/bin, and build/install brutils.
# The && chain stops at the first failing step.
# NOTE(review): `sed -i .bak` is BSD/macOS syntax; GNU sed expects
# `sed -i.bak` (no space) -- confirm the target platform.
PREFIX="${HOME}"/local
cd "$PREFIX"/src \
&& git clone https://github.com/erikfrey/bashreduce.git \
&& cd "$PREFIX"/src/bashreduce \
&& sed -i .bak "s:/usr/local/bin:$PREFIX/bin:g" brutils/Makefile \
&& cd "$PREFIX"/src/bashreduce/brutils \
&& make \
&& make install
| true
|
8dfa8b549a29f123617da7e9053538e447758383
|
Shell
|
PiMaker/Dotfiles
|
/i3-gaps/.config/i3/switch_mon.sh
|
UTF-8
| 2,780
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Swap the currently focused i3 workspace with the workspace visible on the
# monitor to its left or right, keeping both in the foreground and moving
# the mouse pointer along (preserving its relative position when possible).
# Usage: switch_mon.sh (left|right)
# Exit codes: 1 = bad usage, 2 = no monitor in that direction.
# Requires: i3-msg, jq, xdotool.
set -e
#set -x
# format: array[] of:
# {
# "num": 2,
# "name": "2",
# "visible": true,
# "focused": true,
# "rect": {
# "x": 1920,
# "y": 25,
# "width": 3440,
# "height": 1415
# },
# "output": "HDMI-A-0",
# "urgent": false
# }
workspaces=$(i3-msg -t get_workspaces | jq '.[] | select(.visible)')
# move a workspace to another screen, input format is as above
# ($1 = workspace JSON to move, $2 = workspace JSON whose output is the target)
function move() {
from=$(echo "$1" | jq '.num')
to=$(echo "$2" | jq '.output')
i3-msg "[workspace=${from}] move workspace to output ${to}"
}
# get currently focused/active workspace
current_ws=$(echo "$workspaces" | jq 'select(.focused)')
current_x=$(echo "$current_ws" | jq '.rect.x')
current_y=$(echo "$current_ws" | jq '.rect.y')
current_w=$(echo "$current_ws" | jq '.rect.width')
current_h=$(echo "$current_ws" | jq '.rect.height')
alt_ws="none"
# get current mouse coordinates relative to workspace...
# (xdotool emits MOUSE_X / MOUSE_Y shell assignments which eval brings in)
eval "$(xdotool getmouselocation --shell --prefix "MOUSE_")"
declare -i mouse_x_rel
declare -i mouse_y_rel
# ...only if the mouse is in the current workspace at all though
if (( MOUSE_X >= current_x && MOUSE_X < current_x + current_w && \
MOUSE_Y >= current_y && MOUSE_Y < current_y + current_h )); then
let "mouse_x_rel = MOUSE_X - current_x"
let "mouse_y_rel = MOUSE_Y - current_y"
else
# otherwise indicate the mouse was not on screen
let "mouse_x_rel = -1"
fi
# switch on input movement direction provided to script
# "left": nearest visible workspace with a smaller x; "right": larger x.
case $1 in
"left" )
alt_ws=$(echo "$workspaces" \
| jq "select(.rect.x < $current_x)" \
| jq -s 'max_by(.rect.x)')
;;
"right" )
alt_ws=$(echo "$workspaces" \
| jq "select(.rect.x > $current_x)" \
| jq -s 'min_by(.rect.x)')
;;
*)
echo "usage: $0 (left|right)"
exit 1
;;
esac
if [ "$alt_ws" = "none" ] || [ "$alt_ws" = "" ]; then
# no screens found, exit with status code 2
exit 2
fi
# alright, we have two screens to swap!
move "$current_ws" "$alt_ws"
move "$alt_ws" "$current_ws"
# focus both workspaces once to keep them in foreground
i3-msg "workspace $(echo "$alt_ws" | jq '.num')"
i3-msg "workspace $(echo "$current_ws" | jq '.num')"
# move mouse cursor along with $current_ws, either with relative
# position calculated above or by falling back to center of screen
if (( mouse_x_rel < 0 )); then
xdotool mousemove \
$(echo "$alt_ws" | jq '.rect.x + .rect.width / 2') \
$(echo "$alt_ws" | jq '.rect.y + .rect.height / 2')
else
xdotool mousemove \
$(echo "$alt_ws" \
| jq ".rect.x + .rect.width*(${mouse_x_rel}/${current_w})") \
$(echo "$alt_ws" \
| jq ".rect.y + .rect.height*(${mouse_y_rel}/${current_h})")
fi
| true
|
3ec44929de6de38e2208eb128cd7f36381246731
|
Shell
|
axfcampos/.terminal.configs
|
/bash_profile
|
UTF-8
| 2,126
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Personal macOS bash profile: loads Bash-it, defines Finder helpers, and
# exports toolchain paths (cocos2d-x, Android, Go, Ruby, RabbitMQ, RVM).
# NOTE(review): paths below are machine-specific (/Users/axfcampos/...).
source ~/.profile

#####################################################
##### BASH_IT
#####################################################
# Path to the bash it configuration
export BASH_IT=$HOME/.bash_it
# Lock and Load a custom theme file
# location /.bash_it/themes/
export BASH_IT_THEME='minimal'
# Load Bash It
source $BASH_IT/bash_it.sh

#####################################################
##### SHOW/HIDE Hidden files in Finder.app
#####################################################
# Show .files
alias showFiles='defaults write com.apple.finder AppleShowAllFiles YES; killall Finder /System/Library/CoreServices/Finder.app'
# Hide .files
alias hideFiles='defaults write com.apple.finder AppleShowAllFiles NO; killall Finder /System/Library/CoreServices/Finder.app'

#####################################################
##### Simple Configs
#####################################################
alias ls='ls -G -h'
alias tmux='tmux -2'

# Add environment variable COCOS_CONSOLE_ROOT for cocos2d-x
export COCOS_CONSOLE_ROOT=/Users/axfcampos/devp/cocos2d-x/tools/cocos2d-console/bin
export PATH=$COCOS_CONSOLE_ROOT:$PATH

# Add environment variable NDK_ROOT for cocos2d-x
export NDK_ROOT=/Users/axfcampos/devp/android-ndk-r10
export PATH=$NDK_ROOT:$PATH

# Add environment variable ANDROID_SDK_ROOT for cocos2d-x
export ANDROID_SDK_ROOT=/Users/axfcampos/devp/android-sdk-macosx
export PATH=$ANDROID_SDK_ROOT:$PATH
export PATH=$ANDROID_SDK_ROOT/tools:$ANDROID_SDK_ROOT/platform-tools:$PATH

# Add environment variable ANT_ROOT for cocos2d-x
export ANT_ROOT=/usr/local/Cellar/ant/1.9.4/libexec/bin
export PATH=$ANT_ROOT:$PATH

# Node env
export NODE_ENV='development'

# Go env
export GOPATH=~/devp/gospace
export PATH=$PATH:$GOPATH/bin

# GIT
export EDITOR=vim
# Load git tab-completion if present.
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
fi

######
#### Ruby stuff
######
# Initialise rbenv shims only when rbenv is installed.
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi

###### Rabbitmq-server
PATH=$PATH:/usr/local/sbin

[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
| true
|
5bf3acb029e8dedf61c84aeb2391f843671e1aba
|
Shell
|
orcguru/pid_monitor
|
/workload/spark/scripts/daemon_control.sh
|
UTF-8
| 571
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start or stop a remote Spark standalone cluster over SSH.
#
# Usage: ./daemon_control.sh <TGT_HOST> <SPARK_HOME> <ACTION: START/STOP>

if [ $# -lt 3 ]
then
    echo "Usage: ./daemon_control.sh <TGT_HOST> <SPARK_HOME> <ACTION: START/STOP>"
    exit 1
fi

TGT_HOST=$1
SPARK_HOME=$2
ACTION=$3

# Validate the action and dispatch in a single case statement instead of
# three separate if-chains; accepted values are START/STOP in either case.
case $ACTION in
    STOP|stop)
        ssh $TGT_HOST "cd $SPARK_HOME && ./sbin/stop-all.sh"
        ;;
    START|start)
        ssh $TGT_HOST "cd $SPARK_HOME && ./sbin/start-all.sh"
        ;;
    *)
        echo "ACTION should be START/STOP/start/stop"
        exit 1
        ;;
esac
| true
|
5cd2cf37133f13017df334dfefb3211c3580e22d
|
Shell
|
IMAGE-ET/dicom-2
|
/tools/mac_scripts/dicoml.sh
|
UTF-8
| 656
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Load or unload the dicom launchd agents (job/day/del plists), then list
# the dicom agents launchctl currently knows about.
#
# Usage: dicoml.sh load|unload
status="$1"

# BUGFIX: the original tested an unquoted $status with two separate ifs,
# which is a syntax error when no argument is given; a quoted case
# statement handles the empty/any-other-value case gracefully (no-op).
case "$status" in
load)
    echo "Loading dicom Agents..."
    launchctl load /Library/LaunchAgents/com.dicom.job.plist
    launchctl load /Library/LaunchAgents/com.dicom.day.plist
    launchctl load /Library/LaunchAgents/com.dicom.del.plist
    echo "List of loaded dicom agents..."
    launchctl list | grep dicom
    ;;
unload)
    echo "unloading dicom Agents..."
    launchctl unload /Library/LaunchAgents/com.dicom.job.plist
    launchctl unload /Library/LaunchAgents/com.dicom.day.plist
    launchctl unload /Library/LaunchAgents/com.dicom.del.plist
    echo "Check if are unloaded..."
    launchctl list | grep dicom
    ;;
esac
| true
|
adf034556add1840db17cf7bf878718fb757ef34
|
Shell
|
petronny/aur3-mirror
|
/archisthebest/PKGBUILD
|
UTF-8
| 436
| 2.84375
| 3
|
[] |
no_license
|
# PKGBUILD: builds a trivial package installing a one-line shell script
# that prints "Arch is the best!".
# NOTE(review): writing into $startdir/pkg is long-deprecated makepkg
# behaviour; modern PKGBUILDs install into $pkgdir from package() --
# confirm the target pacman/makepkg version before relying on this.
pkgname=archisthebest
pkgver=1.0
pkgrel=1
pkgdesc="Arch is the best!"
url="http://www.archlinux.org"
arch=('any')
license=('GPLv3')
#source=()
#md5sums=()

build() {
# Generate /usr/bin/archisthebest inside the staging tree and mark it
# executable for all users.
mkdir -p $startdir/pkg/usr/bin/
echo '#!/bin/sh' >> $startdir/pkg/usr/bin/archisthebest
echo >> $startdir/pkg/usr/bin/archisthebest
echo 'echo "Arch is the best!"' >>$startdir/pkg/usr/bin/archisthebest
chmod a+x $startdir/pkg/usr/bin/archisthebest
}
| true
|
d96e7260aff888f16dce0049410262bbedd4917c
|
Shell
|
ptptptptptpt/install
|
/openstack/deploy_openstack_kolla_toolbox.sh
|
UTF-8
| 884
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Dependencies:
#
# Deploys the Stackube kolla-toolbox container: prepares the shared log
# directory, then runs the kolla/centos-binary-kolla-toolbox image
# privileged on the host network with config and log bind mounts.
programDir=`dirname $0`
programDir=$(readlink -f $programDir)
parentDir="$(dirname $programDir)"
programDirBaseName=$(basename $programDir)

# Abort on errors/unset vars/pipeline failures and trace each command.
set -o errexit
set -o nounset
set -o pipefail
set -x


## log dir
mkdir -p /var/log/stackube/openstack
chmod 777 /var/log/stackube/openstack

## kolla-toolbox
# Host networking, /run and /dev shared so the toolbox can manage host
# services; config is mounted read-only per the Kolla COPY_ALWAYS strategy.
docker run -d --net host \
  --name stackube_openstack_kolla_toolbox \
  -v /run/:/run/:shared \
  -v /dev/:/dev/:rw \
  -v /etc/stackube/openstack/kolla-toolbox/:/var/lib/kolla/config_files/:ro \
  -v /var/log/stackube/openstack:/var/log/kolla/:rw \
  -e "KOLLA_SERVICE_NAME=kolla-toolbox" \
  -e "ANSIBLE_LIBRARY=/usr/share/ansible" \
  -e "ANSIBLE_NOCOLOR=1" \
  -e "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" \
  --restart unless-stopped \
  --privileged \
  kolla/centos-binary-kolla-toolbox:4.0.0

sleep 5

exit 0
| true
|
9c53426f91a8d636302babb8a6dedc9f27b15da1
|
Shell
|
davidschmitt/consul-connect-workshop
|
/steps/step16.sh
|
UTF-8
| 409
| 2.96875
| 3
|
[] |
no_license
|
#
# Confirm that Consul Connect Intentions now allow our access
#
# You can also test access by opening the following URL from
# a browser running on the same machine as this demo:
#
#   http://127.0.0.3:8700/ui/dc1/services
#
# Test the curl command's exit status directly instead of the fragile
# `[ "$?" "=" "0" ]` form.
# NOTE(review): curl -s exits 0 even for HTTP 4xx/5xx responses unless -f
# is added -- confirm whether a blocked intention actually fails the
# connection here, or whether -f is needed.
if curl -s http://127.0.0.3:8700/ui/dc1/services >curl.out
then
    echo "Access permitted as expected"
    exit 0
else
    echo "Access blocked unexpectedly"
    exit 1
fi
| true
|
3802fae472dc40d5dbda8c1b3bd9aff56cc7b7ff
|
Shell
|
olonho/electric-jungle-server
|
/scripts/start_all
|
UTF-8
| 289
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Start jobs on every machine listed in $ROOT/machines.
# Each entry has the form host:count; startjobs is invoked per entry.
ROOT=/net/electricjungle/ed/ElectricJunglesServlet/scripts
chmod +x /net/electricjungle/ed/ElectricJunglesServlet/scripts/*
for i in `cat $ROOT/machines`;
do
    # Split host:count with POSIX parameter expansion instead of spawning
    # two echo|awk pipelines per entry.
    box=${i%%:*}
    num=${i#*:}
    $ROOT/startjobs "$box" "$num"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.