blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
067b80c1091b48ef634257b49e073f1f5450fdb6 | Shell | serhiiKalchenko/autoScript-wp | /autoScript.sh | UTF-8 | 5,972 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#DZ-6 Проверки!
# Проверка дистрибутива Linux
cat /etc/os-release | grep -w "Ubuntu" 1> /dev/null
if [ $? != 0 ]
then
echo "It's not Ubuntu! Run script only on Ubuntu!!!"
exit
# Проверка, запущен ли скрипт от имени root
else if [ "$(id -u)" != "0" ]
then
echo "Запустите скрипт от имени root!"
exit
fi
fi
# Проверка присутвует доступ к файлу /root/config.json
if [ -e /root/config.json ]
then
# Проверка: устновлен ли пакет jq; устанавливаем, если нету.
dpkg -s "jq" &> /dev/null
if [ $? != 0 ]
then
apt -y install jq
fi
# Разбор конфига /root/config.json
siteName=$(cat /root/config.json | jq -r '.sitename')
siterootDir=$(cat /root/config.json |jq -r '.siteroot_dir')
DB_userName=$(cat /root/config.json | jq -r '.db.username')
DB_password=$(cat /root/config.json | jq -r '.db.password')
DB_name=$(cat /root/config.json | jq -r '.db.name')
echo "siteName: $siteName"
echo "siterootDir: $siterootDir"
echo "DB_userName: $DB_userName"
echo "DB_password: $DB_password"
else
echo "Файла '/root/config.json' не существует!"
exit
fi
# Проверка установленного пакета apache2
dpkg -s "apache2" &> /dev/null
if [ $? != 0 ]
then
# Установка Apache2
apt update && sudo apt -y install apache2
fi
# Проверка установленного пакета mysql-server
dpkg -s "mysql-server" &> /dev/null
if [ $? != 0 ]
then
# Установка MySQL Server
apt -y install mysql-server
fi
dpkg -s "php7.2" &> /dev/null
# Проверка установленного PHP
if [ $? != 0 ]
then
# Установка PHP
apt -y install php7.2
apt -y install php7.2-mysql # модуль для MySQL
fi
# Перезагружаем веб-сервер
systemctl reload apache2.service
# Очищаем каталок перед копированием туда WordPress
rm -rf $siterootDir/*
# Установка WordPress
wget https://ru.wordpress.org/latest-ru_RU.tar.gz -P $siterootDir
# Распаковка WordPress
tar -xzvf $siterootDir/latest-ru_RU.tar.gz -C $siterootDir
# Перемещаем все файлы из папки WordPress прямо в корень папки $siterootDir
mv $siterootDir/wordpress/* $siterootDir
# Удаляем пустую папку WordPress
rmdir $siterootDir/wordpress
##DZ-3
# Удаление БД
#mysql -e "DROP DATABASE $DB_name;"
# Удаление пользователя
#mysql -e "DROP USER '$DB_userName'@'localhost';"
# Создание БД в MySQL
mysql -e "CREATE DATABASE $DB_name"
# Создание нового пользователя БД MySQL
mysql -e "CREATE USER '$DB_userName'@'localhost' IDENTIFIED BY '$DB_password';"
# Предоставить права для этого пользователя БД
mysql -e "GRANT ALL PRIVILEGES ON $DB_name.* TO '$DB_userName'@'localhost' WITH GRANT OPTION;"
##DZ-4
# Настройка Wordpress.
# Создаем конфиг WordPress
# Переходим в корневую папку. Там у нас уже конфиги WordPress
cd $siterootDir
cp wp-config-sample.php wp-config.php
# Правим конфиг (wp-config.php) Wordpress. Замена строк в конфиге
# /** Имя базы данных для WordPress */
sed -i "s/.*database_name_here.*/define( 'DB_NAME', '$DB_name' );/" wp-config.php
# /** Имя пользователя MySQL */
sed -i "s/.*username_here.*/define( 'DB_USER', '$DB_userName' );/" wp-config.php
# /** Пароль (пользователя) к базе данных MySQL */
sed -i "s/.*password_here.*/define( 'DB_PASSWORD', '$DB_password' );/" wp-config.php
# /** Имя сервера MySQL */
sed -i "s/.*localhost.*/define( 'DB_HOST', 'localhost' );/" wp-config.php
#############################################################################
## Настройка конфигов Apache2
# Формирование vhost.conf
ServerAdmin="$DB_userName@$siteName"
sed -i "s!ServerAdmin!ServerAdmin $ServerAdmin!" /root/vhost.conf
DocumentRoot="DocumentRoot $siterootDir"
sed -i "s!DocumentRoot!$DocumentRoot!" /root/vhost.conf
ServerName="ServerName $siteName"
sed -i "s!ServerName!$ServerName!" /root/vhost.conf
ServerAlias="ServerAlias www.$siteName"
sed -i "s!ServerAlias!$ServerAlias!" /root/vhost.conf
Directory="<Directory $siterootDir>"
sed -i "s!<Directory>!$Directory!" /root/vhost.conf
ErrorLog="$siterootDir/error.log"
sed -i "s!ErrorLog!ErrorLog $ErrorLog!" /root/vhost.conf
CustomLog="$siterootDir/access.log combined"
sed -i "s!CustomLog!CustomLog $CustomLog!" /root/vhost.conf
# Закидываем сформированный vhost.conf в /etc/apache2/sites-available
cp /root/vhost.conf /etc/apache2/sites-available
# Активируем vhost.conf
a2ensite vhost.conf
# Деактивируем старый конфиг
a2dissite 000-default.conf
systemctl is-active apache2.service --quiet
if [ $? = 0 ]
then
# Перезапускаем apache2.service
systemctl reload apache2.service
else
# Запускаем Apache2
systemctl start apache2.service
fi
# Добавляем DNS-запись
echo "127.0.0.10 $siteName www.$siteName" >> /etc/hosts
#############################################################################
##DZ-5
# Резервные копии
# Разбор конфига /root/config.json
backupEnable=$(cat /root/config.json | jq -r '.backup_enable')
# Нужно ли делать резервные копии?
if [ "$backupEnable" == "1" ]
then
#Запускаем скрипт по добавлению резервного копирования в планировщик
/root/cron.sh
else
echo "Резервное копирование отключено."
fi
| true |
2d08e25d295184f66bcbc1a105935b0dd44ff7e8 | Shell | celsodevelop/trybe-exercises | /fundamentals/bloco_1/dia_5/exercicio6.sh | UTF-8 | 348 | 3.890625 | 4 | [] | no_license | # /bin/bash
# Report whether the path given as $1 is a directory, a regular file, or
# something else, then show the last entries of a long listing for it.
# Fix: "$1" is now quoted throughout. The original left it bare, which
# broke on paths containing spaces and, when no argument was supplied,
# made `[ -e $1 ]` collapse to the always-true one-argument test `[ -e ]`.
if [ -e "$1" ]
then
  if [ -d "$1" ]
  then
    echo "The path $1 is for a directory"
  elif [ -f "$1" ]
  then
    echo "The path $1 is for a file"
  else
    # Exists but is neither a directory nor a regular file
    # (device, socket, FIFO, ...).
    echo "The path $1 is for another type of file"
  fi
  echo $'\n More information \n'
  echo "$(ls -la "$1" | tail -n 5)"
else
  echo "The path $1 is invalid"
fi
093f18178db38141b69205d4a4387e4dc2602336 | Shell | Kamigami55/dotfiles | /uninstall.sh | UTF-8 | 578 | 4 | 4 | [] | no_license | #!/bin/bash
# ##########
# uninstall.sh
#
# Uninstall my dotfiles
#
# It will do the following things:
# - Remove soft links to my dotfiles
# ##########
# Import configurations from config.sh
source config.sh
# Decode linkFiles array from config.sh (HACK)
oIFS=$IFS
IFS=$array_separator
linkFiles=( $linkFilesArray )
IFS=$oIFS
# Remove soft links to dotfiles
for file in "${linkFiles[@]}"
do
target="$HOME/$file"
if [ -e $target ]; then
# Remove soft link
unlink $target
echo "Remove soft link: '$target'"
fi
done
echo "Succesfully uninstall dotfiles"
| true |
66a01a91172e9c74d3f3eb478e0878d0ea26135f | Shell | Nauendorf/scantekctl | /modules/scantek-diag | UTF-8 | 2,898 | 4.15625 | 4 | [] | no_license | #!/bin/bash
IS_PARENT=`scantek-config is-parent`
# Diagnostics wrapper script
if [[ "$#" -eq 0 || "$1" == '--help' || "$1" == '-?' ]]; then
echo "Usage: scantek diag [TYPE].."
echo
echo "Diagnostic types available:"
echo " touchscreen Checks to see if the touchscreen is disconnected"
echo " services Checks to see if the required native services are running"
echo " temp Displays the temperature of the CPU cores"
echo
exit
fi
# Diagnose the touchscreen connection: verifies that X reports a connected
# display, prints which video interface it is on, and checks that a
# touchscreen HID input device is visible to udev.  Exits the script in
# every case (status 0 on success, 255 via `exit -1` on failure).
function do_touchscreen_diag() {
  # Parent units have no touchscreen attached; nothing to test.
  if [[ "$IS_PARENT" -eq 1 ]]; then
    echo "This unit is a parent - skipping touchscreen tests"
    exit
  fi
  # Check to see if X thinks it's connected.  Takes the first output
  # xrandr reports as " connected" on display :0.
  CONNECTED_SCREEN="$(DISPLAY=:0 xrandr -q --verbose | grep ' connected' | cut -d ' ' -f 1 | head -n 1)"
  if [[ "$CONNECTED_SCREEN" == "" ]]; then
    echo "error: Display does not appear to be connected at all." >&2
    exit -1
  fi
  # Map the xrandr output name to a human-readable interface name.
  case "$CONNECTED_SCREEN" in
    "DP" | "DP1" | "DP2" | "DP3")
      echo "Touchscreen is connected via DisplayPort"
      ;;
    "HDMI" | "HDMI1" | "HDMI2")
      echo "Touchscreen is connected via HDMI"
      ;;
    "VGA" | "VGA1" | "VGA2")
      echo "Touchscreen is connected via VGA"
      ;;
    *)
      echo "Touchscreen is connected via unknown interface $CONNECTED_SCREEN"
      ;;
  esac
  # Check to see if it shows up as a HID device: udev tags touchscreen
  # input devices with ID_INPUT_TOUCHSCREEN=1.
  TOUCHSCREEN_HID_EXISTS="$(udevadm info --export-db|grep ID_INPUT_TOUCHSCREEN=1)"
  if [[ -z "$TOUCHSCREEN_HID_EXISTS" ]]; then
    echo "error: Touchscreen input device is not detected." >&2
    exit -1
  else
    echo "Touchscreen input device is detected."
  fi
  exit
}
# Verify that the required native services are running by probing for
# their processes with pgrep.  MySQL and Apache are required everywhere;
# idserver and (when the camera is enabled) facedetect are only required
# on child units.  Exits the script: 0 if all checks pass, 255 otherwise.
function do_services_diag() {
  # NOTE(review): ST_V5_PID is collected but never checked below.
  ST_V5_PID="$(pgrep st_v5.out)"
  FACEDETECT_PID="$(pgrep facedetect)"
  IDSERVER_PID="$(pgrep bin.idserver)"
  MYSQL_PID="$(pgrep mysqld)"
  APACHE_PID="$(pgrep apache2)"
  if [[ -z "$MYSQL_PID" ]]; then
    echo "error: MySQL does not appear to be running" >&2
    exit -1
  fi
  if [[ -z "$APACHE_PID" ]]; then
    echo "error: Apache does not appear to be running" >&2
    exit -1
  fi
  # Child-only services.
  if [[ "$IS_PARENT" -eq 0 ]]; then
    if [[ -z "$IDSERVER_PID" ]]; then
      echo "error: idserver does not appear to be running" >&2
      exit -1
    fi
    # facedetect is only required when the camera feature is enabled.
    USE_CAMERA="$(scantek-config use-camera)"
    if [[ -z "$FACEDETECT_PID" && "$USE_CAMERA" -eq 1 ]]; then
      echo "error: facedetect does not appear to be running" >&2
      exit -1
    fi
  fi
  exit
}
# Placeholder for the "temp" diagnostic advertised in the usage text.
# Currently exits immediately without producing any output.
function do_temp_diag() {
  # TODO: do some magic with the stuff in /sys to fetch temperatures so
  # we don't need to install lm_sensors on everything.
  exit
}
while [[ ! -z "$1" ]]; do
case "$1" in
touchscreen)
do_touchscreen_diag $*
;;
services)
do_services_diag $*
;;
temp)
do_temp_diag $*
;;
*)
echo >&2
echo "Unknown command $1" >&2
exit -1
;;
esac
shift
done | true |
148b7b314dcf168ffb720d58874f18316c6e3ced | Shell | pepstack/common-tools | /scripts/mount_uuiddev.sh | UTF-8 | 6,613 | 4 | 4 | [] | no_license | #!/bin/sh
#
# @file: mount_uuiddev.sh
# !! 本文件必须以 UTF-8 witghout BOM 格式保存 !!
# !! 此脚本会自动格式化设备,会造成设备数据丢失!慎用 !!
#
# el6, el7
#
# @author: cheungmine@qq.com
#
# @create: 2018-05-17
#
# @update: 2021-02-24 17:22:47
#
#######################################################################
# will cause error on macosx
_file=$(readlink -f $0)
_cdir=$(dirname $_file)
_name=$(basename $_file)
_ver=1.0
. $_cdir/common.sh
# Set characters encodeing
# LANG=en_US.UTF-8;export LANG
LANG=zh_CN.UTF-8;export LANG
# https://blog.csdn.net/drbinzhao/article/details/8281645
# Treat unset variables as an error
set -o nounset
# Treat any error as exit
set -o errexit
#######################################################################
usage() {
cat << EOT
${_name} --dev DISK --mnt PATH --mkfs TYPE
挂载 Linux(el6,el7) 硬盘设备(xfs, ext4)脚本.
Options:
-h, --help 显示帮助
-V, --version 显示版本
--dev DISK 硬盘设备名称. 如: /dev/sdd. (可以通过命令: 'fdisk -l')
--mnt PATH 挂载到的路径. 如: /mnt/disk001
--mkfs TYPE 分区类型名称. 如: ext4, xfs
--force 强制格式化.
Examples:
挂载硬盘 /dev/sde 到目录 /mnt/disk01 分区类型为 ext4
$ sudo ./mount_uuiddev.sh --dev /dev/sde --mnt /mnt/disk01 --mkfs ext4
报告错误: 350137278@qq.com
EOT
} # ---------- end of function usage ----------
if [ $# -eq 0 ]; then usage; exit 1; fi
# parse options:
RET=`getopt -o Vh --long version,help,force,dev:,mnt:,mkfs:, \
-n ' * ERROR' -- "$@"`
# Note the quotes around $RET: they are essential!
eval set -- "$RET"
force="0"
while true; do
case "$1" in
-V | --version) echoinfo "$(basename $0) -- version: $_ver"; exit 1;;
-h | --help ) usage; exit 1;;
--force ) force="1"; shift 1 ;;
--dev ) DEVDISK="$2"; shift 2 ;;
--mnt ) MNTROOT="$2"; shift 2 ;;
--mkfs ) MKFSTYPE="$2"; shift 2 ;;
* ) break ;;
esac
done
#######################################################################
if [ "$MKFSTYPE" != "xfs" -a "$MKFSTYPE" != "ext4" ]; then
echoerror "无效的文件类型(必须是 xfs 或 ext4): ""$MKFSTYPE"
exit -1
fi
MKFS="mkfs.$MKFSTYPE"
chk_root
if [ ! -f "/sbin/""$MKFS" ]; then
echoerror "无效的分区类型: ""$MKFSTYPE"
exit -1
fi
echoinfo "要挂载的磁盘设备: "$DEVDISK
echoinfo "挂载到的目标目录: "$MNTROOT
echoinfo "分区文件系统类型: "$MKFSTYPE
# sde, vdb
BLKNAME=$(basename $DEVDISK)
# 也可以通过命令获得: lsblk
BLKPATH=/sys/block/$BLKNAME
# 检查块设备是否存在
if [ ! -d "$BLKPATH" ]; then
echoerror "指定的设备未发现: "$BLKPATH
exit -1
fi
# 检查已经挂载的设备. 只有未挂载的设备才能挂载
mntstr="$DEVDISK on $MNTROOT type $MKFSTYPE"
echoinfo "检查是否已经挂载硬盘:""$mntstr"
foundres=$(findstrindex "$(mount -l)" "$mntstr")
if [ "$foundres" != "" ]; then
echoerror "磁盘设备已经挂载: ""$mntstr"
exit -1
fi
foundres=$(findstrindex "$(mount -l)" " $DEVDISK ")
if [ "$foundres" != "" ]; then
echoerror "磁盘设备已经挂载: "$DEVDISK
exit -1
fi
foundres=$(findstrindex "$(mount -l)" " $MNTROOT ")
if [ "$foundres" != "" ]; then
echoerror "目录已经被挂载: "$MNTROOT
exit -1
fi
# 提示用户确认?
read -p "挂载新硬盘: $DEVDISK 到目录: $MNTROOT, 确认(yes) 取消(n) ?"
if [ "$REPLY" != "yes" ]; then
echowarn "用户取消了操作!"
exit -1
fi
# 目录必须为空才可以挂载。如果目录不存在则创建
if [ ! -d $MNTROOT ]; then
echowarn "目录不存在, 创建: $MNTROOT"
mkdir -m 755 -p $MNTROOT
elif [ "`ls -A $MNTROOT`" != "" ]; then
echoerror "挂载失败:目录不为空。"
exit -1
fi
# 计算起始扇区的位置
optimal_io_size=$(cat $BLKPATH/queue/optimal_io_size)
alignment_offset=$(cat $BLKPATH/alignment_offset)
minimum_io_size=$(cat $BLKPATH/queue/minimum_io_size)
physical_block_size=$(cat $BLKPATH/queue/physical_block_size)
start_fan=2048
if [ "$optimal_io_size" != "0" ]; then
start_fan=$[(optimal_io_size + alignment_offset) / 512]
fi
start_fan=$start_fan"s"
echoinfo "optimal_io_size: "$optimal_io_size
echoinfo "alignment_offset: "$alignment_offset
echoinfo "起始扇区位置:"$start_fan
# 创建新分区表类型
ret=`parted -s $DEVDISK mklabel gpt`
if [ ! $ret ]; then
echoinfo "创建新分区表类型: OK"
else
echoerror "创建新分区错误:$ret"
exit -1
fi
# 划分整个硬盘空间为主分区
ret=`parted -s $DEVDISK mkpart primary $start_fan 100%`
if [ ! $ret ]; then
echoinfo "划分整个硬盘空间为主分区: OK"
else
echoerror "划分硬盘主分区错误:$ret"
exit -1
fi
# 格式化分区为 ext4
read -p "是(Yes)否(n)格式化分区 ($MKFS $DEVDISK) ? "
if [ "$REPLY" != "Yes" ]; then
echowarn "警告:用户禁止了格式化分区操作!"
exit 1
fi
echoinfo "开始格式化分区...""$MKFSTYPE"
if [ "$force" == "1" ]; then
ret=`echo y | $MKFS -f $DEVDISK`
else
ret=`echo y | $MKFS $DEVDISK`
fi
echo "$ret"
# TODO: 判断是否成功
echoinfo "格式化分区: OK"
if [ "$MKFSTYPE" == "xfs" ]; then
echowarn "请使用 xfs_quota 命令手动设置磁盘配额!"
else
echoinfo "设置保留分区百分比: 1%"
ret=`tune2fs -m 1 $DEVDISK`
echo "$ret"
echoinfo "设置保留分区百分比(1%): OK"
fi
# 挂载硬盘设备到目录
ret=`mount -o noatime,nodiratime $DEVDISK $MNTROOT`
echo "$ret"
# 再次判断设备挂载是否成功
foundres=$(findstrindex "$(mount -l)" "$mntstr")
if [ "$foundres" == "" ]; then
echoerror "磁盘设备没有挂载: ""$mntstr"
exit -1
fi
echoinfo "挂载硬盘设备($DEVDISK)到目录($MNTROOT): 成功."
# 加入开机挂载系统命令中,在 /etc/fstab 中添加如下行:
# UUID=1c4f27cc-293b-40c6-af79-77c1403c895c /mnt/disk01 xfs defaults,noatime,nodiratime 0 0
# 如果块设备已经存在, 不要格式化!
# /dev/sda: UUID="5147aced-c022-4b63-9d6d-1a0187683e9d" TYPE="xfs"
devstr=$(blkid_get_dev "$DEVDISK")
uuidstr=$(blkid_get_uuid "$DEVDISK")
typestr=$(blkid_get_type "$DEVDISK")
if [ "$devstr" == "" ]; then
echoerror "没有找到块设备: ""DEVDISK"
exit -1
fi
echowarn "如果开机自动挂载设备, 请将下行添加到文件: /etc/fstab"
echo "UUID=$uuidstr $MNTROOT $typestr defaults,noatime,nodiratime 0 0"
exit 0
| true |
c591e1be528543684c1974b1f732af9ed9323de9 | Shell | djparks/bin | /bash-dedup-history | UTF-8 | 248 | 2.984375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
#
# Remove duplicates in history file.
# Deduplicate $HISTFILE while preserving the original ordering:
#   1. sed            - strip leading/trailing whitespace/control chars
#   2. nl             - prefix every line with its line number
#   3. sort -u -k 2   - keep a single copy of each command (key = the
#                       command text, i.e. everything after the number)
#   4. sort -n        - restore the original order via the line numbers
#   5. cut -f 2       - drop the line numbers again
# The result goes to a temp file which then atomically replaces the
# original.  ${HISTFILE:?} aborts with an error if HISTFILE is unset or
# empty, protecting the mv from acting on a bogus path.
sed -e 's:^[[:space:][:cntrl:]]*::' -e 's:[[:space:][:cntrl:]]*$::' "${HISTFILE:?}" |
nl |
sort -u -k 2 |
sort -n |
cut -f 2 > "$HISTFILE~";
mv -- "${HISTFILE:?}~" "$HISTFILE";
# vim: set ft=sh :
| true |
007706de28d7b2210bcc23dbb473725fc9acc6cc | Shell | vmware-samples/euc-samples | /UEM-Samples/Sensors/macOS/Custom Attributes/system_integrity_protection_status.sh | UTF-8 | 361 | 3.234375 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Report whether System Integrity Protection is enabled on macOS.
# csrutil prints e.g. "System Integrity Protection status: enabled.";
# awk picks the status word and sed strips the trailing period.
SIP_status=$(/usr/bin/csrutil status | awk '/status/ {print $5}' | sed 's/\.//')
# Fix: the original 'elif' closed the test with '}' instead of ']',
# which made the "disabled" branch fail at runtime with a test syntax
# error.  The variable is also quoted so an empty result (csrutil
# missing or unexpected output) does not break the comparison.
if [ "$SIP_status" = "enabled" ]; then
	echo "Enabled"
elif [ "$SIP_status" = "disabled" ]; then
	echo "Disabled"
fi
# Execution Context: SYSTEM
# Execution Architecture: UNKNOWN
# Return Type: STRING | true |
ebb174818044e4e76efa8cddb3f3fe91ff44ece2 | Shell | Bjay1435/capstone | /rootfs/usr/share/bash-completion/completions/modprobe | UTF-8 | 3,092 | 3.265625 | 3 | [
"MIT"
] | permissive | # Linux modprobe(8) completion -*- shell-script -*-
_modprobe()
{
local cur prev words cword
_init_completion || return
case "$prev" in
-C|--config)
_filedir
return
;;
-d|--dirname|-t|--type)
_filedir -d
return
;;
-S|--set-version)
_kernel_versions
return
;;
esac
if [[ "$cur" == -* ]]; then
COMPREPLY=( $( compgen -W '-a --all -b --use-blacklist -C --config -c
--showconfig --dump-modversions -d --dirname --first-time
--force-vermagic --force-modversion -f --force -i --ignore-install
--ignore-remove -l --list -n --dry-run -q --quiet -R
--resolve-alias -r --remove -S --set-version --show-depends -s
--syslog -t --type -V --version -v --verbose' -- "$cur" ) )
return
fi
local i mode=insert module= version=$(uname -r)
for (( i=1; i < $cword; i++ )); do
case "${words[i]}" in
-r|--remove)
mode=remove
;;
-l|--list)
mode=list
;;
--dump-modversions)
mode=file
;;
-S|--set-version)
version=${words[i+1]} # -S is not $prev and not $cur
;;
-C|--config|-d|--dirname|-t|--type)
((i++)) # skip option and its argument
;;
-*)
# skip all other options
;;
*)
[ -z "$module" ] && module=${words[i]}
;;
esac
done
case $mode in
remove)
_installed_modules "$cur"
;;
list)
# no completion available
;;
file)
_filedir
;;
insert)
# do filename completion if we're giving a path to a module
if [[ "$cur" == @(*/|[.~])* ]]; then
_filedir '@(?(k)o?(.gz))'
elif [[ -n "$module" ]]; then
# do module parameter completion
COMPREPLY=( $( compgen -W "$( PATH="$PATH:/sbin" modinfo \
-p "$module" 2>/dev/null | cut -d: -f1 )" -- "$cur" ) )
else
_modules $version
if [[ $COMPREPLY ]]; then
# filter out already installed modules
local -a mods=( "${COMPREPLY[@]}" )
_installed_modules "$cur"
for i in ${!mods[@]}; do
for module in ${COMPREPLY[@]}; do
if [[ ${mods[i]} == $module ]]; then
unset mods[i]
break
fi
done
done
COMPREPLY=( "${mods[@]}" )
fi
fi
;;
esac
} &&
complete -F _modprobe modprobe
# ex: ts=4 sw=4 et filetype=sh
| true |
7c2c2149c6b22495e2b39b6d985b6c4d14d071c0 | Shell | js-cha/teachme | /cmds/variables | UTF-8 | 960 | 4.15625 | 4 | [] | no_license | #!/usr/bin/env bash
echo "This is the variables."
# The command's name
echo -e 'variables'
# the command's description
echo "Variables in bash scripting behave similar to most programming languages. Variables are containers that store data - character, string, number, function , and etc. Additionally you can parse variables as command input."
echo "------------"
# the command's syntax
echo "Syntax:"
echo "VARNAME=VALUE"
echo -e "\tVARNAME: the name of the variable being defined for easy reference."
echo -e "\tVALUE: the value of the variable being defined. This value can be of different types - string, integer, boolean, array."
# ... etc
echo "------------"
# the command's usage examples
# example 1
echo "my_array={2, 4, 6, 8, 10}"
echo "sets the variable name 'my_array' equal to the given array"
# example 2
echo "alert_msg='User warning! Access denied'"
echo "sets the variable named 'alert_msg' equal to the given string"
exit 0
| true |
5dbdb0891b7c6efd397e6e66a6db942c752fa074 | Shell | Treauxcoin/treauxcoin-src | /contrib/initscripts/bsd/novacoin | UTF-8 | 1,881 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# PROVIDE: TREAUXCOIN
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# TREAUXCOIN_enable (bool): Set to NO by default. Set this variable to YES if you want to enable TREAUXCOIN service.
# TREAUXCOIN_config (path): Set to /usr/local/etc/TREAUXCOIN.conf by default.
# TREAUXCOIN_user: The user account TREAUXCOIN daemon runs as. It uses 'root' user by default.
# TREAUXCOIN_group: The group account TREAUXCOIN daemon runs as. It uses 'wheel' group by default.
# TREAUXCOIN_datadir (str): Default to "/var/db/TREAUXCOIN". Base data directory.
. /etc/rc.subr
name=TREAUXCOIN
rcvar=TREAUXCOIN_enable
: ${TREAUXCOIN_enable:=NO}
: ${TREAUXCOIN_config=/usr/local/etc/TREAUXCOIN.conf}
: ${TREAUXCOIN_datadir=/var/db/TREAUXCOIN}
: ${TREAUXCOIN_user="root"}
: ${TREAUXCOIN_group="wheel"}
required_files=${TREAUXCOIN_config}
command=/usr/local/bin/TREAUXCOINd
TREAUXCOIN_chdir=${TREAUXCOIN_datadir}
pidfile="${TREAUXCOIN_datadir}/TREAUXCOINd.pid"
stop_cmd=TREAUXCOIN_stop
command_args="-conf=${TREAUXCOIN_config} -datadir=${TREAUXCOIN_datadir} -daemon -pid=${pidfile}"
start_precmd="${name}_prestart"
TREAUXCOIN_create_datadir()
{
	# Create the daemon's data directory and hand ownership to the
	# configured service account.  Returns non-zero if mkdir fails
	# (chown is skipped in that case).
	# Fix: the original ran mkdir through `eval`, which re-evaluates
	# and word-splits the path for no benefit; plain quoted expansion
	# is safer and equivalent.
	echo "Creating data directory"
	mkdir -p "${TREAUXCOIN_datadir}" && \
	    chown -R "${TREAUXCOIN_user}:${TREAUXCOIN_group}" "${TREAUXCOIN_datadir}"
}
TREAUXCOIN_prestart()
{
if [ ! -d "${TREAUXCOIN_datadir}/." ]; then
TREAUXCOIN_create_datadir || return 1
fi
}
TREAUXCOIN_requirepidfile()
{
if [ ! "0`check_pidfile ${pidfile} ${command}`" -gt 1 ]; then
echo "${name} not running? (check $pidfile)."
exit 1
fi
}
TREAUXCOIN_stop()
{
TREAUXCOIN_requirepidfile
echo "Stopping ${name}."
eval ${command} -conf=${TREAUXCOIN_config} -datadir=${TREAUXCOIN_datadir} stop
wait_for_pids ${rc_pid}
}
load_rc_config $name
run_rc_command "$1"
| true |
b6d2db3bda99a1b98b7f781e9a0f37c6c3a6306d | Shell | clark2668/grid_scripts | /atten_length_sims/make_dagman_file.sh | UTF-8 | 477 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Build an HTCondor DAGMan submit file: one job per (version, energy, run)
# combination — 2 versions x 3 energies x 400 runs = 2400 jobs in total.
job_file=dagman.dag

# Global DAGMan configuration header, followed by a blank line.
printf 'CONFIG dagman.config\n\n' >> "$job_file"

job_index=1
for version in new old; do
	for energy in 17 18 19; do
		njob=1
		while [ "$njob" -le 400 ]; do
			# Each entry: a JOB line, its VARS line, then a blank separator.
			{
				echo "JOB job_${job_index} job.sub"
				echo "VARS job_${job_index} version=\"${version}\" energy=\"${energy}\""
				echo ""
			} >> "$job_file"
			job_index=$((job_index + 1))
			njob=$((njob + 1))
		done
	done
done
81941fccd89b20d0c1f726a2ae3d6675a3dd262c | Shell | mcewand/Linode-SS1-for-Yum | /user_build | UTF-8 | 1,485 | 3.15625 | 3 | [] | no_license | #!/bin/bash
####create a user, add to ssh and sudoers, add ssh key-based authentication
#make user
/usr/sbin/adduser $NEW_USER #ok
passwd $NEW_USER #fail
#add user to sshd
echo 'AllowUsers $NEW_USER' >> /etc/ssh/sshd_config | /etc/init.d/sshd restart #ok
#add user to sudoers
sudo echo '$NEW_USER ALL=(ALL) ALL' >> /etc/sudoers #ok
#create ssh key authentication
mkdir /home/$NEW_USER/.ssh
chown $NEW_USER:$NEW_USER /home/$NEW_USER/.ssh -r
cd /home/$NEW_USER/.ssh
sudo -u $NEW_USER ssh-keygen -t rsa
cat $KEY >> authorized_keys
chmod 700 /home/$NEW_USER/.ssh -r
chmod 600 /home/$NEW_USER/.ssh/authorized_keys
#### Install MySQL 5.5, PHP 5.3 and Apache
#epel/remi for CentOS 5.6
rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm #ok
rpm -Uvh http://rpms.famillecollet.com/enterprise/remi-release-5.rpm #ok
#epel/remi for CentOS 6
rpm -Uvh http://download.fedora.redhat.com/pub/epel/6/i386/epel-release-6-5.noarch.rpm
rpm -Uvh http://rpms.famillecollet.com/enterprise/remi-release-6.rpm
#Install mysql 5.5
yum --enablerepo=remi,remi-test install mysql mysql-server
/etc/init.d/mysqld start
mysqladmin -u root password $MYSQL_ROOT
chkconfig --levels 235 mysqld on
#install php 5.3
yum --enablerepo=remi install httpd php php-common
yum --enablerepo=remi install php-pear php-pdo php-mysql php-pgsql php-pecl-memcache php-gd php-mbstring php-mcrypt php-xml
/etc/init.d/httpd start
chkconfig --levels 235 httpd on
| true |
6e8dd46976a8d5d83d3f000f5498d3d4512fa878 | Shell | tkellen/memorybox | /internal/lambda/scripts/create.sh | UTF-8 | 930 | 2.71875 | 3 | [
"MIT"
] | permissive | (
cd ${TEMP_DIR}
curl -SsL https://github.com/tkellen/memorybox/releases/download/${VERSION}/memorybox_linux_amd64.tar.gz | tar xzf - memorybox
cat <<EOF > run.py
${SCRIPT}
EOF
zip -r memorybox.zip run.py memorybox
aws iam create-role \
--role-name ${ROLE_NAME} \
--assume-role-policy-document '{"Version": "2012-10-17","Statement": [{ "Effect": "Allow", "Principal": {"Service": "lambda.amazonaws.com"}, "Action": "sts:AssumeRole"}]}'
aws iam attach-role-policy \
--role-name ${ROLE_NAME} \
--policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
sleep 5
aws lambda create-function \
--function-name ${ROLE_NAME} \
--runtime python3.8 \
--role $(aws iam get-role --role-name ${ROLE_NAME} --output text --query='Role.Arn') \
--zip-file=fileb://memorybox.zip \
--handler run.main \
--memory 3008 \
--timeout 180
rm memorybox.zip run.py memorybox
) | true |
cc70712414b8c07454eab782aa2f5fa4319c3d1f | Shell | mizzen250/Arch-Installation | /Arch-Installation-Script-Vaio-Chrooted.sh | UTF-8 | 4,770 | 3.234375 | 3 | [] | no_license | #!/bin/bash
##
##
##
## Make Log
export log=/root/install.log
export mirrors="https://www.archlinux.org/mirrorlist/?country=CA&country=US&protocol=https&ip_version=4"
export mirrorlist=/etc/pacman.d/mirrorlist
export ok="good"
logSuccess () {
    # Append ">>> <message> -- <timestamp>" to the install log ($log).
    # %b mirrors the original `echo -e` backslash-escape handling.
    printf '>>> %b -- ' "$1" >> "$log" && date +/%y/%m/%d-%I:%M:%S >> "$log"
}
logFail () {
    # Append "!!! <message> !!! -- <timestamp>" to the install log and
    # record the severity passed as $2 (good | bad | critical) in $ok.
    printf '!!! %b !!! -- ' "$1" >> "$log" && date +/%y/%m/%d-%I:%M:%S >> "$log" && export ok="$2"
}
logNote () {
    # Append a NOTE line (preceded by a blank log line) with a timestamp
    # to the install log, then emit a bare newline to stdout.
    printf '\n NOTE: %b -- ' "$1" >> "$log" && date +/%y/%m/%d-%I:%M:%S >> "$log" && echo ""
}
logNote "Installation started"
echo -e "\n ================================================================="
echo " Installation Script - For Vaio"
echo -e "================================================================= \n"
echo "This script was written for a specific computer but you're welcome to use it"
## Enter Timezone
echo "73:Timezone"
#ln -sf /usr/share/zoneinfo/Canada/Pacific
ln -sf /usr/share/zoneinfo/UTC || \
return
## Run hwclock to generate /etc/adjtime
echo "80:Creating /etc/adjtime"
hwclock --systohc || \
return
## Localization
echo "85:Localization"
cd /etc && \
mv locale.gen locale.gen.bu && \
echo "## Modified By Tim" > locale.gen && \
echo "## Backup located at /etc/locale.gen.bu" >> locale.gen && \
echo "## " >> locale.gen && \
echo "en_US.UTF-8 UTF-8" >> locale.gen && \
locale-gen && \
echo "LANG=en_US.UTF-8" > locale.conf || \
return
## Network Configuration
echo "97:Setting /etc/hostname"
echo "TimsVaio" > hostname || \
return
echo "101:Editing /etc/hosts"
echo "127.0.0.1 localhost" >> hosts && \
echo "127.0.1.1 TimsVaio.localdomain TimsVaio" >> hosts || \
return
echo ""
## Root passwd
echo "108:User Accounts"
echo "!!! Setting Up User Accounts !!!" && \
echo "Enter Root Passwd" && \
passwd || \
return
## Setup sutime
echo "115:sutime"
echo ""; echo "!!! sutime" && \
useradd -mG wheel -s /bin/bash sutime && \
echo "!!! Enter Password" && \
passwd sutime || \
return
## Setup tim
echo "123:tim"
echo ""; echo "!!! tim"
useradd -ms /bin/bash time && \
echo "!!! Enter Password" && \
passwd tim || \
return
echo ""; echo "!!! K, It's time to Edit the sudoers file"
sleep 1
visudo
return
## Install System Stuff
echo "135:"
pacman -S --noconfirm exfat-utils thermald i7z cpupower || \
return
## Install Boot Stuff
echo "140:"
pacman -S --noconfirm grub intel-ucode sddm xorg || \
return
## Install Network Stuff
echo "145:"
pacman -S --noconfirm networkmanager bluez dialog || \
return
## Install Odds and Ends
echo "150:"
pacman -S --noconfirm cowsay vim mc htop mlocate || \
return
## Install Printer stuff
echo "155:"
pacman -S --noconfirm cups cups-pdf samba || \
return
## Install Pacman Stuff
pacman -S --noconfirm pacman-contrib pkgstats || \
return
## Install Security Stuff
pacman -S --noconfirm rkhunter || \
return
## GRUB
grub-install --target=i386-pc /dev/sda && \
grub-mkconfig -o /boot/grub/grub.cfg || \
return
## Update mlocate db
updatedb || \
return
## Setup paccache
systemctl enable paccache.timer && \
paccache -rk2 && \
paccache -ruk0 || \
return
## Set Up sddm
systemctl enable sddm.service || \
return
## Enable thermald
systemctl enable thermald.service || \
return
## Enable cpupower
systemctl enable cpupower.service || \
return
## Enable CUPS Service
systemctl enable org.cups.cupsd.service || \
return
## ********** DONT FORGET Samba needs more Configuration *******
#systemctl enable smb.service
## rkhunter
rkhunter --propupd && \
rkhunter -c --sk || \
return
echo "" >> /root/install.log;echo "" >> /root/install.log;
echo -n "successfully finished at: " >> /root/install.log; date >> /root/install.log
echo "" >> /root/install.log;echo "" >> /root/install.log;
##
## EOF
##
| true |
fce9f9aafb5ce37c1ea0b6ad905eaca04f59f3df | Shell | tcolgan-uw/MRDataTransferTool | /ge_data_tools/sort_ge_files | UTF-8 | 5,097 | 3.703125 | 4 | [] | no_license | #!/bin/bash
main(){
if [ "$#" -gt 1 ]; then
echo "Usage is sort_ge_files [folder]"
exit 1
fi
if [ "$#" -eq 0 ]; then
echo "You did not provide an archive folder"
ARCHIVE_FOLDER=$(pwd)
else
ARCHIVE_FOLDER=$1
fi
if [ -d $ARCHIVE_FOLDER ]
then
echo "Archive folder is $ARCHIVE_FOLDER"
else
echo "Archive folder does not exist ( $ARCHIVE_FOLDER )"
return 1
fi
GE_FILES=($(find -type f -name "P*.7" -o -type f -name "ScanArchive*.h5" ))
for CURRENT_FILE in "${GE_FILES[@]}";
do
echo $CURRENT_FOLDER
HEADER=`pfile_info $CURRENT_FILE print`
if [ $? -ne 0 ]
then
echo "File is not a Pfile!"
FOLDER="Unknown_GE_Files"
mkdir -p $ARCHIVE_FOLDER/$FOLDER
if [ $? -ne 0 ] ; then
echo "Could not create directory $FOLDER!! Exiting"
exit 1
fi
check_mv $CURRENT_FILE $ARCHIVE_FOLDER/$FOLDER
if [ $? -ne 0 ] ; then
echo "Could not move $CURRENT_FILE to $ARCHIVE_FOLDER/$FOLDER!! Exiting"
exit 1
fi
else
# Grab the header variables
KACQUID=$(get_field "${HEADER}" rdbhead.rdb_hdr_kacq_uid)
EXAM=$(get_field "${HEADER}" examhead.ex_no)
SERIES=$(get_field "${HEADER}" serieshead.se_no)
DESC=$(get_field_scrubbed "${HEADER}" serieshead.se_desc)
EX_DESC=$(get_field_scrubbed "${HEADER}" examhead.ex_desc)
RADIOLOGIST=$(get_field "${HEADER}" examhead.diagrad)
SCAN_DATE=$(get_field "${HEADER}" examhead.ex_datetime)
SUBJECT_NAME=$(get_field "${HEADER}" examhead.patnameff)
EX_UID=$(get_field "${HEADER}" examhead.study_uid)
GATING_ID=$(get_field "${HEADER}" pcvipr_id)
EXAM=$(printf %05d $EXAM)
SERIES=$(printf %05d $SERIES)
# Print for good measure
echo " Kacq UID = $KACQUID"
echo " Exam = $EXAM"
echo " Desc = $EX_DESC"
echo " UID = $EX_UID"
echo " Series = $SERIES"
echo " Desc = $DESC"
echo " Radiologist = $RADIOLOGIST"
echo " Subject Name = $SUBJECT_NAME"
echo " Scan Date = $SCAN_DATE"
if [ "${#RADIOLOGIST}" -gt "0" ]
then
echo "Using Radiologist Field for Folder Naming"
IFS='^' read -ra ARRAY <<< "$RADIOLOGIST"
FOLDER=""
for j in "${ARRAY[@]}"; do
FOLDER=$FOLDER$j"_"
done
FOLDER=${FOLDER%?}
if [ "${#ARRAY}" -eq "0" ]
then
FOLDER="Unknown_GE_Files"
fi
else
echo " Using Patient Name for Folder Naming"
IFS='^' read -ra ARRAY <<< "$SUBJECT_NAME"
FOLDER=""
for j in "${ARRAY[@]}"; do
FOLDER=$FOLDER$j"_"
done
FOLDER=${FOLDER%?}
if [ "${#ARRAY}" -eq "0" ]
then
FOLDER="Unknown_GE_Files"
fi
fi
FOLDER=$(scrub_field ${FOLDER})
#
# Create folder names for subject
#
if [ "${#SCAN_DATE}" -gt "0" ]
then
VISIT_FOLDER=$(scrub_field ${FOLDER}"_"$EXAM"_"${SCAN_DATE})
else
VISIT_FOLDER=$(scrub_field $CURRENT_FILE)
VISIT_FOLDER=${VISIT_FOLDER#.}
fi
VISIT_FOLDER="$ARCHIVE_FOLDER/$VISIT_FOLDER"
#
# Create folder names for scan
#
SCAN_FOLDER="${EXAM}_${SERIES}_${DESC}"
SCAN_FOLDER=$(scrub_field ${SCAN_FOLDER})
#
# Look for files related to the scan
#
ACC_FILES=""
if [ "${#KACQUID}" -gt "0" ]
then
ACC_FILES+=$(find -maxdepth 4 -name "*$KACQUID*" )
fi
if [ "${#GATING_ID}" -gt "0" ]
then
ACC_FILES+=$(find -maxdepth 4 -name "*$GATING_ID*" )
fi
echo " Accessory files = $ACC_FILES"
#
# Actually Create Directory Structures
#
echo " Gating ID = $GATING_ID"
echo " Visit Folder = $VISIT_FOLDER"
mkdir -p $VISIT_FOLDER
if [ $? -ne 0 ] ; then
echo "Could not create directory $VISIT_FOLDER!! Exiting"
exit 1
fi
mkdir -p $VISIT_FOLDER/$SCAN_FOLDER
if [ $? -ne 0 ] ; then
echo "Could not create directory $VISIT_FOLDER/$SCAN_FOLDER!! Exiting"
exit 1
fi
mkdir -p $VISIT_FOLDER/$SCAN_FOLDER/raw_data/
if [ $? -ne 0 ] ; then
echo "Could not create directory $VISIT_FOLDER/$SCAN_FOLDER/raw_data!! Exiting"
exit 1
fi
mkdir -p $VISIT_FOLDER/$SCAN_FOLDER/processed_data/
if [ $? -ne 0 ] ; then
echo "Could not create directory $VISIT_FOLDER/$SCAN_FOLDER/raw_data!! Exiting"
exit 1
fi
#
# Copy accessory files
#
for acc in ${ACC_FILES[@]}
do
echo "Working on file $acc"
check_mv $acc $VISIT_FOLDER/$SCAN_FOLDER/raw_data/
done
#
# Copy the main file
#
check_mv $CURRENT_FILE $VISIT_FOLDER/$SCAN_FOLDER/raw_data/
fi # Whether scan or not
done
}
get_field() {
	# Extract the value of header field $2 (e.g. examhead.ex_no) from
	# the pfile_info output passed as $1: find the matching line and
	# return everything after "<field>=".
	local line
	# Fix: fixed-string match (-F).  Field names contain dots, which an
	# unquoted regex grep would treat as wildcards and could match the
	# wrong header line.  `--` guards against patterns starting with '-'.
	line=$(printf '%s\n' "$1" | grep -F -- "$2")
	# NOTE: deliberately unquoted so runs of whitespace collapse,
	# matching the historical output format callers build paths from.
	echo ${line#*"$2"=}
}
get_field_scrubbed() {
	# Like get_field, but sanitised for filesystem use: spaces become
	# underscores and every character that is not alphanumeric, '-',
	# '_' or '.' is removed.
	local value
	# Fix: fixed-string match (-F) so dots in field names are literal,
	# and `--` guards against patterns starting with '-'.
	value=$(printf '%s\n' "$1" | grep -F -- "$2")
	value=${value#*"$2"=}
	value=${value// /_}
	echo "${value//[^-_.[:alnum:]]/}"
}
scrub_field() {
	# Sanitise an arbitrary string for use as a path component: spaces
	# become underscores, every other character that is not alphanumeric,
	# '-' or '_' is dropped (note: unlike get_field_scrubbed, dots go too).
	local cleaned="$1"
	cleaned=${cleaned// /_}
	echo "${cleaned//[^-_[:alnum:]]/}"
}
function check_mv () {
    # Move $1 into directory $2, guarding against common mistakes:
    #   - no-op (return 0) when source and target are the same file
    #   - refuse (return 1) when the target is not a directory
    # `mv -i` keeps the original behaviour of prompting before an
    # existing file at the destination is overwritten.
    # FIX: all expansions quoted so paths with spaces work, and `--`
    # protects against file names that begin with '-'.
    if [ "$1" -ef "$2" ]
    then
        echo "Check_mv:: $1 and $2 are the same file"
        return 0
    fi
    if [ -d "$2" ]
    then
        echo "  Yes - In is folder"
    else
        echo "Moving $1 to $2 but $2 is not a folder!!!!"
        return 1
    fi
    mv -i -- "$1" "$2"
    return 0
}
main "$@"
| true |
f5d1162f10836455ad131c962566b135caa95daa | Shell | sneumann/CRIMSy | /util/bin/updateCloud.sh | UTF-8 | 2,970 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Maintenance Script
# Cloud Resource & Information Management System (CRIMSy)
# Copyright 2020 Leibniz-Institut f. Pflanzenbiochemie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Update the CRL in the proxy container
# This command should be executed by cron on a regular basis.
#
#==========================================================
#
function download {
    # Download the CA certificate and CRL for one cloud node into the
    # local proxy configuration tree.
    #
    # NOTE(review): this function is invoked via
    #     xargs -l1 -i /bin/bash -c download "{}"
    # With `bash -c`, the first extra argument becomes $0 (not $1), so
    # the tab-separated record deliberately arrives in $0 here.
    # Record layout (tab-separated fields):
    #     1=name  2=hash  3=fingerprint  4=CA-cert URL  5=CRL URL
    NAME=`echo "$0" | cut -f1`
    HASH=`echo "$0" | cut -f2`
    FP=`echo "$0" | cut -f3`
    CACERT=`echo "$0" | cut -f4`
    CRL=`echo "$0" | cut -f5`
    # File names embed the fingerprint so multiple CAs with the same
    # name cannot collide.
    wget -O $LBAC_DATASTORE/dist/proxy/conf/crl/$NAME.$FP.crl $CRL
    wget -O $LBAC_DATASTORE/dist/proxy/conf/crt/$NAME.$FP.pem $CACERT
}
export -f download
error() {
    # Report a fatal problem and terminate the whole script with
    # exit status 1.
    local msg=$1
    echo $msg
    exit 1
}
#
# Download CA certificates and CRLs, provide symlinks
#
function install {
    # Refresh the proxy's trust store: wipe the current certificates and
    # CRLs, re-download one pair per unique node record, and rebuild the
    # OpenSSL hash symlinks.
    rm -f $LBAC_DATASTORE/dist/proxy/conf/crt/*
    rm -f $LBAC_DATASTORE/dist/proxy/conf/crl/*
    # `uniq -f2 -w41` de-duplicates records on the fingerprint column;
    # each surviving line is handed to the exported `download` function.
    cat $LBAC_DATASTORE/dist/etc/*/addresses.txt | sort | uniq -f2 -w41 | xargs -l1 -i /bin/bash -c download "{}"
    # c_rehash creates the <hash>.N symlinks OpenSSL uses for lookups.
    c_rehash $LBAC_DATASTORE/dist/proxy/conf/crt/
    c_rehash $LBAC_DATASTORE/dist/proxy/conf/crl/
}
#
# Get new CACerts and CRLs and install them into proxy Container
# run only if proxy is enabled
#
function cacrl {
    # Refresh CA certificates / CRLs and push them into the running
    # proxy container. Runs only when the proxy is enabled in config.sh.
    . $LBAC_DATASTORE/dist/etc/config.sh
    # NOTE(review): the flag is spelled LBAC_PROXY_ENABLE2 here -- verify
    # against config.sh that this is the intended variable name.
    if test $LBAC_PROXY_ENABLE2 = "ON" ; then
        install
        # Ship the refreshed crt/ and crl/ trees into the container and
        # let its ca_update.sh apply them.
        tar -C $LBAC_DATASTORE/dist/proxy/conf -cf /tmp/ca_update.tar crt/ crl/
        docker cp /tmp/ca_update.tar dist_proxy_1:/install/
        docker exec dist_proxy_1 /usr/local/bin/ca_update.sh
        rm "/tmp/ca_update.tar"
        # Restore ownership to match the rest of the proxy config
        # (this function runs as root).
        chown -R --reference=$LBAC_DATASTORE/dist/proxy/conf/httpd.conf \
            $LBAC_DATASTORE/dist/proxy/conf/crt \
            $LBAC_DATASTORE/dist/proxy/conf/crl
    fi
}
#
# do all the maintenance stuff for this node
#
function update {
    # Full maintenance pass for this node: refresh CA material in the
    # proxy, then purge old logs inside the UI container.
    cacrl
    docker exec dist_ui_1 /usr/local/bin/logpurge.sh
}
#
#==========================================================
#
# Source the per-user settings; this must define LBAC_DATASTORE.
# BUG FIX: the failure message previously read "Local cloud node is
# configured properly" -- the opposite of the condition it reports.
. $HOME/.lbac || error "Local cloud node is not configured properly"
export LBAC_DATASTORE
# Dispatch on the requested sub-command; cacrl/update mutate system
# state and therefore require root.
case "$1" in
    install)
        install
        ;;
    cacrl)
        test `id -u` -eq 0 || error "The cacrl function must be called as root"
        cacrl
        ;;
    update)
        test `id -u` -eq 0 || error "The update function must be called as root"
        update
        ;;
    *)
        error "Usage: updateCloud.sh install | cacrl | update"
esac
| true |
b8744675bd193a5c9e1576c9dadad1b7b31b11ca | Shell | darkn3rd/script-tut | /shell_scripts/posix/b20.variables.sh | UTF-8 | 226 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env sh
# define the values we are going to display
string="This is a string"
char=a
num=5
# print each value using printf format specifiers
printf "Number is %d.\n" "$num"
printf "Character is '%c'.\n" "$char"
printf "String is \"%s\".\n" "$string"
| true |
52416d338545885ea7d0874616675ea08f47049f | Shell | CompulsiveCoder/ArduinoPlugAndPlay | /scripts-installation/init.sh | UTF-8 | 2,141 | 3.90625 | 4 | [] | no_license | echo "Initializing plug and play..."
echo " Current directory:"
echo " $PWD"
DIR=$PWD
BRANCH=$1
SMTP_SERVER=$2
ADMIN_EMAIL=$3
EXAMPLE_COMMAND="Example:\n..sh [Branch] [SmtpServer] [AdminEmail]"
if [ ! $BRANCH ]; then
BRANCH="master"
fi
echo " Branch: $BRANCH"
echo " SMTP server: $SMTP_SERVER"
echo " Admin email: $ADMIN_EMAIL"
echo ""
echo " Installing libraries..."
CONFIG_FILE="ArduinoPlugAndPlay/lib/net40/ArduinoPlugAndPlay.exe.config";
CONFIG_FILE_SAVED="ArduinoPlugAndPlay.exe.config";
echo " Library config file:"
echo " $PWD/$CONFIG_FILE"
echo " Saved config file:"
echo " $PWD/$CONFIG_FILE_SAVED"
echo ""
echo " Installing the ArduinoPlugAndPlay library..."
bash install-package-from-github-release.sh $BRANCH CompulsiveCoder ArduinoPlugAndPlay 1.0.2.56 || exit 1
# If the config file is found in the downloaded package
if [ -f $CONFIG_FILE ]; then
echo ""
echo " Config file found. Preserving."
# If no custom config file is found
if [ ! -f $CONFIG_FILE_SAVED ]; then
# Copy the config file from the package into the saved location
cp -v $CONFIG_FILE $CONFIG_FILE_SAVED || exit 1
fi
else
echo ""
echo " Can't find config file in library:"
echo " $CONFIG_FILE"
exit 1
fi
echo ""
echo "  Injecting email details into configuration file"
# `[ $VAR ]` is true when the variable is non-empty; xmlstarlet edits
# the saved config in place (-L), setting the matching appSettings value.
if [ $SMTP_SERVER ]; then
  xmlstarlet ed -L -u '/configuration/appSettings/add[@key="SmtpServer"]/@value' -v "$SMTP_SERVER" $CONFIG_FILE_SAVED
fi
if [ $ADMIN_EMAIL ]; then
  xmlstarlet ed -L -u '/configuration/appSettings/add[@key="EmailAddress"]/@value' -v "$ADMIN_EMAIL" $CONFIG_FILE_SAVED
fi
# If a saved/custom config file is found then install it
if [ -f $CONFIG_FILE_SAVED ]; then
echo ""
echo " Installing saved config file..."
# Copy the default config file to a .bak file
echo ""
echo " Backing up empty config file"
cp $CONFIG_FILE $CONFIG_FILE.bak || exit 1
echo ""
echo " Restoring saved config file"
# Install the saved/custom config file into the library
cp $CONFIG_FILE_SAVED $CONFIG_FILE || exit 1
fi
cd $DIR
echo ""
echo "Arduino plug and play initialization complete."
| true |
09174bd9d2324b79282a43c60c05aedb5467b7c5 | Shell | djangulo/go-fast | /deployments/production/traefik/_sourced/constants.sh | UTF-8 | 359 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# User-facing message constants shared by the traefik setup scripts.
# Note: '\n' inside SCRIPT_WARNING is a literal backslash-n unless the
# consumer prints it with `echo -e` or printf.
SCRIPT_WARNING="This script provides the basic setting for a traefik-scalable config\n(with docker), it may make changes to the machine's filesystem."
DOCKER_REQUIRED="This script requires docker to run: docker not found in \$PATH"
SED_REQUIRED="This script requires GNU sed <https://www.gnu.org/software/sed/>: sed not found in \$PATH"
| true |
4633eea107236519471db296577258710bc209c6 | Shell | Spaxe/Dotfiles | /pip-sync.sh | UTF-8 | 652 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Sync pip packages via Dropbox
#
# Merges the locally installed package list (pip freeze) with the list
# stored in Dropbox, writes the de-duplicated union back to Dropbox,
# and installs any packages missing locally.
PIP="pip"
SYNC_FILE=~/Dropbox/Apps/Pip/pip-sync.txt
TMP_FILE=/tmp/pip-sync.txt
# first get local settings
echo "Reading local settings ..."
rm -f "$TMP_FILE"
"$PIP" freeze > "$TMP_FILE"
# then combine it with the list in Dropbox (if one exists yet)
echo "Reading settings from Dropbox ..."
[ -e "$SYNC_FILE" ] && cat "$SYNC_FILE" >> "$TMP_FILE"
# make the list unique and sync into Dropbox (sort -u == sort | uniq)
echo "Syncing to Dropbox ..."
mkdir -p ~/Dropbox/Apps/Pip
sort -u "$TMP_FILE" > "$SYNC_FILE"
# Install missing pip packages
echo "Install missing packages ..."
"$PIP" install -r "$SYNC_FILE"
| true |
495a3da0cf566666cffda793cac342a27c9809fc | Shell | simensen/mwphp15 | /vagrant/modules/dotfiles/files/.command-files/.mysql-commands | UTF-8 | 523 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# redb: disable integrity checks for speed, replay the current dump into
# $PROJECT_DATABASE, then re-enable the checks in one transaction.
alias redb='mysql -u root -proot $PROJECT_DATABASE -e "SET unique_checks=0; SET foreign_key_checks=0; SET autocommit=0; source /var/www/current-data-dump.sql; COMMIT; SET foreign_key_checks=1; SET unique_checks=1;"'
# savedb: write a timestamped dump of $PROJECT_DATABASE under /var/www.
alias savedb='DATE=$(date +%Y_%m_%d-%H_%M_%S) && mysqldump -u root -proot $PROJECT_DATABASE > /var/www/${DATE}-data-dump.sql'
# command_notation (defined elsewhere in the dotfiles) registers the
# help text displayed for each alias.
command_notation 'redb' 'Reload the mysqldb to the state of /var/www/current-data-dump.sql'
command_notation 'savedb' "Export a copy of the current $PROJECT_DATABASE db"
5d801d152f2bfc07ac21533699780e975b40fef9 | Shell | sitalprusty/AutomationChallenge | /minikube-terraform/setup.sh | UTF-8 | 1,010 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env bash
# Provision an Amazon Linux EC2 host with docker, python test deps,
# kubectl and a driverless (host-docker) minikube. Must run as root.
set -e
# Install system dependencies
pushd "/tmp"
yum update -y
yum install -y git jq
amazon-linux-extras install docker -y
# Setup Docker
service docker start
usermod -a -G docker ec2-user
# Setup python dependencies and test libraries
curl -O https://bootstrap.pypa.io/get-pip.py
python get-pip.py
pip install tornado numpy
# Host system directories needed for ES
# (world-writable so the unprivileged Elasticsearch container can write)
mkdir -p /usr/share/elasticsearch/data/nodes
mkdir -p /mnt/data
chmod -R 777 /usr/share/elasticsearch/data
chmod -R 777 /mnt/data
# Install minikube and kubectl
curl -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.13.7/2019-06-11/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/bin/kubectl
kubectl version --short --client
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
chmod +x ./minikube
mv ./minikube /usr/bin/minikube
# NOTE(review): minikube is started twice on purpose-looking retry
# logic -- the first start typically fails until conntrack is
# installed; confirm this is still required for current minikube.
minikube start --vm-driver=none
yum install -y conntrack
minikube status
minikube start --vm-driver=none
minikube status
| true |
980309e49ec99ef1a9b870ae1bb9f5d0f5a2d375 | Shell | ColleyLi/Truck-Hybrid-A-Star | /cpplint_shell.sh | UTF-8 | 766 | 3.359375 | 3 | [] | no_license | #! /bin/bash
echo "^@^cpplint code style check through shell====^"

# Locate cpplint.py and the sources to check, relative to the cwd.
pwd_path=$(pwd)
cpplint_path="$pwd_path/cpplint.py"
echo cpplint_path=$cpplint_path
src_path="$pwd_path/src"
echo src_path=$src_path

# Collect the C/C++ sources directly under src/.
index=0
check_files=()
for file in $(find "$src_path" -maxdepth 1 -type f | grep -E "\.h$|\.cc$|\.cu$|\.cpp$")
do
    echo file=$file
    echo -e "\033[36m ===> [FILE] \033[0m \033[47;31m $file \033[0m"
    check_files[$index]=$file
    index=$(($index+1))
done

# Run cpplint on every collected file.
# BUG FIX: the original built the command as "$check_cmd"$file with no
# separating space, producing "--linelength=80<path>" and an invalid
# command line; invoke cpplint directly instead of eval-ing a string.
for file in "${check_files[@]}"
do
    echo -e "\033[33m =========== check file $file =============\033[0m"
    python2 "$cpplint_path" --linelength=80 "$file"
    echo -e "\033[45m ==========================================\033[0m"
done
| true |
bdb69612eb81eadf7dd1c0d16bf9d7c6dfe2747c | Shell | Liao-YiHsiu/Cluster-client-script | /softwares/install2-caffe.sh | UTF-8 | 2,280 | 3.265625 | 3 | [] | no_license | #!/bin/bash -xe
## Caffe installation on CentOS by simpdanny
## Require sudo to complete this installation
YUM_OPTIONS="-y --enablerepo=epel"
threads=$(nproc)
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
## Install location
CAFFE_PATH=/home_local/speech/Cluster-client-script
CAFFE=$CAFFE_PATH/caffe
cd $CAFFE_PATH
## Assume cuda is installed
CUDA_PATH=/usr/local/cuda
## Require v2 cudnn for cuda 7.0
## Require v1 cudnn for cuda 6.5 or below
CUDNN_PATH=/share/cuda
## Install cuDNN
cp $CUDNN_PATH/include/cudnn.h $CUDA_PATH/include
cp $CUDNN_PATH/lib64/lib* $CUDA_PATH/lib64
## Expand repository
## RHEL/CentOS 7 64-Bit ##
cd /tmp
# BUG FIX: the old code downloaded epel-release-7-10 but removed,
# installed and cleaned up epel-release-7-9, so the freshly fetched rpm
# was never actually installed. Use one variable for all four steps.
EPEL_RPM=epel-release-7-10.noarch.rpm
rm -rf "$EPEL_RPM"
wget "http://dl.fedoraproject.org/pub/epel/7/x86_64/e/$EPEL_RPM"
rpm -ivh "$EPEL_RPM" || true
rm -f "/tmp/$EPEL_RPM"
## Install general dependencies
yum $YUM_OPTIONS install protobuf-devel leveldb-devel snappy-devel opencv-devel boost-devel hdf5-devel
yum install -y libpng-devel.x86_64 freetype-devel.x86_64
## Install more dependencies
yum $YUM_OPTIONS install gflags-devel glog-devel lmdb-devel
## Install BLAS
yum $YUM_OPTIONS install atlas-devel
yum $YUM_OPTIONS install openblas-devel.x86_64
## Install jpeg
yum $YUM_OPTIONS install libjpeg-turbo-devel.x86_64
## Install Python headers
yum $YUM_OPTIONS install python-devel
## Require git to clone Caffe on github
rm -rf $CAFFE
cd $CAFFE_PATH
git clone https://github.com/BVLC/caffe.git
## Config installation by simpdanny's makefile
CONFIG=$DIR/Makefile.config
cp $CONFIG $CAFFE
cd $CAFFE
## Compile Caffe and run all test
#mkdir build
#cd build
#cmake -DBUILD_TIFF=ON -DBLAS=open ..
export CUDA_VISIBLE_DEVICES=0
chown speech:speech * -R .
su -l speech -s /bin/bash -c "cd $CAFFE; make all -j $threads"
su -l speech -s /bin/bash -c "cd $CAFFE; make test -j $threads"
su -l speech -s /bin/bash -c "cd $CAFFE; make runtest -j $threads" || true
## Install python-pip
yum $YUM_OPTIONS install python-pip
cd $CAFFE/python
pip install --upgrade pip
## Install python-wrapper requirements
for req in $(cat requirements.txt); do pip install --upgrade $req; done
pip install --upgrade numpy
#easy_install -U scikit-image
## install python-wrapper
cd $CAFFE
make pycaffe -j $threads
| true |
21705d00107eb8077741f94414aa748ef90e7384 | Shell | dxc-labs/dxc-soteria-risk | /utilities/secrets.sh | UTF-8 | 459 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | ProjectName="--- project name ---"
# Deployment coordinates used to build the SSM parameter name.
TenantName="--- tenant name ---"
EnvironmentName="--- environment name ---"
ComponentName="risk"
AWS_REGION="us-east-1"
# Placeholder for the real key. NOTE(review): the key is stored in this
# file in clear text before upload -- keep it out of version control.
riskAPIKey="---risk api key ---"
# Store the key as an encrypted (SecureString) SSM parameter named
# <project>-<tenant>-<environment>-risk-apikey, replacing any existing value.
if aws ssm put-parameter --region ${AWS_REGION} --name "${ProjectName}-${TenantName}-${EnvironmentName}-${ComponentName}-apikey" --type "SecureString" --value "${riskAPIKey}" --tier Standard --overwrite; then
    echo "risk APIKey ssm put parameter complete"
fi
| true |
347d448048d42b156e8acf79fb688f6b82286ffe | Shell | nutjob4life/bindir | /shorthand-github-issue | UTF-8 | 304 | 3.0625 | 3 | [] | no_license | #!/bin/sh
#
# Convert a URL to a GitHub on the pasteboard to its short format.
#
# For example, if you have
#     https://github.com/EDRN/P5/issues/120
# you will get
#     EDRN/P5#120
#
# Requires macOS: pbpaste/pbcopy read and write the system clipboard.
PATH=/usr/bin:/bin:/usr/local/bin
export PATH
# Strip the github.com prefix and turn "/issues/" into "#", in place
# on the clipboard.
pbpaste | sed -e 's=https://github.com/==g' -e 's=/issues/=#=g' | pbcopy
| true |
40846fb1b721615e0ff37450d41609bb9f376f11 | Shell | raihaninfo/Shell-Scripting | /bin/complete_echo_command.sh | UTF-8 | 160 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Demo: printing literals, variables, and command-substitution output.
printf '%s\n' "Welcome to shell Scripting"
printf '%s\n' "Welcome to Raihan"
# simple variable assignments
number=444
myname="Raihan"
# variable expansion
printf '%s\n' "$number"
printf '%s\n' "$myname"
# command substitution: capture a command's output, then print it
printf '%s\n' "$(ls)"
printf '%s\n' "$(pwd)"
| true |
5a2162d92152bebb952dd1ef4790751a7acadf43 | Shell | trygvels/trygvetools | /krisjand/extract_rms_from_wcov | UTF-8 | 992 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# External tools: maptool (extract map columns) and map_editor
# (element-wise sqrt / scale on FITS maps).
mt=/mn/stornext/u3/krisjand/quiet_svn/oslo/src/f90/maptool/maptool
me=~/quiet_svn/oslo/src/f90/map_editor/map_editor
# Print usage unless exactly one argument (the npipe version) is given.
if [ $# -ne 1 ] || [ "$1" = "help" ]; then
    echo ""
    echo "Syntax: extract_rms_from_wcov [npipe version]"
    echo ""
    echo "e.g. ...wcov npipe6v0 "
    echo ""
    exit
fi
#freqs=("545" "857")
#freqs=("30" "44" "70" "100" "217")
freqs=("30" "44" "70" "100" "143" "217" "353" "545" "857")
npipe=$1
for ((i=0; i<${#freqs[@]}; i++)) ; do
    freq=${freqs[$i]} #Frequency
    # Glob all white-noise covariance FITS files for this frequency.
    for file in ${npipe}*${freq}*"wcov"*".fits"; do
        # An unmatched glob leaves the literal pattern (with '*') in
        # $file, which is how a missing map is detected here.
        if [[ $file = *\** ]]; then #Check if contains wildcard (Map does not exist)
            echo "WARNING: " $file " does not exist!"
            continue
        fi
        # 545/857 GHz maps are intensity-only (1 column); the others are
        # IQU, so the three diagonal variance columns (1,4,6) are taken.
        if [ $freq == 545 ] || [ $freq == 857 ] ; then
            $mt extract 1 $file 1 ${file/wcov/rms_uK}
        else
            $mt extract 3 $file 1 4 6 ${file/wcov/rms_uK}
        fi
        # variance -> rms, then K -> uK (factor 1e6).
        $me sqrt ${file/wcov/rms_uK} ${file/wcov/rms_uK}
        $me scale ${file/wcov/rms_uK} ${file/wcov/rms_uK} 1e6
    done
done
| true |
a42fe61d55a8c56a77bce5204b6878da76c16f64 | Shell | softasap/docker-base-lamp | /deployment/docker_build.sh | UTF-8 | 269 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# do docker build --no-cache to drop cached parts
# Usage: docker_build.sh [tag]   (tag defaults to "master")
# FIX: quote "$1" -- the unquoted test breaks (or glob-expands) when
# the argument contains spaces or wildcard characters.
if [ -z "$1" ]
then
  TAG=":master"
else
  TAG=":$1"
fi

echo docker build --tag="voronenko/base-lamp$TAG" ../
docker build --tag="voronenko/base-lamp$TAG" ../
echo "Successfully built voronenko/base-lamp$TAG"
| true |
76df1dbe42f843b78c68fee41776637612f504e0 | Shell | Azure-Samples/azure-intelligent-edge-patterns | /factory-ai-vision/deprecated_Installers/acs/factory-ai-vision-install.sh | UTF-8 | 8,452 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# @ECHO OFF
AZURE_CORE_NO_COLOR=
AZURE_CORE_ONLY_SHOW_ERRORS=True
# Many of the az commands return output containing carriage returns that must be removed
CR=$'\r'
# ARM deployment script for Custom Vison solution (Free SKU)
customVisionArm=deploy-custom-vision-arm.json
# edge-deployment-json is the template,
#edgeDeploymentJson=deployment.amd64.json
# edge-deploy-json is the deployment description with keys and endpoints added
edgeDeployJson=deploy.modules.json
# the solution resource group name
rgName=visiononedge-rg
# azSubscriptonName = The friendly name of the Azure subscription
# iotHubName = The IoT Hub that corisponds to the ASE device
# edgeDeviceId = The device id of the ASE device
# cvTrainingApiKey = The Custom Vision service training key
# cvTrainingEndpoint = The Custom Vision service end point
# cpuGpu = CPU or GPU deployment
# SETLOCAL ENABLEDELAYEDEXPANSION
# ############################## Install Prereqs ##############################
echo Installing / updating the IoT extension
az extension add --name azure-iot
if [ ! $? -eq 0 ]; then
# Azure CLI is not installed. It has an MSI installer on Windows, or is available over REST.
echo
echo It looks like Azure CLI is not installed. Please install it from:
echo https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest
echo and try again
read -p "Press any key to exit..."
exit 1
fi
################################ Get Tenant ###################################
# remove the header and ---- from output list - start good var data at var1
#COLUMNS=1
#outputarrten=()
#echo Logging on to Azure...
#output=$(az login -o table --query [].name --only-show-errors)
#let cnt=0
#while IFS=' ' read -r line
#do
# if [ $cnt -gt 1 ]; then
# outputarrten+=("$line")
# fi
# let cnt++
#done <<< "$output"
#
## get length of an array
#tLen=${#outputarrten[@]}
#
#PS3='Choose the number corisponding to your tenant: '
#select opt in "${outputarrten[@]}"
#do
# # remove carriage return
# azSubscriptonName=${opt%$CR}
# echo you chose: $azSubscriptonName
# break
#done
#az account set --subscription "$azSubscriptonName" --only-show-errors
################################ Install Custom Vision ###########################
echo You can use your existing Custom Vision service, or create a new one
while true; do
read -p "Would you like to use an existing Custom Vision Service? (y or n): " -n 1 -r; echo
case $REPLY in
[Yy]* ) read -p "Please enter your Custom Vision endpoint: " cvTrainingEndpoint; echo
read -p "Please enter your Custom Vision Key: " cvTrainingApiKey; echo
if [[ -z $cvTrainingEndpoint ]]; then
cvTrainingEndpoint='<Training_Endpoint>'
fi
if [[ -z $cvTrainingApiKey ]]; then
cvTrainingApiKey='<Training_API_Key>'
fi
break;;
[Nn]* ) cvTrainingEndpoint=""; break;;
* ) echo "Please answer yes or no.";;
esac
done
if [ "$cvTrainingEndpoint" == "" ]; then
echo Installing the Custom Vision Service
echo
loc=()
loc+=("eastus")
loc+=("westus2")
loc+=("southcentralus")
loc+=("northcentralus")
PS3='Choose the location: '
select opt in "${loc[@]}"
do
echo "you chose: " $opt
location=${opt%$CR}
break
done
echo Creating resource group - $rgName
output=$(az group create -l $location -n $rgName)
echo Creating Custom Vision Service
outputarrcv=()
# Need to note in the documentation that only one free service per subscription can be created. An existing one results in an error.
output="$(az deployment group create --resource-group $rgName --template-file $customVisionArm --query properties.outputs.*.value -o table --parameters "{ 'location': { 'value': '$location' } }")"
let cnt=0
while read -r line
do
if [ $cnt -gt 1 ]; then
outputarrcv+=("$line")
fi
let cnt++
done <<< "$output"
# get length of an array
tLen=${#outputarrcv[@]}
if [ $tLen -eq 0 ]; then
echo
echo Deployment failed. Please check if you already have a free version of Custom Vision installed.
read -p "Press <Enter> key to exit..."
exit 1
fi
# the Custom Vision variables
cvTrainingApiKey=${outputarrcv[0]}
cvTrainingEndpoint=${outputarrcv[1]}
echo API Key: $cvTrainingApiKey
echo Endpoint: $cvTrainingEndpoint
fi
# ############################## Get IoT Hub #####################################
echo listing IoT Hubs
outputhubs=$(az iot hub list --only-show-errors -o table --query [].name)
outputarrhubs=()
let cnt=0
while read -r line
do
if [ $cnt -gt 1 ]; then
outputarrhubs+=("$line")
fi
let cnt++
done <<< "$outputhubs"
# get length of an array
tLen=${#outputarrhubs[@]}
if [ $tLen -le 0 ]; then
echo IoTHub not found
echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device
read -p "Press <Enter> key to exit..."; echo
exit 1
fi
# Only one option so no need to prompt for choice
if [ $tLen -le 1 ]; then
while true; do
read -p "please confirm install to ${outputarrhubs[0]%$CR} hub (y or n): " -n 1 -r;echo
case $REPLY in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
iotHubName=${outputarrhubs[0]%$CR}
else
PS3='Choose the number corisponding to the IoTHub managing your target edge device: '
select opt in "${outputarrhubs[@]}"
do
echo "you chose: " $opt
iotHubName=${opt%$CR}
break
done
fi
# ############################## Get Device #####################################
echo getting devices
# query parameter retrieves only edge devices
output=$(az iot hub device-identity list -n $iotHubName -o table --query [?capabilities.iotEdge].[deviceId])
let cnt=0
outputarrdevs=()
while read -r line
do
# strip off column name and -------
if [ $cnt -gt 1 ]; then
outputarrdevs+=("$line")
fi
let cnt++
done <<< "$output"
# get length of an array
tLen=${#outputarrdevs[@]}
if [ $tLen -le 0 ]; then
echo No edge device found
echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device
read -p "Press any key to exit..."; echo
exit 1
fi
# Only one option so no need to prompt for choice
if [ $tLen -le 1 ]; then
while true; do
read -p "please confirm install to ${outputarrdevs[0]%$CR} device (y or n): " -n 1 -r;echo
case $REPLY in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
edgeDeviceId=${outputarrdevs[0]%$CR}
else
PS3='Choose the number corisponding to the Azure Stack Edge device: '
select opt in "${outputarrdevs[@]}"
do
echo "you chose: " $opt
edgeDeviceId=${opt%$CR}
break
done
fi
################################ Check for GPU ###########################################
# Ask once whether the target device has a GPU; this selects both the
# module flavour (cpu/gpu) and the docker runtime (runc/nvidia) used
# when the deployment template is filled in later.
while true; do
    read -p "Does your device have a GPU? (y or n): " -n 1 -r; echo
    case $REPLY in
        [Yy]* ) cpuGpu="gpu"; runtime="nvidia"; break;;
        [Nn]* ) cpuGpu="cpu"; runtime="runc" ; break;;
        * ) echo "Please answer yes or no.";;
    esac
done
################################ Check for Platform ###########################################
echo 1 amd64
echo 2 arm64v8
read -p "Choose the platform you're going to deploy: "
if [ "$REPLY" == "2" ]; then
edgeDeploymentJson=deployment.arm64v8.json
else
edgeDeploymentJson=deployment.amd64.json
fi
################################ Write Config ############################################
# Fill the deployment template: substitute the placeholder tokens with
# the values collected above and write the result to $edgeDeployJson.
# Will overwrite file if it already exists
input="./$edgeDeploymentJson"
while IFS= read -r line
do
  # IFS= preserves leading whitespace so the JSON indentation survives.
  prtline=${line//'<Training Endpoint>'/$cvTrainingEndpoint}
  prtline=${prtline//'<Training API Key>'/$cvTrainingApiKey}
  prtline=${prtline//'<cpu or gpu>'/$cpuGpu}
  prtline=${prtline//'<Docker Runtime>'/$runtime}
  # BUG FIX: the unquoted `echo $prtline` collapsed whitespace and could
  # glob-expand '*' inside the JSON; print each line verbatim instead.
  printf '%s\n' "$prtline"
done < "$input" > "./$edgeDeployJson"
# ############################## Deploy Edge Modules #####################################
echo Deploying conatiners to Azure Stack Edge
echo This will take more than 10 min at normal connection speeds. Status can be checked on the Azure Stack Edge device
output=$(az iot edge set-modules --device-id $edgeDeviceId --hub-name $iotHubName --content $edgeDeployJson)
echo "installation complete"
echo solution scheduled to deploy on the $edgeDeviceId device, from the $iotHubName hub
| true |
5f5fbb6031fbe66fe2cc669541553fe8ac6a5f7a | Shell | Twenga/twgit | /tests/inc/testFunction.sh | UTF-8 | 751 | 3.40625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
##
# Call a function of common.inc.sh after loading Shell config files.
# e.g.: /bin/bash testFunction.sh process_fetch x
#
# @author Geoffroy Aubry <geoffroy.aubry@hi-media.com>
#
# Parameters:
# First argument is the function name; the rest are forwarded to it.
sCommonFunction="$1"; shift
# Pre config:
# Absolute path of the top-level directory of the current user repository:
TWGIT_USER_REPOSITORY_ROOT_DIR="$(git rev-parse --show-toplevel 2>/dev/null)"
# Includes:
# conf-twgit.sh is expected to define TWGIT_INC_DIR (among others).
. /tmp/conf-twgit.sh
. $TWGIT_INC_DIR/common.inc.sh
# Post config:
# TWGIT_USER_REPOSITORY_ROOT_DIR is absolute path of the top-level directory of the current user repository
TWGIT_FEATURES_SUBJECT_PATH="$TWGIT_USER_REPOSITORY_ROOT_DIR/$TWGIT_FEATURES_SUBJECT_FILENAME"
# Execution:
# Invoke the requested function with the remaining arguments, if any
# function name was given at all.
if [ ! -z "$sCommonFunction" ]; then
    $sCommonFunction "$@"
fi
| true |
01bcdc14ed308170c3a938bc7023099e68bf0ea3 | Shell | kspine/openssl_rsa_largeFile | /build.sh | UTF-8 | 1,217 | 3.75 | 4 | [] | no_license | #!/bin/bash
# gcc is in /D/linaro/gcc-linaro-4.7-arm-linux-gnueabi/bin/arm-linux-gnueabi-gcc
#export TARGET_ROOTFS_DIR=$PROSENSE_ROOTFS_DIR
#export LINARO_TOOLSCHAIN_DIR="/D/linaro/gcc-linaro-4.7-arm-linux-gnueabi"
#unset TARGET_ROOTFS_DIR
#unset LINARO_TOOLSCHAIN_DIR
CMAKE_INSTALL_PREFIX=""
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
clean)
do_clean=true
shift
;;
config)
do_config=true
shift
;;
build)
do_build=true
shift
;;
install)
do_install=true
shift
;;
prefix|--prefix)
CMAKE_INSTALL_PREFIX="$2"
shift
shift
;;
CROSS_COMPILE)
CMAKE_CROSS_COMPILE="-DCMAKE_TOOLCHAIN_FILE=../linaro.cmake"
shift
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters
if [ $do_clean ]; then
rm -r build
mkdir build
fi
cd build
if [ $do_config ]; then
cmake $CMAKE_CROSS_COMPILE \
-DCMAKE_INSTALL_PREFIX=$CMAKE_INSTALL_PREFIX \
-DTRACE_ENABLED=ON \
..
fi
if [ $do_build ]; then
make
fi
if [ $do_install ]; then
make install
fi
| true |
7f5cc1b3739da376df17fe976bdc8809262d6ee6 | Shell | MBertolino/dotfiles-1 | /bin/wine_steam | UTF-8 | 800 | 3.515625 | 4 | [] | no_license | #!/bin/bash
export WINEDEBUG="${WINEDEBUG:--all}"
export WINEPREFIX="${WINEPREFIX:-$HOME/steam}"
die() { echo >&2 "$1"; exit 1; }
declare -A apps
apps=([tf2]=440
[portal]=400)
steam=("$WINEPREFIX/drive_c/Program Files"*/Steam/Steam.exe)
(( ${#steam[@]} == 0 )) && steam() { die "$0: steam not found"; } \
|| steam() { exec wine "${steam[0]}" -no-dwrite $*; }
(( $# == 0 )) && steam
case $1 in
install)
cd /tmp
[[ ! -f SteamInstall.msi ]] && curl -O http://cdn.steampowered.com/download/SteamInstall.msi
exec wine msiexec /i SteamInstall.msi
;;
kill) exec wineserver -k ;;
config) exec winecfg ;;
*)
local id="${apps[$1]}"
[[ -z "$id" ]] && die "$0: can't launch $1"
steam -applaunch "$id" -novid -dxlevel 9
;;
esac
| true |
747721ef93c0435292101de6d90e284b3b9a4baa | Shell | rohit-bindal/openroberta-lab | /Docker/openroberta/scripts/hotfix.sh | UTF-8 | 6,432 | 3.671875 | 4 | [
"MIT",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | echo '
# --------------------------------------------------------------------------------------------------------------------- #
# hotfix script: merge a hotfix to master, merge to develop, setup develop for the next version. #
# For suggestions and errors contact reinhard.budde at iais.fraunhofer.de #
# #
# For deployments, two parameters are mandatory (both WITHOUT SNAPSHOT) #
# 1. the version number of the hotfix to be deployed at master (likely the version from develop without -SNAPSHOT) #
# 2. the next version to be set in develop (the script suffixes this with "-SNAPSHOT" internally) #
# E.g. ./hotfix.sh 2.4.1 2.5.0 #
# #
# ASSUMPTIONS: #
# - maven project, may be hierarchical (parent + modules). mvn is used and on the PATH! #
# - commits to master and develop can be done in a local repo first and then pushed to remote. #
# - the hotfix branch is a descendant of master. Probably only a few commits apart from master. #
# - the develop branch is a descendant of master. As many commits as needed apart from master. #
# - hotfix, master and develop branches are clean. #
# - hotfix is checked out #
# - THE HOTFIX COMMITS ARE WELL TESTED AND MVN CLEAN INSTALL HAS RUN SUCCESSFULLY #
# #
# ACTIONS: #
# - the hotfix branch is merged into master. #
# - In master the version to be deployed is set. A tag is defined with the name of the deployment version. #
# - the master branch is merged into develop. #
# - In develop the next SNAPSHOT version is set. #
# - develop is checked out #
# Two merge commits are enforced to document the fact, that a hotfix is deployed. #
# #
# After the hotfix script is run, check for sanity and then push develop and master to the remote IMMEDIATELY :-). #
# --------------------------------------------------------------------------------------------------------------------- #
'
# remember script directory, working directory and directory with (parent) pom.
SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CWD=$(pwd)
PARENT="$CWD"
echo "script directory is $SCRIPT, working directory is $CWD, parent pom in $PARENT"
source $SCRIPT/helper/__githelper.sh
# ACK PRECONDITIONS:
question 'did you update the element <openRobertaServer.history> in the parent pom?'
question 'is a database upgrade necessary? Did you change the class Upgrader.java and the SQL script "create-tables.sql" if needed?'
question 'is an update of versions for the EV3 robots in RobotEV3/pom.xml (e.g. <ev3runtime.v0.version>) needed?'
# CONSISTENCY CHECKS:
mavenOnPath # 1. mvn is on the path.
checkMavenBuildDir $PARENT # 2. pom.xml is found for maven builds.
checkGitProjectDir # 3. is a git project.
HOTFIX=$(git rev-parse --abbrev-ref HEAD) # 4. we are in a hotfix branch. This may neither be master nor develop.
if [ "$HOTFIX" = 'develop' -o "$HOTFIX" = 'master' ];
then
echo "this script doesn't run with master or develop checked out - exit 12"
exit 12
fi
echo "the name of the hotfix branch is $HOTFIX"
checkBranchClean master # 5. hotfix, develop and master are clean.
checkBranchClean develop
checkBranchClean $HOTFIX
parent2child master develop # 6. hotfix and develop are descendant of master.
parent2child master $HOTFIX
thisVersion=$1 # 7. the version parameter are set and valid
versionStringIsValid "$thisVersion" "deploy"
nextVersion=$2
versionStringIsValid "$nextVersion" "next"
nextVersionSnapshot="$nextVersion-SNAPSHOT"
# the workflow: merge hotfix into master; set the target version in master; set the target version in develop (temporarily);
# merge master into develop; set next version snapshot to develop
git checkout master
git merge --no-ff $HOTFIX
# --no-ff forces a merge commit so the hotfix deployment is visible in
# history; the tag marks the released version on master.
$SCRIPT/helper/_bumpVersion.sh "$PARENT" "$thisVersion" "hotfix version $thisVersion"
git tag "ORA-$thisVersion" -m "hotfix version $thisVersion"
git checkout develop
# Temporarily set develop to the hotfix version so the subsequent merge
# from master does not conflict on the version files.
$SCRIPT/helper/_bumpVersion.sh "$PARENT" "$thisVersion" "preparing integration of hotfix (version $thisVersion)"
git merge --no-ff master
if [ $? -ne 0 ]
then
    # Merge conflict: abort and print the exact commands the user must
    # run to redo the merge by hand and finish the version bump.
    # NOTE(review): "conflicht" is a typo in the user-visible message;
    # left unchanged here.
    echo 'when merging master into develop a merge conflicht occurred. Merge is ABORTED - exit 12'
    git merge --abort
    echo 'Run the following commands to re-create the conflicts, solve and commit them:'
    echo 'git merge --no-ff master'
    echo '<<solve the conflicts>>'
    echo 'git add --all; git commit -m "<<sensible message>>"'
    echo "$SCRIPT/helper/_bumpVersion.sh $PARENT $nextVersionSnapshot \"next version is planned to be $nextVersion\""
    exit 12
fi
# Success: move develop on to the next SNAPSHOT version.
$SCRIPT/helper/_bumpVersion.sh "$PARENT" "$nextVersionSnapshot" "next version is planned to be $nextVersion"
echo 'everything looks fine. You are in branch develop and should push both develop and master.'
echo "you may run $SCRIPT/helper/_pushMasterAndDevelop.sh"
echo 'later you may remove the hotfix branch'
f6cd2c9b4bedb57f22695111fc07df48c5fad6b5 | Shell | zhoumingchun/generator-meshblu-connector | /core-auto-update | UTF-8 | 678 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Clone (if needed) and regenerate one meshblu connector, then enable
# its CI and install its dependencies. $1 is the connector repo name.
CONNECTOR=$1
if [ -z "$1" ]; then
  echo "ERROR: Must include meshblu-connector"
  echo "Usage: ./auto-update [meshblu-connector]"
  exit 1
fi
echo "* running for connector: $CONNECTOR"
cd ~/Projects/Octoblu
# Clone only when the project is not already checked out locally.
if [ ! -f "./$CONNECTOR/package.json" ]; then
  echo "* Cloning repo..."
  git clone git@github.com:octoblu/$CONNECTOR.git
fi
cd $CONNECTOR
echo "* Pulling..."
git pull
COMMAND='yo meshblu-connector'
# A CoffeeScript connector needs the generator's --coffee flag.
if [ -f './index.coffee' ]; then
  echo "* it is coffee"
  COMMAND="$COMMAND --coffee"
fi
echo "* Running the generator"
$COMMAND
echo "* Upgrade done."
echo "* Enabling travis..."
travis enable --no-interactive
echo "* Running npm install..."
npm install
| true |
f898a28847e4a64e1716981c7926496893c56db1 | Shell | PRIHLOP/1c-rac-utilite | /1c-rac-utilite.sh | UTF-8 | 5,608 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Load connection settings; main.conf must define $DBUSER, $DBPASS and the
# $PROGRAMPATH/$INFOBASEADMIN/$INFOBASEPWD/$EXTIP variables used below.
source ./main.conf #connect main.conf file for define variables $DBUSER and $DBPASS
# Request root privileges up front (needed later to reload apache2 when
# creating/removing a 1C web publication).
if [[ $EUID -ne 0 ]]; then
echo "Запуск требует root прав для управления системными сервисами (apache2 для веб публикации)."
echo "Подтвердите запрос root прав вводом пароля."
sudo ls > /dev/null
fi
# Resolve the UUID of the local 1C cluster from `rac cluster list`;
# ${CL_STRING:32} assumes the UUID begins at character offset 32 of the
# matched "cluster ..." line — TODO confirm against the rac output format.
CL_STRING=$($PROGRAMPATH/rac cluster list | grep 'cluster *')
CLUSTER=${CL_STRING:32}
#Variables expected to come from main.conf:
#DBUSER= #PostgreSQL database user
#DBPASS= #PostgreSQL database password
OPTION=1 #Default menu choice; non-zero so the until-loop below runs at least once
#function for question with answer Y/N
# Ask a yes/no question ($1 is the prompt); keep prompting until the answer
# is a single Y/y/N/n character. Returns 0 for yes, 1 for no.
question() {
	until printf '%s [Y/N] ' "$1" && read answer && grep -q -e '^[YyNn]$' <<< "$answer"; do
		echo "Введите N(n) либо Y(y)! "
	done
	case "$answer" in
		[Yy]) return 0 ;;
		*) return 1 ;;
	esac
}
#function to resolve 1c db information
# Asks the user for an infobase name and resolves its UUID in the cluster.
# Sets the globals DBNAME (raw user input, "0" means cancel) and DBUID
# (empty when no infobase with that name exists — callers use that as the
# "not found" signal).
dbuid() {
	read -p "Введите имя базы с которой желаете работать, или 0(ноль) для отмены: " DBNAME
	# `rac ... summary list` prints an "infobase : <uuid>" line one line above
	# the matching name (hence grep -B 1); ${DBINFO:11} assumes the UUID starts
	# at offset 11 of that line — TODO confirm against the rac output format.
	DBINFO=$($PROGRAMPATH/rac infobase --cluster=$CLUSTER summary list | grep -w -B 1 $DBNAME | grep infobase)
	DBUID=${DBINFO:11}
}
echo
#echo -n "Введите логин пользователя 1С с правами администратора: "
#read LOGIN
#echo -n "Введите пароль пользователя 1С: "
#read -s PASSWD
clear
echo
echo
echo "Информация о локальном кластере сервера 1С:"
echo
$PROGRAMPATH/rac cluster list
#echo "$CLUSTER"
# Main interactive menu: repeats until the user chooses option 0 (exit).
until [ $OPTION -eq 0 ]
do
echo
echo "Выберите для продолжения: "
echo "1) Вывести список БД"
echo "2) Добавить БД"
echo "3) Удалить Информациюнную базу из клатера"
echo "∟3.1) Удалить Информационную базу из кластера вместе с данными"
echo "4) Выполнить веб публикацию серверной БД"
echo "5) Удалить веб публикацию серверной БД"
echo "6) Вывести список соединений"
echo "7) Вывести список сеансов"
echo "8) Завершить сеанс пользователя"
echo "0) Выход"
echo
read -p "Выбранная опция: " OPTION
echo
# NOTE(review): in the lookup loops of cases 3/3.1/4/5 below, entering "0"
# (cancel) leaves DBUID empty while the loop body does nothing, so the loop
# spins forever — a `break` on the cancel path appears to be missing.
# Also: the success messages are printed even when `question` is answered No,
# and the unquoted $DBNAME in `[ $DBNAME != "0" ]` errors on empty input.
case "$OPTION" in
# 1) list all infobases registered in the cluster
1) clear
$PROGRAMPATH/rac infobase --cluster=$CLUSTER summary list ;;
# 2) create a new infobase backed by a local PostgreSQL database
2) clear
read -p "Введите имя создаваемой базы: " DBNAME
$PROGRAMPATH/rac infobase create --cluster=$CLUSTER --name=$DBNAME --create-database --dbms=PostgreSQL --db-server=127.0.0.1 --db-name=$DBNAME --locale=ru --db-user=$DBUSER --db-pwd=$DBPASS --license-distribution=allow > /dev/null
echo
echo "БД успешно создана";;
# 3) unregister an infobase from the cluster (keeps its database)
3) clear
dbuid
while [ -z "$DBUID" ]
do
if [ $DBNAME != "0" ]; then
echo
echo "БД с таким именем не найдена! Повторите ввод."
dbuid
fi
done
if [ $DBNAME != "0" ]; then
question "Уверены что хотите удалить БД с именем $DBNAME ?" && $PROGRAMPATH/rac infobase drop --cluster=$CLUSTER --infobase=$DBUID --infobase-user=$INFOBASEADMIN --infobase-pwd=$INFOBASEPWD
fi
echo
echo "БД $DBNAME успешно удалена!";;
# 3.1) unregister an infobase AND drop its database (--drop-database)
3.1) clear
dbuid
while [ -z "$DBUID" ]
do
if [ $DBNAME != "0" ]; then
echo
echo "БД с таким именем не найдена! Повторите ввод."
dbuid
fi
done
if [ $DBNAME != "0" ]; then
question "Уверены что хотите удалить БД с именем $DBNAME ?" && $PROGRAMPATH/rac infobase drop --cluster=$CLUSTER --infobase=$DBUID --drop-database --infobase-user=$INFOBASEADMIN --infobase-pwd=$INFOBASEPWD
fi
echo
echo "БД $DBNAME успешно удалена!";;
# 4) publish the infobase over the web (webinst writes the apache config,
#    then apache2 is reloaded)
4) clear
dbuid
while [ -z "$DBUID" ]
do
if [ $DBNAME != "0" ]; then
echo
echo "БД с таким именем не найдена! Повторите ввод."
dbuid
fi
done
if [ $DBNAME != "0" ]; then
question "Желаете выполнить веб-публикацию базы с именем $DBNAME ?" && echo "$PROGRAMPATH/webinst -publish -apache22 -wsdir $DBNAME -dir '/var/www/$DBNAME' -connStr 'Srvr="localhost";Ref="$DBNAME";' -confPath /etc/apache2/apache2.conf" | bash
sudo service apache2 reload > /dev/null
fi
echo
echo "Публикация доступна по адресу http://$EXTIP/$DBNAME";;
# 5) remove the web publication for an infobase
5) clear
dbuid
while [ -z "$DBUID" ]
do
if [ $DBNAME != "0" ]; then
echo
echo "БД с таким именем не найдена! Повторите ввод."
dbuid
fi
done
if [ $DBNAME != "0" ]; then
question "Желаете УДАЛИТЬ веб-публикацию базы с именем $DBNAME ?" && echo "$PROGRAMPATH/webinst -delete -apache22 -wsdir $DBNAME -dir '/var/www/$DBNAME' -connStr 'Srvr="localhost";Ref="$DBNAME";' -confPath /etc/apache2/apache2.conf" | bash
sudo service apache2 reload > /dev/null
fi
echo;;
# 6) list active connections
6) clear
echo
$PROGRAMPATH/rac connection --cluster=$CLUSTER list;;
# 7) list active sessions
7) clear
$PROGRAMPATH/rac session --cluster=$CLUSTER list;;
# 8) terminate a session by its id
8) clear
read -p "Введите id сессии: " SESSION
$PROGRAMPATH/rac session --cluster=$CLUSTER terminate --session=$SESSION;;
0) exit ;;
*) echo
echo "Неверно выбрана опция.";;
esac
done
exit 0
| true |
91076717ba0de1cfd772c59582f26b2d82c68f47 | Shell | quyang19/Mytest | /shell/config.sh | UTF-8 | 185 | 2.84375 | 3 | [] | no_license | #! /bin/bash
# Split the comma-separated string in $1 and print each field wrapped in
# double quotes, one per line.
# NOTE(review): the trailing `sed 's/1/\n/'` replaces the first literal "1"
# anywhere in the output with a newline — this looks unintended; confirm
# before relying on this script's output.
#local delimiter=","
echo "$1" | awk -F "," '{for(i=1; i<=NF; i++)print "\""$i"\"";}' | sed 's/1/\n/' # awk -F "," '{for(i=1; i<=NF; i++)print "\""$i"\"";}' $1
| true |
dad927b5a65826cfd1a94eea2516fa17167aab25 | Shell | Onturenio/BiasCor | /JOBS/correlations.sh | UTF-8 | 2,296 | 2.890625 | 3 | [] | no_license | ################################################################################
# CORRELATIONS BY REGIONS
################################################################################
seas="DJF JJA MAM SON"
dataset="OBS CESM ERAIN CESMDEBIAS"
date="1979-01-01,2005-12-31"
cdo -splitseas -seldate,$date ../DATA/ERAIN-RAIN-daily-CH.nc ERAIN-
cdo -splitseas -seldate,$date ../DATA/OBS-RAIN-daily-CH.nc OBS-
cdo -splitseas -seldate,$date ../DATA/CESMA-RAIN-daily-CH.nc CESM-
FNAMEOUT="../REGIONALIZATION/COR.ps"
gmt makecpt -Cjet -T0/1/0.1 -I > pepe.cpt
gmt psxy -JX2c -Xf2c -Yf24c -R0/1/0/1 -T -K > $FNAMEOUT
for sea in DJF MAM JJA SON; do
gmt psxy -J -R -T -Y-5c -Xf3c -K -O >> $FNAMEOUT
for data in OBS ERAIN CESM; do
file=$(find ../REGIONALIZATION/SUMMARY-$data -name "Reg*$sea*HR.nc" )
nregions=$(basename $file | cut -c 13-13 )
echo $data $sea $nregions
for i in $(seq 1 $nregions); do
cdo -eqc,$i $file mask.nc
cdo mul ../DATA/OBS/MASK-OBS.nc mask.nc mask2.nc
cdo -fldmean -ifthen mask2.nc $data-$sea.nc $data-$i.nc
done
rm -rf COR-$data-$sea.dat
for i in $(seq 1 $nregions); do
for j in $(seq 1 $nregions); do
cor=$(cdo output -timcor $data-$i.nc $data-$j.nc)
echo $i $j $cor >> COR-$data-$sea.dat
done
done
gmt xyz2grd -GCOR-$data-$sea.grd -R1/$nregions/1/$nregions -I1 COR-$data-$sea.dat
gmt psbasemap -JX4c -R0.5/$nregions.5/0.5/$nregions.5 -BSWne+t"" -Bx1a -By1a -K -O >> $FNAMEOUT
gmt grdimage -R -J COR-$data-$sea.grd -Cpepe.cpt -O -K >> $FNAMEOUT
gmt psxy -J -R -T -X5c -K -O >> $FNAMEOUT
done
done
gmt psxy -Xf1c -Yf1c -JX17c/25c -R0/17/0/25 -T -K -O >> $FNAMEOUT
echo 4 23 OBS | gmt pstext -J -R -K -O -F+f22 >> $FNAMEOUT
echo 9 23 WRF-ERAIN | gmt pstext -J -R -K -O -F+f22 >> $FNAMEOUT
echo 14 23 WRF-CESM | gmt pstext -J -R -K -O -F+f22 >> $FNAMEOUT
echo 1 20 DJF | gmt pstext -J -R -K -O -F+f22+a90 >> $FNAMEOUT
echo 1 15 MAM | gmt pstext -J -R -K -O -F+f22+a90 >> $FNAMEOUT
echo 1 10 JJA | gmt pstext -J -R -K -O -F+f22+a90 >> $FNAMEOUT
echo 1 5 SON | gmt pstext -J -R -K -O -F+f22+a90 >> $FNAMEOUT
gmt psscale -Cpepe.cpt -DjBC+w12c/.5c+h -R -J -K -O -L0.2 -B+l"cor">> $FNAMEOUT
gmt psxy -J -R -T -O >> $FNAMEOUT
gmt psconvert -Tf -A $FNAMEOUT
rm $FNAMEOUT
| true |
692798f34344ea21f9079843ba4058f5135f1c6c | Shell | opsbay/cloud-deployment-scripts | /bin/install-jq.sh | UTF-8 | 510 | 3.515625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Install jq if it is not already on PATH: Homebrew on macOS, EPEL + yum on
# Linux (assumes CentOS 7); abort on any other OS.
set -uo pipefail

os=$(uname -s)

# `command -v` is the portable builtin way to test for a tool; `which` is an
# external command that is not guaranteed to exist (ShellCheck SC2230).
if ! command -v jq > /dev/null; then
    case $os in
        Darwin)
            brew install jq
        ;;
        Linux)
            # Assumes CentOS 7: jq lives in EPEL, so enable that repo first.
            if ! rpm -q epel-release; then
                sudo yum -y install epel-release
            fi
            if ! rpm -q jq; then
                sudo yum -y install jq
            fi
        ;;
        *)
            # Diagnostics belong on stderr.
            echo "ERROR: Unknown OS $os" >&2
            exit 1
        ;;
    esac
fi
0f1e603c7ee452825076de245bd4701f37e984b0 | Shell | gslin/rtorrent-docker | /entrypoint.sh | UTF-8 | 638 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Container entrypoint: create the rtorrent user/group from USER_UID/USER_GID,
# bring up a local DNS resolver, render the rtorrent config and start rtorrent.
bt_port="${PORT:-6991}"

# Both ids must be supplied by the container environment.
if [[ -z "${USER_GID}" ]]; then
    echo "USER_GID is empty."
    exit 1
fi
groupadd -f -g "${USER_GID}" rtorrent

if [[ -z "${USER_UID}" ]]; then
    echo "USER_UID is empty."
    exit 1
fi
useradd -d /home/rtorrent -g "${USER_GID}" -m -s /bin/bash -u "${USER_UID}" rtorrent

# Local dnsmasq resolver.
service dnsmasq start
echo "nameserver 127.0.0.1" > /etc/resolv.conf

# Render the rtorrent configuration with the chosen listen port.
cd ~rtorrent
sed -e "s/BT_PORT/${bt_port}/g" .rtorrent.rc.template > .rtorrent.rc
chmod 644 .rtorrent.rc
mkdir -p session
chown rtorrent:rtorrent session

# Run rtorrent as the unprivileged user.
sudo -i -u rtorrent -- bash -c "cd /srv/rtorrent; LANG=en_US.UTF-8 rtorrent"
| true |
2872fda506fe65b4858be319901f0f1c54cab033 | Shell | SpiralUp/postgresql-container | /manifest.sh | UTF-8 | 2,902 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | # Manifest for image directories creation
# every dest path will be prefixed by $DESTDIR/$version
DESTDIR='' # optional, defaults to $PWD
# Files containing distgen directives.
# Each rule block is "src=<path> dest=<path> [mode=<octal>]" and rules are
# separated by ';' — the consuming generator parses these strings itself.
DISTGEN_RULES="
src=src/cccp.yml
dest=cccp.yml;

src=src/root/usr/share/container-scripts/postgresql/README.md
dest=root/usr/share/container-scripts/postgresql/README.md;

src=src/root/usr/share/container-scripts/postgresql/common.sh
dest=root/usr/share/container-scripts/postgresql/common.sh;

src=src/root/usr/bin/run-postgresql-slave
dest=root/usr/bin/run-postgresql-slave
mode=0755;

src=src/root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template
dest=root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template;
"

# Files containing distgen directives, which are used for each
# (distro, version) combination not excluded in multispec
DISTGEN_MULTI_RULES="
src=src/Dockerfile
dest=Dockerfile;

src=src/Dockerfile
dest=Dockerfile.rhel7;

src=src/Dockerfile
dest=Dockerfile.rhel8;

src=src/Dockerfile.fedora
dest=Dockerfile.fedora;
"

# Symbolic links (link_target=<existing path> link_name=<link to create>).
SYMLINK_RULES="
link_target=root/usr/share/container-scripts/postgresql/README.md
link_name=README.md;

link_target=../test
link_name=test;

link_target=/usr/bin/run-postgresql
link_name=s2i/bin/run
"

# Files to copy verbatim (mode= sets the destination file permissions).
COPY_RULES="
src=src/root/usr/libexec/fix-permissions
dest=root/usr/libexec/fix-permissions
mode=0755;

src=src/content_sets.yml
dest=content_sets.yml;

src=src/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template
dest=root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template;

src=src/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template
dest=root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template;

src=src/root/usr/share/container-scripts/postgresql/scl_enable
dest=root/usr/share/container-scripts/postgresql/scl_enable;

src=src/root/usr/bin/run-postgresql
dest=root/usr/bin/run-postgresql
mode=0755;

src=src/root/usr/bin/run-postgresql-master
dest=root/usr/bin/run-postgresql-master
mode=0755;

src=src/root/usr/bin/container-entrypoint
dest=root/usr/bin/container-entrypoint
mode=0755;

src=src/root/usr/bin/usage
dest=root/usr/bin/usage
mode=0755;

src=src/root/usr/libexec/check-container
dest=root/usr/libexec/check-container
mode=0755;

src=src/root/usr/share/container-scripts/postgresql/start/set_passwords.sh
dest=root/usr/share/container-scripts/postgresql/start/set_passwords.sh;

src=src/s2i/bin/assemble
dest=s2i/bin/assemble
mode=0755;

src=src/s2i/bin/usage
dest=s2i/bin/usage
mode=0755;
"
| true |
565356e54b27079629cf1d1a07ca8d1ef1e0641d | Shell | wusui/scripts2.4asynch | /setup_client_and_rgw.sh | UTF-8 | 335 | 2.765625 | 3 | [] | no_license | #! /bin/bash -f
#
# Called by user. This is a wrapper for the remote code that will
# use set up the client and rgw sites.
#
source ./globaldefs.sh
./paranoia.sh $*
rgw_host=${1}
octo_name=${octoname:-'wusui'}
scp ./ansible_client_rgw.sh ${octo_name}@$rgw_host:/tmp
ssh -t -A ${octo_name}@$rgw_host sudo /tmp/ansible_client_rgw.sh $*
| true |
f59b639efad853a2d3513114b1e8c14c6efa9de5 | Shell | liuzhennn/Ryu_SDN_Controller | /ryu/app/experiments/compare.sh | UTF-8 | 2,542 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
# Runs the ECMP / PureSDN / Hedera fat-tree experiments for each traffic
# pattern and plots the combined results.
k=$1
#cpu=$2
flow_num=$2 # number of iperf flows per host.
#duration=$4
out_dir=$3
# Exit on any failure.
set -e
# Check for uninitialized variables.
set -o nounset
# NOTE(review): with `set -o nounset` active, the first use of $flowsPerHost,
# $duration or $cpu below aborts the script — $flowsPerHost is never assigned
# (only $flow_num is), and the cpu/duration assignments above are commented
# out. The positional-parameter layout was presumably reshuffled at some
# point; confirm the intended mapping before running.
# On Ctrl-C: kill the experiment processes and clean up Mininet state.
ctrlc() {
	killall python
	killall -9 ryu-manager
	mn -c
	exit
}
trap ctrlc INT
# Traffic patterns.
# "stag_0.5_0.3" means 50% under the same Edge switch,
# 30% between different Edge switches in the same Pod,
# and 20% between different Pods.
# "random" means choosing the iperf server randomly.
# Change it if needed.
#traffics="random1 random2 random3 random4 random5 random6 random7 random8 stag1_0.1_0.2 stag2_0.1_0.2 stag3_0.1_0.2 stag4_0.1_0.2 stag5_0.1_0.2 stag6_0.1_0.2 stag7_0.1_0.2 stag8_0.1_0.2 stag1_0.2_0.3 stag2_0.2_0.3 stag3_0.2_0.3 stag4_0.2_0.3 stag5_0.2_0.3 stag6_0.2_0.3 stag7_0.2_0.3 stag8_0.2_0.3 stag1_0.3_0.3 stag2_0.3_0.3 stag3_0.3_0.3 stag4_0.3_0.3 stag5_0.3_0.3 stag6_0.3_0.3 stag7_0.3_0.3 stag8_0.3_0.3 stag1_0.4_0.3 stag2_0.4_0.3 stag3_0.4_0.3 stag4_0.4_0.3 stag5_0.4_0.3 stag6_0.4_0.3 stag7_0.4_0.3 stag8_0.4_0.3 stag1_0.5_0.3 stag2_0.5_0.3 stag3_0.5_0.3 stag4_0.5_0.3 stag5_0.5_0.3 stag6_0.5_0.3 stag7_0.5_0.3 stag8_0.5_0.3 stag1_0.6_0.2 stag2_0.6_0.2 stag3_0.6_0.2 stag4_0.6_0.2 stag5_0.6_0.2 stag6_0.6_0.2 stag7_0.6_0.2 stag8_0.6_0.2 stag1_0.7_0.2 stag2_0.7_0.2 stag3_0.7_0.2 stag4_0.7_0.2 stag5_0.7_0.2 stag6_0.7_0.2 stag7_0.7_0.2 stag8_0.7_0.2 stag1_0.8_0.1 stag2_0.8_0.1 stag3_0.8_0.1 stag4_0.8_0.1 stag5_0.8_0.1 stag6_0.8_0.1 stag7_0.8_0.1 stag8_0.8_0.1"
traffics="random"
# Run experiments.
for traffic in $traffics
do
	# Create iperf peers.
	# sudo python ./create_peers.py --k $k --traffic $traffic --fnum $flowsPerHost
	# sleep 1
	# ECMP
	dir=$out_dir/$flowsPerHost/$traffic/ECMP
	mkdir -p $dir
	mn -c
	sudo python ./ECMP/fattree.py --k $k --duration $duration --dir $dir --cpu $cpu
	#PureSDN
	dir=$out_dir/$flowsPerHost/$traffic/PureSDN
	mkdir -p $dir
	mn -c
	sudo python ./PureSDN/SegmentECMPfattree4.py --k $k --duration $duration --dir $dir --cpu $cpu
	#Hedera
	dir=$out_dir/$flowsPerHost/$traffic/Hedera
	mkdir -p $dir
	mn -c
	sudo python ./Hedera/fattree4.py --k $k --duration $duration --dir $dir --cpu $cpu
	# NonBlocking
	# dir=$out_dir/$flowsPerHost/$traffic/NonBlocking
	# mkdir -p $dir
	# mn -c
	# sudo python ./NonBlocking/NonBlocking.py --k $k --duration $duration --dir $dir --cpu $cpu
done
# Plot results.
sudo python ./plot_results_Chinese.py --k $k --duration $duration --dir $out_dir --fnum $flowsPerHost
| true |
d56684bf6b84657a54e8bd0ebf8a9e1804219bec | Shell | dockerian/go-coding | /tools/check_tests.sh | UTF-8 | 3,765 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
########################################################################
# Check go test log
#
# Parses a `go test` log file and prints a pass/fail/skip summary.
#
# Exit code:
#   1 - fail
#   0 - pass
#
########################################################################
set -e +x
# Locate this script and the project root (one level above the script dir).
script_file="${BASH_SOURCE[0]##*/}"
script_base="$( cd "$( echo "${BASH_SOURCE[0]%/*}/.." )" && pwd )"
script_path="${script_base}/tools/${script_file}"
# Callers may override PROJECT_DIR; it defaults to the repository root.
PROJECT_DIR="${PROJECT_DIR:-${script_base}}"
# main entrance
# Reads ${TEST_LOGS} (default tests.log) in ${PROJECT_DIR}, counts build
# errors, failed, passed and skipped tests, prints a summary, and exits with
# the failure count (0 when everything passed).
function main() {
TEST_LOGS="${TEST_LOGS:-tests.log}"
# Any of DEBUG / VERBOSE / TEST_VERBOSE enables the verbose listings below.
DEBUG="${DEBUG:-${VERBOSE:-${TEST_VERBOSE}}}"
COUNT_FAIL=0
check_depends
cd -P "${PROJECT_DIR}" && echo "PWD: ${PWD:-$(pwd)}"
if [[ ! -f "${TEST_LOGS}" ]]; then
echo "Cannot find ${TEST_LOGS}"
return
fi
if [[ "${DEBUG}" =~ (0|disable|off|false|no) ]]; then
DEBUG="false"
fi
# NOTE: use 'grep --text' (or 'grep -a', processing as text file)
# to avoid from 'grep' error: Binary file (standard input) matches
# see: man page at http://ss64.com/bash/grep.html
#
COUNT_COMP="$(cat "${TEST_LOGS}"|grep -a "\: cannot find package"|wc -l|xargs echo)"
COUNT_FAIL="$(cat "${TEST_LOGS}"|grep -a "\--- FAIL:\|^FAIL"|wc -l|xargs echo)"
COUNT_PASS="$(cat "${TEST_LOGS}"|grep -a "\--- PASS:"|wc -l|xargs echo)"
COUNT_SKIP="$(cat "${TEST_LOGS}"|grep -a "\--- SKIP:"|wc -l|xargs echo)"
# Compilation errors make the test counts meaningless — report and bail.
if [[ "${COUNT_COMP}" != "0" ]]; then
printf "\n*** Build errors : %2d ***\n" ${COUNT_COMP}
echo "cannot find:"
(cat "${TEST_LOGS}"|grep -a ": cannot find package"|awk '{print $5}'|sort|uniq)
exit ${COUNT_COMP}
fi
# NOTE(review): the `!= ""` guards below are always true, since `wc -l`
# always prints at least "0"; only the DEBUG checks actually gate output.
if [[ "${COUNT_FAIL}${COUNT_PASS}${COUNT_SKIP}" != "000" ]]; then
echo ""
echo "============================= TEST SUMMARY ============================"
if [[ "${COUNT_PASS}" != "" ]] || [[ "${DEBUG}" != "false" ]]; then
printf "\n*** Passed tests : %2d ***\n" ${COUNT_PASS}
(cat "${TEST_LOGS}" | grep -e "\--- PASS:" | cut -d':' -f2 | sort)
fi
if [[ "${COUNT_SKIP}" != "" ]] || [[ "${DEBUG}" != "false" ]]; then
printf "\n*** Skipped tests : %2d ***\n" ${COUNT_SKIP}
(cat "${TEST_LOGS}" | grep -e "\--- SKIP:" | cut -d':' -f2 | sort)
fi
if [[ "${COUNT_FAIL}" != "" ]] || [[ "${DEBUG}" != "false" ]]; then
printf "\n*** Failed tests : %2d ***\n" ${COUNT_FAIL}
(cat "${TEST_LOGS}" | grep -e "\--- FAIL:\|^FAIL" | cut -d':' -f2 | sort)
fi
echo ""
echo "======================================================================="
elif [[ "${DEBUG}" == "" ]]; then
echo "No failed test (TEST_VERBOSE is unset)"
fi
echo ""
# The exit code is 0 if there are no test failures.
echo "exit code: ${COUNT_FAIL} (see ${TEST_LOGS})"
exit ${COUNT_FAIL}
}
# check_depends(): verifies that the command-line tools used by main() exist;
# calls log_error (which exits) for any missing tool. Leaves `set -u` enabled.
function check_depends() {
local tool_set="cat cut grep sort uniq wc xargs"
set +u
echo "......................................................................."
echo "Checking dependencies: ${tool_set}"
for tool in ${tool_set}; do
# `command -v` is the portable builtin way to locate a tool; the original
# spawned the external `which` in a subshell per tool (ShellCheck SC2230).
if ! command -v "${tool}" > /dev/null; then
log_error "Cannot find command '${tool}'"
fi
done
set -u
}
# log_error() func: logs to stderr and exits; exit code is $2 (default 1).
function log_error() {
# "${2:-1}" avoids an "unbound variable" crash when the caller omits the
# exit code, since check_depends leaves `set -u` active for the rest of the
# run; log_trace's own default is 1, so the behavior is unchanged otherwise.
log_trace "$1" "ERROR" "${2:-1}"
}
# log_trace() func: print "<level>: <message>" (level defaults to INFO).
# ERROR and FATAL messages go to stderr, set HAS_ERROR, and terminate the
# script with the given exit code (third argument, default 1).
function log_trace() {
err_text="${1:-Here}"
err_name="${2:-INFO}"
err_code="${3:-1}"
case "${err_name}" in
ERROR|FATAL)
HAS_ERROR="true"
echo -e "\n${err_name}: ${err_text}" >&2
exit ${err_code}
;;
*)
echo -e "\n${err_name}: ${err_text}"
;;
esac
}
# main entrance, preventing from source
# NOTE(review): ${ARGS} is never assigned anywhere in this file, so main()
# is always invoked with no arguments — presumably "$@" was intended; confirm
# before changing (main() currently ignores its arguments anyway).
[[ $0 != "${BASH_SOURCE}" ]] || main ${ARGS}
| true |
c4b53b19c58ad4176f27a565b7da5d5ef357fc23 | Shell | saolsen/hokusai | /distribute.sh | UTF-8 | 491 | 2.5625 | 3 | [
"MIT"
] | permissive | #! /bin/bash
set -e

# Read the release version once; the original re-ran `cat hokusai/VERSION.txt`
# in three separate unquoted command substitutions (ShellCheck SC2046).
version="$(cat hokusai/VERSION.txt)"

# Tag release
git fetch upstream --tags
git tag "v${version}"
git push upstream --tags

# Pip package
python setup.py sdist bdist_wheel
twine upload dist/*
rm -rf build/ dist/ hokusai.egg-info/

# Docker image
docker login
docker build -t hokusai .
docker tag hokusai:latest "artsy/hokusai:${version}"
docker push "artsy/hokusai:${version}"
docker tag hokusai:latest artsy/hokusai:latest
docker push artsy/hokusai:latest
| true |
47d3f3370cdeb629ea03f92fe049524ad189f14e | Shell | kholdarbekov/Fantasy_Football | /commands/restore_postgresql_db.sh | UTF-8 | 1,251 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env bash
# Restore the production PostgreSQL database from the newest gzipped backup.
# Timing is reported via bash's builtin SECONDS counter.
SECONDS=0
PROJECT_PATH=/home/soccer
REPOSITORY_PATH=${PROJECT_PATH}
LATEST_BACKUP_PATH=${PROJECT_PATH}/db_backups/latest.backup
export DJANGO_SETTINGS_MODULE=soccer.settings.production
cd "${PROJECT_PATH}" || exit
source env/bin/activate
cd "${REPOSITORY_PATH}" || exit
# Pull the connection settings out of the Django configuration.
# NOTE(review): assigning USER clobbers the shell's $USER environment
# variable, and PASSWORD is never used below — confirm both are intentional.
DATABASE=$(echo "from django.conf import settings; print(settings.DATABASES['default']['NAME'])" | python manage.py shell -i python)
USER=$(echo "from django.conf import settings; print(settings.DATABASES['default']['USER'])" | python manage.py shell -i python)
PASSWORD=$(echo "from django.conf import settings; print(settings.DATABASES['default']['PASSWORD'])" | python manage.py shell -i python)
echo "=== Restoring DB from a Backup ==="
echo "- Recreate the database"
# Kick every other client off the database before dropping it.
# Quote the expansions: an empty or unusual $DATABASE must not word-split
# (ShellCheck SC2086); the original passed it unquoted to psql and dropdb.
psql --dbname="$DATABASE" --command='SELECT
pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity
WHERE datname = current_database() AND pid <> pg_backend_pid();'
dropdb "$DATABASE"
createdb --username="$USER" "$DATABASE"
echo "- Fill the database with schema and data"
zcat "${LATEST_BACKUP_PATH}.gz" | python manage.py dbshell
duration=$SECONDS
echo "------------------------------------------"
echo "The operation took $((duration / 60)) minutes and $((duration % 60)) seconds."
141082754b5213c130c59333c0aca2a1af5c567e | Shell | discoposse/devops-2.0 | /provisioners/scripts/common/install_oracle_java_jdk_13.sh | UTF-8 | 1,019 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/sh -eux
# install java se 13 development kit by oracle.
# (runs under `sh -eux`, so any failing command — including cd/wget — aborts.)
# install java se 13 development kit. --------------------------------------------------------------
jdkhome="jdk13"
jdkbuild="13.0.1+9"
jdkhash="cec27d702aa74d5a8630c65ae61e4305"
jdkfolder="jdk-13.0.1"
jdkbinary="${jdkfolder}_linux-x64_bin.tar.gz"
# create java home parent folder.
mkdir -p /usr/local/java
cd /usr/local/java
# download jdk 13 binary from oracle otn.
# The cookie header accepts Oracle's license click-through non-interactively.
wget --no-verbose --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" https://download.oracle.com/otn-pub/java/jdk/${jdkbuild}/${jdkhash}/${jdkbinary}
# extract jdk 13 binary and create softlink to 'jdk13'.
rm -f ${jdkhome}
tar -zxvf ${jdkbinary} --no-same-owner --no-overwrite-dir
chown -R root:root ./${jdkfolder}
ln -s ${jdkfolder} ${jdkhome}
rm -f ${jdkbinary}
# set jdk 13 home environment variables.
# NOTE: these exports only affect this script's process; persisting them
# system-wide (e.g. /etc/profile.d) is left to the caller.
JAVA_HOME=/usr/local/java/${jdkhome}
export JAVA_HOME
PATH=${JAVA_HOME}/bin:$PATH
export PATH
# verify installation.
java -version
| true |
55e4b4236533610e78415c06fb766d86dc041654 | Shell | nikhil218/Shell-programming | /conditional-statement/comparenum.sh | UTF-8 | 110 | 3.34375 | 3 | [] | no_license | #!/bin/bash -x
# Compare two integers and report their ordering on stdout.
# BUG FIX: the original had only two branches, so equal values were
# reported as "less than"; an explicit equality branch is added.
compare_and_report() {
if [ "$1" -gt "$2" ]; then
echo "$1 is greater than $2"
elif [ "$1" -eq "$2" ]; then
echo "$1 is equal to $2"
else
echo "$1 is less than $2"
fi
}
a=10
b=20
compare_and_report "$a" "$b"
| true |
dac0db05b1e998a652f55d68d55a903d6dad2b6b | Shell | GaryHughes/Exchange | /part_2/shell_solution/float_utils.sh | UTF-8 | 1,614 | 4.03125 | 4 | [
"MIT"
] | permissive | #! /bin/sh
# Utility functions to compare / manipulate floating point numbers
# entirely with shell builtins, so no `expr` or other sub-process is needed.

# Split the decimal number $1 into the globals intpart (whole part) and
# fracpart (fraction scaled to millionths, without leading zeros).
# Prints a message on stderr and exits 1 when $1 cannot be parsed or has
# more than six fractional digits; integer parts of up to five digits
# are accepted.
split_int () {
    case "$1" in
        .*) intpart=0; fracpart=${1#.};;
        ?.* | ??.* | ???.* | ????.* | ?????.*)
            # one to five leading digits, a dot, then the fraction
            intpart=${1%%.*}; fracpart=${1#*.};;
        *.*) echo "Cant parse real $1" 1>&2; exit 1;;
        [0-9] | [0-9]*[0-9]) intpart=$1; fracpart=0;;
        *) echo "Cant parse real $1" 1>&2; exit 1;;
    esac
    if [ ${#fracpart} -gt 6 ]; then
        echo "Frac part of $1 too long" 1>&2
        exit 1
    fi
    # Scale to millionths: right-pad with zeros and keep six digits.
    fracpart=${fracpart}000000
    fracpart=${fracpart:0:6}
    # Strip leading zeros so the value is a plain decimal integer
    # (a lone "0" is kept as-is).
    until case "$fracpart" in 0*[0-9]) false;; *) true;; esac; do
        fracpart=${fracpart#0}
    done
}

# Compare two decimal numbers without spawning any sub-process.
# Succeeds (returns 0) exactly when $1 < $2.
less_than () {
    split_int $1
    aint=$intpart
    afrac=$fracpart
    split_int $2
    # A smaller integer part wins outright; on a tie, compare the
    # fractional parts (both already scaled to millionths).
    if [ $aint -lt $intpart ]; then
        return 0
    fi
    [ $aint -eq $intpart ] && [ $afrac -lt $fracpart ]
}
| true |
22fce159a4da8abc5c4119a844a5a685aeb934a7 | Shell | toniz4/dotfiles | /.local/bin/barscripts/battery | UTF-8 | 581 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Status-bar battery module: prints "<color><icon> <pct>%<state>^d^".
# BUG FIX: the original used `|| break`, but `break` is invalid outside a
# loop; exit instead when the sysfs node is missing (no battery present).
capacity=$(cat /sys/class/power_supply/BAT0/capacity) || exit 1
# Charging state with all known status words stripped; an empty result
# therefore means the battery is discharging.
stats=$(sed "s/[Dd]ischarging//;s/[Nn]ot charging//;s/[Cc]harging//;s/[Uu]nknown//;s/[Ff]ull//" /sys/class/power_supply/BAT0/status)

# Pick an icon for the current charge level.
if [ "$capacity" -gt 70 ]; then
	icon=""
elif [ "$capacity" -le 30 ]; then
	icon=""
else
	icon=""
fi

color="^c#b5bd68^"
# If it is discharging and 25% or less, switch to the warning colour.
# NOTE(review): $warn below is never assigned in this script, so it always
# expands empty — a warning glyph was presumably meant to be set here.
[ "$capacity" -le 25 ] && [ "$stats" = "" ] && color="^c#cc6666^"

printf "%s%s %s%s%%%s%s\n" "$color" "$icon" "$warn" "$capacity" "$stats" "^d^"

unset warn
| true |
51924d2a22a613b92aa24a2ad6c287369db90163 | Shell | liufeiwu/open-ocr | /kubernetes/bootstrap.sh | UTF-8 | 3,919 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh -
#
# bootstrap
# automate the deployment of Open-OCR on a Kubernetes Cluster
# based on https://github.com/liufeiwu/open-ocr/tree/master/kubernetes
#
# Copyright (c) 2019 diego casati <dcasati@dcasati.net>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# usage: -c - clean up the demo.
# Deletes every kubernetes resource the demo created (pods, services, the
# worker replication controller and the RabbitMQ secret), then exits.
cleanup() {
	echo "cleaning up the Kubernetes environment"
	kubectl delete -f \
		pods/rabbitmq.yaml,services/rabbitmq.yml,replication-controllers/open-ocr-worker.yaml,pods/open_ocr_httpd.yml,services/open_ocr_httpd.yml
	kubectl delete secrets rabbit-mq-password
	exit 0
}
# show usage and exit.
# The backticks inside the double quotes run at assignment time, so the
# usage text embeds this script's basename.
__usage="usage: `basename $0` [-cit]
Options:
  -c  clean up the demo.
  -i  install the demo.
  -t  run a cURL test against the API.
"
usage() {
	echo "$__usage"
	exit 1
}
# Attempt to create a random password for RabbitMQ and store it as the
# kubernetes secret "rabbit-mq-password".
create_rabbitmq_secret(){
	echo "Creating a random RabbitMQ password"
	echo "You will want to replace the YOUR_RABBITMQ_PASS below with something more secure.\n"
	# NOTE(review): date|md5sum is predictable, not cryptographically random —
	# consider `openssl rand -hex` or /dev/urandom. The ./password file is
	# also left behind in the working directory after the secret is created.
	date | md5sum | awk '{print $1}' > ./password
	kubectl create secret generic rabbit-mq-password --from-file=./password
}
# Clone the repo if not cloned.
first_run() {
	local LOCAL_REPO="open-ocr"
	# BUG FIX: the original tested `[ -d $LOCAL_REPO ]`, i.e. it cloned only
	# when the checkout ALREADY existed (and never on a fresh machine).
	# Clone when the directory is missing instead.
	if [ ! -d "$LOCAL_REPO" ]; then
		echo Clone OpenOCR repo
		git clone https://github.com/liufeiwu/open-ocr.git
	fi
}
# Launch RabbitMQ
# Creates the RabbitMQ pod and service, then blocks (polling every 2s)
# until kubernetes reports the pod phase as "Running".
launch_rabbitmq() {
	local RABBITMQ_STATUS
	kubectl create -f pods/rabbitmq.yaml
	kubectl create -f services/rabbitmq.yml
	printf "%s" "waiting until RabitMQ is ready"
	RABBITMQ_STATUS=`kubectl get po -o=jsonpath='{.items[?(@.metadata.labels.name=="rabbitmq")].status.phase}'`
	# NOTE(review): $RABBITMQ_STATUS is unquoted in the test below; if kubectl
	# returns an empty string the [ ] test gets too few arguments and errors.
	while [ $RABBITMQ_STATUS != "Running" ]; do
		RABBITMQ_STATUS=`kubectl get po -o=jsonpath='{.items[?(@.metadata.labels.name=="rabbitmq")].status.phase}'`
		printf "%s" "."
		sleep 2
	done
	echo "RabbitMQ is ready."
}
# Launch REST API Server
launch_rest_api(){
	# NOTE(review): the "\n" inside plain double quotes is printed literally
	# by bash's echo (no -e); under a /bin/sh that is dash it is a newline.
	echo "creating the REST API Server\n"
	kubectl create -f pods/open_ocr_httpd.yml
	kubectl create -f services/open_ocr_httpd.yml
}
# Launch OCR Worker
launch_ocr_worker(){
	echo "creating the Open-OCR workers\n"
	kubectl create -f replication-controllers/open-ocr-worker.yaml
}
# usage: -t - checks if the LoadBalancer IP address is up and running
# POSTs a sample OCR request to the service's LoadBalancer IP; exits 0 when
# curl succeeds, 1 otherwise.
test_rest_api() {
	LOADBALANCER_IP=`kubectl get service -o jsonpath='{.items[?(@.metadata.name=="open-ocr-httpd-service")].status.loadBalancer.ingress[].ip}'`
	echo "running curl against the REST API\n"
	# BUG FIX: the original check was `[ $? > 0 ]`, where `>` is parsed as a
	# redirection (creating a file named "0") and `[ "$?" ]` — a non-empty
	# string test — is always true, so the function exited 1 even on success.
	curl -X POST -H "Content-Type: application/json" -d '{"img_url":"http://bit.ly/ocrimage","engine":"tesseract"}' http://$LOADBALANCER_IP/ocr
	# bail out if curl can't get to the REST API server
	if [ $? -gt 0 ]; then
		exit 1
	fi
	exit 0
}
# usage: -i - installs the entire demo
# Runs the full bring-up sequence: clone, secret, RabbitMQ, REST API,
# OCR workers, then a smoke test against the REST API.
run_install() {
	first_run
	create_rabbitmq_secret
	launch_rabbitmq
	launch_rest_api
	launch_ocr_worker
	test_rest_api
	# NOTE(review): test_rest_api always exits the script itself, so this
	# `exit 0` is never reached.
	exit 0
}
# Parse the single-letter mode flags; each action function exits the script
# itself, so at most one mode runs per invocation.
while getopts "cit" opt; do
	case $opt in
		c) cleanup
			;;
		i) run_install
			;;
		t) test_rest_api
			;;
		*) usage
			exit 1
			;;
	esac
done
shift $(( $OPTIND - 1 ))
# With no options at all, show the usage text.
# NOTE(review): usage() exits 1 itself, so the `exit 0` below (and the
# `exit 1` after usage above) are never reached.
if [ $OPTIND = 1 ]; then
	usage
	exit 0
fi
d9fa034cefb21a6dd0a26caf0e3babe3b3e6c195 | Shell | Jolium/vaio-grub | /create_grub.sh | UTF-8 | 389 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Workaround for some Sony Vaio's models
# Just recreates the grub to support dual boot
# Run: sudo ./recreate_grub.sh
# Copies the Microsoft EFI directory aside to Windows10, then overwrites
# bootmgfw.efi with the MX19 GRUB image so GRUB is started by the firmware.
# The steps are chained with && so any failed copy aborts the sequence.
mkdir -p /boot/efi/EFI/Windows10 \
  && cp -r /boot/efi/EFI/Microsoft/* /boot/efi/EFI/Windows10 \
  && echo Windows10 directory created... \
  && cp /boot/efi/EFI/MX19/grubx64.efi /boot/efi/EFI/Microsoft/Boot/bootmgfw.efi \
  && echo bootmgfw.efi file recreated
| true |
82ede5a7f7b5102747663508f97d138412473ec5 | Shell | modeling-pipeline/RaptorX_Property_Fast | /oneline_command.sh | UTF-8 | 15,971 | 3.609375 | 4 | [] | no_license | #!/bin/bash
if [ $# -lt 2 ]
then
echo "Usage: ./protprop_server.sh <input_fasta> <out_dir> [cpu_number] [PROF_or_NOT] "
exit
fi
CurRoot="$(pwd)"
# ------- CPU number ------ #
BindX_CPU=1
if [ $# -gt 2 ]
then
BindX_CPU=$3
fi
# ------- use profile or not ---- #
PROF_or_NOT=1
if [ $# -gt 3 ]
then
PROF_or_NOT=$4
fi
# ------ part 0 ------ # related path
fulnam=`basename $1`
relnam=${fulnam%.*}
OUT_DIR=$2
# ------- run ProtProp Server --------- #
Server_Root=~/RaptorX_Property_Fast
util/Verify_FASTA $1 /tmp/$relnam.fasta
OUT=$?
if [ $OUT -ne 0 ]
then
echo "failed in util/Verify_FASTA $1 /tmp/$relnam.fasta"
exit 1
fi
# ---- check if TGT file exist ----- #
has_TGT=0
if [ -f "$relnam.tgt" ]
then
has_TGT=1
cp $relnam.tgt /tmp/
fi
# ---- running ---------#
cd $Server_Root
#-> 0. create 'tmp' folder
rm -rf $OUT_DIR/$relnam
mkdir -p $OUT_DIR/$relnam
tmp=/tmp/TMP"_"$relnam"_"$RANDOM
mkdir -p $tmp/
#------- start -------#
util=bin
program_suc=1
for ((i=0;i<1;i++))
do
if [ $PROF_or_NOT -eq 1 ] #-> use profile
then
#-> 1. build TGT file
if [ $has_TGT -eq 0 ]
then
#--> Fast_TGT
echo "Running Fast_TGT to generate TGT file for sequence $relnam"
./Fast_TGT.sh -i /tmp/$relnam.fasta -c $BindX_CPU -o $tmp 1> $tmp/$relnam.tgt_log1 2> $tmp/$relnam.tgt_log2
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating TGT file for sequence $relnam"
program_suc=0
break
fi
else
mv /tmp/$relnam.tgt $tmp/$relnam.tgt
fi
util/Verify_FASTA /tmp/$relnam.fasta $tmp/$relnam.seq
cp /tmp/$relnam.fasta $tmp/$relnam.fasta_raw
#-> 1.1 TGT Update
echo "Running TGT_Update to upgrade TGT file for sequence $relnam"
tmptmp=/tmp/TMPTMP"_"$relnam"_"$RANDOM
mkdir -p $tmptmp
mkdir -p $tmp/update/
./TGT_Update -i $tmp/$relnam.tgt -o $tmp/update/$relnam.tgt -t $tmptmp
rm -r $tmptmp
#-> 2. generate SS3/SS8 file
#--> SS8
$util/DeepCNF_SS_Con -t $tmp/$relnam.tgt -s 0 > $tmp/$relnam.ss8
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating SS8 file for sequence $relnam"
program_suc=0
break
fi
#--> SS3
$util/DeepCNF_SS_Con -t $tmp/$relnam.tgt -s 1 > $tmp/$relnam.ss3
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating SS3 file for sequence $relnam"
program_suc=0
break
fi
#-> 3. generate ACC/CN file
#--> ACC
$util/DeepCNF_SAS_Con -t $tmp/update/$relnam.tgt -m 0 > $tmp/$relnam.acc
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating ACC file for sequence $relnam"
program_suc=0
break
fi
#--> CN
$util/AcconPred $tmp/$relnam.tgt 0 > $tmp/$relnam.cn
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating CN file for sequence $relnam"
program_suc=0
break
fi
#-> 4. generate DISO file
./AUCpreD.sh -i $tmp/$relnam.tgt -o $tmp/$relnam.diso
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating DISO file for sequence $relnam"
program_suc=0
break
fi
else #-> not use profile
#-> 1. generate feature file
util/Verify_FASTA /tmp/$relnam.fasta $tmp/$relnam.seq
cp /tmp/$relnam.fasta $tmp/$relnam.fasta_raw
#--> feat_file
./Seq_Feat.sh -i $tmp/$relnam.seq -o $tmp/
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating noprof_feat file for sequence $relnam"
program_suc=0
break
fi
#--> pred_file for SS8/SS3
$util/DeepCNF_Pred -i $tmp/$relnam.feat_noprof -w 5,5,5,5,5 -d 100,100,100,100,100 -s 8 -l 87 -m parameters/ss8_noprof_model > $tmp/$relnam.ss8_noprof 2> $tmp/$relnam.noprf_log3
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in prediction of SS8/SS3 (no_profile mode) for sequence $relnam"
program_suc=0
break
fi
#--> pred_file for ACC
$util/DeepCNF_Pred -i $tmp/$relnam.feat_noprof -w 5,5,5,5,5 -d 100,100,100,100,100 -s 3 -l 87 -m parameters/acc_noprof_model > $tmp/$relnam.acc_noprof 2> $tmp/$relnam.noprf_log4
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in prediction of ACC (no_profile mode) for sequence $relnam"
program_suc=0
break
fi
#--> pred_file for CN
$util/DeepCNF_Pred -i $tmp/$relnam.feat_noprof -w 5,5,5,5,5 -d 100,100,100,100,100 -s 15 -l 87 -m parameters/cn_noprof_model > $tmp/$relnam.cn_noprof 2> $tmp/$relnam.noprf_log5
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in prediction of CN (no_profile mode) for sequence $relnam"
program_suc=0
break
fi
#-> 2. generate SS3/SS8 file
#--> SS8
$util/Label_Parser $tmp/$relnam.seq $tmp/$relnam.ss8_noprof 0 > $tmp/$relnam.ss8_
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating SS8 noprof_file for sequence $relnam"
program_suc=0
break
fi
#--> SS3
$util/Label_Parser $tmp/$relnam.seq $tmp/$relnam.ss8_noprof 1 > $tmp/$relnam.ss3_
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating SS3 noprof_file for sequence $relnam"
program_suc=0
break
fi
#-> 3. generate ACC/CN file
#--> ACC
$util/Label_Parser $tmp/$relnam.seq $tmp/$relnam.acc_noprof 2 > $tmp/$relnam.acc_
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating ACC noprof_file for sequence $relnam"
program_suc=0
break
fi
#--> CN
$util/Label_Parser $tmp/$relnam.seq $tmp/$relnam.cn_noprof 3 > $tmp/$relnam.cn_
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating CN noprof_file for sequence $relnam"
program_suc=0
break
fi
#-> 4. generate DISO file
./AUCpreD.sh -i $tmp/$relnam.seq -o $tmp/$relnam.diso_
OUT=$?
if [ $OUT -ne 0 ]
then
echo "Failed in generating DISO noprof_file for sequence $relnam"
program_suc=0
break
fi
fi
done
# ----------- end ------------- #
# If any per-sequence stage above failed (program_suc was cleared before the
# loop broke out), abort the whole run with a non-zero status.
if [ $program_suc -ne 1 ]
then
exit 1
fi
# ----------- copy to $OUT_DIR/ ----- #
# Publish the per-sequence results (raw, one-line "simple", and combined files)
# into $OUT_DIR/$relnam/.  The two branches are identical except that the
# no-profile mode reads the "_"-suffixed temp files and writes *_noprof names.
if [ $PROF_or_NOT -eq 1 ] #-> use profile
then
cp util/0README $OUT_DIR/$relnam/0README.txt
cp $tmp/$relnam.fasta_raw $OUT_DIR/$relnam/$relnam.fasta.txt
cp $tmp/$relnam.seq $OUT_DIR/$relnam/$relnam.seq.txt
# strip blank lines from the raw per-residue prediction files
awk '{if(NF>0){print $0}}' $tmp/$relnam.ss3 > $OUT_DIR/$relnam/$relnam.ss3.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.ss8 > $OUT_DIR/$relnam/$relnam.ss8.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.acc > $OUT_DIR/$relnam/$relnam.acc.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.diso > $OUT_DIR/$relnam/$relnam.diso.txt
# make simple prediction
# (FASTA-style: header, then column 2 = residues, then column 3 = labels)
#-> SS3
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.ss3_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss3.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss3_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss3.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss3_simp.txt
#-> SS8
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.ss8_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss8.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss8_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss8.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss8_simp.txt
#-> ACC
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.acc_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.acc.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.acc_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.acc.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.acc_simp.txt
#-> DISO
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.diso_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.diso.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.diso_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.diso.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.diso_simp.txt
# make overall prediction
head -n1 $OUT_DIR/$relnam/$relnam.fasta.txt > $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.seq.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.ss3_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.ss8_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.acc_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.diso_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
printf "\n\n" >> $OUT_DIR/$relnam/$relnam.all.txt
printf "\n\n#---------------- details of SS3 prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.ss3
printf "\n\n#---------------- details of SS8 prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.ss8
printf "\n\n#---------------- details of ACC prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.acc
printf "\n\n#---------------- details of DISO prediction --------------------------\n" > $OUT_DIR/$relnam/$relnam.all.diso
# stitch summary + the four detail sections into the final .all.txt
cat $OUT_DIR/$relnam/$relnam.all.txt $OUT_DIR/$relnam/$relnam.all.ss3 $OUT_DIR/$relnam/$relnam.ss3.txt $OUT_DIR/$relnam/$relnam.all.ss8 $OUT_DIR/$relnam/$relnam.ss8.txt $OUT_DIR/$relnam/$relnam.all.acc $OUT_DIR/$relnam/$relnam.acc.txt $OUT_DIR/$relnam/$relnam.all.diso $OUT_DIR/$relnam/$relnam.diso.txt > $OUT_DIR/$relnam/$relnam.all.txt_
mv $OUT_DIR/$relnam/$relnam.all.txt_ $OUT_DIR/$relnam/$relnam.all.txt
rm -f $OUT_DIR/$relnam/$relnam.all.ss3 $OUT_DIR/$relnam/$relnam.all.ss8 $OUT_DIR/$relnam/$relnam.all.acc $OUT_DIR/$relnam/$relnam.all.diso
else #-> not use profile
cp util/0README_noprof $OUT_DIR/$relnam/0README.txt
cp $tmp/$relnam.fasta_raw $OUT_DIR/$relnam/$relnam.fasta.txt
cp $tmp/$relnam.seq $OUT_DIR/$relnam/$relnam.seq.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.ss3_ > $OUT_DIR/$relnam/$relnam.ss3_noprof.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.ss8_ > $OUT_DIR/$relnam/$relnam.ss8_noprof.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.acc_ > $OUT_DIR/$relnam/$relnam.acc_noprof.txt
awk '{if(NF>0){print $0}}' $tmp/$relnam.diso_ > $OUT_DIR/$relnam/$relnam.diso_noprof.txt
# make simple prediction
#-> SS3
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.ss3_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss3_noprof.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss3_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss3_noprof.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss3_noprof_simp.txt
#-> SS8
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.ss8_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss8_noprof.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss8_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.ss8_noprof.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.ss8_noprof_simp.txt
#-> ACC
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.acc_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.acc_noprof.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.acc_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.acc_noprof.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.acc_noprof_simp.txt
#-> DISO
echo ">$relnam" > $OUT_DIR/$relnam/$relnam.diso_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.diso_noprof.txt | awk '{printf $2}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.diso_noprof_simp.txt
grep -v "#" $OUT_DIR/$relnam/$relnam.diso_noprof.txt | awk '{printf $3}END{printf "\n"}' >> $OUT_DIR/$relnam/$relnam.diso_noprof_simp.txt
# make overall prediction
head -n1 $OUT_DIR/$relnam/$relnam.fasta.txt > $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.seq.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.ss3_noprof_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.ss8_noprof_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.acc_noprof_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
tail -n1 $OUT_DIR/$relnam/$relnam.diso_noprof_simp.txt >> $OUT_DIR/$relnam/$relnam.all.txt
printf "\n\n" >> $OUT_DIR/$relnam/$relnam.all.txt
printf "\n\n#---------------- details of SS3 prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.ss3
printf "\n\n#---------------- details of SS8 prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.ss8
printf "\n\n#---------------- details of ACC prediction ---------------------------\n" > $OUT_DIR/$relnam/$relnam.all.acc
printf "\n\n#---------------- details of DISO prediction --------------------------\n" > $OUT_DIR/$relnam/$relnam.all.diso
cat $OUT_DIR/$relnam/$relnam.all.txt $OUT_DIR/$relnam/$relnam.all.ss3 $OUT_DIR/$relnam/$relnam.ss3_noprof.txt $OUT_DIR/$relnam/$relnam.all.ss8 $OUT_DIR/$relnam/$relnam.ss8_noprof.txt $OUT_DIR/$relnam/$relnam.all.acc $OUT_DIR/$relnam/$relnam.acc_noprof.txt $OUT_DIR/$relnam/$relnam.all.diso $OUT_DIR/$relnam/$relnam.diso_noprof.txt > $OUT_DIR/$relnam/$relnam.all.txt_
mv $OUT_DIR/$relnam/$relnam.all.txt_ $OUT_DIR/$relnam/$relnam.all.txt
mv $OUT_DIR/$relnam/$relnam.all.txt $OUT_DIR/$relnam/$relnam.all_noprof.txt
rm -f $OUT_DIR/$relnam/$relnam.all.ss3 $OUT_DIR/$relnam/$relnam.all.ss8 $OUT_DIR/$relnam/$relnam.all.acc $OUT_DIR/$relnam/$relnam.all.diso
fi
# --------- make a zip file ---- #
cd "$OUT_DIR"/
#-> for Windows: duplicate every .txt result as an .rtf copy
cd "$relnam"/
rm -rf Windows
mkdir -p Windows
# Iterate the glob directly instead of `ls | awk` through a temp list file
# (SC2012); the copies produced are identical.
for i in *.txt
do
[ -e "$i" ] || continue   # no matches: the glob stays literal — skip it
cp "$i" "Windows/${i%.txt}.rtf"
done
mv Windows/0README.rtf ./
cd ../
#-> zip the whole directory and drop the archive inside it
zip -r "$relnam.property.zip" "$relnam"
mv "$relnam.property.zip" "$relnam"
cd ../
# --------- rename not use profile mode ----- #
# Strip the transitional .txt suffixes so the published names match the mode:
# no-profile results keep the plain feature names, profile results additionally
# get the .tgt profile files copied alongside.
if [ $PROF_or_NOT -ne 1 ]
then
mv $OUT_DIR/$relnam/0README.txt $OUT_DIR/$relnam/0README_noprof
mv $OUT_DIR/$relnam/$relnam.fasta.txt $OUT_DIR/$relnam/$relnam.fasta
mv $OUT_DIR/$relnam/$relnam.seq.txt $OUT_DIR/$relnam/$relnam.seq
#-> raw prediction result
mv $OUT_DIR/$relnam/$relnam.ss8_noprof.txt $OUT_DIR/$relnam/$relnam.ss8
mv $OUT_DIR/$relnam/$relnam.ss3_noprof.txt $OUT_DIR/$relnam/$relnam.ss3
mv $OUT_DIR/$relnam/$relnam.acc_noprof.txt $OUT_DIR/$relnam/$relnam.acc
mv $OUT_DIR/$relnam/$relnam.diso_noprof.txt $OUT_DIR/$relnam/$relnam.diso
#-> simp prediction result
mv $OUT_DIR/$relnam/$relnam.ss8_noprof_simp.txt $OUT_DIR/$relnam/$relnam.ss8_simp
mv $OUT_DIR/$relnam/$relnam.ss3_noprof_simp.txt $OUT_DIR/$relnam/$relnam.ss3_simp
mv $OUT_DIR/$relnam/$relnam.acc_noprof_simp.txt $OUT_DIR/$relnam/$relnam.acc_simp
mv $OUT_DIR/$relnam/$relnam.diso_noprof_simp.txt $OUT_DIR/$relnam/$relnam.diso_simp
#-> overall prediction result
mv $OUT_DIR/$relnam/$relnam.all_noprof.txt $OUT_DIR/$relnam/$relnam.all
else
mv $OUT_DIR/$relnam/0README.txt $OUT_DIR/$relnam/0README
mv $OUT_DIR/$relnam/$relnam.fasta.txt $OUT_DIR/$relnam/$relnam.fasta
mv $OUT_DIR/$relnam/$relnam.seq.txt $OUT_DIR/$relnam/$relnam.seq
#-> raw prediction result
mv $OUT_DIR/$relnam/$relnam.ss8.txt $OUT_DIR/$relnam/$relnam.ss8
mv $OUT_DIR/$relnam/$relnam.ss3.txt $OUT_DIR/$relnam/$relnam.ss3
mv $OUT_DIR/$relnam/$relnam.acc.txt $OUT_DIR/$relnam/$relnam.acc
mv $OUT_DIR/$relnam/$relnam.diso.txt $OUT_DIR/$relnam/$relnam.diso
#-> simp prediction result
mv $OUT_DIR/$relnam/$relnam.ss8_simp.txt $OUT_DIR/$relnam/$relnam.ss8_simp
mv $OUT_DIR/$relnam/$relnam.ss3_simp.txt $OUT_DIR/$relnam/$relnam.ss3_simp
mv $OUT_DIR/$relnam/$relnam.acc_simp.txt $OUT_DIR/$relnam/$relnam.acc_simp
mv $OUT_DIR/$relnam/$relnam.diso_simp.txt $OUT_DIR/$relnam/$relnam.diso_simp
#-> overall prediction result
mv $OUT_DIR/$relnam/$relnam.all.txt $OUT_DIR/$relnam/$relnam.all
#-> move tgt files
cp $tmp/$relnam.tgt $OUT_DIR/$relnam/$relnam.tgt
cp $tmp/update/$relnam.tgt $OUT_DIR/$relnam/$relnam.tgt2
fi
# -------- prediction summary -------- #
# Collapse DISO/SS3/ACC into the one-file summary (0.5 = disorder threshold
# used by the original invocation).
"$util"/generate_simp_summary_file "$OUT_DIR/$relnam/$relnam.diso" "$OUT_DIR/$relnam/$relnam.ss3" "$OUT_DIR/$relnam/$relnam.acc" 0.5 "$OUT_DIR/$relnam/$relnam.summary"
# ------ remove temporary folder ----- #
# ${tmp:?} guards the recursive delete: if $tmp were ever empty/unset this
# used to expand to `rm -rf /` (SC2115); now the script aborts instead.
rm -rf "${tmp:?}/"
rm -f "/tmp/$relnam.fasta"
cd "$CurRoot"
| true |
b2a68aa657f283f10e5a48c600a48046c04a5700 | Shell | fajarrasyid/dotfiles | /.config/conkys/Izar/scripts/weather.sh | UTF-8 | 504 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Fetch the current weather for one city from openweathermap.org and cache it
# as JSON, so conky can still show the (possibly stale) weather while offline.
# NOTE(review): the API key is committed in plain text — anyone with the repo
# can use it; consider reading it from a file kept out of version control.
api_key=b59117c083dfa1d4e6cc3186a568fd26
# get your city id at https://openweathermap.org/find and replace
city_id=4000900
url="api.openweathermap.org/data/2.5/weather?id=${city_id}&appid=${api_key}&cnt=5&units=metric&lang=es"
# Quote the expansion: unquoted it is subject to word splitting/globbing (SC2086).
curl "${url}" -s -o ~/.cache/weather.json
| true |
7d0353b7921d6c97d289119642b7a06576262f73 | Shell | demon90s/LinuxStudy | /codes/gen_readme.sh | UTF-8 | 2,034 | 4.125 | 4 | [] | no_license | #! /bin/bash
# Script that regenerates README.md for the repository.
filename=README.md   # output file, written in the repository root
chapters=18          # highest chapter number scanned (ch01 .. ch18)
# Regenerate $filename: a fixed header, then — per chapter directory
# (ch01_* .. ch${chapters}_*) — a markdown link for every source file whose
# first line carries a "实验:<name>" marker comment.
main()
{
	rm -f "$filename"
	touch "$filename"
	echo -e "# Linux程序设计\n" >> "$filename"
	echo -e "学习环境:CentOS 7\n" >> "$filename"
	echo -e "对于第五章和第六章使用了头 curses.h 和 term.h 中的功能,需要安装 curses 开发包:yum install ncurses-devel.x86_64,并在构建程序时加入链接选项 -lncurses\n" >> "$filename"
	echo -e "对于第七章的 dbm 库的使用,需要安装 gdbm 开发包:yum install gdbm-devel.x86_64,并在构建程序时加入链接选项 -lgdbm -lgdbm_compat\n" >> "$filename"
	echo -e "---\n" >> "$filename"
	# gen exercises
	echo -e "## 实验例程\n" >> "$filename"
	local i=1
	while [ "$i" -le "$chapters" ]; do
		local chapter_prefix=ch$(printf "%.2d\n" $i)
		local chapter_num=$i
		i=$((i+1))
		# Resolve chXX -> chXX_<title>; skip chapters without a directory.
		local chapter_dir
		chapter_dir=$(ls -d $chapter_prefix* 2>/dev/null)
		if [ "$chapter_dir" == "" ]; then
			continue
		fi
		cd "$chapter_dir"
		local chapter_name="第${chapter_num}章 ${chapter_dir#*_}"
		echo -e "### $chapter_name\n" >> "../$filename"
		# C/C++ sources mark experiments with a leading "// 实验:..." line,
		# shell scripts and Makefiles with "# 实验:...".  The three formerly
		# copy-pasted loops are factored into one helper.
		append_labeled_links "// 实验" "$chapter_dir" *.c*
		append_labeled_links "# 实验" "$chapter_dir" *.sh
		append_labeled_links "# 实验" "$chapter_dir" Makefile*
		cd ..
	done
}

# append_labeled_links MARKER CHAPTER_DIR FILE...
# For each existing FILE whose first line reads "MARKER:<name>" (fullwidth
# colon), append "[<name>](./CHAPTER_DIR/<file>)" to ../$filename.
# Replaces the former `for f in $(ls ...)` loops, which spawned ls needlessly
# and word-split names containing spaces (SC2012/SC2086).
append_labeled_links()
{
	local marker=$1
	local chapter_dir=$2
	shift 2
	local f fname
	for f in "$@"; do
		[ -e "$f" ] || continue   # unmatched glob passes through literally
		fname=$(head -1 "$f")
		if [ "" != "$fname" ] && [ "$marker" = "${fname%%:*}" ]; then
			fname=${fname#*实验:}
			echo -e "[$fname](./$chapter_dir/$f)\n" >> "../$filename"
		fi
	done
}
# Run from the repository root; forward arguments quoted (SC2086).
main "$@"
| true |
98d1a8b6f829a57ba4f7024f935ace0daf3df867 | Shell | CurtisVillamizar/crochet | /option/SwapFile/setup.sh | UTF-8 | 2,548 | 4.1875 | 4 | [
"BSD-2-Clause"
] | permissive | # Create a swap file and set it up correctly.
#
# Usage:
# option SwapFile 768m
#
# Creates a 768m swap file as usr/swap0 and
# adds the correct configuration entries for
# it to be used as a swap file.
#
# option SwapFile 768m
# option SwapFile 768m file=/custom/filename
# option SwapFile 768m deferred
#
# The last would causes the swap file to actually get created
# on first boot. (By adding a start script to /usr/local/etc/rc.d
# and enabling it with a suitable option.) In particular,
# this would work well with AutoSize, allowing you to create
# images that can be copied onto any media: If the media is
# larger than the image, the image resizes and creates swap
# at that time.
#
# option_swapfile_install SIZE [file=PATH] [deferred]
# Parse the option arguments, then either create the swap file now or install
# an rc.d script that creates it on first boot.  Paths without a leading slash
# are relative to the image tree being built.  Exits 2 on invalid input.
option_swapfile_install ( ) {
    _SWAPFILE_DEFERRED=false
    _SWAPFILE_FILE=swapfile0
    _SWAPFILE_SIZE_MB=512
    # Normalize the size argument: lower-case it, then keep digits only.
    # ('0-9' instead of '[0-9]': the bracketed form also kept literal [ ] —
    # tr treats the brackets as members of the set.)
    S=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    N=$(echo "$S" | tr -cd '0-9')
    case $S in
    *.*)
        echo "SwapFile: Swapfile size cannot include a Decimal point"
        exit 2
        ;;
    *m|*mb|*mi|*mib)
        _SWAPFILE_SIZE_MB=$N
        ;;
    *g|*gb|*gi|*gib)
        # gigabytes -> megabytes
        _SWAPFILE_SIZE_MB=$((N * 1024))
        ;;
    *)
        echo "SwapFile: Size argument $S not supported"
        exit 2
        ;;
    esac
    echo "SwapFile: Swapfile will be ${_SWAPFILE_SIZE_MB} MB"
    # Walk the remaining arguments; the loop ends when shift has nothing left.
    while shift; do
        case ${1-} in
            file=*)
                # strip the "file=" prefix and any leading slashes
                _SWAPFILE_FILE=$(echo "$1" | sed -e 'sXfile=/*XX')
                echo "SwapFile: swap file will be created in ${_SWAPFILE_FILE}"
                ;;
            deferred)
                echo "SwapFile: swap file will be created on first boot"
                _SWAPFILE_DEFERRED=true
                ;;
            *)
                if [ -n "${1-}" ]; then
                    echo "SwapFile: Unrecognized parameter '$1'"
                    exit 2
                fi
                ;;
        esac
    done
    if $_SWAPFILE_DEFERRED; then
        # Install the first-boot creation script and its rc.conf knobs.
        mkdir -p usr/local/etc/rc.d
        _RCDIR=usr/local/etc/rc.d
        cp "${OPTIONDIR}/swapfile_create" "${_RCDIR}/swapfile_create"
        chmod 555 "${_RCDIR}/swapfile_create"
        # Unquoted EOF on purpose: the values are expanded at build time.
        cat >>etc/rc.conf <<EOF
# On first boot, create a swap file
swapfile_create_enable="YES"
swapfile_create_file="/${_SWAPFILE_FILE}"
swapfile_create_size_mb="${_SWAPFILE_SIZE_MB}"
swapfile_create_free_mb=2048
EOF
        echo "SwapFile: installed rc.d/swapfile_create"
    else
        # Create the swap file in the image right now and register it in fstab.
        echo "SwapFile: sizing swap file to ${_SWAPFILE_SIZE_MB} MiB"
        truncate -s "${_SWAPFILE_SIZE_MB}M" "${_SWAPFILE_FILE}"
        chmod 0600 "${_SWAPFILE_FILE}"
        echo "md none swap sw,late,file=/${_SWAPFILE_FILE} 0 0" >> etc/fstab
        echo "SwapFile: swap file created and configured."
    fi
}
# Register this option's install step with crochet's phase machinery.
strategy_add $PHASE_FREEBSD_OPTION_INSTALL option_swapfile_install "$@"
| true |
ba544f4ac924317d2e41ed9d5c09776d123af3fd | Shell | wumvi/docker.email.prod | /cmd/multi-tail.sh | UTF-8 | 74 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Follow every file named on the command line concurrently, one backgrounded
# tail per file.
# NOTE(review): the script exits right after spawning the tails — if this is a
# container entrypoint it likely needs a trailing `wait`; confirm the intent.
for file in "$@"
do
echo "tail $file"
tail -f "$file" &   # quoted: paths with spaces previously word-split (SC2086)
done
| true |
d25b283eb20da04e1c8b2197197e033d09f5cde1 | Shell | petronny/aur3-mirror | /synce-serial/PKGBUILD | UTF-8 | 835 | 2.59375 | 3 | [] | no_license | # $Id: PKGBUILD 91841 2013-05-28 17:05:34Z spupykin $
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: Zhukov Pavel <gelios@gmail.com>
pkgname=synce-serial
pkgver=0.11
pkgrel=5
pkgdesc="provide a means of communication with a Windows CE device - helper scripts for setting up a serial connection"
arch=('i686' 'x86_64')
url="http://synce.sourceforge.net/"
depends=(sh)
license=('GPL')
source=(http://downloads.sourceforge.net/synce/$pkgname-$pkgver.tar.gz)
md5sums=('a83f20eb59c845de192645158d051062')
# Configure and compile in the unpacked source tree.
build() {
  # $srcdir may contain spaces — quote it (SC2086).
  cd "$srcdir/$pkgname-$pkgver"
  ./configure --prefix=/usr --libexecdir="/usr/lib/$pkgname" --sbindir=/usr/bin
  make
}
# Stage the build into $pkgdir and fix the /usr/etc -> /etc path baked into
# the shipped scripts.
package() {
  cd "$srcdir/$pkgname-$pkgver"
  make DESTDIR="$pkgdir" install
  sed -i 's#/usr/etc/#/etc/#' "$pkgdir/usr/share/synce/synce-serial-common"
  sed -i 's#/usr/etc/#/etc/#' "$pkgdir/usr/share/synce/synce-serial.conf"
}
| true |
3dba4b70eeef44b0e970ed9eac643805dbdae762 | Shell | RaildoBarros/4shared-api | /sh/upload | UTF-8 | 246 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Build the demo classpath from the bundled jars and launch the upload demo.
ROOT=../..   # kept for compatibility; not referenced below
CLASSPATH="4shared-api-demo.jar:../lib/4shared-api.jar"
# Append every entry of ../../lib via a glob instead of parsing `ls` (SC2012);
# the resulting "../../lib/<name>" entries are identical.
for jar in ../../lib/*; do
[ -e "$jar" ] || continue   # empty dir: the glob stays literal — skip it
CLASSPATH="${CLASSPATH}:${jar}"
done
# "$@" instead of $* so arguments keep their word boundaries (SC2048).
java -classpath "$CLASSPATH" \
com.pmstation.shared.soap.demo.UploadDemo "$@"
| true |
f1e861b5ab347bbf8b7ba5f14ee6cbd70209d353 | Shell | Maxicycle/vagrant-magentos | /vagrant/bootstrap.sh | UTF-8 | 3,599 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
# Update
# --------------------
# (Vagrant runs this provisioner as root, so no sudo is needed.)
apt-get update
# Install Apache & PHP
# --------------------
apt-get install -y apache2
apt-get install -y php5
apt-get install -y libapache2-mod-php5
apt-get install -y php5-mysql php5-curl php5-gd php5-intl php-pear php5-imap php5-mcrypt php5-ming php5-ps php5-pspell php5-recode php5-snmp php5-sqlite php5-tidy php5-xmlrpc php5-xsl php-apc
#enable mcrypt (required by Magento)
php5enmod mcrypt
# Delete default apache web dir and symlink mounted vagrant dir from host machine
# --------------------
rm -rf /var/www
if [ ! -d "/vagrant/httpdocs" ]; then
mkdir /vagrant/httpdocs
fi
# -f so a re-provision replaces an existing link instead of failing
ln -fs /vagrant/httpdocs /var/www
# Replace contents of default Apache vhost
# --------------------
# Unquoted EOF: nothing in the body needs expansion, but keep it simple.
VHOST=$(cat <<EOF
<VirtualHost *:80>
    DocumentRoot "/var/www"
    ServerName simple-magento-vagrant.dev
    <Directory "/var/www">
        AllowOverride All
    </Directory>
    SetEnv MAGE_IS_DEVELOPER_MODE true
</VirtualHost>
EOF
)
echo "$VHOST" > /etc/apache2/sites-enabled/000-default.conf
# Magento's rewrite rules need mod_rewrite; restart picks up the new vhost.
a2enmod rewrite
service apache2 restart
# Adminer
# --------------------
# One-file DB admin UI, installed only on first provision.
if [ ! -d "/vagrant/httpdocs/adminer" ]; then
echo "Adminer not found at /vagrant/httpdocs/adminer and will be installed..."
mkdir /vagrant/httpdocs/adminer
wget -O /vagrant/httpdocs/adminer/index.php http://downloads.sourceforge.net/adminer/adminer-4.0.3.php
echo "Adminer installed... Use http://simple-magento-vagrant.dev/adminer/ URL to use it."
fi
# Mysql
# --------------------
# Ignore the post install questions
export DEBIAN_FRONTEND=noninteractive
# Install MySQL quietly
apt-get -q -y install mysql-server-5.5
# NOTE(review): hard-coded 'magentouser'/'password' credentials — acceptable
# only for a throwaway local VM; never reuse this pattern elsewhere.
mysql -u root -e "DROP DATABASE IF EXISTS magentodb"
mysql -u root -e "CREATE DATABASE IF NOT EXISTS magentodb"
mysql -u root -e "GRANT ALL PRIVILEGES ON magentodb.* TO 'magentouser'@'localhost' IDENTIFIED BY 'password'"
mysql -u root -e "FLUSH PRIVILEGES"
# Magento
# --------------------
# Unpack the Magento release tarball into the web root, skipping AppleDouble
# ("._*") files, then flatten the extracted magento/ directory away.
if [ -f "/vagrant/source/magento-1.8.1.0.tar.bz2" ]; then
echo "/vagrant/source/magento-1.8.1.0.tar.bz2 found. Start copy..."
tar xvf /vagrant/source/magento-1.8.1.0.tar.bz2 -C /vagrant/httpdocs/ --exclude='._*'
echo "moving files to /vagrant/httpdocs folder..."
# BUGFIX: the former `magento/{*,.*}` expansion also matched `.` and `..`,
# so mv printed errors on every provision; dotglob moves hidden files safely.
shopt -s dotglob
mv /vagrant/httpdocs/magento/* /vagrant/httpdocs
shopt -u dotglob
rm -r /vagrant/httpdocs/magento/
echo "Done."
else
echo "/vagrant/source/magento-1.8.1.0.tar.bz2 not found."
fi
#
# Import DB
# --------------------
# Load the sample dataset, then point Magento's base URLs at this VM's host.
if [ -f "/vagrant/source/sql_magento_sample_data_1.6.1.0.sql" ]; then
echo "/vagrant/source/sql_magento_sample_data_1.6.1.0.sql found. Start import..."
mysql -u root magentodb < /vagrant/source/sql_magento_sample_data_1.6.1.0.sql
echo "Done. Run db update..."
#
# Update DB
# --------------------
mysql -u root -e "UPDATE magentodb.core_config_data SET value = 'http://simple-magento-vagrant.dev/' WHERE core_config_data.path = 'web/unsecure/base_url'"
mysql -u root -e "UPDATE magentodb.core_config_data SET value = 'http://simple-magento-vagrant.dev/' WHERE core_config_data.path = 'web/secure/base_url'"
echo "Update complete."
else
echo "/vagrant/source/sql_magento_sample_data_1.6.1.0.sql not found."
fi
# Import Media
# --------------------
# Sample-data images for the catalog, unpacked straight into the web root.
if [ -f "/vagrant/source/media_magento_sample_data_for_1.6.1.0.tar.gz" ]; then
echo "/vagrant/source/media_magento_sample_data_for_1.6.1.0.tar.gz found. Start copy..."
tar xvzf /vagrant/source/media_magento_sample_data_for_1.6.1.0.tar.gz -C /vagrant/httpdocs/ --exclude='._*'
echo "Done."
else
echo "/vagrant/source/media_magento_sample_data_for_1.6.1.0.tar.gz not found."
fi
| true |
2ecb3528d7ef115fd1c6bfa52ea18db4fc4f575c | Shell | mselivanov/tools | /scripts/cloud/gcp/services/composer/cli/run_backfill.sh | UTF-8 | 413 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
# Trigger an Airflow backfill on a Cloud Composer environment.
# Required environment variables:
#   COMPOSER_ENVIRONMENT - Google Cloud Composer environment name
#   COMPOSER_LOCATION - Composer location, e.g. europe-west1
#   START_DATE - backfill start date in YYYY-MM-DD format
#   END_DATE - backfill end date in YYYY-MM-DD format
#   DAG_ID - dag id
# Fail fast with a clear message instead of invoking gcloud with empty args.
: "${COMPOSER_ENVIRONMENT:?must be set}"
: "${COMPOSER_LOCATION:?must be set}"
: "${START_DATE:?must be set}"
: "${END_DATE:?must be set}"
: "${DAG_ID:?must be set}"
gcloud composer environments run "${COMPOSER_ENVIRONMENT}" --location "${COMPOSER_LOCATION}" backfill -- -s "${START_DATE}" -e "${END_DATE}" "${DAG_ID}"
| true |
cda6c54c3a0cff12118947d2c4a875123bd3590e | Shell | simonjupp/atlas-metadata | /src/hierarchical_ancestors.sh | UTF-8 | 1,134 | 3.984375 | 4 | [
"MIT"
] | permissive | PAGE_SIZE=1000
# Abort on errors and unset variables; fail a pipeline if any stage fails.
set -euo pipefail
# ols_url WHAT CELL_ID [PAGE]
# Print the OLS REST URL for one CL-ontology term query.
#   WHAT    - API sub-resource, e.g. hierarchicalAncestors
#   CELL_ID - term id, e.g. CL_0000084
#   PAGE    - optional page number; omitted -> empty page parameter
# Reads the global PAGE_SIZE.
ols_url(){
    # `local` keeps these out of the global namespace (the original leaked
    # what/cell_id/pageParam as globals); printf avoids echo's option quirks.
    local what="$1"
    local cell_id="$2"
    local pageParam="${3:+page=$3}"
    # The term IRI is double-URL-encoded, as the OLS API requires.
    printf '%s\n' "https://www.ebi.ac.uk/ols/api/ontologies/cl/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252F${cell_id}/${what}?size=${PAGE_SIZE}&${pageParam}"
}
# get_terms WHAT CELL_ID [PAGE]
# Fetch one result page from OLS and print "short_form<TAB>label" lines,
# blank lines removed, sorted and deduplicated.
# NOTE(review): with pipefail, a page yielding no non-blank output makes the
# grep fail, and set -e then aborts the script — confirm that is intended.
get_terms(){
    # Quote the command substitution (SC2046); the URL itself has no spaces.
    curl "$( ols_url "$@" )" \
        | jq -r '._embedded.terms // [] | map (.short_form +"\t"+ .label)[]' | grep '[^[:blank:]]'| sort -u
}
# get_total_pages WHAT CELL_ID [PAGE]
# Print the API's total page count for the query.
get_total_pages(){
    curl "$( ols_url "$@" )" \
        | jq -r '.page.totalPages'
}
# paged_ancestors CELL_ID
# Stream every page of hierarchical ancestors for one term id: progress
# messages go to stderr, "short_form<TAB>label" lines to stdout.
paged_ancestors(){
    local cell_id="$1"
    local PAGES page
    >&2 echo "[$cell_id] retrieving page count"
    PAGES=$(get_total_pages "hierarchicalAncestors" "$cell_id" )
    if [[ $PAGES -eq 0 ]]
    then
        >&2 echo "[$cell_id]Found no data"
    else
        # $((...)) replaces the obsolete $[...] arithmetic form; the no-op
        # `>&1` redirection that preceded get_terms was dropped.
        for page in $( seq 0 $((PAGES-1)) ); do
            >&2 echo "[$cell_id] retrieving page $((page+1))/$PAGES"
            get_terms "hierarchicalAncestors" "$cell_id" "$page"
        done
    fi
}
# Entry point: require at least one term id, then emit
# "<query id><TAB><short_form><TAB><label>" per ancestor, per argument.
if [[ $# -lt 1 ]] ; then
    echo "Fetch hierarchical ancestors from OLS"
    echo "Usage: $0 id_1 id_2 ... id_n"
    exit 2
fi
for cell_id in "$@" ; do
    # $cell_id quoted (SC2086); the id reaches awk via -v, not interpolation.
    paged_ancestors "$cell_id" | awk -F '\t' -v ID="$cell_id" '{print ID "\t" $0}'
done
| true |
5097adb45dc89f321a56ff2cfb4eb5c37b6dcc82 | Shell | hamonikr/hamonikr-docker | /node_exporter/agent-setup.sh | UTF-8 | 811 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Install Prometheus node_exporter v1.0.1 as a systemd service running under a
# dedicated no-login user.
sudo useradd node_exporter -s /sbin/nologin
sudo apt install curl -y
wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz
tar xvzf node_exporter-1.0.1.linux-amd64.tar.gz
sudo cp node_exporter-1.0.1.linux-amd64/node_exporter /usr/sbin/
# Empty options file referenced by the unit's EnvironmentFile below.
sudo mkdir -p /etc/sysconfig
sudo touch /etc/sysconfig/node_exporter
# NOTE(review): the redirection after `sudo cat` is performed by the *calling*
# shell, so this only works when the whole script already runs as root (as the
# bare systemctl calls below suggest) — otherwise use `sudo tee`.
sudo cat << 'EOF' > /etc/systemd/system/node_exporter.service
[Unit]
Description=Node Exporter
[Service]
User=node_exporter
EnvironmentFile=/etc/sysconfig/node_exporter
ExecStart=/usr/sbin/node_exporter $OPTIONS
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable node_exporter
systemctl start node_exporter
# Clean up the downloaded tarball and the extracted directory.
sudo rm -rf node_exporter-1.0.1.linux-amd64*
sleep 1
# curl http://localhost:9100/metrics | true |
9330ee77b700d127cbaa961e4691dbe05b2b704a | Shell | vcwebio/conteco.controlplane.soleco | /conteco/bin/controlplane/soleco/internal/run-descendant | UTF-8 | 464 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Usage: run-descendant IMAGE_NAME COMPONENT STACK RECURSE METHOD
# For every child directory of IMAGE_NAME/soleco[/COMPONENT] whose name does
# not start with "_", invoke:  run METHOD <component>/<child> STACK [--recurse] IMAGE_NAME
imageName="$1"
component="$2"
stack="$3"
# Explicit defaults: previously inherited $recurse/$prefix environment
# variables could leak into the loop below.
recurse=""
prefix=""
if [[ "$4" = "true" ]] ; then recurse="--recurse"; fi
method="$5"
if [[ "$component" == "." ]] ; then
parent="soleco";
else
parent="soleco/$component";
prefix="$component/"
fi
for folder in "$imageName/$parent"/*
do
if [[ -d $folder ]] ; then
folderName="$(basename "$folder")"
# $recurse is deliberately left unquoted: when empty it must vanish rather
# than become an empty argument (everything else is quoted — SC2086).
if [[ "$folderName" != "_"* ]] ; then run "$method" "$prefix$folderName" "$stack" $recurse "$imageName"; fi
fi
done
| true |
a83429e1824876f841c2e77dfb938164b3e87917 | Shell | NikhilDusane222/Shellscript | /Sequences/unitconversion.sh | UTF-8 | 564 | 3.046875 | 3 | [] | no_license | #! /bin/bash -x
# Unit-conversion examples computed with bc (scale=2 -> two decimal places
# for divisions).
#1ft = 12 in then 42 in = ? ft
FEET=12;
inch=$(echo "scale=2; 42/$FEET" |bc)
# Data goes in as an argument, never as the printf format string (SC2059).
printf '42 inch means %s foot \n' "$inch"
#Rectangular Plot of 60 feet x 40 feet in meters
#1 Feet = 0.3048 meters
Feet=0.3048;
meters=$(echo "scale=2; 60*40*$Feet" |bc)
# NOTE(review): this multiplies an *area* (sq ft) by the linear ft->m factor,
# so the result is not square metres — confirm the intended formula (0.3048^2).
printf 'Rectangular Plot of 60 feet x 40 feet means %s meters \n' "$meters"
#Calculate area of 25 such plots in acres
metersinacers=0.000247105;
totalmeters=$(echo "scale=2; $meters*25" |bc)
totalacers=$(echo "scale=2; $totalmeters*$metersinacers " |bc)
printf '25 plots total area is %s acres \n' "$totalacers"
| true |
0b6ebd86c8896908b14901a8c4e42f88793a4cbf | Shell | tapi0ka/dotfiles | /bin/app/docker.sh | UTF-8 | 866 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Abort on any error or use of an unset variable.
set -eu
#
# Install Docker (Dockerインストール)
#
# Exit with an error message (Japanese: "run with root privileges") unless the
# effective UID is 0.
function check_root() {
# Inverted the original "if root then : else fail" shape; same behavior.
if [[ ${EUID:-${UID}} != 0 ]]; then
echo "ルート権限で実行してください"
exit 1
fi
}
# Install Docker via get.docker.com, add the invoking user to the "docker"
# group, and enable the service at boot.  Requires root (see check_root).
function main () {
# check_root
curl -fsSL https://get.docker.com -o get-docker.sh
sh get-docker.sh
# Create the "docker" group.  BUGFIX: the original comment said "&&: so an
# already-existing group doesn't abort", but `&& :` still propagates the
# failure under `set -eu`; `|| :` is what actually swallows it.
groupadd docker || :
# Add the sudo-ing user (or the current user) to the docker group.
# On Ubuntu:
# usermod -aG docker ${SUDO_USER:-$USER}
# On Debian:
gpasswd -a "${SUDO_USER:-$USER}" docker
# Start the docker service at boot.
systemctl enable docker.service
rm -f get-docker.sh
# A re-login/reboot is needed before docker works without sudo.
}
# Entry point (must run as root; see check_root).
main
f014d959b92bbfb984afe3e294bdd477d4b5c17a | Shell | lichunzhu/tidb-tools | /tests/sync_diff_inspector/snapshot/run.sh | UTF-8 | 1,728 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive |
#!/bin/sh
# Integration test for sync_diff_inspector's snapshot comparison: record a
# binlog position, break the data, then verify the snapshot config still
# passes and the generated fix.sql repairs the live data.
set -e
cd "$(dirname "$0")"
OUT_DIR=/tmp/tidb_tools_test/sync_diff_inspector
# Capture the current TS from `show master status` before mutating data.
mysql -uroot -h 127.0.0.1 -P 4000 -e "show master status" > $OUT_DIR/ts.log
cat $OUT_DIR/ts.log
ts=`grep -oE "[0-9]+" $OUT_DIR/ts.log`
echo "get ts $ts"
echo "delete one data, diff should not passed"
mysql -uroot -h 127.0.0.1 -P 4000 -e "delete from diff_test.test limit 1"
sync_diff_inspector --config=./config_base.toml > $OUT_DIR/snapshot_diff.log || true
check_contains "check failed" $OUT_DIR/snapshot_diff.log
# fix.sql will be empty after check below, so backup it
cp $OUT_DIR/fix.sql $OUT_DIR/fix.sql.bak
echo "use snapshot compare data, test sql mode by the way, will auto discover ANSI_QUOTES thus pass"
mysql -uroot -h 127.0.0.1 -P 4000 -e "SET GLOBAL sql_mode = 'ANSI_QUOTES';"
sleep 10
mysql -uroot -h 127.0.0.1 -P 4000 -e "show variables like '%sql_mode%'"
mysql -uroot -h 127.0.0.1 -P 4000 -e "show create table diff_test.test"
# Re-run the diff pinned to the pre-delete snapshot: data must match.
cp config_base.toml config.toml
echo "snapshot = \"$ts\"" >> config.toml
echo "use snapshot compare data, data should be equal"
sync_diff_inspector --config=./config.toml > $OUT_DIR/snapshot_diff.log
check_contains "check pass!!!" $OUT_DIR/snapshot_diff.log
echo "execute fix.sql and use base config, and then compare data, data should be equal"
cat $OUT_DIR/fix.sql.bak | mysql -uroot -h127.0.0.1 -P 4000
sync_diff_inspector --config=./config_base.toml > $OUT_DIR/snapshot_diff.log
check_contains "check pass!!!" $OUT_DIR/snapshot_diff.log
# reset sql mode
mysql -uroot -h 127.0.0.1 -P 4000 -e "SET GLOBAL sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION';"
echo "snapshot test passed" | true |
0e6ae247c08a8f97116beaa6fa046bd4b705c44f | Shell | hcs42/hcs-utils | /bin/ListHCSUtils | UTF-8 | 752 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Lists the programs of hcs-utils.
# Print the man-style usage text to stdout.
function print_help
{
    cat <<EOF
NAME
    ListHCSUtils
USAGE
    ListHCSUtils
DESCRIPTION
    Lists the programs of hcs-utils.
OPTIONS
    -h, --help
        This help
AUTHOR
    Csaba Hoch <csaba.hoch@gmail.com>
EOF
}
# Directory this script lives in; the programs to list sit next to it.
HCS_UTILS_DIR=$(dirname "$0")
# ${1-} behaves identically to $1 here but stays safe under `set -u` / no args.
case ${1-} in
    -h|--help)
        print_help
        exit
        ;;
esac
# Print the one-line description of a script: skips leading blank lines and
# the shebang, then joins the first block of '#' comment lines into a single
# line (stripping the '# ' prefixes).
# NOTE(review): $1 is unquoted, so file names with spaces would word-split.
function print_short_description
{
    sed -n ':a;/^ *$/{n;ba}; # eat empty lines
            /^ *#/{n}; # eat shebang
            :c;/^ *$/{n;bc}; # eat empty lines
            :d;/^ *#/{s/^ *# *//g;N;bd}; # print comments
            s/\n *# */ /g;s/\n//g;p;q' $1
}
# List every program in the utils directory with its one-line description.
# Iterates a glob instead of parsing `ls` output (SC2012); names containing
# spaces previously word-split.
for program_path in "$HCS_UTILS_DIR"/*
do
    [ -e "$program_path" ] || continue   # empty dir: glob stays literal
    program=${program_path##*/}
    echo -n "$program --- "
    print_short_description "$program_path"
done
| true |
baa1a48de8c9a136ae15e412e0a5401f5a3e5710 | Shell | ryjen/prep-plugins | /tests/make/main.bats | UTF-8 | 634 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bats
# Bats suite for the "make" prep plugin; shared helpers come from support/lib.
load "../support/lib"
# Record whether `make` is installed (0 = present); several tests key off it.
type make > /dev/null 2>&1
exists=$?
# NOTE(review): "available" and "unavailable" run the identical command and
# the skip logic in "unavailable" looks inverted (it skips when make IS
# present) — confirm the intended semantics.
@test "available" {
run plugin_script "load_input.txt"
[ "$status" -eq $exists ]
}
@test "unavailable" {
if [ "$exists" -ne 0 ]; then
skip
fi
run plugin_script "load_input.txt"
[ "$status" -eq 0 ]
}
@test "valid build" {
local SRCDIR="${TEST_ROOT_DIRNAME}/data/make"
if [ ! -d $SRCDIR ]; then
echo "${SRCDIR} not a directory"
return 1
fi
run plugin_build "${SRCDIR}"
[ "$status" -eq 0 ]
}
@test "invalid build" {
local SRCDIR="/tmp/invalid"
run plugin_build "${SRCDIR}"
[ "$status" -ne 0 ]
}
| true |
08746d39a317a59a553807f508e145d0f680352b | Shell | soarnsoar/ToolsForHiggsCombine | /combine3yrCard.sh | UTF-8 | 2,239 | 2.828125 | 3 | [] | no_license | #source DefineMassBin.sh ##ARR_MASS=(mass1 mass2 ...)
# Mass points to combine; for each one a three-year combined datacard is built.
ARR_MASS=( 200 210 230 250 300 350 400 450 500 550 600 650 700 750 800 850 900 1000 1500 2000 2500 3000 4000 5000 )
for MASS in "${ARR_MASS[@]}";do
# Skip the mass point unless the per-year combined card exists for ALL three
# years.  BUGFIX: the 2017/2018 checks previously re-tested the 2016 path
# (copy-paste); they now test their own year.  `[ -f ]` replaces `ls|wc -l`.
if [ ! -f "../Datacards_2016/Datacard_M${MASS}/combine_M${MASS}.txt" ]; then continue; fi
if [ ! -f "../Datacards_2017/Datacard_M${MASS}/combine_M${MASS}.txt" ]; then continue; fi
if [ ! -f "../Datacards_2018/Datacard_M${MASS}/combine_M${MASS}.txt" ]; then continue; fi
echo "$MASS"
mkdir -p "Datacard_M${MASS}"
##per-year channel=datacard input lists##
input2016="MuonChggfBoostedSR2016=../Datacards_2016/Datacard_M${MASS}/MuonChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt ElectronChggfBoostedSR=../Datacards_2016/Datacard_M${MASS}/ElectronChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt MuonChggfResolvedSR=../Datacards_2016/Datacard_M${MASS}/MuonChggfResolvedSR/LnJJ_mass/datacard.txt ElectronChggfResolvedSR=../Datacards_2016/Datacard_M${MASS}/ElectronChggfResolvedSR/LnJJ_mass/datacard.txt"
input2017="MuonChggfBoostedSR2017=../Datacards_2017/Datacard_M${MASS}/MuonChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt ElectronChggfBoostedSR=../Datacards_2017/Datacard_M${MASS}/ElectronChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt MuonChggfResolvedSR=../Datacards_2017/Datacard_M${MASS}/MuonChggfResolvedSR/LnJJ_mass/datacard.txt ElectronChggfResolvedSR=../Datacards_2017/Datacard_M${MASS}/ElectronChggfResolvedSR/LnJJ_mass/datacard.txt"
input2018="MuonChggfBoostedSR2018=../Datacards_2018/Datacard_M${MASS}/MuonChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt ElectronChggfBoostedSR=../Datacards_2018/Datacard_M${MASS}/ElectronChggfBoostedSR/CleanFatJetPassMBoostedSR_HlnFat_mass/datacard.txt MuonChggfResolvedSR=../Datacards_2018/Datacard_M${MASS}/MuonChggfResolvedSR/LnJJ_mass/datacard.txt ElectronChggfResolvedSR=../Datacards_2018/Datacard_M${MASS}/ElectronChggfResolvedSR/LnJJ_mass/datacard.txt"
# The input variables are intentionally unquoted: each "channel=path" token
# must reach combineCards.py as a separate argument.
# NOTE(review): `&>` also captures stderr into the output card, and the
# Electron*/MuonChggfResolvedSR channel names repeat identically across the
# three years — verify both are intended.
combineCards.py -S ${input2016} ${input2017} ${input2018} &> "Datacard_M${MASS}/combine_M${MASS}.txt"
done
| true |
2709ed60983ef3b57e1ddf39a1394e8e6fd6ca7d | Shell | frank101m/hangman | /res/scores_menu.sh | UTF-8 | 2,060 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Pull in the shared game entry points (playerManagement, etc.).
. ../main.sh
# Currently selected menu option; filled in by get_scores_option.
score_option=""
# Draw the score menu, wait for a validated key press (stored in the
# global score_option by get_scores_option), then dispatch to the view
# the player selected.
function show_scores_menu()
{
local -a menu
menu[0]="==============================================="
menu[1]=" Puntuacion:"
menu[2]=" (1) Ver mis puntajes en orden"
menu[3]=" (2) Ver mis puntajes por fecha"
menu[4]=" (3) Ver puntaje global"
menu[5]=" (4) Regresar"
printf '%s\n' "${menu[@]}"
get_scores_option
clear
case "$score_option" in
"1") display_scores_desc ;;
"2") display_scores_time ;;
"3") display_scores_global ;;
"4") return_menu ;;
esac
}
# Leave the scores menu and hand control back to the player-management
# screen. playerManagement is presumably defined in the sourced
# ../main.sh — TODO confirm.
function return_menu()
{
playerManagement
}
# Block until the player presses one of the keys 1-4 (read silently,
# one character, no echo) and leave the pressed key in the global
# score_option for the caller to dispatch on.
function get_scores_option()
{
while true; do
read -rsn1 score_option
case "$score_option" in
[1-4]) break ;;
esac
done
}
# Show the current player's scores, highest first, then return to the menu.
# NOTE(review): $username is interpolated directly into the SQL string —
# a quote in the user name breaks the query (SQL injection risk); consider
# psql -v parameter binding.
function display_scores_desc()
{
sudo -u postgres -H -- psql -d codigoabierto -c "SELECT usr as usuario, puntaje FROM puntaje WHERE usr = '$username' ORDER BY puntaje DESC;" | head -n -2
#SELECT * FROM puntaje WHERE usr = '$usr' ORDER BY puntaje DESC;
echo "Presione una tecla para continuar"
read -n1
clear
show_scores_menu
}
# Show the current player's scores ordered by date (newest first),
# then return to the menu. Same SQL-interpolation caveat as above.
function display_scores_time()
{
sudo -u postgres -H -- psql -d codigoabierto -c "SELECT usr as usuario, puntaje, fecha::date FROM puntaje WHERE usr = '$username' ORDER BY fecha DESC;" | head -n -2
#SELECT * FROM puntaje WHERE usr = '$usr' ORDER BY puntaje DESC;
echo "Presione una tecla para continuar"
read -n1
clear
show_scores_menu
#SELECT * FROM puntaje WHERE usr = '$usr' ORDER BY fecha DESC;
}
# Show every player's scores, highest first, then return to the menu.
# head -n -2 trims psql's trailing row-count footer.
function display_scores_global()
{
sudo -u postgres -H -- psql -d codigoabierto -c "SELECT usr as usuario, puntaje FROM puntaje ORDER BY puntaje DESC;" | head -n -2
#SELECT * FROM puntaje WHERE usr = '$usr' ORDER BY puntaje DESC;
echo "Presione una tecla para continuar"
read -n1
clear
show_scores_menu
#SELECT * FROM puntaje ORDER BY puntaje DESC;
}
export -f show_scores_menu
| true |
f9572f54b8a6393a63c4f24eab4cc1d382b8681a | Shell | variableLabel/lapse | /lapse.sh | UTF-8 | 237 | 2.5625 | 3 | [] | no_license | #! /bin/bash
# /etc/init.d/lapse.sh
# Time-lapse capture loop: take one still roughly every 9 seconds,
# named by timestamp, into SAVEDIR.
SAVEDIR=/home/pi/lapse/raw/ #/mnt/piStore/raw
# BUG/IDIOM FIX: `while [ true ]` only looped because any non-empty string
# is truthy to `[` (even "false"); `while true` states the intent directly.
while true; do
	# e.g. 20240101-120000.jpg
	filename=$(date "+%Y%m%d-%H%M%S").jpg
	# Quote the target path so the loop survives spaces in SAVEDIR.
	raspistill -n -e jpg -ex auto -awb auto -md 2 -q 100 -o "$SAVEDIR/$filename"
	sleep 9
done
| true |
f00352f19426e5f5ffa0656f1a6fa368442f5f82 | Shell | s-light/acme_ola_crosscompile | /chroot_start.sh | UTF-8 | 1,047 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
##########################################
# prepare
# Set up an ARM rootfs so it can be entered via chroot from an x86 host:
# qemu-user-static provides the ARM emulation binary inside the rootfs.
# copy qemu binary
sudo cp /usr/bin/qemu-arm-static target-rootfs/usr/bin
# bind host things so chroot can use them
## this call binds successfully
## but if you exit the chroot this produces a broken /dev....
# sudo mount -o bind /dev/ target-rootfs/dev/
# this works better:
sudo mount --rbind /dev target-rootfs/dev/
sudo mount --make-rslave target-rootfs/dev/
# source: https://unix.stackexchange.com/questions/263972/unmount-a-rbind-mount-without-affecting-the-original-mount/264488#264488
##########################################
# start chroot
# sudo LC_ALL=C LANGUAGE=C LANG=C chroot target-rootfs /bin/bash
# make a full login as the target user
# (blocks here until the chroot session ends)
sudo LC_ALL=C LANGUAGE=C LANG=C chroot target-rootfs /bin/login light
##########################################
# we have exited the chroot...
# clean up
# check user
# echo $USER
# remove previous mounted things
sudo umount -R target-rootfs/dev/
# remove qemu binary
sudo rm target-rootfs/usr/bin/qemu-arm-static
| true |
4556cb48a4f4c1e1140f324f2ca31b4130bb9f0a | Shell | MyRequiem/LFS | /stage-2-blfs-stable-x86_64/build/n/whois.sh | UTF-8 | 1,896 | 3.59375 | 4 | [
"MIT"
] | permissive | #! /bin/bash
PRGNAME="whois"
### Whois (whois directory client)
# Client application that queries the whois directory service for
# information related to a particular domain name. The mkpasswd
# utility is installed as well.
# Required: no
# Recommended: no
# Optional: libidn
#           libidn2
ROOT="/root/src/lfs"
source "${ROOT}/check_environment.sh" || exit 1
source "${ROOT}/unpack_source_archive.sh" "${PRGNAME}" || exit 1
# Stage the build into a temporary package root (DESTDIR-style install).
TMP_DIR="${BUILD_DIR}/package-${PRGNAME}-${VERSION}"
mkdir -pv "${TMP_DIR}"
make || exit 1
# whois and mkpasswd
make prefix=/usr install-"${PRGNAME}" BASEDIR="${TMP_DIR}"
make prefix=/usr install-mkpasswd BASEDIR="${TMP_DIR}"
# locale files
make prefix=/usr install-pos BASEDIR="${TMP_DIR}"
# The mkpasswd command was already installed in LFS with the expect
# package — remove it and drop its entries from expect's package log.
EXPECT_PKG="/var/log/packages/expect"
rm -rf /usr/bin/mkpasswd
rm -rf /usr/share/man/man1/mkpasswd.1
sed '/\/usr\/bin\/mkpasswd/d' -i "${EXPECT_PKG}"-*
sed '/\/usr\/share\/man\/man1\/mkpasswd.1/d' -i "${EXPECT_PKG}"-*
source "${ROOT}/stripping.sh" || exit 1
source "${ROOT}/update-info-db.sh" || exit 1
# Copy the staged tree onto the real filesystem root.
/bin/cp -vpR "${TMP_DIR}"/* /
cat << EOF > "/var/log/packages/${PRGNAME}-${VERSION}"
# Package: ${PRGNAME} (whois directory client)
#
# Whois is a client-side application which queries the whois directory service
# for information pertaining to a particular domain name.
#
# Home page: https://www.linux.it/~md/software/
# Download: https://github.com/rfc1036/${PRGNAME}/archive/v${VERSION}/${PRGNAME}-${VERSION}.tar.gz
#
EOF
source "${ROOT}/write_to_var_log_packages.sh" \
"${TMP_DIR}" "${PRGNAME}-${VERSION}"
| true |
b8357705271b07dcd634fb0a0b0cbc685444a350 | Shell | Darkvater/ffpbuildenv | /source/Conf.d/fetch.sh | UTF-8 | 1,437 | 4.25 | 4 | [] | no_license | #!/bin/sh
usage()
{
echo "Usage: $(basename $0) [-d distfiles-directory] uri..."
exit 1
}
die()
{
cat <<EOF
ERROR: $@
EOF
exit 1
}
# default destination directory
D=$(pwd)
# parse options
[ $# -eq 0 ] && usage
_opts=$(getopt hd: "$@") || usage
eval set -- "$_opts"
while true; do
case "$1" in
-d)
D="${2%/}"
shift 2
;;
-h)
usage
;;
--)
shift
break
;;
*)
die "getopt error"
;;
esac
done
# specific fetch functions
fetch_wget()
{
_cmd="wget -O '$2' '$1'"
echo "$_cmd"
eval $_cmd
}
fetch_git()
{
_cmd="cd '$(dirname $2)'; git clone "$1" '$(basename $2)'"
echo "$_cmd"
eval $_cmd
}
fetch_svn()
{
_cmd="cd '$(dirname $2)'; svn co "$1" '$(basename $2)'"
echo "$_cmd"
eval $_cmd
}
fetch_cvs()
{
module=$(basename $2)
cvsroot=$(echo $1 | sed -e 's@^cvs:@@' -e "s@/$module\$@@")
_cmd="cd '$(dirname $2)'; CVS_RSH=ssh cvs -d '$cvsroot' checkout '$module'"
echo $_cmd
eval $_cmd
}
# one by one
while [ $# -gt 0 ]; do
uri="$1"
f=$(echo "$uri" | sed -e 's@.*/@@' | sed -e 's@\s.*@@')
shift
if [ -e "$D/$f" ]; then
echo "* $f exists"
continue
fi
case "$uri" in
http:* | https:* | ftp:*)
_cmd="fetch_wget"
;;
git:*)
_cmd="fetch_git"
;;
svn:*)
_cmd="fetch_svn"
;;
cvs:*)
_cmd="fetch_cvs"
;;
*)
die "Unknown protocol: $uri"
;;
esac
echo "* $f"
mkdir -p $D
eval $_cmd "'$uri'" '$D/$f' || die "Download of '$uri' failed"
done
| true |
b50125032b288f7ea1648bd6973ef16b965c965c | Shell | ggear/asystem | /src/meg_flo/benchmark/src/main/resources/benchmark-suite/benchmarks/sysbench/parse_memory.sh | UTF-8 | 1,114 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Parse a sysbench "memory" benchmark result pair (baseline + result file)
# and append one CSV row to the output file ($3), creating its header on
# first use. Result files must be named <limits>_<machine>_memory_<opts>.prof.
test_name="memory"
if [ "$#" -ne 3 ]; then
cat <<-ENDOFMESSAGE
Please specify the base result file and the result file, as well as the output file as arguments.
The result should be named in this format: <limits>_<machine>_${test_name}_<opts>.prof (e.g. without_kv3_memory_seq.prof)
Usage: ./parse_${test_name}.sh <base result file> <result file> <output file>
ENDOFMESSAGE
exit
fi
mkdir -p $(dirname $3)
header="machine,limits,benchmark,base_result,lower_is_better,result"
# Write the CSV header once: if the file is missing or lacks the header.
if [ ! -f "$3" ] || ! grep -q "$header" "$3"; then
echo "$header" | tee "$3"
fi
# Split the result file name into its underscore-separated components.
bn=`basename "$2" ".prof"`
res_test_name=`echo "$bn" | cut -d _ -f 3`
if [ "${res_test_name}" != "${test_name}" ]; then
echo "Please check the if result file is from ${test_name} test. (Current test name: $res_test_name)"
exit
fi
opts=`echo "$bn" | cut -d _ -f 4-`
machine=`echo "$bn" | cut -d _ -f 2`
limits=`echo "$bn" | cut -d _ -f 1`
# PCRE lookahead (grep -P): grab the number preceding " ops/sec".
p_ops="[\d\.]+(?= ops/sec)"
base_res=`grep -oP "$p_ops" $1`
res=`grep -oP "$p_ops" $2`
echo "$machine,$limits,sysbench_${test_name}_${opts},$base_res,False,$res" | tee -a "$3"
| true |
057df5808a1361c91f4ba09b0f467ae0f15535b8 | Shell | Gmentsik/docktartar | /bin/run.sh | UTF-8 | 867 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Container entrypoint: set the timezone, install the backup cron job,
# optionally configure ssmtp for mail notifications, then run crond in
# the foreground (tail keeps the container alive and streams the log).
VERSION="[v1.0.6] [21.02.2017]"
echo "Version: ${VERSION}"
cp /usr/share/zoneinfo/${TIMEZONE} /etc/localtime
echo "${TIMEZONE}" > /etc/timezone
echo "Registering new cron..."
# NOTE(review): echo without -e writes the literal characters "\n" into the
# crontab line rather than a newline — looks unintended; confirm crond
# tolerates it.
echo "${CRON} /root/docktartar.sh >> /var/log/cron.log 2>&1 \n" > /root/docktartar.cron
crontab /root/docktartar.cron
echo "Setting up mail..."
# Mail is configured only when EMAIL_TO is provided; the SMTP password is
# written in plain text to /etc/ssmtp/ssmtp.conf (inherent to ssmtp).
if [ -n "$EMAIL_TO" ];then
    echo "root=${EMAIL_TO}" > /etc/ssmtp/ssmtp.conf
    echo "mailhub=${EMAIL_HOST_PORT}" >> /etc/ssmtp/ssmtp.conf
    echo "AuthUser=${EMAIL_USER}" >> /etc/ssmtp/ssmtp.conf
    echo "AuthPass=${EMAIL_PASS}" >> /etc/ssmtp/ssmtp.conf
    echo "UseSTARTTLS=${EMAIL_USE_STARTTLS}" >> /etc/ssmtp/ssmtp.conf
    if [ -n "$EMAIL_FROM" ];then
        echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
    fi
fi
echo "OK - waiting for job to execute."
/usr/sbin/crond && tail -f /var/log/cron.log
| true |
02fc4b78d0e4688d99cb8cf6690cd6521b893fce | Shell | phanirajl/elasticsearch | /7. Winlogbeat installation and configuring.sh | UTF-8 | 2,063 | 2.5625 | 3 | [] | no_license | ##### Winlogbeat installation and configuration #####
# Download Winlogbeat
https://www.elastic.co/downloads/beats/winlogbeat
# Create directory in C:\
New-Item -ItemType directory -Path 'C:\Program files\Winlogbeat'
# Install Winlogbeat
Set-Location -Path 'C:\Program files\Winlogbeat'
powershell -ExecutionPolicy UnRestricted -Exec bypass -File "C:\Program Files\Winlogbeat\install-service-winlogbeat.ps1"
Set-Service -Name "winlogbeat" -StartupType automatic
Start-Service -Name "winlogbeat"
Stop-Service -Name "winlogbeat"
Get-EventLog *
Get-WinEvent -ListLog * | Format-List -Property LogName
(Get-WinEvent -ListLog Security).ProviderNames
# Winlogbeat configuration
winlogbeat.event_logs:
- name: Application
provider:
- Application Error
- ...
ignore_older: 8760h
event_id: 4624, 4625, 4634, 4647, 1102, 4720, 4725, 4722, 4781, 4724, 4732, 4733, 4731, 4734 # if exclude: -4444
processors:
- drop_event.when.not.or:
- equals.event_id: ...
- equals.event_id: ...
- equals.event_id: ...
- name: Security
ignore_older: 8760h
# include_xml: true
setup.template.settings:
index.number_of_shards: 3
#index.codec: best_compression
#_source.enabled: false
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
fields:
document_type: windows-security
setup.kibana:
host: "130.117.79.118:5601"
output.logstash:
hosts: ["130.117.79.119:5046"]
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
| true |
5f761f3a277dd9da38b0ec4f60cd3052900496c4 | Shell | rome/tools | /scripts/timetravel.sh | UTF-8 | 2,267 | 3.390625 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Suggestions:
# All Changes to a particular file
# git log --pretty=format:"%H" --follow <FILE> | ../timetravel.sh check test.js
# Last N commits
# git log -n 10 --pretty=format:"%H" | ../timetravel.sh check test.js
#
# Reads one commit hash per line on stdin, builds each commit (cached under
# target/<hash>), runs `rome check $1` for each, writes a markdown report to
# target/result.md, then benchmarks all builds with hyperfine and plots them.
rm target/result.md
# get current branch to restore it later
current="$(git rev-parse --abbrev-ref HEAD)"
allruns=()
# Split only on newlines and disable globbing so hyperfine's argument list
# (built below from ${allruns[*]}) re-splits into whole elements, even though
# each element contains spaces.
IFS=$'\n'
set -f
IDX=1
while read i
do
    # change to a specific commit
    git checkout "$i" -q
    git log -1 --pretty=%s
    # check if a compiled version of this commit already exists
    if [ ! -f "target/$i" ]; then
        cargo build -p rome_cli --release 2>&1 > /dev/null
        cp target/release/rome "target/$i"
    fi
    # Print details about this commit
    title=$(git log -1 --pretty=%s)
    echo "# $IDX - $title" >> target/result.md
    echo "## Details" >> target/result.md
    git log -1 --pretty=%b >> target/result.md
    echo "" >> target/result.md
    git log -1 --pretty=%ai >> target/result.md
    # Run this commit version and print the result
    echo "## Result" >> target/result.md
    echo "\`\`\`" >> target/result.md
    eval "target/$i" check $1 &>> target/result.md
    echo "\`\`\`" >> target/result.md
    echo "" >> target/result.md
    echo "" >> target/result.md
    # Save how to run this version to use it
    # later with hyperfine
    allruns+=("target/$i check $1")
    allruns+=("--command-name")
    allruns+=("$IDX")
    ((IDX++))
done
# restore original branch
git checkout "$current" -q
# Run hyperfine and generate a report
echo "# Performance" >> target/result.md
# NOTE(review): ${allruns[*]} is deliberately unquoted — with IFS=$'\n' and
# set -f above, word-splitting on newlines reconstructs the array elements.
# Fragile but intentional; quoting it as "${allruns[@]}" would change how
# hyperfine receives the command strings — verify before "fixing".
hyperfine ${allruns[*]} -i --shell=none --export-markdown target/hyperfine.md --export-json target/hyperfine.json
cat target/hyperfine.md >> target/result.md
rm target/hyperfine.md
echo "" >> target/result.md
# Plot hyperfine result (box-plot of per-commit timings -> target/whisker.png)
python3 << EOF
#!/usr/bin/env python
import json
import matplotlib.pyplot as plt
f = open("target/hyperfine.json")
results = json.load(f)["results"]
labels=[b["command"] for b in results]
plt.boxplot([b["times"] for b in results])
plt.plot(list(range(1, len(results) + 1)), [b["mean"] for b in results])
plt.savefig("target/whisker.png")
EOF
echo "" >> target/result.md
echo "" >> target/result.md
echo "" >> target/result.md
| true |
76d10e1de706950ea482fc71f70e2d29f61f0ed2 | Shell | krutkow/scripts | /clean_rm_install_dir.sh | UTF-8 | 1,292 | 4.03125 | 4 | [] | no_license | #! /bin/bash
# 20170913
# Kristin Rutkowski
# based on script written by Craig Gordon
# Script to more fully clean a built heasoft directory. The 'hmake clean'
# command cleans the build, but doesn't remove the x86 installed
# directories. If there are files dated older than the installed files
# (but if they're the actual desired files), rebuilding will not install them.
# removing these install directories should fix that.
#
INSTALL_DIR=x86_64
INSTALL_DIR=x86_64-apple-darwin18.7.0
dirsArray=($(find . -maxdepth 1 -type d))
origDir=$(pwd)
echo "Running in directory $origDir"
# make sure we're in a heasoft dir
# ideally we're in the main heasoft directory
if [[ $origDir != *"heasoft"* ]]; then
echo "You're not in a heasoft directory. Exiting."
exit 0
fi
# get length of an array
numDirs=${#dirsArray[@]}
# loop through dirs, rm the install dir
for (( i=0; i<${numDirs}; i++ ));
do
dirToDelete=$origDir/${dirsArray[$i]}/$INSTALL_DIR
echo " curr loop = ${dirsArray[$i]}"
echo " dirToDelete = $dirToDelete"
if [ -d "$dirToDelete"* ] ; then
echo "removing $dirToDelete*"
rm -rf "$dirToDelete"*
fi
dirToDelete=$origDir/${dirsArray[$i]}/BLD/$INSTALL_DIR
if [ -d "$dirToDelete"* ] ; then
echo "removing $dirToDelete*"
rm -rf "$dirToDelete"*
fi
done
| true |
a2f060708468b1f9d2c3aa4ca6bb4a53f292aaa1 | Shell | DanielMabbett/infracost | /scripts/ci/diff.sh | UTF-8 | 10,810 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh -le
# This script runs infracost on the current branch then the master branch. It uses `git diff`
# to post a pull-request comment showing the cost estimate difference whenever a percentage
# threshold is crossed.
# Usage docs: https://www.infracost.io/docs/integrations/
# It supports: GitHub Actions, GitLab CI, CircleCI with GitHub and Bitbucket, Bitbucket Pipelines
# For Bitbucket: BITBUCKET_TOKEN must be set to "myusername:my_app_password", the password needs to have Read scope
# on "Repositories" and "Pull Requests" so it can post comments. Using a Bitbucket App password
# (https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/) is recommended.
# Set variables based on the order for GitHub Actions, or the env value for other CIs
terraform_json_file=${1:-$terraform_json_file}
terraform_plan_file=${2:-$terraform_plan_file}
terraform_use_state=${3:-$terraform_use_state}
terraform_dir=${4:-$terraform_dir}
terraform_plan_flags=${5:-$terraform_plan_flags}
percentage_threshold=${6:-$percentage_threshold}
pricing_api_endpoint=${7:-$pricing_api_endpoint}
usage_file=${8:-$usage_file}
config_file=${9:-$config_file}
# Handle deprecated var names
terraform_json_file=${terraform_json_file:-$tfjson}
terraform_plan_file=${terraform_plan_file:-$tfplan}
terraform_use_state=${terraform_use_state:-$use_tfstate}
terraform_dir=${terraform_dir:-$tfdir}
terraform_plan_flags=${terraform_plan_flags:-$tfflags}
# Set defaults
percentage_threshold=${percentage_threshold:-0}
GITHUB_API_URL=${GITHUB_API_URL:-https://api.github.com}
BITBUCKET_API_URL=${BITBUCKET_API_URL:-https://api.bitbucket.org}
# Export as it's used by infracost, not this script
export INFRACOST_LOG_LEVEL=${INFRACOST_LOG_LEVEL:-info}
export INFRACOST_CI_DIFF=true
if [ ! -z "$GIT_SSH_KEY" ]; then
echo "Setting up private Git SSH key so terraform can access your private modules."
mkdir -p .ssh
echo "${GIT_SSH_KEY}" > .ssh/git_ssh_key
chmod 600 .ssh/git_ssh_key
export GIT_SSH_COMMAND="ssh -i $(pwd)/.ssh/git_ssh_key -o 'StrictHostKeyChecking=no'"
fi
# Bitbucket Pipelines don't have a unique env so use this to detect it
if [ ! -z "$BITBUCKET_BUILD_NUMBER" ]; then
BITBUCKET_PIPELINES=true
fi
# Post the cost-diff comment to Bitbucket.
#   $1 - repo-scoped API path (e.g. "ws/repo/commit/<sha>/comments" or
#        "ws/repo/pullrequests/<id>/comments")
# Reads globals: change_word, absolute_percent_diff,
# default_branch_monthly_cost, current_branch_monthly_cost, and the two
# *_infracost.txt files written earlier; auths with $BITBUCKET_TOKEN
# ("user:app_password"). Builds the JSON with jq --arg (safe escaping).
post_bitbucket_comment () {
  # Bitbucket comments require a different JSON format and don't support HTML
  jq -Mnc --arg change_word $change_word \
    --arg absolute_percent_diff $(printf '%.1f\n' $absolute_percent_diff) \
    --arg default_branch_monthly_cost $default_branch_monthly_cost \
    --arg current_branch_monthly_cost $current_branch_monthly_cost \
    --arg diff "$(git diff --no-color --no-index default_branch_infracost.txt current_branch_infracost.txt | sed 1,2d | sed 3,5d)" \
    '{content: {raw: "Infracost estimate: monthly cost will \($change_word) by \($absolute_percent_diff)% (default branch $\($default_branch_monthly_cost) vs current branch $\($current_branch_monthly_cost))\n\n```diff\n\($diff)\n```\n"}}' > diff_infracost.txt

  cat diff_infracost.txt | curl -L -X POST -d @- \
    -H "Content-Type: application/json" \
    -u $BITBUCKET_TOKEN \
    "$BITBUCKET_API_URL/2.0/repositories/$1"
}
infracost_cmd="infracost --no-color"
if [ ! -z "$terraform_json_file" ]; then
echo "WARNING: we do not recommend using terraform_json_file as it doesn't work with this diff script, use terraform_dir instead."
infracost_cmd="$infracost_cmd --terraform-json-file $terraform_json_file"
fi
if [ ! -z "$terraform_plan_file" ]; then
echo "WARNING: we do not recommend using terraform_plan_file as it doesn't work with this diff script, use terraform_dir instead."
infracost_cmd="$infracost_cmd --terraform-plan-file $terraform_plan_file"
fi
if [ "$terraform_use_state" = "true" ] || [ "$terraform_use_state" = "True" ] || [ "$terraform_use_state" = "TRUE" ]; then
echo "WARNING: we do not recommend using terraform_use_state as it doesn't work with this diff script, use terraform_dir without this instead."
infracost_cmd="$infracost_cmd --terraform-use-state"
fi
if [ ! -z "$terraform_dir" ]; then
infracost_cmd="$infracost_cmd --terraform-dir $terraform_dir"
fi
if [ ! -z "$terraform_plan_flags" ]; then
infracost_cmd="$infracost_cmd --terraform-plan-flags \"$terraform_plan_flags\""
fi
if [ ! -z "$pricing_api_endpoint" ]; then
infracost_cmd="$infracost_cmd --pricing-api-endpoint $pricing_api_endpoint"
fi
if [ ! -z "$usage_file" ]; then
infracost_cmd="$infracost_cmd --usage-file $usage_file"
fi
if [ ! -z "$config_file" ]; then
infracost_cmd="$infracost_cmd --config-file $config_file"
fi
echo "$infracost_cmd" > infracost_cmd
echo "Running infracost on current branch using:"
echo " $ $(cat infracost_cmd)"
current_branch_output=$(cat infracost_cmd | sh)
# The sed is needed to cause the header line to be different between current_branch_infracost and
# default_branch_infracost, otherwise git diff removes it as its an identical line
echo "$current_branch_output" | sed 's/MONTHLY COST/MONTHLY COST /' > current_branch_infracost.txt
current_branch_monthly_cost=$(cat current_branch_infracost.txt | awk '/OVERALL TOTAL/ { gsub(",",""); printf("%.2f",$NF) }')
echo "::set-output name=current_branch_monthly_cost::$current_branch_monthly_cost"
current_branch=$(git rev-parse --abbrev-ref HEAD)
if [ "$current_branch" = "master" ] || [ "$current_branch" = "main" ]; then
echo "Exiting as the current branch was the default branch so nothing more to do."
exit 0
fi
if [ ! -z "$BITBUCKET_PIPELINES" ]; then
echo "Configuring git remote for Bitbucket Pipelines"
git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
fi
echo "Switching to default branch"
git fetch --depth=1 origin master &>/dev/null || git fetch --depth=1 origin main &>/dev/null || echo "Could not fetch default branch from origin, no problems, switching to it..."
git switch master &>/dev/null || git switch main &>/dev/null || (echo "Error: could not switch to branch master or main" && exit 1)
git log -n1
echo "Running infracost on default branch using:"
echo " $ $(cat infracost_cmd)"
default_branch_output=$(cat infracost_cmd | sh)
echo "$default_branch_output" > default_branch_infracost.txt
default_branch_monthly_cost=$(cat default_branch_infracost.txt | awk '/OVERALL TOTAL/ { gsub(",",""); printf("%.2f",$NF) }')
echo "::set-output name=default_branch_monthly_cost::$default_branch_monthly_cost"
if [ $(echo "$default_branch_monthly_cost > 0" | bc -l) = 1 ]; then
percent_diff=$(echo "scale=4; $current_branch_monthly_cost / $default_branch_monthly_cost * 100 - 100" | bc)
else
echo "Default branch has no cost, setting percent_diff=100 to force a comment"
percent_diff=100
# Remove the empty OVERALL TOTAL line to avoid it showing-up in the diff
sed -i '/OVERALL TOTAL/d' default_branch_infracost.txt
fi
absolute_percent_diff=$(echo $percent_diff | tr -d -)
if [ $(echo "$absolute_percent_diff > $percentage_threshold" | bc -l) = 1 ]; then
change_word="increase"
if [ $(echo "$percent_diff < 0" | bc -l) = 1 ]; then
change_word="decrease"
fi
comment_key=body
if [ ! -z "$GITLAB_CI" ]; then
comment_key=note
fi
jq -Mnc --arg comment_key $comment_key \
--arg change_word $change_word \
--arg absolute_percent_diff $(printf '%.1f\n' $absolute_percent_diff) \
--arg default_branch_monthly_cost $default_branch_monthly_cost \
--arg current_branch_monthly_cost $current_branch_monthly_cost \
--arg diff "$(git diff --no-color --no-index default_branch_infracost.txt current_branch_infracost.txt | sed 1,2d | sed 3,5d)" \
'{($comment_key): "Infracost estimate: monthly cost will \($change_word) by \($absolute_percent_diff)% (default branch $\($default_branch_monthly_cost) vs current branch $\($current_branch_monthly_cost))\n<details><summary>infracost diff</summary>\n\n```diff\n\($diff)\n```\n</details>\n"}' > diff_infracost.txt
echo "Default branch and current branch diff ($absolute_percent_diff) is more than the percentage threshold ($percentage_threshold)."
if [ ! -z "$GITHUB_ACTIONS" ]; then
if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then
GITHUB_SHA=$(cat $GITHUB_EVENT_PATH | jq -r .pull_request.head.sha)
fi
echo "Posting comment to GitHub commit $GITHUB_SHA"
cat diff_infracost.txt | curl -L -X POST -d @- \
-H "Content-Type: application/json" \
-H "Authorization: token $GITHUB_TOKEN" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/commits/$GITHUB_SHA/comments"
elif [ ! -z "$GITLAB_CI" ]; then
echo "Posting comment to GitLab commit $CI_COMMIT_SHA"
cat diff_infracost.txt | curl -L -X POST -d @- \
-H "Content-Type: application/json" \
-H "PRIVATE-TOKEN: $GITLAB_TOKEN" \
"$CI_SERVER_URL/api/v4/projects/$CI_PROJECT_ID/repository/commits/$CI_COMMIT_SHA/comments"
# Previously we posted to the merge request, using the comment_key=body above:
# "$CI_SERVER_URL/api/v4/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/notes"
elif [ ! -z "$CIRCLECI" ]; then
if echo $CIRCLE_REPOSITORY_URL | grep -Eiq github; then
echo "Posting comment from CircleCI to GitHub commit $CIRCLE_SHA1"
cat diff_infracost.txt | curl -L -X POST -d @- \
-H "Content-Type: application/json" \
-H "Authorization: token $GITHUB_TOKEN" \
"$GITHUB_API_URL/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/commits/$CIRCLE_SHA1/comments"
elif echo $CIRCLE_REPOSITORY_URL | grep -Eiq bitbucket; then
if [ ! -z "$CIRCLE_PULL_REQUEST" ]; then
BITBUCKET_PR_ID=$(echo $CIRCLE_PULL_REQUEST | sed 's/.*pull-requests\///')
echo "Posting comment from CircleCI to Bitbucket pull-request $BITBUCKET_PR_ID"
post_bitbucket_comment "$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pullrequests/$BITBUCKET_PR_ID/comments"
else
echo "Posting comment from CircleCI to Bitbucket commit $CIRCLE_SHA1"
post_bitbucket_comment "$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/commit/$CIRCLE_SHA1/comments"
fi
else
echo "Error: CircleCI is not being used with GitHub or Bitbucket!"
fi
elif [ ! -z "$BITBUCKET_PIPELINES" ]; then
if [ ! -z "$BITBUCKET_PR_ID" ]; then
echo "Posting comment to Bitbucket pull-request $BITBUCKET_PR_ID"
post_bitbucket_comment "$BITBUCKET_REPO_FULL_NAME/pullrequests/$BITBUCKET_PR_ID/comments"
else
echo "Posting comment to Bitbucket commit $BITBUCKET_COMMIT"
post_bitbucket_comment "$BITBUCKET_REPO_FULL_NAME/commit/$BITBUCKET_COMMIT/comments"
fi
fi
else
echo "Comment not posted as default branch and current branch diff ($absolute_percent_diff) is less than or equal to percentage threshold ($percentage_threshold)."
fi
| true |
2bda913587c2763d0eb03d01e5e490d4917a8f6a | Shell | amstuta/Malloc | /test_script.sh | UTF-8 | 1,336 | 3 | 3 | [] | no_license | #!/bin/sh
## test_script.sh for script in /home/elkaim_r/projects/malloc
##
## Made by raphael elkaim
## Login <elkaim_r@epitech.net>
##
## Started on Tue Jan 27 12:34:34 2015 raphael elkaim
## Last update Wed Feb 4 12:35:18 2015 raphael elkaim
##
## Rebuilds the custom malloc library, LD_PRELOADs it, and exercises it
## against increasingly demanding programs (test main, moulinette, a shell,
## emacs).
red=`tput setaf 1`
green=`tput setaf 2`
reset=`tput sgr0`
echo "${green}recompiling the project${reset}"
# BUG FIX: `exit 1` inside ( ... ) only terminated the subshell, so the
# script used to keep going after a failed build; { ...; } runs the exit
# in the current shell and actually aborts.
make && echo "${green}succesfully recompiled the project${reset}" || { echo "${red}compilation failed :(${reset}"; exit 1; }
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./malloc/" && echo "${green}setting up the library path${reset}"
echo "${green}recompiling the test main${reset}"
# Same subshell-exit fix as above for the failure branch.
(gcc main2.c -lmy_malloc -L./ && echo "${green}done!${reset}") || { echo "failed :("; exit 1; }
echo "${green}and everything shall use our malloc!${reset}"
export LD_PRELOAD=./libmy_malloc.so
echo "${green}testing basic malloc/free interactions:${reset}"
./a.out
#exit 0
echo "${green}testing the mighty moulinette:${reset}"
./moul_malloc_stud
echo "${green}some more basic tests:${reset}\n creating file \"your mom\""
touch yourmom
rm -v yourmom
echo "${green}running a new bash command shell!:${reset}"
bash
echo "${green}when shit starts going down:testing emacs:${reset}"
emacs -nw randomfilewedontcareaboutbutitbetterwork || echo "${red}fuck it >:(${reset}"
echo "${green}setting everything back to normal!${reset}"
unset LD_PRELOAD
| true |
d67f866db64be667c2949f61b0c7e55416d73a77 | Shell | chef/automate | /.expeditor/buildkite/semgrep.sh | UTF-8 | 1,098 | 3.25 | 3 | [
"JSON",
"Apache-2.0",
"CC-BY-2.0",
"SAX-PD",
"MPL-2.0",
"Artistic-2.0",
"MIT-CMU",
"BSD-3-Clause",
"0BSD",
"CC-BY-4.0",
"LGPL-3.0-or-later",
"CC0-1.0",
"CC-BY-3.0",
"BSD-Source-Code",
"Apache-1.1",
"Ruby",
"WTFPL",
"BSD-1-Clause",
"MIT",
"Unlicense",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Run semgrep-agent in Buildkite CI, choosing the diff baseline:
#   - main builds compare against main~1
#   - PR builds compare against merge-base(PR base, HEAD)
#   - manual branch builds compare against merge-base(main, HEAD)
set -eou pipefail
echo "running in $(pwd)"
# Env vars needed by semgrep-agent
export SEMGREP_BRANCH=$BUILDKITE_BRANCH
export SEMGREP_REPO_NAME=chef/automate
# Activates links to buildkite builds in slack notification
export SEMGREP_JOB_URL=$BUILDKITE_BUILD_URL
# Activates links to git commits in slack notification
export SEMGREP_REPO_URL=https://github.com/chef/automate
# NOTE(review): with `set -u`, the next line aborts if
# BUILDKITE_PULL_REQUEST_BASE_BRANCH is entirely unset — presumably
# Buildkite exports it (possibly empty) on every build; verify.
BASELINE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
MERGE_BASE=$(git merge-base "${BASELINE:-main}" HEAD)
if [ "$SEMGREP_BRANCH" == "main" ] ; then
    echo "build on main; using 'main~1' as base branch" && BASELINE=main~1
elif [ "$SEMGREP_BRANCH" != "main" ] && [ -n "$BASELINE" ] ; then
    echo "PR build on '$SEMGREP_BRANCH' branch; base is $MERGE_BASE (merge-base of '$BASELINE')" && BASELINE=$MERGE_BASE
elif [ "$SEMGREP_BRANCH" != "main" ] && [ -z "$BASELINE" ] ; then
    echo "manual build on '$SEMGREP_BRANCH' branch; using merge-base of main as base ($MERGE_BASE)" && BASELINE=$MERGE_BASE
fi
python -m semgrep_agent --publish-token "$SEMGREP_TOKEN" --publish-deployment "$SEMGREP_ID" --baseline-ref "$BASELINE"
| true |
ff0162968f5b8a07c45e035fdc781933d809cf6a | Shell | pengdan01/spider | /crawler/crawler2/general_crawler/tools/extract_taobao_hub_urls/build_hub_urls.sh | UTF-8 | 919 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Read seed URLs one per line, fetch each page, convert it from GBK to
# UTF-8, run the link extractor, and finally assemble a deduplicated,
# shuffled hub-URL list in hub_urls.txt.
rm -f .hub_urls.txt
while read line
do
curl "$line" -L -o .raw.html
if [ $? -ne 0 ]; then
echo "curl fail and ignore, url: $line"
continue
fi
#2. Re-encode the page to UTF-8 (strip HTML comment lines first).
cat .raw.html | grep -v '<!--' | iconv -f gbk -t utf8 > .list_utf8.html
if [ $? -ne 0 ]; then
echo "iconv fail, url: $line"
continue
fi
#3. Invoke the link-extraction python script (appends to .hub_urls.txt).
python extractor.py
if [ $? -ne 0 ]; then
echo "python extractor.py fail, url: $line"
continue
fi
done < seed_url.txt
#4. Filter out useless links.
#cat .hub_urls.txt | awk -F'#' '{if(NF>=2 && index($2, "cat=") == 0) {print $1;} else {print $0;}}' | sort -u | shuf > hub_urls.txt
# Currently JSON-format URLs can only be constructed from links containing
# '#!', so keep only those.
cat .hub_urls.txt manual_extracted.txt | grep '#!' | sort -u | shuf > hub_urls.txt
url_num=`cat hub_urls.txt | wc -l`
echo "final hub urls: $url_num"
exit 0
| true |
513e6e91352bdda4bc1cb4879af457938cf4d7de | Shell | Kerbodine/shell-script-test | /fizzbuzz.sh | UTF-8 | 195 | 2.953125 | 3 | [] | no_license | #! /bin/zsh
for i in `seq 1 100`; do
if ! (($i % 15)); then
echo "fizzbuzz"
elif ! (($i % 3)); then
echo "fizz"
elif ! (($i % 5)); then
echo "buzz"
else
echo $i
fi
done | true |
935c5f5612550d40a161cde695d7e3ef9a8debcb | Shell | sutanubh1/multicom | /installation/MULTICOM_test_codes/backup/.T13-run-blast.sh.default | UTF-8 | 1,019 | 2.8125 | 3 | [] | no_license | #!/bin/bash
dtime=$(date +%Y-%b-%d)
mkdir -p SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime/
cd SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime/
mkdir blast
perl SOFTWARE_PATH/src/meta/blast//script/main_blast_v2.pl SOFTWARE_PATH/src/meta/blast/cm_option_adv SOFTWARE_PATH/examples/T0967.fasta blast 2>&1 | tee SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime.log
perl SOFTWARE_PATH/src/meta/hhsearch/script/tm_hhsearch_main_casp8.pl SOFTWARE_PATH/src/meta/hhsearch/hhsearch_option_cluster_used_in_casp8 SOFTWARE_PATH/examples/T0967.fasta blast
printf "\nFinished.."
printf "\nCheck log file <SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime.log>\n\n"
if [[ ! -f "SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime/blast/hs1.pdb" ]];then
printf "!!!!! Failed to run blast, check the installation <SOFTWARE_PATH/src/meta/blast/>\n\n"
else
printf "\nJob successfully completed!"
printf "\nResults: SOFTWARE_PATH/installation/test_out/T0967-blast-$dtime/blast/hs1.pdb\n\n"
fi
| true |
6cc0e8f7f1a4f69e92dd31577bb5d8f439cb332f | Shell | Suraj1408/Review | /Prime.sh | UTF-8 | 248 | 3.4375 | 3 | [] | no_license | #!/bin/bash
randomnum=$((1000+RANDOM%1000))
for (( i=2; i<$randomnum; i++ ))
do
res=$(($randomnum%$i))
if [ $res -eq 0 ]
then
flag=0
else
((occur+=1))
flag=1
fi
done
if [ $flag -eq 1 ]
then
echo "$randomnum occures $occur time"
fi
| true |
73aee426e7c132e3dbd2ddda983d42479135b314 | Shell | jadarsie/e2e-ci-poc | /hack/cloud-variables.sh | UTF-8 | 1,568 | 2.765625 | 3 | [] | no_license | #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
METADATA=$(mktemp)
curl -sk -o ${METADATA} "https://management.${AZURE_LOCATION}.${CLOUD_FQDN}/metadata/endpoints?api-version=2015-01-01"
echo export LOCATION=\"$AZURE_LOCATION\"
echo export REGION_OPTIONS=\"$AZURE_LOCATION\"
echo export CLIENT_ID=\"$AZURE_CLIENT_ID\"
echo export CLIENT_SECRET=\"$AZURE_CLIENT_SECRET\"
echo export SUBSCRIPTION_ID=\"$AZURE_SUBSCRIPTION_ID\"
echo export TENANT_ID=\"$AZURE_TENANT_ID\"
echo export CUSTOM_CLOUD_CLIENT_ID=\"$AZURE_CLIENT_ID\"
echo export CUSTOM_CLOUD_SECRET=\"$AZURE_CLIENT_SECRET\"
echo export IDENTITY_SYSTEM=\"$IDENTITY_SYSTEM\"
echo export PORTAL_ENDPOINT=\"https://portal.${AZURE_LOCATION}.${CLOUD_FQDN}\"
echo export RESOURCE_MANAGER_ENDPOINT=\"https://management.${AZURE_LOCATION}.${CLOUD_FQDN}\"
echo export SERVICE_MANAGEMENT_ENDPOINT=\"$(jq -r '.authentication.audiences | .[0]' "$METADATA")\"
echo export ACTIVE_DIRECTORY_ENDPOINT=\"$(jq -r .authentication.loginEndpoint "$METADATA")\" # | sed -e 's/adfs\/*$//'
echo export GALLERY_ENDPOINT=\"$(jq -r .galleryEndpoint "$METADATA")\"
echo export GRAPH_ENDPOINT=\"$(jq -r .graphEndpoint "$METADATA")\"
echo export KEY_VAULT_DNS_SUFFIX=\"".vault.${AZURE_LOCATION}.${CLOUD_FQDN}"\"
echo export STORAGE_ENDPOINT_SUFFIX=\"${AZURE_LOCATION}.${CLOUD_FQDN}\"
echo export RESOURCE_MANAGER_VM_DNS_SUFFIX=\"cloudapp.${CLOUD_FQDN}\"
echo export SERVICE_MANAGEMENT_VM_DNS_SUFFIX=\"cloudapp.net\"
API_MODEL=$(jq -c '.apiModel' ${CLUSTER_DEFINITION})
echo export API_MODEL_INPUT=\'${API_MODEL}\'
| true |
5ba652e13ea1c6fcd1e5786c957e52cae8686691 | Shell | WilhelmStein/Radix-Hash-Join-Optimizer | /cpp-build | UTF-8 | 10,303 | 4.4375 | 4 | [] | no_license | #!/bin/bash
# Save the script's name in order to report errors, warnings and messages under it
prog=$(basename "$0")
function log
{
if [ "$1" == ERROR ]
then
echo -e "$(tput setaf 3)$prog:~$(tput sgr0) $2"
elif [ "$1" == WARNING ]
then
echo -e "$(tput setaf 9)$prog:~$(tput sgr0) $2"
else
echo -e "$(tput setaf 6)$prog:~$(tput sgr0) $2"
fi
}
# The configuration file must be located in the current working directory
config="$(pwd)/.config.json"
# A python script used in order to read the configuration file.
# It prints one KEY="VALUE" line per config field; list-valued fields are
# flattened into a single space-separated string first.
load_config=\
"import sys, json;
data = json.load(sys.stdin)
for field in data.keys():
    if isinstance(data[field], list):
        data[field] = \" \".join(data[field])
    print(field, \"=\", '\"', data[field], '\"', sep='')"
declare -A meta
# Store the configuration file's contents into the associative array "meta"
# Check if all the expected fields have been mentioned and
# if the mentioned directories exist
if [ ! -f "$config" ]
then
    log ERROR "Unable to locate \"$config\""
    exit 1
else
    # Parse each KEY="VALUE" line emitted by the python helper into "meta".
    while read line
    do
        if [[ $line =~ (.*)=\"(.*)\" ]]
        then
            key="${BASH_REMATCH[1]}"
            val="${BASH_REMATCH[2]}"
            meta["$key"]="$val"
        fi
    done <<< $(cat "$config" | python3 -c "$load_config")
    # All of these fields must be present in the configuration file.
    for field in ""CC CCFLAGS LIBS PATH_INC PATH_SRC PATH_TEST PATH_BIN""
    do
        if [[ ! -v "meta[$field]" ]]
        then
            log ERROR "Field \"$field\" was not specified"
            exit 1
        fi
    done
    # The directory-valued fields must point at existing directories.
    for field in ""PATH_INC PATH_SRC PATH_TEST""
    do
        path="${meta[$field]}"
        if [ ! -d "$path" ]
        then
            log ERROR "No directory named \"$path\""
            exit 1
        fi
    done
fi
# Ask the user before overwriting the file named by "$1".
# When the file exists and the answer is not y/Y or yes (any case),
# the whole script is aborted with status 1.
function confirm
{
    local target="$1" answer

    [ -e "$target" ] || return 0

    read -p "$(log WARNING "Are you sure you want to overwrite \"$target\": ")" answer

    case "$answer" in
        [yY] | [yY][eE][sS]) ;;
        *) exit 1 ;;
    esac
}
# Recursively find all the dependencies of the file specified by "$1"
# For this to work properly the name of every header file must be
# of the format *.h or *.hpp
# NOTE(review): the grep pattern below actually matches only .hpp/.ipp
# includes (\.[hi]pp), not plain .h — confirm whether .h headers should
# also be tracked.
# Results are accumulated in the caller-provided global associative array
# "visited" (keys = include names); "meta[PATH_INC]" locates the headers.
function grep_include_directives
{
    # Collect every quoted or bracketed include target mentioned in the file.
    local includes=$(grep -Eo '["<].*\.[hi]pp[">]' $1)
    if [ -z "$includes" ]
    then
        return
    fi
    # NOTE(review): "dependencies" appears to be unused.
    local dependencies=""
    for include in ""$includes""
    do
        # Strip the surrounding quotes / angle brackets.
        include=${include:1:${#include} - 2}
        local entry="${visited[$include]}"
        if [[ -n "$entry" ]]
        then
            # Already recorded in "visited" — avoid infinite recursion.
            continue
        else
            visited["$include"]=true
            grep_include_directives "${meta[PATH_INC]}/$include"
        fi
    done
}
# Generate a makefile that contains:
# (1) An explicit rule for each .cpp file residing in the "$PATH_SRC" directory
# (2) A wildcard rule for every test unit residing in the "$PATH_TEST" directory
# (3) A "clean" rule in order to remove the "$PATH_BIN" directory and its contents
# (4) An "all" rule that results in the separate compilation of every .cpp file
#     residing in the "$PATH_SRC" directory
# "$1" is the whitespace-separated list of source file names; the makefile
# text is written to stdout (the caller redirects it into ./Makefile).
function generate
{
    echo
    echo "CC = ${meta[CC]}"
    echo "CCFLAGS = ${meta[CCFLAGS]}"
    echo
    echo "LIBS = ${meta[LIBS]}"
    echo
    echo "PATH_SRC = ${meta[PATH_SRC]}"
    echo "PATH_INC = ${meta[PATH_INC]}"
    echo "PATH_BIN = ${meta[PATH_BIN]}"
    echo "PATH_TEST = ${meta[PATH_TEST]}"
    echo
    echo ".PHONY: all"
    echo "all:"
    echo -e "\tmkdir -p \$(PATH_BIN)"
    echo -e "\t@echo"
    echo -e "\t@echo \"*** Compiling object files ***\""
    echo -e "\t@echo \"***\""
    echo -e "\tmake \$(OBJS)"
    echo -e "\t@echo \"***\""
    echo
    echo ".PHONY: clean"
    echo "clean:"
    echo -e "\t@echo"
    echo -e "\t@echo \"*** Purging binaries ***\""
    echo -e "\t@echo \"***\""
    echo -e "\trm -rvf \$(PATH_BIN)"
    echo -e "\t@echo \"***\"\n\n"
    objs="OBJS = \$(addprefix \$(PATH_BIN)/, "
    deps=""
    rules=""
    # Build one explicit compile rule per source file, plus a *_DEP variable
    # holding its recursively-discovered header dependencies.
    for file in ""$1""
    do
        declare -A visited
        grep_include_directives "${meta[PATH_SRC]}/$file"; includes=${!visited[@]}
        unset visited
        file=${file%.cpp};
        rule="\$(PATH_BIN)/$file.o:"
        if [ -n "$includes" ]
        then
            # e.g. foo.cpp -> FOO_DEP
            deps_name=$(echo $file | tr [:lower:] [:upper:])_DEP
            rule="$rule \$($deps_name)"
            deps_list="\$(addprefix \$(PATH_INC)/, $includes) \$(PATH_SRC)/$file.cpp"
            deps="$deps$deps_name = $deps_list\n\n"
        else
            deps_name=""
            deps_list=""
        fi
        rule="$rule\n\t\$(CC) -I \$(PATH_INC) \$(DEFINED) \$(CCFLAGS) \$(PATH_SRC)/$file.cpp -c -o \$(PATH_BIN)/$file.o"
        rules="$rules$rule\n\n"
        objs="$objs $file.o"
    done
    objs="$objs)"
    echo -e "$deps\n$rules\n$objs\n"
    # Pattern rule: each test unit links against every object file.
    echo "\$(PATH_BIN)/%.exe: \$(PATH_TEST)/%.cpp \$(OBJS)"
    echo -e "\t\$(CC) -I \$(PATH_INC) \$(DEFINED) \$(CCFLAGS) \$< \$(OBJS) \$(LIBS) -o \$@"
}
declare -A classes
# The macros must be of the format __.*__
# grep every global macro and extract its name
classes[-g]=$(grep -Evs '//' ${meta[PATH_INC]}/*.h*p ${meta[PATH_SRC]}/*.c*p | grep -E '__.*__' | cut -d : -f 2 | sed -nE 's/^.*\((__.*__)\).*$/\1/p')
# grep every unit specific macro and extract its name
classes[-u]=$(grep -Evs '//' ${meta[PATH_TEST]}/*.c*p | grep -E '__.*__' | cut -d : -f 2 | sed -nE 's/^.*\((__.*__)\).*$/\1/p')
# Maps a one-letter shortcut flag (e.g. "-b" for __BENCHMARK__) onto
# "<class> <macro>" so users can type the short form on the command line.
declare -A shortcuts
# For every class of macros
for class in "${!classes[@]}"
do
    # For each macro in the current class
    for macro in ""${classes[$class]}""
    do
        if [[ -z "$macro" ]]
        then
            continue
        fi
        # Create a key corresponding to the macro at hand
        # (first letter after "__", lowercased, prefixed with "-").
        key="-$(echo ${macro:2:1} | tr [:upper:] [:lower:])"
        if [[ "$key" =~ (-[ugxr]) ]]
        then
            log ERROR "\"$macro\"'s shortcut shadows \"${BASH_REMATCH[1]}\" flag"
            exit 1
        fi
        entry="${shortcuts[$key]}"
        # If there is no entry matching the current key register it
        # Otherwise
        if [[ -n "$entry" ]]
        then
            # If they don't have different names
            if [[ "$entry" =~ (-?)"$macro" ]]
            then
                # If the macro at hand is global
                if [[ "$class" == -g ]]
                then
                    # It overrides the existing entry
                    shortcuts["$key"]="$class $macro"
                fi
                # Otherwise move on to the next macro
                continue
            fi
            # If they do have different names but same keys
            # then report a macro collision that needs to be
            # taken care of
            log ERROR "Macro collision detected \"$macro\" \""$(echo "$entry" | cut -d ' ' -f 2)"\""
            exit 1
        else
            shortcuts["$key"]="$class $macro"
        fi
    done
done
# Print helpful text
if [ "$1" == "--help" ]
then
    echo "# Options:"
    echo "# -u, --unit-define    Define a macro in a test unit"
    echo "# -g, --global-define  Define a macro globally"
    echo "# -x, --executable     Compile the specified executable"
    echo "# -r, --rebuild        Recompile library / executable"
    # List the auto-derived macro shortcuts, when any were discovered.
    if [ ${#shortcuts[@]} -gt 0 ]
    then
        echo -e "\n# Shortcuts:"
        for macro in "${!shortcuts[@]}"
        do
            printf "# %s, %s\n" "$macro" "${shortcuts[$macro]}"
        done
    fi
    echo -e "\n# Usage:"
    echo "# $prog -u [MACRO]"
    echo "# $prog -g [MACRO]"
    echo "# $prog -x [name]"
    echo "# $prog -r"
    echo -e "\n# Example: $prog -r -u __BENCHMARK__ -u __QUIET__ -g __CACHE_SIZE__=32768"
    exit 0
fi
# If the "--makefile" flag was specified or there's no Makefile generate one
if [ "$1" == "--makefile" ] || [ ! -f $(pwd)/Makefile ]
then
    files=$(ls "${meta[PATH_SRC]}");
    if [ -n "$files" ]
    then
        # Ask before clobbering, then write the generated makefile.
        confirm "Makefile"; generate "$files" > Makefile
    else
        log ERROR "Failed to generate a makefile due to directory \"${meta[PATH_SRC]}\" being empty"
        exit 1
    fi
    exit 0
fi
# Preprocess the input in order to substitute any shortcuts with their true meaning
cmd="$*"
for key in ${!shortcuts[@]}
do
    full="${shortcuts[$key]}"; cmd=${cmd/$key/$full}
done
# Re-seed the positional parameters with the expanded command line.
set -- ""${cmd[@]}""
# Handle the different options mentioned by the program when run with the "--help" flag
# dexe collects -D flags for test units, dlib for the whole library,
# fexe the requested executables, rebuild forces recompilation.
while [ ! "$#" -eq 0 ]
do
    case "$1" in
        "-u" | "--unit-define")
            shift
            dexe="$dexe -D$1"
            shift
            ;;
        "-g" | "--global-define")
            shift
            dexe="$dexe -D$1"
            dlib="$dlib -D$1"
            shift
            ;;
        "-x" | "--executable")
            shift
            fexe=$(echo -e "$1\n$fexe")
            shift
            ;;
        "-r" | "--rebuild")
            rebuild=true
            shift
            ;;
        *)
            log ERROR "Invalid syntax! \"$*\""
            exit 1
            ;;
    esac
done
# If any number of global macros have been specified or
# the "--rebuild" flag was specified and we were not refering to specific executables
# everything is recompiled
if ([ "$rebuild" ] && [ -z "$fexe" ]) || [ ! -z "$dlib" ]
then
    make clean
fi
make "DEFINED=$dlib"
# If no executables have been specified compile every test unit
if [ -z "$fexe" ]
then
    fexe=$(ls ${meta[PATH_TEST]})
fi
# NOTE(review): "-e" is quoted here, so echo prints it literally and the
# "\n" is NOT interpreted — confirm whether `echo -e "\n..."` was intended.
echo "-e" "\n*** Compiling exe files ***"
echo "***"
# When listing executables with the "--executable" flag the directory and the extension
# of the executable need not be specified
for name in ""$fexe""
do
    if [ -z "$name" ]
    then
        continue
    fi
    # Split an explicit "dir/file" argument and only accept the bin dir.
    if [[ "$name" =~ (\.?/?.+)/(.+) ]]
    then
        dir=${BASH_REMATCH[1]}
        file=${BASH_REMATCH[2]}
        if [ "$dir" == "${meta[PATH_BIN]}" ]
        then
            name="$file"
        else
            log WARNING "Directory mismatch! \"$dir\""
            continue
        fi
    fi
    # Normalize to "<bin>/<stem>.exe" (strips any original extension).
    name="${meta[PATH_BIN]}/${name//.*/}.exe"
    # If the executable already exists but
    # the "--rebuild" flag was specified or
    # any test unit specific macros have been specified
    # recompile it
    if ([ "$rebuild" ] || [ ! -z "$dexe" ]) && [ -x "$name" ]
    then
        rm -f "$name"
    fi
    make "$name" "DEFINED=$dexe"
done
echo "***"
| true |
add66e579256ef0976c39209acc5e31f0476e6d6 | Shell | kabegh/drupal | /drupal-8.6/create-drupal.sh | UTF-8 | 627 | 2.515625 | 3 | [] | no_license | #! /bin/bash
# Provision a Drupal stack on OpenShift: nginx front-end, the Drupal app and
# PostgreSQL, wired together with services and exposed through a route.
# Abort at the first failed `oc` call rather than piling commands onto a
# half-provisioned cluster.
set -e
# create nginx deployment
oc apply -f ./nginx/configmap-sitesconf.yaml
oc apply -f ./nginx/nginx.yaml
# create drupal deployment
oc apply -f ./drupal/drupal.yaml
# create postgres deployment
oc apply -f ./postgres/postgres.yaml
# create service
oc expose deployment postgresql-deployment --name=postgresql-service
oc expose deployment drupal-deployment --name=drupal-service --port=80 --target-port=drupal-port
oc expose deployment nginx-deployment --name=nginx-service
# create route
oc expose service nginx-service --name=nginx-route
# show url
oc describe route nginx-route | grep "Requested Host:"
| true |
360bb0bc30f9f0b02990413d71059a89128cffb9 | Shell | sjf/scripts | /sublime | UTF-8 | 260 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Launch Sublime Text with the given arguments, preferring the Linux install
# path and falling back to the macOS application bundle location.
set -x #print commands
set -e #exit on failed command
set -u #fail on undefined variable
if [ -f "/opt/sublime_text/sublime_text" ];then
  # "$@" (quoted) forwards each argument verbatim, so filenames containing
  # spaces are no longer split; exec replaces the shell so the editor's
  # exit status is reported directly.
  exec /opt/sublime_text/sublime_text "$@"
else
  exec /Applications/Sublime\ Text.app/Contents/SharedSupport/bin/subl "$@"
fi
| true |
a69e1e469b54fd520106d2de5c0fb5c3108d84a6 | Shell | dslm4515/BMLFS | /build-scripts/XML-Simple.build | UTF-8 | 1,833 | 3.1875 | 3 | [] | no_license | #! /bin/bash
# XML::Simple (Perl Module)
# Source: https://www.cpan.org/authors/id/G/GR/GRANTM/XML-Simple-2.25.tar.gz
#
# $BUILD = Directory to temporarily install
# $PKGS = Directory to store built packages
#
# DEPS
# Required: NONE
# Recommended: NONE
# Optional: XML::SAX
# The whole build is one &&-chain: any failing step stops the rest.
# `read -p "Compile?"` just pauses for Enter (the reply lands in $REPLY
# and is discarded).
perl Makefile.PL && \
read -p "Compile?" && make -j2 &&
sudo -S make DESTDIR=$BUILD install &&
sudo -S mkdir -pv /BMAN/install && \
sudo -S mkdir -pv /BMAN/usr && \
sudo -S mv $BUILD/usr/share /BMAN/usr/ && \
cd $BUILD && sudo -S mkdir -v ${BUILD}/install &&
cat > /tmp/slack-desc << "EOF"
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':' except on otherwise blank lines.
|-----handy-ruler------------------------------------------------------|
XML-Simple: XML::Simple (Perl Module)
XML-Simple:
XML-Simple: An easy API to read and write XML (especially config files). It is
XML-Simple: deprecated and its use is discouraged.
XML-Simple:
XML-Simple:
EOF
# Package the main (arch-specific) build from $BUILD.
sudo -S mv -v /tmp/slack-desc install/ &&
sudo -S makepkg -l y -c n $PKGS/XML-Simple-2.25-$(uname -m)-mlfs.txz && \
cd /BMAN && \
cat > /tmp/slack-desc << "EOF"
XML-Simple-doc: Manuals for XML::Simple
XML-Simple-doc:
XML-Simple-doc: An easy API to read and write XML (especially config files). It is
XML-Simple-doc: deprecated and its use is discouraged.
XML-Simple-doc:
XML-Simple-doc:
EOF
# Package the documentation (noarch) from /BMAN, then clean both staging areas.
sudo -S mv -v /tmp/slack-desc install/ && \
sudo -S makepkg -l y -c n $PKGS/XML-Simple-doc-2.25-noarch-mlfs.txz && \
sudo -S rm -rf ${BUILD}/* /BMAN/*
| true |
5318c485d969881516b0ae4123005fbc2c61536c | Shell | garbetjie/docker-image-ftp-server | /fs/docker-entrypoint.sh | UTF-8 | 1,130 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
# Ensure configuration is provided.
has_error=0
[[ -z $DB_HOST ]] && { echo "\$DB_HOST is required."; has_error=1; }
[[ -z $DB_USERNAME ]] && { echo "\$DB_USERNAME is required."; has_error=1; }
[[ -z $DB_NAME ]] && { echo "\$DB_NAME is required."; has_error=1; }
# Exit if there are errors.
# (Safe under set -e: a failing [[ ]] inside an && list does not abort.)
[[ $has_error -eq 1 ]] && exit 1
# Initialize user.
# Recreate the ftp user/group so the numeric GID/UID can be set per-container;
# deletion failures (first run) are deliberately ignored.
userdel -f ftp &> /dev/null || true
groupdel -f ftp &> /dev/null || true
groupadd -o -g ${GID} ftp
# NOTE(review): bash's UID variable is read-only and reflects the current
# uid — confirm ${UID} here resolves to the intended externally-supplied id.
useradd -K MAIL_DIR=/dev/null -u ${UID} -G ftp -o -d /srv/secure-chroot -M -N -s /sbin/nologin ftp 2>/dev/null
# Update the configuration.
# Only the listed variables are substituted; other $-tokens in the PAM
# template are left untouched. Written to a temp name, then moved in place.
envsubst '
    $DB_HOST
    $DB_USERNAME
    $DB_PASSWORD
    $DB_NAME
    $TABLE_USERS
    $TABLE_LOGS
    $COL_USERNAME
    $COL_PASSWORD
    $PASSWORD_CRYPT
    $LOG_TRANSACTIONS' < /etc/pam.d/vsftpd-config > /etc/pam.d/.vsftpd-config
mv /etc/pam.d/.vsftpd-config /etc/pam.d/vsftpd-config
# Set the external address.
if [ "$EXTERNAL_IP" != "" ]; then
    sed -i "/pasv_address/c\pasv_address=${EXTERNAL_IP}" /etc/vsftpd/vsftpd.conf
else
    sed -i "/pasv_address/c\#pasv_address=" /etc/vsftpd/vsftpd.conf
fi
# Execute the FTP server.
exec /usr/sbin/vsftpd
| true |
d322e3c5de3355e9466522fa23bc1009c412bb01 | Shell | sjbylo/osd-workshop | /scripts/deploy-get-a-username | UTF-8 | 2,097 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory this script lives in so config.sh can be sourced
# relative to it, regardless of the caller's working directory.
cd $(dirname $0)
BASE=$(pwd)
cd - >> /dev/null
# Expected to define USERCOUNT, HR_PROJECT, GAU_PROJECT and the GAU_*
# secrets used below (validated by ensure_set afterwards).
source ${BASE}/../config.sh
# ensure_set NAME — abort the script with a message unless the variable
# named NAME holds a non-empty value (looked up via indirect expansion).
function ensure_set {
    local name="$1"
    [ -n "${!name}" ] && return 0
    echo "$name is not set"
    exit 1
}
ensure_set USERCOUNT
ensure_set HR_PROJECT
ensure_set GAU_PROJECT
# Switch to (or create) the project that hosts the username distributor.
oc project $GAU_PROJECT || oc new-project $GAU_PROJECT
# Derive the lab and console URLs from the existing OpenShift routes.
HR_ROUTE="https://$(oc get -n $HR_PROJECT route/hosted-workshop-spawner -o jsonpath='{.spec.host}')"
CONSOLE_ROUTE="https://$(oc get -n openshift-console route/console -o jsonpath='{.spec.host}')"
ensure_set HR_ROUTE
ensure_set CONSOLE_ROUTE
# From here on, any failed oc call aborts the deployment.
set -e
oc new-app \
    -n $GAU_PROJECT \
    --name=redis \
    --template=redis-persistent \
    -p MEMORY_LIMIT=1Gi \
    -p DATABASE_SERVICE_NAME=redis \
    -p REDIS_PASSWORD=redis \
    -p VOLUME_CAPACITY=1Gi \
    -p REDIS_VERSION=5
oc new-app \
    quay.io/openshiftlabs/username-distribution \
    -n $GAU_PROJECT \
    --name=get-a-username \
    -e LAB_REDIS_HOST=redis \
    -e LAB_REDIS_PASS=redis \
    -e LAB_TITLE="OpenShift Dedicated Workshop" \
    -e LAB_DURATION_HOURS=240h \
    -e LAB_USER_COUNT=$USERCOUNT \
    -e LAB_USER_ACCESS_TOKEN="$GAU_ACCESS_TOKEN" \
    -e LAB_USER_PASS=openshift \
    -e LAB_USER_PREFIX=user \
    -e LAB_USER_PAD_ZERO=false \
    -e LAB_ADMIN_PASS="$GAU_ADMIN_PASSWORD" \
    -e LAB_MODULE_URLS="$HR_ROUTE;Lab" \
    -e LAB_EXTRA_URLS="${CONSOLE_ROUTE};OpenShift Console,https://$(oc get route/grafana -n openshift-monitoring -o jsonpath='{.spec.host}');Grafana"
# Expose the app over an edge-terminated (HTTPS) route.
oc expose -n $GAU_PROJECT svc/get-a-username
oc patch route/get-a-username -n $GAU_PROJECT -p '{"spec":{"tls":{"termination":"edge","insecureEdgeTerminationPolicy":"Allow"}}}'
# for the topology view
oc label -n $GAU_PROJECT deploy/get-a-username app.kubernetes.io/part-of=get-a-username
oc label -n $GAU_PROJECT dc/redis app.kubernetes.io/part-of=get-a-username
oc annotate -n $GAU_PROJECT deploy/get-a-username app.openshift.io/connects-to='[{"apiVersion":"apps.openshift.io/v1","kind":"DeploymentConfig","name":"redis"}]'
echo "Username app is now available at https://$(oc get -n $GAU_PROJECT route/get-a-username -o jsonpath='{.spec.host}')"
2e120ad500d2e4d7382680c5e02532beb796e0f7 | Shell | unRob/dotfiles | /zshrc.dotfile | UTF-8 | 2,153 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env zsh
# shellcheck shell=bash
export DOTFILES="${HOME}/.dotfiles"
export ZSH_CUSTOM=$DOTFILES/zsh-stuff
# Path to your oh-my-zsh installation.
export ZSH=$DOTFILES/oh-my-zsh
# Uncomment the following line to change how often to auto-update (in days).
export UPDATE_ZSH_DAYS=1
# Set name of the theme to load.
export ZSH_THEME="unrob"
# Uncomment the following line to disable auto-setting terminal title.
export DISABLE_AUTO_TITLE="true"
# Uncomment the following line to display red dots whilst waiting for completion.
export COMPLETION_WAITING_DOTS="true"
# What to higlight in the shell prompt
export ZSH_HIGHLIGHT_HIGHLIGHTERS=(main brackets pattern cursor)
# ----------------
# Plugins
# ----------------
# load out-of-the-box plugins
plugins=(git macos iterm2)
# load custom plugins
# (@f) splits the find output on newlines, one array element per plugin dir.
plugins+=("${(@f)$(find -L "$DOTFILES/zsh-stuff/plugins" -type d -maxdepth 1 -mindepth 1 -exec basename "{}" \;)}")
# zmv is mv for champions
autoload zmv
# control+x control+e to edit current command in $EDITOR
autoload -z edit-command-line
zle -N edit-command-line
bindkey '\C-x\C-e' edit-command-line
# ----------------
# Start oh-my-zsh
# ----------------
# shellcheck disable=1090
source "$ZSH/oh-my-zsh.sh"
# ----------------
# Aliases
# ----------------
alias g='git'
alias m='milpa'
# Enable us to cd to anywhere in the current path, $HOME or $HOME/src
cdpath=("$HOME" "$HOME/src")
# ----------------
# Further config
# ----------------
# this is the editor used in git commit messages and all sorts of crazy stuff
export EDITOR="vim"
# Please en_US.utf8 all the things
# NOTE(review): LC_ALL is set twice (en_US.utf8 here, en_US.UTF-8 below);
# the later assignment wins.
export LC_ALL="en_US.utf8"
export LANG="en_US.UTF-8"
export LC_COLLATE="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
export LC_MESSAGES="en_US.UTF-8"
export LC_MONETARY="en_US.UTF-8"
export LC_NUMERIC="en_US.UTF-8"
export LC_TIME="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
export RIPGREP_CONFIG_PATH="$HOME/.ripgreprc"
# ----------------
# Nasty stuff added by computars beyond
# ----------------
# Source every machine-local init snippet. Reading NUL-delimited find output
# instead of word-splitting $(find ...) means paths containing spaces survive
# intact; 2>/dev/null keeps a missing local-init directory quiet.
while IFS= read -r -d '' localInit; do
  # let computars break the system and slow down shell boot
  source "${localInit}"
done < <(find "${HOME}/.dotfiles/local-init" -type f -name '*.zsh' -print0 2>/dev/null)
| true |
98cdf075ab9cc981db5ab2c909b630105527700c | Shell | koby28/linux | /uptime.sh | UTF-8 | 244 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# File: uptime.sh
# Purpose: system uptime monitor — ssh into each host and report a field
# of its `uptime` output.
# Space-separated host list; the unquoted expansion below splits it on purpose.
IP_LIST="192.168.0.1 192.168.0.5 192.168.0.9"
USER="test"
for IP in $IP_LIST;
do
    # -n keeps ssh from reading stdin; quoting the target preserves it as
    # a single argument.
    # NOTE(review): awk '{ print $3 }' assumes one particular `uptime`
    # layout ("up N days,"); hosts up for under a day format differently.
    utime=$(ssh -n "${USER}@${IP}" uptime | awk '{ print $3 }')
    printf '%s uptime: %s\n' "$IP" "$utime"
done
| true |
16e626be0d5fdbb166a7bba48d139fa159fe0e15 | Shell | mbezjak/poly-devel | /bin/validate-project-exists | UTF-8 | 134 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Validate that the project named by $1 exists in some workspace:
# `project-find` performs the lookup and `fail` aborts with a message
# when nothing is found.
declare -r project="$1"

if ! project-find "$project" > /dev/null; then
    fail "Project not found in any workspace: $project"
fi
| true |
1b1795f1ed600bfe2e721e2bb91a6c9b2f793562 | Shell | KjMaas/.dotSetup | /symlink.sh | UTF-8 | 1,888 | 4.03125 | 4 | [] | no_license | #!/bin/bash
echo "[INFO] symlinking dotfiles..."
# linkDotfile FILE [DESTFOLDER]
# Symlink $dotfilesDir/FILE into DESTFOLDER (default: $HOME), first removing
# an existing symlink or backing up an existing file/directory with a
# timestamp suffix. Relies on the caller having set $dotfilesDir.
function linkDotfile {
  # Work variables are local so repeated calls don't leak state.
  local destFolder dest dateStr
  if [[ -z $2 ]]; then
    # if no folder argument is provided, create symlink @HOME
    destFolder="${HOME}"
  else
    # else create it at the given location
    destFolder="${2}"
  fi
  dest="${destFolder}/${1}"
  dateStr=$(date +%Y-%m-%d-%H%M)
  # remove symlink if it already exists
  # (every path below is quoted and passed after -- so names containing
  # spaces or leading dashes are handled safely)
  if [ -L "${dest}" ]; then
    echo "--- Removing existing symlink: ${dest}"
    rm -- "${dest}"
  # backup file if it already exists
  elif [ -f "${dest}" ]; then
    echo "<<< Backing up existing file: ${dest} --> ${dest}.${dateStr}"
    mv -- "${dest}" "${dest}.${dateStr}"
  # backup directory if it already exists
  elif [ -d "${dest}" ]; then
    echo "<<< Backing up existing dir: ${dest} --> ${dest}.${dateStr}"
    mv -- "${dest}" "${dest}.${dateStr}"
  fi
  # create folders arborescence (if needed)
  if [[ ! -d ${destFolder} ]]; then
    echo "+++ Creating directory arborescence ${destFolder}"
    mkdir -p -- "${destFolder}"
  fi
  # create symlink
  echo "+++ Creating new symlink: ${dest} --> ${dotfilesDir}/${1}"
  ln -s -- "${dotfilesDir}/${1}" "${dest}"
}
# Symlink shell stuff
# Each section cd's into a subdirectory of the dotfiles repo and records it
# in $dotfilesDir, which linkDotfile uses as the symlink source root.
cd shell/
dotfilesDir=$(pwd)
linkDotfile .bashrc
linkDotfile .bash_aliases
linkDotfile .bash_paths
linkDotfile .bash_func
linkDotfile .bash_autocomplete
linkDotfile .profile
# linkDotfile .bash_history
cd ../
# Symlink Git stuff
cd git/
dotfilesDir=$(pwd)
linkDotfile .gitconfig
cd ../
# Symlink NeoVim-related stuff
cd NeoVim/
dotfilesDir=$(pwd)
linkDotfile init.vim ~/.config/nvim
linkDotfile coc-settings.json ~/.config/nvim
linkDotfile snips/python.snippets ~/.config/nvim
linkDotfile ftplugin/python.vim ~/.config/nvim
cd ../
# add custom functions
cd funcs/
dotfilesDir=$(pwd)
#linkDotfile create_backup.sh ~/usr/local/bin/
cd ../
echo "[INFO] symlinking done!"
| true |
9e5f9ef4b0c72bfbabaff8e3a9d292c6df3df1eb | Shell | priyanshul-govil/hashcode2021 | /run.sh | UTF-8 | 162 | 2.78125 | 3 | [] | no_license | program=$1
# Compile the given solution source once, then run it against every file in
# ./input, writing each result to the matching name under ../output.
clang++ -std=c++17 "$program" -o program
cd input || exit 1
for i in *; do
    # Plain assignment instead of the redundant $(echo ...) nesting.
    filePath="../output/$i"
    ../program < "$i" > "$filePath"
done
8a4620b09d754497b0369e356eb8f1c3006d6bde | Shell | bookingcom/pakket | /dev.rc | UTF-8 | 1,806 | 3.53125 | 4 | [
"MIT"
] | permissive | # vim: filetype=sh
# shellcheck shell=bash
# usage:
#   source "$(git rev-parse --show-toplevel)/dev.rc"
# => environment -------------------------------------------------------------------------------------------------- {{{1
REQUIRED_PERL_VERSION=5.38.0
# Default parallelism; refined via nproc when available.
NPROC=4
[[ -x "$(command which -- nproc)" ]] && {
    NPROC="$(nproc)"
}
REPO_ROOT="$(git rev-parse --show-toplevel)"
REPO_URL=$(git config --get remote.origin.url)
REPO_NAME=$(basename -s .git "$REPO_URL")
PERLCRITIC_PATH="$REPO_ROOT/.perlcriticrc"
if [[ -r "$PERLCRITIC_PATH" ]]; then
    export PERLCRITIC="$PERLCRITIC_PATH"
fi
# => check prerequisites ------------------------------------------------------------------------------------------ {{{1
# `return` (not exit) because this file is meant to be sourced.
[[ -x "$(command which -- perlbrew)" ]] || {
    echo "Please install perlbrew (https://perlbrew.pl/)"
    return
}
[[ -x "$(command which -- cpanm)" ]] || {
    perlbrew install-cpanm || return
}
# => install default perl and library ----------------------------------------------------------------------------- {{{1
PERL_NAME="perl-$REQUIRED_PERL_VERSION"
# \b-bounded regex so e.g. perl-5.38.0 does not match perl-5.38.0-debug.
PERL_NAME_MATCH="\b${PERL_NAME}\b"
[[ $(perlbrew list) =~ $PERL_NAME_MATCH ]] || {
    echo "Installing required perl: $PERL_NAME (nproc: $NPROC)"
    nice perlbrew install -nf -j "$NPROC" "$REQUIRED_PERL_VERSION" --as "$PERL_NAME" || return
    echo
}
LIBRARY_NAME="${PERL_NAME}@dev-$REPO_NAME"
LIBRARY_NAME_MATCH="\b${LIBRARY_NAME}\b"
[[ $(perlbrew lib-list) =~ $LIBRARY_NAME_MATCH ]] || {
    echo "Creating Perl local dev library: $LIBRARY_NAME"
    perlbrew lib create "$LIBRARY_NAME" || return
    echo
    echo "Installing required modules"
    perlbrew use "$LIBRARY_NAME"
    nice tools/setup-dev-environment
}
# => -------------------------------------------------------------------------------------------------------------- {{{1
perlbrew use "$LIBRARY_NAME"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.