blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
923252052ae42562b396c71dba19a4717febcea6 | Shell | vadivelkindhealth/heroku-buildpack-nginx | /bin/compile | UTF-8 | 1,254 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# fail fast
set -eu
# parse args
BP_DIR="$(cd "$(dirname "$0")" && pwd)"
BUILD_DIR=$1
CACHE_DIR=$2
OPT_DIR=$BP_DIR/../opt/
fetch_nginx_tarball() {
  # Emit (on stdout, no trailing newline) the command that produces the
  # nginx tarball stream: 'cat' when a cached copy exists, 'curl' otherwise.
  # The caller executes this command and pipes it into tar.
  local version="1.9.7"
  local stack="cedar-14"
  local cached="$CACHE_DIR/$stack/nginx-$version.tgz"
  local url="https://s3-external-1.amazonaws.com/heroku-buildpack-ruby/nginx/$stack/nginx-$version-ngx_mruby.tgz"
  if [ -f "$cached" ]; then
    printf '%s' "cat $cached"
  else
    printf '%s' "curl -L $url"
  fi
}
# Lay out the build directory: .heroku/bin plus .nginx/bin for the binary.
heroku_bin_dir=$BUILD_DIR/.heroku/bin
mkdir -p $heroku_bin_dir
nginx_dir=$BUILD_DIR/.nginx
nginx_bin_dir=$nginx_dir/bin
mkdir -p $nginx_bin_dir
# fetch_nginx_tarball prints either a 'cat <cache>' or 'curl <url>' command;
# executing its output streams the tarball straight into tar.
$(fetch_nginx_tarball) | tar xzC $nginx_bin_dir
# Install the launcher shipped in the buildpack's opt/ directory.
cp $OPT_DIR/web $nginx_dir/bin/nginx-start
chmod +x $nginx_dir/bin/nginx-start
# Generate the dyno-side wrapper that backgrounds nginx-start from .nginx/.
# (Unquoted EOF: $-expansion is fine here since the body contains none.)
cat <<EOF > ${BUILD_DIR}/nginx-start.sh
#!/bin/bash
set -eu
if [ -f .nginx/bin/nginx-start ]; then
  cd .nginx > /dev/null 2>&1
  eval "bin/nginx-start &"
  cd - > /dev/null 2>&1
else
  echo "No .nginx/bin/nginx-start file found!"
  exit 1
fi
EOF
chmod +x ${BUILD_DIR}/nginx-start.sh
echo "-----> Installed nginx"
# Ship the nginx config template alongside the binary.
cp $OPT_DIR/nginx.conf.erb $nginx_dir/
| true |
f0df6113d74e04f01283a75c171fdeb55dfa244b | Shell | amandameganchan/MScSLP-dissertation | /dnn-diarization/edit_vad.sh | UTF-8 | 400 | 3.328125 | 3 | [] | no_license | #!/usr/bin/bash
# takes a VAD (ark) file and changes the non-speech
# noise symbol (1) to the speech noise symbol (2)
# $1: directory of input VAD ark files; $2: output directory.
arkdir=$1
outdir=$2

mkdir -p "$outdir"
mkdir -p temp_files

# Quoted expansions throughout so paths containing spaces don't break
# (the original left $ark/$outdir unquoted).
for ark in "$arkdir"/*
do
  # Convert the binary archive to text form for editing.
  copy-vector ark:"$ark" ark,t:temp.txt
  # Replace the non-speech noise symbol (1) with the speech noise symbol (2).
  sed -i 's/\b1\b/2/g' temp.txt
  # Convert the edited text back to a binary archive in the output directory.
  copy-vector ark,t:temp.txt ark:"$outdir/$(basename "$ark")"
  # Keep the edited text form around for inspection/debugging.
  mv temp.txt "temp_files/$(basename "$ark" .ark)-temp.txt"
done
| true |
bc1e6bc1e64139064a94d235f20a52908838219d | Shell | cyanide284/IoTGoat | /OpenWrt/openwrt-18.06.2/target/linux/lantiq/base-files/etc/hotplug.d/firmware/12-ath9k-eeprom | UTF-8 | 3,569 | 3.890625 | 4 | [
"MIT",
"GPL-2.0-only"
] | permissive | #!/bin/sh
# Based on ar71xx 10-ath9k-eeprom
[ -e /lib/firmware/$FIRMWARE ] && exit 0
. /lib/functions.sh
. /lib/functions/system.sh
. /lib/upgrade/nand.sh
# xor multiple hex values of the same length
# XOR an arbitrary number of equal-width hex values; the result is printed
# zero-padded to the width of the first operand.
xor() {
	local operand
	local acc="0x$1"
	local width=${#1}
	shift
	for operand in "$@"; do
		acc=$((acc ^ 0x$operand))
	done
	printf "%0${width}x" "$acc"
}
# Print a tagged diagnostic and abort the hotplug script.
ath9k_eeprom_die() {
	printf 'ath9k eeprom: %s\n' "$*"
	exit 1
}
# Copy a 4 KiB calibration blob from $1 (device or file) at byte offset $2
# into /lib/firmware/$FIRMWARE. When $3 > 0, 16-bit words are byte-swapped
# (dd conv=swab), so size and offset are rescaled to 2-byte blocks.
ath9k_eeprom_extract_raw() {
	local source=$1
	local offset=$2
	local swap=$3
	local size=4096
	local bs=1
	local conv=

	if [ $swap -gt 0 ]; then
		bs=2
		conv="conv=swab"
		size=$((size / bs))
		offset=$((offset / bs))
	fi

	# Fixed: the error message used "$mtd", a variable from one caller's
	# scope, which was empty when invoked via the UBI path; report the
	# actual source argument instead.
	dd if=$source of=/lib/firmware/$FIRMWARE bs=$bs skip=$offset count=$size $conv 2>/dev/null || \
		ath9k_eeprom_die "failed to extract from $source"
}
# Extract calibration data from a raw MTD partition.
# $1: partition label, $2: byte offset, $3: byte-swap flag.
ath9k_eeprom_extract() {
	local partition=$1
	local offset=$2
	local swap=$3
	local mtd

	# Resolve the character device backing the named MTD partition.
	mtd=$(find_mtd_chardev "$partition")
	[ -n "$mtd" ] || \
		ath9k_eeprom_die "no mtd device found for partition $partition"

	ath9k_eeprom_extract_raw "$mtd" "$offset" "$swap"
}
# Extract calibration data from a UBI volume inside $CI_UBIPART.
# $1: volume name, $2: byte offset, $3: byte-swap flag.
ath9k_ubi_eeprom_extract() {
	local volume=$1
	local offset=$2
	local swap=$3
	local ubidev
	local ubi

	ubidev=$(nand_find_ubi $CI_UBIPART)
	ubi=$(nand_find_volume $ubidev $volume)
	[ -n "$ubi" ] || \
		ath9k_eeprom_die "no UBI volume found for $volume"

	ath9k_eeprom_extract_raw "/dev/$ubi" "$offset" "$swap"
}
# Patch the MAC address and fix up the checksum; the checksum word sits
# 10 bytes before the MAC in the extracted EEPROM image.
ath9k_patch_fw_mac_crc() {
	local mac=$1
	local mac_offset=$2

	ath9k_patch_fw_mac "$mac" "$mac_offset" "$((mac_offset - 10))"
}
# Write MAC address $1 into /lib/firmware/$FIRMWARE at byte offset $2.
# If a checksum offset $3 is given, the 16-bit checksum word there is
# fixed up so the image stays valid: XOR-ing out the old MAC words and
# XOR-ing in the new ones leaves the overall XOR checksum unchanged.
ath9k_patch_fw_mac() {
	local mac=$1
	local mac_offset=$2
	local chksum_offset=$3
	local xor_mac
	local xor_fw_mac
	local xor_fw_chksum

	# Nothing to do without a MAC and a target offset.
	[ -z "$mac" -o -z "$mac_offset" ] && return

	[ -n "$chksum_offset" ] && {
		# Split the new MAC into three 16-bit hex words.
		xor_mac=${mac//:/}
		xor_mac="${xor_mac:0:4} ${xor_mac:4:4} ${xor_mac:8:4}"

		# Read the MAC currently stored in the image, as three words.
		xor_fw_mac=$(hexdump -v -n 6 -s $mac_offset -e '/1 "%02x"' /lib/firmware/$FIRMWARE)
		xor_fw_mac="${xor_fw_mac:0:4} ${xor_fw_mac:4:4} ${xor_fw_mac:8:4}"

		# new_checksum = old_checksum XOR old_mac_words XOR new_mac_words
		xor_fw_chksum=$(hexdump -v -n 2 -s $chksum_offset -e '/1 "%02x"' /lib/firmware/$FIRMWARE)
		xor_fw_chksum=$(xor $xor_fw_chksum $xor_fw_mac $xor_mac)

		# Write the recomputed checksum word back in place.
		printf "%b" "\x${xor_fw_chksum:0:2}\x${xor_fw_chksum:2:2}" | \
			dd of=/lib/firmware/$FIRMWARE conv=notrunc bs=1 seek=$chksum_offset count=2
	}

	# Finally write the new MAC itself (binary form) into the image.
	macaddr_2bin $mac | dd of=/lib/firmware/$FIRMWARE conv=notrunc bs=1 seek=$mac_offset count=6
}
# Per-board dispatch: for the ath9k EEPROM firmware names, pick where the
# calibration data lives on this board and, where needed, patch in a MAC.
case "$FIRMWARE" in
"ath9k-eeprom-pci-0000:00:0e.0.bin" | \
"ath9k-eeprom-pci-0000:01:00.0.bin" | \
"ath9k-eeprom-pci-0000:02:00.0.bin")
	board=$(board_name)

	# Arguments to the extract helpers: partition/volume, offset, swap flag.
	case "$board" in
	arcadyan,arv7518pw)
		ath9k_eeprom_extract "boardconfig" 1024 1
		;;
	arcadyan,arv8539pw22)
		ath9k_eeprom_extract "art" 1024 1
		;;
	bt,homehub-v2b)
		ath9k_eeprom_extract "art" 0 1
		# All-zero MAC: only the checksum is recomputed here.
		ath9k_patch_fw_mac_crc "00:00:00:00:00:00" 524
		;;
	bt,homehub-v3a)
		ath9k_eeprom_extract "art-copy" 0 1
		# WiFi MAC = base ethaddr + 2.
		ath9k_patch_fw_mac_crc $(macaddr_add $(mtd_get_mac_ascii uboot_env ethaddr) +2) 268
		;;
	bt,homehub-v5a)
		ath9k_ubi_eeprom_extract "caldata" 4096 0
		ath9k_patch_fw_mac_crc $(macaddr_add $(mtd_get_mac_binary_ubi caldata 4364) +2) 268
		;;
	netgear,dgn3500|netgear,dgn3500b)
		ath9k_eeprom_extract "calibration" 61440 0
		ath9k_patch_fw_mac_crc $(macaddr_add $(mtd_get_mac_ascii uboot-env ethaddr) +2) 524
		;;
	avm,fritz3370|avm,fritz7320|avm,fritz7360sl)
		ath9k_eeprom_extract "urlader" 2437 0
		;;
	tplink,tdw8970|tplink,tdw8980)
		ath9k_eeprom_extract "boardconfig" 135168 0
		;;
	*)
		ath9k_eeprom_die "board $board is not supported yet"
		;;
	esac
	;;
esac
| true |
1813309b54d1f9da0ae6e5ee7ba5ed157b3cf60f | Shell | duguyue100/NSC-GPU-GUIDE | /setup/admin-setup.sh | UTF-8 | 2,917 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# This script is to setup the general system configuration.
# Author: Yuhuang Hu
# Email : yuhuang.hu@ini.uzh.ch
USER_DIR=$(cd $(dirname $0); pwd)
INSTALL_OPT="yes"
ENABLE_INSTALL=false
DEBUG_MODE=true
STATE_START=0
STATE_AFTER_UPGRADE=1
STATE_AFTER_JAVA=2
CAME_BACK_FROM_REBOOT=0
ENABLE_GPU=true
ENABLE_ALL=false
ENABLE_DEV=false
ENABLE_JAVA=false
# Color Profile
RED='\033[0;31m'
LIGHT_BLUE='\033[1;34m'
BLUE='\033[0;34m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
PURPLE='\033[0;35m'
COLOR_END='\033[0m'
# Welcome Message
echo -e "${LIGHT_BLUE}WELCOME TO MACHINE SETUP SCRIPT!${COLOR_END}"
echo -e "${LIGHT_BLUE}This script helps you setup your${COLOR_END}"
echo -e "${LIGHT_BLUE}system quickly and efficiently.${COLOR_END}"
echo -e "${LIGHT_BLUE}This script is hosted at https://github.com/duguyue100/NSC-GPU-GUIDE${COLOR_END}"
echo -e "${LIGHT_BLUE}Make pull requests or submit issues if you want to make changes.${COLOR_END}"
echo -e "${LIGHT_BLUE}Let's make setup easy again!${COLOR_END}"
echo -e "${LIGHT_BLUE}If you have any problems, please contact:${COLOR_END}"
echo -e "${GREEN}Yuhuang Hu${COLOR_END}"
echo -e "${GREEN}Email: yuhuang.hu@ini.uzh.ch${COLOR_END}"
# Load the optional per-user installation profile, which may override the
# ENABLE_* defaults declared above.
echo -e "${RED}--------------------------------------------------${COLOR_END}"
if [ -f "$USER_DIR/admin-conf.sh" ]; then
  echo -e "[MESSAGE] Loading installation configuration."
  source "$USER_DIR/admin-conf.sh"
  echo "[MESSAGE] Install configuration loaded."
else
  echo "[MESSAGE] No install configuration found. Use default settings."
fi
# set update stage
# NOTE(review): this resets the stage file to CAME_BACK_FROM_REBOOT=0
# whenever it ALREADY exists, discarding the "=1" that general_update
# appends before rebooting. The condition likely was meant to be
# "[ ! -f ... ]" (create-if-missing) — confirm the intended resume logic.
if [ -f "$USER_DIR/admin-setup-stage.sh" ]; then
  touch $USER_DIR/admin-setup-stage.sh
  echo "#!/usr/bin/env bash" > $USER_DIR/admin-setup-stage.sh
  echo "CAME_BACK_FROM_REBOOT=0" >> "$USER_DIR/admin-setup-stage.sh"
  source $USER_DIR/admin-setup-stage.sh
fi
# ENABLE_ALL is a shorthand that switches on every component group.
if [ $ENABLE_ALL = true ]; then
  ENABLE_DEV=true
  ENABLE_JAVA=true
fi
# Functions
# Refresh package lists, upgrade the system interactively, then reboot.
# Globals (read): USER_DIR. Side effects: appends a resume flag to the
# stage file and reboots the machine — this function does not return.
general_update()
{
  if sudo apt-get update ; then
    echo "Updated successfully."
  else
    echo "Some portion of the update is failed."
  fi
  # upgrade the system (interactive: no -y on purpose, operator confirms)
  sudo apt-get upgrade
  # Record that the next run resumes after a reboot, then restart.
  # NOTE(review): 'reboot' normally needs root — presumably this script is
  # run with sudo or reboot is otherwise permitted; confirm.
  echo "CAME_BACK_FROM_REBOOT=1" >> $USER_DIR/admin-setup-stage.sh
  reboot
}
# Main driver. With DEBUG_MODE=true (the default above) nothing is
# installed; the configuration is only printed.
# NOTE(review): print_config, setup_env, setup_anaconda, config_dl,
# clean_env and ENABLE_PYTHON are not defined in this file — presumably
# provided by admin-conf.sh or another sourced script; confirm.
if [ $DEBUG_MODE = false ]; then
  print_config
  if [ $ENABLE_INSTALL = true ]; then
    # setting up resource folder
    setup_env
    # setting up anaconda
    if [ $ENABLE_PYTHON = true ]; then
      setup_anaconda
    fi
    # setting up environment for deep learning
    config_dl
    # cleaning environment
    clean_env
    echo -e "${PURPLE}[MESSAGE] Selected Installation Completed.${COLOR_END}"
  else
    echo -e "${PURPLE}[MESSAGE] Installation interrupted.${COLOR_END}"
  fi
else
  print_config
  echo "[MESSAGE] Enable Install : $ENABLE_INSTALL"
fi
| true |
16a101fe70e1780d7993e2e269d474924bb8de30 | Shell | cs-willian-silva/git-cli | /proxy | UTF-8 | 2,330 | 3.328125 | 3 | [] | no_license | #!/bin/bash
#
# Nome: proxyManager
# Autor: Willian Martins - willian.martins@msn.com
# Descricao: Gerencia configuracao de Proxy do Gnome.
#
# Switch GNOME to a manual proxy configuration (squid on port 3128).
# NOTE(review): the authentication user/password values below are literal
# placeholder strings and are stored verbatim in gsettings — replace them
# with real credentials or keep use-authentication at "false".
enableProxy ()
{
  clear ; echo
  gsettings set org.gnome.system.proxy ignore-hosts "['localhost', '127.0.0.0/8', '192.168.8.0/24', '10.0.0.0/8', '172.16.0.0/12', '.samedomain.example']"
  gsettings set org.gnome.system.proxy mode "manual"
  gsettings set org.gnome.system.proxy use-same-proxy "true"
  gsettings set org.gnome.system.proxy.http authentication-password "Your password here. Blank for none."
  gsettings set org.gnome.system.proxy.http authentication-user "Your proxy username here. Blank for none."
  gsettings set org.gnome.system.proxy.http enabled "true"
  gsettings set org.gnome.system.proxy.http host "squid-proxy.samedomain.example"
  gsettings set org.gnome.system.proxy.http port "3128"
  gsettings set org.gnome.system.proxy.http use-authentication "false"
  gsettings set org.gnome.system.proxy.https host "squid-proxy.samedomain.example"
  gsettings set org.gnome.system.proxy.https port "3128"
  gsettings set org.gnome.system.proxy.ftp host "squid-proxy.samedomain.example"
  gsettings set org.gnome.system.proxy.ftp port "3128"
}
# Disable the GNOME proxy and blank out every previously stored value.
disableProxy ()
{
  clear ; echo
  gsettings set org.gnome.system.proxy ignore-hosts "[]"
  gsettings set org.gnome.system.proxy mode "none"
  gsettings set org.gnome.system.proxy use-same-proxy "false"
  gsettings set org.gnome.system.proxy.http authentication-password " "
  gsettings set org.gnome.system.proxy.http authentication-user " "
  gsettings set org.gnome.system.proxy.http enabled "false"
  gsettings set org.gnome.system.proxy.http host " "
  gsettings set org.gnome.system.proxy.http port "0"
  gsettings set org.gnome.system.proxy.http use-authentication "false"
  gsettings set org.gnome.system.proxy.https host " "
  gsettings set org.gnome.system.proxy.https port "0"
  gsettings set org.gnome.system.proxy.ftp host " "
  gsettings set org.gnome.system.proxy.ftp port "0"
}
# Dispatch on the first CLI argument (start/stop); anything else prints
# a usage error. Messages are in Portuguese by design.
case $1 in
start) enableProxy ; echo "Proxy Habilitado no Gnome" ; echo ;;
stop) disableProxy ; echo "Proxy Desabilitado no Gnome" ; echo ;;
*) clear ; echo ; echo "Erro de sintaxe. Utilize start|stop" ; echo ;;
esac
| true |
cfb96353aedbfa765b99019848d0b6ef6ce0c9a0 | Shell | fcostanz/DesySusy | /stop_2013/Utilities/checkDuplicate.sh | UTF-8 | 980 | 3.625 | 4 | [] | no_license | #!/bin/zsh
# Input list of directories: 11th comma-separated field of DC.txt
# (or of the file given as $1).
FILE=DC.txt
if [ $1 ]
then
FILE=$1
fi
red='\e[0;31m'
green='\e[0;32m'
NC='\e[0m' # No Color
for dir in `cat $FILE | awk -F "," '{print $11}'`
do
rm -f help.txt help2.txt sort.txt sort2.txt
echo $dir
# dCache paths (pnfs) are listed with the site-specific 'dcls' tool,
# local directories with plain ls.
echo $dir | grep -q "pnfs"
if [[ $? -eq 0 ]]
then
dcls $dir > help.txt
else
ls $dir/*.root > help.txt
fi
touch help2.txt
# Grab the first listed file only (loop breaks immediately).
for rootfile in `cat help.txt`
do
break
done
echo $rootfile
# Skip config-file directories entirely.
echo $rootfile | grep -q cfg
if [ $? -eq 0 ]
then
continue
fi
# Strip the last two '_'-separated fields (job/chunk suffix) from each
# filename so duplicates of the same logical file collapse to one key.
for rootfile in `cat help.txt`
do
echo $rootfile| rev | cut -f 3- -d"_" | rev >> help2.txt
done
sort help2.txt > sort.txt
sort -u help2.txt > sort2.txt
# If the number of unique keys differs from the number of files, at
# least one key occurs twice -> duplicates present; show the diff.
if [[ `sort -u help2.txt | wc -l` -ne `more help.txt | wc -l` ]]
then
echo -e "${red}Houston, we've got a problem${NC}" $dir
diff sort.txt sort2.txt
else
echo -e "${green}Kein Problem Digga!!${NC}"
fi
done
rm -f help.txt help2.txt sort.txt sort2.txt | true |
257a181a2640f0993e2f911ab0ae68b8baac08ce | Shell | SCH94/cheatsheets | /shellscripting/mod4-parsingCLI/backup.sh | UTF-8 | 1,861 | 4.625 | 5 | [] | no_license | #!/bin/bash
# this script will backup website files and mysql database. Return non-zero status on error
log() {
  # Send a message to both standard output and syslog (tagged with the
  # script name). Fixed: the logger call used the misspelled ${MESSAge},
  # which expanded empty, so syslog always received a blank message.
  local MESSAGE="${@}"
  echo "${MESSAGE}"
  logger -t "${0}" "${MESSAGE}"
}
# Report the outcome of the preceding backup step.
# Arguments: $1 - exit status of the backup command.
# Globals (read): BACKUP_FILE - path reported to the user.
# NOTE(review): backup_files sets BACKUP_FILE to the directory /var/tmp,
# so 'du -lh' here reports the whole directory, not the new archive —
# confirm whether the archive path was intended.
check_status() {
  local STATUS="${@}"
  if [[ "${STATUS}" -eq 0 ]]
  then
    log "Backup completed"
    log "Backup file location: ${BACKUP_FILE}"
    log "Size of backup: $(du -lh ${BACKUP_FILE})"
    log "NOTE: backup will be deleted after 30 days."
  else
    log "Backup failed"
  fi
}
# Archive a website directory into /var/tmp as <basename>.<date>.tar.gz.
# Arguments: $1 - directory to back up. Returns 1 if it does not exist.
# Sets the global BACKUP_FILE consumed by check_status.
backup_files() {
  local FILE="${1}"
  # check if the directory exists
  if [[ -d "${FILE}" ]]
  then
    # NOTE(review): BACKUP_FILE is the destination DIRECTORY here, while
    # check_status treats it as the backup artifact — possibly meant to be
    # "${BACKUP_FILE}/${BNAME}.tar.gz"; confirm.
    BACKUP_FILE="/var/tmp"
    # %N (nanoseconds) keeps repeated runs from colliding (GNU date).
    local BNAME="$(basename ${FILE}).$(date +%F-%N)"
    log "Backing up ${FILE} at ${BACKUP_FILE}"
    log "Backup started..."
    # compressing the files (earlier quiet variant kept for reference)
    #tar -czvf "${BNAME}.tar.gz" "${FILE}" > "${BACKUP_FILE}" &> /dev/null
    tar -czvf "${BACKUP_FILE}/${BNAME}.tar.gz" "${FILE}"
    check_status "${?}"
  else
    return 1
  fi
}
backup_sql() {
  # Dump a mysql database to a gzipped file under /var/tmp.
  # Arguments: $1 - mysql user, $2 - mysql password, $3 - database name.
  # Sets the global BACKUP_FILE consumed by check_status.
  # Fixed: 'date +F-%N' was missing the %, producing a literal "F-"
  # instead of the date (matches the +%F-%N used by backup_files).
  BACKUP_FILE="/var/tmp/mysql_.$(date +%F-%N)"
  log "Backing up mysql database ${3}"
  mysqldump -u "${1}" --password="${2}" "${3}" | gzip > "${BACKUP_FILE}.gz"
  # Report mysqldump's status, not gzip's: without pipefail, "${?}" only
  # reflected the last pipeline stage and hid dump failures.
  check_status "${PIPESTATUS[0]}"
}
# Abort unless running as root (backups write under /etc-adjacent paths
# and read arbitrary site files).
UUUID=$(id -u)
if [[ "${UUUID}" -ne 0 ]]
then
  log "$(id -un) is not a root user."
  log "Please use sudo"
  exit 1
fi
# Require at least the website directory argument.
if [[ "${#}" -lt 1 ]]
then
  log "USAGE: ${0} [website_directory] [mysql_user] [mysql_password] [database_name]"
  exit 1
fi
# Interactive mode selection.
# NOTE(review): read -p does not interpret \n — the prompt shows a
# literal "\n"; use $'...' or two prompts if a line break is wanted.
read -p "Enter 1 for website backup \n 2 for mysql backup: " CHOICE
case "$CHOICE" in
  1) backup_files "${1}" ;;
  2) backup_sql "${2}" "${3}" "${4}" ;;
  *)
    log "Enter Valid Option."
    exit 1
esac | true |
64635f6bb5e8c28ba32f12bfe6c8110fc080f9b0 | Shell | boyska/cliutils | /pyless | UTF-8 | 81 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Colorize Python source with pycolor and page it through less, keeping
# ANSI colors (-R). With an argument, read that file; otherwise filter stdin.
case $# in
	0) pycolor | less -R ;;
	*) pycolor "$1" | less -R ;;
esac
| true |
9287da016f3b64c0fce860d37f67ca58b8dc04e9 | Shell | brewconomist/brewcompute | /run_tests.sh | UTF-8 | 221 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Lint the sources; generated ui_*.py files are excluded.
echo "CHECKING SYNTAX"
flake8 --exclude=ui_*.py src/*.py
flake8 src/unit_tests/*.py
echo "OK"

# Discover and run the unit tests.
test_cmd='python -m unittest discover src/'
printf '\nRUNNING UNIT TESTS with %s\n' "$test_cmd"
$test_cmd
| true |
077b78e14af973c48d12f22e8ef76e7c6a2da031 | Shell | hebs/nodar | /scripts/getbalance_all.sh | UTF-8 | 254 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Collect the smartnode payee addresses: take the token after the ":90024"
# endpoint column and drop the surrounding JSON brace lines.
node_addrs=$(smartcash-cli smartnode list full | sed -e 's/.*90024 //' -e 's/ .*//' | grep -v -F -e "{" -e "}")

for address in $node_addrs
do
	# Query the block explorer for each address's balance.
	# (Backticks replaced with $(...); URL quoted against globbing/splitting.)
	balance=$(curl --silent "https://explorer3.smartcash.cc/ext/getbalance/$address")
	printf '%s: %s\n' "$address" "$balance"
done
| true |
6dade60ee331cfd0c73dabdcdff54dae89670864 | Shell | chiralsoftware/EssentialBusinessFundingSpam | /update-spam.sh | UTF-8 | 529 | 3.375 | 3 | [] | no_license | #!/bin/bash
echo Updating spam info
# Print a message to stderr and abort the update.
die() { echo "$*" 1>&2 ; exit 1; }
echo Doing a Git update ...
# Fetch the maintained blocklist from GitHub into postfix's config dir.
curl --silent \
    https://raw.githubusercontent.com/chiralsoftware/EssentialBusinessFundingSpam/main/essential-business > \
    /etc/postfix/essential-business || die "Couldn't fetch file from GitHub"
echo Creating new header_checks with Git plus local domains...
# header_checks = local rules (other-checks) + the fetched list.
# NOTE(review): postfix is not reloaded here — presumably header_checks is
# re-read per delivery or reloaded elsewhere; confirm.
cat /etc/postfix/other-checks /etc/postfix/essential-business > \
    /etc/postfix/header_checks || die "Couldn't create new header_checks file"
echo Done
| true |
b2d5fde35aeebf539e038e4a45d5309af3d43cf5 | Shell | bopopescu/recipes | /mount_s3_centos.sh | UTF-8 | 1,507 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Mount an S3 bucket into a CentOS cloud instance via s3fs-fuse.
# NOTE(review): ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY are left empty —
# fill them in before running, or the passwd-s3fs file will be ":" only.
BUCKET_NAME=fernando-sanchez
MOUNT_POINT=/mnt/s3
ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
# Install build requirements.
# NOTE(review): the continuation line below ends in a bare "c" — the
# package list appears truncated (and this yum install lacks -y on the
# second invocation); confirm the intended packages.
yum remove fuse fuse-s3fs
yum install -y gcc gcc-c++ automake openssl-devel libstdc++-devel curl-devel libxml2-devel mailcap wget git screen \
c
# Build the FUSE backend from source.
yum remove fuse fuse-s3fs
yum install gcc libstdc++-devel gcc-c++ curl-devel libxml2-devel openssl-devel mailcap
cd /usr/src/
wget https://github.com/libfuse/libfuse/releases/download/fuse_2_9_4/fuse-2.9.3.tar.gz
tar xvfz fuse-2.9.3.tar.gz
cd fuse-2.9.3
./configure --prefix=/usr/local
make && make install
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
export PATH=$PATH:/usr/local/bin:/usr/local/lib
ldconfig
modprobe fuse
lsmod|grep fuse #check the kernel module loaded
# Build and install s3fs from source.
cd /usr/src
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
ln -s /usr/local/bin/s3fs /usr/bin/s3fs
# Create the credentials file (mode 640, format KEY:SECRET).
#configure s3fs required Access Key and Secret Key of your S3 Amazon account (Change AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY with your actual key values).
#–> Click on AWS Menu -> Your AWS Account Name -> Security Credentials***
touch /etc/passwd-s3fs && chmod 640 /etc/passwd-s3fs && echo "$ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" > /etc/passwd-s3fs
# Create the mount point and mount the bucket.
mkdir -p $MOUNT_POINT
s3fs $BUCKET_NAME $MOUNT_POINT -o passwd_file=/etc/passwd-s3fs
# Verify the mount is present.
grep s3fs /etc/mtab
| true |
f1592718b3e77e5c422c7eac8e3c1b0cb8452f33 | Shell | subogero/napi | /frissit | ISO-8859-2 | 1,530 | 3.53125 | 4 | [] | no_license | #!/bin/sh
# -h: print (Hungarian) usage text and exit.
if [ "$1" = -h ]; then
    cat <<EOF
Szinopszis: ./frissit [ -n | -i | -c ]
-n csak napirajz
-i csak a'index
-c csak comedycentral
EOF
    exit 0
fi
# Run from the script's own directory.
cd $(echo $0 | sed -r 's|^(.*/).+$|\1|')
# CSV index of downloaded comics: "filename;date;source".
FILE=mine.csv
DATE=$(date -Idate | sed 's/-/_/g')
# napirajz.hu: scrape and download today's comic images
# (skipped when -i or -c restricts the run to another source).
if [ "$1" != -i -a "$1" != -c ]; then
    URL="http://napirajz.hu/"
    curl -f ${URL} >tmp 2>/dev/null || exit 1
    # Extract image URLs from the page; log which regex matched to stderr.
    while true; do
        PICS=`cat tmp | sed -nr 's;^.*<img .*src="(http://napirajz\.hu/wp-content/uploads/[^ ]+jpe?g).+$;\1;p'`
        [ -n "$PICS" ] && echo >&2 "regex 1" && break
        echo >&2 "regex -" && break
    done
    # Download each new image and append it to the index.
    for i in $PICS; do
        NODIR=$(echo $i | sed -r 's|^.*/(.+)$|\1|')
        [ -f $NODIR ] && continue
        wget "${i}"
        echo "$NODIR;$DATE;napi"
    done >> $FILE
fi
# comedycentral.hu: scrape Grafitember comics from the flipbook galleries
# (skipped when -n or -i restricts the run to another source).
if [ "$1" != -n -a "$1" != -i ]; then
    URL="http://www.comedycentral.hu/photos/007_cruton/?flipbook=napirajz-comedy-2012-1 \
    http://www.comedycentral.hu/photos/01_csigolyaid/?flipbook=napirajz-comedy \
    http://www.comedycentral.hu/photos/103_krumpli?flipbook=napirajz-comedy-2013 \
    http://www.comedycentral.hu/photos/103_krumpli?flipbook=napirajz-comedy-2014 \
    http://www.comedycentral.hu/photos/408_vaku/?flipbook=napirajz-comedy-2015"
    # Full-size image URLs: thumbnails carry ?width=70/?height=53 suffixes.
    PICS=`curl $URL 2>/dev/null \
    | sed -rn 's:^.+src=.(.+)\?(width=70|height=53).+Grafitember.+$:\1:p'`
    for i in $PICS; do
        NODIR=$(echo $i | sed -r 's|^.*/(.+)$|\1|')
        [ -f "$NODIR" ] && continue
        wget "$i"
        echo "$NODIR;$DATE;komedia"
    done >> $FILE
fi
# NOTE(review): 'tmp' only exists when the napirajz branch ran, so rm
# warns under -c; harmless but could be 'rm -f tmp'.
rm tmp
# Regenerate the RSS feed from the updated index.
./comedy-rss
| true |
2072f1ebc8aab7119d8dd5d33d1a861704338d2d | Shell | euryecetelecom/eurybox | /sw/src/eurybox.functions.vm | UTF-8 | 11,754 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#EuryBOX vm functions file
# Desc: rename a VM through libvirt by appending a timestamp suffix.
# The domain is stopped if running, its XML and every disk image are
# renamed to <name>_<timestamp>, and the domain is re-defined.
# 1 arg required: vm_name
eurybox_vm_rename ()
{
  local NAME=$1
  local STATUS
  local DISKS
  local DISKS_NUM
  local NOW=$(date +"%Y_%m_%d@%H_%M_%S")
  local TMP_DESC="${EURYBOX_TMP_FOLDER}/${NAME}_${NOW}"
  # Does a domain with this exact name exist (running or not)?
  local RENAME_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS list --all | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
  STATUS=$?
  if [[ $STATUS -eq 0 ]]
  then
    if [[ $RENAME_OUT == "" ]]
    then
      eurybox_display_message warning VM "Rename not possible on non-present vm: $NAME"
    else
      # Force-stop the domain if it is currently running.
      RENAME_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
      if [[ !($RENAME_OUT == "") ]]
      then
        eurybox_display_message warning VM "Forcing stop of running vm: $NAME"
        RENAME_OUT=`sudo sh -c "virsh $EURYBOX_VIRSH_OPTIONS destroy $NAME" 2>&1`
      fi
      # Dump the domain XML to TMP_DESC, then rename each backing disk
      # and patch its path inside the dump.
      eurybox_export_vm_config $NAME $TMP_DESC
      DISKS=( $(sudo virsh $EURYBOX_VIRSH_OPTIONS domblklist $NAME | awk '($2 != "-") {print $2}') )
      STATUS=$?
      if [[ !($STATUS -eq 0) ]]
      then
        eurybox_display_message error VM "Error on VM description dump: $NAME - error: $STATUS:\n$DISKS"
      else
        if [[ !($DISKS = "") ]]
        then
          DISKS_NUM=${#DISKS[@]}
          eurybox_display_message message VM "VM $NAME - $DISKS_NUM disk(s) found"
          for (( DISK_NUM=0;DISK_NUM<$DISKS_NUM;DISK_NUM++ ))
          do
            RENAME_OUT=`sudo mv ${DISKS[DISK_NUM]} ${DISKS[DISK_NUM]}_${NOW} 2>&1`
            STATUS=$?
            if [[ !($STATUS -eq 0) ]]
            then
              eurybox_display_message error VM "Error on VM disk rename: $NAME - ${DISKS[DISK_NUM]} - error $STATUS:\n$RENAME_OUT"
            else
              eurybox_display_message debug VM "VM $NAME - disk renamed: ${DISKS[DISK_NUM]} -> ${DISKS[DISK_NUM]}_${NOW}"
              # Rewrite the matching file='…' attribute in the XML dump
              # (sq carries a literal single quote into the awk program).
              RENAME_OUT=`sudo cat $TMP_DESC | awk '{ if($NF == "file=" sq disk_name sq "/>") { for(i=1; i<(NF); i++) { if(i==1) { disk=$i } else { disk=disk" "$i } } ; disk=disk" file=" sq disk_new_name sq "/>" ; print disk } else { print $0 } }' disk_name=${DISKS[DISK_NUM]} disk_new_name=${DISKS[DISK_NUM]}_${NOW} sq=\' 2>&1`
              STATUS=$?
              if [[ !($STATUS -eq 0) ]]
              then
                eurybox_display_message error VM "Error on VM disk description update: $NAME - ${DISKS[DISK_NUM]} - error $STATUS:\n$RENAME_OUT"
              else
                sudo sh -c "echo \"$RENAME_OUT\" > $TMP_DESC"
                eurybox_display_message debug VM "VM $NAME - disk description renamed: ${DISKS[DISK_NUM]} -> ${DISKS[DISK_NUM]}_${NOW}"
              fi
            fi
          done
        else
          eurybox_display_message warning VM "No domain disk detected for VM $NAME"
        fi
      fi
      # Replace the <name> element and drop <uuid> so libvirt assigns a
      # fresh one when the renamed domain is defined.
      RENAME_OUT=`sudo sh -c "cat $TMP_DESC | awk '{ if (\\\$NF == \"<name>\"name\"</name>\") {print \"<name>\"new_name\"</name>\"} else { if (\\\$0 !~ /<uuid>/) {print \\\$0} } }' name=$NAME new_name=${NAME}_${NOW}" 2>&1`
      STATUS=$?
      if [[ !($STATUS -eq 0) ]]
      then
        eurybox_display_message error VM "Error on VM domain name description updated: $NAME - error $STATUS:\n$RENAME_OUT"
      else
        RENAME_OUT=`sudo sh -c "echo \"$RENAME_OUT\" > $TMP_DESC" 2>&1`
        STATUS=$?
        if [[ !($STATUS -eq 0) ]]
        then
          eurybox_display_message error RENAME "VM $NAME - domain name description update failed - error $STATUS:\n$RENAME_OUT"
        else
          eurybox_display_message debug RENAME "VM $NAME - domain name description updated: $NAME -> ${NAME}_${NOW}"
        fi
        # Undefine the old domain and define the renamed one from the
        # patched XML.
        RENAME_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS undefine $NAME 2>&1`
        STATUS=$?
        if [[ !($STATUS -eq 0) ]]
        then
          eurybox_display_message error VM "Error on VM domain name undefine: $NAME - error $STATUS:\n$RENAME_OUT"
        else
          RENAME_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS define $TMP_DESC 2>&1`
          STATUS=$?
          if [[ !($STATUS -eq 0) ]]
          then
            eurybox_display_message error VM "Error on VM domain name define: $TMP_DESC - error $STATUS:\n$RENAME_OUT"
          else
            eurybox_display_message message VM "VM renamed $NAME -> ${NAME}_${NOW}"
          fi
        fi
      fi
    fi
  else
    # NOTE(review): "\$RENAME_OUT" below escapes the dollar sign, so the
    # literal variable name is printed rather than its value — looks like
    # a typo; confirm.
    eurybox_display_message error VM "Virsh call issue - error $STATUS:\$RENAME_OUT"
  fi
}
# Desc: shutdown a VM through libvirt ACPI emulation (doesn't work for OS
# without ACPI support). Retries up to EURYBOX_MAX_RETRY_ON_FAILURE times,
# then falls back to a forced 'virsh destroy'.
# 1 arg required: vm_name
eurybox_vm_shutdown_acpi ()
{
  local NAME=$1
  local STATUS
  local VIRSH_OUT
  #Waiting for the VM to be stopped
  local EURYBOX_VM_NAME
  local EURYBOX_VM_STATE=1
  local EURYBOX_VM_STOPPED=0
  local NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
  # Loop until the domain disappears from 'virsh list' or retries run out.
  while [[ ( $EURYBOX_VM_STATE -eq 1 ) && !( $NB_TRY_LEFT -eq 0 ) ]]
  do
    EURYBOX_VM_NAME=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
    if [[ -z "$EURYBOX_VM_NAME" ]]
    then
      EURYBOX_VM_STATE=0
    else
      # Send the ACPI shutdown request once; afterwards just poll.
      if [[ $EURYBOX_VM_STOPPED -eq 0 ]]
      then
        VIRSH_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS shutdown $NAME 2>&1`
        STATUS=$?
        if [[ !($STATUS -eq 0) ]]
        then
          NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
          eurybox_display_message debug VM "VM $NAME - stop failed - error $STATUS:\n$VIRSH_OUT\nTrying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds / $NB_TRY_LEFT left"
        else
          EURYBOX_VM_STOPPED=1
          eurybox_display_message message VM "Successfully initiated VM $NAME stop - waiting for vm poweroff"
          NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
        fi
      else
        NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
        eurybox_display_message debug VM "VM $NAME - still running - trying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds / $NB_TRY_LEFT left"
      fi
      sleep ${EURYBOX_ERROR_TEMPORISATION_TIME}
    fi
  done
  # Graceful path exhausted: force the domain off and verify.
  if [[ $NB_TRY_LEFT -eq 0 ]]
  then
    eurybox_display_message warning VM "VM $NAME - ACPI stop failed - VM still running after $EURYBOX_MAX_RETRY_ON_FAILURE:\n$EURYBOX_VM_NAME\n error $STATUS:\n$VIRSH_OUT\nForcing stop"
    VIRSH_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS destroy $NAME 2>&1`
    STATUS=$?
    if [[ !($STATUS -eq 0) ]]
    then
      eurybox_display_message error VM "VM $NAME - forced stop failed - error $STATUS:\n$VIRSH_OUT"
    else
      eurybox_display_message message VM "Successfully initiated VM $NAME forced stop"
      EURYBOX_VM_NAME=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
      if [[ -z "$EURYBOX_VM_NAME" ]]
      then
        eurybox_display_message message VM "VM $NAME - forced stop OK"
      else
        eurybox_display_message error VM "VM $NAME - forced stop failed - VM still running:\n$EURYBOX_VM_NAME"
      fi
    fi
  else
    eurybox_display_message message VM "VM $NAME - ACPI stop OK"
  fi
}
# Desc: shutdown a VM through an ssh connection ('halt -p' inside the
# guest), polling libvirt until the domain stops; falls back to a forced
# 'virsh destroy' after EURYBOX_MAX_RETRY_ON_FAILURE attempts.
# 4 args required: vm_name vm_host vm_user vm_port
eurybox_vm_shutdown_ssh ()
{
  local NAME=$1
  local HOST=$2
  local USER=$3
  local PORT=$4
  local EURYBOX_VM_STATE=1
  local EURYBOX_VM_STOPPED=0
  local STATUS
  local EURYBOX_VM_NAME
  local SSH_OUT
  local VIRSH_OUT
  local NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
  # Loop until the domain disappears from 'virsh list' or retries run out.
  while [[ ( $EURYBOX_VM_STATE -eq 1 ) && !( $NB_TRY_LEFT -eq 0 ) ]]
  do
    EURYBOX_VM_NAME=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
    if [[ -z "$EURYBOX_VM_NAME" ]]
    then
      EURYBOX_VM_STATE=0
    else
      # Issue the in-guest poweroff once; afterwards just poll libvirt.
      if [[ $EURYBOX_VM_STOPPED -eq 0 ]]
      then
        SSH_OUT=`ssh $EURYBOX_SSH_OPTIONS -p $PORT $USER@$HOST "halt -p >/dev/null &" 2>&1`
        STATUS=$?
        if [[ !($STATUS -eq 0) ]]
        then
          NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
          eurybox_display_message debug VM "VM $NAME - SSH stop failed - trying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds:\n$SSH_OUT\n$NB_TRY_LEFT left"
        else
          EURYBOX_VM_STOPPED=1
          eurybox_display_message debug VM "Successfully initiated vm $NAME stop - waiting for vm poweroff"
          NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
        fi
      else
        NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
        eurybox_display_message debug VM "VM $NAME - still running - trying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds:\n$EURYBOX_VM_NAME\n$NB_TRY_LEFT left"
      fi
      sleep ${EURYBOX_ERROR_TEMPORISATION_TIME}
    fi
  done
  # Graceful path exhausted: force the domain off and verify.
  if [[ $NB_TRY_LEFT -eq 0 ]]
  then
    eurybox_display_message warning VM "VM $NAME - SSH stop failed - VM still running after $EURYBOX_MAX_RETRY_ON_FAILURE\nForcing stop"
    VIRSH_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS destroy $NAME 2>&1`
    STATUS=$?
    if [[ !($STATUS -eq 0) ]]
    then
      eurybox_display_message error VM "VM $NAME - forced stop failed - error $STATUS:\n$VIRSH_OUT"
    else
      eurybox_display_message message VM "Successfully initiated VM $NAME forced stop"
      EURYBOX_VM_NAME=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
      if [[ -z "$EURYBOX_VM_NAME" ]]
      then
        eurybox_display_message message VM "VM $NAME - forced stop OK"
      else
        eurybox_display_message error VM "VM $NAME - forced stop failed - VM still running:\n$EURYBOX_VM_NAME"
      fi
    fi
  else
    eurybox_display_message message VM "VM $NAME - SSH stop OK"
  fi
}
# Desc: start a VM through libvirt and verify startup success by touching
# a file over ssh; considers the VM up once the ssh probe succeeds.
# 4 args required: vm_name vm_host vm_user vm_port
eurybox_vm_start ()
{
  local NAME=$1
  local HOST=$2
  local USER=$3
  local PORT=$4
  local STATUS
  local VM_OUT
  local EURYBOX_VM_AVAILABLE
  local VIRSH_OUT
  #Check if VM is operationnal (at network level)
  local EURYBOX_VM_STATE=0
  local EURYBOX_VM_STARTED=0
  local NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
  # Loop until the ssh probe succeeds or retries run out.
  while [[ ( $EURYBOX_VM_STATE -eq 0 ) && !( $NB_TRY_LEFT -eq 0 ) ]]
  do
    EURYBOX_VM_AVAILABLE=`ssh $EURYBOX_SSH_OPTIONS -p $PORT $USER@$HOST "touch eurybox_check_ssh_ok" 2>&1`
    STATUS=$?
    if [[ $STATUS -eq 0 ]]
    then
      EURYBOX_VM_STATE=1
    else
      # Kick off the domain once; 'already running' is tolerated.
      if [[ $EURYBOX_VM_STARTED -eq 0 ]]
      then
        VIRSH_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS start $NAME 2>&1`
        STATUS=$?
        if [[ !($STATUS -eq 0) ]]
        then
          VM_OUT=`sudo virsh $EURYBOX_VIRSH_OPTIONS list | awk '{ if ($2 == name) {print $2} }' name=$NAME 2>&1`
          if [[ -z "$VM_OUT" ]]
          then
            NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
            eurybox_display_message debug VM "Error on vm $NAME start - trying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds:\n$VIRSH_OUT\n$NB_TRY_LEFT left"
          else
            eurybox_display_message debug VM "VM $NAME already running - waiting for network contact"
            EURYBOX_VM_STARTED=1
            NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
          fi
        else
          EURYBOX_VM_STARTED=1
          eurybox_display_message debug VM "Successfully initiated vm $NAME start - waiting for network contact"
          NB_TRY_LEFT=$EURYBOX_MAX_RETRY_ON_FAILURE
        fi
      else
        NB_TRY_LEFT=$(( $NB_TRY_LEFT - 1 ))
        eurybox_display_message debug VM "VM $NAME - still unreachable - trying again in ${EURYBOX_ERROR_TEMPORISATION_TIME} seconds\n$EURYBOX_VM_AVAILABLE\n$NB_TRY_LEFT left"
      fi
      sleep ${EURYBOX_ERROR_TEMPORISATION_TIME}
    fi
  done
  if [[ $NB_TRY_LEFT -eq 0 ]]
  then
    eurybox_display_message warning VM "VM $NAME - start failed - no network contact after $EURYBOX_MAX_RETRY_ON_FAILURE:\n${EURYBOX_VM_AVAILABLE}"
  else
    eurybox_display_message message VM "VM $NAME - start OK"
  fi
}
| true |
0a48dbe8bc310ae9d570b35076da27a7122003cb | Shell | FrenchBear/Linux | /Utils/rfind.sh | UTF-8 | 91 | 2.765625 | 3 | [] | no_license | #!/bin/sh
# List every .c file under the current directory that contains the pattern
# given as $1. Fixed: $1 was unquoted (multi-word patterns split into
# several grep arguments); the per-line 'while read' pipe also broke on
# filenames with backslashes — batching with -exec handles both and avoids
# one grep per file. '-e' protects patterns that start with a dash.
find . -type f -name "*.c" -exec grep -l -e "$1" -- {} +
| true |
d98f889d8024d908f8c157553ec25d63421e5f74 | Shell | alexiudice/usefulMiscellaneous | /aws-ecs-helper.sh | UTF-8 | 2,002 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# shellcheck shell=dash
# alexiudice
#
# A script to quickly get the private IP addresses of the ec2 containers
# holding services running in a specific cluster.
#
# Makes it easy to get these and ssh in without having to go use the browser.
#
# NOTE: This works depending on the AWS Region the terminal CLI is signed onto with. You may need
# to run a CLI graphgrid login into the correct region.
#   For example, to use these scripts to access a frazr core in us-east-1 you need to first run:
# graphgrid ecr login --profile graphgrid --region us-east-1 --maven
#
# Usage:
# ec_service_ip <cluster> <service>
# ec_graph_ip <cluster> <graphAbbreviationName>
#
#
# Ex:
# ec_service_ip vive-stag search
#
# Output:
# 172.26.34.4
# 172.26.34.118
#
# Ex:
# ec_graph_ip vive-stag ongdb
#
# Output:
# 172.26.36.12
# Print the private IPs of the EC2 instances hosting a service's tasks.
# $1 = cluster (vive-stag, gg-dev, etc.)
# $2 = service name (search, nlp, fuze, etc.)
# Chain: service tasks -> container instances -> EC2 instance ids -> IPs.
# NOTE: the unquoted $(echo ...) expansions are deliberate word splitting
# so each ARN/ID becomes a separate CLI argument.
# NOTE(review): the awk '{print $1 " " $2}' filters keep at most two
# entries per step, and an empty task list makes describe-tasks fail —
# confirm both are acceptable for the intended clusters.
function ec_service_ip () {
	cluster="$1-frazr-core"
	service="$1-$2"
	taskArns=$(aws ecs list-tasks --output text --query taskArns[*] --cluster "$cluster" --service-name "$service" | awk '{print $1 " " $2}')
	containerInstanceArns=$(aws ecs describe-tasks --output text --query tasks[*].containerInstanceArn --cluster "$cluster" --tasks $(echo "$taskArns") | awk '{print $1 " " $2}')
	ec2InstanceIds=$(aws ecs describe-container-instances --output text --query containerInstances[*].ec2InstanceId --cluster "$cluster" --container-instances $(echo "$containerInstanceArns") | awk '{print $1 " " $2}')
	aws ec2 describe-instances --output text --query Reservations[*].Instances[*].PrivateIpAddress --instance-ids $(echo "$ec2InstanceIds")
}
# Print the private IP of the EC2 instance tagged Cluster=<cluster>-<graph>.
# $1 = cluster (vive-stag, gg-dev, etc.)
# $2 = graph abbreviation name (neo, ongdb)
function ec_graph_ip () {
	tagValue="$1-$2"
	filters="Name=tag:Cluster,Values=$tagValue"
	aws ec2 describe-instances --output text --query Reservations[*].Instances[*].PrivateIpAddress --filters $(echo "$filters")
}
| true |
a1b6595cd36f8c9f5a07fe239be3a921b0191912 | Shell | fjudith/kubehttpbin | /_scripts/deploy.sh | UTF-8 | 841 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Build and push Docker images to quay.io, then upgrade the Helm release.
# Expects QUAY_EMAIL/QUAY_USERNAME/QUAY_PASSWORD and
# KUBECONFIG_DATA_BASE64 in the environment (CI secrets).
#
cd "$(dirname "$0")" || exit 1
# Authenticate and push via the repo Makefile.
# NOTE(review): 'docker login -e' was removed in newer Docker CLIs —
# confirm the CI image pins an old enough Docker.
export IMAGE_PREFIX=arschles
docker login -e="$QUAY_EMAIL" -u="$QUAY_USERNAME" -p="$QUAY_PASSWORD" quay.io
make -C .. docker-build docker-push
# Materialize the kubeconfig from the env var and point helm at it.
echo $KUBECONFIG_DATA_BASE64 | base64 --decode > ./kubeconfig
export KUBECONFIG=./kubeconfig
# Download the helm CLI and run 'helm upgrade' against the chart.
HELM_DOWNLOAD_URL="https://storage.googleapis.com/kubernetes-helm/helm-v2.2.2-linux-amd64.tar.gz"
echo "downloading $HELM_DOWNLOAD_URL"
curl -o helm.tar.gz $HELM_DOWNLOAD_URL
tar -zxvf helm.tar.gz
mv linux-amd64/helm ./helm
chmod +x ./helm
# NOTE(review): 'helm list -q' returns every release — this assumes the
# cluster has exactly one; confirm.
LAST_RELEASE=$(./helm list -q)
./helm upgrade $LAST_RELEASE ../chart
| true |
b01a6058409299cd3ef069340bb97fa01263673f | Shell | also/settings | /shell-profile.sh | UTF-8 | 1,240 | 3.21875 | 3 | [] | no_license | export CLICOLOR=1
export PATH=$HOME/.rbenv/bin:$HOME/.jenv/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/X11/bin:/usr/local/git/bin:$HOME/Library/Haskell/bin
# Initialize each version manager only when it is installed on this machine.
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi
if which jenv > /dev/null; then eval "$(jenv init -)"; fi
if which nodenv > /dev/null; then eval "$(nodenv init -)"; fi
source $HOME/work/settings/helpers.sh
function prompt_git() {
  # If $PWD is inside a git work tree, print " on <name>" where <name> falls
  # back from branch (symbolic ref) -> exact-match describe -> short SHA ->
  # the literal "(unknown)". Prints nothing outside a repo.
  # check if we're in a git repo. (fast)
  git rev-parse --is-inside-work-tree &>/dev/null || return
  branchName="$(git symbolic-ref --quiet --short HEAD 2> /dev/null || \
    git describe --all --exact-match HEAD 2> /dev/null || \
    git rev-parse --short HEAD 2> /dev/null || \
    echo '(unknown)')";
  echo " on $branchName"
}
function set_prompts() {
  # Build the interactive prompt: terminal title = cwd, then a bold line with
  # the working directory (green, color 190) and git info (purple, color 141),
  # and a plain "$ " on the next line.
  # Fix: dropped the `black` color variable — it was assigned but never used.
  local bold=$(tput bold)
  local green=$(tput setaf 190)
  local purple=$(tput setaf 141)
  local reset=$(tput sgr0)
  PS1="\[\033]0;\w\007\]" # terminal title (set to the current working directory)
  PS1+="\n\[$bold\]"
  PS1+="\[$green\]\w" # working directory
  PS1+="\[$purple\]\$(prompt_git)"
  PS1+="\n"
  PS1+="\[$reset\]\\$ "
}
set_prompts
unset set_prompts
| true |
b91978eefe5e139d2016644ed9498b1cd932d3a3 | Shell | laisaquinoo/Comandos-ShellScript | /Criar-pasta | UTF-8 | 109 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Create a folder named UNIP when the first argument is exactly "UNIP".
# Fix: "$V1" is quoted — the original `[ $V1 = "UNIP" ]` aborted with
# "unary operator expected" whenever the script was run without arguments.
V1=$1
if [ "$V1" = "UNIP" ]
then
	mkdir UNIP
	echo "criou uma nova pasta chamada UNIP!"
fi
| true |
2f2134d7172d721cb181536bc42844b5fe78c2fa | Shell | Linaro/OpenCSD | /decoder/tests/run_pkt_decode_single.bash | UTF-8 | 2,740 | 3.34375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#################################################################################
# Copyright 2018 ARM. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
# OpenCSD library: run single test
#
#
#################################################################################
# Usage options:-
# * default: run test on binary + libs in ./bin/linux64/rel
# run_pkt_decode_tests.bash <test> <options>
#
# * use installed opencsd libraries & program
# run_pkt_decode_tests.bash use-installed <test> <options>
#
#
# Run trc_pkt_lister in decode mode on one snapshot.
# Optional arg 1 "use-installed" switches from the local ./bin build to the
# system-installed binaries; the next arg (if any) names the snapshot; all
# remaining args are forwarded to trc_pkt_lister.
OUT_DIR=./results
SNAPSHOT_DIR=./snapshots
BIN_DIR=./bin/linux64/rel/
TEST="a57_single_step"
mkdir -p ${OUT_DIR}
if [ "$1" == "use-installed" ]; then
  BIN_DIR=""
  shift
fi
if [ "$1" != "" ]; then
  TEST=$1
  shift
fi
echo "Running trc_pkt_lister on single snapshot ${TEST}"
if [ "${BIN_DIR}" != "" ]; then
  echo "Tests using BIN_DIR = ${BIN_DIR}"
  # Point the dynamic loader at the freshly built libraries
  # (BIN_DIR ends in '/', so the value is "<dir>/.").
  export LD_LIBRARY_PATH=${BIN_DIR}.
  echo "LD_LIBRARY_PATH set to ${BIN_DIR}"
else
  echo "Tests using installed binaries"
fi
# === test the decode set ===
${BIN_DIR}trc_pkt_lister -ss_dir "${SNAPSHOT_DIR}/${TEST}" $@ -decode -logfilename "${OUT_DIR}/${TEST}.ppl"
echo "Done : Return $?"
| true |
98543fb83050f48d820e31394ab0cdea9ed0ab9c | Shell | joscormir/grvcMigration | /scripts/system/filesToMigrateDebian.sh~ | UTF-8 | 846 | 3.515625 | 4 | [] | no_license | #! /bin/bash
#Define variables to use in the script
# Lists files modified between lastDate and actualDate under /home, /disk and
# /var, appending the results to per-tree text files in $dirToSave.
# Fix: find's -newermt test takes a *date string*, but the original passed it
# the marker-file paths (which GNU find rejects); the file-comparison form is
# -newer <file>, so every range test now uses the consistent, correct pattern
#   -newer $lastDateFilePath ! -newer $actualDateFilePath
lastDate="2014-09-19"
actualDate="2015-03-17"
# Marker files carrying the boundary mtimes for the -newer comparisons.
touch /home/joscormir/lastDate.file -d $lastDate
touch /home/joscormir/actualDate.file -d $actualDate
lastDateFilePath=/home/joscormir/lastDate.file
actualDateFilePath=/home/joscormir/actualDate.file
dirToSave="/home/joscormir"
clear
echo "Extracting files that have changed in /home"
cd /home
find . -type f -newer $lastDateFilePath ! -newer $actualDateFilePath >> $dirToSave/homeChanges.txt
echo "Extracting files that have changed in /disk"
cd /disk
find . -type f -newer $lastDateFilePath ! -newer $actualDateFilePath >> $dirToSave/diskChanges.txt
echo "Extracting files that have changed in /var"
cd /var
find . -type f -newer $lastDateFilePath ! -newer $actualDateFilePath >> $dirToSave/varChanges.txt
echo "Finished"
| true |
a0a412fef0f6c1f964d482e082e0a6f160fb6ebc | Shell | romansokolovski/cisc220_3 | /tpircs.sh | UTF-8 | 159 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Read a whitespace-separated list of words and print each one reversed (rev).
# Fix: copy the whole array — the original `newArray=($listWords)` expands
# only element 0 of the array, so every word after the first was dropped.
read -p "Insert a list of words: " -a listWords
newArray=("${listWords[@]}")
for (( i=0; i<${#newArray[@]}; i++ )); do
	echo ${newArray[i]}|rev;
done
| true |
4420a1bcf09d4bd8ea5353682633614d987ac284 | Shell | melodylail/topologies | /distributed_unet/Horovod/run_singleworker_hvd.sh | UTF-8 | 1,411 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# To run: bash run_singleworker_hvd.sh <logidr> <hostfile> <workers per node> <inter op threads>
# Note: The total number of workers deployed will be the number of workers per node * number of nodes
logdir=${1:-_singleworker}      # Default suffix is _singleworker
node_ips=${2:-hosts.txt}        # Default is the hosts.txt file
export num_workers_per_node=${3:-1}     # Default 1 worker per node
export num_inter_threads=${4:-2}        # Default to 2 inter_op threads
export LD_LIBRARY_PATH=/usr/local/openmpi:$LD_LIBRARY_PATH
export physical_cores=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | sed "s/ //g"` # Total number of physical cores per socket
export OMP_NUM_THREADS=$physical_cores
export num_nodes=`cat ${node_ips} | sed '/^\s*$/d' | wc -l` # Hosts.txt should contain a single host per line
export num_processes=$(( $num_nodes * $num_workers_per_node ))
export ppr=2 # Two processes per resource (e.g. socket)
echo "Running $num_workers_per_node worker(s)/node on $num_nodes nodes..."
echo "Synching hosts.."
bash synch_servers.sh
# NOTE(review): $pe is never assigned in this script, so --map-by expands to
# ".:pe=" — confirm whether a <cores per process> value was intended here.
HOROVOD_FUSION_THRESHOLD=134217728 /usr/local/openmpi/bin/mpirun \
        -x HOROVOD_FUSION_THRESHOLD \
        -x LD_LIBRARY_PATH -x OMP_NUM_THREADS \
        -np $num_processes --hostfile $node_ips -bind-to none \
        --map-by ppr:$ppr:socket:pe=$pe \
        --report-bindings --oversubscribe \
        bash exec_singleworker.sh $logdir $ppr $num_inter_threads
| true |
e70c4c96791405f2c344a4938bb861ee39a69332 | Shell | OnGle/client | /sub/initcmd/embedded/scripts/default/google-chrome | UTF-8 | 517 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Launch Google Chrome routed through the wireleap SOCKS5 proxy named by
# $WIRELEAP_SOCKS (host:port), with a dedicated profile, in incognito mode.
set -e
fatal() { echo "fatal: $*" 1>&2; exit 1; }
[ "$WIRELEAP_SOCKS" ] || fatal "WIRELEAP_SOCKS not set"
if [ "$(uname -s)" = "Darwin" ]; then
    cmd="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
    [ -e "$cmd" ] || fatal "$cmd not found"
else
    # Elsewhere this script is expected to be named after the real browser
    # binary (google-chrome), which must be resolvable on PATH.
    cmd="$(basename "$0")"
    command -v "$cmd" >/dev/null || fatal "$cmd not found"
fi
exec "$cmd" \
    --proxy-server="socks5://$WIRELEAP_SOCKS" \
    --user-data-dir="$HOME/.config/google-chrome-wireleap" \
    --incognito \
    "$@"
| true |
a65eef7edafc7dc263e35f5aac409bbd90ab4975 | Shell | eayunstack/diskimage-builder | /tests/image_output_formats.bash | UTF-8 | 504 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Exercise build_test_image for each supported output format, individually and
# as one comma-separated combined invocation. Skips (successfully) when
# qemu-img is not installed.
# Fix: replaced deprecated `which` with the portable `command -v` probe.
set -eux
set -o pipefail
source $(dirname $0)/test_functions.bash
test_formats="tar raw qcow2"
if ! command -v qemu-img >/dev/null 2>&1; then
    echo "Warning: No qemu-img binary found, cowardly refusing to run tests."
    exit 0
fi
# One build per individual format, plus one with no format argument ('').
for format in '' $test_formats; do
    build_test_image $format
    echo "Test passed for output formats '$format'."
done
# All formats in a single comma-separated invocation.
combined_format=$(echo $test_formats | tr ' ' ',')
build_test_image $combined_format
echo "Test passed for output format '$combined_format'."
| true |
6cdb6f1024c3c3802122fd40087e39aa0bc81391 | Shell | ivantichy/koncentrator-backend | /Koncentrator/test/Scenarios/vytvoreni_vpn_TAP-ADVANCED-UDP.sh | UTF-8 | 2,132 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Scenario test: create a TAP-ADVANCED UDP sub-VPN end to end against locally
# started CERT (port 10001) and VPN (port 10002) HTTP API handlers.
set -ex
set -o pipefail
# pkill exits 1 when nothing matches, so -e is suspended for the cleanup.
set +e
pkill -e -f ".*cz.ivantichy.httpapi.handlers.vpnapi.*"
set -e
# Start the VPN and CERT API handlers in the background; give them 5s to bind.
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.httpapi.handlers.vpnapi.RunnerVPN &
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.httpapi.handlers.vpnapi.RunnerCERT &
sleep 5
#CERT createCa
curl -f -v -X PUT "http://127.0.0.1:10001/createca" --data '{"subvpn_name" : "tap-advanced-12345", "subvpn_type" : "tap-advanced", "domain" : "tap-advanced-12345.tap-advanced.koncentrator.cz", "ca_valid_days" : 3650}' -o ca.json
#CERT generateServer
wget "http://127.0.0.1:10001/generateserver/?subvpn_name=tap-advanced-12345&subvpn_type=tap-advanced&common_name=tap-advanced-12345&domain=tap-advanced-12345.tap-advanced.koncentrator.cz&server_valid_days=3650" -O server.json
chmod 666 *.json
# Augment the CA/server descriptors with the server-side network parameters.
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json ip_range 123.123.123.123/16
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json node 1
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json server_device tap1
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json server_management_port 20001
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json server_port 15001
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json server_protocol udp
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter ca.json server_domain_name tap-advanced.koncentrator.cz
java -classpath "Koncentrator/*:Koncentrator/lib/*" cz.ivantichy.support.JSON.test.JSONAddParameter server.json server_commands ""
#VPN createsubvpn
curl -f -v -X PUT "http://127.0.0.1:10002/createsubvpn" -d @ca.json
#VPN createServer
curl -f -v -X PUT "http://127.0.0.1:10002/createserver" -d @server.json
# Tear the background API handlers back down.
pkill -e -f ".*cz.ivantichy.httpapi.handlers.vpnapi.*"
| true |
090146a8a471372a1a1e2e4fd951f1401f6264dc | Shell | tbbrown/LinuxSetup | /SetupGit.sh | UTF-8 | 510 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Setup Git
# Installs git plus its build/runtime dependencies, sets the author's global
# identity and tool preferences, then installs the repo's template .gitignore
# (backing up any existing one to ~/.gitignore.orig first).
sudo apt-get install libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev
sudo apt-get install git
git config --global user.name "Timothy Brown"
git config --global user.email "tbb@acm.org"
git config --global core.editor gedit
git config --global merge.tool meld
git config --list
if [ -f ~/.gitignore ]; then
    echo "WARNING: Original ~/.gitignore file will be moved to ~/.gitignore.orig and replaced"
    mv ~/.gitignore ~/.gitignore.orig
fi
cp template.gitignore ~/.gitignore
| true |
d96a346fcb66b1a4a3765fe93a365b35882b9770 | Shell | paulpas/pixie_farm | /Ubuntu1804/randomx/miner-stats | UTF-8 | 718 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
#set -xv
# Detects hosts that are up and responding and centrally logs.
#. /etc/profile
#. /etc/bash.bashrc
#. /root/.bashrc
#. /root/.profile
NIC=bond0
function detect_hosts() {
# Print the IPv4 addresses of resolved (non-"incomplete") ARP entries on $NIC.
# Side effect: sets the global array `hosts`.
hosts=($(/usr/sbin/arp -n | grep -v incomplete | grep ${NIC} | awk '{print $1}'))
echo ${hosts[@]}
}
function test_ssh() {
# Exit 0 iff TCP port 22 (ssh) on host $1 accepts a connection within 1 second.
# Fix: "$1" is quoted so an empty or unusual host value cannot be word-split
# or glob-expanded into a different nc argument list.
nc -w 1 "$1" 22 </dev/null &>/dev/null
}
function collect_xmrig_hashes() {
# Print host $1's most recent xmrig "speed" line from the journal, with
# columns 1-3, 6 and 7 blanked by awk so mostly the hashrate figures remain.
# The [x]mrig pattern keeps grep from ever matching its own command line.
ssh -o "StrictHostKeyChecking=no" $1 journalctl -xe 2>/dev/null | grep [x]mrig*.*speed | tail -1 | awk '{ $1=""; $2=""; $3=""; $6=""; $7="";print}'
}
function execute_xmrig_hashes_on_all_hosts() {
# For every ARP-visible host whose ssh port answers, dump its latest hashrate
# line. detect_hosts output is unquoted on purpose: one loop word per IP.
for i in $(detect_hosts); do
test_ssh $i && collect_xmrig_hashes $i
done
}
execute_xmrig_hashes_on_all_hosts
| true |
a849069c38f4ca2ca437565c267d0c6c195d199a | Shell | jpbourbon/http3-speed-compare | /scripts/distributed02/xrun_tsA.sh | UTF-8 | 2,084 | 3.15625 | 3 | [] | no_license | #!/bin/bash
DBPASS=http_speed_quic
DBHOST=db.jpbourbon-httpspeed.io
host=msb.jpbourbon-httpspeed.io
# Index-aligned scenario arrays: HTTP version under test plus the per-scenario
# client delay and post-run sleep (9 scenarios).
HTTPS=('1.1' '2' '3' '1.1' '2' '3' '1.1' '2' '3')
#HTTPS=('3' '1.1' '2' '3' '1.1' '2' '3' '1.1' '2')
DELAY=('10' '10' '10' '10' '10' '10' '10' '10' '10')
SLEEP=('10' '10' '10' '10' '10' '10' '10' '10' '10')
#A-A- could be removed... but just in case...
DOCKERS='A-B- B-B-'
# Private IPs of the remote measurement-server hosts and the shared source tree.
hD="192.168.1.9"
hC="192.168.1.5"
hB="192.168.1.12"
sDIR="/home/debian/go/src/bitbucket.org/jpbourbon/http3-speed-compare/"
sDIR_SCRIPTS="$sDIR/scripts/distributed02"
function stop_processes(){
    # Stop the remote "comparison" server on msB, msC and msD (in that order,
    # each teardown ssh running in the background), then kill any local copy.
    local label addr
    for label in B C D; do
        case "$label" in
            B) addr=$hB ;;
            C) addr=$hC ;;
            D) addr=$hD ;;
        esac
        echo "stopping in ms$label"
        ssh -i ~/debian.pem debian@$addr sDIR=$sDIR http=$http s=$s 'bash -s ' < $sDIR_SCRIPTS/stop_comparison.sh &
    done
    killall comparison
}
stop_processes
sleep 20
echo "starting the process"
# One iteration per scenario index: bring up the three remote measurement
# servers (msD, msC, msB), run the local client in the foreground, then tear
# everything back down before the next scenario.
for sc in ${!HTTPS[@]};
do
    http=${HTTPS[$sc]}
    s=$(($sc+1))
    echo "starting for HTTP/$http and scenarion $s"
    #stop_dockers $s
    echo "starting msD"
    #( ./comparison -http $http -ms D -serv 8002 -ts A -s $s & )
    ssh -i ~/debian.pem debian@$hD sDIR=$sDIR http=$http s=$s 'bash -s ' < $sDIR_SCRIPTS/xrun_tsA-msD-server.sh $http $s $sDIR $hD &
    sleep 1
    echo "starting msC"
    ssh -i ~/debian.pem debian@$hC sDIR=$sDIR http=$http s=$s 'bash -s ' < $sDIR_SCRIPTS/xrun_tsA-msC-server.sh $http $s $sDIR $hC &
    sleep 1
    echo "starting msB"
    ssh -i ~/debian.pem debian@$hB sDIR=$sDIR http=$http s=$s 'bash -s ' < $sDIR_SCRIPTS/xrun_tsA-msB-server.sh $http $s $sDIR $hB &
    sleep 1
    echo "starting Client"
    # -delay/-host pull the per-scenario knobs from the arrays in the prolog.
    $sDIR/comparison -http $http -ms A -conn 8000 -ts A -s $s -dbpass $DBPASS -dbhost $DBHOST -delay ${DELAY[$sc]} -host $host
    sleep ${SLEEP[$sc]}
    echo "Killing processes..."
    stop_processes
    sleep 5
done
echo "Tests finished now collecting results"
cd analysis
# NOTE(review): $1 is forwarded untouched to the R script — its meaning is
# defined there; confirm callers always pass it.
Rscript ./influx_base_noDocker_http_speed-v3.R $1
| true |
a72f1f628a87d590627db9e80dfec598f1cd4f96 | Shell | jcccookie/cs344 | /1.4/bigloops | UTF-8 | 387 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Read the file named by $1; every line holds whitespace-separated integers.
# For each line, print the items with running indices plus the line's count,
# sum, and (truncating) integer average.
# Fixes: the original "lineNum = 0" (spaces around '=') ran a nonexistent
# command instead of assigning, leaving the counter unset and breaking every
# `expr $lineNum + 1`; arithmetic now uses $(( )) instead of spawning expr
# per item; a blank input line reports zeros instead of crashing expr with a
# division by zero.
lineNum=0
while read -r myLine
do
	sum=0
	count=0
	average=0
	lineNum=$((lineNum + 1))
	echo "Processing line $lineNum: $myLine"
	# Intentional unquoted expansion: one loop word per whitespace-separated item.
	for num in $myLine
	do
		count=$((count + 1))
		echo "  Item $count: $num"
		sum=$((sum + num))
	done
	if [ "$count" -gt 0 ]
	then
		average=$((sum / count))
	fi
	echo "  Line count: $count"
	echo "  Line sum: $sum"
	echo "  Line average: $average"
done < "$1"
| true |
41d9a8e6e22b92411d0f065f3bc3e1c969b4944e | Shell | 5g-media/mape-data-importer | /deployment/run.sh | UTF-8 | 1,237 | 2.5625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
# set the variables in the supervisor environment
# Substitute the ENV_* placeholders in supervisord.conf with the values the
# container was started with (Kafka endpoint, InfluxDB credentials/retention).
# NOTE(review): values containing '/' would break these sed expressions.
sed -i "s/ENV_PUBLIC_IP/$PUBLIC_IP/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_KAFKA_PORT/$KAFKA_PORT/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_KAFKA_TRANSLATION_TOPIC/$KAFKA_TRANSLATION_TOPIC/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_DB_NAME/$INFLUXDB_DB_NAME/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_USER/$INFLUXDB_USER/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_PWD/$INFLUXDB_PWD/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_PORT/$INFLUXDB_PORT/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_RETENTION_POLICY_NAME/$INFLUXDB_RETENTION_POLICY_NAME/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_RETENTION_POLICY_DURATION/$INFLUXDB_RETENTION_POLICY_DURATION/g" /etc/supervisor/supervisord.conf
sed -i "s/ENV_INFLUXDB_RETENTION_POLICY_REPLICATION/$INFLUXDB_RETENTION_POLICY_REPLICATION/g" /etc/supervisor/supervisord.conf
# Restart services
service supervisor start && service supervisor status
# Makes services start on system start
update-rc.d supervisor defaults
echo "Initialization completed."
tail -f /dev/null # Necessary in order for the container to not stop
| true |
2dcc9fe040b2ff485230caa918e3536aa9f4a9f9 | Shell | midwire/bash.env | /plugins/vagrant/_vagrant.sh | UTF-8 | 2,840 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
__pwdln() {
  # Print (without a trailing newline) the number of path components in $PWD,
  # e.g. /usr/local -> 2. Works by appending '/' and stripping one leading
  # "*/" segment per iteration until the string is empty.
  # Fix: the original used global pwdmod/itr, leaking them into every shell
  # that sources this completion file; they are now locals.
  local pwdmod="${PWD}/"
  local itr=0
  until [[ -z "$pwdmod" ]];do
    itr=$((itr+1))
    pwdmod="${pwdmod#*/}"
  done
  echo -n $((itr-1))
}
__vagrantinvestigate() {
  # Print the path of the project's .vagrant state file and return 0:
  # first try $PWD/.vagrant, then strip one trailing path component at a time
  # (the loop starts at 2 so it never reaches /), checking each ancestor.
  # Returns 1 when no .vagrant file is found.
  # Side effect: uses globals pwdmod2 and i (this file is sourced).
  if [[ -f "${PWD}/.vagrant" ]];then
    echo "${PWD}/.vagrant"
    return 0
  else
    pwdmod2="${PWD}"                        # Since we didn't find a $PWD/.vagrant, we're going to pop
    for (( i=2; i<=$(__pwdln); i++ ));do    # a directory off the end of the $pwdmod2 stack until we
      pwdmod2="${pwdmod2%/*}"               # come across a ./.vagrant. /home/igneous/proj/1 will start at
      if [[ -f "${pwdmod2}/.vagrant" ]];then # /home/igneous/proj because of our loop starting at 2.
        echo "${pwdmod2}/.vagrant"
        return 0
      fi
    done
  fi
  return 1
}
_vagrant()
{
  # Bash programmable completion for the `vagrant` CLI:
  #   word 1 -> subcommand; word 2 -> subcommand argument (installed box name,
  #   running VM name, `box` subcommand, ...); word 3 -> the box name for
  #   `vagrant box remove|repackage`.
  cur="${COMP_WORDS[COMP_CWORD]}"
  prev="${COMP_WORDS[COMP_CWORD-1]}"
  commands="box destroy halt help init package provision reload resume ssh ssh-config status suspend up version"
  if [ $COMP_CWORD == 1 ]
  then
    COMPREPLY=($(compgen -W "${commands}" -- ${cur}))
    return 0
  fi
  if [ $COMP_CWORD == 2 ]
  then
    case "$prev" in
      "init")
        # Installed box names are the directories under ~/.vagrant.d/boxes.
        local box_list=$(find $HOME/.vagrant.d/boxes -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
        COMPREPLY=($(compgen -W "${box_list}" -- ${cur}))
        return 0
        ;;
      "ssh"|"provision"|"reload"|"halt"|"suspend"|"resume"|"ssh-config")
        vagrant_state_file=$(__vagrantinvestigate) || return 1
        #Got lazy here.. I'd like to eventually replace this with a pure bash solution.
        # Scrape the "active" VM names out of the .vagrant state file.
        running_vm_list=$(grep 'active' $vagrant_state_file | sed -e 's/"active"://' | tr ',' '\n' | cut -d '"' -f 2 | tr '\n' ' ')
        COMPREPLY=($(compgen -W "${running_vm_list}" -- ${cur}))
        return 0
        ;;
      "box")
        box_commands="add help list remove repackage"
        COMPREPLY=($(compgen -W "${box_commands}" -- ${cur}))
        return 0
        ;;
      "help")
        COMPREPLY=($(compgen -W "${commands}" -- ${cur}))
        return 0
        ;;
      *)
        ;;
    esac
  fi
  if [ $COMP_CWORD == 3 ]
  then
    action="${COMP_WORDS[COMP_CWORD-2]}"
    if [ $action == 'box' ]
    then
      case "$prev" in
        "remove"|"repackage")
          local box_list=$(find $HOME/.vagrant.d/boxes -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
          COMPREPLY=($(compgen -W "${box_list}" -- ${cur}))
          return 0
          ;;
        *)
          ;;
      esac
    fi
  fi
}
complete -F _vagrant vagrant
| true |
f80f9fcbf279afbab560f6eb98c7f8283f7e433b | Shell | Matbe34/EDA-FIB | /game/GameTest.sh | UTF-8 | 599 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Play <num_games> matches of ./Game between four players and tally the wins.
# Usage: GameTest.sh <p1> <p2> <p3> <p4> <num_games>
# Fixes: every expansion is quoted, so an unparseable winner line (empty
# $winner) no longer aborts the comparisons with "unary operator expected";
# the winner field is extracted with awk, which collapses repeated spaces the
# same way the original unquoted-echo trick did; a missing <num_games>
# defaults to 0 instead of crashing the while test.
totalgames=${5:-0}
counter=1
wc1=0
wc2=0
wc3=0
wc4=0
while [ "$counter" -le "$totalgames" ]
do
	# Fresh random seed per game so repeated runs explore different play-outs.
	seed=$RANDOM
	./Game "$1" "$2" "$3" "$4" -s "$seed" -i default.cnf -o "strgame$counter.res" &> "strgameoutput$counter.txt"
	# Winner name = 3rd word of the second-to-last line of the game output.
	winnerline=$( tail -2 "strgameoutput$counter.txt" | head -1 )
	winner=$( echo "$winnerline" | awk '{print $3}' )
	echo "$winner"
	echo "$seed"
	if [ "$winner" == "$1" ]
	then
		((wc1++))
	elif [ "$winner" == "$2" ]
	then
		((wc2++))
	elif [ "$winner" == "$3" ]
	then
		((wc3++))
	elif [ "$winner" == "$4" ]
	then
		((wc4++))
	fi
	((counter++))
done
echo "$1" won "$wc1"
echo "$2" won "$wc2"
echo "$3" won "$wc3"
echo "$4" won "$wc4"
# Prefix glob on purpose: removes strgameN.res and strgameoutputN.txt alike.
rm strgame*
| true |
130a7f2e75d392bbe9f23b78c7708ed46ef0c6b2 | Shell | ferricoxide/AMIgen6 | /ec2-get-ssh.txt | UTF-8 | 1,938 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# chkconfig: 2345 95 20
# processname: ec2-get-ssh
# description: Capture AWS public key credentials for EC2 user
INITARG=${1:-UNDEF}
ROOTSSH="/root/.ssh"
DLKEY=/tmp/EC2-key.pub
# Informational messages
function info_out() {
    # Log message $2 to syslog (tag ec2.init, priority kern.$1) and echo it.
    logger -t ec2.init -p kern.$1 "$2"
    echo $2
}
# Error handler
function err_out() {
    # Log error message $2 via info_out, then exit the script with status $1.
    info_out error "$2"
    exit $1
}
# Replace the following environment variables for your system
export PATH=:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
start() {
    # Ensure root's ~/.ssh exists with mode 0700, fetch the instance public
    # key from the EC2 metadata service, and merge it into authorized_keys
    # (append only when not already present; move into place when the file
    # does not exist yet).
    if [ ! -d ${ROOTSSH} ]; then
        info_out warn "No ${ROOTSSH} directory. Will try to create"
        mkdir -p ${ROOTSSH} || err_out 1 "Failed to create ${ROOTSSH}"
        chmod 700 ${ROOTSSH} || err_out 1 "Failed to set permissions on ${ROOTSSH}"
    fi
    # Retrieve public key from metadata server using HTTP
    info_out info "EC2: attempting to Retrieve public key from metadata server via HTTP."
    curl -f http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > ${DLKEY} || err_out 1 "Key-grab FAILED"
    # What to do with the DL'ed key...
    if [ -s ${ROOTSSH}/authorized_keys ]
    then
        # See if there's an existing keyfile to append to
        grep -q "`cat ${DLKEY}`" ${ROOTSSH}/authorized_keys
        if [ $? -eq 0 ]
        then
            # There is, but key's already present
            info_out info "AWS key already present: nothing to do."
            rm ${DLKEY}
        else
            # There isn't, so add it
            cat ${DLKEY} >> ${ROOTSSH}/authorized_keys
            rm ${DLKEY}
        fi
    else
        # No keyfile, so just move and permission the DL'ed key
        mv ${DLKEY} ${ROOTSSH}/authorized_keys
        chmod 600 ${ROOTSSH}/authorized_keys
    fi
}
stop() {
    # init-script stop action: nothing to tear down; just log.
    info_out info "Nothing to do here"
}
restart() {
    # Conventional restart = stop + start.
    stop
    start
}
# See how we were called.
# Dispatch on the init action ($1, defaulted to UNDEF above); anything
# unrecognized exits non-zero with a usage message via err_out.
case ${INITARG} in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    *)
        err_out 1 "Usage: ${0} {start|stop|restart}"
esac
# Reached only for recognized actions (err_out exits by itself).
exit 0
| true |
26a199b5a14e7fa82ba7d6cf068919a7d8e575d3 | Shell | elevy30/spark-kafka-elastic | /dockers/tr-kafka/docker-entrypoint.sh | UTF-8 | 846 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# set -e
# echo "Getting Zookeeper settings"
# #Check Postgres availability
# SERVER=$(echo "$KAFKA_ZOOKEEPER_CONNECT" | awk -F "," '{print $1}' | awk -F ":" '{print $1}')
# PORT=$(echo "$KAFKA_ZOOKEEPER_CONNECT" | awk -F "," '{print $1}' | awk -F ":" '{print $2}')
# echo "Server: $SERVER and port: $PORT"
# `nc -z -v -w5 $SERVER $PORT &> /dev/null`
# result1=$?
# retry_count=5
# sleep_time=20
# while [ "$result1" != 0 ] && [ "$retry_count" -gt 0 ]
# do
#   echo "port $PORT on $SERVER is closed. trying again"
#   sleep $sleep_time
#   `nc -z -v -w5 $SERVER $PORT &> /dev/null`
#   result1=$?
#   let retry_count=retry_count-1
# done
# if [ "$result1" != 0 ]
# then
#   echo "Could not reach $PORT on $SERVER. Quitting application"
#   exit -1
# fi
# echo "Port $PORT on $SERVER is open!!"
# echo "Starting Kafka"
# NOTE(review): the Zookeeper-reachability gate above is entirely commented
# out — the entrypoint currently starts Kafka unconditionally. Confirm that
# is intended before shipping.
start-kafka.sh
| true |
eec7e73c468553f48ba5fbdc7a353552b57dfcf5 | Shell | openframeworks/openFrameworks | /scripts/linux/chip/install_dependencies.sh | UTF-8 | 4,095 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Must run as root: everything below drives apt-get / update-alternatives.
if [ $EUID != 0 ]; then
    echo "this script must be run as root"
    echo ""
    echo "usage:"
    echo "sudo "$0
    # NOTE(review): $exit_code is unset at this point, so this exits with
    # status 0 and the following `exit 1` is unreachable — confirm intent.
    exit $exit_code
    exit 1
fi
ROOT=$(cd $(dirname $0); pwd -P)
apt-get update
# Prefer GStreamer 1.0 when its dev package exists; otherwise fall back to 0.10.
GSTREAMER_VERSION=0.10
GSTREAMER_FFMPEG=gstreamer${GSTREAMER_VERSION}-ffmpeg
echo "detecting latest gstreamer version"
apt-cache show libgstreamer1.0-dev
exit_code=$?
if [ $exit_code = 0 ]; then
    echo selecting gstreamer 1.0
    GSTREAMER_VERSION=1.0
    GSTREAMER_FFMPEG=gstreamer${GSTREAMER_VERSION}-libav
fi
# Same probe for GTK: use GTK3 when available, else GTK2.
GTK_VERSION=2.0
echo "detecting latest gtk version"
apt-cache show libgtk-3-dev
exit_code=$?
if [ $exit_code = 0 ]; then
    echo selecting gtk 3
    GTK_VERSION=-3
fi
echo "installing OF dependencies"
apt-get install freeglut3-dev libasound2-dev libxmu-dev libxxf86vm-dev g++ libraw1394-dev libudev-dev libdrm-dev libglew-dev libopenal-dev libsndfile-dev libfreeimage-dev libcairo2-dev libfreetype6-dev libssl-dev libpulse-dev libusb-1.0-0-dev libgtk${GTK_VERSION}-dev libopencv-dev libassimp-dev librtaudio-dev libboost-filesystem-dev libglfw3-dev liburiparser-dev libcurl4-openssl-dev libpugixml-dev
exit_code=$?
if [ $exit_code != 0 ]; then
    echo "error installing dependencies, there could be an error with your internet connection"
    echo "if the error persists, please report an issue in github: http://github.com/openframeworks/openFrameworks/issues"
    exit $exit_code
fi
echo "installing gstreamer"
apt-get install libgstreamer${GSTREAMER_VERSION}-dev libgstreamer-plugins-base${GSTREAMER_VERSION}-dev ${GSTREAMER_FFMPEG} gstreamer${GSTREAMER_VERSION}-pulseaudio gstreamer${GSTREAMER_VERSION}-x gstreamer${GSTREAMER_VERSION}-plugins-bad gstreamer${GSTREAMER_VERSION}-alsa gstreamer${GSTREAMER_VERSION}-plugins-base gstreamer${GSTREAMER_VERSION}-plugins-good
exit_code=$?
if [ $exit_code != 0 ]; then
    echo "error installing gstreamer, there could be an error with your internet connection"
    echo "if the error persists, please report an issue in github: http://github.com/openframeworks/openFrameworks/issues"
    exit $exit_code
fi
if [ -f /opt/vc/include/bcm_host.h ]; then
    echo "detected Raspberry Pi"
    echo "installing gstreamer omx"
    apt-get install gstreamer${GSTREAMER_VERSION}-omx
fi
OS_CODENAME=$(cat /etc/os-release | grep VERSION= | sed "s/VERSION\=\"\(.*\)\"/\1/")
if [ "$OS_CODENAME" = "7 (wheezy)" ]; then
    echo "detected wheezy, installing g++4.8 for c++11 compatibility"
    apt-get install g++-4.8
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.6 20
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 50
    sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.6 20
    sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
fi
export LC_ALL=C
# gcc >= 5 changed the C++ ABI, so the bundled poco library must be rebuilt.
GCC_MAJOR_GT_4=$(expr `gcc -dumpversion | cut -f1 -d.` \> 4)
if [ $GCC_MAJOR_GT_4 -eq 1 ]; then
    echo
    echo
    echo "It seems you are running gcc 5 or later, due to incomatible ABI with previous versions"
    echo "we need to recompile poco. This will take a while"
    read -p "Press any key to continue... " -n1 -s
    # Use all cores but one for the rebuild.
    sys_cores=$(getconf _NPROCESSORS_ONLN)
    if [ $sys_cores -gt 1 ]; then
        cores=$(($sys_cores-1))
    else
        cores=1
    fi
    DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
    cd ${DIR}/../../apothecary/apothecary
    ./apothecary -j${cores} update poco
fi
# Update addon_config.mk files to use OpenCV 3 or 4 depending on what's installed
addons_dir="$(readlink -f "$ROOT/../../../addons")"
$(pkg-config opencv4 --exists)
exit_code=$?
if [ $exit_code != 0 ]; then
    echo "Updating ofxOpenCV to use openCV3"
    sed -i -E 's/ADDON_PKG_CONFIG_LIBRARIES =(.*)opencv4(.*)$/ADDON_PKG_CONFIG_LIBRARIES =\1opencv\2/' "$addons_dir/ofxOpenCv/addon_config.mk"
else
    echo "Updating ofxOpenCV to use openCV4"
    sed -i -E 's/ADDON_PKG_CONFIG_LIBRARIES =(.*)opencv\s/ADDON_PKG_CONFIG_LIBRARIES =\1opencv4 /g' "$addons_dir/ofxOpenCv/addon_config.mk"
    sed -i -E 's/ADDON_PKG_CONFIG_LIBRARIES =(.*)opencv$/ADDON_PKG_CONFIG_LIBRARIES =\1opencv4/g' "$addons_dir/ofxOpenCv/addon_config.mk"
fi
| true |
3dedf080c8ef663847e21adde538a9956ba7cf57 | Shell | confluentinc/pmm | /cluster-linking/cloud2cloud/runme.sh | UTF-8 | 178 | 3.28125 | 3 | [] | no_license | I=0
FILE=$I.txt
SCRIPT=$I-*.sh
# Step through numbered demo stages: show N.txt, run the matching N-*.sh,
# wait for a keypress, then advance to stage N+1 until no N.txt exists
# ($I starts at 0 on the line above).
# NOTE(review): $[...] is obsolete bash arithmetic; $((...)) is the modern form.
while [ -e $FILE ]
do
  cat $FILE
  sh $SCRIPT
  read -n1 -s -r -p $'Press any key to continue...\n' key
  I=$[$I+1]
  FILE=$I.txt
  SCRIPT=$I-*.sh
done
| true |
2ed05a99374b1f96723767f01abe616da0502d78 | Shell | singlepig/scripts | /tools/clearCR.sh | UTF-8 | 317 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# this shell is written for converting CRLF to LF
# Usage: clearCR.sh <file>...  (each existing file is rewritten in place)
# Fixes: "$@" and "$filename" are quoted so paths containing spaces or globs
# survive, the useless `cat | tr` became plain input redirection, and the
# error message typo ("not exsit") is corrected.
for filename in "$@"; do
	# skip (with an error message) anything that does not exist
	if [[ -e "$filename" ]]; then
		tr -d "\r" < "$filename" > "$filename.tmp"
		mv "$filename.tmp" "$filename"
	else
		echo "error: $filename does not exist!"
	fi
done
| true |
8ace7e41b0cb7a9e845561e1091fcabea0fa66f7 | Shell | jam31118/rigged-qprop | /src/util/job/submit/sge/single.sh | UTF-8 | 541 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# request 'bash' as shell for job
#$ -S /bin/bash
# Import environment variable from where this job is submitted
#$ -V
#$ -cwd
# The Job name
#$ -N single
# Configure log files for stdout and stderr
#$ -o $JOB_NAME.$JOB_ID.log
#$ -e $JOB_NAME.$JOB_ID.err
#cd $SGE_O_WORKDIR
# Run the imaginary-time then real-time propagation binaries sequentially;
# each logs to its own <bin>.log, with progress notes in the job log file.
log_file="${JOB_NAME}.${JOB_ID}.log"
date > $log_file
bin=imag-prop
$QPROP_HOME/bin/$bin > $bin.log 2>&1
echo "[ LOG ] ${bin} finished" >> $log_file
bin=real-prop
$QPROP_HOME/bin/$bin > $bin.log 2>&1
echo "[ LOG ] ${bin} finished" >> $log_file
| true |
ae89781fc2081044d6cbcd61a3d73cf101b771a7 | Shell | ShinJaehun/BashScriptPractice | /AdvancedBashScriptingGuide/abs4-2.dpkg-check.sh | UTF-8 | 491 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Dry-run check of the Debian package file given as $1: list package info and
# archive contents, then test whether it would install, writing everything to
# "<file>.test". (Korean user-facing messages are preserved unchanged.)
# Fixes: `dpkg -i -no-act` used a single-dash long option, which dpkg does
# not accept — the no-action install test requires `--no-act`; "$1" is quoted
# throughout so filenames containing spaces survive.
SUCCESS=0
E_NOARGS=65
if [ -z "$1" ]
then
  echo "사용법 : `basename $0` dpkg-file"
  exit $E_NOARGS
fi
{
  echo
  echo "아카이브 정보"
  # NOTE(review): -l matches *installed* packages by pattern; for a .deb
  # file's own header, `dpkg -I` may have been intended — confirm.
  dpkg -l "$1"
  echo
  echo "아카이브 목록"
  dpkg -c "$1"
  echo
  dpkg -i --no-act "$1"
  if [ "$?" -eq $SUCCESS ]
  then
    echo "$1 는 설치될 수 있습니다."
  else
    echo "$1 는 설치될 수 없습니다."
  fi
  echo
} > "$1.test"
echo "$1.test 파일에 dpgk 테스트 결과가 저장되었습니다."
exit 0
| true |
2bebc726e9e86cb2c3701cea8e4e4a8d49064420 | Shell | killvxk/cloud-hypervisor | /scripts/run_integration_tests_aarch64.sh | UTF-8 | 11,234 | 3.625 | 4 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/bin/bash
set -x
# Rust toolchain environment and the shared test helper functions.
source $HOME/.cargo/env
source $(dirname "$0")/test-util.sh
# Build target defaults to native aarch64 unless the caller overrides it.
export BUILD_TARGET=${BUILD_TARGET-aarch64-unknown-linux-gnu}
# Downloaded/built artifacts live under ~/workloads.
# NOTE(review): WORKLOADS_LOCK is presumably used by later (unseen) code to
# serialize access to that directory — confirm.
WORKLOADS_DIR="$HOME/workloads"
WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"
mkdir -p "$WORKLOADS_DIR"
# Checkout source code of a GIT repo with specified branch and commit
# Args:
# $1: Target directory
# $2: GIT URL of the repo
# $3: Required branch
# $4: Required commit (optional)
checkout_repo() {
    SRC_DIR="$1"
    GIT_URL="$2"
    GIT_BRANCH="$3"
    GIT_COMMIT="$4"
    # Check whether the local HEAD commit same as the requested commit or not.
    # If commit is not specified, compare local HEAD and remote HEAD.
    # Remove the folder if there is difference.
    if [ -d "$SRC_DIR" ]; then
        pushd $SRC_DIR
        git fetch
        SRC_LOCAL_COMMIT=$(git rev-parse HEAD)
        if [ -z "$GIT_COMMIT" ]; then
            GIT_COMMIT=$(git rev-parse remotes/origin/"$GIT_BRANCH")
        fi
        popd
        if [ "$SRC_LOCAL_COMMIT" != "$GIT_COMMIT" ]; then
            # Stale checkout: remove it so the clone below starts fresh.
            rm -rf "$SRC_DIR"
        fi
    fi
    # Checkout the specified branch and commit (if required)
    if [ ! -d "$SRC_DIR" ]; then
        git clone --depth 1 "$GIT_URL" -b "$GIT_BRANCH" "$SRC_DIR"
        if [ "$GIT_COMMIT" ]; then
            pushd "$SRC_DIR"
            # Shallow-fetch just the pinned commit and hard-reset onto it.
            git fetch --depth 1 origin "$GIT_COMMIT"
            git reset --hard FETCH_HEAD
            popd
        fi
    fi
}
build_custom_linux() {
    # Build the cloud-hypervisor custom aarch64 kernel (branch ch-5.14) with
    # the checked-in config and copy Image/Image.gz into $WORKLOADS_DIR.
    SRCDIR=$PWD
    LINUX_CUSTOM_DIR="$WORKLOADS_DIR/linux-custom"
    LINUX_CUSTOM_BRANCH="ch-5.14"
    LINUX_CUSTOM_URL="https://github.com/cloud-hypervisor/linux.git"
    checkout_repo "$LINUX_CUSTOM_DIR" "$LINUX_CUSTOM_URL" "$LINUX_CUSTOM_BRANCH"
    cp $SRCDIR/resources/linux-config-aarch64 $LINUX_CUSTOM_DIR/.config
    pushd $LINUX_CUSTOM_DIR
    time make -j `nproc`
    cp arch/arm64/boot/Image "$WORKLOADS_DIR/" || exit 1
    cp arch/arm64/boot/Image.gz "$WORKLOADS_DIR/" || exit 1
    popd
}
build_edk2() {
    # Build the aarch64 CLOUDHV_EFI.fd firmware: pinned checkouts of edk2,
    # edk2-platforms and acpica, then a RELEASE build of
    # ArmVirtPkg/ArmVirtCloudHv.dsc, with the result copied to $WORKLOADS_DIR.
    EDK2_BUILD_DIR="$WORKLOADS_DIR/edk2_build"
    EDK2_REPO="https://github.com/tianocore/edk2.git"
    EDK2_DIR="$EDK2_BUILD_DIR/edk2"
    EDK2_PLAT_REPO="https://github.com/tianocore/edk2-platforms.git"
    EDK2_PLAT_DIR="$EDK2_BUILD_DIR/edk2-platforms"
    ACPICA_REPO="https://github.com/acpica/acpica.git"
    ACPICA_DIR="$EDK2_BUILD_DIR/acpica"
    # Environment expected by the EDK2 build system (edksetup.sh / build).
    export WORKSPACE="$EDK2_BUILD_DIR"
    export PACKAGES_PATH="$EDK2_DIR:$EDK2_PLAT_DIR"
    export IASL_PREFIX="$ACPICA_DIR/generate/unix/bin/"
    if [ ! -d "$EDK2_BUILD_DIR" ]; then
        mkdir -p "$EDK2_BUILD_DIR"
    fi
    # Prepare source code
    checkout_repo "$EDK2_DIR" "$EDK2_REPO" master "46b4606ba23498d3d0e66b53e498eb3d5d592586"
    pushd "$EDK2_DIR"
    git submodule update --init
    popd
    checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
    checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"
    pushd "$EDK2_BUILD_DIR"
    # Build
    make -C acpica -j `nproc`
    source edk2/edksetup.sh
    make -C edk2/BaseTools -j `nproc`
    build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0
    cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR"
    popd
}
update_workloads() {
    # Download/build every artifact the integration tests need (cloud images,
    # initramfs, custom kernel, virtiofsd, test disks, EDK2 firmware) into
    # WORKLOADS_DIR.  Each step is skipped when its output already exists.
    cp scripts/sha1sums-aarch64 $WORKLOADS_DIR
    # Ubuntu 18.04 cloud image, downloaded once and cached.
    BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
    BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $BIONIC_OS_IMAGE_DOWNLOAD_URL || exit 1
        popd
    fi
    # Raw variant of the bionic image for direct block-device use.
    BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
    BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
    if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time qemu-img convert -p -f qcow2 -O raw $BIONIC_OS_IMAGE_DOWNLOAD_NAME $BIONIC_OS_RAW_IMAGE_NAME || exit 1
        popd
    fi
    # Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore letting the
    # qcow2 format image can be directly used in the integration test.
    BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
    BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME $BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE || exit 1
        popd
    fi
    # Pre-built custom Ubuntu 20.04 raw image.
    FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
    FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_RAW_IMAGE_NAME"
    FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
    if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd
    fi
    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd
    fi
    # Minimal Alpine rootfs used to build a tiny initramfs for boot tests.
    ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
    ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
    if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $ALPINE_MINIROOTFS_URL -O $ALPINE_MINIROOTFS_TARBALL || exit 1
        popd
    fi
    ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
    if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        mkdir alpine-minirootfs
        tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
        # The init just echoes $TEST_STRING to the console and powers off,
        # which is what the boot test asserts on.  (<<- strips leading tabs.)
        cat > alpine-minirootfs/init <<-EOF
		#! /bin/sh
		mount -t devtmpfs dev /dev
		echo \$TEST_STRING > /dev/console
		poweroff -f
	EOF
        chmod +x alpine-minirootfs/init
        cd alpine-minirootfs
        # Pack the rootfs into a newc-format cpio archive (initramfs).
        find . -print0 |
            cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
        popd
    fi
    # Verify every downloaded artifact against the checked-in sha1 list.
    pushd $WORKLOADS_DIR
    sha1sum sha1sums-aarch64 --check
    if [ $? -ne 0 ]; then
        echo "sha1sum validation of images failed, remove invalid images to fix the issue."
        exit 1
    fi
    popd
    # Build custom kernel for guest VMs
    build_custom_linux
    # Update the kernel in the cloud image for some tests that requires recent kernel version
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME="focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw"
    cp "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME" "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME"
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR="$WORKLOADS_DIR/focal-server-cloudimg-root"
    mkdir -p "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"
    # Mount the 'raw' image, replace the compressed kernel file and umount the working folder
    guestmount -a "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME" -m /dev/sda1 "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR" || exit 1
    cp "$WORKLOADS_DIR"/Image.gz "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"/boot/vmlinuz
    guestunmount "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"
    # Build virtiofsd
    VIRTIOFSD_RS="$WORKLOADS_DIR/virtiofsd-rs"
    VIRTIOFSD_RS_DIR="virtiofsd_rs_build"
    if [ ! -f "$VIRTIOFSD_RS" ]; then
        pushd $WORKLOADS_DIR
        git clone "https://gitlab.com/virtio-fs/virtiofsd-rs.git" $VIRTIOFSD_RS_DIR
        git checkout c847ab63acabed2ed6e6913b9c76bb5099a1d4cb
        pushd $VIRTIOFSD_RS_DIR
        time cargo build --release
        cp target/release/virtiofsd-rs $VIRTIOFSD_RS || exit 1
        popd
        rm -rf $VIRTIOFSD_RS_DIR
        popd
    fi
    # Small ext4 disk image containing a single known file (foo -> "bar"),
    # used by the virtio-block tests.
    BLK_IMAGE="$WORKLOADS_DIR/blk.img"
    MNT_DIR="mount_image"
    if [ ! -f "$BLK_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        fallocate -l 16M $BLK_IMAGE
        mkfs.ext4 -j $BLK_IMAGE
        mkdir $MNT_DIR
        sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
        sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
        sudo umount $BLK_IMAGE
        rm -r $MNT_DIR
        popd
    fi
    # Host directory shared into guests by the virtio-fs tests.
    SHARED_DIR="$WORKLOADS_DIR/shared_dir"
    if [ ! -d "$SHARED_DIR" ]; then
        mkdir -p $SHARED_DIR
        echo "foo" > "$SHARED_DIR/file1"
        echo "bar" > "$SHARED_DIR/file3" || exit 1
    fi
    # Check and build EDK2 binary
    build_edk2
}
# Parse the common CLI flags shared by all test scripts; presumably this sets
# $hypervisor, $test_filter and $CH_LIBC used below -- defined elsewhere.
process_common_args "$@"
# aarch64 not supported for MSHV
if [[ "$hypervisor" = "mshv" ]]; then
    echo "AArch64 is not supported in Microsoft Hypervisor"
    exit 1
fi
features_build=""
features_test="--features integration_tests"
# lock the workloads folder to avoid parallel updating by different containers
(
 echo "try to lock $WORKLOADS_DIR folder and update"
 flock -x 12 && update_workloads
) 12>$WORKLOADS_LOCK
# Check if there is any error in the execution of `update_workloads`.
# If there is any error, then kill the shell. Otherwise the script will continue
# running even if the `update_workloads` function was failed.
RES=$?
if [ $RES -ne 0 ]; then
    exit 1
fi
# CH_LIBC selects gnu vs musl; musl needs a dedicated compiler and include path.
BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}"
CFLAGS=""
TARGET_CC=""
if [[ "${BUILD_TARGET}" == "aarch64-unknown-linux-musl" ]]; then
    TARGET_CC="musl-gcc"
    CFLAGS="-I /usr/include/aarch64-linux-musl/ -idirafter /usr/include/"
fi
export RUST_BACKTRACE=1
# Test without ACPI
cargo build --all --release $features_build --target $BUILD_TARGET
strip target/$BUILD_TARGET/release/cloud-hypervisor
strip target/$BUILD_TARGET/release/vhost_user_net
strip target/$BUILD_TARGET/release/ch-remote
# Enable KSM with some reasonable parameters so that it won't take too long
# for the memory to be merged between two processes.
sudo bash -c "echo 1000000 > /sys/kernel/mm/ksm/pages_to_scan"
sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"
# Setup huge-pages for ovs-dpdk
echo 2048 | sudo tee /proc/sys/vm/nr_hugepages
# Run all direct kernel boot (Device Tree) test cases in mod `parallel`
time cargo test $features_test "tests::parallel::$test_filter"
RES=$?
# Run some tests in sequence since the result could be affected by other tests
# running in parallel.
if [ $RES -eq 0 ]; then
    time cargo test $features_test "tests::sequential::$test_filter" -- --test-threads=1
    RES=$?
else
    exit $RES
fi
# Run all ACPI test cases
if [ $RES -eq 0 ]; then
    time cargo test $features_test "tests::aarch64_acpi::$test_filter"
    RES=$?
else
    exit $RES
fi
# Run all test cases related to live migration
if [ $RES -eq 0 ]; then
    time cargo test $features_test "tests::live_migration::$test_filter" -- --test-threads=1
    RES=$?
else
    exit $RES
fi
exit $RES
| true |
e9cc84109cf400139d1f610346afa69ed82b83f0 | Shell | ikenticus/blogcode | /bash/tasks/30-days/05-loops.sh | UTF-8 | 61 | 2.8125 | 3 | [] | no_license | read n
# Print the multiplication table of n (read from stdin above) for 1..10.
# Fix: `$[ ... ]` is obsolete/undocumented arithmetic syntax; use the
# standard POSIX form `$(( ... ))` instead.
for i in {1..10}; do
    echo "$n x $i = $((n*i))"
done
| true |
cde5020d499d4a3b30218ba2f00259953a8dffe5 | Shell | khaleel-ahmed/terraform-alb | /install.sh | UTF-8 | 1,564 | 3 | 3 | [] | no_license | #!/bin/bash
printf "\e[1;34m _____ __ \e[0m\n"
printf "\e[1;34m |_ _|__ _ __ _ __ __ _ / _| ___ _ __ _ __ ___ \e[0m\n"
printf "\e[1;34m | |/ _ \ '__| '__/ _' | |_ / _ \| '__| '_ ' _ \ \e[0m\n"
printf "\e[1;34m | | __/ | | | | (_| | _| (_) | | | | | | | | \e[0m\n"
printf "\e[1;34m |_|\___|_| |_| \__,_|_| \___/|_| |_| |_| |_| \e[0m\n"
printf "\e[1;34m ___ _ _ _ _ _ \e[0m\n"
printf "\e[1;34m |_ _|_ __ ___| |_ __ _| | | __ _| |_(_) ___ _ __ \e[0m\n"
printf "\e[1;34m | || '_ \/ __| __/ _' | | |/ _' | __| |/ _ \| '_ \ \e[0m\n"
printf "\e[1;34m | || | | \__ \ || (_| | | | (_| | |_| | (_) | | | | \e[0m\n"
printf "\e[1;34m |___|_| |_|___/\__\__,_|_|_|\__,_|\__|_|\___/|_| |_| \e[0m\n"
echo""
echo""
echo""
if [ -f /usr/bin/terraform ]; then
echo "Already Installed"
echo""
else
wget https://releases.hashicorp.com/terraform/1.0.6/terraform_1.0.6_linux_amd64.zip
unzip terraform*.zip
mv terraform /usr/bin/
rm -rf terraform*.zip
echo""
echo""
echo "Terrform Installtion has been Sucessfully completed"
echo""
echo "The Installed Version is"
terraform -v
echo""
echo""
fi | true |
6a7f90b5bc6445536a4c7c9dc4d1f3aa22e0a020 | Shell | ibizaman/picasm | /mpasmx-harness.sh | UTF-8 | 1,522 | 4.0625 | 4 | [] | no_license | #!/bin/sh
## Do not call this by hand: it's a harness for the code in
## picasm-external.el (and should stay in the same directory so that
## the elisp code can find it)
##
## Parameters:
##
## $1 = The path to mpasmx
## $2 = The path of the asm file to compile
## $3 = The output file name
## $4 = The chip name (lower case, stripped of PIC prefix)
## $5 = Default radix
## $6 = Output format
## $7 = Absolute mode?
##
## Behaviour:
##
## MPASMX is run, if possible. The harness outputs any warnings or
## errors to stderr
##
## Known problems:
##
## Currently, one can't specify a list of include directories. This
## is because MPASMX doesn't implement it. A hacky solution would be
## to copy the file to be assembled, together with any asm files in
## an include directory, into a temp directory and then compile
## there, but I can't really face that now.
# Positional arguments (see header comment above for full semantics).
mpasmx=${1}
infile=${2}
outfile=${3}
chipname=${4}
radix=${5}
outfmt=${6}
absolute=${7}
# Abort helper used when mktemp fails.
no_temp ()
{
    echo "ERROR: Couldn't create temp file."
    exit 1
}
# Temp files for MPASMX's error and listing output.
errfile="$(mktemp)" || no_temp
lstfile="$(mktemp)" || no_temp
# In absolute mode MPASMX chooses the output name itself (-a sets the hex
# format); otherwise we pass the requested output file explicitly.
# `set -x`/`set +x` around the call echo the exact command line for debugging.
if [ x"${absolute}" = x"true" ]; then
    set -x
    "${mpasmx}" "-a${outfmt}" "-p${chipname}" "-r${radix}" "-e${errfile}" "-l${lstfile}" "${infile}"
    set +x
else
    set -x
    "${mpasmx}" "-o${outfile}" "-p${chipname}" "-r${radix}" "-e${errfile}" "-l${lstfile}" "${infile}"
    set +x
fi
# Show the full error file, then scan it for hard errors.  grep returns 0 when
# an "Error[" line exists, so `$(($? == 0))` maps "errors found" -> exit 1 and
# "no errors" -> exit 0.
cat "${errfile}"
grep '^Error\[' "${errfile}"
exitcode=$(($? == 0))
rm ${errfile} ${lstfile}
exit $exitcode
| true |
c9a58f974fa3bf295c01e5cdc0c51a238fd263ec | Shell | silky/FFMPEG-gif-script-for-bash | /gifenc.sh | UTF-8 | 477 | 3.5625 | 4 | [] | no_license | #!/bin/sh
# Require all four arguments; otherwise print usage and bail out.
if test $# -lt 4; then
	cat <<-EOH
	$0: Script to generate animated gifs easily from command line.
	Usage:
	$0 input.(mp4|avi|webm|flv|...) output.gif horizontal_resolution fps
	EOH
	exit 1
fi
# Two-pass encode: first generate an optimal 256-color palette from the video,
# then encode the gif using that palette for much better quality/size.
palette="$(mktemp /tmp/ffmpeg2gifXXXXXX.png)"
# $3 = output width (height auto-scaled, lanczos resampler), $4 = frame rate.
filters="fps=$4,scale=$3:-1:flags=lanczos"
ffmpeg -v warning -i "$1" -vf "$filters,palettegen" -y "$palette"
# NOTE(review): $palette is unquoted here; fine for mktemp paths but quoting
# would be safer.
ffmpeg -v warning -i "$1" -i $palette -lavfi "$filters [x]; [x][1:v] paletteuse" -y "$2"
rm -f "$palette"
| true |
74e0e774f5a59ee148f9bb6befb3dfdec858555a | Shell | malsoudani/dots | /envs/.bash_profile | UTF-8 | 2,719 | 3.5 | 4 | [] | no_license | source ~/.bashrc
ff () {
    # Fuzzy-find files (fzf with bat preview) under $1 (default: $PWD, via
    # findr) and open the multi-selection in emacs.  If the selection happens
    # to match the current directory listing, nothing is opened.
    start_path="$1"
    dir=$(ls -1);
    if [[ -z "$start_path" ]]; then
        files=$(ls -1 $(findr $PWD '.' 2>/dev/null | \
        fzf --multi --height 80% --reverse --preview 'bat --style=numbers --color=always {} | head -500' | \
        perl -ne 'chomp $_; print $_ . " "'))
        echo $files;
        # Guard: skip emacs when the "selection" is just the directory listing
        # (e.g. fzf was cancelled) -- NOTE(review): substring regex match,
        # confirm this is the intended cancel detection.
        if [[ "$dir" =~ "$files" ]]; then
            return;
        fi
        emacs $files
    else
        files=$(ls -1 $(findr $start_path '.' 2>/dev/null | \
        fzf --multi --height 80% --reverse --preview 'bat --style=numbers --color=always {} | head -500' | \
        perl -ne 'chomp $_; print $_ . " "'))
        if [[ "$dir" =~ "$files" ]]; then
            return;
        fi
        emacs $files
    fi
}
findr () {
    # Recursively list files whose path matches pattern $2 under directory $1,
    # delegating the walk to ag (the_silver_searcher).
    # Usage: findr PATH_HERE FILE_NAME
    if [[ "$1" =~ "--help" ]]; then
        echo "findr PATH_HERE FILE_NAME";
        return;
    fi
    local path=$1
    local file_name=$2
    # Fix: quote the expansions so paths/patterns containing spaces or glob
    # characters are passed to ag intact instead of being word-split.
    ag "." -lG "$file_name" "$path"
}
# search a directory and cd into it
# WARNING(review): this shadows the coreutils `dd` command for interactive
# shells sourcing this profile -- confirm that is intentional.
dd() {
    local dir
    # Prune hidden directories, list the rest, and let fzf pick one.
    dir=$(find ${1:-.} -path '*/\.*' -prune \
        -o -type d -print 2> /dev/null | fzf +m) &&
    cd "$dir"
}
########### git fzf
# checkout branches in git
bb() {
    local branches branch
    # List local branches with tracking info, fuzzy-pick one, then strip the
    # verbose suffix and check it out.
    # NOTE(review): the current branch's line starts with '*', which this
    # parsing would pass to `git checkout` -- verify behavior when the current
    # branch is selected.
    branches=$(git --no-pager branch -vv) &&
    branch=$(echo "$branches" | fzf +m) &&
    git checkout $(echo "$branch" | awk '{print $1}' | sed "s/.* //")
}
# fgst - pick files from `git status -s`
is_in_git_repo() {
  # Report via exit status whether the CWD is inside a git repository that
  # has at least one commit; all git output is silenced.
  git rev-parse HEAD &> /dev/null
}
fgst() {
    # Fuzzy-pick entries from `git status -s` and print just their paths
    # (second column), one per line.  Does nothing outside a git repo.
    # "Nothing to see here, move along"
    is_in_git_repo || return
    # Allow overriding the listing command via FZF_CTRL_T_COMMAND.
    local cmd="${FZF_CTRL_T_COMMAND:-"command git status -s"}"
    eval "$cmd" | FZF_DEFAULT_OPTS="--height ${FZF_TMUX_HEIGHT:-40%} --reverse $FZF_DEFAULT_OPTS $FZF_CTRL_T_OPTS" fzf -m "$@" | while read -r item; do
        echo "$item" | awk '{print $2}'
    done
    echo
}
############# docker fzf
# Select a docker container to start and attach to
function da() {
    local cid
    # Fuzzy-pick among all containers (running or not); $1 pre-fills the query.
    cid=$(docker ps -a | sed 1d | fzf -1 -q "$1" | awk '{print $1}')
    [ -n "$cid" ] && docker start "$cid" && docker attach "$cid"
}
# Select a running docker container to stop
function ds() {
    local cid
    # Only running containers are listed; $1 pre-fills the fzf query.
    cid=$(docker ps | sed 1d | fzf -q "$1" | awk '{print $1}')
    [ -n "$cid" ] && docker stop "$cid"
}
# Select a docker container to remove
function drm() {
    local cid
    # Any container (running or stopped) can be selected for removal.
    cid=$(docker ps -a | sed 1d | fzf -q "$1" | awk '{print $1}')
    [ -n "$cid" ] && docker rm "$cid"
}
# Enable bash-git-prompt (Homebrew install) when present.
if [ -f "$(brew --prefix)/opt/bash-git-prompt/share/gitprompt.sh" ]; then
  __GIT_PROMPT_DIR=$(brew --prefix)/opt/bash-git-prompt/share
  GIT_PROMPT_ONLY_IN_REPO=1
  source "$(brew --prefix)/opt/bash-git-prompt/share/gitprompt.sh"
fi
export PATH="/usr/local/opt/postgresql@11/bin:$PATH"
# NOTE(review): if GOPATH is unset this becomes "/bin" -- confirm GOPATH is
# exported earlier (e.g. in ~/.bashrc).
export GOBIN="$GOPATH/bin"
# Initialize rbenv shims for this shell.
eval "$(rbenv init -)"
| true |
f9da45dc20e36a088f180cd64a4ffad4d2784415 | Shell | vmoravec/yast-runlevel | /tools/scripts.sh | UTF-8 | 685 | 3.25 | 3 | [] | no_license | #! /bin/bash
# $Id$
# required: pdb-commandline.rpm
#
# All stdout from here on goes to scripts.ycp.out (truncated first).
SX=scripts
:> $SX.ycp.out
exec > $SX.ycp.out
# no leading slash
DIR=etc/init.d
# Query the package database for packages shipping init scripts, then unpack
# just their etc/init.d payload from the distribution RPMs into $DIR.
function extract() {
    pdb query --filter "rpmdir:/$DIR" --attribs packname > $SX-pkgs
    # Keep the current init scripts aside before extracting fresh copies.
    mv -f $DIR/* $DIR.bak
    sort $SX-pkgs | while read pkg; do
	rpm2cpio /work/CDs/all/full-i386/suse/*/$pkg.rpm \
	    | cpio -idvm --no-absolute-filenames "$DIR/*" "./$DIR/*"
    done
}
extract
echo -n "// Generated on "
LANG=C date
# comment out the nil reply to the agent initialization
echo -n "//"
# Use the agent to parse the config files
{
    echo "InitScripts (\"$DIR\")"
    echo "Read (.comments)"
} | /usr/lib/YaST2/servers_non_y2/ag_initscripts
| true |
bae617489af780ffaad9b66f372b28c12c621d88 | Shell | kanchwala-yusuf/odo-init-image | /language-scripts/java/dev-run | UTF-8 | 458 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Fail fast and trace every command for easier container-log debugging.
set -e
set -x
# JDWP remote-debug port, overridable by the environment.
DEBUG_PORT="${DEBUG_PORT:=49200}"
# DEBUG_MODE is true by default, which means that the application will be started with remote debugging enabled.
DEBUG_MODE="${DEBUG_MODE:=true}"
if [ $DEBUG_MODE != "false" ]; then
  # Append the JDWP agent options (non-suspending, listening on DEBUG_PORT).
  export JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=${DEBUG_PORT},suspend=n"
  export JAVA_OPTIONS=$JAVA_OPTS
fi
# run normal run s2i script
exec "${ODO_S2I_SCRIPTS_URL}/run"
9341385e2faad7ec56f3455686ab93bcbe86a796 | Shell | yypptest/cp4mcm-samples | /monitoring/2.3/prepareImages.sh | UTF-8 | 2,538 | 3.890625 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Load every docker image tarball from $IMAGEFOLDER whose name matches an
# entry in $list, retag it under $DOCKERREG, and push it.
# Tarball naming convention: <image-name>_<tag>.tar
pushImages(){
  if [ ! -d $IMAGEFOLDER ]
  then
    echo $IMAGEFOLDER " is not a valid folder that contains docker images you need"
    exit 1
  fi
  for item in $list
  do
    for FILE in `ls $IMAGEFOLDER | grep "$item"`
    do
      if [ -e $UA_INSTALL/ua-pkg/images/$FILE ]
      then
        # Split "<name>_<tag>.tar" into repository name and tag.
        PREFIX=${FILE%%_*}
        SUFFIX=${FILE#*_}
        echo $PREFIX:$SUFFIX
        TAG=${SUFFIX%.tar*}
        echo 'loading '$IMAGEFOLDER/$FILE
        docker load -i $IMAGEFOLDER/$FILE
        # Brief pause so the daemon finishes registering the loaded image.
        sleep 5
        echo 'tag to '$DOCKERREG/$PREFIX:$TAG
        docker tag $PREFIX:$TAG $DOCKERREG/$PREFIX:$TAG
        echo 'push image ' $DOCKERREG/$PREFIX:$TAG
        docker push $DOCKERREG/$PREFIX:$TAG
      else
        echo " failed to find the file " $UA_INSTALL/ua-pkg/images/$FILE
        exit
      fi
    done
  done
}
# For each image in $list, create (if missing) and push a multi-arch manifest
# referencing the amd64/ppc64le/s390x per-arch tags already in $DOCKERREG.
# The tag is derived from the amd64 tarball name "<name>_<tag>-amd64.tar".
createManifest(){
  for item in $list
  do
    FILE=`ls $IMAGEFOLDER | grep "${item}" | grep "amd64"`
    if [ -n "$FILE" ]
    then
      SUFFIX=${FILE#*_}
      TAG=${SUFFIX%-*}
      # Only create the manifest when one does not already exist.
      MANIFEST=`docker manifest inspect $DOCKERREG/$item:$TAG | grep "schemaVersion"`
      echo $MANIFEST
      if [ -z "$MANIFEST" ]
      then
        echo "Create manifest for " $DOCKERREG/$item:$TAG
        docker manifest create $DOCKERREG/$item:$TAG \
        $DOCKERREG/$item:$TAG-amd64 \
        $DOCKERREG/$item:$TAG-ppc64le \
        $DOCKERREG/$item:$TAG-s390x
      fi
      echo "Push manifest for " $DOCKERREG/$item:$TAG
      # --purge removes the local manifest list after a successful push.
      docker manifest push --purge $DOCKERREG/$item:$TAG
      sleep 5
    fi
  done
}
##----------------------------------------------------------------------
# main function
##----------------------------------------------------------------------
# Args: $1 = install dir containing ua-pkg/images, $2 = registry/group,
#       $3 (optional) = extra image names appended to the default list.
if [ $# != 2 -a $# != 3 ]
then
  echo " Please input 2 or 3 parameters: "
  echo "     #1 is directory that contains all images; "
  echo "     #2 is docker registry and image group split with / ; "
  echo "     #3 is optional, it is image you want to push besides the default list (ua-operator ua-cloud-monitoring ua-repo ua-plugins reloader)"
  echo "     E.g: ./prepareImages.sh /var/uainstall uaocp.fyre.ibm.com:5000/ua 'k8-monitor k8sdc-operator'"
  echo "     E.g: ./prepareImages.sh /var/uainstall uaocp.fyre.ibm.com:5000/ua "
  exit 1
fi
UA_INSTALL=$1
DOCKERREG=$2
IMAGELIST=$3
IMAGEFOLDER=$UA_INSTALL/ua-pkg/images
# Default image set plus any caller-supplied extras.
list="ua-operator ua-cloud-monitoring ua-repo ua-plugins reloader $IMAGELIST"
pushImages
createManifest
a113aa86e8ad7a600bdff1363ba29ff4b9d4094c | Shell | marekurbanski/sky-desk-monitoring | /setup.sh | UTF-8 | 38,547 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
###############################################################################################
##### ######
##### Funkcje do monitoringu ######
##### ######
##### ######
##### Uruchom "./setup.sh" ######
##### "./setup.sh --update" - aktualizacja, nie wymaga potwierdzenia ######
##### ".setup.sh --help" - wyświetlenie pomocy ######
##### ".setup.sh --config" - uruchomienie konfiguratora ######
##### ######
###############################################################################################
#### Instalacja:
#### wget --no-check-certificate https://sky-desk.eu/download/monitoring/setup.sh
########### wymagane pakiety ##############
##
## curl
## md5sum
## md5
## md5sha1sum
## free
## python
## snmp
## nc
###
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
cd $SCRIPTPATH
function clear_screen {
        # Wipe the terminal, then show the installer banner with usage hints
        # and support URLs, followed by two blank spacer lines.
        clear
        cat <<'BANNER'
######################################################################################################
#                                                                                                    #
#                                                                                                    #
#                              Instalator monitoringu                                                #
#        ./setup.sh --help      # wyświetlenie pomocy                                                #
#        ./setup.sh --config    # uruchomienie konfiguratora                                         #
#                                                                                                    #
#         Postępuj zgodnie z instrukcjami wyświetlanymi na ekranie                                   #
#         W razie problemów skontaktuj się z pomocą techniczną lub skorzystaj                        #
#         pomocy on-line pod adresem:                                                                #
#                                                                                                    #
#         https://sky-desk.eu/help                                                                   #
#         lub:                                                                                       #
#         https://sky-desk.eu/help?topic=13-konfiguracja-monitorowania-serwera                       #
#                                                                                                    #
######################################################################################################
BANNER
        printf '\n\n'
}
# Verify that every external tool the monitoring scripts depend on is
# installed, via check_package (defined in the downloaded function library).
function check_needed_packages {
        check_package "curl"
        check_package "md5sum"
        check_package "python"
        #check_package "snmp"
        check_package "nmap"
        # check_package "free" -- paczka przeniesiona do miejsca instalacji
        # (package check moved to the install location)
}
#################################################################################################
# instalacja skryptow
# zmiana nazwy z monitoring.sh na functions.sh
# instalacja przykladowych skryptow
# tworzenie katalogow
my_name=`echo $0 | rev | cut -d'/' -f1 | rev`
update='no'
if [ "$1" = "--help" ] || [ "$1" = "-h" ] || [ "$1" = "?" ]
then
cat README.md
exit 1
fi
if [ "$1" = "--update" ]
then
update='yes'
force='yes'
fi
if [ "$update" = "yes" ]
then
if [ -f "${TMPDIR}sky_desk_server" ]
then
mv ${TMPDIR}sky_desk_server $SCRIPTPATH/include/sky_desk_server
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
if [ -f sky_desk_server ]
then
mv sky_desk_server include/
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
if [ -f "${TMPDIR}sky_desk_server" ]
then
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
vold=`cat version.txt | xargs`
rm -rf version.txt
wget --no-check-certificate https://sky-desk.eu/download/monitoring/version.txt
vnew=`cat version.txt | xargs`
clear_screen
if [ "$vnew" = "$vold" ]
then
echo ""
echo ""
echo " Posiadasz aktualną wersję :)"
echo ""
echo ""
echo ""
exit
fi
echo ""
echo "================================================================================"
echo ""
echo "Jest nowa aktualizacja ( z wersji $vold do wersji $vnew )"
echo ""
if [ ! -d "archive" ]
then
mkdir archive
fi
data=`date +%Y-%m-%d`
if [ ! -d "include" ]
then
mkdir include
fi
if [ -f "include/functions.sh" ]
then
mv include/functions.sh archive/functions_$data
mv include/global_functions.sh archive/global_functions_$data
fi
rm -rf setup.sh
rm -rf include/functions.sh
rm -rf scanner.sh
rm -rf include/global_functions.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/include/functions.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/setup.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/scanner.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/include/global_functions.sh
if [ -f README.md ]
then
rm -rf README.md
fi
wget --no-check-certificate https://sky-desk.eu/download/monitoring/README.md
chmod 777 setup.sh
chmod 777 scanner.sh
mv functions.sh include/functions.sh
mv global_functions.sh include/global_functions.sh
rm -rf functions.sh*
rm -rf global_functions.sh*
clear_screen
echo "Gotowe... :)"
echo ""
host_name=`echo $HOSTNAME`
ip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -n 1`
mac=`ifconfig | grep "$ip" -B 3 -A 3 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'`
source $SCRIPTPATH/include/config
if [ "$default_item_group_id" == "" ]
then
host_name=`echo $HOSTNAME`
ip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -n 1`
mac=`ifconfig | grep "$ip" -B 3 -A 3 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'`
default_item_group=$(get_data_from_server $user_id $api_key "check_default_asset_group^$mac")
if [ "$default_item_group" == "" ]
then
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
read -p "Podaj grupę lub wciśnij ENTER aby anulowac [ np: 1 ] = " default_item_group
echo ""
else
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
echo "Automatycznie dopasowałem grupę ===> $default_item_group <=== - potwierdź jej poprawnosc"
read -p "Wpisz grupę $default_item_group lub wciśnij ENTER aby anulowac [ $default_item_group ] = " default_item_group
echo ""
fi
fi
if [ "$default_item_group" != "" ]
then
echo "default_item_group_id='$default_item_group'" >> include/config
fi
echo ""
exit
fi
source $SCRIPTPATH/include/functions.sh
install_now='0'
if [ ! -d "include" ] || [ ! -f "include/config" ] || [ ! -f "check.sh" ] || [ "$1" == "--config" ]
then
install_now='1'
fi
sky_desk_server_path=${TMPDIR}sky_desk_server
if [ -f "${TMPDIR}sky_desk_server" ]
then
mv ${TMPDIR}sky_desk_server $SCRIPTPATH/include/sky_desk_server
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
if [ -f sky_desk_server ]
then
mv sky_desk_server include/
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
if [ -f "${TMPDIR}sky_desk_server" ]
then
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
fi
sky_desk_server_path=$SCRIPTPATH/include/sky_desk_server
if [ "$install_now" == "1" ]
then
clear_screen
echo "########################################################################### "
echo "# "
echo "# Instalacja skryptu monitoringu "
echo "# "
echo "########################################################################### "
check_needed_packages
# tworznie katalogu z archiwami
if [ ! -d "archive" ]
then
mkdir archive
fi
if [ ! -d "include" ]
then
mkdir include
fi
if [ ! -f "include/functions.sh" ]
then
wget --no-check-certificate https://sky-desk.eu/download/monitoring/include/functions.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/scanner.sh
wget --no-check-certificate https://sky-desk.eu/download/monitoring/include/global_functions.sh
if [ -f README.md ]
then
rm -rf README.md
fi
wget --no-check-certificate https://sky-desk.eu/download/monitoring/README.md
cat functions.sh > include/functions.sh
cat global_functions.sh > include/global_functions.sh
rm -rf functions.sh
rm -rf global_functions.sh
chmod 777 include/functions.sh
chmod 777 include/global_functions.sh
chmod 777 scanner.sh
source $SCRIPTPATH/include/functions.sh
fi
if [ -f "include/config" ]
then
clear_screen
echo "Znalazłem poprzedni plik konfiguracyjny, więc go użyję :)"
source include/config
server=$server
url=$url
companyID=$companyID
userID=$user_id
companyAPI=$api_key
echo "Poprzednie userID = $userID"
echo "Poprzednie companyAPI = $companyAPI"
echo "Poprzedni server = $server"
echo "Poprzedni url = $url"
echo "Poprzedni user_id = $userID"
else
if [ -f "check.sh" ]
then
echo "Sprawdzam poprzedni plik check.sh"
userID=`cat check.sh | grep user_id | cut -d "'" -f2`
companyAPI=`cat check.sh | grep api_key | cut -d "'" -f2`
echo "Poprzednie userID = $userID"
echo "Poprzednie companyAPI = $companyAPI"
if [ -f "$sky_desk_server_path" ]
then
url=`cat $sky_desk_server_path`
server=$url
fi
fi
fi
while [ "$server" = "" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "# Podaj adres URL servera Sky-Desk z przedrostkiem https lub bez:"
echo "# https://xyz.sky-desk.eu "
echo "# xyz.sky-desk.eu "
echo "# np: 'https://kowalski.sky-desk.eu'"
echo "# lub: 'kowalski.sky-desk.eu'"
echo ""
read -p "URL = " server
done
echo $server > $sky_desk_server_path
url=`cat $sky_desk_server_path`
echo "Ustawiony serwer = $url"
while [ "$companyID" = "" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "Podaj numer ID firmy z systemu Sky-Desk"
echo "Jeśli monitoring ma dotyczyć Twojej firmy lub nie masz dodanych żadnych innych wprowadź '1'"
echo ""
read -p "Comapny ID = " companyID
done
while [ "$userID" = "" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "Podaj numer ID użytkownika"
echo "ID to znajdziesz w Panelu Sterowania, klikając w swojego użytkownika, zazwyczaj dla Administratora powinna ta wartość być równa '1'"
echo ""
read -p "User ID = " userID
done
while [ "$companyAPI" = "" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "Podaj klucz API użytkownika"
echo "Klucz ten znajdziesz w Panelu Sterowania, klikając w swojego użytkownika (nie będzie on widoczny podczas wpisywania, możesz go wkleić)"
echo ""
read -s -p "Klucz API = " companyAPI
done
# sprawdzanie poprawnosci danych
r=$(get_data_from_server $userID $companyAPI 'check_credentials^')
if [ "$r" != "OK" ]
then
clear_screen
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "!!"
echo "!! BŁĄD W AUTORYZACJI"
echo "!!"
echo "!! Sprawdź czy podałeś prawiłowe dane"
echo "!!"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
exit 1
fi
if [ "$r" = "OK" ]
then
if [ "$default_item_group_id" == "" ]
then
host_name=`echo $HOSTNAME`
ip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -n 1`
mac=`ifconfig | grep "$ip" -B 3 -A 3 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'`
default_item_group=$(get_data_from_server $user_id $api_key "check_default_asset_group^$mac")
if [ "$default_item_group" == "" ]
then
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
read -p "Podaj grupę lub wciśnij ENTER aby anulowac [ np: 1 ] = " default_item_group
echo ""
else
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
echo "Automatycznie dopasowałem grupę ===> $default_item_group <=== - potwierdź jej poprawnosc"
read -p "Wpisz grupę $default_item_group lub wciśnij ENTER aby anulowac [ $default_item_group ] = " default_item_group
echo ""
fi
fi
# poprawne dane - robie plik
echo "#!/usr/bin/env bash" > include/config
echo "user_id='$userID'" >> include/config
echo "api_key='$companyAPI'" >> include/config
echo "url='$url'" >> include/config
echo "server='$server'" >> include/config
echo "companyID='$companyID'" >> include/config
if [ "$default_item_group" != "" ]
then
echo "default_item_group_id='$default_item_group'" >> include/config
fi
data=`date +"%Y-%m-%d %H:%M:%S"`
echo "#!/usr/bin/env bash" > _check.sh
echo 'source $(dirname $0)/include/config' >> _check.sh
echo 'source $(dirname $0)/include/functions.sh' >> _check.sh
echo '' >> _check.sh
echo 'SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"' >> _check.sh
echo 'cd $SCRIPTPATH' >> _check.sh
echo '' >> _check.sh
clear_screen
echo '####################################################' >> _check.sh
echo '## poniżej skrypty monitorujące ##' >> _check.sh
echo "## ostatnia aktualizacja: $data ##" >> _check.sh
echo '## ##' >> _check.sh
echo '## UWAGA, wszyskie testy bez słowa "check" ##' >> _check.sh
echo '## w nazwie będą usunięte !!!! ##' >> _check.sh
echo '## ##' >> _check.sh
echo '####################################################' >> _check.sh
echo '' >> _check.sh
if [ -f "check.sh" ]
then
echo "" >> _check.sh
cat check.sh | grep check >> _check.sh
fi
# pobieranie parametrow urzadzania
# i sprawdzanie czy nie ma go w bazie juz przypadkiem
host_name=`echo $HOSTNAME`
ip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -n 1`
mac=`ifconfig | grep "$ip" -B 3 -A 3 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'`
desc=`uname -a`
system=`uname -s`
assetID=$(get_data_from_server $userID $companyAPI "check_mac^$mac")
if [ "$assetID" != "" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "Znalazłem takie urządzenie (ID=$assetID) w bazie $server"
echo "Czy chcesz usunąć wszystkie aktualne wpisy sprawdzające parametry dla tego urządzenia ???????"
echo ""
read -p "Usunąć wszystkie wpisy dla tego urządzenia: '$mac' ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
wynik=$(get_data_from_server $userID $companyAPI "delete_subitems^$mac^$assetID")
fi
fi
while [ "$assetID" = "" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo "Nie znalazłem w bazie urządzenia z tym adresem MAC: $mac"
echo "Podaj ID urządzenia do którego chcesz przypisać monitoring"
echo "ID to znajdziesz otwierając ikonę 'Zasoby sprzętowe' a następnie 'Pokaż zasoby' przy wybranej kategorii"
echo "albo:"
echo "Jeśli chcesz dodać automatycznie dany sprzęt (jego nazwę, IP, MAC itp, wpisz cyfrę '0')"
echo ""
read -p "ID urządzenia = " assetID
done
if [ "$assetID" = "0" ]
then
assetID=$(get_data_from_server $userID $companyAPI "add_host^$host_name^$mac^$ip^$system^$desc")
assetID=$(get_data_from_server $userID $companyAPI "check_mac^$mac")
clear_screen
echo "------------------------------- Info -------------------------------------------- "
echo "Dodałem nowe urządzenie do bazy"
echo "Jest ono teraz dostępne jako $host_name pod numerem ID=$assetID"
fi
echo ""
echo ""
# sprawdzanie dyskow
for d in `df -h | grep -v Filesystem | rev | cut -d'%' -f1 | rev | grep '/'`
do
dd=`echo $d | xargs`
ret=`cat check.sh | grep 'check_hdd_size' | grep "'$dd'" | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
read -p "Czy chcesz dodać monitoring zajętości partycji '$dd' ? [t/N]" -n 1 -r
echo ""
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^90^HDD [% usage] $dd")
echo "check_hdd_size $sID '$dd'" >> _check.sh
fi
fi
done
# sprawdzanie pamieci ram
ret=`cat check.sh | grep 'check_memory_usage' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring pamięci RAM ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
check_package "free"
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^95^Memory usage")
echo "check_memory_usage $sID" >> _check.sh
fi
fi
# sprawdzanie CPU
ret=`cat check.sh | grep 'check_cpu_load' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring obciążenia procesora ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^3^CPU Load")
echo "check_cpu_load $sID" >> _check.sh
fi
fi
# sprawdzanie zalogowanych uzytkownikow
ret=`cat check.sh | grep 'check_logged_users' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring zalogowanych użytkowników ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^0^Logged users")
echo "check_logged_users $sID" >> _check.sh
fi
fi
# sprawdzanie MySQLa
ret=`cat check.sh | grep 'check_mysql_processcount' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring obciążenia bazy MySQL ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
read -p "Podaj użytkownika do MySQLa [ np: root ] = " mysqlUser
read -s -p "Podaj hasło $mysqlUser do MySQLa = " mysqlPass
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^20^MySQL - process count")
echo "check_mysql_processcount $sID $mysqlUser $mysqlPass" >> _check.sh
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^0^20^MySQL - long process count > 1 min")
echo "check_mysql_processcount $sID $mysqlUser $mysqlPass" >> _check.sh
fi
fi
# sprawdzanie uptime w dniach
ret=`cat check.sh | grep 'check_uptime_days' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring uptime ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
read -p "Podaj minimalną liczbę dni [ np: 7 ] = " min_days
read -p "Podaj maksymalną liczbę dni [ np: 365 ] = " max_days
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^$min_days^$max_days^Uptime [dni]")
echo "check_uptime_days $sID" >> _check.sh
fi
fi
# sprawdzanie transferu sieciowego
ret=`cat check.sh | grep 'check_lan_interface_tx' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać monitoring transferu sieciowego TX/RX ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
for i in `ip link show | grep 'state' | cut -d':' -f 2 | grep -v 'lo'`
do
interface=`echo $i | xargs`
read -p "Dodać interfejs '$interface' ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
echo "Dostępne interfejsy sieciowe:"
ip link show | grep 'state' | cut -d':' -f 2 | grep -v 'lo'
read -p "Podaj minimalne wysycenie pobierania [kbitów/s] dla $interface w kb [ np: 0 ] = " min_rx
read -p "Podaj maksymalne wysycenie pobierania [kbitów/s] dla $interface w kb [ np: 1024 ] = " max_rx
read -p "Podaj minimalne wysycenie wysyłania [kbitów/s] dla $interface w kb [ np: 0 ] = " min_tx
read -p "Podaj maksymalne wysycenie wysyłania [kbitów/s] dla $interface w kb [ np: 1024 ] = " max_tx
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^$min_tx^$max_tx^$interface TX [kBitów/s]")
echo "check_lan_interface_tx $sID '$interface'" >> _check.sh
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^$min_rx^$max_rx^$interface RX [kBitów/s]")
echo "check_lan_interface_rx $sID '$interface'" >> _check.sh
fi
done
fi
fi
# sprawdzanie ilosci uruchomionych procesow w pamieci
ret=`cat check.sh | grep 'check_running_process_count' | wc -l | xargs`
clear_screen
if [ "$ret" != "0" ]
then
echo "INFO: Lista poniższych procesów jest już monitorowana:"
cat check.sh | grep 'check_running_process_count' | grep -v '#'
fi
echo ""
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać sprawdzanie liczby otwartych procesów ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
while [ "$processName" != "0" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
echo "Podaj nazwę procesu którego liczbę uruchomień chcesz monitorować [ jeśli nie chcesz dodawać więcej portów wpisz '0' lub naciśnij ENTER bez wpisywania niczego ]"
read -p "Nazwa procesu = " processName
if [ "$processName" = "" ]
then
processName="0"
fi
if [ "$processName" != "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Podaj minimalną liczbę wystąpień danego procesu = " min_count
read -p "Podaj maksymalną liczbę wystąpień danego procesu = " max_count
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^$min_count^$max_count^Uruchomionych: $processName")
echo "check_running_process_count $sID '$processName'" >> _check.sh
fi
done
fi
fi
# sprawdzanie otwartych portów
clear_screen
ret=`cat check.sh | grep 'check_port_open' | wc -l | xargs`
if [ "$ret" != "0" ]
then
echo "INFO: Poniższe porty są już monitorowane:"
cat check.sh | grep 'check_port_open' | grep -v '#'
fi
echo ""
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać sprawdzanie otwartych portów (mogą być też inne adresy IP) ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
while [ "$portNo" != "0" ]
do
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
echo "Podaj numer portu [ jeśli nie chcesz dodawać więcej portów wpisz '0' lub naciśnij ENTER bez wpisywania niczego ]"
read -p "Numer portu = " portNo
if [ "$portNo" = "" ]
then
portNo="0"
fi
if [ "$portNo" != "0" ]
then
echo ""
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
echo "Podaj nazwę hosta albo adres IP [ Jeśli chcesz sprawdzać tego hosta wpisz 'localhost' albo kliknij ENTER pozostawiając pusty wpis ]"
read -p "Nazwa hosta = " portHost
if [ "$portHost" = "" ]
then
portHost="localhost"
fi
echo ""
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
echo "Podaj opis portu [ np. 'FTP', jeśli nie ustawisz opisu będzie np: 'Port 21' ]"
read -p "Opis portu = " portDesc
if [ "$portDesc" = "" ]
then
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^1^2^Port $portNo")
else
sID=$(get_data_from_server $userID $companyAPI "get_set_monitor^$assetID^1^2^$portDesc")
fi
echo "check_port_open $sID $portNo 2 0 $portHost" >> _check.sh
fi
done
fi
fi
ret=`cat /etc/crontab | grep 'check.sh' | grep -v '#' | wc -l | xargs`
if [ "$ret" = "0" ]
then
clear_screen
echo "------------------------------- Pytanie ----------------------------------------- "
echo ""
read -p "Czy chcesz dodać skrypt do crona ? [t/N]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Tt]$ ]]
then
#wget --no-check-certificate https://sky-desk.eu/download/demo_check.sh
#mv demo_check.sh check.sh
chmod 777 check.sh
echo "Będzie potrzeba podania hasła roota"
if [ -n "$(command -v sudo)" ]
then
sudo echo "" >> /etc/crontab
sudo echo "" >> /etc/crontab
sudo echo "##### MONITORING #####" >> /etc/crontab
sudo echo "" >> /etc/crontab
sudo echo "*/5 * * * * root $SCRIPTPATH/check.sh" >> /etc/crontab
sudo echo "1 1 * * * root $SCRIPTPATH/setup.sh --update" >> /etc/crontab
sudo echo "" >> /etc/crontab
sudo echo "##### KONIEC MONITORINGU #####" >> /etc/crontab
else
echo "" >> /etc/crontab
echo "" >> /etc/crontab
echo "##### MONITORING #####" >> /etc/crontab
echo "" >> /etc/crontab
echo "*/5 * * * * root $SCRIPTPATH/check.sh" >> /etc/crontab
echo "1 1 * * * root $SCRIPTPATH/setup.sh --update" >> /etc/crontab
echo "" >> /etc/crontab
echo "##### KONIEC MONITORINGU #####" >> /etc/crontab
fi
fi
fi
#echo ""
#echo ""
#echo "#####################################################################"
#echo "Sprawdzanie pakietów niezbędnych do działania monitoringu"
#check_needed_packages
source $SCRIPTPATH/include/config
## sprawdzanie default_item_group ##
if [ "$default_item_group_id" == "" ]
then
host_name=`echo $HOSTNAME`
ip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -n 1`
mac=`ifconfig | grep "$ip" -B 3 -A 3 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'`
default_item_group=$(get_data_from_server $user_id $api_key "check_default_asset_group^$mac")
if [ "$default_item_group" == "" ]
then
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
read -p "Podaj grupę lub wciśnij ENTER aby anulowac [ np: 1 ] = " default_item_group
echo ""
else
clear_screen
echo "------------------------------- Pytanie dodatkowe ----------------------------------------- "
echo ""
echo "Podaj grupę urządzeń z systemu Sky-Desk. Nie jest ona niezbędna ale jej brak może powodować problem"
echo "z identyfikacją sprzętu w przypdaku zdublowanych adresów MAC (mało prawdopodobne ale zawsze)"
echo "Automatycznie dopasowałem grupę ===> $default_item_group <=== - potwierdź jej poprawnosc"
read -p "Wpisz grupę $default_item_group lub wciśnij ENTER aby anulowac [ $default_item_group ] = " default_item_group
echo ""
fi
fi
if [ "$default_item_group" != "" ]
then
echo "default_item_group_id='$default_item_group'" >> include/config
fi
clear_screen
echo "------------------------------- Info ----------------------------------------- "
echo ""
echo "Wygląda OK. Jeśli chcesz to przeedytuj skrypt 'check.sh' na własne potrzeby..."
echo "Poniżej lista opcji uruchamiania:"
echo ""
echo ""
if [ -f _check.sh ]
then
mv _check.sh check.sh
fi
if [ -f check.sh ]
then
chmod 777 check.sh
./check.sh
fi
exit 1
fi
| true |
65b63c6f3be2300da7973979e7a75257890ee9ea | Shell | adamdrucker/bash-hours-logger | /hours_main.sh | UTF-8 | 8,414 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# /////////////////////////////////////
# /// Script for time card format ////
# ///////////////////////////////////
# /////////// Functions ///////////
# ////////////////////////////////
# /////// DATES ///////
# Check date formatting
# Keep asking (on stdin) until the argument/answer looks like MM-DD-YYYY,
# then echo the accepted string.  Only the *shape* is validated here; the
# calendar validity of the date is checked separately by date_check().
check_date_format() {
    candidate=$1
    until [[ $candidate =~ ^[0-9]{2}-[0-9]{2}-[0-9]{4}$ ]]; do
        read -p "Wrong date format, try again. Enter the date of your shift: " candidate
    done
    echo "$candidate"
}
# Check if year is a leap year
# Echo 1 if $1 is a Gregorian leap year, 0 otherwise.
# BUGFIX: force base-10 interpretation (10#) so a year with a leading zero
# (e.g. "0800") is not parsed as an invalid octal literal by (( )).  This
# matches the 10#$day handling used elsewhere in this script.
leap_year(){
	year=$((10#$1))
	(( !(year % 4) && ( year % 100 || !(year % 400) ) )) &&
	echo 1 || echo 0
}
# Check for valid date entry
# Validate a calendar date.  Echoes 1 if the day is invalid for the given
# month (taking leap years into account), 0 if it is valid.
#   $1 - two-digit month (01-12)
#   $2 - two-digit day
#   $3 - leap-year flag from leap_year(): 1 = leap, 0 = not
# BUGFIXES vs. the original if-chain:
#   - day 00 was accepted (the lower bound was "-lt 0"; it is now "-lt 1")
#   - months outside 01-12 echoed nothing and were treated as valid
#   - a valid day in a leap-year February echoed nothing instead of 0
# The function now always echoes exactly one of 0/1, which is backward
# compatible with the caller's "[[ $ret -eq 1 ]]" test.
date_check(){
	local month=$((10#$1))
	local day=$((10#$2))
	local leap_date=$3
	local max_day
	case $month in
		1|3|5|7|8|10|12) max_day=31 ;;
		4|6|9|11)        max_day=30 ;;
		2)
			if [ "$leap_date" -eq 1 ]; then
				max_day=29
			else
				max_day=28
			fi
			;;
		*)
			# Month out of range 01-12: always invalid.
			echo 1
			return
			;;
	esac
	if [ "$day" -lt 1 ] || [ "$day" -gt "$max_day" ]; then
		echo 1
	else
		echo 0
	fi
}
# //////// TIMES ////////
# Check time input format
# Keep asking (on stdin) until the argument/answer matches HH:MM, then echo
# the accepted string.  The 0-23 hour range is enforced by the caller.
# BUGFIX: the pattern previously ended in "[0-9]{2}+", which glibc ERE treats
# as "one or more repetitions of two digits", so inputs such as "12:3456"
# were accepted.  The bound is now exactly {2}.
function check_time_format() {
	input=$1
	while [[ ! $input =~ ^[0-9]{2}:[0-9]{2}$ ]]
	do
		read -p "Incorrect format, try again. Enter the time for your shift: " input
	done
	echo "$input"
}
# Convert minutes into decimal format for submission
# Map a minute count (00-59, possibly zero-padded) onto the quarter-hour
# payroll decimals: 0-14 -> 0, 15-29 -> 25, 30-44 -> 50, 45-59 -> 75.
# Out-of-range input echoes nothing, as before.
convert_base_sixty() {
    min=$((10#$1))
    case $(( min / 15 )) in
        0) echo 0 ;;
        1) echo 25 ;;
        2) echo 50 ;;
        3) echo 75 ;;
    esac
}
# /////// MAIN & INPUTS ///////
# ////////////////////////////
# Main function
# Collect one shift interactively (start/end date and time), convert the
# duration to quarter-hour decimal hours, and append one pipe-delimited
# record to the timecard file named by the global DATE_LOGGED.
# Uses the helper functions check_date_format/date_check/leap_year/
# check_time_format/convert_base_sixty defined above.
# BUGFIX: the END-time re-entry loop previously re-parsed $IN_TIME instead
# of the newly entered $OUT_TIME, so a corrected end time was discarded.
main() {
    # Static, shift-independent fields of the timecard record.
    NAME=$(echo $USER)
    PAYCODE="MGHPCC/INTERN"
    BILLABLE="Y"
    EMERGENCY="N"

    # /////// STARTING ///////
    # Prompt to enter start date
    printf "PLEASE BE ADVISED: Dates must be entered in the following format MM-DD-YYYY.\nEnter the date of the START of your shift: "
    read INPUT
    IN_DATE=$(check_date_format $INPUT) # This var equals the formatted date

    # Break dates into variables
    IN_MONTH=$(echo $IN_DATE | awk -F'-' '{print $1}')
    IN_DAY=$(echo $IN_DATE | awk -F'-' '{print $2}')
    IN_YEAR=$(echo $IN_DATE | awk -F'-' '{print $3}')
    IN_LEAP_DATE=$(leap_year $IN_YEAR) # 1 = yes, 0 = no
    IN_FUNC_TEST=$(date_check $IN_MONTH $IN_DAY $IN_LEAP_DATE)

    # Re-prompt until the day actually exists in the given month/year.
    while [[ $IN_FUNC_TEST -eq 1 ]]; do
        echo "That day doesn't exist in that month. Try again."
        printf "PLEASE BE ADVISED: Dates must be entered in the following format MM-DD-YYYY.\nEnter the date of the START of your shift: "
        read INPUT
        IN_DATE=$(check_date_format $INPUT) # This var equals the formatted date
        IN_MONTH=$(echo $IN_DATE | awk -F'-' '{print $1}')
        IN_DAY=$(echo $IN_DATE | awk -F'-' '{print $2}')
        IN_YEAR=$(echo $IN_DATE | awk -F'-' '{print $3}')
        IN_LEAP_DATE=$(leap_year $IN_YEAR) # 1 = yes, 0 = no
        IN_FUNC_TEST=$(date_check $IN_MONTH $IN_DAY $IN_LEAP_DATE)
    done

    # Prompt to enter start time
    printf "PLEASE BE ADVISED: Times must be formatted in 24-hour notation as HH:MM.\nEnter START time for $IN_DATE: "
    read INPUT
    IN_TIME=$(check_time_format $INPUT)

    # Break up time and combine into HHMM for an easy 24-hour range check.
    INHOUR=$(echo $IN_TIME | awk -F: '{print $1}')
    INMIN=$(echo $IN_TIME | awk -F: '{print $2}')
    CMBN=$INHOUR$INMIN

    # Check combined IN value
    while [ $CMBN -ge 2400 ]; do
        echo "The time you enter cannot exceed 23:59. Try again."
        echo "PLEASE BE ADVISED: Times must be formatted in 24-hour notation as HH:MM."
        echo "Enter START time for $IN_DATE: "
        read INPUT
        IN_TIME=$(check_time_format $INPUT)
        INHOUR=$(echo $IN_TIME | awk -F: '{print $1}')
        INMIN=$(echo $IN_TIME | awk -F: '{print $2}')
        CMBN=$INHOUR$INMIN
    done

    # /////// ENDING ///////
    # Prompt to enter end date
    printf "PLEASE BE ADVISED: Dates must be entered in the following format MM-DD-YYYY.\nEnter the date of the END of your shift: "
    read INPUT
    OUT_DATE=$(check_date_format $INPUT) # This var equals the formatted date
    OUT_MONTH=$(echo $OUT_DATE | awk -F'-' '{print $1}')
    OUT_DAY=$(echo $OUT_DATE | awk -F'-' '{print $2}')
    OUT_YEAR=$(echo $OUT_DATE | awk -F'-' '{print $3}')
    OUT_LEAP_DATE=$(leap_year $OUT_YEAR) # 1 = yes, 0 = no
    OUT_FUNC_TEST=$(date_check $OUT_MONTH $OUT_DAY $OUT_LEAP_DATE)

    # Re-prompt until the day actually exists in the given month/year.
    while [[ $OUT_FUNC_TEST -eq 1 ]]; do
        echo "That day doesn't exist in that month. Try again."
        printf "PLEASE BE ADVISED: Dates must be entered in the following format MM-DD-YYYY.\nEnter the date of the END of your shift: "
        read INPUT
        OUT_DATE=$(check_date_format $INPUT) # This var equals the formatted date
        OUT_MONTH=$(echo $OUT_DATE | awk -F'-' '{print $1}')
        OUT_DAY=$(echo $OUT_DATE | awk -F'-' '{print $2}')
        OUT_YEAR=$(echo $OUT_DATE | awk -F'-' '{print $3}')
        OUT_LEAP_DATE=$(leap_year $OUT_YEAR) # 1 = yes, 0 = no
        OUT_FUNC_TEST=$(date_check $OUT_MONTH $OUT_DAY $OUT_LEAP_DATE)
    done

    # Prompt to enter end time
    printf "PLEASE BE ADVISED: Times must be formatted in 24-hour notation as HH:MM.\nEnter END time for $OUT_DATE: "
    read INPUT
    OUT_TIME=$(check_time_format $INPUT)
    OUTHOUR=$(echo $OUT_TIME | awk -F: '{print $1}')
    OUTMIN=$(echo $OUT_TIME | awk -F: '{print $2}')
    COMBN=$OUTHOUR$OUTMIN

    # Check combined OUT value
    while [ $COMBN -ge 2400 ]; do
        echo "The time you enter cannot exceed 23:59. Try again."
        echo "PLEASE BE ADVISED: Times must be formatted in 24-hour notation as HH:MM."
        echo -n "Enter END time for $OUT_DATE: "
        read INPUT
        OUT_TIME=$(check_time_format $INPUT)
        # BUGFIX: parse the newly entered OUT_TIME (this used to read IN_TIME,
        # so OUTHOUR/OUTMIN never reflected the corrected end time).
        OUTHOUR=$(echo $OUT_TIME | awk -F: '{print $1}')
        OUTMIN=$(echo $OUT_TIME | awk -F: '{print $2}')
        COMBN=$OUTHOUR$OUTMIN
    done

    # /////////// Some calculations ///////////
    # ////////////////////////////////////////
    MINSUB=$(((10#$OUTMIN)-(10#$INMIN)))
    # Convert to positive if minutes are negative (borrow an hour)
    if [ $MINSUB -lt 0 ]; then
        let MINSUB=$(($MINSUB+60)) # Changed this from *-1 to +60
    else let MINSUB=$MINSUB
    fi
    # Convert hours to base ten (10# guards against octal "08"/"09")
    FIRSTIN=$((10#$INHOUR))
    FIRSTOUT=$((10#$OUTHOUR))
    # Convert minutes to base ten
    SECONDIN=$((10#$INMIN))
    SECONDOUT=$((10#$OUTMIN))
    HOURS=$(($FIRSTOUT-$FIRSTIN))
    # Hours offset for overnight shifts (end hour earlier than start hour)
    if [ $FIRSTIN -gt $FIRSTOUT ]; then HOURS=$(((24-$FIRSTIN) + $FIRSTOUT)); fi
    # If punch in and out are not whole hours adjust by one hour
    # (the borrowed hour was already counted in MINSUB above)
    if [ $SECONDIN -gt $SECONDOUT ]; then
        HOURS=$(($HOURS-1))
    # else HOURS=$HOURS
    fi
    MINCALC=$(($(convert_base_sixty $MINSUB)))
    # Format total as a float, e.g. "7.25"
    TOTAL=$HOURS"."$MINCALC
    #
    # Prompt for description of work
    echo -n "Enter a description: "
    read DESC_INPUT
    #
    # /////////// Output ///////////
    OUTPUT="$NAME|$IN_DATE $IN_TIME|$OUT_DATE $OUT_TIME|$TOTAL|${PAYCODE^^}|${BILLABLE^^}|${EMERGENCY^^}|$DESC_INPUT"
    # Quote the record so whitespace and glob characters in the description
    # survive display and logging unchanged.
    echo "$OUTPUT"
    echo -n "Is the preceding information correct? (Y/N) "
    read CORRECT
    # $CORRECT is quoted everywhere below: an empty answer previously made
    # "[" fail with "unary operator expected".
    if [ "$CORRECT" == "Y" ] || [ "$CORRECT" == "y" ]; then
        echo "$OUTPUT" >> "/home/$USER/Documents/Timecards/timecard_$DATE_LOGGED.txt"
    fi
    while [ "$CORRECT" == "N" ] || [ "$CORRECT" == "n" ]; do
        echo "Please start over then."
        break
    done
    while [ "$CORRECT" == "Y" ] || [ "$CORRECT" == "y" ]; do
        echo "Do you want to submit another entry?"
        break
    done
    echo -n "Press [ENTER] to start another entry or 'q' to quit. "
    read START
}
# /////////// Prompts ///////////
# //////////////////////////////
#
# Entry point: greet the user, make sure the per-user timecard directory and
# today's timecard file exist, then run main() until the user quits with 'q'.
printf "Welcome to the Timecard Logging System.\nHere you will enter the dates and hours you've worked.\nTasks should be separated and itemized with dates/hours recorded for each entry.\n"
echo -n "Press [ENTER] to start, press 'q' to quit. "
read START
# Create the timecard directory on first run.
# (Replaces the old `test -d` + `[ $? == 1 ]` pattern and the pointless
# $(touch ...) command substitution.)
if [ ! -d "/home/$USER/Documents/Timecards" ]; then
    mkdir "/home/$USER/Documents/Timecards"
fi
# One timecard file per calendar day the logger is run.
DATE_LOGGED=$(date +%m-%d-%Y)
touch "/home/$USER/Documents/Timecards/timecard_$DATE_LOGGED.txt"
while [[ $START != 'q' ]]; do
    main
done
| true |
a75861f7d7bf30e264ce03ecb75ccd753819de43 | Shell | tiehan/ampliconDIVider | /sh/samtools_proper_pairs.sh | UTF-8 | 218 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env bash
source ~/.bashrc

# Input and output BAM paths from the command line.
INBAM=$1
OUTBAM=$2

# Keep only proper pairs from a BAM file (removing unpaired and unaligned reads).
# -f 3 requires both SAM flags "read paired" and "read mapped in proper pair";
# -h keeps the header so the output is a valid BAM.
# BUGFIX: quote the paths so filenames containing spaces or glob characters work.
samtools view -f 3 -h "${INBAM}" \
    | samtools view -S -b - > "${OUTBAM}"

exit
| true |
80cfd869ebbe136caf1ee9cff9180cd36ba22ea4 | Shell | sudomesh/connect-to-mesh | /connect_to_mesh.sh | UTF-8 | 1,500 | 4.15625 | 4 | [] | no_license | #!/usr/bin/env bash
# Open an L2TP tunnel to a mesh exitnode with tunneldigger and run babeld on
# the tunnel interface.  TUNNELDIGGER_PATH, BABELD_PATH and UUID come from
# connect_to_mesh.config.  `ip route` needs root, hence the sudo in the usage.
set -e

# NOTE(review): despite the old comment here, this only validates the
# argument count; root privileges are NOT verified anywhere in the script.
if [ "$#" -lt 1 ]; then
  echo "Usage: sudo connect_to_mesh.sh <EXITNODE_IP> [<EXITNODE_HOSTNAME>]"
  exit 1
fi

if [ ! -f connect_to_mesh.config ]
then
  echo "Could not find connect_to_mesh.config. See README.md."
  exit 1
fi

source connect_to_mesh.config

EXITNODE_IP=$1
EXITNODE_HOSTNAME=$2   # optional; used for the tunneldigger broker address
EXITNODE_PORT=8942

# get the name of the default gateway
# Parse "... via <gateway> dev <interface> ..." out of `ip route get`.
REGEX='via[[:space:]]([^[:space:]]+)[[:space:]]dev[[:space:]]([^[:space:]]+)'
TEST=$(ip route get $EXITNODE_IP)
if [[ $TEST =~ $REGEX ]]
then
  DEFAULT_GATEWAY=${BASH_REMATCH[1]}
  DEFAULT_INTERFACE=${BASH_REMATCH[2]}
else
  echo "Could not determine default interface and gateway."
  exit 1
fi

# add a static route for the exitnode via the default gateway
# (this will override the default route, which is about to become the tunnel interface l2tp0)
STATIC_ROUTE="$EXITNODE_IP/32 dev $DEFAULT_INTERFACE via $DEFAULT_GATEWAY"
echo "Adding static route $STATIC_ROUTE"
# $STATIC_ROUTE is intentionally unquoted: it must word-split into separate
# `ip route` arguments.
ip route add $STATIC_ROUTE

# cleanup this route when we exit the script (any exit path, via trap)
function cleanup {
  ip route del $STATIC_ROUTE
}
trap cleanup EXIT

# use exitnode hostname if provided
if [ -z $EXITNODE_HOSTNAME ]
then
  TUNNELDIGGER_ADDR=$EXITNODE_IP:$EXITNODE_PORT
else
  TUNNELDIGGER_ADDR=$EXITNODE_HOSTNAME:$EXITNODE_PORT
fi

echo "Connecting to $TUNNELDIGGER_ADDR..."

# open the tunnel (foreground mode, backgrounded; tunnel_hook.sh is the
# session hook script in the current directory)
$TUNNELDIGGER_PATH -f -b $TUNNELDIGGER_ADDR -u $UUID -i l2tp0 -s tunnel_hook.sh &

# start babeld (routing daemon) on the tunnel interface
$BABELD_PATH l2tp0 &

# Block until both background daemons exit; the EXIT trap then removes the route.
wait
| true |
5f16546eaa7a3408dadf3bad3fa87dec356024ca | Shell | bamboo-yujiro/dotfiles | /.zshrc | UTF-8 | 6,493 | 2.625 | 3 | [] | no_license | export ZSH=~/.oh-my-zsh
ZSH_THEME="maran"
plugins=(git)
export PATH="/usr/local/sbin:/usr/sbin:/usr/local/bin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"
source $ZSH/oh-my-zsh.sh
# ------------------------------
# General Settings
# ------------------------------
export LESSCHARSET=utf-8
export LANG=ja_JP.UTF-8 # 文字コードをUTF-8に設定
export KCODE=u # KCODEにUTF-8を設定
### Complement ###
autoload -U compinit; compinit # 補完機能を有効にする
setopt auto_list # 補完候補を一覧で表示する(d)
setopt auto_menu # 補完キー連打で補完候補を順に表示する(d)
setopt auto_param_slash # ディレクトリ名の補完で末尾の / を自動的に付加
setopt mark_dirs # ファイル名の展開でディレクトリにマッチした場合 末尾に / を付加
setopt list_types # 補完候補一覧でファイルの種別を識別マーク表示
setopt complete_in_word # 語の途中でもカーソル位置で補完
setopt always_last_prompt # カーソル位置は保持したままファイル名一覧を順次その場で表示
setopt list_packed # 補完候補をできるだけ詰めて表示する
setopt list_types # 補完候補にファイルの種類も表示する
setopt globdots # 明確なドットの指定なしで.から始まるファイルをマッチ
bindkey "^[[Z" reverse-menu-complete # Shift-Tabで補完候補を逆順する("\e[Z"でも動作する)
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}' # 補完時に大文字小文字を区別しない
zstyle ':completion:*' verbose yes
zstyle ':completion:*' completer _expand _complete _match _prefix _approximate _list _history
### History ###
HISTFILE=~/.zsh_history # ヒストリを保存するファイル
HISTSIZE=10000 # メモリに保存されるヒストリの件数
SAVEHIST=10000 # 保存されるヒストリの件数
setopt bang_hist # !を使ったヒストリ展開を行う(d)
setopt extended_history # ヒストリに実行時間も保存する
setopt hist_ignore_dups # 直前と同じコマンドはヒストリに追加しない
setopt share_history # 他のシェルのヒストリをリアルタイムで共有する
setopt hist_reduce_blanks # 余分なスペースを削除してヒストリに保存する
# マッチしたコマンドのヒストリを表示できるようにする
autoload history-search-end
zle -N history-beginning-search-backward-end history-search-end
zle -N history-beginning-search-forward-end history-search-end
bindkey "^P" history-beginning-search-backward-end
bindkey "^N" history-beginning-search-forward-end
# すべてのヒストリを表示する
function history-all { history -E 1 }
# ------------------------------
# Look And Feel Settings
# ------------------------------
# Pick the right colorized-ls flags for the current OS
# (BSD ls uses -G on macOS, GNU ls uses --color on Linux).
case "${OSTYPE}" in
darwin*)
    alias ls="ls -G"
    alias ll="ls -lG"
    alias la="ls -laG"
    export LS_COLORS='di=00;36'
    ;;
linux*)
    alias ls='ls --color'
    alias ll='ls -l --color'
    alias la='ls -la --color'
    export LS_COLORS='di=00;36'
    ;;
esac
# ------------------------------
# Alias
# ------------------------------
# cd->ls : zsh runs chpwd() after every directory change, so every cd
# automatically shows an abbreviated listing of the new directory.
chpwd() {
    ls_abbrev
}
# Abbreviated directory listing used by chpwd(): print everything for small
# directories, but only the first and last five lines plus a file count when
# the listing exceeds ten lines.
ls_abbrev() {
    # -a : Do not ignore entries starting with ..
    # -C : Force multi-column output.
    # -F : Append indicator (one of */=>@|) to entries.
    local cmd_ls='ls'
    local -a opt_ls
    opt_ls=('-aCF' '--color=always')
    case "${OSTYPE}" in
        freebsd*|darwin*)
            if type gls > /dev/null 2>&1; then
                cmd_ls='gls' # prefer GNU coreutils ls if installed
            else
                # -G : Enable colorized output.
                opt_ls=('-aCFG')
            fi
            ;;
    esac
    local ls_result
    # CLICOLOR_FORCE keeps colors even though output is captured; the sed
    # call drops lines that contain only a color escape sequence.
    ls_result=$(CLICOLOR_FORCE=1 COLUMNS=$COLUMNS command $cmd_ls ${opt_ls[@]} | sed $'/^\e\[[0-9;]*m$/d')
    local ls_lines=$(echo "$ls_result" | wc -l | tr -d ' ')
    if [ $ls_lines -gt 10 ]; then
        echo "$ls_result" | head -n 5
        echo '...'
        echo "$ls_result" | tail -n 5
        echo "$(command ls -1 -A | wc -l | tr -d ' ') files exist"
    else
        echo "$ls_result"
    fi
}
# Misc aliases.  The trailing space in `sudo ` makes zsh expand the next
# word as an alias too (so e.g. `sudo ll` works).
alias sudo='sudo '
alias ll='ls -la'
alias vi='vim'
alias unicorn:start='bundle exec rake unicorn:start'
alias unicorn:stop='bundle exec rake unicorn:stop'
alias clip="nc localhost 8377"

# For Perl Environment
# BUGFIX: this block previously exported PYENV_ROOT pointing at ~/.plenv,
# which could leave pyenv misconfigured when ~/.pyenv is absent; plenv's
# root variable is PLENV_ROOT.
export PLENV_ROOT="${HOME}/.plenv"
if [ -d "${PLENV_ROOT}" ]; then
    export PATH="$HOME/.plenv/bin:$PATH"
    eval "$(plenv init -)"
fi

# For Ruby Environment
export RBENV_ROOT="${HOME}/.rbenv"
if [ -d "${RBENV_ROOT}" ]; then
    export PATH="$HOME/.rbenv/bin:$PATH"
    eval "$(rbenv init -)"
fi

# For PHP Environment
export PHPENV_ROOT="${HOME}/.phpenv"
if [ -d "${PHPENV_ROOT}" ]; then
    export PATH=${PHPENV_ROOT}/bin:$PATH
    eval "$(phpenv init -)"
fi

# For Python Environment
export PYENV_ROOT="${HOME}/.pyenv"
if [ -d "${PYENV_ROOT}" ]; then
    export PATH=${PYENV_ROOT}/bin:$PATH
    eval "$(pyenv init -)"
    eval "$(pyenv virtualenv-init -)"
fi

# For Go Environment
export GOENV_ROOT="${HOME}/.goenv"
if [ -d "${GOENV_ROOT}" ]; then
    export PATH=${GOENV_ROOT}/bin:$PATH
    eval "$(goenv init -)"
fi

alias pyg='pygmentize -O style=monokai -f console256 -g'
export WORKON_HOME=$HOME/.virtualenvs
export DISABLE_UPDATE_PROMPT="false"
export "EDITOR=vim"

# NeoVim
export XDG_CONFIG_HOME=~/.config

# tmux colors break without forcing 256-color mode
alias tmux='tmux -2'

# BUGFIX: the mongodb line previously embedded $PATH twice
# ("$PATH:...:$PATH"), duplicating every entry.
export PATH="$PATH:/usr/local/opt/mongodb-community@4.0/bin"
export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
export PATH="/Users/yujiro/Library/Android/sdk/platform-tools:$PATH"
export PATH="/Users/yujiro/development/flutter/bin:$PATH"
export PATH="/Users/yujiro/development/flutter/.pub-cache/bin:$PATH"
export PATH=$HOME/.nodebrew/current/bin:$PATH

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
#__conda_setup="$('/Users/yujiro/opt/anaconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
#if [ $? -eq 0 ]; then
#    eval "$__conda_setup"
#else
#    if [ -f "/Users/yujiro/opt/anaconda3/etc/profile.d/conda.sh" ]; then
#        . "/Users/yujiro/opt/anaconda3/etc/profile.d/conda.sh"
#    else
#        export PATH="/Users/yujiro/opt/anaconda3/bin:$PATH"
#    fi
#fi
#unset __conda_setup
# <<< conda initialize <<<
| true |
905b391b6b871b06ecb5a9dff02ef1fa8417efc6 | Shell | willmarkley/wml | /glibc-build/chroot.sh | UTF-8 | 3,066 | 2.9375 | 3 | [] | no_license | ## chroot.sh
## executed in the chroot environment with the TOOLCHAIN
## Builds the base filesystem layout, installs the Linux API headers and
## glibc, and sets up locales and timezone data (Linux From Scratch style).
## NOTE(review): expects $LINUX_TAR/$LINUX, $GLIBC_TAR/$GLIBC/$GLIBC_PATCH
## and $TZDATA to be exported by the caller, and a /tools toolchain to exist.
## PREPARE FOR INSTALLS ##
## Create Directories and Links (FHS)
mkdir -pv /{boot,etc/{opt,sysconfig},home,mnt,opt}
mkdir -pv /{media/{floppy,cdrom},srv,var}
install -dv -m 0750 /root
install -dv -m 1777 /tmp /var/tmp
mkdir -pv /usr/{,local/}{bin,include,lib,sbin,src}
mkdir -pv /usr/{,local/}share/{color,dict,doc,info,locale,man}
mkdir -v  /usr/{,local/}share/{misc,terminfo,zoneinfo}
mkdir -v  /usr/libexec
mkdir -pv /usr/lib/firmware
mkdir -pv /usr/{,local/}share/man/man{1..8}
## Merged-/usr style symlinks plus the 64-bit loader directory
ln -sv usr/bin /bin
ln -sv usr/lib /lib
ln -sv usr/sbin /sbin
ln -sv lib /lib64
mkdir -v /var/{log,mail,spool}
ln -sv /run /var/run
ln -sv /run/lock /var/lock
mkdir -pv /var/{opt,cache,lib/{color,misc,locate},local}
## Create temporary symlinks (point essential tools at the /tools toolchain
## until the final versions are built)
ln -sv /tools/bin/{bash,cat,dd,echo,ln,pwd,rm,stty} /bin
ln -sv /tools/bin/{install,perl,m4} /usr/bin
ln -sv /tools/lib/libgcc_s.so{,.1} /usr/lib
ln -sv /tools/lib/libstdc++.{a,so{,.6}} /usr/lib
ln -sv bash /bin/sh
cp -v /tools/lib/ld-linux-x86-64.so.2 /lib64
## INSTALL LINUX KERNEL API HEADERS ##
echo "INSTALL LINUX KERNEL API HEADERS"
## Extract and change directory
cd /sources
tar -xf $LINUX_TAR
cd $LINUX
## Build and install headers (headers_install sanitizes them for userspace)
make mrproper
make --silent INSTALL_HDR_PATH=dest headers_install
find dest/include \( -name .install -o -name ..install.cmd \) -delete
cp -rv dest/include/* /usr/include
## INSTALL GLIBC ##
echo "INSTALL GLIBC"
## Extract and change directory
cd /sources
tar -xf $GLIBC_TAR
cd $GLIBC
## Apply patch to meet FHS
patch -Np1 -i ../$GLIBC_PATCH
## Add compatibility for build
## NOTE(review): the gcc include path hard-codes toolchain version 7.3.0 —
## confirm it matches the /tools compiler actually installed.
ln -sfv /tools/lib/gcc /usr/lib
GCC_INCDIR=/usr/lib/gcc/x86_64-pc-linux-gnu/7.3.0/include
ln -sfv ld-linux-x86-64.so.2 /lib64/ld-lsb-x86-64.so.3
rm -f /usr/include/limits.h
## Create separate build directory (glibc refuses in-tree builds)
mkdir -v build
cd build
## configure
CC="gcc -isystem $GCC_INCDIR -isystem /usr/include" \
    ../configure --prefix=/usr \
                 --disable-werror \
                 --enable-kernel=3.2 \
                 --enable-stack-protector=strong \
                 libc_cv_slibdir=/lib
unset GCC_INCDIR
## make
make --silent
## make install (skip the test-installation Perl check first)
sed '/test-installation/s@$(PERL)@echo not running@' -i ../Makefile
make --silent install
## nscd (name service cache daemon) config and cache directory
cp -v ../nscd/nscd.conf /etc/nscd.conf
mkdir -pv /var/cache/nscd
# Locales (minimal set: en_US in Latin-1 and UTF-8)
mkdir -pv /usr/lib/locale
localedef -i en_US -f ISO-8859-1 en_US
localedef -i en_US -f UTF-8 en_US.UTF-8
## Timezone: compile the tzdata sources into /usr/share/zoneinfo
tar -xf ../../$TZDATA
ZONEINFO=/usr/share/zoneinfo
mkdir -pv $ZONEINFO/{posix,right}
for tz in etcetera southamerica northamerica europe africa antarctica  \
          asia australasia backward pacificnew systemv; do
    zic -L /dev/null   -d $ZONEINFO       -y "sh yearistype.sh" ${tz}
    zic -L /dev/null   -d $ZONEINFO/posix -y "sh yearistype.sh" ${tz}
    zic -L leapseconds -d $ZONEINFO/right -y "sh yearistype.sh" ${tz}
done
cp -v zone.tab zone1970.tab iso3166.tab $ZONEINFO
# Set the default zone pointer (America/New_York)
zic -d $ZONEINFO -p America/New_York
unset ZONEINFO
| true |
3299373dfe897f10687e0ef73b398ea3c5801b83 | Shell | xyuan/SEM | /jobscript_topopt.sh | UTF-8 | 1,043 | 2.625 | 3 | [] | no_license | #!/bin/sh
# run with
#   qsub jobscript.sh
# http://beige.ucs.indiana.edu/I590/node35.html
# qstat states: R: running, C: completed, Q: queued, S: suspended,
# H: held - this means that it is not going to run until it is released
# showq: all jobs
# NB: the "#PBS" lines below are scheduler directives parsed by qsub,
# not ordinary comments.
# 4 nodes x 12 cores each
#PBS -l nodes=4:ppn=12
#PBS -q topopt
# embedded options to qsub - start with #PBS
# -- our name ---
#PBS -N SEM
#PBS -o stout/$PBS_JOBNAME.$PBS_JOBID.out
#PBS -e stout/$PBS_JOBNAME.$PBS_JOBID.err
# -- run in the current working (submission) directory --
cd $PBS_O_WORKDIR
# Log the list of allocated nodes for this job.
cat $PBS_NODEFILE
# does not have any effect on MPI, but if you mix mpi with openmp it will have
# an effect
# export OMP_NUM_THREADS=8
source /appl/htopopt/RH61/set_env.sh
MPIRUN=/appl/htopopt/RH61/openmpi-1.4.5/bin/mpirun
# $MPIRUN --bind-to-socket GOL_P
# $MPIRUN -np 8 -mca btl tcp,self GOL_P
# Set environment
module load VTK
# Scaling sweep: run SEM_p with an increasing number of MPI ranks (2..48).
arr=(2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38 40 42 44 46 48)
for nodes in ${arr[@]}
do
    $MPIRUN -np $nodes -mca btl tcp,self SEM_p
    #echo $nodes
done
| true |
623c7ec534ae7278ef8de196b45d912daf2268a4 | Shell | FauxFaux/debian-control | /m/memtest86+/memtest86+_5.01-3_amd64/postinst | UTF-8 | 393 | 2.765625 | 3 | [] | no_license | #!/bin/sh
set -e
. /usr/share/debconf/confmodule
if [ "$1" = configure ]; then
db_get shared/memtest86-run-lilo
if [ "$RET" = true ] &&
[ -x /sbin/lilo ] && [ -r /etc/lilo.conf ] &&
grep "image.*=.*/boot/memtest86+.bin" /etc/lilo.conf >/dev/null
then
lilo
fi
if [ -e /boot/grub/grub.cfg ] && [ -x "`which update-grub2 2>/dev/null`" ] ; then
update-grub2
fi
fi
| true |
a159da28bc34d6ff55eab69b83a4a84e053eb4d4 | Shell | nicolasi31/public | /racine/home/.profile.d/perso-pxe.sh | UTF-8 | 1,687 | 3.40625 | 3 | [] | no_license | if [ ${PERSO_ENABLED} = 1 ] ; then
pxeaddclient () {
PXEC_HOSTNAME=${1}
PXEC_MAC_ADDR=${2}
PXEC_IP_ADDR=${3}
PXE_DHCPD_DST_FILE="/etc/dhcp/linux.hosts"
[[ $# == 3 ]] || { /bin/echo -e "Usage: ${FUNCNAME[0]} HOSTNAME MAC_ADDR IP_ADDR\nExample: ${FUNCNAME[0]} foobar 52:54:00:91:c0:e1 192.168.0.101" ; return 0 ; }
[[ -f ${PXE_DHCPD_DST_FILE} ]] || { /bin/echo -e "Destination file ${PXE_DHCPD_DST_FILE} do not exist." ; return 1 ; }
[[ "${PXEC_MAC_ADDR}" =~ ^([a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2}$ ]] || { echo "Invalid MAC address" ; return 1 ; }
[[ "${PXEC_IP_ADDR}" =~ ^[0-9]+(\.[0-9]+){3}$ ]] || { echo "Invalid IP address" ; return 1 ; }
PXE_BOOT_DST_FILE="/var/www/html/pxelinux.cfg/$(echo "${PXEC_MAC_ADDR}" | tr ":" "-" | sed "s/^/01-/")"
[[ ! -f ${PXE_BOOT_DST_FILE} ]] || { /bin/echo -e "Destination file ${PXE_BOOT_DST_FILE} already exist." ; return 1 ; }
[[ $(grep -c ${PXEC_HOSTNAME} ${PXE_DHCPD_DST_FILE}) == 0 ]] || { /bin/echo "${PXEC_HOSTNAME} already present in ${PXE_DHCPD_DST_FILE}" ; return 1 ; }
[[ $(grep -c ${PXEC_MAC_ADDR} ${PXE_DHCPD_DST_FILE}) == 0 ]] || { /bin/echo "${PXEC_MAC_ADDR} already present in ${PXE_DHCPD_DST_FILE}" ; return 1 ; }
[[ $(grep -c ${PXEC_IP_ADDR} ${PXE_DHCPD_DST_FILE}) == 0 ]] || { /bin/echo "${PXEC_IP_ADDR} already present in ${PXE_DHCPD_DST_FILE}" ; return 1 ; }
/bin/echo -e "# Created by ${USER:-${USERNAME}}, date: $(/bin/date +%Y%m%d%H%M%S)\nhost ${PXEC_HOSTNAME} { hardware ethernet ${PXEC_MAC_ADDR}; fixed-address ${PXEC_IP_ADDR}; option host-name \"${PXEC_HOSTNAME}\"; }" >> ${PXE_DHCPD_DST_FILE}
/bin/cp /var/www/html/pxelinux.cfg/default ${PXE_BOOT_DST_FILE}
systemctl restart dhcpd
}
fi
| true |
bb719e7cb601531a5d03666b83f8287129e6b6a8 | Shell | qishanqing/myscript | /sh/test.sh | UTF-8 | 873 | 3.25 | 3 | [] | no_license | #!/bin/bash
:<<!
while [ $# -ne 0 ]
do
echo $1
shift
done
eval echo \$$#
PS3="Select a script language (1-4): "
select i in perl php python shell exit
do
case $i in
perl) echo “I like perl”;;
php) echo “php is good”;;
python) echo “xiangjun like python”;;
shell) echo “shell is my favourite”;;
exit) exit;;
esac
done
s=$(
c=(qi test wan hai)
for x in ${c[@]};do
echo $x=rw
done
)
cat <<ee
[tech:/Branch/${br}]
$s
@admin=rw
*=
ee
(
cat << mail
新建分支:test
mail
) | mails_cm "svnadminBuilder request from 18310287801@163.com"
!
echo pid is $$
die() {
(
echo "$@"
if test output.$$;then
echo
cat output.$$
fi
) | mails_cm -i "svn branch create failed"
kill $$
exit -1
}
(
svn copy ${Trunk_name} ${branch_name} --parents --username builder --password ant@ -m "新建项目开发分支" >output.$$ 2>&1
) || die "no reason to failed"
| true |
a44fcc47eeca5d5d87aec911857cba1795963f3c | Shell | colinmccabe/dockerfiles | /jq/run | UTF-8 | 275 | 2.78125 | 3 | [] | no_license | #!/bin/sh
set -eu
if [ -t 1 ]; then COLOR='-C'; else COLOR='-M'; fi
# shellcheck disable=SC2086
docker run --rm -i \
--cap-drop ALL --security-opt=no-new-privileges \
--read-only --network=none \
-u 8956:8956 \
-v "$(pwd)":/mnt:ro \
-w /mnt \
jq "$COLOR" "$@"
| true |
931c1fab13a720c9d6def01db057f8f42e4105db | Shell | yqqxyy/VHbb | /CxAODOperations_VHbb/scripts/prepareCxAODProduction.sh | UTF-8 | 8,947 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Adrian Buzatu (adrian.buzatu@cern.ch) on behalf of the CxAODFramework group
[[ $- == *i* ]] && echo "Shell Interactive" || echo "Shell Not interactive"
if [[ $0 == "$BASH_SOURCE" ]]; then
echo "ERROR: I'm a script forcing you to source. Don't execute me!" >&2
exit 1
fi
#if there is no parameter, it stops and it gives the instructions
if [ $# -ne 7 ]; then
cat <<EOF
Usage: source $0 CxAODFolder WorkingFolder InputSampleList OutputSampleList GridUser Derivation VTag
Usage: if they exist already on the grid and you have the outfile and you want to download from the grid instead of from eos
Usage: source ../source/CxAODOperations_VHbb/scripts/prepareCxAODProduction.sh
Usage:
Usage: if the outsample does not exist yet, and you run with Higgs group privileges
Usage: source ../source/CxAODOperations_VHbb/scripts/prepareCxAODProduction.sh /data06/abuzatu/code/CxAODFramework_branch_master_21.2.39_21 prepareCxAODProduction0LTest /data06/abuzatu/code/CxAODFramework_branch_master_21.2.39_21/source/CxAODOperations_VHbb/data/DxAOD/VHbb/list_sample_grid.13TeV_25ns.mcdata_a.HIGG5D1.txt computeSampleList group.phys-higgs HIGG5D1 CxAOD_Adrian
Usage:
Usage: if you have your own grid username (and no privilege), replace group.phys-higgs with user.abuzatu
EOF
return
fi
SCRIPT_NAME=$0
CXAOD_FOLDER=$1
WORKING_FOLDER=$2
INPUT_SAMPLE_LIST=$3
OUTPUT_SAMPLE_LIST=$4
GRID_USER=$5
DERIVATION=$6
VTAG=$7
VERBOSE=1
DEBUG=0
echo "SCRIPT_NAME=${SCRIPT_NAME}"
echo "CXAOD_FOLDER=${CXAOD_FOLDER}"
echo "WORKING_FOLDER=${WORKING_FOLDER}"
echo "INPUT_SAMPLE_LIST=${INPUT_SAMPLE_LIST}"
echo "OUTPUT_SAMPLE_LIST=${OUTPUT_SAMPLE_LIST}"
echo "GRID_USER=${GRID_USER}"
echo "DERIVATION=${DERIVATION}"
echo "VTAG=${VTAG}"
# JOBVERSION="${DERIVATION}.${VTAG}"
# echo "JOBVERSION=${JOBVERSION}"
CXAOD_NUMBER="${VTAG:0:2}" # first two characters
echo "CXAOD_NUMBER=${CXAOD_NUMBER}"
STEM_LIST_SAMPLE="list_sample_grid.mc15c_13TeV_25ns"
mkdir -p ${WORKING_FOLDER}
# if the user wants to take just all from the grid, then use "all" for both input and output
# we loop over all of them and copy the combined list of input and output
if [[ ${INPUT_SAMPLE_LIST} == "all" ]]; then
# the output must also be all
if [[ ${OUTPUT_SAMPLE_LIST} != "all" ]]; then
echo "If Input is all, the Output must also be all. We ABORT!!!"
return -1
fi
##################################################################################################
########## input files ###########################################################################
##################################################################################################
# wc -l ${CXAOD_FOLDER}/CxAODOperations_VHbb/data/DxAOD/CxAOD${CXAOD_NUMBER}/${STEM_LIST_SAMPLE}*${DERIVATION}*.txt
INPUT_SAMPLE_LIST="${STEM_LIST_SAMPLE}_${DERIVATION}_all.txt"
cat ${CXAOD_FOLDER}/CxAODOperations_VHbb/data/DxAOD/CxAOD${CXAOD_NUMBER}/${STEM_LIST_SAMPLE}*${DERIVATION}*.txt > ${INPUT_SAMPLE_LIST}
sort ${INPUT_SAMPLE_LIST} -o ${INPUT_SAMPLE_LIST}
precedant_sample=""
precedant_dsid=""
while read sample; do
# echo "sample=${sample}"
dsid=`echo ${sample} | cut -d '.' -f2`
# echo "precedant_dsid=${precedant_dsid} dsid=${dsid}"
if [[ "${dsid}" == "${precedant_dsid}" ]]; then
echo "precedant_sample ${precedant_sample}"
echo "sample ${sample}"
fi
precedant_sample="${sample}"
precedant_dsid="${dsid}"
done < ${INPUT_SAMPLE_LIST}
# remove the ones that have the same DSID, but either another production/derivation tag, or AFII instead of FS
INPUT_SAMPLE_LIST_2="${INPUT_SAMPLE_LIST}_2"
rm -f ${INPUT_SAMPLE_LIST_2}
while read sample; do
# echo "sample=${sample}"
echo "${sample}" >> ${INPUT_SAMPLE_LIST_2}
done < ${INPUT_SAMPLE_LIST}
mv ${INPUT_SAMPLE_LIST_2} ${WORKING_FOLDER}/${INPUT_SAMPLE_LIST}
echo ""
echo ""
##################################################################################################
########## output files ##########################################################################
##################################################################################################
# wc -l ${CXAOD_FOLDER}/CxAODOperations_VHbb/data/CxAOD/CxAOD${CXAOD_NUMBER}/${STEM_LIST_SAMPLE}*${DERIVATION}*.txt
OUTPUT_SAMPLE_LIST="out_sample_${STEM_LIST_SAMPLE}_${DERIVATION}_all.txt"
cat ${CXAOD_FOLDER}/CxAODOperations_VHbb/data/CxAOD/CxAOD${CXAOD_NUMBER}/out_sample_${STEM_LIST_SAMPLE}*${DERIVATION}*.txt > ${OUTPUT_SAMPLE_LIST}
sort ${OUTPUT_SAMPLE_LIST} -o ${OUTPUT_SAMPLE_LIST}
precedant_sample=""
precedant_dsid=""
while read sample; do
# echo "sample=${sample}"
dsid=`echo ${sample} | cut -d '.' -f4`
# echo "precedant_dsid=${precedant_dsid} dsid=${dsid}"
if [[ "${dsid}" == "${precedant_dsid}" ]]; then
echo "precedant_sample ${precedant_sample}"
echo "sample ${sample}"
fi
precedant_sample="${sample}"
precedant_dsid="${dsid}"
done < ${OUTPUT_SAMPLE_LIST}
# remove the ones that have the same DSID, but either another production/derivation tag, or AFII instead of FS
OUTPUT_SAMPLE_LIST_2="${OUTPUT_SAMPLE_LIST}_2"
rm -f ${OUTPUT_SAMPLE_LIST_2}
while read sample; do
# echo "sample=${sample}"
echo "${sample}" >> ${OUTPUT_SAMPLE_LIST_2}
done < ${OUTPUT_SAMPLE_LIST}
mv ${OUTPUT_SAMPLE_LIST_2} ${WORKING_FOLDER}/${OUTPUT_SAMPLE_LIST}
fi # end if the user gives the list that is to be downloaded from the grid
# if the user gives an input list, and we want to create the output for it, or the user gives an output list
if [[ ${OUTPUT_SAMPLE_LIST} == "computeSampleList" ]]; then
echo "Compute the output sample"
# create output sample list (out_sample_*.list) # ADRIAN
COMMAND="${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/form_HSG5_out.py ${GRID_USER} ${VTAG} ${INPUT_SAMPLE_LIST} ${WORKING_FOLDER} ${DEBUG}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
elif [[ ${OUTPUT_SAMPLE_LIST} == "all" ]]; then
echo "all, do nothing"
else
echo "Use the output sample given to us"
cp ${OUTPUT_SAMPLE_LIST} ${WORKING_FOLDER}
fi
# create AMI event count (dslist_NevtDxAOD.txt)
COMMAND="${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/getAMIInfo.py ${INPUT_SAMPLE_LIST} ${WORKING_FOLDER} ${VERBOSE} ${DEBUG}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# copy script to download from the grid via rucio
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/rucio_get_jobs.py ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# copy script that computes the sum of weights (both to compare with Ami and to produce the one to use)
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/count_Nentry_SumOfWeight.py ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# once the files are downloaded locally, from that folder run
# ./count_Nentry_SumOfWeight.py 1 0
# this produces a file called yields.13TeV_DxAOD_sorted.txt
# then you need to compare this file (yields.13TeV_DxAOD_sorted.txt) with that produced by AMI (dslist_NevtDxAOD.txt)
# you need the file
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/checkYields.py ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# which you run with
# ./checkYields.py
# it produces a file called dslist_NevtDxAOD_yield.txt and tell you if there are samples not yet downloaded that are in the initial list
# for the samples already here it compares the yields and you should have a ratio of 1 when all the files for that sample were copied
# to prepare the output sample list from the already downloaded CxAOD (and then compare and/or copy with/to the one from CxAODOperations_VHbb/data/CxAOD)
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/createOutputLisFromAlreadyDownloaded.sh ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# when the agreement is full, you can copy the scripts to eos
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/copy_CxAODs_to_eos.py ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# to copy to eos, you need the file of eos folder name for each DSID
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/data/DxAOD/info/sample_info.txt ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# to replicate to grid
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/replicateToGrid.py ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# to run repeatedly rucio_get_jobs.py
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/runRepeatedlyRucioGetJobs.sh ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
# to retry automatically jobs in Panda
COMMAND="cp ${CXAOD_FOLDER}/source/CxAODOperations_VHbb/scripts/operatePandaJobs.sh ${WORKING_FOLDER}"
echo "COMMAND=${COMMAND}"
eval ${COMMAND}
echo ""
echo ""
echo "All finished well for prepareCxAODProduction.sh."
return
| true |
a8030d7e571ce84228a353efa488680a1b9a9315 | Shell | matijaeski/khk_skriptimine | /praks11/yl2 | UTF-8 | 535 | 3.0625 | 3 | [] | no_license | #!/bin/bash
#
clear
tput setaf 5
echo "Tere tulemast menüüsse."
echo "Tegevuste valik:"
tput setaf 6
echo "1 - Loo fail vastava nimega."
echo "2 - Loo fail ja ava nanoga."
echo "3 - Välju programmist."
tput setaf 5
echo -n "Palun vali oma tegevus: "
read asi
tput sgr0
if [ $asi = 1 ]; then
echo -n "Sisesta failinimi mida soovid luua: "
read nimi
touch $nimi
elif [ $asi = 2 ]; then
echo -n "Sisesta failinimi mida soovid luua: "
read nimi
touch $nimi
nano $nimi
elif [ $asi = 3 ]; then
exit
else
./yl2
fi
| true |
5e0714c8fd876ddd1c25b01bb3b42a5caf7540ae | Shell | binkha/scripts | /Binay.all | UTF-8 | 11,533 | 3.84375 | 4 | [] | no_license | #!/bin/bash
set -e
clear
echo "This scrip will give us enviroment information"
echo "=============================================="
echo ""
echo "Hello Username: $USER"
echo ""
echo "Your Home Directory is: $USER"
echo ""
echo "Your History File will Ignore: $PATH"
echo ""
echo "Your Termainal Session Type is: $TERM"
echo ""
#!bin/bash
#we can set the enviromental variables by ourself and use those variables on our script. Already set variables cannot be reassigned.
#if we set our enviromental variables and want to reassigned new variables we can set again because it will overlap
#!/bin/bash
set -e
echo "The Current path is : $PATH"
MYUSERNAME="Binay Kharel"
MYPASSWORD="Checkit"
STARTOFSCRIPT=`date`
echo "My login name for this application is: $MYUSERNAME"
echo "MY login password for the application is: $MYPASSWORD"
echo "My started this script at : $STARTOFSCRIPT"
ENDOFSCRIPT=`date`
echo "I ended this script at: $ENDOFSCRIPT"
#!/bin/bash
#redirect to /dev/null example
echo "This is displaying on the console"
echo "This is going into the black hole">>/dev/null
#!/bin/bash
set -e
expr 1 + 5
echo $?
rm doodles.sh
echo $?
expr 10 + 10
echo $?
#!/bin/bash
set -e
expr 2 + 2
expr 2+2/*4
expr \(2+2\)*4
#!/bin/bash
#simple substitution
shopt -s expand_aliases
alias TODAY="date"
alias UFILES="find /root/sciptsLinux"
TODAYSDATE=`date`
USERFILES="find /root/sciptsLinux"
echo "Today's date: $TODAYSDATE"
echo "All files owned by USER:$USERFILES"
A=`TODAY`
B=`UFILES`
echo "With Alias, Today is $A"
echo "With Alias, UFILES is $B"
#!/bin/bash
#interactive script for user input
echo "Enter Your First Name: "
read FirstName
echo "Enter Your Last Name: "
read LastName
echo ""
echo "Your Full Name is: $FirstName $LastName"
echo ""
echo "Enter Your Age: "
read Userage
echo "In ten years you will be `expr $Userage + 10` years old."
#!/bin/bash/
echo `sh{ot,ort,oot}`
echo `st{il,al}l`
#!/bin/bash
# Simple arry list and loop for display
serverlist=("webser01" "webser02" "webser03" "webser04")
count=0
for index in ${serverlist[@]}; do #@is the special char in array tell as many as in array
echo "Processing Server : ${serverlist[count]}"
count="` expr $count + 1 `"
done
#!/bin/bash
#demo of command line values passed in our shell script
set -e
#echo "the following item was passed in to the script at run time $1 $2"
username=$1
password=$2
echo "The following username $username and password is $password"
#!/bin/bash
set -e
#simple if script if script for guessing a number
echo "Guess the secret number"
echo "======================"
echo ""
echo "Enter a number Between 1 and 5: "
read Guess
if [ $Guess = 3 ]
then
echo "You Guessed the correct number"
#else
#echo " Your guess is worng"
fi
#!/bin/bash
set -e
# test for existence of indicated file name
#Filename=$1
#echo "Testing for the existence of a file called $Filename"
#
#if [ -a $Filename ]
#if we put ! before -a to get which does not exist
# then
# echo "File $Filename Does Indeed Exist!"
#fi
#
Filename=$1
echo "Testing for the non existence of a file called $Filename"
if [ ! -a $Filename ]
#if we put ! before -a to get which does not exist
then
echo "File $Filename Doesn't Indeed Exist!"
fi
#!/bin/bash
#test multiple expression in single if statement
Filename=$1
echo "Testing for file $Filename and readability"
if [ -f $Filename ] && [ -r $Filename ]
then
echo "File $Filename exists And is readable"
fi
#!/bin/bash
set -e
#simple example of if then else and nested if statement
echo "Enter a number between 1 and 3:"
read Value
if [ "$Value" = 1 ] 2>/dev/null || [ "$Value" = 2 ] 2>/dev/nill || [ "$Value" = 3 ] 2>/dev/null; then
echo "You entered $Value"
else
echo "You did not follow the direction"
fi
#!/bin/bash
set -e
#simple example of if then else and nested if statement
echo "Enter a number between 1 and 3:"
read Value
if [ "$Value" = 1 ] 2>/dev/null; then
echo "You entered #1"
elif [ "$Value" = 2 ] 2>/dev/null; then
echo "You entered #2"
elif [ "$Value" = 3 ] 2>/dev/devnull; then
echo "You entered the 3rd number"
else
echo "You did not follow the direction"
fi
#!/bin/bash
# this is a demo of the for loop
echo "List all the shell scripts of the directory"
Shellscripts=`ls `
#echo "listing is the $Shellscripts"
for ForLoop in "$Shellscripts";do
Display="`cat $ForLoop`"
echo " File: $8Forloop - contents $Display"
done
#echo "listing is the $Shellscripts"
#!/bin/bash
#demo of the case statement
echo "Main Menu"
echo "========="
echo "1) Choice One"
echo "2) Choice Two"
echo "3) Choice Three"
echo ""
echo "Enter Choice"
read Menuchoice
case $Menuchoice in
1)
echo "Congratulation for Choosing the First Option";;
2)
echo "Choice 2 Chosen";;
3)
echo "Last choice made";;
*)
echo "You choose unwisely";;
esac
#!/bin/bash
#while loop example
echo "Enter the number of times to displat the 'Hello World' message"
read Displaynumber
Count=1
while [ $Count -le $Displaynumber ]
do
echo "Hello World - $Count"
Count="`expr $Count + 1`"
done
#!/bin/bash
#execution operatiors examples
echo "Enter a number between 1 and 5"
read Value
if [ "$Value" -eq "1" ] || [ "$Value" -eq "3" ] || [ "$Value" -eq "5" ]; then
echo "You entered the ODD value of $Value"
else
echo "You entered a value of $Value"
fi
#!/bin/bash
# simple file reading (non-binary) and displaying one line at a time
echo "Enter a filename to read: "
read FILE
while read -r Names;
do
echo "My Family Name: $Names"
done < "$FILE"
#!/bin/bash
#demo of reading and writing to a file using file descriptor
echo "Enter a file name to read: "
read File
exec 5<>$File
#>is for write only <is for read only <>is for read and write
while read -r Name;
do
echo "Family Name: $Name"
done <&5
echo "File was read on: `date`">&5
exec 5>&-
#!/bin/bash
#doing delimiter example using IFS
echo "Enter filename to parse: "
read File
echo "Enter the Delimiter: "
read Delim
IFS="$Delim"
while read -r CPU MEMORY DISK;
do
echo "CPU: $CPU"
echo "Memory: $MEMORY"
echo "Disk: $DISK"
done <"$File"
#!/bin/bash
#example of trapping events and limiting the shell stopping
clear
trap 'echo " - Please Press Q to Exit.."' SIGINT SIGTERM SIGTSTP
while [ "$Choice" != "Q" ] && [ "$Choice" != "q" ];do
echo "Main Menu"
echo "========="
echo "1) Choice One"
echo "2) Choice Two"
echo "3) Choice Three"
echo "Q) Quit/Exit"
echo ""
echo "Enter Choice"
read Choice
clear
done
#clear
#done
#!/bin/bash
#demo of using erroe handing with exit
echo "Change to a directory and list the contents"
Directory=$1
cd $Directory 2>/dev/null
if [ "$?" = "0" ]; then
echo "We can change into the directory $Directory, and here are the contents"
echo "`ls -al`"
else
echo "Cannot change directories, exiting with an error and no listing"
exit 111
fi
Ramjee Kharel
Subhadra Kharel
Rajan Raj Upreti
Bina Kharel
Bimal Kharel
Nita Kharel
Binay Kharel
Aryashree Poudyal Kharel
Biraj Upreti
Renisha Upreti
Kritika Kharel
Abhiraj Kharel
Abha Kharel
File was read on: Wed Jun 20 10:01:56 EDT 2018
List all the shell scripts of the directory
File: Forloop - contents #!/bin/bash
set -e
clear
echo "This scrip will give us enviroment information"
echo "=============================================="
echo ""
echo "Hello Username: $USER"
echo ""
echo "Your Home Directory is: $USER"
echo ""
echo "Your History File will Ignore: $PATH"
echo ""
echo "Your Termainal Session Type is: $TERM"
echo ""
#!bin/bash
#we can set the enviromental variables by ourself and use those variables on our script. Already set variables cannot be reassigned.
#if we set our enviromental variables and want to reassigned new variables we can set again because it will overlap
#!/bin/bash
set -e
echo "The Current path is : $PATH"
MYUSERNAME="Binay Kharel"
MYPASSWORD="Checkit"
STARTOFSCRIPT=`date`
echo "My login name for this application is: $MYUSERNAME"
echo "MY login password for the application is: $MYPASSWORD"
echo "My started this script at : $STARTOFSCRIPT"
ENDOFSCRIPT=`date`
echo "I ended this script at: $ENDOFSCRIPT"
#!/bin/bash
#redirect to /dev/null example
echo "This is displaying on the console"
echo "This is going into the black hole">>/dev/null
#!/bin/bash
set -e
expr 1 + 5
echo $?
rm doodles.sh
echo $?
expr 10 + 10
echo $?
#!/bin/bash
set -e
expr 2 + 2
expr 2+2/*4
expr \(2+2\)*4
#!/bin/bash
#simple substitution
shopt -s expand_aliases
alias TODAY="date"
alias UFILES="find /root/sciptsLinux"
TODAYSDATE=`date`
USERFILES="find /root/sciptsLinux"
echo "Today's date: $TODAYSDATE"
echo "All files owned by USER:$USERFILES"
A=`TODAY`
B=`UFILES`
echo "With Alias, Today is $A"
echo "With Alias, UFILES is $B"
#!/bin/bash
#interactive script for user input
echo "Enter Your First Name: "
read FirstName
echo "Enter Your Last Name: "
read LastName
echo ""
echo "Your Full Name is: $FirstName $LastName"
echo ""
echo "Enter Your Age: "
read Userage
echo "In ten years you will be `expr $Userage + 10` years old."
#!/bin/bash/
echo `sh{ot,ort,oot}`
echo `st{il,al}l`
#!/bin/bash
# Simple arry list and loop for display
serverlist=("webser01" "webser02" "webser03" "webser04")
count=0
for index in ${serverlist[@]}; do #@is the special char in array tell as many as in array
echo "Processing Server : ${serverlist[count]}"
count="` expr $count + 1 `"
done
#!/bin/bash
#demo of command line values passed in our shell script
set -e
#echo "the following item was passed in to the script at run time $1 $2"
username=$1
password=$2
echo "The following username $username and password is $password"
#!/bin/bash
set -e
#simple if script if script for guessing a number
echo "Guess the secret number"
echo "======================"
echo ""
echo "Enter a number Between 1 and 5: "
read Guess
if [ $Guess = 3 ]
then
echo "You Guessed the correct number"
#else
#echo " Your guess is worng"
fi
#!/bin/bash
set -e
# test for existence of indicated file name
#Filename=$1
#echo "Testing for the existence of a file called $Filename"
#
#if [ -a $Filename ]
#if we put ! before -a to get which does not exist
# then
# echo "File $Filename Does Indeed Exist!"
#fi
#
Filename=$1
echo "Testing for the non existence of a file called $Filename"
if [ ! -a $Filename ]
#if we put ! before -a to get which does not exist
then
echo "File $Filename Doesn't Indeed Exist!"
fi
#!/bin/bash
#test multiple expression in single if statement
Filename=$1
echo "Testing for file $Filename and readability"
if [ -f $Filename ] && [ -r $Filename ]
then
echo "File $Filename exists And is readable"
fi
#!/bin/bash
set -e
#simple example of if then else and nested if statement
echo "Enter a number between 1 and 3:"
read Value
if [ "$Value" = 1 ] 2>/dev/null || [ "$Value" = 2 ] 2>/dev/nill || [ "$Value" = 3 ] 2>/dev/null; then
echo "You entered $Value"
else
echo "You did not follow the direction"
fi
#!/bin/bash
set -e
#simple example of if then else and nested if statement
echo "Enter a number between 1 and 3:"
read Value
if [ "$Value" = 1 ] 2>/dev/null; then
echo "You entered #1"
elif [ "$Value" = 2 ] 2>/dev/null; then
echo "You entered #2"
elif [ "$Value" = 3 ] 2>/dev/devnull; then
echo "You entered the 3rd number"
else
echo "You did not follow the direction"
fi
#!/bin/bash
# this is a demo of the for loop
echo "List all the shell scripts of the directory"
Shellscripts=`ls `
#echo "listing is the $Shellscripts"
for ForLoop in "$Shellscripts";do
Display="`cat $ForLoop`"
echo " File: $8Forloop - contents $Display"
done
#echo "listing is the $Shellscripts"
List all the shell scripts of the directory
i7 4Ghz;32Gb;4TB
| true |
63eb9c34fbcb94b14dc3e1d15b22f6d32e77fccf | Shell | CryZe/LiveSplitOne | /ci/script.sh | UTF-8 | 1,330 | 3.125 | 3 | [
"MIT"
] | permissive | # This script takes care of testing your crate
set -ex
SOURCE_BRANCH="master"
TARGET_BRANCH="gh-pages"
doCompile() {
(cd livesplit-core && cross rustc -p livesplit --target $TARGET --release -- -C opt-level=z)
(cd livesplit-core/capi/bind_gen && cargo run)
cp livesplit-core/target/asmjs-unknown-emscripten/release/livesplit*.js* src/livesplit_core.js
cat livesplit-core/capi/js/exports.js >> src/livesplit_core.js
cp livesplit-core/capi/bindings/emscripten/livesplit_core.ts src/livesplit.ts
npm install
webpack -p
}
if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
echo "Skipping deploy; just doing a build."
doCompile
exit 0
fi
doCompile
git config --global user.email "christopher.serr@gmail.com"
git config --global user.name "Travis CI"
git checkout -b gh-pages
git add -f dist
git add -f src/livesplit_core.js
git commit -m "gh pages"
ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key"
ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv"
ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR}
ENCRYPTED_IV=${!ENCRYPTED_IV_VAR}
openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in ci/deploy_key.enc -out deploy_key -d
chmod 600 deploy_key
eval `ssh-agent -s`
ssh-add deploy_key
git remote set-url origin git@github.com:CryZe/LiveSplitOne.git
git push origin gh-pages -f
| true |
f38a25c8f0ea0bc4805a3cec25275f52d589dfe4 | Shell | Gali-Madhan-Kumar/Employee_Wage_Computation | /Employee_Wage_Computation/empWageUsingFunc.sh | UTF-8 | 781 | 3.453125 | 3 | [] | no_license | #! /bin/bash -x
# CONSTANTS FOR THE PROGRAM
IS_PART_TIME=1
IS_FULL_TIME=2
MAX_HRS_IN_MONTH=10
EMP_RATE_PER_HR=20
NUM_WORKING_DAYS=20
# VARIABLES
totalEmpHr=0
totalWorkingDays=0
function getWorkingHours() {
case $1 in
$IS_FULL_TIME)
workingHours=8
;;
$IS_PART_TIME)
workingHours=4
;;
*)
workingHours=0
;;
esac
echo $workingHours
}
while [[ $totalEmpHrs -lt $MAX_HRS_IN_MONTH && $totalWorkingDays -lt $NUM_WORKING_DAYS ]]
do
((totalWorkingDays++))
workHours="$( getWorkingHours $(RANDOM % 3)) )"
totalWorkHours=$((totalEmpHrs + workHours))
done
totalSalary=$((totalWorkHours * EMP_RATE_PER_HR))
| true |
04b76ce6b58d65f121919246f4d202c459b4a9cd | Shell | uroborus/synapse | /demo/start.sh | UTF-8 | 840 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
DIR="$( cd "$( dirname "$0" )" && pwd )"
CWD=$(pwd)
cd "$DIR/.."
mkdir -p demo/etc
for port in 8080 8081 8082; do
echo "Starting server on port $port... "
https_port=$((port + 400))
python -m synapse.app.homeserver \
--generate-config \
--config-path "demo/etc/$port.config" \
-p "$https_port" \
--unsecure-port "$port" \
-H "localhost:$https_port" \
-f "$DIR/$port.log" \
-d "$DIR/$port.db" \
-D --pid-file "$DIR/$port.pid" \
--manhole $((port + 1000)) \
--tls-dh-params-path "demo/demo.tls.dh"
python -m synapse.app.homeserver \
--config-path "demo/etc/$port.config" \
-vv \
done
echo "Starting webclient on port 8000..."
python "demo/webserver.py" -p 8000 -P "$DIR/webserver.pid" "webclient"
cd "$CWD"
| true |
64f5801f0d9e4cab70b6d9a0a53e30fae88213bf | Shell | SpiffySaxMan/Ridealong-App | /public/sbin/mkdumprd | UTF-8 | 121,537 | 3.09375 | 3 | [] | no_license | #!/bin/bash --norc
# vim:sts=4:sw=4:ts=8:et
# mkdumprd
#
# Copyright 2005 Red Hat, Inc.
#
# Written by Erik Troan <ewt@redhat.com>
#
# Contributors:
# Elliot Lee <sopwith@cuc.edu>
# Miguel de Icaza <miguel@nuclecu.unam.mx>
# Christian 'Dr. Disk' Hechelmann <drdisk@ds9.au.s.shuttle.de>
# Michael K. Johnson <johnsonm@redhat.com>
# Pierre Habraken <Pierre.Habraken@ujf-grenoble.fr>
# Jakub Jelinek <jakub@redhat.com>
# Carlo Arenas Belon (carenas@chasqui.lared.net.pe>
# Keith Owens <kaos@ocs.com.au>
# Bernhard Rosenkraenzer <bero@redhat.com>
# Matt Wilson <msw@redhat.com>
# Trond Eivind Glomsrød <teg@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Preston Brown <pbrown@redhat.com>
# Bill Nottingham <notting@redhat.com>
# Guillaume Cottenceau <gc@mandrakesoft.com>
# Peter Jones <pjones@redhat.com>
# Neil Horman <nhorman@redhat.com>
# Jarod Wilson <jwilson@redhat.com>
#
# Note: this scripts use 4 spaces as indent.
cmdname=`basename $0`
umask 0022
export MALLOC_PERTURB_=204
PATH=/sbin:/usr/sbin:/bin:/usr/bin:$PATH
export PATH
. /etc/rc.d/init.d/functions
VERSION=5.0.39
PROBE="yes"
MODULES=""
PREMODS=""
DMRAIDS=""
MPATHS=""
CLUSTER_CONFIG_FILE="/etc/cluster/cluster.conf"
CLUSTER_NODE_LIST=""
FENCE_KDUMP_CONFIG="/etc/sysconfig/fence_kdump"
FENCE_KDUMP_OPTS=""
CONFMODS="$MODULES"
MODULES=""
ARCH=$(uname -m)
withusb=yes
compress=1
allowmissing=""
target=""
kernel=""
force=""
verbose=""
img_vers=""
builtins=""
rc=0
IMAGESIZE=8000
PRESCSIMODS="sd_mod"
fstab="/etc/fstab"
vg_list=""
net_list=""
USING_METHOD=""
SAVE_PATH=/var/crash
bin=""
KDUMP_POST=""
extra_kdump_mods=""
DUMP_TARGET=""
DUMP_FSTYPE=""
SSH_KEY_LOCATION="/root/.ssh/kdump_id_rsa"
DMESG_COLLECTOR="/sbin/vmcore-dmesg"
FONTDIR=/lib/kbd/consolefonts
DEFAULT_FONT=LatArCyrHeb-16.psfu.gz
override_resettable=0
DISK_TIMEOUT=180
DEBUG_MEM_LEVEL="0"
FIPS_FILE="/proc/sys/crypto/fips_enabled"
error()
{
NONL=""
if [ "$1" == "-n" ]; then
NONL="-n"
shift
fi
echo $NONL "$@" >&2
}
cleanup_and_exit ()
{
rm -rf $MNTIMAGE
rm -f $IMAGE
rm -f $TMPDISKLIST
exit $1
}
strip_comments() {
echo $@ | sed -e 's/\(.*\)#.*/\1/'
}
function make_trace_mem()
{
# parameters: msg [trace_level:trace]...
msg=$1
shift
if [ "$DEBUG_MEM_LEVEL" -gt 0 ]; then
make_trace show_memstats $DEBUG_MEM_LEVEL "[debug_mem]" "$msg" "$@"
fi
}
function make_trace()
{
# parameters: func log_level prefix msg [trace_level:trace]...
func=$1
shift
log_level=`echo "$1" | grep -o '^[0-9]\+'`
shift
prefix=$1
shift
msg=$1
shift
if [ -z "$log_level" ]; then
return
fi
# deal with indentation
space_at_front=`echo "$msg" | grep -o "^[[:space:]]\+"`
msg=`echo "$msg" | sed 's/^\s\+//'`
msg_printed=0
while [ $# -gt 0 ]; do
trace_level=`echo "$1" | grep -o '^[0-9]\+'`
trace_in_higher_levels=`echo "$1" | grep -o '+'`
trace=`echo $1 | sed "s/^.*://"`
if [ -z "$trace_level" ]; then
trace_level=0
fi
insert_trace=0
if [ -n "$trace_in_higher_levels" ]; then
if [ "$log_level" -ge "$trace_level" ]; then
insert_trace=1
fi
else
if [ "$log_level" -eq "$trace_level" ]; then
insert_trace=1
fi
fi
if [ $insert_trace -eq 1 ]; then
if [ $msg_printed -eq 0 ]; then
emit "${space_at_front}echo \"$prefix $msg\""
msg_printed=1
fi
emit "${space_at_front}$func $trace"
fi
shift
done
}
if [ -z "$TMPDIR" ]
then
for t in /tmp /var/tmp /root ${PWD}; do
if [ ! -w $t ]; then continue; fi
TMPDIR=$t
break
done
else
if [ ! -w "$TMPDIR" ]
then
error "Can't write to $TMPDIR."
exit 1
fi
fi
if [ -z "$TMPDIR" ]; then
error "No temporary directory could be found."
exit 1
fi
if [ $TMPDIR = "/root" -o $TMPDIR = "${PWD}" ]; then
error "WARNING: using $TMPDIR for temporary files"
fi
#for other commands below, like 'mount'
export TMPDIR
TMPDISKLIST=""
MNTIMAGE=`mktemp -d ${TMPDIR}/initrd.XXXXXX`
IMAGE=`mktemp ${TMPDIR}/initrd.img.XXXXXX`
RCFILE=$MNTIMAGE/init
vecho()
{
NONL=""
if [ "$1" == "-n" ]; then
NONL="-n"
shift
fi
[ -n "$verbose" ] && echo $NONL "$@"
}
usage () {
if [ "$1" == "-n" ]; then
cmd=echo
else
cmd=error
fi
$cmd "usage: $cmdname [--version] [--help] [-v] [-d] [-f] [--preload <module>]"
$cmd " [--image-version]"
$cmd " [--builtin=<module>] [--omit-dmraid]"
$cmd " [--fstab=<fstab>] [--nocompress] <initrd-image> <kernel-version>"
$cmd ""
$cmd " (ex: $cmdname /boot/initrd-2.2.5-15.img 2.2.5-15)"
if [ "$1" == "-n" ]; then
exit 0
else
cleanup_and_exit 1
fi
}
# Compute the module dependency list for module $1 and store it in the
# global $deps (space-separated module names).  Two sources are merged:
# a direct parse of /lib/modules/$kernel/modules.dep via awk, plus the
# insmod lines reported by "modprobe --show-depends" (minus $1 itself).
moduledep() {
if [ ! -f "/lib/modules/$kernel/modules.dep" ]; then
error "No dep file found for kernel $kernel"
cleanup_and_exit 1
fi
# awk: scan modules.dep for the entry whose module name matches $1 and
# print its dependency names (stripped of directory and .ko/.o suffix).
deps=$(awk 'BEGIN { searched=ARGV[2]; ARGV[2]=""; rc=1 } \
function modname(filename) { match(filename, /\/([^\/]+)\.k?o:?$/, ret); return ret[1] } \
function show() { if (orig == searched) { print dep; orig=""; rc=0; exit } } \
/^\/lib/ { show(); \
orig=modname($1); dep=""; \
if ($2) { for (i = 2; i <= NF; i++) { dep=sprintf("%s %s", dep, modname($i)); } } } \
/^ / { dep=sprintf("%s %s", dep, modname($1)); } \
END { show(); exit(rc) }' /lib/modules/$kernel/modules.dep $1)
# Fold in whatever modprobe itself considers a dependency, skipping the
# module we were asked about.
for i in `modprobe --set-version $kernel --show-depends $1 2>/dev/null | awk '/^insmod/ {print $2}'`
do
modname=`basename $i | sed -e's/\.ko//'`
if [ "$modname" == "$1" ]
then
continue
fi
deps="$deps $modname"
done
[ -n "$deps" ] && vecho ":$deps" || vecho
}
findone() {
find "$@" | /bin/awk '{print $1; exit}'
}
# Emit every match from find(1), one per line (thin wrapper kept for
# symmetry with findone).
findall() {
  command find "$@"
}
# Given a block device node, print its /sys/block/<dev> directory by
# matching the node's major:minor against every /sys/block/*/dev file.
# Prints nothing (status 1) when $1 is not a block device.
find_dm_in_sysblock() {
local devnode=$1
local majmin
local device
[ ! -b "$devnode" ] && return 1;
majmin=$(get_numeric_dev dec $devnode)
[ -z "$majmin" ] && return 1
# cmp -s compares our major:minor string against each sysfs dev file;
# the trailing sed strips "/dev" so the directory path remains.
find -L /sys/block -maxdepth 2 -name dev | while read device ; do \
echo "$majmin" | cmp -s $device && echo $device ; done \
| sed -e 's,/dev$,,'
}
# Return 0 when the device-mapper device with major:minor "$1" has a
# "multipath" target in its dmsetup table, 1 otherwise.
is_mpath() {
local major
local minor
local target
major=$(echo $1 | cut -d: -f1)
minor=$(echo $1 | cut -d: -f2)
# Column 3 of "dmsetup table" output is the target type.
for target in $(dmsetup -C -j $major -m $minor table 2>/dev/null | \
grep -v "No devices found" | awk ' { print $3 }') ; do
[ "$target" == "multipath" ] && return 0
done
return 1
}
# Recursively walk the sysfs "slaves" tree of a block device and print the
# /dev node of every component that sits below a multipath device.
# $1 is a sysfs block path (or name); $2 is "yes" once a multipath parent
# has been seen on the way down.  Returns 0 when any dependency was printed.
find_mpath_deps() {
local devpath="/dev/$(echo $1 | sed -e 's,.*/\([^/]\+\),\1,' )"
local syspath="/sys/block/$(echo $1 | sed -e 's,.*/\([^/]\+\),\1,' )"
local arg2="$2"
local majmin=$(cat $syspath/dev)
local ret=1
# Once we encounter a multipath node, everything beneath it is a dep.
if is_mpath ${majmin} ; then
arg2=yes
fi
slaves="$syspath/slaves/*"
for slave in $slaves ; do
[ -e $slave ] || continue
find_mpath_deps $(readlink $slave) ${arg2} && ret=0
done
if [ "$2" == "yes" ]; then
echo $devpath
fi
return $ret
}
# Print the UUID field reported by "dmsetup info" for device-mapper
# device $1 (empty output when the device or the UUID line is absent).
dm_get_uuid() {
  dmsetup info "$1" | awk '$0 ~ /UUID/ { print $2 }'
}
# Topologically order the global $MODULES list so every module appears
# after all of its dependencies.  Modules are repeatedly moved from the
# input list to the output list once every dependency reported by
# "modprobe --show-depends" is already present in the output.
depsolve_modlist()
{
local TMPINMODS=$MODULES
local TMPOUTMODS=""
local i
local j
local mname
local dpcnt
local scnt
#
# So, basically, we just do this until TMPINMODS
# is an empty list
#
while [ -n "$TMPINMODS" ]
do
for i in $TMPINMODS
do
mname=`basename $i | sed -e's/\.ko//'`
# dpcnt: number of insmod lines modprobe emits (deps + the module).
dpcnt=`modprobe --set-version $kernel --show-depends $mname 2>/dev/null | awk '/^insmod/ {print $2}' | wc -l`
if [ $dpcnt -le 1 ]
then
# we have no dependencies, just add it to the list
echo "$TMPOUTMODS" | grep -q $i
if [ $? -ne 0 ]
then
TMPOUTMODS="$TMPOUTMODS $i"
fi
continue
fi
# We should start this counter at 1, since we expect that the last
# line output by modprobe --show-depends will be the module we
# specify as mname below, but since modprobe is busted and
# sometimes doesn't do that, we start at zero, and increment an
# extra time below if we are searching for a dependency on ourself
let scnt=0
for j in `modprobe --set-version $kernel --show-depends $mname 2>/dev/null | awk '/^insmod/ {print $2}'`
do
echo $TMPOUTMODS | grep -q $j
if [ $? -eq 0 ]
then
let scnt=$scnt+1
fi
# here we are looking to see if the insmod line is for the
# module that we are searching for dependencies on. We do this
# because modprobe is busted in its show-depends line
echo $j | grep -q $i
if [ $? -eq 0 ]
then
let scnt=$scnt+1
fi
done
# All dependencies satisfied: promote the module to the output list.
if [ "$scnt" == "$dpcnt" ]
then
echo "$TMPOUTMODS" | grep -q $i
if [ $? -ne 0 ]
then
TMPOUTMODS="$TMPOUTMODS $i"
fi
fi
#Finish for i loop
done
# Remove everything already promoted from the input list.
for j in $TMPOUTMODS
do
TMPTMPMODS=""
for z in $TMPINMODS
do
if [ "$j" == "$z" ]
then
continue
fi
TMPTMPMODS="$TMPTMPMODS $z"
done
TMPINMODS=$TMPTMPMODS
done
done
MODULES=$TMPOUTMODS
}
# Locate module $1 for $kernel and append its .ko path to the global
# $MODULES list, first recursing over its dependencies (unless $2 asks to
# skip them).  A leading "-" on the name, or --skiperrors, makes a missing
# module non-fatal.  Modules named in $builtins are assumed compiled in.
findmodule() {
local skiperrors=""
local mod_found=0
if [ $1 == "--skiperrors" ]; then
skiperrors=--skiperrors
shift
fi
local modName=$1
local skipdeps=$2
if [ "$modName" = "off" -o "$modName" = "null" ]; then
return
fi
# "-name" form: optional module, errors are ignored.
if [ $(echo $modName | cut -b1) = "-" ]; then
skiperrors=--skiperrors
modName=$(echo $modName | cut -b2-)
fi
# Already collected?  (MODULES holds full .ko paths.)
case "$MODULES " in
*"/$modName.ko "*) return ;;
esac
if echo $builtins | egrep -q '(^| )'$modName'( |$)' ; then
vecho "module $modName assumed to be built in"
return
fi
# special cases
case "$modName" in
raid[456])
modName=raid456
;;
esac
# Hard-wired dependency chains for a few drivers, otherwise resolve
# dependencies via moduledep and recurse.
if [ "$modName" = "i2o_block" ]; then
findmodule i2o_core
findmodule -i2o_pci
modName="i2o_block"
elif [ "$modName" = "ppa" ]; then
findmodule parport
findmodule parport_pc
modName="ppa"
elif [ "$modName" = "sbp2" ]; then
findmodule ieee1394
findmodule ohci1394
modName="sbp2"
else
if [ -z "$skipdeps" ]
then
moduledep $modName
for i in $deps; do
findmodule $i
done
fi
fi
# Ask modprobe for the path; fall back to searching updates/ and the
# plain module tree.
fmPath=$(modprobe --set-version $kernel -l $modName 2>/dev/null)
if [ ! -f "/lib/modules/$kernel/$fmPath" ]; then
for modDir in /lib/modules/$kernel/updates /lib/modules/$kernel
do
if [ -d $modDir ]
then
fmPath=$(findone $modDir -name $modName.ko)
if [ -f "$fmPath" ]
then
fmPath=${fmPath#/lib/modules/$kernel/}
mod_found=1
break
fi
fi
done
else
mod_found=1
fi
if [ $mod_found -eq 0 ]; then
if [ -n "$skiperrors" ]; then
return
fi
# ignore the absence of the scsi modules
for n in $PRESCSIMODS; do
if [ "$n" = "$modName" ]; then
return;
fi
done;
if [ -n "$allowmissing" ]; then
error "WARNING: No module $modName found for kernel $kernel, continuing anyway"
return
fi
error "No module $modName found for kernel $kernel, aborting."
cleanup_and_exit 1
fi
# only need to add each module once
MODULES="$MODULES /lib/modules/$kernel/$fmPath"
# need to handle prescsimods here -- they need to go _after_ scsi_mod
if [ "$modName" = "scsi_mod" ]; then
for n in $PRESCSIMODS ; do
findmodule $n
done
fi
}
# Register every scsi device_handler module for $kernel.  The handler
# directory is derived from the path of sg.ko as reported by modprobe.
find_scsi_dh_modules() {
  local scsipath x h
  # Split declaration from assignment so the pipeline status isn't masked.
  scsipath=$(modprobe --set-version $kernel --show-depends sg 2>/dev/null | awk '/^insmod / { print $2; }' | tail -1)
  scsipath="${scsipath%%sg.ko}device_handler/"
  [ -d "$scsipath" ] || return
  for x in "$scsipath"*.ko ; do
    # Guard against an unmatched glob: without this the literal pattern
    # "*.ko" would be handed to findmodule when the directory is empty.
    [ -e "$x" ] || continue
    h=${x##*/}
    findmodule -${h%%.ko}
  done
}
# Copy one file into the initrd staging tree: inst <file> <destination>.
# A wrong argument count prints usage and returns without aborting the
# build; the function's status is otherwise that of cp.
inst() {
  if [ "$#" != "2" ];then
    echo "usage: inst <file> <destination>"
    return
  fi
  vecho "$1 -> $2"
  # Quote both paths so names containing whitespace survive word-splitting.
  cp "$1" "$2"
}
# Record multipath device $1 (once), pull in all device-mapper/multipath
# kernel modules, then find and recurse into every slave device that makes
# up the multipath map so their storage drivers are collected too.
handle_multipath () {
local sysdev
local deps
local device=$1
# Dedup: each multipath device is handled a single time.
case " $multipath_devices " in
*" $device "*)
return ;;
*) multipath_devices="$multipath_devices $device" ;;
esac
findmodule -dm-mod
findmodule -dm-mirror
findmodule -dm-zero
findmodule -dm-snapshot
find_scsi_dh_modules
findmodule -dm-multipath
findmodule -dm-round-robin
sysdev=$(find_dm_in_sysblock /dev/$device)
deps=$(find_mpath_deps ${sysdev})
vecho -n "multipath components of $device are "
for dep in ${deps}; do
vecho -n "$dep "
done
vecho
for dep in ${deps}; do
findstoragedriver ${dep##/dev/}
done
}
# Translate a kernel "dm-N" block name into its /dev/mapper/<name> alias
# by matching major:minor numbers; any other name passes through verbatim.
resolve_dm() {
  case "$1" in
    dm-*)
      ;;
    *)
      echo $1
      return 0
      ;;
  esac
  majmin=$(cat /sys/block/$1/dev)
  for dmdev in /dev/mapper/* ; do
    dmnum=$(get_numeric_dev dec $dmdev)
    [ "$dmnum" = "$majmin" ] || continue
    echo ${dmdev#/dev/}
    break
  done
}
# Handle a device-mapper device identified by major ($1), minor ($2) and
# its original name ($3).  Multipath maps are delegated to
# handle_multipath; linear maps are followed down to their single slave
# device, which is then processed recursively via handlelvordev.
handledm () {
local major=$1
local minor=$2
local origdevice=$3
# Check if it is a multipath device
if is_mpath "$major:$minor"; then
vecho "Found dm-multipath component $origdevice"
handle_multipath ${origdevice}
return 0;
fi
# Parse the dmsetup table (fed in through the heredoc below); for a
# linear target, field r0 holds the slave's major:minor.
while read dmstart dmend dmtype r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 ; do
case "$dmtype" in
linear)
slavedev=$(find -L /sys/block -maxdepth 2 -name dev \
| while read device ; do \
echo "$r0" \
| cmp -s $device && echo $device ; \
done \
| sed -e 's,.*/\([^/]\+\)/dev,\1,;s,!,/,' )
slavedev=$(resolve_dm ${slavedev##/dev/})
vecho "$device device is linear, slave is $slavedev"
handlelvordev "/dev/$slavedev"
;;
esac
done << EOF
$(dmsetup table -j $major -m $minor 2>/dev/null)
EOF
}
# From the current working directory (inside /sys/block/...), walk upward
# to the device's sysfs node, detect iSCSI paths, then climb to the first
# ancestor exposing a modalias and pull in the matching driver modules.
# NOTE(review): relies on the caller having pushd'ed into sysfs first.
findstoragedriverinsys () {
while [ ! -L device ]; do
[ "$PWD" = "/sys" ] && return
cd ..
done
cd $(readlink ./device)
if is_iscsi $PWD; then
handleiscsi "$PWD"
fi
while [ ! -f modalias ]; do
[ "$PWD" = "/sys/devices" ] && return
cd ..
done
modalias=$(cat modalias)
# Resolve the modalias to concrete driver modules via modprobe.
for driver in $(modprobe --set-version $kernel --show-depends $modalias 2>/dev/null| awk '/^insmod/ { print gensub(".*/","","g",$2) }') ; do
findmodule ${driver%%.ko}
done
}
# Strip a leading "/dev/" prefix and any trailing partition digits from a
# device path, e.g. /dev/sda1 -> sda.  (Only the first "/dev/" occurrence
# is removed, matching the historical sed expression.)
get_basedev() {
  local name=${1/\/dev\//}
  echo "$name" | sed -e 's/[0-9]\+$//'
}
# Fallback disk identification: record device $1 in $TMPDISKLIST keyed by
# the concatenation of its sysfs vendor/model/type strings, together with
# a count of how many /sys/block devices share that same identity string
# (used later to wait for the right number of matching disks).
identify_critical_disk_by_vendor_model_type() {
local i
local a
local TMPNAME
local DSKSTRING
local found
local IDSTRING
local basedev=$1
# Create the cache file lazily on first use.
[ -z "$TMPDISKLIST" ] && TMPDISKLIST=`mktemp ${TMPDIR}/disklist.XXXXXX`
if [ -d /sys/block/$basedev ]
then
TMPNAME=""
DSKSTRING=""
#only add devices which have a presence in sysfs
for i in "vendor" "model" "type"
do
TMPNAME=`cat /sys/block/$basedev/device/$i 2>/dev/null`
DSKSTRING="$DSKSTRING $TMPNAME"
done
DSKSTRING=$(echo $DSKSTRING | sed -e's/ //g')
# If this identity string is already cached, reuse its count and
# rewrite the file; otherwise append a new entry starting at 0.
if grep -q "$DSKSTRING" "$TMPDISKLIST"
then
found=`awk '{ if($2 == "'"$DSKSTRING"'") print $3; }' $TMPDISKLIST`
echo -n "$basedev $DSKSTRING" > $TMPDISKLIST
else
found=0
echo -n "$basedev $DSKSTRING" >> $TMPDISKLIST
fi
# Count every block device whose vendor/model/type matches ours.
for i in `ls /sys/block`
do
IDSTRING=""
if [ ! -d /sys/block/$i/device ]
then
continue
fi
for a in "vendor" "model" "type"
do
TMPNAME=`cat /sys/block/$i/device/$a 2>/dev/null`
IDSTRING="$IDSTRING $TMPNAME"
done
IDSTRING=$(echo $IDSTRING | sed -e's/ //g')
if [ "$DSKSTRING" == "$IDSTRING" ]
then
found=$(($found + 1))
fi
done
echo " $found">>$TMPDISKLIST
fi
}
# Record the persistent scsi_id of device $1 in the initrd's
# etc/critical_scsi_ids file (deduplicated), so the capture kernel can
# wait for this specific disk to appear.
identify_critical_disk_by_scsi_id() {
local scsi_id=""
scsi_id=$(/lib/udev/scsi_id --whitelisted --device=/dev/$1 --replace-whitespace)
if ! grep -q "$scsi_id" $MNTIMAGE/etc/critical_scsi_ids 2>/dev/null; then
echo "$scsi_id" >> $MNTIMAGE/etc/critical_scsi_ids
fi
}
# True when /sys/block/$1 is a virtio disk (its sysfs symlink mentions
# "virtio") that also exposes a non-empty serial attribute.
is_virtio_disk_with_serial() {
  local link serial
  link=$(readlink /sys/block/$1)
  [[ "$link" == *virtio* ]] || return 1
  serial=$(cat /sys/block/$1/serial 2>/dev/null)
  [ -n "$serial" ]
}
# Record the virtio serial of device $1 in the initrd's etc/virtio_ids
# file (deduplicated) so the capture kernel can identify the disk.
identify_critical_disk_by_virtio_serial() {
local serial
serial=$(cat /sys/block/$1/serial 2>/dev/null)
if ! grep -q "$serial" $MNTIMAGE/etc/virtio_ids 2>/dev/null; then
echo "$serial" >> $MNTIMAGE/etc/virtio_ids
fi
}
# Check for a Storage Class Memory (SCM) device (s390 only).
is_scm() {
  # A SCM block device's sysfs symlink runs through "/scm/<hex-id>/", e.g.
  # /sys/block/scma -> ../devices/scm/0000000000000000/block/scma
  local target
  target=$(readlink /sys/block/$1)
  [[ "$target" =~ /scm/[0-9a-f][0-9a-f]*/ ]]
}
# Record the SCM bus id of device $1 (the basename of its sysfs device
# link) in the initrd's etc/scm_ids file, deduplicated.
identify_critical_disk_by_scmid() {
local scm_id
# /sys/block/scma/device -> ../../../0000000000000000/
scm_id=$(basename $(readlink /sys/block/$1/device))
if ! grep -q "$scm_id" $MNTIMAGE/etc/scm_ids 2>/dev/null; then
echo "$scm_id" >> $MNTIMAGE/etc/scm_ids
fi
}
# Check for DASD device (s390 only)
is_dasd() {
  # DASD names contain "dasd" and their sysfs symlink goes through a
  # channel-subsystem (css) path, e.g.
  # /sys/block/dasda -> ../devices/css0/0.0.000c/0.0.23b5/block/dasda
  [[ "$1" == *dasd* ]] || return 1
  [[ $(readlink /sys/block/$1) =~ /css.*/ ]]
}
# Record the DASD bus id of device $1 (the basename of its sysfs device
# link) in the initrd's etc/dasd_bus_ids file, deduplicated.
identify_critical_disk_by_dasd_bus_id() {
local dasd_bus_id
dasd_bus_id=$(basename $(readlink /sys/block/$1/device))
if ! grep -q "$dasd_bus_id" $MNTIMAGE/etc/dasd_bus_ids 2>/dev/null; then
echo "$dasd_bus_id" >> $MNTIMAGE/etc/dasd_bus_ids
fi
}
#$1: device name: /dev/devname
#other logic to find critical disks should go to this function
# Dispatch to the most reliable identification scheme available for the
# base device: persistent scsi_id, virtio serial, SCM id, DASD bus id, or
# (last resort) the vendor/model/type string.
identify_critical_disk() {
local basedev=$(get_basedev $1)
local scsiid
# don't block on cciss devices, parsing them in sysfs takes a special
# case and there might be some other old reason for this which are not
# clear. Also don't block on md/dm devices because we only need waiting
# for their children devices, and md/dm devices will be setup in later code.
if echo $basedev|egrep -s -q '(^cciss|^md|^dm|^mapper\/)'; then
return
fi
# NOTE(review): here the device is passed positionally, while
# identify_critical_disk_by_scsi_id uses --device=; confirm both forms
# are accepted by the installed scsi_id.
scsiid=$(/lib/udev/scsi_id --whitelisted /dev/$basedev --replace-whitespace)
#do not use scsi_id for qemu ide disks because the ids are not persistent.
if [ -n "$scsiid" ] && [ "$scsiid" = "${scsiid/QEMU_HARDDISK/}" ]; then
identify_critical_disk_by_scsi_id $basedev
elif is_virtio_disk_with_serial $basedev; then
identify_critical_disk_by_virtio_serial $basedev
elif is_scm $basedev; then
identify_critical_disk_by_scmid $basedev
elif is_dasd $basedev; then
identify_critical_disk_by_dasd_bus_id $basedev
else
identify_critical_disk_by_vendor_model_type $basedev
fi
}
# For every block device name given, record its identity for the capture
# kernel and collect the kernel modules needed to drive it.  RAID members
# go through handleraid, device-mapper nodes through handledm, and plain
# devices are resolved through their sysfs path.
findstoragedriver () {
local majmin
local device
local sysfs
for device in $@ ; do
identify_critical_disk $device
# Dedup: process each device once per run.
case " $handleddevices " in
*" $device "*)
continue ;;
*) handleddevices="$handleddevices $device" ;;
esac
echo $device | grep -q "md[0-9]\+"
if [ $? == 0 ]; then
vecho "Found RAID component $device"
handleraid "$device"
continue
fi
if [[ "$device" =~ ^(dm-|mapper/|mpath/) ]]; then
device=$(resolve_dm $device)
vecho "Found DM device $device"
majmin=$(get_numeric_dev dec "/dev/$device")
sysfs=$(find_dm_in_sysblock /dev/$device)
handledm $(echo "$majmin" |cut -d : -f 1) $(echo "$majmin" |cut -d : -f 2) ${device}
continue;
fi
# sysfs encodes "/" in device names as "!" (e.g. cciss/c0d0).
device=`echo $device | sed 's/\//\!/g'`
sysfs=$(findone -L /sys/block -maxdepth 2 -type d -name $device)
[ -z "$sysfs" ] && return
pushd $sysfs >/dev/null 2>&1
findstoragedriverinsys
popd >/dev/null 2>&1
done
}
# True when network interface $1 is a bridge (sysfs exposes a bridge dir).
kdump_is_bridge() {
  test -d "/sys/class/net/$1/bridge"
}
# True when network interface $1 is a bonding master.
kdump_is_bond() {
  test -d "/sys/class/net/$1/bonding"
}
# True when network interface $1 is a VLAN device (listed in /proc).
kdump_is_vlan() {
  test -f "/proc/net/vlan/$1"
}
# Collect the kernel modules backing each network interface given.
# Virtual devices (vlan/bond/bridge) map to fixed module names; physical
# devices are resolved via their sysfs modalias or, failing that, the
# driver name reported by ethtool.
findnetdriver() {
for device in $@ ; do
# Dedup: each interface is processed once per run.
case " $handleddevices " in
*" $device "*)
continue ;;
*) handleddevices="$handleddevices $device" ;;
esac
if kdump_is_vlan "$device"; then
modalias=8021q
elif kdump_is_bond "$device"; then
modalias=bonding
elif kdump_is_bridge "$device"; then
modalias=bridge
elif [ -f /sys/class/net/$device/device/modalias ]; then
modalias=$(cat /sys/class/net/$device/device/modalias)
else
modalias=$(ethtool -i $device | awk '/^driver:/ { print $2 }')
fi
for driver in $(modprobe --set-version $kernel --show-depends $modalias 2>/dev/null| awk '/^insmod/ { print gensub(".*/","","g",$2) }') ; do
# mlx4_core needs extra handling later, so remember it separately.
if [ "$driver" = "mlx4_core.ko" ]; then
extra_kdump_mods="$extra_kdump_mods mlx4_core"
fi
findmodule ${driver%%.ko}
done
done
}
# Look up field $2 of iscsi session record $1 via iscsiadm and leave the
# value in the global $result (the text after " = ").
iscsi_get_rec_val() {
# The open-iscsi 742 release changed to using flat files in
# /var/lib/iscsi.
result=$(/sbin/iscsiadm --show -m session -r ${1} | grep "^${2} = ")
result=${result##* = }
}
# No ibft handling yet.
# Read the iscsi target name and portal address for session path $1 into
# the globals $tgt_name and $tgt_ipaddr, aborting when the session record
# cannot be found.
iscsi_set_parameters() {
path=$1
vecho "setting iscsi parameters"
# Check once before getting explicit values, so we can output a decent
# error message.
if ! /sbin/iscsiadm -m session -r ${path} >/dev/null ; then
echo "Unable to find iscsi record for $path"
exit 1
fi
iscsi_get_rec_val $path "node.name"; tgt_name=${result}
iscsi_get_rec_val $path "node.conn\[0\].address"; tgt_ipaddr=${result}
}
# True when the given sysfs device path looks like an iSCSI session
# (.../platform/hostH/sessionS/targetT:B:I/H:B:I:L...).  Also leaves the
# path in the global $path, as the original did.
is_iscsi() {
  path=$1
  [[ "$path" =~ /platform/host[0-9]*/session[0-9]*/target[0-9]*:[0-9]*:[0-9]*/[0-9]*:[0-9]*:[0-9]*:[0-9]* ]]
}
# Taken from mkinitrd. Trimmed for bare minimum functionality.
# Record iscsi session $1: load the iscsi/sd modules, resolve the target
# name and portal, set up the network interface that routes to the portal,
# and append the session/target to the ISCSI_DEVICES/ISCSI_TARGETS lists.
handleiscsi() {
vecho "Found iscsi component $1"
for idev in $ISCSI_DEVICES; do
# try to avoid any duplication of a target
if [ "${idev%/*}" == "${1%/*}" ]; then
vecho "Already have ${1%/*}... skipping."
return
fi
done
findmodule iscsi_tcp
findmodule sd_mod
iscsi_set_parameters $1
# Which local interface routes to the target portal?
netdev=$(/sbin/ip route get to $tgt_ipaddr | \
sed 's|.*dev \(.*\).*|\1|g' | awk '{ print $1; exit }')
mkdir -p $MNTIMAGE/etc/network/
handlenetdev $netdev
echo $netdev >> $MNTIMAGE/etc/iface_to_activate
ISCSI_DEVICES="$ISCSI_DEVICES $1"
vecho "iscsi component target is $tgt_name"
ISCSI_TARGETS="$ISCSI_TARGETS $tgt_name"
}
# Handle software-RAID device $1: read its level and member devices from
# /proc/mdstat, pull in the matching raid module, recurse into the member
# devices, and remember the array in $raiddevices when it must be started
# in the capture environment.  Returns 1 when the array needs starting.
handleraid() {
local start=0
if [ -n "$noraid" -o ! -f /proc/mdstat ]; then
return 0
fi
levels=$(grep "^$1[	 ]*:" /proc/mdstat | \
awk '{ print $4 }')
# Strip the leading "mdN : active raidX" prefix and the "[n]" member
# indices, leaving the bare member device names.
devs=$(grep "^$1[	 ]*:" /proc/mdstat | \
awk '{ print gensub("\\[[0-9]*\\]","","g",gensub("^md.*raid[0-9]*","","1")) }')
for level in $levels ; do
case $level in
linear)
findmodule linear
start=1
;;
multipath)
findmodule multipath
start=1
;;
raid[01456] | raid10)
findmodule $level
start=1
;;
*)
error "raid level $level (in /proc/mdstat) not recognized"
;;
esac
done
findstoragedriver $devs
if [ "$start" = 1 ]; then
raiddevices="$raiddevices $1"
fi
return $start
}
# Abort the build when $1 is a dm-crypt device: kdump cannot unlock
# encrypted volumes in the capture kernel.  A system without /etc/crypttab
# is treated as having no encrypted devices at all.
check_encrypted() {
  local dev=$1
  [ -f /etc/crypttab ] || return 0
  if lsblk -d -n -o TYPE "$dev" 2>/dev/null | grep -q crypt; then
    error "Device $dev is encrypted, can not be used in kdump"
    cleanup_and_exit 1
  fi
  return 0
}
# Verify that device $1 can be re-initialized by the kdump kernel (sysfs
# "resettable" attribute).  $2 selects the error text: 0 for an explicit
# dump target, non-zero for the root filesystem default.  Non-resettable
# devices abort the build unless --override-resettable was given.
check_resettable() {
# Derive the sysfs path of the device's parent and look for its
# "resettable" attribute; devices without the attribute are assumed OK.
local path="/sys/$(udevadm info --query=all --name="$1" | awk '/^P:/ {print $2}' | sed -e 's/\(cciss[0-9]\+\/\).*/\1/g' -e 's/\/block\/.*$//')/resettable"
local resettable=1
if [ ! -f "$path" ]
then
return
else
resettable="$(cat $path)"
fi
if [ $resettable -eq 0 ]
then
if [ "$2" -eq 0 ]
then
error "Can not save vmcore to target device $1. This device can not be initialized in kdump kernel as it is not resettable"
else
error "Rootfs device $1 is not resettable, can not be used as the default target, please specify a default action"
fi
if [ "$override_resettable" -eq 0 ]
then
cleanup_and_exit 1
fi
fi
}
# Resolve a UUID=/LABEL= fstab-style spec to a concrete device node via
# findfs; anything else is echoed back unchanged.  Aborts the build
# (cleanup_and_exit) when the spec cannot be resolved.
get_devname_from_uuid_label() {
  local spec="$1"
  local device="$spec"
  case "$spec" in
    *UUID*|*LABEL*)
      device=$(findfs "$spec")
      if [ $? -ne 0 ]; then
        # Report the original spec: $device would be empty here after the
        # failed substitution (the historical bug printed the empty value).
        echo "find device name failed for $spec"
        cleanup_and_exit 1
      fi
      ;;
  esac
  echo $device
}
# Handle a dump/root device $1 that may be an LVM logical volume or a
# plain device.  LVs are expanded to their volume group's physical
# volumes (each checked and processed once per VG, tracked in $vg_list);
# plain devices go straight to findstoragedriver.  $2 is forwarded to
# check_resettable to select the error wording.
handlelvordev() {
local dev=$(get_devname_from_uuid_label $1)
check_encrypted "$dev"
local vg=`lvs --noheadings -o vg_name $dev 2>/dev/null`
if [ -z "$vg" ]; then
# Retry with the /dev/mapper/VG-LV name rewritten to /dev/VG/LV.
vg=`lvs --noheadings -o vg_name $(echo $dev | sed -e 's#^/dev/mapper/\([^-]*\)-\(.*\)$#/dev/\1/\2#') 2>/dev/null`
fi
if [ -n "$vg" ]; then
vg=`echo $vg` # strip whitespace
case " $vg_list " in
*" $vg "*)
;;
*)
vg_list="$vg_list $vg"
for device in `vgdisplay -v $vg 2>/dev/null | sed -n 's/PV Name//p'`; do
check_resettable "$device" "$2"
check_encrypted "$device"
findstoragedriver ${device##/dev/}
done
;;
esac
else
check_resettable "$dev" "$2"
findstoragedriver ${dev##/dev/}
fi
}
# Write routing information for interface $1 into the initrd: the default
# gateway goes to etc/network/interfaces, and any static routes through
# the interface are copied to etc/network/route-static.
get_routes() {
local dev="$1"
local routes=`/sbin/ip route show | grep "^[[:digit:]].*via.* $dev "`
if [ -z "$GATEWAY" ]
then
GATEWAY=`/sbin/ip route show | awk '/^default/ {print $3}'`
fi
if [ -n "$GATEWAY" ]
then
echo "  " gateway $GATEWAY >> $MNTIMAGE/etc/network/interfaces
fi
if [ -n "$routes" ]
then
/sbin/ip route show | grep "^[[:digit:]].*via.* $dev " \
>> $MNTIMAGE/etc/network/route-static
fi
}
# Print the path of the ifcfg file configuring interface $1: prefer the
# conventional ifcfg-<dev> filename, otherwise scan every ifcfg-* file
# for a non-comment DEVICE=<dev> line.  Prints nothing when no file
# matches.
find_ifcfg_by_devicename() {
  local dev=$1
  local file
  if [ -f /etc/sysconfig/network-scripts/ifcfg-$dev ]
  then
    echo /etc/sysconfig/network-scripts/ifcfg-$dev
    return
  fi
  # Iterate the glob directly instead of parsing `ls` output; the -e
  # guard skips the literal pattern when nothing matches, avoiding both
  # stderr noise and a bogus grep invocation.
  for file in /etc/sysconfig/network-scripts/ifcfg-*
  do
    [ -e "$file" ] || continue
    if grep -q -E "^[^#]*(DEVICE|device).*$dev" "$file"
    then
      echo $file
      return
    fi
  done
}
# Materialize the configuration for network interface $1 inside the
# initrd: copy its ifcfg file, load its driver(s), translate the
# BOOTPROTO/IPADDR settings into Debian-style etc/network/interfaces
# stanzas, and recurse into the underlying devices of vlan/bond/bridge
# stacks.  A BUS_ID line is appended so the capture environment can match
# the interface again.
handlenetdev() {
local dev=$1
local ifcfg_file
# Dedup: each interface is materialized once.
case " $handlednetdevices " in
*" $dev "*)
return ;;
*) handlednetdevices="$handlednetdevices $dev" ;;
esac
ifcfg_file=`find_ifcfg_by_devicename $dev`
if [ -z "${ifcfg_file}" ]; then
error "The ifcfg-$dev or ifcfg-xxx which contains DEVICE=$dev field doesn't exist."
cleanup_and_exit 1
fi
cp ${ifcfg_file} $MNTIMAGE/etc/ifcfg-$dev
# Reset before sourcing so values from a previous interface don't leak.
BOOTPROTO=""
VLAN=""
MASTER=""
SLAVE=""
IPADDR=""
. $MNTIMAGE/etc/ifcfg-$dev
findnetdriver $dev
if [ "$BOOTPROTO" == "dhcp" ]
then
if ! grep -q "iface $dev inet dhcp" $MNTIMAGE/etc/network/interfaces 2>/dev/null
then
echo "iface $dev inet dhcp" >> $MNTIMAGE/etc/network/interfaces
fi
elif [ -n "$IPADDR" ]
then
if ! grep -q "iface $dev inet static" $MNTIMAGE/etc/network/interfaces 2>/dev/null
then
echo "iface $dev inet static" >> $MNTIMAGE/etc/network/interfaces
fi
echo "  " address $IPADDR >> $MNTIMAGE/etc/network/interfaces
if [ -n "$NETMASK" ] && [ -n "$PREFIX" ]
then
echo "Warning: both NETMASK and PREFIX exist, mkdumprd is confused"
fi
if [ -n "$NETMASK" ]
then
echo "  " netmask $NETMASK >> $MNTIMAGE/etc/network/interfaces
elif [ -n "$PREFIX" ]
then
# Convert a PREFIX= length to a dotted netmask for the initrd.
echo "  " netmask $(ipcalc -m 127.0.0.1/$PREFIX | cut -d'=' -f2) >> $MNTIMAGE/etc/network/interfaces
sed -i -e "s/PREFIX=.*/NETMASK=$(ipcalc -m 127.0.0.1/$PREFIX | cut -d'=' -f2)/" $MNTIMAGE/etc/ifcfg-$dev
fi
get_routes $dev
cp -a /etc/resolv.conf $MNTIMAGE/etc
else
# Neither dhcp nor a static address: placeholder all-zero config.
echo iface $dev inet static >> $MNTIMAGE/etc/network/interfaces
echo "  " address 0.0.0.0 >> $MNTIMAGE/etc/network/interfaces
echo "  " netmask 0.0.0.0 >> $MNTIMAGE/etc/network/interfaces
echo "  " bnmask 0 >> $MNTIMAGE/etc/network/interfaces
fi
#This lets us recursively handle stacked devices
if kdump_is_vlan "$dev"
then
if [ "$VLAN" == "yes" ]
then
echo >> $MNTIMAGE/etc/ifcfg-$dev
echo "BUS_ID=\"Vlan\"" >> $MNTIMAGE/etc/ifcfg-$dev
BASE_IFC=`awk '/^Device:/ {print $2}' /proc/net/vlan/$dev`
handlenetdev $BASE_IFC
fi
elif kdump_is_bond "$dev"
then
#This is a bond, pick up its slaves
for j in `cat /sys/class/net/$dev/bonding/slaves`
do
handlenetdev $j
done
echo >> $MNTIMAGE/etc/ifcfg-$dev
echo "BUS_ID=\"Bonding\"" >> $MNTIMAGE/etc/ifcfg-$dev
elif kdump_is_bridge "$dev"
then
for j in `ls /sys/class/net/$dev/brif`
do
handlenetdev $j
done
echo >> $MNTIMAGE/etc/ifcfg-$dev
echo "BUS_ID=\"Bridge\"" >> $MNTIMAGE/etc/ifcfg-$dev
else
if [ "$VLAN" == "yes" ]
then
echo >> $MNTIMAGE/etc/ifcfg-$dev
echo "BUS_ID=\"Vlan\"" >> $MNTIMAGE/etc/ifcfg-$dev
BASE_IFC=`awk '/^Device:/ {print $2}' /proc/net/vlan/$dev`
handlenetdev $BASE_IFC
else
# Physical NIC: record its PCI (or other bus) address.
BUS_ID=`ls -l /sys/class/net/$dev/device | sed -e's/\(.*\/\)\(.*$\)/\2/'`
echo >> $MNTIMAGE/etc/ifcfg-$dev
echo "BUS_ID=\"$BUS_ID\"" >> $MNTIMAGE/etc/ifcfg-$dev
fi
fi
}
# Architecture-specific network initialization hooks for the generated
# init script.  On s390x, emit a ccw_net_init call for every staged
# ifcfg file; CCW bus support is mandatory there.
arch_netdev_init() {
if [ "$ARCH" = "s390x" ]; then
if [ -e /sys/bus/ccw ]; then
install_ccw_net_init
emit "for i in \`ls /etc/ifcfg-* 2>/dev/null\`; do"
emit "    ccw_net_init \$i"
emit "done"
else
error "/sys/bus/ccw is required to set up net devices"
cleanup_and_exit 1
fi
fi
}
# Command-line parsing.  Options that take a value accept both the
# "--opt=value" and "--opt value" spellings.  The first two non-option
# arguments become $target (the initrd image path) and $kernel (the
# kernel version); anything further triggers usage.
while [ $# -gt 0 ]; do
case $1 in
--fstab*)
if echo $1 | grep -q '=' ; then
fstab=`echo $1 | sed 's/^--fstab=//'`
else
fstab=$2
shift
fi
;;
--with-usb)
withusb=yes
;;
--without-usb)
withusb=no
;;
--with*)
if echo $1 | grep -q '=' ; then
modname=`echo $1 | sed 's/^--with=//'`
else
modname=$2
shift
fi
basicmodules="$basicmodules $modname"
;;
--builtin*)
if echo $1 | grep -q '=' ; then
modname=`echo $1 | sed 's/^--builtin=//'`
else
modname=$2
shift
fi
builtins="$builtins $modname"
;;
--version)
echo "$cmdname: version $VERSION"
exit 0
;;
-v)
verbose=-v
;;
--nocompress)
compress=""
;;
--ifneeded)
# legacy
;;
-f)
force=1
;;
-d)
KDUMP_CONFIG_FILE=""
if [ -f /etc/kdump.conf ]; then
KDUMP_CONFIG_FILE="/etc/kdump.conf"
fi
;;
--preload*)
if echo $1 | grep -q '=' ; then
modname=`echo $1 | sed 's/^--preload=//'`
else
modname=$2
shift
fi
PREMODS="$PREMODS $modname"
;;
--force-scsi-probe)
forcescsi=1
;;
--omit-scsi-modules)
PRESCSIMODS=""
noscsi=1
;;
--force-raid-probe)
forceraid=1
;;
--omit-raid-modules)
noraid=1
;;
--force-lvm-probe)
forcelvm=1
;;
--omit-lvm-modules)
nolvm=1
;;
--omit-dmraid)
nodmraid=1
;;
--force-ide-probe)
forceide=1
;;
--image-version)
img_vers=yes
;;
--allow-missing)
allowmissing=yes
;;
--noresume)
noresume=1
;;
--override-resettable)
override_resettable=1
;;
--help)
usage -n
;;
*)
# Positional arguments: image path first, kernel version second.
if [ -z "$target" ]; then
target=$1
elif [ -z "$kernel" ]; then
kernel=$1
else
usage
fi
;;
esac
shift
done
# Validate the parsed options and environment before building: both
# positional arguments present, no conflicting force/omit flags, modules
# installed for the requested kernel, and running as root.
if [ -z "$target" -o -z "$kernel" ]; then
usage
fi
# --image-version appends the kernel version to the image filename.
if [ -n "$img_vers" ]; then
target="$target-$kernel"
fi
if [ -z "$force" -a -f $target ]; then
error "$target already exists."
cleanup_and_exit 1
fi
if [ -n "$forcescsi" -a -n "$noscsi" ]; then
error "Can't both force scsi probe and omit scsi modules"
cleanup_and_exit 1
fi
if [ -n "$forceraid" -a -n "$noraid" ]; then
error "Can't both force raid probe and omit raid modules"
cleanup_and_exit 1
fi
if [ -n "$forcelvm" -a -n "$nolvm" ]; then
error "Can't both force LVM probe and omit LVM modules"
cleanup_and_exit 1
fi
if [ ! -d /lib/modules/$kernel ]; then
error 'No modules available for kernel "'${kernel}'".'
cleanup_and_exit 1
fi
if [ $UID != 0 ]; then
error "$cmdname must be run as root."
cleanup_and_exit 1
fi
vecho "Creating initramfs"
# Locate modprobe configuration: the legacy single file, or the
# concatenated list of /etc/modprobe.d/*.conf.
modulefile=/etc/modprobe.conf
if [ ! -f $modulefile ] ;then
if [ -d /etc/modprobe.d -a -n "$(ls -A /etc/modprobe.d)" ] ; then
modulefile="$(echo /etc/modprobe.d/*.conf)"
else
modulefile=""
fi
fi
# Modules requested via --preload come first.
for n in $PREMODS; do
findmodule $n
done
# Seed the capture environment's entropy pool from the host.
[ ! -d $MNTIMAGE/var/lib ] && mkdir -p $MNTIMAGE/var/lib
inst /var/lib/random-seed $MNTIMAGE/var/lib/random-seed 2>/dev/null || {
error "Saving random seed failed."
cleanup_and_exit 1
}
# Hardware probing: decide whether USB storage/HID support is needed, then
# pull in scsi host adapters, IDE and DASD modules as appropriate.
# NOTE(review): gated on $PROBE, which is set before this chunk — confirm
# its default in the surrounding file.
needusb=""
if [ -n "$withusb" -a "x$PROBE" == "xyes" ]; then
# If / or /boot is on a USB device include the driver. With root by
# label we could still get some odd behaviors
for fs in / /boot ; do
esc=$(echo $fs | sed 's,/,\\/,g')
dev=$(mount | awk "/ on ${esc} / { print \$1 }" | sed 's/[0-9]*$//' | cut -d/ -f3)
if [ "$(echo $dev | cut -c1-2)" = sd ]; then
if [ `which kudzu 2>/dev/null` ]; then
host=$(kudzu --probe -b scsi |
gawk '/^device: '${dev}'/,/^host:/ { if (/^host/) { print $2; exit; } }')
if [ -d /proc/scsi/usb-storage-${host} -o -f /proc/scsi/usb-storage/${host} ]; then
needusb=1
fi
fi
fi
done
fi
# for USB keyboard
if [ -z "$needusb" -a -n "$withusb" -a "x$PROBE" == "xyes" ];
then
for inputdev in $(ls -d /sys/class/input/input* 2>/dev/null); do
if [ ! -e ${inputdev}/mouse* ]; then
if [ "$(cat ${inputdev}/phys | cut -c1-3)" = usb ]; then
needusb=1
fi
fi
done
fi
if [ -n "$needusb" -a "x$PROBE" == "xyes" -a -n "$modulefile" ]; then
# Host controller drivers from modprobe aliases plus any *hci_hcd
# modules currently loaded (in reverse load order).
drivers=$(awk '/^alias[[:space:]]+usb-controller[0-9]* / { print $3}' $modulefile)
useUSB=0
if [ -n "$drivers" ]; then
useUSB=1
for driver in $drivers; do
findmodule $driver
done
fi
for x in $(awk '/^[eou]hci_hcd/ {print $1}' /proc/modules | sed '1!G;h;$!d') ; do
useUSB=1
findmodule $(echo $x | sed 's/_/-/')
done
if [ "$useUSB" == "1" ]; then
findmodule scsi_mod
findmodule sd_mod
fi
fi
if [ -n "$forcescsi" -o -z "$noscsi" -a "x$PROBE" == "xyes" ]; then
if [ -n "$modulefile" ]; then
scsimodules=`grep "alias[[:space:]]\+scsi_hostadapter" $modulefile | grep -v '^[ ]*#' | LC_ALL=C sort -u | awk '{ print $3 }'`
if [ -n "$scsimodules" ]; then
for n in $scsimodules; do
# for now allow scsi modules to come from anywhere. There are some
# RAID controllers with drivers in block/
findmodule $n
done
fi
fi
fi
# If we have ide devices and module ide, do the right thing
ide=/proc/ide/ide*
if [ -n "$forceide" -o -n "$ide" -a "x$PROBE" == "xyes" ]; then
findmodule -ide-disk
fi
# If we have dasd devices, include the necessary modules (S/390)
if [ "x$PROBE" == "xyes" -a -d /proc/dasd ]; then
findmodule -dasd_mod
findmodule -dasd_eckd_mod
findmodule -dasd_fba_mod
fi
# Inspect the root filesystem entry of $fstab: load its filesystem
# module, then either set up networking (nfs root), resolve UUID/LABEL
# specs via findfs, or normalize the mount options to read-only, and
# finally collect the storage drivers for the root device.
add_rootfs()
{
if [ "x$PROBE" == "xyes" ]; then
rootfs=$(awk '{ if ($1 !~ /^[ \t]*#/ && $2 == "/") { print $3; }}' $fstab)
rootopts=$(awk '{ if ($1 !~ /^[ \t]*#/ && $2 == "/") { print $4; }}' $fstab)
# in case the root filesystem is modular
findmodule -${rootfs}
rootdev=$(awk '/^[ \t]*[^#]/ { if ($2 == "/") { print $1; }}' $fstab)
# check if it's nfsroot
if [ "$rootfs" == "nfs" ]; then
remote=$(echo $rootdev | cut -d : -f 1)
# FIXME: this doesn't handle ips properly
remoteip=$(getent ahostsv4 $remote | head -n 1 | cut -d ' ' -f 1)
netdev=`/sbin/ip route get to $remoteip |sed 's|.*dev \(.*\).*|\1|g' |awk {'print $1;'} |head -n 1`
net_list="$net_list $netdev"
# check if it's root by label
elif echo $rootdev | cut -c1-6 | grep -q "UUID=\|LABEL=" ; then
dev=`/sbin/findfs $rootdev`
if [ -n "$dev" ] ; then
vecho "Found root device $dev for $rootdev"
rootdev=$dev
fi
else
# Force read-only: strip any rw/ro option and append ",ro".
rootopts=$(echo $rootopts | sed -e 's/^r[ow],//' -e 's/,r[ow],$//' -e 's/,r[ow],/,/' \
-e 's/^r[ow]$/defaults/' -e 's/$/,ro/')
fi
[ "$rootfs" != "nfs" ] && handlelvordev $rootdev 1
fi
}
# If we use LVM or dm-based raid, include dm-mod
# XXX: dm not really supported yet.
# Decide whether device-mapper probing is warranted, collect dm modules
# when maps exist, enumerate dmraid sets, then fold in USB storage and
# any user/config requested modules.
testdm=""
[ -n "$vg_list" ] && testdm="yes"
[ -n "$forceraid" -o -n "$forcelvm" ] && testdm="yes"
[ -z "$nolvm" -o -z "$noraid" ] && testdm="yes"
[ "x$PROBE" != "xyes" ] && testdm=""
if [ -n "$testdm" ]; then
if [ -x /sbin/dmsetup -a -e /dev/mapper/control ]; then
dmout=$(/sbin/dmsetup ls 2>/dev/null)
if [ "$dmout" != "No devices found" -a "$dmout" != "" ]; then
findmodule -dm-mod
# DM requires all of these to be there in case someone used the
# feature. broken. (#132001)
findmodule -dm-mirror
findmodule -dm-zero
findmodule -dm-snapshot
fi
fi
# Collect dmraid (fakeraid) set names unless --omit-dmraid was given.
if [ -x /sbin/dmraid -a -z "$nodmraid" ]; then
NOBLK=`/sbin/dmraid -s -craidname 2>/dev/null | grep "no block devices"`
NORAD=`/sbin/dmraid -s -craidname 2>/dev/null | grep "no raid disks"`
if [ -z "$NOBLK" ] && [ -z "$NORAD" ]
then
for raid in $(/sbin/dmraid -s -craidname 2>/dev/null) ; do
DMRAIDS="$DMRAIDS $raid"
done
fi
fi
fi
if [ "$useUSB" == "1" ]; then
findmodule usb-storage
fi
# Modules requested on the command line (--with) and from configuration.
for n in $basicmodules; do
findmodule $n
done
for n in $CONFMODS; do
findmodule $n
done
vecho "Using modules:$MODULES"
# Append stdin verbatim to the generated init script ($RCFILE); used
# together with here-documents ("cat-emit").
cemit()
{
  tee -a $RCFILE > /dev/null
}
# Append one line (or, with a leading -n, a partial line) to the
# generated init script ($RCFILE).
emit()
{
  NONL=""
  case "$1" in
    -n)
      NONL="-n"
      shift
      ;;
  esac
  echo $NONL "$@" >> $RCFILE
}
# Evaluate the check command in $1; on failure print the config-file
# prefixed diagnostic $2 and abort the build.
# (The historical version captured the command's *stdout* into rc and
# did "return $rc", which only behaved for checks that print nothing.)
kdump_chk()
{
  if eval "$1"; then
    return 0
  fi
  echo "$KDUMP_CONFIG_FILE: $2"
  cleanup_and_exit 1
}
# Tests if fence_kdump is configured in Pacemaker cluster.
is_pcs_fence_kdump()
{
  [ -x /usr/sbin/fence_kdump_send ] && [ -f "$CLUSTER_CONFIG_FILE" ]
}
# Returns list of nodes defined in Pacemaker cluster.
# Queries the clusternode name attributes from $CLUSTER_CONFIG_FILE via
# xmllint's interactive shell and strips the "name=" prefixes.
get_pcs_cluster_nodes()
{
echo "xpath /cluster/clusternodes/clusternode/@name" \
| xmllint --shell $CLUSTER_CONFIG_FILE | grep content | cut -d'=' -f2
}
# get_option_value <option_name>
# Retrieves value of option defined in /etc/kdump.conf.
# Uses the last matching line, strips inline comments via strip_comments,
# and normalizes whitespace through the unquoted echo.
get_option_value()
{
echo $(strip_comments `grep ^$1 /etc/kdump.conf | tail -1 | cut -d\  -f2-`)
}
# Tests if fence_kdump is configured using options in /etc/kdump.conf.
is_generic_fence_kdump()
{
  if [ ! -x /usr/sbin/fence_kdump_send ]; then
    return 1
  fi
  grep -q "^fence_kdump_nodes" /etc/kdump.conf
}
# Reads list of cluster nodes, filters local IPs and setups network for them.
# Nodes to send fence_kdump notification to are stored in CLUSTER_NODE_LIST.
# Other arguments for fence_kdump_send are stored in FENCE_KDUMP_OPTS.
setup_cluster_nodes_and_options()
{
# setup fence_kdump
local nodelist=""
if is_generic_fence_kdump; then
# fence_kdump setup for generic clusters
nodelist=$(get_option_value "fence_kdump_nodes")
# read fence_kdump_send options and store them to FENCE_KDUMP_OPTS
FENCE_KDUMP_OPTS=$(get_option_value "fence_kdump_args")
elif is_pcs_fence_kdump; then
# fence_kdump setup for Pacemaker clusters
nodelist=$(get_pcs_cluster_nodes)
# read fence_kdump_send options and store them to FENCE_KDUMP_OPTS
if [ -f "$FENCE_KDUMP_CONFIG" ]; then
. "$FENCE_KDUMP_CONFIG"
fi
fi
if [ -n "$nodelist" ]; then
for node in ${nodelist}; do
addr=`getent ahostsv4 $node | head -n 1 | cut -d' ' -f1`
netdev=`/sbin/ip route get to $addr 2>&1`
# Skip ourselves: local addresses route via "local ...".
if echo $netdev | grep -q ^local; then
continue
fi
# add node ip address to node list
CLUSTER_NODE_LIST="$CLUSTER_NODE_LIST $addr"
# "via" appears when the node is on another subnet; the device
# field position differs accordingly.
if echo $netdev | grep -q via; then
netdev=`echo $netdev | awk '{print $5}' | head -n 1`
else
netdev=`echo $netdev | awk '{print $3}' | head -n 1`
fi
mkdir -p $MNTIMAGE/etc/network/
handlenetdev $netdev
echo $netdev >> $MNTIMAGE/etc/iface_to_activate
done
if [ -n "$CLUSTER_NODE_LIST" ]; then
bin="$bin /usr/sbin/fence_kdump_send"
fi
fi
}
# Sanity-check the temporaries, pre-collect every currently loaded module
# (trying both underscore and dash spellings), then create the initrd
# directory skeleton.
if [ -z "$MNTIMAGE" -o -z "$IMAGE" ]; then
error "Error creating temporaries. Try again"
cleanup_and_exit 1
fi
# Just get all the modules that are currently loaded
# We're likely to need them
for i in `lsmod | tail --lines=+2 | awk '{print $1}'`
do
findmodule -$i skip
echo $MODULES | grep -q $i
if [ $? -ne 0 ]
then
# Some modules are named with dashes on disk but underscores in lsmod.
ALTERNATE=`echo $i | sed -e's/_/-/g'`
findmodule $ALTERNATE skip
fi
done
#START BUILDING INITRD HERE
mkdir -p $MNTIMAGE
mkdir -p $MNTIMAGE/lib
mkdir -p $MNTIMAGE/bin
mkdir -p $MNTIMAGE/etc
mkdir -p $MNTIMAGE/dev
mkdir -p $MNTIMAGE/proc
mkdir -p $MNTIMAGE/sys
mkdir -p $MNTIMAGE/tmp
mkdir -p $MNTIMAGE/sysroot
mkdir -p $MNTIMAGE/modules
mkdir -p $MNTIMAGE/usr/share/udhcpc
mkdir -p $MNTIMAGE/var/run
mkdir -p $MNTIMAGE/etc/network/if-pre-up.d
mkdir -p $MNTIMAGE/etc/network/if-up.d
mkdir -p $MNTIMAGE/etc/network/if-pre-down.d
mkdir -p $MNTIMAGE/etc/network/if-down.d
mkdir -p $MNTIMAGE/etc/network/if-post-down.d
# busybox-style layout: sbin is just an alias for bin.
ln -s bin $MNTIMAGE/sbin
if [ -n "$KDUMP_CONFIG_FILE" ]; then
while read config_opt config_val; do
# remove inline comments after the end of a directive.
config_val=$(strip_comments $config_val)
case "$config_opt" in
net|nfs|nfs4|ssh)
if [ "$config_opt" = "net" ]; then
USE_SSH=`echo $config_val | grep @`
if [ -n "$USE_SSH" ]; then
USING_METHOD="ssh"
else
USING_METHOD="nfs"
fi
else
USING_METHOD="$config_opt"
fi
#grab remote host and xlate into numbers
rhost=`echo $config_val | sed 's/.*@//' | cut -d':' -f1`
need_dns=`echo $rhost|grep "[a-zA-Z]"`
remoteip=$rhost
[ -n "$need_dns" ] && remoteip=`getent ahostsv4 $rhost | head -n 1 | cut -d' ' -f1`
#find ethernet device used to route to remote host, ie eth0
netdev=`/sbin/ip route get to $remoteip 2>&1`
[ $? != 0 ] && echo "Bad kdump location: $config_val" && cleanup_and_exit 1
DUMP_TARGET=$config_val
#the field in the ip output changes if we go to another subnet
OFF_SUBNET=`echo $netdev | grep via`
if [ -n "$OFF_SUBNET" ]
then
# we are going to a different subnet
netdev=`echo $netdev|awk '{print $5;}'|head -n 1`
else
# we are on the same subnet
netdev=`echo $netdev|awk '{print $3}'|head -n 1`
fi
#add the ethernet device to the list of modules
mkdir -p $MNTIMAGE/etc/network/
handlenetdev $netdev
echo $netdev >> $MNTIMAGE/etc/iface_to_activate
#load nfs modules, if needed
echo $config_val | grep -v "@" > /dev/null && findmodule nfs
;;
raw)
USING_METHOD="raw"
DUMP_TARGET=$config_val
handlelvordev $config_val 0
;;
core_collector)
if [ -x /usr/sbin/makedumpfile ]; then
CORE_COLLECTOR=$config_val
grep -q control_d /proc/xen/capabilities 2>/dev/null
if [ $? -eq 0 ]
then
if [ ! -e /sys/kernel/vmcoreinfo ]
then
CORE_COLLECTOR=`echo $CORE_COLLECTOR | sed -e's/makedumpfile/makedumpfile --xen-vmcoreinfo \/etc\/makedumpfile.config/'`
fi
else
if [ ! -e /sys/kernel/vmcoreinfo ]
then
CORE_COLLECTOR=`echo $CORE_COLLECTOR | sed -e's/makedumpfile/makedumpfile -i \/etc\/makedumpfile.config/'`
fi
fi
else
echo "Cannot use the core_collector option on this arch"
cleanup_and_exit 1
fi
;;
path)
SAVE_PATH=$config_val
;;
link_delay)
LINK_DELAY=$config_val
;;
kdump_post)
KDUMP_POST=$config_val
if [ ! -x "$KDUMP_POST" ]; then
echo "$KDUMP_POST not executable or not found"
cleanup_and_exit 1
fi
bin="$bin $KDUMP_POST"
KDUMP_POST_INTERNAL=`echo $KDUMP_POST | sed -e's/\(^.*\/\)\(.*$\)/\/bin\/\2/'`
;;
kdump_pre)
KDUMP_PRE=$config_val
if [ ! -x "$KDUMP_PRE" ]; then
echo "$KDUMP_PRE not executable or not found"
cleanup_and_exit 1
fi
bin="$bin $KDUMP_PRE"
KDUMP_PRE_INTERNAL=`echo $KDUMP_PRE | sed -e's/\(^.*\/\)\(.*$\)/\/bin\/\2/'`
;;
extra_bins)
bin="$bin $config_val"
;;
extra_modules)
extra_kdump_mods="$extra_kdump_mods $config_val"
;;
blacklist)
if echo "$config_val" | grep -q "\/" ; then
echo "Do not support the directory \"$config_val\" for blacklist"
cleanup_and_exit 1
fi
blacklist_mods="$blacklist_mods $config_val"
;;
options)
;;
force_rebuild)
;;
default)
DEFAULT_ACTION=$config_val
case $DEFAULT_ACTION in
reboot|shell|mount_root_run_init)
FINAL_ACTION="reboot -f"
;;
halt)
FINAL_ACTION="halt -f"
;;
poweroff)
FINAL_ACTION="poweroff -f"
;;
*)
echo "$DEFAULT_ACTION is not a valid default option"
cleanup_and_exit 1
;;
esac
;;
disk_timeout)
DISK_TIMEOUT=$config_val
;;
debug_mem_level)
DEBUG_MEM_LEVEL=$config_val
echo "$DEBUG_MEM_LEVEL" | grep "^[0-3]$" &> /dev/null
if [ $? -ne 0 ]
then
echo "debug_mem_level is valid only for range 0-3"
cleanup_and_exit 1
fi
;;
sshkey)
if [ -f "$config_val" ]; then
# canonicalize the path
SSH_KEY_LOCATION=$(/usr/bin/readlink -m $config_val)
else
echo "WARNING: '$config_val' doesn't exist, using default value '$SSH_KEY_LOCATION'"
fi
;;
Kdump_not_supported_on_Xen_domU_guest)
cat << HERE
Since RHEL 6.3 there is limited support for kdump on full-virt Xen DomU.
This kdump.conf marker may now be removed on supported configurations.
See KCS Solution 92943 for more details.
HERE
;;
fence_kdump_nodes)
;;
fence_kdump_args)
;;
*)
IS_COMMENT=`echo $config_opt | grep "^#.*$"`
if [ -n "$IS_COMMENT" -o -z "$config_opt" ]
then
#don't process comments or blank line
continue
fi
let grep_rc=0
echo $config_opt | grep -q "ext[234]"
ga_rc=$?
echo $config_opt | grep -q "minix"
gb_rc=$?
echo $config_opt | grep -q "xfs"
gc_rc=$?
echo $config_opt | grep -q "btrfs"
gd_rc=$?
if [ $ga_rc -ne 0 -a $gb_rc -ne 0 -a $gc_rc -ne 0 -a $gd_rc -ne 0 ]
then
echo "Unknown parameter " $config_opt
cleanup_and_exit 1
fi
USING_METHOD="filesystem"
if (echo $config_val | egrep -q "^(LABEL|UUID)="); then
# We need to "strip" the quotes in LABEL or UUID,
# otherwise it will be passed into findfs as a part of
# LABEL or UUID. Note a label name itself may contain
# spaces and quotes.
config_val=$(eval echo $config_val)
if [ -z "$config_val" ] ;then
cleanup_and_exit 1
fi
fi
DUMP_FSTYPE=$config_opt
DUMP_TARGET=$config_val
handlelvordev $config_val 0
;;
esac
done < $KDUMP_CONFIG_FILE
fi
# Include vmcore-dmesg and associated dependencies.
if [ ! -f "$DMESG_COLLECTOR" ];then
    echo "Error: $DMESG_COLLECTOR is not present"
    cleanup_and_exit 1
fi
bin="$bin $DMESG_COLLECTOR"
# If user did not specify a default action, set the defaults.
if [ -z "$DEFAULT_ACTION" ];then
    DEFAULT_ACTION="reboot"
    FINAL_ACTION="reboot -f"
fi
# if default is mount root and run init, then add root fs.
if [ "$DEFAULT_ACTION" == "mount_root_run_init" ];then
    add_rootfs
fi
setup_cluster_nodes_and_options
# if no method was specified default to the currently booted filesystem
if [ -z "$USING_METHOD" ]
then
    mkdir -p $SAVE_PATH
    # NOTE(review): plain `df` may wrap long device names onto two lines,
    # which would break this `tail -1` parse; `df -P` avoids it — confirm
    # before changing since downstream parsing depends on this output.
    mntpoint=`df $SAVE_PATH | tail -1 | awk '{ print $NF }'`
    DUMP_TARGET=`mount | awk '$3 == "'$mntpoint'" { print $1 }'`
    # Prefer a persistent UUID= reference over the raw device node.
    DUMP_UUID=$(blkid -s UUID $DUMP_TARGET|cut -d ' ' -f2|tr -d '\042')
    [[ "$DUMP_UUID" =~ "UUID=" ]] && DUMP_TARGET="$DUMP_UUID"
    DUMP_FSTYPE=`mount | awk '$3 == "'$mntpoint'" { print $5 }'`
    handlelvordev $DUMP_TARGET 0
    # Make SAVE_PATH relative to the dump partition's mount point.
    if [ "$mntpoint" != "/" ]
    then
        SAVE_PATH=`echo $SAVE_PATH | sed "s,$mntpoint,,"`
    fi
fi
# Pack the fsck helper matching the dump filesystem (xfs needs none).
if [ "$DUMP_FSTYPE" = "btrfs" ]
then
    kdump_chk "test -f /sbin/btrfsck" "/sbin/btrfsck not found. Install package btrfs-progs and retry"
    bin="$bin /sbin/btrfsck"
elif [ "$DUMP_FSTYPE" = "xfs" ]
then
    :
elif [ -n "$DUMP_FSTYPE" ]
then
    kdump_chk "test -f /sbin/fsck.$DUMP_FSTYPE" "fsck.$DUMP_FSTYPE not found. Install package e2fsprogs and retry."
fi
[ -n "$DUMP_FSTYPE" ] && findmodule $DUMP_FSTYPE
# If there are ISCSI devices found in dump target path, include some associated
# files and modules.
# Packs iscsiadm/iscsid plus the host's iSCSI node database and config so the
# capture environment can log back into the same targets.  No-op unless
# $ISCSI_DEVICES was populated earlier.
prepare_iscsi_target () {
    if [ -n "$ISCSI_DEVICES" ];then
        kdump_chk "test -f /sbin/iscsiadm" "Can't find /sbin/iscsiadm"
        bin="$bin /sbin/iscsiadm"
        kdump_chk "test -f /sbin/iscsid" "Can't find /sbin/iscsid"
        bin="$bin /sbin/iscsid"
        # Pack /var/lib/iscsi/* and /etc/iscsi/*
        mkdir -p $MNTIMAGE/var/lib/iscsi/
        cp -r /var/lib/iscsi/* $MNTIMAGE/var/lib/iscsi/
        mkdir -p $MNTIMAGE/etc/iscsi/
        cp -r /etc/iscsi/* $MNTIMAGE/etc/iscsi/
        # iscsiadm does not like it if following does not exist.
        mkdir -p $MNTIMAGE/var/lock/iscsi/
        # Put list of targets in a file
        mkdir -p $MNTIMAGE/etc/
        echo "$ISCSI_TARGETS" | sed 's/^ //'> $MNTIMAGE/etc/iscsi_targets_to_activate
        vecho "Will activate following targets: `cat $MNTIMAGE/etc/iscsi_targets_to_activate`"
    fi
}
prepare_iscsi_target
# Copy udev's scsi_id helper into the initrd and expose it on the default
# PATH through a /bin symlink (full udev is not packed into the image).
inst_scsi_id() {
    local udev_dir="$MNTIMAGE/lib/udev"
    mkdir -p "$udev_dir"
    inst /lib/udev/scsi_id "$udev_dir/scsi_id"
    ln -s "/lib/udev/scsi_id" "$MNTIMAGE/bin/scsi_id"
}
# If there are dm multipath devices found in dump target path, include some
# associated files and modules.
# Packs the multipath tools, full util-linux findfs/blkid (busybox findfs
# mishandles multipath), dmsetup/kpartx, and the multipath shared libraries.
# No-op unless $multipath_devices was populated earlier.
prepare_multipath_target () {
    local tempfile
    local bindings
    local libdir
    local f
    [ -z "$multipath_devices" ] && return
    vecho "Prepare multipath related files"
    kdump_chk "test -f /sbin/multipath" "Can't find /sbin/multipath"
    bin="$bin /sbin/multipath"
    vecho "Adding /sbin/multipath"
    if [ -f /etc/multipath.conf ]; then
        inst /etc/multipath.conf $MNTIMAGE/etc/multipath.conf
    fi
    kdump_chk "test -f /sbin/multipathd" "Can't find /sbin/multipathd"
    bin="$bin /sbin/multipathd"
    vecho "Adding /sbin/multipathd"
    # Pack findfs and blkid also. Busybox findfs is not working well with
    # multipath devices where it can return a child component device for
    # a uuid instead of top level multipath device.
    kdump_chk "test -f /sbin/findfs" "Can't find /sbin/findfs"
    bin="$bin /sbin/findfs"
    vecho "Adding /sbin/findfs"
    kdump_chk "test -f /sbin/blkid" "Can't find /sbin/blkid"
    bin="$bin /sbin/blkid"
    vecho "Adding /sbin/blkid"
    # blkid command can save blkid.tab cache file if this dir is present.
    # primarily helpful for debugging.
    mkdir -p $MNTIMAGE/etc/blkid/
    mkdir -p $MNTIMAGE/tmp
    # For kpartx command which creates device maps for disk partitions
    # and creates device files
    kdump_chk "test -f /sbin/dmsetup" "Can't find /sbin/dmsetup"
    bin="$bin /sbin/dmsetup"
    vecho "Adding /sbin/dmsetup"
    kdump_chk "test -f /sbin/kpartx" "Can't find /sbin/kpartx"
    bin="$bin /sbin/kpartx"
    vecho "Adding /sbin/kpartx"
    # Pick lib vs lib64 from how the multipath binary itself was linked.
    if ldd $(which multipath) 2>/dev/null |grep -q lib64; then
        libdir="/lib64"
    else
        libdir="/lib"
    fi
    mkdir -p $MNTIMAGE/$libdir/multipath
    mkdir -p $MNTIMAGE/etc/multipath
    mkdir -p $MNTIMAGE/lib/udev
    # /lib64/multipath/libcheckdirectio.so requires libaio
    for f in \
        /etc/multipath/* \
        $(ls $libdir/libaio* 2>/dev/null) \
        $(ls $libdir/multipath/* 2>/dev/null); do
        [ -e "$f" ] && inst "$f" "$MNTIMAGE/$f"
    done
}
inst_scsi_id
prepare_multipath_target
#if we are using makedumpfile here, then generate the config file
#also only build this config if we don't have vmcoreinfo on this kernel
# Requires kernel-debuginfo (vmlinux) — or xen-syms on a Xen dom0 — to
# extract the symbol information makedumpfile needs for filtering.
if [ -n "$CORE_COLLECTOR" -a ! -e /sys/kernel/vmcoreinfo ]; then
    RUN_KERN_VER=`uname -r`
    if [ ! -f /usr/lib/debug/lib/modules/$RUN_KERN_VER/vmlinux ]
    then
        echo "kernel-debuginfo-$RUN_KERN_VER is not installed. You need this to use makedumpfile!"
        echo "please install it and restart the kdump service"
        cleanup_and_exit 1
    fi
    XEN_OPTS=""
    grep -q control_d /proc/xen/capabilities 2>/dev/null
    if [ $? -eq 0 ]
    then
        # This is a dom0 xen kernel so we need to add xen-syms to the
        # makedumpfile config
        RUN_XEN_VER=${RUN_KERN_VER%xen}
        if [ ! -f /usr/lib/debug/boot/xen-syms-$RUN_XEN_VER.debug ]
        then
            echo "xen-syms.debug not found and is needed on this kernel to use makedumpfile!"
            echo "please install it and restart the kdump service"
            cleanup_and_exit 1
        fi
        XEN_OPTS="--xen-syms /usr/lib/debug/boot/xen-syms-$RUN_XEN_VER.debug"
        /usr//sbin/makedumpfile -g $MNTIMAGE/etc/makedumpfile.config $XEN_OPTS > /dev/null 2>&1
    else
        /usr//sbin/makedumpfile -g $MNTIMAGE/etc/makedumpfile.config -x /usr/lib/debug/lib/modules/$RUN_KERN_VER/vmlinux > /dev/null 2>&1
    fi
    if [ $? != 0 ]; then
        echo "could not generate makedumpfile configuration. aborting"
        cleanup_and_exit 1
    fi
fi
#include extra user-specified modules for kdump initrd
for n in $extra_kdump_mods; do
    findmodule $n
done
# After we get all the modules, lets depsolve the list
# so that we load them in proper order
depsolve_modlist
#copy in busybox and make symlinks to its supported utilities
# The awk script prints everything after the "Currently..." banner in
# busybox's applet listing; each applet name becomes a symlink.
cp /sbin/busybox $MNTIMAGE/sbin/busybox
bin="$bin /sbin/busybox"
cd $MNTIMAGE/sbin
for i in `/sbin/busybox |
    awk 'BEGIN {found=0} /.*/ { if (found) print $0 } /Currently/ {found=1}' |
    sed -e's/,//g' -e's/busybox//g'`
do
    ln -s busybox $MNTIMAGE/sbin/$i
done
cd - > /dev/null 2>&1
# Software RAID support: ship mdadm with the host's array config.
if [ -f /etc/mdadm.conf ]
then
    cp /etc/mdadm.conf $MNTIMAGE/etc
    bin="$bin /sbin/mdadm"
fi
# we need the fstab file so that we can fsck properly
cp /etc/fstab $MNTIMAGE/etc/fstab
bin="$bin /sbin/fsck.ext2 /sbin/fsck.ext3 /sbin/fsck.ext4"
if [ -f "$TMPDISKLIST" ]; then
    mv $TMPDISKLIST $MNTIMAGE/etc/critical_disks
fi
#THIS IS WHERE WE GENERATE OUR ADDITIONAL UTILITIES
#Busybox doesn't have a /bin/sh applet,
#so we build a reasonable facsimile here
cat >> $MNTIMAGE/bin/sh << EOF
#!/bin/hush
#drop the -c from the command line
shift 1
#now execute the passed command
#don't exec this or $@ won't work
/bin/hush -c "\$@"
EOF
chmod 755 $MNTIMAGE/bin/sh
# udhcpc callback script: configures the interface, default routes and
# resolv.conf from the DHCP lease (busybox udhcpc interface contract).
cat >> $MNTIMAGE/usr/share/udhcpc/default.script << EOF
#!/bin/hush
[ -z "\$1" ] && echo "Error: should be called from udhcpc" && exit 1
case "\$1" in
    deconfig)
        /sbin/ifconfig \$interface 0.0.0.0
        ;;
    renew|bound)
        /sbin/ifconfig \$interface \$ip netmask \$subnet
        if [ -n "\$router" ] ; then
            echo "deleting routers"
            while route del default gw 0.0.0.0 dev \$interface 2>/dev/null ; do
                :
            done
            for i in \$router ; do
                route add default gw \$i dev \$interface
            done
        fi
        echo -n > /etc/resolv.conf
        [ -n "\$domain" ] && echo search \$domain >> /etc/resolv.conf
        for i in \$dns ; do
            echo adding dns \$i
            echo nameserver \$i >> /etc/resolv.conf
        done
        ;;
    *)
        echo "Unable to get a DHCP address retry..."
        exit 1
        ;;
esac
exit 0
EOF
#NETWORKING SCRIPT DIRECTORIES
# ifupdown pre-up hook: creates bridge/bond/vlan devices as needed before
# the interface is configured, then brings the link up and applies the
# configured link_delay.
# NOTE(review): inside the generated script, `[ ! -f /sys/class/net/... ]`
# tests for a regular file but sysfs entries are directories, so the bond
# creation path always runs — confirm whether `-d` was intended.
cat >> $MNTIMAGE/etc/network/if-pre-up.d/pre-up-script << EOF
#!/bin/hush
PATH=\$PATH:/scriptfns
. /etc/ifcfg-\$IFACE
link_delay()
{
    if [ -n "\$LINK_DELAY" ]
    then
        echo "\$IFACE Link Up.  Waiting \$LINK_DELAY Seconds"
        sleep \$LINK_DELAY
        echo "Continuing"
    fi
}
LINK_DELAY=$LINK_DELAY
if [ "\$BUS_ID" == "Bridge" ]
then
    brctl addbr \$IFACE
    brctl setfd \$IFACE 1
fi
bring_up_bond_interface()
{
    BOND_MASTER=\$1
    if [ ! -f /sys/class/net/\$BOND_MASTER ]; then
        echo +\$BOND_MASTER > /sys/class/net/bonding_masters
        #this is a bond find and bring up the slaves
        echo searching for slaves
        find_activate_slaves \$BOND_MASTER
    fi
}
if [ "\$BUS_ID" == "Bonding" ]
then
    bring_up_bond_interface \$IFACE
elif [ "\$BUS_ID" == "Vlan" ]
then
    case "\$IFACE" in
    vlan*)
        VLAN_ID=\${IFACE#vlan*}
        ifup \$PHYSDEV
        vconfig add \$PHYSDEV \$VLAN_ID
        ip link set \$PHYSDEV.\$VLAN_ID name \$IFACE
        ;;
    *.*)
        #bring up the base interface first
        BASE_DEV=\`echo \$IFACE | cut -d"." -f1\`
        VLAN_ID=\`echo \$IFACE | cut -d"." -f2\`
        ifup \$BASE_DEV
        vconfig add \$BASE_DEV \$VLAN_ID
        ;;
    esac
elif [ "\$BUS_ID" == "Bridge" ]
then
    echo searching for bridge members
    find_bridge_members \$IFACE
fi
ifconfig \$IFACE up
link_delay
exit 0
EOF
for i in `ls $MNTIMAGE/etc/network/if-pre-up.d`
do
    chmod 755 $MNTIMAGE/etc/network/if-pre-up.d/$i
done
# ifupdown up hook: assigns the static address when not using DHCP.
cat >> $MNTIMAGE/etc/network/if-up.d/up-script << EOF
#!/bin/hush
PATH=\$PATH:/scriptfns
if [ "\$METHOD" != "dhcp" ]
then
    . /etc/ifcfg-\$IFACE
    if [ -n "\$IPADDR" ]
    then
        ifconfig \$IFACE \$IPADDR netmask \$NETMASK
    else
        ifconfig \$IFACE up
    fi
fi
exit 0
EOF
for i in `ls $MNTIMAGE/etc/network/if-up.d`
do
    chmod 755 $MNTIMAGE/etc/network/if-up.d/$i
done
chmod 755 $MNTIMAGE/usr/share/udhcpc/default.script
# WE DONT HAVE FUNCTIONS AVAILABLE IN MSH
# SO WE IMPLEMENT THEM HERE AS scripts
SCRIPTDIR=$MNTIMAGE/scriptfns
mkdir -p $SCRIPTDIR
# load_selinux_policy <rootpath>: chroot into the mounted root and load its
# SELinux policy, unless selinux=0 was passed on the kernel command line.
cat >> $SCRIPTDIR/load_selinux_policy << EOF
#!/bin/hush
ROOTPATH=\$1
NEEDSELINUX=1
LOADPOL=
grep -q selinux=0 /proc/cmdline
if [ \$? -eq 0 ]
then
    NEEDSELINUX=0
fi
if [ -x \$ROOTPATH/usr/sbin/load_policy ]
then
    LOADPOL=/usr/sbin/load_policy
fi
if [ -x \$ROOTPATH/sbin/load_policy ]
then
    LOADPOL=/sbin/load_policy
fi
if [ \$NEEDSELINUX -eq 1 -a -n "\$LOADPOL" ]
then
    echo "Loading SELINUX policy"
    chroot \$ROOTPATH \$LOADPOL -i
    if [ \$? -ge 2 ]
    then
        echo "Policy load failed, a relabel will be required on reboot"
    fi
fi
EOF
# show_memstats [shortmem|mem|slab|iomem]...: dump the requested /proc
# memory statistics (debugging aid for the capture environment).
cat >> $SCRIPTDIR/show_memstats << EOF
#!/bin/hush
while [ \$# -gt 0 ]; do
    case \$1 in
    shortmem)
        cat /proc/meminfo | grep -e "^MemFree" -e "^Cached" -e "^Slab"
        ;;
    mem)
        cat /proc/meminfo
        ;;
    slab)
        cat /proc/slabinfo
        ;;
    iomem)
        cat /proc/iomem
        ;;
    esac
    shift
done
echo
EOF
# map_interface <netdev>: figure out which kernel-named interface in the
# capture environment corresponds to the configured <netdev> (matching on
# PCI bus id, or MAC for multi-port cards; bonds/bridges/vlans keep their
# names) and append "<configured> <real> tmpN" to /etc/iface_map for the
# later rename_interfaces pass.
cat >> $SCRIPTDIR/map_interface << EOF
#!/bin/hush
if [ -e /tmp/tmpcnt ]
then
    TMPCNT=\`cat /tmp/tmpcnt\`
else
    TMPCNT=0
fi
#erase previously recorded map
RENAMED=""
REAL_DEV=""
HWADDR=""
NETDEV=\$1
. /etc/ifcfg-\$NETDEV
HWADDR=\`echo "\$HWADDR" | awk '{print toupper(\$1)}'\`
for j in \`ifconfig -a | awk '/.*Link encap.*/ {print \$1}'\`
do
    case "\$BUS_ID" in
    Bonding)
        REAL_DEV=\$NETDEV
        RENAMED="yes"
        ;;
    Vlan)
        case "\$NETDEV" in
        vlan*)
            REAL_DEV=\$NETDEV
            BASE_DEV=\$PHYSDEV
            REAL_BASE=\`grep "^\$BASE_DEV " /etc/iface_map | cut -d" " -f2\`
            sed -i -e "s/PHYSDEV=\$BASE_DEV/PHYSDEV=\$REAL_BASE/" /etc/ifcfg-\$NETDEV
            RENAMED="yes"
            ;;
        *.*)
            BASE_DEV=\`echo \$NETDEV | cut -d"." -f1\`
            VLAN_ID=\`echo \$NETDEV | cut -d"." -f2\`
            REAL_BASE=\`grep "^\$BASE_DEV " /etc/iface_map | cut -d" " -f2\`
            REAL_DEV=\$REAL_BASE.\$VLAN_ID
            NETDEV=\$BASE_DEV.\$VLAN_ID
            RENAMED="yes"
            ;;
        esac
        ;;
    Bridge)
        REAL_DEV=\$NETDEV
        RENAMED="yes"
        ;;
    *)
        INFO=\`ls -l /sys/class/net/\$j/device 2>/dev/null | sed -e's/\\(.*\\/\\)\\(.*$\\)/\\2/'\`
        if [ "\$INFO" == "\$BUS_ID" -a -z "\$REAL_DEV" ]
        then
            # Some multiport network cards report one BUS address
            # for all ports. In such cases differentiate the ports by
            # the MAC address if it is included in the ifcfg-ethn file
            # as HWADDR.
            NUM_NIC_PORTS=\`ls /sys/class/net/\$j/device/net 2>/dev/null | grep -c .\`
            if [ "\$HWADDR" -a "\$NUM_NIC_PORTS" -gt 1 ]
            then
                REAL_MAC=\`ifconfig \$j | awk '/.*HWaddr.*/ {print toupper(\$5)}'\`
                if [ "\$HWADDR" == "\$REAL_MAC" ]
                then
                    REAL_DEV=\$j
                    RENAMED="yes"
                fi
            else
                REAL_DEV=\$j
                RENAMED="yes"
            fi
        fi
        ;;
    esac
done
if [ -z "\$RENAMED" ]
then
    echo "Could not find a mapping for device \$NETDEV"
    exit 1
fi
#build the interface rename map
echo \$NETDEV \$REAL_DEV tmp\$TMPCNT >> /etc/iface_map
TMPCNT=\`echo \$TMPCNT 1 + p | dc\`
echo \$TMPCNT > /tmp/tmpcnt
echo mapping \$NETDEV to \$REAL_DEV
EOF
# rename_interfaces: apply /etc/iface_map in two passes — first rename every
# configured interface to its temporary tmpN name, then to the real kernel
# name — so overlapping names (e.g. eth0<->eth1 swaps) cannot collide.
# Also rewrites ifcfg files, the activation list, /etc/network/interfaces,
# static routes and bond MASTER= references to match.
cat >> $SCRIPTDIR/rename_interfaces << EOF
#!/bin/hush
rename_iface_in_file()
{
    local i=\$1
    local CURRENT=\$2
    local INTERIM=\$3
    fname=\$(basename \$i)
    case \$fname in
    ifcfg-*)
        # Replace iface occurences only in DEVICE lines
        sed -i 's,'"\(^DEVICE=\)\"\?\$CURRENT\"\?\$"','"\1\$INTERIM"',' \$i
        ;;
    iface_to_activate)
        # Replace iface occurences in lines containg exact this iface
        sed -i 's,'"^\$CURRENT\$"','"\$INTERIM"',' \$i
        ;;
    interfaces)
        # Replace iface occurences only in iface lines
        sed -i 's,'"\(^iface\ \)\$CURRENT\ "','"\1\$INTERIM\ "',' \$i
        ;;
    esac
}
MAP_COUNT=\`awk 'END{print NR}' /etc/iface_map\`
#now do all the renaming - first to temp space
for j in \`seq 1 1 \$MAP_COUNT\`
do
    CURRENT=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$1}' /etc/iface_map\`
    NEW=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$2}' /etc/iface_map\`
    INTERIM=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$3}' /etc/iface_map\`
    mv /etc/ifcfg-\$CURRENT /etc/ifcfg-\$INTERIM
    for i in /etc/ifcfg-\$INTERIM /etc/iface_to_activate /etc/network/interfaces
    do
        rename_iface_in_file "\$i" "\$CURRENT" "\$INTERIM"
    done
    if [ -f /etc/network/route-static ]
    then
        # the double quotes lets us expand the variables
        sed -i 's,'"\(.*dev\) \$CURRENT"','"\1 \$INTERIM"',g' /etc/network/route-static
    fi
done
for j in \`seq 1 1 \$MAP_COUNT\`
do
    CURRENT=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$1}' /etc/iface_map\`
    NEW=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$2}' /etc/iface_map\`
    INTERIM=\`awk -v MATCH=\$j '{if (NR == MATCH) print \$3}' /etc/iface_map\`
    mv /etc/ifcfg-\$INTERIM /etc/ifcfg-\$NEW
    for i in /etc/ifcfg-\$NEW /etc/iface_to_activate /etc/network/interfaces
    do
        rename_iface_in_file "\$i" "\$INTERIM" "\$NEW"
    done
    if [ -f /etc/network/route-static ]
    then
        # the double quotes lets us expand the variables
        sed -i 's,'"\(.*dev\) \$INTERIM"','"\1 \$NEW"',g' /etc/network/route-static
    fi
    IS_BOND=\`echo /etc/ifcfg-\$NEW | grep bond\`
    if [ -n "\$IS_BOND" ]
    then
        for i in \`ls /etc/ifcfg-*\`
        do
            awk -v str2="MASTER=\$NEW" "{gsub(/.*MASTER=\$CURRENT.*/,str2);print}" \$i > \$i.tmp
            mv \$i.tmp \$i
        done
    fi
done
rm -f /etc/iface_map
exit 0
EOF
# find_activate_slaves <bond>: enslave every ifcfg device whose MASTER=
# names the rising bond, then bring the slave up.
cat >> $SCRIPTDIR/find_activate_slaves << EOF
#!/bin/hush
BOND_MASTER=\$1
for j in \`ls /etc/ifcfg-*\`
do
    MASTER=""
    touch \$j
    . \$j
    if [ "\$MASTER" == "\$BOND_MASTER" ]
    then
        dev=\`echo \$j | cut -d'-' -f2-\`
        #this is a slave of the rising interface
        echo enslaving \$dev to \$BOND_MASTER
        echo +\$dev > /sys/class/net/\$BOND_MASTER/bonding/slaves
        ifup \$dev
    fi
done
EOF
# find_bridge_members <bridge>: attach every ifcfg device whose BRIDGE=
# names the rising bridge, enabling promiscuous mode on each member.
# Fix: the shebang read "#!/bin/msh", but the initrd only symlinks busybox
# applets and every sibling helper uses hush (see the "WE DONT HAVE
# FUNCTIONS AVAILABLE IN MSH" note above) — a missing msh applet would make
# bridge setup fail, so run this under hush like the rest.
cat >> $SCRIPTDIR/find_bridge_members << EOF
#!/bin/hush
for j in \`ls /etc/ifcfg-*\`
do
    touch \$j
    BRIDGE=""
    . \$j
    if [ "\$1" == "\$BRIDGE" ]
    then
        dev=\`echo \$j | cut -d'-' -f2-\`
        ifconfig \$dev promisc
        ifup \$dev
        echo adding \$dev to \$1
        brctl addif \$1 \$dev
    fi
done
EOF
# add_route <file>: replay a sysconfig-style route file (route-static,
# route6-static, rule-*, ...) line by line through `ip <-4|-6> <type> add`;
# the protocol and object type are derived from the file name.
cat >> $SCRIPTDIR/add_route << EOF
#!/bin/hush
file=\$1
handle_ip_file() {
    local f t type= file=\$1 proto="-4"
    f=\${file##*/}
    t=\${f%%-*}
    type=\${t%%6}
    if [ "\$type" != "\$t" ]; then
        proto="-6"
    fi
    cat "\$file"
    nr_lines=\$(wc -l < "\$file")
    for i in \`seq 1 \$nr_lines\`; do
        line=\$(awk -v li=\$i '{if(NR==li){print}}' \$file)
        /sbin/ip \$proto \$type add \$line
    done
}
if [ -f "\$file" ]; then
    handle_ip_file \$file
fi
EOF
# monitor_dd_progress <block_size>: wait for the dd writing /proc/vmcore to
# start, then poke it with SIGUSR1 every 5 seconds and render the record
# count it reports into a "Copied X MB / Y MB" progress line.
# Fix: the final cleanup removed "/tmp/dd_progres_file" (typo) while the
# progress data is read from "/tmp/dd_progress_file", so the stale progress
# file was never deleted.
cat >> $SCRIPTDIR/monitor_dd_progress << EOF
#!/bin/hush
SRC_FILE_SIZE=\`ls -l /proc/vmcore | awk '{print \$5}'\`
BLOCK_SIZE=\$1
SRC_FILE_MB=\`dc \$SRC_FILE_SIZE 1048576 / p\`
while true
do
    DD_PID=\`pidof dd\`
    if [ -n "\$DD_PID" ]
    then
        break
    fi
done
while true
do
    sleep 5
    if [ ! -d /proc/\$DD_PID ]
    then
        break
    fi
    kill -SIGUSR1 \$DD_PID
    CURRENT_SIZE=\`tail -n 1 /tmp/dd_progress_file | sed "s/[^0-9].*//g"\`
    CURRENT_MB=\`dc \$CURRENT_SIZE \$BLOCK_SIZE \* 1048576 / p\`
    echo -n -e "Copied \$CURRENT_MB MB / \$SRC_FILE_MB MB\\\r"
done
rm -f /tmp/dd_progress_file
EOF
# monitor_scp_progress <user@host> <remote_file>: poll the remote file size
# over ssh while scp copies the vmcore and print a progress line.
cat >> $SCRIPTDIR/monitor_scp_progress << EOF
#!/bin/hush
SRC_FILE_SIZE=\`ls -l /proc/vmcore | awk '{print \$5}'\`
LOCATION=\$1
REMOTE_FILE=\$2
SRC_FILE_MB=\`dc \$SRC_FILE_SIZE 1048576 / p\`
while true
do
    SCP_PID=\`pidof scp | awk '{print \$1}'\`
    if [ -n "\$SCP_PID" ]
    then
        break
    fi
done
while true
do
    sleep 5
    if [ ! -d /proc/\$SCP_PID ]
    then
        break
    fi
    SSH_OUTPUT=\`ssh -q -i $SSH_KEY_LOCATION -o BatchMode=yes \$LOCATION ls -l \$REMOTE_FILE\`
    REMOTE_SIZE=\`echo \$SSH_OUTPUT | awk '{print \$5}'\`
    REMOTE_SIZE_MB=\`dc \$REMOTE_SIZE 1048576 / p\`
    echo -n -e "Copied \$REMOTE_SIZE_MB MB / \$SRC_FILE_MB MB\\\r"
done
EOF
# monitor_cp_progress <dst_file>: same idea for a local cp of the vmcore —
# poll the destination file's size and print a progress line.
cat >> $SCRIPTDIR/monitor_cp_progress <<EOF
#!/bin/hush
SRC_FILE_SIZE=\`ls -l /proc/vmcore | awk '{print \$5}'\`
DST_FILE=\$1
SRC_FILE_MB=\`dc \$SRC_FILE_SIZE 1048576 / p\`
while true
do
    CP_PID=\`pidof cp\`
    if [ -n "\$CP_PID" ]
    then
        break
    fi
done
while true
do
    sleep 5
    if [ ! -d /proc/\$CP_PID ]
    then
        break
    fi
    LS_SIZE=\`ls -l \$DST_FILE | awk '{print \$5}'\`
    LS_MB=\`dc \$LS_SIZE 1048576 / p\`
    echo -n -e "Copied \$LS_MB MB / \$SRC_FILE_MB MB\\\r"
done
EOF
# handle_event: minimal uevent helper (installed via
# /sys/kernel/uevent_helper in /init); only services firmware-load
# requests by streaming the blob into the sysfs loader interface.
cat >> $SCRIPTDIR/handle_event <<EOF
#!/bin/hush
FIRMWARE_DIRS="/lib/firmware"
if [ "\$ACTION" != "add" ]
then
    exit 0
fi
if [ "\$SUBSYSTEM" != "firmware" ]
then
    exit 0
fi
for DIR in \$FIRMWARE_DIRS; do
    if [ ! -f \$DIR/\$FIRMWARE ]
    then
        continue
    fi
    echo 1 > /sys\$DEVPATH/loading
    cat "\$DIR/\$FIRMWARE" > /sys\$DEVPATH/data
    echo 0 > /sys\$DEVPATH/loading
    exit 0
done
EOF
# display_mem_usage: one-line free/total memory summary (with percentage).
cat >> $SCRIPTDIR/display_mem_usage <<EOF
#!/bin/hush
FREEMEM=\`free | awk '/^ *Mem:/ {print \$4}'\`
TOTALMEM=\`free | awk '/^ *Mem:/ {print \$2}'\`
FREEPCT=\`dc \$FREEMEM \$TOTALMEM / 100 \* p\`
echo -n "Free memory/Total memory (free %): "
echo \$FREEMEM / \$TOTALMEM \( \$FREEPCT \)
EOF
# If iscsi devices are in boot path, load a script to start connections.
# iscsi_start: starts iscsid and logs into each target listed in
# /etc/iscsi_targets_to_activate (written by prepare_iscsi_target above).
cat >> $SCRIPTDIR/iscsi_start <<EOF
#!/bin/hush
ISCSIDIR="/var/lib/iscsi/nodes/"
ISCSITARGETFILE="/etc/iscsi_targets_to_activate"
if [ ! -d \$ISCSIDIR ];then
    exit 0
fi
if [ ! -f \$ISCSITARGETFILE ];then
    exit 1
fi
/sbin/iscsid
for iscsi_target in \`cat /etc/iscsi_targets_to_activate\`;do
    echo "Activating iscsi target: \$iscsi_target"
    /sbin/iscsiadm -m node -T \$iscsi_target -l
done
EOF
# A script to do multipath related processing like creating device nodes.
# process_multipath: for every dm multipath map, locate its major:minor in
# /sys/block, mknod the /dev node and add a /dev/mpath/<name> symlink
# (there is no udev in the capture environment to do this).
cat >> $SCRIPTDIR/process_multipath <<EOF
#!/bin/hush
find_dev_in_sysblock () {
    local majmin=\$1
    find /sys/block/ -name dev | while read device; do \
        echo "\$majmin" | cmp -s \$device && echo \$device ; done \
        | sed -e 's,/dev$,,'
}
#Create /dev/mpath dir
mkdir -p /dev/mpath/
maj_min=\`dmsetup ls --target multipath | sed 's/.*(\([0-9]*\), \([0-9]*\)).*/\1:\2/'\`
for majmin in \$maj_min; do
    sysdev=\`find_dev_in_sysblock \$majmin\`
    if [ -n "\$sysdev" ]; then
        major=\`echo \$majmin | cut -d ":" -f1\`
        minor=\`echo \$majmin | cut -d ":" -f2\`
        devnode=\`echo \$sysdev | sed 's,/sys/block/,/dev/,'\`
        echo "Creating multipath node \$devnode(\$majmin), sys=\$sysdev"
        mknod \$devnode b \$major \$minor
        # create /dev/mpath/* soft links
        mpathnode=\`dmsetup ls --target multipath | grep "(\${major}, \${minor})" | cut -f1\`
        if [ -n "\$mpathnode" ];then
            ln -s \$devnode /dev/mpath/\$mpathnode
        fi
    fi
done
EOF
# Emit the s390 CCW network bring-up helper into the initrd.  The generated
# script (ccw_net_init <ifcfg-file>) un-blacklists the device's subchannels,
# groups them under the configured driver, applies OPTIONS/PORTNAME, sets
# the device online and renames the resulting interface to DEVICE.
install_ccw_net_init()
{
    cat >> $SCRIPTDIR/ccw_net_init <<EOF
#!/bin/hush
BLACKLIST="/proc/cio_ignore"
CIO_SETTLE="/proc/cio_settle"
timedwait_for_path()
{
    local somepath=\$1
    local i=0
    while [ \$i -lt 20 ]; do
        [ -e \$somepath ] && break
        sleep 0.1
        i=\$((\$i + 1))
    done
    if [ ! -e \$somepath ]; then
        echo "timed out while waiting for: \$somepath"
        exit 1
    fi
}
ifcfg_file=\$1
NETDEV=\`echo \$ifcfg_file | cut -d'-' -f2\`
. \$ifcfg_file
echo "ccw_net_init \$NETDEV \$ifcfg_file"
if [ -z "\$SUBCHANNELS" -o -z "\$NETTYPE" -o -z "\$DEVICE" ]; then
    echo "no SUBCHANNELS,NETTYPE or DEVICE in \$ifcfg_file"
    echo "dont know how to init \$NETDEV:"
    cat \$ifcfg_file
    exit 1
fi
#remove from blacklist
echo "free \$SUBCHANNELS" > \$BLACKLIST
if [ \$? -ne 0 ]; then
    echo "Failed to free \$i from \$BLACKLIST"
fi
echo 1 > \$CIO_SETTLE
#group buses
timedwait_for_path "/sys/bus/ccwgroup/drivers/\$NETTYPE/group"
bus1=\`echo \$SUBCHANNELS | cut -d',' -f1\`
echo "\$SUBCHANNELS" > /sys/bus/ccwgroup/drivers/\$NETTYPE/group
if [ \$? -ne 0 ]; then
    echo "Failed to group \$SUBCHANNELS in \$NETTYPE/group"
    exit 1
fi
timedwait_for_path "/sys/bus/ccw/devices/\$bus1/group_device"
group_device=\`readlink -f /sys/bus/ccw/devices/\$bus1/group_device\`
if [ -z "\$group_device" ]; then
    echo "Failed to find: /sys/bus/ccw/devices/\$bus1/group_device"
    exit 1
fi
#set options
if [ -n "\$OPTIONS" ]; then
    for option in "\$OPTIONS"; do
        opt_name=\`echo \$option | cut -d'=' -f1\`
        opt_val=\`echo \$option | cut -d'=' -f2\`
        echo \$opt_val > \$group_device/\$opt_name
        if [ \$? -ne 0 ]; then
            echo "Failed to set option \$group_device/\$opt_name"
        fi
    done
fi
if [ -n "\$PORTNAME" ]; then
    echo \$PORTNAME > \$group_device/portname
    if [ \$? -ne 0 ]; then
        echo "Failed to set PORTNAME"
    fi
fi
#bring online
echo 1 > \$group_device/online
if [ \$? -ne 0 ]; then
    echo "Failed to bring \$DEVICE online"
fi
#set desired interface name
ifname=\`cat \$group_device/if_name\`
if [ "\$ifname" != "\$DEVICE" ]; then
    echo "\$ifname != \$DEVICE"
    ip link set dev \$ifname name \$DEVICE
    if [ \$? -ne 0 ]; then
        echo "Failed to rename: \$ifname -> \$DEVICE"
    fi
fi
EOF
    chmod a+x $SCRIPTDIR/ccw_net_init
}
# Append the (idempotent, guarded by $network_up) network bring-up sequence
# to the generated /init via emit: map and rename interfaces, ifup each
# configured device, warn on failures, then replay static routes.
emit_network()
{
    emit "if [ \$network_up -eq 0 ]"
    emit "then"
    emit "	for i in \`ls /etc/ifcfg-*\`"
    emit "	do"
    emit "		NETDEV=\`echo \$i | cut -d\"-\" -f2-\`"
    emit "		map_interface \$NETDEV"
    emit "	done"
    emit "	rename_interfaces"
    emit "	for IFACE in \`cat /etc/iface_to_activate\`"
    emit "	do"
    emit "		ifup \$IFACE"
    emit "		IFADDR=\`ifconfig \$IFACE | awk '/inet addr/ {print \$2}' | cut -d\":\" -f 2\`"
    emit "		if [ -z \"\$IFADDR\" ]"
    emit "		then"
    emit "			echo \"\$IFACE failed to come up\""
    emit "		fi"
    emit "	done"
    emit "	add_route \"/etc/network/route-static\""
    emit "	network_up=1"
    emit "fi"
}
#DONT ADD STUFF to SCRIPTDIR PAST HERE
# Make every generated helper executable in one pass.
for i in `ls $SCRIPTDIR/*`
do
    chmod 755 $i
done
if [ -e /etc/fstab.sys ]; then
    inst /etc/fstab.sys "$MNTIMAGE/etc/fstab.sys"
fi
#build a good passwd file
cat >> $MNTIMAGE/etc/passwd << EOF
root:x:0:0:root:/root:/bin/bash
EOF
# Modules never needed in the capture kernel (sound, wifi, ...).
# NOTE: the backslash-newline continuation must keep a separating space so
# "virtio_balloon" and "microcode" stay distinct list entries.
default_exclude_modules="snd soundcore cfg80211 mac80211 iwl virtio_balloon\
 microcode mlx4_core"
# Decide whether a kernel module should be left out of the kdump initrd.
# $1: module basename.  User-requested modules ($extra_kdump_mods) always
# take precedence over the built-in $default_exclude_modules list; matching
# is substring-style via a quoted =~ pattern.
# Returns 0 to exclude the module, 1 to keep it.
exclude_module() {
    local pattern
    # An explicitly requested extra module is never excluded.
    for pattern in $extra_kdump_mods; do
        if [[ "$1" =~ "$pattern" ]]; then
            return 1
        fi
    done
    # Drop modules known to be useless in the capture environment.
    for pattern in $default_exclude_modules; do
        if [[ "$1" =~ "$pattern" ]]; then
            return 0
        fi
    done
    return 1
}
# Install the resolved module list into the initrd: strip debug symbols
# when possible, and copy any firmware blobs each module declares
# (preferring /lib/firmware/updates over /lib/firmware).
mkdir -p $MNTIMAGE/lib/modules/$kernel
for MODULE in $MODULES; do
    _base_name=$(basename $MODULE)
    if exclude_module $_base_name; then
        MODULES=${MODULES/$MODULE/}
        continue
    fi
    if [ -x /usr/bin/strip ]; then
        /usr/bin/strip -g $verbose $MODULE -o $MNTIMAGE/lib/modules/$kernel/$_base_name
    else
        cp $verbose -a $MODULE $MNTIMAGE/lib/modules/$kernel/
    fi
    modinfo $MODULE | awk '/^firmware:/{print $2}' | while read firmware;
    do
        mkdir -p $(dirname $MNTIMAGE/lib/firmware/$firmware)
        # if there is a kmod firmware
        if [ -e /lib/firmware/updates/$firmware ]; then
            cp /lib/firmware/updates/$firmware $MNTIMAGE/lib/firmware/$firmware
        else
            cp /lib/firmware/$firmware $MNTIMAGE/lib/firmware/$firmware
        fi
    done
done
# Regenerate modules.dep inside the image so modprobe works there.
depmod -b $MNTIMAGE/
ln -sf ram1 $MNTIMAGE/dev/ram
# FIXME -- this can really go poorly with clvm or duplicate vg names.
# nash should do lvm probing for us and write its own configs.
if [ -n "$vg_list" ]; then
    inst /sbin/lvm "$MNTIMAGE/bin/lvm"
    bin="$bin /sbin/lvm"
    if [ -f /etc/lvm/lvm.conf ]; then
        cp $verbose --parents /etc/lvm/lvm.conf $MNTIMAGE/
    fi
fi
# Console font so messages render on the capture console.
mkdir -p $MNTIMAGE/$FONTDIR
inst $FONTDIR/${DEFAULT_FONT} $MNTIMAGE/$FONTDIR/
# Truncate the rc file (>| overrides noclobber) before emit()s append to it.
echo -n >| $RCFILE
# Begin the generated /init: mount pseudo-filesystems, register the uevent
# firmware helper, tune VM writeback for the small capture environment and
# create a static /dev.  Note: unescaped $PATH and $FONTDIR/$DEFAULT_FONT
# are expanded at build time on purpose.
cat >> $MNTIMAGE/init << EOF
#!/bin/hush
export PATH=$PATH:/scriptfns
# Tell libdevmapper that there is no udev
export DM_DISABLE_UDEV=1
mount -t proc /proc /proc
echo Mounting proc filesystem
echo Mounting sysfs filesystem
mount -t sysfs /sys /sys
echo "/scriptfns/handle_event" > /sys/kernel/uevent_helper
echo 1 > /proc/sys/vm/dirty_background_ratio
echo 5 > /proc/sys/vm/dirty_ratio
echo 10 > /proc/sys/vm/dirty_writeback_centisecs
echo 50 > /proc/sys/vm/dirty_expire_centisecs
echo Creating /dev
mount -o mode=0755 -t tmpfs /dev /dev
mkdir /dev/pts
mount -t devpts -o gid=5,mode=620 /dev/pts /dev/pts
mkdir /dev/shm
mkdir /dev/mapper
echo Creating initial device nodes
mknod /dev/null c 1 3
mknod /dev/zero c 1 5
mknod /dev/systty c 4 0
mknod /dev/tty c 5 0
mknod /dev/console c 5 1
mknod /dev/ptmx c 5 2
mknod /dev/rtc c 254 0
mknod /dev/urandom c 1 9
mknod /dev/efirtc c 10 136
export network_up=0
setfont $FONTDIR/$DEFAULT_FONT -C /dev/console
display_mem_usage
set -o pipefail
EOF
# Some helper functions go here.
# These run inside the capture environment: vmcore-dmesg saving to a local
# filesystem or over ssh (incomplete-then-rename so a partial file is never
# mistaken for a complete one), and a wait loop for multipath maps with the
# configured disk_timeout.
cat >> $MNTIMAGE/init << EOF
save_vmcore_dmesg_fs() {
    local _dmesg_collector=\$1
    local _path=\$2
    local _exitcode
    echo "Saving vmcore-dmesg.txt"
    \$_dmesg_collector /proc/vmcore > \${_path}/vmcore-dmesg-incomplete.txt
    _exitcode=\$?
    if [ \$_exitcode -eq 0 ]; then
        mv \${_path}/vmcore-dmesg-incomplete.txt \${_path}/vmcore-dmesg.txt
        # Make sure file is on disk. There have been instances where later
        # saving vmcore failed and system rebooted without sync and there
        # was no vmcore-dmesg.txt available.
        sync
        echo "Saved vmcore-dmesg.txt"
    else
        echo "Saving vmcore-dmesg.txt failed"
    fi
    return \$_exitcode
}
save_vmcore_dmesg_ssh() {
    local _dmesg_collector=\$1
    local _path=\$2
    local _opts="\$3"
    local _location=\$4
    local _exitcode
    echo "Saving vmcore-dmesg.txt"
    \$_dmesg_collector /proc/vmcore | ssh \$_opts \$_location "dd of=\$_path/vmcore-dmesg-incomplete.txt"
    _exitcode=\$?
    if [ \$_exitcode -eq 0 ]; then
        ssh -q \$_opts \$_location mv \$_path/vmcore-dmesg-incomplete.txt \$_path/vmcore-dmesg.txt
        echo "Saved vmcore-dmesg.txt"
    else
        echo "Saving vmcore-dmesg.txt failed"
    fi
    return \$_exitcode
}
wait_for_multipath_devices() {
    local _found=0
    while true
    do
        sleep 1
        mpath_list=\$(dmsetup ls --target multipath 2> /dev/null | awk '{print \$1}')
        for mpdev in $multipath_devices
        do
            # Also make sure the corresponding device node is created
            if echo \$mpath_list | grep "\${mpdev##*/}" > /dev/null &&\
                [ -e /dev/mapper/\${mpdev##*/} ]
            then
                _found=1
            else
                _found=0
                break;
            fi
        done
        [ \$_found -eq 1 ] && break
        if [ -n "$DISK_TIMEOUT" -a "\$timeout_count" -ge "$DISK_TIMEOUT" ]
        then
            break
        fi
        timeout_count=\`expr \$timeout_count + 1\`
    done
}
EOF
# makedumpfile creates line mode terminal friendly output when TERM=dumb is set.
if [ "$ARCH" == "s390x" ]; then
echo "export TERM=dumb" >> $MNTIMAGE/init
fi
# Linux on System z: Bring zfcp adapter and devices online
emit_zfcp_device_init() {
local DEVICE WWPN FCPLUN
emit "echo Waiting 2 seconds for driver initialization."
emit "sleep 2"
cat /etc/zfcp.conf | grep -v "^#" | tr "A-Z" "a-z" | while read DEVICE WWPN FCPLUN; do
cemit <<EOF
echo "free ${DEVICE/0x/}" > /proc/cio_ignore
echo -n 1 > /sys/bus/ccw/drivers/zfcp/${DEVICE/0x/}/online
echo -n 1 > /proc/cio_settle
echo -n $WWPN > /sys/bus/ccw/drivers/zfcp/${DEVICE/0x/}/port_add
echo -n $FCPLUN > /sys/bus/ccw/drivers/zfcp/${DEVICE/0x/}/$WWPN/unit_add
display_mem_usage
EOF
done
}
# Linux on System z: Bring DASD devices online
emit_dasd_device_init() {
local DEVICE OPTIONS
cat /etc/dasd.conf | grep -v "^#" | tr "A-Z" "a-z" | while read DEVICE OPTIONS; do
test -n "$DEVICE" || continue
emit "echo "free ${DEVICE/0x/}" > /proc/cio_ignore"
emit "echo -n 1 > /proc/cio_settle"
emit "echo -n 1 > /sys/bus/ccw/devices/${DEVICE/0x/}/online"
# Set device options (if specified)
test -n "$OPTIONS" || continue
set $OPTIONS
while [ -n "$1" ]; do
(
attribute="$1"
IFS="="
set $attribute
if [ $1 != "use_diag" ]; then
emit "echo $2 > /sys/bus/ccw/devices/${DEVICE/0x/}/$1"
fi
)
shift
done
done
}
# XXX really we need to openvt too, in case someting changes the
# color palette and then changes vts on fbcon before gettys start.
# (yay, fbcon bugs!)
for i in 0 1 2 3 4 5 6 7 8 9 10 11 12 ; do
emit "mknod /dev/tty$i c 4 $i"
done
for i in 0 1 2 3 ; do
emit "mknod /dev/ttyS$i c 4 $(($i + 64))"
done
make_trace_mem "At init start" 1+:mem 2+:iomem 3+:slab
for MODULE in $MODULES; do
text=""
module=`echo $MODULE | sed "s|.*/||" | sed "s/.k\?o$//"`
# check if module is in blacklist
skipit=""
for m in $blacklist_mods; do
if [ $module == $m ]; then
skipit="skip"
break
fi
done
if [ ! -z "$skipit" ]; then
continue
fi
fullmodule=`echo $MODULE | sed "s|.*/||"`
if [ -n "$modulefile" ]
then
options=`sed -n -e "s/^options[ ][ ]*$module[ ][ ]*//p" $modulefile 2>/dev/null`
fi
if [ -n "$KDUMP_CONFIG_FILE" ]
then
options2=`sed -n -e "s/^options[ ][ ]*$module[ ][ ]*//p" $KDUMP_CONFIG_FILE 2>/dev/null`
fi
# Overwrite options if option is specified in kdump.conf
if [ -n "$options2" ]; then
options=$options2
fi
if [ -n "$options" ]; then
vecho "Adding module $module$text with options $options"
else
vecho "Adding module $module$text"
fi
emit "echo \"Loading $fullmodule module\""
emit "insmod /lib/modules/$kernel/$fullmodule $options"
make_trace_mem "After module $fullmodule has been loaded" 1:shortmem 2+:mem 3+:slab
# Hack - we need a delay after loading usb-storage to give things
# time to settle down before we start looking a block devices
if [ "$module" = "usb-storage" ]; then
emit "echo Waiting 8 seconds for driver initialization."
emit "sleep 8"
fi
if [ "$module" = "zfcp" -a -f /etc/zfcp.conf ]; then
emit_zfcp_device_init
fi
done
# initialise network devices
# arch specific setup before interfaces can be configured
arch_netdev_init
# Before we wait for block devices to come up, try to bring up any iscsi
# devices and bring up the networking
if [ -n "$ISCSI_DEVICES" ];then
# bring up the network
emit_network
# start iscsi
emit "iscsi_start"
fi
# Before we create our block devices, we need to make sure that we have all the needed block devices discovered
# Thats seems like a chicken and egg problem, I know, but we generated a critcal_disks list when we build this initramfs
# that tell us what devices we need to wait to see in /sys/block
# first bring DASDs online on s390
if [ -f /etc/dasd.conf ]; then
emit_dasd_device_init
fi
cat >> $MNTIMAGE/init << EOF
echo "Waiting for required block device discovery"
block_mknod() {
local basename=\$1
local major minor minor_range part_sep
[ -b /dev/\$basename ] && return
major=\`cat /sys/block/\$basename/dev | cut -d":" -f1\`
minor=\`cat /sys/block/\$basename/dev | cut -d":" -f2\`
minor_range=\`cat /sys/block/\$basename/range | cut -d":" -f2\`
minor_end=\`echo \$minor \$minor_range + p | dc\`
minor_start=\`echo \$minor 1 + p | dc\`
part_num=1
part_sep=''
basename=\`echo \$basename | sed 's/\!/\//g'\`
if echo \$basename | grep -q '^cciss'; then
part_sep='p'
elif echo \$basename | grep -q '^ida'; then
part_sep='p'
fi
echo "Creating block device \$basename"
mknod /dev/\$basename b \$major \$minor
hdparm -z /dev/\$basename >/dev/null 2>/dev/null
if [ \$minor_range -gt 1 ]; then
for j in \`seq \$minor_start 1 \$minor_end\`
do
if [ ! -e /dev/\$basename\$part_num ]
then
mknod /dev/\$basename\$part_sep\$part_num b \$major \$j
fi
part_num=\`expr \$part_num + 1\`
done
fi
}
wait_critical_disks_by_vendor_model_type() {
for i in \`cat /etc/critical_disks | awk '{print \$1}'\`
do
IDSTRING=\`grep \$i /etc/critical_disks | awk '{print \$2}'\`
COUNT=\`grep \$i /etc/critical_disks | awk '{print \$3}'\`
found=0
echo -n "Waiting for \$COUNT \$i-like device(s)..."
while true
do
for j in \`ls /sys/block\`
do
DSKSTRING=""
TMPNAME=""
if [ ! -d /sys/block/\$j/device ]
then
continue
fi
for a in "vendor" "model" "type"
do
TMPNAME=\`cat /sys/block/\$j/device/\$a 2>/dev/null \`
DSKSTRING="\$DSKSTRING \$TMPNAME"
done
DSKSTRING=\`echo \$DSKSTRING | sed -e's/ //g'\`
if [ "\$DSKSTRING" == "\$IDSTRING" ]
then
found=\$((\$found + 1))
fi
if [ \$found -ge \$COUNT ]
then
break 2
fi
done
sleep 1
if [ -n "$DISK_TIMEOUT" -a "\$timeout_count" -ge "$DISK_TIMEOUT" ]
then
break 2
fi
timeout_count=\`expr \$timeout_count + 1\`
done
echo Found
found=0
done
}
get_id_file_from_type() {
case \$1 in
scsi_ids)
echo "/etc/critical_scsi_ids"
;;
virtio_ids)
echo "/etc/virtio_ids"
;;
scm_ids)
echo "/etc/scm_ids"
;;
dasd_bus_ids)
echo "/etc/dasd_bus_ids"
;;
esac
}
#$1: id-type
#$2: device name
get_device_id() {
case \$1 in
scsi_ids)
echo \$(scsi_id --whitelisted --device=\$2 --replace-whitespace)
;;
virtio_ids)
local majmin=\$(cat /sys/block/\$(basename \$2)/dev 2>/dev/null)
cat /sys/dev/block/\$majmin/serial 2>/dev/null
;;
scm_ids)
basename \$(readlink /sys/block/\$(basename \$2)/device) 2>/dev/null
;;
dasd_bus_ids)
basename \$(readlink /sys/block/\$(basename \$2)/device) 2>/dev/null
;;
esac
}
_wait_critical_disks() {
local id_tmp=""
local nr_lines i line id
local idfile
# \$1==vendor_model_type means using old vendor/model/type sysfs attributes
# to idenfify the critical disks.
if [ "\$1" = "vendor_model_type" ]; then
wait_critical_disks_by_vendor_model_type
return \$?
fi
idfile=\$(get_id_file_from_type \$1)
#rhel6 hush does not support read line by line in a while loop, so..
nr_lines=\$(wc -l < "\$idfile")
for i in \`seq 1 \$nr_lines\`; do
id=\$(awk -v li=\$i 'NR==li {print}' \$idfile)
echo "Waiting for device with \$1: \$id ..."
while true; do
cd /sys/block
for j in *; do
[ ! -b /dev/\$j ] && block_mknod \$j
id_tmp=\$(get_device_id \$1 "/dev/\$j")
if [ "\$id" = "\$id_tmp" ]; then
echo "Found device with \$1: \$id"
break 2
fi
done
sleep 1
if [ -n "$DISK_TIMEOUT" -a "\$timeout_count" -ge "$DISK_TIMEOUT" ]
then
break 2
fi
timeout_count=\`expr \$timeout_count + 1\`
done
done
}
wait_critical_disks() {
if [ -f /etc/critical_scsi_ids ]; then
_wait_critical_disks scsi_ids
fi
if [ -f /etc/virtio_ids ]; then
_wait_critical_disks virtio_ids
fi
if [ -f /etc/scm_ids ]; then
_wait_critical_disks scm_ids
fi
if [ -f /etc/dasd_bus_ids ]; then
_wait_critical_disks dasd_bus_ids
fi
if [ -f /etc/critical_disks ]; then
_wait_critical_disks vendor_model_type
fi
}
timeout_count=0
wait_critical_disks
EOF
make_trace_mem "After block device discovery" 1:shortmem 2+:mem 3+:slab
# HACK: module loading + device creation isn't necessarily synchronous...
# this will make sure that we have all of our devices before trying
# things like RAID or LVM
emit "echo Creating Remain Block Devices"
emit "mkdir /dev/cciss"
emit "mkdir /dev/ida"
emit "for i in \`ls /sys/block\`; do"
emit " block_mknod \$i"
emit "done"
#now do any software raid devices we might have
emit "if [ -f /etc/mdadm.conf ]"
emit "then"
emit " for i in \`awk '/^ARRAY[[:space:]]/{print \$2}' /etc/mdadm.conf\`"
emit " do"
emit " MD_MIN=\`echo \$i | sed -e 's/^[^0-9]*\([0-9]\+\)$/\1/'\`"
emit " mknod \$i b 9 \$MD_MIN"
emit " done"
emit "fi"
make_trace_mem "After creation of block devices" 1:shortmem 2+:mem 3+:slab
if [ -n "$vg_list" ]; then
emit "echo Making device-mapper control node"
emit "DM_MAJ=\`cat /proc/devices | grep misc | cut -d\" \" -f2\`"
emit "DM_MIN=\`cat /proc/misc | grep device-mapper | cut -d\" \" -f2\`"
emit "mknod /dev/mapper/control b \$DM_MAJ \$DM_MIN"
fi
# multipath
if [ -n "$multipath_devices" ]; then
emit "echo Creating multipath devices"
emit "multipathd -B || multipathd"
emit "wait_for_multipath_devices"
emit "dmsetup ls --target multipath --exec 'kpartx -a -p p'"
# Create various multipath device nodes and links
#emit "process_multipath"
fi
if [ -n "$net_list" ]; then
for netdev in $net_list; do
emit "echo Bringing up $netdev"
handle_netdev $netdev
#emit $network
done
fi
if [ -n "$raiddevices" ]; then
for dev in $raiddevices; do
cp -a /dev/${dev} $MNTIMAGE/dev
emit "#need code here to set up md devs"
done
fi
emit "if [ -f /etc/mdadm.conf ]"
emit "then"
emit " mdadm -A -s"
emit "display_mem_usage"
emit "fi"
if [ -n "$vg_list" ]; then
emit "echo Scanning logical volumes"
emit "lvm vgscan --ignorelockingfailure --mknodes"
emit "echo Activating logical volumes"
emit "lvm vgchange -a y --ignorelockingfailure"
emit "DM_NUM=0"
emit "for i in \`lvm lvs --noheadings -o lv_name,vg_name | sed -e's/ \\+/:/g'\`"
emit "do"
emit " LV=\`echo \$i | awk -F\":\" '{ print \$2 }'\`"
emit " VGRP=\`echo \$i | awk -F\":\" '{ print \$3 }'\`"
emit " mkdir -p /dev/\$VGRP"
emit " if [ ! -e /dev/\$VGRP/\$LV ]"
emit " then"
emit " ln -s /dev/mapper/\$VGRP-\$LV /dev/\$VGRP/\$LV"
emit " DM_NUM=\`echo \$DM_NUM 1 + p | dc\`"
emit " if [ -z \"\$noresume\" ]"
emit " then"
emit " /sbin/dmsetup resume /dev/mapper/\$VGRP-\$LV"
emit " fi"
emit " fi"
emit "done"
emit "display_mem_usage"
fi
make_trace_mem "After scanning logical volumes" 1:shortmem 2+:mem 3+:slab
copysshenv()
{
# Copy over the key itself.
dir=`dirname $SSH_KEY_LOCATION`
mkdir -p $MNTIMAGE/$dir
cp -a $SSH_KEY_LOCATION $MNTIMAGE/$SSH_KEY_LOCATION
# Copy over root and system-wide ssh configs.
if [ -f /root/.ssh/config ]; then
mkdir -p $MNTIMAGE/root/.ssh
chmod 700 $MNTIMAGE/root/.ssh
cp -a /root/.ssh/config $MNTIMAGE/root/.ssh/config
fi
if [ -f /etc/ssh/ssh_config ]; then
mkdir -p $MNTIMAGE/etc/ssh/
cp -a /etc/ssh/ssh_config $MNTIMAGE/etc/ssh/ssh_config
fi
# Copy over any known_hosts files that would apply.
if [ -f /root/.ssh/known_hosts ]; then
mkdir -p $MNTIMAGE/root/.ssh
chmod 700 $MNTIMGE/root/.ssh
cp -a /root/.ssh/known_hosts $MNTIMAGE/root/.ssh/known_hosts
fi
if [ -f /etc/ssh/ssh_known_hosts ]; then
mkdir -p $MNTIMAGE/etc/ssh
cp -a /etc/ssh/ssh_known_hosts $MNTIMAGE/etc/ssh/ssh_known_hosts
fi
return 0
}
prepare_nfs4()
{
grep '^nobody:' /etc/passwd >> "$MNTIMAGE/etc/passwd"
grep '^rpc:' /etc/passwd >> "$MNTIMAGE/etc/passwd"
grep '^rpcuser:' /etc/passwd >> "$MNTIMAGE/etc/passwd"
grep '^nobody:' /etc/group >> "$MNTIMAGE/etc/group"
grep '^rpc:' /etc/group >> "$MNTIMAGE/etc/group"
mkdir -m 0755 -p "$MNTIMAGE/var/lib/nfs/rpc_pipefs"
mkdir -m 0755 -p "$MNTIMAGE/var/lib/rpcbind"
mkdir -m 0755 -p "$MNTIMAGE/var/lib/nfs/statd/sm"
for f in /etc/netconfig /etc/idmapd.conf /etc/nsswitch.conf /etc/rpc /etc/protocols /etc/services
do
[ -f "$f" ] && cp "$f" $MNTIMAGE/$f
done
bin=" $bin /sbin/mount.nfs4 /usr/sbin/rpc.idmapd /sbin/rpc.statd /sbin/rpcbind"
[ -d /usr/lib64/libnfsidmap ] && k_extras="$k_extras $(echo /usr/lib64/libnfsidmap/*)"
[ -d /usr/lib/libnfsidmap ] && k_extras="$k_extras $(echo /usr/lib/libnfsidmap/*)"
}
enter_user_space()
{
if [ "$ARCH" != "s390x" ]; then
emit "echo Resetting kernel time value to BIOS time and timezone value to UTC."
emit "cp /etc/utctime /etc/localtime"
emit "hwclock --hctosys -l"
fi
emit "display_mem_usage"
emit "echo Creating root device."
emit "#check to see if we have root= on the command line"
emit "ROOTDEV=\`cat /proc/cmdline | grep root=\`"
emit "if [ -n \"\$ROOTDEV\" ]"
emit "then"
emit " ROOTDEV=\`cat /proc/cmdline | sed 's/^.*root=//' | cut -d\" \" -f1\`"
emit " IS_LABEL=\`echo \$ROOTDEV | grep LABEL\`"
emit " IS_UUID=\`echo \$ROOTDEV | grep UUID\`"
emit " if [ -n \"\$IS_LABEL\" -o -n \"\$IS_UUID\" ] "
emit " then"
emit " ROOTDEV=\`findfs \"\$ROOTDEV\"\`"
emit " fi"
emit "else"
emit " #we need to get the root major/minor from real-root-dev"
emit " ROOT_DEV_NR=\`cat /proc/sys/kernel/real-root-dev\`"
emit " ROOT_MIN=\`echo \$ROOT_DEV_NR | sed -e's/\\([0-9a-f]\\{1,2\\}\\)\\([0-9a-f]\\{2\\}\\)/\\2/'\`"
emit " ROOT_MAJ=\`echo \$ROOT_DEV_NR | sed -e's/\\([0-9a-f]\\{1,2\\}\\)\\([0-9a-f]\\{2\\}\\)/\\1/'\`"
emit " mknod /dev/rootdev b 0x\$ROOT_MAJ 0x\$ROOT_MIN"
emit " ROOTDEV=/dev/rootdev"
emit "fi"
emit "display_mem_usage"
emit "echo Checking root filesystem."
if [ "$DUMP_FSTYPE" = "btrfs" ] ;then
emit "btrfsck \$ROOTDEV"
emit "if [ \$? != 0 ]"
emit "then"
emit " echo btrfsck \$ROOTDEV failed. Executing $FINAL_ACTION"
emit " $FINAL_ACTION"
emit "fi"
elif [ "$DUMP_FSTYPE" = "xfs" ] ;then
emit "# xfs does not need fsck"
else
emit "fsck -p \$ROOTDEV"
emit "if [ \$? -gt 1 ]"
emit "then"
emit " echo fsck \$ROOTDEV failed. Executing $FINAL_ACTION"
emit " $FINAL_ACTION"
emit "fi"
fi
emit "echo Mounting root filesystem: mount -t $rootfs \$ROOTDEV /sysroot"
emit "mount -t $rootfs \$ROOTDEV /sysroot >/dev/null 2>&1 "
emit "if [ \$? != 0 ]"
emit "then"
emit " echo unable to mount rootfs. Dropping to shell"
emit " /bin/hush"
emit "fi"
emit "#move various filesystems and prime the rootfs to boot properly"
emit "umount /proc"
emit "mount -t proc proc /sysroot/proc"
emit "umount /sys"
emit "mount -t sysfs sysfs /sysroot/sys"
emit "mount -o bind /dev /sysroot/dev"
emit "load_selinux_policy /sysroot"
emit "display_mem_usage"
emit "touch /sysroot/fastboot"
emit "echo Switching to new root and running init."
emit "exec switch_root /sysroot /sbin/init"
}
if [ -n "$CLUSTER_NODE_LIST" ]; then
# bring up the network
emit_network
emit "fence_kdump_send $FENCE_KDUMP_OPTS $CLUSTER_NODE_LIST &"
fi
handle_default_action() {
if [ "$DEFAULT_ACTION" == "shell" ]
then
emit "echo dropping to initramfs shell"
emit "echo exiting this shell will reboot your system"
emit "/bin/hush"
fi
case $DEFAULT_ACTION in
reboot|halt|poweroff|shell)
emit "$FINAL_ACTION"
;;
mount_root_run_init)
emit "echo Attempting to enter user-space to capture vmcore"
enter_user_space
;;
esac
}
# normalize_path <path>
# Prints the normalized path, where it removes any duplicated
# and trailing slashes.
# Example:
# $ normalize_path ///test/test//
# /test/test
normalize_path() {
shopt -q -s extglob
set -- "${1//+(\/)//}"
shopt -q -u extglob
echo "${1%/}"
}
# convert_abs_rel <from> <to>
# Prints the relative path, when creating a symlink to <to> from <from>.
# Example:
# $ convert_abs_rel /usr/bin/test /bin/test-2
# ../../bin/test-2
# $ ln -s $(convert_abs_rel /usr/bin/test /bin/test-2) /usr/bin/test
convert_abs_rel() {
local __current __absolute __abssize __cursize __newpath
local -i __i __level
set -- "$(normalize_path "$1")" "$(normalize_path "$2")"
# corner case #1 - self looping link
[[ "$1" == "$2" ]] && { echo "${1##*/}"; return; }
# corner case #2 - own dir link
[[ "${1%/*}" == "$2" ]] && { echo "."; return; }
IFS="/" __current=($1)
IFS="/" __absolute=($2)
__abssize=${#__absolute[@]}
__cursize=${#__current[@]}
while [[ ${__absolute[__level]} == ${__current[__level]} ]]
do
(( __level++ ))
if (( __level > __abssize || __level > __cursize ))
then
break
fi
done
for ((__i = __level; __i < __cursize-1; __i++))
do
if ((__i > __level))
then
__newpath=$__newpath"/"
fi
__newpath=$__newpath".."
done
for ((__i = __level; __i < __abssize; __i++))
do
if [[ -n $__newpath ]]
then
__newpath=$__newpath"/"
fi
__newpath=$__newpath${__absolute[__i]}
done
echo "$__newpath"
}
# this func is used to setup fips environment.
# please place it just above the final copy of bin and libs,
# because it depends on the final collection of bin, kdump_libs
# k_extra
setup_fips()
{
bin="$bin /usr/bin/fipscheck"
#ssh+fips require libraries that aren't found with ldd
if [ -e /usr/lib/libssl.so.10 ]; then
k_extras="$k_extras /usr/lib/libssl.so.10"
fi
if [ -e /usr/lib64/libssl.so.10 ]; then
k_extras="$k_extras /usr/lib64/libssl.so.10"
fi
# find all fips .hmac files
for n in $bin $kdump_libs $k_extras; do
_hmac=${n%/*}/.${n##*/}.hmac
if [[ -e "$_hmac" ]]; then
fips_hmac="$fips_hmac $_hmac"
fi
done
}
if [ -n "$KDUMP_CONFIG_FILE" ]; then
memtotal=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
#timezone info for date which outputs YYYY-MM-DD-hh:mm
cp /etc/localtime $MNTIMAGE/etc/localtime
if [ "$ARCH" != "s390x" ]; then
cp /usr/share/zoneinfo/UTC $MNTIMAGE/etc/utctime
cp /etc/adjtime $MNTIMAGE/etc/adjtime
emit "if grep -q UTC /etc/adjtime"
emit " then"
emit " TIME_FORMAT=-u"
emit " else"
emit " TIME_FORMAT=-l"
emit "fi"
emit "hwclock --hctosys \$TIME_FORMAT"
fi
emit "DATE=\`date +%Y-%m-%d-%T\`"
bin="$bin /sbin/dmsetup /sbin/kpartx"
if [ -z "$CORE_COLLECTOR" ] || [ "${CORE_COLLECTOR%%[[:blank:]]*}" = "makedumpfile" ]
then
bin="$bin /usr/sbin/makedumpfile"
fi
#ssh, scp require libraries that aren't found with ldd
lib=/lib && [ -d "/lib64" -a "$ARCH" != "ppc64" ] && lib=/lib64
k_extras="/$lib/libnss_compat.so.2 /$lib/libnss_files.so.2 /$lib/libnss_dns.so.2"
if [ "$ARCH" = "ppc64" ]
then
k_extras="$k_extras /lib64/libnss_compat.so.2 /lib64/libnss_files.so.2 /lib64/libnss_dns.so.2"
fi
case "$USING_METHOD" in
raw)
bin="$bin /sbin/blockdev"
#test raw partition
kdump_chk "dd if=$DUMP_TARGET count=1 of=/dev/null > /dev/null 2>&1" \
"Bad raw partition $DUMP_TARGET"
#check for available size is greater than $memtotal
available_size=$(fdisk -s $DUMP_TARGET)
if [ $available_size -lt $memtotal ]; then
echo "Warning: There might not be enough space to save a vmcore."
echo " The size of $DUMP_TARGET should be greater than $memtotal kilo bytes."
fi
#setup raw case
if [ -n "$KDUMP_PRE" ]
then
emit "$KDUMP_PRE"
emit "if [ \$? -ne 0 ]"
emit "then"
emit " echo kdump_pre script exited with non-zero status"
emit " $FINAL_ACTION"
emit "fi"
fi
emit "echo Saving to partition $DUMP_TARGET"
emit "display_mem_usage"
make_trace_mem "Before dumping vmcore" 1:shortmem 2+:mem 3+:slab
emit "monitor_dd_progress 512 &"
if [ -z "$CORE_COLLECTOR" ]
then
CORE_COLLECTOR="makedumpfile -c --message-level 1 -d 31"
else
if [ "${CORE_COLLECTOR%%[[:blank:]]*}" != "makedumpfile" ]
then
echo "Warning: specifying a non-makedumpfile core collector, you will have to recover the vmcore manually."
fi
fi
CORE_COLLECTOR=`echo $CORE_COLLECTOR | sed -e's/\(^makedumpfile\)\(.*$\)/\1 -F \2/'`
emit "$CORE_COLLECTOR /proc/vmcore | dd of=$DUMP_TARGET bs=512 >> /tmp/dd_progress_file 2>&1"
emit "exitcode=\$?"
emit "if [ \$exitcode == 0 ]"
emit "then"
emit " echo -e \"\\\033[0JSaving core complete\""
emit "fi"
emit "blockdev --flushbufs $DUMP_TARGET"
make_trace_mem "After dumping vmcore" 1:shortmem 2+:mem 3+:slab
if [ -x "$KDUMP_POST" ]; then
emit "$KDUMP_POST \$exitcode"
fi
emit "[ \$exitcode == 0 ] && $FINAL_ACTION"
;;
nfs|nfs4|ssh)
#build an /etc/passwd for scp to work properly
grep "^root" /etc/passwd > $MNTIMAGE/etc/passwd
# bring up the network
emit_network
if [ -n "$OFF_SUBNET" ]
then
# we are going to a different subnet
lhost=`echo $OFF_SUBNET|awk '{print $7}'|head -n 1`
else
# we are on the same subnet
lhost=`/sbin/ip route get to $remoteip 2>&1 |awk '{print $5}'|head -n 1`
fi
emit "echo Saving to remote location $DUMP_TARGET"
if [ -z "`echo $DUMP_TARGET|grep @`" ]; then
#NFS path
if [ "$USING_METHOD" = "nfs4" ]
then
prepare_nfs4
fi
#test nfs mount and directory creation
rlocation=`echo $DUMP_TARGET | sed 's/.*:/'"$remoteip"':/'`
tmnt=`mktemp -dq`
kdump_chk "mount -t $USING_METHOD -o nolock -o tcp $rlocation $tmnt" \
"Bad NFS mount $DUMP_TARGET"
kdump_chk "mkdir -p $tmnt/$SAVE_PATH" "Read only NFS mount $DUMP_TARGET"
kdump_chk "touch $tmnt/$SAVE_PATH/testfile" "Read only NFS mount $DUMP_TARGET"
kdump_chk "rm -f $tmnt/$SAVE_PATH/testfile" "Read only NFS mount $DUMP_TARGET"
tdir=`mktemp -dqp $tmnt/$SAVE_PATH`
rc=$?
available_size=$(df -P $tdir | tail -1 | tr -s ' ' ':' | cut -d: -f5)
rm -rf $tdir
umount -f $tmnt
if [ $? != 0 ]; then
rmdir $tmnt
echo "Cannot unmount the temporary directory"
cleanup_and_exit 1
fi
rm -rf $tmnt
if [ $rc != "0" ]; then
echo "Cannot create directory in $DUMP_TARGET: $SAVE_PATH"
cleanup_and_exit 1
fi
if [ -z "$CORE_COLLECTOR" ]; then
CORE_COLLECTOR="makedumpfile -c --message-level 1 -d 31"
fi
#check for available size is greater than $memtotal
if [ $available_size -lt $memtotal ]; then
echo "Warning: There might not be enough space to save a vmcore."
echo " The size of $DUMP_TARGET should be greater than $memtotal kilo bytes."
fi
#setup nfs case
if [ -n "$KDUMP_PRE" ]
then
emit "$KDUMP_PRE"
emit "if [ \$? -ne 0 ]"
emit "then"
emit " echo kdump_pre script exited with non-zero status"
emit " $FINAL_ACTION"
emit "fi"
fi
mkdir -p $MNTIMAGE/mnt
if [ "$USING_METHOD" = "nfs4" ]
then
emit "rpcbind"
emit "[ ! -d /var/lib/nfs/rpc_pipefs/nfs ] && mount -t rpc_pipefs rpc_pipefs /var/lib/nfs/rpc_pipefs"
emit "rpc.statd && rpc.idmapd"
#the mount in busybox may not support NFS4
emit "/sbin/mount.nfs4 $rlocation /mnt -o nolock"
else
emit "mount -t $USING_METHOD -o nolock -o tcp $rlocation /mnt"
fi
emit "exitcode=\$?"
emit "if [ \$exitcode -eq 0 ]"
emit "then"
emit " mkdir -p /mnt/$SAVE_PATH/$lhost-\$DATE"
emit " exitcode=\$?"
emit " if [ \$exitcode -eq 0 ]"
emit " then"
# Save vmcore-dmesg.txt
emit " DMESG_PATH=/mnt/$SAVE_PATH/$lhost-\$DATE/"
emit " save_vmcore_dmesg_fs ${DMESG_COLLECTOR} \${DMESG_PATH}"
# Save vmcore
emit " VMCORE=/mnt/$SAVE_PATH/$lhost-\$DATE/vmcore"
emit " export VMCORE"
emit " display_mem_usage"
make_trace_mem " Before dumping vmcore" 1:shortmem 2+:mem 3+:slab
emit " monitor_cp_progress \$VMCORE-incomplete &"
emit " $CORE_COLLECTOR /proc/vmcore \$VMCORE-incomplete "
emit " exitcode=\$?"
emit " if [ \$exitcode -eq 0 ]"
emit " then"
emit " mv \$VMCORE-incomplete \$VMCORE"
emit " echo -e \"\\\033[0JSaving core complete\""
emit " fi"
emit " umount -f /mnt"
emit " fi"
emit "fi"
make_trace_mem " After dumping vmcore" 1:shortmem 2+:mem 3+:slab
if [ -x "$KDUMP_POST" ]; then
emit "$KDUMP_POST \$exitcode"
fi
emit "[ \$exitcode -eq 0 ] && $FINAL_ACTION"
else
#SSH path
#rebuild $DUMP_TARGET replacing machine name with ip address
if [ -n "$CORE_COLLECTOR" ]
then
CORE_COLLECTOR=`echo $CORE_COLLECTOR | sed -e's/\(^makedumpfile\)\(.*$\)/\1 -F \2/'`
else
CORE_COLLECTOR="makedumpfile -F -c --message-level 1 -d 31"
fi
bin="$bin /usr/bin/ssh /usr/bin/scp"
rlocation=`echo $DUMP_TARGET|sed 's/@.*/@'"$rhost"'/'`
#test ssh path and directory creation
s_opts="-i $SSH_KEY_LOCATION -o BatchMode=yes"
kdump_chk "ssh -q $s_opts $rlocation mkdir -p $SAVE_PATH </dev/null" \
"Could not create $DUMP_TARGET:$SAVE_PATH, you probably need to run \"service kdump propagate\""
tdir=`ssh -q $s_opts $rlocation mktemp -dqp $SAVE_PATH </dev/null`
if [ $? != "0" ]; then
echo "$KDUMP_CONFIG_FILE: Could not create temp directory in $DUMP_TARGET:$SAVE_PATH"
cleanup_and_exit 1
fi
remote_df=`ssh -q $s_opts $rlocation df -P $tdir | tail -1`
available_size=$(echo $remote_df | tr -s ' ' '|' | cut -d\| -f4)
#check for available size is greater than $memtotal
if [ $available_size -lt $memtotal ]; then
echo "Warning: There might not be enough space to save a vmcore."
echo " The size of $rlocation:$tdir should be greater than $memtotal kilo bytes."
fi
#We do this to remove the temp directory from above
ssh -q $s_opts $rlocation rmdir $tdir
#setup ssh case, quick check to see if setup already
if [ -n "$KDUMP_PRE" ]
then
emit "$KDUMP_PRE"
emit "if [ \$? -ne 0 ]"
emit "then"
emit " echo kdump_pre script exited with non-zero status"
emit " $FINAL_ACTION"
emit "fi"
fi
if [ ! -r $MNTIMAGE/dev/urandom ]; then
copysshenv
mknod $MNTIMAGE/dev/urandom c 1 9
fi
emit "dd if=/var/lib/random-seed of=/dev/urandom 2>/dev/null"
emit "ssh -q $s_opts $rlocation mkdir $SAVE_PATH/$lhost-\$DATE"
# Save vmcore-dmesg.txt
emit "DMESG_PATH=$SAVE_PATH/$lhost-\$DATE/"
emit "save_vmcore_dmesg_ssh ${DMESG_COLLECTOR} \${DMESG_PATH} \"${s_opts}\" ${rlocation}"
# Save vmcore
emit "VMCORE=$SAVE_PATH/$lhost-\$DATE/vmcore"
emit "export VMCORE"
emit "display_mem_usage"
make_trace_mem "Before dumping vmcore" 1:shortmem 2+:mem 3+:slab
emit "monitor_scp_progress $rlocation $SAVE_PATH/$lhost-\$DATE/vmcore-incomplete &"
# We need to restrict the use of core_collector here, if
# its not makedumpfile, we need to ignore it as scp
# requires that core_collector dump to stdout
if [ "${CORE_COLLECTOR%%[[:blank:]]*}" == "makedumpfile" ]
then
emit "$CORE_COLLECTOR /proc/vmcore | ssh $s_opts $rlocation \"dd of=$SAVE_PATH/$lhost-\$DATE/vmcore-incomplete\""
else
emit "$CORE_COLLECTOR -q $s_opts /proc/vmcore $rlocation:\$VMCORE-incomplete"
fi
emit "exitcode=\$?"
emit "if [ \$exitcode == 0 ]"
emit "then"
if [ -z "$CORE_COLLECTOR" ] || [ "${CORE_COLLECTOR%%[[:blank:]]*}" != "makedumpfile" ]
then
emit " ssh -q $s_opts $rlocation mv \$VMCORE-incomplete \$VMCORE"
else
emit " ssh -q $s_opts $rlocation mv \$VMCORE-incomplete \$VMCORE.flat"
fi
emit " echo -e \"\\\033[0JSaving core complete\""
emit "fi"
make_trace_mem "After dumping vmcore" 1:shortmem 2+:mem 3+:slab
if [ -x "$KDUMP_POST" ]; then
emit "$KDUMP_POST \$exitcode"
fi
emit "[ \$exitcode -eq 0 ] && $FINAL_ACTION"
fi
;;
*)
#test filesystem and directory creation
tmnt=`mktemp -dq`
kdump_chk "mount -t $DUMP_FSTYPE $DUMP_TARGET $tmnt" "Bad mount point $DUMP_TARGET"
mkdir -p $tmnt/$SAVE_PATH
tdir=`mktemp -dqp $tmnt/$SAVE_PATH`
rc=$?
available_size=$(df $tdir | tail -1 | tr -s ' ' ':' | cut -d: -f4)
rm -rf $tdir
umount $tmnt
if [ $? != 0 ]; then
rmdir $tmnt
echo "Cannot unmount the temporary directory"
cleanup_and_exit 1
fi
rm -rf $tmnt
if [ $rc != "0" ]; then
echo "Cannot create directory in $DUMP_TARGET: $SAVE_PATH"
cleanup_and_exit 1
fi
#check for available size is greater than $memtotal
if [ $available_size -lt $memtotal ]; then
echo "Warning: There might not be enough space to save a vmcore."
echo " The size of $DUMP_TARGET should be greater than $memtotal kilo bytes."
fi
#setup filesystem case
if [ -n "$KDUMP_PRE" ]
then
emit "$KDUMP_PRE"
emit "if [ \$? -ne 0 ]"
emit "then"
emit " echo kdump_pre script exited with non-zero status"
emit " $FINAL_ACTION"
emit "fi"
fi
mkdir -p $MNTIMAGE/mnt
touch $MNTIMAGE/etc/mtab
if [ -z "$CORE_COLLECTOR" ]; then
CORE_COLLECTOR="makedumpfile -c --message-level 1 -d 31"
fi
emit "echo Saving to the local filesystem $DUMP_TARGET"
emit "DUMPDEV=$DUMP_TARGET"
emit "IS_LABEL=\`echo \$DUMPDEV | grep LABEL\`"
emit "IS_UUID=\`echo \$DUMPDEV | grep UUID\`"
emit "if [ -n \"\$IS_LABEL\" -o -n \"\$IS_UUID\" ] "
emit "then"
emit " DUMPDEV=\`findfs \"\$DUMPDEV\"\`"
emit "fi"
if [ "$DUMP_FSTYPE" = "btrfs" ] ; then
emit "btrfsck \$DUMPDEV"
emit "if [ \$? != 0 ]"
emit "then"
emit " echo btrfsck \$DUMPDEV failed. Executing default action"
handle_default_action
emit "fi"
elif [ "$DUMP_FSTYPE" = "xfs" ] ; then
emit "# xfs does not need fsck"
else
emit "fsck.$DUMP_FSTYPE -p \$DUMPDEV"
emit "if [ \$? -gt 1 ]"
emit "then"
emit " echo fsck.$DUMP_FSTYPE \$DUMPDEV failed. Executing default action"
handle_default_action
emit "fi"
fi
emit "mount -t $DUMP_FSTYPE \$DUMPDEV /mnt"
emit "if [ \$? == 0 ]"
emit "then"
emit " mkdir -p /mnt/$SAVE_PATH/127.0.0.1-\$DATE"
emit " VMCORE=/mnt/$SAVE_PATH/127.0.0.1-\$DATE/vmcore"
emit " export VMCORE"
emit " display_mem_usage"
make_trace_mem " Before selinux policy is loaded" 1:shortmem 2+:mem 3+:slab
if [ "$CORE_COLLECTOR" == "cp" ]; then
emit " monitor_cp_progress \$VMCORE-incomplete &"
fi
emit " load_selinux_policy /mnt"
make_trace_mem " Before dumping vmcore" 1:shortmem 2+:mem 3+:slab
# Save vmcore-dmesg
emit " DMESG_PATH=/mnt/$SAVE_PATH/127.0.0.1-\$DATE/"
emit " save_vmcore_dmesg_fs ${DMESG_COLLECTOR} \${DMESG_PATH}"
# Save vmcore
emit " $CORE_COLLECTOR /proc/vmcore \$VMCORE-incomplete "
emit " exitcode=\$?"
emit " if [ \$exitcode == 0 ]"
emit " then"
emit " mv \$VMCORE-incomplete \$VMCORE"
emit " echo -e \"\\\033[0JSaving core complete\""
emit " fi"
emit " sync"
make_trace_mem " After dumping vmcore" 1:shortmem 2+:mem 3+:slab
if [ -x "$KDUMP_POST" ]; then
emit " $KDUMP_POST \$exitcode"
fi
emit " [ -e /mnt/selinux ] && umount /mnt/selinux"
emit " umount /mnt"
emit " if [ \$exitcode == 0 ]"
emit " then"
emit " $FINAL_ACTION"
emit " fi"
emit "fi"
;;
esac
#now handle the default action
handle_default_action
#find the shared libraries. this snippet taken from kboot
TEMPLDCFG=`mktemp`
for lib in `ls /etc/ld.so.conf.d/* 2>/dev/null | grep -v kernelcap`
do
echo "include " $lib >> $TEMPLDCFG
done
/sbin/ldconfig -f $TEMPLDCFG -N
kdump_libs=`for n in $bin; do
ldd "$n" 2>/dev/null | tr -s '\011' ' ' |
sed 's/.*=> *//;s/^ *//;/ *(0x.*)/s///p;d'
done | sort | uniq | sed '/^ *$/d'`
rm -f $TEMPLDCFG
if [ -f "$FIPS_FILE" ]; then
_fips_mode=$(cat $FIPS_FILE)
[ "$_fips_mode" -eq 1 ] && setup_fips
fi
#copy the binaries and their shared libraries to the archive
for n in $bin $kdump_libs $k_extras $fips_hmac; do
mkdir -p $MNTIMAGE/`dirname $n`
if [ -h $MNTIMAGE/$n ]; then
rm -f $MNTIMAGE/$n
fi
if [ -h $n ]; then
_real=$(readlink -f "$n")
mkdir -p $MNTIMAGE/`dirname $_real`
cp $_real $MNTIMAGE/$_real
ln -sfn $(convert_abs_rel $n $_real) $MNTIMAGE/$n
else
cp $n $MNTIMAGE/$n
fi
done
fi
chmod +x $RCFILE
umask 0066
(cd $MNTIMAGE; findall . | cpio --quiet -c -o) >| $IMAGE || cleanup_and_exit 1
if [ -n "$compress" ]; then
gzip -9 < $IMAGE >| $target || rc=1
else
cp -a $IMAGE $target || rc=1
fi
rm -rf $MNTIMAGE $IMAGE $TMPDISKLIST
if [ -n "$MNTPOINT" ]; then rm -rf $MNTPOINT ; fi
sync && sync
exit $rc
| true |
3f4bff61a540cf04f7f86fc0f818d7a63746bbee | Shell | raxityo/code-pipeline-slack-approver | /scripts/publish.sh | UTF-8 | 369 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Package and publish the SAM application to the AWS Serverless Application
# Repository. Usage: publish.sh [deploy.conf]
set -eu

# Run relative to the script's own directory so the paths below resolve.
# (Quoted $(...) replaces the original unquoted backticks, which broke on
# paths containing whitespace.)
cd "$(dirname "$0")"

# Optional first argument overrides the default config file.
CONF=${1:-./deploy.conf}
# shellcheck disable=SC1090 -- config path is user-supplied
source "$CONF"

cd ..
rm -rf target
mkdir target
zip -r target/app app LICENSE README.md
# Upload build artifacts to S3, rewrite the template with S3 URIs, publish.
sam package --region us-east-1 --template-file template.yaml --output-template-file target/packaged.yaml --s3-bucket sam-artifacts-524176662322-us-east-1
sam publish --region us-east-1 --template target/packaged.yaml
| true |
a5ca372f74583b0e6df24ca370c806a591806759 | Shell | marclambrichs/dotfiles | /bootstrap.sh | UTF-8 | 4,480 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# --- Environment bootstrap -------------------------------------------------
# Resolve the repository location and load the helper libraries.
# type_exists, e_header, e_error, is_git_repo and run_brew are all
# provided by lib/utils and lib/brew sourced below.
DOTFILES_GIT_REMOTE="git@github.com:marclambrichs/dotfiles.git"
DOTFILES_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

source ./lib/utils
source ./lib/brew

# Before relying on Homebrew, check that packages can be compiled
if ! type_exists 'gcc'; then
    e_error "The XCode Command Line Tools must be installed first."
    exit 1
fi

# Check for Homebrew
if ! type_exists 'brew'; then
    e_header "Installing Homebrew..."
    ruby -e "$(curl -fsSkL raw.github.com/mxcl/homebrew/go)"
fi

# Check for git
e_header "Check for git..."
if ! type_exists 'git'; then
    e_header "Updating Homebrew..."
    brew update
    e_header "Installing Git..."
    brew install git
fi

git pull origin zsh;
git submodule update --init

# Initialize the git repository if it's missing
if ! is_git_repo; then
    e_header "Initializing git repository..."
    git init
    git remote add origin ${DOTFILES_GIT_REMOTE}
    git fetch origin master
    # Reset the index and working tree to the fetched HEAD
    # (submodules are cloned in the subsequent sync step)
    git reset --hard FETCH_HEAD
    # Remove any untracked files
    git clean -fd
fi

# Conditionally sync with the remote repository
# NOTE(review): $no_sync and $no_packages are not set anywhere in this
# file -- presumably they come from the environment; confirm.
if [[ $no_sync ]]; then
    printf "Skipped dotfiles sync.\n"
else
    e_header "Syncing dotfiles..."
    # Pull down the latest changes
    git pull --rebase origin master
    # Update submodules
    git submodule update --recursive --init --quiet
fi

# Install and update packages
if [[ $no_packages ]]; then
    printf "Skipped package installations.\n"
else
    printf "Updating packages...\n"
    # Install Homebrew formulae
    run_brew
fi
link() {
    # Symlink a file from the dotfiles repo into $HOME.
    #   $1 - source path relative to ${DOTFILES_DIRECTORY}
    #   $2 - destination name relative to ${HOME}
    # Remove any existing file or symlink first. Checking -L as well as -e
    # also removes dangling symlinks, which -e alone reports as absent;
    # this prevents ln from following a stale directory symlink.
    if [ -e "${HOME}/${2}" ] || [ -L "${HOME}/${2}" ]; then
        rm -f "${HOME}/${2}"
    fi
    # Force create/replace the symlink.
    ln -fs "${DOTFILES_DIRECTORY}/${1}" "${HOME}/${2}"
}
# Mirror the repository into $HOME: copy .gitconfig with rsync, clear the
# old .vim/.zsh directories, then symlink every tracked dotfile via link().
# Globals read: DOTFILES_DIRECTORY, HOME. Calls e_success from lib/utils.
mirrorfiles() {
    # Copy `.gitconfig`.
    # Any global git commands in `~/.bash_profile.local` will be written to
    # `.gitconfig`. This prevents them being committed to the repository.
    rsync -avh ${DOTFILES_DIRECTORY}/git/gitconfig ${HOME}/.gitconfig

    # Force remove the vim directory if it's already there.
    if [ -e "${HOME}/.vim" ]; then
        rm -rf "${HOME}/.vim"
    fi

    # Force remove the zsh directory if it's already there.
    if [ -e "${HOME}/.zsh" ]; then
        rm -rf "${HOME}/.zsh"
    fi

    # Create the necessary symbolic links between the `.dotfiles` and `HOME`
    # directory.
    link "aliases" ".aliases"
    link "asdf/asdfrc" ".asdfrc"
    link "asdf/tool-versions" ".tool-versions"
    link "bash/bashrc" ".bashrc"
    link "bash/profile" ".profile"
    link "bash/bash_prompt" ".bash_prompt"
    link "direnv" ".config/direnv"
    link "elixir/default-mix-commands" ".default-mix-commands"
    link "git/gitattributes" ".gitattributes"
    link "git/gitconfig" ".gitconfig"
    link "git/gitignore" ".gitignore"
    link "git/gituser-default" ".gituser-default"
    link "git/gitignore_global" ".gitignore_global"
    link "ohmyzsh" ".oh-my-zsh"
    link "path" ".path"
    link "ruby/default-gems" ".default-gems"
    link "tmux/tmux.conf" ".tmux.conf"
    link "vim" ".vim"
    link "vim/vimrc" ".vimrc"
    link ".zshrc" ".zshrc"
    link "zsh" ".zsh"

    e_success "Dotfiles update complete!"
}
# Ask before potentially overwriting files
seek_confirmation "Warning: This step may overwrite your existing dotfiles."

if is_confirmed; then
    mirrorfiles
    #source ${HOME}/.bash_profile
else
    printf "Aborting...\n"
    exit 1
fi

# NOTE(review): this unconditional exit makes everything below unreachable;
# the copyDots/oracle section appears to be dead code left over from an
# older version of the script.
exit

# Copy the whole dotfiles tree into $HOME with rsync (legacy path).
function copyDots() {
	rsync --exclude ".git/" \
		--exclude ".DS_Store" \
		--exclude ".osx" \
		--exclude "bootstrap.sh" \
		-avh . ~;
	source ~/.bashrc;
}

read -p "This will overwrite existing files in your home directory. Are you sure? (y/N) " -n 1;
echo "";
if [[ $REPLY =~ ^[Yy]$ ]]; then
	copyDots
	# Wire up Oracle client bin/lib/header symlinks under ~/.oracle.
	cd ~/.oracle
	find /usr/lib/oracle -type d -name bin -exec ln -sf {} bin \;
	find /usr/lib/oracle -type d -name lib -exec ln -sf {} lib \;
	[ -d rdbms ] || mkdir rdbms; cd rdbms
	find /usr/include/oracle -type d -name client64 -exec ln -sf {} public \;
fi;
| true |
3c629d58eb288cb13bd9b3f788c74e43b3c63fca | Shell | moduda/pub | /tools/gam/add-group-member.sh | UTF-8 | 556 | 3.75 | 4 | [] | no_license | #! /bin/sh
#
# Add new member(s) to an existing group
#
# 15.jan.2016 ykim
#
PWD=$(dirname $0)
. $PWD/gam-script-lib.sh
usage() {
echo "Usage: $(basename $0) [owner|manager|member] group-name user-name"
}
case $1 in
owner|manager|member) OM=$1; shift ;;
*) OM=member ;;
esac
g=$1
if [ -f $2 ]
then
for u in $(cat $2 | sed -e "s/,/ /g")
do
echo "++ gam $C update group $g add $OM $u"
gam $C update group $g add $OM $u
done
else
shift
for m in $*
do
echo "++ gam $C update group $g add $OM $m"
gam $C update group $g add $OM $m
done
fi
| true |
617be7b1a936735e91845cbc0560c10f7de81733 | Shell | cl91/ece-lab-server-go | /script/run_moss | UTF-8 | 588 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Run MOSS plagiarism detection over a lab's uploaded submissions and
# e-mail the result page. Args: course, lab, notification e-mail, filter
# (file-type filter passed to extract_files / used to pick MOSS inputs).
course="$1"
lab="$2"
email="$3"
filter="$4"

# Scratch copy of the submissions; rebuilt on every run.
tmp="/tmp/moss_tmp"

rm -rf $tmp
cp -r uploaded/${course}/${lab} $tmp
# Remember the repo root: the helper scripts live relative to it.
oldpwd=$PWD
cd $tmp
$oldpwd/script/extract_files -x $filter
mossfiles=$($oldpwd/script/extract_files -m $filter)
# The moss client prints a result URL on success; keep only that line.
link=$($oldpwd/script/moss -d $mossfiles | grep 'http')
if [[ $link ]]; then
	# Fetch the result page and mail it; also echo the link for the caller.
	wget $link -O - -o /dev/null | mail -s "Plagiarism Detection Result $course Lab $lab" $email
	echo Success. Browse to the link $link to view the results. A copy has also been emailed to you.
else
	echo Something went wrong. Please tell whoever is in charge of this.
fi
5f4f51617d760c31b079ce8dc253f3a0e15192fb | Shell | tahaelleuch/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/7-clock | UTF-8 | 205 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env bash
# Displays the clock listing for 12 hours and 59 minutes: for each hour
# 0-12 print a "Hour: N" header followed by the minutes 1-59, one per line.
for (( hour = 0; hour <= 12; hour++ ))
do
    echo "Hour: $hour"
    for (( minute = 1; minute <= 59; minute++ ))
    do
        echo "$minute"
    done
done
| true |
17d41311a20c786077524d3f80b19b974e7126c0 | Shell | eudoxia0/dotfiles | /recrank.sh | UTF-8 | 337 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Rebuild and switch the NixOS configuration for the current machine.
hostname=`hostname`

# recrank NAME -- switch to the NixOS config file NAME.nix.
function recrank() {
    sudo nixos-rebuild switch -I nixos-config=$1.nix
}

# Known machines get their config implicitly; anything else must pass the
# config name as the first argument.
if [ "$hostname" == "sextant" ]; then
    recrank sextant
elif [ "$hostname" == "bullroarer" ]; then
    recrank bullroarer
else
    if [ -z "$1" ]; then
        echo "unknown hostname"
        exit 1
    else
        recrank $1
    fi
fi
| true |
4a2824902610596442481a56ace5dbbf48a162c0 | Shell | up1/try-kong-go-plugin | /go-plugins/start_server.sh | UTF-8 | 336 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Start the first process: launch the Kong Go plugin server in the
# background, then restart Kong itself.
cd /usr/local/kong/
./go-pluginserver -socket /usr/local/kong/go_pluginserver.sock -plugins-directory /kong.conf.d &
# NOTE(review): $? after `cmd &` is the status of *backgrounding* the
# command, which is always 0, so this failure branch can never trigger;
# it does not detect a plugin server that dies after launch.
status=$?
if [ $status -ne 0 ]; then
  echo "Failed to start my_first_process: $status"
  exit $status
fi
ls -la /usr/local/kong
# World-writable so both the plugin server and Kong can use the socket dir.
sudo chmod 777 -R /usr/local/kong
kong restart --vv
c5b113fa2eab0cfc9827a7ffde2338f6058cc00b | Shell | bipinupd/datalake-repo | /cloud-build-deploy/scripts/deploy.sh | UTF-8 | 929 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Submit a Cloud Build deploy for every pipeline directory under
# /workspace/${_SHORT_SHA}/ (except cloud-build-deploy itself).
# Exits non-zero if any build failed.
cd "/workspace/${_SHORT_SHA}/" || exit 1

# Overall failure flag: stays 0 while every build succeeds.
failed=0

while IFS= read -r dir; do
  directory=${dir#./}   # strip the leading ./ that find produces
  if [[ "$directory" != "cloud-build-deploy" ]]; then
    # Build the --substitutions value from the pipeline's variable names.
    subs_var=""
    while read -r line; do
      if [[ "$line" == "_PIPELINE_NAME" ]]; then
        # _PIPELINE_NAME is derived from the directory name itself.
        subs_var+="_PIPELINE_NAME=$directory,"
      else
        # Every other variable is looked up in the shared build_vars file.
        subs_var+=$(grep -- "$line" /workspace/build_vars),
      fi
    done < "$directory/scripts/deploy_variables"
    # Drop any trailing commas.
    _subs=$(printf '%s' "$subs_var" | sed 's/,*$//')
    # BUG FIX: the original captured gcloud's *stdout* into a variable and
    # compared it numerically to 0; test the command's exit status instead.
    if ! gcloud builds submit --config "$directory/cloudbuild-deploy.yaml" --substitutions="$_subs"; then
      echo "Build failed for $directory"
      failed=1
    fi
  fi
done < <(find . -maxdepth 1 -mindepth 1 -type d)

if [[ $failed -ne 0 ]]; then
  exit 1
fi
| true |
8cb6860a6c353b2a9027e709ad2ffb10e613843b | Shell | VDBWRAIR/pathdiscov | /pathdiscov/host_map/bwa_filter_host.sh | UTF-8 | 5,429 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# bwa align to a reference; then extract from the original fastq file(s) the reads that didn't map
# can parallelize here
#
# Positional arguments (all required):
outputdir=$1	# outputdir - working directory; all outputs land here
d=$2		# scripts dir (NOTE(review): assigned but not used in this script)
r1=$3		# reads 1 file
r2=$4		# reads 2 file
db=$5		# database - bwa index prefix of the host reference
paired=$6	# paired - 1 if paired data, 0 if not
wellpaired=$7	# wellpaired - 1 if paired data "well-paired" (i.e., mate files have exact same IDs), 0 if not
opts=$8		# options - extra flags appended to bwa sampe/samse
#
# Outputs (in $outputdir): R1.unmap.fastq [and R2.unmap.fastq for paired
# data] containing the reads that did NOT map to the reference, plus the
# intermediate .sai/.bam files and *.unmap.id lists.

if ! cd ${outputdir}; then
	echo "[error] changing directories";
else
	# paired data
	if [ "$paired" == 1 ]; then
		# well paired: mate files line up 1:1, so map them directly
		if [ "$wellpaired" == 1 ]; then
			time1=$( date "+%s" )
			# create sai's
			cmd="bwa aln ${db} ${r1} > R1.paired.sai"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="bwa aln ${db} ${r2} > R2.paired.sai"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="bwa sampe ${db} R1.paired.sai R2.paired.sai ${r1} ${r2} ${opts} > paired.sam"
			echo "[cmd] "$cmd
			eval ${cmd}
			time2=$( date "+%s" )
			echo "[echo] mapping complete. deltat: "$(( $time2 - $time1 ))
			samtools view -bS paired.sam > paired.bam
			echo "[stats] paired stats"
			samtools flagstat paired.bam
			# get unmapped read IDs: keep SAM rows whose reference field
			# (column 3) is "*", i.e. reads without an alignment, and
			# prefix each ID with "@" to match fastq headers.
			cat paired.sam | awk '$3=="*"' | cut -f1 | sort -u | awk '{print "@"$0}' > paired.unmap.id
			rm paired.sam
			# extract the 4 line chunks from a fastq file, given in argument 1 file, for IDs give in argument 2 file
			cmd="fastq_extract_id.pl ${r1} paired.unmap.id > R1.unmap.fastq"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="fastq_extract_id.pl ${r2} paired.unmap.id > R2.unmap.fastq"
			echo "[cmd] "$cmd
			eval ${cmd}
		else # paired but not well-paired
			# find pairs and singletons: split each mate file into reads
			# common to both (R*.paired.fastq) and leftovers (R*.single.fastq)
			cmd="perlscripts_wrapper.pl get_common_uneven_files ${r1} ${r2} R1.single.fastq R1.paired.fastq R2.single.fastq R2.paired.fastq"
			echo "[cmd] "$cmd
			eval ${cmd}
			# paired -------------------------------
			time1=$( date "+%s" )
			# create sai's
			cmd="bwa aln ${db} R1.paired.fastq > R1.paired.sai"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="bwa aln ${db} R2.paired.fastq > R2.paired.sai"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="bwa sampe ${db} R1.paired.sai R2.paired.sai R1.paired.fastq R2.paired.fastq ${opts} > paired.sam"
			echo "[cmd] "$cmd
			eval ${cmd}
			time2=$( date "+%s" )
			echo "[echo] mapping complete. deltat: "$(( $time2 - $time1 ))
			samtools view -bS paired.sam > paired.bam
			echo "[stats] paired stats"
			samtools flagstat paired.bam
			# get unmapped read IDs (reference column "*" = no alignment)
			cat paired.sam | awk '$3=="*"' | cut -f1 | sort -u | awk '{print "@"$0}' > paired.unmap.id
			rm paired.sam
			# extract the 4 line chunks from a fastq file, given in argument 1 file, for IDs give in argument 2 file
			cmd="fastq_extract_id.pl R1.paired.fastq paired.unmap.id > R1.unmap.fastq"
			echo "[cmd] "$cmd
			eval ${cmd}
			cmd="fastq_extract_id.pl R2.paired.fastq paired.unmap.id > R2.unmap.fastq"
			echo "[cmd] "$cmd
			eval ${cmd}
			# singletons from R1, if any: map single-end and append their
			# unmapped reads to R1.unmap.fastq
			if [ -s R1.single.fastq ]; then
				time1=$( date "+%s" )
				# create sai's
				cmd="bwa aln ${db} R1.single.fastq > R1.single.sai"
				echo "[cmd] "$cmd
				eval ${cmd}
				cmd="bwa samse ${db} R1.single.sai R1.single.fastq ${opts} > R1.single.sam"
				echo "[cmd] "$cmd
				eval ${cmd}
				time2=$( date "+%s" )
				echo "[echo] mapping complete. deltat: "$(( $time2 - $time1 ))
				samtools view -bS R1.single.sam > R1.single.bam
				echo "[stats] R1.single stats"
				samtools flagstat R1.single.bam
				cat R1.single.sam | awk '$3=="*"' | cut -f1 | sort -u | awk '{print "@"$0}' > R1.single.unmap.id
				rm R1.single.sam
				cmd="fastq_extract_id.pl R1.single.fastq R1.single.unmap.id >> R1.unmap.fastq"
				echo "[cmd] "$cmd
				eval ${cmd}
			fi
			# singletons from R2, if any: same treatment as R1 singletons
			if [ -s R2.single.fastq ]; then
				time1=$( date "+%s" )
				# create sai's
				cmd="bwa aln ${db} R2.single.fastq > R2.single.sai"
				echo "[cmd] "$cmd
				eval ${cmd}
				cmd="bwa samse ${db} R2.single.sai R2.single.fastq ${opts} > R2.single.sam"
				echo "[cmd] "$cmd
				eval ${cmd}
				time2=$( date "+%s" )
				echo "[echo] mapping complete. deltat: "$(( $time2 - $time1 ))
				samtools view -bS R2.single.sam > R2.single.bam
				echo "[stats] R2.single stats"
				samtools flagstat R2.single.bam
				cat R2.single.sam | awk '$3=="*"' | cut -f1 | sort -u | awk '{print "@"$0}' > R2.single.unmap.id
				rm R2.single.sam
				cmd="fastq_extract_id.pl R2.single.fastq R2.single.unmap.id >> R2.unmap.fastq"
				echo "[cmd] "$cmd
				eval ${cmd}
			fi
		fi # not well paired
	else # single reads
		time1=$( date "+%s" )
		# create sai's
		cmd="bwa aln ${db} ${r1} > R1.single.sai"
		echo "[cmd] "$cmd
		eval ${cmd}
		cmd="bwa samse ${db} R1.single.sai ${r1} ${opts} > R1.single.sam"
		echo "[cmd] "$cmd
		eval ${cmd}
		time2=$( date "+%s" )
		echo "[echo] mapping complete. deltat: "$(( $time2 - $time1 ))
		samtools view -bS R1.single.sam > R1.single.bam
		echo "[stats] R1.single stats"
		samtools flagstat R1.single.bam
		cat R1.single.sam | awk '$3=="*"' | cut -f1 | sort -u | awk '{print "@"$0}' > R1.single.unmap.id
		rm R1.single.sam
		cmd="fastq_extract_id.pl ${r1} R1.single.unmap.id > R1.unmap.fastq"
		echo "[cmd] "$cmd
		eval ${cmd}
	fi # single
fi # cd
| true |
393a28ae4598004752319d8718e670f0ac0946ad | Shell | thamjieying/ShellScriptTutorial | /lesson17.sh | UTF-8 | 569 | 3.671875 | 4 | [] | no_license | #! /bin/bash
# Read file content in Bash using while loops
# (The unquoted `echo $line` in all three variants word-splits the line,
# collapsing runs of whitespace and expanding globs -- fine for a tutorial,
# but use `printf '%s\n' "$line"` for real data.)

# 1. INPUT redirection
while read line # read the content of the file line by line
do
	echo $line
done < lesson17.sh # < angle redirection (hello.sh is redirected to the while loop)

# 2. read file in a single variable and then print it
# (the while loop runs in a pipeline subshell here, so any variables it
# sets are lost after `done`)
cat lesson17.sh | while read line
do
	echo $line
done

# 3. Using IFS (Internal Field Separator) used by the shell to determine how to do word splitting
# -r prevent backslash escape from being interpreted
while IFS=' ' read -r line
do
	echo $line
done < lesson17.sh
| true |
58b7803db75783efe98ee40be8b16ee2c7094cdd | Shell | thioshp/mytmxbinonitel | /smcpick.sh | UTF-8 | 414 | 3.109375 | 3 | [] | no_license | #!/system/bin/sh
sel="$(smc pick -f)";
echo -E X"${sel}"X
if [ -e "$sel" ];
then echo exists;
else echo not exists;
fi
sleep 3
sel="$(smc pick -A /storage/emulated/legacy/ )"
OIFS=$IFS
IFS=""
for f in $sel ;
do
echo -E f=X"$f"X
realname="$(echo -e "$f")"
echo -E selected X"$realname"X
echo
if [ -e "$realname" ];
then
echo exists;
else
echo not exists;
fi;
done
IFS=$OIFS | true |
6918e1cafcc9a24641fd7524ace6e4b3359e29ae | Shell | akhial/dotfiles | /bin/inkexport | UTF-8 | 470 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/zsh
# Export an SVG to PNG at several DPIs with Inkscape.
# Usage: inkexport -f drawing.svg DPI [DPI ...]
# Output files: exports/<name>@<dpi>.png
while getopts ":f:" opt; do
  case $opt in
    f)
      # Drop "-f file.svg" so the remaining positional args are the DPIs.
      # NOTE(review): this assumes -f is the *first* argument; mixing
      # `shift` with getopts' OPTIND is fragile for other orderings.
      shift 2
      # Strip the .svg suffix to build the output basename.
      # NOTE(review): the unescaped dot means the pattern matches any
      # character followed by "svg", e.g. "mysvg" -- TODO confirm intent.
      NAME=`echo $OPTARG | sed 's/.svg//g'`
      for x in $@ ; do inkscape --export-filename=exports/$NAME@${x}.png -d ${x} $OPTARG ; done
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done
| true |
73d4d25867a1909491f5724aeeec09bbd1f44012 | Shell | vipoo/raspberry-lights | /scripts/set-read-only.sh | UTF-8 | 4,656 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# CREDIT TO THESE TUTORIALS:
# petr.io/en/blog/2015/11/09/read-only-raspberry-pi-with-jessie
# hallard.me/raspberry-pi-read-only
# k3a.me/how-to-make-raspberrypi-truly-read-only-reliable-and-trouble-free
# START INSTALL ------------------------------------------------------------
# All selections have been validated at this point...
# Given a filename, a regex pattern to match and a replacement string:
# Replace string if found, else no change.
# (# $1 = filename, $2 = pattern to match, $3 = replacement)
# Replace string if found, else leave the file unchanged.
#   $1 - filename, $2 - sed/grep pattern, $3 - replacement
# Quoting $1/$2 fixes the original's unquoted expansions, which broke for
# patterns containing whitespace (e.g. "spool\s*0755" style patterns are
# safe, but a pattern with a literal space was split into two grep args).
replace() {
	if grep "$2" "$1" >/dev/null; then
		# Pattern found; replace every occurrence in the file in place.
		sed -i "s/$2/$3/g" "$1" >/dev/null
	fi
}
# Given a filename, a regex pattern to match and a replacement string:
# If found, perform replacement, else append file w/replacement on new line.
replaceAppend() {
grep $2 $1 >/dev/null
if [ $? -eq 0 ]; then
# Pattern found; replace in file
sed -i "s/$2/$3/g" $1 >/dev/null
else
# Not found; append on new line (silently)
echo $3 | sudo tee -a $1 >/dev/null
fi
}
# Given a filename, a regex pattern to match and a string:
# If found, no change, else append file with string on new line.
append1() {
grep $2 $1 >/dev/null
if [ $? -ne 0 ]; then
# Not found; append on new line (silently)
echo $3 | sudo tee -a $1 >/dev/null
fi
}
# Given a filename, a regex pattern to match and a string:
# If found, no change, else append space + string to last line --
# this is used for the single-line /boot/cmdline.txt file.
append2() {
grep $2 $1 >/dev/null
if [ $? -ne 0 ]; then
# Not found; insert in file before EOF
sed -i "s/\'/ $3/g" $1 >/dev/null
fi
}
echo
echo "Starting installation..."
echo "Updating package index files..."
echo "Removing unwanted packages..."
apt-get remove -y --force-yes --purge triggerhappy logrotate dphys-swapfile fake-hwclock
apt-get -y --force-yes autoremove --purge
# Replace log management with busybox (use logread if needed)
echo "Installing ntp and busybox-syslogd..."
apt-get -y --force-yes install ntp busybox-syslogd
dpkg --purge rsyslog
echo "Configuring system..."
# Add fastboot, noswap and/or ro to end of /boot/cmdline.txt
append2 /boot/cmdline.txt fastboot fastboot
append2 /boot/cmdline.txt noswap noswap
append2 /boot/cmdline.txt ro^o^t ro
# Move /var/spool to /tmp
rm -rf /var/spool
ln -s /tmp /var/spool
# Make SSH work
replaceAppend /etc/ssh/sshd_config "^.*UsePrivilegeSeparation.*$" "UsePrivilegeSeparation no"
# bbro method (not working in Jessie?):
#rmdir /var/run/sshd
#ln -s /tmp /var/run/sshd
# Change spool permissions in var.conf (rondie/Margaret fix)
replace /usr/lib/tmpfiles.d/var.conf "spool\s*0755" "spool 1777"
# Move dhcpd.resolv.conf to tmpfs
touch /tmp/dhcpcd.resolv.conf
rm /etc/resolv.conf
ln -s /tmp/dhcpcd.resolv.conf /etc/resolv.conf
# Make edits to fstab
# make / ro
# tmpfs /var/log tmpfs nodev,nosuid 0 0
# tmpfs /var/tmp tmpfs nodev,nosuid 0 0
# tmpfs /tmp tmpfs nodev,nosuid 0 0
replace /etc/fstab "vfat\s*defaults\s" "vfat defaults,ro "
replace /etc/fstab "ext4\s*defaults,noatime\s" "ext4 defaults,noatime,ro "
append1 /etc/fstab "/var/log" "tmpfs /var/log tmpfs nodev,nosuid 0 0"
append1 /etc/fstab "/var/tmp" "tmpfs /var/tmp tmpfs nodev,nosuid 0 0"
append1 /etc/fstab "\s/tmp" "tmpfs /tmp tmpfs nodev,nosuid 0 0"
cat <<EOT >> /etc/bash.bashrc
# set variable identifying the filesystem you work in (used in the prompt below)
fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")
# alias ro/rw
alias roroot='mount -o remount,ro / ; fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")'
alias rwroot='mount -o remount,rw / ; fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")'
# setup fancy prompt
export PS1='\[\033[01;32m\]\u@\h\${fs_mode:+(\$fs_mode)}\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\\\$ '
# aliases for mounting boot volume
alias roboot='mount -o remount,ro /boot'
alias rwboot='mount -o remount,rw /boot'
EOT
cat <<EOT >> /home/pi/.bashrc
# set variable identifying the filesystem you work in (used in the prompt below)
fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")
# alias ro/rw
alias roroot='sudo mount -o remount,ro / ; fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")'
alias rwroot='sudo mount -o remount,rw / ; fs_mode=\$(mount | sed -n -e "s/^.* on \/ .*(\(r[w|o]\).*/\1/p")'
# setup fancy prompt
export PS1='\[\033[01;32m\]\u@\h\${fs_mode:+(\$fs_mode)}\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\\\$ '
# aliases for mounting boot volume
alias roboot='sudo mount -o remount,ro /boot'
alias rwboot='sudo mount -o remount,rw /boot'
EOT | true |
b5b476bf326c4cd82956256d5867694c1f68ceae | Shell | bhfs9999/appointment-code | /word2vec/scripts/demo-classes.sh | UTF-8 | 779 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# word2vec class-clustering demo: build the tools, fetch the text8 corpus
# if needed, train 500 word classes, and write a sorted class listing.
DATA_DIR=../data
BIN_DIR=../bin
SRC_DIR=../src

TEXT_DATA=$DATA_DIR/text8
CLASSES_DATA=$DATA_DIR/classes.txt

# Build the word2vec binaries before anything else.
pushd ${SRC_DIR} && make; popd

if [ ! -e $CLASSES_DATA ]; then
  if [ ! -e $TEXT_DATA ]; then
    # Download and unpack the text8 training corpus.
    wget http://mattmahoney.net/dc/text8.zip -O $DATA_DIR/text8.gz
    gzip -d $DATA_DIR/text8.gz -f
  fi
  echo -----------------------------------------------------------------------------------------------------
  echo -- Training vectors...
  # Skip-gram (-cbow 0), hierarchical softmax, 500 output classes.
  time $BIN_DIR/word2vec -train $TEXT_DATA -output $CLASSES_DATA -cbow 0 -size 200 -window 5 -negative 0 -hs 1 -sample 1e-3 -threads 12 -classes 500
fi

# Sort words by class id (column 2) for readable browsing.
sort $CLASSES_DATA -k 2 -n > $DATA_DIR/classes.sorted.txt
echo The word classes were saved to file $DATA_DIR/classes.sorted.txt
| true |
a032f4bc961235b7c5a94e5630822c908c956281 | Shell | recovery12/courses | /itw/shells/scripts/pidr.sh | UTF-8 | 283 | 3.890625 | 4 | [] | no_license | #!/bin/bash
#This script is used tell whether a given pid is root's or not
if [ $# -ne 1 ]
then
	echo "Syntax: pidr.sh <pid>"
	exit 1
fi
# Ask ps for the owner of the pid directly instead of scanning root's
# process list. This fixes the original's interpolation of $1 into the
# awk program text (which broke or misbehaved for non-numeric input) and
# its unquoted [ $(...) ] test.
owner=$(ps -o user= -p "$1" 2>/dev/null)
if [ "$owner" = "root" ]
then
	echo "The process is root's process"
else
	# Also taken when the pid does not exist, matching the original.
	echo "The process is not root's process"
fi
| true |
83efad3566067ff1359e1e6ee8fef519dca4c80e | Shell | njw1204/golf-online-judge | /judge-docker/python3/exec.sh | UTF-8 | 944 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# GolfJudge Python3 Solution Tester
# exit code 0 : Accepted
# Args: $1 solution source, $2 input testcase, $3 expected output, $4 time limit.

con="goj-python3-container" # docker container name
SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd )
ret=0 # exit code

# docker variables
time_limit=$4 # seconds

# problem variables
source_file=$1
input_testcase=$2
output_testcase=$3

# Reset the capture file. NOTE(review): presumably docker-run.sh bind-mounts
# output.out into the container so the judged program writes here -- confirm.
echo "" > ${SCRIPTPATH}/output.out

# (Re)create the judge container if it is not present yet.
if [ "$(sudo docker ps -a -f name=${con} --format {{.Names}})" != "${con}" ]; then
    sudo bash ${SCRIPTPATH}/docker-run.sh
fi

# Stage the solution and input, run it, and enforce the time limit by
# stopping the container after $time_limit seconds.
sudo docker cp ${source_file} ${con}:/judge/main.py > /dev/null
sudo docker cp ${input_testcase} ${con}:/judge/input.in > /dev/null
sudo docker start ${con} > /dev/null
sudo docker stop -t ${time_limit} ${con} > /dev/null

# Whitespace-tolerant comparison of actual vs expected output.
if [ "$(diff --ignore-trailing-space --ignore-space-change --ignore-blank-lines --text -q ${SCRIPTPATH}/output.out ${output_testcase} 2>&1)" = "" ]; then
    echo "pass"
    ret=0
else
    echo "fail"
    ret=1
fi

sudo rm ${SCRIPTPATH}/output.out
exit $ret
| true |
efc08cd20d9dc29096278baf69c91b3cd6c78c33 | Shell | hm1365166/opencsw | /csw/mgar/pkg/ghc/trunk/files/CSWghc.postinstall | UTF-8 | 356 | 2.703125 | 3 | [] | no_license | #!/bin/sh
set -e
# Implement the procedure described here:
# http://hackage.haskell.org/trac/ghc/wiki/Building/Solaris
# Rewrite the GHC rts package registration so -R/opt/csw/lib is passed as
# an ld option instead of appearing in the library search entries.
/opt/csw/bin/ghc-pkg describe rts > /tmp/rts.pkg.in
cat /tmp/rts.pkg.in | sed \
    -e "s, -R/opt/csw/lib ,,g" \
    -e "s,^\(ld-options:\) \(.*\),\1 -R/opt/csw/lib \2," > /tmp/rts.pkg.out
/opt/csw/bin/ghc-pkg update /tmp/rts.pkg.out
| true |
689a37390ccb3f667e31900a35a31783fbc509ea | Shell | davidyuqiwei/davidyu_stock | /scripts/download/DFCF/zhulikongpan/z1.sh | UTF-8 | 645 | 2.765625 | 3 | [
"MIT"
] | permissive |
# Download the daily "zhulikongpan" data from the eastmoney API for every
# stock code listed in $stock_list_data_test.
download_date=`date +%Y-%m-%d`
# NOTE(review): $stock_list_data_test (used at the bottom) is expected to
# be defined by ~/.bashrc sourced here -- confirm.
source ~/.bashrc
cd `dirname $0`
#file_in=$stock_data/basic_info/stock_basic_info.csv
#`awk -F "," '{print $1}' $file_in > stock_list.txt`
#./run_get_stock_list.sh
while read -r line
do
    # Skip the CSV header row.
    if [[ $line != "code" ]];then
        stock_index=$line
        echo $stock_index
        # One output file per stock per day.
        file1=$stock_index"_zhulikongpan_"$download_date".txt"
        url1="http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?type=QGQP_LB&CMD=$stock_index&token=70f12f2f4f091e459a279469fe49eca5&callback=jQuery1123048223496594506776_1609067355341&_=1609067355343"
        wget $url1 -O $file1
        # Throttle requests to avoid hammering the endpoint.
        sleep 2s
    fi
done <$stock_list_data_test
| true |
94baa7289cb184c4c1028910a58f1b5cd5731206 | Shell | CarbonFace/carbon-docker | /services/carbon-cloud-docker-base/carbon-cloud-docker-entrypoint.sh | UTF-8 | 2,515 | 4.0625 | 4 | [] | no_license | #!/bin/sh
set -eo pipefail
# logging functions
# log LEVEL [MESSAGE...] -- print one timestamped entrypoint log line.
# With no message arguments, the text is read from stdin instead.
log() {
	level="$1"
	shift
	if [ "$#" -eq 0 ]; then
		msg="$(cat)"
	else
		msg="$*"
	fi
	printf '%s [%s] [Entrypoint]: %s\n' "$(date -R)" "$level" "$msg"
}
# note MESSAGE... -- informational line on stdout.
note() {
	log Note "$@"
}
# warn MESSAGE... -- warning line on stderr.
warn() {
	log Warn "$@" >&2
}
# error MESSAGE... -- error line on stderr, then abort the script.
error() {
	log ERROR "$@" >&2
	exit 1
}
# Abort (via error) unless the service's top-level work directory /<name>
# exists.
check_work_dir_valid(){
	dir_name="$1"
	[ -d "/$dir_name" ] || error "docker running failed! service work directory $dir_name does not exits!"
}
# Create the service's system group and user (Alpine addgroup/adduser) and
# hand the work directory over to them.
#   $1 - user (and group) name, $2 - work directory name under /
init_service_user(){
	user_name="$1"
	work_dir_name="$2"
	group_name="$user_name"
	# Create the group only if no user/group of that name exists yet.
	if ! id -u "$group_name" >/dev/null 2>&1; then
		addgroup -S "$group_name"
	fi
	if ! id -u "$user_name" >/dev/null 2>&1; then
		adduser -D -G "$user_name" "$user_name"
		note "system user initialized."
		note "user:group $user_name:$group_name"
		# Give the new user ownership of the work dir; 1777 adds the
		# sticky bit on the world-writable directory.
		chown -R "$user_name":"$user_name" /"$work_dir_name"
		chmod 1777 /"$work_dir_name"
	fi
}
# Validate the work directory and provision its matching system user.
# The user/group name defaults to the work directory name ($1).
init_work_dir(){
	echo
	note "work directory check started!"
	work_dir_name="$1"
	check_work_dir_valid "$work_dir_name"
	note "system user create initializing started!"
	service_user_name="$work_dir_name"
	note "user and group name will be the same with work directory name: $work_dir_name as default!"
	init_service_user "$service_user_name" "$work_dir_name"
	note "work directory check done ! "
	note "work directory : /$work_dir_name"
}
# Entrypoint logic. Environment contract:
#   SERVICE_JAR  - jar filename to run (under /<SERVICE_NAME>/)
#   SERVICE_NAME - service id; "carbon-cloud-app" is the base image marker
#   DEBUG        - "true" enables the JDWP remote-debug agent on port 5005
_main() {
	is_base=true
	service_jar="$SERVICE_JAR"
	service_name="$SERVICE_NAME"
	work_dir_name=$service_name
	debug="$DEBUG"
	# JDWP agent string used only when DEBUG=true (suspend=n: don't wait
	# for a debugger before starting).
	java_opts="-agentlib:jdwp=transport=dt_socket,address=5005,server=y,suspend=n"
	echo
	note "carbon-cloud service [$service_name] initializing started!"
	echo "$@"
	note "debug opt:" $debug
	note "==========================================================="
	# Use /dev/urandom for SecureRandom to avoid blocking on entropy.
	security_egd="-Djava.security.egd=file:/dev/./urandom";
	# The base image has no real service to run.
	if [ "$service_name" = "carbon-cloud-app" ]
	then
		is_base=true
	else
		is_base=false
	fi
	if [ "$is_base" = true ]
	then
		exec echo "hello carbon cloud!"
	else
		init_work_dir "$work_dir_name"
		echo
		note "welcome to the Carbon Cloud! have fun~ ^_^) "
		echo java "$java_opts" $security_egd -jar "$service_jar"
		# exec replaces this shell so the JVM becomes PID 1 and receives
		# container signals directly.
		if [ "$debug" = "true" ]
		then
			exec java $java_opts $security_egd -jar /$work_dir_name/$service_jar "$@"
		else
			note "exec " java $security_egd -jar /$work_dir_name/$service_jar "$@"
			exec java $security_egd -jar /$work_dir_name/$service_jar "$@"
		fi
	fi
}
printf "Carbon Cloud"
_main "$@"
| true |
54c81021f73fe93c44a8f1e532f7bd6d310a45c1 | Shell | coolcoding/shell | /security/ddos.sh | UTF-8 | 1,287 | 3.671875 | 4 | [] | no_license | #!/bin/sh
TMP_PREFIX="/tmp/ddos"
BAN_IP_LIST_TMP=`mktemp $TMP_PREFIX.XXXXX`
BANNED_IP_LIST="/tmp/banned.ip.list"
IGNORE_IP_LIST="/tmp/ignore.ip.list"
MAX_CONNECTIONS=2
IPT="/sbin/iptables"
if [ ! -f $BANNED_IP_LIST ];then
touch $BANNED_IP_LIST
fi
if [ ! -f $IGNORE_IP_LIST ];then
touch $IGNORE_IP_LIST
fi
function iptablesBan(){
# netstat -ntu | awk '/:80/{split($5,ip,":");++A[ip[1]]}END{for(i in A) print A[i],i}' | sort -rn | head -n10 | tee ${BAN_IP_LIST_TMP}
netstat -ntu | awk '/:80/{split($5,ip,":");++A[ip[1]]}END{for(i in A) print A[i],i}' | sort -rn | head -n10 > ${BAN_IP_LIST_TMP}
while read line; do
NUM=$(echo ${line} | cut -d" " -f1)
IP=$(echo ${line} | cut -d" " -f2)
if [ ${NUM} -lt ${MAX_CONNECTIONS} ]; then
continue
fi
IGNORE_BAN=`grep -c ${IP} ${IGNORE_IP_LIST}`
if [ ${IGNORE_BAN} -ge 1 ]; then
continue
fi
BANNED_BAN=`grep -c ${IP} ${BANNED_IP_LIST}`
if [ ${BANNED_BAN} -ge 1 ]; then
continue
fi
echo ${IP} >> ${BANNED_IP_LIST}
${IPT} -I INPUT -s ${IP} -j DROP
/sbin/service iptables save
done < ${BAN_IP_LIST_TMP}
}
while :
do
iptablesBan
/bin/sleep 10
rm -f $BAN_IP_LIST_TMP
done
| true |
1b4936c30b4ca3ff78e4a69b5c5abd24f49ec6ac | Shell | laurelnaiad/k8s-on-eoan | /k8s-config/docker-registry.sh | UTF-8 | 2,138 | 3.140625 | 3 | [
"WTFPL"
] | permissive | ########################################################################
# docker-registry.sh
# if we want to do authenticated/authoerized access:
# https://github.com/cesanta/docker_auth/blob/master/examples/reference.yml
########################################################################
source "${0%/*}/../lib/all.sh"
MYDIR=$WORK_DIR/docker-registry
mkdir -p $MYDIR
rm $MYDIR/docker-registry/values.yaml
helm pull stable/docker-registry -d $MYDIR --untar
until [[ $(stat $MYDIR/docker-registry/values.yaml) ]]
do
sleep 1
done
sleep 1
cat $MYDIR/docker-registry/values.yaml \
| yq -y ' .persistence.enabled = true
| .persistence.size = "20Gi"
| .persistence.storageClass = "fixed-size"
' \
| tee $MYDIR/docker-registry/values.yaml
helm template $MYDIR/docker-registry \
| sed -e 's/RELEASE-NAME-//g' \
| awk '{if( \
! /^[\t ]*chart:/ && \
! /^[\t ]*heritage:/ && \
! /^[\t ]*release:/ \
) {print $0}}' \
| tee $MYDIR/resources.yaml
cat <<EOF > $MYDIR/ingress.yaml
kind: Ingress
apiVersion: networking.k8s.io/v1beta1
metadata:
name: docker-registry
namespace: docker-registry
labels:
app: docker-registry
annotations:
kubernetes.io/ingress.class: nginx-intranet
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-body-size: "0"
cert-manager.io/cluster-issuer: cert-issuer-prod
spec:
rules:
- host: docker-registry.intranet.$PRI_DOMAIN
http:
paths:
- backend:
serviceName: docker-registry
servicePort: 5000
tls:
- hosts:
- docker-registry.intranet.$PRI_DOMAIN
secretName: docker-registry.intranet.$PRI_DOMAIN-tls
EOF
cat <<EOF > $MYDIR/kustomization.yaml
resources:
- resources.yaml
- ingress.yaml
namespace: docker-registry
EOF
kustomize build $MYDIR > $MYDIR/package.yaml
# create a partition/persistent volume for the storage...
add_volume fixed-size 25Gi
kubectl create namespace docker-registry
kubectl apply -f $MYDIR/package.yaml
sudo sed -i "s/'docker.io'/'docker-registry.intranet.$PRI_DOMAIN', 'docker.io'/" /etc/containers/registries.conf
| true |
9b7a5e30635824b9500bab7b6006af477eccdf57 | Shell | cwi-swat/meta-environment | /sisyphus/demo/sisyphus-demo-client.sh.src | UTF-8 | 516 | 2.5625 | 3 | [] | no_license | #! /bin/sh
# Launch the Sisyphus demo build client (si-cli). The __*__ placeholders
# are substituted by the build system when this .src file is installed.
PREFIX=__PREFIX__
DATADIR=__DATA_DIR__
BINDIR=__BIN_DIR__
SICLI=$BINDIR/si-cli
# Build configuration lives in a local svn repository; DB settings in YAML.
CONFIG_SVN=file://$DATADIR/demo/config-repo/demo
DB_CONFIG=$DATADIR/demo/dbconfig.yml
# Replace this shell with the client: build hourly, Mon-Fri, all day.
exec $SICLI \
    --root relational-aterms \
    --profile demo \
    --interval 3600 \
    --trackingback \
    --weekday Mon \
    --weekday Tue \
    --weekday Wed \
    --weekday Thu \
    --weekday Fri \
    --after 00:01 \
    --before 23:59 \
    --config ${CONFIG_SVN} \
    --dbconf ${DB_CONFIG} \
    --no-quiet \
    --verbose
| true |
a061d3fbbb87e2f30a9274ac756e42f0d2619eb1 | Shell | canastic/informixcdc | /githooks/pre-commit | UTF-8 | 639 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# git pre-commit hook: auto-format staged Kotlin files with ktlint and
# regenerate the Klaxon request/message classes from the JSON schemas.
# Exits 1 if any .kt file changed, so the commit stops and the user can
# restage the reformatted files.

# Snapshot the unstaged diff so we can detect what the hook modified.
before=$(git diff --name-only)

# Format every staged Kotlin file in parallel, then wait for all jobs.
for file in $(git diff --cached --name-only | grep -e '\.kt$'); do
    ktlint --experimental -F "$file" &
done
wait

# Regenerate the IDL-derived Kotlin sources.
./idl/records_request.schema.json.py | ./idl/jsonschema2klaxon.py informixcdc RecordsRequest > src/main/kotlin/informixcdc/RecordsRequest.kt
./idl/records_message.schema.json.py | ./idl/jsonschema2klaxon.py informixcdc RecordsMessage > src/main/kotlin/informixcdc/RecordsMessage.kt

after=$(git diff --name-only)

# Any .kt path that newly appears in the diff was touched by this hook;
# cut -b3- strips diff's "> " prefix.
changed=0
for file in $(diff <(echo "$before") <(echo "$after") | grep -e '\.kt$' | cut -b3-); do
    echo "Reformatted: $file"
    changed=1
done

exit "$changed"
| true |
bd0136a7c9786f7fbbdba3e7428735ac65538900 | Shell | neggles/minipitft | /dkms.post_install | UTF-8 | 495 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# DKMS post-install hook: copy the built device-tree overlays into
# /boot/overlays so the firmware can load them at boot.

# Exit status of the most recent install step (set by install_dt_overlay).
ret=-1

install_dt_overlay()
{
	# Install one compiled overlay ($1) from the DKMS build tree.
	# $kernelver and $arch are supplied by DKMS in the environment.
	local pwd_dir
	pwd_dir=$(pwd)   # avoid the original's pointless `eval pwd` / PWD shadowing
	local overlay=$1
	local base_dir="${pwd_dir%/*}/$kernelver/$arch/module"
	local overlays_dir="/boot/overlays"
	echo "${overlay}:"
	echo " - Installation"
	echo "   - Installing to ${overlays_dir}/"
	cp "${base_dir}/${overlay}" "${overlays_dir}/"
	ret=$?
}

install_dt_overlay "minipitft114.dtbo"
# BUG FIX: the original wrote `if ($ret != 0)`, which runs the value of
# $ret as a *command* in a subshell instead of comparing it; use a
# numeric test so a failed copy actually aborts.
if [ "$ret" -ne 0 ]; then exit "$ret"; fi
echo ''
install_dt_overlay "minipitft13.dtbo"
echo ''
exit "$ret"
| true |
fc19812350049883e0ab9844be62205f973f159a | Shell | ucb-cyarp/platformScripts | /offline-cpus.sh | UTF-8 | 569 | 3.78125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Take all isolated (non-OS) CPUs offline via sysfs; cpu0 is left online.
echo "Bringing Isolated CPUs Offline"
#Get the number of virtual CPUs from lscpu's "CPU(s): N" line
cpus=$(lscpu | grep "^CPU(s)" | sed -r -e 's/CPU[(]s[)][:]\s*([0-9]+)/\1/')
# BUG FIX: the original tested the literal string `cpus` ([[ -z cpus ]]),
# which is never empty, so this fallback could never trigger.
if [[ -z "$cpus" ]]; then
    echo "Unable to identify the number of CPU cores!"
    cpus=64
    echo "Defaulting to ${cpus}"
fi
lastCore=$(( ${cpus}-1 ))
#Assuming OS cores grouped at lower CPU numbers
#Do not offline/online OS CPUs (cpu0 stays up)
echo "Offline Cores 1-${lastCore}:"
for i in $(seq 1 ${lastCore}); do
    echo "echo \"0\" > /sys/devices/system/cpu/cpu$i/online"
    echo "0" > /sys/devices/system/cpu/cpu$i/online
done;
| true |
e2cff5e4fb58a52f96b54fa335e9bd0b32b00cd5 | Shell | huangyingw/loadrc | /vishrc/cat_play.sh | UTF-8 | 180 | 2.734375 | 3 | [] | no_license | #!/bin/zsh
# Endlessly replay the playlist file given as $1, launching vlc.sh for
# each entry. Restarts from the top when the file is exhausted.
while true
do
    # -r keeps backslashes in paths literal; IFS= preserves leading
    # whitespace in each playlist line.
    while IFS= read -r ss
    do
        # Strip all double quotes from the entry; parameter expansion
        # replaces the original `echo | sed 's/"//g'` pipeline.
        ss=${ss//\"/}
        echo "now playing $ss"
        ~/loadrc/vishrc/vlc.sh "$ss"
    done < "$1"
done
| true |
8b1dafc94cd83a4b19092d525b9e3fa6ef32bfea | Shell | andreaguarise/one-initScripts | /c7-kaui.sh | UTF-8 | 1,162 | 2.609375 | 3 | [] | no_license | #!/bin/sh
# c7-kaui.sh
#
# Provision the KAUI (Kill Bill admin UI) Rails app on a CentOS 7 host:
# install build deps, clone the repo, configure the database, open the
# firewall port, and wire the Rails app to the Kill Bill server.
#
# NOTE(review): MYSQLROOT, KILLBILLPWD and ETH0_IP are presumably
# injected by the OpenNebula contextualization environment before this
# script runs -- confirm; they are not defined here.
#
# Created by Andrea Guarise on 9/28/15.
#
LOGFILE=/tmp/killbill.log
# Build/runtime dependencies for the Ruby gems (mysql2, nokogiri, etc.).
yum -y install git
yum -y install ruby ruby-devel
yum -y install gcc
yum -y install zlib zlib-devel
yum -y install patch
yum -y install mysql-devel
cd /opt
git clone https://github.com/killbill/killbill-admin-ui.git
cd killbill-admin-ui
gem install bundler
bundle install
# The "dummy" app inside test/ is the runnable Rails host for the engine.
cd test/dummy
export RAILS_ENV=development
bundle install
# Point database.yml at the kaui DB user created below.
sed -e "s@password: root@password: ${KILLBILLPWD}@g;" ./config/database.yml > ./config/database.yml.new
mv -f ./config/database.yml.new ./config/database.yml
mysql -uroot -p$MYSQLROOT -e "create database kaui character set utf8" >> $LOGFILE 2>&1
mysql -uroot -p$MYSQLROOT -e "grant all privileges on kaui.* to killbill@'localhost' identified by \"$KILLBILLPWD\"" >> $LOGFILE 2>&1
mysql -uroot -p$MYSQLROOT -e "FLUSH PRIVILEGES" >> $LOGFILE 2>&1
rake kaui:install:migrations
rake db:migrate
# Expose the Rails dev server port.
firewall-cmd --zone=public --add-port=3000/tcp --permanent
firewall-cmd --reload
# Default Kill Bill API credentials (demo values shipped with Kill Bill).
cat >> config/initializers/killbill_client.rb << EOL
KillBillClient.api_key = 'bob'
KillBillClient.api_secret = 'lazar'
EOL
rails server -b $ETH0_IP >> $LOGFILE 2>&1 | true |
9b95fd669a2be67cec6fc2737ba52218720113c7 | Shell | chiragagrawal/asm-deployer-jenkins | /scripts/puppet_certname.sh | UTF-8 | 359 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Detect whether Puppet Enterprise or open-source Puppet is installed
# and run the certname helper with the matching ruby/gem toolchain
# (PE bundles its own toolchain under /opt/puppet).
VERSION=$(puppet --version)
if [[ "$VERSION" =~ Enterprise ]]
then
  /opt/puppet/bin/gem install hashie
  /opt/puppet/bin/gem install inifile -v 2.0.2
  /opt/puppet/bin/ruby puppet_certname.rb
  /opt/puppet/bin/puppet agent -t
else
  gem install hashie
  gem install inifile -v 2.0.2
  ruby /usr/local/bin/puppet_certname.rb
  puppet agent -t
fi
| true |
3814e37d58b220ac96631ff101155ea026bff940 | Shell | jinmukeji/jiujiantang-services | /api-l-v2/go-lint.sh | UTF-8 | 229 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Resolve the directory containing this script; quoted so paths with
# spaces survive word-splitting (original used backticks and no quotes).
CUR=$(dirname "$0")
# Run gometalinter with configuration file.
# Refer to https://github.com/alecthomas/gometalinter#configuration-file
gometalinter \
    --config "${CUR}/.gometalinter.json" \
    "${CUR}/..."
| true |
ef620d95559089bc47dd9d194292a727ba8db254 | Shell | jewettaij/visfd_tutorials | /prokaryotes/complex_example/Bdellovibrio_bacteriovorus/STEP_1_segment_fiducial_beads.sh | UTF-8 | 6,290 | 3.484375 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env bash
# WARNING: In these instructions, it is assumed that the voxel width
# is 18.08 Angstroms. If not, replace this number everywhere.
#
# Terminology: I use the phrase "3-D image" and "tomogram" interchangeably.
# ---- Goal: Remove excessively bright or dark objects from the image ----
#
# If the tomogram contains extremely dark objects, such as gold fiducial
# beads or ice contamination, then the first step is to remove this debris
# from the tomogram. To do that, create a mask file (eg. "mask_blobs.rec").
# This file has brightness 0 in regions from the image containing these
# objects that we want to exclude from consideration later on.
#
# IF YOUR TOMOGRAM DOES NOT CONTAIN GOLD FIDUCIAL BEADS, THEN YOU CAN
# SKIP ALL OF THE INSTRUCTIONS CONTAINED IN THIS FILE AND DO THIS INSTEAD:
#
#
# filter_mrc -in orig_crop.rec -out fiducial_blobs.rec -fill 0
#
#
# This will create an (empty) "fiducial_blobs.rec" we will use in later steps.
# In that case, after doing this you can skip this file and follow the
# instructions in the file named "STEP_2..."
#
#
# ------------------------------------------
#
# PREREQUISITES
#
# A 3-D image file (tomogram) containing the cell you want to segment.
# It is a -VERY- good idea to crop this tomogram beforehand so that it
# only includes the cell you want to segment. (You can do this using
# "3dmod", "trimvol", or the "crop_mrc" program distributed with visfd.)
# Otherwise the software will be extremely slow and your computer is also
# likely to run out of memory (freezing up and becomming unresponsive).
#
# The tomogram in this example has been cropped and is named "orig_crop.rec".
# ------- Detect gold fiducial beads (markers) --------
# Let's detect all dark blobs ("minima") between 120 and 170 Angstroms in width.
# This corresponds (approximately) to the size of gold-bead fiducial markers
# located in this image. These objects are so dark that they confuse the
# code that detects other objects we do care about like membranes and ribosomes.
# We detect them now so that we can exclude them from consideration later.
# Blob-detection pass: write every dark (minima) blob candidate between
# 120 and 170 Angstroms wide to "fiducial_blob_candidates.txt".
filter_mrc -in orig_crop.rec \
           -w 18.08 \
           -blob minima fiducial_blob_candidates.txt 120.0 170.0 1.01
# (Note: The "-w 18.08" argument specifies the voxel width in Angstroms.)
#
# (Note: The "1.01" parameter should probably be left alone.
# Using larger values, like 1.02, will make detection faster, but doing that
# will likely cause some of the blobs we do care about to be missed,
# even if they are dark and clearly visible. If that happens, feel free to
# reduce this parameter even further. The parameter must be > 1.0.)
# Now discard the faint, noisy, or overlapping blobs. The remaining list of
# blobs (that we want to ignore later) will be saved in "fiducial_blobs.txt".
# Non-maximum-suppression pass: discard blobs that overlap (centers
# closer than 0.9 of their summed radii) and faint blobs whose score is
# above the -minima-threshold (scores are negative; darker = lower).
filter_mrc -in orig_crop.rec \
           -w 18.08 \
           -discard-blobs fiducial_blob_candidates.txt fiducial_blobs.txt \
           -radial-separation 0.9 \
           -minima-threshold -300  # <-- blobs with low "score" are omitted
# The critical parameter here is the "-minima-threshold". We should choose
# this threshold so that we detect the fiducial markers that we want to ignore
# later, without also including other features we do care about.
#
# To obtain this parameter, open the "fiducial_blob_candidates.txt" file that
# we created in the previous step with a text editor. This is a huge file
# containing one line per detected blob. The "score" of each blob is in the
# final column on the far right. This file is sorted according to blob score.
# (The scores are negative because the blobs are darker than their surroundings)
# The most significant blobs (with large scores) occur at the beginning of the
# file. Scroll downward through the file. After about a hundred lines or
# so (the number of gold beads in your image), you will notice a sudden
# drop-off in the score numbers in the 5th column. Below that point,
# all of the remaining blobs (which make up the majority of the file),
# have low scores and probably do not correspond to gold beads.
# The score where this drop occurs makes a reasonable first guess to use as a
# parameter for the "-minima-threshold" argument.
#
# Now create an image with the location of each blob "marked" with a
# hollow spherical shell:
# Visualization pass: mark each accepted blob with a hollow spherical
# shell so the threshold choice can be verified by eye in 3dmod.
filter_mrc -in orig_crop.rec \
           -w 18.08 \
           -out fiducials_blobs_annotated.rec \
           -draw-hollow-spheres fiducial_blobs.txt \
           -background-auto -background-scale 0.2 \
           -spheres-scale 2.2 # make the spheres 120% larger so we can
                              # see them more easily
# Verify that the threshold was chosen correctly by viewing the file using
#
#   3dmod -S fiducials_blobs_annotated.rec
#
# If we used a reasonable guess for the "-minima-threshold", then thin hollow
# shells should surround all of the fiducial markers.
# If not, then we have to go back and adjust this "-minima-threshold" parameter.
# (Useful trick: Clicking on one of the voxels in the thin shells and
# pressing the "F" key will print out the "score" for that blob.)
#
# It's okay if we also detect other dark objects in the image which are not
# fiducial markers (such as ice contamination or carbon films), as long as they
# are outside the cell and you don't mind excluding them from consideration
# later. If everything looks good, then create another 3-D image
# ("fiducial_blobs.rec") file that displays the blobs as white on a black
# background. (We will eventually use that version of the file to help
# us build an image "mask" later.)
# Mask-source pass: render each accepted blob as a solid white sphere
# (voxel value 1) on a black background (value 0); this image will be
# combined into the exclusion mask used by later segmentation steps.
filter_mrc -in orig_crop.rec \
           -w 18.08 \
           -out fiducial_blobs.rec \
           -draw-spheres fiducial_blobs.txt \
           -foreground 1 \
           -background 0 \
           -spheres-scale 3.5 # make the spheres 3.5 times as large as the
                              # gold beads. I want the spheres to completely
                              # cover up these beads (as well as the bright
                              # halos that tend to surround them). I want
                              # to remove all traces of these beads later
                              # when we try to detect features in the cell.
| true |
621eb2b3cb4bece1bdbc7f9742de93bbed1e8396 | Shell | notnowdog/vpn | /2.sh | UTF-8 | 1,031 | 3.09375 | 3 | [] | no_license | #!/bin/bash
###################
# Remove artifacts from a previous run of this installer.
rm -R /root/doublevpn &> /dev/null
rm -R /root/1.sh &> /dev/null
###################
# Install ansible #
# Add the Ansible PPA only if it is not already present in apt sources.
if ! grep -q "ansible/ansible" /etc/apt/sources.list /etc/apt/sources.list.d/*; then
echo "Adding Ansible PPA"
apt-add-repository ppa:ansible/ansible -y
fi
# Install Ansible only if the binary is not already on PATH.
if ! hash ansible >/dev/null 2>&1; then
echo "Installing Ansible..."
apt-get update
apt-get install software-properties-common ansible git python-apt -y
else
echo "Ansible already installed"
fi
# Fetch the playbooks and run them: gen_conf.yml generates the WireGuard
# configs, main.yml applies the double-VPN setup.
git clone https://github.com/it-toppp/doublevpn.git && cd /root/doublevpn/
ansible-playbook gen_conf.yml
echo "Please wait..."
ansible-playbook main.yml
# NOTE(review): CNF and MYIP are assigned but never used by any live
# code below (the lines that consumed them are commented out).
CNF=$(cat /root/doublevpn/wg-client.conf);
MYIP=$(curl -4 https://icanhazip.com/);
echo "#####################################################################################################################"
#cd /root
#wget https://git.io/vpn -O openvpn-install.sh && bash openvpn-install.sh
#rm -R /root/doublevpn &> /dev/null
#rm -R /root/1.sh &> /dev/null
| true |
7fd7bb83b18034e02a19a0f92dbf169de80854dd | Shell | michaelenglert/docker.appd_agents | /docker/agents.sh | UTF-8 | 6,877 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Agent layout under $APPD_HOME.
# NOTE(review): APPD_HOME and the APPDYNAMICS_* variables are presumably
# provided by the container environment -- confirm; none are set here.
APPD_JAVA_AGENT_TMP="$APPD_HOME/java-agenttemp/"
APPD_MACHINE="$APPD_HOME/machine-agent"
APPD_ANALYTICS="$APPD_MACHINE/monitors/analytics-agent"
APPD_MEMORY="256m"
# Configure App Agent
# One-time setup: if the staged java-agent template directory still
# exists, rewrite every controller-info.xml in it (host, port, SSL,
# account name, access key) from the environment, then move the tree
# into place and delete the staging directory so reruns are no-ops.
# Each sed "/pattern/c\..." replaces the whole matching XML line.
if [ -d "$APPD_JAVA_AGENT_TMP" ]; then
  find $APPD_JAVA_AGENT_TMP \
    -iname controller-info.xml \
    -exec /bin/sh -c "sed -i -e \"/-host>/c\<controller-host>$APPDYNAMICS_CONTROLLER_HOST_NAME<\/controller-host>\" {}" \;
  find $APPD_JAVA_AGENT_TMP \
    -iname controller-info.xml \
    -exec /bin/sh -c "sed -i -e \"/-port>/c\<controller-port>$APPDYNAMICS_CONTROLLER_PORT<\/controller-port>\" {}" \;
  find $APPD_JAVA_AGENT_TMP \
    -iname controller-info.xml \
    -exec /bin/sh -c "sed -i -e \"/-ssl-enabled>/c\<controller-ssl-enabled>$APPDYNAMICS_CONTROLLER_SSL_ENABLED<\/controller-ssl-enabled>\" {}" \;
  find $APPD_JAVA_AGENT_TMP \
    -iname controller-info.xml \
    -exec /bin/sh -c "sed -i -e \"/<account-name>/c\<account-name>$APPDYNAMICS_AGENT_ACCOUNT_NAME<\/account-name>\" {}" \;
  find $APPD_JAVA_AGENT_TMP \
    -iname controller-info.xml \
    -exec /bin/sh -c "sed -i -e \"/-key>/c\<account-access-key>$APPDYNAMICS_AGENT_ACCOUNT_ACCESS_KEY<\/account-access-key>\" {}" \;
  cp -r $APPD_JAVA_AGENT_TMP/* $APPD_HOME/java-agent
  rm -rf $APPD_JAVA_AGENT_TMP
  echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
App Agent controller-info.xml configured."
else
  echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
APPD_JAVA_AGENT_TMP directory ($APPD_JAVA_AGENT_TMP) does not exist --> App Agent controller-info.xml is already configured."
fi
# Configure Docker Visibility
find $APPD_MACHINE \
-iname DockerMonitoring.yml \
-exec /bin/sh -c "sed -i -e \"s/\.\*\[ \]-Dappdynamics//\" {}" \;
echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
Docker Visibility Process Selector generified."
# If the corresponding Environment Variables are set the Analytics Plugin will be enabled by default
if [ -n "${APPDYNAMICS_CONTROLLER_SSL_ENABLED:+1}" ]
then
if [ "$APPDYNAMICS_CONTROLLER_SSL_ENABLED" = "false" ]
then
APPDYNAMICS_CONTROLLER_PROTOCOL="http"
elif [ "$APPDYNAMICS_CONTROLLER_SSL_ENABLED" = "true" ]
then
APPDYNAMICS_CONTROLLER_PROTOCOL="https"
fi
else
echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
APPDYNAMICS_CONTROLLER_SSL_ENABLED not set. It will default to false."
APPDYNAMICS_CONTROLLER_PROTOCOL="http"
fi
if [ -n "${APPDYNAMICS_AGENT_GLOBAL_ACCOUNT_NAME:+1}" ] && [ -n "${APPDYNAMICS_ANALYTICS_EVENT_ENDPOINT:+1}" ]
then
if [ -e $APPD_ANALYTICS/conf/analytics-agent.properties.backup ]
then
cp $APPD_ANALYTICS/conf/analytics-agent.properties.backup $APPD_ANALYTICS/conf/analytics-agent.properties
else
cp $APPD_ANALYTICS/conf/analytics-agent.properties $APPD_ANALYTICS/conf/analytics-agent.properties.backup
fi
APPD_CONTROLLER_URL="$APPDYNAMICS_CONTROLLER_PROTOCOL:\/\/$APPDYNAMICS_CONTROLLER_HOST_NAME:$APPDYNAMICS_CONTROLLER_PORT"
sed -i -e "s/false/true/" \
$APPD_ANALYTICS/monitor.xml
sed -i -e "/controller.url/c\ad\.controller\.url=$APPD_CONTROLLER_URL" \
$APPD_ANALYTICS/conf/analytics-agent.properties
sed -i -e "/http.event.name/c\http.event.name=$APPDYNAMICS_AGENT_ACCOUNT_NAME" \
$APPD_ANALYTICS/conf/analytics-agent.properties
sed -i -e "/http.event.accountName/c\http.event.accountName=$APPDYNAMICS_AGENT_GLOBAL_ACCOUNT_NAME" \
$APPD_ANALYTICS/conf/analytics-agent.properties
sed -i -e "/http.event.accessKey/c\http.event.accessKey=$APPDYNAMICS_AGENT_ACCOUNT_ACCESS_KEY" \
$APPD_ANALYTICS/conf/analytics-agent.properties
sed -i -e "/http.event.endpoint/c\http.event.endpoint=$APPDYNAMICS_ANALYTICS_EVENT_ENDPOINT" \
$APPD_ANALYTICS/conf/analytics-agent.properties
echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
Analytics enabled."
else
echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] \
Analytics not enabled cause either APPDYNAMICS_AGENT_GLOBAL_ACCOUNT_NAME or APPDYNAMICS_ANALYTICS_EVENT_ENDPOINT is missing."
fi
# Newline-separated list of versioned java-agent directories (ver4*);
# each may carry a log4j.xml or log4j2.xml logging config.
APPD_JAVA_AGENT_VERSIONS="$(find $APPD_HOME/java-agent/ -name ver4*)"
# Switch all agent logging between console (stdout) and file appenders.
# A .backup of each original config is kept so "false" can restore it.
if [ "$APPDYNAMICS_STDOUT_LOGGING" = "true" ]
then
  if [ ! -e $APPD_MACHINE/conf/logging/log4j.xml.backup ]
  then
    cp $APPD_MACHINE/conf/logging/log4j.xml \
      $APPD_MACHINE/conf/logging/log4j.xml.backup
  fi
  # Point every appender reference at ConsoleAppender and use DATE
  # timestamps instead of ABSOLUTE ones.
  sed -i -e 's/ref="\w*"/ref="ConsoleAppender"/g' \
    $APPD_MACHINE/conf/logging/log4j.xml
  sed -i -e 's/ABSOLUTE/DATE/' \
    $APPD_MACHINE/conf/logging/log4j.xml
  # Apply the same rewrite to each java-agent version, handling both
  # log4j 1.x (log4j.xml) and log4j 2.x (log4j2.xml) layouts.
  while read -r APPD_JAVA_AGENT_VERSION; do
    if [ -e $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml ]
    then
      if [ ! -e $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml.backup ]
      then
        cp $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml \
          $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml.backup
      fi
      sed -i -e 's/ref="\w*"/ref="ConsoleAppender"/g' \
        $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml
      sed -i -e 's/ABSOLUTE/DATE/' \
        $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml
    elif [ -e $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml ]
    then
      if [ ! -e $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml.backup ]
      then
        cp $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml \
          $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml.backup
      fi
      sed -i -e 's/ref="\w*"/ref="ConsoleAppender"/g' \
        $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml
      sed -i -e 's/ABSOLUTE/DATE/' \
        $APPD_JAVA_AGENT_VERSION/conf/logging/log4j2.xml
    fi
  done <<< "$APPD_JAVA_AGENT_VERSIONS"
  echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] Logging set to Standard Out."
elif [ "$APPDYNAMICS_STDOUT_LOGGING" = "false" ]
then
  # Restore the original file-based logging configs from the backups.
  if [ -e $APPD_MACHINE/conf/logging/log4j.xml.backup ]
  then
    cp $APPD_MACHINE/conf/logging/log4j.xml.backup \
      $APPD_MACHINE/conf/logging/log4j.xml
  fi
  while read -r APPD_JAVA_AGENT_VERSION; do
    if [ -e $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml.backup ]
    then
      cp $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml.backup \
        $APPD_JAVA_AGENT_VERSION/conf/logging/log4j.xml
    fi
  done <<< "$APPD_JAVA_AGENT_VERSIONS"
  echo "$(date -u +%d\ %b\ %Y\ %H:%M:%S) INFO [appd.sh] Logging set to File."
fi
# Cleanup old .id files
# Stale agent-identity files would make a restarted container register
# as the old agent instance; remove them before starting.
find $APPD_HOME -iname *.id -exec /bin/sh -c "rm -rf {}" \;
# Allow the 256m default heap to be overridden from the environment.
if [ -n "${APPDYNAMICS_MEMORY:+1}" ]
then
  APPD_MEMORY=${APPDYNAMICS_MEMORY}
fi
# Launch the machine agent with a fixed-size heap (-Xms == -Xmx).
$APPD_MACHINE/bin/machine-agent -Xmx$APPD_MEMORY -Xms$APPD_MEMORY start
exit 0 | true |
3143de4d54e9d1dc1b19f9791276b2613f3cc6b6 | Shell | rtreffer/home-network-builder | /build-container/secret-env | UTF-8 | 412 | 3.59375 | 4 | [] | no_license | #!/bin/bash
set -eu -o pipefail
# Usage: secret-env <encrypted-file> <target-plaintext> <command> [args...]
# Decrypts an encrypted env file to <target>, sources it with all
# variables exported, then execs the remaining arguments as the command.
file="${1}"
shift
target="${1}"
shift
# gpg2 needs a home keyring directory to exist even in keyring-less mode.
[[ -d "${HOME}/.gnupg" ]] || mkdir "${HOME}/.gnupg"
# Decrypt only when the encrypted source exists and the target's parent
# directory is present; otherwise fall through and source an existing
# (previously decrypted) target file.
if [[ -f "${file}" ]] && [[ -d "$(dirname "${target}")" ]]; then
    gpg2 --no-keyring --no-auto-key-locate --no-auto-key-retrieve --no-default-keyring --yes --output "${target}" --decrypt "${file}"
fi
# source and export all secret variables
# (set -a marks every assignment for export until set +a)
set -a
source "${target}"
set +a
# Replace this shell with the wrapped command, inheriting the secrets.
exec "$@"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.