blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c508ce4de61427b72fe86b818f5e61bdaedb9706
|
Shell
|
Dallinger/Dallinger
|
/scripts/update_experiments_constraints.sh
|
UTF-8
| 1,249
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Recompile the pip constraints.txt for every demo experiment that ships a
# config.txt, then pin the dallinger version in all generated files.
dir=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
cd "$dir/.." || exit 1
# Command to calculate md5sum of a file. Since BSD (Mac OS) has an `md5` binary
# and Linux has `md5sum` we salomonically decide to ditch both and implement
# them as a python one liner.
md5_cmd() {
    python3 -c "import hashlib as h;from sys import argv; print(h.md5(open( argv[1], 'rb').read()).hexdigest())" "$1"
}
# Update demos constraints
export CUSTOM_COMPILE_COMMAND=$'./scripts/update_experiments_constraints.sh\n#\n# from the root of the dallinger repository'
# Glob directories directly instead of parsing `ls` output.
for demo_dir in demos/dlgr/demos/*/; do
    demo_name=$(basename "$demo_dir")
    if [ -f "${demo_dir}config.txt" ]; then
        # Subshell restores the cwd on exit; no reliance on `cd -`.
        (
            cd "$demo_dir" || exit 1
            echo "Compiling ${demo_name}"
            echo "-c ../../../../dev-requirements.txt
-r requirements.txt" > temp-requirements.txt
            pip-compile temp-requirements.txt -o constraints.txt
            rm temp-requirements.txt
            echo '# generate from file with hash ' "$(md5_cmd requirements.txt)" >> constraints.txt
            # Remove the extras from constraints.txt (pkg[extra]== -> pkg==)
            sed -e 's/\[.*==/==/' -i constraints.txt
        )
    fi
done
sed -i.orig -e "s/dallinger==.*/dallinger==$(dallinger --version)/" demos/dlgr/demos/*/constraints.txt
| true
|
0c0c3932c2705e9c2ff0c0e5f1e1a31e2773111c
|
Shell
|
buwilliams/dotfiles_old
|
/vim/install.sh
|
UTF-8
| 211
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Bootstrap the Vundle plugin manager (clone once, if missing) and then
# run a headless Vim to install all configured plugins.
vundle_repo="https://github.com/gmarik/Vundle.vim.git"
vundle_dir="$HOME/.vim/bundle/Vundle.vim"
[ -d "$vundle_dir" ] || git clone "$vundle_repo" "$vundle_dir"
vim +PluginInstall +qall
| true
|
ff792b3f0c6c60ec7f46caff8f324584f73e14db
|
Shell
|
marasteiger/functional_genomics
|
/mapping/RNA_mapping.sh
|
UTF-8
| 664
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Map every RNA-seq FASTQ in the 48hMEF sample directory with STAR, writing
# coordinate-sorted BAMs, RPM-normalised wiggle tracks and gene counts into
# the group1 mapping directory.
src_dir=/project/functional-genomics/2019/data/sra/48hMEF/RNASeq
out_dir=/project/functional-genomics/2019/group1/mapping/48hMEF/RNASeq

cd "$src_dir" || exit 1
fastqs=(*.fastq)        # expand the glob once, while still in the source dir

# All STAR output prefixes are relative to the mapping directory.
cd "$out_dir" || exit 1
for file in "${fastqs[@]}"
do
    # Output prefix: the first 10 characters of the file name (sample ID).
    prefix=${file:0:10}
    STAR --runThreadN 10 \
    --runMode alignReads \
    --genomeDir /project/functional-genomics/2019/data/genome/STARindex/ \
    --readFilesIn "$src_dir/$file" \
    --sjdbGTFfile /project/functional-genomics/2019/data/annotation/Mus_musculus.NCBIM37.67.gtf \
    --outSAMtype BAM SortedByCoordinate \
    --outWigType wiggle \
    --outWigNorm RPM \
    --outFileNamePrefix ./"$prefix" \
    --quantMode GeneCounts
done
| true
|
c57116e1e843db3ea90370270162a0e9526ac557
|
Shell
|
schwarz84/dotfiles
|
/bin/scripts/dmenu-explore
|
UTF-8
| 1,471
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright [2019] andreasl
# Copyright [2020] SqrtMinusOne
# Initialise global browsing state: start at $HOME and seed the dmenu menu
# with the special entries, the current directory listing and the history.
define_standard_settings() {
selected_path="$HOME"
history_file="${HOME}/.config/.edm_history"
max_history_entries=3
# Each array element may contain many newline-separated lines; dmenu splits
# them into individual menu rows.
choices=(
'<open terminal here>'
'.'
'..'
"$(ls "$selected_path")"
"$(cat "$history_file")"
)
open_command='xdg-open'
# Intentionally two words: "setsid" + terminal binary, word-split at use site.
open_terminal_command='setsid st'
}
define_standard_settings
# Record $selected_path in the history file: remove any previous occurrence,
# append it, then trim the file to the newest $max_history_entries lines.
write_selection_to_history_file() {
sed -i "\:${selected_path}:d" "$history_file"
printf '%s\n' "$selected_path" >> "$history_file"
printf '%s\n' "$(tail -n "$max_history_entries" "$history_file")" > "$history_file"
}
# Main loop: show the menu, resolve the selection to a path, and either open
# a terminal there, open the file, or descend into the directory.
while : ; do
    dmenu_result="$(printf '%s\n' "${choices[@]}" | dmenu -p "$selected_path" -l 50)" || exit 1
    if [ "$dmenu_result" = '<open terminal here>' ]; then
        # Quote the path (it may contain spaces); $open_terminal_command stays
        # unquoted on purpose so "setsid st" word-splits into two words.
        cd "$selected_path" && $open_terminal_command
        write_selection_to_history_file
        exit 0
    fi
    # Absolute selections (from history) are taken verbatim; relative ones are
    # resolved against the current path.
    if [[ "$dmenu_result" == "/"* ]]; then
        selected_path="${dmenu_result}"
    else
        selected_path="$(realpath "${selected_path}/${dmenu_result}")"
    fi
    if [ -f "$selected_path" ] || [ "$dmenu_result" = '.' ]; then
        $open_command "$selected_path"
        write_selection_to_history_file
        exit 0
    elif [ -d "$selected_path" ]; then
        choices=( '<open terminal here>' '.' '..' "$(ls "$selected_path")")
    else
        # Neither file nor directory (stale history entry): fall back to parent.
        selected_path="$(dirname "$selected_path")"
    fi
done
| true
|
949fadade9d94d1c3026f5c1490b1b5bec3ef3ac
|
Shell
|
arrjay/packer
|
/kam/files/save.sh
|
UTF-8
| 340
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Scan all disks reported by the BSD sysctl hw.disknames for a partition whose
# ext2 label is "black", then mount that partition on /mnt.
OIFS=$IFS        # bug fix: original "OIFS=IFS" saved the literal string "IFS"
IFS=,            # hw.disknames is a comma-separated list
mydev=''
for disk in $(sysctl -n hw.disknames) ; do
  topdev=${disk%%:*}
  for dev in /dev/$topdev? ; do
    label=$(e2label "$dev" 2> /dev/null)
    # bug fix: [[ ]] is a bashism; this script declares #!/bin/sh
    if [ "$label" = "black" ] ; then
      mydev=$dev
      break
    fi
  done
  if [ ! -z "${mydev}" ] ; then
    break
  fi
done
IFS=$OIFS
set -e
# bug fix: mount the device that matched the label, not the last one probed
mount "$mydev" /mnt
| true
|
7cc016bbeab2b1bf7ff917b61b2936216a1be6c6
|
Shell
|
rahulredde9/disk-encryption
|
/encrypt_disks.sh
|
UTF-8
| 3,752
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
#####################
# Author: RahulReddy
# Description:
# This script encrypts all attached drives except the first drive. It was tested in AWS but for other environments
# you may need to modify num_extra_drives and dev_label variables to make sure it gets the right variables
# NB: This script will erase all contents of your drive! To prevent this touch ${encrypted_disks_log}/<drive name>
#####################
set -e
# Optional single-disk mode: $1 names one disk (e.g. "xvdb"); empty = all extras.
specific_disk=$1
# set -u only after reading $1 so a missing argument does not abort the script.
set -u
# Count attached drives (first 4 chars of lsblk device names) minus the root drive.
num_extra_drives=$((` lsblk | awk ' $0 ~ "^[a-z]" { print $1 } ' | cut -c -4 | sort | uniq | wc -l `-1))
dev_label=`lsblk | awk ' $0 ~ "^[a-z]" { print $1 } ' | cut -c -3 | sort | uniq`
# Marker directory: one empty file per disk that has already been encrypted.
encrypted_disks_log=/root/encrypted_disk
letters=( $(echo {a..z}) )
encrypt_key=/root/disk_encrypt_key
yum -y install pv cryptsetup
mount_point_prefix=/data/vol
# Generate encryption key
if [ ! -e ${encrypt_key} ]; then
if [ ! -e "${encrypt_key}.bak" ]; then
echo "-> Creating Encryption Key"
openssl rand -base64 32 > ${encrypt_key}
cp ${encrypt_key} ${encrypt_key}.bak
else
echo "-> Backup Encryption key exists! Please copy back to ${encrypt_key}"
fi
fi
# Check for formated disk directory
if [ ! -d $encrypted_disks_log ]; then
echo "-> Creating Encrypted Disk log directory"
mkdir -p $encrypted_disks_log
fi
#######################################
# Encrypt one disk with LUKS and register it for mounting.
# DESTRUCTIVE: reformats the device unless its marker file exists.
# Arguments:
#   $1 - disk name (e.g. xvdb)
#   $2 - volume index; the mapped device becomes /dev/mapper/vol<count>
# Side effects: edits /etc/fstab and /etc/crypttab, creates the mount point,
#   touches ${encrypted_disks_log}/<disk> on success.
#######################################
function encrypt_disk(){
local disk=$1
local count=$2
local device=/dev/${disk}
echo "*-> Working on ${device}"
sleep 3
#if [ ! -e ${device} ]; then
#echo " !- Device - ${device} does not exist"
#return 0
#fi
# Check if drive has been formatted
if [ -e "${encrypted_disks_log}/${disk}" ]; then
echo " -> Drive has been encrypted. Skipping! To force format remove ${encrypted_disks_log}/${disk}"
else
# Clean up any previous state (mount, fstab line, open LUKS mapping)
# before re-formatting.
if mount | grep "${mount_point_prefix}${count} " > /dev/null; then
echo " -> Unmounting ${mount_point_prefix}${count}"
umount ${mount_point_prefix}${count}
fi
echo " -> Removing ${device} from fstab"
sed_var="${mount_point_prefix}${count} "
sed -i -e "\|$sed_var|d" /etc/fstab
echo " -> Closing Encryted the drive"
# luksClose may legitimately fail when no mapping exists; tolerate it.
set +e
cryptsetup luksClose vol${count}
set -e
echo " -> Performing Cryptsetup format"
cryptsetup --verbose -yrq --key-file=${encrypt_key} luksFormat $device
echo " -> Mapping from dev to volume"
cryptsetup --key-file=${encrypt_key} luksOpen $device vol${count}
echo " -> Creating Filesystem - ext4"
mkfs.ext4 /dev/mapper/vol${count}
echo " -> Adding new entry to fstab"
# Make sure you use the UUID of the mapped drive
mapped_uuid=`blkid /dev/mapper/vol${count} | awk ' { line=$2; gsub(/"/,"",line); print line } '`
# Sample fstab entry
# UUID=e7faba57-b88f-46f8-a4b8-9fd4fa0a2e4b /data/vol4 ext4 defaults 0 0
echo "${mapped_uuid} ${mount_point_prefix}${count} ext4 defaults 0 0" >> /etc/fstab
mkdir -p ${mount_point_prefix}${count}
echo " -> Removing old crypttab entry"
sed_var="vol${count} "
sed -i -e "\|${sed_var}|d" /etc/crypttab
echo " -> Adding entry to crypttab - ${device}"
device_uuid=`blkid ${device} | awk ' { line=$2; sub(/UUID=/,"",line); gsub(/"/,"",line); print line } '`
echo "vol${count} /dev/disk/by-uuid/${device_uuid} ${encrypt_key} luks" >> /etc/crypttab
touch ${encrypted_disks_log}/${disk}
fi
}
# Main: with no argument, encrypt every extra drive in parallel; otherwise
# encrypt only $specific_disk as volume 1.
date
if [ -z $specific_disk ]; then
d_count=1
# Subshell groups the background jobs so `wait` reaps exactly these pids.
( pids=""
# awk 'NR>1' skips the first (root) drive.
for disk in `lsblk | awk ' $0 ~ "^[a-z]" { print $1 } ' | cut -c -4 | sort | uniq | awk ' NR >1' `; do
encrypt_disk $disk $d_count &
pids="$pids $!"
d_count=$((d_count+1))
done
echo "-> Waiting for Children to finish"
wait $pids )
else
encrypt_disk $specific_disk 1
fi
echo "-> All Children are back - Mounting FSTAB"
mount -a
| true
|
19649653fa4cb6cc5d0a4e23466731093724e37e
|
Shell
|
t-lin/ece361-vm
|
/vnc/ece361-vnc
|
UTF-8
| 749
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Start or stop the course VNC server on display :1 (TCP port 5901).
source /usr/local/ece361-wrapper-prints

# Numeric comparison (-ne) instead of string != for the argument count.
if [[ $# -ne 1 ]]; then
    bold_red -n "ERROR: "
    echo "Expecting at least one parameter: 'start' or 'stop'"
    exit 1
fi

if [[ $1 == "start" ]]; then
    # pgrep -f matches full command lines and, unlike `ps | grep`, can never
    # accidentally match the grep processes themselves.
    if pgrep -f "Xvnc4.*5901" > /dev/null; then
        bold_red -n "ERROR: "
        echo "A VNC server session already exists (port 5901). Either connect to the current session or stop it, then re-start it"
    else
        vncserver -geometry 1280x720 -depth 24 :1
        green "VNC server is now running and listening on port 5901"
    fi
elif [[ $1 == "stop" ]]; then
    vncserver -kill :1
    green "VNC server has been killed"
else
    bold_red -n "ERROR: "
    echo "Expecting at least one parameter: 'start' or 'stop'"
fi
| true
|
8ebbb3dec7730bdf45fbe942d3094b7531a62956
|
Shell
|
xuewenhaoby/fnl
|
/test/change_link.sh
|
UTF-8
| 211
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Move a host's OVS port ({HOSTNAME}p1) to a different software bridge, then
# re-register the host from inside its network namespace.
# bug fix: the original unquoted `[ "help" = $1 ]` was a syntax error when no
# argument was given; also show usage when either argument is missing.
if [ $# -lt 2 ] || [ "$1" = "help" ]; then
    echo "change_link.sh {HOSTNAME} {SBRNAME}"
else
    HOST=$1
    PORT=${HOST}p1
    SBR=$2
    ovs-vsctl del-port "$PORT"
    ovs-vsctl add-port "$SBR" "$PORT"
    ip netns exec "$HOST" ./../bin/send register
fi
| true
|
17d0078ba61b37f76f9268df9d58f3c6996bf897
|
Shell
|
greyson/aur-cctool
|
/PKGBUILD
|
UTF-8
| 544
| 2.75
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for cc-tool: flashing/debugging support for Texas
# Instruments CC chips via the CC Debugger. Standard autotools build from
# the upstream SourceForge tarball.
pkgname=cc-tool
pkgver=0.24
pkgrel=1
pkgdesc="Support for Texas Instruments CC Debugger"
arch=('x86_64')
url='http://sourceforge.net/projects/cctool/'
license=('GPL')
depends=('boost>=1.34.0')
source=( "http://downloads.sourceforge.net/project/cctool/$pkgname-$pkgver-src.tgz" )
md5sums=('4b95889a60aa8162d1f41ed19bf14f7b')
# Configure and compile in the unpacked source directory.
build() {
cd "$srcdir/cc-tool"
./configure --prefix=/usr
make
}
# Run the upstream test suite (-k: keep going past failures).
check() {
cd "$srcdir/cc-tool"
make -k check
}
# Stage the compiled files into the package root.
package() {
cd "$srcdir/cc-tool"
make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true
|
46c966b75af300795c158aee3330133a50af3571
|
Shell
|
unt-libraries/catalog-api
|
/clear_index.sh
|
UTF-8
| 466
| 3.015625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Delete every document from the named Solr core, then commit the deletion.
# Usage: clear_index.sh <core-name>
# Note: the original "wait ${!}" lines were no-ops (curl is never
# backgrounded, so $! was unset) and have been removed.
if [ -n "$1" ]
then
    curl "http://localhost:8983/solr/$1/update" --data '<delete><query>*:*</query></delete>' -H 'Content-type:text/xml; charset=utf-8'
    curl "http://localhost:8983/solr/$1/update" --data '<commit />' -H 'Content-type:text/xml; charset=utf-8'
    echo "Solr index for core $1 cleared."
else
    echo "No Solr core specified."
    echo "Example usage: clear_index.sh core1"
fi
| true
|
bffffb99d3034ce43ac5f73b84f5b675ee1bd51f
|
Shell
|
AlibiMourad/easycwmp
|
/fichier model
|
UTF-8
| 1,665
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
#############################
# Entry point functions #
#############################
# Register the $DMROOT.PID. subtree with the easycwmp data model: object
# prefix, entry method and forced-inform entry method.
prefix_list="$prefix_list $DMROOT.PID."
entry_execute_method_list="$entry_execute_method_list entry_execute_method_root_PID"
entry_execute_method_list_forcedinform="$entry_execute_method_list_forcedinform entry_execute_method_root_PID"
# Entry point for requests at or below $DMROOT.PID.: emit the PID objects
# and browse the Process instances; any other path is an invalid name.
entry_execute_method_root_PID() {
case "$1" in ""|"$DMROOT."|"$DMROOT.PID."|"$DMROOT.PID.Process."*)
common_execute_method_obj "$DMROOT.PID." "0"
common_execute_method_obj "$DMROOT.PID.Process." "0"
common_execute_method_obj "$DMROOT.PID.Process." "0" "" "" "PID_device_browse_instances $1"
return 0
;;
esac
return $E_INVALID_PARAMETER_NAME;
}
# Emit the data-model object and PID parameter for one Process instance.
# $2 = instance index, $3 = total instance count.
# NOTE(review): $n and $inface are computed but never used here — confirm
# whether the interface lookup was meant to feed a parameter.
sub_entry_PID_Process(){
local j="$2"
local n="$3"
local inface=`cat /etc/config/network | grep config | head -$j | tail -1 | cut -d " " -f 3 | tr -d "'"`
common_execute_method_obj "$DMROOT.PID.Process.$j." "0"
# NOTE(review): the parameter name ends with a space before the closing
# quote ("...PID ") — verify this is intentional.
common_execute_method_param "$DMROOT.PID.Process.$j.PID " "0" "Process_PID $j" "1" "xsd:unsignedInt"
}
#######################################
# Data model browse instances #
#######################################
# Walk Process instances and emit each one via sub_entry_PID_Process.
# NOTE(review): the loop runs while j != n, so indices 1..n-1 are emitted and
# instance n itself is skipped — confirm whether this off-by-one is intended.
PID_device_browse_instances(){
local n=`PID_Process_get_max_Process`
local j=1
while test $j != $n
do
sub_entry_PID_Process "$1" "$j" "$n"
j=$(($j+1))
done
}
#######################################
# Data model parameters functions #
#######################################
# Count processes owned by root in one `top` snapshot.
PID_Process_get_max_Process(){
local iface="$1"
local max=`top -n 1| grep -i root |wc -l`
echo $max
}
# Print the pid (second field) of the first active root-owned process
# (lines showing " 0% 0% " CPU/MEM are filtered out).
Process_PID(){
local pid=`top -n 1| grep -v " 0% 0% " | grep -i root |head -1 | tail -1 | cut -d " " -f 2`
echo $pid
}
| true
|
9a5bb1692333c0711be7fc19571f40dd229a7c40
|
Shell
|
Jauresk/kafkaEisDemo
|
/seeds/kafka-sample-generator.sh
|
UTF-8
| 1,179
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the Kafka topics described by the TOPIC_LIST / PARTITIONS /
# REPLICATION_FACTORS environment variables (parallel comma-separated lists).
# bug fix: arrays, here-strings and [[ ]] are bash features — the original
# "#!/bin/sh" shebang could break under dash/ash.
IFS=', ' read -r -a TOPICS_ARRAY <<< "$TOPIC_LIST"
IFS=', ' read -r -a PARTITIONS_ARRAY <<< "$PARTITIONS"
IFS=', ' read -r -a FACTORS_ARRAY <<< "$REPLICATION_FACTORS"

# The three lists must all have the same length.
if [ "${#TOPICS_ARRAY[@]}" -ne "${#PARTITIONS_ARRAY[@]}" ] || [ "${#TOPICS_ARRAY[@]}" -ne "${#FACTORS_ARRAY[@]}" ] || [ "${#PARTITIONS_ARRAY[@]}" -ne "${#FACTORS_ARRAY[@]}" ] ; then
    RED='\033[0;31m'
    NC='\033[0m'
    printf "${RED}"
    printf "Exception: The number of topics, partitions and replication factors doesn't match.\n"
    printf ">>> TOPICS: ................ ${#TOPICS_ARRAY[@]}\n"
    printf ">>> PARTITIONS: ............ ${#PARTITIONS_ARRAY[@]}\n"
    printf ">>> REPLICATION FACTORS: ... ${#FACTORS_ARRAY[@]}\n"
    printf "${NC}"
    # bug fix: bare "exit" reported success (status 0) on a config error
    exit 1
fi

echo 'Waiting for Kafka to be ready...'
cub kafka-ready -b "$BROKER_HOST:$BROKER_PORT" 1 20 && \
sleep 1

for ((i=0;i<${#TOPICS_ARRAY[@]};++i)); do
    echo "Creating Topic [$BROKER_HOST:$BROKER_PORT <topic:'${TOPICS_ARRAY[i]}'>]"
    kafka-topics --create --topic "${TOPICS_ARRAY[i]}" --if-not-exists \
        --zookeeper "$ZOOKEEPER_HOST:$ZOOKEEPER_PORT" --partitions "${PARTITIONS_ARRAY[i]}" --replication-factor "${FACTORS_ARRAY[i]}"
done
| true
|
2c55a475e04a86009a1fe2ca8c520d5e51d7606b
|
Shell
|
rathrio/concurrency-multi-core-programming-and-data-processing
|
/examples/lists/run-graph.sh
|
UTF-8
| 547
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Benchmark several concurrent list implementations across thread counts,
# writing one gnuplot-ready "<threads> <result>" data file per class.
initial=2048
update=20
mkdir -p data

classes="CoarseList FineList OptimisticList LazyList LockFreeList"
thread_counts="1 2 4 8 16 24 32 48"

# Seed every data file with an origin point.
for class in $classes
do
    echo "0 0" > "data/${class}.dat"
done

for class in $classes
do
    for threads in $thread_counts
    do
        echo "########################################################################"
        java Driver ${class} ${threads} ${update} ${initial} | tee /tmp/list.log
        # Last field of the last output line is the measurement.
        n=$(tail -1 /tmp/list.log | awk '{print $NF}')
        echo "${threads} ${n}" >> "data/${class}.dat"
        rm -f /tmp/list.log
    done
done
| true
|
19fe6e534e261bf62cdc5d2a43a7c38b09952a4c
|
Shell
|
amwill04/Packer-LAMP-Python-PhantomJS-WebScrapping-Build
|
/scripts/zerodisk.sh
|
UTF-8
| 2,056
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Shrink a Vagrant/Packer box: purge unneeded packages, clear caches and
# logs, then zero all free space so the disk image compresses well.
# Credits to:
# - http://vstone.eu/reducing-vagrant-box-size/
# - https://github.com/mitchellh/vagrant/issues/343
aptitude -y purge ri
aptitude -y purge installation-report landscape-common wireless-tools wpasupplicant ubuntu-serverguide
aptitude -y purge python-dbus libnl1 python-smartpm python-twisted-core libiw30
aptitude -y purge python-twisted-bin libdbus-glib-1-2 python-pexpect python-pycurl python-serial python-gobject python-pam python-openssl libffi5
aptitude -y purge linux-image-3.0.0-12-generic-pae linux-image-extra-3.13.0-62-generic linux-headers-3.13.0-62-generic libruby1.9.1 libx11-doc
aptitude -y purge language-pack-gnome-en-base discover-data language-pack-en-base libfreetype6-dev libx11-dev x11proto-core-dev
aptitude -y purge g++ libnl-genl-3-200 libalgorithm-merge-perl libalgorithm-diff-xs-perl
aptitude -y purge libfile-fcntllock-perl crda hiera biosdevname discover libalgorithm-diff-perl
aptitude -y purge libnl-3-200 libopts25 libdiscover2 libreadline5 libtinfo-dev zlib1g-dev
aptitude -y purge libreadline-gplv2-dev dictionaries-common wamerican wbritish libssl-doc ntp
aptitude -y purge libdpkg-perl dpkg-dev libssl-dev libstdc++-4.8-dev g++-4.8 linux-firmware
# Remove APT cache
apt-get clean -y
apt-get autoclean -y
apt-get autoremove -y
# Zero free space to aid VM compression
# (dd is EXPECTED to fail with "No space left on device" — that is the point.)
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
# Remove bash history
unset HISTFILE
rm -f /root/.bash_history
rm -f /home/vagrant/.bash_history
# Cleanup log files
find /var/log -type f | while read f; do echo -ne '' > $f; done;
# Whiteout root
# (fill all but 1KB of free space with zeros, then delete the filler)
count=`df --sync -kP / | tail -n1 | awk -F ' ' '{print $4}'`;
let count--
dd if=/dev/zero of=/tmp/whitespace bs=1024 count=$count;
rm /tmp/whitespace;
# Whiteout /boot
count=`df --sync -kP /boot | tail -n1 | awk -F ' ' '{print $4}'`;
let count--
dd if=/dev/zero of=/boot/whitespace bs=1024 count=$count;
rm /boot/whitespace;
# Zero and rebuild the swap partition so it also compresses
swappart=`cat /proc/swaps | tail -n1 | awk -F ' ' '{print $1}'`
swapoff $swappart;
dd if=/dev/zero of=$swappart;
mkswap $swappart;
swapon $swappart;
| true
|
798ff395f1e9b38bafdc1f13f8c4fa9657dca3f7
|
Shell
|
elasti-guyr/gcp-automation
|
/create_vheads.sh
|
UTF-8
| 13,624
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#create vheads.sh, Andrew Renz, Sept 2017, June 2018
#Script to configure Elastifile EManage (EMS) Server, and deploy cluster of ECFS virtual controllers (vheads) in Google Compute Platform (GCE)
#Requires terraform to determine EMS address and name (Set EMS_ADDRESS and EMS_NAME to use standalone)
set -ux
#implement command-line options
#imported from EMS /elastifile/emanage/deployment/cloud/add_hosts_google.sh
# function code from https://gist.github.com/cjus/1047794 by itstayyab
# Print the value(s) for key $1 from JSON on stdin, comma-separated.
# NOTE(review): this is a text-level hack, not a JSON parser — it will break
# on nested objects or values containing , : } — prefer jq if available.
function jsonValue() {
KEY=$1
awk -F"[,:}]" '{for(i=1;i<=NF;i++){if($i~/'$KEY'\042/){print $(i+1)}}}' | tr -d '"'| tr '\n' ','
}
# Print the command-line option summary and exit with status 1.
usage() {
cat << E_O_F
Usage:
-c configuration type: "small" "medium" "medium-plus" "large" "standard" "small standard" "local" "small local" "custom"
-l load balancer: "none" "dns" "elastifile" "google"
-t disk type: "persistent" "hdd" "local"
-n number of vhead instances (cluster size): eg 3
-d disk config: eg 8_375
-v vm config: eg 4_42
-p IP address
-r cluster name
-s deployment type: "single" "dual" "multizone"
-a availability zones
-e company name
-f contact person
-g contact person email
-i clear tier
-k async dr
-j lb vip
-b data container
-z image
E_O_F
exit 1
}
#variables
#LOGIN=admin
SESSION_FILE=session.txt
# Admin password read from a local password.txt (first whitespace-delimited field).
PASSWORD=`cat password.txt | cut -d " " -f 1`
SETUP_COMPLETE="false"
# Defaults; most are overridden by the getopts loop below.
DISKTYPE=local
NUM_OF_VMS=3
NUM_OF_DISKS=1
WEB=https
LOG="create_vheads.log"
#LOG=/dev/null
#DISK_SIZE=
while getopts "h?:c:l:t:n:d:v:p:s:a:e:f:g:i:k:j:b:r:z:" opt; do
case "$opt" in
h|\?)
usage
exit 0
;;
c) CONFIGTYPE=${OPTARG}
[ "${CONFIGTYPE}" = "small" -o "${CONFIGTYPE}" = "medium" -o "${CONFIGTYPE}" = "medium-plus" -o "${CONFIGTYPE}" = "large" -o "${CONFIGTYPE}" = "standard" -o "${CONFIGTYPE}" = "small standard" -o "${CONFIGTYPE}" = "local" -o "${CONFIGTYPE}" = "small local" -o "${CONFIGTYPE}" = "custom" ] || usage
;;
l) LB=${OPTARG}
;;
t) DISKTYPE=${OPTARG}
[ "${DISKTYPE}" = "persistent" -o "${DISKTYPE}" = "hdd" -o "${DISKTYPE}" = "local" ] || usage
;;
n) NUM_OF_VMS=${OPTARG}
# Integer check: -eq errors out (triggering usage) for non-numeric input.
[ ${NUM_OF_VMS} -eq ${NUM_OF_VMS} ] || usage
;;
d) DISK_CONFIG=${OPTARG}
;;
v) VM_CONFIG=${OPTARG}
;;
p) EMS_ADDRESS=${OPTARG}
;;
s) DEPLOYMENT_TYPE=${OPTARG}
;;
a) AVAILABILITY_ZONES=${OPTARG}
;;
e) COMPANY_NAME=${OPTARG}
;;
f) CONTACT_PERSON_NAME=${OPTARG}
;;
g) EMAIL_ADDRESS=${OPTARG}
;;
i) ILM=${OPTARG}
;;
k) ASYNC_DR=${OPTARG}
;;
j) LB_VIP=${OPTARG}
;;
b) DATA_CONTAINER=${OPTARG}
;;
r) EMS_NAME=${OPTARG}
;;
z) IMAGE=${OPTARG}
;;
esac
done
#capture computed variables
EMS_HOSTNAME="${EMS_NAME}.local"
# load balancer mode: only "elastifile" enables the built-in load balancer
if [[ $LB == "elastifile" ]]; then
USE_LB="true"
elif [[ $LB == "dns" ]]; then
USE_LB="false"
else
USE_LB="false"
fi
#deployment mode: replication level 1 for "single", otherwise 2
if [[ $DEPLOYMENT_TYPE == "single" ]]; then
REPLICATION="1"
elif [[ $DEPLOYMENT_TYPE == "dual" ]]; then
REPLICATION="2"
else
REPLICATION="2"
fi
# Log the effective configuration (first tee truncates ${LOG}, the rest append).
echo "EMS_ADDRESS: $EMS_ADDRESS" | tee ${LOG}
echo "EMS_NAME: $EMS_NAME" | tee -a ${LOG}
echo "EMS_HOSTNAME: $EMS_HOSTNAME" | tee -a ${LOG}
echo "DISKTYPE: $DISKTYPE" | tee -a ${LOG}
echo "NUM_OF_VMS: $NUM_OF_VMS" | tee -a ${LOG}
echo "NUM_OF_DISKS: $NUM_OF_DISKS" | tee -a ${LOG}
echo "LB: $LB" | tee -a ${LOG}
echo "USE_LB: $USE_LB" | tee -a ${LOG}
echo "DEPLOYMENT_TYPE: $DEPLOYMENT_TYPE" | tee -a ${LOG}
echo "REPLICATION: $REPLICATION" | tee -a ${LOG}
echo "COMPANY_NAME: $COMPANY_NAME" | tee -a ${LOG}
echo "CONTACT_PERSON_NAME: $CONTACT_PERSON_NAME" | tee -a ${LOG}
echo "EMAIL_ADDRESS: $EMAIL_ADDRESS" | tee -a ${LOG}
echo "ILM: $ILM" | tee -a ${LOG}
echo "ASYNC_DR: $ASYNC_DR" | tee -a ${LOG}
echo "LB_VIP: $LB_VIP" | tee -a ${LOG}
echo "DATA_CONTAINER: $DATA_CONTAINER" | tee -a ${LOG}
echo "IMAGE: $IMAGE" | tee -a ${LOG}
#set -x
#establish https session
# Log in to the EMS REST API as admin ($1 = password); response headers
# (including the session cookie) are saved to $SESSION_FILE.
function establish_session {
echo -e "Establishing https session..\n" | tee -a ${LOG}
curl -k -D ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"user": {"login":"admin","password":"'$1'"}}' https://$EMS_ADDRESS/api/sessions >> ${LOG} 2>&1
}
# Poll the EMS after first boot until its cloud-provider API reports ready.
# Uses the factory "changeme" password; blocks indefinitely at 10s intervals.
function first_run {
#loop function to wait for EMS to complete loading after instance creation
while true; do
curl -k -s -D ${SESSION_FILE} -m 5 -H "Content-Type: application/json" -X POST -d '{"user": {"login":"admin","password":"changeme"}}' https://$EMS_ADDRESS/api/sessions
# Ready when the is_ready endpoint response contains "true".
emsresponse=`curl -k -s -b ${SESSION_FILE} -m 5 -H "Content-Type: application/json" -X GET https://$EMS_ADDRESS/api/cloud_providers/is_ready | grep true`
echo -e "Waiting for EMS init...\n" | tee -a ${LOG}
if [[ -n "$emsresponse" ]]; then
echo -e "EMS now ready!\n" | tee -a ${LOG}
break
fi
sleep 10
done
}
# Configure ECFS storage type
# "small" "medium" "large" "standard" "small standard" "local" "small local" "custom"
# Look up the cloud-configuration id whose name matches $1 and apply it
# (together with the load-balancer flag) to cloud provider 1.
function set_storage_type {
echo -e "Configure systems...\n" | tee -a ${LOG}
type_id="$(curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X GET https://$EMS_ADDRESS/api/cloud_configurations|grep -o -E '.{0,4}"name":"'$1'"'| cut -d ":" -f2| cut -d "," -f1 2>&1)"
echo -e "Setting storage type $1..." | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"id":1,"load_balancer_use":'$USE_LB',"cloud_configuration_id":"'$type_id'"}' https://$EMS_ADDRESS/api/cloud_providers/1 >> ${LOG} 2>&1
}
# Create a "legacy" custom cloud configuration and apply it to provider 1.
# $1 = storage type, $2 = "<num_disks>_<disk_size>", $3 = "<cpu_cores>_<ram>".
function set_storage_type_custom {
type=$1
disks=`echo $2 | cut -d "_" -f 1`
disk_size=`echo $2 | cut -d "_" -f 2`
cpu_cores=`echo $3 | cut -d "_" -f 1`
ram=`echo $3 | cut -d "_" -f 2`
echo -e "Configure systems...\n" | tee -a ${LOG}
echo -e "Setting custom storage type: $type, num of disks: $disks, disk size=$disk_size cpu cores: $cpu_cores, ram: $ram \n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"name":"legacy","storage_type":"'$type'","num_of_disks":'$disks',"disk_size":'$disk_size',"instance_type":"custom","cores":'$cpu_cores',"memory":'$ram',"min_num_of_instances":3}' https://$EMS_ADDRESS/api/cloud_configurations >> ${LOG} 2>&1
# Re-read the configurations to find the id assigned to the new "legacy" entry.
type_id="$(curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X GET https://$EMS_ADDRESS/api/cloud_configurations|grep -o -E '.{0,4}"name":"legacy"'| cut -d ":" -f2| cut -d "," -f1 2>&1)"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"id":1,"load_balancer_use":'$USE_LB',"cloud_configuration_id":"'$type_id'"}' https://$EMS_ADDRESS/api/cloud_providers/1 >> ${LOG} 2>&1
}
# One-time EMS setup: accept the EULA, register system/contact info, select
# the storage type, configure single vs multi-zone, and set the LB VIP.
function setup_ems {
#accept EULA
echo -e "\nAccepting EULA.. \n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"id":1}' https://$EMS_ADDRESS/api/systems/1/accept_eula >> ${LOG} 2>&1
#configure EMS
echo -e "Configure EMS...\n" | tee -a ${LOG}
echo -e "\nGet cloud provider id 1\n" | tee -a ${LOG}
curl -k -s -b ${SESSION_FILE} --request GET --url "https://$EMS_ADDRESS/api/cloud_providers/1" >> ${LOG} 2>&1
echo -e "\nValidate project configuration\n" | tee -a ${LOG}
curl -k -s -b ${SESSION_FILE} --request GET --url "https://$EMS_ADDRESS/api/cloud_providers/1/validate" >> ${LOG} 2>&1
echo -e "Configure systems...\n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"name":"'$EMS_NAME'","replication_level":'$REPLICATION',"show_wizard":false,"eula":true,"registration_info":{"company_name":"'$COMPANY_NAME'","contact_person_name":"'$CONTACT_PERSON_NAME'","email_address":"'$EMAIL_ADDRESS'","receive_marketing_updates":false}}' https://$EMS_ADDRESS/api/systems/1 >> ${LOG} 2>&1
if [[ ${NUM_OF_VMS} == 0 ]]; then
echo -e "0 VMs configured, skipping set storage type.\n"
elif [[ ${CONFIGTYPE} == "custom" ]]; then
echo -e "Set storage type custom $DISKTYPE $DISK_CONFIG $VM_CONFIG \n" | tee -a ${LOG}
set_storage_type_custom ${DISKTYPE} ${DISK_CONFIG} ${VM_CONFIG}
else
echo -e "Set storage type ${CONFIGTYPE} \n" | tee -a ${LOG}
set_storage_type ${CONFIGTYPE}
fi
# Multizone requires exactly 3 distinct zones; zones not in the requested
# list are disabled one by one ($i tracks the zone's API id by position).
if [[ ${DEPLOYMENT_TYPE} == "multizone" ]]; then
echo -e "Multi Zone.\n" | tee -a ${LOG}
echo -e "Multi Zone.\n"
if [[ $(echo ${AVAILABILITY_ZONES} | awk -v RS="," '{print $1}'| sort | uniq | wc -l) -ne 3 ]]; then
echo "Zone list should be 3"
exit 1
fi
all_zones=$(curl -k -s -b ${SESSION_FILE} --request GET --url "https://"${EMS_ADDRESS}"/api/availability_zones" | jsonValue name | sed s'/[,]$//')
echo -e "$all_zones"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"availability_zone_use":true}' https://${EMS_ADDRESS}/api/cloud_providers/1 >> ${LOG} 2>&1
let i=1
for zone in ${all_zones//,/ }; do
zone_exists=`echo $AVAILABILITY_ZONES | grep $zone`
if [[ ${zone_exists} == "" ]]; then
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"enable":false}' https://${EMS_ADDRESS}/api/availability_zones/$i >> ${LOG} 2>&1
fi
let i++
done
else
echo -e "Single Zone.\n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"availability_zone_use":false}' https://${EMS_ADDRESS}/api/cloud_providers/1 >> ${LOG} 2>&1
fi
# LB VIP: explicit value wins; "auto" queries the EMS for one when the
# elastifile load balancer is in use; otherwise DNS mode (no VIP set).
if [[ ${LB_VIP} != "auto" ]]; then
echo -e "\n LB_VIP "${LB_VIP}" \n" | tee -a ${LOG}
echo -e "\n LB_VIP "${LB_VIP}" \n"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"load_balancer_vip":"'${LB_VIP}'"}' https://$EMS_ADDRESS/api/cloud_providers/1 >> ${LOG} 2>&1
elif [[ ${USE_LB} = true && ${LB_VIP} == "auto" ]]; then
LB_VIP=$(curl -k -s -b ${SESSION_FILE} --request GET --url "https://"${EMS_ADDRESS}"/api/cloud_providers/1/lb_vip" | jsonValue vip | sed s'/[,]$//')
echo -e "\n LB_VIP "${LB_VIP}" \n" | tee -a ${LOG}
echo -e "\n LB_VIP "${LB_VIP}" \n"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"load_balancer_vip":"'${LB_VIP}'"}' https://$EMS_ADDRESS/api/cloud_providers/1 >> ${LOG} 2>&1
else
echo -e "\n DNS mode \n" | tee -a ${LOG}
fi
}
# Kickoff a create vhead instances job
# Async API call: create $1 vhead instances and auto-start them.
# NOTE(review): the message prints $NUM_OF_VMS while the call uses $1 —
# identical at the current call site, but keep in sync if reused.
function create_instances {
echo -e "Creating $NUM_OF_VMS ECFS instances\n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"instances":'$1',"async":true,"auto_start":true}' https://$EMS_ADDRESS/api/hosts/create_instances >> ${LOG} 2>&1
}
# Function to check running job status
# Poll the most recent control task of type $1 every 10s until it reports
# "success" (return) or "error" (exit 1).
function job_status {
while true; do
# NOTE(review): extracting the status by comma-field position (cut -f 7)
# is fragile against API response changes — consider jsonValue instead.
STATUS=`curl -k -s -b ${SESSION_FILE} --request GET --url "https://$EMS_ADDRESS/api/control_tasks/recent?task_type=$1" | grep status | cut -d , -f 7 | cut -d \" -f 4`
echo -e "$1 : $STATUS " | tee -a ${LOG}
if [[ $STATUS == "success" ]]; then
echo -e "$1 Complete! \n" | tee -a ${LOG}
sleep 5
break
fi
if [[ $STATUS == "error" ]]; then
echo -e "$1 Failed. Exiting..\n" | tee -a ${LOG}
exit 1
fi
sleep 10
done
}
# Create data containers
# Create the default data container with a 1000GB-quota NFS export at
# /$DATA_CONTAINER/root (skipped when no vheads were requested).
function create_data_container {
if [[ $NUM_OF_VMS != 0 ]]; then
echo -e "Create data container & 1000GB NFS export /$DATA_CONTAINER/root\n" | tee -a ${LOG}
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"name":"'$DATA_CONTAINER'","dedup":0,"compression":1,"soft_quota":{"bytes":1073741824000},"hard_quota":{"bytes":1073741824000},"policy_id":1,"dir_uid":0,"dir_gid":0,"dir_permissions":"755","data_type":"general_purpose","namespace_scope":"global","exports_attributes":[{"name":"root","path":"/","user_mapping":"remap_all","uid":0,"gid":0,"access_permission":"read_write","client_rules_attributes":[],"namespace_scope":"global","data_type":"general_purpose"}]}' https://$EMS_ADDRESS/api/data_containers >> ${LOG} 2>&1
fi
}
# Provision and deploy
# Create the requested vhead instances and wait for the deployment jobs.
function add_capacity {
if [[ $NUM_OF_VMS == 0 ]]; then
echo -e "0 VMs configured, skipping create instances\n"
else
create_instances $NUM_OF_VMS
job_status "create_instances_job"
# Only 3-2-* images run a separate activate job after instance creation.
if [[ $(echo $IMAGE | sed 's/^\(elastifile-storage\|emanage\)-//g') == 3-2-* ]]; then
echo "Start cluster deployment\n" | tee -a ${LOG}
job_status "activate_emanage_job"
fi
fi
}
# Replace the factory "changeme" admin password with $PASSWORD (when set)
# and re-establish the API session with the new credentials.
function change_password {
if [[ "x$PASSWORD" != "x" ]]; then
echo -e "Updating password...\n" | tee -a ${LOG}
#update ems password
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X PUT -d '{"user":{"id":1,"login":"admin","first_name":"Super","email":"admin@example.com","current_password":"changeme","password":"'$PASSWORD'","password_confirmation":"'$PASSWORD'"}}' https://$EMS_ADDRESS/api/users/1 >> ${LOG} 2>&1
echo -e "Establish new https session using updated PASSWORD...\n" | tee -a ${LOG}
establish_session $PASSWORD
fi
}
# ilm
# Auto-configure the clear (object) tier when "-i true" was passed.
function enable_clear_tier {
if [[ $ILM == "true" ]]; then
echo -e "auto configuraing clear tier\n"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST https://$EMS_ADDRESS/api/cc_services/auto_setup
fi
}
# asyncdr
# Create two auto-started replication-agent instances when "-k true" was passed.
function enable_async_dr {
if [[ $ASYNC_DR == "true" ]]; then
echo -e "auto configuraing async dr\n"
curl -k -b ${SESSION_FILE} -H "Content-Type: application/json" -X POST -d '{"instances":2,"auto_start":true}' https://$EMS_ADDRESS/api/hosts/create_replication_agent_instance
fi
}
# Main
# Ordering matters: the EMS must be ready and configured before capacity is
# added, and change_password re-authenticates the session used afterwards.
first_run
setup_ems
add_capacity
create_data_container
change_password
enable_async_dr
enable_clear_tier
| true
|
c8bdaf933344a7bbf8edb175bf51941c57c89775
|
Shell
|
davidjohnplatt/sh
|
/moveCanonDropboxPics.sh
|
UTF-8
| 1,825
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Move Canon pictures out of ~/Dropbox/Camera Uploads into
# ~/Pictures/YYYY/MM/DD, after replacing blanks in filenames with
# underscores. Files already present at the destination are skipped.
#
pictureDir=/home/david/Pictures
cd ~/Dropbox/Camera\ Uploads/ || exit 1
pwd
#
# find jpg files and replace the blank with an underscore
# (bug fix: the original ran `export $new`, which tried to export a variable
# named after the file and printed an error for every renamed file)
#
find . -name "* *.jpg" -print | while read -r file
do
    new=$(echo "$file" | sed 's/ /_/g')
    mv "$file" "$new"
    echo "$new"
done
#
# get the year, month and day from each filename (YYYY-MM-DD...) and build
# the required directories; mkdir -p replaces the original nested existence
# checks in one call
#
for i in [0-9][0-9][0-9][0-9]*.jpg
do
    [ -e "$i" ] || continue            # skip the literal glob when no match
    year=${i:0:4}
    month=${i:5:2}
    day=${i:8:2}
    mkdir -p "$pictureDir/$year/$month/$day"
done
#
# copy the files to the proper directory if they do not exist there
#
for i in [0-9][0-9][0-9][0-9]*.jpg
do
    [ -e "$i" ] || continue
    year=${i:0:4}
    month=${i:5:2}
    day=${i:8:2}
    echo "$year $month $day"
    if [ -f "$pictureDir/$year/$month/$day/$i" ]; then
        echo "not copying file $pictureDir/$year/$month/$day/$i because file exists"
    else
        cp "$i" "$pictureDir/$year/$month/$day"
        echo "copying file $pictureDir/$year/$month/$day/$i"
    fi
done
| true
|
9b8455612c741c9220c3cdd62effcc84fa0bd986
|
Shell
|
justinbloomfield/dotfiles
|
/scripts/bin/timer
|
UTF-8
| 303
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Wait until the wall clock reaches HH:MM, then play an alarm and push the
# message to the ntfy and dvtm FIFOs. Runs in the background immediately.
#NTFY_FIFO_PATH=/tmp/ntfy.fifo
usage() {
    echo "Usage: timer <hour>:<minute> [message]"
    exit 1
}
# bug fix: usage() was defined but never invoked; require an HH:MM argument.
case "${1:-}" in
    *:*) ;;
    *) usage ;;
esac
{
    while [ "$(date +%H:%M)" != "$1" ]; do
        sleep 1
    done
    mpv ~/etc/alarm.wav
    message=${2:-"DING DONG"}
    # NOTE(review): NTFY_FIFO_PATH and DVTM_FIFO are expected from the
    # environment (the local default above is commented out) — confirm.
    echo "$message" > "${NTFY_FIFO_PATH}"
    echo "$message" > "${DVTM_FIFO}"
    sleep 3
} &
| true
|
2ffe45a5050ae1e1055ab43b4d374be2f0985c44
|
Shell
|
0x0916/lfs
|
/02-build-lfs/03-glibc.sh
|
UTF-8
| 3,209
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
NAME=glibc-2.28
TAR=tar.xz
SOURCE=/sources
cd $SOURCE
rm -fr $SOURCE/$NAME
tar -xf $NAME.$TAR
cd $SOURCE/$NAME
#From LFS
patch -Np1 -i ../glibc-2.28-fhs-1.patch
ln -sfv /tools/lib/gcc /usr/lib
case $(uname -m) in
i?86) GCC_INCDIR=/usr/lib/gcc/$(uname -m)-pc-linux-gnu/8.2.0/include
ln -sfv ld-linux.so.2 /lib/ld-lsb.so.3
;;
x86_64) GCC_INCDIR=/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.0/include
ln -sfv ../lib/ld-linux-x86-64.so.2 /lib64
ln -sfv ../lib/ld-linux-x86-64.so.2 /lib64/ld-lsb-x86-64.so.3
;;
esac
rm -f /usr/include/limits.h
mkdir -v build
cd build
CC="gcc -isystem $GCC_INCDIR -isystem /usr/include" \
../configure --prefix=/usr \
--disable-werror \
--enable-kernel=3.2 \
--enable-stack-protector=strong \
libc_cv_slibdir=/lib
unset GCC_INCDIR
make -j100
#make -j100 check
touch /etc/ld.so.conf
sed '/test-installation/s@$(PERL)@echo not running@' -i ../Makefile
make install
cp -v ../nscd/nscd.conf /etc/nscd.conf
mkdir -pv /var/cache/nscd
mkdir -pv /usr/lib/locale
localedef -i cs_CZ -f UTF-8 cs_CZ.UTF-8
localedef -i de_DE -f ISO-8859-1 de_DE
localedef -i de_DE@euro -f ISO-8859-15 de_DE@euro
localedef -i de_DE -f UTF-8 de_DE.UTF-8
localedef -i en_GB -f UTF-8 en_GB.UTF-8
localedef -i en_HK -f ISO-8859-1 en_HK
localedef -i en_PH -f ISO-8859-1 en_PH
localedef -i en_US -f ISO-8859-1 en_US
localedef -i en_US -f UTF-8 en_US.UTF-8
localedef -i es_MX -f ISO-8859-1 es_MX
localedef -i fa_IR -f UTF-8 fa_IR
localedef -i fr_FR -f ISO-8859-1 fr_FR
localedef -i fr_FR@euro -f ISO-8859-15 fr_FR@euro
localedef -i fr_FR -f UTF-8 fr_FR.UTF-8
localedef -i it_IT -f ISO-8859-1 it_IT
localedef -i it_IT -f UTF-8 it_IT.UTF-8
localedef -i ja_JP -f EUC-JP ja_JP
localedef -i ru_RU -f KOI8-R ru_RU.KOI8-R
localedef -i ru_RU -f UTF-8 ru_RU.UTF-8
localedef -i tr_TR -f UTF-8 tr_TR.UTF-8
localedef -i zh_CN -f GB18030 zh_CN.GB18030
make localedata/install-locales
# Configuring Glibc
cat > /etc/nsswitch.conf << "EOF"
# Begin /etc/nsswitch.conf
passwd: files
group: files
shadow: files
hosts: files dns
networks: files
protocols: files
services: files
ethers: files
rpc: files
# End /etc/nsswitch.conf
EOF
tar -xf ../../tzdata2018e.tar.gz
ZONEINFO=/usr/share/zoneinfo
mkdir -pv $ZONEINFO/{posix,right}
for tz in etcetera southamerica northamerica europe africa antarctica \
asia australasia backward pacificnew systemv; do
zic -L /dev/null -d $ZONEINFO -y "sh yearistype.sh" ${tz}
zic -L /dev/null -d $ZONEINFO/posix -y "sh yearistype.sh" ${tz}
zic -L leapseconds -d $ZONEINFO/right -y "sh yearistype.sh" ${tz}
done
cp -v zone.tab zone1970.tab iso3166.tab $ZONEINFO
zic -d $ZONEINFO -p America/New_York
unset ZONEINFO
#tzselect
cp -v /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat > /etc/ld.so.conf << "EOF"
# Begin /etc/ld.so.conf
/usr/local/lib
/opt/lib
EOF
cat >> /etc/ld.so.conf << "EOF"
# Add an include directory
include /etc/ld.so.conf.d/*.conf
EOF
mkdir -pv /etc/ld.so.conf.d
#From LFS end
cd $SOURCE
rm -fr $SOURCE/$NAME
| true
|
0b86925cc415e106c37d2dc65aa7ae05e539c0f7
|
Shell
|
peterhel/proxmox-utils
|
/shrink-disk
|
UTF-8
| 528
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Shrink a qcow2 disk image by re-converting it over itself; a backup copy
# is written first. Zero out free space inside the guest beforehand so the
# conversion can drop the empty clusters.
echo "For further instructions: https://pve.proxmox.com/wiki/Shrink_Qcow2_Disk_Files"
echo "Prepare the linux guest"
echo "dd if=/dev/zero of=/mytempfile"
echo "# that could take a some time"
echo "rm -f /mytempfile"
echo
USAGE="usage: $0 image.qcow2 image.qcow2_backup"
echo
if [ -z "$1" ]; then echo "$USAGE"; exit 1; fi
if [ -z "$2" ]; then echo "$USAGE"; exit 1; fi
cp -- "$1" "$2"
# Bug fix: the output-format flag is a capital letter O ("-O qcow2"),
# not the digit zero ("-0"), which qemu-img rejects as an unknown option.
qemu-img convert -O qcow2 "$2" "$1"
# with compression: qemu-img convert -O qcow2 -c image.qcow2_backup image.qcow2"
| true
|
cdb77882dcd475325cb58a8a3feb5d3ce6aa1ea7
|
Shell
|
ArchonAmon/Labosi
|
/Bash/zadatak2.sh
|
UTF-8
| 536
| 2.578125
| 3
|
[] |
no_license
|
# Bash lab exercise: grep/find/loop drills over a groceries file.
#a) list every line naming one of the five fruits, case-insensitively
grep -i "banana\|jabuka\|jagoda\|dinja\|lubenica" "/projekti(2.zadatak)/namirnice.txt"
#b) write every non-fruit line (-v inverts the match) to ne-voce.txt ("not fruit")
grep -i -v "banana\|jabuka\|jagoda\|dinja\|lubenica" "/projekti(2.zadatak)/namirnice.txt" > "/projekti(2.zadatak)/ne-voce.txt"
#c) recursively match codes of 3 uppercase letters followed by 6 digits, as whole words
grep -r -E "\<[A-Z]{3}[0-9]{6}\>" "C:\\Programiranje\\Bash_playground\\projekti(2.zadatak)" #-r reads all subdirectories recursively; also, the directory should have been ~/projekti; \< anchors the start of a word, \> anchors the end of a word
#d) list files modified more than 7 but less than 14 days ago
find ./ -mtime +7 -mtime -14 -ls
#e) print the numbers 1 through 15
for i in {1..15}; do echo $i; done
| true
|
2bf6c8896742ff381c061e9d1aa4b353ec1cd95c
|
Shell
|
fabianonunes/sony-t13-fnkeys
|
/package/usr/bin/brightnessup
|
UTF-8
| 542
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Raise the intel backlight by one step (5% of the maximum), clamped at
# max, then show a synchronous on-screen brightness notification on :0.
backlight=/sys/class/backlight/intel_backlight
b=$(head -1 "$backlight/brightness")
max=$(head -1 "$backlight/max_brightness")
step=$(( max / 20 ))            # one step = 5% of the full range
if [ "$b" -lt $(( max - step )) ]
then
	nb=$(( b + step ))
else
	nb=$max                 # already near the top: clamp at the maximum
fi
# Writing the sysfs file needs root; tee runs under sudo, echo does not.
echo "$nb" | sudo tee "$backlight/brightness"
percentage=$(( nb * 100 / max ))
# Find the user owning display :0 so the notification lands in their session.
xuser=$(who | grep ":0" | awk '{print $1}' | tail -n1)
su "$xuser" -c "DISPLAY=:0 notify-send ' ' -i 'notification-display-brightness-medium' -h int:value:$percentage -h string:synchronous:volume"
| true
|
17404e74a5eedaea552de1a78de1f6f80bff31b0
|
Shell
|
wing-888/MOR_X5_FROM_VM
|
/x5/helpers/.svn/text-base/elunia_stats_install.sh.svn-base
|
UTF-8
| 20,804
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#==== Includes=====================================
. /usr/src/mor/sh_scripts/install_configs.sh
. /usr/src/mor/sh_scripts/mor_install_functions.sh
#====end of Includes===========================
. /usr/src/mor/x5/framework/bash_functions.sh
if [ ! -f "/usr/bin/rrdtool" ]; then
yum -y install rrdtool
fi
which_os # keep it here, do not remove
PSWS=`< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c10`
( cd /sys/class/net && printf "%s\n" * ) >/tmp/interfaces
if [ $OS = "CENTOS" ]; then
yum -y install rrdtool-perl rrdtool-devel rrdtool
if [ ! -f /tmp/elunastats.tar.gz ]; then
yum -y install perl-Module-Build perl-DateTime perl-CGI
cd /tmp
rm -rf stats
rm -rf elunastats
wget http://$KOLMISOFT_IP/packets/elunastats.tar.gz
tar xvfz elunastats.tar.gz
fi
if [ ! -d /var/www/html/stats ]; then
mkdir -p /var/www/html/stats
cd /tmp/stats
rpm --install /tmp/stats/rr/libart_lgpl-2.3.17-4.i386.rpm
rpm --install /tmp/stats/rr/libart_lgpl-devel-2.3.17-4.i386.rpm
rpm --install /tmp/stats/rr/rrdtool-1.2.19-1.el5.kb.i386.rpm
tar xvfz /tmp/stats/DateTime-0.4501.tar.gz
tar xvfz /tmp/stats/DateTime-Locale-0.42.tar.gz
tar xvfz /tmp/stats/DateTime-TimeZone-0.8301.tar.gz
tar xvfz /tmp/stats/HTML-Template-2.9.tar.gz
tar xvfz /tmp/stats/HTML-Template-Expr-0.07.tar.gz
tar xvfz /tmp/stats/List-MoreUtils-0.22.tar.gz
tar xvfz /tmp/stats/Params-Validate-0.91.tar.gz
tar xvfz /tmp/stats/Parse-RecDescent-1.96.0.tar.gz
tar xvfz /tmp/stats/version-0.76.tar.gz
tar xvfz /tmp/stats/elunastats.tar.gz
cp -R /tmp/stats/stats /var/www/html
cd /tmp/stats/DateTime-0.4501
perl Makefile.PL
make
make install
cd /tmp/stats/DateTime-Locale-0.42
perl Makefile.PL
make
make install
cd /tmp/stats/DateTime-TimeZone-0.8301
perl Makefile.PL
make
make install
cd /tmp/stats/HTML-Template-2.9
perl Makefile.PL
make
make install
cd /tmp/stats/HTML-Template-Expr-0.07
perl Makefile.PL
make
make install
cd /tmp/stats/List-MoreUtils-0.22
perl Makefile.PL
make
make install
cd /tmp/stats/Params-Validate-0.91
perl Makefile.PL
make
make install
cd /tmp/stats/Parse-RecDescent-1.96.0
perl Makefile.PL
make
make install
cd /tmp/stats/version-0.76
perl Makefile.PL
make
make install
rm -rf /tmp/stats
rm -rf /tmp/elunastats.tar.gz
cd /tmp
checkforcgi=`cat /etc/httpd/conf/httpd.conf | grep /var/www/html/stats/`
if [ ! "$checkforcgi" = "<Directory /var/www/html/stats/>" ]; then
echo "<Directory /var/www/html/stats/>
AddHandler cgi-script .pl
Options +ExecCGI
DirectoryIndex index.pl
</Directory>" >>/etc/httpd/conf/httpd.conf # apache will be restarted later
fi
fi #done with install
#cron check
checkforcron=`crontab -l | grep /var/www/html/stats/update.pl`
rm -rf /tmp/crontab.tmp
crontab -l >/tmp/crontab.tmp
# test()
if [ ! "$checkforcron" = "*/5 * * * * /var/www/html/stats/update.pl" ]; then
echo "*/5 * * * * /var/www/html/stats/update.pl" >>/tmp/crontab.tmp
fi
/usr/bin/crontab /tmp/crontab.tmp
#(main)
for interfacename2 in `cat /tmp/interfaces`; do
if [ ! -d /var/www/html/stats/rrd/${interfacename2}_in ]; then
mkdir -p /var/www/html/stats/rrd/${interfacename2}_in
fi
biginterfacename2=$(awk -v v="$interfacename2" 'BEGIN{print toupper(v)}') # translate var to VAR
if [ ! -f /var/www/html/stats/rrd/${interfacename2}_in/create.sh ]; then
echo "#!/bin/bash
rrdtool create ${interfacename2}_in.rrd \\
--start \`date +%s\` \\
--step 300 \\
DS:in:COUNTER:600:0:U \\
RRA:AVERAGE:0.5:1:2016 \\
RRA:AVERAGE:0.5:6:1344 \\
RRA:AVERAGE:0.5:24:732 \\
RRA:AVERAGE:0.5:144:1460" >>/var/www/html/stats/rrd/${interfacename2}_in/create.sh
chmod 755 /var/www/html/stats/rrd/${interfacename2}_in/create.sh
fi
if [ ! -f /var/www/html/stats/rrd/${interfacename2}_in/graph.pm ]; then
echo "\$GRAPH_TITLES{'${interfacename2}_in'} = \"{#server#} - $interfacename2 Inbound Traffic\";
\$GRAPH_CMDS{'${interfacename2}_in'} = <<\"${biginterfacename2}_IN_GRAPH_CMD\";
--title \"{#server#} - $biginterfacename2 Inbound Traffic\"
--vertical-label=\"Bytes\"
--lower-limit 0
DEF:in={#path#}${interfacename2}_in.rrd:in:AVERAGE
AREA:in{#color5#}:\"Inbound \"
GPRINT:in:LAST:\"Current\\\: %5.2lf %s \"
GPRINT:in:AVERAGE:\"Average\\\: %5.2lf %s \"
GPRINT:in:MAX:\"Maximum\\\: %5.2lf %s\\\n\"
LINE1:in{#linecolor#}
${biginterfacename2}_IN_GRAPH_CMD
1; # Return true" >>/var/www/html/stats/rrd/${interfacename2}_in/graph.pm
chmod 755 /var/www/html/stats/rrd/${interfacename2}_in/graph.pm
fi
if [ ! -f /var/www/html/stats/rrd/${interfacename2}_in/update.sh ]; then
echo "#!/bin/bash
rrdtool update ${interfacename2}_in.rrd \\
-t in \\
N:\`/sbin/ifconfig $interfacename2 |grep bytes|cut -d\":\" -f2|cut -d\" \" -f1\`" >>/var/www/html/stats/rrd/${interfacename2}_in/update.sh
chmod 755 /var/www/html/stats/rrd/${interfacename2}_in/update.sh
fi
done
#OUT
#-----------------------create $interface_out----------------------------------------------
for interfacename in `cat /tmp/interfaces`; do
if [ ! -d /var/www/html/stats/rrd/${interfacename}_out ]; then
mkdir -p /var/www/html/stats/rrd/${interfacename}_out
fi
#$a = expr `ls | tail -n 1 | awk '{split ($0,a,"_"); print a[1]}'` + 1
biginterfacename=$(awk -v v="$interfacename" 'BEGIN{print toupper(v)}') # translate var to VAR
#echo $biginterfacename
#${interface}_out
if [ ! -f /var/www/html/stats/rrd/${interfacename}_out/create.sh ]; then
#GEN create.sh $interface_out----------------------------------------------------------------
echo -n "#!/bin/bash
rrdtool create " >>/var/www/html/stats/rrd/${interfacename}_out/create.sh
echo -n "$interfacename" >>/var/www/html/stats/rrd/${interfacename}_out/create.sh
echo -n "_out.rrd \\
--start \`date +%s\` \\
--step 300 \\
DS:out:COUNTER:600:0:U \\
RRA:AVERAGE:0.5:1:2016 \\
RRA:AVERAGE:0.5:6:1344 \\
RRA:AVERAGE:0.5:24:732 \\
RRA:AVERAGE:0.5:144:1460" >>/var/www/html/stats/rrd/${interfacename}_out/create.sh
chmod 755 /var/www/html/stats/rrd/${interfacename}_out/create.sh
fi
#----------------------------------------------------------end of gen create.sh
if [ ! -f /var/www/html/stats/rrd/${interfacename}_out/graph.pm ]; then
#GEN graph.pm $interface_out-------------------------------------------------------------------------
echo -n "\$GRAPH_TITLES{'" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out'} = \"{#server#} - $biginterfacename Outbound Traffic\";
\$GRAPH_CMDS{'" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out'} = <<\"" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "$biginterfacename" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "_OUT_GRAPH_CMD\";
--title \"{#server#} - $biginterfacename Outbound Traffic\"
--vertical-label=\"Bytes\"
--lower-limit 0
DEF:out={#path#}" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out.rrd:out:AVERAGE
AREA:out{#color5#}:\"Outbound \"
GPRINT:out:LAST:\"Current\\\: %5.2lf %s \"
GPRINT:out:AVERAGE:\"Average\\\: %5.2lf %s \"
GPRINT:out:MAX:\"Maximum\\\: %5.2lf %s\\\n\"
LINE1:out{#linecolor#}" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo "" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo -n "$biginterfacename" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
echo "_OUT_GRAPH_CMD
1; # Return true" >>/var/www/html/stats/rrd/${interfacename}_out/graph.pm
chmod 755 /var/www/html/stats/rrd/${interfacename}_out/graph.pm
fi
#----------------------------------------------------------------END OF GEN graph.pm
if [ ! -f /var/www/html/stats/rrd/${interfacename}_out/update.sh ]; then
#GEN update.pl $interface_out----------------------------------------------------
echo -n "#!/bin/bash
rrdtool update " >>/var/www/html/stats/rrd/${interfacename}_out/update.sh
echo -n "$interfacename" >>/var/www/html/stats/rrd/${interfacename}_out/update.sh
echo -n "_out.rrd \\
-t out \\
N:\`/sbin/ifconfig $interfacename |grep bytes|cut -d\":\" -f3|cut -d\" \" -f1\`" >>/var/www/html/stats/rrd/${interfacename}_out/update.sh
chmod 755 /var/www/html/stats/rrd/${interfacename}_out/update.sh
fi
#-----------------------------------------------------------------
done
# gen psw
if [ ! -f /var/www/html/stats/.htpasswd ]; then
touch /var/www/html/stats/.htpasswd
htpasswd -b -m /var/www/html/stats/.htpasswd admin $PSWS
rm -rf /root/statspassword
touch /root/statsPassword
echo "Your Login and Password from stats system is: admin $PSWS" >/root/statsPassword
fi
if [ ! -f /var/www/html/stats/.htaccess ]; then
touch /var/www/html/stats/.htaccess
echo "AuthUserFile /var/www/html/stats/.htpasswd
AuthName \"Restricted access, password located in /root/statsPassword file\"
AuthType Basic
Require valid-user" > /var/www/html/stats/.htaccess
/etc/init.d/httpd restart
fi
chmod 777 /var/www/html/stats/graphs
echo "Updating... (This can take some minutes to complete if running first time)"
exec /var/www/html/stats/update.pl
#if [ $OS = "CENTOS" ]; then
#done for centos
elif [ $OS = "DEBIAN" ]; then
checkforcgi2=`cat /etc/apache2/apache2.conf | grep /var/www/stats/`
if [ ! "$checkforcgi2" = "<Directory /var/www/stats/>" ]; then
echo "<Directory /var/www/stats/>
AddHandler cgi-script .pl
Options +ExecCGI
DirectoryIndex index.pl
</Directory>" >>/etc/apache2/apache2.conf
fi
if [ ! -f /usr/bin/rrdtool ]; then
apt-get -y install rrdtool
fi
( cd /sys/class/net && printf "%s\n" * ) >/tmp/interfaces
if [ ! -f /tmp/elunastats.tar.gz ]; then
cd /tmp
rm -rf stats
rm -rf elunastats
wget http://$KOLMISOFT_IP/packets/elunastats.tar.gz
tar xvfz elunastats.tar.gz
fi
if [ ! -d /var/www/stats ]; then
mkdir -p /var/www/stats
cd /tmp/stats
tar xvfz /tmp/stats/DateTime-0.4501.tar.gz
tar xvfz /tmp/stats/DateTime-Locale-0.42.tar.gz
tar xvfz /tmp/stats/DateTime-TimeZone-0.8301.tar.gz
tar xvfz /tmp/stats/HTML-Template-2.9.tar.gz
tar xvfz /tmp/stats/HTML-Template-Expr-0.07.tar.gz
tar xvfz /tmp/stats/List-MoreUtils-0.22.tar.gz
tar xvfz /tmp/stats/Params-Validate-0.91.tar.gz
tar xvfz /tmp/stats/Parse-RecDescent-1.96.0.tar.gz
tar xvfz /tmp/stats/version-0.76.tar.gz
tar xvfz /tmp/stats/elunastats.tar.gz
cp -R /tmp/stats/stats /var/www
cd /tmp/stats/DateTime-0.4501
perl Makefile.PL
make
make install
cd /tmp/stats/DateTime-Locale-0.42
perl Makefile.PL
make
make install
cd /tmp/stats/DateTime-TimeZone-0.8301
perl Makefile.PL
make
make install
cd /tmp/stats/HTML-Template-2.9
perl Makefile.PL
make
make install
cd /tmp/stats/HTML-Template-Expr-0.07
perl Makefile.PL
make
make install
cd /tmp/stats/List-MoreUtils-0.22
perl Makefile.PL
make
make install
cd /tmp/stats/Params-Validate-0.91
perl Makefile.PL
make
make install
cd /tmp/stats/Parse-RecDescent-1.96.0
perl Makefile.PL
make
make install
cd /tmp/stats/version-0.76
perl Makefile.PL
make
make install
rm -rf /tmp/stats
rm -rf /tmp/elunastats.tar.gz
cd /tmp
fi #done with install
#cron check and maybe install()
checkforcron=`crontab -l | grep /var/www/stats/update.pl`
rm -rf /tmp/crontab.tmp
crontab -l >/tmp/crontab.tmp
# test()
if [ ! "$checkforcron" = "*/5 * * * * /var/www/stats/update.pl" ]; then
echo "*/5 * * * * /var/www/stats/update.pl" >>/tmp/crontab.tmp
fi
/usr/bin/crontab /tmp/crontab.tmp
#(main)
for interfacename2 in `cat /tmp/interfaces`; do
if [ ! -d /var/www/stats/rrd/${interfacename2}_in ]; then
mkdir -p /var/www/stats/rrd/${interfacename2}_in
fi
biginterfacename2=$(awk -v v="$interfacename2" 'BEGIN{print toupper(v)}') # translate var to VAR
#'
if [ ! -f /var/www/stats/rrd/${interfacename2}_in/create.sh ]; then
echo "#!/bin/bash
rrdtool create ${interfacename2}_in.rrd --start \`date +%s\` --step 300 DS:in:COUNTER:600:0:U RRA:AVERAGE:0.5:1:2016 RRA:AVERAGE:0.5:6:1344 RRA:AVERAGE:0.5:24:732 RRA:AVERAGE:0.5:144:1460" >>/var/www/stats/rrd/${interfacename2}_in/create.sh
chmod 755 /var/www/stats/rrd/${interfacename2}_in/create.sh
fi
if [ ! -f /var/www/stats/rrd/${interfacename2}_in/graph.pm ]; then
echo "\$GRAPH_TITLES{'${interfacename2}_in'} = \"{#server#} - $interfacename2 Inbound Traffic\";
\$GRAPH_CMDS{'${interfacename2}_in'} = <<\"${biginterfacename2}_IN_GRAPH_CMD\";
--title \"{#server#} - $biginterfacename2 Inbound Traffic\"
--vertical-label=\"Bytes\"
--lower-limit 0
DEF:in={#path#}${interfacename2}_in.rrd:in:AVERAGE
AREA:in{#color5#}:\"Inbound \"
GPRINT:in:LAST:\"Current\\\: %5.2lf %s \"
GPRINT:in:AVERAGE:\"Average\\\: %5.2lf %s \"
GPRINT:in:MAX:\"Maximum\\\: %5.2lf %s\\\n\"
LINE1:in{#linecolor#}
${biginterfacename2}_IN_GRAPH_CMD
1; # Return true" >>/var/www/stats/rrd/${interfacename2}_in/graph.pm
chmod 755 /var/www/stats/rrd/${interfacename2}_in/graph.pm
fi
if [ ! -f /var/www/stats/rrd/${interfacename2}_in/update.sh ]; then
echo "#!/bin/bash
rrdtool update ${interfacename2}_in.rrd -t in N:\`/sbin/ifconfig $interfacename2 |grep bytes|cut -d\":\" -f2|cut -d\" \" -f1\`" >>/var/www/stats/rrd/${interfacename2}_in/update.sh
chmod 755 /var/www/stats/rrd/${interfacename2}_in/update.sh
fi
done
#OUT
#-----------------------create $interface_out----------------------------------------------
for interfacename in `cat /tmp/interfaces`; do
if [ ! -d /var/www/stats/rrd/${interfacename}_out ]; then
mkdir -p /var/www/stats/rrd/${interfacename}_out
fi
#$a = expr `ls | tail -n 1 | awk '{split ($0,a,"_"); print a[1]}'` + 1
biginterfacename=$(awk -v v="$interfacename" 'BEGIN{print toupper(v)}') # translate var to VAR
#echo $biginterfacename
#${interface}_out
#'
if [ ! -f /var/www/html/rrd/${interfacename}_out/create.sh ]; then
#GEN create.sh $interface_out----------------------------------------------------------------
echo -n "#!/bin/bash
rrdtool create " >>/var/www/stats/rrd/${interfacename}_out/create.sh
echo -n "$interfacename" >>/var/www/stats/rrd/${interfacename}_out/create.sh
echo -n "_out.rrd --start \`date +%s\` --step 300 DS:out:COUNTER:600:0:U RRA:AVERAGE:0.5:1:2016 RRA:AVERAGE:0.5:6:1344 RRA:AVERAGE:0.5:24:732 RRA:AVERAGE:0.5:144:1460" >>/var/www/stats/rrd/${interfacename}_out/create.sh
chmod 755 /var/www/stats/rrd/${interfacename}_out/create.sh
fi
#----------------------------------------------------------end of gen create.sh
if [ ! -f /var/www/stats/rrd/${interfacename}_out/graph.pm ]; then
#GEN graph.pm $interface_out-------------------------------------------------------------------------
echo -n "\$GRAPH_TITLES{'" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out'} = \"{#server#} - $biginterfacename Outbound Traffic\";
\$GRAPH_CMDS{'" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out'} = <<\"" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "$biginterfacename" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "_OUT_GRAPH_CMD\";
--title \"{#server#} - $biginterfacename Outbound Traffic\"
--vertical-label=\"Bytes\"
--lower-limit 0
DEF:out={#path#}" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "$interfacename" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "_out.rrd:out:AVERAGE
AREA:out{#color5#}:\"Outbound \"
GPRINT:out:LAST:\"Current\\\: %5.2lf %s \"
GPRINT:out:AVERAGE:\"Average\\\: %5.2lf %s \"
GPRINT:out:MAX:\"Maximum\\\: %5.2lf %s\\\n\"
LINE1:out{#linecolor#}" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo "" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo -n "$biginterfacename" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
echo "_OUT_GRAPH_CMD
1; # Return true" >>/var/www/stats/rrd/${interfacename}_out/graph.pm
chmod 755 /var/www/stats/rrd/${interfacename}_out/graph.pm
fi
#----------------------------------------------------------------END OF GEN graph.pm
if [ ! -f /var/www/stats/rrd/${interfacename}_out/update.sh ]; then
#GEN update.pl $interface_out----------------------------------------------------
echo -n "#!/bin/bash
rrdtool update " >>/var/www/stats/rrd/${interfacename}_out/update.sh
echo -n "$interfacename" >>/var/www/stats/rrd/${interfacename}_out/update.sh
echo -n "_out.rrd -t out N:\`/sbin/ifconfig $interfacename |grep bytes|cut -d\":\" -f3|cut -d\" \" -f1\`" >>/var/www/stats/rrd/${interfacename}_out/update.sh
chmod 755 /var/www/stats/rrd/${interfacename}_out/update.sh
fi
#-----------------------------------------------------------------
done
# gen
if [ ! -f /var/www/stats/.htpasswd ]; then
touch /var/www/stats/.htpasswd
htpasswd -b -m /var/www/stats/.htpasswd admin $PSWS
rm -rf /root/statspassword
touch /root/statsPassword
echo "Your Login and Password from stats system is: admin $PSWS" >/root/statsPassword
fi
if [ ! -f /var/www/stats/.htaccess ]; then
touch /var/www/stats/.htaccess
echo "AuthUserFile /var/www/stats/.htpasswd
AuthName \"Restricted access, password located in /root/statsPassword file\"
AuthType Basic
Require valid-user" > /var/www/stats/.htaccess
/etc/init.d/apache2 restart
fi
chmod 777 /var/www/stats/graphs
echo "Updating... (This can take some minutes to complete if running first time)"
exec /var/www/stats/update.pl
fi #done for DEBIAN
#------------------------------------END------------------------------------------
| true
|
2a1154e4d10f3b92dc4cfffda360f1c41fba1c62
|
Shell
|
mpotr/heroku-buildpack-c
|
/bin/detect
|
UTF-8
| 159
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# bin/detect <build-dir>
# Heroku buildpack detect hook: print the buildpack name and exit 0 when
# the app contains at least one .c source file anywhere in its tree,
# otherwise exit 1 so the platform skips this buildpack.
BUILD_DIR=$1
cd "$BUILD_DIR" || exit 1
# Cleanup: the old `( ... ) && (echo "CPP" && exit 0) || exit 1` subshell
# chain only exited the subshells; an explicit if makes the status clear.
if ls -R | grep '\.c$' > /dev/null 2>&1; then
  echo "CPP"
  exit 0
else
  exit 1
fi
| true
|
eae019ee382c32229db5af3f5f12ad60467b63fa
|
Shell
|
kyktommy/dotfiles
|
/.bashrc
|
UTF-8
| 2,529
| 2.75
| 3
|
[] |
no_license
|
# JAVA
export PATH=$PATH:/Library/Java/JavaVirtualMachines/1.6.0_29-b11-402.jdk/Contents/Home/bin
export PATH=$PATH:/usr/local/apache-maven/apache-maven-3.0.4/bin:$HOME/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin:/usr/texbin:/usr/texbin/
#### Play
export PATH=$PATH:~/workspace/RunableX/play-2.1.1
#### Go
export GOPATH=$HOME/Code/go
export PATH=$PATH:$GOPATH/bin
### EDITOR
export EDITOR=vi
### POSTGRES
### postgres.app
export PATH="/Applications/Postgres.app/Contents/MacOS/bin:$PATH"
### Maven, android for android-bootstrap
export M2_HOME=/usr/share/maven
export M2_BIN=/usr/share/maven/bin
export ANDROID_HOME=~/workspace/androidSDK
export ANDROID_SDK=$ANDROID_HOME
export ANDROID_TOOLS=$ANDROID_HOME/tools
export PATH=$PATH:$M2_BIN:$ANDROID_HOME:$ANDROID_TOOLS
# Alias
alias ll='ls -laG'
#alias rake="noglob rake"
alias rake="bundle exec rake"
alias pserver="open http://localhost:8000 && python -m SimpleHTTPServer"
alias vim="/usr/local/bin/vim"
alias v="/usr/local/bin/vim"
alias vi="/usr/local/bin/vim"
alias e="/usr/local/Cellar/emacs/HEAD/Emacs.app/Contents/MacOS/Emacs -nw"
alias emacs=e
alias mou="open -a Mou"
alias blog="cd $HOME/Site/kyktommy.github.io/"
alias st="/Applications/Sublime\ Text.app/Contents/SharedSupport/bin/subl"
alias jist="jist -p -c"
alias htdocs="cd /Applications/MAMP/htdocs/"
alias mysql="/Applications/MAMP/Library/bin/mysql -uroot -p"
alias swipl="/usr/local/Cellar/swi-prolog/6.2.3/bin/swipl"
alias rl="source $HOME/.zshrc"
alias irb=use_pry
use_pry() {
if which pry >/dev/null; then
pry
else
irb
fi
}
# load nvm
. $HOME/nvm/nvm.sh
# rbenv init
eval "$(rbenv init - --no-rehash)"
# load kyk commands automatically
eval "$($HOME/.kyk/bin/kyk init -)"
# Opens the github page for the current git repository in your browser
# git@github.com:jasonneylon/dotfiles.git
# https://github.com/jasonneylon/dotfiles/
function gh() {
giturl=$(git config --get remote.origin.url)
if [ "$giturl" == "" ]
then
echo "Not a git repository or no remote.origin.url set"
exit 1;
fi
giturl=${giturl/git\@github\.com\:/https://github.com/}
giturl=${giturl/\.git//}
echo $giturl
open $giturl
}
# Random Number (32)
alias rand='ruby -e "puts rand(32**32).to_s(32)"'
# Ctags brew version
alias ctags="/usr/local/Cellar/ctags/5.8/bin/ctags"
export LANG="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
[ -n "$TMUX" ] && export TERM=xterm-256color
# if [ -x /usr/local/bin/tmux ]; then
# tmux attach -t base || tmux new -s base
# else
# echo "no tmux"
# fi
| true
|
7eb6736871bd906b62de228b74dd3f368dc39a2d
|
Shell
|
TinyTheBrontosaurus/rosbug_latched_singlecore
|
/launch/nominal_run.sh
|
UTF-8
| 214
| 2.953125
| 3
|
[] |
no_license
|
#! /bin/bash
# Launch the latched_singlecore "repro" ROS node in the background, then
# wait for the user to press enter before force-killing it.
# Run it, nothing special
rosrun latched_singlecore repro&
# Make it easy to kill this
# Build a literal newline in $cr (echo a newline plus '.', then strip the
# trailing '.') so the read prompt below spans two lines.
cr=`echo $'\n.'`
cr=${cr%.}
read -p "Press enter to kill $cr"
echo -n "Killing..."
pkill -9 repro
echo "Killed"
| true
|
9ff180909cf28b864e46a9fc1715966b6f3fdffa
|
Shell
|
studiometa/create-wordpress-project
|
/template/bin/db-export.sh
|
UTF-8
| 926
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Export the WordPress database to a timestamped, gzipped SQL dump under
# data/, printing a colorized banner and success/failure message.
# Config
PROJECT_ROOT=$( cd "$(dirname "${BASH_SOURCE[0]}")/.." ; pwd -P )
source $PROJECT_ROOT/bin/utils/colors.sh
DATE=$(date +"%Y%m%d-%H%M")
# NOTE(review): FILENAME is assigned but never used; the export path below
# rebuilds "$DATE.sql.gz" inline.
FILENAME="$DATE.sql.gz"
# Output result
echo ""
echo -e "  ${BLACK}#####################################################${COLOR_OFF}"
echo -e "  ${BLACK}#######  ${WHITE}STUDIO META WORDPRESS DATABASE EXPORT${BLACK}  #######${COLOR_OFF}"
echo -e "  ${BLACK}#####################################################${COLOR_OFF}"
echo ""
echo -e "  💾  ${WHITE}Exporting database...${COLOR_OFF}"
cd $PROJECT_ROOT
{
# NOTE(review): `$( ... )` substitutes the command's stdout and then tries
# to EXECUTE that output as a command. `wp config path` prints a file
# path, so this line attempts to execute that file; presumably a plain
# existence check (e.g. redirecting to /dev/null) was intended — confirm.
$(./vendor/bin/wp config path) \
&& $(./vendor/bin/wp db export - --add-drop-table | gzip > "$PROJECT_ROOT/data/$DATE.sql.gz") \
&& echo -e "  👍  ${WHITE}Database has been successfully exported to ${BLUE}data/$DATE.sql.gz${COLOR_OFF}"
} || {
echo -e "  🚫  ${WHITE}An error occured... Check the logs above to find out why.${COLOR_OFF}"
}
echo ""
| true
|
ee0abf11255b69f6d3521d9cc4c27f075724b58d
|
Shell
|
kampsj/shell_scripts
|
/restore_ubuntu.sh
|
UTF-8
| 6,273
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
clear
################################################################################
# Written By: "Jesse"
# Last update: 16-01-2020.
#===============================================================================
# Purpose:
# This script is used to get a fresh (x)ubuntu install up to speed.
# Useage: ./restore_ubuntu.sh
################################################################################
################################################################################
# Define Variables
################################################################################
now=$(date +'%m/%d/%Y %r')
################################################################################
# Define Functions
################################################################################
timestamp()
{
now=$(date +'%m/%d/%Y %r')
}
check_exit_status()
{
if [ $? -eq 0 ]
then
echo
echo "Success"
echo
else
echo
echo "[ERROR] Process Failed!"
echo
read -p "The last command exited with an error. Exit script? (y/n) " ans
if [ "$ans" == "y" ]
then
exit 1
fi
fi
}
reminder()
{
echo "##########################################################################"
echo "# Theme/icon reminder"
echo "##########################################################################"
echo
echo "Change the icon to Paper"
echo "Change the theme to Greybird-dark, also in the window manager"
echo
echo "##########################################################################"
echo "# Other software"
echo "##########################################################################"
echo
echo "Install Zoom and VS Code (download most recent debs)"
echo
echo "##########################################################################"
echo "# Misc"
echo "##########################################################################"
echo
echo "Run TLP"
echo
echo "Change the video drivers to proprietary (if you haven't already)"
echo
echo "##########################################################################"
echo "# Mouse gestures"
echo "##########################################################################"
echo
echo "Reboot the system now and run the following commands afterwards:"
echo
echo "sudo libinput-gestures/libinput-gestures-setup install"
echo "libinput-gestures/libinput-gestures-setup autostart"
echo "libinput-gestures/libinput-gestures-setup start"
echo "sudo python3 gestures/setup.py install"
}
################################################################################
# Main Processing
################################################################################
timestamp
echo "##########################################################################"
echo "# Setup process started by $USER on $now"
echo "##########################################################################"
echo
#-------------------------------------------------------------------------------
# Make sure base system is up to date first.
#-------------------------------------------------------------------------------
echo "--------------------------------------------------------------------------"
echo "- apt update && upgrade"
echo "--------------------------------------------------------------------------"
echo
sudo apt-get update;
check_exit_status;
sudo apt-get upgrade;
check_exit_status;
echo "--------------------------------------------------------------------------"
echo "- Preparing mouse gestures"
echo "--------------------------------------------------------------------------"
echo
echo "Installing dependencies"
sudo apt install -y python3 python3-setuptools xdotool python3-gi libinput-tools python-gobject;
echo "Add user to input"
sudo gpasswd -a $USER input
check_exit_status;
echo "Clone repositories"
git clone https://github.com/bulletmark/libinput-gestures.git
check_exit_status;
git clone https://gitlab.com/cunidev/gestures
check_exit_status;
echo "Done."
echo "--------------------------------------------------------------------------"
echo "- Install paper theme"
echo "--------------------------------------------------------------------------"
echo
sudo apt-get install -y $PWD/paper-icon-theme_1.5.728-202003121505~daily~ubuntu18.04.1_all.deb;
check_exit_status;
echo
echo "--------------------------------------------------------------------------"
echo "- Install PyCharm"
echo "--------------------------------------------------------------------------"
echo
sudo snap install pycharm-community --classic;
check_exit_status;
echo "--------------------------------------------------------------------------"
echo "- Install TLP"
echo "--------------------------------------------------------------------------"
echo
sudo apt install -y tlp;
sudo systemctl enable tlp;
echo "--------------------------------------------------------------------------"
echo "- Install Spotify"
echo "--------------------------------------------------------------------------"
echo
sudo snap install spotify;
echo "--------------------------------------------------------------------------"
echo "- Install Python stuff"
echo "--------------------------------------------------------------------------"
echo
sudo apt install -y python3-pip;
check_exit_status;
sudo apt install -y virtualenv;
check_exit_status;
sudo pip3 install ipython;
check_exit_status;
echo "--------------------------------------------------------------------------"
echo "- Install Docker"
echo "--------------------------------------------------------------------------"
echo
sudo apt install -y docker docker-compose;
check_exit_status;
echo "--------------------------------------------------------------------------"
echo "- Removing downloaded files"
echo "--------------------------------------------------------------------------"
echo
echo "Zoom..."
rm -f ~/Downloads/zoom_x86_64.rpm;
check_exit_status;
echo "VS Code..."
rm -f ~/Downloads/vscode.rpm;
check_exit_status;
reminder;
echo "##########################################################################"
echo "# Setup process ended by $USER on $now"
echo "##########################################################################"
echo
| true
|
5d6f7b19d986d82aa3c4c33f4fb6df567679f792
|
Shell
|
lucyxiang/COMP206
|
/exam/midterm/midterm1v2.sh
|
UTF-8
| 153
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy every file from each directory given as an argument into ./backup.
# Usage: ./midterm1v2.sh dir [dir ...]
if [ $# -lt 1 ];
then
	exit
fi
while [ $# -gt 0 ];
do
	# Bug fix: `for f in $(ls $1)` produced bare basenames, so the old
	# `cp $f ./backup` only worked when $1 was the current directory;
	# globbing "$1"/* keeps the directory prefix on every path.
	for f in "$1"/*
	do
		cp "$f" ./backup
	done
	shift
done
exit
| true
|
5aad638b8d310234cd89453b4d5088cb403b0e25
|
Shell
|
johnmolloy/cloudwatchdns
|
/launch_script.sh
|
UTF-8
| 1,694
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data script: install Apache, publish a test page, enable
# mod_status, install a per-minute metric cron job, and register
# per-instance CloudWatch scale-up / scale-down alarms.

yum install -y httpd
cd /var/www/html/
wget http://i.imgur.com/VuofKvO.jpg

# Fix: write the page with a quoted heredoc. The original used
#   echo "<html><div align="center">..."
# where the inner double quotes terminated the outer string, so the
# attribute quotes were silently stripped from the generated HTML.
cat > /var/www/html/index.html <<'EOF'
<html><div align="center"><h1>Webpage Test</h1></div>
<img src="VuofKvO.jpg" alt="Lord Buckethead"></html>
EOF

# Expose Apache's status page for the metric-collection script.
cat >>/etc/httpd/conf/httpd.conf <<EOL
<Location /server-status>
    SetHandler server-status
    Order allow,deny
    Allow from all
</Location>
EOL
chkconfig httpd on
service httpd start
cd /home/ec2-user
# Push the custom busy-workers metric to CloudWatch every minute.
echo "*/1 * * * * ec2-user /home/ec2-user/cloudwatchdns/cloudwatchmetric.sh" > /etc/cron.d/cloudwatchmetric

# Instance ID from the metadata service; used to name per-instance alarms.
SERVER=`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`
UPALARMNAME="scale-up-alarm-$SERVER"
DOWNALARMNAME="scale-down-alarm-$SERVER"
echo "$SERVER"
echo "$UPALARMNAME"
echo "$DOWNALARMNAME"
export AWS_DEFAULT_REGION=eu-west-1
# Scale out when the busy-workers metric averages above 10 over 10 minutes.
aws cloudwatch put-metric-alarm --alarm-name "$UPALARMNAME" --comparison-operator GreaterThanThreshold --evaluation-periods 1 --metric-name johntest-busyworkers --namespace "johntest" --dimensions "Name=InstanceId,Value=$SERVER" --period 600 --statistic Average --threshold 10 --alarm-actions arn:aws:autoscaling:eu-west-1:540421644628:scalingPolicy:e3bbf3e1-d3ed-4d8c-930e-be4d1ef6dc39:autoScalingGroupName/john-test:policyName/john-test-ScaleOut
# Scale in when it averages below 9 over 10 minutes.
aws cloudwatch put-metric-alarm --alarm-name "$DOWNALARMNAME" --comparison-operator LessThanThreshold --evaluation-periods 1 --metric-name johntest-busyworkers --namespace "johntest" --dimensions "Name=InstanceId,Value=$SERVER" --period 600 --statistic Average --threshold 9 --alarm-actions arn:aws:autoscaling:eu-west-1:540421644628:scalingPolicy:41bbdceb-deef-4c3e-b6bf-8b43d7e8018d:autoScalingGroupName/john-test:policyName/john-test-ScaleIn
| true
|
991157754d311d7efa830e87286253e45302a0cd
|
Shell
|
theGreenJedi/Path
|
/Python Books/Athena/training/demo/demo/performance_python/c-vs-cpp/time-all.sh
|
UTF-8
| 260
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Time each count-distinct implementation over the same input file.
# Fix: the PROGS array and "${PROGS[@]}" are bashisms that a strict POSIX
# /bin/sh (e.g. dash) rejects, so the shebang must request bash.

PROGS=(
./count-distinct.py
./count-distinct.c.x
./count-distinct.cpp.x
./count-distinct-v1.cpp.x
./count-distinct-v2.cpp.x
./count-distinct-v3.cpp.x
)

for prog in "${PROGS[@]}"; do
echo "Program: " "$prog"
time "$prog" < words.txt
done
| true
|
eff466cc18a824a185cc05cbcc67a119220291e2
|
Shell
|
duyquangou1989/MyScript
|
/Others/install_zabbix_centos.sh
|
UTF-8
| 1,032
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Install and configure the Zabbix agent on a CentOS host:
# pulls the repo file and a prebuilt agent config bundle from an internal
# server (10.30.10.11), installs a per-minute iostat cron job, appends this
# machine's hostname to the agent config, and disables firewalld.
#echo "Enter HOSTNAME of this server here (This hostname must be the same as hostname which is created on Zabbix Server): "
HostName=`hostname`
# Base tooling used by the monitoring scripts.
yum -y install tcpdump rsyslog nc sysstat net-tools wget
yum -y group install 'Development Tools'
cd /etc/yum.repos.d/
wget http://10.30.10.11/download/zabbix.repo
yum clean all
rpm --import http://repo.zabbix.com/RPM-GPG-KEY-ZABBIX-A14FE591
yum -y install zabbix-sender zabbix-agent
# Replace the packaged configuration with the internal bundle.
rm -rfv /etc/zabbix/*
cd /usr/src
wget http://10.30.10.11/download/zabbix-agentd.tar.gz
tar -xvf zabbix-agentd.tar.gz
cd etc/zabbix/
cp -rfv bin iostat-data zabbix_agentd.conf zabbix_agentd.d /etc/zabbix/
# Collect iostat data every minute via root's crontab.
# NOTE(review): "/dev/null 2>&1" is passed here as *arguments* to the cron
# script -- the ">" redirect operator looks missing; confirm intent.
echo "# Zabbix Agent " >> /var/spool/cron/root
echo "* * * * * sh /etc/zabbix/bin/iostat-cron.sh /dev/null 2>&1 " >> /var/spool/cron/root
systemctl restart zabbix-agent
# Register this host's name with the agent, then restart to pick it up.
echo Hostname=$HostName >> /etc/zabbix/zabbix_agentd.conf
systemctl restart zabbix-agent
systemctl -f disable firewalld
systemctl -f stop firewalld
systemctl enable zabbix-agent
# Sanity check: the agent should be listening (pattern matches 10050/10051).
netstat -nlpt |grep 1005
exit 0;
| true
|
a61ffe0bf8ce6aaafb61a60cd308e99329b2c051
|
Shell
|
jinalsalvi/Halifax_Library
|
/DataCleaning/data2mongo.sh
|
UTF-8
| 1,175
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Load the library dataset into MongoDB and MySQL:
#   1. drop the working Mongo collections (dummy, author, article);
#   2. import articles.json into the "dummy" collection;
#   3. (re)create the MySQL tables from existing_tables.sql / new_tables.sql;
#   4. export AUTHOR rows as Mongo insert statements (get_author.sql),
#      truncate the SQL AUTHOR table, and replay the inserts into Mongo;
#   5. build the article records with create_article.py.
# NOTE(review): assumes a MySQL database with the same name as the Mongo one,
# and that the *.sql files plus articles.json exist in the CWD -- confirm.
# (The previous header described an unrelated txt-to-json extraction step.)
if [ $# -ne 3 ]
then
printf "\nUsage: \n\$ $0 <database> <user> <pass> \n\n"
exit 1
fi
coll="dummy";
db="$1";
user="$2";
pass="$3";
# Start from a clean slate in Mongo.
mongo -u "$user" -p "$pass" "$db" --eval "db.$coll.drop()"
mongo -u "$user" -p "$pass" "$db" --eval "db.author.drop()"
mongo -u "$user" -p "$pass" "$db" --eval "db.article.drop()"
mongoimport -u "$user" -p "$pass" -d "$db" -c "$coll" --file "articles.json"
mysql -u "$user" --password="$pass" -D "$db" < 'existing_tables.sql'
mysql -u "$user" --password="$pass" -D "$db" < 'new_tables.sql'
# Export author rows, then clear the SQL table (FK checks temporarily off).
mysql -u "$user" --password="$pass" -D "$db" < 'get_author.sql' > 'mongo_author_insert.txt'
mysql -u "$user" --password="$pass" -D "$db" -e "SET FOREIGN_KEY_CHECKS = 0;TRUNCATE TABLE AUTHOR;SET FOREIGN_KEY_CHECKS = 1;"
mongo "$db" -u "$user" -p "$pass" < 'mongo_author_insert.txt'
python ./create_article.py $user $pass $db
| true
|
cbb0c9c84b8c0d2d0873b11552341772d4832d11
|
Shell
|
tenllado/dotfiles
|
/.config/rofi/scripts/rofi_i3_msg.sh
|
UTF-8
| 838
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Use rofi to send commands to i3, using i3-msg underneath. It manages a history
# file offered to the user in the rofi list window.
#
# Author: Christian Tenllado
# e-mail: ctenllado@gmail.com

HISTORY_FILE=~/.local/share/rofi/rofi_i3_history
MAX_HISTORY_SIZE=20

# Drop the oldest entries until the history holds at most MAX_HISTORY_SIZE
# lines. One tail(1) call replaces the original sed-per-excess-line loop.
history_trunc() {
	local history_len tmp
	history_len=$(wc -l < "${HISTORY_FILE}")
	if [ "$history_len" -gt "${MAX_HISTORY_SIZE}" ]; then
		tmp=$(mktemp)
		tail -n "${MAX_HISTORY_SIZE}" "${HISTORY_FILE}" > "$tmp"
		mv "$tmp" "${HISTORY_FILE}"
	fi
}

# Append the command to the history, deleting any previous occurrence so the
# most recent use is always the last line (first in the rofi list via tac).
history_add() {
	local command
	# Escape [ and ] so they are literal in the sed delete address.
	command=$(echo "$@" | sed -e 's/\(\[\|\]\)/\\&/g')
	sed -i "/$command/d" "${HISTORY_FILE}"
	echo "$@" >> "${HISTORY_FILE}"
}

if [ ! -d "$(dirname "${HISTORY_FILE}")" ]; then
	mkdir -p "$(dirname "${HISTORY_FILE}")"
fi
# Fix: guarantee the file exists so wc/sed/tac do not fail on the first run.
[ -f "${HISTORY_FILE}" ] || touch "${HISTORY_FILE}"

if [ $# -eq 0 ]; then
	# Output to rofi: history of commands, most recent first.
	tac "${HISTORY_FILE}"
else
	# Fix: quote "$@" so multi-word arguments keep their boundaries and are
	# not glob-expanded on the way into history_add.
	history_add "$@"
	history_trunc
	i3-msg "$@" > /dev/null
fi
| true
|
5c96bfe734dfbe132f929fec67546ccae0dd6393
|
Shell
|
mpaya/epigenomics_scripts
|
/scripts/aux/manorm.sbatch
|
UTF-8
| 5,228
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH -n 1
#SBATCH -c 1
#SBATCH -t 5:00:00
#SBATCH --job-name=manorm
#SBATCH --mem-per-cpu=10GB
# Compare ChIP-seq peaks of a sample against the wild type with MAnorm:
# merge replicate BAMs with Picard when two exist, run manorm, append
# M-value summary counts to the log, then annotate results with manorm.R.
# Usage: sbatch manorm.sbatch <outdir> <bamdir> <beddir>
module load gcc/6.4.0 samtools/1.9 picard/2.18.14 R/3.6.0
PATH=$EBROOTPICARD/:$PATH
source config.txt
outdir="$1"
bamdir="$2"
beddir="$3"
s=${bamdir##*/}
pattern_bed=*"$s"_"$epic2_gap".bed
cpus=1
compare_batch=1
mkdir -p $1 && cd $1
## find files of sample 1
# if merged file was created, first find it
# else, get original files
# merge of bam files is done in tmp folder to avoid clash among jobs
bamfiles1=($([[ -d tmp_"$s" ]] && find tmp_"$s" -name "$s"*merged.bam))
if [ ${#bamfiles1[@]} -eq 0 ];
then
bamfiles1=($(find "$bamdir"* -name *$dup_pt))
fi
bedfile1=($(find "$beddir"* -name $pattern_bed))
## if two files, merge them
if [ ${#bamfiles1[@]} -eq 1 ];
then
bamfile1=${bamfiles1[0]}
elif [ ${#bamfiles1[@]} -eq 2 ]
then
echo merging $s
[[ -d tmp_"$s" ]] || mkdir tmp_"$s"
bambase=tmp_"$s"/"$s".dupmark
## MergeSamFiles
java -Xmx8g -jar picard.jar \
MergeSamFiles \
INPUT="${bamfiles1[0]}" \
INPUT="${bamfiles1[1]}" \
OUTPUT="$bambase".merged.bam \
MERGE_SEQUENCE_DICTIONARIES="false" \
ASSUME_SORTED="false" \
USE_THREADING=true \
SORT_ORDER=coordinate \
VALIDATION_STRINGENCY="LENIENT" 2> $bambase.mergelog
samtools index "$bambase".merged.bam
bamfile1="$bambase".merged.bam
# Fix: this branch validates sample 1, so it must count bamfiles1. The
# original tested ${#bamfiles2[@]}, which is not populated yet at this
# point, so >2 replicates of sample 1 were never rejected here.
elif [[ ${#bamfiles1[@]} -gt 2 ]]
then
echo "More than two replicates on $s not supported" && k=1
fi
echo "sample1 files processed"
## find files for wild type
wt_beddir=${beddir/$s/*$wt_name}
if [[ compare_batch -eq 1 ]]
then
f=$(basename $(pwd | sed 's;analy.*;;'))
batch=$(awk -v i=${expidx[$f]} -v j=$col_strain -v b=$col_batch -v s=$s \
'NR>1{if ($i != "" && $j == s) {print $b}}' FS='\t' "$metadata" | sort -u)
## do something if length batch > 1
wt_name=$(awk -v i=${expidx[$f]} -v j=$col_strain -v k=$col_sample \
-v wt=$wt_name -v b=$col_batch -v sb=$batch 'NR>1{if ($i != "" &&
$j == wt && $b == sb) {print $k}}' FS='\t' "$metadata")
## same, what if multiple...
fi
wt_bamdir=${bamdir/$s/$wt_name}
bamfiles2=($([[ -d tmp_"$s" ]] && find tmp_"$s" -name "$wt_name"*merged.bam))
if [ ${#bamfiles2[@]} -eq 0 ];
then
bamfiles2=($(find "$wt_bamdir"* -name *$dup_pt))
fi
bedfile2=($(find "$wt_beddir"* -name ${pattern_bed/$s/$wt_name}))
## if two files, merge them
if [ ${#bamfiles2[@]} -eq 1 ];
then
bamfile2=${bamfiles2[0]}
elif [ ${#bamfiles2[@]} -eq 2 ]
then
echo merging $wt_name
[[ -d tmp_"$s" ]] || mkdir tmp_"$s"
bambase=tmp_"$s"/"$wt_name".dupmark
## MergeSamFiles
# NOTE(review): this merge uses -Xmx2g while the sample-1 merge above uses
# -Xmx8g -- confirm whether the difference is intended.
java -Xmx2g -jar picard.jar \
MergeSamFiles \
INPUT="${bamfiles2[0]}" \
INPUT="${bamfiles2[1]}" \
OUTPUT="$bambase".merged.bam \
MERGE_SEQUENCE_DICTIONARIES="false" \
ASSUME_SORTED="false" \
USE_THREADING=true \
SORT_ORDER=coordinate \
VALIDATION_STRINGENCY="LENIENT" 2> $bambase.mergelog
samtools index "$bambase".merged.bam
bamfile2="$bambase".merged.bam
elif [[ ${#bamfiles2[@]} -gt 2 ]]
then
echo "More than two replicates on $wt_name not supported" && k=1
fi
echo "sample2 files processed"
# Abort if any required input is missing.
if [[ ${#bamfiles1[@]} -eq 0 ]]; then echo "missing bam files for $s"; k=1; fi
if [[ ${#bedfile1[@]} -eq 0 ]]; then echo "missing bed file for $s"; k=1; fi
if [[ ${#bamfiles2[@]} -eq 0 ]]; then echo "missing bam files for $wt_name"; k=1; fi
if [[ ${#bedfile2[@]} -eq 0 ]]; then echo "missing bed file for $wt_name"; k=1; fi
if [[ $k -eq 1 ]]; then exit 1; fi
## run MAnorm
manorm_dir=manorm_"$s"_vs_"$wt_name"
## run manorm if no previous run exists
if [ ! -f manorm_"$s"_vs_"$wt_name"/*xls ]
then
source $anaconda_dir/etc/profile.d/conda.sh
conda activate $anaconda_dir/envs/manorm
manorm \
--p1 ${bedfile1} \
--p2 ${bedfile2} \
--peak-format bed \
--r1 $bamfile1 \
--r2 $bamfile2 \
--read-format bam \
--name1 $s \
--name2 $wt_name \
--paired-end \
-o "$manorm_dir"/ \
2> manorm_"$s".log
conda deactivate
## add numbers to log
# Count peaks above several |M| thresholds: overall, M>0 and M<0 subsets.
echo -e "\n# peaks\tM>0\tM>0.1\tM>0.25\tM>0.5\tM>1" >> manorm_"$s".log
awk -F '\t' 'NR>1{m_val=sqrt($5^2); if(m_val>0){a++;} if(m_val>.1){b++;}
if(m_val>.25){c++;} if(m_val>.5){d++;} if(m_val>1){e++;} }END{print "total",a,b,c,d,e}' \
OFS='\t' "$manorm_dir"/*xls >> manorm_"$s".log
awk -F '\t' 'NR>1&&$5>0{m_val=$5; if(m_val>0){a++;} if(m_val>.1){b++;}
if(m_val>.25){c++;}if(m_val>.5){d++;} if(m_val>1){e++;} }END{print "M > 0",a,b,c,d,e}' \
OFS='\t' "$manorm_dir"/*xls >> manorm_"$s".log
awk -F '\t' 'NR>1&&$5<0{m_val=-$5; if(m_val>0){a++;} if(m_val>.1){b++;}
if(m_val>.25){c++;} if(m_val>.5){d++;} if(m_val>1){e++;} }END{print "M < 0",a,b,c,d,e}' \
OFS='\t' "$manorm_dir"/*xls >> manorm_"$s".log
mv manorm_"$s".log manorm_"$s"_vs_"$wt_name"
## finish
echo "manorm done"
else
echo "using previous manorm run"
fi
## run annotation
cd "$manorm_dir"
ln -s ${bedfile1} .
ln -s ${bedfile2} .
ln -s "$genome_gff" genome_annot.gff
ln -s "$genome_annot" genome_annot.txt
Rscript $scriptd/manorm.R $s "$wt_name" $manorm_m
rm *bed genome_annot*
echo "manorm analysis finished"
| true
|
267279f3aa68f88115be7fb2a94c330dcebf0a6d
|
Shell
|
slamdev/catalog
|
/etc/cluster-configuration/create-backend-project.sh
|
UTF-8
| 2,489
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a GCP project that holds Terraform state: create the project,
# attach billing, create a Terraform service account with the required IAM
# roles, enable the needed APIs and create a versioned GCS state bucket.
#
# Fix: the original "#!/bin/sh" breaks on `set -o pipefail`, which POSIX
# sh implementations such as dash do not support; request bash explicitly.
set -euo pipefail

TERRAFORM_BACKEND_PROJECT="catalog-tf-backend"
REGION="australia-southeast1"
ORGANIZATION_ID=`gcloud organizations list --format="get(name)" | grep -oE "[^/]+$"`
BILLING_ACCOUNT_ID=`gcloud beta billing accounts list --format="get(name)" | grep -oE "[^/]+$"`
# Random suffix keeps the project id globally unique.
# (Removed the original RANDOM_ID variable, which was never used.)
TERRAFORM_BACKEND_PROJECT_ID="${TERRAFORM_BACKEND_PROJECT}-`openssl rand -hex 4`"
##
## Setup Google Cloud Project
##
ORIGINAL_PROJECT=`gcloud config get-value project`
# Create admin project
gcloud projects create ${TERRAFORM_BACKEND_PROJECT_ID} --set-as-default \
    --name=${TERRAFORM_BACKEND_PROJECT} --organization=${ORGANIZATION_ID}
# Enable project billing
gcloud beta billing projects link ${TERRAFORM_BACKEND_PROJECT_ID} --billing-account ${BILLING_ACCOUNT_ID}
# Create service account for terraform
gcloud iam service-accounts create terraform --display-name "Terraform admin account"
# Allow service account to view project
gcloud projects add-iam-policy-binding ${TERRAFORM_BACKEND_PROJECT_ID} \
  --member serviceAccount:terraform@${TERRAFORM_BACKEND_PROJECT_ID}.iam.gserviceaccount.com \
  --role roles/viewer
# Allow service account to manage Google Storage
gcloud projects add-iam-policy-binding ${TERRAFORM_BACKEND_PROJECT_ID} \
  --member serviceAccount:terraform@${TERRAFORM_BACKEND_PROJECT_ID}.iam.gserviceaccount.com \
  --role roles/storage.admin
# Enable required services
gcloud services enable cloudresourcemanager.googleapis.com \
    && gcloud services enable cloudbilling.googleapis.com \
    && gcloud services enable iam.googleapis.com \
    && gcloud services enable compute.googleapis.com \
    && gcloud services enable sqladmin.googleapis.com \
    && gcloud services enable container.googleapis.com
# Allow service account to create projects
gcloud organizations add-iam-policy-binding ${ORGANIZATION_ID} \
  --member serviceAccount:terraform@${TERRAFORM_BACKEND_PROJECT_ID}.iam.gserviceaccount.com \
  --role roles/resourcemanager.projectCreator
# Allow service account to enable billing for projects
gcloud organizations add-iam-policy-binding ${ORGANIZATION_ID} \
  --member serviceAccount:terraform@${TERRAFORM_BACKEND_PROJECT_ID}.iam.gserviceaccount.com \
  --role roles/billing.user
# Create Google Storage bucket to save terraform state
gsutil mb -p ${TERRAFORM_BACKEND_PROJECT_ID} -l ${REGION} gs://${TERRAFORM_BACKEND_PROJECT_ID}
# Enable versioning for Google Storage bucket
gsutil versioning set on gs://${TERRAFORM_BACKEND_PROJECT_ID}
# Restore original project
gcloud config set project ${ORIGINAL_PROJECT}
| true
|
4cda613fbf838537c0a56a0f583c20eb129c45f8
|
Shell
|
TrungNguyenBa/bug_fix_minimization
|
/scripts/Fault_localization/get_buggy_lines.sh
|
UTF-8
| 5,518
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
################################################################################
# This script determines all buggy source code lines in a buggy Defects4J project
# version. It writes the result to a file in the provided output directory; the
# file name is: <project_id>-<bug_id>.buggy.lines
#
# Considering removed and added lines (from buggy to fixed), the script works as
# follows:
#
# 1) For each removed line, the script outputs the line number of the removed
# line.
#
# 2) For each block of added lines, the script distinguishes two cases:
# 2.1) If the block of added lines is immediately preceded by a removed line,
# the block is associated with that preceding line -- the script doesn't
# output a line number in this case.
# 2.2) If the block of added lines is not immediately preceded by a removed
# line, the script outputs the line number of the line immediately
# following the block of added lines.
#
# Usage:
# get_buggy_lines.sh <project_id> <bug_id> <out_dir>"
#
# Examples:
#
# Case 1) -- output: line 2
# buggy fixed
# 1 1
# 2
#
# Case 2.1) -- output: line 2
# buggy fixed
# 1 1
# 2 20
#
# Case 2.1) -- output: line 2
# buggy fixed
# 1 1
# 2 20
# 21
# 22
#
# Case 2.2) -- output: line 2
# buggy fixed
# 1 1
# 10
# 11
# 2 2
#
#
# Requirements:
# - Bash 4+ needs to be installed
# - diff needs to be installed
# - the environment variable D4J_HOME needs to be set and must point to the
# Defects4J installation that contains all minimized patches.
# - the environment variable SLOC_HOME needs to be set and must point to the
# sloccount installation.
#
################################################################################
#
# Print the given error message and abort the script with status 1.
#
die() {
  local msg=$1
  echo $msg
  exit 1
}
# Check command-line arguments
[ $# == 3 ] || die "usage: $0 <project_id> <bug_id> <out_dir>"
PID=$1
BID=$2
OUT_DIR=$3
mkdir -p $OUT_DIR
# Result file: one "<file>#<line>[#content]" entry per buggy line.
OUT_FILE="$OUT_DIR/$PID-$BID.buggy.lines"
# Check whether D4J_HOME is set
[ "$D4J_HOME" != "" ] || die "D4J_HOME is not set!"
# Check whether SLOC_HOME is set
[ "$SLOC_HOME" != "" ] || die "SLOC_HOME is not set!"
# Put the defects4j command on the PATH
PATH=$PATH:$D4J_HOME/framework/bin:$SLOC_HOME
# Temporary directory, used to checkout the buggy and fixed version
TMP="$BFM/raw_data/D4J_projects/$PID/raw_modified_files/minimized"
mkdir -p $TMP
# Temporary file, used to collect information about all removed and added lines
TMP_LINES="$TMP/all_buggy_lines"
#
# Determine all buggy lines, using the diff between the buggy and fixed version
#
# Checkout the fixed project version
work_dir="$TMP/$PID-$BID"
defects4j checkout -p$PID -v${BID}f -w$work_dir
# Determine and iterate over all modified classes (i.e., patched files)
src_dir=$(grep "d4j.dir.src.classes=" $work_dir/defects4j.build.properties | cut -f2 -d'=')
mod_classes=$(cat $D4J_HOME/framework/projects/$PID/modified_classes/$BID.src)
for class in $mod_classes; do
file="$(echo $class | tr '.' '/').java";
# Cache the fixed/buggy copies of each modified file so repeated runs can
# skip the (slow) defects4j checkouts.
if !(ls "$TMP/$PID-$BID_${class}_fixed" > /dev/null 2> /dev/null); then
# Checkout the fixed project version
defects4j checkout -p$PID -v${BID}f -w$work_dir
cp $work_dir/$src_dir/$file "$TMP/$PID-$BID_${class}_fixed"
fi
if !(ls "$TMP/$PID-$BID_${class}_buggy" > /dev/null 2> /dev/null); then
# Checkout the buggy project version
defects4j checkout -p$PID -v${BID}b -w$work_dir
cp $work_dir/$src_dir/$file "$TMP/$PID-$BID_${class}_buggy"
fi
# Diff between buggy and fixed -- only show line numbers for removed and
# added lines in the buggy version
diff \
--unchanged-line-format='' \
--old-line-format="$file#%dn#%l%c'\12'" \
--new-group-format="$file#%df#FAULT_OF_OMISSION%c'\12'" \
"$TMP/$PID-$BID_${class}_buggy" "$TMP/$PID-$BID_${class}_fixed" >> "$TMP_LINES"
done
# Print all removed lines to output file
grep --text -v "FAULT_OF_OMISSION" "$TMP_LINES" > "$OUT_FILE"
# Check which added lines need to be added to the output file
# (only when the same file#line is not already covered by a removed line).
for entry in $(grep --text 'FAULT_OF_OMISSION' "$TMP_LINES"); do
# Determine whether file#line already exists in output file -> if so, skip
line=$(echo $entry | cut -f1,2 -d'#')
grep -q "$line" "$OUT_FILE" || echo "$entry" >> "$OUT_FILE"
done
#
# Compute total sloc for all bug-related classes on the buggy version
#
defects4j checkout -p$PID -v${BID}b -w$work_dir
# Set of all bug-related classes
rel_classes=$(cat $D4J_HOME/framework/projects/$PID/loaded_classes/$BID.src)
# Temporary directory that holds all bug-related classes -- used to compute the
# overall number of lines of code
DIR_SRC="$TMP/loc"
mkdir -p $DIR_SRC
[ -f $OUT_DIR/sloc.csv ] || echo "project_id,bug_id,sloc,sloc_total" > $OUT_DIR/sloc.csv
CNT=1
for class in $rel_classes; do
src_file="$(echo $class | tr '.' '/').java";
to_file="$(echo $src_file | tr '/' '-')";
# Checkout the buggy project version
[ -f $work_dir/$src_dir/$src_file ] && cp $work_dir/$src_dir/$src_file "$DIR_SRC/$to_file"
(( CNT += 1 ))
done
# Run sloccount and report total sloc
# NOTE(review): "\d" is not a BRE class in grep; the pattern only matches
# because \d* can match zero characters after "java=". Confirm with
# `grep -E "java=[0-9]+"` semantics before relying on it.
sloc=$(sloccount $DIR_SRC | grep "java=\d*" | cut -f1 -d' ')
sloc_total=$(sloccount $work_dir/$src_dir | grep "java=\d*" | cut -f1 -d' ')
echo "$PID,$BID,$sloc,$sloc_total" >> $OUT_DIR/sloc.csv
rm -rf $work_dir
rm $TMP_LINES
| true
|
f56ee3fab1a5dcecab1948f7d0581cee3664111f
|
Shell
|
mkozinski/MRAdata_py
|
/prepareData.sh
|
UTF-8
| 3,416
| 3.453125
| 3
|
[] |
no_license
|
# Download the ITK TubeTK "Healthy MR" MRA dataset, render ground-truth
# vessel labels from the .tre files, and produce the cropped / cut /
# projected volumes used for training.
DNAME="download"
DN='ITKTubeTK - Bullitt - Healthy MR Database'
DD="${DN}/Designed Database of MR Brain Images of Healthy Volunteers"
LBL_SUBDIR="AuxillaryData/VascularNetwork.tre"
OUT_DIR="orig_data"
OUT_LBLDIR="$OUT_DIR"/lbl
OUT_IMGDIR="$OUT_DIR"/img
if [ ! -d "$OUT_DIR" ]; then
NEW_DOWNLOAD=true
if [ ! -d "$DN" ]; then
if [ ! -d "$DNAME" ]; then
echo "downloading and unpacking the dataset into folder $DNAME"
wget https://data.kitware.com/api/v1/collection/591086ee8d777f16d01e0724/download
else
echo "found an existing \"$DNAME\" directory, I will not download the data again"
fi
echo "unpacking the dataset into folder $DN"
unzip "$DNAME"
echo "removing $DNAME"
rm "$DNAME"
else
echo "found an existing \"$DN\" directory, I shall not download and unpack the data again"
fi
mkdir $OUT_DIR
mkdir $OUT_LBLDIR
mkdir $OUT_IMGDIR
echo "copying the annotated images into folder $OUT_IMGDIR and the ground truths into $OUT_LBLDIR"
for D in "$DD"/Normal*
do
# Extract the numeric ID from ".../Normal-<ID>".
# Fix: the original `sed -e "s/.*\/Normal-\(\d\)*/\1/"` used \d, which is
# not a valid BRE class; it only produced the right value because the
# group matched zero times and the ID suffix was left untouched.
ID=${D##*Normal-}
#echo ${ID}
if [ -e "$D/$LBL_SUBDIR" ]; then
cp "$D/MRA/Normal$ID-MRA.mha" "$OUT_IMGDIR/$ID.mha"
cp "$D/$LBL_SUBDIR" "$OUT_LBLDIR/$ID.tre"
fi
done
echo "removing $DD and $DN"
rm -rf "$DD"
rm -rf "$DN"
else
echo "found an existing \"$OUT_DIR\" directory, I shall not acquire the dataset again"
fi
if [ ! -d img ] || [ "$NEW_DOWNLOAD" = true ]; then
echo "generating the inputs into folder img"
python convertMha2Py.py "$OUT_IMGDIR" img
else
echo "found an existing \"img\" directory, not re-generating the inputs"
fi
if [ ! -d lbl ] || [ "$NEW_DOWNLOAD" = true ]; then
echo "rendering the ground truths into folder lbl"
mkdir lbl
for TRE in "$OUT_LBLDIR"/*.tre; do
OUTNAME=`basename "$TRE" | sed -e 's/\.tre//'`
python renderGroundTruth.py "$TRE" lbl/"$OUTNAME.npy" --volume_size 128 448 448 --dimension_permutation 2 1 0
done
else
echo "I found an existing \"lbl\" dir, I will not re-render the ground truths"
fi
echo "cropping the volumes to remove empty margins; folder img_cropped"
mkdir img_cropped
for IMG in img/*.npy; do
OUTNAME=` basename "$IMG" `
python crop.py "$IMG" img_cropped/"$OUTNAME" --crop_dims 0 128 16 432 64 392
done
echo "cropping the labels to remove empty margins; folder lbl_cropped"
mkdir lbl_cropped
for LBL in lbl/*.npy; do
OUTNAME=` basename "$LBL" `
python crop.py "$LBL" lbl_cropped/"$OUTNAME" --crop_dims 0 128 16 432 64 392
done
echo "cutting the volumes; results in folder img_cropped"
mkdir img_cut
for IMG in img_cropped/*.npy; do
OUTNAME=` basename "$IMG" `
python cut.py "$IMG" img_cut/"$OUTNAME" --nb_pieces 1 2 2
done
echo "cutting the labels; results in folder lbl_cut"
mkdir lbl_cut
for LBL in lbl_cropped/*.npy; do
OUTNAME=` basename "$LBL" `
python cut.py "$LBL" lbl_cut/"$OUTNAME" --nb_pieces 1 2 2
done
echo "generating projection labels in lbl_projections"
mkdir lbl_projections
for LBL in lbl_cut/*.npy; do
OUTNAME=`basename "$LBL"`
python project.py "$LBL" lbl_projections/"$OUTNAME"
done
echo "generating labels with margins in lbl_with_margin"
mkdir lbl_with_margin
for LBL in lbl_cut/*.npy; do
OUTNAME=`basename "$LBL"`
python addMargin2Lbl.py "$LBL" lbl_with_margin/"$OUTNAME"
done
./split_data.sh img_cropped lbl_cropped img_cut lbl_cut lbl_projections lbl_with_margin
| true
|
70b4cd98d75ad9154da7b286e65d83f584566bb6
|
Shell
|
ofalk/Nagios
|
/eventhandler_dispatcher
|
UTF-8
| 387
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Forward to all our eventhandlers
#
# In case you have several event handlers, this can be used
# to dispatch the events to all your eventhandlers
# There is no configuration file, just adapt the script.

# Fix: quote $0 and forward "$@" so a script path or handler arguments
# containing whitespace/glob characters survive intact.
dir=$(dirname "$0")

#"$dir"/nag2tec "$@"
"$dir"/nag2pan "$@"

# Debugging
#echo "`comm -3 <(declare | sort) <(declare -f | sort)`" >> /tmp/asdf
#echo "$@" >> /tmp/asdf

exit 0
| true
|
d9d45fd2082b7cf50f0e98b5eb47cdead3840b13
|
Shell
|
kyubeom21c/kosta_iot_study
|
/shellPro/sh09until2.sh
|
UTF-8
| 145
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Sum all integer arguments, echoing each argument and the remaining
# argument count while consuming them with `shift`.

echo "$0"
echo "args length:" $#
# Fix: initialize explicitly; the original relied on the unset $sum
# expanding to nothing inside $(( )).
sum=0
until [ $# = 0 ]
do
echo "$1"
sum=$((sum + $1))
shift
echo $#
done
echo "sum:$sum"
exit 0
| true
|
f87d4a7687ae2933ef34f0ee98c889b1e9d86936
|
Shell
|
maxiroellplenty/web-template
|
/deploy.sh
|
UTF-8
| 907
| 3.40625
| 3
|
[] |
no_license
|
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
SET='\033[0m'

# change your branch name
gitBranch="master"
sshConnection="yourUser@yourServer.de:httpdocs"

# Copy the built site (dist/) to the web server over SSH.
function uploadToWebServer()
{
    echo "${YELLOW} ### Upload to Webserver with ssh ### ${SET}"
    scp -r dist/* $sshConnection
    echo "${YELLOW} ### Done ### ${SET}"
}

# Commit all local changes and push them to the configured branch.
function pushToGithub()
{
    echo "${YELLOW} ### Upload to Github ### ${SET}"
    # Updates your local git
    git fetch
    # Adds all changed files and creates a commit message
    git add -A && git commit -m 'Deployed to Github'
    # Fix: push the configured branch. The original ran
    # `git push origin $master`, but $master is never defined anywhere
    # in this script, so the intended refspec was silently dropped.
    git push origin "$gitBranch"
    echo "${YELLOW} ### Done ### ${SET}"
}

# Build, push to GitHub, then upload the build artifacts.
function main()
{
    echo "${GREEN} ### Started Deployer ### ${SET}"
    echo ''
    sh build.sh
    echo ''
    pushToGithub
    echo ''
    uploadToWebServer
    echo ''
    echo "${GREEN} ### Finished Deployer ### ${SET}"
}
main
| true
|
f744c8638dcc9602abfb34fd71ba84a8afeaf7ca
|
Shell
|
whitwhittle/homeseer-tools
|
/HomeSeer-manager/run
|
UTF-8
| 1,076
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch HomeSeer under mono and coordinate with the companion start/stop
# scripts via PID files: record our PID, run HSConsole, give HomeSeer's
# forked shutdown/restart scripts time to finish, then kill the monitor so
# the stop script always runs after it.

HOMESEER_DIR=/opt/HomeSeer
PID_FILE=/run/homeseer.pid
START_PID_FILE=/run/homeseer-start.pid
STOP_PID_FILE=/run/homeseer-stop.pid
TIMEOUT=60   # seconds to wait for HomeSeer's forked scripts to show up

echo $$ > "$PID_FILE"
cd "$HOMESEER_DIR"
mono HSConsole.exe --log
EXIT=$?
echo "HomeSeer exited with code $EXIT."

# Homeseer forks shutdown and restart scripts. If we're not trying to stop it, give them time to run.
# (Exit code 0 means a clean shutdown/restart request; the stop PID file
# means an external stop is in progress.)
if [ -e "$STOP_PID_FILE" ] || [[ $EXIT -eq 0 ]]; then
TRIES=$((TIMEOUT/5))
while [[ $TRIES -gt 0 ]] && ! [ -e "$STOP_PID_FILE" ]; do
echo "Waiting $((TRIES*5)) seconds for HomeSeer scripts to finish."
sleep 5
TRIES=$((TRIES-1))
done
fi

# Kill the start / monitoring script here, so it always finishes before the stop script.
if ([ -e "$STOP_PID_FILE" ] || [[ $EXIT -eq 0 ]]) && [ -e "$START_PID_FILE" ]; then
echo "Killing monitor."
START_PID=`cat "$START_PID_FILE"`
# Try a default TERM first; escalate to SIGKILL if it is still alive.
[ -n "$START_PID" ] && kill "$START_PID" > /dev/null 2>&1 && sleep 2
kill -SIGKILL "$START_PID" > /dev/null 2>&1
rm -f "$START_PID_FILE"
fi

# Done.
rm -f "$PID_FILE"
exit $EXIT
| true
|
a99fa816e8466217523f61d25ae189632b00b5a8
|
Shell
|
chenke225/bash_shell
|
/windows利用git通过ssh连接github.sh
|
UTF-8
| 3,552
| 2.96875
| 3
|
[] |
no_license
|
windows下免除每次连接github要输入密码之烦恼
打开~/.bash_profile,将以下内容粘贴到该文件,保存。当你初次运行git_bash时,将提示输入密码(SSH建立密钥时的密码),此时ssh-agent进程将持续运行,直到关闭git_bash。如此,连接github时将无需输入密码。
VScode中利用git通过ssh连接github
1 初始化
git config --global user.name chenke225 #设定连接用户名
git config --global user.email chenke225@gmail.com #建立github的用户邮箱
2 建立git仓库
mkdir /d/gittest # 建立本地目录
cd /d/gittest # 进入本地目录
git init # 建立git本地仓库,可拷贝一些文件到此目录。
3 产生ssh密钥 将公钥复制到github
vscode终端输入运行
ssh-keygen -t rsa -C "chenke225@gmail.com" #产生一对ssh密钥
此后,在需要输入密码时,可以直接回车,就会忽略连接启动时的密码。 若之前已产生过密钥,选择覆盖。
这样在~/目录下产生.ssh目录,至少有两个文件 id_rsa, id_rsa.pub,即私钥和公钥。
clip < ~/.ssh/id_rsa.pub #复制公钥到剪贴板。 ~是用户的目录,windows下我的是 c:\users\chk
ubuntu下使用如下命令拷贝到剪贴板:
xclip -sel clip < ~/.ssh/id_rsa.pub
将公钥复制到github。 登录你的github,右上角点开下拉菜单,选settings,settings界面选择 SSH and GPG keys里面的SSH。
删除可能已有的公钥,粘贴你的新建公钥,点击new ssh key按键。 由此将公钥保存到github。
使用命令 ssh -T git@github.com 测试,输入password时直接回车。 将显示成功信息
Hi, chenke225! You've successfully authenticated, but Github does not provide shell access.
注: 可以在github中放置多个公钥。例如:windows下产生的公钥叫chenke dell XPS, ubuntu下产生的公钥叫ubuntu16.04。
这两个不同的公钥可连接两个不同的电脑平台。当新加入公钥,在github中显示黑色钥匙。连接成功后,显示绿色钥匙。
4 设置vscode工作目录与git的连接
1)首先github中必须新建一个项目,例如gittest,这个就不赘述了.
2)在vscode的终端中,输入
git remote add origin git@github.com:chenke225/gittest.git
如果有 fatal: remote origin already exists,则输入
git remote rm origin # 删除config文件的对应项。
重新运行 git remote add origin git@github.com: chenke225/gittest.git
出错表示远程的origin已经在config文件中设定,可用 git remote -v 查看origin。
3)输入推送工作区
git push -u origin master
如果推送成功就会在网站中看到你的项目。有时不成功,可以强行push
git push -u origin master -f
这样把本地目录中的文件上传到gittest目录下,也可以叫gittest仓库 repositories
4)之后只需要在vscode中的菜单中操作上传即可,大功告成。
在vscode中进行git版本控制
选择你的项目目录(仓库目录)
点击左侧git图标(一个连接样子的图标)。
打开要修改的文件,进行修改,保存。
点击该文件上方的+(stage changes)。
添加注释message,必须写不能空。 Ctrl+Enter确定。也可以用对勾 由此保存了修改。
下拉菜单中选 push。如此将本地修改的文件上传到github。
要从云端github下拉文件,从下拉菜单选pull。
此后,不用再用终端来设置,只要在vscode中进行修改,push,pull。。。。
| true
|
448ea0fa638874fd9330f6d7cc0b1f397b4347cb
|
Shell
|
ronioncloud/cloud-media-scripts
|
/config.template
|
UTF-8
| 4,145
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2034
# Configuration template sourced by the cloud-media scripts (mount, upload,
# cleanup). Variables are consumed by other scripts, hence the SC2034 waiver.
###############################################################################
# OPTIONS
###############################################################################
# Pool choice , 0 = unionfs, 1 = mergerfs mergerfs allows deletions for radarr/sonarr to prevent duplication on g-drive.
pool_choice=0
# Encrypt media (0 means no encryption)
encrypt_media=1
# Set your folder location within google drive where your media is stored. Empty means media files are in root
google_drive_media_directory=""
# Upload/Move flag, if set to move, cloudupload will use move instead of copy to move files immediately instead of after a week.
move_ind=0
# Mount user Id's. If necessary for acccess, Replace $(id with a different ID if you need to override.MergerFS tends to not need, unionfs sometimes does.
uid=$(id -u)
gid=$(id -g)
###############################################################################
# DIRECTORIES
###############################################################################
media_dir="INSERT_ROOT_DIR"
# Cloud directories
cloud_encrypt_dir="${media_dir}/.cloud-encrypt"
cloud_decrypt_dir="${media_dir}/.cloud-decrypt"
# Local directory
local_decrypt_dir="${media_dir}/.local-decrypt/Media"
plexdrive_temp_dir="${media_dir}/.local-decrypt/plexdrive"
# Media directory (FINAL)
local_media_dir="${media_dir}/media"
###############################################################################
# FS-Pooling
###############################################################################
# Binaries and mount options for the two supported pooling filesystems;
# which pair is used depends on pool_choice above.
ufs_bin="/usr/bin/unionfs"
mfs_bin="/usr/bin/mergerfs"
ufs_options="-o cow,allow_other,direct_io,nonempty,auto_cache,sync_read,uid=$uid,gid=$gid"
mfs_options="-o defaults,nonempty,allow_other,direct_io,use_ino,category.create=ff,category.search=ff,minfreespace=0,uid=$uid,gid=$gid"
###############################################################################
# PLEXDRIVE
###############################################################################
plexdrive_dir="${media_dir}/plexdrive"
plexdrive_bin="${plexdrive_dir}/plexdrive-linux-amd64"
# MongoDB used by plexdrive for its metadata cache.
mongo_database="plexdrive"
mongo_host="localhost"
mongo_user=""
mongo_password=""
plexdrive_options="--temp=${plexdrive_temp_dir} -o allow_other --clear-chunk-max-size=300G --clear-chunk-age=24h --chunk-size=10M"
###############################################################################
# RCLONE
###############################################################################
rclone_dir="${media_dir}/rclone"
rclone_bin="${rclone_dir}/rclone"
rclone_config="${rclone_dir}/rclone.conf"
rclone_options="--buffer-size 500M --checkers 16"
rclone_beta="0" # Set this to "1" if you want to live dangerous and try out rclone latest beta.
# mergerfs handles writes itself, so only the unionfs variant mounts rclone
# read-only.
if [ "${pool_choice}" = "1" ]; then
rclone_mount_options="${rclone_options} --allow-non-empty --allow-other --max-read-ahead 30G"
else
rclone_mount_options="${rclone_options} --read-only --allow-non-empty --allow-other --max-read-ahead 30G"
fi
# Rclone endpoints
rclone_cloud_endpoint="gd-crypt:"
rclone_local_endpoint="local-crypt:"
upload_limit="0" # This is only used to check within cloudupload script. This number should be in GB and greater than 0 be activated.
move_limit="0" # This is only used to check within rmlocal script. This number should be in GB and greater than 0 be activated.
###############################################################################
# Plex (empty trash)
###############################################################################
plex_url="http://localhost:32400"
plex_token=""
###############################################################################
# MISC. CONFIG
###############################################################################
date_format="+%F@%T"
# Select to remove media files based on 'time', 'space' or 'instant'
#remove_files_based_on="instant"
#remove_files_based_on="time"
remove_files_based_on="space"
# Remove media files based on 'time'
remove_files_older_than=60 # In days
# Remove media files based on 'space'
remove_files_when_space_exceeds=500 # In Gigabytes
freeup_atleast=300 # In Gigabytes
| true
|
b6afe0005b40ae074602f1842ce923b646c6df27
|
Shell
|
dx0eu/v2rew
|
/v2ray_package_crew.sh
|
UTF-8
| 6,490
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Repackage the latest upstream v2ray-core release as Chromebrew ("crew")
# packages and publish them to the $USER/$REPO GitHub releases page.
BIN_PATH=$(dirname $(readlink -f $0))
USER=dx0eu
REPO=v2rew
# Latest upstream release tag, scraped from the GitHub releases HTML with pup.
V2RAY_VERSION=$(curl -sL https://github.com/v2ray/v2ray-core/releases/latest | pup '.release-header:first-child .f1 a text{}')
# Latest tag already published on our own repo; main() skips rebuilds when equal.
_V2REW_VERSION=$(curl -sL https://github.com/$USER/$REPO/releases/latest | pup '.release-header:first-child .f1 a text{}')
_DIST_PREFIX=https://github.com/v2ray/v2ray-core/releases/download
# Per-architecture upstream binary zips plus the source archive.
DIST_ARCH_32=$_DIST_PREFIX/$V2RAY_VERSION/v2ray-linux-32.zip
DIST_ARCH_64=$_DIST_PREFIX/$V2RAY_VERSION/v2ray-linux-64.zip
DIST_ARCH_ARM=$_DIST_PREFIX/$V2RAY_VERSION/v2ray-linux-arm.zip
DIST_ARCH_ARM64=$_DIST_PREFIX/$V2RAY_VERSION/v2ray-linux-arm64.zip
DIST_SRC=https://github.com/v2ray/v2ray-core/archive/$V2RAY_VERSION.zip
# ok.sh is a GitHub REST API client; the script itself is fetched in main().
OK_SH="sh -f $BIN_PATH/ok.sh"
# Download the upstream v2ray release for one architecture and repack it as a
# Chromebrew-style tarball: a usr/ tree plus the dlist/filelist manifests.
# $1 - download URL of the upstream zip
# $2 - crew architecture name (i686, x86_64, armv7l, aarch64)
# Side effects: writes the tarball and its .sha256.txt next to this script.
build_pkg_crew() {
  URL=$1
  ARCH=$2
  echo 'Download v2ray -> '$URL
  curl -sL -o v2ray.zip $URL
  mkdir -p usr/local/share/v2ray
  mkdir -p usr/local/bin
  echo 'Unzip origin package'
  unzip v2ray.zip -d usr/local/share/v2ray
  # Symlinks use absolute /usr/... targets so they resolve after install.
  ln -s /usr/local/share/v2ray/v2ray usr/local/bin/
  ln -s /usr/local/share/v2ray/v2ctl usr/local/bin/
  # Fix: mark the extracted *binaries* executable. The previous code ran
  # chmod on the share directory itself and on a non-existent
  # usr/local/share/v2ctl path, leaving the shipped binaries non-executable.
  chmod +x usr/local/share/v2ray/v2ray
  chmod +x usr/local/share/v2ray/v2ctl
  # Fix: the two progress messages were swapped relative to the calls.
  echo 'Generate filelist'
  gen_filelist
  echo 'Generate dlist'
  gen_dirlist
  TAR_XZ=$BIN_PATH/v2ray-chromeos-$ARCH.tar.xz
  TAR_SIG=$TAR_XZ.sha256.txt
  echo 'Create v2ray crew package -> '$TAR_XZ
  tar -cvf $TAR_XZ usr dlist filelist
  # Record just the 64-hex-char digest (strip the filename sha256sum appends).
  echo $(sha256sum $TAR_XZ | head -c 64) > $TAR_SIG
  echo 'Create v2ray crew package complete. -> '$ARCH
  viewpkg $TAR_XZ
  clear_build $ARCH
}
# Inspect a built crew package: extract it into a scratch directory, show its
# layout and the dlist/filelist manifests, then remove the scratch directory.
# $1 - path to the package tarball
viewpkg() {
  echo '================= VIEWPKG =================='
  # Fix: use a unique scratch directory. The old fixed /tmp/v2ray made
  # `mkdir` fail if a previous run aborted, and could mix stale files
  # into the listing.
  TMP_PATH=$(mktemp -d /tmp/v2ray.XXXXXX)
  tar -xvf "$1" -C "$TMP_PATH"
  tree "$TMP_PATH"
  echo '===> dlist'
  cat "$TMP_PATH/dlist"
  echo '===> filelist'
  cat "$TMP_PATH/filelist"
  rm -rf "$TMP_PATH"
  echo '================= VIEWPKG =================='
}
# Remove the per-architecture scratch artifacts from the working directory
# after a package has been assembled.
# $1 - architecture name (stored in ARCH, matching the original behavior)
clear_build() {
  ARCH=$1
  for leftover in v2ray.zip tmp usr dlist filelist; do
    rm -rf "$leftover"
  done
}
# Build the crew "filelist" manifest: every regular file under usr/, listed
# as an absolute install path, one per line.
gen_filelist() {
# -ifF prints full relative paths; -F suffixes directories with '/', which
# the grep then drops so only files remain.
CONTENT=`tree -ifF --noreport usr | grep -v '/$'`
# Rewrite the relative "usr" prefix into the absolute "/usr" install prefix.
CONTENT=`echo "$CONTENT" | sed 's/usr/\/usr/g'`
# Strip the " -> //target" suffix tree appends to symlink entries.
CONTENT=`echo "$CONTENT" | sed 's/ -> \/\/.*//g'`
echo "$CONTENT" > filelist
}
# Build the crew "dlist" manifest: every directory under usr/ as an absolute
# install path (the top-level "usr" line itself is dropped by `sed '1d'`).
gen_dirlist() {
CONTENT=`tree -dif --noreport usr`
CONTENT=`echo "$CONTENT" | sed 's/usr/\/usr/g' | sed '1d'`
echo "$CONTENT" > dlist
}
# Create a GitHub release tagged with the upstream version and attach the
# four architecture packages plus the source archive to it.
release() {
# TAG=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
TAG=$V2RAY_VERSION
# echo $OK_SH create_release $USER $REPO $TAG
$OK_SH create_release $USER $REPO $TAG
upload_assets i686 $TAG
upload_assets x86_64 $TAG
upload_assets armv7l $TAG
upload_assets aarch64 $TAG
upload_assets_source $TAG
}
# Download the upstream source zip, record its sha256 digest, and attach both
# files as assets to the GitHub release tagged $1.
# $1 - release tag
upload_assets_source() {
  TAG=$1
  SRC_F=$V2RAY_VERSION.zip
  SRC_F_SIG=$SRC_F.sha256.txt
  curl -sL -o $BIN_PATH/$SRC_F $DIST_SRC
  # Fix: the zip is saved under $BIN_PATH, but the old code hashed the bare
  # filename and wrote the signature into the current directory. Both the
  # upload below and build_crew read these files from $BIN_PATH, so the old
  # paths broke whenever the script was run from another directory.
  echo $(sha256sum "$BIN_PATH/$SRC_F" | head -c 64) > "$BIN_PATH/$SRC_F_SIG"
  # Look up the release's upload_url for $TAG and attach the source zip.
  $OK_SH list_releases "$USER" "$REPO" \
  | awk -v "tag=$TAG" -F'\t' '$2 == tag { print $3 }' \
  | xargs -I@ $OK_SH release "$USER" "$REPO" @ _filter='.upload_url' \
  | sed 's/{.*$/?name='"$SRC_F"'/' \
  | xargs -I@ $OK_SH upload_asset @ "$BIN_PATH/$SRC_F" mime_type='application/x-tar'
  # Same lookup again to attach the checksum file.
  $OK_SH list_releases "$USER" "$REPO" \
  | awk -v "tag=$TAG" -F'\t' '$2 == tag { print $3 }' \
  | xargs -I@ $OK_SH release "$USER" "$REPO" @ _filter='.upload_url' \
  | sed 's/{.*$/?name='"$SRC_F_SIG"'/' \
  | xargs -I@ $OK_SH upload_asset @ "$BIN_PATH/$SRC_F_SIG" mime_type='text/plain'
}
# Attach one architecture's package tarball and its checksum file to the
# GitHub release tagged $2.
# $1 - architecture name, $2 - release tag
upload_assets() {
ARCH=$1
TAG=$2
FILE=v2ray-chromeos-$ARCH.tar.xz
SIG=$FILE.sha256.txt
# Resolve the release matching $TAG (column 2 of list_releases output),
# fetch its upload_url, substitute the asset name into the URL template,
# then POST the tarball.
$OK_SH list_releases "$USER" "$REPO" \
| awk -v "tag=$TAG" -F'\t' '$2 == tag { print $3 }' \
| xargs -I@ $OK_SH release "$USER" "$REPO" @ _filter='.upload_url' \
| sed 's/{.*$/?name='"$FILE"'/' \
| xargs -I@ $OK_SH upload_asset @ "$BIN_PATH/$FILE" mime_type='application/x-tar'
# Repeat the lookup to upload the accompanying checksum file.
$OK_SH list_releases "$USER" "$REPO" \
| awk -v "tag=$TAG" -F'\t' '$2 == tag { print $3 }' \
| xargs -I@ $OK_SH release "$USER" "$REPO" @ _filter='.upload_url' \
| sed 's/{.*$/?name='"$SIG"'/' \
| xargs -I@ $OK_SH upload_asset @ "$BIN_PATH/$SIG" mime_type='text/plain'
}
# Print the recorded sha256 digest for the given architecture's package.
# $1 - architecture name
pkg_sig() {
  sig_file="$BIN_PATH/v2ray-chromeos-$1.tar.xz.sha256.txt"
  digest=$(cat "$sig_file")
  echo $digest
}
# Render the v2ray.rb Chromebrew package recipe: substitute the version,
# per-architecture package digests, source digest, and repo coordinates
# into the template placeholders, rewriting v2ray.rb in place.
build_crew() {
V2RAY_RB=$(cat $BIN_PATH/v2ray.rb)
CT=$(echo "$V2RAY_RB" | sed -e "s/{{VERSION}}/${V2RAY_VERSION}/g")
CT=$(echo "$CT" | sed -e "s/{{SHA256_I686}}/$(pkg_sig i686)/g")
CT=$(echo "$CT" | sed -e "s/{{SHA256_X86_64}}/$(pkg_sig x86_64)/g")
CT=$(echo "$CT" | sed -e "s/{{SHA256_ARMV7L}}/$(pkg_sig armv7l)/g")
CT=$(echo "$CT" | sed -e "s/{{SHA256_AARCH64}}/$(pkg_sig aarch64)/g")
CT=$(echo "$CT" | sed -e "s/{{SOURCE_SHA256}}/$(cat $BIN_PATH/$V2RAY_VERSION.zip.sha256.txt)/g")
CT=$(echo "$CT" | sed -e "s/{{USER}}/${USER}/g")
CT=$(echo "$CT" | sed -e "s/{{REPO}}/${REPO}/g")
# NOTE(review): this overwrites the template itself, so a second run in the
# same checkout would find no placeholders left -- confirm intended.
echo "$CT" > $BIN_PATH/v2ray.rb
cat $BIN_PATH/v2ray.rb
}
# Push the rendered v2ray.rb recipe to our chromebrew fork and open a pull
# request against skycocker/chromebrew.
chromebrew() {
    # cd /tmp
    # git clone https://github.com/dx0eu/chromebrew.git
    # cd chromebrew
    # cp $BIN_PATH/v2ray.rb packages/
    # RNG=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
    # echo $RNG > $RNG.txt
    # git add .
    # git commit -m "$RNG"
    # git push origin master
    # Clone the fork over SSH, then fast-forward it against upstream so the
    # PR contains only the recipe change.
    cd /tmp
    git clone git@github.com:dx0eu/chromebrew.git
    cd chromebrew
    git remote add skyc https://github.com/skycocker/chromebrew.git
    git fetch skyc
    git pull skyc master
    cp $BIN_PATH/v2ray.rb packages/
    git add .
    git commit -m "v2ray $V2RAY_VERSION"
    git push origin master
    # Open the PR: head dx0eu:master into upstream master.
    $OK_SH create_pull_request \
        skycocker/chromebrew \
        "v2ray package" \
        dx0eu:master \
        master \
        body="A platform for building proxies to bypass network restrictions. https://www.v2ray.com/ github repo https://github.com/v2ray/v2ray-core"
    cd $BIN_PATH
}
# Entry point: skip if the upstream version was already packaged, otherwise
# fetch the ok.sh API client, build all four architecture packages, publish
# a release, render the recipe, and open the chromebrew PR.
main() {
    if [ "$V2RAY_VERSION" = "$_V2REW_VERSION" ];
    then
        echo 'This version is already build to crew package.'
        exit 0
    fi
    # ok.sh (GitHub API client) is fetched fresh on every run.
    curl -sL -o $BIN_PATH/ok.sh https://raw.githubusercontent.com/dx0eu/ok.sh/master/ok.sh
    build_pkg_crew $DIST_ARCH_32 i686
    build_pkg_crew $DIST_ARCH_64 x86_64
    build_pkg_crew $DIST_ARCH_ARM armv7l
    build_pkg_crew $DIST_ARCH_ARM64 aarch64
    echo '------------------------'
    ls
    release
    # https://gist.github.com/stefanbuck/ce788fee19ab6eb0b4447a85fc99f447
    # sh -f upload-github-release-asset.sh github_api_token=92c97dbfa713f51e7683d1193701ee7a5f6d9a70 owner=dx0eu repo=citest tag=v0.0.2 filename=./aaa.txt
    # NOTE(review): the example line above appears to embed a real GitHub API
    # token -- it should be revoked/rotated and scrubbed from history.
    build_crew
    chromebrew
}
main
| true
|
a76403880ae7652e5934a413ff2c2258708dd264
|
Shell
|
shang766/multiview
|
/doc/build.sh
|
UTF-8
| 1,028
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e

# Build the multiview Doxygen documentation and sync the generated HTML
# into the shared documentation tree.

PPWD="$(cd "$(dirname "$0")" ; pwd)"
ARCH_D=$(cat /opt/multiview/etc/arch)
CONFIG_F="doxy.config"
EXEC=/opt/multiview/$ARCH_D/opt/cc/bin/doxygen

# Require the exact doxygen build produced by build-doxygen.sh.
if [ "$(LD_LIBRARY_PATH=$LLVM_D/lib $EXEC -v)" != "1.8.20" ]; then
    echo "Doxygen is wrong version. Please build with "
    echo "$PPWD/../build/install-files/scripts/build-doxygen.sh"
    exit 1
fi

TMPD=$(mktemp -d /tmp/$(basename $0).XXXXX)

cleanup()
{
    rm -rf $TMPD
}
trap cleanup EXIT

# Stamp the config's PROJECT_NUMBER with the current tag and commit.
TAG="$(git describe --tags | head -n 1)"
COMMIT="$(git log | grep commit | head -n 1)"
sed "s,<PROJECT_VERSION_NUMBER>,$TAG $COMMIT," < "$PPWD/$CONFIG_F" > $TMPD/doxy-config

cd $PPWD
LD_LIBRARY_PATH=$LLVM_D/lib $EXEC $TMPD/doxy-config

rsync -ac "$PPWD/html/" "$PERCEIVE_DATA/computer-vision/documentation/multiview/html"
| true
|
d185bb7855f9bc2a5d7098cb8cf03d82708d7d31
|
Shell
|
mehulsbhatt/docker-mariadb-backup
|
/backup-mysqldump
|
UTF-8
| 1,674
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Dump all databases from a MySQL/MariaDB server into a single backup file.
# Usage: backup-mysqldump -u user -p password -h host [-P port] [-d file]

MYSQL_USER=
MYSQL_PASSWORD=
MYSQL_HOST=
MYSQL_PORT=3306

# Fix: '-d' takes an argument (the backup file path), so the optstring needs
# a trailing colon after 'd'; without it $OPTARG was never populated and the
# next word on the command line was silently misparsed.
while getopts ":u:p:h:P:d:" opt; do
  case $opt in
    d)
      BACKUP_FILE=$OPTARG
      ;;
    u)
      MYSQL_USER=$OPTARG
      ;;
    p)
      MYSQL_PASSWORD=$OPTARG
      ;;
    P)
      MYSQL_PORT=$OPTARG
      ;;
    h)
      MYSQL_HOST=$OPTARG
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done

# Default backup path: timestamped file under /var/backups.
if [ -z "$BACKUP_FILE" ]; then
  BACKUP_FILE=/var/backups/$(date +"%Y-%m-%d-%H%M%S")_${MYSQL_HOST}_mysqldump
fi

# Fix: the usage text declares -h required, but the old check only covered
# user and password, letting a missing host fall through to mysqldump.
if [ -z "$MYSQL_USER" ] || [ -z "$MYSQL_PASSWORD" ] || [ -z "$MYSQL_HOST" ]; then
  echo
  echo Usage: $0 -u mysqluser -p mysqlpassword -h mysqlhost
  echo
  echo "  -u      Specifies the MySQL user (required)"
  echo "  -p      Specifies the MySQL password (required)"
  echo "  -h      Specifies the MySQL host (required)"
  echo "  -P      Specifies the MySQL port (optional)"
  echo "  -d      Specifies the backup file where to put the backup (default: /var/backups/CURRENT_DATETIME_MYSQLHOST_mysqldump)"
  echo
  exit 1
fi

echo Using the following configuration:
echo
echo "  backup_file:     ${BACKUP_FILE}"
echo "  mysql_user:      ${MYSQL_USER}"
echo "  mysql_password:  ****** (not shown)"
echo "  mysql_host:      ${MYSQL_HOST}"
echo "  mysql_port:      ${MYSQL_PORT}"
echo

# NOTE(review): passing -p<password> on the command line exposes the secret
# in `ps` output; prefer MYSQL_PWD or a --defaults-extra-file in future.
mysqldump \
  -u${MYSQL_USER} \
  -p${MYSQL_PASSWORD} \
  -h${MYSQL_HOST} \
  -P${MYSQL_PORT} \
  --single-transaction \
  --routines \
  --triggers \
  --all-databases \
  > ${BACKUP_FILE}

RETVAL=$?

if [ "$RETVAL" == 0 ]; then
  echo Backup finished successfully.
  exit 0
else
  echo Backup failed with errors!
  exit 1
fi
| true
|
1d3598af926319006648d1b861c081c87d23bedb
|
Shell
|
KazAoyama/KaigoSystem
|
/E-LIFE/KEIRI/CGI/back/._KOJINATESEIKYUSHO_SAKUSEI.KEISAN.NORMAL_HIWARI.20140414.163044
|
UTF-8
| 35,125
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# KOJINATESEIKYUSHO_SAKUSEI.KEISAN.NORMAL_HIWARI 施設請求計算部分のCGI
#
# Written by S.Otsubo
# 設定ファイル読込
source /home/hands/.bashrc &> /dev/null
source /home/hands/E-LIFE/KEIRI/CGI/KOJINATESEIKYUSHO_SAKUSEI.INI &> /dev/null
# ログ
[ ! -e /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today} ] && mkdir /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today}
exec 2> /home/hands/E-LIFE/KEIRI/TRACE_LOG/${today}/LOG.$(basename ${0}).${HOSTNAME}.${current_time}; set -xv
#---------------------------------------------------------------
#---------------------------------------------------------------
# Print the supplied error message on stdout and abort the script with
# exit status 1.
error_exit() {
    echo "${1}"
    exit 1
}
#---------------------------------------------------------------
#---------------------------------------------------------------
# 引数設定
namefile=${1}
# 変数
eval $(name-source ${namefile})
seikyu_syori_month="${year}${month}"
seikyu_syori_next_month="$(mdate ${seikyu_syori_month}m/+1)"
seikyu_syori_two_next_month="$(mdate ${seikyu_syori_month}m/+2)"
seikyu_syori_last_month="$(mdate ${seikyu_syori_month}m/-1)"
seikyu_syori_two_last_month="$(mdate ${seikyu_syori_month}m/-2)"
# エラーファイル初期化
rm ${tmp}-err
# くりかえし吐き出し初期化
: > ${tmp}-tokuhan_while_result
: > ${tmp}-nottokuhan_while_result
#---------------------------------------------------------------
#---------------------------------------------------------------
# この処理で使用するファイルがなければ終了
[ ! -s ${tmp}-tujyou_hiwari_taisyou ] && exit 0
#---------------------------------------------------------------
#---------------------------------------------------------------
### 日割り側の処理
# 日割り日数の算出
# 請求月=起算開始月 もしくは 請求月=起算終了月 ならば 日割り日数を算出
# (ただしその月1日からその月末までが期間ならば日割り日数=0(一ヶ月の日数が違うため))
# (さらに日割り日数算出して30日と同じかそれより大きいならば日割り日数=0)
# 請求月!=起算開始月 かつ 請求月!=起算終了月 ならば 日割り日数=0
cat ${tmp}-tujyou_hiwari_taisyou |
awk '{if($2==$18 || $2==$19)
{hiwari_taisyou_flg="1"}
else
{hiwari_taisyou_flg="2"};
print $0,hiwari_taisyou_flg}' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り対象フラグ
# フラグをあたまにもってくる
self NF 1/NF-1 |
# 1:日割り対象フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日
rank |
tee ${tmp}-hiwari_basedata |
self 1 2 > ${tmp}-hiwari_kurikaeshi_data
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || error_exit "処理中にエラーが発生しました(NORMAL_HIWARI)"
#---------------------------------------------------------------
#---------------------------------------------------------------
# 繰り返し処理
cat ${tmp}-hiwari_kurikaeshi_data |
while read gyo_no flg ; do
case "${flg}" in
#------------------------------------------
# 請求月が起算開始終了にかぶる
1 )
# 日割り日数を算出する(レコード発生年月日から月末か起算終了日まで)
# 起算終了月が請求月ならば起算終了日で計算
# 起算開始月が請求月ならば起算開始日で計算
# 起算開始終了月が請求月でないならば、日割り0で計算
#--------------------------------------
## 起算終了月が請求月
cat ${tmp}-hiwari_basedata |
awk '$1=="'${gyo_no}'"' |
delf 1 2 |
awk '{if($2!=$18 && $2==$19)
{print $0,$2"01",$11}}' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り計算用FROM 22:日割り計算用TO
mdate 22 21 |
awk '{print $0,$23+1}' |
delf 21/23 > ${tmp}-tujyou_hiwari_taisyou_bunki1
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数(当日含む)
#--------------------------------------
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
#--------------------------------------
## 起算開始月が請求月
cat ${tmp}-hiwari_basedata |
awk '$1=="'${gyo_no}'"' |
delf 1 2 |
awk '{if($2==$18 && $2!=$19)
{print $0,$2+1"01"}}' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:請求月+1月初
mdate -e 21/-1 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り計算用TO 22:請求月+1月初
self 1/NF 10 21 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り計算用TO 22:請求月+1月初 23:起算開始日 24:日割り計算用TO
mdate 24 23 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:請求月+1月初 22:日割り計算用TO 23:起算開始日 24:日割り計算用TO 25:日割り日数(当日含まず)
awk '{print $0,$25+1}' |
self 1/20 26 > ${tmp}-tujyou_hiwari_taisyou_bunki2
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数(当日含む)
#--------------------------------------
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
#--------------------------------------
## 起算開始終了月が請求月
cat ${tmp}-hiwari_basedata |
awk '$1=="'${gyo_no}'"' |
delf 1 2 |
awk '$2==$18 && $2==$19' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
self 1/NF 10/11 |
mdate 22 21 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:起算開始日 22:起算終了日 23:日割り日数(当日含まず)
awk '{print $0,$23+1}' |
delf 21/23 > ${tmp}-tujyou_hiwari_taisyou_bunki3
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数(当日含む)
#--------------------------------------
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
#--------------------------------------
cat ${tmp}-tujyou_hiwari_taisyou_bunki1 \
${tmp}-tujyou_hiwari_taisyou_bunki2 \
${tmp}-tujyou_hiwari_taisyou_bunki3 |
# 特販期間の金額を採用するかの判定
awk '{if($16=="_")
{$16="99991231"};
print}' |
awk '{if($17=="_")
{$17="99991231"};
print}' |
self 1/NF 16.1.6 17.1.6 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{if($22<=$2 && $23>=$2)
{flg=1}
else
{flg=0};
print flg,$0}' |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月
# 請求月の日数を算出
self 1/NF 3 |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月 25:請求月
mdate 25m/+1 |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月 25:請求月
# 26:請求月+1
awk '{$26=$26"01"; print}' |
mdate -e 26/-1 |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月 25:請求月
# 26:請求月月末 27:請求月+1
awk '{$25=$25"01"; print}' |
mdate 26 25 |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月 25:請求月
# 26:請求月月末 27:請求月日数 28:請求月+1
awk '{$27=$27+1; print}' |
delf 25/26 28 |
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月 25:請求月日数
# -
# 日割り日数=請求月日数ならば日割りしないので日割り日数=0
# 日割り日数!=請求月日数ならば
# 日割り日数<30ならば日割りする
# 上記以外ならば日割り日数=0
awk '{if($22==$25)
{$22=0};
print}' |
awk '{if($22<30)
{$22=$22;}
else
{$22=0};
print}' |
delf NF > ${tmp}-hiwari_keisan_mae
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月
#--------------------------------------
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
# ----------------------
# 列チェック
[ "$(retu ${tmp}-hiwari_keisan_mae)" != "24" ] && : > $tmp-err
[ "$(retu ${tmp}-hiwari_keisan_mae | gyo)" != "1" ] && : > $tmp-err
[ "$(awk 'NF!="24"' ${tmp}-hiwari_keisan_mae | gyo)" != "0" ] && : > $tmp-err
[ -e ${tmp}-err ] && break;
# ----------------------
#--------------------------------------
cat ${tmp}-hiwari_keisan_mae |
while read is_tokuhan tokuhan_igai ; do
# !補足
# 月額賃料の1000円未満切捨ての計算は、一回10^-3かけて切捨て部分を小数点より以下にして、切捨て処理してから10^3かけて戻す作業をしている
case "${is_tokuhan}" in
#-------------------------
# 特販を採用しない日割り
0 )
# 特販を採用しないので日割り値引金額=0、日割り値引後金額=日割り金額
echo "${tokuhan_igai}" |
awk '$21!="0"' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{if($4=="001")
{hiwari_kingaku=int($12*$21/30/1000)*1000;
hiwari_nebiki_kingaku=0;
hiwari_nebikigo_kingaku=hiwari_kingaku}
else
{hiwari_kingaku=int($12*$21/30);
hiwari_nebiki_kingaku=0;
hiwari_nebikigo_kingaku=hiwari_kingaku};
print $0,hiwari_kingaku,hiwari_nebiki_kingaku,hiwari_nebikigo_kingaku}' > ${tmp}-hiwari1
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り値引金額
# 26:日割り値引後金額
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
echo "${tokuhan_igai}" |
awk '$21=="0"' |
awk '{if($4=="001")
{hiwari_kingaku=$12;
hiwari_nebiki_kingaku=0;
hiwari_nebikigo_kingaku=hiwari_kingaku}
else
{hiwari_kingaku=$12;
hiwari_nebiki_kingaku=0;
hiwari_nebikigo_kingaku=hiwari_kingaku};
print $0,hiwari_kingaku,hiwari_nebiki_kingaku,hiwari_nebikigo_kingaku}' > ${tmp}-hiwari2
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
cat ${tmp}-hiwari1 ${tmp}-hiwari2 >> ${tmp}-nottokuhan_while_result
;;
#-------------------------
#-------------------------
# 特販を採用する日割り
1 )
echo "${tokuhan_igai}" |
awk '$21!="0"' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{if(4=="001")
{hiwari_kingaku=int($12*$21/30/1000)*1000;
hiwari_nebiki_kingaku=int($14*$21/30/1000)*1000;
hiwari_nebikigo_kingaku=hiwari_kingaku-hiwari_nebiki_kingaku}
else
{hiwari_kingaku=int($12*$21/30);
hiwari_nebiki_kingaku=int($14*$21/30);
hiwari_nebikigo_kingaku=hiwari_kingaku-hiwari_nebiki_kingaku};
print $0,hiwari_kingaku,hiwari_nebiki_kingaku,hiwari_nebikigo_kingaku}' > ${tmp}-tokuhan_hiwari1
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り値引金額
# 26:日割り値引後金額
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
echo "${tokuhan_igai}" |
awk '$21=="0"' |
awk '{if(4=="001")
{hiwari_kingaku=$12;
hiwari_nebiki_kingaku=$14;
hiwari_nebikigo_kingaku=$15}
else
{hiwari_kingaku=$12;
hiwari_nebiki_kingaku=$14;
hiwari_nebikigo_kingaku=$15};
print $0,hiwari_kingaku,hiwari_nebiki_kingaku,hiwari_nebikigo_kingaku}' > ${tmp}-tokuhan_hiwari2
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
cat ${tmp}-tokuhan_hiwari1 ${tmp}-tokuhan_hiwari2 >> ${tmp}-tokuhan_while_result
;;
#-------------------------
esac
done
# エラー判定
[ -e ${tmp}-err ] && break
#--------------------------------------
;;
#------------------------------------------
#------------------------------------------
# 請求月が起算開始終了にかぶらない
2 )
#--------------------------------------
# 日割りしない
cat ${tmp}-hiwari_basedata |
awk '$1=="'${gyo_no}'"' |
delf 1 2 |
awk '{print $0,"0"}' |
# 特販期間の金額を採用するかの判定
self 1/NF 16.1.6 17.1.6 |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{if($22<=$2 && $23>=$2)
{flg=1}
else
{flg=0};
print flg,$0}' > ${tmp}-hiwari_keisan_mae2
# 1:特販採用フラグ 2:入居者ID 3:請求月 4:契約ID 5:利用料種別ID
# 6:契約枝番 7:利用料名 8:税区分 9:支払区分 10:費用算出方法
# 11:起算開始日 12:起算終了日 13:金額 14:特販ID 15:値引額
# 16:値引後金額 17:特販期間FROM 18:特販期間TO 19:起算開始月 20:起算終了月
# 21:レコードが発生した年月日 22:日割り日数 23:特販期間FROM月 24:特販期間TO月
#--------------------------------------
# ファイルがある状態で処理に入るのでパイプでエラーのとき落とす
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
#--------------------------------------
# ----------------------
# 列チェック
[ "$(retu ${tmp}-hiwari_keisan_mae2)" != "24" ] && : > $tmp-err
[ "$(retu ${tmp}-hiwari_keisan_mae2 | gyo)" != "1" ] && : > $tmp-err
[ "$(awk 'NF!="24"' ${tmp}-hiwari_keisan_mae2 | gyo)" != "0" ] && : > $tmp-err
[ -e ${tmp}-err ] && break;
# ----------------------
#--------------------------------------
#--------------------------------------
cat ${tmp}-hiwari_keisan_mae2 |
while read is_tokuhan tokuhan_igai ; do
# 日割りしないが、日割りと処理を同じにするため、日割り項目にそのままの値を入れる
case "${is_tokuhan}" in
#--------------------------------
# 特販を採用しない
0 )
# 特販を採用しないので、日割り金額=金額、日割り後値引金額=0、日割り値引後金額=金額
echo "${tokuhan_igai}" |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{print $0,"0"}' |
self 1/NF-1 12 NF 12 >> ${tmp}-nottokuhan_while_result
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り後値引金額
# 26:日割り値引後金額
# エラー判定
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
;;
#--------------------------------
#--------------------------------
# 特販を採用する
1 )
echo "${tokuhan_igai}" |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月
awk '{print $0,"0"}' |
self 1/NF-1 15 NF 15 >> ${tmp}-tokuhan_while_result
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り後値引金額
# 26:日割り値引後金額
# エラー判定
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || : > ${tmp}-err
[ -e ${tmp}-err ] && break
;;
#--------------------------------
esac
#--------------------------------------
done
# エラー判定
[ -e ${tmp}-err ] && break
;;
#------------------------------------------
esac
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り後値引金額
# 26:日割り値引後金額
done
#---------------------------------------------------------------
#---------------------------------------------------------------
# エラーファイルがあったら落とす
[ -e ${tmp}-err ] && error_exit "処理中にエラーが発生しました(NORMAL_HIWARI)"
#---------------------------------------------------------------
#---------------------------------------------------------------
# 日割り日数が0でないものは日割り計算をしたので特別処理フラグ1(=日割り済み)をいれる。それ以外は0
cat ${tmp}-tokuhan_while_result ${tmp}-nottokuhan_while_result |
awk '{if($21!="0")
{flg=1}
else
{flg=0};
print $0,flg}' |
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:金額 13:特販ID 14:値引額 15:値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り日数 22:特販期間FROM月 23:特販期間TO月 24:日割り金額 25:日割り後値引金額
# 26:日割り値引後金額 27:特別処理フラグ
# 日割りや値引の結果をセットする
self 1/11 24 13 25/26 16/20 26 27 > ${tmp}-tujyou_hiwari_taisyou_kakutei
# 1:入居者ID 2:請求月 3:契約ID 4:利用料種別ID 5:契約枝番
# 6:利用料名 7:税区分 8:支払区分 9:費用算出方法 10:起算開始日
# 11:起算終了日 12:日割り金額 13:特販ID 14:日割り後値引金額 15:日割り値引後金額
# 16:特販期間FROM 17:特販期間TO 18:起算開始月 19:起算終了月 20:レコードが発生した年月日
# 21:日割り値引後金額 22:特別処理フラグ
#---------------------------------------------------------------
### 日割り側の処理ここまで
#---------------------------------------------------------------
#---------------------------------------------------------------
# エラーファイルがあったら落とす
[ -e ${tmp}-err ] && error_exit "処理中にエラーが発生しました(NORMAL_HIWARI)"
#---------------------------------------------------------------
exit 0
| true
|
800754956238bb1d170b25bcfa6d63f1421b62cf
|
Shell
|
CocoaLumberjack/CocoaLumberjack
|
/Demos/PerUserLogLevels/Scripts/LumberjackUser.bash
|
UTF-8
| 743
| 3.9375
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Xcode build-phase script: derive a C macro name from the current macOS
# user's full name and write it as a #define to SCRIPT_OUTPUT_FILE_0
# (supplied by the Xcode build phase) for per-user log-level switching.
# Get full user name of current user
# E.g. "Robbie Hanson"
full1=$(osascript -e "tell application \"System Events\"" -e "get the full name of the current user" -e "end tell")
#echo $full1
# Convert to lower case
# E.g. "robbie hanson"
full2=$(echo $full1 | awk '{print tolower($0)}')
#echo $full2
# Replace spaces with underscores
# E.g. "robbie_hanson"
full3=$(echo ${full2// /_})
#echo $full3
# Remove any characters that are illegal in a macro name
full4=$(echo $full3 | sed 's/[^0-9a-zA-Z_]*//g')
#echo $full4
# If blank, set the name to an anonymous user
if [ "$full4" == "" ]
then
full4='anonymous_user'
fi
# Truncate (>) then append (>>) so the file always has exactly two lines.
echo "// This file is automatically generated" > ${SCRIPT_OUTPUT_FILE_0}
echo "#define $full4 1" >> ${SCRIPT_OUTPUT_FILE_0}
| true
|
05d388845bc9f04aff00066f12cfd54164b49a82
|
Shell
|
generalist/wikidata-hop
|
/hopextract-educated
|
UTF-8
| 4,737
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# script for extracting various metadata from HoP HTML pages and preparing for Wikidata
# nb this needs them all in a handy directory at eg 1386-1421/member/xyz.html
# the old one was quite clunky - essentially ran once for each matching item
# version 2 runs backwards and takes a known item, then pulls out what matches it can
# uses pup - https://github.com/ericchiang/pup
# refresh with
# curl "https://raw.githubusercontent.com/generalist/wikidata-hop/master/hopextract-educated" > hopextract-educated
# NOTE(review): these rm calls error (harmlessly) when the targets are absent;
# there is no set -e, so the script continues regardless.
rm working/*
rm quickstatements
# import some useful stuff
# Wikidata beacon of P1614 (HoP ID) matches; '||' separators become tabs.
curl "http://tools.wmflabs.org/wikidata-todo/beacon.php?prop=1614&source=0" | sed 's/||/\t/g' | grep -v \# > working/beacon
# comment out to have a cheaty beacon
# curl "http://tools.wmflabs.org/wikidata-todo/beacon.php?prop=1614&source=0" | sed 's/||/\t/g' | grep -v \# | grep 123 > working/beacon
# gives us a nice sample set of 36 entries including six doubles
cut -f 1 working/beacon | sort | uniq > working/onwiki-qids
cut -f 2 working/beacon | sort > working/onwiki-slugs
# pull in the list of matched institution strings
# curl "https://raw.githubusercontent.com/generalist/wikidata-hop/master/institutions.tsv" > working/institutions.tsv
# for each member that exists onwiki, extract the 'educated' section
# (both the "educ" and "educ." heading spellings are probed; misses append
# an empty line, which the later dedup pass absorbs)
for i in `cat working/onwiki-slugs` ; do
echo -e $i "\t" `cat $i.html | pup --color | grep -A 2 "educ$" | tail -1` >> working/educated ;
echo -e $i "\t" `cat $i.html | pup --color | grep -A 2 "educ.$" | tail -1` >> working/educated ;
done
# Split the education strings on ';', normalise whitespace/punctuation, and
# count occurrences of each distinct fragment.
cat working/educated | sed 's/;/\t/g' | sed 's/\t/\n/g' | sed 's/ [0-9]/\t/g' | sed 's/ c./\t/g' | sed 's/^ //g' | sed 's/^\. //g' | cut -f 1 | sed 's/ $//g' | sed 's/\.$//g'| sed 's/,$//g' | sort | uniq -c | sort -rn > working/rawnumbers
for i in `cat working/institutions.tsv | cut -f 1` ; do grep "`grep $i working/institutions.tsv | cut -f 2`" working/rawnumbers >> working/scratch ; done
cat working/scratch | sort | uniq | sort -rn > working/rawmatched
grep -vf working/rawmatched working/rawnumbers > working/rawunmatched
# in other words...
# rawnumbers - overall crudely-deduplicated report
# rawmatched - number that will be matched by one of the dedicated regexps
# rawunmatched - number that is not matched by a regexp
# okay, now let's do this for each person
rm working/personfragments
for i in `cat working/onwiki-qids` ; do
rm working/pages ;
grep $i working/beacon | cut -f 2 > working/pages ;
# find all the pages matched to this person
for j in `cat working/pages` ; do
rm working/educated # clear it just to be safe
rm working/fragments # clear it just to be safe
cat $j.html | pup --color | grep -A 2 "educ$" | tail -1 | sed 's/; /\n/g' | sed 's/\t//g' > working/educated ;
cat $j.html | pup --color | grep -A 2 "educ.$" | tail -1 | sed 's/; /\n/g' | sed 's/\t//g' >> working/educated ;
cat working/educated | sed 's/; /\n/g' | sed 's/^ //g' | sed 's/^  //g' | sed 's/^\. //g' | sed 's/ $//g' | sed 's/\.$//g'| sed 's/,$//g' | sed 's/\t//g' > working/fragments ;
# at this point, we have working/fragments containing one line per "bit" of education from a single HoP entry
# we now need to combine it into a single file per person, noting which one from which entry
# this bit creates a variable k, set as the number of lines in working/fragments (and reset to 0 each time for safety)
# if zero, it doesn't do anything (no blank lines!)
# otherwise it cuts line k, then drops it into a nicely formatted new file
# then it takes one off k and does it again
# the test run successfully got two lines from the 1660 entry for Q2897123, and three from the 1690 entry
k=0 # reset it for safety
k="`cat working/fragments | wc -l`" # count lines to run the next section
while [ $k -ne 0 ] ; do
awk -v l=$k 'NR==l' working/fragments > working/kmfrag
echo -e $i"\t"$j"\t"`cat working/kmfrag` >> working/personfragments
((k--))
echo $k
done
done
done
# this takes each page, extracts the educated bit, breaks it into fragments, then saves these against the slug and QID
# note Q5537123 in our test - he has two different entries, yay!
# now, what do we want to do with this? we want to parse it, yes we do.
########
#
# STILL TO DO
#
########
# all the parsing, obviously
# - including a solution to the "privately" problem
# a way to deal with brackets, which currently cause problems for the beacon parser
# date parsing wow this will be amazing
# parsing notes -
# omit anything involving the word "privately" unless perhaps it matches a grand tour etc?
# omit any lines *starting* "fellow" eg Q922508
# omit anything with ? perhaps possibly poss.
| true
|
e4b7941aac50af1ebd77d820035231449edd6d69
|
Shell
|
huabingood/dayDayUp
|
/Bash/src/businessServiceDemand/functionFiles/functions.sh
|
UTF-8
| 2,108
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#######################################################
## Functions executed by the daily HQL job runner.
## @author huabingood
#######################################################

## Iterate over every HQL script listed in the global sqlLists array,
## verify it exists, and run it through hive with the daily settings,
## logging to <script-name>_<today>.log.
## Globals read: sqlLists, today, hqlFilesPath
function runDaily(){
    for hqlFile in ${sqlLists[@]}
    do
        # Skip missing scripts. (The original helper used 'break', which
        # cannot break a caller's loop from inside a function.)
        fileIsExist || continue
        hqlName=${hqlFile%.*}            # strip the file extension
        logPath=${hqlName}_${today}.log  # log file name for this script
        exDailyHive -S -f ${hqlFilesPath}/${hqlFile}
    done
}

## Daily hive invocation with the shared configuration parameters
## (excluding parameters each script sets for itself).
## Note: hive.root.logger must be INFO,DRFA, otherwise hive will not
## write to the redirected log file.
function exDailyHive(){
    /opt/huabingood/pseudoDistributeHadoop/hive-1.1.0-cdh5.10.0/bin/hive "$@" \
    --hiveconf hive.root.logger=${rootLogger} \
    --hiveconf hive.log.dir=${logDir} \
    --hiveconf hive.log.file=${logPath} \
    --hiveconf startTime=${startTime} \
    --hiveconf endTime=${endTime} \
    --hiveconf rerun_term=${rerun_term}
}

## Return 0 when ${hqlFilesPath}/${hqlFile} is a regular file, 1 otherwise.
## On failure a message is written to today's misc log.
function fileIsExist(){
    filePath=${hqlFilesPath}/${hqlFile}
    # Fixed: original tested '[ !-f ${filePaht} ]' — the missing space after
    # '!' and the 'filePaht' typo made the check never detect a missing file.
    if [ ! -f "${filePath}" ]
    then
        echo -e "${hqlFile} not exists or is not a regular file." > ${logDir}/${otherLog}_${today}.log
        return 1  # 'break' is only valid inside a loop; signal via status instead
    fi
}

## Experimental log4j appender override — kept for reference only; these
## --hiveconf appender settings do not take effect and always error out.
## (Also fixed: a trailing backslash before '}' left this function
## unterminated, which broke parsing of the whole file.)
function aaaa(){
    /opt/huabingood/pseudoDistributeHadoop/hive-1.1.0-cdh5.10.0/bin/hive "$@" \
    --hiveconf hive.root.logger=INFO,aaa \
    --hiveconf log4j.appender.aaa=org.apache.log4j.DailyRollingFileAppender \
    --hiveconf log4j.appender.aaa.Threshold=DEBUG \
    --hiveconf log4j.appender.aaa.ImmediateFlush=true \
    --hiveconf log4j.appender.aaa.Append=true \
    --hiveconf log4j.appender.aaa.File=/home/huabingood/log/BigData_Practise.log \
    --hiveconf log4j.appender.aaa.DatePattern='.'yyyy-MM-dd \
    --hiveconf log4j.appender.aaa.layout=org.apache.log4j.PatternLayout
}
| true
|
1ed16ee63c928b975ebc284e654214ebcc82da58
|
Shell
|
jjevans/tools
|
/bin/findcwd
|
UTF-8
| 1,188
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# findcwd - locate a file by (partial) name, rooted at the current directory.
#   $1 - filename, or a substring of one
#   $2 - optional; any value disables the implicit *...* wildcard wrapping
# All searches are case-insensitive (find -iname). Diagnostics go to
# stderr; only find's results reach stdout.
# Fixed: several redirections were written '1&>2', which actually ran a
# command named '1' and created a file literally named '2' in the cwd;
# they are now '1>&2'. Also replaced the '[[ ]]'/'==' bashisms with
# portable '[ ]'/'=' to match the /bin/sh shebang.
echo "####" 1>&2
echo "#findcwd, 10212014" 1>&2
echo "#find a file rooted to your current location" 1>&2
echo "#care of your friendly personalized medicine bioinformatics team." 1>&2
echo "####" 1>&2
# usage
if [ "$1" = "" ]; then
echo "usage: findcwd filename use_wildcard(optional,default yes,any value)" 1>&2
exit
fi
echo CWD: `pwd` 1>&2
# By default wrap the stem in wildcards; any second argument searches
# for the exact name instead.
if [ "$2" = "" ]; then
FILE="'*${1}*'"
echo WILDCARD: TRUE 1>&2
else
FILE="'${1}'"
echo WILDCARD: FALSE 1>&2
fi
# exec
if [ "$FILE" = "" ]; then
echo 1>&2
echo 'PARSE ERROR: stem is empty, "${FILE}"' 1>&2
exit
fi
echo "SEARCH STEM: $FILE" 1>&2
echo "####" 1>&2
echo 1>&2
CMD="find . -iname $FILE -ls"
echo CMD: $CMD 1>&2
echo "####" 1>&2
# Pipe through perl to pass matches to stdout while counting them and
# reporting the total on stderr.
CMD="${CMD} | perl -Mstrict -e 'my \$num=0;while(<>){\$num++;print STDOUT \$_;}print STDERR \"\#\#\#\#\nNumber files found: \".\$num.\"\nDone.\n\";'"
eval $CMD
echo "####" 1>&2
exit
| true
|
d6494daafe453e7fc6e03bf272df137b6676be03
|
Shell
|
earthobservatory/slcp2pm
|
/script/create_lar.sh
|
UTF-8
| 548
| 3.375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# Wrapper around create_lar.py: generate an S1 log-amplitude-ratio (LAR)
# product from an SLC pair (SLCP) product.
#   $1 - SLCP product (passed straight to create_lar.py)
#   $2 - swath number (assigned below but not referenced by this wrapper
#        — TODO confirm whether it is still needed)
# Progress messages go to stderr; stdout carries only machine-readable output.
BASE_PATH=$(dirname "${BASH_SOURCE}")
BASE_PATH=$(cd "${BASE_PATH}"; pwd)
SLCP_PROD=$1
SWATH=$2
echo "##########################################" 1>&2
echo -n "Running S1 log amp ratio generation: " 1>&2
date 1>&2
# Load the ISCE2 environment, then run the generator; everything it prints
# is captured in create_lar.log so stdout stays clean.
source /opt/isce2/isce_env.sh && python3 $BASE_PATH/create_lar.py $SLCP_PROD > create_lar.log 2>&1
STATUS=$?
echo -n "Finished running S1 log amp ratio generation: " 1>&2
date 1>&2
if [ $STATUS -ne 0 ]; then
echo "Failed to run S1 log amp ratio generation." 1>&2
cat create_lar.log 1>&2
# Emit an empty JSON object on stdout for downstream consumers, then
# propagate the failure status.
echo "{}"
exit $STATUS
fi
| true
|
b496149f316104de3f560bdb9828843ce71a6396
|
Shell
|
Meshl-555/FrameworkBenchmarks
|
/toolset/setup/linux/languages/elixir.sh
|
UTF-8
| 344
| 2.734375
| 3
|
[] |
permissive
|
#!/bin/bash
# Framework-benchmarks installer hook for Elixir.
# The installation is currently known to be broken, so this script only
# emits a warning and reports failure to the framework.
# NOTE: it uses 'return', so it is meant to be *sourced* by the toolset,
# not executed directly.
# TODO seems to be broken installation
echo "WARN: Elixir does not install"
return 1
# The original (disabled) installation steps are kept below for reference:
# fw_exists v0.13.3.tar.gz
# [ $? -ne 0 ] || { return 0; }
# fw_get https://github.com/elixir-lang/elixir/archive/v0.13.3.tar.gz
# fw_untar v0.13.3.tar.gz
# cd elixir-0.13.3
# bash -c -i 'sudo make install'
# sudo make clean
# sudo make test
| true
|
06b0ac17cf41acbf5397bfa0eb28a8361bafb298
|
Shell
|
Fatdrunk1/avtse-iiotg-upgrade-exp
|
/ble5/opt/avnet-iot/services/ble5lr-uart
|
UTF-8
| 1,268
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
# Attach the BLE5 long-range HAT on the Pi UART and give every controller
# that still reports the all-zero address a deterministic static Bluetooth
# device address derived from the CPU serial number.

# Tool paths.
BTATTACH=/usr/bin/btattach
BTMGMT=/usr/bin/btmgmt
HCIATTACH=/usr/bin/hciattach
HCITOOL=/usr/bin/hcitool
HCICONFIG=/bin/hciconfig
ble5lr_hat="/dev/serial0"
bd_prefix="FF:02:"
# Set static BD ADDR based on cpu serial
# The last 8 hex digits of the device-tree serial are split into four bytes;
# each byte is XORed with 0x77 and appended to bd_prefix.
SERIAL=`cat /proc/device-tree/serial-number | cut -c9-`
B0=`echo $SERIAL | cut -c1-2`
B1=`echo $SERIAL | cut -c3-4`
B2=`echo $SERIAL | cut -c5-6`
B3=`echo $SERIAL | cut -c7-8`
BDADDR=`printf $bd_prefix%02x:%02x:%02x:%02x $((0x$B0 ^ 0x77)) $((0x$B1 ^ 0x77)) $((0x$B2 ^ 0x77)) $((0x$B3 ^ 0x77))`
# Individual address bytes, most significant (M5) to least significant (M0).
M5=`echo $BDADDR | cut -c1-2`
M4=`echo $BDADDR | cut -c4-5`
M3=`echo $BDADDR | cut -c7-8`
M2=`echo $BDADDR | cut -c10-11`
M1=`echo $BDADDR | cut -c13-14`
M0=`echo $BDADDR | cut -c16-17`
# Program the computed address into controller hci$1 and cycle the
# interface so the new address takes effect.
#   $1 - hci index (0-3)
ble5lr_set_bd_addr()
{
$BTMGMT --index $1 power off
$BTMGMT --index $1 static-addr $BDADDR
$BTMGMT --index $1 auto-power
# Vendor-specific HCI command (OGF 0x3f, OCF 0x006) with the address bytes
# in little-endian order — presumably sets the controller's public address;
# TODO confirm against the chip's HCI documentation.
$HCITOOL -i "hci$1" cmd 0x3f 0x006 0x$M0 0x$M1 0x$M2 0x$M3 0x$M4 0x$M5 > /dev/null
$HCICONFIG "hci$1" down
$HCICONFIG "hci$1" up
}
#sleep 1s
#$BTATTACH -B /dev/serial0 -S 115200 -P h4 &
# Attach the serial HCI device, supplying the derived address.
$HCIATTACH /dev/serial0 any 115200 noflow - $BDADDR
# Fix up any controller still reporting the all-zero address.
for index in 0 1 2 3
do
bd=$($HCICONFIG "hci$index" 2> /dev/null | grep "BD Address: ")
case "$bd" in
*00:00:00:00:00:00*)
ble5lr_set_bd_addr $index
;;
esac
done
| true
|
4975034955af864697267d34c2b63ea9d8339ff8
|
Shell
|
4U6U57/dotfiles
|
/bin/git-sortmodules.sh
|
UTF-8
| 514
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Sorts the .gitmodules file in the current directory
# Taken from https://gist.github.com/jaytaylor/fad7bc69e5f12fc2331e2c6330bd8419
# Approach: prefix every line with a sortable key (submodule path, section
# number, line index within the section), sort, then strip the key columns
# and restore indentation.
TempModules=$(mktemp)
# awk #1: K = submodule path ('"vendor/' prefix, quotes and ']' stripped),
#         N = section counter, J = line index within the section.
# awk #2: blank out the three key columns after sorting.
# sed:    drop the leading spaces left by the blanked columns.
# awk #3: re-indent every non-section-header line with a tab.
awk 'BEGIN { I=0 ; J=0 ; K="" } ; /^\[submodule/{ N+=1 ; J=1 ; K=$2 ; gsub(/("vendor\/|["\]])/, "", K) } ; { print K, N, J, $0 } ; { J+=1 }' .gitmodules \
| sort \
| awk '{ $1="" ; $2="" ; $3="" ; print }' \
| sed 's/^ *//g' \
| awk '/^\[/{ print ; next } { print "\t", $0 }' \
>"$TempModules"
mv "$TempModules" .gitmodules
| true
|
ac6bca700b0a6aa11a93d5cba86c783f8068e59a
|
Shell
|
sdothum/dotfiles
|
/build/build/arch/post_install/passenger
|
UTF-8
| 1,008
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
# sdothum - 2016 (c) wtfpl
# Post_install
# ══════════════════════════════════════════════════════════════════════════════
# ........................................................................ nginx
# Usage: post_install passenger [<rubyver>]
# Builds the Passenger agent and an nginx with the Passenger module for the
# installed Passenger gem, then points the systemd unit at the built binary.
# manual ruby version install
# Use the requested ruby version, or default to the newest one under ~/.gem/ruby.
[ $2 ] && vsn=$2 || vsn=$(ls -1 $HOME/.gem/ruby | sort | tail -1)
# build passenger agent
$HOME/.gem/ruby/$vsn/gems/passenger-*/bin/passenger-config compile-agent
# build web server
sudo -E /usr/bin/ruby $HOME/.gem/ruby/$vsn/gems/passenger-*/bin/passenger-install-nginx-module
# Patch an existing unit to use the passenger-built nginx under /opt/nginx,
# or install our own unit file via the cfg_install helper (defined elsewhere).
if [ -f /usr/lib/systemd/system/nginx.service ] ;then
sudo sed -i 's|/usr/bin|/opt/nginx/sbin|' /usr/lib/systemd/system/nginx.service
else
cfg_install /usr/lib/systemd/system/nginx.service
fi
# 'server', 'service' and 'ditto' are helpers from this dotfiles toolkit —
# presumably: enable at boot on servers, otherwise start once (confirm in
# the toolkit's helper definitions).
server && service enable nginx || ditto run "sudo systemctl start nginx"
# vim: set ft=sh: #
| true
|
e401c288e0100c4286b8691ea4582458efd21487
|
Shell
|
MortenHe/wss-install
|
/install-powerblock.sh
|
UTF-8
| 248
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the upstream PowerBlock power-button installer, run it through
# sudo bash, and remind the user to adjust the shutdown pin afterwards.
printf '%s\n' 'get and install POWERBLOCK power button script'
cd ..
installer_url='https://raw.githubusercontent.com/petrockblog/PowerBlock/master/install.sh'
wget -O - "$installer_url" | sudo bash
printf '%s\n' 'edit config shutdownpin 18 -> 14'
printf '%s\n' 'nano /etc/powerblockconfig.cfg'
| true
|
7acdb85a3b60f58d014675f064a6917e8e4995a0
|
Shell
|
dreydean/nudgepad
|
/system/createOwnerFile.sh
|
UTF-8
| 612
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
# Generate owner user file
# Creates <projectsPath>/<domain>/nudgepad/team/<email>.space containing the
# owner's display name, a random access key, and the "owner" role.
#   $1 - domain (project directory name)
#   $2 - owner's email address
# Globals read: projectsPath; helper used: isMac (defined elsewhere).
# Note: no 'local' is used, so domain/ownerEmail/OWNERKEY/OWNERNAME/OWNERLINK
# remain set for the caller after this function returns.
createOwnerFile ()
{
domain=$1
ownerEmail=$2
# Random key: sha256 of email + two $RANDOM values, with shasum's " -"
# suffix stripped.
OWNERKEY=$(echo $ownerEmail$RANDOM$RANDOM | shasum -a 256 | sed "s/ -//g")
# Display name: local part of the email, digits removed, dots -> spaces.
OWNERNAME=$(echo $ownerEmail | sed "s/@.*//g" | sed "s/[0-9]//g" | sed "s/\./ /g")
OWNERLINK=$(echo "http://$domain/nudgepad.login?email=$ownerEmail&key=$OWNERKEY")
# On macOS append via tee -a, elsewhere overwrite with a plain redirect —
# presumably a platform quirk; TODO confirm why the modes differ.
if isMac
then
printf "name $OWNERNAME\nkey $OWNERKEY\nrole owner\n" | tee -a $projectsPath/$domain/nudgepad/team/$ownerEmail.space >/dev/null
else
printf "name $OWNERNAME\nkey $OWNERKEY\nrole owner\n" > $projectsPath/$domain/nudgepad/team/$ownerEmail.space
fi
}
| true
|
50fb5d4b4530c2eda39fd3bd779869fa57f1dad3
|
Shell
|
staticdev/88x2bu
|
/dkms-install.sh
|
UTF-8
| 1,138
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Install the rtl88x2bu driver via DKMS.
# Must run as root; requires the dkms package. Copies the source tree to
# /usr/src, installs the modprobe config, then runs dkms add/build/install.

DRV_NAME=rtl88x2bu
DRV_VERSION=5.8.7.4

# Run one dkms sub-command for this driver and abort the script on failure.
#   $1 - dkms action (add / build / install)
run_dkms()
{
	dkms "$1" -m ${DRV_NAME} -v ${DRV_VERSION}
	if [ $? -ne 0 ]
	then
		echo "An error occurred while running: dkms $1"
		exit 1
	else
		echo "dkms $1 was successful."
	fi
}

if [ $EUID -ne 0 ]
then
	echo "You must run dkms-install.sh with superuser privileges."
	echo "Try: \"sudo ./dkms-install.sh\""
	exit 1
fi

# Sanity check: dkms installs its support files under /usr/lib/dkms.
if [ -d "/usr/lib/dkms" ]
then
	echo "dkms appears to be installed."
else
	echo "dkms does not appear to be installed."
	echo "Try: \"sudo apt install dkms\""
	exit 1
fi

echo ""
echo "Copying driver to: /usr/src/${DRV_NAME}-${DRV_VERSION}"
cp -r $(pwd) /usr/src/${DRV_NAME}-${DRV_VERSION}

echo ""
echo "Copying 88x2bu.conf to: /etc/modprobe.d"
cp -r 88x2bu.conf /etc/modprobe.d

# The three dkms stages previously repeated the same run/check boilerplate;
# run_dkms keeps messages and exit behaviour identical for each stage.
run_dkms add
run_dkms build
run_dkms install
| true
|
9e63c027dd711ab652c344d29b08fdf3e6fa88c7
|
Shell
|
geekgit/linux_scripts_meta
|
/bash.sh
|
UTF-8
| 978
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the "simple" bashrc and helper script from the linux_configs repo,
# install copies for the current user and for root, and symlink the
# respective .bashrc files to the installed copies.
# NOTE(review): relies on ${USERNAME} being set in the environment — many
# shells only export $USER; confirm before depending on this script.
wget --secure-protocol=TLSv1_2 --https-only "https://raw.githubusercontent.com/geekgit/linux_configs/master/home/username/simple.bashrc" -O "simple.bashrc"
wget --secure-protocol=TLSv1_2 --https-only "https://raw.githubusercontent.com/geekgit/linux_configs/master/home/username/bash.sh" -O "simple_bash.sh"
chmod a+rwx "simple.bashrc"
chmod a+rwx "simple_bash.sh"
cp "simple.bashrc" "/home/${USERNAME}/simple.bashrc"
cp "simple_bash.sh" "/home/${USERNAME}/simple_bash.sh"
sudo cp "simple.bashrc" "/root/simple.bashrc"
sudo cp "simple_bash.sh" "/root/simple_bash.sh"
ln -sf "/home/${USERNAME}/simple.bashrc" "/home/${USERNAME}/.bashrc"
# Lock down root's copies: owned by root, accessible by root only.
sudo chown root:root "/root/simple.bashrc"
sudo chmod a-rwx "/root/simple.bashrc"
sudo chmod u+rwx "/root/simple.bashrc"
sudo chown root:root "/root/simple_bash.sh"
sudo chmod a-rwx "/root/simple_bash.sh"
sudo chmod u+rwx "/root/simple_bash.sh"
sudo ln -sf "/root/simple.bashrc" "/root/.bashrc"
# Remove the working copies downloaded into the current directory.
rm simple_bash.sh
rm simple.bashrc
| true
|
5bb7db10973fa76d725ee04a76cc4541a32de0ad
|
Shell
|
filwie/dotfiles
|
/docker/build_images.sh
|
UTF-8
| 1,235
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate Dockerfiles for this dotfiles repo, then build one image per
# generated docker/<name>/Dockerfile. Images are named
# ${IMAGE_PREFIX}<name>${IMAGE_SUFFIX}:${IMAGE_TAG}; all three parts can be
# overridden via the environment.
set -eu
REPO_ROOT="$(git rev-parse --show-toplevel)"
BUILD_DIR="${REPO_ROOT}"
DOCKER_DIR="${REPO_ROOT}/docker"
IMAGE_TAG="${IMAGE_TAG:-latest}"
IMAGE_PREFIX="${IMAGE_PREFIX:-dotfiles-}"
IMAGE_SUFFIX="${IMAGE_SUFFIX:-""}"
# Print with the given tput color index, e.g. `cprintf 2 'ok\n'`.
function cprintf () {
[ $# -lt 2 ] && return
tput setaf "${1}"; shift
# shellcheck disable=SC2059
printf "${@}"
tput sgr0
}
function info () { cprintf 2 "${@}"; }
function warning () { cprintf 3 "${@}"; }
function error () { cprintf 1 "${@}"; }
# Log the command (plus the current directory's basename), then eval it.
function run_log () { info "Running: ${*} ($(basename "$(pwd)"))\n"; eval "${@}"; }
function generate_dockerfiles () {
"${DOCKER_DIR}/generate_dockerfiles.py"
}
# Build every docker/**/Dockerfile from the repo root, so the whole tree is
# available as build context; each image is named after its directory.
function build_images () {
local image_name build_cmd
pushd "${BUILD_DIR}" > /dev/null
for dockerfile in "${DOCKER_DIR}"/**/Dockerfile; do
image_name="${IMAGE_PREFIX}$(basename "$(dirname "${dockerfile}")")${IMAGE_SUFFIX}"
dockerfile_relpath="$(realpath --relative-to="${BUILD_DIR}" "${dockerfile}")"
build_cmd="docker build . --file ${dockerfile_relpath} --tag ${image_name}:${IMAGE_TAG}"
run_log "${build_cmd}"
done
popd >/dev/null
}
function main () {
generate_dockerfiles
build_images
}
main
| true
|
09a534b9dbf5d6b0a1bcc0f8c03f1fe857a0183f
|
Shell
|
kellyuw/RoalfQA
|
/qa_clipcount_example.sh
|
UTF-8
| 937
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# ---------------------------------------------------------------
# QA_CLIPCOUNT.sh - find number of voxels with clipped amplitude (i.e. >= 4095)
#
# M. Elliott - 5/2013
# Edited by K. Sambrook - 1/2017
# --------------------------
Usage() {
echo "usage: `basename $0` [-append] [-keep] <4Dinput> [<maskfile>] <resultfile>"
exit 1
}
# --------------------------
# --- Perform standard qa_script code ---
source /mnt/stressdevlab/scripts/DTI/QA/qa_preamble.sh
# --- Parse inputs ---
# $1 = 4D DTI image, $2 = brain mask, $3 = text file the count is appended to.
dti_file=$1
mask_file=$2
result_file=$3
in_dir=`dirname ${dti_file}`
in_root=`basename ${dti_file} .nii.gz`
out_dir=`dirname ${result_file}`
# --- Find voxels which exceeded 4095 at any time ---
# Temporal max within the mask, thresholded at 4095 and binarised: a voxel
# is set iff it clipped in at least one volume.
fslmaths $dti_file -mas $mask_file -Tmax -thr 4095 -bin $out_dir/${in_root}_clipmask -odt char
gzip "${out_dir}/${in_root}_clipmask.nii"
# fslstats -V prints "<nvoxels> <volume>"; element 0 is the voxel count.
count=(`fslstats $out_dir/${in_root}_clipmask -V`)
echo -e "clipcount: ${count[0]}" >> $result_file
exit 0
| true
|
8b8534dbe93ab29395eb3605db9f7f65415d4f77
|
Shell
|
straitm/neutronfit
|
/stage_four.sh
|
UTF-8
| 452
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Stage four: extract "Probdensity" samples from the given fit outputs and
# feed them to the rhc_stage_four.C ROOT macro.
# Usage:
#   stage_four.sh compile              # just compile the macro
#   stage_four.sh <name> <dim> <files...>
# Make sure we fail if root fails at the beginning of a pipeline
set -o pipefail
if [ $1 == compile ]; then
root -b -q rhc_stage_four.C++'(NULL, false)'
exit
fi
name=$1
dim=$2
shift 2
# Collect the (value, probability) columns of every Probdensity line into a
# temporary file the macro reads.
cat $@ | \
grep Probdensity | \
awk '{print $2, $3}' > stage_four.$name.$dim.tmp
# The macro's second argument is 'true' only for the "nm" fit; output is
# both shown and saved via tee.
root -n -l -b -q rhc_stage_four.C+'("'stage_four.$name.$dim.'",'$(if [ $name == nm ]; then echo true; else echo false; fi)')' | \
tee stage_four.$name.$dim.out.txt
| true
|
dd8f26baaca5429c29dd4c8efa531bad9439d3c3
|
Shell
|
nuclewall/dev-tools
|
/pfPorts/squid/files/squid.in
|
UTF-8
| 2,702
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# $FreeBSD: ports/www/squid/files/squid.in,v 1.11 2012/11/18 16:55:52 svnexp Exp $
#
# PROVIDE: squid
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Note:
# Set "squid_enable=yes" in either /etc/rc.conf, /etc/rc.conf.local or
# /etc/rc.conf.d/squid to activate Squid.
#
# Additional variables you can define in one of these files:
#
# squid_chdir: the directory into which the rc system moves into before
# starting Squid. Default: /var/squid/logs
#
# squid_conf: The configuration file that Squid should use.
# Default: %%PREFIX%%/etc/squid/squid.conf
#
# squid_fib: The alternative routing table id that Squid should use.
# Default: none
# See setfib(1) for further details. Note that the setfib(2)
# system call is not available in FreeBSD versions prior to 7.1.
#
# squid_user: The user id that should be used to run the Squid master
# process. Default: %%SQUID_UID%%.
# Note that you probably need to define "squid_user=root" if
# you want to run Squid in reverse proxy setups or if you want
# Squid to listen on a "privileged" port < 1024.
#
# squid_pidfile:
# The name (including the full path) of the Squid
# master process' PID file.
# Default: /var/squid/logs/squid.pid.
# You only need to change this if you changed the
# corresponding entry in your Squid configuration.
#
# squid_flags: Additional commandline arguments for Squid you might want to
# use. See squid(8) for further details.
#
# Used as stop_precmd: succeeds only when a running squid answers -k check,
# so a stop is attempted only if squid is actually running.
squid_checkrunning() {
${command} ${squid_flags} -k check 2>/dev/null
}
# start_precmd: if an alternative routing table is configured via squid_fib
# (and the kernel supports FIBs), prefix the command with setfib.
squid_setfib() {
if command -v check_namevarlist > /dev/null 2>&1; then
check_namevarlist fib && return 0
fi
${SYSCTL} net.fibs >/dev/null 2>&1 || return 0
squid_fib=${squid_fib:-"NONE"}
if [ "x${squid_fib}" != "xNONE" ]; then
command="setfib -F ${squid_fib} ${command}"
else
return 0
fi
}
# stop_cmd: ask squid to shut down gracefully, then poll until the master
# process has exited.
squid_stop() {
echo "Stopping ${name}."
${command} ${squid_flags} -k shutdown
run_rc_command poll
}
. /etc/rc.subr
name=squid
rcvar=squid_enable
command=%%PREFIX%%/sbin/squid
extra_commands=reload
reload_cmd="${command} ${squid_flags} -k reconfigure"
start_precmd="squid_setfib"
stop_precmd="squid_checkrunning"
stop_cmd="squid_stop"
load_rc_config ${name}
# Defaults for all tunables documented in the header above.
squid_chdir=${squid_chdir:-"/var/squid/logs"}
squid_conf=${squid_conf:-"%%PREFIX%%/etc/squid/squid.conf"}
squid_enable=${squid_enable:-"NO"}
squid_flags=${squid_flags-"-D"}
squid_pidfile=${squid_pidfile:-"/var/squid/logs/squid.pid"}
squid_user=${squid_user:-%%SQUID_UID%%}
pidfile=${squid_pidfile}
required_dirs=${squid_chdir}
# squid(8) will not start if ${squid_conf} is not present so try
# to catch that beforehand via ${required_files} rather than make
# squid(8) crash.
required_files=${squid_conf}
run_rc_command "$1"
| true
|
74cc6cf24fc45f4199a69d8c8d57b1a47e2e058f
|
Shell
|
orocos/orocos-docker-images
|
/ros2/ubuntu/orocos_entrypoint.sh
|
UTF-8
| 853
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: make the ROS 2 and Orocos environments available to
# the command being executed, and persist the setup for later bash shells.
set -e
# Setup ros2 and orocos environment
source "/opt/ros/$ROS_DISTRO/setup.sh"
[ -d "/opt/orocos/$ROS_DISTRO" ] && source "/opt/orocos/$ROS_DISTRO/local_setup.sh"
# Append commands to setup ros2 and orocos environment to ~/.bashrc.
# This is only relevant if the command to be executed ("$@") is an interactive bash shell.
# Or for later `docker exec -it bash`.
# It is not sufficient to source bash-specific setup scripts here, because bash is executed as
# a new process. Aliases, command-line completion etc. configured here would have no effect.
# The grep guard keeps the snippet from being appended more than once.
if ! grep -q '# setup ros2 and orocos environment' ~/.bashrc >/dev/null; then
cat >>~/.bashrc <<'EOF'
# setup ros2 and orocos environment
source "/opt/ros/$ROS_DISTRO/setup.bash"
[ -d "/opt/orocos/$ROS_DISTRO" ] && source "/opt/orocos/$ROS_DISTRO/local_setup.bash"
EOF
fi
# Replace this shell with the requested command so signals reach it directly.
exec "$@"
| true
|
bd6cf0013df4536b5352d2db1d522643cdc530f8
|
Shell
|
godatadriven-dockerhub/spark
|
/tests/run_tests.sh
|
UTF-8
| 118
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run every numbered test script under /tests in lexical order; abort at the
# first failure.
set -e
for f in /tests/[0-9]*.sh; do
echo "Running $f"
# Use a brace group, not a subshell: 'exit 1' inside '( ... )' only leaves
# the subshell, so the abort would otherwise depend entirely on set -e.
bash "$f" -H || { echo "FAILED"; exit 1; }
done
| true
|
c77b7bd555be6fcf335ea82b1b90dcf1c54dd7af
|
Shell
|
TLINDEN/dotfiles
|
/.xmonad/.xprofile
|
UTF-8
| 727
| 2.546875
| 3
|
[] |
no_license
|
# -*-sh-*-
# Setup some extra PATH variables
# (fixed: the line above was missing its leading '#', so every login tried
# to execute a command named "Setup")
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/games:/usr/local/sbin:/usr/local/bin:$HOME/bin; export PATH
XDG_DATA_DIRS="/share:/usr/share:/usr/local/share"
export XDG_DATA_DIRS
XDG_CONFIG_DIRS="/usr/local/etc/xdg"
export XDG_CONFIG_DIRS
# Check for any specific PC-BSD apps we need to start at login
for i in `ls /usr/local/share/pcbsd/xstartup/*`
do
if [ -x "${i}" ] ; then . ${i} ; fi
done
# Keyboard layout and root-window colour, then the session programs.
setxkbmap -model pc105 -layout de
xsetroot -solid "#000000"
/usr/local/bin/kdeinit4 &
/usr/local/bin/xscreensaver &
/usr/local/bin/konsole &
trayer --edge bottom --align right --SetDockType true --SetPartialStrut true --expand true --width 10 --transparent true --tint 0x191970 --height 17 &
| true
|
6c64510e682b63c8e8aaa606e7b8d3e6e5e2c48e
|
Shell
|
caadar/paludis-config
|
/etc/paludis/hooks/auto/70.update-search-index.hook
|
UTF-8
| 3,055
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
##
## Automatically update the cave search index in the background.
##
## IMPORTANT: this hook requires sys-apps/paludis to be built with the
## "search-index" option, and "--index /PATH/FILE" (with existing writable
## PATH) to be specified in the global CAVE_SEARCH_OPTIONS variable.
##
## Copyright (C) 2014 Tobias Geerinckx-Rice (nckx) <tobias.geerinckx.rice@gmail.com>
## https://github.com/nckx/palplus-hooks/blob/master/auto/update-search-index.hook
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
## Rebuild the cave search index (or at least refresh the caches).
## Locates the index file by scanning CAVE_SEARCH_OPTIONS for "--index".
_update_cave_search_index() {
##
## Try to be clever: using CAVE_SEARCH_OPTIONS is recommended anyway
## so attempt to sniff out the index file location automatically
##
local argument option= index_file=
for argument in ${CAVE_SEARCH_OPTIONS}; do
if [[ "${option}" == --index ]]; then
index_file="${argument}"
break
fi
option="${argument}"
done
local cave=( nice -n 19 ionice -c idle "${CAVE%% *}" )
if [[ "${index_file}" ]]; then
##
## Try to kill older running updates if possible
## Equivalent to: fuser -kw "${index}".*
##
(
local IFS=$'\n'
shopt -s nullglob
for file in "${index_file}".*; do
for proc in $( find /proc/*/fd -lname "${file}"
); do
proc="${proc#/proc/}"
kill "${proc%%/*}"
done
done
)
## NOTE(review): 'exec' before a backgrounded rm looks unusual here —
## confirm it is intentional before relying on anything after it.
exec rm --force "${index_file}".* &
##
## Run all heavy tasks at lowest priority
##
export PALUDIS_DO_NOTHING_SANDBOXY=yes
##
## Generate a full search index; this also refreshes the caches
## Update the index file only if successfully completed
##
local temp_suffix="$$.temp"
"${cave[@]}" manage-search-index --create \
"${index_file}.${temp_suffix}" &&
mv "${index_file}"{."${temp_suffix}",} &&
return
fi
##
## Full search index generation failed; just refresh the caches
##
CAVE_SEARCH_OPTIONS= exec "${cave[@]}" search ""
}
## Hook entry point: after a sync, kick off the index update in the
## background unless the _bypass helper (status 22) says to skip it.
hook_run_sync_all_post() {
# ensure that einfo etc are available
export PATH="$(${PALUDIS_EBUILD_DIR}/utils/canonicalise ${PALUDIS_EBUILD_DIR}/utils/ ):${PATH}"
source ${PALUDIS_EBUILD_DIR}/echo_functions.bash
source /etc/paludis/paludis_aux.bash
_bypass check
if [[ $? -eq 22 ]]; then
return 0
fi
einfo "Update search index..."
_update_cave_search_index >/dev/null 2>&1 &
}
## Installs and uninstalls reuse the same update logic.
hook_run_install_all_post() { hook_run_sync_all_post; }
hook_run_uninstall_all_post() { hook_run_sync_all_post; }
## Tell paludis which hook phases this file handles.
hook_auto_names() { echo {{un,}install,sync}_all_post; }
| true
|
f8428fce6b639b172de3e52d2c4f87f58bb12031
|
Shell
|
ymattw/profiles
|
/alacritty/install.sh
|
UTF-8
| 243
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the alacritty config files shipped next to this script into
# ~/.config/alacritty. Only applicable on macOS; a no-op elsewhere.
set -o errexit
set -o nounset

readonly SELF_DIR=$(cd $(dirname $0) && pwd)

# Bail out early (successfully) on anything that is not macOS.
if [[ $(uname -s) != Darwin ]]; then
    echo "Skipped (not on macOS)"
    exit 0
fi

mkdir -p ~/.config/alacritty
cp -a $SELF_DIR/*.yml ~/.config/alacritty/
| true
|
7c3df3fdcf887085fa44f5dcec0c20d6acf213de
|
Shell
|
ecator/domain-list
|
/convert.sh
|
UTF-8
| 1,394
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert a csv exported by Charles into the csv under domains/, skipping
# records that already exist (automatic de-duplication).
# Print usage information and exit.
function echo_usage(){
echo "usage: $0 charles_csv target_csv"
exit
}
# Check whether the given record already exists in the target csv;
# prints the record if found (empty output means "not found").
# $1: record to look for
function find_in_target(){
lines=`cat $target_csv`
for line in $lines; do
if [[ $line == $1 ]]; then
echo $1
break
fi
done
}
# Pad a string with spaces.
# $1: string to pad
# $2: total length after padding
# $3: non-empty to pad on the left instead of the right
function fill_space(){
input_len=${#1}
out=$1
if [[ $input_len -lt $2 ]]; then
for (( i = $input_len ; i < $2; i++ )); do
[ -z $3 ] && out="${out} " || out=" ${out}"
done
fi
echo "$out"
}
# Validate both arguments: they must be given and must be existing files.
if [[ -z $1 ]]; then
echo "charles_csv error"
echo_usage
elif [[ ! -f $1 ]]; then
echo "$1 not found"
echo_usage
else
charles_csv=$1
fi
if [[ -z $2 ]]; then
echo "target_csv error"
echo_usage
elif [[ ! -f $2 ]]; then
echo "$2 not found"
echo_usage
else
target_csv=$2
fi
echo "processing..."
cnt=0
# Column 1 of the Charles csv is a URL; the second awk extracts its host.
# Only host-like tokens (word.word) that are not already present get appended.
awk -F, '{print $1}' "$charles_csv" | awk -F/ '{print $3}' | while read i
do
echo $i | grep -E '\w+\.\w+' > /dev/null 2>&1 || continue
if [ -n $i ] && [ -z `find_in_target $i` ] ;
then
cnt=`expr $cnt + 1`
echo "$(fill_space $cnt 3 r): add $(fill_space $i 40) to ${target_csv}"
echo $i>>$target_csv
fi
done
echo "---------------end---------------"
| true
|
1a2dd0be1421e91ebce5e6742e594c7960177328
|
Shell
|
uhulinux/ub-ubk4
|
/rkhunter/install
|
UTF-8
| 924
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh -eux
# Package build step: install rkhunter into ${UB_INSTALLDIR} using its
# custom layout, relocate config from usr/etc to /etc and var out of /usr,
# and patch the hard-coded paths in the binary and config accordingly.
./installer.sh \
--layout custom \
${UB_INSTALLDIR}/usr \
--striproot \
${UB_INSTALLDIR} \
--install
# Move the configuration to the conventional /etc location.
cp ${UB_INSTALLDIR}/usr/etc/* ${UB_INSTALLDIR}/etc/
rm -rf ${UB_INSTALLDIR}/usr/etc
mv ${UB_INSTALLDIR}/usr/var ${UB_INSTALLDIR}/
# Rewrite the paths baked into the launcher and the config file so they
# match the relocated layout.
sed -i -e "s#/usr/etc/rkhunter.conf#/etc/rkhunter.conf#g" "${UB_INSTALLDIR}/usr/bin/rkhunter"
sed -i -e "s#/usr/etc/rkhunter.conf#/etc/rkhunter.conf#g" "${UB_INSTALLDIR}/etc/rkhunter.conf"
sed -i -e "s#/usr/var/lib/rkhunter/db#/var/lib/rkhunter/db#g" "${UB_INSTALLDIR}/etc/rkhunter.conf"
sed -i -e "s#/usr/var/lib/rkhunter/tmp#/var/lib/rkhunter/tmp#g" "${UB_INSTALLDIR}/etc/rkhunter.conf"
# cleanup
rm -f "${UB_INSTALLDIR}/var/lib/rkhunter/tmp/"{group,passwd}
# we trust in udev
sed -i 's|^#ALLOWHIDDENDIR=/dev/.udev$|ALLOWHIDDENDIR=/dev/.udev|' \
"${UB_INSTALLDIR}/etc/rkhunter.conf"
# ln -s ../../share/doc/Packages/rkhunter "$UB_INSTALLDIR"/usr/lib/rkhunter/docs
| true
|
1de2734460022ee9495a14e6dd5e3d76f2819514
|
Shell
|
pepijndevos/pepijndevos.github.com
|
/_includes/code/multipygmentize.sh
|
UTF-8
| 209
| 2.609375
| 3
|
[] |
no_license
|
# Render each input file to syntax-highlighted HTML under output/,
# mirroring the source directory layout.
# Args: one or more source file paths.
# (fixed: '$@' and the dirname substitution were unquoted, which broke
# any path containing spaces or glob characters)
for f in "$@"
do
	mkdir -p "$(dirname "output/$f.html")"
	~/hg/pygments/pygmentize -f html -O anchorlinenos,linenos,full,urlformat=/{path}/{fname}{fext}.html,lineanchors=L,tagsfile=tags -o "output/$f.html" "$f"
done
| true
|
6d8dc30175b60629c7a3f6fd35e35371763a99fc
|
Shell
|
GaelReinaudi/Cache-Bot
|
/lambda/updateLambda.sh
|
UTF-8
| 961
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Package the lambda sources, upload the zip to S3, and update the "cache"
# lambda function plus its evo-dev / extra-dev aliases.
#   $1 - base name for the artifact; the git describe tag is appended.

# Version tag from git describe: '-' -> '_' and '.' -> '-' so the result is
# safe to embed in AWS resource names.
# (fixed: '--work-tree $$PWD' expanded '$$' to the shell PID, handing git a
# bogus work tree; it now uses the current directory)
VV=$(git --git-dir ../.git --work-tree "$PWD" describe --always --tags | tr "-" "_" | tr "." "-")
NAME=$1-$VV
echo $NAME
#rm -f $NAME.zip
zip -r $NAME index.js ../bin ../expressjs/extraCash.sh ../expressjs/fullAnalysis.sh ../expressjs/evo.sh ../cache_categories.json
aws s3 cp $NAME.zip s3://cache.ai
rm -f $NAME.zip
# Point the function at the freshly uploaded artifact and publish a version.
aws lambda update-function-code \
	--region us-west-2 \
	--function-name cache \
	--s3-bucket=cache.ai --s3-key $NAME.zip \
	--publish
# Both dev aliases track $LATEST; the description records the artifact name.
aws lambda update-alias --function-name cache \
	--name evo-dev \
	--function-version \$LATEST \
	--description $NAME
aws lambda update-alias --function-name cache \
	--name extra-dev \
	--function-version \$LATEST \
	--description $NAME
#aws lambda update-function-code --region us-west-2 --function-name extra --s3-bucket=cache.ai --s3-key $NAME.zip
#aws lambda update-function-code --region us-west-2 --function-name evo --s3-bucket=cache.ai --s3-key $NAME.zip
|
80080b1b55218e292f9af3ec4f002268b25f16a0
|
Shell
|
alabuel/hardening.amazon.linux
|
/sections/5.1-configure-cron.sh
|
UTF-8
| 2,254
| 2.703125
| 3
|
[] |
no_license
|
# Author: Ariel Abuel
# Benchmark: CIS
# --------------------------------------------
# 5 Access, Authentication and Authorization
# 5.1 Configure cron
# --------------------------------------------
# Each section logs the CIS control it implements, then applies the required
# ownership/permission changes via the shared log/execute_command/
# service_enable helpers (defined elsewhere in this hardening toolkit).

# 5.1.1 Ensure cron daemon is enabled (Scored)
log "CIS" "5.1.1 Ensure cron daemon is enabled (Scored)"
service_enable "crond"

# 5.1.2 Ensure permissions on /etc/crontab are configured (Scored)
log "CIS" "5.1.2 Ensure permissions on /etc/crontab are configured (Scored)"
execute_command "chown root:root /etc/crontab"
execute_command "chmod og-rwx /etc/crontab"

# 5.1.3 Ensure permissions on /etc/cron.hourly are configured (Scored)
log "CIS" "5.1.3 Ensure permissions on /etc/cron.hourly are configured (Scored)"
execute_command "chown root:root /etc/cron.hourly"
execute_command "chmod og-rwx /etc/cron.hourly"

# 5.1.4 Ensure permissions on /etc/cron.daily are configured (Scored)
log "CIS" "5.1.4 Ensure permissions on /etc/cron.daily are configured (Scored)"
execute_command "chown root:root /etc/cron.daily"
# fixed: this line previously repeated the chown and never restricted the mode
execute_command "chmod og-rwx /etc/cron.daily"

# 5.1.5 Ensure permissions on /etc/cron.weekly are configured (Scored)
# fixed: this section was missing its log entry
log "CIS" "5.1.5 Ensure permissions on /etc/cron.weekly are configured (Scored)"
execute_command "chown root:root /etc/cron.weekly"
execute_command "chmod og-rwx /etc/cron.weekly"

# 5.1.6 Ensure permissions on /etc/cron.monthly are configured (Scored)
log "CIS" "5.1.6 Ensure permissions on /etc/cron.monthly are configured (Scored)"
execute_command "chown root:root /etc/cron.monthly"
execute_command "chmod og-rwx /etc/cron.monthly"

# 5.1.7 Ensure permissions on /etc/cron.d are configured (Scored)
log "CIS" "5.1.7 Ensure permissions on /etc/cron.d are configured (Scored)"
execute_command "chown root:root /etc/cron.d"
execute_command "chmod og-rwx /etc/cron.d"

# 5.1.8 Ensure at/cron is restricted to authorized users (Scored)
# Deny-lists are removed; empty allow-lists restrict at/cron to root.
log "CIS" "5.1.8 Ensure at/cron is restricted to authorized users (Scored)"
[ -f "/etc/cron.deny" ] && execute_command "rm /etc/cron.deny"
[ -f "/etc/at.deny" ] && execute_command "rm /etc/at.deny"
execute_command "touch /etc/cron.allow"
execute_command "touch /etc/at.allow"
execute_command "chmod og-rwx /etc/cron.allow"
execute_command "chmod og-rwx /etc/at.allow"
execute_command "chown root:root /etc/cron.allow"
execute_command "chown root:root /etc/at.allow"
| true
|
be427905b863c0291cd878c0f1df22597c5e5352
|
Shell
|
miho/kernal64
|
/Kernal64/build/k64.sh
|
UTF-8
| 709
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launcher for the Kernal64 Commodore 64 emulator.
# NOTE(review): this reuses (and overwrites) the HOME environment variable
# to hold the install directory for the remainder of the script.
HOME=`dirname "$0"`
HOME=`cd "$HOME"; pwd -P`
LIB=$HOME/lib
ROMS=$HOME/roms
# Classpath: the roms directory plus the bundled jars.
CP=$ROMS:$LIB/kernal64.jar:$LIB/jinput.jar:$LIB/scala-library.jar:$LIB/scala-parser-combinators_2.12-1.0.5.jar:$LIB/commons-net-3.3.jar
# Prefer $JAVA_HOME's java when it is executable, otherwise use PATH's java.
if [ ! -x $JAVA_HOME/bin/java ]; then
JAVA=java
else
JAVA=$JAVA_HOME/bin/java
fi
# to add custom Kernals set the variable below adding inside quotes -Dkernal=<kernal file> -D1541kernal=<1541 kernal file>
# both kernal files must be placed under roms directory
# example: KERNALS_ROM="-Dkernal=jiffydos_kernal.bin -D1541kernal=jiffydos1541_kernal.bin"
KERNALS_ROMS=""
# Launch the emulator, forwarding all script arguments.
$JAVA -server -Xms64M -Xmx128M -cp $CP -Djava.library.path=$LIB $KERNALS_ROMS ucesoft.cbm.c64.C64 $*
| true
|
753cdce41a11745c23c73225d7885a2b33bf4563
|
Shell
|
mpdn/unthread
|
/posixtestsuite/conformance/interfaces/clock_settime/18-1.sh
|
UTF-8
| 626
| 2.75
| 3
|
[
"GPL-1.0-or-later",
"GPL-2.0-only",
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2002, Intel Corporation. All rights reserved.
# Created by: julie.n.fleischer REMOVE-THIS AT intel DOT com
# This file is licensed under the GPL license. For the full content
# of this license, see the COPYING file at the top level of this
# source tree.
#
# Test that clock_settime() sets errno=EINVAL if tp is outside the
# valid range for clock_id.
#
# This is tested implicitly via assertion 19 to the best of tester's
# knowledge. Cannot find additional parameters for CLOCK_REALTIME
# in the POSIX specification
# No direct test is performed here; report PASS and defer to test 19.
echo "Tested implicitly via assertion 19. See output for status."
exit 0
| true
|
77c57c0af19b35c97be6fb91d91bfe061145d809
|
Shell
|
CarloWood/cwm4
|
/scripts/depcomp.sh
|
UTF-8
| 2,825
| 3.453125
| 3
|
[] |
no_license
|
#! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
# Copyright 1999, 2000, 2003 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
# Completely stripped for own purposes by Carlo Wood.
if test -z "$depmode" || test -z "$source" || test -z "$object"; then
echo "depcomp: Variables source, object and depmode must be set" 1>&2
exit 1
fi
# `libtool' can also be set to `yes' or `no'.
if test -z "$depfile"; then
base=`echo "$object" | sed -e 's,^.*/,,' -e 's,\.\([^.]*\)$,.P\1,'`
dir=`echo "$object" | sed 's,/.*$,/,'`
if test "$dir" = "$object"; then
dir=
fi
# FIXME: should be _deps on DOS.
depfile="$dir.deps/$base"
fi
tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
rm -f "$tmpdepfile"
case "$depmode" in
pch)
# First compile file without dependency tracking.
"$@" || exit $?
# Remove the call to libtool its parameters.
if test "$libtool" = yes; then
while test $1 != '--mode=compile'; do
shift
done
shift
if expr "$1" : "--" >/dev/null; then
shift
fi
fi
# Remove `-o $object' and `-include pch.h'.
eatpch=
IFS=" "
for arg
do
case $arg in
-o)
shift
;;
$object)
shift
;;
-include)
shift
eatpch=yes
;;
*)
if test x$eatpch = xyes; then
if test "$arg" = "pch.h"; then
shift
else
set fnord "$@" -include "$arg"
shift # fnord
shift # $arg
fi
eatpch=
else
set fnord "$@" "$arg"
shift # fnord
shift # $arg
fi
;;
esac
done
# Generate dependency file.
"$@" -MT "$object" -M -MF "$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
mv "$tmpdepfile" "$depfile"
;;
*)
echo "Unknown depmode $depmode" 1>&2
exit 1
;;
esac
exit 0
| true
|
053493ab8844508693d91bc63433b11630b57424
|
Shell
|
opium-sh/rl-game-recording
|
/recording-bash/f1-2017-runme.sh
|
UTF-8
| 2,061
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Record the "F1 2017" game window with ffmpeg's x11grab, then (on Ctrl+C)
# re-encode the lossless capture into a smaller mkv and delete the original.
QUALITY=15 # change recoding quality 0-25, 0 - perfect and large file , 25 and small file
APP_WINDOW_NAME="F1™ 2017" #part of the game window name, use xwininfo -tree to find it
SCALE_TO="1024:768"
##############################################################################################
# Output base name includes a timestamp; _big/_small suffixes are added below.
OUT_FILENAME=F1\-$(date -d "today" +"%Y%m%d-%H%M")
#steam steam://rungameid/515220&
#sleep 10
# Pick the first logged-in X display reported by w(1).
CURRENT_XORG_DISPLAY_IDS=$(w -hs |awk '{print $3}')
CURRENT_DISPLAY_ID=$(echo "${CURRENT_XORG_DISPLAY_IDS}"|head -n 1) #hardcode any value like :0 :1 :2
# Locate the game window and read its geometry.
WINDOW_ID=$(xwininfo -tree -root |grep "$APP_WINDOW_NAME"|awk '{print $1}'|head -n 1)
WINDOW_ATTR=$(xwininfo -id "${WINDOW_ID}")
# Fullscreen games are recorded at native size and rescaled later; windowed
# games are re-encoded without rescaling.
if xprop -id $WINDOW_ID | grep -q '_NET_WM_STATE(ATOM) = _NET_WM_STATE_FULLSCREEN'; then
FULLSCREEN_GAME=1
fi
#execute when CRTL+c is pressed
# (registering the trap before the function definition works because the
# trap string is evaluated only when the signal arrives)
trap recode 2
recode()
{
#encode/rescale so the mkv is smaller, then delete original recording
if [[ -z "${FULLSCREEN_GAME}" ]]; then
ffmpeg -y -i "$OUT_FILENAME"_big.mkv -c:v libx264 -crf "$QUALITY" -preset fast "$OUT_FILENAME"_small.mkv
else
ffmpeg -y -i "$OUT_FILENAME"_big.mkv -vf scale="$SCALE_TO" -c:v libx264 -crf "$QUALITY" -preset fast "$OUT_FILENAME"_small.mkv
fi
rm -f "$OUT_FILENAME"_big.mkv
}
#main game recording stuff
# Window geometry: size plus absolute position of the upper-left corner.
W=$(echo "${WINDOW_ATTR}"|grep Width|awk '{print $2}')
H=$(echo "${WINDOW_ATTR}"|grep Height|awk '{print $2}')
X=$(echo "${WINDOW_ATTR}"|grep "Absolute upper-left X"|awk '{print $4}')
Y=$(echo "${WINDOW_ATTR}"|grep "Absolute upper-left Y"|awk '{print $4}')
echo ==============================================
echo $W $H $X $Y $WINDOW_ID ====== \n $WINDOW_ATTR
echo ==============================================
echo Available screens:
echo $CURRENT_XORG_DISPLAY_IDS
echo Using $CURRENT_DISPLAY_ID by default.
echo ==============================================
#-f alsa -ac 2 -i pulse -acodec aac -strict experimental
# Lossless (crf 0, ultrafast) capture of the window region at 30 fps; runs
# until interrupted, at which point the trap above triggers recode().
ffmpeg -y -video_size "$W"x"$H" -framerate 30 -f x11grab -i "$CURRENT_DISPLAY_ID".0+"$X","$Y" -c:v libx264 -crf 0 -preset ultrafast "$OUT_FILENAME"_big.mkv
| true
|
283531208f509124e27fba864b56b11c3a73ed27
|
Shell
|
RoshanS21/bashScripting
|
/learningBash.sh
|
UTF-8
| 2,022
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/bash
# Roshan Shrestha
# Bash scripting practice: functions, variables, conditionals, loops,
# arithmetic and bc.
# NOTE: the shebang must be the very first line of the file — it previously
# sat on line 2 (below a comment) where the kernel ignores it.

# Print a visual separator line between sections.
function NewLine {
    echo " "
}

# Count regular files under the current directory (recursively).
function TotalFiles {
    find -type f | wc -l
}

# Count directories under the current directory (recursively).
function TotalDirectories {
    find -type d | wc -l
}

echo $SHELL
NewLine
cal
NewLine
date
NewLine
echo "Hello World!"
NewLine

greeting="Welcome"
user=$(whoami)
day=$(date +%A)
# "back" — fixed typo ("bak") in the greeting
echo "$greeting back $user! Today is $day."
echo "Your Bash shell version is: $BASH_VERSION. Enjoy!"
NewLine

currentPath=$(pwd)
numberOfFiles=$(TotalFiles)
numberOfDirectories=$(TotalDirectories)
echo "Current Path: $currentPath"
echo "Number of Files: $numberOfFiles"
echo "Number of Directories: $numberOfDirectories"
NewLine

if [ "$numberOfFiles" -gt "$numberOfDirectories" ]; then
    echo "$numberOfFiles is greater than $numberOfDirectories"
else
    echo "$numberOfFiles is less than $numberOfDirectories"
fi
NewLine

# Numbers after a $ sign refer to arguments/parameters.
# -z checks whether the parameter contains any value:
# returns true if the length of $1 is zero.
# $1 must be quoted — unquoted, [ -z $1 ] collapses to [ -z ] when $1 is
# unset and misbehaves when $1 contains spaces.
if [ -z "$1" ]; then
    user=$(whoami)
else
    # -d returns true if the directory exists
    if [ ! -d "/home/$1" ]; then
        echo "Requested \"$1\" user home directory does not exist."
        exit 1
    fi
    user=$1
fi
echo "Current user: $user"
NewLine

# Loops
for i in 1 2 3; do echo "Number: $i"; done
NewLine

# unquoted $(cat ...) word-splits on whitespace: one word per iteration
for i in $(cat input.txt); do
    echo -n $i | wc -c;
done
NewLine

# For loop to print names of all files and directories inside your
# current working directory along with the number of characters
# each file consists from.
for i in $(find -type f); do
    temp=$(echo -n $i | wc -c)
    echo "$i $temp";
done
NewLine

# While loop
counter=0
while [ $counter -lt 5 ]; do
    let counter+=1
    echo $counter
done
NewLine

# Until loop (fixed typo "Untile"): runs while the condition is FALSE
let counter+=1
until [ $counter -lt 2 ]; do
    let counter-=1
    echo $counter
done
NewLine

x=10;y=33
echo $(( $x + $y ))
echo $(expr 10 \* 2)
expr 10 + 2
let a=2**3
let a++
echo $a
NewLine

# bc: arbitrary-precision calculator; scale sets the decimal places
squareroot=$( echo 'scale=20;sqrt(50)' | bc)
echo $squareroot
| true
|
607908fa80c71290498439a684a7afbd674aa3d6
|
Shell
|
MarcelRaschke/docker
|
/versions.sh
|
UTF-8
| 7,889
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -Eeuo pipefail
# bashbrew arch to docker-release-arch
declare -A dockerArches=(
['amd64']='x86_64'
['arm32v6']='armel'
['arm32v7']='armhf'
['arm64v8']='aarch64'
['ppc64le']='ppc64le'
['s390x']='s390x'
['windows-amd64']='x86_64'
)
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
json='{}'
else
json="$(< versions.json)"
fi
versions=( "${versions[@]%/}" )
# "tac|tac" for http://stackoverflow.com/a/28879552/433558
dindLatest="$(curl -fsSL 'https://github.com/docker/docker/commits/master/hack/dind.atom' | tac|tac | awk -F '[[:space:]]*[<>/]+' '$2 == "id" && $3 ~ /Commit/ { print $4; exit }')"
dockerVersions="$(
git ls-remote --tags https://github.com/docker/docker.git \
| cut -d$'\t' -f2 \
| grep '^refs/tags/v[0-9].*$' \
| sed 's!^refs/tags/v!!; s!\^{}$!!' \
| sort -u \
| gawk '
{ data[lines++] = $0 }
# "beta" sorts lower than "tp" even though "beta" is a more preferred release, so we need to explicitly adjust the sorting order for RCs
# also, "18.09.0-ce-beta1" vs "18.09.0-beta3"
function docker_version_compare(i1, v1, i2, v2, l, r) {
l = v1; gsub(/-ce/, "", l); gsub(/-tp/, "-alpha", l)
r = v2; gsub(/-ce/, "", r); gsub(/-tp/, "-alpha", r)
patsplit(l, ltemp, /[^.-]+/)
patsplit(r, rtemp, /[^.-]+/)
for (i = 0; i < length(ltemp) && i < length(rtemp); ++i) {
if (ltemp[i] < rtemp[i]) {
return -1
}
if (ltemp[i] > rtemp[i]) {
return 1
}
}
return 0
}
END {
asort(data, result, "docker_version_compare")
for (i in result) {
print result[i]
}
}
'
)"
buildxVersions="$(
git ls-remote --tags https://github.com/docker/buildx.git \
| cut -d$'\t' -f2 \
| grep '^refs/tags/v[0-9].*$' \
| sed 's!^refs/tags/v!!; s!\^{}$!!' \
| grep -vE -- '-rc' \
| sort -ruV
)"
buildx=
buildxVersion=
for buildxVersion in $buildxVersions; do
if checksums="$(curl -fsSL "https://github.com/docker/buildx/releases/download/v${buildxVersion}/checksums.txt")"; then
buildx="$(jq <<<"$checksums" -csR --arg version "$buildxVersion" '
rtrimstr("\n") | split("\n")
| map(
split(" [ *]?"; "")
| {
sha256: .[0],
file: .[1],
url: ("https://github.com/docker/buildx/releases/download/v" + $version + "/" + .[1]),
}
| { (
.file
| capture("[.](?<os>linux|windows|darwin)-(?<arch>[^.]+)([.]|$)")
// error("failed to parse os-arch from filename: " + .[1])
| if .os == "linux" then "" else .os + "-" end
+ ({
"amd64": "amd64",
"arm-v6": "arm32v6",
"arm-v7": "arm32v7",
"arm64": "arm64v8",
"ppc64le": "ppc64le",
"riscv64": "riscv64",
"s390x": "s390x",
}[.arch] // error("unknown buildx architecture: " + .arch))
): . }
)
| add
| {
version: $version,
arches: .,
}
')"
break
fi
done
if [ -z "$buildx" ]; then
echo >&2 'error: failed to determine buildx version!'
exit 1
fi
composeVersions="$(
git ls-remote --tags https://github.com/docker/compose.git \
| cut -d$'\t' -f2 \
| grep '^refs/tags/v[0-9].*$' \
| sed 's!^refs/tags/v!!; s!\^{}$!!' \
| sort -ruV
)"
compose=
composeVersion=
for composeVersion in $composeVersions; do
if checksums="$(curl -fsSL "https://github.com/docker/compose/releases/download/v${composeVersion}/checksums.txt")"; then
compose="$(jq <<<"$checksums" -csR --arg version "$composeVersion" '
rtrimstr("\n") | split("\n")
| map(
split(" *")
| {
sha256: .[0],
file: .[1],
url: ("https://github.com/docker/compose/releases/download/v" + $version + "/" + .[1]),
}
| { (
.file
| ltrimstr("docker-compose-")
| rtrimstr(".exe")
| split("-")
| if .[0] == "linux" then "" else .[0] + "-" end
+ ({
aarch64: "arm64v8",
armv6: "arm32v6",
armv7: "arm32v7",
ppc64le: "ppc64le",
riscv64: "riscv64",
s390x: "s390x",
x86_64: "amd64",
}[.[1]] // error("unknown compose architecture: " + .[1]))
): . }
)
| add
| {
version: $version,
arches: .,
}
')"
break
fi
done
if [ -z "$compose" ]; then
echo >&2 'error: failed to determine compose version!'
exit 1
fi
for version in "${versions[@]}"; do
rcVersion="${version%-rc}"
export version rcVersion
channel='stable'
versionOptions="$(grep "^$rcVersion[.]" <<<"$dockerVersions")"
rcGrepV='-v'
if [ "$rcVersion" != "$version" ]; then
rcGrepV=
channel='test'
fi
if ! fullVersion="$(grep $rcGrepV -E -- '-(rc|tp|beta)' <<<"$versionOptions" | tail -1)" || [ -z "$fullVersion" ]; then
if currentNull="$(jq -r '.[env.version] == null' versions.json)" && [ "$currentNull" = 'true' ]; then
echo >&2 "warning: skipping '$version' (does not appear to be released yet)"
json="$(jq <<<"$json" -c '.[env.version] = null')"
continue
fi
echo >&2 "error: cannot find full version for $version"
exit 1
fi
# if this is a "-rc" release, let's make sure the release it contains isn't already GA (and thus something we should not publish anymore)
if [ "$rcVersion" != "$version" ] && rcFullVersion="$(jq <<<"$json" -r '.[env.rcVersion].version // ""')" && [ -n "$rcFullVersion" ]; then
latestVersion="$({ echo "$fullVersion"; echo "$rcFullVersion"; } | sort -V | tail -1)"
if [[ "$fullVersion" == "$rcFullVersion"* ]] || [ "$latestVersion" = "$rcFullVersion" ]; then
# "x.y.z-rc1" == x.y.z*
echo >&2 "warning: skipping/removing '$version' ('$rcVersion' is at '$rcFullVersion' which is newer than '$fullVersion')"
json="$(jq <<<"$json" -c '.[env.version] = null')"
continue
fi
fi
echo "$version: $fullVersion (buildx $buildxVersion, compose $composeVersion)"
export fullVersion dindLatest
doc="$(
jq -nc --argjson buildx "$buildx" --argjson compose "$compose" '{
version: env.fullVersion,
arches: {},
dindCommit: env.dindLatest,
buildx: $buildx,
compose: $compose,
}'
)"
hasWindows=
for bashbrewArch in "${!dockerArches[@]}"; do
arch="${dockerArches[$bashbrewArch]}"
# check whether the given architecture is supported for this release
case "$bashbrewArch" in
windows-*) url="https://download.docker.com/win/static/$channel/$arch/docker-$fullVersion.zip"; windows=1 ;;
*) url="https://download.docker.com/linux/static/$channel/$arch/docker-$fullVersion.tgz"; windows= ;;
esac
if wget --quiet --spider "$url" &> /dev/null; then
export bashbrewArch url
doc="$(
jq <<<"$doc" -c '.arches[env.bashbrewArch] = {
dockerUrl: env.url,
}'
)"
else
continue
fi
if [ -n "$windows" ]; then
hasWindows=1
continue # Windows doesn't have rootless extras :)
fi
# https://github.com/moby/moby/blob/v20.10.7/hack/make/binary-daemon#L24
# "vpnkit is available for x86_64 and aarch64"
case "$bashbrewArch" in
amd64 | arm64v8)
rootlessExtrasUrl="https://download.docker.com/linux/static/$channel/$arch/docker-rootless-extras-$fullVersion.tgz"
if wget --quiet --spider "$rootlessExtrasUrl" &> /dev/null; then
export rootlessExtrasUrl
doc="$(jq <<<"$doc" -c '
.arches[env.bashbrewArch].rootlessExtrasUrl = env.rootlessExtrasUrl
')"
fi
;;
esac
done
# order here controls the order of the library/ file
for variant in \
cli \
dind \
dind-rootless \
git \
windows/windowsservercore-ltsc2022 \
windows/windowsservercore-1809 \
; do
base="${variant%%/*}" # "buster", "windows", etc.
if [ "$base" = 'windows' ] && [ -z "$hasWindows" ]; then
continue
fi
export variant
doc="$(jq <<<"$doc" -c '.variants += [ env.variant ]')"
done
json="$(jq <<<"$json" -c --argjson doc "$doc" '
.[env.version] = $doc
# make sure both "XX.YY" and "XX.YY-rc" always exist
| .[env.rcVersion] //= null
| .[env.rcVersion + "-rc"] //= null
')"
done
jq <<<"$json" -S . > versions.json
| true
|
1eace990a917a8c85a8c707d1576976ec20f585e
|
Shell
|
ericminio/learning-oracle
|
/tests/python/test_instance_is_ready.sh
|
UTF-8
| 557
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the "instance is ready" pytest inside the running "oracle" container and
# fail loudly when the output indicates an error, a failed test, or that no
# tests were collected.

DIR=/tmp/learning-oracle
ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe

OUTPUT=$(docker exec oracle /bin/bash -c "export ORACLE_HOME=$ORACLE_HOME ; pytest $DIR/tests/python/test_instance_is_ready.py")

# Any of these substrings in the pytest output means the run did not pass.
# (Replaces three copy-pasted if-blocks with a single marker loop.)
for marker in 'ERROR' 'failed' 'no tests'; do
    if [[ $OUTPUT == *"$marker"* ]]; then
        echo "FAILURE"
        echo "OUTPUT WAS: $OUTPUT"
        exit 1
    fi
done

echo "$OUTPUT"
echo "SUCCESS"
| true
|
34c2097b76772c4767d1d2c5f379d2db6dbf7a89
|
Shell
|
moetunes/Schedule-dvb
|
/tv-record
|
UTF-8
| 3,483
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple DVB TV recording daemon: polls a schedule file every $SLEEP seconds
# and, when the first entry's start time arrives, tunes the channel with
# $ZAP_COMMAND and dumps the DVR stream to an mpg file with dd.
# Schedule line format: "YYYYMMDD HHMM HHMM CHANNEL [NAME]".
set -o nounset
shopt -s huponexit

# User defines
declare -i DVB_DEVICE_NUM="0"
declare CHANNELS_CONF="${HOME}/Mychannels.conf"
declare SAVE_FOLDER="${HOME}/TV/tele"
declare SCHED_FILE="$HOME/.sched-tv"
declare ZAP_COMMAND="tzap"
declare -i SLEEP=15

# Program defines
declare -i DAY="0"
declare -i START="0"
declare -i FINISH="0"
declare CHAN="0"
declare NAME="0"
declare -i MINUTES="0"
declare -i REC_START="0"
declare -i REC_HOURS="0"
declare -i REC_MINS="0"
declare -i howlong=$SLEEP
declare next_entry=""
declare current_entry=""
declare -i PIDOF_AZAP=0
declare -i PIDOF_CAT=0

# ANSI color escapes used as printf format prefixes
red='\033[1;31m'
green='\033[1;32m'
yell='\033[1;33m'
cyan='\033[1;36m'
white='\033[1;37m'
reset='\033[0m'

# Delete the current entry's line from the schedule file.
# Reads globals DAY/START/FINISH/CHAN/NAME set by the main loop.
# NOTE(review): the entry fields are interpolated into a sed address, so
# regex metacharacters in NAME would misbehave — verify schedule contents.
function remove_entry {
    if [ "$NAME" == "" ]; then
        sed "/$DAY $START $FINISH $CHAN/d" $SCHED_FILE > /tmp/dummy
    else
        sed "/$DAY $START $FINISH $CHAN $NAME/d" $SCHED_FILE > /tmp/dummy
    fi
    mv /tmp/dummy $SCHED_FILE
}

# Tune the channel in the background, record the DVR stream with dd for the
# entry's duration, then kill both helpers and drop the entry.
function record_entry {
    ${ZAP_COMMAND} -a ${DVB_DEVICE_NUM} -f ${DVB_DEVICE_NUM} -d ${DVB_DEVICE_NUM} \
        -c $CHANNELS_CONF -r ${CHAN} -p >/dev/null 2>&1 &
    PIDOF_AZAP=$!
    sleep 3
    if [ "$PIDOF_AZAP" == "" ]; then
        printf "$red\tError starting ${ZAP_COMMAND}.\n\tFAILED: $CHAN $START\n"
        remove_entry
        exit 1
    fi
    printf "$green\tSET CHANNEL$cyan ${CHAN}\n$green\tRECORDING$white $NAME\n"
    # Times are HHMM integers; walk minute by minute from START to FINISH,
    # carrying each 60 minutes into the hundreds (hour) digit. MINUTES ends
    # up as the wall-clock duration of the recording.
    REC_MINS=$((${START}%100))
    REC_HOURS=0
    MINUTES=0
    REC_START=$(($START-$REC_MINS))
    while [ $((${REC_START}+${REC_HOURS}+${REC_MINS})) -lt $FINISH ]; do
        ((REC_MINS++))
        ((MINUTES++))
        if [ ${REC_MINS} -ge 60 ]; then
            REC_MINS=0
            ((REC_HOURS+=100))
        fi
    done
    if [ "$NAME" == "" ]; then
        declare FILE_NAME="${SAVE_FOLDER}/TV-`date +%Y%m%d-%H%M`-ch.${CHAN}-${MINUTES}.min.mpg"
    else
        declare FILE_NAME="${SAVE_FOLDER}/TV-${NAME}-${CHAN}-${MINUTES}.min.mpg"
    fi
    # conv=noerror: keep writing even if the DVR device produces read errors
    dd if=/dev/dvb/adapter${DVB_DEVICE_NUM}/dvr${DVB_DEVICE_NUM} \
        of=${FILE_NAME} conv=noerror &
    PIDOF_CAT=$!
    if (( ${PIDOF_CAT} == 0 )); then
        printf "$red\tError Starting Recording.\n\t/dev/dvb/adapter${DVB_DEVICE_NUM}/dvr${DVB_DEVICE_NUM} Unavailable\n"
        kill ${PIDOF_AZAP}
        remove_entry
        exit 1
    fi
    printf "$yell\tRECORDING TO :$cyan ${FILE_NAME}\n"
    # block for the full recording duration
    sleep ${MINUTES}m
    kill ${PIDOF_CAT} && wait ${PIDOF_CAT} 2> /dev/null
    # pkill $ZAP_COMMAND # && wait ${PIDOF_AZAP} 2> /dev/null
    kill ${PIDOF_AZAP} && wait ${PIDOF_AZAP} 2> /dev/null
    printf "$yell\tFINISHED REC :$cyan ${FILE_NAME}\n$reset"
    remove_entry
}

howlong=1
printf "${green}Waiting to record ....\n"
# Main poll loop: re-read the FIRST schedule entry each pass and either drop
# it (already past), announce it (upcoming), or record it (start == now).
while true; do
    sleep $howlong
    howlong=$SLEEP
    [ -e "$SCHED_FILE" ] || continue
    [ "`cat $SCHED_FILE`" == "" ] && continue
    TODAY=`date +%Y%m%d`
    NOW=`date +%k%M`
    read -r DAY START FINISH CHAN NAME < $SCHED_FILE
    #printf "$TODAY . $DAY . $START . $FINISH . $CHAN . $NAME\n"
    # drop entries whose start time has already passed
    if [ $DAY == $TODAY ] && [ $START -lt $NOW ] || [[ $DAY -lt $TODAY ]] ; then
        printf "$red\tOld Entry : Removing $CHAN $DAY $START\n"
        remove_entry
        howlong=1
        continue
    fi
    next_entry="${DAY}_${START}_$NAME"
    # announce the upcoming entry only when it changes
    [ "$next_entry" != "$current_entry" ] && printf "$cyan\tNEXT RECORDING :- $yell $next_entry\n"
    current_entry=$next_entry
    if [ $DAY == $TODAY ] && [ $START == $NOW ]; then
        record_entry
        howlong=1
    fi
done
exit 0
| true
|
923045cf5e2bb99ce606a6b352c14f0c1dec62ce
|
Shell
|
richiefreedom/edit
|
/scripts/fontfinder.sh
|
UTF-8
| 440
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Locate the system's monospace font file via fontconfig and print its path.
# On any failure, print the bundled fallback "default.ttf" and exit 1.

defPath="default.ttf"

HandleError() {
    echo "$defPath"
    exit 1
}

command -v fc-match > /dev/null || HandleError
command -v fc-list > /dev/null || HandleError

# e.g. "DejaVuSansMono.ttf: "DejaVu Sans Mono" "Book""
fontMatch=$(fc-match monospace) || HandleError
fontFile=${fontMatch%%:*}

# Look the matched file up in fc-list to obtain its full path.
# -F: the filename is a literal, not a regex (previously "." matched any
# character); quoting keeps paths with spaces intact; -- guards leading "-".
fontDesc=$(fc-list | grep -F -- "$fontFile") || HandleError
fontPath=${fontDesc%%:*}

[ -n "$fontPath" ] || HandleError
[ -f "$fontPath" ] || HandleError

echo "$fontPath"
| true
|
f6053e5e1c46cbc29f76fdad02dbe4bcbbb23059
|
Shell
|
MenkeTechnologies/zpwr
|
/install/zpwrInstall.sh
|
UTF-8
| 32,252
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#{{{ MARK:Header
#**************************************************************
##### Author: JACOBMENKE
##### Date: Wed May 31 22:54:32 EDT 2017
##### Purpose: bash script for custom terminal setup
##### Notes: goal - work on mac and linux
##### Notes: this script should a one liner installer
#}}}***********************************************************
#{{{ MARK:zinit
#**************************************************************
export ZPWR_PLUGIN_MANAGER_HOME="$HOME/.zinit"
export ZPWR_PLUGIN_MANAGER="zinit"

# do not want any surprises when relative cd to other dirs
unset CDPATH

typeset zpwrBaseDir

VERSION="2.1.0"

# resolve all symlinks
ZPWR_PWD="$(pwd -P)"
#}}}***********************************************************
#{{{ MARK:Env vars
#**************************************************************
INSTALL_VIM_SRC=false
# shows count of steps in installer
INSTALL_COUNTER=0
# for the width of the install messages
export COLUMNS="$(tput cols)"
#}}}***********************************************************
#{{{ MARK:sanity
#**************************************************************
# source common functions — common.sh provides $ZPWR, $ZPWR_LOCAL,
# $ZPWR_SCRIPTS, $ZPWR_INSTALLER_OUTPUT, the zpwr* helpers and OS detection
# ($ZPWR_OS_TYPE, $ZPWR_DISTRO_NAME) used throughout this installer
if ! source common.sh; then
    echo "$ZPWR_PWD/common.sh source failed. Are you in \$ZPWR/install directory?" >&2
    exit 1
fi

if [[ ! -d $ZPWR ]]; then
    echo "Must be in $ZPWR/install directory" >&2
    exit 1
fi

echo "installing to $ZPWR"

if [[ ! -d $ZPWR_LOCAL ]]; then
    echo "Must be in $ZPWR/install directory" >&2
    exit 1
fi

# refuse to run from $ZPWR itself; must run from $ZPWR/install
if [[ $ZPWR == "$ZPWR_PWD" ]]; then
    echo "Must be in $ZPWR/install directory" >&2
    exit 1
fi

if [[ ! -d $ZPWR_SCRIPTS ]]; then
    echo "Must be in $ZPWR/install directory" >&2
    exit 1
fi

ESCAPE_REMOVER="$ZPWR_SCRIPTS/escapeRemover.pl"
# the destination directory for zpwr specific installed files
LOGFILE="$ZPWR_INSTALLER_OUTPUT/escaped_logfile.txt"
LOGFILE_CARGO_YCM="$ZPWR_INSTALLER_OUTPUT/cargoYCM_logfile.txt"

if ! test -f "$ESCAPE_REMOVER"; then
    zpwrLogConsoleErr "where is ESCAPE_REMOVER '$ESCAPE_REMOVER'?"
    exit 1
fi

if ! test -x "$ESCAPE_REMOVER"; then
    zpwrLogConsoleErr "why is '$ESCAPE_REMOVER' not executable?" >&2
    exit 1
fi

if ! zpwrCommandExists sudo perl git curl bash; then
    zpwrLogConsoleErr "you must have sudo, perl, git, bash and curl installed"
    exit 1
fi

BACKUP_DIR="$ZPWR_LOCAL/$USER.rc.bak.$(date +'%m.%d.%Y')"

zpwrCommandExists vim && vimV="$(vim --version | head -n 1 | awk '{print $5}')"

# NOTE(review): in vim --version output, "-python3" marks the *disabled*
# feature, so this sets INSTALL_VIM_SRC=true when vim is >= 8.0 OR lacks
# python3 — which looks inverted relative to the "Vim Version less than 8.0
# or without python!" message printed later when it is true. Verify intent.
if [[ -n $vimV ]]; then
    if echo "$vimV >= 8.0" | bc 2>/dev/null | grep -q 1 || vim --version 2>&1 | grep -q '\-python3';then
        INSTALL_VIM_SRC=true
    fi
fi
#}}}***********************************************************
#{{{ MARK:mkdir if needed
#**************************************************************
if [[ ! -d $ZPWR_INSTALLER_OUTPUT ]]; then
    mkdir -p $ZPWR_INSTALLER_OUTPUT
fi

if [[ ! -d $ZPWR_LOCAL ]]; then
    mkdir -p $ZPWR_LOCAL
fi

# the destination directory for zpwr specific temp files
if [[ ! -d $ZPWR_LOCAL_TEMP ]]; then
    mkdir -p $ZPWR_LOCAL_TEMP
fi
#}}}***********************************************************
#{{{ MARK:Stream tee to LOGFILE
#**************************************************************
clear

# replicate stdout and stderr to LOGFILE
exec > >(tee -a "$LOGFILE")
exec 2>&1
#}}}***********************************************************
#{{{ MARK:Banner
#**************************************************************
cat<<\EOF
... .. .. ..
x*8888x.:*8888: -"888: < .z@8"`
X 48888X `8888H 8888 u. u. !@88E
X8x. 8888X 8888X !888> .u x@88k u@88c. '888E u .u
X8888 X8888 88888 "*8%- ud8888. ^"8888""8888" 888E u@8NL ud8888.
'*888!X8888> X8888 xH8> :888'8888. 8888 888R 888E`"88*" :888'8888.
`?8 `8888 X888X X888> d888 '88%" 8888 888R 888E .dN. d888 '88%"
-^ '888" X888 8888> 8888.+" 8888 888R 888E~8888 8888.+"
dx '88~x. !88~ 8888> 8888L 8888 888R 888E '888& 8888L
.8888Xf.888x:! X888X.: '8888c. .+ "*88*" 8888" 888E 9888. '8888c. .+
:""888":~"888" `888*" "88888% "" 'Y" '"888*" 4888" "88888%
"~' "~ "" "YP' "" "" "YP'
.....
.H8888888h. ~-. .uef^"
888888888888x `> :d88E u. u. u.
X~ `?888888hx~ .u . `888E x@88k u@88c. ...ue888b
' x8.^"*88*" ud8888. .udR88N 888E .z8k ^"8888""8888" 888R Y888r
`-:- X8888x :888'8888. <888'888k 888E~?888L 8888 888R 888R I888>
488888> d888 '88%" 9888 'Y" 888E 888E 8888 888R 888R I888>
.. `"88* 8888.+" 9888 888E 888E 8888 888R 888R I888>
x88888nX" . 8888L 9888 888E 888E 8888 888R u8888cJ888
!"*8888888n.. : '8888c. .+ ?8888u../ 888E 888E "*88*" 8888" "*888*P"
' "*88888888* "88888% "8888P' m888N= 888> "" 'Y" 'Y"
^"***"` "YP' "P' `Y" 888
J88"
@%
:"
.. . .x+=:.
x .d88" @88> z` ^%
5888R u. %8P . <k
'888R ...ue888b uL . .u .@8Ned8"
888R 888R Y888r .ue888Nc.. .@88u ud8888. .@^%8888"
888R 888R I888> d88E`"888E` ''888E` :888'8888. x88: `)8b.
888R 888R I888> 888E 888E 888E d888 '88%" 8888N=*8888
888R 888R I888> 888E 888E 888E 8888.+" %8" R88
888R u8888cJ888 888E 888E 888E 8888L @8Wou 9%
.888B . "*888*P" 888& .888E 888& '8888c. .+ .888888P`
^*888% 'Y" *888" 888& R888" "88888% ` ^"F
"% `" "888E "" "YP'
.dWi `88E
4888~ J8%
^"===*"`
EOF
#}}}***********************************************************
#{{{ MARK:Setup deps
#**************************************************************
# Dependencies
# 1) neovim
# 2) tmux
# 3) lolcat in go
# 4) cmatrix
# 5) htop
# 6) cmake
# 7) youcompleteme
# 8) ultisnips
# 9) supertab
# 10) zinit
# 11) powerlevel10k prompt
# 12) pathogen
# 13) nerdtree
# 14) fzf
# 15) powerline
# 16) vim-airline
# 17) zsh
#etc
# Base package list installed on every supported platform; the
# addDependencies* helpers below prepend/append distro-specific names.
dependencies_ary=(subversion openssl moreutils cmake tig hexedit boxes tal iperf vim tmux chkrootkit wget cowsay cmatrix htop bpython sl mutt \
    screenfetch ccze htop figlet zsh docker.io docker erlang elixir links \
    rlwrap tor nvm nginx nmap mtr mytop tcpdump redis toilet mysql \
    mongodb postgresql jnettop iotop fping ctags texinfo lsof \
    whois weechat gradle ant maven telnet tree mc ocaml groovy slurm \
    bmon ruby parallel pssh shfmt global)

# Packages common to all Linux distros.
addDependenciesLinux(){
    dependencies_ary=(neovim pkg-config libclang1 clang llvm ${dependencies_ary[@]})
    dependencies_ary+=(python-pip inxi build-essential traceroute proxychains atop tcl mlocate php-bcmath php-mysql php-sockets \
        php-mbstring php-gettext nmon clamav gparted sysstat git reptyr iptraf dstat ecryptfs-utils at netatalk dnsutils ltrace zabbix-agent \
        lua5.1 lua5.1-dev rl-dev software-properties-common sysv-rc-conf afpfs-ng \
        samba samba-common scrot syslog-ng sshfs fuse tomcat8 golang xclip strace)
}

# Arch-specific package names.
addDependenciesArch(){
    dependencies_ary+=(cronie ncurses npm autoconf make the_silver_searcher go linux-headers net-tools)
}

# openSUSE-specific package names.
addDependenciesSuse(){
    dependencies_ary=(python3-devel llvm-devel openssl-devel go ${dependencies_ary[@]})
    dependencies_ary+=(man gcc-c++ makeinfo autoconf openldap2-devel mariadb postgresql-server libcurl-devel net-snmp-devel \
        mysql-devel libevent-devel postgresql-devel fortune ruby-devel net-tools-deprecated \
        python3-pip curl libffi-devel grc libpcap-devel the_silver_searcher kernel-devel gcc libxml2-devel libxslt-devel)
}

# Alpine-specific package names (apk).
addDependenciesAlpine(){
    dependencies_ary=(nodejs npm go chrony ${dependencies_ary[@]} mandoc man-pages less procps)
}

# Debian/Ubuntu-specific package names (apt).
addDependenciesDebian(){
    dependencies_ary=(python3-dev libssl-dev ${dependencies_ary[@]})
    dependencies_ary+=(mysql-server mariadb-server gcc bc lib-gnome2-dev silversearcher-ag libgnomeui-dev libgtk2.0-dev libatk1.0-dev libbonoboui2-dev \
        ncurses-dev libevent-dev libncurses5-dev libcairo2-dev libx11-dev \
        libxpm-dev libxt-dev \
        libperl-dev libpq-dev libpcap-dev fortunes ruby-dev \
        python3-pip libffi-dev libssl-dev grc automake whatweb)
}

# RedHat/CentOS-specific package names (yum); CentOS also needs EPEL first.
addDependenciesRedHat(){
    if [[ "$ZPWR_DISTRO_NAME" == centos ]]; then
        sudo yum install -y epel-release
    fi
    dependencies_ary=(python3-devel llvm-devel openssl-devel ${dependencies_ary[@]})
    dependencies_ary+=(gcc-c++ 'fortune-mod.*' mariadb-server clamav-update openldap-devel libcurl-devel net-snmp-devel mysql-devel libevent-devel libffi-devel mysql-server \
        python36-tools ncurses-devel libpcap-devel curses-devel automake the_silver_searcher kernel-devel postgresql-devel)
}

# FreeBSD-specific package names (pkg).
addDependenciesFreeBSD(){
    dependencies_ary+=(gcc go the_silver_searcher vim python3 gnome3 devel/ruby-gems)
}

# macOS-specific package names (Homebrew).
addDependenciesMac(){
    dependencies_ary=(neovim macvim ${dependencies_ary[@]})
    dependencies_ary+=(tcl-tk httpie proxychains-ng s-search git ag automake autoconf fortune node the_silver_searcher \
        fswatch zzz ghc lua python readline reattach-to-user-namespace speedtest-cli aalib ncmpcpp mpd ctop hub ncurses tomcat ninvaders kotlin grails go)
}
#}}}***********************************************************
#{{{ MARK:installer funcs
#**************************************************************
# Print installer usage.
# The version flag is -V (capital), matching the getopts spec ":hnVsca"
# below; the help text previously advertised a lowercase -v that the
# option parser rejects.
function usage(){
    echo "Usage : $0 [options] [--]

    Options:
    -a   Install all dependencies
    -c   Copy just configs
    -n   Do not start tmux at end of installer
    -s   Skip main section
    -h   Display this message
    -V   Display script version"
}
# Pretty-print the full package list for the detected distro, ask the user
# to confirm (proceed), then show the animated banner while they wait.
# Reads globals: dependencies_ary, ZPWR_DISTRO_NAME, ZPWR_SCRIPTS.
function showDeps(){
    {
        printf "Installing ${#dependencies_ary[@]} packages on $ZPWR_DISTRO_NAME: "
        # package names are safe as printf format strings (no % characters)
        for dep in "${dependencies_ary[@]}" ; do
            printf "$dep "
        done
    } | zpwrPrettyPrintBoxStdin

    proceed
    bash "$ZPWR_SCRIPTS/zpwrBannerSleep.sh"
}
# Dotfiles and config directories the installer will overwrite.
files=(.zshrc .tmux.conf .vimrc .ideavimrc .iftopcolors .iftop.conf \
    conf.gls conf.df conf.ifconfig conf.mount .inputrc .my.cnf motd.sh)
dirs=(.zpwr/scripts .config/htop .config/powerline/themes/tmux)

# Copy any existing rc files and config dirs from $HOME into $BACKUP_DIR
# before the installer clobbers them.
# Fixes: array expansions are quoted (SC2068), loop variables no longer
# leak into the global namespace, and a missing last item no longer makes
# the function return non-zero.
function backup(){
    local item
    test -d "$BACKUP_DIR" || mkdir -p "$BACKUP_DIR"
    for item in "${files[@]}"; do
        test -f "$HOME/$item" && cp "$HOME/$item" "$BACKUP_DIR"
    done
    for item in "${dirs[@]}"; do
        test -d "$HOME/$item" && cp -R "$HOME/$item" "$BACKUP_DIR"
    done
    return 0
}
# Tell the user exactly which rc files and directories will be replaced and
# where backups go, get confirmation (proceed), then take the backup.
function warnOverwrite(){
    zpwrPrettyPrintBox "The following will be overwritten: .zshrc, .tmux.conf, .inputrc, .vimrc, .ideavimrc, .iftop.conf, .shell_aliases_functions.sh in $HOME"
    zpwrPrettyPrintBox "These files if they exist will be backed to $BACKUP_DIR"
    # heredoc body stays flush-left: it is the literal text shown to the user
    zpwrPrettyPrintBoxStdin <<EOF
The following directories if they exist will be backed to $BACKUP_DIR:
$HOME/${dirs[0]},
$HOME/${dirs[1]},
$HOME/${dirs[2]}
EOF
    proceed
    backup
}

# Recommend a NOPASSWD sudoers entry so the long install can run unattended,
# then wait for confirmation.
function warnSudo(){
    zpwrPrettyPrintBox "It is highly recommended to run 'sudo visudo' to allow noninteractive install. This allows running sudo without a password. The following line would be added to /etc/sudoers: <Your Username> ALL=(ALL) NOPASSWD:ALL"
    proceed
}
# Install vim and tmux plugins in the background; output goes to
# $LOGFILE_CARGO_YCM and the PID is kept in PLUGIN_PID for the signal trap.
function pluginsinstall(){
    zpwrGoInstallerDir
    zpwrFileMustExist plugins_install.sh
    bash plugins_install.sh >> "$LOGFILE_CARGO_YCM" 2>&1 &
    PLUGIN_PID=$!
    zpwrPrettyPrintBox "Installing vim and tmux plugins in background @ $PLUGIN_PID"
}

# Build YouCompleteMe in the background; PID kept in YCM_PID for the trap.
function ycminstall(){
    zpwrGoInstallerDir
    zpwrFileMustExist ycm_install.sh
    bash ycm_install.sh >> "$LOGFILE_CARGO_YCM" 2>&1 &
    YCM_PID=$!
    zpwrPrettyPrintBox "Installing YouCompleteMe in background @ $YCM_PID"
}

# Install rustup (for exa, fd and bat) in the background; PID kept in
# CARGO_PID for the trap. A stray debug "echo $CARGO_PID" that leaked the
# raw PID to stdout (inconsistent with the sibling installers) was removed.
function cargoinstall(){
    zpwrGoInstallerDir
    zpwrFileMustExist rustupinstall.sh
    bash rustupinstall.sh >> "$LOGFILE_CARGO_YCM" 2>&1 &
    CARGO_PID=$!
    zpwrPrettyPrintBox "Installing rustup for exa, fd and bat in background @ $CARGO_PID"
}
#}}}***********************************************************
#{{{ MARK:Getopts
#**************************************************************
# opt flags (see usage() for meanings)
skip=false
justConfig=false
noTmux=false
fullInstall=false

while getopts ":hnVsca" opt
do
    case $opt in
        h) usage; exit 0 ;;
        V) echo "$0 -- Version $VERSION"; exit 0 ;;
        a) fullInstall=true ;;
        s) skip=true ;;
        n) noTmux=true ;;
        c) justConfig=true ;;
        * ) echo -e "\n Option does not exist : $OPTARG\n"
            usage; exit 1 ;;
    esac # --- end of case ---
done
# discard the parsed options, leaving positional args in $@
shift $(($OPTIND-1))

# reap the background installers (YCM, plugins, cargo) on interrupt/termination
trap 'echo kill $YCM_PID $PLUGIN_PID $CARGO_PID; kill $YCM_PID $PLUGIN_PID $CARGO_PID 2>/dev/null;echo bye;exit' INT TERM HUP QUIT

if [[ $justConfig == true ]]; then
    zpwrPrettyPrintBox "Installing just configs"
fi

if [[ $skip == true ]]; then
    zpwrPrettyPrintBox "Skipping dependencies section"
fi
#}}}***********************************************************
#{{{ MARK:macOS
#**************************************************************
# macOS: install Homebrew if needed, then every dependency via brew.
if [[ "$ZPWR_OS_TYPE" == "darwin" ]]; then

    warnOverwrite
    warnSudo

    if [[ $justConfig != true ]]; then

        zpwrPrettyPrintBox "Checking Dependencies for Mac..."
        addDependenciesMac
        ZPWR_DISTRO_NAME=Mac
        ZPWR_DISTRO_FAMILY=mac
        showDeps

        if ! zpwrCommandExists brew; then
            # install homebrew
            zpwrPrettyPrintBox "Installing HomeBrew..."
            /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/zpwrInstall.sh)"
        fi

        # Apple Silicon installs brew to /opt/homebrew, which may not be on PATH yet
        if ! zpwrCommandExists brew; then
            if [[ -d /opt/homebrew/bin ]]; then
                PATH="/opt/homebrew/bin:$PATH"
                if ! zpwrCommandExists brew; then
                    zpwrPrettyPrintBox "Need Homebrew"
                    exit 1
                fi
            else
                zpwrPrettyPrintBox "Need Homebrew"
                exit 1
            fi
        fi

        zpwrPrettyPrintBox "We have Homebrew..."

        if ! command brew ls python > /dev/null 2>&1; then
            brew install python
            brew install pip
        fi

        zpwrPrettyPrintBox "We have Python..."

        if [[ $skip != true ]]; then
            zpwrPrettyPrintBox "Now The Main Course..."
            sleep 1
            zpwrPrettyPrintBox "Updating dependency list"
            zpwrInstallerRefresh "$ZPWR_DISTRO_FAMILY"
            zpwrPrettyPrintBox "Installing java"
            brew install --cask java
            zpwrPrettyPrintBox "Checking for curl before rustup install"
            # NOTE(review): the Linux/FreeBSD branches use zpwrCommandExists
            # here; verify that zpwrExists is also defined in common.sh
            zpwrExists curl || zpwrInstallerUpdate curl mac
            cargoinstall
            pluginsinstall
            # main loop
            for prog in "${dependencies_ary[@]}"; do
                zpwrPrettyPrintBox "Installing $prog"
                zpwrInstallerUpdate "$prog" mac
            done
            zpwrPrettyPrintBox "Upgrading packages"
            zpwrInstallerUpgrade mac
        fi

        zpwrPrettyPrintBox "Tapping Homebrew fonts"
        brew tap homebrew/cask-fonts
        zpwrPrettyPrintBox "Installing hack nerd font"
        brew install --cask font-hack-nerd-font
        zpwrPrettyPrintBox "Installing meteor"
        curl https://install.meteor.com/ | sh
        zpwrPrettyPrintBox "PostInstalling nodejs"
        brew postinstall node

        if [[ -d "/opt/homebrew" ]]; then
            export HOMEBREW_PREFIX='/opt/homebrew'
        else
            export HOMEBREW_PREFIX='/usr/local'
        fi

        # system sed breaks extended regex
        ln -s "$HOMEBREW_PREFIX/bin/gsed" "$HOMEBREW_PREFIX/bin/sed"

        if test -f "$HOMEBREW_PREFIX/share/zsh/site-functions/_git"; then
            zpwrPrettyPrintBox "Removing homebrew installed git zsh completion at $HOMEBREW_PREFIX/share/zsh/site-functions/_git because conflicts with zsh's git completion"
            rm "$HOMEBREW_PREFIX/share/zsh/site-functions/_git"
        fi
    fi
#}}}***********************************************************
#{{{ MARK:Linux distros
#**************************************************************
# Linux: dispatch on distro family via zpwrOsFamily, install everything with
# the native package manager, then install the Powerline fonts.
elif [[ "$ZPWR_OS_TYPE" == "linux" ]]; then
    addDependenciesLinux
    warnOverwrite
    warnSudo

    if [[ $justConfig != true ]]; then
        # zpwrOsFamily takes one shell snippet per family, in the order
        # debian / redhat / arch / suse / alpine / unknown, and evals the
        # one matching the detected distro.
        # NOTE(review): the arch snippet's message says "zypper" — looks
        # copy-pasted from the suse snippet (arch uses pacman); the string
        # is runtime output so it is left untouched here.
        zpwrOsFamily \
            'ZPWR_DISTRO_FAMILY=debian
        zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with the Advanced Package Manager..."
        addDependenciesDebian' \
            'ZPWR_DISTRO_FAMILY=redhat
        zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with the Yellowdog Updater Modified"
        addDependenciesRedHat' \
            'ZPWR_DISTRO_FAMILY=arch
        zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with zypper"
        addDependenciesArch' \
            'ZPWR_DISTRO_FAMILY=suse
        zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with zypper"
        addDependenciesSuse' \
            'ZPWR_DISTRO_FAMILY=alpine
        zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with apk"
        addDependenciesAlpine
        addDependenciesDebian' \
            'zpwrPrettyPrintBox "Your ZPWR_DISTRO_FAMILY $ZPWR_DISTRO_NAME is unsupported!" >&2
        exit 1'

        showDeps

        zpwrPrettyPrintBox "Updating dependency list"
        zpwrInstallerRefresh "$ZPWR_DISTRO_FAMILY"

        if [[ $skip != true ]]; then
            zpwrPrettyPrintBox "Now The Main Course..."
            sleep 1
            zpwrPrettyPrintBox "Checking for curl before rustup install"
            zpwrCommandExists curl || zpwrInstallerUpdate curl "$ZPWR_DISTRO_FAMILY"
            cargoinstall
            pluginsinstall
            # main loop
            for prog in "${dependencies_ary[@]}"; do
                zpwrPrettyPrintBox "Installing $prog"
                zpwrInstallerUpdate "$prog" "$ZPWR_DISTRO_FAMILY"
            done
            zpwrPrettyPrintBox "Upgrading $ZPWR_DISTRO_FAMILY"
            zpwrInstallerUpgrade "$ZPWR_DISTRO_FAMILY"
        fi

        zpwrPrettyPrintBox "Installing Powerline fonts"
        if [[ -d /usr/share/fonts ]] && [[ -d /etc/fonts/conf.d ]]; then
            zpwrGoInstallerOutputDir
            wget https://github.com/powerline/powerline/raw/develop/font/PowerlineSymbols.otf
            wget https://github.com/powerline/powerline/raw/develop/font/10-powerline-symbols.conf
            # move font to valid font path
            sudo mv PowerlineSymbols.otf /usr/share/fonts/
            # Update font cache for the path the font
            sudo fc-cache -vf /usr/share/fonts/
            # Install the fontconfig file
            sudo mv 10-powerline-symbols.conf /etc/fonts/conf.d/
        else
            zpwrPrettyPrintBox "/usr/share/fonts and /etc/fonts/conf.d must exist for powerline fonts." >&2
        fi
    fi
#}}}***********************************************************
#{{{ MARK:other unix
#**************************************************************
else
    #unix — currently only FreeBSD is supported here
    if [[ "$ZPWR_OS_TYPE" == freebsd ]]; then
        ZPWR_DISTRO_FAMILY=freebsd
        ZPWR_DISTRO_NAME=FreeBSD
        warnOverwrite
        warnSudo
        if [[ $justConfig != true ]]; then
            zpwrPrettyPrintBox "Fetching Dependencies for $ZPWR_DISTRO_NAME with pkg"
            addDependenciesFreeBSD
            showDeps
            zpwrPrettyPrintBox "Updating dependency list"
            zpwrInstallerRefresh "$ZPWR_DISTRO_FAMILY"
            if [[ $skip != true ]]; then
                zpwrPrettyPrintBox "Now The Main Course..."
                sleep 1
                zpwrPrettyPrintBox "Checking for curl before rustup install"
                zpwrCommandExists curl || zpwrInstallerUpdate curl "$ZPWR_DISTRO_FAMILY"
                cargoinstall
                pluginsinstall
                # main loop
                for prog in "${dependencies_ary[@]}"; do
                    zpwrPrettyPrintBox "Installing $prog"
                    zpwrInstallerUpdate "$prog" "$ZPWR_DISTRO_FAMILY"
                done
                zpwrPrettyPrintBox "Upgrading $ZPWR_DISTRO_FAMILY"
                zpwrInstallerUpgrade "$ZPWR_DISTRO_FAMILY"
            fi
            zpwrPrettyPrintBox "Installing Powerline fonts"
            # NOTE(review): unlike the Linux branch, this path does not call
            # zpwrGoInstallerOutputDir first, so the downloads land in the
            # current working directory — verify that is intended.
            if [[ -d /usr/share/fonts ]] && [[ -d /etc/fonts/conf.d ]]; then
                wget https://github.com/powerline/powerline/raw/develop/font/PowerlineSymbols.otf
                wget https://github.com/powerline/powerline/raw/develop/font/10-powerline-symbols.conf
                # move font to valid font path
                sudo mv PowerlineSymbols.otf /usr/share/fonts/
                # Update font cache for the path the font
                sudo fc-cache -vf /usr/share/fonts/
                # Install the fontconfig file
                sudo mv 10-powerline-symbols.conf /etc/fonts/conf.d/
            else
                zpwrPrettyPrintBox "/usr/share/fonts and /etc/fonts/conf.d must exist for powerline fonts." >&2
            fi
        fi
    else
        zpwrPrettyPrintBox "Your OS $ZPWR_OS_TYPE is unsupported!" >&2
        exit 1
    fi
fi
#}}}***********************************************************
#{{{ MARK:vim
#**************************************************************
zpwrPrettyPrintBox "Common Installer Section"
if [[ $justConfig != true ]]; then
if [[ $INSTALL_VIM_SRC == true ]]; then
#if neovim already installed, vim already points to neovim
zpwrPrettyPrintBox "Vim Version less than 8.0 or without python! Installing Vim from Source."
zpwrGoInstallerDir
zpwrFileMustExist vim_install.sh
source vim_install.sh
fi
zpwrGoInstallerDir
zpwrCommandExists nvim || {
zpwrFileMustExist neovim_install.sh
source neovim_install.sh
}
zpwrGoInstallerDir
zpwrFileMustExist npm_install.sh
source npm_install.sh
zpwrGoInstallerDir
fi
#}}}***********************************************************
#{{{ MARK:Tmux
#**************************************************************
if [[ $justConfig != true ]]; then
ycminstall
zpwrGoInstallerDir
zpwrFileMustExist pip_install.sh
source pip_install.sh
zpwrGoInstallerOutputDir
zpwrPrettyPrintBox "Installing Pipes.sh from source"
git clone https://github.com/pipeseroni/pipes.sh.git
builtin cd pipes.sh && {
sudo make install
}
fi
zpwrOsAllVsFedora \
zpwrNeedSudo=yes \
zpwrNeedSudo=no \
zpwrNeedSudo=no
zpwrPrettyPrintBox "Installing Iftop config..."
ip=$(ifconfig | grep "inet\s" | grep -v 127 | awk '{print $2}' | sed 's@addr:@@')
iface=$(ifconfig | grep -B3 "inet .*$ip" | grep '^[a-zA-Z0-9].*' | awk '{print $1}' | tr -d ":")
if [[ -n "$iface" ]]; then
echo "IPv4: $ip and interface: $iface"
if [[ -f "$HOME/.iftop.conf" ]]; then
if ! grep -E '^interface:\S+' "$HOME/.iftop.conf"; then
echo "no interface in $HOME/.iftop.conf"
echo cp "$HOME/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
cp "$HOME/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
echo "interface:$iface" >> "$ZPWR_INSTALLER_OUTPUT/.iftop.conf"
echo cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
else
echo "interface in $HOME/.iftop.conf. No mod"
fi
else
echo "no $HOME/.iftop.conf"
echo cp "$ZPWR_INSTALL/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
cp "$ZPWR_INSTALL/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
echo "interface:$iface" >> "$ZPWR_INSTALLER_OUTPUT/.iftop.conf"
echo cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
fi
else
echo "IPv4 Iface missing: $ip and interface: $iface"
if [[ -f "$HOME/.iftop.conf" ]]; then
if ! grep -E '^interface:\S+' "$HOME/.iftop.conf"; then
echo "no interface in $HOME/.iftop.conf. No mod"
else
echo "interface in $HOME/.iftop.conf. No mod"
fi
else
echo "no $HOME/.iftop.conf"
echo cp "$ZPWR_INSTALL/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
cp "$ZPWR_INSTALL/.iftop.conf" "$ZPWR_INSTALLER_OUTPUT"
echo cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
cp "$ZPWR_INSTALLER_OUTPUT/.iftop.conf" "$HOME"
fi
fi
#}}}***********************************************************
#{{{ MARK:Utilities
#**************************************************************
if [[ $justConfig != true ]]; then
zpwrPrettyPrintBox "Installing IFTOP-color by MenkeTechnologies"
zpwrGoInstallerDir
zpwrFileMustExist iftop_install.sh
source iftop_install.sh
if ! zpwrCommandExists grc; then
zpwrGoInstallerOutputDir
zpwrPrettyPrintBox "Installing grc from source to $(pwd)"
git clone https://github.com/garabik/grc.git
if builtin cd grc; then
sudo bash zpwrInstall.sh
else
zpwrFail "could not cd to grc"
fi
fi
if [[ $ZPWR_OS_TYPE == darwin ]]; then
zpwrPrettyPrintBox "Try again for ponysay and lolcat on mac"
zpwrCommandExists ponysay || brew install ponysay
fi
zpwrPrettyPrintBox "Installing grc configuration for colorization...asking for passwd with sudo"
if [[ "$(uname)" == Darwin ]]; then
GRC_DIR=/usr/local/share/grc
else
GRC_DIR=/usr/share/grc
fi
zpwrGoInstallerOutputDir
zpwrPrettyPrintBox "Installing ponysay from source"
git clone https://github.com/erkin/ponysay.git && {
builtin cd ponysay && sudo ./setup.py --freedom=partial install
}
zpwrPrettyPrintBox "Installing Go deps"
zpwrGoInstallerDir
zpwrFileMustExist go_install.sh
source go_install.sh
if ! test -f /usr/local/sbin/iftop;then
zpwrPrettyPrintBox "No iftop so installing"
zpwrInstallerUpdate iftop "$ZPWR_DISTRO_FAMILY"
fi
if [[ $fullInstall == true ]]; then
if [[ "$ZPWR_OS_TYPE" != darwin ]]; then
zpwrPrettyPrintBox "Installing snort"
zpwrInstallerUpdate snort "$ZPWR_DISTRO_FAMILY"
zpwrPrettyPrintBox "Installing logwatch"
zpwrInstallerUpdate logwatch "$ZPWR_DISTRO_FAMILY"
zpwrPrettyPrintBox "Installing postfix"
zpwrInstallerUpdate postfix "$ZPWR_DISTRO_FAMILY"
fi
zpwrPrettyPrintBox "Installing wireshark"
zpwrInstallerUpdate wireshark "$ZPWR_DISTRO_FAMILY"
zpwrPrettyPrintBox "Installing mailutils"
zpwrInstallerUpdate mailutils "$ZPWR_DISTRO_FAMILY"
fi
fi
#}}}***********************************************************
#{{{ MARK:zsh
#**************************************************************
if [[ "$ZPWR_PLUGIN_MANAGER" == zinit ]]; then
zpwrPrettyPrintBox "Installing zinit"
mkdir "$ZPWR_PLUGIN_MANAGER_HOME"
git clone https://github.com/$ZPWR_ZDHARMA/zinit.git "$ZPWR_PLUGIN_MANAGER_HOME/bin"
if [[ ! -d $ZPWR_PLUGIN_MANAGER_HOME/plugins ]]; then
mkdir -pv $ZPWR_PLUGIN_MANAGER_HOME/plugins/
fi
zpwrPrettyPrintBox "Change default shell to zsh"
sudo chsh -s "$(which zsh)"
zpwrPrettyPrintBox "Clone fzf"
git clone https://github.com/MenkeTechnologies/fzf.git "$ZPWR_PLUGIN_MANAGER_HOME/plugins/MenkeTechnologies---fzf"
zpwrPrettyPrintBox "Link fzf"
echo ln -sfn "$ZPWR_PLUGIN_MANAGER_HOME/plugins/MenkeTechnologies---fzf" "$ZPWR_PLUGIN_MANAGER_HOME/plugins/fzf"
ln -sfn "$ZPWR_PLUGIN_MANAGER_HOME/plugins/MenkeTechnologies---fzf" "$ZPWR_PLUGIN_MANAGER_HOME/plugins/fzf"
zpwrPrettyPrintBox "Installing fzf"
"$ZPWR_PLUGIN_MANAGER_HOME/plugins/fzf/install" --bin
elif [[ "$ZPWR_PLUGIN_MANAGER" == oh-my-zsh ]]; then
zpwrPrettyPrintBox "Installing OhMyZsh"
sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/zpwrInstall.sh)"
zpwrPrettyPrintBox "Installing fzf"
"$ZPWR_PLUGIN_MANAGER_HOME/custom/plugins/fzf/install" --bin
zpwrPrettyPrintBox "Installing Zsh plugins"
zpwrGoInstallerDir
zpwrFileMustExist zsh_plugins_install.sh
source zsh_plugins_install.sh
zpwrPrettyPrintBox "Installing Powerlevel9k"
git clone https://github.com/MenkeTechnologies/powerlevel9k.git "$ZPWR_PLUGIN_MANAGER_HOME/themes/powerlevel9k"
fi
zpwrPrettyPrintBox "Linking zshrc configuration file to home directory"
zpwrGoInstallerDir
echo ln -sf $ZPWR_INSTALL/.zshrc $HOME/.zshrc
ln -sf $ZPWR_INSTALL/.zshrc $HOME/.zshrc
zpwrPrettyPrintBox "Running Vundle"
#run vundle install for ultisnips, supertab
vim -c PluginInstall -c qall
if [[ $justConfig != true ]]; then
zpwrPrettyPrintBox "Final updating of dependency list"
zpwrInstallerRefresh "$ZPWR_DISTRO_FAMILY"
fi
#}}}***********************************************************
#{{{ MARK:Final sym links
#**************************************************************
zpwrGoInstallerDir
zpwrPrettyPrintBox "Generating $ZPWR_INSTALLER_OUTPUT/zpwr_log.txt with $ESCAPE_REMOVER from $LOGFILE"
"$ESCAPE_REMOVER" "$LOGFILE" > "$ZPWR_INSTALLER_OUTPUT/zpwr_log.txt"
if [[ $justConfig != true ]] && [[ $skip != true ]]; then
zpwrPrettyPrintBox "Waiting for cargo installer to finish"
wait $CARGO_PID
wait $YCM_PID
wait $PLUGIN_PID
fi
if [[ $justConfig != true ]] && [[ $skip != true ]]; then
zpwrPrettyPrintBox "Done!!!!!!"
zpwrPrettyPrintBox "Starting Tmux..."
zpwrPrettyPrintBox "Starting the matrix"
fi
# must have zsh at this point
export SHELL="$(which zsh)"
dir="$(sudo -EH python3 -m pip show powerline-status | \grep --color=always '^Location' | awk '{print $2}')/powerline"
zpwrPrettyPrintBox "linking $dir to ~/.tmux/powerline"
if [[ -z $TMUX_HOME ]]; then
TMUX_HOME="$HOME/.tmux"
fi
if [[ ! -d "$TMUX_HOME" ]]; then
zpwrPrettyPrintBox "$TMUX_HOME does not exist"
else
if [[ ! -d "$dir" ]]; then
zpwrPrettyPrintBox "$dir does not exist"
else
if zpwrNeedSudo "$dir"; then
zpwrPrettyPrintBox "linking $dir to $TMUX_HOME/powerline with sudo"
echo sudo ln -sfn "$dir" "$TMUX_HOME/powerline"
sudo ln -sfn "$dir" "$TMUX_HOME/powerline"
else
zpwrPrettyPrintBox "linking $dir to $TMUX_HOME/powerline"
echo ln -sfn "$dir" "$TMUX_HOME/powerline"
ln -sfn "$dir" "$TMUX_HOME/powerline"
fi
fi
fi
#}}}***********************************************************
#{{{ MARK:start tmux
#**************************************************************
if [[ $justConfig != true ]] && [[ $skip != true ]]; then
if [[ $noTmux != true ]];then
zpwrGoInstallerDir
source "startMux.sh"
fi
fi
#}}}***********************************************************
| true
|
51d51cf70f4f4ed298ebbe2ecbf097b47b262fb3
|
Shell
|
rabahi/epel-scripts
|
/Production/srv-monitoring/centreon.bash
|
UTF-8
| 6,659
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##################################################
# DEFINES
##################################################
# Installs Centreon (clib, engine, broker, connector and the web UI) from
# source tarballs. Must run as root; packages are installed with dnf.
# all version are available here : https://download.centreon.com/
# tab "Centreon Core"
CENTREON_ENGINE_VERSION=1.5.1
CENTREON_BROKER_VERSION=2.11.5
CENTREON_CONNECTOR_VERSION=1.1.2
CENTREON_CLIB_VERSION=1.4.2
# tab "Centreon Web"
CENTREON_WEB_VERSION=2.7.7
# current directory
# Absolute path of the directory containing this script; used below to locate
# bundled helper files (the cdb init script and centreon-response.txt).
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
##################################################
# PREREQUISTES
##################################################
# Build toolchain and libraries needed to compile the Centreon components.
# cmake, gcc
dnf -y install cmake make gcc gcc-c++
# qt
dnf -y install qt qt-devel
# librrd
dnf -y install rrdtool rrdtool-devel
# gnutls
dnf -y install gnutls-devel
# perl
dnf -y install perl-devel perl-ExtUtils-Embed
# mail
dnf -y install mailx
# php
dnf -y install php php-pear php-ldap php-intl
# snmp
dnf -y install net-snmp
# Service accounts: one shared group, plus one user per daemon.
echo "create users abd groups"
groupadd centreon
useradd -g centreon centreon
useradd -g centreon centreon-engine
useradd -g centreon centreon-broker
##################################################
# CENTREON BROKER
##################################################
# Create log/config directories owned by the centreon-broker service user.
echo "create required folders"
mkdir -p /var/log/centreon-broker
mkdir -p /etc/centreon-broker
chown centreon-broker: /var/log/centreon-broker/ -R
chown centreon-broker: /etc/centreon-broker/ -R
chmod 775 /var/log/centreon-broker/ -R
chmod 775 /etc/centreon-broker/ -R
# Download, build and install the broker from the official tarball.
echo "download and build centreon broker"
cd /tmp
wget https://s3-eu-west-1.amazonaws.com/centreon-download/public/centreon-broker/centreon-broker-$CENTREON_BROKER_VERSION.tar.gz
tar -xvf centreon-broker-$CENTREON_BROKER_VERSION.tar.gz
rm -f centreon-broker-$CENTREON_BROKER_VERSION.tar.gz
cd centreon-broker-$CENTREON_BROKER_VERSION/build
cmake \
-DWITH_STARTUP_DIR=/etc/init.d \
-DWITH_PREFIX_CONF=/etc/centreon-broker \
-DWITH_PREFIX_LIB=/usr/lib64/nagios \
-DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker \
.
make
make install
# hack under centos 7 ("make install" does not create "cdb" service)
# Fall back to the init script bundled next to this installer.
if [ ! -f /etc/init.d/cdb ]; then
cp $BASEDIR/cdb /etc/init.d
chmod +x /etc/init.d/cdb
fi
##################################################
# CENTREON CONNECTOR
##################################################
# Build the Perl connector; binaries are installed to /usr/lib64/centreon-connector.
echo "download and build centreon connector"
cd /tmp
wget https://s3-eu-west-1.amazonaws.com/centreon-download/public/centreon-connectors/centreon-connector-$CENTREON_CONNECTOR_VERSION.tar.gz
tar -xvf centreon-connector-$CENTREON_CONNECTOR_VERSION.tar.gz
rm -f centreon-connector-$CENTREON_CONNECTOR_VERSION.tar.gz
cd centreon-connector-$CENTREON_CONNECTOR_VERSION/perl/build
cmake \
-DWITH_PREFIX_BINARY=/usr/lib64/centreon-connector \
.
make -j 4
make install
##################################################
# CENTREON CLIB
##################################################
# Shared support library used by engine/broker; installed under /usr/local/lib.
echo "download and build centreon clib"
cd /tmp
wget https://s3-eu-west-1.amazonaws.com/centreon-download/public/centreon-clib/centreon-clib-$CENTREON_CLIB_VERSION.tar.gz
tar -xvf centreon-clib-$CENTREON_CLIB_VERSION.tar.gz
rm -f centreon-clib-$CENTREON_CLIB_VERSION.tar.gz
cd centreon-clib-$CENTREON_CLIB_VERSION/build
cmake .
make
make install
# Expose the freshly built library on the default dynamic-linker path.
ln -s /usr/local/lib/libcentreon_clib.so /lib64/libcentreon_clib.so
##################################################
# CENTREON ENGINE
##################################################
# Create log/config directories owned by the centreon-engine service user.
echo "create required folders"
mkdir -p /var/log/centreon-engine
mkdir -p /etc/centreon-engine
chown centreon-engine: /var/log/centreon-engine/ -R
chown centreon-engine: /etc/centreon-engine/ -R
chmod 775 /var/log/centreon-engine/ -R
chmod 775 /etc/centreon-engine/ -R
# Download, build and install the monitoring engine itself.
echo "download and build centreon engine"
cd /tmp
wget https://s3-eu-west-1.amazonaws.com/centreon-download/public/centreon-engine/centreon-engine-$CENTREON_ENGINE_VERSION.tar.gz
tar -xvf centreon-engine-$CENTREON_ENGINE_VERSION.tar.gz
rm -f centreon-engine-$CENTREON_ENGINE_VERSION.tar.gz
cd centreon-engine-$CENTREON_ENGINE_VERSION/build
cmake \
-DWITH_PREFIX_BIN=/usr/sbin \
-DWITH_RW_DIR=/var/lib64/centreon-engine/rw \
-DWITH_PREFIX_LIB=/usr/lib64/centreon-engine \
.
make
make install
##################################################
# CENTREON WEB
##################################################
# Download and unpack the web UI, then run its unattended installer using
# the response file bundled next to this script.
cd /tmp
wget https://s3-eu-west-1.amazonaws.com/centreon-download/public/centreon/centreon-web-$CENTREON_WEB_VERSION.tar.gz
tar -xvf centreon-web-$CENTREON_WEB_VERSION.tar.gz
# BUG FIX: the cleanup path was "centreon/centreon-web-$VERSION.tar.gz", which
# does not exist — the archive was downloaded directly into /tmp, so the old
# rm silently removed nothing.
rm -f centreon-web-$CENTREON_WEB_VERSION.tar.gz
cd centreon-web-$CENTREON_WEB_VERSION
# install centreon web :
# Normalize line endings of the answer file before feeding it to install.sh.
dos2unix $BASEDIR/centreon-response.txt
./install.sh -f $BASEDIR/centreon-response.txt
##################################################
# POST INSTALLATION CONFIGURATION
##################################################
# Apache 2.4 vhost alias for the Centreon web UI (the heredoc content below
# is written verbatim to the Apache conf.d file).
echo "configure apache for apache 2.4"
cat > /etc/httpd/conf.d/centreon.conf << "EOF"
Alias /centreon /usr/local/centreon/www/
<Directory "/usr/local/centreon/www">
Options Indexes
AllowOverride AuthConfig Options
Require all granted
</Directory>
EOF
# Uncomment and set PHP's date.timezone (required by the Centreon UI checks).
echo "configure default timezone in php.ini"
sed -i "s/^;\(date.timezone =\).*/\1Europe\/Paris/g" /etc/php.ini
echo "enable write to SmartyCache directory"
chown centreon: /usr/local/centreon/GPL_LIB/SmartyCache/ -R
echo "restart httpd"
systemctl restart httpd
# Idempotent: only add the InnoDB option (and restart MariaDB) if missing.
echo "add option 'innodb_file_per_table=1' to /etc/my.cnf"
if ! grep -q innodb_file_per_table=1 /etc/my.cnf; then
sed -i 's/\(\[mysqld\]\)/\1\ninnodb_file_per_table=1/' /etc/my.cnf
systemctl restart mariadb.service
fi;
# NOTE(security): MySQL root password and the centreon/centreon credentials
# are hardcoded here — fine for a throwaway lab VM, rotate for anything else.
echo "create databases and grant amm privileges to user/password centreon/centreon":
mysql --user=root --password=root -e "CREATE USER 'centreon'@'localhost' IDENTIFIED BY 'centreon';"
mysql --user=root --password=root -e "CREATE DATABASE IF NOT EXISTS centreon;"
mysql --user=root --password=root -e "use centreon; GRANT ALL PRIVILEGES ON centreon.* TO 'centreon'@'localhost' WITH GRANT OPTION;"
mysql --user=root --password=root -e "CREATE DATABASE IF NOT EXISTS centreon_status;"
mysql --user=root --password=root -e "use centreon_status; GRANT ALL PRIVILEGES ON centreon_status.* TO 'centreon'@'localhost' WITH GRANT OPTION;"
## meet you
myip=`hostname -I`
echo "Now meet you here: http://$myip/centreon/"
| true
|
843819d25ab37ddc499631de857a805a03b0d7af
|
Shell
|
sbenthall/cda-workshop
|
/cda.sh
|
UTF-8
| 1,348
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# This script assumes the python packages and libraries installed in BCE-0.1
# However, we avoid using the more straightforward approach using the ubuntu
# package for pydot (which avoids the pyparsing issue), this way we document the
# issue for other platforms
#
# Workshop bootstrap: sets up a working directory with Gephi, graphviz, a
# pinned pyparsing, and development checkouts of poll.emic and bigbang plus
# sample data files.
mkdir communications-data-analysis
cd communications-data-analysis
# Gephi is distributed as a tarball, not a distro package.
wget https://launchpad.net/gephi/0.8/0.8.2beta/+download/gephi-0.8.2-beta.tar.gz
tar -xzf gephi-0.8.2-beta.tar.gz
apt-get install -y graphviz
# For some reason, it's been over a year since the default install of pyparsing
# worked for pydot
# -I forces (re)installation of this exact pinned version.
pip install -Iv https://pypi.python.org/packages/source/p/pyparsing/pyparsing-1.5.7.tar.gz#md5=9be0fcdcc595199c646ab317c1d9a709
# poll.emic: editable install, plus workshop config and sample .gexf graphs.
git clone https://github.com/sbenthall/poll.emic.git
cd poll.emic
python setup.py develop
wget https://raw.githubusercontent.com/sbenthall/cda-workshop/master/config.cfg
wget https://raw.githubusercontent.com/sbenthall/cda-workshop/master/mentionball-@DLabAtBerkeley.gexf
wget https://raw.githubusercontent.com/sbenthall/cda-workshop/master/mentionball-@sbenthall~berkeley-data-science-3.gexf
wget https://raw.githubusercontent.com/sbenthall/cda-workshop/master/mentionball-@sbenthall~indian-politics.gexf
cd ..
# bigbang: editable install with its own requirements.
git clone https://github.com/sbenthall/bigbang.git
cd bigbang
pip install -r requirements.txt
python setup.py develop
cd ..
cd ..
| true
|
c8e472b09931b587731c55283e9326bdf91d7e72
|
Shell
|
NHS-digital-website/build
|
/packer/vagrant/scripts/cleanup.sh
|
UTF-8
| 711
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash -eux
# Packer/Vagrant provisioner: strip build leftovers, install the public
# vagrant SSH key, clean apt, and zero free space so the exported image
# compresses well.

# Delete unneeded files.
rm -f /home/vagrant/*.sh
rm -f /home/vagrant/VBoxGuestAdditions_5.0.32.iso

# Download vagrant ssh key
apt-get install -y curl
mkdir -p /home/vagrant/.ssh
chmod 0700 /home/vagrant/.ssh
curl -so /home/vagrant/.ssh/authorized_keys \
https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub
chmod 0600 /home/vagrant/.ssh/authorized_keys
chown -R vagrant:vagrant /home/vagrant/.ssh

# Apt cleanup.
apt-get clean
# BUG FIX: `apt autoremove` / `apt update` prompt interactively and apt's CLI
# is documented as unstable for scripts; use non-interactive apt-get instead.
apt-get autoremove -y
apt-get update

# Zero out the rest of the free space using dd, then delete the written file.
# BUG FIX: dd always exits non-zero when the disk fills up, which (with -e in
# the shebang) aborted the script before /EMPTY was removed and before sync.
dd if=/dev/zero of=/EMPTY bs=1M || true
rm -f /EMPTY

# Add `sync` so Packer doesn't quit too early, before the large file is deleted.
sync
| true
|
b3eab82606def92c471dba796fc7b4b242de0419
|
Shell
|
abhishekbalam/bin-server
|
/gpush
|
UTF-8
| 532
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# gpush — stage everything, commit, and push to origin/master.
# Usage: gpush [message]
#   With no argument the current date is used as the commit message.
#
# Refactor: the two branches previously duplicated the banner + add/commit/push
# sequence (and one banner was missing the space after "Commit Message:");
# compute the message once and run a single code path.
if [ "$#" -eq 0 ]; then
    msg="$(date)"
else
    msg="$1"
fi
echo -e "\e[38;5;222m---------------------------------------------"
echo -e "Commit Message: \"$msg\""
echo -e "---------------------------------------------\e[0m"
git add .
git commit -m "$msg"
git push -u origin master
| true
|
75dd543979de452ee97dfa9c4bca10a125c5e8cf
|
Shell
|
hk6an6/jobladder
|
/IDE_setup_scripts/03_fix_heroku_aws_keyx.sh
|
UTF-8
| 944
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# abortOnFailure <exit status> <tag>
#   <exit status>: last process exit status (typically $?); zero means success.
#   <tag>: free-form label identifying the step, used in the log line.
# Prints "<tag>: completed successfully" when status is 0; otherwise prints
# "<tag>: failed" and terminates the shell with that status.
abortOnFailure(){
    local status="$1"
    local tag="$2"
    if [ "$status" -eq 0 ]; then
        echo "$tag: completed successfully"
    else
        echo "$tag: failed"
        exit "$status"
    fi
}
#make this function available to any subsequent scripts
export -f abortOnFailure
echo "if you need to enable remote user environment compilation, then use:";
echo "'sh 03_fix_heroku_aws_keyx.sh user-env-compile'";
# NOTE(security): AWS access key, secret and bucket are hardcoded below and
# committed to the repository — these credentials should be considered leaked;
# rotate them and load from the environment instead.
heroku config:add AWS_ACCESS_KEY_ID=AKIAJ7JRXK43OVKXUM7A
heroku config:add AWS_SECRET_ACCESS_KEY=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
heroku config:add AWS_STORAGE_BUCKET_NAME=pacific_energy
# Optional: pass "user-env-compile" as $1 to enable the (deprecated) Heroku
# labs feature on the hardcoded app.
if [ "$1" == 'user-env-compile' ]; then
echo "enabling user environment compilation";
heroku labs:enable user-env-compile -a secret-casstle-3861
fi
| true
|
50da872659dd3c9104cab07f0a3751c7cee0100f
|
Shell
|
123FLO321/mongo-swift-driver
|
/.evergreen/run-atlas-tests.sh
|
UTF-8
| 903
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# CI helper: configure swiftenv/library paths, then run the AtlasConnectivity
# test target against the Atlas cluster URIs passed in via the environment.
set -o errexit # Exit the script with error if any of the commands fail

# variables
PROJECT_DIRECTORY=${PROJECT_DIRECTORY:-$PWD}
SWIFT_VERSION=${SWIFT_VERSION:-4.2}
INSTALL_DIR="${PROJECT_DIRECTORY}/opt"
OS=$(uname -s | tr '[:upper:]' '[:lower:]')  # NOTE(review): appears unused in this script — confirm before removing
EXTRA_FLAGS="-Xlinker -rpath -Xlinker ${INSTALL_DIR}/lib"

# enable swiftenv
export SWIFTENV_ROOT="${INSTALL_DIR}/swiftenv"
export PATH="${SWIFTENV_ROOT}/bin:$PATH"
eval "$(swiftenv init -)"

export PKG_CONFIG_PATH="${INSTALL_DIR}/lib/pkgconfig"

# override where we look for libmongoc
export LD_LIBRARY_PATH="${INSTALL_DIR}/lib"
export DYLD_LIBRARY_PATH="${INSTALL_DIR}/lib"

export DEVELOPER_DIR=/Applications/Xcode10.1.app

swiftenv local $SWIFT_VERSION

# run the tests
# Each ATLAS_* variable is forwarded explicitly so `swift run` sees them.
ATLAS_REPL="$ATLAS_REPL" ATLAS_SHRD="$ATLAS_SHRD" ATLAS_FREE="$ATLAS_FREE" ATLAS_TLS11="$ATLAS_TLS11" ATLAS_TLS12="$ATLAS_TLS12" swift run AtlasConnectivity $EXTRA_FLAGS
| true
|
ad0d3758ef1fb4f167db21a85ac4cf2d4c578182
|
Shell
|
jack1869/linux-auto-script
|
/autocp/remote_1.sh
|
UTF-8
| 633
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#./main.sh a.conf
# remote_1.sh — for every host listed in host.conf, read that host's copy
# config and append "<backup-path>/<file> <dest-path>" lines to a per-config
# "_r.conf" file under $base/rfile (consumed later by the remote copy step).
#
# Idiom fixes: replaced `cat | while read` plus three `echo|awk` substitutions
# per line with a single `read -r` that splits the whitespace-separated fields
# directly; quoted all expansions.
base=/root/autocp
curhost=$base/conf/host.conf
confpath=$base/conf

# host.conf line format: <hostip> <password> <conf-file-name>
while read -r hostip password copyconf; do
    copyconff=${confpath}/$copyconf
    # copy-config line format: <src-path> <backup-path> <dest-path>
    while read -r srcpath backpath destpath; do
        srcfile=${srcpath##*/}
        echo "$backpath/$srcfile $destpath" >> "$base/rfile/${copyconf%%.*}_r.conf"
        #./remotescp $srcpath $hostip $password $backpath
    done < "$copyconff"
done < "$curhost"
echo 'scp remote success!'
| true
|
f2ca26de22b5e72ea2dd4ff28130fef05d2c186c
|
Shell
|
aprato/dotfiles
|
/osx/set-defaults.sh
|
UTF-8
| 4,429
| 2.5625
| 3
|
[] |
no_license
|
# Sets reasonable OS X defaults.
#
# Or, in other words, how I like it
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.osx
# This script was lifted from:
# https://github.com/holman/dotfiles/blob/master/osx/set-defaults.sh
#
# Run ./set-defaults.sh and you'll be good to go.

# no ipad not charging notifications
defaults write com.apple.usbd NoiPadNotifications -bool true

# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1

# Always show scrollbars
# defaults write NSGlobalDomain AppleShowScrollBars -string "Always"

# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true

# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true

# Save to disk (not to iCloud) by default
# defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false

# Trackpad: enable tap to click for this user and for the login screen
# defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
# defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1

# Always open everything in Finder's list view. This is important.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv

# Show the ~/Library folder.
chflags nohidden ~/Library

# Show icons for hard drives, servers, and removable media on the desktop
# defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
# defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true
# defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
# defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true

# Finder: show hidden files by default
# defaults write com.apple.finder AppleShowAllFiles -bool true

# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true

# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true

# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true

# Display full POSIX path as Finder window title
# defaults write com.apple.finder _FXShowPosixPathInTitle -bool true

# When performing a search, search the current folder by default
# defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"

# Disable the warning when changing a file extension
# defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false

# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true

# Don’t show Dashboard as a Space
# defaults write com.apple.dock dashboard-in-overlay -bool true

# Hot corners (wvous-*-corner: 2=Mission Control, 3=App Windows,
# 4=Desktop, 5=Start screen saver; modifier 0 = no modifier key)
# Top left screen corner → Mission Control
defaults write com.apple.dock wvous-tl-corner -int 2
defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner → Application Windows
defaults write com.apple.dock wvous-tr-corner -int 3
defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom left screen corner → Start screen saver
defaults write com.apple.dock wvous-bl-corner -int 5
defaults write com.apple.dock wvous-bl-modifier -int 0
# Bottom right screen corner → Desktop (comment previously said "Bottom left";
# the keys below are the bottom-right corner, wvous-br)
defaults write com.apple.dock wvous-br-corner -int 4
defaults write com.apple.dock wvous-br-modifier -int 0

###############################################################################
# TextEdit, and Disk Utility                                                  #
###############################################################################

# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
# defaults write com.apple.TextEdit PlainTextEncoding -int 4
# defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4

# Enable the debug menu in Disk Utility
# defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
# defaults write com.apple.DiskUtility advanced-image-options -bool true

########################
# Xcode
#######################
defaults write com.apple.dt.Xcode ShowBuildOperationDuration YES

echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
d4e66efb8d6a1175817262bad68553b3e7c151c0
|
Shell
|
lib-crash/lib-teeworlds
|
/bin/tw_split_logs
|
UTF-8
| 3,576
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# tw_split_logs - by ChillerDragon
# splits teeworlds logfiles into smaller chunks
# and adds proper timestamps to all of the files

# Defaults: chunk size (overridable via a numeric CLI argument) and a
# placeholder logfile name replaced by the filename argument.
MAX_LINES=100000
LOGFILE=invalid.txt

# Per-user scratch directory where originals are backed up before splitting.
mkdir -p /tmp/tw_split_"$USER" || exit 1
# Print every argument as a single bold line on the terminal, then reset
# the text attributes.
bold() {
    tput bold
    printf '%s\n' "$*"
    tput sgr0
}
# Print usage and worked examples to stdout.
# Reads the MAX_LINES global for the default chunk size and uses the
# sibling bold() helper for the example command lines.
function show_help() {
    echo "usage: $(basename "$0") [OPTIONS]"
    echo "options:"
    echo " filename file to split"
    echo " <MAX_LINES> split chunk size"
    echo " --auto split all big files in current dir"
    echo "examples:"
    bold " $(basename "$0") 2020-05-21_21-18-00.log"
    echo " split logfile into $MAX_LINES lines chunks"
    echo ""
    bold " $(basename "$0") foo.log 20"
    echo " split logfile into 20 lines chunks"
    echo ""
    bold " $(basename "$0") --auto"
    echo " split all logfiles in current dir that are too big"
}
# CLI handling: show help for -h/--help/no args, then classify each argument
# as an option (--auto), a numeric chunk size (MAX_LINES) or the logfile name.
if [ "$1" == "--help" ] || [ "$1" == "-h" ] || [ "$#" == "0" ]
then
    show_help
    exit 0
fi

IS_AUTO=0

for arg in "$@"
do
    # OPTIONS
    if [ "${arg:0:2}" == "--" ]
    then
        if [ "$arg" == "--auto" ]
        then
            echo "[!] WARNING DO NOT USE --auto WHEN THE SERVER IS RUNNING"
            echo "[!] IT COULD FORMAT A LOGFILE THAT IS CURRENTLY USED FOR WRITING"
            echo "do you really want to continue? [y/N]"
            read -r -n 1 yn
            echo ""
            if [[ ! "$yn" =~ [yY] ]]
            then
                exit
            fi
            IS_AUTO=1
        else
            echo "unknown option '$arg' try --help"
            exit 1
        fi
        # BUG FIX: without this continue, "--auto" fell through to the
        # file/number classification below and was assigned to LOGFILE.
        continue
    fi
    # FILE OR LINENUM
    # BUG FIX: the pattern used to be ^[0-9]*$, which also matches the empty
    # string and would silently set MAX_LINES="".
    if [[ "$arg" =~ ^[0-9]+$ ]]
    then
        MAX_LINES="$arg"
    else
        LOGFILE="$arg"
    fi
done
# split_log <filename>
# Splits one teeworlds logfile (named "<prefix><YYYY-MM-DD_HH-MM-SS>.<log|txt>",
# no slashes, located in the current directory) into MAX_LINES-sized chunks.
# The original is moved into a new directory named after its timestamp, a copy
# is kept in /tmp/tw_split_$USER/<file>.bak, and each chunk is renamed using
# the "[...]" timestamp found on its first line.
# Exits the whole script (status 1) on any parse or filesystem error.
function split_log() {
    # usage: split_log <filename>
    local ts_and_ext
    local backup
    local basename
    local lines
    local file_ext
    local file_ts
    local logfile
    local start_dir
    logfile="$1"
    start_dir="$PWD"
    if [[ "$logfile" =~ / ]]
    then
        echo "Error: logfile '$logfile' can not contain slashes"
        exit 1
    fi
    if [ ! -f "$logfile" ]
    then
        echo "Error: logfile not found '$logfile'"
        exit 1
    fi
    # Pull "<timestamp>.<ext>" off the end of the filename.
    ts_and_ext="$(
        echo "$logfile" | \
        grep -oE '[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}(.txt|.log)+$'
    )"
    if [ "$ts_and_ext" == "" ]
    then
        echo "Error: could not parse timestamp of file '$logfile'"
        exit 1
    fi
    file_ext="${ts_and_ext##*.}"
    file_ts="${ts_and_ext%%.*}"
    # (a second identical "-f $logfile" existence check was removed here —
    # it duplicated the one above and could never fire)
    lines="$(wc -l "$logfile" | cut -d' ' -f1)"
    # Only split files that yield at least two full chunks.
    if [ "$lines" -lt "$((MAX_LINES * 2))" ]
    then
        echo "skipping file because it has less than $((MAX_LINES * 2)) lines:"
        wc -l "$logfile"
        return
    fi
    mkdir -p "$file_ts" || exit 1
    mv "$logfile" "$file_ts" || exit 1
    cd "$file_ts" || exit 1
    backup="/tmp/tw_split_$USER/$logfile.bak"
    cp "$logfile" "$backup"
    basename="${logfile%$ts_and_ext*}"
    # Numeric-suffix chunks: x00, x01, ...
    split -d -l "$MAX_LINES" "$logfile"
    rm "$logfile"
    for log in ./x*
    do
        echo "logfile: $log"
        first_line="$(head -n1 "$log")"
        # First line looks like "[<timestamp>][...": reuse that timestamp
        # (colons/spaces mapped to dashes/underscores) for the chunk's name.
        if [[ "$first_line" =~ ^\[(.*)\]\[ ]]
        then
            ts_raw="${BASH_REMATCH[1]}"
            ts="$(echo "$ts_raw" | sed 's/:/-/g' | sed 's/ /_/g')"
            mv "$log" "${basename}${ts}.${file_ext}"
        else
            echo "Could not parse teeworlds time stamp"
            exit 1
        fi
    done
    # BUG FIX: return to the caller's directory. The function used to stay
    # inside "$file_ts", so in --auto mode every logfile after the first was
    # looked up relative to the wrong directory and the script aborted.
    cd "$start_dir" || exit 1
    echo "finished!"
    echo "replaced file '$logfile'"
    echo "with the folder '$file_ts'"
    echo "original file was moved to '$backup'"
}
# Entry dispatch: --auto splits every *.log/*.txt in the current directory,
# otherwise only the single file given on the command line.
if [ "$IS_AUTO" == "1" ]
then
    found=0
    for f in ./*.log ./*.txt
    do
        # BUG FIX: an unmatched glob stays literal ("./*.log"), and the old
        # code exited "no logfiles found." on the first miss even when files
        # matching the *other* pattern existed. Skip misses and only report
        # failure if nothing matched at all.
        [[ -e "$f" ]] || continue
        found=1
        split_log "$f"
    done
    if [ "$found" == "0" ]
    then
        echo "no logfiles found."
        exit 1
    fi
else
    split_log "$LOGFILE"
fi
| true
|
059c61ab92f4fd849fc545f281d8dba24be7d5ce
|
Shell
|
ttarczynski/kubernetes-101-workshop
|
/manifests/05_Pods/05-05_resource_management.sh
|
UTF-8
| 1,537
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Interactive workshop walkthrough of pod resource requests: applies kuard
# pod manifests with different CPU/memory requests and shows where the
# scheduler places them. Each `read -p "Continue?"` pauses for the presenter.
set -x
# what's changed in the kuard pod manifest:
# kuard: 0.5 CPU / 128 Mi Mem
diff 05-04_kuard-pod-health.yaml 05-05_kuard-pod-reqreq.yaml
read -p "Continue?"
# apply the pod manifest
kubectl apply -f 05-05_kuard-pod-reqreq.yaml
read -p "Continue?"
# observe effects of resources request
kubectl describe pod kuard
read -p "Continue?"
# Which node did the scheduler pick, and what does its allocation look like?
KUARD_NODE=$(kubectl get pod kuard -o jsonpath --template={.spec.nodeName})
kubectl describe node $KUARD_NODE
read -p "Continue?"
# schedule more pods
# kuard-a: 0.5 CPU / 128 Mi Mem
diff 05-05_kuard-pod-reqreq.yaml 05-05_kuard-pod-reqreq_a.yaml
read -p "Continue?"
kubectl apply -f 05-05_kuard-pod-reqreq_a.yaml
read -p "Continue?"
KUARD_A_NODE=$(kubectl get pod kuard-a -o jsonpath --template={.spec.nodeName})
kubectl describe node $KUARD_A_NODE
read -p "Continue?"
# kuard-b: 0.6 CPU / 128 Mi Mem
diff 05-05_kuard-pod-reqreq.yaml 05-05_kuard-pod-reqreq_b.yaml
read -p "Continue?"
kubectl apply -f 05-05_kuard-pod-reqreq_b.yaml
read -p "Continue?"
kubectl get pod -o wide
read -p "Continue?"
kubectl describe pod kuard-b
read -p "Continue?"
# kuard-c: 0.3 CPU / 128 Mi Mem
diff 05-05_kuard-pod-reqreq.yaml 05-05_kuard-pod-reqreq_c.yaml
read -p "Continue?"
kubectl apply -f 05-05_kuard-pod-reqreq_c.yaml
read -p "Continue?"
kubectl get pod -o wide
read -p "Continue?"
KUARD_C_NODE=$(kubectl get pod kuard-c -o jsonpath --template={.spec.nodeName})
kubectl describe node $KUARD_C_NODE
read -p "Continue?"
# delete all kuard pods
kubectl delete pod --all
read -p "Continue?"
| true
|
ed07ccbc05f60ff6c931895812fadd607e1563d4
|
Shell
|
iCodeIN/kernel-ml
|
/ml-models-analyses/readahead-mixed-workload/spawn-workloads.sh
|
UTF-8
| 1,723
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/local/bin/bash
#
# Copyright (c) 2019-2021 Ibrahim Umit Akgun
# Copyright (c) 2021-2021 Andrew Burford
# Copyright (c) 2021-2021 Mike McNeill
# Copyright (c) 2021-2021 Michael Arkhangelskiy
# Copyright (c) 2020-2021 Aadil Shaikh
# Copyright (c) 2020-2021 Lukas Velikov
# Copyright (c) 2019-2021 Erez Zadok
# Copyright (c) 2019-2021 Stony Brook University
# Copyright (c) 2019-2021 The Research Foundation of SUNY
#
# You can redistribute it and/or modify it under the terms of the Apache License,
# Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#

# Per-workload tuning tables (keyed by RocksDB db_bench workload name):
#   reads[w] - number of read operations to request per run
#   times[w] - number of benchmark iterations for the workload
declare -A reads
declare -A times
# These four random workloads share the same settings.
for rand in readrandom readrandomwriterandom mixgraph updaterandom
do
    reads[$rand]=200000
    times[$rand]=15
done
reads[readwhilewriting]=100000
times[readwhilewriting]=5
# Sequential/reverse scans use far more reads and longer runs.
reads[readreverse]=20000000
times[readreverse]=25
reads[readseq]=20000000
times[readseq]=50
# Print the numerically smallest of the given arguments (general-numeric
# comparison, so decimal values work too).
min() {
    printf '%s\n' "$@" | sort -g | sed -n '1p'
}
# Main benchmark loop. $1/$2 are the two workload names to run concurrently.
# $KML appears to be "true"/"false" executed as a command — presumably set in
# the caller's environment; TODO confirm. mount_point/device/ROCKSDB/ROCKSDB2
# are likewise expected from the environment.
if $KML; then
    echo $1 $2 >> /var/log/kern.log
fi
# Run only as many iterations as the shorter of the two workloads allows.
iters=$(min ${times[$1]} ${times[$2]})
for i in $(seq 1 $iters); do
    # Cold-cache every iteration: drop page cache and remount the device.
    sync && echo 3 > /proc/sys/vm/drop_caches
    umount $mount_point
    mount /dev/$device $mount_point
    if $KML; then
        insmod /home/kml/build/kml.ko
        insmod /home/kml/kernel-interfaces/readahead/readahead.ko
    fi
    echo $1 $i >> /tmp/detail.txt
    echo $2 $i >> /tmp/detail2.txt
    # Launch both workloads in parallel against separate database dirs.
    ./run-bench.sh $1 ${reads[$1]} $ROCKSDB $mount_point/rocksdb_bench &
    ./run-bench.sh $2 ${reads[$2]} $ROCKSDB2 $mount_point/rocksdb_bench2 &
    # not sure why (or if?) two waits are necessary
    wait
    wait
    if $KML; then
        rmmod readahead kml
    fi
    # TODO don't need to clear both databases
    if [[ $1 == "updaterandom" ]] || [[ $2 == "updaterandom" ]]; then
        ./clear-dataset.sh $2
    fi
done
| true
|
b0a4d3f3c3ac56ceece28922acabe89a35c21ff7
|
Shell
|
chilakagit/chilakasible
|
/nfstest/shellclasses/section1/commandscript.sh
|
UTF-8
| 539
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# This script demonstrates debug mode (the -x flag in the shebang); removing
# -x produces normal, untraced output.
# It also demonstrates command substitution inside echo: anything placed in
# $(...) — e.g. $(cat /etc/profile), $(uptime), $(ls -l), $(date) — is
# executed and its output substituted, exactly like the backtick form
# `date` `uptime` `ls -l` `cat /etc/hosts`.
#echo "Today is date $(date)"
echo "Today is date `date`"
#echo "system is running from $(uptime)"
echo "system is up from `uptime`"
echo "output of $(cat /etc/profile)"
| true
|
dc5bdb2cb41ce9875789b556ee82fe018f4c6dd3
|
Shell
|
isoundy000/ipa_re_sign
|
/shell/Resign_en.sh
|
UTF-8
| 4,179
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Program :
#       Re-sign an ipa with an 'Apple Enterprise Certificate' using the
#       macOS `security`, `PlistBuddy` and `codesign` tools.
# History :
#       2019/07/05       GuoFeng First release
#
# Interactive inputs (read from stdin): certificate name, ipa path,
# mobileprovision path, and the new bundle identifier. The selections are
# persisted into signInfo/signInfo.plist and read back from it.
## TODO : check command result of security / PlistBuddy / codesign calls
echo "Start Signing"
# Work from the directory this script lives in so relative paths resolve.
work_path=$(dirname "$0")
cd "${work_path}" || exit 1
# Recreate scratch and output directories from a clean slate.
rm -rf tmp target
mkdir tmp target
CURRENT_PATH=$(pwd)
TMP_PATH="$CURRENT_PATH/tmp"
SIGNINFO_PATH="$CURRENT_PATH/signInfo"
TARGET_PATH="$CURRENT_PATH/target"
## List the code-signing certificates installed on this Mac.
CERTIFICATE=$(security find-identity -v -p codesigning)
echo "$CERTIFICATE"
echo $'************************************* \nPlease select one certificate you would like to use (Only select content within double quotes)'
# read -r keeps backslashes in pasted names/paths literal.
read -r CERT
/usr/libexec/PlistBuddy -c "Set :DistributionCertificateName $CERT" "$SIGNINFO_PATH/signInfo.plist"
# choose ipa file
echo $'\n************************************* \nPlease drag in the ipa file'
read -r ORGIPA
/usr/libexec/PlistBuddy -c "Set :IpaPath $ORGIPA" "$SIGNINFO_PATH/signInfo.plist"
# choose mobileprovision file
echo $'\n*************************************\nPlease drag in the mobileprovision file'
read -r MobileProvisionPath
/usr/libexec/PlistBuddy -c "Set :MobileProvisionPath $MobileProvisionPath" "$SIGNINFO_PATH/signInfo.plist"
# input BundleID
echo $'\n*************************************\nPlease Input the BundleID'
read -r BundleID
/usr/libexec/PlistBuddy -c "Set :BundleID $BundleID" "$SIGNINFO_PATH/signInfo.plist"
# Read the saved selections back out of the plist.
APPBundleID=$(/usr/libexec/PlistBuddy -c 'Print :BundleID' "$SIGNINFO_PATH/signInfo.plist")
APPCertificate=$(/usr/libexec/PlistBuddy -c 'Print :DistributionCertificateName' "$SIGNINFO_PATH/signInfo.plist")
APPMobileProvision=$(/usr/libexec/PlistBuddy -c 'Print :MobileProvisionPath' "$SIGNINFO_PATH/signInfo.plist")
IPA_PATH=$(/usr/libexec/PlistBuddy -c 'Print :IpaPath' "$SIGNINFO_PATH/signInfo.plist")
echo -e "*************************************\n Ready ! \n BundleID : $APPBundleID \n Distribution certificate name : $APPCertificate \n mobileprovision file : $APPMobileProvision \n*************************************"
echo -e "Please wait for a litte while \nSigning ... "
# Extract the Entitlements dictionary from the provisioning profile into a
# standalone plist that codesign can consume.
security cms -D -i "$APPMobileProvision" | /usr/libexec/PlistBuddy -x -c "print :Entitlements " /dev/stdin > "$TMP_PATH/entitlements.plist"
# Unpack the ipa (it is just a zip with a Payload/ directory).
unzip -oqq "$IPA_PATH" -d "$TMP_PATH"
PAYLOAD_PATH=$TMP_PATH/Payload
# TODO : use an appropriate command to find the .app bundle, e.g.
# find "$PAYLOAD_PATH" -maxdepth 1 -name "*.app"; ls works only because
# Payload/ contains exactly one entry.
APP_NAME=$(ls "$PAYLOAD_PATH")
# Overwrite the bundle identifier inside the app's Info.plist.
chmod 666 "$PAYLOAD_PATH/$APP_NAME/Info.plist"
/usr/libexec/PlistBuddy -c "Set :CFBundleIdentifier ${APPBundleID}" "$PAYLOAD_PATH/$APP_NAME/Info.plist"
# Replace the embedded provisioning profile with the chosen one.
cp "$APPMobileProvision" "$TMP_PATH/Payload/$APP_NAME/embedded.mobileprovision"
# Name of the main executable, per Info.plist (kept for reference).
EXECFILE=$(/usr/libexec/PlistBuddy -c 'Print :CFBundleExecutable' "$PAYLOAD_PATH/$APP_NAME/Info.plist")
# TODO -- check whether restoring the executable bit is necessary:
# chmod +x "$TMP_PATH/Payload/$APP_NAME/$EXECFILE"
# Re-sign every embedded framework first; nested code must be signed
# before the enclosing app bundle.
APP_PATH=$TMP_PATH/Payload/$APP_NAME
FRAMEWORKS_PATH=$APP_PATH/Frameworks
for framework in "$FRAMEWORKS_PATH/"*
do
codesign -fs "$APPCertificate" "$framework"
# TODO : check sign result
sleep 0.1 # Just sleep for a little while
done
# Finally sign the app bundle itself with the extracted entitlements.
codesign -fs "$APPCertificate" --no-strict --entitlements="$TMP_PATH/entitlements.plist" "$TMP_PATH/Payload/$APP_NAME"
# Repackage the signed Payload into the output ipa.
cd "$TMP_PATH/" || exit 1
zip -ryq ../target/Resigned.ipa Payload/
echo -e "*************************************\n !!! APP signed !!! \n ipa path: $TARGET_PATH/Resigned.ipa\n*************************************"
# remove tmp
rm -rf "$TMP_PATH"
| true
|
69493ead3e85dbfd4a77a4571feeb569914839f8
|
Shell
|
DarcyChang/MyProjects
|
/Gemtek/diagmon/diagmon/src/modules/manufacture/mf-builder.sh
|
UTF-8
| 609
| 3.125
| 3
|
[] |
no_license
|
#! /bin/sh
# Scaffold a new manufacture feature test from the Sample template:
# copies Sample.c/Sample.h/rule.mk into feature/<name>/ and substitutes
# the feature name ($2) and backdoor string ($3) into the copies.
#
# Usage: mf-builder.sh add <feature name> <backdoor string>
if [ $# -ne 3 ]; then
	echo "Usage mf-builder.sh add [feature name] [backdoor string]"
elif [ -n "$2" ]; then
	# BUGFIX: the original tested `[ -n $2 ]` (unquoted), which is the
	# one-argument form of test and is *always* true; quoting makes the
	# emptiness check actually work. Same for $3 below.
	# NOTE(review): $1 ("add" per the usage text) is never validated.
	mkdir "feature/$2"
	cp Sample/Sample.c "feature/$2/$2.c"
	cp Sample/Sample.h "feature/$2/$2.h"
	cp Sample/rule.mk "feature/$2/rule.mk"
	# Feature names containing '/' or sed metacharacters would break these
	# substitutions; keep names to simple identifiers.
	sed -i "s/Sample/$2/g" "feature/$2/$2.c"
	sed -i "s/Sample/$2/g" "feature/$2/$2.h"
	sed -i "s/Sample/$2/g" "feature/$2/rule.mk"
	if [ -n "$3" ]; then
		sed -i "s/String/$3/g" "feature/$2/$2.c"
		echo "Create the feature test!!!"
		echo "please add \"{\"$3_Req\", $2},\" in include/handlers.h"
		echo "please add \"-include feature/$2/rule.mk\" in feature/rule.mk"
	fi
fi
| true
|
f619ec90754571a0389cb1731a81fa1da35edba4
|
Shell
|
akhepcat/Miscellaneous
|
/ipt_or_nft.sh
|
UTF-8
| 891
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Report which packet-filtering backend currently holds the active ruleset:
# nftables, iptables (nft-backed), or iptables-legacy. Heuristic: count the
# non-trivial rule lines each tool reports and pick the largest count.
# Listing rulesets requires root; id -u is robust even when $USER is unset.
if [ "$(id -u)" != "0" ]
then
	echo "Must be run as root"
	exit 1
fi
if [ -n "$(command -v nft)" ]
then
	# Full nft ruleset line count (-s include stats, -nnn fully numeric).
	NFTL=$(nft -s -nnn list ruleset | wc -l)
fi
# BUGFIX: the original probed for "iptables-save-legacy", which is not the
# name of the binary it then executed; the Debian/Ubuntu binary is
# "iptables-legacy-save", so the legacy count was effectively never taken.
if [ -n "$(command -v iptables-legacy-save)" ]
then
	# Strip table (*), chain (:), comment (#) and COMMIT lines so only
	# actual rules are counted.
	IPTLL=$(iptables-legacy-save | grep -vE '^[:#*]|COMMIT' | wc -l )
fi
if [ -n "$(command -v iptables-save)" ]
then
	IPTL=$(iptables-save | grep -vE '^[:#*]|COMMIT' | wc -l )
fi
# echo "NFT lines: ${NFTL:-0}"
# echo "IPT lines: ${IPTL:-0}"
# echo "IPT(legacy) lines: ${IPTLL:-0}"
# Whichever backend reports strictly more rules than both others wins;
# ties (or all-zero) fall through to the "can't figure out" branch.
if [ ${NFTL:-0} -gt ${IPTL:-0} -a ${NFTL:-0} -gt ${IPTLL:-0} ]
then
	echo "NFtables is running"
elif [ ${IPTL:-0} -gt ${NFTL:-0} -a ${IPTL:-0} -gt ${IPTLL:-0} ]
then
	echo "IPTables is running"
elif [ ${IPTLL:-0} -gt ${IPTL:-0} -a ${IPTLL:-0} -gt ${NFTL:-0} ]
then
	echo "IPtables-Legacy is running"
else
	echo "none, or can't figure out what's running"
fi
| true
|
0d63bfb639cfaa7154d9131920e7488c17541c72
|
Shell
|
tharangni/LSHTC-2019
|
/starspace/with_hierarchy.sh
|
UTF-8
| 2,237
| 2.71875
| 3
|
[] |
no_license
|
# Two-stage StarSpace training for the "oms" hierarchical dataset:
#   1) pre-train embeddings on the label hierarchy (trainMode 4),
#   2) train a document classifier warm-started from (1) (trainMode 0),
#   3) evaluate and write top-5 predictions.
MODELNAME=oms
MODELDIR=/home/harshasivajit/sspace/model/${MODELNAME}
DATADIR=/home/harshasivajit/sspace/data/${MODELNAME}
PREDDIR=/home/harshasivajit/sspace/pred/${MODELNAME}
# Negative-sampling search limits: hneg for the hierarchy pass, neg for the
# classification pass.
hneg=5
neg=40
echo "making directories"
mkdir -p "${MODELDIR}"
mkdir -p "${DATADIR}"
mkdir -p "${PREDDIR}"
echo "Compiling StarSpace"
make
echo "Start to train on ${MODELNAME} data's hierarchy:"
for dim in 300
do
echo "hierarchy hneg ${hneg}"
# Pass 1: hierarchy pre-training; writes <model> and <model>.tsv.
/home/harshasivajit/StarSpace/starspace train \
  -trainFile "${DATADIR}/oms_2nd_dag2tree_fasttext_reverse".txt \
  -initModel "${MODELDIR}/${MODELNAME}-init-d${dim}".tsv \
  -model "${MODELDIR}/${MODELNAME}-d${dim}-h-only-neg-dag-${hneg}" \
  -adagrad true \
  -ngrams 1 \
  -lr 0.05 \
  -epoch 20 \
  -thread 50 \
  -dim ${dim} \
  -margin 0.05 \
  -batchSize 5 \
  -negSearchLimit ${hneg} \
  -maxNegSamples 10 \
  -trainMode 4 \
  -label "__label__" \
  -similarity "cosine" \
  -verbose true
echo "Start to train on ${MODELNAME} data's documents with trained model on hierarchy:"
echo "classification neg ${neg}"
# Pass 2: classifier training, warm-started from the hierarchy model.
# BUGFIX: the original -initModel referenced "...-h-only-neg-${hneg}.tsv",
# which does not match the "...-h-only-neg-dag-${hneg}" model written by
# pass 1, so the warm start could never load; the names now agree.
/home/harshasivajit/StarSpace/starspace train \
  -trainFile "${DATADIR}/${MODELNAME}"-train.txt \
  -initModel "${MODELDIR}/${MODELNAME}-d${dim}-h-only-neg-dag-${hneg}".tsv \
  -model "${MODELDIR}/${MODELNAME}-d${dim}-neg-${neg}-h-${hneg}" \
  -adagrad true \
  -ngrams 1 \
  -lr 0.1 \
  -epoch 10 \
  -thread 50 \
  -dim ${dim} \
  -batchSize 25 \
  -negSearchLimit ${neg} \
  -trainMode 0 \
  -label "__label__" \
  -similarity "cosine" \
  -verbose true \
  -validationFile "${DATADIR}/${MODELNAME}-valid.txt" \
  -validationPatience 10
# BUGFIX: the original left a trailing "\" after -validationPatience 10,
# which spliced the following echo into the starspace command line.
echo "Start to evaluate trained model with clf neg ${neg} and h-neg ${hneg}:"
/home/harshasivajit/StarSpace/starspace test \
  -model "${MODELDIR}/${MODELNAME}-d${dim}-neg-${neg}-h-${hneg}" \
  -testFile "${DATADIR}/${MODELNAME}"-test.txt \
  -ngrams 1 \
  -dim ${dim} \
  -label "__label__" \
  -thread 50 \
  -batchSize 10 \
  -similarity "cosine" \
  -trainMode 0 \
  -verbose true \
  -adagrad true \
  -negSearchLimit ${neg} \
  -predictionFile "${PREDDIR}/${MODELNAME}-d${dim}-neg-${neg}-h-${hneg}"-pred.txt \
  -K 5
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.