blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
21b4a931a2542c6f4ab647809627c559dd0ec63f
|
Shell
|
mustafaiman/epfl-dbs-assignment3
|
/test_submission.sh
|
UTF-8
| 222
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Compile both task submissions for the given GASPAR user; exit non-zero
# on the first compilation failure so the grader sees the error.
GASPAR="iman"
SUBMISSION_DIR="submission/$GASPAR/exercise3"
# Quote the directory part of the path (spaces-safe); the *.java glob
# itself must remain unquoted so it expands.
javac "$SUBMISSION_DIR"/task1/*.java || exit 1
javac "$SUBMISSION_DIR"/task2/*.java || exit 1
| true
|
1ce90ec838900ddc4937a5700fdb501d59f4a4f8
|
Shell
|
ineat/ocelot
|
/docker/entrypoint.sh
|
UTF-8
| 289
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: wait for the remote config service (if any) and
# launch the Java application with the inspectIT Ocelot agent attached.
#   $1 - service name reported to inspectIT
#   $2 - path of the application jar to run
# BUGFIX: '==' is not POSIX; under a dash-based /bin/sh the test failed
# with "unexpected operator". Use the portable '='.
if [ "$CONFIG_HTTP_URL" = "" ]
then
    echo "CONFIG_HTTP_URL is empty"
else
    dockerize -wait "$CONFIG_HTTP_URL"
    sleep 60
    java -javaagent:"inspectit-ocelot-agent-0.4.jar" -Dinspectit.config.http.attributes.service="$1" -Dinspectit.config.http.url="$CONFIG_HTTP_URL" -jar "$2"
fi
| true
|
9c041e92e370e746975099e1da9d5d4e0877d278
|
Shell
|
nictrix/perfect-mon
|
/perfect-mon.sh
|
UTF-8
| 1,350
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Monitor CPU, memory and disk usage and react when thresholds are hit.
# WARNING: this "April Fools" script kills the busiest process (and its
# parent) and can power the machine off -- handle with care.

# https://goo.gl/mdc2k7
function cpu_percent_used {
  printf %.0f $(echo "100 - $(mpstat | grep -Po 'all.* \K[^ ]+$')" | bc)
}
# https://goo.gl/NMLeIb
function mem_percent_used {
  printf %.0f $(free | grep Mem | awk '{print $3/$2 * 100}')
}
# https://goo.gl/iihlP4
function disk_percent_used {
  df -k --output=pcent /dev/sda1 | tail -n 1 | tr -d ' %'
}
# https://goo.gl/aFNhNd
function top_cpu_process {
  ps -eo pcpu,pid,ppid,user,args | sort -bnr | head -1
}
# BUGFIX: this was a second definition of top_cpu_process, which both
# shadowed the CPU version and left top_mem_process (used below) undefined.
function top_mem_process {
  ps -eo pmem,pid,ppid,user,args | sort -bnr | head -1
}
echo "Checking CPU, MEM, Disk..."
echo
echo "CPU Used: $(cpu_percent_used)%"
echo "MEM Used: $(mem_percent_used)%"
echo "DISK Used: $(disk_percent_used)%"
while (( $(cpu_percent_used) >= 90 )); do
  echo
  echo "CPU above 90%, killing off process!"
  echo $(top_cpu_process)
  kill -9 $(top_cpu_process | awk '{print $2}')
  kill -9 $(top_cpu_process | awk '{print $3}')
  sleep 2
done
# BUGFIX: the condition read "mem_percent_used >= 10", which evaluated an
# unset *variable* (always 0) instead of calling the function, and used a
# threshold of 10 where the message says 90.
while (( $(mem_percent_used) >= 90 )); do
  echo
  echo "MEM above 90%, killing off process!"
  echo $(top_mem_process)
  kill -9 $(top_mem_process | awk '{print $2}')
  kill -9 $(top_mem_process | awk '{print $3}')
  sleep 2
done
# BUGFIX: same variable-vs-function-call problem as the MEM loop above.
while (( $(disk_percent_used) >= 90 )); do
  echo
  echo "DISK above 90%, powering off server!"
  poweroff
done
echo
echo "Check Complete, Happy April Fools Day"
| true
|
9acc2f126010284ff9d36fc7dcfad96e642e0eb9
|
Shell
|
tyz1030/SLAMBotCtr
|
/touch.sh
|
UTF-8
| 149
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# echo "This is a shell script"
# Refresh the access/modification time of every entry (files and
# directories, including "." itself) under the current directory.
find . -exec touch {} \;
echo "I am touching all files"
# SOMEVAR='text stuff'
# echo "$SOMEVAR"
| true
|
710eb134f4f2d0ca71102bdb93130e78272fefd1
|
Shell
|
a-h/vagrant-playground
|
/mysql.sh
|
UTF-8
| 637
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision MariaDB 10.0 on CentOS 6 (Vagrant provisioner).

# Write the yum repo definition in one shot via a quoted here-document
# (same file content as appending line by line).
cat > /etc/yum.repos.d/MariaDB.repo <<'EOF'
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.0/centos6-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF
# Now install maridb.
yum install -y MariaDB-server MariaDB-client
# Map the local ./db folder to the local machine.
sudo ln -fs /vagrant/db /srv/db
#TODO: mount existing databases, or create from script.
# Start the server.
sudo /etc/init.d/mysql start
| true
|
a850af0241c804486df924f3044a1ec3831f53b7
|
Shell
|
JeffersonLab/QwAnalysis
|
/Extensions/Regression/QwBeamModulation/scripts/bash/bmod_jsub.bash
|
UTF-8
| 2,342
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash -u
#
# Run qwbeammod/qwlibra for one run number and copy the outputs to the
# user's final directory.
#   $1 - run number (required)
#   $2 - analyzer directory (optional; defaults to jhoskins' checkout)
# User should set final copy directory to personal directory
#
umask 002
USER=`whoami`
# BUGFIX: under "bash -u" a bare $2 aborts with "unbound variable" when
# only the run number is given; default it to empty first.
if [ -n "${2:-}" ]; then
    export QWANALYSIS=${2}
else
    export QWANALYSIS=/u/home/jhoskins/QwAnalysis/
fi
echo "using $QWANALYSIS for analyzer directory."
cd $QWANALYSIS
. $QWANALYSIS/SetupFiles/SET_ME_UP.bash
# ------------------------------------------------------------------- #
# Here I am just setting up my local enviroment variables              #
# so that things work properly.                                        #
# ------------------------------------------------------------------- #
# BUGFIX: the file contained an unresolved SVN merge conflict here
# (<<<<<<< .mine / ======= / >>>>>>> .r4446), which is a syntax error.
# Resolved in favour of the newer .r4446 revision.
#FINAL_PATH=/work/hallc/qweak/QwAnalysis/run2/bmod_regression/
FINAL_PATH=/volatile/hallc/qweak/jhoskins/run2/bmod_regression
export QW_ROOTFILES=/volatile/hallc/qweak/QwAnalysis/run2/rootfiles
export QWSCRATCH=/group/qweak/QwAnalysis/common/QwScratch
cd $QWANALYSIS/Extensions/Regression/QwBeamModulation
# ------------------------------------------------------------------- #
# File name stems used to locate/produce the per-run artifacts.        #
# ------------------------------------------------------------------- #
REG_STEM="regression_"
BMOD_FILE_STEM="bmod_tree_"
DIAGNOSTIC_STEM="_diagnostic"
SLOPES_STEM="slopes_"
ERROR="diagnostic_"
HOST=`hostname`
echo "hostname is set to $HOST"
if [ -n "$1" ]
then
    RUN_NUMBER=${1}
else
    echo "Error::Run number not specified."
    exit
fi
./qwbeammod ${RUN_NUMBER}
if [ $? -ne 0 ]; then
    echo "There was an error in the completion of qwbeammod"
    exit
fi
ROOTFILE=${QW_ROOTFILES}/${BMOD_FILE_STEM}${RUN_NUMBER}.root
REGRESSION=${REG_STEM}${RUN_NUMBER}.dat
BMOD_OUT=${BMOD_FILE_STEM}${RUN_NUMBER}.root${DIAGNOSTIC_STEM}
SLOPES=${SLOPES_STEM}${RUN_NUMBER}.dat
DIAGNOSTICS=${ERROR}${RUN_NUMBER}.dat
echo "found :: $ROOTFILE"
if [ -f "${ROOTFILE}" ]
then
    ./qwlibra ${RUN_NUMBER}
else
    echo "There was a problem in finding $ROOTFILE Directory."
    exit
fi
if [ $? -ne 0 ]
then
    echo "qwlibra did not exit correctly."
    exit
fi
# BUGFIX: the echoed command previously listed ${ROOTFILE} while the
# actual mv used ${BMOD_OUT}; make the log match what is executed.
echo "mv -v ${REGRESSION} ${SLOPES} ${BMOD_OUT} ${DIAGNOSTICS} ${FINAL_PATH}/"
mv -v ${REGRESSION} ${SLOPES} ${BMOD_OUT} ${DIAGNOSTICS} ${FINAL_PATH}/
| true
|
5983eb87ee022b63c1f590fd13119fb387bc3e3e
|
Shell
|
ling32945/linux-configuration
|
/server-setup.sh
|
UTF-8
| 4,770
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Description: Auto config your server script
#
# Copyright (C) 2017 Jae Liu <jae.liu11@gmail.com>
#
OSVersion=("Ubuntu" "Debian" "CentOS");
curUser=`who am i | awk '{print $1}'`
echo "Info: Current User: "$curUser
if [ `whoami` != "root" ]; then
    echo "Error: This script must run with Administrator!"
    exit
fi
curOSVersion=`cat /etc/issue | grep -o -E "^\b\w+\b"`
echo "Info: Update Applications"
if [[ $curOSVersion = ${OSVersion[0]} || $curOSVersion = ${OSVersion[1]} ]]; then
    #apt-get update
    echo
elif [ $curOSVersion = ${OSVersion[2]} ]; then
    yum update
fi
# Install Git
which git
if [ $? -ne 0 ]; then
    echo "Info: Git is not installed, install it first!"
    if [[ $curOSVersion = ${OSVersion[0]} || $curOSVersion = ${OSVersion[1]} ]]; then
        apt-get install git
    elif [ $curOSVersion = ${OSVersion[2]} ]; then
        yum -y install git
    fi
fi
# NOTE(review): "[ 1 ]" is always true, so git is always (re)configured.
if [ 1 ]; then
    echo "Config Git"
    git config --global user.name "Jae Liu"
    #git config --global user.email ling32945@sina.com
    git config --global core.editor vim
    git config --global merge.tool vimdiff
    git config --global color.status auto
    git config --global color.diff auto
    git config --global color.branch auto
    git config --global color.interactive auto
fi
echo "Info: Config Vim"
which vim
if [ $? -ne 0 ]; then
    echo "Info: Vim is not installed, install it first!"
    if [[ $curOSVersion = ${OSVersion[0]} || $curOSVersion = ${OSVersion[1]} ]]; then
        apt-get install vim
    elif [ $curOSVersion = ${OSVersion[2]} ]; then
        yum -y install vim
    fi
fi
path=$(cd `dirname $0`; pwd)
echo 'Path: '$path
echo "$(pwd)"
vimrcFilePath=$(cd `dirname $0`; pwd)"/.vimrc"
if [ -f $vimrcFilePath ]; then
    #cp $vimrcFilePath ~/
    #chown ${curUser}:${curUser} ~/.vimrc
    ln -s $vimrcFilePath ~/.vimrc
fi
if [ ! -d ~/.vim ]; then
    echo "Info: .vim folder is not there, create it"
    mkdir ~/.vim
    chown $curUser:$curUser ~/.vim
fi
# Install Vim color scheme
if [ ! -d ~/.vim/colors ]; then
    mkdir ~/.vim/colors
    chown ${curUser}:${curUser} ~/.vim/colors
fi
if [ ! -f ~/.vim/colors/solarized.vim ]; then
    # BUGFIX: this message previously said "Tomorrow.vim is not there"
    # although this branch handles the solarized scheme.
    echo "solarized.vim is not there"
    git clone https://github.com/altercation/vim-colors-solarized.git /tmp/vim-colors-solarized
    mv /tmp/vim-colors-solarized/colors/*.vim ~/.vim/colors/
fi
rm -rf /tmp/vim-colors-solarized
if [ ! -f ~/.vim/colors/Tomorrow.vim ]; then
    echo "Tomorrow.vim is not there"
    git clone https://github.com/chriskempson/tomorrow-theme.git /tmp/tomorrow-theme
    mv /tmp/tomorrow-theme/vim/colors/*.vim ~/.vim/colors/
fi
rm -rf /tmp/tomorrow-theme
# copy screen configuration
screenFilePath=$(cd `dirname $0`; pwd)"/.screenrc"
if [ -f $screenFilePath ]; then
    cp $screenFilePath ~/
    chown $curUser:$curUser ~/.screenrc
fi
# Install Vim Vundle
if [ -d ~/.vim/bundle/Vundle.vim ]; then
    cd ~/.vim/bundle/Vundle.vim
    git pull
else
    git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
fi
echo "Info: Add User Group Web"
#grep -E ":600:" /etc/group >& /dev/null
#if [ $? -ne 0 ]; then
#echo "Group ID 600 not found"
grep -E "^web" /etc/group >& /dev/null
if [ $? -ne 0 ]; then
    echo "Info: Group web not found, add user group web"
    groupadd web
fi
#fi
gidOfWeb=`awk 'BEGIN{FS=":"} $1=="web" {print $3}' /etc/group`
#echo "Gid of web: "$gidOfWeb
id $curUser | grep $gidOfWeb
if [ $? -ne 0 ]; then
    gpasswd -a $curUser web
fi
if [[ $curOSVersion = ${OSVersion[0]} || $curOSVersion = ${OSVersion[1]} ]]; then
    id www-data
    if [ $? -eq 0 ]; then
        echo "Info: Add user www-data to user group web"
        gpasswd -a www-data web
    fi
elif [ $curOSVersion = ${OSVersion[2]} ]; then
    id apache
    if [ $? -eq 0 ]; then
        echo "Info: Add user apache to user group web"
        # BUGFIX: the CentOS branch checked for "apache" but then added
        # "www-data" (the Debian/Ubuntu httpd user) to the group.
        gpasswd -a apache web
    fi
fi
grep -E "^app" /etc/group >& /dev/null
if [ $? -ne 0 ]; then
    echo "Group app not found"
    groupadd app
fi
gidOfApp=`awk 'BEGIN{FS=":"} $1=="app" {print $3}' /etc/group`
#echo "Gid of app: "$gidOfApp
id $curUser | grep $gidOfApp
if [ $? -ne 0 ]; then
    gpasswd -a $curUser app
fi
# config history (append once, guarded by the marker comment)
grep "history config" /etc/profile
if [ $? -ne 0 ]; then
cat << EOF >> /etc/profile
# history config
HISTTIMEFORMAT='%F %T '
HISTSIZE="5000"
HISTFILESIZE=5000
#HISTCONTROL=ignoredups
#HISTCONTROL=ignorespace
HISTCONTROL=ignorespace:erasedups
shopt -s histappend
PROMPT_COMMAND="history -a"
EOF
fi
# config PS1 (append once, guarded by the marker comment)
grep "PS1 config" /etc/profile
if [ $? -ne 0 ]; then
cat << EOF >> /etc/profile
# PS1 config
PS1='\[\e[37;1m[\]\[\e[31;1m\]\u\[\e[39;1m\]@\[\e[33;1m\]\H \[\e[34;1m\]\w\[\e[37;1m\]]\n\[\e[32;1m\]\\$ \[\e[0m\]'
EOF
fi
exit;
| true
|
583a4443f94741650d69b78f7cf2260130924ad8
|
Shell
|
giancastro/ddpt
|
/{{cookiecutter.django_project_name}}/deployment/django/django-healthcheck.sh
|
UTF-8
| 636
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Local and production health checks.
# https://docs.docker.com/compose/compose-file/#healthcheck
set -o errexit
# BUGFIX: "set -o pipefail" is not POSIX; on dash-based /bin/sh it fails
# with "Illegal option" and (with errexit) made the healthcheck always
# fail. Enable it only where the shell supports it.
set -o pipefail 2>/dev/null || true
# Just checking if our homepage is 200
# So, if you plan to never have a view for /, just change
# localhost:5000/ to some available endpoint, otherwise the health check will fail.
# For example: localhost:5000/admin
# IMPORTANT: In Docker Swarm if a health check fail, the container will be restarted.
# That's a naive healthcheck, as your application get complex, you'll need a better way
# to determine if your Django project is running as it should.
wget --spider localhost:5000/ || exit 1
| true
|
9e4ab9b8e8341cc9ca4bda9aeba225af1282e190
|
Shell
|
pedrochons/evaluacionUD
|
/start_vm.sh
|
UTF-8
| 439
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively create a qcow2 disk image and boot a VM from an ISO.
echo "Nombre del disco "
read name_img
echo "Tamano del disco(G)? "
read size_img
echo "Nombre de la maquina virtual? "
read name_vm
echo "Tamano de RAM(INCLUYE M o G)? "
read ram_vm
echo "Path del ISO (INCLUYE '.iso') "
read path_iso
qemu-img create -f qcow2 ~/images/"$name_img".img "$size_img"G
# BUGFIX: this ran "qemu-system-$name-vm", interpolating the undefined
# variable $name (yielding "qemu-system--vm"); the value read into
# $name_vm was intended -- TODO confirm it names the qemu arch suffix.
qemu-system-"$name_vm" -m "$ram_vm" -cdrom "$path_iso"
#echo $name_img
#echo "$size_img"G
#echo $name_vm
#echo $ram_vm
#echo $ram_vm
| true
|
064f50cb4bcfbc921786e6ebd6044b337de20991
|
Shell
|
luzidchris/rtems-testing
|
/gcc/testsuite/ada/acats/rtems_acats_status
|
UTF-8
| 799
| 3.984375
| 4
|
[] |
no_license
|
#! /bin/sh
#
# Report ACATS status summary
#
# Usage: rtems_acats_status CPU BSP [seconds]
# With a seconds argument, keeps reprinting the PASS/FAIL counts at that
# interval; otherwise prints once and exits.
if [ $# -ne 2 -a $# -ne 3 ] ; then
  echo Usage $0: CPU BSP [seconds]
  exit 1
fi
vfile=`dirname $0`/../../../../VERSIONS
if [ ! -r ${vfile} ] ; then
  echo VERSIONS file not found
  exit 1
fi
# BUGFIX: "source" is a bash-ism; this script runs under /bin/sh, so use
# the portable "." command (VERSIONS provides GCCDIR).
. ${vfile}
CPU=$1
BSP=$2
if [ $# -eq 3 ] ; then
  seconds=$3
else
  seconds=0
fi
DIR=${GCCDIR}/gcc/testsuite/ada/acats
if [ ! -d ${DIR} ] ; then
  echo "No Ada build directory for the BSP (${DIR})"
  exit 1
fi
cd ${DIR}
if [ ! -d work-${BSP} ] ; then
  echo "No ACATS working directory for the BSP (${BSP})"
  exit 1
fi
while true
do
  printf "PASSED: %4d FAILED %4d\n" \
    `grep ^PASS work-${BSP}/acats.sum | wc -l` \
    `grep ^FAIL work-${BSP}/acats.sum | wc -l`
  if [ ${seconds} -eq 0 ] ; then
    break
  fi
  sleep ${seconds}
done
exit 0
| true
|
8d04056eacab1b6e5f7a38fd20b0d051415b5211
|
Shell
|
latifkabir/Computation_using_C
|
/heated_plate_openmp/heated_plate_local_gcc.sh
|
UTF-8
| 598
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile heated_plate_openmp.c with gcc/OpenMP, run it with 1, 2 and 4
# threads collecting all output in one file, then clean up.
#
/usr/local/bin/gcc -fopenmp heated_plate_openmp.c -lm
#
mv a.out heated_plate
#
# Run with 1, 2, and 4 threads.
#
out=heated_plate_local_gcc_output.txt
: > "$out"   # truncate once; every run below appends
for threads in 1 2 4; do
  if [ "$threads" -eq 1 ]; then
    echo "Run with 1 thread."
  else
    echo "Run with $threads threads."
  fi
  export OMP_NUM_THREADS=$threads
  ./heated_plate >> "$out"
done
#
# Discard the executable file.
#
rm heated_plate
#
echo "Program output written to heated_plate_local_gcc_output.txt"
| true
|
34898f0ffb7246b9a9e6a84b034e1c680218973d
|
Shell
|
KevinPatist/EOS-ShellScripting
|
/telBestandTypes
|
UTF-8
| 816
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Count, per filename suffix, the regular files directly inside the
# directory $1 (non-recursive) and print one count per supplied suffix.
#   $1      - directory to scan
#   $2..$5  - up to four filename suffixes (e.g. ".txt") to tally
files=$(find $1 -maxdepth 1 -type f)
type1Count=0
type2Count=0
type3Count=0
type4Count=0
# NOTE(review): when fewer than 4 suffixes are given, the empty $3/$4/$5
# make patterns like *$3 collapse to '*', which matches every remaining
# file; the final case on $# hides those counters, so the printed output
# is still correct -- but the warning branch below is then unreachable.
for file in $files; do
case $file in
*$2)
((type1Count+=1))
;;
*$3)
((type2Count+=1))
;;
*$4)
((type3Count+=1))
;;
*$5)
((type4Count+=1))
;;
*)
echo "Geef niet meer dan 4 bestandstypen mee"
;;
esac
done
# Print only the counters for the suffixes that were actually supplied
# ($# includes the directory argument, hence 2 means one suffix).
case $# in
2)
echo aantal $2 bestanden: $type1Count
;;
3)
echo aantal $2 bestanden: $type1Count
echo aantal $3 bestanden: $type2Count
;;
4)
echo aantal $2 bestanden: $type1Count
echo aantal $3 bestanden: $type2Count
echo aantal $4 bestanden: $type3Count
;;
*)
echo aantal $2 bestanden: $type1Count
echo aantal $3 bestanden: $type2Count
echo aantal $4 bestanden: $type3Count
echo aantal $5 bestanden: $type4Count
;;
esac
| true
|
24736fc21ba36f65d3b583a0be64d6ffd7bc839a
|
Shell
|
b-saideepak/Cross-core-covert-channel
|
/C-Box/src/start.sh
|
UTF-8
| 1,023
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Program the Sandy Bridge uncore C-Box performance counters to count
# LLC references (event 0x34) in every cache slice, then start counting.
MSR_TOOLS_PATH=/path
SRC_PATH=/path
# SRC_PATH is the path to the src folder
# Values for Sandy Bridge architecture
MSR_UNC_PERF_GLOBAL_CTRL="0x391"
# One event-select / counter MSR pair per C-Box slice (0-3).
CBO_PERFEVTSEL0=("0x700" "0x710" "0x720" "0x730")
CBO_PERFCTR0=("0x706" "0x716" "0x726" "0x736")
cd $MSR_TOOLS_PATH
# Stop counting
sudo ./wrmsr $MSR_UNC_PERF_GLOBAL_CTRL 0x0
# Clear the counter values
for ctr in "${CBO_PERFCTR0[@]}"; do
    sudo ./wrmsr "$ctr" 0x0
done
# Select LLC_References event (0x34) in all slices
for sel in "${CBO_PERFEVTSEL0[@]}"; do
    sudo ./wrmsr "$sel" 0x508f34
done
# Start counting
sudo ./wrmsr $MSR_UNC_PERF_GLOBAL_CTRL 0x2000000f
cd $SRC_PATH
| true
|
4a695aefa987f6872814af7dce043af363fe00ce
|
Shell
|
stefanovualto/circleci-packages-bump
|
/info.sh
|
UTF-8
| 247
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the package.json version with its patch component bumped by one
# (e.g. 1.2.3 -> 1.2.4).
BUMPED_PACKAGE_VERSION=$(cat package.json \
  | grep version \
  | head -1 \
  | awk -F: '{ print $2 }' \
  | sed 's/[",]//g' \
  | awk -F '.' '{ print $1,$2,$3+1 }' \
  | sed 's/^ //' \
  | sed 's/ /./g')
# BUGFIX: the script computed BUMPED_PACKAGE_VERSION but echoed the
# never-set PACKAGE_VERSION, so it always printed an empty line.
echo "${BUMPED_PACKAGE_VERSION}"
| true
|
2a81d3d8e0c03e5f75d60f60e7520ea123464129
|
Shell
|
SStauden/adversarial-squad
|
/generate_add_sent2.sh
|
UTF-8
| 566
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CREATES ADVERSARIAL SAMPLES FOR SQUAD DEV
# BUGFIX: -p keeps re-runs from failing when ./out already exists.
mkdir -p out
# Precompute nearby words in word vector space; takes roughly 1 hour
python src/py/find_squad_nearby_words.py \
  glove/glove.6B.100d.txt \
  -n 100 \
  -f data/squad/dev-v1.1.json > out/nearby_n100_glove_6B_100d.json
# Run CoreNLP on the SQuAD training data; takes roughly 1 hour, uses ~18GB memory
python src/py/convert_questions.py corenlp -d dev
# Actually generate the raw AddSent examples; takes roughly 7 minutes, uses ~15GB memory
python src/py/convert_questions.py dump-highConf -d dev
| true
|
1b645e7284051d5a0dfea033a191b8bb3bde08d3
|
Shell
|
smartpcr/auth-benchmark
|
/deploy/scripts/setup-devbox.sh
|
UTF-8
| 3,969
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a dev box with build/deploy tooling: node, docker(+compose),
# terraform, kubectl, helm, fabrikate, az CLI, powershell, dotnet SDK and
# the Azure Functions core tools.
export DEBIAN_FRONTEND=noninteractive
# BUGFIX: "sudo echo ... > file" performs the redirection as the calling
# user, not root; route the write through "sudo tee" instead.
echo "APT::Get::Assume-Yes \"true\";" | sudo tee /etc/apt/apt.conf.d/90assumeyes > /dev/null
# BUGFIX: the second apt-get in the chain also needs sudo.
sudo apt-get update \
  && sudo apt-get install -y --no-install-recommends \
    ca-certificates \
    curl \
    openssl \
    apt-utils \
    apt-transport-https \
    git \
    iputils-ping \
    libcurl3 \
    libicu55 \
    libunwind8 \
    lsb-release \
    gnupg2 \
    software-properties-common \
    netcat \
    wget \
    unzip \
    openssh-server \
    sshfs
# Install jq-1.6 (beta)
sudo wget -q https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 \
  && sudo chmod +x jq-linux64 \
  && sudo mv jq-linux64 /usr/bin/jq
# install node
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
sudo apt install nodejs
sudo chown -R $(id -u):$(id -g) /usr/lib/node_modules
# Install docker, requires docker run args: `-v /var/run/docker.sock:/var/run/docker.sock`
# BUGFIX: apt-key must run as root; the sudo was previously on curl only.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - && \
  sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" && \
  sudo apt-get update && \
  sudo apt-get -y install docker-ce
# Install docker-compose
sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose && \
  sudo chmod +x /usr/local/bin/docker-compose
# Install terraform
sudo wget -q https://releases.hashicorp.com/terraform/0.12.6/terraform_0.12.6_linux_amd64.zip \
  && unzip terraform_0.12.6_linux_amd64.zip \
  && chmod +x terraform \
  && sudo mv terraform /usr/local/bin/ \
  && rm terraform_0.12.6_linux_amd64.zip -f
# Install kubectl
sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
  && chmod +x kubectl \
  && sudo mv ./kubectl /usr/local/bin/kubectl
# Install helm
sudo curl -LO https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz \
  && tar -zxvf helm-v2.14.3-linux-amd64.tar.gz \
  && chmod +x ./linux-amd64/helm \
  && sudo mv ./linux-amd64/helm /usr/local/bin/helm \
  && rm helm-v2.14.3-linux-amd64.tar.gz -f \
  && rm -rf linux-amd64 -f
# Install fab
sudo curl -LO 'https://github.com/microsoft/fabrikate/releases/download/0.15.0/fab-v0.15.0-linux-amd64.zip' \
  && unzip fab-v0.15.0-linux-amd64.zip \
  && rm fab-v0.15.0-linux-amd64.zip -f \
  && chmod +x fab \
  && sudo mv ./fab /usr/local/bin/fab
# Install AZ CLI
# BUGFIX: the installer (bash) is what needs root, not curl.
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# BUGFIX: tee and mkdir on root-owned paths must run under sudo;
# "sudo echo | tee" left tee unprivileged.
echo "AZURE_EXTENSION_DIR=/usr/local/lib/azureExtensionDir" | sudo tee -a /etc/environment \
  && sudo mkdir -p /usr/local/lib/azureExtensionDir
sudo chown -R $(id -u):$(id -g) /home/$USER/.azure
# Install az extensions
sudo az extension add --name application-insights
# Install powershell core
sudo wget -q https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb \
  && sudo dpkg -i packages-microsoft-prod.deb \
  && sudo apt-get update \
  && sudo apt-get install -y powershell \
  && rm packages-microsoft-prod.deb -f
# Install dotnet core sdk, this fix powershell core handling of cert trust chain problem
sudo apt-get install -y dotnet-sdk-3.1
# add basic git config
sudo git config --global user.email "xiaodoli@microsoft.com" && \
  sudo git config --global user.name "Xiaodong Li" && \
  sudo git config --global push.default matching && \
  sudo git config --global credential.helper store
# setup azure function tools
curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg
sudo mv microsoft.gpg /etc/apt/trusted.gpg.d/microsoft.gpg
sudo sh -c 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-$(lsb_release -cs)-prod $(lsb_release -cs) main" > /etc/apt/sources.list.d/dotnetdev.list'
sudo apt-get update
sudo apt-get install -y azure-functions-core-tools
| true
|
7b446c47d89b3125bcfbd364e0fe4c8f88719b52
|
Shell
|
ivancho/riverboard
|
/uninstall
|
UTF-8
| 217
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove the self-update cronjob and the local repo
# BUGFIX: guard the cd and quote the paths -- an unquoted failed cd
# followed by "rm -rf $INSTALL" could delete the wrong directory.
cd "$( dirname "${BASH_SOURCE[0]}" )" || exit 1
INSTALL="$( pwd )"
DEPLOY="${INSTALL}/deploy"
# Drop any crontab line that references our deploy script.
(crontab -l | grep -v "$DEPLOY" | crontab -)
cd ..
rm -rf -- "${INSTALL:?}"
| true
|
008b16772373e06a92b34fff7d70cfbc07c014b2
|
Shell
|
yinghai9989/OpenStackDeploy
|
/DeployScripts/run-post-install-script.sh
|
UTF-8
| 2,392
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the OpenStack post-install script on the first controller node:
# convert the cirros image to raw when ceph backs any service, execute
# post-install-script.sh remotely inside a screen session, then fetch
# the resulting log.
#   $1 - date stamp used to name the remote log file
CMD_PATH=.
DST_PATH=./conf_orig
CONF_DEPLOY_DIR=./conf_deploy
RUN_DATE=$1
MY_LOCALE=$($CMD_PATH/get-conf-data.sh ./locale.txt LOCALE)
# BUGFIX: quote the operand -- with an empty MY_LOCALE the unquoted test
# failed with "[: =: unary operator expected".
if [ "$MY_LOCALE" = 'CN' ]; then
    source ./locale_cn.txt
else
    source ./locale_en.txt
fi
#If use ceph
PREFIX_STORAGE=$($CMD_PATH/get-max-prefix.sh $DST_PATH Storage.txt)
MY_GLANCE_STORAGE=$($CMD_PATH/get-conf-data.sh $DST_PATH/$PREFIX_STORAGE-Storage.txt GLANCE_STORAGE)
MY_CINDER_STORAGE=$($CMD_PATH/get-conf-data.sh $DST_PATH/$PREFIX_STORAGE-Storage.txt CINDER_STORAGE)
MY_NOVA_STORAGE=$($CMD_PATH/get-conf-data.sh $DST_PATH/$PREFIX_STORAGE-Storage.txt NOVA_STORAGE)
if [ "$MY_GLANCE_STORAGE" = "ceph" -o "$MY_CINDER_STORAGE" = "ceph" -o "$MY_NOVA_STORAGE" = "ceph" ]; then
    echo "Used ceph block devices, the format of image file needed be raw, now convert it ..."
    MY_CONTROLLER_NODE_IP=$(head -n 1 $CONF_DEPLOY_DIR/Controller-Nodes-IPs.txt)
    MY_COMPUTER_NODE_IP=$(head -n 1 $CONF_DEPLOY_DIR/Computer-Nodes-IPs.txt)
    echo "Copy image file to computer node $MY_COMPUTER_NODE_IP, then convert format from qcow2 to raw"
    rsync -vaI ./images/cirros-0.3.2-x86_64-disk.img root@$MY_COMPUTER_NODE_IP:/root/OpenStack-Install-HA/images/
    ssh root@$MY_COMPUTER_NODE_IP "cd /root/OpenStack-Install-HA/images;qemu-img convert -f qcow2 -O raw cirros-0.3.2-x86_64-disk.img cirros-0.3.2-x86_64-disk.raw;"
    echo "Copy image file to local server then copy it to controller node $MY_CONTROLLER_NODE_IP"
    rsync -vaSI root@$MY_COMPUTER_NODE_IP:/root/OpenStack-Install-HA/images/cirros-0.3.2-x86_64-disk.raw ./
    rsync -vaSI ./cirros-0.3.2-x86_64-disk.raw root@$MY_CONTROLLER_NODE_IP:/root/OpenStack-Install-HA/images/
    rm -f ./cirros-0.3.2-x86_64-disk.raw
fi
#Run post installation scripts on first controller node
echo $STR_RUN_POST_INSTALL_SCRIPT
# Start a detached screen session and wait for it before dispatching work.
screen -dmS niu -U -t sleeping $CMD_PATH/sleep-x-seconds.sh 10
$CMD_PATH/check-screen-started.sh
IP=$(head -n 1 $CONF_DEPLOY_DIR/Controller-Nodes-IPs.txt)
screen -S niu -U -X screen -U -t $IP $CMD_PATH/run-on-node.expect $IP post-install-script.sh $RUN_DATE-post-install-script-$IP.log
$CMD_PATH/check-screen-ended.sh
echo $STR_COMPLETE_POST_INSTALL_SCRIPT
echo $STR_GET_LOG_FILE_FROM_SERVERS
IP=$(head -n 1 $CONF_DEPLOY_DIR/Controller-Nodes-IPs.txt)
rsync -va $IP:/root/OpenStack-Install-HA/log/$RUN_DATE-post-install-script-$IP.log $CMD_PATH/log/
#
exit 0
| true
|
088999fe74e8a19e5b79c62eed9df140ad4c0b59
|
Shell
|
blade2005/gameserver_admin
|
/check-newest-build
|
UTF-8
| 1,828
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Compare the locally installed Steam build of a game server against the
# latest public build advertised by steamcmd.
#   $1 - Steam app id,  $2 - game directory name under ~/gameservers
# Exit: 0 up to date, 1 update available / usage, 2 lookup error.
gameid=$1
gamename=$2
function usage {
  echo "Usage $0 <gameid> <gamename>"
  exit 1
}
test -n "$gameid" || usage
test -n "$gamename" || usage
# IMPROVED: run find only once (the original invoked it twice for the
# count and the file list).
files=$(find ~/gameservers/$gamename -name "appmanifest_${gameid}.acf")
if [ -z "$files" ];then
  echo "Found no game manifestos"
  exit 2
fi
filecount=$(echo "$files" | wc -l)
if [ "$filecount" -ge "2" ];then
  echo "Found too many game manifestos"
  exit 2
fi
currentbuild=$(grep buildid "${files}" | tr '[:blank:]"' ' ' | tr -s ' ' | cut -d' ' -f3)
if [ -z "$currentbuild" ];then
  echo "Unable to find current build"
  exit 2
fi
cd ~/steamcmd
# Query app_info twice: the first call primes steamcmd's cache so the
# second prints complete data; then dig the public branch's buildid out.
availablebuild=$(./steamcmd.sh +login anonymous +app_info_update 1 +app_info_print "$gameid" +app_info_print "$gameid" +quit | \
  grep -EA 1000 "^\s+\"branches\"$" | \
  grep -EA 5 "^\s+\"public\"$" | \
  grep -m 1 -EB 10 "^\s+}$" | \
  grep -E "^\s+\"buildid\"\s+" | \
  tr '[:blank:]"' ' ' | \
  tr -s ' ' | \
  cut -d' ' -f3)
if [ -z "$availablebuild" ];then
  echo "Unable to find latest build"
  exit 2
fi
if [ "${currentbuild}" != "${availablebuild}" ]; then
  echo -e "Update available:"
  sleep 1
  echo -e " Current build: \e[0;31m${currentbuild}\e[0;39m"
  echo -e " Available build: \e[0;32m${availablebuild}\e[0;39m"
  echo -e ""
  echo -e " https://steamdb.info/app/${gameid}/"
  sleep 1
  echo ""
  echo -en "Applying update.\r"
  sleep 1
  echo -en "Applying update..\r"
  sleep 1
  echo -en "Applying update...\r"
  sleep 1
  echo -en "\n"
  exit 1
else
  echo -e "No update available:"
  echo -e " Current version: \e[0;32m${currentbuild}\e[0;39m"
  echo -e " Available version: \e[0;32m${availablebuild}\e[0;39m"
  echo -e " https://steamdb.info/app/${gameid}/"
  echo -e ""
  exit 0
fi
| true
|
bdc2667f0f275f57f078f929abbf7519e9848d8d
|
Shell
|
h1994st/Athena
|
/oh-my-zsh.sh
|
UTF-8
| 1,096
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Set up zsh and Oh My Zsh: install/update both, back up any existing
# ~/.zshrc that is a regular file, and link the repository's zshrc in.

# init -- allow the caller to pre-set SYSTEM to skip detection
: ${SYSTEM:=`uname -s`}

if [ "$SYSTEM"x = "Linux"x ] ; then
    # Install zsh first if /etc/shells does not list it yet
    zsh_entries=$(grep /zsh$ /etc/shells | wc -l)
    if [ ! $zsh_entries -ge 1 ]; then
        echo 'Install zsh ...'
        sudo apt-get install zsh
    fi
    unset zsh_entries
fi

# Default Oh My Zsh location unless the environment provides one
if [ -z "$ZSH" ]; then
    ZSH=~/.oh-my-zsh
fi

# Check oh-my-zsh: update an existing checkout, install otherwise
if [ ! -d "$ZSH" ]; then
    echo 'Install Oh My Zsh ...'
    sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
else
    echo 'Update Oh My Zsh ...'
    env ZSH=$ZSH sh $ZSH/tools/upgrade.sh
fi

# Preserve a pre-existing real ~/.zshrc (symlinks are ours and safe to
# replace, so only regular files are backed up)
if [ -f ~/.zshrc ] ; then
    echo 'Configuration file ~/.zshrc exists'
    if [ ! -L ~/.zshrc ] ; then
        echo 'Back up existing configuration file to ~/.zshrc.bak'
        mv ~/.zshrc ~/.zshrc.bak
    fi
fi

# (Re)create the symlink unless it already points at our zshrc
wanted_target="$(pwd)/zshrc"
if [ "$(readlink ~/.zshrc)" != "$wanted_target" ] || [ ! -f ~/.zshrc ] ; then
    echo 'Create a symbolic link ...'
    ln -s "$wanted_target" ~/.zshrc
    ls -al ~/.zshrc
fi
echo 'Done!'
| true
|
16001478940d497cde6fb3ca0996e35050e4a8ff
|
Shell
|
h01ger/piuparts
|
/custom-scripts/scripts/pre_remove_50_find_bad_permissions
|
UTF-8
| 2,043
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# piuparts custom script: before removing the package under test, strip
# world-write permission from files/dirs that specific known-buggy
# packages leave behind, then fail if any world-writable entry without
# the sticky bit remains on the filesystem.
# Environment: PIUPARTS_DISTRIBUTION (e.g. "lenny", "squeeze"),
# PIUPARTS_OBJECTS ("pkg=version"; ${...%%=*} strips the version).
set -e
# lenny-only workaround
case "$PIUPARTS_DISTRIBUTION" in
lenny)
case ${PIUPARTS_OBJECTS%%=*} in
linpopup)
# package removed after lenny
for file in /var/lib/linpopup/messages.dat
do
test ! -f "$file" || chmod -c o-w "$file"
done
;;
esac
;;
esac
# lenny/squeeze workaround for citadel and friends
case "$PIUPARTS_DISTRIBUTION" in
lenny|squeeze*)
case ${PIUPARTS_OBJECTS%%=*} in
citadel-server|citadel-dbg|citadel-mta|citadel-suite|bcron-run|capisuite|debbugs|raccess4vbox3|smartlist|sxid)
#WORKSAROUND #684964: citadel-server: world writable config file: /etc/citadel/netconfigs/7
for file in /etc/citadel/netconfigs/7 /etc/citadel/refcount_adjustments.dat /etc/citadel/citadel.control
do
test ! -f "$file" || chmod -c o-w "$file"
done
;;
esac
;;
esac
# wheezy workarounds (citadel list differs slightly, plus ilisp)
case "$PIUPARTS_DISTRIBUTION" in
wheezy*)
case ${PIUPARTS_OBJECTS%%=*} in
citadel-server|citadel-dbg|citadel-mta|citadel-suite|bcron|bcron-run|capisuite|debbugs|exmh|nmh|raccess4vbox3|smartlist|xlbiff)
#WORKSAROUND #684964: citadel-server: world writable config file: /etc/citadel/netconfigs/7
for file in /etc/citadel/netconfigs/7 /etc/citadel/refcount_adjustments.dat /var/lib/citadel/data/refcount_adjustments.dat
do
test ! -f "$file" || chmod -c o-w "$file"
done
;;
ilisp)
#WORKSAROUND #682826: ilisp: creates world writable directory /usr/lib/ilisp
# package was not in wheezy
for dir in /usr/lib/ilisp
do
test ! -d "$dir" || chmod -c o-w "$dir"
done
;;
esac
;;
esac
# distribution-independent workaround for gpe-tetris
case ${PIUPARTS_OBJECTS%%=*} in
gpe-tetris|gpe)
#WORKSAROUND #684178: gpe-tetris: creates world writable directory /var/games/gpe
# package removed after wheezy
for file in /var/games/gpe/gpe-tetris.dat
do
test ! -f "$file" || chmod -c o-w "$file"
done
for dir in /var/games/gpe
do
test ! -d "$dir" || chmod -c o-w "$dir"
done
;;
esac
# find world writables without sticky bit
BADPERMS=$(find / -mount ! -type l ! -type c ! -type p ! -type s -perm -o+w ! -perm -1000)
if [ -n "$BADPERMS" ]; then
echo "ERROR: BAD PERMISSIONS"
ls -lad $BADPERMS
exit 1
fi
| true
|
a2c471cb0a2b93131ddd095e38897a3880e6d5c0
|
Shell
|
JXia7/linux-shell
|
/3_Flow_Control_Loops_and_Documentation/test.sh
|
UTF-8
| 1,788
| 4.53125
| 5
|
[] |
no_license
|
#!/bin/bash
# Juanjuan Xia #
# Exercises for flow control: if/elif, case, for, until and while loops.

# Test for existence of a file
if [ -f "hwtest" ]; then
    echo "the file name is: " hwtest
fi
# Check if a particular directory exists; create it otherwise
if [ -d "DIRECTORY" ]; then
    echo "the directory exists."
else
    mkdir DIRECTORY
fi
# Use if/elif statement to test input from the user
echo -n "Task IF-ELSE, Enter a number > "
read character
if [ "$character" -ge 0 ] && [ "$character" -lt 9 ]; then
    echo "The input is a number between 0 and 9."
elif [ "$character" -ge 50 ] && [ "$character" -lt 100 ]; then
    echo "The input is a number between 50 and 100."
elif [ "$character" -ge 500 ]; then
    echo "The input is a number > 500."
else
    echo "The input not belong the following values:"
    echo "number < 10, number between 50 and 100, number > 500."
fi
# Use switch statement to test input for the user
echo -n "Task CASE, Enter a number > "
read character
case $character in
    [0-9] ) echo "The input is a number between 0 and 9."
    ;;
    [1-4][0-9] ) echo "The input is between 10 and 49, not what we wanted."
    ;;
    [5-9][0-9] ) echo "The input is a number between 50 and 99."
    ;;
    [1-4][0-9][0-9] ) echo "The input is between 100 and 499, not what we wanted."
    ;;
    * ) echo "The input is a number >= 500."
esac
# Iterate through all the files in the current directory
# BUGFIX: quote $element so names containing whitespace don't break the
# test expressions.
for element in ./*;
do
    if [ -f "$element" ]; then
        echo "$element is a File."
    elif [ -d "$element" ]; then
        echo "$element is a Directory."
    fi
done
# A for loop which starts at 7 and counts up to 37 in steps of 3
for iter_number in {7..37..3}
do
    echo $iter_number
done
# An until loop: keep prompting until the entered number is >= 10
number=0
until [ "$number" -ge 10 ]; do
    echo "(Until loop) Enter a number:"
    read character
    number=$character
done
# A while loop: keep prompting until the user answers "yes"
# (dropped the redundant no-op "character=$character")
character=no
while [ "$character" != yes ]; do
    echo "Do you like BASH?"
    read character
done
| true
|
08c07f70ee591814db70d0b9b40f55f1005c7219
|
Shell
|
ozercimilli/petclinic-microservices
|
/jenkins/build-and-deploy-petclinic-on-qa-env-manually.sh
|
UTF-8
| 961
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the petclinic app, create and push QA docker images to ECR, and
# deploy the stack on the QA Docker Swarm via Ansible.
# NOTE: helper scripts are sourced (". ./script") so the variables set
# and exported below stay visible to every step.
# BUGFIX: added the missing shebang so the script does not depend on
# whichever shell happens to invoke it.
PATH="$PATH:/usr/local/bin"
APP_NAME="petclinic"
APP_REPO_NAME="muratcw-repo/petclinic-app-qa"
APP_STACK_NAME="Murat1-petclinic-App-QA-1"
CFN_KEYPAIR="murat-petclinic-qa.key"
AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
AWS_REGION="us-east-1"
ECR_REGISTRY="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com"
export ANSIBLE_PRIVATE_KEY_FILE="${JENKINS_HOME}/.ssh/${CFN_KEYPAIR}"
export ANSIBLE_HOST_KEY_CHECKING="False"
echo 'Packaging the App into Jars with Maven'
. ./jenkins/package-with-maven-container.sh
echo 'Preparing QA Tags for Docker Images'
. ./jenkins/prepare-tags-ecr-for-qa-docker-images.sh
echo 'Building App QA Images'
. ./jenkins/build-qa-docker-images-for-ecr.sh
echo "Pushing App QA Images to ECR Repo"
. ./jenkins/push-qa-docker-images-to-ecr.sh
echo 'Deploying App on Swarm'
. ./ansible/scripts/deploy_app_on_qa_environment.sh
echo 'Deleting all local images'
docker image prune -af
| true
|
b4c2b7d6624c1f8942e6e4c1dc11280002663991
|
Shell
|
agowa/html-inliner
|
/inline.sh
|
UTF-8
| 1,506
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Inline all SVG images and CSS stylesheets referenced by main/index.html
# into a single self-contained public/index.html.
# Requires: tidy (HTML sanitizer), GNU sed, uuidgen.
# FIX: shebang was /bin/sh, but pushd/popd are bash builtins.

# Sanitize the input first; tidy exits 1 for mere warnings, which is fine.
cp main/index.html main/index.ea742abd-3300-47dc-ae64-3157acdc917f.html
tidy --doctype html5 -asxhtml -indent --show-warnings no main/index.ea742abd-3300-47dc-ae64-3157acdc917f.html > main/index.html || if [ $? -ne 1 ]; then exit 100; fi
rm -f main/index.ea742abd-3300-47dc-ae64-3157acdc917f.html

# Inline all svg files.
pushd main
for f in *.svg
do
    [ -e "$f" ] || continue   # skip the literal '*.svg' when no SVGs exist
    uuid=$(uuidgen)
    # Prefix ids inside the svg with a unique uuid so several inlined
    # documents cannot clash with each other's ids/clip-paths.
    sed -i "s/glyph0-/$uuid-glyph0-/" "$f"
    sed -i 's/<g id="surface/<g id="'"$uuid"'-surface/' "$f"
    sed -i 's/clipPath id="clip/clipPath id="'"$uuid"'-clip/' "$f"
    sed -i 's/clip-path="url(#clip/clip-path="url(#'"$uuid"'-clip/' "$f"
    sed -i 's/<?xml version="1.0" encoding="UTF-8"?>//' "$f"
    # Replace the <img src="$f"> node with the uuid placeholder...
    sed -i ':a;N;$!ba; s|<img[^>]*src="'"$f"'"[^>]*>|'"$uuid"'|g' index.html
    # ...then replace the placeholder with the content of $f.
    sed -i -e "/$uuid/{r $f" -e "d}" index.html
done

# Inline *.css
for f in *.css
do
    [ -e "$f" ] || continue
    sed -i 's/<link rel="STYLESHEET" href="'"$f"'"[^>]*>/<style type="text\/css">\n<\/style>/' index.html
    sed -i '/<style type="text\/css">/ r '"$f"'' index.html
done
popd

# Add doctype header if not present (GNU sed `q 0`/`q 1` sets the exit code).
sed '/^<!DOCTYPE html>/{q 0};{q 1}' main/index.html || sed -i '1s/^/<!DOCTYPE html>/' main/index.html

# Call tidy for sanitizing the final result.
tidy --doctype html5 -asxhtml -indent --show-warnings no main/index.html > public/index.html || if [ $? -ne 1 ]; then exit 100; fi
| true
|
3e4db180a10dc69b925646ac6671ff349954c14b
|
Shell
|
swizzley/vagrant-kibi
|
/provision.sh
|
UTF-8
| 2,981
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Provision a Kibi demo (lite or full) with its bundled Elasticsearch.
# Usage: provision.sh [reinstall lite|full]  or  provision.sh [full]
case "$1" in
    reinstall)
        case "$2" in
            lite)
                KIBI="kibi-4-4-2-linux-x64-demo-lite-zip"
                ;;
            full)
                KIBI="kibi-4-4-2-linux-x64-demo-full-zip"
                ;;
            *)
                echo "Must specify kibi to restart, options are : [full, lite]"
                exit 1
                ;;
        esac
        # Stop any running kibi processes before wiping the install dir.
        if [ -n "$(pgrep -u kibi)" ]; then
            sudo kill -9 $(pgrep -u kibi)
        fi
        sudo rm -rf /opt/kibi
        ;;
    full)
        KIBI="kibi-4-4-2-linux-x64-demo-full-zip"
        ;;
    *)
        KIBI="kibi-4-4-2-linux-x64-demo-lite-zip"
        ;;
esac

# Prefer a private mirror when it is reachable; fall back to public sources.
if [ "$(ping -c 1 "$MY_PRIVATE_REPO")" ]; then
    SRC="$MY_PRIVATE_REPO/x86_64/src/kibi"
    EPEL_BASE="$MY_PRIVATE_REPO/epel/x86_64/7/"
    HQ_PLUGIN="$MY_PRIVATE_REPO/x86_64/src/kibi/elasticsearch-HQ-2.0.3.zip"
else
    SRC="bit.do"
    # FIX: $basearch must reach yum literally; the original let the shell
    # expand it to an empty string.
    EPEL_BASE="http://download.fedoraproject.org/pub/epel/7/\$basearch"
    HQ_PLUGIN="https://github.com/royrusso/elasticsearch-HQ/archive/v2.0.3.zip"
fi

if [ ! -f "/tmp/$KIBI" ]; then
    sudo curl -kL -o "/tmp/$KIBI" "http://$SRC/$KIBI"
fi

# FIX: the original tests used `[ ! "$(which X &> /dev/null)" ]`; the
# redirection makes the substitution always empty, so every check always
# fired. `command -v` with an explicit status test is the correct idiom.
if ! command -v node >/dev/null 2>&1; then
    sudo yum makecache fast &> /dev/null
    if ! yum repolist all | grep -qi epel; then
        # FIX: `sudo echo ... > file` does not elevate the redirection;
        # pipe through `sudo tee` so the repo file is actually written.
        echo "[epel]"             | sudo tee    /etc/yum.repos.d/epel.repo >/dev/null
        echo "name=EPEL"          | sudo tee -a /etc/yum.repos.d/epel.repo >/dev/null
        echo "baseurl=$EPEL_BASE" | sudo tee -a /etc/yum.repos.d/epel.repo >/dev/null
        echo "gpgcheck=0"         | sudo tee -a /etc/yum.repos.d/epel.repo >/dev/null
    fi
    sudo yum -y install nodejs
fi
if ! command -v java >/dev/null 2>&1 || ! readlink -f "$(command -v java)" | grep -q 1.8; then
    sudo yum -y install java-1.8.0-openjdk
fi
if ! command -v unzip >/dev/null 2>&1; then
    sudo yum -y install unzip
fi
if ! id kibi >/dev/null 2>&1; then
    echo "adding user kibi"
    sudo useradd -d /opt/kibi -M -s /sbin/nologin kibi
fi
if [ ! -d "/opt/kibi" ]; then
    sudo unzip -q "/tmp/$KIBI" -d /tmp/archive
    sudo mv /tmp/archive/* /opt/kibi
    sudo rmdir /tmp/archive
    sudo chown -R kibi:kibi /opt/kibi
    # Bind Kibi/Elasticsearch to all interfaces and point Kibi at the
    # local Elasticsearch instance.
    sudo sed -i s/'server.host: "localhost"'/'server.host: "0.0.0.0"'/g /opt/kibi/kibi/config/kibi.yml
    sudo sed -i "s#elasticsearch\\.url.*#elasticsearch.url: \"http://$(facter ipaddress):9220\"#g" /opt/kibi/kibi/config/kibi.yml
    sudo sed -i s/'# network.host: 192.168.0.1'/'network.host: _site_'/g /opt/kibi/elasticsearch/config/elasticsearch.yml
fi
sudo /sbin/service firewalld stop
echo "starting elasticsearch"
sudo -u kibi /opt/kibi/elasticsearch/bin/elasticsearch &
# Wait until Elasticsearch reports "started" in its log before installing plugins.
until [ -d "/opt/kibi/elasticsearch/logs" ]; do sleep 1; done
until grep node /opt/kibi/elasticsearch/logs/*.log | grep -q started; do sleep 1; done
sudo /opt/kibi/elasticsearch/bin/plugin install "$HQ_PLUGIN"
sudo -u kibi /opt/kibi/kibi/bin/kibi 0<&- &>/dev/null &
echo "Kibi running at http://$(facter ipaddress):5606"
echo "Elastic HQ running at http://$(facter ipaddress):9220/_plugin/HQ"
| true
|
3c656ce6de0a9f1c1f208c1a9d9f18a285834709
|
Shell
|
xfantasy/dotfiles
|
/zshrc
|
UTF-8
| 2,096
| 2.734375
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
export ZSH_THEME="wedisagree"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable weekly auto-update checks
DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want disable red dots displayed while waiting for completion
DISABLE_COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git svn node npm git-flow vundle osx)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
# PATH additions: system dirs, local dotfiles scripts, and the Android SDK.
export PATH=$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin:~/.dotfiles/shell:/developer/android-sdk:/developer/android-sdk/tools:/developer/android-sdk/platform-tools:/usr/local/sbin
export NODE_PATH=/usr/local/lib/node_modules:/usr/local/lib/jsctags/:$NODE_PATH
export JAVA_LIBRARY_PATH=/Library/Java/JavaVirtualMachines/jdk1.7.0_17.jdk/Contents/Home:$JAVA_LIBRARY_PATH
# GUI editor shortcuts (macOS `open -a`).
alias v="open -a MacVim"
alias s="open -a Sublime\ Text\ 2"
# zsh named directories: enables `cd ~workspace`, `cd ~trunk`, `cd ~dropbox`.
hash -d workspace="/Volumes/User/xijiangbo/workspace"
hash -d trunk="/Volumes/User/xijiangbo/workspace/trunk/"
hash -d dropbox="/Volumes/User/xijiangbo/Dropbox/"
# Customize to your needs...
alias ..="cd .."
alias la="ls -la"
alias ll="ls -l"
alias ~="cd ~"
alias mcd="mvn clean deploy"
alias grep="grep --color=auto"
# SOCKS proxy tunnel on localhost:7070 via ssh.
alias fuckgfw='ssh -qTfnN -D 7070 lufei@shaoshuai.me'
#mysql
alias mysql='/usr/local/mysql/bin/mysql'
alias mysqladmin='/usr/local/mysql/bin/mysqladmin'
#svn
alias uu="svn up";
alias cc="svn ci --message='test'"
alias st="svn st"
alias oo="open ./"
# Make every `cd` also list the target directory (function defined below).
alias cd=cdAndLs
# cd wrapper: change to the requested directory, then list the first
# 20 entries of it. The listing runs even when cd fails (as before).
cdAndLs() {
    builtin cd "$@"
    ls | head -20
}
| true
|
00c09a1e95d6fea7e26eb33e17bdc43e2be66a29
|
Shell
|
RabadanLab/melamed_comorbidity
|
/code/setup_cancer_data.sh
|
UTF-8
| 1,216
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Link TCGA Firehose copy-number (GISTIC) and mutation (MutSig) result
# files into data_source/cancer_alterations under per-cancer names.
firehose=analyses__2013_09_23/*/20130923 ## CHANGE THIS LINE FOR DIFFERENT FIREHOSE DOWNLOAD
cancer_links=data_source/cancer_alterations
mkdir "$cancer_links"

# Firehose directory-name pieces.
# BUG FIX: mut_prefix was originally defined *after* its first use in the
# copy-number loop below, so that glob silently matched with an empty prefix.
mut_prefix="gdac.broadinstitute.org_"
mut_suffix="-T*.MutSigNozzleReportMerged.Level_4.*"
mut_table=sig_genes.txt

cn_suffix=CopyNumber_Gistic2.Level_4.*
del_table=table_del.conf_99.txt
amp_table=table_amp.conf_99.txt
cn_genes=all_data_by_genes.txt
cn_thres=all_thresholded.by_genes.txt

# For each GISTIC result dir, link the deletion/amplification tables plus
# the per-gene copy-number matrices, keyed by the cancer name (2nd path part).
for cntype in "$del_table" "$amp_table"; do
    for cn_res in $(ls $firehose/$mut_prefix*$cn_suffix/$cntype); do
        cancer_name=$(echo $cn_res | cut -d "/" -f 2)
        f=$cancer_links/$cancer_name.$cntype
        #echo $cn_res
        if [ ! -e $f ]; then ln -s $(pwd)/$cn_res $f; fi
        f=$cancer_links/$cancer_name.$cn_genes
        if [ ! -e $f ]; then ln -s $(pwd)/$(dirname $cn_res)/$cn_genes $f; fi
        f=$cancer_links/$cancer_name.$cn_thres
        if [ ! -e $f ]; then ln -s $(pwd)/$(dirname $cn_res)/$cn_thres $f; fi
    done
done

# Link each MutSig significant-genes table as <cancer>.mut.
for mut_res in $(ls -1 $firehose/$mut_prefix*$mut_suffix/*.$mut_table); do
    cancer_name=$(echo $mut_res | cut -d "/" -f 2)
    #echo $mut_res
    ln -s $(pwd)/$mut_res $cancer_links/$cancer_name.mut
done
echo DONE
| true
|
1d8445e13bdf493e9ed4f66a3c7909aa40449a61
|
Shell
|
sarry007/coat
|
/autocomplete/sqlmap.completion.bash
|
UTF-8
| 6,505
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ---------------------------------------------------------------------------+
# |
# Thanks to Alexander Korznikov |
# http://www.korznikov.com/2014/12/bash-tab-completion-for-awesome-tool.html |
# |
# ---------------------------------------------------------------------------+
# Bash programmable completion for sqlmap; only registered when sqlmap is
# actually on PATH. Relies on the bash-completion library helpers
# _get_cword/_get_pword/_filedir being loaded.
if command -v sqlmap > /dev/null; then
_sqlmap()
{
    local cur prev
    COMPREPLY=()
    cur=$(_get_cword)
    prev=$(_get_pword)
    # First: option-specific completions keyed on the previous word.
    case $prev in
        # Complete tamper-script names from $tamper — assumed to be set by
        # the surrounding environment; TODO confirm where it is defined.
        --tamper)
            COMPREPLY=( $( compgen -W "$tamper" -- "$cur" ) )
            return 0
            ;;
        # Options that take a file or directory argument.
        --output-dir|-t|-l|-m|-r|--load-cookies|--proxy-file|--sql-file|--shared-lib|--file-write)
            _filedir
            return 0
            ;;
        -c)
            _filedir ini
            return 0
            ;;
        --method)
            COMPREPLY=( $( compgen -W 'GET POST PUT' -- "$cur" ) )
            return 0
            ;;
        --auth-type)
            COMPREPLY=( $( compgen -W 'Basic Digest NTLM PKI' -- "$cur" ) )
            return 0
            ;;
        --tor-type)
            COMPREPLY=( $( compgen -W 'HTTP SOCKS4 SOCKS5' -- "$cur" ) )
            return 0
            ;;
        -v)
            COMPREPLY=( $( compgen -W '1 2 3 4 5 6' -- "$cur" ) )
            return 0
            ;;
        --dbms)
            COMPREPLY=( $( compgen -W 'mysql mssql access postgres' -- "$cur" ) )
            return 0
            ;;
        --level|--crawl)
            COMPREPLY=( $( compgen -W '1 2 3 4 5' -- "$cur" ) )
            return 0
            ;;
        --risk)
            COMPREPLY=( $( compgen -W '0 1 2 3' -- "$cur" ) )
            return 0
            ;;
        --technique)
            COMPREPLY=( $( compgen -W 'B E U S T Q' -- "$cur" ) )
            return 0
            ;;
        -s)
            _filedir sqlite
            return 0
            ;;
        --dump-format)
            COMPREPLY=( $( compgen -W 'CSV HTML SQLITE' -- "$cur" ) )
            return 0
            ;;
        -x)
            _filedir xml
            return 0
            ;;
    esac
    # NOTE(review): the glob '*' matches any word, so this branch always runs
    # when no case above returned; it completes from the full option list.
    if [[ "$cur" == * ]]; then
        COMPREPLY=( $( compgen -W '-h --help -hh --version -v -d -u --url -l -x -m -r -g -c --method \
                    --data --param-del --cookie --cookie-del --load-cookies \
                    --drop-set-cookie --user-agent --random-agent --host --referer \
                    --headers --auth-type --auth-cred --auth-private --ignore-401 \
                    --proxy --proxy-cred --proxy-file --ignore-proxy --tor --tor-port \
                    --tor-type --check-tor --delay --timeout --retries --randomize \
                    --safe-url --safe-freq --skip-urlencode --csrf-token --csrf-url \
                    --force-ssl --hpp --eval -o --predict-output --keep-alive \
                    --null-connection --threads -p --skip --dbms --dbms-cred \
                    --os --invalid-bignum --invalid-logical --invalid-string \
                    --no-cast --no-escape --prefix --suffix --tamper --level \
                    --risk --string --not-string --regexp --code --text-only \
                    --titles --technique --time-sec --union-cols --union-char \
                    --union-from --dns-domain --second-order -f --fingerprint \
                    -a --all -b --banner --current-user --current-db --hostname \
                    --is-dba --users --passwords --privileges --roles --dbs --tables \
                    --columns --schema --count --dump --dump-all --search --comments \
                    -D -T -C -X -U --exclude-sysdbs --where --start --stop \
                    --first --last --sql-query --sql-shell --sql-file --common-tables \
                    --common-columns --udf-inject --shared-lib --file-read --file-write \
                    --file-dest --os-cmd --os-shell --os-pwn --os-smbrelay --os-bof \
                    --priv-esc --msf-path --tmp-path --reg-read --reg-add --reg-del \
                    --reg-key --reg-value --reg-data --reg-type -s -t --batch \
                    --charset --crawl --csv-del --dump-format --eta --flush-session \
                    --forms --fresh-queries --hex --output-dir --parse-errors \
                    --pivot-column --save --scope --test-filter --update \
                    -z --alert --answers --beep --check-waf --cleanup \
                    --dependencies --disable-coloring --gpage --identify-waf \
                    --mobile --page-rank --purge-output --smart \
                    --sqlmap-shell --wizard' -- "$cur" ) )

        # this removes any options from the list of completions that have
        # already been specified somewhere on the command line, as long as
        # these options can only be used once (in a word, "options", in
        # opposition to "tests" and "actions", as in the find(1) manpage).
        onlyonce=' -h --help -hh --version -v -d -u --url -l -x -m -r -g -c \
            --drop-set-cookie --random-agent \
            --ignore-401 \
            --ignore-proxy --tor \
            --check-tor \
            --skip-urlencode \
            --force-ssl --hpp -o --predict-output --keep-alive \
            --null-connection -p \
            --invalid-bignum --invalid-logical --invalid-string \
            --no-cast --no-escape \
            --text-only \
            --titles \
            -f --fingerprint \
            -a --all -b --banner --current-user --current-db --hostname \
            --is-dba --users --passwords --privileges --roles --dbs --tables \
            --columns --schema --count --dump --dump-all --search --comments \
            -D -T -C -X -U --exclude-sysdbs \
            --sql-shell --common-tables \
            --common-columns --udf-inject \
            --os-shell --os-pwn --os-smbrelay --os-bof \
            --priv-esc --reg-read --reg-add --reg-del \
            -s -t --batch \
            --eta --flush-session \
            --forms --fresh-queries --hex --parse-errors \
            --save --update \
            -z --beep --check-waf --cleanup \
            --dependencies --disable-coloring --identify-waf \
            --mobile --page-rank --purge-output --smart \
            --sqlmap-shell --wizard '
        COMPREPLY=( $( \
            (while read -d ' ' i; do
                [[ -z "$i" || "${onlyonce/ ${i%% *} / }" == "$onlyonce" ]] &&
                    continue
                # flatten array with spaces on either side,
                # otherwise we cannot grep on word boundaries of
                # first and last word
                COMPREPLY=" ${COMPREPLY[@]} "
                # remove word from list of completions
                COMPREPLY=( ${COMPREPLY/ ${i%% *} / } )
            done
            printf '%s ' "${COMPREPLY[@]}") <<<"${COMP_WORDS[@]}"
        ) )
    # else
    #    _filedir bat
    fi
}
complete -F _sqlmap sqlmap
fi
| true
|
eee005a44b0f385c3f3b5a2e05805d7b66c5696d
|
Shell
|
shodi/PI-V
|
/ffmpeg_verify.sh
|
UTF-8
| 109
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install ffmpeg via apt if it is not already on PATH.
# FIX: the original `if [$(command -v ffmpeg) == '']` was a syntax error —
# no spaces around the brackets, and `==` is not POSIX `test` syntax.
if ! command -v ffmpeg >/dev/null 2>&1; then
    sudo apt-get update
    sudo apt-get install ffmpeg -y
fi
| true
|
3c8ecfa33593809098f215e3841ebd73ff0f6d6e
|
Shell
|
stevenhoneyman/buildstuff
|
/buildstuff.sh
|
UTF-8
| 35,565
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash -e
#
# buildstuff.sh - Compile musl libc, then busybox & some libs
# 	2014-10-02 Steven Honeyman <stevenhoneyman at gmail com>
#
##
# Refuse to run as root: the script compiles into fixed prefixes and a
# mistake would be destructive. (NOTE: the -e in the shebang is lost if the
# script is invoked as `bash buildstuff.sh`.)
if [ $UID -eq 0 ]; then
	echo "Don't run this as root; are you crazy?!"
	exit 64
fi
## destination prefix for musl libc
export _pfx=/musl
## destination for compiles using glibc
export _bin=$HOME/bin
## configs, patches, etc
export _breqs=/opt/reqs
## local already cloned git sources
export _gitdir=$HOME/git
## additional busybox applets
export _bbext=$HOME/bbext
## temp dir for compiling
export _tmp=$(mktemp -d)
## clone depth for stuff you don't have already.
## the lower the number, the more chance of a version generator problem.
## set to 1 if you don't care, or have a crap internet connection
export _gitdepth=100
## comment this line out to use latest (or uncomment to use specific snapshot)
#export _ncurses='ncurses-5.9-20140927.tgz'
# Start from a clean toolchain environment; everything below builds against
# musl via the musl-gcc wrapper with size-optimized flags.
unset CC CXX CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
export CC="musl-gcc"
export CXX=/bin/false
export CFLAGS='-s -Os -march=x86-64 -mtune=generic -pipe -fno-strict-aliasing -fomit-frame-pointer -falign-functions=1 -falign-jumps=1 -falign-labels=1 -falign-loops=1 -fno-asynchronous-unwind-tables -fno-unwind-tables -fvisibility=hidden -D_GNU_SOURCE'
# Saved so individual packages can restore the baseline after tweaking CFLAGS.
export _orig_CFLAGS="${CFLAGS}"
#######################
#if [ -e "$_pfx" ] && [ -z "$NODIRCHECK" ]; then
#	echo "$_pfx already exists, delete it and re-run"
#	exit 1
#fi
mkdir -p "$_pfx" || exit 1
# Colored status helpers: msgN prints "==> <args>" in ANSI color 9N and
# resets the terminal color afterwards.
msg1() { echo -e "\e[91m==> $@\e[0m"; }  # 91 = red
msg2() { echo -e "\e[92m==> $@\e[0m"; }  # 92 = green
msg3() { echo -e "\e[93m==> $@\e[0m"; }  # 93 = yellow
msg4() { echo -e "\e[94m==> $@\e[0m"; }  # 94 = blue
msg5() { echo -e "\e[95m==> $@\e[0m"; }  # 95 = magenta
msg6() { echo -e "\e[96m==> $@\e[0m"; }  # 96 = cyan
msg7() { echo -e "\e[97m==> $@\e[0m"; }  # 97 = white
# Export them so subshells spawned later can use them too.
for i in {1..7}; do export -f msg$i ; done
# git_pkg_ver NAME [CONFIG_HEADER]
# Print "NAME <version>-<yyyymmdd>.<hash>" for the git checkout in the
# current directory. The version is scraped from PACKAGE_VERSION in a
# configure-generated header (config.h, include/config.h, lib/config.h, or
# $2 when given); if none exists, only "NAME <yyyymmdd>.<hash>" is printed.
# NOTE(review): cf is not declared local, so it leaks into the caller's
# scope — confirm whether that is relied upon.
function git_pkg_ver() {
	[[ -f "config.h" ]] && cf="config.h"
	[[ -f "include/config.h" ]] && cf="include/config.h"
	[[ -f "lib/config.h" ]] && cf="lib/config.h"
	[[ ! -z "$2" ]] && cf="$2"
	if [[ -f "$cf" ]]; then
		# tr -d - strips the dashes from the short date (2014-10-02 -> 20141002).
		echo $(awk '/PACKAGE_VERSION/ {gsub(/"/,"",$3); print "'$1' "$3}' $cf)-$(git log -1 --format=%cd.%h --date=short|tr -d -)
	else
		echo "$1 $(git log -1 --format=%cd.%h --date=short|tr -d -)"
	fi
}
# Remove any existing entries starting with "$1" from the version manifest
# in $_pfx, so the caller can append a fresh "<name> <version>" line.
# (Despite the name, this only deletes — the append happens at call sites.)
function new_pkg_ver() {
	local entry="$1"
	sed -i "/^${entry}/d" "${_pfx}/version"
}
# cc_wget URL OUTFILE
# Download a single C source file and compile it straight to OUTFILE with
# the current $CC/$CFLAGS/$LDFLAGS. Silently a no-op with fewer than 2 args.
function cc_wget() {
	[[ $# -lt 2 ]] && return
	wget -nv "$1" -O - | $CC $CFLAGS $LDFLAGS -x c - -s -o "$2"
}
# download_source NAME
# Fetch the source for NAME into "$_tmp/NAME-src". Most packages map to a
# git URL below (shallow clone of depth $_gitdepth, falling back to a full
# single-branch clone); the stragglers use cvs/svn/tarball downloads that
# populate "$_tmp/NAME-src" directly and leave url="no".
function download_source() {
	local url="no"
	case $1 in
		musl) url="git://git.musl-libc.org/musl" ;;
		*-headers) url="git://github.com/sabotage-linux/kernel-headers.git" ;;
		busybox) url="git://github.com/stevenhoneyman/busybox.git" ;;
		#url="git://git.busybox.net/busybox" ;;
		acl) url="git://git.sv.gnu.org/acl.git" ;;
		attr) url="git://git.sv.gnu.org/attr.git" ;;
		bash) url="git://git.sv.gnu.org/bash.git" ;;
		bison) url="git://git.sv.gnu.org/bison.git" ;;
		coreutils) url="git://git.sv.gnu.org/coreutils.git" ;;
		# cryptsetup) url="git://git.kernel.org/pub/scm/utils/cryptsetup/cryptsetup.git" ;;
		curl) url="git://github.com/bagder/curl.git" ;;
		cv) url="git://github.com/Xfennec/cv.git" ;;
		dash) url="git://git.kernel.org/pub/scm/utils/dash/dash.git" ;;
		diffutils) url="git://git.sv.gnu.org/diffutils.git" ;;
		dropbear) url="git://github.com/mkj/dropbear.git" ;;
		e2fsprogs) url="git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git" ;;
		ethtool) url="git://git.kernel.org/pub/scm/network/ethtool/ethtool.git" ;;
		# eudev) url="git://github.com/gentoo/eudev.git" ;;
		file) url="git://github.com/file/file.git" ;;
		findutils) url="git://git.sv.gnu.org/findutils.git" ;;
		flex) url="git://git.code.sf.net/p/flex/flex" ;;
		gawk) url="git://git.sv.gnu.org/gawk.git" ;;
		gnulib) url="git://git.sv.gnu.org/gnulib.git" ;;
		gzip) url="git://git.sv.gnu.org/gzip.git" ;;
		# hexedit) url="git://github.com/pixel/hexedit.git" ;;
		htop) url="git://github.com/hishamhm/htop.git" ;;
		# icoutils) url="git://git.sv.gnu.org/icoutils.git" ;;
		iproute2) url="git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git" ;;
		iptables) url="git://git.netfilter.org/iptables.git" ;;
		iw) url="git://git.kernel.org/pub/scm/linux/kernel/git/jberg/iw.git" ;;
		kmod) url="git://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git" ;;
		lbzip2) url="git://github.com/kjn/lbzip2.git" ;;
		libnl-tiny) url="git://github.com/sabotage-linux/libnl-tiny.git" ;;
		# libpng) url="git://git.code.sf.net/p/libpng/code" ;;
		lz4) url="git://github.com/Cyan4973/lz4.git" ;;
		make) url="git://git.sv.gnu.org/make.git" ;;
		# md5deep) url="git://github.com/jessek/hashdeep.git" ;;
		mksh) url="git://github.com/MirBSD/mksh.git" ;;
		multitail) url="git://github.com/flok99/multitail.git" ;;
		nasm) url="git://repo.or.cz/nasm.git" ;;
		nbwmon) url="git://github.com/causes-/nbwmon.git" ;;
		ncdu) url="git://g.blicky.net/ncdu.git" ;;
		openssl) url="git://git.openssl.org/openssl.git" ;;
		patch) url="git://git.sv.gnu.org/patch.git" ;;
		patchelf) url="git://github.com/NixOS/patchelf.git" ;;
		pigz) url="git://github.com/madler/pigz.git" ;;
		# BUG FIX: this arm had `url-"..."` (a command, not an assignment),
		# which would have failed with "command not found" at runtime.
		pipetoys) url="git://github.com/AndyA/pipetoys.git" ;;
		pixelserv) url="git://github.com/h0tw1r3/pixelserv.git" ;;
		pkgconf) url="git://github.com/pkgconf/pkgconf.git" ;;
		readline) url="git://git.sv.gnu.org/readline.git" ;;
		screen) url="git://git.sv.gnu.org/screen.git" ;;
		sed) url="git://git.sv.gnu.org/sed.git" ;;
		sstrip) url="git://github.com/BR903/ELFkickers.git" ;;
		strace) url="git://git.code.sf.net/p/strace/code" ;;
		tar) url="git://git.sv.gnu.org/tar.git" ;;
		# tcc) url="git://repo.or.cz/tinycc.git" ;;
		util-linux) url="git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git" ;;
		wget) url="git://git.sv.gnu.org/wget.git" ;;
		wpa_supplicant) url="git://w1.fi/hostap.git" ;;
		xz) url="http://git.tukaani.org/xz.git" ;;
		yasm) url="git://github.com/yasm/yasm.git" ;;
		zlib) url="git://github.com/madler/zlib.git" ;;
		## there's always a few awkward ones...
		# distcc) svn co http://distcc.googlecode.com/svn/trunk/ "$_tmp/distcc-src" ;;
		mdocml) (cd "$_tmp" && CVS_RSH=ssh cvs -d :ext:anoncvs@mdocml.bsd.lv:/cvs co -d mdocml-src mdocml) ;;
		minised) svn co http://svn.exactcode.de/minised/trunk/ "$_tmp/minised-src" ;;
		nano) svn co svn://svn.savannah.gnu.org/nano/trunk/nano "$_tmp/nano-src" ;;
		ncurses) wget -nv ftp://invisible-island.net/ncurses/current/${_ncurses:-ncurses.tar.gz} -O-| tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		netcat) svn co svn://svn.code.sf.net/p/netcat/code/trunk "$_tmp/netcat-src" ;;
		pax-utils) (cd "$_tmp" && cvs -d :pserver:anonymous@anoncvs.gentoo.org:/var/cvsroot co -d ${1}-src gentoo-projects/${1}) ;;
		pcre) svn co svn://vcs.exim.org/pcre/code/trunk "$_tmp/pcre-src" ;;
		popt) (cd "$_tmp" && cvs -d :pserver:anonymous@rpm5.org:/cvs co -d popt-src popt) ;;
		tree) wget -nv http://mama.indstate.edu/users/ice/tree/src/tree-1.7.0.tgz -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		wol) svn co svn://svn.code.sf.net/p/wake-on-lan/code/trunk "$_tmp/wol-src" ;;
		## and a few that I can't find a source repo or daily snapshot of...
		atop) wget -nv http://www.atoptool.nl/download/$(wget -qO- http://atoptool.nl/downloadatop.php|grep -om1 'atop-[0-9.-]*tar\.gz'|head -n1) -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		bc) wget -nv ftp://alpha.gnu.org/gnu/bc/bc-1.06.95.tar.bz2 -O-|tar jxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		cpuid) wget -nv http://etallen.com/${1}/$(wget -qO- "http://etallen.com/$1/?C=M;O=D;F=1;P=$1*src*"|grep -om1 "$1.*gz") -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		dhcpcd) wget -nv http://roy.marples.name/downloads/dhcpcd/$(wget http://roy.marples.name/downloads/dhcpcd/ -qO-|grep -o 'dhcpcd-[0-9.-]*tar\.bz2'|sort -ruV|head -n1) -O-|tar jxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		kwakd) wget -nv https://kwakd.googlecode.com/files/kwakd-0.5.tar.gz -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		less) wget -nv http://greenwoodsoftware.com/less/$(wget http://greenwoodsoftware.com/less/download.html -qO-|grep -om1 'less-[0-9]*\.tar\.gz') -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		libedit) wget -nv http://thrysoee.dk/editline/$(wget http://thrysoee.dk/editline/ -qO-|grep -om1 'libedit[0-9.-]*\.tar\.gz'|head -n1) -O-|tar zxf - -C "$_tmp" && mv "$_tmp"/${1}-* "$_tmp"/${1}-src ;;
		## and then there's this! wtf? also, requiring unzip, to unzip unzip is stupid.
		# unzip) (wget http://antinode.info/ftp/info-zip/$(wget -qO- 'http://antinode.info/ftp/info-zip/?C=M;O=D;P=unzip*.zip'|grep -o 'unzip[0-9a-zA-Z_.-]*\.zip'|head -n1) -O "$_tmp/unzip.zip"
		#	unzip "$_tmp"/unzip.zip -d "$_tmp" && rm "$_tmp/unzip.zip" && mv "$_tmp"/unzip* "$_tmp"/unzip-src ) & ;;
		*) url="no" ;;
	esac
	# Only git-clone when one of the arms above set a real URL.
	[[ "$url" == "no" ]] && : || \
	git clone --single-branch --depth=${_gitdepth} $url "${_tmp}/${1}-src" || \
	git clone --single-branch $url "${_tmp}/${1}-src"
}
export -f download_source
# Fetch sources for package $1: when a local clone exists under $_gitdir,
# refresh it with `git pull` and copy it into the build tmp dir; otherwise
# download a fresh copy via download_source.
function get_source() {
	local pkg="$1"
	local cached="$_gitdir/$pkg"
	if [ ! -d "$cached" ]; then
		msg5 "Downloading $pkg source..."
		download_source "$pkg"
		return
	fi
	msg3 "Updating $cached"
	cd "$cached" && git pull
	msg6 "Copying $pkg source"
	cp -r "$cached" "$_tmp/${pkg}-src"
}
# Reuse a pre-downloaded GNUlib checkout when the caller exported
# GNULIB_SRCDIR; otherwise fetch one into the temp dir for this run.
if [[ ! -z "$GNULIB_SRCDIR" ]]; then
	echo "Using GNUlib from $GNULIB_SRCDIR"
else
	msg5 'Downloading GNUlib. Consider setting $GNULIB_SRCDIR for faster builds'
	download_source gnulib
	export GNULIB_SRCDIR="${_tmp}/gnulib-src"
fi
# Prefer a previously-built pkg-config from the musl prefix when present.
if [[ -e "$_pfx/bin/pkg-config" ]]; then
	export PKG_CONFIG="$_pfx/bin/pkg-config"
fi
# TODO: if !gcc; then
# Static-only configure flags used by the per-package build steps below.
export STATIC_OPTS="--disable-shared --enable-static"
for inst in $@; do
get_source $inst
case $inst in
musl)
cd "$_tmp/musl-src"
CC=/bin/gcc CFLAGS="-Os -pipe" LDFLAGS="" ./configure --prefix="$_pfx" --disable-shared --disable-debug
make && make install || exit 3
echo "musl $(<VERSION)-$(git log -1 --format=%cd.%h --date=short|tr -d -)" >>"$_pfx/version"
if [[ -x "/usr/bin/ccache" ]]; then
msg2 'ccache found, using that with $CC'
export CC="ccache $_pfx/bin/musl-gcc"
else
export CC="$_pfx/bin/musl-gcc"
fi
get_source musl-kernel-headers
cd "$_tmp/musl-kernel-headers-src"
make ARCH=x86_64 prefix="$_pfx" install
echo "kernel-headers $(git describe --tags|cut -d'-' -f'1,2').$(git log -1 --format=%cd.%h --date=short|tr -d -)" >>"$_pfx/version"
;; ### musl */
busybox)
cd "$_tmp/busybox-src"
#if [ -d "$_bbext" ]; then
# cp -v "$_bbext/nproc/nproc.c" "coreutils/nproc.c"
# cp -v "$_bbext/acpi/acpi.c" "miscutils/acpi.c"
# cp -v "$_bbext/bin2c/bin2c.c" "miscutils/bin2c.c"
# cp -v "$_bbext/uuidgen/uuidgen.c" "miscutils/uuidgen.c"
# cp -v "$_bbext/nologin/nologin.c" "util-linux/nologin.c"
#fi
#patch -p1 -i "$_breqs/busybox-1.22-dmesg-color.patch"
#patch -p1 -i "$_breqs/busybox-1.22-httpd-no-cache.patch"
#patch -p1 -i "$_breqs/busybox-1.22-ifplugd-musl-fix.patch"
#patch -p1 -i "$_breqs/busybox-1.22-fix-od-octal.patch"
#patch -p1 -i "$_breqs/busybox-1.22-fix-vi-eof.patch"
#patch -p1 -i "$_breqs/busybox-1.22-fix-vi-newfile.patch"
#patch -p1 -i "$_breqs/busybox-1.22-fix-syslogd-missing-initializer.patch"
cp -v "$_breqs/busybox.config" "$_tmp/busybox-src/.config"
[ -z "$CONFIG" ] || make gconfig
cp .config "$_pfx"/busybox.config
make CC="$_pfx/bin/musl-gcc" && install -Dm755 busybox "$_pfx"/bin/busybox || exit 3
echo busybox $(sed 's/.git//' .kernelrelease)-$(git log -1 --format='%cd.%h' --date=short|tr -d '-') >>"$_pfx/version"
;; ### busybox */
pkgconf)
cd "$_tmp/pkgconf-src"
./autogen.sh
./configure --prefix=${_pfx} CFLAGS="${CFLAGS/-D_GNU_SOURCE/}"
make && make check && make install && strip -s ${_pfx}/bin/pkgconf || exit 3
ln -s "$_pfx"/bin/pkgconf "$_pfx/bin/pkg-config"
git_pkg_ver "pkgconf" >>"$_pfx/version"
export PKG_CONFIG="$_pfx/bin/pkg-config"
;; ### pkgconf */
ncurses)
cd "$_tmp/ncurses-src"
## Generated by: sh -e ./tinfo/MKfallback.sh /usr/share/terminfo ../misc/terminfo.src /usr/bin/tic linux vt100 xterm xterm-256color >fallback.c
cp ${_breqs}/ncurses-fallback.c ncurses/fallback.c
#
CFLAGS="$CFLAGS -fPIC" ./configure --prefix="$_pfx" --sysconfdir=/etc \
--enable-{widec,symlinks,pc-files} --disable-rpath \
--without-{ada,cxx-binding,debug,develop,manpages,shared,tests} \
--with-{default-terminfo-dir,terminfo-dirs}=/usr/share/terminfo \
--disable-db-install --with-fallbacks="linux vt100 xterm xterm-256color" #--disable-home-terminfo
make && make install || exit 3
cp -vnpP "$_pfx"/include/ncurses*/* "$_pfx/include/"
awk '/NCURSES_VERSION_STRING/ {gsub(/"/,"",$3); print "ncurses "$3}' config.status >>"$_pfx/version"
;; ### ncurses */
zlib)
cd "$_tmp/zlib-src"
CFLAGS="$CFLAGS -fPIC" ./configure --prefix=${_pfx} --static --64
make && make test && make install || exit 3
make -C contrib/minizip CC=musl-gcc CFLAGS="$CFLAGS"
make -C contrib/untgz CC=musl-gcc CFLAGS="$CFLAGS"
for b in minigzip{,64} contrib/minizip/mini{unz,zip} contrib/untgz/untgz; do
strip -s $b && cp -v $b "$_pfx/bin/"
done
echo "zlib $(git describe --tags|tr '-' ' ')" >>"$_pfx/version"
;; ### zlib */
popt)
cd "$_tmp/popt-src"
./autogen.sh
CFLAGS="$CFLAGS -fPIC" ./configure --prefix=${_pfx} --disable-{nls,doxygen,shared}
make && make install-strip
awk '/PACKAGE_VERSION/ {gsub(/"/,"",$3); print "popt "$3}' config.h >>"$_pfx/version"
;; ### popt */
make)
cd "$_tmp/make-src"
sed -i '/^SUBDIRS/s/doc//' Makefile.am
autoreconf -fi
patch -p1 -i ${_breqs}/make4-git_bug23273.patch
./configure --prefix=${_pfx} --sysconfdir=/etc \
--disable-nls --disable-rpath --without-guile
make && strip -s make && cp make "$_pfx/bin/"
git_pkg_ver "make" >>"$_pfx/version"
;; ### make */
htop)
cd "$_tmp/htop-src"
./autogen.sh
./configure --prefix=${_pfx} --sysconfdir=/etc
make && strip -s htop && cp htop "$_pfx/bin/"
git_pkg_ver "htop" >>"$_pfx/version"
;; ### htop */
nano)
cd "$_tmp/nano-src"
./autogen.sh
./configure --prefix=${_pfx} --sysconfdir=/etc --datarootdir=/usr/share \
--disable-{nls,extra,speller,browser,mouse,wrapping} \
--disable-{multibuffer,tabcomp,justify,operatingdir} \
--enable-{color,nanorc,utf8}
make && strip -s src/nano && cp src/nano "$_pfx/bin/"
awk '/PACKAGE_VERSION/ {gsub(/"/,"",$3); print "nano "$3"'$(svnversion)'"}' config.h >>"$_pfx/version"
;; ### nano */
dropbear) ## *** 272kb with zlib, 227kb without ***
cd "$_tmp/dropbear-src"
autoreconf -fi
./configure --prefix=${_pfx} --sysconfdir=/etc --datarootdir=/usr/share --sbindir=/usr/bin \
--disable-{lastlog,utmp,utmpx,wtmp,wtmpx,pututline,pututxline,pam} --disable-zlib
sed -e '/#define INETD_MODE/d' \
-e '/#define DROPBEAR_BLOWFISH/d' \
-e '/#define DROPBEAR_ECDH/d' \
-e '/#define DROPBEAR_ECDSA/d' \
-e '/#define DROPBEAR_MD5_HMAC/d' \
-e '/#define DROPBEAR_TWOFISH/d' \
-e '/#define SFTPSERVER_PATH/d' \
-e '/DEFAULT_KEEPALIVE/s/0/30/' -i options.h
sed -i 's|-dropbear_" DROPBEAR_VERSION|-sshserver_" "2014"|' sysoptions.h
make PROGRAMS="dropbear dropbearkey dbclient" MULTI=1
strip -s dropbearmulti && cp -v dropbearmulti "$_pfx/bin/"
for p in dropbear dropbearkey dbclient ssh; do ln -s dropbearmulti "$_pfx/bin/$p"; done
echo "dropbear $(awk '/define DROPBEAR_VERSION/ {gsub(/"/,"",$3); print $3}' sysoptions.h)-$(git log -1 --format=%cd.%h --date=short|tr -d -)" >>"$_pfx/version"
;; ### dropbear */
xxd)
cc_wget 'https://vim.googlecode.com/hg/src/xxd/xxd.c' "${_pfx}/bin/xxd"
echo "xxd 1.10" >>"$_pfx/version"
;; ### xxd */
strace)
cd "$_tmp/strace-src"
./bootstrap
./configure --prefix=${_pfx}
make && strip -s strace && cp -v strace "$_pfx/bin/"
git_pkg_ver "strace" >>"$_pfx/version"
;; ### strace */
multitail)
cd "$_tmp/multitail-src"
_MT_VER="$(sed 's/VERSION=//' version)-$(git log -1 --format=%cd.%h --date=short|tr -d -)"
sed -i '/ts...mt_started/d; /show_f1 =/d; s/if (show_f1)/if (0)/g' mt.c
${CC} ${CFLAGS} -s *.c -lpanelw -lncursesw -lm -lutil ${LDFLAGS} -o multitail -DUTF8_SUPPORT=yes -DCONFIG_FILE=\"/etc/multitail.conf\" -DVERSION=\"${_MT_VER}\"
install -Dm755 multitail "${_pfx}/bin/multitail"
install -Dm644 multitail.conf "${_pfx}/etc/multitail.conf"
install -Dm644 multitail.1 "${_pfx}/share/man/man1/multitail.1"
echo "multitail ${_MT_VER}" >>"$_pfx/version"
;; ### multitail */
cv)
cd "$_tmp/cv-src"
${CC} ${CFLAGS} -s *.c -lncursesw ${LDFLAGS} -o "${_pfx}"/bin/cv
echo $(awk '/VERSION/ {gsub(/"/,"",$3); print "'cv' "$3}' cv.h)-$(git log -1 --format=%cd.%h --date=short|tr -d -) >>"$_pfx/version"
;; ### cv */
attr)
cd "$_tmp/attr-src"
./autogen.sh
CFLAGS="$CFLAGS -fPIC" ./configure --prefix=${_pfx} --disable-{nls,rpath,shared,debug}
make && make install-binPROGRAMS install-pkgconfDATA install-pkgincludeHEADERS
git_pkg_ver "attr" >>"$_pfx/version"
;; ### attr */
acl) #+# requires: attr
cd "$_tmp/acl-src"
./autogen.sh
CFLAGS="$CFLAGS -fPIC" ./configure --prefix=${_pfx} --disable-{nls,rpath,shared,debug}
make && make install-binPROGRAMS install-pkgconfDATA install-pkgincludeHEADERS install-sysincludeHEADERS
git_pkg_ver "acl" >>"$_pfx/version"
;; ### acl */
# TODO: check include/sys/acl.h, include/attr/xattr.h exist before starting coreutils build
coreutils) #+# requires: acl, attr
cd "$_tmp/coreutils-src"
./bootstrap --skip-po
## Werror breaks compile
sed -i '/as_fn_append CFLAGS.*Werror/d' configure
## visual tweaks
sed -i 's|online help: <%s>\(.n.., PACKAGE_NAME, PACKAGE_\)URL|%s\1VERSION|' src/system.h
sed -i '/redundant message/,/program . . invocation/d' src/system.h
./configure --prefix=${_pfx} --sysconfdir=/etc --disable-{nls,rpath,assert} \
--enable-{acl,xattr} --without-gmp --enable-no-install-program=stdbuf
make && make install-strip
## let's have the multicall binary as well
./configure --prefix=${_pfx} --sysconfdir=/etc --disable-{nls,rpath,assert} \
--enable-{acl,xattr} --without-gmp --enable-no-install-program=stdbuf --enable-single-binary=symlinks
make && strip -s src/coreutils && cp -v src/coreutils "$_pfx/bin/"
git_pkg_ver "coreutils" | cut -f1,2,3 -d. >>"$_pfx/version"
;; ### coreutils */
util-linux)
cd "$_tmp/util-linux-src"
./autogen.sh
## sbin... pfft...
sed -i "/^usrsbin_execdir=/ s|/sbin|/bin|g" configure
## hackish fix for musl libc
# TODO: find an actual fix for logger ntp_gettime
sed -i 's|ntp_gettime(&ntptv) == TIME_OK|0|g' misc-utils/logger.c
## minor tweaks
patch -p1 -i ${_breqs}/util-linux-nicer-fdisk.patch
## 1 line descriptions ##
mv sys-utils/swapoff.8 sw8 && cp sys-utils/swapon.8 sys-utils/swapff.8
for mp in $(find *utils -name *.1 -o -name *.8|sed 's%schedutils/ionice.1%%'); do
sed -i "s#^.*fputs(USAGE_HEADER, \([a-z]*\)#\tfputs(_(\"$(grep -m1 "^$(basename ${mp%%.*})" "$mp"|sed s@\\\"@\'@g)\\\\n\"), \1);\n&#" ${mp%%.*}.c || true
done
mv sw8 sys-utils/swapoff.8
### / ###
./configure --prefix=${_pfx} --without-{python,user,udev,systemd} --disable-{rpath,nls,makeinstall-chown,shared} \
--disable-{bash-completion,use-tty-group,pylibmount,wall,minix,mesg,uuidd,write,cramfs,switch_root} \
--enable-fs-paths-extra=/usr/bin --localstatedir=/run --sbindir=${_pfx}/bin --with-pic
make && \
make install-binPROGRAMS install-sbinPROGRAMS install-usrbin_execPROGRAMS install-usrsbin_execPROGRAMS \
install-nodist_blkidincHEADERS install-nodist_mountincHEADERS install-nodist_smartcolsincHEADERS \
install-uuidincHEADERS install-exec install-pkgconfigDATA
git_pkg_ver "util-linux" >>"$_pfx/version"
;; ### util-linux */
tree)
cd "$_tmp/tree-src"
make prefix=${_pfx} CC=${CC} CFLAGS="${CFLAGS/-D_GNU_SOURCE/} -DLINUX -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64"
echo "tree 1.7.0" >>"$_pfx/version"
;; ### tree */
iptables) ## ?? libnftnl libmnl ??
cd "$_tmp/iptables-src"
./autogen.sh
sed -i '/^#inc.*types/a#include <sys/types.h>' include/linux/netfilter.h
sed -i '/^#inc.*6_tab/a#include <sys/types.h>' iptables/xshared.h
CFLAGS="$CFLAGS -D_GNU_SOURCE -D__GLIBC__=2 \
-DTCPOPT_WINDOW=2 -DTCPOPT_MAXSEG=2 -DTCPOPT_SACK_PERMITTED=4 -DTCPOPT_SACK=5 -DTCPOPT_TIMESTAMP=8" \
./configure --prefix=${_pfx} --sbindir=${_pfx}/bin --sysconfdir=/etc --disable-{shared,ipv6,devel,nftables}
make && make install-strip
git_pkg_ver "iptables" >>"$_pfx/version"
;; ### iptables */
screen)
cd "$_tmp/screen-src/src"
mkdir -p ${_pfx}/extra/screen/terminfo
sed -i "s|tic|tic -o $_pfx/extra/screen|; /chmod/d" Makefile.in
./autogen.sh
./configure --prefix=${_pfx} --disable-pam --enable-{colors256,rxvt_osc,telnet} \
--with-pty-group=5 --with-socket-dir=/run/screens --with-sys-screenrc=/etc/screenrc
make && make install
rm -f config.h # yeah, whatever - fix your PACKAGE_VERSION then, GNU!
awk '/^VERSION/ {print "#define PACKAGE_VERSION "$3}' Makefile >config.h
git_pkg_ver "screen" >>"$_pfx/version"
;; ### screen */
dash)
cd "$_tmp/dash-src"
./autogen.sh
CC=${CC} CFLAGS="$CFLAGS -ffunction-sections -fdata-sections" LDFLAGS="-Wl,--gc-sections" \
./configure --prefix=${_pfx} --sysconfdir=/etc
make && make install-strip
git_pkg_ver "dash" >>"$_pfx/version"
;; ### dash */
mksh)
cd "$_tmp/mksh-src"
CPPFLAGS="-DMKSH_SMALL_BUT_FAST -DMKSH_S_NOVI -DMKSH_NOPWNAM" sh ./Build.sh -r -c lto
strip -s ./mksh && cp -v mksh "${_pfx}/bin/"
install -Dm644 mksh.1 "${_pfx}/share/man/man1/mksh.1"
install -Dm644 dot.mkshrc "${_pfx}/etc/skel/.mkshrc"
git_pkg_ver "mksh" >>"$_pfx/version"
;; ### mksh */
readline)
cd "$_tmp/readline-src"
CFLAGS="$CFLAGS -fPIC" ./configure --prefix=${_pfx} --with-curses --disable-shared
make && make install-headers && cp -v lib*.a "${_pfx}/lib/"
git_pkg_ver "readline" >>"$_pfx/version"
;; ### readline */
bash)
cd "$_tmp/bash-src"
CFLAGS="${CFLAGS/-Os/-O2} -DDEFAULT_PATH_VALUE='\"/bin\"' -DSYS_BASHRC='\"/etc/bash.bashrc\"' -DSTANDARD_UTILS_PATH='\"/bin\"' -L${_pfx}/lib" \
./configure --prefix=${_pfx} --disable-nls --without-bash-malloc \
--enable-static-link --enable-readline --with-installed-readline --with-curses
sed -i 's|\(#define PPROMPT\).*$|\1 "[\\\\u@\\\\h \\\\W]\\\\$ "|' config-top.h
sed -i 's|-lcurses|-lncursesw|' Makefile
make && make install-strip
find "${_pfx}/" -name "bashbug*" -delete
git_pkg_ver "bash" >>"$_pfx/version"
;; ### bash */
sstrip)
cd "$_tmp/sstrip-src"
sed -i '/cp doc/d; s/cp /cp -f /g' Makefile
make install prefix=${_pfx} CC="$CC -s" PROGRAMS="elfls objres rebind sstrip"
git_pkg_ver "sstrip" >>"$_pfx/version"
;; ### sstrip */
mesa-utils)
## GLIBC ONLY (needs X11 etc)
mkdir -p "$_tmp/mesa-utils-src" && cd "$_tmp/mesa-utils-src"
wget -nv http://cgit.freedesktop.org/mesa/demos/plain/src/xdemos/glinfo_common.c
wget -nv http://cgit.freedesktop.org/mesa/demos/plain/src/xdemos/glinfo_common.h
wget -nv http://cgit.freedesktop.org/mesa/demos/plain/src/xdemos/glxgears.c
wget -nv http://cgit.freedesktop.org/mesa/demos/plain/src/xdemos/glxinfo.c
gcc $CFLAGS glxinfo.c glinfo_common.c glinfo_common.h $LDFLAGS -lX11 -lGL -o "$_bin"/glxinfo-git -s
gcc $CFLAGS glxgears.c $LDFLAGS -lX11 -lGL -lm -o "$_bin"/glxgears-git -s
git_pkg_ver "mesa-utils" >>"$_pfx/version"
;; ### mesa-utils */
libnl-tiny)
cd "$_tmp/libnl-tiny-src"
make prefix=${_pfx} CC="$CC" CFLAGS="${CFLAGS/-D_GNU_SOURCE/}" ALL_LIBS=libnl-tiny.a install
git_pkg_ver "libnl-tiny" >>"$_pfx/version"
;; ### libnl-tiny */
iproute2)
cd "$_tmp/iproute2-src"
sed -i '/_GLIBC_/d; s/else/if 0/g' include/libiptc/ipt_kernel_headers.h
sed -i '/^TARGET/s/arpd//' misc/Makefile
sed -i '/example/d; s/doc//g' Makefile
make CFLAGS="$CFLAGS -DHAVE_SETNS -I../include" CC="$CC -s" SHARED_LIBS=n PREFIX=${_pfx} SBINDIR=${_pfx}/bin install
git_pkg_ver "iproute2" >>"$_pfx/version"
;; ### iproute2 */
iw)
cd "$_tmp/iw-src"
make prefix=${_pfx} CC="$CC" CFLAGS="$CFLAGS -DCONFIG_LIBNL20 -DLIBNL1_COMPAT -I${_pfx}/include/libnl-tiny" PKG_CONFIG=${_pfx}/bin/pkg-config NLLIBNAME=libnl-tiny
strip -s iw && cp iw "${_pfx}/bin/"
install -Dm644 iw.8 "${_pfx}/share/man/man8/iw.8"
git_pkg_ver "iw" >>"$_pfx/version"
;; ### iw */
xz)
cd "$_tmp/xz-src"
./autogen.sh 2>/dev/null
./configure --prefix=${_pfx} --with-pic \
--disable-{nls,rpath,symbol-versions,debug,werror,lzmadec,lzmainfo,lzma-links,scripts,doc} ${STATIC_OPTS}
make && make install-strip
git_pkg_ver "xz" >>"$_pfx/version"
;; ### xz */
pcre) #+# requires: readline #+#
cd "$_tmp/pcre-src"
./autogen.sh
./configure --prefix=${_pfx} --disable-{cpp,pcregrep-jit} --with-pic \
--enable-unicode-properties --enable-pcretest-libreadline #--enable-pcre16 --enable-pcre32
make && strip -s pcretest && make install-binPROGRAMS install-includeHEADERS install-nodist_includeHEADERS install-libLTLIBRARIES install-pkgconfigDATA
echo "$(awk '/PACKAGE_VERSION/ {gsub(/"/,"",$3); print "pcre "$3}' $cf)-svn$(svnversion)" >>"$_pfx/version"
;; ### pcre */
less)
cd "$_tmp/less-src"
./configure --prefix=${_pfx} --with-regex=regcomp-local
make && make install-strip
echo "less $(sed -n 's|char version.*"\([0-9]*\)".*$|\1|p' version.c)" >>"$_pfx/version"
;; ### less */
nasm)
cd "$_tmp/nasm-src"
./autogen.sh
./configure --prefix=${_pfx}
make nasm ndisasm && make strip && make install
git_pkg_ver "nasm" >>"$_pfx/version"
;; ### nasm */
yasm) #~# makedeps: python #~# # daily snapshots don't need python...
cd "$_tmp/yasm-src"
./autogen.sh
./configure --prefix=${_pfx} --disable-{nls,rpath,debug,maintainer-mode} #,python,python-bindings}
make && strip -s ./*asm
make install-binPROGRAMS install-man
git_pkg_ver "yasm" >>"$_pfx/version"
;; ### yasm */
openssl)
cd "$_tmp/openssl-src"
sed -i 's/-DTERMIO/&S/g' Configure
sed -i 's/defined(linux)/0/' crypto/ui/ui_openssl.c
sed -i '/LD_LIBRARY_PATH/d' Makefile.shared
sed -i '/pod2man/s/sh -c/true &/g; /PREFIX.*MANDIR.*SUFFIX/d' Makefile.org
./config --prefix=${_pfx} --openssldir=/etc/ssl -L${_pfx}/lib -I${_pfx}/include no-dso no-krb5 zlib ${CFLAGS} #no-shared
make depend
make build_libs
make build_apps openssl.pc libssl.pc libcrypto.pc
make INSTALL_PREFIX=$PWD/OUT install_sw
## OUT/etc/* ignored
cp -rv OUT/${_pfx}/* ${_pfx}/
mv "${_pfx}"/bin/c_rehash "${_pfx}"/bin/c_rehash.pl
wget -nv "http://git.pld-linux.org/?p=packages/openssl.git;a=blob_plain;f=openssl-c_rehash.sh" -O "${_pfx}"/bin/c_rehash
echo $(awk '/VERSION_NUMBER/ {gsub(/"/,"",$3); print "openssl "$3}' Makefile)-$(git log -1 --format=%cd.%h --date=short|tr -d -) >>"$_pfx/version"
;; ### openssl */
wpa_supplicant) #+# requires: openssl, zlib
cd "$_tmp/wpa_supplicant-src/wpa_supplicant"
cp defconfig .config
sed -i 's|__uint|uint|g; s|__int|int|g' ../src/drivers/linux_wext.h
sed -i '/wpa_.*s.*LIBS/s/$/& -lz/' Makefile
CFLAGS="$CFLAGS -DCONFIG_LIBNL20=y -I${_pfx}/include/libnl-tiny" CONFIG_LIBNL_TINY=y make
strip -s wpa_{cli,passphrase,supplicant} && make BINDIR=${_pfx}/bin install
install -Dm600 wpa_supplicant.conf "${_pfx}"/etc/wpa_supplicant.conf
git_pkg_ver "wpa_supplicant" >>"$_pfx/version"
;; ### wpa_supplicant */
libedit)
cd "$_tmp/libedit-src"
sed -i 's|-lcurses|-lncursesw|' configure
./configure --prefix=${_pfx} --disable-examples
make && make LN_S=true install-strip
echo $(awk '/PACKAGE_VERSION/ {gsub(/"/,"",$3); print "libedit "$3}' config.h)-$(grep "GE.=" Makefile|cut -d- -f2) >>"$_pfx/version"
;; ### libedit */
flex)
cd "$_tmp/flex-src"
./autogen.sh
sed -i '/doc /d; /tests /d' Makefile.am
./configure --prefix=${_pfx} CXX=/bin/false CXXCPP=/bin/cpp
make -C src flex && strip -s src/flex
make -C src install-binPROGRAMS install-includeHEADERS install-libLTLIBRARIES
git_pkg_ver "flex" >>"$_pfx/version"
;; ### flex */
bc) #+# requires: libedit, ncurses
cd "$_tmp/bc-src"
./configure --prefix=${_pfx} CFLAGS="$CFLAGS -DLIBEDIT"
make LIBL="-ledit -lncursesw" && strip -s bc/bc && cp -v bc/bc "$_pfx/bin/"
echo "bc 1.06.95" >>"$_pfx/version"
;; ### bc */
cpuid)
cd "$_tmp/cpuid-src"
make CC="$CC -s" CFLAGS="$CFLAGS"
cp -v cpuid "$_pfx/bin/"
echo "cpuid $(sed -n 's@^VERSION=\([0-9.]*\).*$@\1@p' Makefile)" >>"$_pfx/version"
;; ### cpuid */
diffutils)
cd "$_tmp/diffutils-src"
./bootstrap --skip-po
./configure --prefix=${_pfx} --disable-nls --disable-rpath
make && make install-strip
git_pkg_ver "diffutils" >>"$_pfx/version"
;; ### diffutils */
patch)
cd "$_tmp/patch-src"
./bootstrap --skip-po
./configure --prefix=${_pfx}
sed -i 's|/usr||g' config.h
make && make install-strip
git_pkg_ver "patch" >>"$_pfx/version"
;; ### patch */
pipetoys)
cd "$_tmp/pipetoys-src"
autoreconf -i
./configure --prefix=${_pfx}
make && make install-strip
git_pkg_ver "pipetoys" >>"$_pfx/version"
;; ### pipetoys */
pax-utils) # dumpelf, lddtree #
cd "$_tmp/pax-utils-src"
make CC="$CC" CFLAGS="$CFLAGS" USE_CAP=no USE_PYTHON=no PREFIX=${_pfx} strip install
## open to better suggestions here!
pax_ver=$(wget -qO- 'http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/app-misc/pax-utils'|sed -n 's@.*ils-\([0-9.]*\).eb.*@\1@p'|sort -urV|head -n1)
echo "pax-utils ${pax_ver}-cvs" >>"$_pfx/version"
;; ### pax-utils */
wol)
cd "$_tmp/wol-src"
./autogen.sh
sed -i 's/__GLIBC.*/0/g' lib/getline.h
./configure --prefix=${_pfx} --disable-{nls,rpath}
sed -i '/ETHER/s/0/1/g;/STRUCT_ETHER_ADDR_OCTET/d' config.h
make && make install-strip
awk '/define VER/ {gsub(/"/,"",$3); print "wol "$3"'$(svnversion)'"}' config.h >>"$_pfx/version"
;; ### wol */
atop)
cd "$_tmp/atop-src"
sed -i '/O2/d; s/lncurses/&w/' Makefile
find . -name "show*.c" -exec sed -i 's@termio.h@termios.h@g' '{}' \;
make BINDIR=/bin SBINDIR=/bin CC="$CC -s" CFLAGS="$CFLAGS" DESTDIR="$_pfx" atop
cp -v atop "$_pfx/bin/"
cp -v man/atop.1 "$_pfx/share/man/man1/"
cp -v man/atoprc.5 "$_pfx/share/man/man5/"
awk '/ATOPVER/ {gsub(/"/,"",$3); print "atop "$3}' version.h >>"$_pfx/version"
;; ### atop */
netcat)
cd "$_tmp/wol-src"
autoreconf -i
./configure --prefix=${_pfx} --disable-{nls,rpath,debug}
make && make install-strip
awk '/define VER/ {gsub(/"/,"",$3); print "netcat "$3"'$(svnversion)'"}' config.h >>"$_pfx/version"
;; ### netcat */
ncdu)
cd "$_tmp/ncdu-src"
autoreconf -fi
./configure --prefix=${_pfx}
make && make install-strip
git_pkg_ver "ncdu" >>"$_pfx/version"
;; ### ncdu */
sed)
cd "$_tmp/sed-src"
./bootstrap --skip-po
./configure --prefix=${_pfx} --disable-{nls,rpath,i18n}
make && make install-strip
git_pkg_ver "sed" >>"$_pfx/version"
;; ### sed */
gawk)
cd "$_tmp/gawk-src"
./bootstrap.sh
sed -i 's/lncurses/&w/g' configure
./configure --prefix=${_pfx} --disable-{nls,rpath,extensions}
make && strip -s gawk
cp -v gawk "$_pfx/bin/"
cp -v doc/gawk.1 "$_pfx/share/man/man1/"
git_pkg_ver "gawk" >>"$_pfx/version"
;; ### gawk */
tar)
cd "$_tmp/tar-src"
./bootstrap --skip-po
sed -i 's/-Werror//g' configure
./configure --prefix=${_pfx} --disable-{nls,rpath} --with-rmt=/bin/rmt
make && make install-strip
git_pkg_ver "tar" >>"$_pfx/version"
;; ### tar */
gzip)
cd "$_tmp/gzip-src"
./bootstrap --skip-po
sed -i 's/-Werror//g' configure
./configure --prefix=${_pfx}
make && make install-strip
git_pkg_ver "gzip" >>"$_pfx/version"
;; ### gzip */
pigz)
cd "$_tmp/pigz-src"
make pigz CC="$CC -s" CFLAGS="$CFLAGS"
cp -v pigz "$_pfx/bin/"
cp -v pigz.1 "$_pfx/share/man/man1/"
echo "pigz $(git describe --tags)" >>"$_pfx/version"
;; ### pigz */
kmod)
cd "$_tmp/kmod-src"
autoreconf -fi
./configure --prefix=${_pfx} --disable-{debug,python,maintainer-mode} --enable-{tools,manpages} --with-{pic,xz,zlib}
make && make install-strip
git_pkg_ver "kmod" >>"$_pfx/version"
;; ### kmod */
e2fsprogs)
cd "$_tmp/e2fsprogs-src"
patch -p1 -i ${_breqs}/e2fsprogs-magic_t-fix.patch
./configure --prefix=${_pfx} --sbindir=${_pfx}/bin --enable-symlink-{build,install} --enable-relative-symlinks \
--disable-{nls,rpath,fsck,uuidd,libuuid,libblkid,tls,e2initrd-helper}
make && make install-strip
git_pkg_ver "e2fsprogs" >>"$_pfx/version"
;; ### e2fsprogs */
ethtool)
cd "$_tmp/ethtool-src"
./autogen.sh
sed -i 's/__uint/uint/g; s/__int/int/g' internal.h
./configure --prefix=${_pfx} --sbindir=${_pfx}/bin
make && make install-strip
git_pkg_ver "ethtool" >>"$_pfx/version"
;; ### ethtool */
bison) # *** BROKEN ***
cd "$_tmp/bison-src"
git submodule update --init
./bootstrap --skip-po
./configure --prefix=${_pfx} --disable-{nls,rpath}
make && make install-strip
git_pkg_ver "bison" >>"$_pfx/version"
;; ### bison */
cryptsetup)
;; ### cryptsetup */
file)
cd "$_tmp/file-src"
autoreconf -fi
./configure --prefix=${_pfx} --enable-static --with-pic
make && make install-strip
git_pkg_ver "file" >>"$_pfx/version"
;; ### file */
findutils)
cd "$_tmp/findutils-src"
./import-gnulib.sh
./configure --prefix=${_pfx} --disable-{nls,rpath,debug} PYTHON=false
make
make -C find install-strip
make -C xargs install-strip
git_pkg_ver "findutils" >>"$_pfx/version"
;; ### findutils */
libpng)
;; ### libpng */
icoutils)
;; ### icoutils */
wget) #+# requires: openssl, zlib
cd "$_tmp/wget-src"
./bootstrap --skip-po
./configure --prefix=${_pfx} --sysconfdir=/etc --disable-{nls,rpath,debug,ipv6,ntlm} --with-ssl=openssl
make && strip -s src/wget
cp -v src/wget "$_pfx/bin/"
cp -v doc/wget.1 "$_pfx/share/man/man1/"
cp -v doc/sample.wgetrc "$_pfx/etc/wgetrc"
git_pkg_ver "wget" >>"$_pfx/version"
;; ### wget */
curl)
cd "$_tmp/curl-src"
./buildconf
./configure --prefix=${_pfx} --sysconfdir=/etc --with-pic --enable-threaded-resolver \
--disable-{debug,werror,curldebug,ares,rtsp,dict,telnet,pop3,imap,smtp,gopher,manual,ipv6,ntlm-wb}
sed -i '/INSTALL.*man3dir/d' docs/libcurl/Makefile
sed -i '/INSTALL.*man3dir/d' docs/libcurl/opts/Makefile
make
make install-strip || true # sed hackery might cause this to "fail"
git_pkg_ver "curl" >>"$_pfx/version"
;; ### curl */
md5deep) # grrr, C++
#cd "$_tmp/md5deep-src"
#./bootstrap.sh
#./configure --prefix=${_pfx}
#make && make install-strip
#git_pkg_ver "md5deep" >>"$_pfx/version"
;; ### md5deep */
nbwmon)
cd "$_tmp/nbwmon-src"
make CC="$CC -s" LDLIBS="-lncursesw"
cp -v nbwmon "$_pfx/bin/"
git_pkg_ver "nbwmon" >>"$_pfx/version"
;; ### nbwmon */
pixelserv)
cd "$_tmp/pixelserv-src"
$CC $CFLAGS $LDFLAGS -O2 -DDO_COUNT -DTEXT_REPLY -DREAD_FILE -DREAD_GIF -DNULLSERV_REPLIES -DSSL_RESP -o pixelserv pixelserv.c
git_pkg_ver "pixelserv" >>"$_pfx/version"
;; ### pixelserv */
minised)
cd "$_tmp/minised-src"
$CC $CFLAGS sedcomp.c sedexec.c $LDFLAGS -o minised
cp -v minised "$_pfx/bin/"
cp -v minised.1 "$_pfx/share/man/man1/"
echo "minised $(grep -Eo '[0-9]+\.[0-9]+' README|tail -n1)-r$(svnversion)" >>"$_pfx/version"
;; ### minised */
lz4)
cd "$_tmp/lz4-src"
sed '/SHARED/d' Makefile >Makefile.static
make CC="$CC -s -fPIC" PREFIX="$_pfx" -f Makefile.static install
git_pkg_ver "lz4" >>"$_pfx/version"
;; ### lz4 */
dhcpcd)
cd "$_tmp/dhcpcd-src"
./configure --prefix=${_pfx} --libexecdir=${_pfx}/lib --sbindir=${_pfx}/bin \
--sysconfdir=/etc --dbdir=/var/lib/dhcpcd --rundir=/run \
--without-udev --disable-debug --disable-ipv6
find . -type f -exec sed -i '/#[[:blank:]]*include <sys\/\(queue\|cdefs\).h>/d' '{}' \;
make && make install
awk '/define VER/ {gsub(/"/,"",$3); print "dhcpcd "$3}' defs.h >>"$_pfx/version"
;; ### dhcpcd */
kwakd)
cd "$_tmp/kwakd-src"
./configure --prefix=${_pfx} --sysconfdir=/etc
make && make install-strip
echo "kwakd $(sed -n 's/^VERSION=\(.*\)$/\1/p' configure)" >>"$_pfx/version"
;; ### kwakd */
mdocml)
cd "$_tmp/mdocml-src"
echo "OSNAME=Linux" >configure.local
echo "BUILD_DB=0" >>configure.local
echo "PREFIX=${_pfx}" >>configure.local
./configure
make && make install
awk '/define VER/ {gsub(/"/,"",$3); print "mdocml "$3"-cvs"}' config.h >>"$_pfx/version"
;; ### mdocml */
cryptsetup)
;; ### cryptsetup */
hexedit)
;; ### hexedit */
tcc)
;; ### tcc */
*) ;;
esac
done
exit
## NOTE(review): everything below is unreachable because of the 'exit' above;
## the cleanup steps appear to be intentionally disabled -- TODO confirm.
# remove libtool junk (pattern quoted so the shell cannot expand it in $PWD)
find "$_pfx" -type f -name '*.la' -delete
# compress man pages
find "$_pfx/share/man" -type f -exec gzip -9 '{}' \;
# trash the downloaded source
rm -rf "$_tmp"
| true
|
6628801e2279825bd9f568c9f4599eb1ccf277eb
|
Shell
|
freeioe/freeioe
|
/openwrt/init.d/skynet
|
UTF-8
| 889
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh /etc/rc.common
# Copyright (C) 2017-2023 kooiot.com
# OpenWrt init script for FreeIOE: runs the skynet runtime supervised by
# process-monitor through procd.
USE_PROCD=1          # use procd service management
START=96             # start late in the boot sequence
STOP=1               # stop first at shutdown
BASE_DIR=/usr/ioe
SKYNET_DIR=$BASE_DIR/skynet
FREEIOE_DIR=$SKYNET_DIR/ioe
PM_BIN=process-monitor              # default: resolve from $PATH
SKYNET_BIN=$SKYNET_DIR/skynet
STARTUP_SH=$FREEIOE_DIR/startup.sh
# If running on OpenWrt/LEDE, prefer an arch-specific process-monitor binary
# bundled with FreeIOE over the generic one on $PATH.
if [ -f /etc/os-release ]; then
	. /etc/os-release
	# Strip the "-snapshot" suffix so snapshot builds map onto a release dir.
	VERSION_ID=$(echo "$VERSION_ID" | sed -e 's/\-snapshot//g')
	# LEDE exports LEDE_ARCH; newer OpenWrt exports OPENWRT_ARCH instead.
	OS_ARCH=$LEDE_ARCH
	if [ -n "$OPENWRT_ARCH" ]; then
		OS_ARCH=$OPENWRT_ARCH
	fi
	if [ -f "$FREEIOE_DIR/openwrt/$VERSION_ID/$OS_ARCH/process-monitor" ]; then
		PM_BIN=$FREEIOE_DIR/openwrt/$VERSION_ID/$OS_ARCH/process-monitor
	fi
fi
# procd hook: launch skynet under process-monitor as one supervised instance.
start_service () {
logger -t "FreeIOE" -p user.notice "Starting..."
procd_open_instance
# Tell the FreeIOE runtime it is running as a daemon.
procd_set_param env IOE_RUN_AS_DAEMON=1
# process-monitor wraps skynet: -M 2 (mode), -D working dir, -S startup hook.
# NOTE(review): $SKYNET_BIN is intentionally unquoted here -- presumably never
# contains spaces; confirm before quoting.
procd_set_param command "$PM_BIN" -M 2 -D "$SKYNET_DIR" -S "sh $STARTUP_SH $BASE_DIR" $SKYNET_BIN ioe/config
# Restart the instance automatically if it dies.
procd_set_param respawn
procd_close_instance
}
| true
|
dfb28b8c7765862d6d26706190faed913223d688
|
Shell
|
romuald-r/livebox
|
/Livebox 2/sagem/normal/usr/lib/wwan/connect.sh
|
UTF-8
| 753
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Hotplug hook for the wwan (3G) interface: (re)starts pppd via pon and
# publishes the interface state through pcb_cli.
# A pid file acts as a crude lock so concurrent hotplug events do not race.
WWAN_PID_FILE="/var/run/ppp-wwan.pid"

logger -t wwan "connect.sh called with ACTION=$ACTION"

if [ ! -e /var/run/wwan_connect.pid ]; then
	echo $$ > /var/run/wwan_connect.pid
	case "$ACTION" in
	add)
		# Kill any previous pppd before starting a fresh connection.
		[ -e "$WWAN_PID_FILE" ] && kill "$(head -n 1 "$WWAN_PID_FILE")"
		[ -e /var/lib/nemo/wwan/options.3g ] && pon wwan
		pcb_cli "NeMo.Intf.wwan.KeyStatus=Running"
		;;
	remove)
		[ -e "$WWAN_PID_FILE" ] && kill "$(head -n 1 "$WWAN_PID_FILE")"
		pcb_cli "NeMo.Intf.wwan.KeyStatus=None"
		;;
	reload)
		# Only bounce the connection when it is currently marked Running.
		# FIX: '=' (not '==') is the portable string comparison for [ ] in sh.
		if [ "$(pcb_cli -l NeMo.Intf.wwan.KeyStatus?)" = "Running" ]
		then
			[ -e "$WWAN_PID_FILE" ] && kill "$(head -n 1 "$WWAN_PID_FILE")"
			[ -e /var/lib/nemo/wwan/options.3g ] && pon wwan
		fi
		;;
	esac
	rm -f /var/run/wwan_connect.pid
fi
| true
|
ff0c9b1eb7c034170ff7949906ec6d951a62df1e
|
Shell
|
wNakiami/env_init
|
/bootstrap.sh
|
UTF-8
| 504
| 3.234375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Bootstrap the env_init dotfiles: clone the repo (first run only), run its
# setup scripts, then wire the generated init files into bash/zsh/vim rc files.
#set -x
set -e

ENV=$HOME/.env_init

# Clone only on the first run; later runs reuse the existing checkout.
if [ ! -d "$ENV" ]
then
    git clone https://github.com/wNakiami/env_init.git "$ENV"
fi

cd "$ENV"
bash update.sh
bash git.sh

INIT=$HOME/.local/etc/init.sh

# NOTE(review): these appends are not idempotent -- re-running bootstrap adds
# duplicate lines to the rc files.  TODO: guard with a grep before appending.
if [ -f "$HOME/.bashrc" ]
then
    echo "source $INIT" >> "$HOME/.bashrc"
    echo "umask 023" >> "$HOME/.bashrc"
fi

if [ -f "$HOME/.zshrc" ]
then
    echo "source $INIT" >> "$HOME/.zshrc"
    echo "umask 023" >> "$HOME/.zshrc"
fi

echo "source $HOME/.vim/init.vim" >> "$HOME/.vimrc"

echo 'init over'
| true
|
3a73bc77e0ab73f1eb11d2d33aabaa7fc9a9d5fb
|
Shell
|
pk-codebox-evo/android-apps-BioWiki
|
/tools/release-checks.sh
|
UTF-8
| 1,424
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# FIX: this file uses [[ ]] and other bashisms (see checkENStrings,
# checkVersions), so the shebang must request bash, not /bin/sh.

# Abort unless at least one device/emulator is visible to adb.
# 'adb devices -l' prints a header plus one line per device plus a trailing
# blank line, so <= 2 lines of output means no device is attached.
checkDeviceToTest() {
  lines=$(adb devices -l | wc -l)
  if [ "$lines" -le 2 ]; then
    echo You need a device connected or an emulator running
    exit 2
  fi
}
# List the attached devices, then run the Gradle connected-instrumentation
# tests (cIT task) on all of them.
function runConnectedTests() {
echo Tests will be run on following devices:
adb devices -l
echo -----------
./gradlew cIT
}
# Print a green "[OK]" status tag (colors via terminfo).
pOk() {
  local green reset
  green=$(tput setaf 2)
  reset=$(tput sgr0)
  printf '%s\n' "[${green}OK${reset}]"
}

# Print a red "[KO]" status tag (colors via terminfo).
pFail() {
  local red reset
  red=$(tput setaf 1)
  reset=$(tput sgr0)
  printf '%s\n' "[${red}KO${reset}]"
}
# Verify the EN strings are complete: temporarily delete all translated
# strings.xml files and run a gradle build (which then resolves everything
# against the EN resources), restoring res/ afterwards.
function checkENStrings() {
if [[ -n $(git status --porcelain|grep "M res") ]]; then
/bin/echo -n "Unstagged changes detected in res/ - can't continue..."
pFail
exit 3
fi
# save local changes
# NB: needpop==1 means grep did NOT see "No local changes", i.e. a stash
# entry was actually created and must be popped at the end.
git stash | grep "No local changes to save" > /dev/null
needpop=$?
rm -f res/values-??/strings.xml
/bin/echo -n "Check for missing strings (slow)..."
# Quiet build first; on failure, rerun verbosely so the errors are shown.
./gradlew build > /dev/null 2>&1 && pOk || (pFail; ./gradlew build)
git checkout -- res/
# restore local changes
if [ $needpop -eq 1 ]; then
git stash pop > /dev/null
fi
}
# Compare the versionName in build.gradle against the newest git tag and
# print a failure marker on mismatch (non-fatal: the script continues).
function checkVersions() {
  gradle_version=$(grep -E 'versionName' build.gradle \
    | grep -Eo "[0-9.]+")
  # NOTE(review): plain 'sort' is lexicographic, so e.g. 1.10 sorts before
  # 1.9 -- 'sort -V' would be more correct.  TODO confirm the tag scheme.
  tag=$(git tag -l|sort|tail -1)
  # FIX: quote the RHS -- inside [[ ]] an unquoted right-hand side of != is a
  # glob pattern, so a tag containing '*' or '?' could falsely match.
  if [[ $gradle_version != "$tag" ]]; then
    /bin/echo -n "build.gradle version and git tag version mismatch..."
    pFail
  fi
  echo "build.gradle version $gradle_version"
  echo "last git tag version is $tag"
}
## Entry point: run the EN-strings check, then the version-consistency check.
## The connected-device test steps are currently disabled.
# Check strings
checkENStrings
# Run tests
# checkDeviceToTest
# runConnectedTests
checkVersions
| true
|
d2b2464643b5abd66cfd8588bb3141605e090648
|
Shell
|
phil-nye/HMP_2015
|
/Odroid_Scripts/list_cpus.sh
|
UTF-8
| 2,125
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Dump cpufreq details from sysfs for every CPU listed in /proc/cpuinfo.

# Text Styles
norm="\e[0m" # default bash text settings
bold="\e[38;5;227m\e[1m" # bold and hex (color) code #227

numcores=$(cat /proc/cpuinfo | grep processor) # one "processor : N" line per CPU
#echo -n "String: ${numcores}"
cores=(${numcores}) # intentionally unquoted: word-split the lines into a flat list
#echo -e "Length: ${#cores[@]}\n"
numcores=$((${#cores[@]} / 3)) # each "processor : N" line contributes 3 words
# FIX: was ${normal}, an undefined variable -- the reset sequence never printed.
echo -e "Num Cores: ${bold}${numcores}${norm}\n"

cpupath="/sys/devices/system/cpu/cpu"
cpu="0" # always start with cpu0

while [ "${cpu}" -lt "${numcores}" ]
do
	echo -e "${norm}CPU: ${bold}${cpu}"
	echo -en "${norm}CPUINFO_MAX_FREQ (kHz): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/cpuinfo_max_freq
	echo -en "${norm}CPUINFO_MIN_FREQ (kHz): ${bold}"
	# FIX: the MIN_FREQ label previously read cpuinfo_max_freq (copy-paste bug)
	cat ${cpupath}${cpu}/cpufreq/cpuinfo_min_freq
	echo -en "${norm}CPUINFO_CUR_FREQ (kHz): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/cpuinfo_cur_freq
	echo -en "${norm}AFFECTED_CPUS: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/affected_cpus
	echo -en "${norm}RELATED_CPUS: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/related_cpus
	echo -en "${norm}SCALING_GOVERNOR: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_governor
	echo -en "${norm}SCALING_MAX_FREQ (kHz): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_max_freq
	echo -en "${norm}SCALING_MIN_FREQ (kHz): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_min_freq
	echo -en "${norm}SCALING_SETSPEED: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_setspeed
	echo -en "${norm}SCALING_DRIVER: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_driver
	echo -en "${norm}SCALING_AVAILABLE_GOVERNORS: ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_available_governors
	echo -en "${norm}SCALING_AVAILABLE_FREQUENCIES (kHz): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/scaling_available_frequencies
	echo -en "${norm}CPUINFO_TRANSITION_LATENCY (us): ${bold}"
	cat ${cpupath}${cpu}/cpufreq/cpuinfo_transition_latency
	cpu=$((${cpu} + 1))
	echo
done
| true
|
f5bdac030a76ff055581a8be589b279e9488cdae
|
Shell
|
aldrich/typinggenius
|
/Resources/Textorize/for-blocks/genfont.sh
|
UTF-8
| 546
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Render one PNG glyph per character of STRING into ./out using textorize.
# NB: FONT_PARAM deliberately keeps escaped quotes -- the value is re-parsed
# by 'eval' inside generate(), which preserves the multi-word font name.
FONT_PARAM="-f\"Avenir Next Regular\""
SIZE_PARAM="-s128"
OTHER_PARAMS="-cwhite -gtransparent -a2"
STRING="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Render one character to ./out/<FILE>.png.
#   $1 - character (or short label) to render
#   $2 - output file basename, without the .png extension
# NB: the command line is built as a string and run through 'eval' on
# purpose: FONT_PARAM contains embedded escaped quotes ("Avenir Next
# Regular") that must be re-parsed by the shell; a direct invocation would
# split the font name into separate words.
generate()
{
CHAR=$1
FILE=$2
command="textorize $SIZE_PARAM $OTHER_PARAMS $FONT_PARAM $CHAR -o\"./out/$FILE.png\""
echo 'executing command:' $command
eval $command
}
# Ensure the output directory exists, then render each letter of STRING.
mkdir -p out

i=0
len=${#STRING}
while (( i < len )); do
    CHAR=${STRING:i:1}
    generate $CHAR $CHAR
    i=$((i + 1))
done

# Special glyphs, currently disabled:
# generate "BS" "backspace"
# generate "CR" "carriage-return"
# generate "," "comma"
# generate "." "period"
| true
|
fde4b4a2f5a83cb6c676f82978fa0177737a6635
|
Shell
|
OnyxAI/onyx-installer
|
/make_host_win.sh
|
UTF-8
| 1,716
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# FIX: the shebang must be the very first line of the file; it previously sat
# below the comment line and was therefore ignored by the kernel.
# Run make win from MSYS
#
# Build the Onyx host installer for Windows from an MSYS shell:
#   1. build zlib and the Qt installer project with MinGW,
#   2. stage the binaries and Qt translations into ./install,
#   3. embed the manifest and pack everything into a WinRAR SFX archive,
#   4. record the released version number in ./latest_windows.
echo Building host installer for Windows via MSYS
QT_VER="4.8.6"
QT_PATH="/c/MinGW/qt/qt-everywhere-opensource-src-4.8.6/bin"
SDK_PATH="/c/Program Files/Microsoft SDKs/Windows/v7.1/Bin"
RAR_PATH="/c/Program Files/WinRAR/"
MINGW_PATH="/c/MinGW/bin"
echo -e "Updating PATH"
PATH="${PATH}:${QT_PATH}:${SDK_PATH}:${RAR_PATH}:${MINGW_PATH}"
TARGET="onyx_installer"
ZLIB_VER="1.2.8"
pushd ${TARGET}
# Take the last VERSION assignment from the .pro file (third whitespace field).
VERSION=$(cat ${TARGET}.pro | grep VERSION | tail -n 1 | awk {'print $3'})
if [ -f Makefile ]; then
echo "Cleaning Qt project"
mingw32-make clean
if [ $? != 0 ]; then echo "Clean failed"; exit 1; fi
fi
pushd w32-lib/zlib-${ZLIB_VER}
make -f win32/Makefile.gcc clean
popd
echo Building zlib version ${ZLIB_VER}
pushd w32-lib/zlib-${ZLIB_VER}
make -f win32/Makefile.gcc
if [ $? != 0 ]; then echo "Building zlib failed" && exit 1; fi
popd
echo Building installer
qmake
mingw32-make
if [ $? != 0 ]; then echo "Building project failed" && exit 1; fi
strip release/${TARGET}.exe
echo Packaging installer
popd
INSTALL="install"
if [ -d ${INSTALL} ]; then echo "Cleaning old install directory " && rm -rf ${INSTALL}; fi
mkdir -p ${INSTALL}
cp ${TARGET}/release/${TARGET}.exe ${INSTALL}/
# Translations are optional; ignore a missing-file error here.
cp ${TARGET}/*.qm ${INSTALL}/ > /dev/null 2>&1
cp ${TARGET}/winrar.sfx ${INSTALL}
echo Building manifest
mt.exe -manifest onyx_installer/onyx_installer.exe.manifest -outputresource:install/onyx_installer.exe
pushd ${INSTALL}
Rar.exe a -r -sfx -z"winrar.sfx" onyx-installer onyx_installer.exe *.qm >/dev/null 2>&1
popd
mv ${INSTALL}/onyx-installer.exe .
rm -rf ${INSTALL}
umount /qtbin >/dev/null 2>&1
umount /mgwbin >/dev/null 2>&1
# Update on server
echo ${VERSION} > latest_windows
echo Build complete
| true
|
f034961b9580e7cd95c70dbcb9a3d9601876a97d
|
Shell
|
dyanakiev/MinecraftUbuntuFastServer
|
/FastMCServer-java-and-screen.sh
|
UTF-8
| 885
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# FIX: the original shebang was "#/bin/sh" (missing '!'), so it was just a
# comment; bash is required anyway because the script uses 'read -p'.
# Interactive installer for Oracle Java 8 + screen on Ubuntu 14.04-16.04.
# User-facing prompts are in Bulgarian and kept verbatim.
echo "---------------------------------------"
echo "Добре дошли в инсталатора FastMCServer java8 и screen, инсталатора е за Ubuntu 14.04-16.04!!!"
echo "Инсталатора инсталира java и screen"
echo "Видео урок:"
echo "---------------------------------------"
echo " "
# Keep asking until the user answers yes (install and break) or no (exit).
while true; do
    read -p "Желаете ли да започнем инсталацията? (y/n)" yn
    case $yn in
        [Yy]* )
            echo " ";
            echo "Инсталиране на java 8 и screen...";
            echo " ";
            sudo add-apt-repository ppa:webupd8team/java;
            sudo apt-get update;
            sudo apt-get install oracle-java8-installer screen;
            break;;
        [Nn]* ) exit;;
        * ) echo "Отговри с yes или no";;
    esac
done
| true
|
87e4c590aa73bac6c2e93c88409e3cf09d1dac05
|
Shell
|
quirkasaurus/xterm-256
|
/pouncy
|
UTF-8
| 6,718
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#----------------------------------------------------------------------#
# FILE: pouncy #
# AUTHOR: Douglas Roberts -- droberts #
# CREATED: Thu Nov 5 20:25:21 EST 2020 #
# PURPOSE: new printc. print text with named colors or numbers. #
# #
#----------------------------------------------------------------------#
# Pull in shared helpers -- presumably defines debug() and the $FSTATE
# state-file path used below; TODO confirm against more_funx.
. more_funx ${0}
# Clear any state file left over from a previous run.
/bin/rm $FSTATE
# set -xv
# color chart by name
# NB: available_colors (below) self-greps THIS file ($0) for lines of the
# exact form name=number, so every entry must stay on its own line with no
# spaces and no trailing comment.
aqua=43
black=0
blue=21
brown=94
cyan=51
gray=145
green=34
hotpink=198
lime=46
magenta=201
orange=214
pink=213
purple=93
red=196
seablue=39
strawberry=205
turquoise=37
white=231
yellow=226
# Build the list of known "name=value" color words by grepping the table
# above out of this script; intentionally unquoted so each match becomes
# one array element.
available_colors=( $( grep '^[a-z_][a-z_]*=[0-9][0-9]*$' $0 ) )
# Global result flag written by is_a_color ("True"/"False" as strings).
status=False
# Self-test: exercise every option and log level (invoked via `pouncy -d`).
# Output is meant to be inspected visually, not asserted.
function run_tests
{
    # tests:
    echo -n "[m"
    pouncy red this should be red
    pouncy green,yellow this should be yellow text on green background
    pouncy -n blue,white white on blue and no new line date should be snug: ; date
    pouncy -u white,magenta magenta on white and underlined
    pouncy -b yellow yellow-foreground and reverse-highlighted
    pouncy -b purple,cyan cyan on purple and reverse-highlighted
    pouncy 21,213 pink on blue background using numbers 21,213
    pouncy -i 48 bright-green with italics using the number 48
    echo
    pouncy -e purple,aqua no terminus character
    echo " so this line, even without pouncy, should be highlighted "
    echo "[m and this one not highlighted. "
    echo
    pouncy info info - - - white on blue
    pouncy info2 info - - - white on purple
    pouncy info3 info - - - white on green
    pouncy info4 info - - - white on strawberry
    pouncy error error - - - yellow on red
    pouncy fail fail - - - pink on purple
    pouncy pass pass - - - yellow on green
    pouncy warn warn - - - blue on orange
    pouncy warn2 warn2 - - - blue on pink
    pouncy warn3 warn3 - - - blue on hotpink
    pouncy warn4 warn4 - - - purple on pink
    pouncy warn5 warn5 - - - purple on hotpink
    pouncy super-info frogs: yellow on purple, white on blue
    pouncy super-info2 frogs: green on yellow, yellow on green
    return
}
# Resolve a color name against available_colors ("name=number" pairs).
# On a hit: sets the globals status=True and idx=<xterm-256 number>.
# On a miss: status stays False and idx is cleared.
function is_a_color
{
    debug
    local entry candidate
    local wanted=${1:-NO-COLOR}
    status=False
    for entry in ${available_colors[*]}; do
        candidate=${entry%=*}
        [[ $wanted == $candidate ]] || continue
        status=True
        eval idx=\$$candidate
        return
    done
    idx=
}
# Print usage, render every log level in its own colors (by calling
# pouncy recursively), dump the color table by name, then exit 9.
function clean_exit
{
    echo
    echo usage: $zero '[-nudebi] bg,fg text'
    echo usage: $zero '[-nudebi] fg text'
    echo where:
    echo option -n means suppress-newlines
    echo option -u means underline
    echo option -d means run the debugging tests
    echo option -e means do not end the specified colors
    echo
    echo option -b means bold
    echo option -i means italicize
    echo
    echo colors can be names or numbers or one of these levels:
    for log_level in info info2 info3 info4 fail pass warn warn2 warn3 \
    woo-hoo error ; do
        echo -n "pouncy $log_level : "
        pouncy $log_level $log_level
    done
    echo -n "pouncy super-info : "
    pouncy super-info super-info value
    echo -n "pouncy super-info2 : "
    pouncy super-info2 '[i]' 'super-info2'
    echo
    echo current colors by name:
    echo
    for color in ${available_colors[*]}; do
        color_idx=${color##*=}
        # black hack
        # (index 0 would be invisible; give it a gray background first)
        if [[ $color_idx -eq 0 ]]; then
            echo -n '[48;5;145;1m'
        fi
        echo "[38;5;${color_idx}m${color}[m"
    done
    echo
    exit 9
}
# Once a foreground color has been chosen, fold the current word ($arg)
# into the message text and skip the rest of the caller's case arm.
# NOTE(review): the `continue` targets the *caller's* for-loop; bash
# only honors loop control from inside a function in some versions --
# confirm on the deployed bash.
function blippo
{
    if [[ -n $fg ]]; then
        text="${text:+${text} }${arg}"
        continue
    fi
}
DEBUG=0
# No arguments, or a lone -h/--help: show usage and exit.
case ${#} in
(0) clean_exit ;;
(1)
    case ${1} in
    (-h|--help) clean_exit ;;
    esac
    ;;
esac
# MAIN
bg=
fg=
no_newline=
italics=
underline=
bold=
text=
# ANSI reset printed after the text unless -e suppresses it.
TERMINUS='[m'
# NOTE(review): super_info is never read below -- looks vestigial.
super_info=
# debug/init/incr come from the sourced more_funx helpers.
debug
init
# Parse flags, log-level names, and color specs ("bg,fg" or "fg" as
# either names or xterm-256 numbers); once a color is set, the
# remaining words become the message text (see blippo).
for arg in "${@}" ; do
    incr
    case $arg in
    (woo-hoo)
        blippo
        italics='[3m'
        eval bg=$purple
        eval fg=$hotpink
        ;;
    (super-info2)
        # Two-tone "column: value" rendering; prints and exits here.
        shift
        col=$1
        shift
        val="$*"
        echo -n "[38;5;${yellow};48;5;${purple}m ${col%:}: "
        echo "[38;5;${white};48;5;${blue};3m $val [m"
        exit
        ;;
    (super-info)
        shift
        val="$*"
        echo -n "[38;5;36;48;5;226m [i] "
        echo "[38;5;226;48;5;36m $val [m"
        exit
        ;;
    (-n) no_newline=-n ;;
    (-u) underline='[4m' ;;
    (-d) run_tests ; exit ;;
    (-e) TERMINUS= ;;
    (-b) bold='[1m' ;;
    (-i) italics='[3m' ;;
    (info) blippo ; eval bg=$blue ; eval fg=$white ;;
    (info2) blippo ; eval bg=$purple ; eval fg=$white ;;
    (info3) blippo ; eval bg=30 ; eval fg=$white ;;
    (info4) blippo ; eval bg=$strawberry ; eval fg=$white ;;
    (error) blippo ; eval bg=$red ; eval fg=$yellow ; italics='[3m' ;;
    (fail) blippo ; eval bg=$red ; eval fg=$white ; italics='[3m' ;;
    (pass) blippo ; eval bg=$green ; eval fg=$yellow ;;
    (warn) blippo ; eval bg=$orange ; eval fg=$black ; italics='[3m' ;;
#----------------------------------------------------------------------#
# (warn2) blippo ; eval bg=$pink ; eval fg=$red ; italics='[3m' ;; #
#----------------------------------------------------------------------#
    (warn2) blippo ; eval bg=$pink ; eval fg=$blue ; italics='[3m' ; bold='[1m' ;;
    (warn3) blippo ; eval bg=$hotpink ; eval fg=$blue ; italics='[3m' ; bold='[1m' ;;
    (warn4) blippo ; eval bg=$pink ; eval fg=$purple ; italics='[3m' ; bold='[1m' ;;
    (warn5) blippo ; eval bg=$hotpink ; eval fg=$purple ; italics='[3m' ; bold='[1m' ;;
    ([0-9]*,[0-9]*)
        # Numeric "bg,fg" pair.
        blippo;
        bg=${arg%,*}
        fg=${arg#*,}
        ;;
    ([0-9]*)
        # Bare numeric foreground.
        blippo;
        fg=${arg}
        ;;
    ([a-z]*,[a-z]*)
        # Named "bg,fg" pair; unknown names are silently ignored.
        blippo;
        _bg=${arg%,*}
        _fg=${arg#*,}
        is_a_color $_bg
        if [[ $status == True ]]; then
            bg=$idx
        fi
        is_a_color $_fg
        if [[ $status == True ]]; then
            fg=$idx
        fi
        ;;
    (*)
        # Anything else: try it as a named foreground color.
        blippo;
        is_a_color $arg
        if [[ $status == True ]]; then
            fg=$idx
        fi
        ;;
    esac
done
# vecho bg
# vecho fg
# Compose the final SGR sequence: background implies 48;5 + 38;5,
# foreground alone implies 38;5, otherwise no coloring at all.
if [[ -n $bg ]]; then
    colors="[48;5;${bg};38;5;${fg}m"
elif [[ -n $fg ]]; then
    colors="[38;5;${fg}m"
else
    colors=
fi
echo ${no_newline} "${bold}${underline}${italics}${colors}${text} $TERMINUS"
| true
|
81d87dc545a94ffb9d8dcce424cc255c390eccf4
|
Shell
|
beeender/openmediavault-remotedesktop
|
/usr/share/openmediavault/mkconf/remotedesktop
|
UTF-8
| 1,913
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# @author Volker Theile <volker.theile@openmediavault.org>
# @author OpenMediaVault Plugin Developers <plugins@omv-extras.org>
# @copyright Copyright (c) 2009-2013 Volker Theile
# @copyright Copyright (c) 2015-2016 OpenMediaVault Plugin Developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Regenerate /etc/xrdp/xrdp.ini from the OpenMediaVault config database
# and (re)start xrdp. Invoked by the OMV mkconf framework.
set -e
. /etc/default/openmediavault
. /usr/share/openmediavault/scripts/helper-functions
XRDP_CONFIG="/etc/xrdp/xrdp.ini"
# Pull the user-configured RDP port and crypt level from the OMV database.
rdpport=$(omv_config_get "/config/services/remotedesktop/rdpport")
cryptlevel=$(omv_config_get "/config/services/remotedesktop/cryptlevel")
# Create updatedb config file
cat <<EOF > ${XRDP_CONFIG}
# this file was automatically generated
[globals]
bitmap_cache=yes
bitmap_compression=yes
port=${rdpport}
crypt_level=${cryptlevel}
channel_code=1
[xrdp1]
name=sesman-Xvnc
lib=libvnc.so
username=ask
password=ask
ip=127.0.0.1
port=-1
EOF
# backup keyboard shortcuts file and change tab value
# (frees the Super+Tab binding, which would otherwise shadow the guest
# session's window switching over RDP)
cp /etc/xdg/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml \
/etc/xdg/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml.bak
sed -i -e 's/<property name="\<Super\>Tab" type="string" value="switch_window_key"\/>/<property name="\<Super\>Tab" type="empty"\/>/' \
/etc/xdg/xfce4/xfconf/xfce-perchannel-xml/xfce4-keyboard-shortcuts.xml
# restart xrdp service
deb-systemd-invoke start xrdp.service >/dev/null || true
exit 0
| true
|
ca2b4be04bee481b15ece7fd7be8223060bef984
|
Shell
|
jensp/Arch-Linux-on-i586
|
/extra/fam/fam
|
UTF-8
| 901
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Arch-style rc.d init script for famd (File Alteration Monitor).
# famd registers with the RPC portmapper, so rpcbind must run first.
. /etc/rc.conf
. /etc/rc.d/functions
# PID of an already-running famd instance, if any (empty when stopped).
PID=`pidof -o %PPID /usr/sbin/famd`
case "$1" in
  start)
    #Check for running portmap, start when not running
    ck_daemon rpcbind && /etc/rc.d/rpcbind start
    stat_busy "Starting File Alteration Monitor"
    if [ ! -f /var/run/daemons/rpcbind ]; then
      stat_fail
      echo "ERROR: rpcbind is not running"
      exit 1
    fi
    if [ -z "$PID" ]; then
      /usr/sbin/famd -T 0 -c /etc/fam/fam.conf
    fi
    # Fail when famd was already running, or the launch above failed.
    if [ ! -z "$PID" -o $? -gt 0 ]; then
      stat_fail
    else
      add_daemon fam
      stat_done
    fi
    ;;
  stop)
    stat_busy "Stopping File Alteration Monitor"
    [ ! -z "$PID" ] && kill $PID &> /dev/null
    if [ $? -gt 0 ]; then
      stat_fail
    else
      rm_daemon fam
      stat_done
    fi
    ;;
  restart)
    $0 stop
    sleep 1
    $0 start
    ;;
  *)
    echo "usage: $0 {start|stop|restart}"
esac
| true
|
952c8c43ea43fb435be2f27a81740921ff01a5b5
|
Shell
|
OrpingtonClose/daily
|
/bash/kubernetes_exercises_3.sh
|
UTF-8
| 4,344
| 2.578125
| 3
|
[] |
no_license
|
# Kubernetes exercise transcript (GKE): liveness probes,
# ReplicationControllers/ReplicaSets, DaemonSets, Jobs and CronJobs.
# Intended to be read/pasted step by step, not run unattended.
gcloud auth login
gcloud config set project $(gcloud projects list | awk '/avid/ {print $1}')
gcloud config set compute/zone europe-west1
gcloud container clusters create kubia --num-nodes 5 --machine-type f1-micro
#possible probes:
#tcp
#http
#exec - don't use with Java
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubia-liveliness
spec:
  containers:
  - image: luksa/kubia-unhealthy
    name: kubia
    livenessProbe:
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 15
EOF
kubectl logs kubia-liveliness --previous
kubectl describe po kubia-liveliness
#Normal Killing 6s (x2 over 1m) kubelet, gke-kubia-default-pool-0f9e039a-jmw5 Killing container with id docker://kubia:Container failed liveness probe.. Container will be killed and recreated.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubia-liveness-2
spec:
  containers:
  - image: luksa/kubia-unhealthy
    name: kubia
    livenessProbe:
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 15
EOF
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    app: kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: luksa/kubia
        ports:
        - containerPort: 8080
EOF
# Delete two pods and watch the RC replace them.
kubectl get po | awk '{print $1}' | head -4 | tail -2 | xargs -n 1 kubectl delete po
gcloud compute ssh gke-kubia-default-pool-a94d1538-q6z5 --zone europe-west1-c
sudo ifconfig eth0 down
#NotReady --> Unknown
gcloud compute instances reset gke-kubia-default-pool-a94d1538-q6z5 --zone europe-west1-c
#ready again
#Controlled By: ReplicationController/kubia
kubectl label pod $(kubectl get po | tail -1 | awk '{print $1}') type=special
kubectl label pod $(kubectl get po | tail -1 | awk '{print $1}') app=foo --overwrite
# a new container is created by the replication controller
kubectl edit rc kubia
#export KUBE_EDITOR="/usr/bin/nano"
kubectl scale rc kubia --replicas 10
#leave pods alone
kubectl delete rc kubia --cascade false
kubectl delete po -l app=kubia
#replicasets add matchLabels
cat <<EOF | kubectl create -f -
apiVersion: apps/v1beta2
kind: ReplicaSet
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - image: luksa/kubia
        name: kubia
EOF
cat <<EOF | kubectl create -f -
apiVersion: apps/v1beta2
kind: ReplicaSet
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchExpressions:
    - key: app
      operator: In
      values:
      - kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - image: luksa/kubia
        name: kubia
EOF
#In
#NotIn
#Exists
#DoesNotExist
# not tried not tried not tried not tried
# not tried not tried not tried not tried
# not tried not tried not tried not tried
# not tried not tried not tried not tried
cat <<EOF | kubectl create -f -
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  name: ssd-monitor
spec:
  selector:
    matchLabels:
      app: kubia-ssd
  template:
    metadata:
      labels:
        app: kubia-ssd
    spec:
      # BUG FIX: the Pod spec field is `nodeSelector` (singular); the
      # original said `nodeSelectors`, which the API server rejects.
      nodeSelector:
        disk: ssd
      containers:
      - image: luksa/ssd-monitor
        name: main
EOF
kubectl get ds
kubectl label node $(kubectl get node | tail -1 | awk '{print $1}') disk=ssd
cat <<EOF | kubectl create -f -
apiVersion: batch/v1
kind: Job
metadata:
  name: batch-job
spec:
  completions: 5
  parallelism: 2
  template:
    metadata:
      labels:
        app: batch-job
    spec:
      restartPolicy: OnFailure
      containers:
      - image: luksa/batch-job
        name: main
EOF
kubectl get jobs
kubectl get po -a # --show-all
kubectl scale job batch-job --replicas 3 #increases parallelism
#activateDadlineSeconds
#spec.backOffLimit
cat <<EOF | kubectl create -f -
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: batchjob-fifteen-minutes
spec:
  schedule: "0,15,30,45 * * * *"
  startingDeadlineSeconds: 15
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: periodic-job
        spec:
          restartPolicy: OnFailure
          containers:
          - image: luksa/batch-job
            name: main
EOF
| true
|
576459e7607e7190949c687b14c20e052528c28e
|
Shell
|
hebr3/wasm_game_of_life
|
/.devcontainer/postCreateCommand.sh
|
UTF-8
| 448
| 2.75
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
# Devcontainer post-create setup: toolchain for a Rust/wasm project.
echo "Installing Cargo Requirements"
cargo install cargo-generate
echo "Installing wasm-pack"
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
echo "Installing node-14"
curl -fsSL https://deb.nodesource.com/setup_14.x | sudo -E bash -
sudo apt-get install -y nodejs
echo "Build the wasm-pack"
# BUG FIX: wasm-pack subcommands are lowercase; "wasm-pack Build" fails
# with "unrecognized subcommand".
wasm-pack build
echo "Install the npm dependencies"
cd www/
npm install
echo "Start the dev server with npm run"
# npm run start
| true
|
d5b85b5e5a75301f52b7ba33884aa147f1f80da6
|
Shell
|
solosTec/cyng
|
/build_src_archive.sh
|
UTF-8
| 851
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the cyng project source archive for buildsystem integration.
# $1 (optional): output directory; defaults to "outdir".
outputPath="outdir"
if [ ! -z "${1}" ]; then
    # BUG FIX: the original re-assigned the literal "outdir" here, so the
    # positional argument was silently ignored; honor it instead.
    outputPath="${1}"
fi
# Tools directory for utility scripts relative to the current directory
export TOOLS_DIR="tools"
echo "[INFO] >>smf<< Creating project source archives for buildsystem integration"
export REV=$(${TOOLS_DIR}/get_branch_rev.sh)
echo "[INFO] Revision: ${REV}"
echo "[INFO] outputPath: ${outputPath}"
rm -rf ${outputPath}
# All these are part of the libCLS_suite
PROJECTSTORE="cyng"
echo "[INFO] Projectstore: ${PROJECTSTORE}"
########################################
# amrd
pkgdir=${outputPath}/cyng
srcdir=$(pwd)
docdir="${srcdir}/docs"
mkdir -p ${pkgdir}
${TOOLS_DIR}/create-src-archive.sh \
    'cyng' \
    ${srcdir} \
    ${srcdir}/src_filelist.txt \
    ${pkgdir} \
    ${REV} \
    ${PROJECTSTORE}
# Move the finished archive + checksum next to the script.
mv ${pkgdir}/*.bz2 ${pkgdir}/*.md5 .
#### Clean at exit ####
#rm -rf ${outputPath}
| true
|
77b86cca13f0a3010f19f7b0e89c99a8bf355ae7
|
Shell
|
xinwendewen/adb-workshop
|
/adb-functions.sh
|
UTF-8
| 471
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# Print the package name of the most recent (foreground) activity.
current_package() {
    adb shell dumpsys activity recents | grep -w "Recent #0" | cut -d '=' -f 2 | cut -d ' ' -f 1
}
# Pull a file or directory from the device into the current directory.
pull() {
    adb pull "$1"
}
# Delete everything inside the given on-device directory.
clear_dir() {
    adb shell rm -rf "$1/*"
}
# Run an adb command, targeting a remote adb server when HOST and PORT
# are set in the environment, the local default server otherwise.
# NOTE(review): ${cmd} is intentionally unquoted so a multi-word
# command string splits into separate adb arguments.
remotable_adb() {
    set -u
    local cmd=$1
    set +u
    if [[ -n ${HOST} && -n ${PORT} ]];
    then
        #echo "adb -H ${HOST} -P ${PORT} ${cmd}"
        adb -H ${HOST} -P ${PORT} ${cmd}
    else
        #echo "adb ${cmd}"
        adb ${cmd}
    fi
}
| true
|
f46e3cbce268f3159c62c46ef10a606961d45e88
|
Shell
|
nulogy/branj
|
/branj
|
UTF-8
| 5,102
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# branj configuration lives in ~/.config/branj/.jiratoken:
#   line 1: "<email>:<api token>" (used as the curl -u auth string)
#   line 2: JIRA base URL
BRANJ_CONFIG_DIR="${HOME}/.config/branj"
BRANJ_CONFIG_FILEPATH="${BRANJ_CONFIG_DIR}/.jiratoken"
# Print CLI usage to stdout (does not exit; callers decide that).
print_usage_doc() {
  cat <<USAGE_DOC
Usage: `basename ${0}`[-m] [-s BRANCH_SUFFIX] JIRA_TICKET_NUMBER [BASE_BRANCH]
Automatically generates a branch name given a JIRA ticket number.
Command line options:
-m: rename the current branch instead of creating a new one (does not rename it upstream)
-s BRANCH_SUFFIX: Add a suffix to the end of the created branch name
Note that this script requires jq; run 'brew install jq' to install.
USAGE_DOC
  echo "Example: $ `basename ${0}` PM-1337"
}
# Interactive one-time setup: prompts for a JIRA API token, account
# email and base URL, then writes them to BRANJ_CONFIG_FILEPATH
# (line 1 "email:token", line 2 URL -- see read_from_config).
first_time_setup() {
  if [[ ! -d "${BRANJ_CONFIG_DIR}" ]]; then
    mkdir "${BRANJ_CONFIG_DIR}"
  fi
  cat <<FIRST_TIME_SETUP_DOC
================================================================================
Running first time setup.
Visit https://id.atlassian.com/manage/api-tokens to generate an API token:
1) Visit https://id.atlassian.com/manage/api-tokens (you may have to login)
2) Click "Create API Token"
3) For a Label, enter "branj"
4) Copy the API token to your clipboard.
================================================================================
FIRST_TIME_SETUP_DOC
  echo -n "Once you have generated a token, enter it here: "
  read JIRA_API_TOKEN
  echo -n "Please enter the email associated to your JIRA account: "
  read JIRA_EMAIL
  echo -n "Please enter the URL of your JIRA installation (e.g. https://acme-corp.atlassian.net): "
  read JIRA_URL
  echo "Writing username and token to ${BRANJ_CONFIG_FILEPATH}"
  echo -e "${JIRA_EMAIL}:${JIRA_API_TOKEN}\n${JIRA_URL}" > "${BRANJ_CONFIG_FILEPATH}"
}
# Build a branch name from a JIRA issue JSON payload ($1):
# "<KEY>-<summary>" lower-cased, runs of non-alphanumerics collapsed to
# single dashes, with the optional global BRANCH_SUFFIX appended.
# Requires jq.
generate_branch_name() {
  local TICKET_KEY="$(jq -r '.key' <(echo ${1}))"
  local TICKET_NAME="$(jq -r '.fields.summary' <(echo ${1}) | tr '[:upper:]' '[:lower:]')"
  local BRANCH_NAME="$(echo ${TICKET_KEY}-${TICKET_NAME} | sed -E 's/[^A-Za-z0-9]/-/g;s/-{2,}/-/g;s/-$//g')"
  if [[ -z "${BRANCH_SUFFIX}" ]]; then
    echo "${BRANCH_NAME}"
  else
    echo "${BRANCH_NAME}-${BRANCH_SUFFIX}"
  fi
}
# Check out (or create/rename to) the branch derived from the JIRA
# response ($1). Existing branch: plain checkout (BASE_BRANCH ignored).
# With RENAME_BRANCH=true: rename the current branch locally.
# Otherwise: create the branch, optionally off BASE_BRANCH.
checkout_branch() {
  local JIRA_RESPONSE="${1}"
  local BRANCH_NAME=$(generate_branch_name "${JIRA_RESPONSE}")
  # Temporarily relax set -e: rev-parse legitimately fails when the
  # branch does not exist yet.
  set +e
  git rev-parse --verify "${BRANCH_NAME}" &> /dev/null
  local BRANCH_EXISTS=$?
  set -e
  if [[ "${BRANCH_EXISTS}" -eq 0 ]]; then
    git checkout "${BRANCH_NAME}"
    [[ ! -z "${BASE_BRANCH}" ]] && echo "Ignored base branch '${BASE_BRANCH}' since '${BRANCH_NAME}' already exists"
  elif [[ "${RENAME_BRANCH}" == "true" ]]; then
    git branch -m "${BRANCH_NAME}"
  else
    git checkout -b "${BRANCH_NAME}" ${BASE_BRANCH:+ "${BASE_BRANCH}"}
  fi
}
# Explain a 401 from JIRA, delete the (presumably bad) stored
# credentials so the next run re-triggers first_time_setup, and exit 1.
print_unauthorized_message() {
  cat <<UNAUTHORIZED_DOC
================================================================================
Got 401 Unauthorized from JIRA.
Please make sure you have entered your JIRA email and token correctly.
Visit https://id.atlassian.com/manage/api-tokens to generate a new API token.
================================================================================
UNAUTHORIZED_DOC
  echo "Removing ${BRANJ_CONFIG_FILEPATH}"
  rm "${BRANJ_CONFIG_FILEPATH}"
  exit 1
}
# Report that the requested JIRA ticket ($1) does not exist, then abort
# with exit status 1.
print_not_found_message() {
  local missing_ticket="${1}"
  echo "Couldn't find ticket ${missing_ticket}."
  exit 1
}
# Fetch issue $1 from the JIRA REST API and dispatch on the HTTP status:
# 200 -> checkout_branch, 401 -> credential reset, 404 -> not found,
# anything else -> dump the payload and exit 1.
# Uses the globals JIRA_AUTH_STRING and JIRA_URL (set in main).
get_branch_name_from_jira() {
  # -w appends the status code on its own line after the JSON body.
  local CURL_OUTPUT="$(curl -s -w '\n%{http_code}' -u "${JIRA_AUTH_STRING}" -X GET -H 'Content-Type: application/json' "${JIRA_URL}/rest/api/3/issue/${1}")"
  local STATUS_CODE="${CURL_OUTPUT##*$'\n'}"
  local JSON_RESPONSE="${CURL_OUTPUT%%$'\n'*}"
  case "${STATUS_CODE}" in
    200)
      checkout_branch "${JSON_RESPONSE}"
      ;;
    401)
      print_unauthorized_message
      ;;
    404)
      print_not_found_message ${1}
      ;;
    *)
      echo "Got HTTP ${STATUS_CODE}. Payload:"
      echo "${JSON_RESPONSE}"
      exit 1
      ;;
  esac
}
# Print line $1 of the branj config file (BRANJ_CONFIG_FILEPATH).
# The echo wrapper normalizes the output to exactly one line.
read_from_config() {
  local wanted_line="${1}"
  echo "$(sed -n "${wanted_line}p" "${BRANJ_CONFIG_FILEPATH}")"
}
parse_args() {
if [[ $# -lt 1 ]]; then
print_usage_doc
exit 1
fi
local NON_OPTION_ARGS=()
# Extract leading non-option arguments, since getopts will immediately
# stop processing at the first non-option argument.
while [[ -n "${1}" ]] && [[ "${1:0:1}" != "-" ]]; do
NON_OPTION_ARGS+=("${1}")
shift
done
JIRA_TICKET_NUMBER="${NON_OPTION_ARGS[0]}"
BASE_BRANCH="${NON_OPTION_ARGS[1]}"
while getopts "ms:" OPTION; do
case $OPTION in
s)
BRANCH_SUFFIX="${OPTARG}"
;;
m)
RENAME_BRANCH="true"
;;
\?)
echo "Invalid option: -${OPTARG}" >&2
exit 1
;;
:)
echo "Option -${OPTARG} requires an argument." >&2
exit 1
;;
esac
done
# Set non-option arguments if they come after option arguments
shift "$((OPTIND-1))"
JIRA_TICKET_NUMBER="${JIRA_TICKET_NUMBER:-${1}}"
BASE_BRANCH="${BASE_BRANCH:-${2}}"
}
# Entry point: parse args, run first-time setup when no config exists,
# load credentials/URL from the config file, then fetch the ticket and
# check out the derived branch.
main() {
  parse_args "$@"
  if [[ ! -e "${BRANJ_CONFIG_FILEPATH}" ]]; then
    first_time_setup
  fi
  JIRA_AUTH_STRING=$(read_from_config 1)
  JIRA_URL=$(read_from_config 2)
  get_branch_name_from_jira "${JIRA_TICKET_NUMBER}"
}
main "$@"
| true
|
c06433b3596ce85e1bec8e57c6e4e9e5892c4bf9
|
Shell
|
pseudoPixels/SciWorCS
|
/app_collaborative_sci_workflow/External_Libraries/NiCad-4.0/scripts/Rename
|
UTF-8
| 3,402
| 3.78125
| 4
|
[
"LicenseRef-scancode-txl-10.5",
"MIT"
] |
permissive
|
#!/bin/bash
# Generic NiCad renaming script
#
# Usage: Rename granularity language pcfile.xml renaming
# where granularity is one of: { functions blocks ... }
# and language is one of: { c java cs py ... }
# and pcfile.xml is an edtracted potential clones file
# and renaming is one of: { blind, consistent }
# Revised 11.8.15
ulimit -s hard
# $5 (undocumented in the usage text): file the renamed output is
# written to. NOTE(review): the echoed command line below still shows
# the historical "> ${pcfile}-${renaming}.xml" redirect -- confirm
# which destination is intended.
output_destination=$5
# Find our installation
lib="${0%%/scripts/Rename}"
if [ ! -d ${lib} ]
then
    echo "*** Error: cannot find NiCad installation ${lib}"
    echo ""
    exit 99
fi
# check granularity
if [ "$1" != "" ]
then
    granularity=$1
    shift
else
    echo "Usage: Rename granularity language pcfile.xml renaming "
    echo " where granularity is one of: { functions blocks ... }"
    echo " and language is one of: { c java cs py ... }"
    echo " and pcfile.xml is an edtracted potential clones file"
    echo " and renaming is one of: { blind, consistent }"
    exit 99
fi
# check language
if [ "$1" != "" ]
then
    language=$1
    shift
else
    echo "Usage: Rename granularity language pcfile.xml renaming "
    echo " where granularity is one of: { functions blocks ... }"
    echo " and language is one of: { c java cs py ... }"
    echo " and pcfile.xml is an edtracted potential clones file"
    echo " and renaming is one of: { blind, consistent }"
    exit 99
fi
# check we have a potential clones file
if [ "$1" != "" ]
then
    pcfile=${1%%.xml}
    shift
else
    pcfile=""
fi
if [ ! -s "${pcfile}.xml" ]
then
    echo "Usage: Rename granularity language pcfile.xml renaming "
    echo " where granularity is one of: { functions blocks ... }"
    echo " and language is one of: { c java cs py ... }"
    echo " and pcfile.xml is an edtracted potential clones file"
    echo " and renaming is one of: { blind, consistent }"
    exit 99
fi
# check renaming
if [ "$1" = "blind" ] || [ "$1" = "consistent" ]
then
    renaming=$1
    shift
else
    echo "Usage: Rename granularity language pcfile.xml renaming "
    echo " where granularity is one of: { functions blocks ... }"
    echo " and language is one of: { c java cs py ... }"
    echo " and pcfile.xml is an edtracted potential clones file"
    echo " and renaming is one of: { blind, consistent }"
    exit 99
fi
# check we have the renamer we need
if [ ! -s ${lib}/txl/${language}-rename-${renaming}-${granularity}.txl ]
then
    echo "*** ERROR: ${renaming} renaming not supported for ${language} ${granularity}"
    exit 99
fi
if [ ! -x ${lib}/txl/${language}-rename-${renaming}-${granularity}.x ]
then
    echo "*** ERROR: ${lib}/txl/${language}-rename-${renaming}-${granularity}.txl has not been compiled - use 'make'"
    # BUG FIX: was "exti 100" (typo), so a missing compiled rule did not
    # abort and the script kept running; now it exits as intended.
    exit 100
fi
# Clean up any previous results
/bin/rm -f "${pcfile}-${renaming}.xml"
# Rename potential clones
date
echo "${lib}/tools/streamprocess.x '${lib}/txl/${language}-rename-${renaming}-${granularity}.x stdin' < ${pcfile}.xml > ${pcfile}-${renaming}.xml"
time ${lib}/tools/streamprocess.x "${lib}/txl/${language}-rename-${renaming}-${granularity}.x stdin" < ${pcfile}.xml > ${output_destination}
result=$?
echo ""
date
echo ""
exit $result
| true
|
822ba85f7d3ce9150366850e0494945eb18b00fc
|
Shell
|
korostelevm/ExpressLambda
|
/deploy.sh
|
UTF-8
| 1,684
| 3.3125
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
set -e
version=$(git rev-parse HEAD | cut -c1-8)
version=xxxxxxx
app_name=${PWD##*/}
bucket=$1
echo "
Deploying -
bucket: ${bucket}
version: ${version}
app_name: ${app_name}
"
# echo -n "you down with that? (y/n)? "
# read answer
# if [ "$answer" != "${answer#[Yy]}" ] ;then
# echo Deploying ${app_name}-${version}
# else
# echo Aborted
# exit 0
# fi
set -x
npm run build:microfrontend
aws cloudformation deploy --template-file template-static-infra.yaml --stack-name ${app_name}-static-infra --capabilities CAPABILITY_NAMED_IAM --no-fail-on-empty-changeset
APIDomainName=$(aws cloudformation describe-stack-resources --stack-name ${app_name}-static-infra --logical-resource-id APIDomainName --query 'StackResources[].{name:PhysicalResourceId}' --output text)
sam package --template-file template-api.yaml --s3-bucket ${bucket} --s3-prefix ${app_name}-${version} --output-template-file template-api-built.yaml
sam deploy --template-file template-api-built.yaml --stack-name ${app_name}-${version}-api --capabilities CAPABILITY_NAMED_IAM --no-fail-on-empty-changeset
# launch/update zdt switch stack wand point to api from service
RestApiId=$(aws cloudformation describe-stack-resources --stack-name ${app_name}-${version}-api --logical-resource-id ServiceApi --query 'StackResources[].{name:PhysicalResourceId}' --output text)
aws cloudformation deploy --template-file template-version-pointer.yaml --stack-name ${app_name}-version-pointer --capabilities CAPABILITY_NAMED_IAM --no-fail-on-empty-changeset --parameter-overrides RestApiId=${RestApiId} APIDomainName=${APIDomainName} CreateMapping=true
rm template-api-built.yaml
| true
|
ad908e1372611d60c95d9a91b79c0228a93dfc5f
|
Shell
|
nvm-sh/nvm
|
/test/installation_node/install hook
|
UTF-8
| 1,404
| 3.8125
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Regression test: `nvm install` must invoke NVM_INSTALL_THIRD_PARTY_HOOK,
# include its output before the normal install output, and propagate a
# failing hook status as the literal word "failed".
die () { echo "$@" ; exit 1; }
. ../../nvm.sh
VERSION='v0.11.0'
VERSION_PATH="$(nvm_version_path "${VERSION}")"
# Hook that accepts the install: echoes its args, then installs normally
# (with the hook variable cleared to avoid recursion).
succeed() {
  nvm_echo "$@"
  NVM_INSTALL_THIRD_PARTY_HOOK= nvm install "${VERSION}"
}
# Hook that still installs but reports failure (exit 11).
fail() {
  succeed "$@"
  return 11
}
! nvm_is_version_installed "${VERSION}" || nvm uninstall "${VERSION}" || die 'uninstall failed'
# an existing but empty VERSION_PATH directory should not be enough to satisfy nvm_is_version_installed
rm -rf "${VERSION_PATH}"
mkdir -p "${VERSION_PATH}"
nvm_is_version_installed "${VERSION}" && die 'nvm_is_version_installed check not strict enough'
rmdir "${VERSION_PATH}"
# Success path: hook output + install output + `nvm use` output.
OUTPUT="$(NVM_INSTALL_THIRD_PARTY_HOOK=succeed nvm install "${VERSION}")"
USE_OUTPUT="$(nvm use "${VERSION}")"
EXPECTED_OUTPUT="${VERSION} node std binary ${VERSION_PATH}
Downloading and installing node ${VERSION}...
${USE_OUTPUT}"
[ "${OUTPUT}" = "${EXPECTED_OUTPUT}" ] || die "expected >${EXPECTED_OUTPUT}<; got >${OUTPUT}<"
! nvm_is_version_installed "${VERSION}" || nvm uninstall "${VERSION}" || die 'uninstall 2 failed'
# Failure path: same output, with "failed" appended because the hook
# returned non-zero.
OUTPUT="$(NVM_INSTALL_THIRD_PARTY_HOOK=fail nvm install "${VERSION}" || echo 'failed')"
USE_OUTPUT="$(nvm use "${VERSION}")"
EXPECTED_OUTPUT="${VERSION} node std binary ${VERSION_PATH}
Downloading and installing node ${VERSION}...
${USE_OUTPUT}
failed"
[ "${OUTPUT}" = "${EXPECTED_OUTPUT}" ] || die "expected >${EXPECTED_OUTPUT}<; got >${OUTPUT}<"
| true
|
491c0e943ecbdcf1f6005590081cfb056be475b2
|
Shell
|
bryanrossUK/dotfiles
|
/dot_config/bin/executable_hidefiles
|
UTF-8
| 173
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Hide hidden files in the macOS Finder: sets AppleShowAllFiles to
# false, then restarts Finder so the change takes effect.
hidefiles()
{
    defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder
}
hidefiles
| true
|
9b06b55d0b45e78a3d1370350daac71c694050b4
|
Shell
|
staswiner/bash_scripts
|
/Logger/logger
|
UTF-8
| 1,011
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Logger levels, set global variable LOGGER_LEVEL={0,1,2,3}
# Silent 0
# Info, Error 1
# Warn 2
# Debug 3
# Core emitter: $1 = minimum level required for this message,
# $2 = ANSI color code, $3 = tag printed in brackets, rest = message.
# Reads the current level via ./getLoggerLevel each call.
# NOTE(review): getLoggerLevel/setLoggerLevel are resolved relative to
# the caller's cwd -- confirm they are always co-located.
logger.base() {
    LOGGER_LEVEL=`./getLoggerLevel`
    logger.setDefaultLevel $LOGGER_LEVEL
    function_logger_level=$1; shift;
    # Suppress messages above the configured verbosity.
    if [[ $LOGGER_LEVEL -lt $function_logger_level ]]
    then
        return 1
    fi
    color=$1; shift;
    log_type=$1; shift;
    white_color='\e[97m'
    _time=`date +"%T"`
    echo -e "$color$_time [$log_type]:$white_color $@"
}
# Persist "debug" as the default level when none has been stored yet.
logger.setDefaultLevel() {
    LOGGER_LEVEL=$1
    if [[ -z $LOGGER_LEVEL ]]
    then
        ./setLoggerLevel debug
    fi
}
# Level-1 red "Error" message.
logger.error() {
    red='\e[31m'
    function_logger_level=1
    logger.base $function_logger_level $red 'Error' $@
}
# Level-1 green "Info" message.
logger.info() {
    green='\e[32m'
    function_logger_level=1
    logger.base $function_logger_level $green 'Info' $@
}
# Level-2 yellow "Warn" message.
logger.warn() {
    yellow='\e[93m'
    function_logger_level=2
    logger.base $function_logger_level $yellow 'Warn' $@
}
# Level-3 blue "Debug" message.
logger.debug() {
    blue='\e[34m'
    function_logger_level=3
    logger.base $function_logger_level $blue 'Debug' $@
}
| true
|
aaed17aa4be20d2dc1ce39f96048f38938b46ecf
|
Shell
|
1nam/List-of-bash-files
|
/terminal-curser-position.sh
|
UTF-8
| 287
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# 1nam Testing tput cup terminal curser postion.
clear
tput cup 10 5 # setting postion tput cup x, y
read -p 'Enter Name: ' raw
# BUG FIX: the original condition was `[[ $raw -eq $raw ]]`, an
# *arithmetic* self-comparison that is effectively always true (any
# non-numeric string evaluates to 0, so 0 -eq 0). Greet only when a
# name was actually entered.
if [[ -n $raw ]]
then
    clear && echo "Good Evening $raw." && tput cup 0 20 # setting postion tput cup x, y
    sleep 2
    clear
fi
| true
|
f108090b0dabc471e85bbe992760783a741e24c3
|
Shell
|
paulojeronimo/dotfiles
|
/.scripts/docker/docker-termux
|
UTF-8
| 1,310
| 4.34375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Base for this script: https://github.com/termux/termux-docker/blob/master/run.sh
#
# Manage a Termux container: run/start (create on demand, then log in),
# stop, or remove it. A dotfile named ".<scriptname>" in the current
# directory, when present, is sourced and may override $container_name.
set -eou pipefail
docker_log=$(mktemp)
default_container_name=${0##*/}
ok() { echo ok; }
fail() { echo fail!; cat $docker_log; }
# Optional per-directory config file.
f=.${0##*/}
if ! [ -f $f ]
then
    container_name=${f#\.}
else
    source "$PWD/$f"
fi
echo Container: $container_name
case "${1:-run}" in
    run|start)
        docker start $container_name &> /dev/null || {
            echo Creating container ...
            # Default container: mount $HOME at /mnt/home-$USER; a
            # custom container name mounts the current directory.
            mount_source=$HOME
            mount_target=home-$USER
            [ "$container_name" = "$default_container_name" ] || {
                mount_source=$PWD
                mount_target=$container_name
            }
            docker run \
                --mount type=bind,source="$mount_source",target=/mnt/$mount_target \
                --detach \
                --name $container_name \
                --tty \
                termux/termux-docker:x86_64
        }
        echo Acccessing container ...
        docker exec --interactive --tty $container_name \
            /data/data/com.termux/files/usr/bin/login
        ;;
    stop)
        echo -n Stopping container "... "
        docker stop $container_name &> $docker_log && ok || fail
        ;;
    rm|remove)
        echo -n Removing container "... "
        docker rm $container_name &> $docker_log && ok || fail
        ;;
    *)
        # BUG FIX: the heredoc delimiter was quoted ('EOF'), so "$0"
        # printed literally in the usage text; unquoted, it expands to
        # the script name as intended.
        cat <<EOF
Usage:
$0 <<run|start>|stop|<rm|remove>>
EOF
esac
| true
|
02ab8769b897cdbf0abc402eaae9840d8e177de8
|
Shell
|
gsisson/bin
|
/aws-keys-to-creds-file.sh
|
UTF-8
| 1,841
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Paste temporary AWS credentials from the clipboard into
# ~/.aws/credentials (under the $AWS_PROFILE section), then count down
# their lifetime in the iTerm2 tab title, flashing the tab red when
# they are close to / past expiry.
# $1 (optional): credential lifetime in minutes (default 59); an
#                all-digit argument also bypasses the clipboard check.
pbpaste | grep -i AWS_SESSION_TOKEN >/dev/null
if [ $? != 0 ] ; then
    # BUG FIX: the original guard was `[[ "$1" =~ [0-9]* ]]`, which
    # matches the empty string and therefore ALWAYS succeeded, so the
    # "clipboard doesn't appear to contain credentials" error could
    # never fire. Require an actual all-digit argument to bypass it.
    if [[ "${1:-}" =~ ^[0-9]+$ ]]; then
        echo $1 has digits
        :
    else
        echo "clipboard doesn't appear to contain credentials!"
        exit 1
    fi
fi
if [ "$AWS_REGION" = "" ]; then
    echo "AWS_REGION is not set in the environment!"
    exit 1
fi
if [ "$AWS_PROFILE" = "" ]; then
    echo "AWS_PROFILE is not set in the environment!"
    exit 1
fi
# Locate the profile section header and position vi one line below it.
line_no=`grep -n $AWS_PROFILE ~/.aws/credentials | cut -f1 -d:`
let "line_no=line_no+1"
if [ $? != 0 ] ; then
    echo "~/.aws/credentials does not have a profile section [$AWS_PROFILE]!"
    exit 1
fi
# Rewrite the exported shell lines on the clipboard into credentials-file
# key=value form, ready to paste inside vi.
pbpaste | sed \
    -e 's|export AWS_ACCESS_KEY_ID="\(.*\)"|aws_access_key_id=\1|' \
    -e 's|export AWS_SECRET_ACCESS_KEY="\(.*\)"|aws_secret_access_key=\1|' \
    -e 's|export AWS_SESSION_TOKEN="\(.*\)"|aws_session_token=\1|' | pbcopy
vi +0$line_no ~/.aws/credentials
# Sanity-check the new credentials immediately.
echo + aws s3 ls
aws s3 ls || exit 1
iterm-set-tab-color-red() {
    echo -ne "\033]6;1;bg;red;brightness;255\a\033]6;1;bg;blue;brightness;0\a\033]6;1;bg;green;brightness;0\a"
}
iterm-reset-tab-color() {
    echo -ne "\033]6;1;bg;*;default\a"
}
iterm-set-tab-name() {
    echo -ne "\033]0;$@\007"
}
iterm-reset-tab-color
time=59
if [ -n "$1" ] ; then
    time=$1
fi
# Count down minute by minute, re-checking the credentials each pass;
# stop counting normally once fewer than 15 minutes remain.
while : ; do
    aws s3 ls > /dev/null || iterm-set-tab-color-red
    aws s3 ls > /dev/null || echo "CREDENTIALS EXPIRED!!"
    let "time=time-1"
    echo "time left: $time min"
    iterm-set-tab-name "⏰ ⏰ $time minutes left ⏰ ⏰"
    if [ "$time" -lt 15 ]; then
        break
    fi
    sleep 60
done
# Final-warning phase: flash the tab red once per second, forever.
while : ; do
    for i in 1 2 3 4 5; do
        for j in 1 2 3 4 5 6; do
            iterm-set-tab-color-red
            sleep 1
            iterm-reset-tab-color
            sleep 1
        done
    done
    let "time=time-1"
    echo "time left: $time min"
    iterm-set-tab-name "⏰ ⏰ $time minutes left ⏰ ⏰"
done
| true
|
a86b74f9946fe4bfae9247a5ed77d24d462e40eb
|
Shell
|
coltonjgerber/bin-ghamton
|
/auto/auto_bader_crontab.sh
|
UTF-8
| 1,631
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Cron helper: when VASP calculations in the found folders have
# finished, run Bader charge analysis (chgsum.pl + bader) in each;
# otherwise, once the slurm log reports convergence, kick off the
# charge-analysis job via auto_chg.sh.
# $1: number of ions, $2: ion element (both forwarded to auto_chg.sh).
set -e
source rerunVASP_functions.sh
if ! [ $# -eq 0 ]; then
    # Options must be included separately, e.g. -c -v, and NOT -cv
    while :; do
        case $1 in
            (-v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
                ;;
            (--) # End of all options.
                shift
                break
                ;;
            (-?*)
                printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
                ;;
            (*) # Default case: No more options, so break out of the loop.
                break
        esac
        shift
    done
fi
auto_crontab_ion_element="${2}"
auto_crontab_num_ions="${1}"
find_folder_list
# printf "Checking for folders ... "
if [[ -n "${folder_list}" ]] ; then
    # printf "found folders\n"
    # printf "Checking if calculations finished in folders ... "
    if check_folders_finished ; then
        # printf "calculations finished in folders\n"
        while IFS="" read -r folder || [ -n "${folder}" ] ; do
            cd "${folder}"
            chgsum.pl AECCAR0 AECCAR2
            bader CHGCAR -ref CHGCAR_sum
            species="$(grep "TITEL" POTCAR | sed 's/ TITEL = PAW_PBE /""/')"
            # BUG FIX: the original had an `elif` branch with an empty
            # body and an unmatched trailing `fi` at the end of the
            # file -- both bash syntax errors that prevented the script
            # from running at all. The per-species handling was clearly
            # unfinished (`num_species` was never assigned and the
            # branch body was a bare `grep -o ""` that would have eaten
            # the folder list from stdin); keep the skeleton valid with
            # no-op placeholders until it is implemented.
            if [ "${num_species:-0}" -eq 2 ] ; then
                : # TODO: handle two-species POTCAR (${species})
            elif [ "${num_species:-0}" -eq 3 ] ; then
                : # TODO: handle three-species POTCAR (${species})
            fi
            cd ..
        done < <(printf '%s\n' "${folder_list}")
    fi
else
    # printf "no appropriate folders found\n"
    find_slurm_and_job
    # printf "Checking to see if slurm file exists ... "
    if [[ -n "${slurm_file}" ]] ; then
        # printf "slurm found\n"
        # printf "Checking slurm to see if finished ... "
        if [[ $(grep 'reached required accuracy - stopping structural energy minimisation' "${slurm_file}") ]] ; then
            # printf "calculation finished\n"
            auto_chg.sh "${auto_crontab_num_ions}" "${auto_crontab_ion_element}"
        fi
    fi
fi
| true
|
eb2fc427196e76711b95d6d77f0a301e3cede4e7
|
Shell
|
qyouurcs/ncut
|
/image_quantization/scolorq/rename_prefix.sh
|
UTF-8
| 132
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Prefix every file in the given directory with "o_".
# Usage: rename_prefix.sh <dst_dir>
if [ $# -lt 1 ]; then
    echo "Usage: $0 <dst_dir>"
    exit
fi
# BUG FIX: iterate with a glob instead of parsing `ls`, and quote all
# expansions, so names containing spaces or glob characters survive.
for a in "$1"/*
do
    [ -e "$a" ] || continue   # empty directory: glob stays unexpanded
    name=${a##*/}
    mv -- "$a" "$1/o_${name}"
done
| true
|
c826b8a8feb38e5dd489d21eb4e8a30ef5a2e0a2
|
Shell
|
karlbates/dotfiles
|
/bashrc
|
UTF-8
| 10,114
| 3.34375
| 3
|
[] |
no_license
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=20000
HISTFILESIZE=100000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
# set colour codes (fg=foreground, bg=background)
# name standard bright
# fg bg fg bg
# black 30 40 90 100
# red 31 41 91 101
# green 32 42 92 102
# yellow 33 43 93 103
# blue 34 44 94 104
# magenta 35 45 95 105
# cyan 36 46 96 106
# white 37 47 97 107
# eg "\e[1;30;47m" = bold black letters ith white background
# ^^ escape code (\e or \033)
# ^ [ required
# ^ format, optional (0=standard, 1=bold)
# ^ separator
# ^^ text code
# ^ separator
# ^^ background code
# ^ end code
C_RESET="\033[0m"
# standard colours
C_black="\033[30m"
C_red="\033[31m"
C_green="\033[32m"
C_yellow="\033[33m"
C_blue="\033[34m"
C_magenta="\033[35m"
C_cyan="\033[36m"
C_white="\033[37m"
# bold standard colours
C_BLACK="\033[1;30m"
C_RED="\033[1;31m"
C_GREEN="\033[1;32m"
C_YELLOW="\033[1;33m"
C_BLUE="\033[1;34m"
C_MAGENTA="\033[1;35m"
C_CYAN="\033[1;36m"
C_WHITE="\033[1;37m"
# bright colours
C_B_black="\033[90m"
C_B_red="\033[91m"
C_B_green="\033[92m"
C_B_yellow="\033[93m"
C_B_blue="\033[94m"
C_B_magenta="\033[95m"
C_B_cyan="\033[96m"
C_B_white="\033[97m"
# bold bright colours
C_B_BLACK="\033[1;90m"
C_B_RED="\033[1;91m"
C_B_GREEN="\033[1;92m"
C_B_YELLOW="\033[1;93m"
C_B_BLUE="\033[1;94m"
C_B_MAGENTA="\033[1;95m"
C_B_CYAN="\033[1;96m"
C_B_WHITE="\033[1;97m"
# custom colours
C_OCHRE="\033[38;5;95m"
C_WHITE_RED="\033[1;37;41m"
function git_branch {
local git_status="$(timeout 1 git status 2> /dev/null)"
local on_branch="On branch ([^${IFS}]*)"
local on_commit="HEAD detached at ([^${IFS}]*)"
if [[ $git_status =~ $on_branch ]]; then
local branch=${BASH_REMATCH[1]}
echo "($branch)"
elif [[ $git_status =~ $on_commit ]]; then
local commit=${BASH_REMATCH[1]}
echo "($commit)"
fi
}
function git_colour {
local git_status="$(timeout 2 git status 2> /dev/null)"
if [[ ! $git_status =~ "working directory clean" ]]; then
echo -e ${C_RED}red
elif [[ $git_status =~ "Your branch is ahead of" ]]; then
echo -e ${C_YELLOW}yellow
elif [[ $git_status =~ "nothing to commit" ]]; then
echo -e ${C_GREEN}green
else
echo -e ${C_OCHRE}ochre
fi
}
function parse_git_branch {
local branch=$(git_branch)
case "$branch" in
"(master)"|master|"(main)"|main)
echo -en " ${C_red}${branch}"
;;
"")
;;
*)
echo -en " ${C_YELLOW}${branch}"
;;
esac
}
function user_colour {
case $USER in
root)
echo -e $C_WHITE_RED
;;
*)
echo -e $C_MAGENTA
;;
esac
}
if [ "$color_prompt" = yes ]; then
PS1="${debian_chroot:+($debian_chroot)}"
PS1+="\[\$(user_colour)\]\u"
PS1+="\[$C_RESET\]@"
PS1+="\[$C_GREEN\]\h"
PS1+="\[$C_RESET\]:"
PS1+="\[$C_BLUE\]\W"
PS1+="\[$C_RESET\]"
PS1+="\$(parse_git_branch)"
PS1+="\n"
PS1+="\[$C_RESET\]\$ "
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi
# added by perl
PATH="/home/$USER/perl5/bin${PATH:+:${PATH}}"; export PATH;
PERL5LIB="/home/$USER/perl5/lib/perl5${PERL5LIB:+:${PERL5LIB}}"; export PERL5LIB;
PERL_LOCAL_LIB_ROOT="/home/$USER/perl5${PERL_LOCAL_LIB_ROOT:+:${PERL_LOCAL_LIB_ROOT}}"; export PERL_LOCAL_LIB_ROOT;
PERL_MB_OPT="--install_base \"/home/$USER/perl5\""; export PERL_MB_OPT;
PERL_MM_OPT="INSTALL_BASE=/home/$USER/perl5"; export PERL_MM_OPT;
##########
## self added
export EDITOR=vim
export USING_PER_DEV_REPLICATION=1
export PIPENV_VENV_IN_PROJECT=1
if [[ -f $HOME/.env ]]; then
source $HOME/.env
fi
function path_remove {
# Delete path by parts so we can never accidentally remove sub paths
PATH=${PATH//":$1:"/":"} # delete any instances in the middle
PATH=${PATH/#"$1:"/} # delete any instance at the beginning
PATH=${PATH/%":$1"/} # delete any instance in the at the end
}
function _tmux {
tmux new-session -d -s $1
tmux send-keys -t $1 "export PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/var/cfengine/bin:/usr/share/doc/subversion-1.6.11:/opt/mv_pro_5.0/montavista/pro/devkit/arm/v5t_le/bin/:/home/london/$USER/bin" enter
tmux send-keys -t $1 "unset XDG_DATA_DIRS" Enter
tmux send-keys -t $1 "unset X_SCLS" Enter
tmux send-keys -t $1 "unset PKG_CONFIG_PATH" Enter
tmux send-keys -t $1 "unset LD_LIBRARY_PATH" Enter
tmux attach -t $1
}
alias tmuxx=_tmux
function glog {
git log --graph --all --format=format:"%x09%C(yellow)%h%C(reset) %C(green)%ai%x08%x08%x08%x08%x08%x08%C(reset) %C(bold white)%cn%C(reset)%C(auto)%d%C(reset)%n%x09%C(white)%s%C(reset)" --abbrev-commit "$@"
echo
}
function docker_login {
docker login registry.gitlab.com -u ${USER}@mintel.com -p ${GITLAB_REGISTRY_TOKEN}
}
if [ -d /opt/rh/rh-python36 ]; then
source /opt/rh/rh-python36/enable
fi
if [ -d /opt/rh/httpd24 ]; then
source /opt/rh/httpd24/enable
fi
if [ -f $HOME/.local/autocomplete/make_sh_autocomplete ]; then
source $HOME/.local/autocomplete/make_sh_autocomplete
fi
# CUDA et al
if [ -d /usr/local/cuda ]; then
PATH=/usr/local/cuda/bin${PATH:+:${PATH}}
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
export CUDA_HOME=/usr/local/cuda
fi
if [ -f $HOME/.poetry/env ]; then
source $HOME/.poetry/env
fi
# jfrog
if [[ -f $HOME/.jfrog-credentials ]]; then
now=$(date +%s)
enddate=$(jq -r .expiry_date $HOME/.jfrog-credentials)
jfrog_user=$(jq -r .username $HOME/.jfrog-credentials)
end=$(date -d ${enddate} +%s)
if [[ $now -gt $end ]]; then
rm $HOME/.jfrog-credentials
rm $HOME/.jfrog-env
fi
fi
if [[ -n $PORTUNUS_USER ]]; then
jfrog_user=$PORTUNUS_USER
fi
if [[ -f $HOME/.jfrog-env ]]; then
source $HOME/.jfrog-env
elif [[ `which portunus` ]]; then
if [[ -n "$jfrog_user" ]]; then
echo "portunus (user: $jfrog_user)"
portunus -u $jfrog_user
else
echo portunus
portunus
fi
source $HOME/.jfrog-env
fi
_=`which poetry`
if [[ $? -eq 0 ]]; then
if [[ -n $JFROG_USERNAME ]] && [[ -n $JFROG_ACCESS_TOKEN ]]; then
poetry config http-basic.jfrog ${JFROG_USERNAME} ${JFROG_ACCESS_TOKEN}
fi
poetry config virtualenvs.in-project true
fi
if [[ -d "./bin" ]]; then
export PATH=`pwd`/bin:$PATH
fi
# thefuck
_=`which thefuck`
if [[ $? -eq 0 ]]; then
eval "$(thefuck --alias)"
fi
# asdf
if [[ -f $HOME/.asdf/asdf.sh ]]; then
# . $HOME/.asdf/asdf.sh
:
fi
if [[ -f $HOME/.asdf/completions/asdf.bash ]]; then
. $HOME/.asdf/completions/asdf.bash
fi
| true
|
1dff211b5e7a719bc6ec998684df1574b493e316
|
Shell
|
gmaclennan/dotfiles
|
/functions.zsh
|
UTF-8
| 3,620
| 3.671875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Force SD card to be re-mounted
function mountsd() {
sudo kextunload -b com.apple.driver.AppleSDXC;
sudo kextload -b com.apple.driver.AppleSDXC;
}
# Open Android avd in emulator
function avd() {
cd ~/Library/Android/sdk/emulator
emulator -avd $(emulator -list-avds | head -n 1)
}
# weather forecast
weather() { curl -4 wttr.in/${1:-london} }
# Simple calculator
function calc() {
local result="";
result="$(printf "scale=10;$*\n" | bc --mathlib | tr -d '\\\n')";
# └─ default (when `--mathlib` is used) is 20
#
if [[ "$result" == *.* ]]; then
# improve the output for decimal numbers
printf "$result" |
sed -e 's/^\./0./' `# add "0" for cases like ".5"` \
-e 's/^-\./-0./' `# add "0" for cases like "-.5"`\
-e 's/0*$//;s/\.$//'; # remove trailing zeros
else
printf "$result";
fi;
printf "\n";
}
# Create a new directory and enter it
function mkd() {
mkdir -p "$@" && cd "$@";
}
# Determine size of a file or total size of a directory
function fs() {
if du -b /dev/null > /dev/null 2>&1; then
local arg=-sbh;
else
local arg=-sh;
fi
if [[ -n "$@" ]]; then
du $arg -- "$@";
else
du $arg .[^.]* *;
fi;
}
# Use Git’s colored diff when available
hash git &>/dev/null;
if [ $? -eq 0 ]; then
function diff() {
git diff --no-index --color-words "$@";
}
fi;
# Create a data URL from a file
function dataurl() {
local mimeType=$(file -b --mime-type "$1");
if [[ $mimeType == text/* ]]; then
mimeType="${mimeType};charset=utf-8";
fi
echo "data:${mimeType};base64,$(openssl base64 -in "$1" | tr -d '\n')";
}
# Compare original and gzipped file size
function gz() {
local origsize=$(wc -c < "$1");
local gzipsize=$(gzip -c "$1" | wc -c);
local ratio=$(echo "$gzipsize * 100 / $origsize" | bc -l);
printf "orig: %d bytes\n" "$origsize";
printf "gzip: %d bytes (%2.2f%%)\n" "$gzipsize" "$ratio";
}
# Run `dig` and display the most useful info
function digga() {
dig +nocmd "$1" any +multiline +noall +answer;
}
# Decode \x{ABCD}-style Unicode escape sequences
function unidecode() {
perl -e "binmode(STDOUT, ':utf8'); print \"$@\"";
# print a newline unless we’re piping the output to another program
if [ -t 1 ]; then
echo ""; # newline
fi;
}
# `tre` is a shorthand for `tree` with hidden files and color enabled, ignoring
# the `.git` directory, listing directories first. The output gets piped into
# `less` with options to preserve color and line numbers, unless the output is
# small enough for one screen.
function tre() {
tree -aC -I '.git|node_modules|bower_components' --dirsfirst "$@" | less -FRNX;
}
# From Dan Ryan's blog - http://danryan.co/using-antigen-for-zsh.html
man() {
env \
LESS_TERMCAP_mb=$(printf "\e[1;31m") \
LESS_TERMCAP_md=$(printf "\e[1;31m") \
LESS_TERMCAP_me=$(printf "\e[0m") \
LESS_TERMCAP_se=$(printf "\e[0m") \
LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
LESS_TERMCAP_ue=$(printf "\e[0m") \
LESS_TERMCAP_us=$(printf "\e[1;32m") \
man "$@"
}
# Start an HTTP server from a directory, optionally specifying the port
serve() {
local port="${1:-8000}";
sleep 1 && open "http://localhost:${port}/" &
# Set the default Content-Type to `text/plain` instead of `application/octet-stream`
# And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port";
}
| true
|
4196de2ee4693ed318489b3550eadc0d2767b34f
|
Shell
|
excaliburQc/L4T-Megascript
|
/scripts/games_and_emulators/SRB2Kart.sh
|
UTF-8
| 1,934
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
cd ~
clear -x
echo "SRB2 Kart script started!"
echo "Downloading the files, and installing needed dependencies..."
sleep 2
sudo rm -r /usr/share/SRB2Kart
cd ~/.srb2kart
#rm kartconfig.cfg
cd /usr/share/applications
sudo rm "SRB2 Kart.desktop"
cd
sudo apt install wget curl libsdl2-dev libsdl2-mixer-dev cmake extra-cmake-modules subversion p7zip-full -y
wget https://github.com/STJr/Kart-Public/archive/master.zip
unzip master.zip
svn export https://github.com/$repository_username/L4T-Megascript/trunk/assets/SRB2Kart
mkdir -p SRB2Kart-Data && cd SRB2Kart-Data
wget $(curl --silent "https://api.github.com/repos/STJr/Kart-Public/releases/latest" | grep "Installer" | grep ".exe" | cut -c 31- | cut -d '"' -f 2) -O SRB2Kart.exe
7z x SRB2Kart.exe
cd ~/Kart-Public-master/assets
mkdir -p installer
cd ~/SRB2Kart-Data
mv chars.kart bonuschars.kart gfx.kart maps.kart patch.kart music.kart sounds.kart srb2.srb textures.kart -t ~/Kart-Public-master/assets/installer
cd ~/Kart-Public-master
mkdir -p build && cd build
echo
echo "Compiling the game..."
sleep 1
echo
cmake ..
make -j$(nproc)
echo
echo "Game compiled!"
sleep 1
cd ~
echo "Erasing temporary build files to save space, installing the direct access and configuration files....."
sleep 5
echo
mkdir -p .srb2kart
rm -r SRB2Kart-Data
cd SRB2Kart
chmod 777 SRB2Kart.sh
mv kartconfig.cfg -t ~/.srb2kart
sudo mv "SRB2 Kart.desktop" -t /usr/share/applications
cd ~/Kart-Public-master
mv assets -t ~/SRB2Kart
cd ~/Kart-Public-master/build
mv bin -t ~/SRB2Kart
cd ~
rm master.zip*
rm -r Kart-Public-master
sudo mv SRB2Kart -t /usr/share
echo
echo "Game Installed!"
echo
echo
echo "[NOTE] Remember NOT to move the SRB2Kart folder or any file inside it or the game will stop working."
echo "If the game icon doesn't appear inmediately, restart the system."
echo "This message will close in 10 seconds."
sleep 10
echo
echo "Sending you back to the main menu..."
sleep 1
| true
|
ff714c6994ec481243c33e42fc8b39af11f86d4b
|
Shell
|
Niranjan-Ananth/Operating-Systems-Lab
|
/UNIX/prog1.sh
|
UTF-8
| 144
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
if [ $# -gt 0 ]
then
while [ $# -gt 0 ]
do
rev="$1 $rev"
shift
done
echo "$rev"
else
echo "Invalid number of arguments"
fi
| true
|
b0a2e541e40125f9a9f85c8a921594b7d7bf6f34
|
Shell
|
bdonne/ACIT3900_ISSP
|
/Kiosk_config/kiosk_setup_1.sh
|
UTF-8
| 2,330
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Config current Ubuntu into a Kiosk System
# Author: Jun.C
# download all necessary files from git
sudo wget https://raw.githubusercontent.com/bdonne/ACIT3900_ISSP/master/Kiosk_config/custom.conf
sudo wget https://raw.githubusercontent.com/bdonne/ACIT3900_ISSP/master/Kiosk_config/firefoxKiosk.sh
sudo wget https://raw.githubusercontent.com/bdonne/ACIT3900_ISSP/master/Kiosk_config/firefoxKiosk.sh.desktop
sudo wget https://raw.githubusercontent.com/bdonne/ACIT3900_ISSP/master/Kiosk_config/vagrant
# Auto Login
sudo echo "Setting up auto login ..."
sudo chown root:root ./custom.conf
sudo cp ./custom.conf /etc/gdm3/
# Setup xsession for kiosk system under vagrant a/c
sudo echo "Setting up xsession ..."
sudo chown root:root ./firefoxKiosk.sh
sudo chmod 766 ./firefoxKiosk.sh
sudo cp ./firefoxKiosk.sh /usr/share/xsessions/
sudo chown root:root ./firefoxKiosk.sh.desktop
sudo cp ./firefoxKiosk.sh.desktop /usr/share/xsessions/
sudo chown root:root ./vagrant
sudo chmod 600 ./vagrant
sudo cp ./vagrant /var/lib/AccountsService/users/
# Disable all shortcuts
#sudo echo "Disable all keyboard short-cuts..."
#sudo -Hu vagrant dbus-launch gsettings list-keys org.gnome.settings-daemon.plugins.media-keys | xargs -I@ sudo -Hu vagrant dbus-launch gsettings set org.gnome.settings-daemon.plugins.media-keys @ ['']
#sudo -Hu vagrant dbus-launch gsettings list-keys org.gnome.desktop.wm.keybindings | xargs -I@ sudo -Hu vagrant dbus-launch gsettings set org.gnome.desktop.wm.keybindings @ ['']
#sudo -Hu vagrant dbus-launch gsettings list-keys org.gnome.shell.keybindings | xargs -I@ sudo -Hu vagrant dbus-launch gsettings set org.gnome.shell.keybindings @ ['']
#sudo -Hu vagrant dbus-launch gsettings set org.gnome.desktop.wm.keybindings close "['<Alt>F4']"
# Add a new user 'Admin'
sudo echo "Added 'Admin' and added it into sudo group"
sudo groupadd Admin
sudo useradd Admin -s /bin/bash -m -g Admin -G sudo
sudo usermod --password $(openssl passwd -1 Password) Admin
# Rename vagrant to student
sudo echo "rename vagrant to student"
sudo usermod -c 'student' vagrant
# Remove current vagrant a/c from sudo and admin group
sudo echo "removed vagrant from adm group."
sudo deluser vagrant adm
sudo echo "removed vagrant from sudo group."
sudo deluser vagrant sudo
# reboot
echo "reboot..."
#sleep 5s
reboot
| true
|
8dfb06188de6de0523bd69d442b02f5b2e98626a
|
Shell
|
billy-mosse/jconsume
|
/java-project/bin/tools/instrumentationJPF.sh
|
UTF-8
| 604
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Descripcion:
# ------------
#
# Este script permite instrumentar el codigo para luego ser ejecutado y obtener metricas sobre el consumo asumiendo un esquema por scopes.
# PARAMETROS:
#
# 1: Nombre de la clase que queremos procesar. Ejemplo: ar.uba.dc.simple.EjemploSimple
# 2: Configuracion a utilizar para el proceso (en archivo *.properties).
# 3: Indica si correr previa a la instrumentacion el escape analisis o no (con true se corre)
CURRNET_DIR=$(dirname $0)
$CURRNET_DIR/../run.sh "ar.uba.dc.tools.instrumentation.resource.tracker.madeja.InstrumentationJPFTool" "$1" "$2" "$3"
| true
|
622e5563955d9b09e009d913e16bb23bd77302d3
|
Shell
|
kpensec/zsh-config
|
/.zshrc
|
UTF-8
| 2,506
| 3.234375
| 3
|
[] |
no_license
|
# Additional user paths
user_paths=(
"${HOME}/.local/usr/bin"
"${HOME}/.local/bin"
"${HOME}/.cargo/bin"
"${HOME}/.yarn/bin"
)
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Node Version Manager base directory.
export NVM_DIR="$HOME/.nvm"
# Additional sourced file
sourced_file=(
"${ZSH}/oh-my-zsh.sh"
"${HOME}/.zsh_keybind"
"${HOME}/.aliases"
"${HOME}/.functions"
"${NVM_DIR}/nvm.sh"
)
if [[ -z $USERPATH_ADDED ]]
then
# Adding user paths to PATH variable
for ex_path in ${user_paths}
do
PATH="$ex_path:$PATH"
done
export PATH
export USERPATH_ADDED=1
fi
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
if [[ "$TERM" == "linux" ]]
then
ZSH_THEME="bureau"
else
ZSH_THEME="agnoster"
fi
DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
HIST_STAMPS="yyyy-mm-dd"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
cargo
emoji
yarn
)
# User configuration
# You may need to manually set your language environment
export LANG=en_US.UTF-8
export EDITOR='vim'
# ssh default identity
export SSH_KEY_PATH="~/.ssh/rsa_id"
# non interactive skip aliases and function
[[ "$-" != *i* ]] && return
# Sourcing additional files
for to_source in ${sourced_file}
do
if [[ -f "${to_source}" ]]
then
source "${to_source}"
fi
done
# Display INSERT or NORMAL mode
function zle-line-init zle-keymap-select {
RPS1="${${KEYMAP/vicmd/-- NORMAL --}/(main|viins)/-- INSERT --}"
RPS2=$RPS1
zle reset-prompt
}
zle -N zle-line-init
zle -N zle-keymap-select
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
# do not use default ruby path
export PATH="$PATH:$HOME/.rvm/bin"
# virtual env activation
alias venva='source ./bin/activate'
| true
|
b80c155391bced88ac2b93976f23c7ffa8bbb19b
|
Shell
|
Zocalo-ICT/modsecurity-docker
|
/build.sh
|
UTF-8
| 1,119
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prerequisites:
# - Set DOCKER_USER environment variable: export DOCKER_USER='[MY_USER_NAME]'
# - Make sure buildx builder has been set to active before running this
# docker buildx use [MY_BUILDER_NAME]
# - Log in to dockerhub with docker login -u "$DOCKER_USER"
export VERSION=$(./version.sh v3-nginx/ -vvv)
export NGINX_VERSION=$(grep -m1 "ARG NGINX_VERSION" v3-nginx/Dockerfile | cut -f2 -d= | sed "s/\"//g")
export NGINX_VERSION_ALPINE=$(grep -m1 "ARG NGINX_VERSION" v3-nginx/Dockerfile-alpine | cut -f2 -d= | sed "s/\"//g")
export DOCKER_USER="zocaloict"
## Alpine
#--platform=linux/amd64,linux/arm64,linux/arm/v7 \
cp ./v3-nginx/Dockerfile-alpine .
docker buildx build \
-t "${DOCKER_USER}/modsecurity:v$VERSION-nginx_v$NGINX_VERSION_ALPINE-alpine" \
--platform=$1 \
-f Dockerfile-alpine \
--push \
.
rm ./Dockerfile-alpine
## Regular
#--platform=linux/amd64,linux/arm64,linux/arm/v7 \
cp ./v3-nginx/Dockerfile .
docker buildx build \
-t "${DOCKER_USER}/modsecurity:v$VERSION-nginx_v$NGINX_VERSION" \
--platform=$1 \
--push \
.
rm ./Dockerfile
| true
|
783aadec86b22f4b39f21b9af5a32d6b368df54e
|
Shell
|
ViktorNova/tornado-skeleton
|
/tests/test_readfile_func.sh
|
UTF-8
| 644
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function readTmplFile {
# echo $1
if [ -f "$2" ];
then
# use the local copy
# echo "Use local file: $1"
# echo $("$1")
echo ""
echo "printing string"
cat $2 > temp_test/webapp.py
else
echo $1
# fail over to remote
# echo "Use remote file $2"
curl -fsSL "$3" -o "temp_test/$1" 2>/dev/null
fi
return 0
}
mkdir -p "temp_test/app"
readTmplFile "app/webapp.py" "${SCRIPTDIR}/build_templates/app/webapp.py" "https://raw.github.com/gregory80/heroku-skeleton/master/build_templates/app/webapp.py"
cat temp_test/app/webapp.py
| true
|
1cefd6b2b7a546305779641bd160df74eecc1325
|
Shell
|
photon-platform/.photon
|
/.config/sxiv/exec/key-handler
|
UTF-8
| 1,440
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Example for $XDG_CONFIG_HOME/sxiv/exec/key-handler
# Called by sxiv(1) after the external prefix key (C-x by default) is pressed.
# The next key combo is passed as its first argument. Passed via stdin are the
# images to act upon, one path per line: all marked images, if in thumbnail
# mode and at least one image has been marked, otherwise the current image.
# sxiv(1) blocks until this script terminates. It then checks which images
# have been modified and reloads them.
# The key combo argument has the following form: "[C-][M-][S-]KEY",
# where C/M/S indicate Ctrl/Meta(Alt)/Shift modifier states and KEY is the X
# keysym as listed in /usr/include/X11/keysymdef.h without the "XK_" prefix.
rotate() {
degree="$1"
mimetype= "$(file -b -i "$file")"
case "$mimetype" in
image/jpeg*) jpegtran -rotate "$degree" -copy all -outfile "$file" "$file" ;;
*) mogrify -rotate "$degree" "$file" ;;
esac
}
while read file;
do
case "$1" in
"C-x") xclip -in -filter | tr '\n' ' ' | xclip -in -selection clipboard ;;
"C-c") while read file; do xclip -selection clipboard -target image/png "$file"; done ;;
"e") gnome-terminal -- bash -c "exiftool '$file' | less" & ;;
"s") convert "$file" -gravity Center -resize 1921 "$file.small.jpg" ;;
"g") setsid -f gimp "$file" & ;;
"d") setsid -f darktable "$file" & ;;
"R") rotate 270 ;;
"r") rotate 90 ;;
esac
done
| true
|
4ad11f997b20d08fa4bbc51d71c92950ebf785b4
|
Shell
|
BelfodilAimene/semanticWebProject4IFGuide
|
/dbpediaSPARQL.sh
|
UTF-8
| 321
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
: ${SPARQL_QUERY_PATH?"Please precise SPARQL_QUERY file path by adding SPARQL_QUERY=your_sparql_query_path"}
OUTPUT_FORMAT=${OUTPUT_FORMAT:-json}
curl -H "Accept: application/$OUTPUT_FORMAT" -g --data-urlencode query@$SPARQL_QUERY_PATH http://dbpedia.org/sparql 2>/dev/null |\
jq '.["results"]["bindings"]'
| true
|
55533fcd187f70837d544876f6940e5dddd96815
|
Shell
|
ilyapirogovs/csmc-horizon
|
/setup-env.sh
|
UTF-8
| 594
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e -o xtrace
VERSION=$MINECRAFT_VERSION-$FORGE_VERSION-$MINECRAFT_VERSION
FORGE_INSTALLER=forge-$VERSION-installer.jar
FORGE_UNIVERSAL=forge-$VERSION-universal.jar
echo eula=true > eula.txt
if [[ ! -f ./server.properties ]]; then
echo level-type=$PROPERTY_LEVEL_TYPE > server.properties
fi
if [[ ! -f ./minecraft_server.$MINECRAFT_VERSION.jar ]]; then
wget http://files.minecraftforge.net/maven/net/minecraftforge/forge/$VERSION/$FORGE_INSTALLER
echo $FORGE_INSTALLER_SHA1 $FORGE_INSTALLER | sha1sum -c
java -jar $FORGE_INSTALLER --installServer
fi
| true
|
ae7781e648a62a99e8cffe70cd173a3bc6c3b24b
|
Shell
|
sychiao/CountingCoin
|
/test.sh
|
UTF-8
| 522
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
exe=orgin/CountingCoin_CU_LLVM
total=.total
bd=.breakdown
out=out.csv
count=5
set -x
echo "Hello User please enter the title name of the data"
read title
echo $title >> $out
echo "Overhead,Erode,Dilate,Canny,Hough,cleanup,Total" >> $out
while [ $count -gt 0 ];
do
count=$(($count-1))
/usr/bin/time -f'%e' -o $total ./$exe &> $bd
data=`cat $bd`
for i in $data
do
echo -n $i >> $out
echo -n "," >> $out
done
cat $total >> $out
sleep 1
done
echo "" >> $out
echo "" >> $out
rm -f $total $bd
| true
|
daca1656a1514c510ee97f1a257b265036f23b75
|
Shell
|
Echo199801/nCovMemory
|
/archive/convertPngToJpg.bash
|
UTF-8
| 271
| 3.375
| 3
|
[] |
no_license
|
for file in png/*
do
if [ ! -d jpg ];then
mkdir jpg
fi
id=`echo "$file" | cut -c 5- | cut -d'.' -f1`
if [ -f "jpg/$id.jpg" ];then
echo "$file.jpg already exist, skip!"
else
echo Converting $file ...
gm convert -compress BZip "$file" "jpg/$id.jpg"
fi
done
| true
|
af6c7ed45b84b7d836b7bdfb12cf43317a863f63
|
Shell
|
nandub/RemoteMaster
|
/km/setup.sh
|
UTF-8
| 3,969
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Script to create .desktop files for RMIR and RM in their installation directory. This script must itself be
# in the installation directory to identify correctly the resources to which it links.
#
# For more information on .desktop files, see for example
# https://xpressrazor.wordpress.com/2013/07/07/playing-with-desktop-files-aka-application-launchers-in-linux/
#
# Following their creation, the .desktop files are made executable in accordance with that reference.
# RemoteMaster.jar is also made executable so that it can be run by double-clicking.
# Further, the user is added to the dialout group if not already a member. This is needed in order to use
# RMIR USB serial interfaces without root access.
#
# Created by Graham Dixon (mathdon), March 22, 2015.
# First identify the directory containing this script.
here=$(dirname $(readlink -sf $0)) 2>/dev/null || here=$(dirname $0)
# Test if we are in the directory containing RemoteMaster.jar
if ! [ -f $here/RemoteMaster.jar ]; then
echo "This script must be in the same directory as RemoteMaster.jar"
exit 1
fi
# Construct paths to the new files and to the jar file.
desktopRMIR=$here/RMIR.desktop
desktopRM=$here/RemoteMaster.desktop
desktopRMPB=$here/RMPB.desktop
rmprog=$here/RemoteMaster.jar
desktopdir=$HOME/.local/share/applications
[ -d $desktopdir ] || mkdir -p $desktopdir
# Create the .desktop files.
cat >$desktopRMIR << EOF
[Desktop Entry]
Comment=Edit JP1 remotes
Categories=Application;Java
Terminal=false
Name=RMIR
Exec=java -jar $rmprog -ir
Type=Application
Icon=$here/RMIR.ico
StartupNotify=true
Version=1.0
EOF
cat >$desktopRM << EOF
[Desktop Entry]
Comment=Edit JP1 device upgrades
Categories=Application;Java
Terminal=false
Name=RemoteMaster
Exec=java -jar $rmprog -rm
Type=Application
Icon=$here/RM.ico
StartupNotify=true
Version=1.0
EOF
cat >$desktopRMPB << EOF
[Desktop Entry]
Comment=Edit JP1 protocols
Categories=Application;Java
Terminal=false
Name=RMPB
Exec=java -jar $rmprog -pb
Type=Application
Icon=$here/RMPB.ico
StartupNotify=true
Version=1.0
EOF
# Copy desktop files to desktopdir
cp $desktopRMIR $desktopdir
cp $desktopRM $desktopdir
cp $desktopRMPB $desktopdir
# Set executable permissions.
chmod 775 $rmprog
chmod 775 $desktopRMIR
chmod 775 $desktopRM
chmod 775 $desktopRMPB
chmod 755 $here/irptransmogrifier.sh
echo Desktop files created and executable permissions set.
alldone()
{
echo "Setup complete."
exit 0
}
# Test if the dialout group exists, exit with message if not
grep -q dialout /etc/group || {
cat << EOF
There is no user group named "dialout" in this OS, so this script does not
know how to enable a USB serial interface to be used with RMIR. As all other
procedures of this script have been performed, setup is now complete.
EOF
exit 0
}
# Dialout group exists, test if user is a member, exit if so
id -Gn | grep -q dialout && alldone
# User is not a member, test if user is root, exit if so
if [ "$(id -u)" -eq 0 ]; then alldone; fi
# User is not root, so ask if user wishes to be added
cat << EOF
To use a USB serial interface with RMIR, you need to be a member of the
dialout group. Currently you are not a member. This script can add you
to that group but you will need to give your sudo password. Do you want
it to add you to that group?
EOF
# Force user to answer Y or N (or equivalently y or n)
reply=x
while ! echo "YN" | grep -qi $reply; do
printf "%s" "Please answer Y or N. > "
read reply
done
if echo "Y" | grep -qi $reply; then
# User answered yes
sudo usermod -aG dialout $USER
cat << EOF
Done. You will need to log out and log back in again for this change
to take effect.
EOF
else
# User answered no, so tell user what to do
cat << EOF
You will need to run the command "usermod -aG dialout $USER" to be added
to the dialout group before you can use a USB serial interface with RMIR.
After running the command, you need to log out and log back in again for
this change to take effect.
EOF
fi
alldone
| true
|
015d3489500e2be47c2f42f587c672589a487ce7
|
Shell
|
VladlenaSkubi-du/DevOps_42school
|
/inception-of-things/p1/scripts/prepare_master_vm.sh
|
UTF-8
| 1,119
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Arguments: [K3S_SERVER_IP]
echo "[INFO] Update packages on $1"
sudo yum update -y
ifconfig=$(which ifconfig)
if [ -z "$ifconfig" ]
then
echo "[INFO] Install net-tools on $1"
sudo yum install net-tools -y # download ifconfig
fi
k3s=$(which k3s)
if [ -z "$k3s" ]
then
echo "[INFO] Install k3s and set $1 as server"
curl -sfL https://get.k3s.io | sh -s - server --node-ip $1 --tls-san $1
# Problem: WARN[0014] Unable to read /etc/rancher/k3s/k3s.yaml, please start server with --write-kubeconfig-mode to modify kube config permissions
mkdir -p /home/vagrant/.kube # ignore command if directory exists
sudo cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && \
sudo chown vagrant /home/vagrant/.kube/config && \
sudo chmod 600 /home/vagrant/.kube/config && \
echo "export KUBECONFIG=/home/vagrant/.kube/config" >> /home/vagrant/.bashrc
echo "alias k=\"k3s kubectl\"" >> /home/vagrant/.bashrc
fi
echo "[INFO] Copying server $1 token to host machine to create agents"
sudo cp /var/lib/rancher/k3s/server/node-token /tmp/confs/server_token
| true
|
9b327c08c5c8f09e96493995038307023e42f5dc
|
Shell
|
Jorgeromeu/dotfiles
|
/.aliases.sh
|
UTF-8
| 979
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Shell alias definitions (sourced by the interactive shell).
# Trailing space makes the shell expand aliases in the word after 'sudo'.
alias sudo='sudo '
# python...
alias ipy='ipython'
alias pypy='pypy3'
alias py='python'
alias py39='python3.9'
alias ipy39='ipython3.9'
alias py36='python3.6'
alias ipy36='ipython3.6'
# shortened commands
alias v='nvim'
alias g='git'
alias pm='pacman'
alias spm='sudo pacman'
alias ytdl='youtube-dl'
alias cl='clear'
alias hd='hexdump' # though i use hx instead
alias xopen='xdg-open'
alias sysd='systemctl'
alias antlr='antlr4'
alias docker='sudo docker'
alias e='emacs -nw'
alias eg='emacs'
alias scrnkey='screenkey -t 0.25 --persist --opacity 0 --font-color black'
# replace 'ls' with exa + parameters
alias ls='exa -F --group-directories-first --git'
alias ll='exa -F --group-directories-first -l --git'
alias la='exa -F --color=auto --group-directories-first -a'
# default options (verbose file ops, colored grep, debug-friendly compilers)
alias cp='cp -v'
alias mv='mv -v'
alias rm='rm -v'
alias grep="grep --color=auto"
alias gcc="gcc -no-pie -Wall -g"
alias g++="g++ -no-pie -Wall -g"
alias gdb="gdb -q"
alias make="make -k"
| true
|
cd530a1eea21369b34a2e217277ec71a638eba35
|
Shell
|
liusongsir/shell
|
/fun.sh
|
UTF-8
| 591
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Demo script: defining and calling shell functions, returning values via
# the exit status, and accessing positional parameters.
# (User-facing messages are intentionally in Chinese and left unchanged.)
echo "本节演示函数的使用"
echo "首先定义一个加函数"
# fun: reads two numbers from stdin and "returns" their sum through the
# exit status.  NOTE(review): exit statuses are 0-255, so sums above 255
# wrap around modulo 256 — fine for a demo, not for real arithmetic.
fun(){
echo "请输入第一个数"
read a
echo "请输入第二个数"
read b
return $(($a + $b))
}
fun
echo "两个数求和得:$?"
echo "函数中输出参数"
# funP: prints its positional parameters and the related special variables
# ($#, $*, $@, $$).
funP(){
echo "第一个参数$1"
echo "第二个参数$2"
echo "第十个参数${10}" # from the 10th parameter onward, braces are required to access it
echo "第十一个参数${11}"
echo "总参数个数为:$#"
echo "全部参数:$*"
echo "全部参数:$@"
echo "当前进程ID:$$"
}
funP A B 3 4 5 6 7 8 9 C D F
| true
|
892963a0c8e6354b822c5fbf45e3de85cb6bf122
|
Shell
|
tolmalev/SPM
|
/install
|
UTF-8
| 14,224
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# if this is a request for the latim_pp module,
# do nothing and switch to latim_pp/install
#
[[ $1 == latim ]] && {
    shift 1
    (cd latim_pp && ./install $*) || {
        echo "Load of latim_pp module failed."
        exit -1
    }
    exit
}
echo "
*
* spm - An Open Source package for digital feedback in
* Scanning Probe Microscopy
*
* Copyright (C) 2008-2009 Marcello Carla' <carla@fi.infn.it>
*
* This program is free software; you can use, modify and redistribute
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation (www.fsf.org).
*"
#
# preset scan defaults (overridable below via parameters.def and options)
#
points=100
lines=20
samples=1
cadence=200
adc=8
dac=8
adc_cal="3 0 0.000320 0 0"
dac_cal_m="1 0 3125 0 0"
dac_cal_s="1 0 3125 0 0"
feedback=""
dma=1
irq=""
major=""
shared=""
swap=0
dummy=0
new_feedback=0
# modify scan defaults according to "parameters.def"
. ./parameters.def
#
# define supported boards and default values
# (ni/io/mc/no are indices into the parallel arrays names/models/ranks/
#  count/master/slave; models holds PCI vendor:device IDs)
#
ni=0; names[ni]=nibac
io=1; names[io]=iobac
mc=2; names[mc]=mcbac
no=9; names[no]=nobac
models[$ni]="1093:71bc 1093:70af" # National Instruments
models[$io]="1616:0409" # Iotech
models[$mc]="1307:0079" # Measurement Computing
models[$no]="" # no board (dummy board)
types="$ni $io $mc $no"
for i in $types ; do
    ranks[$i]="none"
    count[$i]=0
    master[$i]="0"
    slave[$i]="0"
done
cal_master="/tmp/spm.calibration.master"
cal_slave="/tmp/spm.calibration.slave"
cal_file="/tmp/spm.calibration"
# decode options: each argument is "key" or "key=value"
for i in $*; do
    command=`echo $i | cut -d "=" -f 1`
    argument=`echo $i | cut -d "=" -f 2`
    case $command in
        swap)
            swap=$argument
            ;;
        dummy)
            if [[ $argument != "0" ]]; then dummy=1; fi
            ;;
        ni)
            ranks[$ni]=$argument
            ;;
        io)
            ranks[$io]=$argument
            ;;
        mc)
            ranks[$mc]=$argument
            ;;
        no)
            ranks[$no]=$argument
            ;;
        irq)
            irq=$argument
            ;;
        major)
            major=$argument
            ;;
        ufc)
            feedback=$argument
            ;;
        feedback)
            feedback=$argument
            ;;
        new)
            new_feedback=1
            ;;
        shared)
            shared=$argument
            ;;
        dma)
            dma=$argument
            ;;
        cadence)
            cadence=$argument
            ;;
        points)
            points=$argument
            ;;
        lines)
            lines=$argument
            ;;
        samples)
            samples=$argument
            ;;
        adc)
            adc=$argument
            ;;
        dac)
            dac=$argument
            ;;
        help)
            echo "
Options are:
dummy=1 Only try a dummy installation, without really executing the
commands
ni=<m>[,<s>] Override preference rules in assigning roles for National
Instruments boards - m: board to be used as master
s: board to be used as slave
io=<m>[,<s>] idem for Iotech boards
mc=<m>[,<s>] idem for Measurement Computing boards
no=<m>[,<s>] idem for the dummy board
swap=1/0 Swap/not swap the roles of identical master and slave boards.
irq=<int> Specify the interrupt line to be used
major=<int> Use a static major number
ufc=<name> Load a user feedback code module ...
new ... replacing an already loaded one
shared=no Obtain an exclusive irq line or fail
=yes Request a shared irq line
=try Try an exclusive line first, then a shared one (default)
dma=1/0 Use/not use a DMA channel for master board ADC, if possible
cadence=<n> sampling rate (usec) [note: interrupt rate = cadence * samples]
points=<n> points per line
lines=<n> lines per frame
samples=<n> Oversampling factor when using a DMA channel
adc=<n> Set conversion to n ADC for each sample
dac=<n> Allocate for n DAC in each event
latim Install latim_pp instead of spm modules
help Show this text and exit"
            exit
            ;;
        *)
            echo "Bad options - try ./install help"
            exit
            ;;
    esac
done
#
# perform some check on argument sanity
# (builds the irq=/major=/shared= fragments forwarded to the per-board
#  installers, and enables "dummy" mode where commands are only echoed)
#
irq_cmd=""
major_cmd=""
shared_cmd=""
do_it=""
if [[ $irq != "" ]]; then irq_cmd="irq=$irq" ; fi
if [[ $major != "" ]]; then major_cmd="major=$major" ; fi
if [[ $shared != "" ]]; then shared_cmd="shared=$shared" ; fi
if [[ $dummy == 1 ]]; then
    do_it="echo ---->"
    echo "A dummy installation will be tried:"
    echo "commands will only be printed, marked with ---->"
fi
if [[ -d /sys/module/spm_dev && $dummy == 0 && $feedback == "" ]]; then
    echo -e "\n There is an 'spm_dev' module already installed,"
    echo -e " but its functionality cannot be determined.\n"
    exit
fi
# interrupt period = sampling cadence * oversampling factor
rate=$((cadence*samples))
echo "Interrupt rate = $rate usec"
if [ $rate -lt 100 ]; then
    echo "Your interrupt rate is too fast."
    echo "'samples * cadence' must not be less than 100 usec"
    exit -1
elif [ $rate -lt 200 ]; then
    echo -e "\n ***** WARNING ***** WARNING ***** WARNING *****\n"
    echo "Your interrupt rate (samples * cadence) is very fast and may cause"
    echo "interrupt overrun. If the module freezes, unload and reload with a"
    echo "slower rate."
fi
#
# the function definitions follow - main code at the end of the file
#
#
# list available boards
#
# Print one line summarizing how many boards of each supported type are
# (still) available: "<label>: nibac:N iobac:N mcbac:N nobac:N".
# $1 - label prefix; reads globals: types, names[], count[]
list_boards() {
    printf '%s' "$1:"
    for i in $types ; do
        printf ' %s:%s' "${names[i]}" "${count[i]}"
    done
    printf '\n'
}
#
# identify_boards()
#
# look for available supported DAQ boards
#
# Scan the PCI bus for supported DAQ boards and bump count[] for each
# vendor:device match; the dummy board ("no") is always counted once.
# Falls back to a no-op 'echo' when lspci cannot be located.
identify_boards() {
# locate lspci
    if [[ -x /usr/bin/lspci ]]; then lspci="/usr/bin/lspci -n"
    elif [[ -x /usr/sbin/lspci ]]; then lspci="/usr/sbin/lspci -n"
    elif [[ -x /bin/lspci ]]; then lspci="/bin/lspci -n"
    elif [[ -x /sbin/lspci ]]; then lspci="/sbin/lspci -n"
    else
        echo -e "\nWARNING: 'lspci' not found - autoconfigure will fail"
        lspci=echo
    fi
# identify available boards
    echo -e "\nBuilding board configuration:"
    for i in $types ; do
        echo " Testing ${names[$i]}: ${models[$i]}"
        if [[ $i == $no ]]; then
            (( count[$i]++ ))
        else
            # field 3 of 'lspci -n' output is the vendor:device pair
            for b in `$lspci | cut -d ' ' -f 3`; do
                for j in ${models[$i]}; do
                    [[ $b == $j ]] && (( count[$i]++ ));
                done
            done
        fi
    done
}
#
# select_one( "role" "elegible boards")
#
# select one board for a specific role
#
# Pick the first board type from the candidate list that still has an
# unused unit available.
# $1 - role name ("master"/"slave"), used only in the messages
# $2 - space-separated, preference-ordered list of type indices
# Sets the global 'selected' to the chosen type index (or "none") and
# decrements the global count[] entry for the chosen type.
select_one() {
    selected="none"
    for i in $2 ; do
        # arithmetic comparison; the original '[[ ... > 0 ]]' was a
        # lexicographic *string* comparison that only happened to work
        if (( count[$i] > 0 )); then
            selected=$i
            (( count[$i]-- ))
            break
        fi
    done
    if [[ $selected == "none" ]]; then
        echo "No board found suitable to be $1"
    else
        echo "Board ${names[$selected]} can be $1"
    fi
}
#
# check options for a given configuration
#
# Validate the explicit ni=/io=/mc=/no= role assignments given on the
# command line (ranks[], format "<master>[,<slave>]").  Records the
# requested types in r_master/r_slave and the requested unit numbers in
# altmaster[]/altslave[]; sets given=1 when any explicit request exists.
# Exits on conflicting requests (two masters, two slaves, or the same
# unit of one board asked to play both roles).
check_given_configuration () {
    r_master="none"
    r_slave="none"
    given=0
    for i in $types ; do
        altmaster[$i]=0
        altslave[$i]=0
        [[ ${ranks[$i]} == "none" ]] || {
            new=`echo ${ranks[$i]} | cut -d"," -f 1`
            [[ $new == 0 ]] || {
                [[ $r_master == "none" ]] || {
                    echo " Two boards requested as master!"
                    exit -1
                }
                r_master=$i
                altmaster[$i]=$new
                given=1
            }
            # ",0" appended so boards given without a slave part yield 0
            new=`echo ${ranks[$i]},0 | cut -d"," -f 2`
            [[ $new == 0 ]] || {
                [[ $r_slave == "none" ]] || {
                    echo " Two boards requested as slave!"
                    exit -1
                }
                r_slave=$i
                altslave[$i]=$new
                given=1
            }
            [[ ${altslave[$i]} == ${altmaster[$i]} ]] &&
            [[ ${altmaster[$i]} != 0 ]] && {
                echo -e "\nDouble request for '${names[$i]}' board.\n"
                exit -1
            }
        }
    done
}
#
# build board configuration
#
# Decide which physical board plays master and which plays slave.
# Auto-detects boards, applies the built-in preference order, then lets
# any explicit command-line assignment (given=1) override the defaults.
# Results: masterboard/slaveboard (type indices) and master[]/slave[]
# (unit ranks per type).
build_board_configuration () {
    identify_boards
    list_boards "Found"
# who is master?
#Preferences for master are 1: ni 2: mc 3: no
    select_one "master" "$ni $mc $no"
    f_master=$selected
    list_boards "Left "
# who is slave?
#Preferences for slave are 1: io 2: ni 3: mc
    select_one "slave" "$io $ni $mc"
    f_slave=$selected
# assign board ranks according to what has been found
    [[ $f_master != "none" ]] && master[$f_master]=1
    # when master and slave are the same type, the slave gets unit 2
    [[ $f_slave != "none" ]] && slave[f_slave]=$((${master[$f_slave]}+1))
# now look at options for a given board configuration
    check_given_configuration
    [[ $given == 1 ]] && echo \
        "Given explicit assignment overrides default configuration"
    echo -e "Suggested/Requested->Assigned board ranks:"
# explicit assignement overrides the found one
    for i in $types ; do
        echo -n " ${names[$i]}: ${master[$i]},${slave[$i]} / ${ranks[$i]}"
        [[ $given == 0 ]] || {
            master[$i]=${altmaster[$i]}
            slave[$i]=${altslave[$i]}
        }
        echo " -> ${master[$i]},${slave[$i]}"
    done
    if [[ $given == 0 ]]; then
        masterboard=$f_master
        slaveboard=$f_slave
    else
        masterboard=$r_master
        slaveboard=$r_slave
    fi
}
#
# install spm_dev
#
# install the spm module and create the scan device
#
# Install the core 'spm_dev' kernel module with the scan geometry and
# timing parameters collected above.  $do_it prefixes every command with
# 'echo ---->' in dummy mode.  Exits on failure.
install_spm_dev () {
    echo -e "\nInstalling spm_dev ... "
    ($do_it cd spm_dev && $do_it ./install $major_cmd \
        cadence_usec=$cadence \
        points_per_line=$points \
        lines_per_frame=$lines \
        samples_per_point=$samples \
        sample_adc=$adc \
        sample_dac=$dac) || {
        echo "Load of core module 'spm_dev' for SPM failed."
        exit -1
    }
}
#
# build default calibration files
#
# Write default calibration entries for the selected boards into the
# temporary master/slave calibration files (redirected to /dev/null in
# dummy mode).  The Iotech slave gets four DAC lines instead of two.
build_cal_files () {
    [[ $dummy == 1 ]] && cal_slave=/dev/null && cal_master=/dev/null
    if [[ $slaveboard != "none" ]]; then
        name=${names[$slaveboard]}
        echo DAC $name $dac_cal_s > $cal_slave
        echo DAC $name $dac_cal_s >> $cal_slave
        [[ $slaveboard == $io ]] && \
            echo -e DAC $name $dac_cal_s >> $cal_slave &&
            echo -e DAC $name $dac_cal_s >> $cal_slave
    fi
    if [[ $masterboard != "none" ]]; then
        name=${names[$masterboard]}
        echo ADC $name $adc_cal > $cal_master
        echo DAC $name $dac_cal_m >> $cal_master
        echo DAC $name $dac_cal_m >> $cal_master
    fi
}
#
# install the slave module
#
# Install the slave board's module (only when the slave is a different
# board type from the master — same-type pairs are handled by the master
# module install).  Exits on failure.
install_slave () {
    touch $cal_slave
    if [[ $masterboard != $slaveboard && $slaveboard != "none" ]]; then
        slaverank=${master[$slaveboard]},${slave[$slaveboard]}
        name=${names[$slaveboard]}
        echo "Installing $name with board_rank=$slaverank ..."
        ($do_it cd $name && \
            $do_it ./install board_rank=$slaverank) || {
            echo -e "Attention: load and configure of slave board" \
                "module failed!"
            echo "Warning: forcing operations beyond this point" \
                "may crash your system!"
            exit -1
        }
    fi
}
#
# install the master module
#
# Install the master board's module, honoring the swap option (exchanges
# master/slave unit ranks when both roles use the same board type) and
# passing irq/shared/dma options through.  DMA is only supported on the
# nibac board.  Exits on failure or if no master board exists.
install_master () {
    if [[ $masterboard == "none" ]]; then
        echo -e "\nMaster board not found. The system will be incomplete."
        exit
    fi
    if [[ $swap == 1 && $masterboard == $slaveboard ]]; then
        masterank=${slave[$masterboard]},${master[$masterboard]}
    else
        masterank=${master[$masterboard]},${slave[$masterboard]}
    fi
    board_type=${names[$masterboard]}
    if [[ $board_type == nibac ]]; then
        dma="dma_use=$dma"
    else
        if [[ $dma == 1 ]]; then
            echo -e "\nUse of DMA not supported with \"$board_type\" board"
        fi
        dma=""
    fi
    echo "Installing ${names[$masterboard]} with board_rank=$masterank $irq_cmd"
    ($do_it cd $board_type && \
        $do_it ./install board_rank=$masterank $irq_cmd $shared_cmd $dma) || {
        echo -e "\nLoad and configure of master board module failed."
        echo "The system will be unusable."
        exit -1
    }
}
#
# perform consistency check
#
# Verify that every configured board produced its character device under
# /dev/spm/ (<name>0 for master, <name>1 for slave).  Skipped in dummy
# mode.  Exits when a configured board is missing its device node.
consistency_check () {
    if [[ $do_it == "" ]]; then
        echo "Performing consistency check ..."
    else
        $do_it "Performing consistency check ..."
        return
    fi
    for i in 0 1 2; do
        [[ ${master[i]} == 0 ]] || [[ -c /dev/spm/${names[$i]}0 ]] || {
            echo "Configuration failed for master board '${names[$i]}'."
            echo "The system will be unusable."
            exit
        }
        [[ ${slave[i]} == 0 ]] || [[ -c /dev/spm/${names[$i]}1 ]] || {
            echo "Configuration failed for slave board '${names[$i]}'."
            echo "The system will be unusable."
            exit
        }
    done
    if [[ $slaveboard == "none" ]]; then
        echo "No slave board. The system may be incomplete."
    else
        echo " ... board module(s) have been loaded."
    fi
}
#
# select and load a feedback code module
#
# Select and load a feedback code module (spm_fc).  When no ufc= option
# was given, pick the first available default; "none" skips loading.
# Replaces an already-loaded module only when the 'new' option was given.
load_feedback_code () {
# if no ufc has been given, pick a default
    if [[ $feedback == "" ]]; then
        for i in feedback_code template simple; do
            [[ -d spm_fc/module/$i ]] && {
                feedback=$i
                echo "Selected default feedback code '$i'"
                break
            }
        done
    fi
    if [[ $feedback == "" ]]; then
        echo "No feedback code will be loaded"
    else
        [[ $feedback == "none" || -f spm_fc/module/$feedback/spm_fc.ko ]] || {
            echo
            echo "Feedback module '$feedback' not found"
            echo
            return -1
        }
        if [[ -d /sys/module/spm_fc ]]; then
            if [[ $new_feedback == 1 ]]; then
                echo "Removing feedback code ..."
                ($do_it cd spm_fc && $do_it ./remove)
            else
                echo
                echo "A feedback module is already installed."
                echo "If I have to replace it, add the option 'new'"
                echo
                exit -1
            fi
        fi
        if [[ $feedback == "none" ]]; then
            echo "No feedback code will be loaded"
        else
            echo -e "\nInstalling feedback code '$feedback'"
            ($do_it cd spm_fc && $do_it ./install $feedback)
        fi
    fi
}
#
# main code: full install only when spm_dev is not yet loaded (or in
# dummy mode); the combined calibration file is master entries followed
# by slave entries.  The feedback module can be (re)loaded either way.
#
if [[ ! -d /sys/module/spm_dev || $dummy == 1 ]]; then
    $do_it rm -f /tmp/spm.calibration $cal_master $cal_slave
    build_board_configuration
    install_spm_dev
    build_cal_files
    install_slave
    install_master
    consistency_check
    $do_it cp $cal_master $cal_file
    $do_it dd if=$cal_slave of=$cal_file oflag=append conv=notrunc
fi
load_feedback_code && echo -e "\n Have a good scan!\n"
| true
|
9218034d5b59253f0c76dd4ebcadc29bd1d195c1
|
Shell
|
gkouassi/hello-world
|
/install-bw6.sh
|
UTF-8
| 6,163
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x
#-----------------------------------------
# Installation script BW 5 & 6(.x) for Linux/AIX
# Author : GERARD KOUASSI
#-----------------------------------------
#-----------------------------------------------------
# failInstall
#-----------------------------------------------------
# Print a uniform installation-failure message.
# $1 - reason text appended to the message
failInstall(){
    printf 'Installation failed, %s\n' "$1"
}
#-----------------------------------------------------
# sub routines (associative array)
#-----------------------------------------------------
# put map: store value $2 under key $1 (poor man's associative array
# using dynamically named variables hash<key>).
hput () {
    # Quote the whole assignment and defer $2 expansion so values that
    # contain spaces or glob characters survive the eval; the original
    # `eval hash$1='$2'` broke on values with whitespace.
    eval "hash$1=\$2"
}
# get map: print the value stored under key $1.
hget () {
    # The original expanded ${hash<key>#hash}, which stripped a literal
    # "hash" prefix from the *value* and mangled any value starting with
    # "hash" (e.g. a directory named "hashtools").
    eval "printf '%s\n' \"\${hash$1}\""
}
# Test numeric chararacter
# Print 1 when $1 consists only of decimal digits (the empty string
# counts as numeric, matching the original case-pattern behavior),
# otherwise print 0.
isNumeric () {
    if [[ $1 =~ ^[0-9]*$ ]]; then
        echo 1
    else
        echo 0
    fi
}
#-----------------------------------------------------
# function getFileNameNoVersion
#-----------------------------------------------------
# Strip the trailing "-<version>..." part from an artifact file name:
# copies characters up to (not including) the first '-' that is
# immediately followed by a digit.
# $1 - file name, e.g. "mysql-connector-java-8.0.21.jar" -> "mysql-connector-java"
# Outputs the version-less name on stdout.  Relies on the sibling
# isNumeric helper.
function getFileNameNoVersion
{
    local inputstr=$1
    local currentNameStr=
    typeset -i charIndex=0
    typeset -i nextCharIndex=0
    local nameLength=${#inputstr}
    local curChar nextChar
    while [ ${charIndex} -lt ${nameLength} ]; do
        # bash substring expansion (0-based) replaces the original
        # GNU-only `expr substr`, which is non-portable and forks a
        # process per character
        curChar=${inputstr:charIndex:1}
        if [ "${curChar}" != "-" ]; then
            currentNameStr=${currentNameStr}${curChar}
        else
            nextCharIndex=charIndex+1
            nextChar=${inputstr:nextCharIndex:1}
            if [ $(isNumeric ${nextChar}) -eq 1 ]; then
                # '-' followed by a digit (or end of string): the version
                # suffix starts here
                break
            else
                currentNameStr=${currentNameStr}${curChar}
            fi
        fi
        charIndex=charIndex+1
    done
    echo $currentNameStr
}
#-----------------------------------------------------
# function locateMaxVersion
#-----------------------------------------------------
# Find the subdirectory of $1 with the highest embedded version number.
# $1 - base directory to scan
# $2 - optional name that must exist inside a candidate (file or dir)
# The version is computed by concatenating the digits found in the
# directory name (leading zeros skipped); the winning name is remembered
# via hput/hget and printed on stdout.  Prints nothing when no candidate
# qualifies.  NOTE(review): iterating `$(ls ...)` breaks on names with
# spaces — acceptable here since TIBCO version dirs are plain.
function locateMaxVersion
{
    #$1 source dir
    #$2 test file
    local maxVersion
    local currentVersion
    typeset -i maxVersion=0
    typeset -i currentVersion=0
    typeset -i charIndex=0
    #base
    local basedir=$1
    if [ ! -d $basedir ];then
        return 0
    fi
    for f in $(ls $basedir); do
        if [ -d $basedir/$f ];then
            if [ "$2" != "" ];then
                #test file
                if [ ! -f $basedir/$f/$2 ] && [ ! -d $basedir/$f/$2 ];then
                    continue;
                fi
            fi
            charIndex=1
            currentVersionStr=
            nameLength=${#f}
            # collect all digits of the name into currentVersionStr
            while [ ${charIndex} -le ${nameLength} ];do
                curChar=$(expr substr "$f" ${charIndex} 1)
                if [ $(isNumeric ${curChar}) -eq 1 ];then
                    if [ ${curChar} -ne 0 ];then
                        currentVersionStr=${currentVersionStr}${curChar}
                    elif [ ${#currentVersionStr} -gt 0 ];then
                        currentVersionStr=${currentVersionStr}${curChar}
                    fi
                fi
                charIndex=charIndex+1
            done
            currentVersion=${currentVersionStr}
            if [ ${currentVersion} -gt ${maxVersion} ];then
                maxVersion=$currentVersion
                hput $maxVersion "$f"
            fi
        fi
    done
    if [ $maxVersion -gt 0 ];then
        echo `hget $maxVersion`
    fi
}
#-----------------------------------------------------
# function copyVersion
#-----------------------------------------------------
# Synchronize versioned jars from $1 into $2:
#  - new or changed versions replace any older "<name>-*" jar in $2
#  - already-present versions only have stale duplicates removed
# $1 - source dir, $2 - target dir, $3 - optional grep -E pattern of
# base names to exclude.  Returns 2 when a copy fails.
function copyVersion
{
    #$1 source dir
    #$2 target dir
    #$3 exclude
    for f in $(ls $1/*.jar); do
        if [ -f $f ];then
            local fiNameVersion=`basename $f`
            local fiName=$(getFileNameNoVersion ${fiNameVersion})
            if [ ! -z "$3" ];then
                local filtered=`echo $fiName | grep -E $3`
                if [ "${filtered}" != "" ];then
                    continue
                fi
            fi
            if [ ! -f $2/${fiNameVersion} ];then
                echo "Updating $fiName on $2"
                # drop any other version of the same artifact first
                rm -f $2/${fiName}-* 2>/dev/null
                cp -f ${f} $2/${fiNameVersion}
                if [ $? -ne 0 ];then
                    echo "Problem during copy of ${f} to $2"
                    return 2
                fi
            else
                local dupList=`ls $2/${fiName}-* 2>/dev/null`
                for fdup in ${dupList}; do
                    #remove dups
                    if [ "`basename ${fdup}`" != "${fiNameVersion}" ];then
                        echo "removing duplicates ${fdup} on $2"
                        rm -f ${fdup}
                    fi
                done
            fi
        fi
    done
}
#-----------------------------------------------------
# Main routine
#-----------------------------------------------------
#Resolve links (AIX 'readlink' is missing, so parse 'ls -l' there) and
# locate the driver bundle directory; then determine TIBCO_ROOT either
# from the first argument or from ~/tibco-installation.properties.
if [ -h $0 ] && [ "`uname`" != "AIX" ];then
    prg=`readlink $0`
elif [ -h $0 ] && [ "`uname`" == "AIX" ];then
    prg=`ls -l $0 | awk '{print $11}'`
else
    prg=$0
fi
EXT_HOME=`dirname $prg`
EXT_HOME=`cd ${EXT_HOME}; pwd -P`
EXT_NEW_VERSION=`basename $EXT_HOME`
#Cls if no arg
if [ $# -eq 0 ];then
    clear
else
    echo
fi
echo "------------------------------------------------------"
echo "Start install JDBC drivers at : `date`"
echo "------------------------------------------------------"
echo
echo "JDBC drivers binary is located at ${EXT_HOME}"
echo "JDBC drivers version is ${EXT_NEW_VERSION}"
#Fixed file name
installFile=${HOME}/tibco-installation.properties
if [ $# -gt 0 ];then
    TIBCO_ROOT=`dirname $1`/`basename $1`
else
    if [ ! -f ${installFile} ];then
        failInstall "${installFile} not found, you must call $prg <TIBCO_ROOT>, exemple : $prg /opt/tibco"
        exit 1
    fi
    #Get TIBCO ROOT
    TIBCO_ROOT=`cat ${installFile} | grep -wE ^TIBCO_ROOT | awk -F '=' 'NR==1 {print $2}'`
fi
echo "TIBCO_ROOT is ${TIBCO_ROOT}"
if [ "${TIBCO_ROOT}" == "" ];then
    failInstall "TIBCO_ROOT not found"
    exit 1
fi
if [ ! -d ${TIBCO_ROOT} ];then
    failInstall "${TIBCO_ROOT} not found"
    exit 1
fi
if [ ! -d ${TIBCO_ROOT}/bw ];then
    failInstall "${TIBCO_ROOT} is not a valid TIBCO installation directory"
    exit 1
fi
#--------------------------------------------------------------------------------
# Locate the newest installed BW version (a bw/<version> dir containing
# "system") and copy the JDBC driver jars into its lib directory.
BW_VERSION=$(locateMaxVersion ${TIBCO_ROOT}/bw system)
if [ "${BW_VERSION}" == "" ];then
    echo "BW_VERSION not found"
    # 'return' is invalid at the top level of an executed script (it only
    # works in functions or sourced scripts); 'exit' aborts as intended
    exit 1
fi
#bw drs
TIBCO_BW_SYSTEM_DIR=${TIBCO_ROOT}/bw/${BW_VERSION}/system
TIBCO_BW_SYSTEM_LIB_DIR=${TIBCO_BW_SYSTEM_DIR}/lib
TIBCO_BW_SYSTEM_JDBC_LIB_DIR=${TIBCO_BW_SYSTEM_LIB_DIR}
if [ ! -d ${TIBCO_BW_SYSTEM_JDBC_LIB_DIR} ];then
    echo "${TIBCO_BW_SYSTEM_JDBC_LIB_DIR} not found"
    exit 1
fi
echo
echo "Installing drivers on ${TIBCO_BW_SYSTEM_JDBC_LIB_DIR}"
copyVersion ${EXT_HOME} ${TIBCO_BW_SYSTEM_JDBC_LIB_DIR}
if [ $? -ne 0 ];then
    echo "Problem during jdbc drivers installation"
    exit 2
fi
echo
echo "${EXT_NEW_VERSION} installed at ${TIBCO_BW_SYSTEM_JDBC_LIB_DIR}"
echo
echo "------------------------------------------------------"
echo "End install ${EXT_NEW_VERSION} at : `date`"
echo "------------------------------------------------------"
exit 0
| true
|
f062f6e21269ff5fc64e410432fa6a3ec110ea33
|
Shell
|
ebo/netcdf-c
|
/nc_perf/run_knmi_bm.sh
|
UTF-8
| 602
| 3.890625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# This shell gets some files from the netCDF ftp site for testing,
# then runs the tst_knmi benchmarking program.
# Ed Hartnett
# Load common values for netCDF shell script tests.
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# Get files if needed.
echo ""
file_list="MSGCPP_CWP_NC3.nc MSGCPP_CWP_NC4.nc"
echo "Getting KNMI test files $file_list"
for f1 in $file_list
do
    if ! test -f "$f1"; then
	# Fail early if the download or unpack fails, instead of letting
	# tst_knmi run against a missing/corrupt file and the script still
	# print SUCCESS.
	wget "ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$f1.gz" || exit 1
	gunzip "$f1.gz" || exit 1
    fi
done
# Run the C program on these files; propagate its failure.
${execdir}/tst_knmi || exit 1
echo "SUCCESS!!!"
exit 0
| true
|
f9b2a64caeba582ffb2a6d0d9fba9db9f409ff09
|
Shell
|
jtallar/game-of-life
|
/rule-multianalysis.sh
|
UTF-8
| 1,427
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run the game-of-life simulator repeatedly for one rule over a range of
# fill percentages, then post-process the results with multipleAnalysis.py.
# Usage: ./rule-multianalysis.sh rule_num fill_start fill_step repetitions
if [ "$#" -ne 4 ]; then
    echo "Illegal number of parameters."
    echo "Run with ./rule-multianalysis.sh rule_num fill_start fill_step repetitions"
    exit 1
fi
# Map the rule number to the simulator's rule string, dimensionality and
# Moore-neighborhood radius.
case $1 in
    1)
        RULE="=.2.|.=.3/=.3"; DIM=2; MOORE=1;;
    2)
        RULE=">.8.&.<.11/>.0.&.<.5"; DIM=2; MOORE=2;;
    3)
        RULE="<.3/=.3"; DIM=2; MOORE=1;;
    4)
        RULE="=.2.|.=.3/=.3"; DIM=3; MOORE=1;;
    5)
        RULE=">.4.&.<.8/=.6"; DIM=3; MOORE=1;;
    6)
        RULE=">.5.&.<.11/=.7.|.=.8"; DIM=3; MOORE=1;;
    *)
        echo "Invalid rule number, must be between 1 and 6."
        exit 1
        ;;
esac
# Fresh output tree per rule.
ROOT_DIR="data_dir_$1"
if [ -d "$ROOT_DIR" ]; then
    printf '%s\n' "Removing Directory recursively ($ROOT_DIR)"
    rm -rf "$ROOT_DIR"
fi
mkdir "$ROOT_DIR"
FILL="$2"
# Sweep fill from fill_start to 100 in steps of fill_step; run the
# simulator 'repetitions' times per fill value.
while [ "$FILL" -le 100 ]
do
    SIM_DIR="$ROOT_DIR"/"$FILL"
    if [ -d "$SIM_DIR" ]; then
        printf '%s\n' "Removing Directory recursively ($SIM_DIR)"
        rm -rf "$SIM_DIR"
    fi
    mkdir "$SIM_DIR"
    echo "Running $4 times with fill $FILL..."
    for i in $(seq 1 $4)
    do
        ./target/tp2-simu-1.0/life.sh -Drule="$RULE" -Dsize=101 -Dinit=41 -Ddim="$DIM" -Dmoore="$MOORE" -Dpel=true -Dcenter=true -Dfill="$FILL" -Dout="$SIM_DIR/data$i"
    done
    echo "-----------------------------------"
    ((FILL = FILL + "$3"))
done
# Aggregate the runs into plots; keep the analysis log next to the plots.
PICS_DIR="pics_rule$1"
OUT_FILE="out$1.txt"
python3.8 multipleAnalysis.py "$ROOT_DIR" "$PICS_DIR" > "$OUT_FILE"
mv "$OUT_FILE" "$PICS_DIR"
| true
|
1429ff0fde923b09e147ada32cc63a4260858217
|
Shell
|
Hsuing/shelll_script
|
/shell/judge.sh
|
UTF-8
| 436
| 3.46875
| 3
|
[] |
no_license
|
is_int() { #? Check if value(s) is integer
    # Succeeds only when every argument is an optionally signed run of
    # digits; fails on the first non-integer value.
    local value
    for value; do
        [[ $value =~ ^-?[0-9]+$ ]] || return 1
    done
}
is_float() { #? Check if value(s) is floating point
    # Accepts an optional sign, optional integer part, then a '.' or ','
    # decimal separator followed by at least one digit.
    local value
    for value; do
        [[ $value =~ ^-?[0-9]*[,.][0-9]+$ ]] || return 1
    done
}
is_hex() { #? Check if value(s) is hexadecimal
    # '#' characters are stripped first (color-code style "#1A2b"); the
    # remainder must contain only hex digits (empty passes, as before).
    local value
    for value; do
        [[ ${value//#/} =~ ^[0-9a-fA-F]*$ ]] || return 1
    done
}
| true
|
343cf40e42a836722bb8d26075788cf900daa1d2
|
Shell
|
Interlisp/maiko
|
/bin/machinetype
|
UTF-8
| 916
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#########################################################################
# #
# M A C H I N E T Y P E #
# #
# Compute the hardware architecture we're running on. #
# #
# (C) Copyright 2001 Venue. All Rights Reserved #
# #
#########################################################################
# Directory of this script, resolved to a physical path.
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
# LDEARCH overrides autodetection; otherwise ask GNU config.guess for
# the canonical cpu-vendor-os triplet.
os=${LDEARCH:-`$SCRIPTPATH/config.guess`}
# o/s switch block: map the triplet's cpu part to the short arch name
# used by the build.  NOTE(review): x86_64-apple-darwin15* deliberately
# maps to 386 — presumably a 32-bit build on that macOS release; confirm
# before changing.
case "$os" in
    m68k-*) echo m68k ;;
    sparc-*) echo sparc ;;
    alpha-*) echo alpha ;;
    i*86-*-*) echo 386 ;;
    armv7l-*-*) echo armv7l ;;
    aarch64-*-*) echo aarch64 ;;
    x86_64-apple-darwin15*) echo 386 ;;
    x86_64-*) echo x86_64 ;;
    powerpc-*) echo ppc ;;
    amd64-*) echo x86_64 ;;
esac
### Don't leave the variables set.
unset os
| true
|
795f66e0fb559d1460240658704d5d674ed0c50c
|
Shell
|
mrlesmithjr/dotfiles
|
/setup.sh
|
UTF-8
| 6,982
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dotfiles bootstrap: install base packages per OS/distro, then (below)
# Homebrew, pyenv, fonts and Oh My Zsh.
set -e
set -x
# Check for install type to ensure proper setup
# (~/.minimal-install marker skips brew/pyenv later on)
if [ -f "$HOME/.minimal-install" ]; then
  INSTALL_TYPE="minimal"
else
  INSTALL_TYPE="full"
fi
if [[ $(uname) == "Linux" ]]; then
  # Arch
  if [ -f /etc/arch-release ]; then
    codename="$(awk </etc/arch-release '{print $1}')"
    if [[ $codename == "Manjaro" ]]; then
      yes | sudo pacman -Syyu && yes | sudo pacman -S gc guile autoconf automake \
        binutils bison curl fakeroot file findutils flex gawk gcc gettext grep \
        groff gzip libtool m4 make pacman patch pkgconf sed sudo systemd \
        texinfo util-linux which python-setuptools python-virtualenv python-pip \
        python-pyopenssl python2-setuptools python2-virtualenv python2-pip \
        python2-pyopenssl
    fi
  fi
  # Ubuntu
  if [ -f /etc/debian_version ]; then
    # shellcheck source=/dev/null
    source /etc/os-release
    # id=$ID
    os_version_id=$VERSION_ID
    sudo apt-get update
    sudo apt-get install -y bc
    # Pre-20.04 releases still ship python2 packages; newer ones use
    # python-is-python3 and the python3-only package set.
    if (($(echo "$os_version_id" '<' 20.04 | bc))); then
      sudo apt-get -y install build-essential curl fontconfig libbz2-dev libffi-dev \
        libreadline-dev libsqlite3-dev libssl-dev python-dev python-minimal python-pip \
        python-setuptools python-virtualenv python3-pip python3-venv vim virtualenv zlib1g-dev zsh
    else
      sudo apt-get -y install build-essential curl fontconfig libbz2-dev libffi-dev \
        liblzma-dev libreadline-dev libsqlite3-dev libssl-dev python-is-python3 python3-dev \
        python3-minimal python3-pip python3-setuptools python3-virtualenv \
        python3-venv vim virtualenv zlib1g-dev zsh
    fi
    # Powerline symbols font for the prompt.
    if [ ! -d "$HOME/.fonts" ]; then
      mkdir "$HOME/.fonts"
    fi
    if [ ! -d "$HOME/.config/fontconfig" ]; then
      mkdir -p "$HOME/.config/fontconfig/conf.d"
    fi
    if [ ! -f "$HOME/.fonts/PowerlineSymbols.otf" ]; then
      wget https://github.com/powerline/powerline/raw/develop/font/PowerlineSymbols.otf -O "$HOME"/.fonts/PowerlineSymbols.otf
    fi
    if [ ! -f "$HOME/.config/fontconfig/conf.d/10-powerline-symbols.conf" ]; then
      fc-cache -vf "$HOME"/.fonts/
      wget https://github.com/powerline/powerline/raw/develop/font/10-powerline-symbols.conf -O "$HOME"/.config/fontconfig/conf.d/10-powerline-symbols.conf
    fi
  fi
  # RHEL
  if [ -f /etc/redhat-release ]; then
    codename="$(awk </etc/redhat-release '{print $1}')"
    if [[ $codename == "Fedora" ]]; then
      sudo dnf -y install curl bzip2 bzip2-devel gmp-devel libffi-devel openssl-devel \
        python-crypto python-devel python-dnf python-pip python-setuptools python-virtualenv \
        python3-devel python3-dnf python3-setuptools python3-virtualenv \
        redhat-rpm-config readline-devel sqlite sqlite-devel wget xz xz-devel zlib-devel zsh &&
        sudo dnf -y group install "C Development Tools and Libraries"
    elif [[ $codename == "CentOS" ]]; then
      sudo yum -y install bzip2 bzip2-devel curl gmp-devel libffi-devel openssl-devel \
        python-crypto python-devel python-pip python-setuptools python-virtualenv \
        redhat-rpm-config readline-devel sqlite sqlite-devel wget xz xz-devel zlib-devel zsh &&
        sudo yum -y group install "Development Tools"
    fi
  fi
fi
### Homebrew ### (full installs only; Linuxbrew is skipped on aarch64)
if [[ $INSTALL_TYPE == "full" ]]; then
  if [[ $(uname) == "Darwin" ]]; then
    if ! xcode-select --print-path &>/dev/null; then
      xcode-select --install &>/dev/null
    fi
    # 'set +e' so a missing brew does not abort the script under set -e
    set +e
    command -v brew >/dev/null 2>&1
    BREW_CHECK=$?
    if [ $BREW_CHECK -eq 0 ]; then
      echo "Brew already installed"
    else
      /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
    fi
  elif [[ $(uname) == "Linux" ]]; then
    set +e
    if [[ $(arch) != "aarch64" ]]; then
      command -v brew >/dev/null 2>&1
      BREW_CHECK=$?
      if [ $BREW_CHECK -eq 0 ]; then
        echo "Brew already installed"
      else
        bash -c \
          "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
        test -d /home/linuxbrew/.linuxbrew && eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
      fi
      if [ ! -d /home/linuxbrew/.linuxbrew/var/homebrew/linked ]; then
        sudo mkdir -p /home/linuxbrew/.linuxbrew/var/homebrew/linked
        sudo chown -R "$(whoami)" /home/linuxbrew/.linuxbrew/var/homebrew/linked
      fi
    fi
  fi
fi
### Pyenv ### (clone pyenv + plugins, install a default Python, sync deps)
export PYENV_ROOT="$HOME/.pyenv"
if [[ $INSTALL_TYPE == "full" ]]; then
  if [ ! -d "$PYENV_ROOT" ]; then
    git clone https://github.com/pyenv/pyenv.git "$PYENV_ROOT"
    git clone https://github.com/pyenv/pyenv-update.git "$PYENV_ROOT/plugins/pyenv-update"
    git clone https://github.com/pyenv/pyenv-virtualenv.git "$PYENV_ROOT/plugins/pyenv-virtualenv"
    export PATH="$PYENV_ROOT/bin:$PATH"
    if [ -f .python-version ]; then
      pyenv install
      pyenv global "$(cat .python-version)"
    else
      # latest stable CPython: filter out dev/alpha/beta/rc/miniconda entries
      DEFAULT_PYTHON_VERSION=$(pyenv install --list | grep -v - | grep -v a | grep -v b | grep -v mini | grep -v rc | tail -1 | awk '{ print $1 }')
      pyenv install "$DEFAULT_PYTHON_VERSION"
      pyenv global "$DEFAULT_PYTHON_VERSION"
    fi
    eval "$(pyenv init --path)"
    eval "$(pyenv init -)"
    # eval "$(pyenv virtualenv-init -)"
    pip install --upgrade pip pip-tools
    pip-sync "requirements.txt" "requirements-dev.txt"
  else
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init --path)"
    eval "$(pyenv init -)"
    # eval "$(pyenv virtualenv-init -)"
  fi
fi
### Fonts ### (MesloLGS NF for the powerlevel10k prompt)
if [[ $(uname) == "Darwin" ]]; then
  FONTS_DIR="$HOME"/Library/Fonts
elif [[ $(uname) == "Linux" ]]; then
  FONTS_DIR="$HOME"/.fonts
fi
if [[ ! -d "$FONTS_DIR" ]]; then
  mkdir -p "$FONTS_DIR"
fi
if [[ ! -f "$FONTS_DIR/MesloLGS NF Regular.ttf" ]]; then
  cd "$FONTS_DIR"
  curl -L https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Regular.ttf >"MesloLGS NF Regular.ttf"
  if [[ $(uname) == "Linux" ]]; then
    fc-cache -vf "$FONTS_DIR"
  fi
  cd -
fi
### Bash Git Prompt ###
if [[ $(uname) == "Linux" ]]; then
  if [ ! -f "$HOME/.bash-git-prompt/gitprompt.sh" ]; then
    git clone https://github.com/magicmonty/bash-git-prompt.git "$HOME/.bash-git-prompt" --depth=1
  fi
fi
### Netrc ###
if [ ! -f "$HOME/.netrc" ]; then
  touch "$HOME/.netrc"
fi
### Oh My Zsh ### (framework plus autocomplete/autosuggestions/
### syntax-highlighting plugins and the powerlevel10k theme)
OMZ_DIR=$HOME/.oh-my-zsh
OMZ_CUSTOM_DIR=$OMZ_DIR/custom
OMZ_PLUGINS_DIR=$OMZ_CUSTOM_DIR/plugins
OMZ_THEMES_DIR=$OMZ_CUSTOM_DIR/themes
if [ ! -d "$OMZ_DIR" ]; then
  git clone https://github.com/ohmyzsh/ohmyzsh.git "$OMZ_DIR"
fi
if [ ! -d "$OMZ_PLUGINS_DIR/zsh-autocomplete" ]; then
  git clone https://github.com/marlonrichert/zsh-autocomplete.git "$OMZ_PLUGINS_DIR/zsh-autocomplete"
fi
if [ ! -d "$OMZ_PLUGINS_DIR/zsh-autosuggestions" ]; then
  git clone https://github.com/zsh-users/zsh-autosuggestions.git "$OMZ_PLUGINS_DIR/zsh-autosuggestions"
fi
if [ ! -d "$OMZ_PLUGINS_DIR/zsh-syntax-highlighting" ]; then
  git clone https://github.com/zsh-users/zsh-syntax-highlighting.git "$OMZ_PLUGINS_DIR/zsh-syntax-highlighting"
fi
if [ ! -d "$OMZ_THEMES_DIR/powerlevel10k" ]; then
  git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$OMZ_THEMES_DIR/powerlevel10k"
fi
### macOS ###
if [[ $(uname) == "Darwin" ]]; then
  source "$HOME/.macos"
fi
| true
|
dced91f8ea6e60d729183377f5220b9ddf4ece34
|
Shell
|
takd/remotepi-init2
|
/boot/run-once.sh
|
UTF-8
| 3,453
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
## Usage: This script will run once after systemd brings up the network and
# then get moved into /boot/run-once.d/completed which will be created for
# you. This script is in this project to serve as a demo of how you might use
# it. There are several distinct tasks in commented blocks. The recommended
# use would be to create the /boot/run-once.d/ directory yourself and put
# each task in its own file and name them so they sort in the order you want
# them ran.
# See: http://manpages.ubuntu.com/manpages/bionic/man8/run-parts.8.html
echo "Starting run-once.sh..."
echo "Updating hostname..."
#### Update hostname
## See https://raspberrypi.stackexchange.com/a/66939/8375 for a list of all the raspi-config magic you may want to automate.
raspi-config nonint do_hostname "$(cat /boot/hostname)"
echo "Updating locale, keyboard, WiFi country..."
### Update locale, keyboard, WiFi country
raspi-config nonint do_change_locale "hu_HU.UTF-8"
raspi-config nonint do_configure_keyboard "hu"
raspi-config nonint do_wifi_country "HU"
echo "Enabling UART..."
### Enable UART for status LED
raspi-config nonint do_onewire 1
echo "Setting up WiFi..."
#### Wifi Setup (WPA Supplicant)
## Replaces the magic of https://github.com/RPi-Distro/raspberrypi-net-mods/blob/master/debian/raspberrypi-net-mods.service
## See: https://www.raspberrypi.org/documentation/configuration/wireless/wireless-cli.md
# BUG FIX: the original ran
#   cat /etc/wpa_supplicant/wpa_supplicant.conf /boot/network.conf > /etc/wpa_supplicant/wpa_supplicant.conf
# The redirection truncates wpa_supplicant.conf BEFORE cat reads it, wiping the
# existing configuration. Appending the network fragment achieves the intended
# merge without destroying the base config.
cat /boot/network.conf >> /etc/wpa_supplicant/wpa_supplicant.conf
chmod 600 /etc/wpa_supplicant/wpa_supplicant.conf
wpa_cli -i wlan0 reconfigure
rm /boot/network.conf
echo "Setting up SSH..."
#### SSH Daemon Setup
## Replaces the magic of https://github.com/RPi-Distro/raspberrypi-sys-mods/blob/master/debian/raspberrypi-sys-mods.sshswitch.service
## See also: https://github.com/RPi-Distro/raspberrypi-sys-mods/blob/master/debian/raspberrypi-sys-mods.regenerate_ssh_host_keys.service
update-rc.d ssh enable && invoke-rc.d ssh start
# Seed the kernel entropy pool from the hardware RNG before regenerating keys.
dd if=/dev/hwrng of=/dev/urandom count=1 bs=4096
rm -f -v /etc/ssh/ssh_host_*_key*
/usr/bin/ssh-keygen -A -v
echo "Setting up services..."
#### Setup own services
mv /boot/services /home/pi/services
chmod +x /home/pi/services/DS1302/setDateToRTC.py
chmod +x /home/pi/services/DS1302/getDateFromRTC.py
echo "Setting up RTC..."
### Setup RTC time load at startup
# Insert the RTC restore call just before the final line (exit 0) of rc.local.
# The original used echo "$(sed ...)" > /etc/rc.local, which loses trailing
# newlines and leaves rc.local empty if the sed fails; sed -i edits in place.
sed -i '$ i\/home/pi/services/DS1302/getDateFromRTC.py' /etc/rc.local
### Run it too
/home/pi/services/DS1302/getDateFromRTC.py
echo "Setting up shutdown, wifi indicator"
### Copy shutdown button and wifi indicator services
## http://www.diegoacuna.me/how-to-run-a-script-as-a-service-in-raspberry-pi-raspbian-jessie/
cp /home/pi/shutdown-button.service /lib/systemd/system/shutdown-button.service
chmod 644 /lib/systemd/system/shutdown-button.service
cp /home/pi/wifi-checker.service /lib/systemd/system/wifi-checker.service
chmod 644 /lib/systemd/system/wifi-checker.service
echo "Enabling new services"
### Enable the new services
sudo systemctl daemon-reload
sudo systemctl enable shutdown-button.service
sudo systemctl start shutdown-button.service
sudo systemctl enable wifi-checker.service
sudo systemctl start wifi-checker.service
echo "Additional scripts"
#### Get additional scripts for subsequent usage, get will be run manually
mv /boot/get-remotepi-scripts /home/pi/get-remotepi-scripts
# BUG FIX: the original ran "chmod +x get-remotepi-scripts" with a relative
# path, which only works if the current directory happens to be /home/pi.
chmod +x /home/pi/get-remotepi-scripts
echo "All done, moving on!"
| true
|
b6e4afd6c156893107d1b45ca4ea591376ef755c
|
Shell
|
mathieurodic/cpptrading
|
/run
|
UTF-8
| 759
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile a single C++17 source file into bin/<same path minus extension>
# and run it immediately. Usage: ./run path/to/file.cpp
DEBUG=false
# COMPILER="g++ --short-enums -std=c++17"
COMPILER="clang++-5.0 -std=c++17"
INPUT_PATH="$1"
OUTPUT_PATH="bin/${INPUT_PATH}"
OUTPUT_PATH="${OUTPUT_PATH%.*}"          # strip the source-file extension
OUTPUT_DIR="$(dirname "${OUTPUT_PATH}")"
# In debug mode, build unoptimized with symbols and run under valgrind;
# otherwise build -O3 and run directly. (The original had two separate
# if/else blocks testing the same condition; merged for clarity.)
if [ "${DEBUG}" = true ] ; then
    EXECUTION_PRECOMMAND="valgrind"
    OPTIONS="${OPTIONS} -g -O0"
else
    EXECUTION_PRECOMMAND=""
    OPTIONS="${OPTIONS} -O3"
fi
mkdir -p "${OUTPUT_DIR}"
# COMPILER/OPTIONS/EXECUTION_PRECOMMAND are intentionally unquoted so that
# they word-split into separate arguments (and an empty precommand vanishes).
# BUG FIX: the executable path is now quoted so paths with spaces work.
time ${COMPILER} ${OPTIONS} -fdiagnostics-color=always -Isrc "${INPUT_PATH}" \
    -Wno-write-strings -Wno-narrowing -Wno-trigraphs \
    -lpthread -lupscaledb -lstdc++fs -lz -lwebsockets -lcurl -ltbb -lncursesw \
    -o "${OUTPUT_PATH}" && echo && time ${EXECUTION_PRECOMMAND} "./${OUTPUT_PATH}"
exit $?
| true
|
a91215c7ab459a1438af4fea0a6584c6ddc3d692
|
Shell
|
ccFiona/data-analyse
|
/5.104 xiaofei/getExpmLics.sh
|
UTF-8
| 738
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
############################################################################
##
## Copyright (c) 2013 hunantv.com, Inc. All Rights Reserved
## $Id: getExpmOpenStat.sh,v 0.0 2016-05-26 Thursday 10:50:10 <tangye> Exp $
##
############################################################################
#
###
# # @file getExpmOpenStat.sh
# # @date 2016-05-26 Thursday 10:50:10
# # @brief  Collect the union of unique license ids from the day's export log
# #         and the accumulated license list.
# ##
# BUG FIX: the shebang was originally buried in the middle of the file
# (after the header comments), where it has no effect; it must be line 1.
####
# input1: "intermediate/expLics_'$stat_date'"
# out1:"intermediate/expLicsAll"
logs=$1        # today's exported-license file
licsAll=$2     # accumulated license file
stat_date=$3   # NOTE(review): accepted but unused below — kept for caller compatibility
cd /data/dev/xiaofei
# Deduplicate column 1 across both inputs; every unique key is written once
# to intermediate/expLicsAll_tmp. logs/licsAll are intentionally unquoted in
# case callers pass multiple space-separated paths — TODO confirm.
awk 'BEGIN{FS=OFS="\t";}
{
UV[$1]=1;
}
END{
for(u in UV){
print u > "intermediate/expLicsAll_tmp";
}
}
' $logs $licsAll
## vim: set ts=2 sw=2: #
| true
|
6f972aad293e437461b7c53541a554a259371da1
|
Shell
|
crowbar/crowbar-core
|
/chef/cookbooks/provisioner/templates/suse/crowbar_register.erb
|
UTF-8
| 13,389
| 3.5625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#! /bin/bash -e
# vim: sw=4 et
#
# Copyright 2013, SUSE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TODO:
# change timezeone?
# NOTE: this file is an ERB template rendered by Chef; the <% ... %> tags are
# evaluated server-side before the script is served to the node.
# Print usage and exit 0. The --keep-existing-hostname flag is only offered
# when the admin server was not configured to always keep hostnames.
usage () {
cat <<EOF
<% if !node[:provisioner][:keep_existing_hostname] -%>
`basename $0` [-h|--help] [-v|--verbose] [-f|--force] [--gpg-auto-import-keys] [--keep-existing-hostname] [--no-gpg-checks] [--interface IF]
<% else -%>
`basename $0` [-h|--help] [-v|--verbose] [-f|--force] [--gpg-auto-import-keys] [--no-gpg-checks] [--interface IF]
<% end -%>
Register node in Crowbar.
EOF
exit
}
# Variables for options
CROWBAR_AUTO_IMPORT_KEYS=0
CROWBAR_NO_GPG_CHECKS=0
CROWBAR_FORCE=0
CROWBAR_VERBOSE=0
<% if !node[:provisioner][:keep_existing_hostname] -%>
KEEP_EXISTING_HOSTNAME=0
<% else -%>
KEEP_EXISTING_HOSTNAME=1
<% end -%>
DEFINEDDEV=
# Hand-rolled long-option parsing (getopts cannot handle --long flags).
while test $# -gt 0; do
    case "$1" in
        -h|--help|--usage|-\?) usage ;;
        -v|--verbose) CROWBAR_VERBOSE=1 ;;
        -f|--force) CROWBAR_FORCE=1 ;;
<% if !node[:provisioner][:keep_existing_hostname] -%>
        --keep-existing-hostname) KEEP_EXISTING_HOSTNAME=1 ;;
<% end -%>
        --gpg-auto-import-keys) CROWBAR_AUTO_IMPORT_KEYS=1 ;;
        --no-gpg-checks) CROWBAR_NO_GPG_CHECKS=1 ;;
        --interface)
            if test $# -eq 1; then
                echo "Option --interface requires an argument."
                exit 1
            else
                shift
                DEFINEDDEV="$1"
            fi
            ;;
        *) ;;
    esac
    shift
done
# Must run as root: the script rewrites system config and installs packages.
if [ $(id -u) -gt 0 ]; then
    echo "$0 needs to be run as root user."
    echo ""
    exit 1
fi
# Network will be reconfigured below; a plain SSH session would be cut off,
# so require a screen session when connected over SSH.
if test -n "$SSH_CONNECTION" -a -z "$STY"; then
    echo "Not running in screen. Please run $0 inside screen to avoid problems during network re-configuration."
    echo ""
    exit 1
fi
if test $CROWBAR_FORCE -ne 1; then
    echo "Running this tool will alter the system for integration with Crowbar."
    echo -n "Continue? [y/N]: "
    read ANSWER
    if test "x$ANSWER" != xy -a "x$ANSWER" != xY; then
        exit 0
    fi
fi
# Helper functions
# ----------------
# Ensure group $1 exists with gid $2; if it existed with a different gid,
# change it and force-reinstall package $3 (if given) so file ownership
# created by that package is corrected.
add_group() {
    gid=$(getent group $1 | cut -f 3 -d ":")
    if [[ -z "$gid" ]]; then
        groupadd --system --gid $2 $1
    elif [[ "$gid" != "$2" ]]; then
        groupmod -g $2 $1
        if [[ -n "$3" ]] && rpm -q $3 >/dev/null; then
            zypper --non-interactive install --force $3
        fi
    fi
}
# Ensure user $1 exists with uid/gid $2 and supplementary groups $3;
# force-reinstall package $4 on uid change (same ownership rationale as above).
add_user() {
    uid=$(getent passwd $1 | cut -f 3 -d ":")
    gid=$(getent passwd $1 | cut -f 4 -d ":")
    if [[ -z "$uid" ]]; then
        useradd --system --shell /sbin/nologin -d / --gid $2 --uid $2 --groups $3 $1
    elif [[ "$uid" != $2 ]] || [[ "$gid" != $2 ]]; then
        if [[ -n "$gid" ]]; then
            echo "Group $1 for the same username doesn't exist. Please clean up manually."
            exit 1
        fi
        usermod -u $2 $1
        groupmod -g $2 $1
        if [[ -n "$4" ]] && rpm -q $4 >/dev/null; then
            zypper --non-interactive install --force $4
        fi
    fi
}
# Variables that are templated
# ----------------------------
ADMIN_IP="<%= @admin_ip %>"
ADMIN_BROADCAST="<%= @admin_broadcast %>"
WEB_PORT="<%= @web_port %>"
HTTP_SERVER="http://${ADMIN_IP}:${WEB_PORT}"
CROWBAR_OS="<%= @os %>"
CROWBAR_ARCH="<%= @arch %>"
DOMAIN="<%= @domain %>"
NTP_SERVERS="<%= @ntp_servers_ips.join(" ") %>"
# we need to know an architecture we are running on
ARCH=`uname -m`
# Make sure we know which interface to use as a basis
# ---------------------------------------------------
# Scan all ethernet NICs (type 1) and collect those whose broadcast address
# matches the admin network; a unique match (or --interface) becomes BOOTDEV.
NIC_CANDIDATES=
DEFINEDDEV_FOUND=0
MULTIPLE_NICS=0
for nic in /sys/class/net/*; do
    [[ -f $nic/address && -f $nic/type && \
        $(cat "$nic/type") = 1 ]] || continue
    NICDEV="${nic##*/}"
    if ip addr show $NICDEV | grep -q " brd $ADMIN_BROADCAST "; then
        test "x$NICDEV" == "x$DEFINEDDEV" && DEFINEDDEV_FOUND=1
        test -n "$NIC_CANDIDATES" && MULTIPLE_NICS=1
        NIC_CANDIDATES="$NIC_CANDIDATES $NICDEV"
    fi
done
# remove leading space
NIC_CANDIDATES=${NIC_CANDIDATES## }
if test $DEFINEDDEV_FOUND -ne 1 -a -n "$DEFINEDDEV"; then
    if test -f "/sys/class/net/$DEFINEDDEV/address"; then
        echo "Defined interface to use ($DEFINEDDEV) does not seem to be on the admin network."
        echo "Is DHCP used for it?"
    else
        echo "Defined interface to use ($DEFINEDDEV) was not detected."
    fi
    exit 1
elif test "x$NIC_CANDIDATES" == "x"; then
    echo "Cannot find any good interface that would be on the admin network."
    echo "Did the node boot with DHCP on the admin network?"
    exit 1
elif test $DEFINEDDEV_FOUND -ne 1 -a $MULTIPLE_NICS -ne 0; then
    echo "More than one potential interface can be used:"
    echo " $NIC_CANDIDATES"
    echo ""
    echo "Please define the one to use with the --interface option."
    exit 1
fi
if test -n "$DEFINEDDEV"; then
    BOOTDEV="$DEFINEDDEV"
else
    BOOTDEV="$NIC_CANDIDATES"
fi
# Setup groups and users
# ---------------
# for making HA on shared NFS backend storage work
add_group glance 200 openstack-glance
add_group qemu 201 qemu
add_group kvm 202 qemu
add_group cinder 203 openstack-cinder
add_user glance 200 glance openstack-glance
add_user qemu 201 kvm qemu
add_user cinder 203 cinder openstack-cinder
# Check that we're really on the admin network
# --------------------------------------------
# Compare our own checksum against the copy served by the admin node; a
# mismatch means this script is stale or we reached the wrong server.
MD5_ADMIN=$(curl -s $HTTP_SERVER/$CROWBAR_OS/$CROWBAR_ARCH/crowbar_register | md5sum | awk -F " " '{print $1}')
MD5_LOCAL=$(md5sum $0 | awk -F " " '{print $1}')
if test "x$MD5_ADMIN" != "x$MD5_LOCAL"; then
    echo "This script does not match the one from the administration server."
    echo "Please download $HTTP_SERVER/$CROWBAR_OS/$CROWBAR_ARCH/crowbar_register and use it."
    exit 1
fi
# Setup the repos
# ---------------
<% @repos.keys.sort.each do |name| %>
zypper -n ar "<%= @repos[name][:url] %>" "<%= name %>"
<% unless @repos[name][:priority] == 99 -%>
zypper -n mr -p "<%= @repos[name][:priority] %>" "<%= name %>"
<% end -%>
<% end %>
<% if @repos.keys.include? "PTF" -%>
# PTF has an unknown key, and this is expected
zypper -n --gpg-auto-import-keys refresh -r PTF
<% end -%>
ZYPPER_REF_OPT=
test $CROWBAR_AUTO_IMPORT_KEYS -eq 1 && ZYPPER_REF_OPT=--gpg-auto-import-keys
test $CROWBAR_NO_GPG_CHECKS -eq 1 && ZYPPER_REF_OPT=--no-gpg-checks
zypper -n $ZYPPER_REF_OPT refresh
# Install packages that are needed
# --------------------------------
PATTERNS_INSTALL=
PACKAGES_INSTALL=
# Obvious dependencies
PACKAGES_INSTALL="$PACKAGES_INSTALL openssh"
# From autoyast profile
<% if @platform == "suse" -%>
PATTERNS_INSTALL="$PATTERNS_INSTALL Minimal base"
<% elsif @platform == "opensuse" -%>
PATTERNS_INSTALL="$PATTERNS_INSTALL base enhanced_base sw_management"
<% end -%>
PACKAGES_INSTALL="$PACKAGES_INSTALL netcat-openbsd ruby2.1-rubygem-chef ruby2.1-rubygem-crowbar-client"
# We also need ntp for this script
PACKAGES_INSTALL="$PACKAGES_INSTALL ntp"
case $ARCH in
    x86_64) PACKAGES_INSTALL+=" biosdevname";;
esac
# Also install relevant microcode packages
case $(grep -m 1 'model name' /proc/cpuinfo) in
    *Intel\(R\)*)
        PACKAGES_INSTALL+=" ucode-intel";;
    *AuthenticAMD*)
        PACKAGES_INSTALL+=" ucode-amd";;
esac
zypper --non-interactive install -t pattern $PATTERNS_INSTALL
# Auto-agree with the license since it was already agreed on for the admin server
<% if @platform == "suse" && @target_platform_version.to_f >= 12.1 -%>
zypper --non-interactive install --auto-agree-with-licenses suse-openstack-cloud-crowbar-release supportutils-plugin-suse-openstack-cloud
<% end -%>
zypper --non-interactive install $PACKAGES_INSTALL<%= " '#{@packages.join("' '")}'" unless @packages.empty? %>
<% if @platform == "opensuse" -%>
# we need rsyslog, not systemd-logger
zypper --non-interactive remove systemd-logger
<% end -%>
# Fail early if we know we can't succeed because of a missing chef-client
if ! which chef-client &> /dev/null; then
    echo "chef-client is not available."
    exit 1
fi
# Set up /etc/crowbarrc with crowbarctl credentials
# -------------------------------------------
cat <<EOF > /etc/crowbarrc
[default]
server = <%= @crowbar_protocol %>://<%= @admin_ip %>
username = <%= @crowbar_client_username %>
password = <%= @crowbar_client_password %>
<% unless @crowbar_verify_ssl %>
verify_ssl = 0
<% end %>
EOF
# Check that we can really register this node
# -------------------------------------------
if ! crowbarctl restricted ping &> /dev/null; then
    echo "Failed to contact the administration server."
    echo "Is the administration server up?"
    exit 1
fi
# Copied from sledgehammer
# Derive the node name from the boot NIC's MAC (d<mac-with-dashes>.<domain>)
# unless the existing hostname is to be kept, in which case it must already
# be a direct member of the cloud domain.
MAC=$(cat /sys/class/net/$BOOTDEV/address)
if test $KEEP_EXISTING_HOSTNAME -ne 1; then
    HOSTNAME="d${MAC//:/-}.${DOMAIN}"
else
    HOSTNAME=$(cat /etc/HOSTNAME)
    if ! echo "$HOSTNAME" | grep -q "\.$DOMAIN$"; then
        echo "The fully qualified domain name did not contain the $DOMAIN cloud domain."
        exit 1
    elif echo "${HOSTNAME%%.$DOMAIN}" | grep -q "\."; then
        echo "The hostname is in a subdomain of the $DOMAIN cloud domain."
        exit 1
    fi
fi
if crowbarctl restricted show $HOSTNAME &> /dev/null; then
    echo "This node seems to be already registered."
    exit 1
fi
# Some initial setup
# ------------------
# Disable firewall
if [ -x /sbin/SuSEfirewall2 ] ; then
    /sbin/SuSEfirewall2 off
fi
# SSH setup
systemctl enable sshd.service
systemctl start sshd.service
mkdir -p /var/log/crowbar
# Taken from autoyast profile
mkdir -p /root/.ssh
chmod 700 /root/.ssh
# Append the admin server's authorized_keys if it serves one (a 404 body is
# detected by content since curl -s succeeds on HTTP errors too).
if ! curl -s -o /root/.ssh/authorized_keys.wget \
    $HTTP_SERVER/authorized_keys ||\
    grep -q "Error 404" /root/.ssh/authorized_keys.wget; then
    rm -f /root/.ssh/authorized_keys.wget
else
    test -f /root/.ssh/authorized_keys && chmod 644 /root/.ssh/authorized_keys
    cat /root/.ssh/authorized_keys.wget >> /root/.ssh/authorized_keys
    rm -f /root/.ssh/authorized_keys.wget
fi
# Steps from sledgehammer
# -----------------------
if test $KEEP_EXISTING_HOSTNAME -ne 1; then
    echo "$HOSTNAME" > /etc/HOSTNAME
fi
sed -i -e "s/\(127\.0\.0\.1.*\)/127.0.0.1 $HOSTNAME ${HOSTNAME%%.*} localhost.localdomain localhost/" /etc/hosts
hostname "$HOSTNAME"
export DOMAIN
export HOSTNAME
ntp="/usr/sbin/ntpdate -u $NTP_SERVERS"
# Make sure date is up-to-date
until $ntp; do
    echo "Waiting for NTP server(s) $NTP_SERVERS"
    sleep 1
done
# Fetch the chef validation key with linear backoff (up to 30 attempts).
for retry in $(seq 1 30); do
    curl -f --retry 2 -o /etc/chef/validation.pem \
        --connect-timeout 60 -s -L \
        "$HTTP_SERVER/validation.pem"
    [ -f /etc/chef/validation.pem ] && break
    sleep $retry
done
TMP_ATTRIBUTES=$(mktemp --suffix .json)
# Walk the node through the Crowbar state machine: discovering -> discovered
# -> allocate -> hardware-installing -> hardware-installed -> installing ->
# installed, running chef-client at the states that require it.
# Make sure that we have the right target platform
echo "{ \"target_platform\": \"$CROWBAR_OS\", \"crowbar_wall\": { \"registering\": true } }" > "$TMP_ATTRIBUTES"
crowbarctl restricted transition $HOSTNAME "discovering"
chef-client -S http://$ADMIN_IP:4000/ -N "$HOSTNAME" --json-attributes "$TMP_ATTRIBUTES"
crowbarctl restricted transition $HOSTNAME "discovered"
# TODO need to find way of knowing that chef run is over on server side
sleep 30
crowbarctl restricted allocate $HOSTNAME
# Cheat to make sure that the bootdisk finder attribute will claim the right disks
echo '{ "crowbar_wall": { "registering": true } }' > "$TMP_ATTRIBUTES"
rm -f /etc/chef/client.pem
crowbarctl restricted transition $HOSTNAME "hardware-installing"
chef-client -S http://$ADMIN_IP:4000/ -N "$HOSTNAME" --json-attributes "$TMP_ATTRIBUTES"
crowbarctl restricted transition $HOSTNAME "hardware-installed"
#TODO
#wait_for_pxe_state ".*_install"
sleep 30
rm -f "$TMP_ATTRIBUTES"
crowbarctl restricted transition $HOSTNAME "installing"
# Obviously, on reboot from sledgehammer, we lose the client key
rm -f /etc/chef/client.pem
# Revert bits from sledgehammer that should not persist
# -----------------------------------------------------
# Now setup the hostname with the short name; we couldn't do that earlier
# because we do like in sledgehammer (which sets the hostname to the FQDN one)
hostname "${HOSTNAME%%.*}"
# Remove the resolution for hostname in /etc/hosts; this shouldn't be needed anymore
sed -i -e "s/\(127\.0\.0\.1.*\)/127.0.0.1 localhost.localdomain localhost/" /etc/hosts
# Steps from autoyast profile
# ---------------------------
# normally done by crowbar_join, but only when chef is installed by crowbar_join
systemctl enable chef-client.service
curl -s -o /usr/sbin/crowbar_join $HTTP_SERVER/$CROWBAR_OS/$CROWBAR_ARCH/crowbar_join.sh
chmod +x /usr/sbin/crowbar_join
crowbarctl restricted transition $HOSTNAME "installed"
#TODO
# Wait for DHCP to update
sleep 30
<% if @enable_pxe -%>
# Make sure we can always resolve our hostname; we use DHCP to find what's our
# admin IP
DHCP_VARS=$(mktemp)
/usr/lib/wicked/bin/wickedd-dhcp4 --test --test-output $DHCP_VARS $BOOTDEV
if test $? -eq 0; then
    eval $(grep ^IPADDR= "$DHCP_VARS")
    ADMIN_IP=${IPADDR%%/*}
    echo "$ADMIN_IP $HOSTNAME ${HOSTNAME%%.*}" >> /etc/hosts
fi
rm -f "$DHCP_VARS"
<% end -%>
/usr/sbin/crowbar_join --setup --verbose
| true
|
ec3bc6c2c16073e290849cd5838cfeeb2e2409a1
|
Shell
|
osfove2021asix2/asix2_fortea_oscar_m06uf2pr3
|
/url.sh
|
UTF-8
| 354
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Prompt for a URL, verify it is reachable with curl, then open it in Firefox.

# Prompt the user and read the URL into the global $URL.
function AÑADE(){
    echo -n "URL: "
    # -r keeps backslashes in the typed URL literal (the original plain
    # `read` would mangle them).
    read -r URL
}

# Probe the URL (5 s connect timeout); exit 1 if it is unreachable.
function COMPROBAR(){
    # BUG FIX: $URL is now quoted so URLs containing ?&, spaces or globs are
    # passed to curl as a single argument.
    curl --connect-timeout 5 "$URL" > /dev/null 2>&1
    if [[ "$?" -eq 0 ]]; then
        echo "CORRECTA"
    else
        echo "INCORRECTO"
        exit 1;
    fi
}

# Open the verified URL in the browser.
function NAVEGADOR(){
    echo "Abriendo $URL en el navegador"
    firefox "$URL"
}

# Main flow: prompt -> verify -> open.
function FUN(){
    AÑADE
    COMPROBAR
    NAVEGADOR
}
FUN
| true
|
3dc3ee23f2c1eb577e11ae9f050e4a7793558f6e
|
Shell
|
blueyed/dotfiles
|
/usr/bin/awesome-restart
|
UTF-8
| 1,239
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Restart awesome by signalling the session runner to restart it.
# This loads a new binary.
# The session runner is ./usr/bin/session-run-awesome.sh.

# Find the awesome process that belongs to *this* $DISPLAY by inspecting each
# candidate's environment; abort if more than one matches.
awesome_pid=
for p in $(pgrep -x awesome); do
    if grep -q "^DISPLAY=$DISPLAY$" "/proc/$p/environ"; then
        echo "Found pid $p for DISPLAY=$DISPLAY."
        if [ -n "$awesome_pid" ]; then
            echo "ERROR: found multiple pids!? ($awesome_pid, $p) - aborting." >&2
            exit 1
        fi
        awesome_pid=$p
    fi
done
[ -z "$awesome_pid" ] && {
    echo "No awesome found for DISPLAY=$DISPLAY." >&2
    exit 1
}

# The session runner is awesome's parent; it restarts awesome on SIGUSR1.
session_pid=$(ps -o ppid= "$awesome_pid")
[ -z "$session_pid" ] && {
    echo "No session PID found for pid $awesome_pid (DISPLAY=$DISPLAY)." >&2
    exit 1
}
# echo "Signalling session (USR1), pid=${session_pid}."
kill -USR1 "$session_pid"

# Write args to a file that is read by ~/.dotfiles/usr/bin/session-run-awesome.sh.
# This allows to use another config temporarily, e.g.:
#   awesome-restart -c ~/src/awesome/awesomerc.lua
args_file="/var/run/user/$(id -u)/awesome-restart-args"
echo "$@" > "$args_file"

# Ask awesome to exit gracefully (TERM); escalate to KILL if it is still
# alive after one second (kill -0 only tests for existence).
# echo "Sending TERM to awesome: pid=${awesome_pid}."
kill -TERM "$awesome_pid"
sleep 1
if kill -0 "$awesome_pid" 2>/dev/null; then
    kill -KILL "$awesome_pid"
fi
| true
|
332a9bff8acff60a9154bbe8663b67005e24407b
|
Shell
|
zlzlife/config
|
/zsh/.zsh/git.zsh
|
UTF-8
| 1,875
| 3.328125
| 3
|
[] |
no_license
|
# Helper functions for common git commands.

# Print the name of the current branch.
_current_branch() {
    # git symbolic-ref --short -q HEAD
    br=$(git branch | grep "*")
    echo ${br/* /}
}
# Stage everything and commit; the message defaults to the current timestamp.
_gac() {
    commit_info=$1
    if [ -z "${commit_info}" ]
    then
        commit_info=$(date "+%Y-%m-%d %H:%M:%S")
    fi
    git add .
    # BUG FIX: ${commit_info} must be quoted — unquoted, the default
    # "YYYY-mm-dd HH:MM:SS" message word-splits and git treats the time part
    # as a pathspec, so the commit fails.
    git commit -m "${commit_info}"
}
# Commit (see _gac) and push the current branch.
_gacp() {
    _gac $1
    git push origin "$(_current_branch)"
}
# Stage everything and commit via commitizen (git cz).
_gacz() {
    git add .
    git cz
}
# Commit via commitizen and push the current branch.
_gaczp() {
    git add .
    git cz
    git push origin "$(_current_branch)"
}
# Push the current branch.
_gpush() {
    git push origin "$(_current_branch)"
}
# Pull the latest code for the current branch.
_gpull() {
    git pull origin "$(_current_branch)"
}
# Fetch, then pull the current branch.
_gfp() {
    git fetch
    git pull origin "$(_current_branch)"
}
# Stage everything and commit with the given message.
_gc() {
    git add .
    git commit -m "$1"
}
# Create an annotated tag: _gctag <tag> <message>.
_gctag(){
    git tag -a "$1" -m "$2"
}
# Create an annotated tag and push it: _gcptag <tag> <message>.
_gcptag(){
    _gctag "$1" "$2"
    git push origin "$1"
}
# Short aliases for the helpers above.
alias gac=_gac
alias gacp=_gacp
alias gacz=_gacz
alias gaczp=_gaczp
alias gpush=_gpush
alias gpull=_gpull
alias gfp=_gfp
alias gc=_gc
alias gctag=_gctag
alias gcptag=_gcptag
alias gstat="git status"
alias gssh="git stash"
alias gsshp="git stash pop"
alias gsshls="git stash list"
alias gadd="git add ."
alias gclone="git clone"
alias gcz="git cz"
alias gfetch="git fetch"
alias gck="git checkout"
alias gba="git branch -a"
alias gr='git remote'
alias grv='git remote -v'
alias gtag='git tag'
alias gdtag='git tag -d $1'
alias gstag='git show $1'
alias gpatag='git push --tags'
alias gchpick='git cherry-pick'
alias glola="git log --graph --pretty='%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --all"
alias gpath="git rev-parse --show-toplevel"
| true
|
61c53f85803b801b528a887a4b681f6abfc74df9
|
Shell
|
adityalstkp/docker-playground
|
/frontend/scripts/run.sh
|
UTF-8
| 695
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: wait for Consul, register this frontend as a Consul
# service, start a local consul agent, then exec the container's main command.

# Block (up to 15 s) until the Consul server answers; bail out otherwise.
(/scripts/wait-for-it.sh -t 15 consul.play.local:8500)
WAIT_STATUS=$?
if [ "$WAIT_STATUS" -ne 0 ]; then
    echo "Failed waiting consul ready"
    exit "$WAIT_STATUS"
fi

NAME="play-frontend"
PORT=3001

# add service json consul agent
mkdir -p /etc/consul/consul.d/
/scripts/consul.service.node.template.sh $NAME $PORT > /etc/consul/consul.d/service-backend.json

# add consul-agent config
mkdir -p /var/consul/config/
/scripts/consul.agent.template.sh $NAME > /var/consul/config/config.json

# Run the agent in the background so the main command can take PID 1 duties.
consul agent -config-file=/var/consul/config/config.json -config-dir=/etc/consul/consul.d -join=consul.play.local -retry-join "consul.play.local" &

# Run the main container command
exec "$@"
| true
|
5e58f53354296caee6031308b36f47acf2d77aba
|
Shell
|
serifyesil/PMOD
|
/sssp.sh
|
UTF-8
| 2,202
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark Galois SSSP with several worklist implementations over two graphs
# (twitter40 social graph and the USA road network), for a range of thread
# counts, NUM_TRY repetitions each. One log file per run under $LOG_DIR.
# Required environment: GALOIS_HOME, SOC_DATA, ROAD_DATA.
# GALOIS_HOME=
# SOC_DATA=./datasets
# ROAD_DATA=./datasets

THREADS=( 1 5 10 20 30 40 )
NUM_TRY=5
LOG_DIR=sssp-logs
mkdir -p $LOG_DIR

# run_pair WL DELTA_SOC DELTA_ROAD SUFFIX
# Run one sssp invocation per graph with worklist WL and the per-graph delta.
# SUFFIX distinguishes log names for repeated worklists (e.g. "d" for the
# delta-8 obim variant). Uses $t (thread count) and $count (repetition) from
# the enclosing loops.
run_pair() {
    local wl=$1 delta_soc=$2 delta_road=$3 suffix=$4
    $GALOIS_HOME/sssp/sssp -t $t -wl $wl -delta $delta_soc -startNode 12 $SOC_DATA/twitter40.bin > ${LOG_DIR}/twitter40_wl_${wl}${suffix}_t_${t}_n_${count}
    $GALOIS_HOME/sssp/sssp -t $t -wl $wl -delta $delta_road -startNode 1 $ROAD_DATA/USA-road-dUSA.bin > ${LOG_DIR}/USA-road-dUSA_wl_${wl}${suffix}_t_${t}_n_${count}
}

for t in "${THREADS[@]}"
do
    count=0
    while [ $count -lt $NUM_TRY ]
    do
        # Same worklist/delta matrix as the original hand-unrolled version.
        run_pair obim        0 14 ""
        run_pair spraylist   0 0  ""
        run_pair multiqueue4 0 0  ""
        run_pair swarm       0 0  ""
        run_pair adap-obim   0 0  ""
        run_pair obim        8 8  "d"
        echo "Done ${count}"
        count=$((count+1))
    done
    echo "Done thread ${t}"
done
| true
|
102d642cb83956bd927244da53015b0f5d5e3e97
|
Shell
|
JUrban/BliStr
|
/setupdirs.sh
|
UTF-8
| 800
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up a BliStr working directory: unpack training problems and initial
# results, install the driver scripts, and fetch/prepare ParamILS.
# run as ./setupdirs.sh /home/mptp/big/blistr2
if [ -z "$1" ]; then echo "install dir required"; exit 1; fi
distrdir=`pwd`
mkdir $1; mkdir $1/bin
tar xzf trainingproblems.tar.gz -C$1
tar xzf initialresults.tar.gz -C$1
cp -a BliStr.pl $1/bin/BliStr.pl
cp -a prmils/params2str.pl $1/bin/params2str.pl
cd $1
# Rename the unpacked archives to the directory names BliStr expects.
mv trainingproblems allprobs
mv initialresults initprots
mkdir strats; mkdir prots
# Download ParamILS and expose it under the version-independent "paramils" link.
wget http://www.cs.ubc.ca/labs/beta/Projects/ParamILS/paramils2.3.5-source.zip
unzip paramils2.3.5-source.zip
ln -s paramils2.3.5-source paramils
cd paramils2.3.5-source
# Install the E-prover scenario (parameter space + wrapper) and its data.
mkdir example_e1
cp -a $distrdir/prmils/e-params.txt example_e1/
cp -a $distrdir/prmils/e_wrapper1.rb example_e1/
tar xzf $distrdir/trainingproblems.tar.gz -Cexample_data
mv example_data/trainingproblems example_data/e1
| true
|
bdf7887aa0e78921b743cc4b7be3973032932c4b
|
Shell
|
PMc84/vncBlanking
|
/vncBlanking.sh
|
UTF-8
| 2,234
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Blank the local display (fade brightness to 0) while a VNC client is
# connected on port 5900, and restore brightness / input when it disconnects.
# Connections from the admin IPs never blank the screen.
#
# BUG FIX: the original looped by mutual recursion
# (MAIN_LOOP -> SLEEP_LOOP -> MAIN_LOOP) without ever returning, so the
# bash call stack grew without bound the longer the script ran. This version
# polls in a flat while-loop with identical observable behavior.

# Declare variables
export DISPLAY=:0
VNCACTIVE="no"                          # tracked state: client currently connected?
VNCPORT=":5900"
SLEEP="3"                               # poll interval in seconds
ADMINIP="192.168.99.50 192.168.99.54"   # these IPs never trigger blanking
BRIGHTNESS=1
CONNECTEDDISPLAYS=$(xrandr | grep " connected" | cut -f1 -d " ")
MOUSEID=$(xinput --list --long | grep XIButtonClass | head -n 1 | egrep -o '[0-9]+')
KEYBOARDID=$(xinput --list --long | grep XIKeyClass | head -n 1 | egrep -o '[0-9]+')

# Fade all connected displays down to brightness 0 in 0.02 steps, unless the
# connecting client is an admin IP (then do nothing).
DISABLE_LOCAL () {
    echo "DISABLE!"
    CONNECTINGIP=$(ss -H -t4 state established sport = 5900 | awk 'NR==1{ print $4}' | cut -d ':' -f 1)
    if [[ $ADMINIP =~ $CONNECTINGIP ]]; then
        echo "ADMIN IP ADDRESS DOING NOTHING"
        return
    fi
    BRIGHTNESS=1
    # BUG FIX: the original tested [[ $BRIGHTNESS -eq 0 ]], but -eq requires
    # integers and bc yields values like ".98", producing an arithmetic error
    # on every fade step. bc terminates at exactly "0", so a string compare
    # gives the same stopping point without the errors.
    until [[ $BRIGHTNESS == "0" ]]; do
        BRIGHTNESS=$(echo "$BRIGHTNESS-0.02" | bc)
        for i in $CONNECTEDDISPLAYS; do
            xrandr --output $i --brightness $BRIGHTNESS
            sleep 0.0005
        done
    done
    # Input blocking kept disabled, as in the original:
    # xinput disable $MOUSEID
    # xinput disable $KEYBOARDID
}

# Restore full brightness and re-enable local keyboard and mouse input.
ENABLE_LOCAL () {
    echo "ENABLE!"
    for i in $CONNECTEDDISPLAYS; do
        xrandr --output $i --brightness 1
    done
    xinput enable $MOUSEID
    xinput enable $KEYBOARDID
}

# Main poll loop: re-check the VNC port every $SLEEP seconds and act only on
# state transitions (connect -> blank, disconnect -> restore).
while :; do
    VNCCONNECTED=$(ss -H -t4 state established sport = $VNCPORT | grep -c $VNCPORT)
    if (( VNCCONNECTED >= 1 )); then
        if [[ $VNCACTIVE == "no" ]]; then
            VNCACTIVE="yes"
            DISABLE_LOCAL
        fi
    else
        if [[ $VNCACTIVE == "yes" ]]; then
            VNCACTIVE="no"
            ENABLE_LOCAL
        fi
    fi
    sleep $SLEEP
done
| true
|
807055dd2244ee8da5a3582f6e7db57dfe47fa2c
|
Shell
|
lukealbao/dotfiles
|
/shell/.bash_profile
|
UTF-8
| 250
| 2.53125
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Login-shell startup: pull in aliases, the prompt definition, and finally
# .bashrc (which holds most of the configuration). Missing files are skipped.

if [[ -f "$HOME/.aliases" ]]; then
  source "$HOME/.aliases"
fi

if [[ -f "$HOME/.bash_prompt" ]]; then
  source "$HOME/.bash_prompt"
fi

if [[ -f "$HOME/.bashrc" ]]; then
  source "$HOME/.bashrc"
fi
| true
|
61347daa5666b95033abdd58eea97655628296a3
|
Shell
|
Network-verification/batfish
|
/projects/allinone/allinone
|
UTF-8
| 398
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launcher for the allinone jar, with Cygwin terminal fixups.
# Relies on batfish_cygwin (defined by the caller's environment — presumably
# sourced from a batfish helper; TODO confirm) to detect Cygwin.
ALLINONE=${BASH_SOURCE[0]}
# BUG FIX: quote the path arguments so an install directory containing
# spaces does not break dirname/cygpath.
ALLINONE_PATH=$(dirname "$ALLINONE")
ALLINONE_JAR=$ALLINONE_PATH/out/allinone.jar
ALLINONE_JAVA_ARGS=
if batfish_cygwin; then
    # Convert to a Windows path for the JVM and switch jline to a terminal
    # type that works under Cygwin; raw mode is needed for its line editing.
    ALLINONE_JAR="$(cygpath -w "$ALLINONE_JAR")"
    ALLINONE_JAVA_ARGS="-Djline.terminal=jline.UnixTerminal"
    stty -icanon min 1 -echo
fi
java $ALLINONE_JAVA_ARGS -jar "$ALLINONE_JAR" "$@"
if batfish_cygwin; then
    # Restore normal terminal modes after the JVM exits.
    stty icanon echo
fi
| true
|
703135a2e9e5de65ff3340773749aa44a526bc99
|
Shell
|
scylladb/scylla-monitoring
|
/start-datadog.sh
|
UTF-8
| 3,005
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Start a Datadog agent container configured to scrape a Prometheus endpoint.
if [ -f env.sh ]; then
    . env.sh
fi

usage="$(basename "$0") [-h] [-A DD_API_KEY ][-p ip:port address of prometheus ] [-d configuration directory] [-e enviroment variable, multiple params are supported] [-D encapsulate docker param] -- Start a datadog agent inside a container"

# BUG FIX: the original optstring ':hA:p:e:H:D:' was missing 'd:' and 'l'
# even though the case statement below handles them, so -d and -l were
# rejected as illegal options.
while getopts ':hA:p:e:H:D:d:l' option; do
  case "$option" in
    h) echo "$usage"
       exit
       ;;
    A) DD_API_KEY=$OPTARG
       ;;
    D) DOCKER_PARAM="$DOCKER_PARAM $OPTARG"
       ;;
    H) hostname="$OPTARG"
       ;;
    e) ENV_ARRAY+=("$OPTARG")
       ;;
    d) CONF_DIR="$OPTARG"
       ;;
    p) PROMIP="$OPTARG"
       ;;
    l) DOCKER_PARAM="$DOCKER_PARAM --net=host"
       ;;
    :) printf "missing argument for -%s\n" "$OPTARG" >&2
       echo "$usage" >&2
       exit 1
       ;;
   \?) printf "illegal option: -%s\n" "$OPTARG" >&2
       echo "$usage" >&2
       exit 1
       ;;
  esac
done

if [ -z "$DD_API_KEY" ]; then
    # Typo fixes: "Datagot ... pressent" -> "Datadog ... present".
    printf "\nDatadog API keys are not present, exiting.\n"
    exit 1
fi

if [ -z "$DATADOG_NAME" ]; then
    DATADOG_NAME="datadog-agent"
fi

# Refuse to start if a container with that name already exists.
docker container inspect $DATADOG_NAME > /dev/null 2>&1
if [ $? -eq 0 ]; then
    printf "\nSome of the monitoring docker instances ($DATADOG_NAME) exist. Make sure all containers are killed and removed. You can use kill-all.sh for that\n"
    exit 1
fi

group_args=()
is_podman="$(docker --help | grep -o podman)"
if [ ! -z "$is_podman" ]; then
    # podman needs the user namespace preserved for bind-mount ownership
    group_args+=(--userns=keep-id)
fi

if [ -z "$CONF_DIR" ]; then
    CONF_DIR="datadog_conf"
fi

for val in "${ENV_ARRAY[@]}"; do
    ENV_COMMAND="$ENV_COMMAND -e $val"
done

# Determine the host IP reachable from inside the container (differs between
# podman/docker and Linux/macOS).
if [ ! -z "$is_podman" ]; then
    if [[ $(uname) == "Linux" ]]; then
        DOCKER_HOST=$(hostname -I | awk '{print $1}')
    elif [[ $(uname) == "Darwin" ]]; then
        DOCKER_HOST=$(ifconfig bridge0 | awk '/inet / {print $2}')
    fi
else
    if [[ $(uname) == "Linux" ]]; then
        DOCKER_HOST=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')
    elif [[ $(uname) == "Darwin" ]]; then
        DOCKER_HOST=$(ifconfig bridge0 | awk '/inet / {print $2}')
    fi
fi

# macOS readlink lacks -f; use realpath there.
if [[ $(uname) == "Linux" ]]; then
    readlink_command="readlink -f"
elif [[ $(uname) == "Darwin" ]]; then
    readlink_command="realpath "
fi

if [ -z "$hostname" ]; then
    hostname=$HOSTNAME
fi

# Generate the agent configuration and the Prometheus check definition.
mkdir -p $CONF_DIR/conf.d/prometheus.d
if [ ! -f $CONF_DIR/datadog.yaml ]; then
    cat >$CONF_DIR/datadog.yaml <<EOL
# datadog.yaml
process_config:
   enabled: true
   scrub_args: true
logs_enabled: true
confd_path: /conf.d
log_level: INFO
hostname: ${hostname}
EOL
fi
cat docs/source/procedures/datadog/conf.yaml|sed "s/IP:9090/$PROMIP/g" > $CONF_DIR/conf.d/prometheus.d/conf.yaml
CONF_DIR=$($readlink_command "$CONF_DIR")

docker run -d $DOCKER_PARAM ${DOCKER_LIMITS["datadog"]} -i \
  --name $DATADOG_NAME \
  --pid host -v $CONF_DIR/datadog.yaml:/etc/datadog-agent/datadog.yaml \
  -v $CONF_DIR/conf.d/:/conf.d \
  $ENV_COMMAND \
  -e DD_API_KEY="$DD_API_KEY" -e DD_CONTAINER_INCLUDE="" gcr.io/datadoghq/agent:latest
| true
|
709e2bfb9c0aad171ee37dcd8408f750ee0d95fa
|
Shell
|
Cour-de-cassation/judilibre-ops
|
/scripts/check_install.sh
|
UTF-8
| 1,479
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ensure required tools (envsubst, jq, htpasswd, rclone) are installed,
# using the distro's package manager (DEB vs RPM family).
export OS_TYPE=$(cat /etc/os-release | grep -E '^NAME=' | sed 's/^.*debian.*$/DEB/I;s/^.*ubuntu.*$/DEB/I;s/^.*fedora.*$/RPM/I;s/.*centos.*$/RPM/I;')

if ! (which envsubst > /dev/null 2>&1); then
    if [ "${OS_TYPE}" = "DEB" ]; then
        sudo apt-get install -yqq gettext;
    fi;
    if [ "${OS_TYPE}" = "RPM" ]; then
        sudo yum install -y gettext;
    fi;
fi;

if ! (which jq > /dev/null 2>&1); then
    if [ "${OS_TYPE}" = "DEB" ]; then
        sudo apt-get install -yqq jq;
    fi;
    if [ "${OS_TYPE}" = "RPM" ]; then
        sudo yum install -y jq;
    fi;
fi

if ! (which htpasswd > /dev/null 2>&1); then
    if [ "${OS_TYPE}" = "DEB" ]; then
        sudo apt-get install -yqq apache2-utils;
    fi;
    if [ "${OS_TYPE}" = "RPM" ]; then
        sudo yum install -y httpd-tools;
    fi;
fi

# rclone has no distro package here; fetch the upstream package and install
# it locally. (Cleaned up the stray line-continuation backslashes the
# original carried over from a Makefile recipe.)
if ! (which rclone > /dev/null 2>&1); then
    if [ "${OS_TYPE}" = "DEB" ]; then
        curl -s -O https://downloads.rclone.org/rclone-current-linux-amd64.deb
        sudo dpkg -i rclone-current-linux-amd64.deb
        rm rclone-*-linux-amd64*
    fi
    if [ "${OS_TYPE}" = "RPM" ]; then
        curl -s -O https://downloads.rclone.org/rclone-current-linux-amd64.rpm
        sudo yum localinstall -y rclone-current-linux-amd64.rpm
        rm rclone-*-linux-amd64*
    fi
fi
| true
|
87bf5f7701d9bd8d749986fe2485da4f9ca8d85e
|
Shell
|
KebinuChiousu/ddns-powerdns
|
/stop.sh
|
UTF-8
| 137
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stop the ddns daemon whose PID was recorded in ddns.pid, then remove the
# pidfile; print a notice when no pidfile (hence no daemon) is present.
if [ -e ddns.pid ]
then
    echo "stopping: ddns..."
    # $(...) instead of backticks; quoted so an empty or malformed pidfile
    # cannot word-split into multiple kill arguments.
    kill "$(cat ddns.pid)"
    rm ddns.pid
else
    echo "ddns is not running."
fi
| true
|
194a06898244c1b666c65638955697221ae1e83b
|
Shell
|
glxe/glxe.github.io
|
/commit.sh
|
UTF-8
| 261
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive commit helper: show status, pull, stage everything, then
# commit and push only when a non-empty commit message was entered.
git status
read -r -p "Press ctrl-c exit. Any key to continue! "
git pull
git add .
read -r -p "Please enter git commit message: " c
# Bug fix: the original tested [ " $c " ], which is ALWAYS true — the
# padding spaces make the string non-empty even when the message is empty.
# Test explicitly for a non-empty message instead.
if [ -n "$c" ]; then
    git commit -m "$c"
    git push
    echo go on ...
fi
#git push -u origin master -f
| true
|
eff1008675051901a62df6513d3ebd213bbb231b
|
Shell
|
valtaz/ansible-aws-squid
|
/templates/squid_monitor.sh
|
UTF-8
| 16,891
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# DESCRIPTION:
# ============
# NAT self-monitoring script for a simple HA hot fail-over NAT
# based on the AWS [HA NAT](https://aws.amazon.com/articles/2781451301784570)
# Should be run on each of the **two** NAT instances
#
# OUTPUT:
# =======
#
# REQUIREMENTS:
# =============
# AWS CLI version >= 1.3
# IAM Instance profile role allowing:
# - ec2:CreateRoute
# - ec2:CreateRouteTable
# - ec2:DeleteRoute
# - ec2:DeleteRouteTable
# - ec2:DescribeInstances
# - ec2:DescribeNetworkInterfaces
# - ec2:DescribeRouteTables
# - ec2:DescribeTags
# - ec2:ReplaceRoute
#
# VERSION: 1.0
# CREATED: 20/08/2014 09:01:57 EST
#
# $Id$
# Script name with the leading path stripped (used in logs and tmpdir name).
PROG=${0##*/}
# Not dryrun by default
DRYRUN=0
# No excessive output
DEBUG=0
# Directory to store all temporary files in
TMPDIR="/tmp/$PROG.$$"
mkdir -p "$TMPDIR"
# Exit on any error
set -e
# Clean up after yourself on exit
trap '_exit_trap' 0 HUP INT QUIT ABRT TERM
# Cleanup handler installed via trap: remember the exit status, disable all
# further traps so cleanup runs only once, remove the temporary directory,
# then propagate the original status.
_exit_trap() {
    local rc=$?
    trap - 0 HUP INT QUIT ABRT TERM
    if [[ -d "$TMPDIR" ]]; then
        rm -rf "$TMPDIR"
    fi
    exit "$rc"
}
# Fallback functions before any libraries are sourced
# log MESSAGE...
# Core logger. The message priority is read from the dynamically scoped
# variable $pri (callers invoke it as "pri=... log ..."). Depending on
# configuration the message goes to $LOGFILE, the terminal (stderr),
# syslog ($SYSLOG_FAC) and — for warnings/errors — the $SNSARN SNS topic.
log()
{
    sns_notice=false
    # Decorate the message and decide whether to page SNS, by priority.
    case ${pri:-notice} in
        *notice)
            out_msg="$*"
            ;;
        *warning)
            out_msg="-WARN- $*"
            sns_notice=true
            ;;
        *error)
            out_msg="*ERROR* $*"
            sns_notice=true
            ;;
        *)
            out_msg="$*"
            ;;
    esac
    # Append a timestamped line to the log file when it is writable.
    if [ -w "$LOGFILE" ]; then
        echo "[$(${date:-date} --iso-8601=seconds)] ${PROG:+$PROG:} $out_msg" >> "$LOGFILE"
    fi
    # Echo to stderr when running interactively (stdin is a terminal).
    if [ -t 0 ]; then
        echo "$out_msg" >&2
    fi
    # Forward to syslog when a facility is configured.
    if [ -n "$SYSLOG_FAC" ]; then
        logger -t $PROG -p $SYSLOG_FAC.$pri -- "$out_msg"
    fi
    # Publish warnings/errors to SNS when a topic ARN is configured.
    if [ -n "$SNSARN" ] && $sns_notice; then
        aws sns --region $region publish --topic-arn "$SNSARN" --subject "$pri message from $PROG on NAT $instance_id" \
            --message "$out_msg"
    fi
}
# Thin severity wrappers over log(): each pins the dynamically scoped $pri
# variable for the duration of the log call.
info() { pri=notice log "$@"; }

warn() { pri=warning log "$@"; }

error() { pri=error log "$@"; }
# Emit the arguments on stderr, but only when debug output is enabled
# (DEBUG > 0). Always returns 0 so it is safe under 'set -e'.
debug_info() {
    if (( ${DEBUG:-0} > 0 )); then
        echo "$@" >&2
    fi
}

# Like debug_info, but additionally copies stdin to stderr — handy for
# dumping the contents of temporary files while debugging.
debug_cat() {
    if (( ${DEBUG:-0} > 0 )); then
        echo "$@" >&2
        cat - >&2
    fi
}
# die MESSAGE... — log an error and abort the script with status 1.
die()
{
    error "$@"
    exit 1
}

# run CMD ARGS... — execute a command, honouring --dry-run (print the
# command instead of running it) and logging the command line first.
run()
{
    if [ ${DRYRUN:-0} -gt 0 ]; then
        echo "--dry-run: $@" >&2
    else
        if [ ${DEBUG:-0} -gt 0 ]; then
            echo "+ run: $@" >&2
        fi
        log "+ run: $@"
        "$@"
    fi
}
# query_nat_instances QUERY [EXTRA_FILTER]
# List NAT instances via the AWS CLI, scoped to this VPC (when $vpc_id is
# known) and to instances carrying this host's IAM instance profile (when
# $iam_profile is known); QUERY is a JMESPath expression, EXTRA_FILTER an
# optional additional --filters term.
query_nat_instances()
{
    debug_info "Running query_nat_instances with arguments $@"
    aws ec2 describe-instances --region $region \
        --query "$1" \
        --filters ${vpc_id:+Name=vpc-id,Values=$vpc_id} ${iam_profile:+Name=iam-instance-profile.arn,Values=$iam_profile} ${2} \
        --output text
}
# get_vpc_id_for_instances [EXTRA_FILTER]
# Resolve the single VPC id shared by the NAT instances. Fails (returns 1)
# when the query itself errors or when more than one distinct VPC id comes
# back. Implementation note: the query output is fed to the brace group via
# a here-document so the group runs in the current shell and $? of the
# command substitution is visible to the error check.
get_vpc_id_for_instances()
{
    {
        if [ $? -ne 0 ]; then
            error "Error querying instances for VPC id using ${iam_profile:+'$iam_profile' IAM profile} ${1:+'$1' filters}"
            return 1
        fi
        # Collapse duplicates; the awk program errors out unless exactly one
        # unique VPC id remains.
        uniq | awk '{ print $1 }
END { if (NR != 1) {
print "*ERROR* More than 1 VPC id for instances using \"'"${iam_profile:+$iam_profile IAM profile} ${1:+$1 filters}"'\"" | "cat - >&2"
exit 1
}
} '
    }<<EOF_QUERY_NAT
$(debug_info "Running get_vpc_id_for_instances with arguments $@"
query_nat_instances 'Reservations[*].Instances[*].VpcId' $1)
EOF_QUERY_NAT
}
# filter_route_tables_by_gw GATEWAY_ID_PATTERN
# Print "SubnetId RouteTableId" pairs (sorted by route table id) for all
# route tables in $vpc_id that contain a route through the given gateway
# (e.g. "local" or "igw-*"). Same here-document trick as above so the
# query's exit status is visible to the error check.
filter_route_tables_by_gw()
{
    {
        if [ $? -ne 0 ]; then
            error "Error occured running describe-route-tables"
            return 1
        fi
        sort -k 2
    }<<EOF_EC2_DESCRIBE_RTABLES
$(debug_info "Running filter_route_tables_by_gw with arguments $@"
aws ec2 describe-route-tables --region $region \
--query 'RouteTables[*][Associations[*].[SubnetId,RouteTableId]]' \
--filters Name=vpc-id,Values=$vpc_id Name=route.gateway-id,Values=$1 --output text)
EOF_EC2_DESCRIBE_RTABLES
}

# filter_route_tables_by_monitor_tag TAG_VALUE
# Same output shape as filter_route_tables_by_gw, but selects route tables
# carrying tag Monitor=TAG_VALUE instead of a gateway route.
filter_route_tables_by_monitor_tag()
{
    {
        if [ $? -ne 0 ]; then
            error "Error occured running describe-route-tables"
            return 1
        fi
        sort -k 2
    }<<EOF_EC2_DESCRIBE_RTABLES
$(debug_info "Running filter_route_tables_by_monitor_tag with arguments $@"
aws ec2 describe-route-tables --region $region \
--query 'RouteTables[*][Associations[*].[SubnetId,RouteTableId]]' \
--filters Name=vpc-id,Values=$vpc_id Name=tag:Monitor,Values=$1 --output text)
EOF_EC2_DESCRIBE_RTABLES
}
# get_local_metadata PATH — fetch PATH from the EC2 instance metadata
# service (169.254.169.254) and print it on stdout.
get_local_metadata()
{
    debug_info "Running get_local_metadata with arguments $@"
    /usr/bin/curl --silent http://169.254.169.254/latest/meta-data/$1
}

# assign_outbound_route {create|replace|delete} ROUTE_TABLE [INSTANCE_ID]
# Point the default (0.0.0.0/0) route of ROUTE_TABLE at the given NAT
# instance; INSTANCE_ID defaults to this instance.
assign_outbound_route()
{
    debug_info "Running assign_outbound_route with arguments $@"
    local command="$1"
    local route_table="$2"
    local to_instance_id="${3:-$instance_id}"
    aws ec2 $command-route --region $region \
        --route-table-id $route_table \
        --destination-cidr-block 0.0.0.0/0 \
        --instance-id $to_instance_id --output text
}
# query_nat_state [INSTANCE_ID]
# Print the EC2 state name (running/stopped/...) of the given NAT instance
# (defaults to this instance); prints "ERROR" and returns 1 on query
# failure. Here-document keeps the $? of the query visible to the check.
query_nat_state()
{
    {
        if [ $? -ne 0 ]; then
            error "Error occurred running query_nat_instances"
            echo "ERROR"
            return 1
        fi
        awk '/'"${1:-$instance_id}"'/ { print $NF}'
    }<<EOF_QUERY_NAT_STATE
$(debug_info "Running query_nat_state with arguments $@"
query_nat_instances "Reservations[*].Instances[*].[InstanceId, State.Name]" "Name=instance-id,Values=${1:-$instance_id}")
EOF_QUERY_NAT_STATE
}
# wait_for_nat_state [STATE] [INSTANCE_ID]
# Poll (every $sleep_time s, up to $timeout s) until the instance reaches
# STATE (default "running"); returns 0 on success, logs and returns 1 on
# timeout.
# NOTE(review): $current_state is unquoted in the while test — an empty
# query result would break the [ ] expression; confirm this cannot happen.
wait_for_nat_state()
{
    debug_info "Running wait_for_nat_state with arguments $@"
    local sought_state=${1:-running}
    local sought_instance_id=${2:-$instance_id}
    local current_state="unknown"
    local wait_time=0
    current_state=$(query_nat_state $sought_instance_id)
    while [ $current_state != "$sought_state" ] && [ $wait_time -lt $timeout ]; do
        sleep $sleep_time
        wait_time=$((wait_time + sleep_time))
        current_state=$(query_nat_state $sought_instance_id)
    done
    if [ "$current_state" == "$sought_state" ]; then
        return 0
    else
        error "Timed out waiting for the other NAT instance in '$vpc_id' to turn to a '$sought_state' state"
        return 1
    fi
}
# wait_for_nat_ping [IP] [PING_COUNT]
# Poll (every $sleep_time s, up to $timeout s) until at least one ping to
# IP (default $other_nat_ip) gets a reply; returns 0 on success, logs and
# returns 1 on timeout. The "grep | wc -l" pipeline (rather than grep -c)
# keeps the pipeline exit status 0 when nothing matches, so 'set -e' is not
# tripped on an unreachable peer.
wait_for_nat_ping()
{
    debug_info "Running wait_for_nat_ping with arguments $@"
    local ping_ip=${1:-$other_nat_ip}
    local ping_count=${2:-$num_pings}
    local successful_pings=$(ping -c ${ping_count} -W $ping_timeout $ping_ip | grep time= | wc -l)
    local wait_time=0
    while [ ${successful_pings:-0} -eq 0 ] && [ $wait_time -lt $timeout ]; do
        sleep $sleep_time
        wait_time=$((wait_time + sleep_time))
        successful_pings=$(ping -c ${ping_count} -W $ping_timeout $ping_ip | grep time= | wc -l)
    done
    if [ ${successful_pings:-0} -gt 0 ]; then
        return 0
    else
        error "Timed out waiting for the other NAT instance in '$vpc_id' to turn to start respoding to ping"
        return 1
    fi
}
# Usage helper
# Print the command-line synopsis on stderr ($PROG is expanded inside the
# here-document).
usage()
{
    cat >&2 <<EOF_USAGE
Usage: $PROG [options]
Options description:
--dryrun (-n) : dry-run level
--verbose (-v) : verbosity (more v's = more verbose)
--log (-L) : log file
--sns (-S) : SNS topic to post into
EOF_USAGE
}
# getopts-style loop: walk the args in order, processing options and placing non-option
# arguments at the end. When finished, arguments are in reverse order.
i=0
n=$#
while [ "$i" -lt "$n" ]
do
    arg="$1" ; shift
    case "$arg" in
        --log|-L) LOGFILE="$1"; i=$((i+1)); shift ;;
        --sns|-S) SNSTOPIC="$1"; i=$((i+1)); shift ;;
        --dryrun|-n) DRYRUN=$((DRYRUN+1)) ;;
        --verbose|-v) DEBUG=$((DEBUG+1)) ;;
        --region|-r) region="$1"; i=$((i+1)); shift ;;
        --help|-h) usage; exit 0 ;;
        -*) error "Wrong option used: '$arg'"; usage; exit 1 ;;
        *) set -- "$@" "$arg"; ;;
    esac
    i=$((i+1))
done
# Tunables (seconds / counts); each may be pre-set via the environment.
: ${sleep_time:=60}
: ${timeout:=600}
: ${ping_timeout:=1}
: ${num_pings:=3}
: ${wait_between_pings:=2}
# Redirect all stdout and stderr to a log file in case its name is provided
if [ -n "$LOGFILE" ]; then
    touch "$LOGFILE"
    # Only close the original descriptors when not attached to a terminal.
    if ! [ -t 0 ]; then
        # Close STDOUT file descriptor
        exec 1<&-
        # Close STDERR FD
        exec 2<&-
    fi
    # Open STDOUT as $LOG_FILE file for read and write
    exec 1<>$LOGFILE
    # Redirect STDERR to STDOUT
    exec 2>&1
fi
export PATH=/usr/local/bin:$PATH
# Get this instance's ID
instance_id=$(get_local_metadata instance-id)
# Get this instance availabilty zone
my_az=$(get_local_metadata placement/availability-zone)
# Form the region based on the current AZ
: ${region:=${my_az%[a-z]*}}
# Get this instance IAM profile
# NOTE(review): the python one-liner uses Python-2 print syntax — verify a
# python2 interpreter is what 'python' resolves to on the NAT AMI.
iam_profile=$(get_local_metadata iam/info |
    python -c 'import sys, json; print json.load(sys.stdin)["InstanceProfileArn"]')
# Get a VPC ID for this instance
vpc_id=$(get_vpc_id_for_instances Name=instance-id,Values=$instance_id)
# Get the AWS account ID in order to form the proper SNS ARN
if [ -n "$SNSTOPIC" ]; then
    user_arn="$(aws iam --output text list-users --query Users[0].Arn)"
    arn_id="${user_arn%:*}"
    aws_account_id="${arn_id##*:}"
    SNSARN="arn:aws:sns:$region:$aws_account_id:$SNSTOPIC"
fi
# First make sure the default route for private subnets in the current AZ is being routed
# through this NAT instance
# 1. Get the list of all subnets associated with a route table with at least one local gateway
# it will be a mixture of private and public subnets as public ones also have at least
# one local gateway for talking to their peers in the same private network
subnets_with_local_gw="$TMPDIR/subnets_with_local_gw.$$"
filter_route_tables_by_gw local > "$subnets_with_local_gw"
debug_cat subnets_with_local_gw < "$subnets_with_local_gw"
# 2. Get the list of all subnets associated with a route table which has an internet gateway in it:
# those would be the public subnets
public_subnets="$TMPDIR/public_subnets.$$"
filter_route_tables_by_gw igw-* > "$public_subnets"
debug_cat public_subnets < "$public_subnets"
# 3. Filter the first list of subnets by the public subnets to get the list of private ones
private_subnets="$TMPDIR/private_subnets.$$"
grep -v -f "$public_subnets" "$subnets_with_local_gw" > "$private_subnets"
debug_cat private_subnets < "$private_subnets"
# 3.1 Filter the monitor subnet by tag
monitor_subnets="$TMPDIR/monitor_subnets.$$"
filter_route_tables_by_monitor_tag squid > "$monitor_subnets"
debug_cat monitor_subnets < "$monitor_subnets"
# 4. Get the list of all subnets in the current AZ
all_subnets_az="$TMPDIR/all_subnets_az.$$"
aws ec2 describe-subnets --region $region \
    --query 'Subnets[*].[SubnetId]' \
    --filters Name=vpc-id,Values=$vpc_id Name=availabilityZone,Values=$my_az --output text > "$all_subnets_az"
debug_cat all_subnets_az < "$all_subnets_az"
# 5. Get the list of the route tables for private subnets in the current AZ
# filtering the list of all private subnets
# (uniq -f 1 deduplicates on the route-table-id column)
my_route_table_ids=$(grep -f "$all_subnets_az" "$monitor_subnets" | sort -k 2 | uniq -f 1 | awk '{print $2}')
debug_info my_route_table_ids=$my_route_table_ids
# 6. Update route tables for all private subnets in the current AZ
for rt_id in $my_route_table_ids; do
    info "Adding this instance to $rt_id default route on start"
    # Try to replace an existing default route first; fall back to creating
    # one when the table has no default route yet.
    if assign_outbound_route replace $rt_id; then
        :
    else
        info "Creating a route in $rt_id for this instance to be a gateway on start"
        assign_outbound_route create $rt_id
    fi
done
info "Starting NAT monitor"
# Obtain all NAT instances ids and their state
nat_instances_ip="$TMPDIR/nat_instances_ip.$$"
nat_instances_state="$TMPDIR/nat_instances_state.$$"
query_nat_instances "Reservations[*].Instances[*].[InstanceId, Placement.AvailabilityZone, State.Name]" > "$nat_instances_state"
# Check there are only 2 NAT instances and fail the script otherwise
# (sed '$=' prints the number of the last line, i.e. the line count)
nat_num=$(sed -n '$ { $=; }' "$nat_instances_state")
[ "${nat_num:-0}" -eq 2 ] || die "Number of detected NAT instances assigned IAM profile '$iam_profile' in VPC '$vpc_id' is $nat_num != 2"
# Make sure to wait for the other NAT to come up in case it is not in the 'running' state
other_nat_state=$(awk '! /'"$instance_id"'/ { print $NF}' "$nat_instances_state" )
other_az=$(awk '! /'"$instance_id"'/ { print $2}' "$nat_instances_state" )
other_nat_id=$(awk '! /'"$instance_id"'/ { print $1}' "$nat_instances_state" )
while ! wait_for_nat_state running $other_nat_id; do
    case "$(query_nat_state $other_nat_id)" in
        pending) :
            ;;
        *) die "The other NAT instance is in not running, not monitoring it"
            ;;
    esac
done
# Obtain all NAT instances instance ids and private IPs matching the VPC ID and the IAM profile
query_nat_instances 'Reservations[*].Instances[*].[InstanceId, PrivateIpAddress]' > "$nat_instances_ip"
# Get the other NAT instance's IP
other_nat_ip=$(awk '! /'"$instance_id"'/ { print $NF}' "$nat_instances_ip" )
# Get the list of all subnets in the other NAT's AZ
all_subnets_other_az="$TMPDIR/all_subnets_other_az.$$"
aws ec2 describe-subnets --region $region \
    --query 'Subnets[*].[SubnetId]' \
    --filters Name=vpc-id,Values=$vpc_id Name=availabilityZone,Values=$other_az --output text > "$all_subnets_other_az"
debug_cat all_subnets_other_az < "$all_subnets_other_az"
# Get the list of the route tables for private subnets in the other AZ
# filtering the list of all private subnets
other_route_table_ids=$(grep -f "$all_subnets_other_az" "$monitor_subnets" | sort -k 2 | uniq -f 1 | awk '{print $2}')
debug_info other_route_table_ids=$other_route_table_ids
info "Monitoring other NAT instance '$other_nat_id' ($other_nat_ip)"
# Main loop: ping the peer; on sustained failure take over its route tables
# and try to revive it.
# NOTE(review): nat_healthy/route_healthy are only assigned inside the
# failure branch — their first use via "! $nat_healthy" relies on the
# variables being unset expanding to an empty command; verify intended.
while true; do
    # Check health of other NAT instance
    pingresult=$(ping -c ${retry_pings:-$num_pings} -W $ping_timeout $other_nat_ip | grep time= | wc -l)
    if [ ${pingresult:-0} -eq 0 ]; then
        other_nat_state="$(query_nat_state $other_nat_id)"
        # If the other NAT state is not 'running' or we have already retried pinging it
        # then set all vars so that a fail-over occur
        if [ "$other_nat_state" != "running" ] || [ $retry_pings ]; then
            # Set all health-tracking vars to false
            route_healthy=false
            nat_healthy=false
            unset retry_pings
        else
            # Retry pinging the other NAT for a random number of pings again
            # this is in order to prevent race condition where both NATs
            # cannot reach each other but both are healthy
            retry_pings=$(($num_pings + $RANDOM%32))
        fi
        # NOTE(review): other_nat_state is not refreshed inside this loop —
        # the 'stopping' branch appears able to spin on a stale state after
        # the instance has stopped; confirm against observed behaviour.
        while ! $nat_healthy; do
            # NAT instance is unhealthy, loop while we try to fix it
            if ! $route_healthy; then
                warn "Other NAT '$other_nat_id' ($other_nat_ip) heartbeat failed, taking over default routes: $other_route_table_ids"
                for rt_id in $other_route_table_ids; do
                    assign_outbound_route replace $rt_id
                done
                route_healthy=true
            fi
            # Check NAT state to see if we should stop it or start it again
            case "$other_nat_state" in
                stopped)
                    info "Other NAT instance '$other_nat_id' stopped, starting it back up"
                    aws ec2 start-instances --region $region --instance-ids $other_nat_id --output text
                    if wait_for_nat_state running $other_nat_id; then
                        info "Other NAT instance '$other_nat_id' started, continuing to monitor"
                        nat_healthy=true
                    fi
                    ;;
                running)
                    info "Other NAT instance '$other_nat_id' is running, attempting to reboot it"
                    aws ec2 reboot-instances --region $region --instance-ids $other_nat_id --output text
                    if wait_for_nat_ping $other_nat_ip 3; then
                        info "Other NAT instance '$other_nat_id' started, continuing to monitor"
                        nat_healthy=true
                    fi
                    ;;
                stopping)
                    info "Other NAT instance '$other_nat_id' is stopping, waiting for it to stop"
                    if wait_for_nat_state stopped $other_nat_id; then
                        :
                    fi
                    ;;
                shutting-down|terminated)
                    die "Other NAT instance '$other_nat_id' is terminated, nothing to monitor any more"
                    ;;
            esac
        done
    else
        sleep $wait_between_pings
    fi
done
# vi: sw=4 ts=4 et:
| true
|
b05144e4a3a13ba2d7dcdb518b7ab2073eec469b
|
Shell
|
C-Coupler-Group/c-coupler-doc
|
/examples/demo_coupler/model_platform/config/cesm/cesm1_2_1/build.sh
|
UTF-8
| 2,257
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# dump_Macros COMMON_CFG MACHINE_CFG OUTFILE
# Render the effective compiler settings: write a throwaway makefile that
# includes MACHINE_CFG ($2) and echoes each resolved variable, run it, and
# capture the output into OUTFILE ($3). Afterwards strip " -r8 " / " -i4 "
# from the FFLAGS line and append NETCDF_PATH derived from the NETCDFINC
# line of COMMON_CFG ($1).
# NOTE(review): makefile recipe lines must be TAB-indented — confirm the
# indentation inside the here-document below is intact.
function dump_Macros
{
    cat > /tmp/makefile << EOF
include $2
all:
@echo "CPPDEFS += \${CPPDEFS}"
@echo
@echo "SLIBS += \${SLIBS}"
@echo
@echo "CFLAGS := \${CFLAGS}"
@echo
@echo "CXX_LDFLAGS := \${CXX_LDFLAGS}"
@echo
@echo "CXX_LINKER := \${CXX_LINKER}"
@echo
@echo "FC_AUTO_R8 := \${FC_AUTO_R8}"
@echo
@echo "FFLAGS := \${FFLAGS}"
@echo
@echo "FFLAGS_NOOPT := \${FFLAGS_NOOPT}"
@echo
@echo "FIXEDFLAGS := \${FIXEDFLAGS}"
@echo
@echo "FREEFLAGS := \${FREEFLAGS}"
@echo
@echo "MPICC := \${MPICC}"
@echo
@echo "MPICXX := \${MPICXX}"
@echo
@echo "MPIFC := \${MPIFC}"
@echo
@echo "SCC := \${SCC}"
@echo
@echo "SCXX := \${SCXX}"
@echo
@echo "SFC := \${SFC}"
@echo
@echo "SUPPORTS_CXX := \${SUPPORTS_CXX}"
@echo
@echo "ifeq (\\\$\$(DEBUG), true)"
@echo " FFLAGS += -g -CU -check pointers -fpe0 "
@echo "endif"
@echo
@echo "LDFLAGS += \${LDFLAGS}"
@echo
@echo "ifeq (\\\$\$(compile_threaded), true) "
@echo " LDFLAGS += -openmp "
@echo " CFLAGS += -openmp "
@echo " FFLAGS += -openmp "
@echo "endif"
@echo
EOF
    # Run the generated makefile; '>&' captures both stdout and stderr.
    make -f /tmp/makefile >& $3
    # Remove real*8 / integer*4 promotion flags from FFLAGS.
    sed -i "/\<FFLAGS\>/{s# -r8 # #; s# -i4 # #}" $3
    # Derive the netCDF install prefix from the -I<prefix>/include term of
    # the NETCDFINC line in COMMON_CFG and record it as NETCDF_PATH.
    ncpath=$(grep "^NETCDFINC" $1)
    ncpath=$(echo $ncpath|sed "s#.*-I\(.*\)/include#\1#g")
    echo "NETCDF_PATH := $ncpath" >> $3
}
# Positional arguments: $1 = environment name, $2 = build/exe directory.
export Env=$1
export Exedir=$2
# Abort helper for fatal build errors.
# NOTE(review): 'cleanup' is not defined in this script — presumably
# provided by the sourced env_compile file; verify.
error_exit() {
    cleanup
    exit 1
}
# Pull in CASEROOT/MACH/COMMON_COMPILER/MACFILE/EXEC/CASE_NAME etc.
export ENV_COMPILE="${CASEROOT}/config/common/env_compile"
source ${ENV_COMPILE}
# == Get the path of this script ==
MYPATH=$(readlink -f "$0")
MYPATH=$(dirname "$MYPATH")
# =================================
# Prefer a case-local compiler.cfg over the machine-wide one.
# NOTE(review): Macfile/Common are computed here but the dump_Macros call
# below uses $COMMON_COMPILER/$MACFILE instead — confirm which pair is the
# intended source of truth.
Macfile=${CASEROOT}/config/common/machine/${MACH}/common_compiler.${MACH}.cfg
Common=$Macfile
if [ -f $MYPATH/compiler.cfg ]; then
    Macfile=$MYPATH/compiler.cfg
fi
cd $MYPATH/cesm_case_scripts
# Materialise the resolved compiler settings into ./Macros and show them.
dump_Macros "$COMMON_COMPILER" "$MACFILE" "Macros"
cat Macros
if [ ! -e "./.env_run.xml" ]; then
    echo "Can't find .env_run.xml file"
    error_exit
fi
# Force a rebuild: remove any previous executable, run the CESM build, and
# publish the fresh binary to $EXEC (fail the script when the build fails).
rm -f $Exedir/cesm_bld/cesm.exe
./$CASE_NAME.build
if [ -f $Exedir/cesm_bld/cesm.exe ] ; then
    cp $Exedir/cesm_bld/cesm.exe $EXEC
else
    exit 1
fi
cd ${CASEROOT}
# Flip the coupler mapping order flag from 'Y' to 'X' in every seq_maps.rc.
find ./ -name "seq_maps.rc" > .temp_file_list
while read line
do
    sed -i "s/'Y'/'X'/g" $line
done < .temp_file_list
rm .temp_file_list
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.