blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c060b54e50d3ed3dfe5a8bf007b10856646787d9
|
Shell
|
martiansnoop/dotfiles
|
/.no-my-zsh/prompt.zsh
|
UTF-8
| 769
| 3.578125
| 4
|
[] |
no_license
|
# Use single quotes so variables are executed by the prompt,
# as opposed to once, when this file is run.
# NOTE(review): command substitution inside PROMPT requires
# 'setopt PROMPT_SUBST' — confirm it is enabled elsewhere.
color='%B%F{black}'    # bold + black foreground (zsh prompt escapes)
decolor='%b%f'         # reset bold + foreground color
omg_color='%b%F{001}'  # non-bold red, used for the error marker
PROMPT='$omg_color$(error_status)$color%n@%m> $decolor'
RPROMPT='$color$(git_prompt_info) %3~$decolor'
# Prints "!" when the previous command exited non-zero; used as the
# leading error marker in PROMPT.  $? must be captured on the first
# line, before any other command overwrites it.
# (Original tested '[[ last_command -ne 0 ]]' — the missing '$' only
# worked via arithmetic coercion — and leaked the variable globally.)
function error_status() {
  local last_command=$?
  (( last_command != 0 )) && echo "!"
}
# Outputs current branch info in prompt format: "[branch*]" with the
# dirty marker supplied by parse_git_dirty; prints nothing outside a
# git repository.
function git_prompt_info() {
  local head_ref
  if ! head_ref=$(command git symbolic-ref HEAD 2> /dev/null); then
    head_ref=$(command git rev-parse --short HEAD 2> /dev/null) || return 0
  fi
  echo "[${head_ref#refs/heads/}$(parse_git_dirty)]"
}
# Checks if working tree is dirty; prints "*" when git reports any
# pending change (submodule-internal changes ignored).
function parse_git_dirty() {
  local status_output
  status_output=$(git status --porcelain --ignore-submodules=dirty)
  [[ -n $status_output ]] && echo "*"
}
| true
|
0bb33306e67316e745a1c0c247c5f5cc5be96eda
|
Shell
|
thiago/easy-docker
|
/commands/run.sh
|
UTF-8
| 6,178
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Prints the help text for the "run" subcommand.
# Globals read: PROGRAM_NAME, CURRENT_CMD.
# Relies on a 'title' helper defined elsewhere for section headings.
function usage_run {
  echo -e "
$(title Usage):
$PROGRAM_NAME $CURRENT_CMD [options] alias[:version] [alias command]
$(title Options):
-h, --help Display this help and exit
-f, --file FILE Specify an alternate compose file
-i, --interactive=false Enter in container with shell
-e KEY=VALUE Set an environment variable (can be used multiple times)
--entrypoint=/bin/bash Change the entrypoint
$(title Example):
$PROGRAM_NAME $CURRENT_CMD python -V
$PROGRAM_NAME $CURRENT_CMD python:2.7 -m SimpleHTTPServer 80
$PROGRAM_NAME $CURRENT_CMD -i python
$PROGRAM_NAME $CURRENT_CMD --entrypoint /bin/bash python ls
"
}
# Searches upward from $PWD for a file or directory named $1 and
# prints the first matching path (in PWD/../.. form).  Prints nothing
# if the name is not found before the filesystem root.
# (Fixes: slashes/directory/n leaked into the caller's scope, and the
# unquoted path tests broke on directories containing spaces.)
function upsearch {
  local slashes=${PWD//[^\/]/}   # one '/' per path component -> depth
  local directory=$PWD
  local n
  for (( n=${#slashes}; n>0; --n )); do
    if test -e "$directory/$1"; then
      echo "$directory/$1"
      return
    fi
    directory="$directory/.."
  done
}
# Entry point of the "run" subcommand: builds a "docker run" command
# line from (a) schema env files, (b) docker's own --help defaults and
# (c) user options, then evals it.
# Relies on helpers defined elsewhere: get_image_repository,
# get_image_name, get_image_version, is_array, usage_run.
function main_run {
  shift
  local IFS=$'\n'
  local docker_env_opt=""
  local docker_args=()
  local docker_custom_args=()
  local docker_flag=""
  local docker_opt=""
  local docker_value=""
  # Collect leading options: known bare flags are kept as-is, anything
  # else is assumed to take a value and is stored as "$1 $2".
  while [[ $1 = -?* ]]; do
    case $1 in
      -d | -i | -P | -t) docker_custom_args+=("$1") ;;
      *) docker_custom_args+=("$1 $2"); shift ;;
    esac
    shift
  done
  if [ -z "${1}" ]; then
    usage_run
    exit 1
  fi
  local args_length=$(($#))
  local args_array=${@:2:args_length}
  local image_repository=$(get_image_repository $1)
  local image_name=$(get_image_name $1)
  local image_version=$(get_image_version $1)
  # A second argument beginning with ":" overrides the version and
  # shifts the command args by one.
  if [ "$(echo $2 | head -c 1)" == ":" ]; then
    local image_version=$(get_image_version $2)
    local args_array=${@:3:args_length}
  fi
  local image_version_strip=${image_version//./}
  image_version_strip=${image_version_strip//-/}
  image_repository=${image_repository:=_}
  # Schema files are sourced from most generic to most specific, so
  # later files can override variables set by earlier ones.
  local ENVS=(
    "$PROJECT_DIR/schema/_.sh"
    "$PROJECT_DIR/schema/base.sh"
    "$PROJECT_DIR/schema/${image_name}.sh"
    "$PROJECT_DIR/schema/${image_repository}/${image_name}.sh"
    "$PROJECT_DIR/schema/${image_repository}/${image_name}/${image_version}.sh"
  )
  for current_env in ${ENVS[@]}; do
    if [ -f "${current_env}" ]; then
      . $current_env
    fi
  done
  # parse default help of docker cli
  local docker_help=(`docker run --help | grep -e --`)
  for (( i = 0; i < ${#docker_help[@]}; i++ )); do
    # get flag like -v -c -P
    docker_flag=`echo "${docker_help[$i]}" | awk -F', ' '{print $1}' | awk -F'-' '{print $2}'`
    # get option like --volume --cpu-shares --publish-all
    docker_opt=`echo "${docker_help[$i]}" | awk -F'=' '{print $1}' | awk -F'--' '{print $2}'`
    # get default value like [] 0 false
    docker_value=`echo "${docker_help[$i]}" | awk -F'=' '{print $2}' | awk '{print $1}'`
    # replace - to _ like cpu-shares to cpu_shares
    docker_env_opt=${docker_opt//-/_}
    # if exist value in option
    # NOTE(review): this test is always true — the operand is unquoted,
    # so an empty expansion leaves '[ -n ]' (single-arg test, true),
    # and the indirection likely meant ${!docker_env_opt} (option names
    # with '-' are not valid variable names).  Confirm before changing;
    # the branches below re-test the value anyway.
    if [ -n ${!docker_opt} ]; then
      # parse default values by type to apply different ways to parse and insert arguments
      # if is a array then add in loop
      if is_array ${docker_env_opt}; then
        for item in $(eval "echo \"\${"${docker_env_opt}"[*]}\""); do
          docker_args+=( "--$docker_opt $item" )
        done
      # if is a boolean
      elif [[ ${docker_value} = "false" ]] || [[ ${docker_value} = "true" ]]; then
        # and current value is not equal a default value
        if [[ ${!docker_env_opt} != ${docker_value} ]]; then
          # then add in args
          docker_args+=( "--$docker_opt" )
        fi
      # if is a string
      elif [[ ${docker_value} == \"* ]]; then
        # and current value is not equal a default value
        if [[ "\"${!docker_env_opt}\"" != ${docker_value} ]]; then
          # then add in args
          docker_args+=( "--$docker_opt ${!docker_env_opt}" )
        fi
      # if is a number
      elif [[ "${docker_value}" != "${!docker_env_opt}" ]]; then
        docker_args+=( "--$docker_opt ${!docker_env_opt}" )
      fi
    fi
  done
  # if repository of image is not a default library (_)
  if [[ "${image_repository}" != "_" ]]; then
    # then increment with /
    image_repository="${image_repository}/"
  else
    # then clean variable
    image_repository=""
  fi
  # join command line options into default args
  docker_args+=( "${docker_custom_args[*]}" )
  local cmd=`echo docker run ${docker_args[*]} ${image_repository}${image_name}:${image_version} ${args_array[*]}`
  eval $cmd
}
# Spins up a 4-node docker swarm on VirtualBox — one master named $1
# plus three workers ($11, $12, $13) — joined through a discovery
# token, which is echoed at the end.
function dockerSwarm(){
  local swarm_name=$1
  local token
  token=$(docker run swarm create 2>&1 | tail -1)
  docker-machine create -d virtualbox --swarm --swarm-master --swarm-discovery token://$token $swarm_name
  local suffix
  for suffix in 1 2 3; do
    docker-machine create -d virtualbox --swarm --swarm-discovery token://$token ${swarm_name}${suffix}
  done
  echo $token
}
# Prints NAME=DEFAULT pairs derived from "docker run --help", with '-'
# in option names mapped to '_' (e.g. cpu_shares=0), presumably so the
# output can be eval'd to seed variables matching docker's defaults.
function runOptsToEnv(){
  local IFS=$'\n'
  local docker_help=(`docker run --help | grep -e --`)
  local docker_opts=()
  local docker_env_opt=""
  local docker_args=()
  local docker_flag=""
  local docker_opt=""
  local docker_value=""
  for (( i = 0; i < ${#docker_help[@]}; i++ )); do
    # short flag, e.g. v, c, P (computed but unused below)
    docker_flag=`echo "${docker_help[$i]}" | awk -F', ' '{print $1}' | awk -F'-' '{print $2}'`
    # long option name, e.g. volume, cpu-shares
    docker_opt=`echo "${docker_help[$i]}" | awk -F'=' '{print $1}' | awk -F'--' '{print $2}'`
    # default value, e.g. [], 0, false
    docker_value=`echo "${docker_help[$i]}" | awk -F'=' '{print $2}' | awk '{print $1}'`
    docker_env_opt=${docker_opt//-/_}
    # "[]" (docker's list default) is rewritten to "()" — presumably so
    # an eval of this output creates an empty bash array; confirm.
    docker_opts+=(${docker_env_opt}=${docker_value//[]/()})
  done
  echo "${docker_opts[*]}"
}
# True (exit 0) iff the variable named $1 is declared as an indexed
# array.  stderr from 'declare -p' is silenced so probing an unset
# name no longer prints "declare: ...: not found" to the terminal.
function is_array() {
  local variable_name=$1
  [[ "$(declare -p "$variable_name" 2> /dev/null)" =~ "declare -a" ]]
}
| true
|
f1e9c4e4a74c699f7338483eb8c479aef6e47a93
|
Shell
|
moritzbuck/Limno2
|
/0006_Erken_Genome_Project/9999_scripts/usearch_script.sh
|
UTF-8
| 2,791
| 2.6875
| 3
|
[] |
no_license
|
# 16S amplicon pipeline: merge paired reads per sample with usearch,
# quality-filter and dereplicate with vsearch, cluster OTUs, assign
# taxonomy (SINTAX), then build result tables with a python helper.
base_path=$HOME/people/0006_Erken_Genome_Project/20000_mesocosm_amplicon_1/
raws_folder=/home/moritz/people/0006_Erken_Genome_Project/0000_rawdata/0200_mesocosm_amplicon_I/
python_hjelper=/home/moritz/temp/helper.py
usearch=usearch10
usearch_path=$base_path/2000_usearch/
# NOTE(review): nproc is set but the merge step below hardcodes
# "-threads 16" — confirm which is intended.
nproc=19
db=~/dbs/rdp_16s_v16_sp.udb
source ~/people/0010_Pauline/arctic/bin/activate
cd $base_path
#python ~/temp/fastq_deplex.py $raws_folder/Undetermined/Undetermined_S0_L001_R1_001.fastq.gz $raws_folder/Undetermined/Undetermined_S0_L001_R2_001.fastq.gz /home/moritz/people/0024_haiyan/0000_data/haiyan_bcs.txt $raws_folder/deplexed/ $raws_folder/deplex_stats.txt False False
name=mesocosm_all
mkdir -p $base_path/1000_formated_data/1100_$name/
touch $base_path/1000_formated_data/output_merging.txt
# Sample ids: second "_"-separated field of each _R1 fastq basename.
samples=($(find $raws_folder -name "*.fastq" | grep _R1 | rev | cut -f1 -d"/" | rev | cut -f2 -d_))
# NOTE(review): the header goes to 1100_$name/output_merging.txt but
# the loop below appends stats to 1000_formated_data/output_merging.txt
# — confirm which file is the intended report.
echo "sample" $'\t' "merged_reads" > $base_path/1000_formated_data/1100_$name/output_merging.txt
for s in ${samples[@]}
do
  echo $s
  fwd=`ls $raws_folder/P11402_${s}_*_R1_001.fastq`
  rev=`ls $raws_folder/P11402_${s}_*_R2_001.fastq`
  # Merge pairs, relabel reads with the sample id, and append the
  # "% merged" figure for this sample to the report.
  $usearch -threads 16 -fastq_mergepairs $fwd -reverse $rev -fastqout $base_path/1000_formated_data/${s}.fastq --relabel "$s:" -fastq_maxdiffs 50 -fastq_trunctail 5 2>&1 | grep -E '% merged' | tail -n1 | sed "s/.* \([0-9.].*%\) merged/$s\t\1/" >> $base_path/1000_formated_data/output_merging.txt
done
cat $base_path/1000_formated_data/*.fastq > $base_path/1000_formated_data/all.fastq
mkdir -p $usearch_path/
# QC'd fasta for clustering; unfiltered fasta kept for OTU-table mapping.
vsearch -fastq_filter $base_path/1000_formated_data/all.fastq -fastq_minlen 400 -fastq_maxee 1 -fastaout $base_path/1000_formated_data/all.qced.fasta
vsearch -fastq_filter $base_path/1000_formated_data/all.fastq -fastaout $base_path/1000_formated_data/all.fasta
#sed -i 's/-/_/g' $base_path/1000_formated_data/all.fasta
vsearch --derep_fulllength $base_path/1000_formated_data/all.qced.fasta -relabel Uniq_ -sizeout --minuniquesize 2 --output $usearch_path/uniques_nosingletons.fa
$usearch -cluster_otus $usearch_path/uniques_nosingletons.fa -otus $usearch_path/otus.fasta
vsearch -usearch_global $base_path/1000_formated_data/all.fasta -db $usearch_path/otus.fasta -strand plus -id 0.97 -otutabout $usearch_path/otu_table.txt
$usearch -sintax $usearch_path/otus.fasta -db $db -tabbedout $usearch_path/taxonomy.sintax -strand both
#$usearch --fastx_uniques $base_path/100_formated_data/all_qced_reads.fasta -relabel Uniq_ -sizeout --minuniquesize 2 --fastaout $usearch_path/uniques_nosingletons.fa
### STANDARD USEARCH
### generate tables:
mkdir -p $base_path/3000_tables
python $python_hjelper make_taxo_table $usearch_path/otu_table.txt $usearch_path/taxonomy.sintax $base_path/3000_tables/ 0.8
mkdir -p $base_path/4000_post_processing
| true
|
faddb2bc2bb650f20ee66db8cb66ca9900b757de
|
Shell
|
ubiqube/quickstart
|
/scripts/swarm-fix-route.sh
|
UTF-8
| 19,044
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Fixes docker swarm overlay networking (NAT exceptions, default
# routes, rp_filter) for the msa-front / msa-sms services.
#set -x
set -e
# Clear any values inherited from the environment so the configuration
# below is authoritative.
unset ID
unset NET_ID
unset OVERLAY_NET_PREFIX
unset MATCH
unset IFACE
unset NS_ID
unset LB_NET_ID
unset LB_NS_ID
unset NS_ID_LONG
unset NS_ID_SHORT
unset CONT_ID
unset RSORTED_RULES
unset RUN_DIR
unset NETNS_DIR
unset DOCKER_NETWORK_1
unset DOCKER_NETWORK_2
unset rule
unset RULES
unset INGRESS_NS
unset MSA_SMS
unset MSA_SMS_HERE
unset MSA_FRONT_HERE
unset MSA_FRONT
unset MSA_PREFIX
unset MSA_FRONT_IFACE
unset MSA_SMS_IFACE
unset MSA_FRONT_NS_ID
unset MSA_SMS_NS_ID
# Well-known locations and names used throughout the script.
RUN_DIR="/var/run"
NETNS_DIR="/var/run/docker/netns"
DOCKER_NETWORK_1="ingress"
DOCKER_NETWORK_2="a_default"
INGRESS_NS="ingress_sbox"
MSA_SMS="msa-sms"
MSA_FRONT="msa-front"
# Usage banner shown for -h and for invalid options.
function print_help {
  cat <<'HELP_EOF'
###################################################################
###################################################################
###################################################################
###################################################################

Script to update docker swarm network configuration

Options:
[-s]
shows current configuration
example: ./script.sh -s
[-a]
updates (adds) configuration
example: ./script.sh -a
[-d]
updates (reverts) configuration
example: ./script.sh -d
[-h]
prints help

###################################################################
###################################################################
###################################################################
###################################################################
HELP_EOF
}
# Replaces $1/netns with a symlink to $2 so that "ip netns" (which
# looks in /var/run/netns) can see docker's namespace files.
# $1 - supposed to be "/var/run"
# $2 - supposed to be "/var/run/docker/netns"
function create_ns_symlink {
  # if [ -d "$1" ] && [ -d "$2" ]; then
  if [ -d "$1" ]; then
    cd "$1"
    if [ -d "netns" ];then
      echo "Deleting default netns"
      rm -rf netns
    fi
    sudo ln -sfn "$2" netns
    # NOTE(review): with "set -e" at the top of the file a failing
    # "sudo ln" aborts the script before this check runs, so the
    # error branch below is effectively dead code.
    if [ $? -eq 0 ]; then
      echo "Symlink successfully created"
    else
      echo "Error: Can't create symlink. Can't continue."
      exit 1
    fi
    cd - > /dev/null
  else
    echo "Error: $1 or $2 not found. Can't continue."
    exit 1
  fi
}
# Verifies that namespace $2 exists exactly once under directory $1.
# Prints the namespace name on success; exits the script otherwise.
# $1 - supposed to be "/var/run/docker/netns"
# $2 - docker network name
function check_ns {
  ls -lah $1 > /dev/null
  if [ $? -ne 0 ]; then
    echo "Error: $1 directory not found. Can't continue."
    exit 1
  fi
  local found
  found=$(ls -lah $1 | grep -c -- $2)
  if [ "$found" -eq 1 ]; then
    echo $2
  else
    echo "Error: $1 namespace not found. Can't continue."
    exit 1
  fi
}
# Looks up the load-balancer namespace for docker network id $2 under
# directory $1.  The namespace is named "lb_" + the id with its last
# 3 characters removed.  Prints the name when found; only warns (no
# exit) otherwise.
# $1 - supposed to be "/var/run/docker/netns"
# $2 - docker network id
function check_lb_ns {
  local net_id=$2
  local lb_name="lb_${net_id::-3}"
  ls -lah $1 > /dev/null
  if [ $? -ne 0 ]; then
    echo "Warning: $1 directory not found."
    return
  fi
  local found
  found=$(ls -lah $1 | grep -c -- $lb_name)
  if [ "$found" -eq 1 ]; then
    echo $lb_name
  else
    echo "Warning: $1 namespace not found."
  fi
}
# Adds a default route via eth0 inside namespace $1 (unless one is
# already present) so traffic can leave the namespace.
# $1 - namespace id
function add_default_route {
  sudo ip netns exec $1 ip r > /dev/null
  if [ $? -eq 0 ]; then
    local MATCH=$(sudo ip netns exec $1 ip r | grep default | wc -l)
    if [ $MATCH -eq 1 ]; then
      echo "Default route already exists"
    else
      sudo ip netns exec $1 ip r a default dev eth0
      if [ $? -eq 0 ]; then
        echo "Default rule successfully added."
      else
        # Fixed: this line was a bare string (the 'echo' was missing),
        # so the error text itself was executed as a command.
        echo "Error: Can't add default route to $1 namespace. Can't continue."
      fi
    fi
  else
    echo "Error: Can't add default route to $1 namespace. Can't continue."
    exit 1
  fi
}
# Inserts ACCEPT rules at position 2 of the NAT POSTROUTING chain of
# namespace $1 so IPVS traffic to $2 on syslog/SNMP ports (UDP 514,
# UDP 162, TCP 6514, TCP 514) bypasses masquerading.
# $1 - namespace id
# $2 - destinnation ip prefix
function add_nat_exception {
  sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers > /dev/null
  if [ $? -eq 0 ]; then
    sudo ip netns exec $1 iptables -t nat -I POSTROUTING 2 -m ipvs --ipvs -s 0.0.0.0/0 -d $2 -p udp --dport 514 -j ACCEPT
    sudo ip netns exec $1 iptables -t nat -I POSTROUTING 2 -m ipvs --ipvs -s 0.0.0.0/0 -d $2 -p udp --dport 162 -j ACCEPT
    sudo ip netns exec $1 iptables -t nat -I POSTROUTING 2 -m ipvs --ipvs -s 0.0.0.0/0 -d $2 -p tcp --dport 6514 -j ACCEPT
    sudo ip netns exec $1 iptables -t nat -I POSTROUTING 2 -m ipvs --ipvs -s 0.0.0.0/0 -d $2 -p tcp --dport 514 -j ACCEPT
    # NOTE(review): only the status of the last insert is checked here;
    # an earlier failure would abort the script anyway via "set -e".
    if [ $? -eq 0 ]; then
      echo "NAT exception successfully added"
    else
      echo "Error: Can't add NAT exception. Can't continue."
      exit 1
    fi
  else
    echo "Error: Can't add NAT exception. Can't continue."
    exit 1
  fi
}
# Lists the ACCEPT rules in the NAT POSTROUTING chain of namespace $1,
# or says none were found.
# Fixed: the original tested '[ $MATCH -ne 1 ]', which printed
# "No exception rules found." when exactly one rule existed and an
# empty listing when zero existed — inverted logic.
# $1 - namespace id
function show_nat_exceptions {
  sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers > /dev/null
  if [ $? -eq 0 ]; then
    local MATCH=$(sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers | grep "ACCEPT" | wc -l)
    if [ $MATCH -ge 1 ]; then
      local EXCEPTION_RULES=$(sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers | grep "ACCEPT")
      echo "$EXCEPTION_RULES"
    else
      echo "No exception rules found."
    fi
  fi
}
# Deletes every POSTROUTING rule matching pattern $2 inside namespace
# $1.  Rule numbers are removed bottom-up so earlier deletions do not
# invalidate later line numbers.
# $1 - namespace id
# $2 - grep pattern identifying the rules, e.g. "udp dpt:514"
function _delete_nat_rules_matching {
  local MATCH=$(sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers | grep "$2" | wc -l)
  if [ $MATCH -ge 1 ]; then
    # get rules ids and delete them one by one from bottom to top
    local RULES=$(sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers | grep "$2" | awk '{print $1}')
    local RSORTED_RULES=$(sort -r <<< "$RULES")
    local rule
    for rule in $RSORTED_RULES
    do
      sudo ip netns exec $1 iptables -t nat -D POSTROUTING $rule
      if [ $? -eq 0 ]; then
        # "${2/dpt:/}" turns e.g. "udp dpt:514" into "udp 514",
        # matching the original per-port messages.
        echo "NAT exception for dst ${2/dpt:/} successfully deleted, rule num $rule"
      else
        echo "Error: Can't delete NAT exception. Can't continue."
        exit 1
      fi
    done
  fi
}
# Removes the syslog/SNMP NAT exceptions (UDP/514, UDP/162, TCP/6514,
# TCP/514) previously added to namespace $1.  The original repeated
# the same 15-line stanza four times; it is now factored into
# _delete_nat_rules_matching above.
# $1 - namespace id
function delete_514_nat_exception {
  sudo ip netns exec $1 iptables -t nat -nvL POSTROUTING --line-numbers > /dev/null
  if [ $? -eq 0 ]; then
    _delete_nat_rules_matching $1 "udp dpt:514"
    _delete_nat_rules_matching $1 "udp dpt:162"
    _delete_nat_rules_matching $1 "tcp dpt:6514"
    _delete_nat_rules_matching $1 "tcp dpt:514"
  fi
}
# Finds the single interface inside namespace $1 whose "ip a" output
# matches address prefix $2 and prints its name; exits on zero or
# multiple matches.
# $1 - namespace id
# $2 - network prefix contains eg. 10.0.0., 10.0.2. etc.
function check_ingress_interface {
  sudo ip netns exec $1 ip a | grep $2 > /dev/null
  if [ $? -eq 0 ]; then
    # count matched interfaces
    local MATCH=$(sudo ip netns exec $1 ip a | grep $2 | wc -l)
    if [ $MATCH -gt 1 ]; then
      echo "Error: Multiple ($MATCH) interfaces match prefix $2. Specify more accurate prefix."
      exit 1
    else
      # NOTE(review): assumes the interface name is the last field of
      # the matching "ip a" line — confirm for all address formats.
      local IFACE=$(sudo ip netns exec $1 ip a | grep $2 | awk '{print $(NF)}')
      echo $IFACE
    fi
  else
    echo "Error: Can't find $1 namespace or prefix contains $2 is wrong. Can't continue."
    exit 1
  fi
}
# Prints "True" when exactly one running container matches name
# fragment $1, "False" when none do, and exits on ambiguity.
# NOTE(review): the "containter" typo is kept — main calls the
# function under this name.
# NOTE(review): with "set -e" and no pipefail, a non-matching
# "docker ps | grep" returns grep's status 1 and aborts the whole
# script before the "False" branch can ever run — confirm intended.
# $1 - container name contains eg. msa_sms
function check_containter {
  docker ps | grep $1 > /dev/null
  if [ $? -eq 0 ]; then
    local MATCH=$(docker ps | grep $1 | wc -l)
    if [ $MATCH -eq 1 ]; then
      echo "True"
    elif [ $MATCH -eq 0 ]; then
      echo "False"
    else
      echo "Found multiple containers with the same name. Can't continue."
      exit 1
    fi
  fi
}
# Prints the netns file name for the container matching $1, derived
# from docker's SandboxKey (/var/run/docker/netns/<id>), or
# "NOT_FOUND".
# NOTE(review): the first pipeline ends in awk, which exits 0 even
# when grep matched nothing, so the "NOT_FOUND" branch is unreachable
# via this check alone.
# $1 - container name contains eg. msa_sms
function check_container_ns {
  docker ps | grep $1 | awk '{print $1}' > /dev/null
  if [ $? -eq 0 ]; then
    local CONT_ID=$(docker ps | grep $1 | awk '{print $1}')
    docker inspect -f '{{.NetworkSettings.SandboxKey}}' $CONT_ID > /dev/null
    if [ $? -eq 0 ]; then
      local NS_ID_LONG=$(docker inspect -f '{{.NetworkSettings.SandboxKey}}' $CONT_ID)
      # field 6 of the '/'-split SandboxKey path is the namespace id
      local NS_ID_SHORT=$(cut -d'/' -f6 <<< $NS_ID_LONG)
      echo $NS_ID_SHORT
    else
      # NOTE(review): $ID is never set in this function (it is unset at
      # the top of the script), so this message prints an empty name.
      echo "Error: Namespace for $ID container not found. Can't continue."
      exit 1
    fi
  else
    echo "NOT_FOUND"
  fi
}
# Sets net.ipv4.conf.<iface>.rp_filter inside a namespace.
# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
# $1 - namespace id
# $2 - interface name
# $3 - set "2" to loose, set "1" to strict
function set_rp_filter {
  sudo ip netns exec $1 sysctl -w net.ipv4.conf.$2.rp_filter=$3 > /dev/null
  if [ $? -eq 0 ]; then
    # Fixed: both messages previously swapped namespace and interface
    # ($1 is the namespace, $2 the interface).
    echo "rp_filter updated for $2 interface in $1 namespace"
  else
    echo "Error: Can't update rp_filter for $2 interface in $1 namespace. Can't continue."
    exit 1
  fi
}
# Prints every rp_filter sysctl visible inside namespace $1.
# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
# $1 - namespace id
function show_rp_filter {
  sudo ip netns exec $1 sysctl -a | grep '\.rp_filter' > /dev/null
  if [ $? -eq 0 ]; then
    sudo ip netns exec $1 sysctl -a | grep '\.rp_filter'
  else
    # Fixed: the message referenced "$2", which this function never
    # receives — the namespace is "$1".
    echo "Error: Can't check rp_filter in $1 namespace. Can't continue."
    exit 1
  fi
}
# Prints the docker network id for network name $1.
# NOTE(review): the probe pipeline ends in awk, which exits 0 even
# when grep matched nothing, so the error branch is unreachable and a
# missing network silently yields an empty id.
# $1 - name of the docker network from docker-compose file
function get_overlay_net_id {
  docker network ls | grep $1 | awk '{print $1}' > /dev/null
  if [ $? -eq 0 ]; then
    local NET_ID=$(docker network ls | grep $1 | awk '{print $1}')
    echo $NET_ID
  else
    echo "Error: $1 docker network not found. Can't continue."
    exit 1
  fi
}
# Prints the subnet (e.g. 10.0.0.0/24) of docker network $1, after
# validating it looks like a single CIDR prefix.
# $1 - docker network name
function get_overlay_net_prefix {
  local NET_NAME=$(docker network ls | grep $1 | awk '{print $2}')
  docker inspect -f '{{range .IPAM.Config}}{{println .Subnet}}{{end}}' $NET_NAME > /dev/null
  if [ $? -eq 0 ]; then
    local OVERLAY_NET_PREFIX=$(docker inspect -f '{{range .IPAM.Config}}{{println .Subnet}}{{end}}' $NET_NAME)
    # NOTE(review): a network with multiple IPAM configs yields several
    # lines here, which this anchored regex rejects — confirm networks
    # are guaranteed to have a single subnet.
    if [[ $OVERLAY_NET_PREFIX =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\/[0-9]+$ ]]; then
      echo $OVERLAY_NET_PREFIX
    else
      echo "Error: Can't get docker swarm overlay network prefix. Can't continue."
      exit 1
    fi
  else
    echo "Error: Can't get docker swarm overlay network prefix. Can't continue."
    exit 1
  fi
}
# Orchestrates the fix-up.  Steps 1-7 (discovery: namespaces, network
# prefixes, container presence) always run; the getopts loop then
# dispatches -a (apply), -s (show), -d (revert) or -h (help) for the
# remaining steps.  Multiple flags are processed in order.
function main {
  echo "STEP 1:"
  echo "Creating symlink for netns..."
  create_ns_symlink $RUN_DIR $NETNS_DIR
  echo ""
  echo "STEP 2:"
  echo "Checking <<$INGRESS_NS>> namespace..."
  INGRESS_NS=$(check_ns $NETNS_DIR $INGRESS_NS)
  echo $INGRESS_NS
  echo ""
  echo "STEP 3:"
  echo "Checking docker network <<$DOCKER_NETWORK_2>> namespace..."
  LB_NET_ID=$(get_overlay_net_id $DOCKER_NETWORK_2)
  LB_NS_ID=$(check_lb_ns $NETNS_DIR $LB_NET_ID)
  echo $LB_NS_ID
  echo ""
  echo "STEP 4:"
  echo "Retrieving docker <<$DOCKER_NETWORK_1>> overlay network prefix..."
  OVERLAY_NET_1_PREFIX=$(get_overlay_net_prefix $DOCKER_NETWORK_1)
  echo $OVERLAY_NET_1_PREFIX
  echo ""
  echo "STEP 5:"
  echo "Retrieving docker <<$DOCKER_NETWORK_2>> overlay network prefix..."
  OVERLAY_NET_2_PREFIX=$(get_overlay_net_prefix $DOCKER_NETWORK_2)
  echo $OVERLAY_NET_2_PREFIX
  echo ""
  echo "STEP 6:"
  echo "Checking msa_front is here on this host..."
  MSA_FRONT_HERE=$(check_containter $MSA_FRONT)
  echo $MSA_FRONT_HERE
  echo ""
  echo "STEP 7:"
  echo "Checking msa_sms is here on this host..."
  MSA_SMS_HERE=$(check_containter $MSA_SMS)
  echo $MSA_SMS_HERE
  echo ""
  while getopts "hasd" opt
  do
    case $opt in
      h)
        print_help
        ;;
      a)
        # Apply: NAT exceptions, LB default route, then loosen
        # rp_filter (2) on the matching container interfaces.
        echo "STEP 8:"
        echo "Updating <<$INGRESS_NS>> namespace..."
        echo "---> adding NAT UDP/514 UDP/162 TCP/6514 TCP/514 exception... "
        add_nat_exception $INGRESS_NS $OVERLAY_NET_1_PREFIX
        echo ""
        echo "STEP 9:"
        echo "Updating load balancer <<$LB_NS_ID>> namespace..."
        echo "---> adding NAT UDP/514 UDP/162 TCP/6514 TCP/514 exception... "
        add_nat_exception $LB_NS_ID $OVERLAY_NET_2_PREFIX
        echo "---> adding default route... "
        add_default_route $LB_NS_ID
        echo ""
        if [ "$MSA_FRONT_HERE" = "True" ]; then
          echo "STEP 10 (FRONT):"
          echo "Checking <<$MSA_FRONT>> container namespace..."
          MSA_FRONT_NS_ID=$(check_container_ns $MSA_FRONT)
          echo "YES: $MSA_FRONT_NS_ID"
          echo ""
          echo "STEP 11 (FRONT)::"
          echo "Checking ingress <<$MSA_FRONT>> interface..."
          # ${OVERLAY_NET_1_PREFIX::-4} - makes 10.0.0. from 10.0.0.0/24
          MSA_PREFIX=${OVERLAY_NET_1_PREFIX::-4}
          MSA_FRONT_IFACE=$(check_ingress_interface "$MSA_FRONT_NS_ID" "$MSA_PREFIX")
          echo $MSA_FRONT_IFACE
          echo ""
          echo "STEP 12 (FRONT):"
          echo "Updating <<$MSA_FRONT>> rp_filter..."
          set_rp_filter $MSA_FRONT_NS_ID $MSA_FRONT_IFACE 2
          echo ""
        fi
        if [ "$MSA_SMS_HERE" = "True" ]; then
          echo "STEP 10 (SMS):"
          echo "Checking <<$MSA_SMS>> container namespace..."
          MSA_SMS_NS_ID=$(check_container_ns $MSA_SMS)
          echo "YES: $MSA_SMS_NS_ID"
          echo ""
          echo "STEP 11 (SMS):"
          echo "Checking ingress <<$MSA_SMS>> interface..."
          # ${OVERLAY_NET_2_PREFIX::-4} - makes 10.0.2. from 10.0.2.0/24
          MSA_PREFIX=${OVERLAY_NET_2_PREFIX::-4}
          MSA_SMS_IFACE=$(check_ingress_interface "$MSA_SMS_NS_ID" "$MSA_PREFIX")
          echo $MSA_SMS_IFACE
          echo ""
          echo "STEP 12 (SMS):"
          echo "Updating <<$MSA_SMS>> rp_filter..."
          set_rp_filter $MSA_SMS_NS_ID $MSA_SMS_IFACE 2
          echo ""
        fi
        ;;
      s)
        # Show: list current NAT exceptions and rp_filter settings.
        echo "STEP 8:"
        echo "NAT EXCEPTIONS LIST:"
        echo "---> show NAT UDP/514 UDP/162 TCP/514 TCP/6514 exceptions in <<$INGRESS_NS>> namespace:"
        show_nat_exceptions $INGRESS_NS
        echo ""
        echo "---> show NAT UDP/514 UDP/162 TCP/514 TCP/6514 exceptions in <<$LB_NS_ID>> namespace:"
        show_nat_exceptions $LB_NS_ID
        echo ""
        if [ "$MSA_FRONT_HERE" = "True" ]; then
          echo "STEP 9 (FRONT):"
          echo "Checking <<$MSA_FRONT>> container namespace..."
          MSA_FRONT_NS_ID=$(check_container_ns $MSA_FRONT)
          echo "YES: $MSA_FRONT_NS_ID"
          echo ""
          echo "STEP 10 (FRONT):"
          echo "Checking <<$MSA_FRONT>> rp_filter..."
          show_rp_filter $MSA_FRONT_NS_ID
          echo ""
          echo "STEP 11 (FRONT):"
          echo "Checking ingress <<$MSA_FRONT>> interface..."
          # ${OVERLAY_NET_1_PREFIX::-4} - makes 10.0.0. from 10.0.0.0/24
          MSA_PREFIX=${OVERLAY_NET_1_PREFIX::-4}
          MSA_FRONT_IFACE=$(check_ingress_interface "$MSA_FRONT_NS_ID" "$MSA_PREFIX")
          echo $MSA_FRONT_IFACE
          echo ""
        fi
        if [ "$MSA_SMS_HERE" = "True" ]; then
          echo "STEP 9 (SMS):"
          echo "Checking <<$MSA_SMS>> container namespace..."
          MSA_SMS_NS_ID=$(check_container_ns $MSA_SMS)
          echo "YES: $MSA_SMS_NS_ID"
          echo ""
          echo "STEP 10 (SMS):"
          echo "Checking <<$MSA_SMS>> rp_filter..."
          show_rp_filter $MSA_SMS_NS_ID
          echo ""
          echo "STEP 11 (SMS):"
          echo "Checking ingress <<$MSA_SMS>> interface..."
          # ${OVERLAY_NET_2_PREFIX::-4} - makes 10.0.2. from 10.0.2.0/24
          MSA_PREFIX=${OVERLAY_NET_2_PREFIX::-4}
          MSA_SMS_IFACE=$(check_ingress_interface "$MSA_SMS_NS_ID" "$MSA_PREFIX")
          echo $MSA_SMS_IFACE
          echo ""
        fi
        ;;
      d)
        # Revert: delete NAT exceptions and restore strict rp_filter (1).
        echo "STEP 8:"
        echo "Removing UDP/514 UDP/162 TCP/514 TCP/6514 exceptions in <<$INGRESS_NS>> namespace..."
        delete_514_nat_exception $INGRESS_NS
        echo "Removing UDP/514 UDP/162 TCP/514 TCP/6514 exceptions in <<$LB_NS_ID>> namespace..."
        delete_514_nat_exception $LB_NS_ID
        echo ""
        if [ "$MSA_FRONT_HERE" = "True" ]; then
          echo "STEP 9 (FRONT):"
          echo "Checking <<$MSA_FRONT>> container namespace..."
          MSA_FRONT_NS_ID=$(check_container_ns $MSA_FRONT)
          echo "YES: $MSA_FRONT_NS_ID"
          echo ""
          echo "STEP 10 (FRONT):"
          echo "Checking ingress <<$MSA_FRONT>> interface..."
          # ${OVERLAY_NET_1_PREFIX::-4} - makes 10.0.0. from 10.0.0.0/24
          MSA_PREFIX=${OVERLAY_NET_1_PREFIX::-4}
          MSA_FRONT_IFACE=$(check_ingress_interface "$MSA_FRONT_NS_ID" "$MSA_PREFIX")
          echo $MSA_FRONT_IFACE
          echo ""
          echo "STEP 11 (FRONT):"
          echo "Updating <<$MSA_FRONT>> rp_filter..."
          set_rp_filter $MSA_FRONT_NS_ID $MSA_FRONT_IFACE 1
          echo ""
        fi
        if [ "$MSA_SMS_HERE" = "True" ]; then
          echo "STEP 9 (SMS):"
          echo "Checking <<$MSA_SMS>> container namespace..."
          MSA_SMS_NS_ID=$(check_container_ns $MSA_SMS)
          echo "YES: $MSA_SMS_NS_ID"
          echo ""
          echo "STEP 10 (SMS):"
          echo "Checking ingress <<$MSA_SMS>> interface..."
          # ${OVERLAY_NET_2_PREFIX::-4} - makes 10.0.2. from 10.0.2.0/24
          MSA_PREFIX=${OVERLAY_NET_2_PREFIX::-4}
          MSA_SMS_IFACE=$(check_ingress_interface "$MSA_SMS_NS_ID" "$MSA_PREFIX")
          echo $MSA_SMS_IFACE
          echo ""
          echo "STEP 11 (SMS):"
          echo "Updating <<$MSA_SMS>> rp_filter..."
          set_rp_filter $MSA_SMS_NS_ID $MSA_SMS_IFACE 1
          echo ""
        fi
        ;;
      *)
        echo "Invalid option"
        print_help
        ;;
    esac
  done
  echo "Completed successfully."
}
# Entry point: all work happens in main so helpers are defined first.
main "$@"
| true
|
6d8476089af49b0971b52fb96607629c8c0bc516
|
Shell
|
syrincs/system
|
/gitlab-runner/start.sh
|
UTF-8
| 688
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# Starts the gitlab-runner compose stacks — the main runner plus one
# stack per project env file — then registers runners.  Skips entirely
# when the required env files are missing.
if [ -f ../system/.docker.env ] && [ -f ../gitlab/.docker.env ]; then
  source ../system/.docker.env
  source ../gitlab/.docker.env
  # No argument, or "main": bring up the main runner stack.
  if [ $# -lt 1 ] || [ "$1" = "main" ]; then
    docker-compose -p runner up --build --scale docker=$GITLAB_RUNNER_SCALE --remove-orphans -d
  fi
  # No argument, or "projects": one stack per project env file, with
  # PROJECT_NAME (and optionally the scale) sourced from each file.
  if [ $# -lt 1 ] || [ "$1" = "projects" ]; then
    for i in $(find ../system/.projects.env ../system/projects.env -type f -name "*.env" 2>/dev/null); do
      source $i
      docker-compose -p ${PROJECT_NAME}_runner up --build --scale docker=$GITLAB_RUNNER_SCALE --remove-orphans -d
    done
  fi
  bash register.sh $1
else
  echo "[start] Skipped."
fi
| true
|
7df7d189c290c5d78dc69a92e515e0243d3b1060
|
Shell
|
sylvain-artois/ContrePensees
|
/contrepensees.sh
|
UTF-8
| 2,586
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# description: contrepensees service
# processname: node
# pidfile: /var/run/contrepensees.pid
# logfile: /var/log/contrepensees.log
#
# Based on https://gist.github.com/jinze/3748766
# @see https://gist.github.com/operatino/8389370
#
# To use it as service on Ubuntu:
# cp contrepensees.sh /etc/init.d/contrepensees
# chmod a+x /etc/init.d/contrepensees
# insserv contrepensees
#
# Then use commands:
# service contrepensees <command (start|stop|etc)>
#
# See also https://www.exratione.com/2013/02/nodejs-and-forever-as-a-service-simple-upstart-and-init-scripts-for-ubuntu/
# See also https://www.terlici.com/2015/02/05/hosting-deploying-nodejs.html
NAME=contrepensees # Unique name for the application
SOURCE_DIR=/srv/contrepensees.fr # Location of the application source
COMMAND=node # Command to run
SOURCE_NAME=keystone.js # Name of the application entry point script
USER=www-data # User for process running
# NOTE(review): "ENVIROMENT" is misspelled but used consistently below.
NODE_ENVIROMENT=production # Node environment
# NOTE(review): UUID is never read elsewhere in this script — confirm
# before removing.
UUID=contrepensees
pidfile=/var/run/$NAME.pid
logfile=/var/log/$NAME.log
forever=forever
# Starts the app under forever as $USER, after creating the log and
# pid files owned by that user.  Sets RETVAL from the su invocation.
start() {
  export NODE_ENV=$NODE_ENVIROMENT
  echo "Starting $NAME node instance : "
  touch $logfile
  chown $USER $logfile
  touch $pidfile
  chown $USER $pidfile
  #iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 80 -j REDIRECT --to-port 8080
  /bin/su - $USER -c "export NODE_ENV=$NODE_ENVIROMENT && $forever start --pidFile $pidfile -l $logfile -a --sourceDir $SOURCE_DIR -c $COMMAND $SOURCE_NAME"
  RETVAL=$?
}
# Asks forever to restart the supervised entry script; sets RETVAL.
restart() {
  echo -n "Restarting $NAME node instance : "
  /bin/su - $USER -c "$forever restart $SOURCE_NAME"
  RETVAL=$?
}
# Lists the forever processes for the service user; sets RETVAL.
status() {
  echo "Status for $NAME:"
  /bin/su - $USER -c "$forever list"
  RETVAL=$?
}
# Stops the forever-managed process and removes its pidfile; sets
# RETVAL (0 when the service was not running).
# Fixed: the original tested the undefined $PIDFILE and stopped the
# undefined $APPLICATION_PATH — the script defines lowercase
# $pidfile, and '[ -f $PIDFILE ]' with an empty variable degenerates
# to the always-true single-argument test '[ -f ]'.
stop() {
  if [ -f "$pidfile" ]; then
    echo "Shutting down $NAME"
    # Tell Forever to stop the process.
    /bin/su - $USER -c "$forever stop $SOURCE_NAME" > /dev/null 2>&1
    RETVAL=$?
    # Get rid of the pidfile, since Forever won't do that.
    rm -f "$pidfile"
  else
    echo "$NAME is not running."
    RETVAL=0
  fi
}
# Service-style dispatch on the action given as $1; the script exits
# with the RETVAL set by the chosen action.
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  status)
    status
    ;;
  restart)
    restart
    ;;
  *)
    echo "Usage: {start|stop|status|restart}"
    exit 1
    ;;
esac
exit $RETVAL
| true
|
90b3281d300ea9e3febf277d20d8db3a1a506033
|
Shell
|
nth10sd/dotfiles
|
/bin/manjaro/01_aur.sh
|
UTF-8
| 253
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs an AUR package by cloning its repo into ~/.build and
# running makepkg -si.  Relies on f_check_prog and f_out defined
# elsewhere in the dotfiles.
# Fixed: every 'cd' is now checked — previously a missing ~/.build
# let the clone and makepkg run in whatever directory we were in.
function f_aur_install() {
    local _pkg
    _pkg=$1
    f_check_prog "git"
    f_out "Installing ${_pkg} from the AUR"
    mkdir -p ~/.build
    cd ~/.build || return 1
    rm -rf ~/.build/"${_pkg}"
    git clone "https://aur.archlinux.org/${_pkg}.git"
    cd ~/.build/"${_pkg}" || return 1
    makepkg -si
    cd ~ || return 1
}
| true
|
683051ca37bdad2dad13ebbe1d84e87afc11313b
|
Shell
|
dyweb/dyweb.github.io
|
/update-gh-pages
|
UTF-8
| 625
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rebuilds the site on a throwaway gh-pages branch, force-pushes it,
# then returns to the original branch and rebuilds locally.
# Fixed: the original prefixed each assignment with 'local', which is
# only legal inside a function — at file scope every assignment
# errored out and all of these variables were empty.
BUILD_BRANCH='gh-pages'
BUILD_COMMAND='npm run build'
BUILD_PATH='dist index.html'
BUILD_COMMIT_TITLE='build'
CURRENT_BRANCH=$(git branch | grep --color=never '^\* ' | cut -d' ' -f 2)
ADD_BUILD_COMMAND="git add -f ${BUILD_PATH}"
# Switch to the build branch
git branch -D ${BUILD_BRANCH}
git checkout -b ${BUILD_BRANCH}
# Build, commit, push
eval ${BUILD_COMMAND}
eval ${ADD_BUILD_COMMAND}
git commit -m ${BUILD_COMMIT_TITLE}
git push -f origin ${BUILD_BRANCH}
# Switch back and clean up
git checkout ${CURRENT_BRANCH}
git branch -D ${BUILD_BRANCH}
eval ${BUILD_COMMAND}
| true
|
4f687b724dec62445519bc983bb390dca852de9a
|
Shell
|
fisanchez/scripts
|
/gen_rails_stack
|
UTF-8
| 1,383
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive generator for a new Rails app skeleton preconfigured
# with sqlite3, rspec and sequel-rails (-O skips Active Record).
# NOTE(review): the unquoted 'echo ... app?' prompts contain glob
# characters ('?') that would expand if a matching file exists in the
# current directory — confirm or quote the prompts.
echo Hello, what do you want to name your app?
read appName
echo press y if this is an api server
read api_server
echo Creating $appName app
if [[ $api_server =~ ^[Yy]$ ]]
then
  # API mode: no views/assets middleware.
  rails new $appName -O --api --skip-coffee
else
  rails new $appName -O --skip-coffee --skip-turbolinks
fi
echo move into app folder
cd $appName
echo add sqlite to gemfile
echo "gem 'sqlite3', '~> 1.4'" >> Gemfile
echo "gem 'rspec-rails'" >> Gemfile
echo bundling
bundle
echo installing rspec
rails generate rspec:install
echo Creating database.yml file
touch config/database.yml
echo Append sqlit configs
echo Creating database.yml
# Write a standard three-environment sqlite3 config.
echo "# SQLite version 3.x
# gem install sqlite3
#
# Ensure the SQLite 3 gem is defined in your Gemfile
# gem 'sqlite3'
#
default: &default
adapter: sqlite3
development:
<<: *default
database: db/development.sqlite3
# Warning: The database defined as "test" will be erased and
# re-generated from your development database when you run "rake".
# Do not set this db to the same as development or production.
test:
<<: *default
database: db/test.sqlite3
production:
<<: *default
database: db/production.sqlite3
" >> config/database.yml
echo Adding sequel-rails to gemfile
echo "gem 'sequel-rails'" >> Gemfile
echo creating db folder
mkdir db
echo Creating dev file
touch db/development.sqlite3
echo All done! Enjoy your app.
| true
|
7ad097bb7134abfc79d74f75fb935905d57cda63
|
Shell
|
mongodb/mongodb-kubernetes-operator
|
/scripts/git-hooks/pre-commit
|
UTF-8
| 3,558
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# This should be copied to .git/hooks/pre-commit
# Format every staged Go file with goimports and re-stage the result.
function go_imports() {
    if ! type goimports &> /dev/null; then
        echo "Installing goimports"
        GO111MODULE=off go get golang.org/x/tools/cmd/goimports
    fi
    # Read the staged file list line-by-line so paths containing spaces
    # survive (a plain `for f in $(git diff ...)` would word-split them).
    git diff --cached --name-only --diff-filter=ACM | grep '\.go$' |
    while IFS= read -r file; do
        goimports -w "${file}"
        git add "$file"
    done
}
# Regenerate the CRD manifest with `make manifests` and stage it so the
# commit always carries an up-to-date CRD.
function generate_crd(){
    echo "Generating CRD"
    make manifests
    git add config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml
}
# Type-check every staged Python file with mypy.
# Returns: 0 if all files pass, 1 if any file fails (commit is then aborted).
function mypy_check()
{
    local exit_status=0
    # Mypy doesn't support recursive traversal of directories
    # So we manually call it on every staged python file
    echo "Running mypy on staged python files"
    # NOTE(review): $(git diff ...) word-splits, so a staged path containing
    # spaces would be mishandled here — confirm the repo has no such paths.
    for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.py$')
    do
        echo "Analyzing $file ..."
        # We ignore missing import otherwise mypy will complain
        # about 3rd party libraries not providing type hints
        if ! mypy --disallow-untyped-calls --disallow-untyped-defs --disallow-incomplete-defs --ignore-missing-imports "${file}"; then
            exit_status=1
        fi
    done
    return $exit_status
}
# Lint the directories containing staged Go files with golangci-lint.
# Returns: 0 if everything is clean (or nothing is staged), 1 on any finding.
function go_linting() {
    # Collect the parent directory of each staged .go file.
    dirs_to_analyze=()
    for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.go$')
    do
        dirs_to_analyze+=("$(dirname "${file}")" )
    done
    if [ ${#dirs_to_analyze[@]} -ne 0 ]; then
        # De-duplicate the directory list before linting.
        mapfile -t dirs_to_analyze < <(printf '%s\n' "${dirs_to_analyze[@]}" | sort -u)
        echo "Running golangci-lint on staged files"
        local exit_status=0
        for file in "${dirs_to_analyze[@]}"
        do
            if ! golangci-lint run "${file}"; then
                exit_status=1
            fi
        done
        return $exit_status
    fi
    return 0
}
# Reformat every staged Python file with black and re-stage it.
function black_formatting()
{
    # Read line-by-line so paths containing spaces are not word-split
    # (the previous `for file in $(git diff ...)` broke on such paths).
    git diff --cached --name-only --diff-filter=ACM | grep '\.py$' |
    while IFS= read -r file; do
        black -q "$file"
        git add "$file"
    done
}
# Regenerate the GitHub Actions workflow files and stage any changes.
function generate_github_actions(){
    scripts/dev/generate_github_actions.py
    git add .github/workflows
}
generate_github_actions
generate_crd
go_imports
black_formatting
if ! mypy_check; then
echo "MyPy returned some errors, please correct them"
echo "Commit aborted"
# In some cases we might encounter mypy errors that we do not
# actually treat as such. So we provide a link to the dev
# for ignoring them through code annotation
echo "If some of the errors reported are false positives "\
"and should be ignored, mypy provides a way to silence "\
"errors: https://mypy.readthedocs.io/en/stable/common_issues.html#spurious-errors-and-locally-silencing-the-checker"
echo "Please use this only for errors that you are sure are"\
"false positives."
exit 1
fi
if ! go_linting; then
echo "Golancli-lint returned some errors, please correct them"
echo "Commit aborted"
# In some cases we might encounter mypy errors that we do not
# actually treat as such. So we provide a link to the dev
# for ignoring them through code annotation
echo "If some of the errors reported are false positives "\
"and should be ignored, golanci-lint provides a way to silence "\
"errors: https://golangci-lint.run/usage/false-positives/"
echo "Please use this only for errors that you are sure are"\
"false positives."
exit 1
fi
| true
|
3b9dae6c7c95f78b19dc9b1bc1b39afc88b68cac
|
Shell
|
antoniocaia/kattis
|
/update_readme.sh
|
UTF-8
| 1,119
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
curl 'https://github.com/antoniocaia/kattis/tree/master/src/kattis' | tac | tac | pup '#js-repo-pjax-container' >source.html;
cat source.html | pup 'a[class="js-navigation-open link-gray-dark"] attr{href}' | grep "\S" | sed 's/ //g' >gitlinks;
cat source.html | pup 'a[class="js-navigation-open link-gray-dark"] text{}' | grep "\S" | sed 's/ //g' >javas;
cat source.html | pup 'a[class="link-gray"] text{}' | sed -e 's/\<Solved\>//g' | grep "\S" | sed 's/ //g' >id #Remove 'Solved' from the last commit description;
tr A-Z a-z <id >ids; #Uppercase to lowercase
echo "# Kattis Problems Java Solutions" >README.md;
echo "Problem ID | Solution | Difficulty" >>README.md;
echo "--- | --- |:-:" >>README.md;
paste ids gitlinks javas | while IFS="$(printf '\t')" read -r id link java; do
dif=$(curl https://open.kattis.com/problems/$id | pup 'div[class="problem-sidebar sidebar-info"] > div:nth-child(3) > p:nth-child(4) > span text{}');
echo "[$id](https://open.kattis.com/problems/$id) | [$java](https://github.com$link) | $dif" >>README.md
done
rm source.html
rm gitlinks
rm javas
rm id
rm ids
exit 0
| true
|
2c45bd93dea7386fb654c016deefbecfd581adec
|
Shell
|
alixaxel/halBox
|
/packages/ubuntu/trusty/dexec.sh
|
UTF-8
| 357
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download the dexec 1.0.3 release tarball for this architecture and install
# the binary to /usr/local/sbin. $halBox_Arch is expected from the halBox
# provisioning environment — TODO confirm it is always set here.
wget -q https://bintray.com/artifact/download/dexec/release/dexec_1.0.3_linux_$halBox_Arch.tar.gz -O /tmp/dexec.tar.gz
# Unpack and promote the binary only if the download succeeded.
if [[ $? == 0 ]]; then
    cd /tmp/ && mkdir -p /tmp/dexec/ && tar -xf /tmp/dexec.tar.gz -C /tmp/dexec/ && mv /tmp/dexec/*/dexec /usr/local/sbin/dexec && chmod +x /usr/local/sbin/dexec
fi
# Always clean up the download artifacts.
cd ~ && rm -rf /tmp/dexec*
| true
|
78e158ad35d861af3e57ea242d6f66d51c90be47
|
Shell
|
telegraph/telegraph-engineer-laptop
|
/aem-setup/scripts/05a-build-deploy-ooyala.sh
|
UTF-8
| 1,018
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source config
#################################################################################################
# 1. Build And Deploy Ooyala using Maven
# Clone the repo if it does not exist
echo "OOYALA HOME is $AEM_PROJECT_HOME/$AEM_OOYALA"
cd "$AEM_PROJECT_HOME"
if [ ! -d "$AEM_PROJECT_HOME/$AEM_OOYALA" ]; then
git clone git@github.com:telegraph/aem-ooyala.git
fi
# Go to root of project
cd "$AEM_PROJECT_HOME/$AEM_OOYALA/cq"
if [ $? = 1 ]; then
echo "Cannot navigate to $AEM_PROJECT_HOME/$AEM_OOYALA folder. Exiting..."
exit 1
fi
# Make sure we have the latest
git stash
git pull
# Checkout branch "develop"
git checkout develop
# Build the project (must specify the settings file)
mvn clean install -s ../settings.xml
# Deploy to AEM author
mvn crx:install -Dinstance.url=http://localhost:4502 -Dinstance.password=admin
# Deploy to AEM publish
mvn crx:install -Dinstance.url=http://localhost:4503 -Dinstance.password=admin
#cd "$AEM_PROJECT_HOME"
# return to where we were
# popd
| true
|
3250d35ed841c99a834dbe59af2a9788b36056e4
|
Shell
|
ContinualAI/avalanche
|
/tests/checkpointing/test_checkpointing.sh
|
UTF-8
| 1,877
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Script used to automatically test various combinations of plugins when used with
# the checkpointing functionality.
set -euo pipefail
cd tests/checkpointing
rm -rf logs
rm -rf checkpoints
rm -rf metrics_no_checkpoint
rm -rf metrics_checkpoint
export PYTHONUNBUFFERED=1
export PYTHONPATH=../..
export CUDA_VISIBLE_DEVICES=0
export CUBLAS_WORKSPACE_CONFIG=:4096:8
BENCHMARK="TestBenchmark"
# Config from env
# https://blog.stigok.com/2022/02/08/parsing-boolean-string-statements-in-bash.html
# Normalize a string to a boolean word: prints "true" for the accepted
# truthy spellings (true/1/yes, case-insensitive), "false" otherwise.
# Defaults to "false" when no argument is given.
function str_bool {
  local value="${1:-false}"
  case "${value,,}" in
    true|1|yes) echo 'true' ;;
    *)          echo 'false' ;;
  esac
}
RUN_FAST_TESTS=$(str_bool "${FAST_TEST:-False}")
RUN_GPU_TESTS=$(str_bool "${USE_GPU:-False}")
GPU_PARAM="--cuda -1"
if [ "$RUN_GPU_TESTS" = "true" ]
then
GPU_PARAM="--cuda 0"
fi
# Exercise one plugin combination three ways and compare the metric logs:
#   1) full run with checkpointing disabled (--checkpoint_at -1),
#   2) run stopped after experience 1 (writes ./checkpoint.pkl),
#   3) second invocation that resumes from that checkpoint,
# then check_metrics_aligned.py compares the two metric trees.
# Arguments: plugin names, forwarded to --plugins.
run_and_check() {
    # Without checkpoints
    python -u task_incremental_with_checkpointing.py $GPU_PARAM --checkpoint_at -1 \
        --plugins "$@" --benchmark $BENCHMARK --log_metrics_to './metrics_no_checkpoint'
    rm -r ./checkpoint.pkl
    # Stop after experience 1
    python -u task_incremental_with_checkpointing.py $GPU_PARAM --checkpoint_at 1 \
        --plugins "$@" --benchmark $BENCHMARK --log_metrics_to './metrics_checkpoint'
    echo "Running from checkpoint 1..."
    python -u task_incremental_with_checkpointing.py $GPU_PARAM --checkpoint_at 1 \
        --plugins "$@" --benchmark $BENCHMARK --log_metrics_to './metrics_checkpoint'
    rm -r ./checkpoint.pkl
    # Compare results of run (1) vs runs (2)+(3); script runs under set -e,
    # so a mismatch (non-zero exit) aborts the whole test script.
    python -u check_metrics_aligned.py \
        "./metrics_no_checkpoint" "./metrics_checkpoint"
    # Clean up between plugin combinations.
    rm -r metrics_no_checkpoint
    rm -r metrics_checkpoint
    rm -r logs
}
run_and_check "replay"
if [ "$RUN_FAST_TESTS" = "false" ]
then
echo "Running slow tests..."
run_and_check "lwf"
run_and_check "ewc"
run_and_check "gdumb"
run_and_check "cwr" "replay"
fi
| true
|
e89d0104373bf1df603726363690ab665f313f4e
|
Shell
|
jeonghanlee/fluka-setup
|
/set_fluka_env.bash
|
UTF-8
| 1,249
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# The program is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 of the
# License, or any newer version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see https://www.gnu.org/licenses/gpl-2.0.txt
#
# Shell : set_fluka_env.bash
# Author : Jeong Han Lee
# email : jeonghan.lee@gmail.com
SRC="${BASH_SOURCE[0]}"
SRC_DIR="$( cd -P "$( dirname "$SRC" )" && pwd )"
# Summarize the FLUKA environment (FLUPRO/FLUFOR) and report the gfortran
# version in use.
function print_options
{
    printf "\n"
    printf ">>> The following variables are set.\n"
    # Pass values as printf *arguments*, never inside the format string,
    # so paths containing '%' or '\' are printed verbatim.
    printf "    FLUPRO : %s\n" "${FLUPRO}"
    printf "    FLUFOR : %s\n\n" "${FLUFOR}"
    printf ">>> GFortan information as follows:\n"
    printf "    \n"
    gfortran --version
    printf ">>> \n"
}
source /opt/rh/devtoolset-7/enable
fortran_compiler="gfortran"
FLUPRO=${SRC_DIR}
FLUFOR=${fortran_compiler}
export FLUPRO
export FLUFOR
print_options
| true
|
51c4e106b2ef0ab8895dcb29eccc74e4fee84acc
|
Shell
|
janosmiko/reward
|
/images/php-fpm/context/magento2-web/docker-entrypoint
|
UTF-8
| 3,566
| 3.609375
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# SUPERVISOR
if [ "${CRON_ENABLED}" = "true" ]; then
envsubst </etc/supervisor/available.d/cron.conf.template | sudo tee /etc/supervisor/conf.d/cron.conf
fi
if [ "${SOCAT_ENABLED}" = "true" ] &&
[ -S /run/host-services/ssh-auth.sock ] &&
[ "${SSH_AUTH_SOCK}" != "/run/host-services/ssh-auth.sock" ]; then
envsubst </etc/supervisor/available.d/socat.conf.template | sudo tee /etc/supervisor/conf.d/socat.conf
fi
envsubst </etc/supervisor/available.d/nginx.conf.template | sudo tee /etc/supervisor/conf.d/nginx.conf
envsubst </etc/supervisor/available.d/permission.conf.template | sudo tee /etc/supervisor/conf.d/permission.conf
envsubst </etc/supervisor/available.d/php-fpm.conf.template | sudo tee /etc/supervisor/conf.d/php-fpm.conf
# NGINX
find /etc/nginx -name '*.template' -exec sh -c 'envsubst <${1} | sudo tee ${1%.*}' sh {} \;
sudo ln -sf /proc/self/fd/1 /var/log/nginx/access.log && sudo ln -sf /proc/self/fd/2 /var/log/nginx/error.log
# PHP
if [ -x "$(command -v apt-get)" ]; then
PHP_VERSION=$(php -v | head -n1 | cut -d' ' -f2 | awk -F '.' '{print $1"."$2}')
envsubst <"/etc/php/${PHP_VERSION}/mods-available/docker.ini.template" \
| sudo tee "/etc/php/${PHP_VERSION}/mods-available/docker.ini"
printf "[www]
user=%s
group=%s
;listen=%s
;listen.owner=%s
;listen.group=%s
" "${UID}" "${GID}" "${NGINX_UPSTREAM_PORT}" "${UID}" "${GID}" | sudo tee "/etc/php/${PHP_VERSION}/fpm/zzz-docker.conf"
if [ -f /etc/ssl/reward-rootca-cert/ca.cert.pem ]; then
sudo cp /etc/ssl/reward-rootca-cert/ca.cert.pem /usr/local/share/ca-certificates/reward-rootca-cert.pem
fi
sudo phpenmod docker
sudo update-ca-certificates
elif [ -x "$(command -v dnf)" ] || [ -x "$(command -v yum)" ]; then
envsubst </etc/php.d/docker.ini.template | sudo tee /etc/php.d/01-docker.ini
printf "[www]
user=%s
group=%s
listen=%s
;listen.owner=%s
;listen.group=%s
" "${UID}" "${GID}" "${NGINX_UPSTREAM_PORT}" "${UID}" "${GID}" | sudo tee /etc/php-fpm.d/zzz-docker.conf
if [ -f /etc/ssl/reward-rootca-cert/ca.cert.pem ]; then
sudo cp /etc/ssl/reward-rootca-cert/ca.cert.pem /etc/pki/ca-trust/source/anchors/reward-rootca-cert.pem
fi
sudo update-ca-trust
fi
# install requested node version if not already installed
NODE_INSTALLED="$(node -v | perl -pe 's/^v([0-9]+)\..*$/$1/')";
if [ "${NODE_INSTALLED}" -ne "${NODE_VERSION}" ] || [ "${NODE_VERSION}" = "latest" ] || [ "${NODE_VERSION}" = "lts" ]; then
sudo n "${NODE_VERSION}"
fi
# Resolve permission issues with directories auto-created by volume mounts; to use set CHOWN_DIR_LIST to
# a list of directories (relative to working directory) to chown, walking up the paths to also chown each
# specified parent directory. Example: "dir1/dir2 dir3" will chown dir1/dir2, then dir1 followed by dir3
# shellcheck disable=SC2039
for DIR in ${CHOWN_DIR_LIST:-}; do
if [ -d "${DIR}" ]; then
while :; do
sudo chown www-data:www-data "${DIR}"
DIR=$(dirname "${DIR}")
if [ "${DIR}" = "." ] || [ "${DIR}" = "/" ]; then
break;
fi
done
fi
done
# Resolve permission issue with /var/www/html being owned by root as a result of volume mounted on php-fpm
# and nginx combined with nginx running as a different uid/gid than php-fpm does. This condition, when it
# surfaces would cause mutagen sync failures (on initial startup) on macOS environments.
# sudo chown www-data:www-data /var/www/html
# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
set -- sudo supervisord -c /etc/supervisor/supervisord.conf
fi
exec "$@"
| true
|
c3399c9265a9bba322927184ccbb4b11a2cd4a24
|
Shell
|
dpwolfe/good-morning
|
/dotfiles/.bash_profile
|
UTF-8
| 5,588
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/bash
export LANG="en_US.UTF-8"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#Git autocomplete
# shellcheck source=git-completion.bash
source "$DIR/git-completion.bash"
function contains {
  # contains(string, substring)
  # Returns 0 if string contains the substring, otherwise returns 1.
  # A quoted glob match takes the substring literally (glob chars included);
  # the previous printf '%q' round-trip mangled quoting and reported an
  # empty substring as "not contained".
  local string="$1"
  local substring="$2"
  [[ "$string" == *"$substring"* ]]
}
function rgfunction { grep -Ers ".{0,40}$1.{0,40}" --color=auto --include="*.$2" -- *; }
function findfunction { find . -name "$1" 2>/dev/null; }
function unix2dos {
  # Convert a file from Unix (LF) to DOS (CRLF) line endings in place.
  # Write to a sibling temp file and replace the original only if sed
  # succeeded — the old rm-then-mv sequence deleted the file even when
  # sed had failed.
  sed "s/$/$(printf '\r')/" "$1" > "$1.new" && mv -- "$1.new" "$1"
}
# Print the current git branch name (empty output outside a repository);
# used to embed the branch in PS1.
function parse_git_branch {
    git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/'
}
# Run a binary from the project-local node_modules/.bin,
# e.g. `npm-exec mocha --reporter dot`.
function npm-exec {
    local bin="$1"
    shift
    # Quote "$@" so arguments containing spaces reach the tool intact
    # (the old unquoted $@ re-split them).
    "$(npm bin)/$bin" "$@"
}
# Force-kill (SIGKILL) every process whose name matches $1, via pgrep.
function kill-function {
    local pid
    # Join pgrep's newline-separated PIDs into one space-separated list.
    pid="$(pgrep "$1" | tr '\n' ' ')"
    if [ -n "$pid" ]; then
        # Intentional word splitting: $pid holds multiple PIDs.
        # shellcheck disable=SC2086
        kill -s KILL $pid;
        echo "Killed $1 $pid"
    else
        echo "No proc to kill with the name '$1'"
    fi
}
function vpn-connect {
if [[ -n "$1" ]]; then
osascript <<-EOF
tell application "System Events"
tell current location of network preferences
set VPN to service "$1"
if exists VPN then connect VPN
repeat while (current configuration of VPN is not connected)
delay 1
end repeat
end tell
end tell
EOF
else
scutil --nc list | grep --color=never "\(Disconnected\)"
echo "Provide the name of one of the connections above."
fi
}
function vpn-disconnect {
if [[ -n "$1" ]]; then
osascript <<-EOF
tell application "System Events"
tell current location of network preferences
set VPN to service "$1"
if exists VPN then disconnect VPN
end tell
end tell
return
EOF
else
scutil --nc list | grep --color=never "\(Connected\)"
echo "Provide the name of one of the connections above."
fi
}
# Prompt "Do you want to $1? $3", read one key directly from the terminal;
# on y/Y run $2 and return 0, on n/N return 1 (other keys fall through
# with no explicit return).
function askto {
    echo "Do you want to $1? $3"
    read -r -n 1 -p "(Y/n) " yn < /dev/tty;
    echo # echo newline after input
    # shellcheck disable=SC2091
    case $yn in
        # NOTE(review): $($2) runs $2 in a subshell and then executes its
        # *output* as a command — looks like plain `$2` was intended;
        # confirm against callers (e.g. the gbd alias).
        y|Y ) $($2); return 0;;
        n|N ) return 1;;
    esac
}
alias gvim='/Applications/MacVim.app/Contents/MacOS/Vim -g'
alias ls='ls -G'
alias ll='ls -la'
alias l.='ls -dG .*'
alias cd..='cd ..'
alias ..='cd ..'
alias ...='cd ../..'
alias .3='cd ../../..'
alias .4='cd ../../../..'
alias .5='cd ../../../../..'
alias .6='cd ../../../../../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
alias ......='cd ../../../../..'
alias .......='cd ../../../../../..'
alias grep='grep --color=auto'
alias eg='egrep --color=auto'
alias fg='fgrep --color=auto'
alias rg=rgfunction
alias sha1='openssl sha1'
alias bc='bc -l'
alias mkdir='mkdir -pv'
alias mount='mount |column -t'
alias h='history'
alias j='jobs -l'
alias path='echo -e ${PATH//:/\\n}'
alias now='date +"%T"'
alias nowtime=now
alias nowdate='date +"%d-%m-%Y"'
# editors
alias vi=vim
alias svi='sudo vim'
alias vis='vim "+set si"'
alias edit='vim'
alias e='vim'
alias ping='ping -c 5'
alias fastping='ping -c 100 -s.2'
alias ports='netstat -tulanp'
alias routes='netstat -rn'
alias mv='mv -i'
alias cp='cp -i'
alias ln='ln -i'
alias k=kill-function
alias kg='kill-function grunt'
alias ks='kill-function safari'
alias kc='kill-function chrome'
alias kf='kill-function firefox'
alias kn='kill-function node'
alias s='source $HOME/.bash_profile'
alias eb='vim $HOME/.bash_profile'
alias ebpub='vim $HOME/repo/good-morning/dotfiles/.bash_profile'
alias u2d=unix2dos
alias f=findfunction
alias initem='source $HOME/emsdk_portable/emsdk_env.sh'
alias xs='sudo xcode-select --switch "/Applications/Xcode.app/Contents/Developer/"'
alias dn='debug-node --web-port 8081'
# git
alias gc='git commit -m'
alias gca='git commit -a -m'
alias pull='git pull'
alias pullr='git pull --rebase origin'
alias pullrm='git pull --rebase origin master'
alias mm='git merge master'
alias push='git push'
alias pushs='git push --set-upstream origin $(parse_git_branch)'
alias cm='git checkout master'
alias gco='git checkout'
alias gbd='askto "delete all local git branches except master" "git branch | grep -Ev master | xargs -n 1 git branch -D"'
alias flushdns='sudo killall -HUP mDNSResponder;sudo killall mDNSResponderHelper;sudo dscacheutil -flushcache'
# create a new SSL cert with Let's Encrypt using certbot and a DNS TXT challenge
alias certonly='sudo certbot certonly --manual --preferred-challenges dns'
gmfunction() {
pushd "$DIR" > /dev/null || return
echo "Pulling latest version of good-morning..."
git pull > /dev/null
export GOOD_MORNING_RUN=1
popd > /dev/null || return
# shellcheck disable=SC1090
source "$DIR/../good-morning.sh"
}
alias good-morning='gmfunction'
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$HOME/go/bin:$HOME/.local/bin:/usr/local/sbin:/usr/local/git/bin:/Library/Developer/CommandLineTools/usr/bin:/Applications/CMake.app/Contents/bin:$PATH"
export PS1='\[\033]0;$TITLEPREFIX:${PWD//[^[:ascii:]]/?}\007\]\n\[\033[32m\]\u@\h \[\033[33m\]\w \[\033[36m\](`parse_git_branch`)\[\033[0m\] \[\033[35m\]\t\[\033[0m\]\n$'
export BASH_SILENCE_DEPRECATION_WARNING=1
export DOCKER_BUILDKIT=1
if [ -f "$(brew --prefix)/etc/bash_completion" ]; then
# shellcheck source=/dev/null
. "$(brew --prefix)/etc/bash_completion"
elif [ -f "$(brew --prefix)/etc/profile.d/bash_completion.sh" ]; then
# shellcheck source=/dev/null
. "$(brew --prefix)/etc/profile.d/bash_completion.sh"
fi
| true
|
07727db816ec0ad6f0e234ced7b2e8f095024c04
|
Shell
|
jumpingElephant/potential-octo-broccoli
|
/run-mongodb.sh
|
UTF-8
| 222
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# Start a MongoDB container with its data directory bind-mounted from $1.
# Usage: run-mongodb.sh /path/to/mongodb/data
if [ -z "$1" ];   # quoted: the old unquoted test broke on spaced paths
then
    echo "Command: run-mongodb.sh /path/to/mongodb/data" >&2
    exit 1
fi
sudo docker run --name mongo-consumption \
    --volume "$1":/data/db \
    --network consumption_bridge \
    --restart always \
    --detach \
    mongo
| true
|
53bd008bb5950b22aba5203839441e1b2e7a67ed
|
Shell
|
antifob/nixexprs
|
/tests/run-test.sh
|
UTF-8
| 1,693
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# ==================================================================== #
set -eu
PROGNAME=$(basename -- "${0}")
PROGBASE=$(d=$(dirname -- "${0}"); cd "${d}" && pwd)
# -------------------------------------------------------------------- #
if [ X0 = X"${#}" ]; then
usage >&2
printf 'usage: %s test...\n' "${PROGNAME}" >&2
exit 2
fi
if [ ! -f "${PROGBASE}/${1}.tests" ]; then
printf '[E] No such tests: %s\n' "${1}" >&2
exit 1
fi
# -------------------------------------------------------------------- #
genin() {
fn="${1}"
shift
cat << __EOF__
${fn} = import ${PROGBASE}/../${fn}.nix
r = ${fn} ${@}
r
__EOF__
}
filtr() {
sed -E "s/(\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]|)//g" | \
grep -Ev '(^Welcome|TCSADRAIN)' | \
grep .
}
# -------------------------------------------------------------------- #
# Report a failed case: input, expected value, and the actual output.
prfail() {
	printf '%s\n' "[F] \"${1}\" -> \"${3}\" (expected \"${2}\")"
}
# Report a passing case: input and its output.
prsucc() {
	printf '%s\n' "[S] ${1} -> ${2}"
}
run() {
printf '[I] Running test for: %s\n' "${1}"
grep '^[^#]' "${PROGBASE}/${1}.tests" | grep -Ev '^[[:space:]]*$' | \
while read _line; do
i=$(printf '%s' "${_line}" | awk -F'#' '{print $1;}' | sed -Ee 's|\s*$||')
x=$(printf '%s' "${_line}" | awk -F'#' '{print $2;}' | sed -Ee 's|^\s*||')
set +e
o=$(genin "${1}" "${i}" | nix repl 2>&1 | filtr)
set -e
fn=prsucc
if [ X- = X"${x}" ]; then
if ! printf '%s' "${o}" | grep -q '^error:'; then
fn=prfail
fi
elif [ X"${x}" != X"${o}" ]; then
fn=prfail
fi
$fn "${i}" "${x}" "${o}"
[ prsucc = $fn ] || exit 1
done
}
while [ X != X"${1:-}" ]; do
run "${1}"
shift
done
# ==================================================================== #
| true
|
f2baf592d646623e39aa9272caa6af622e6b1db2
|
Shell
|
qbit/dotfiles
|
/bin/wpacfg
|
UTF-8
| 512
| 3.515625
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/ksh
DEBUG=1
eval . ~/lib/functions
# WIF=iwn0
WIF=$(ifconfig | grep -B 4 IEEE802 | grep ^[a-z] | awk -F\: '{print $1}')
IF=/sbin/ifconfig
echo -n "SSID: "
read SSID
echo -n "WPA-PSK: "
stty -echo
read -s KEY
stty echo
echo ""
#log "using ${KEY} on ${WIF}"
echo -n "Use dhclient (y/n)? "
read DH
sudo $IF $WIF up nwid $SSID wpa wpakey $KEY
if [ $? ]; then
log "'${WIF}' configured for '$SSID'"
if [[ $DH = "y" ]]; then
log "using dhclient on '${WIF}'."
sudo dhclient -q $WIF
fi
exit;
fi
| true
|
b387374938a9e814b60ec6201b3b224bb36d4ecd
|
Shell
|
raulhuamani/.dotfiles
|
/zsh/.zshrc
|
UTF-8
| 3,527
| 2.875
| 3
|
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Fix the Java Problem
export _JAVA_AWT_WM_NONREPARENTING=1
# alias
#alias ls='ls -lh --color=auto'
alias dir='dir --color=auto'
alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
#####################################################
# Auto completion / suggestion
# Mixing zsh-autocomplete and zsh-autosuggestions
# Requires: zsh-autocomplete (custom packaging by Parrot Team)
# Jobs: suggest files / foldername / histsory bellow the prompt
# Requires: zsh-autosuggestions (packaging by Debian Team)
# Jobs: Fish-like suggestion for command history
source /usr/share/zsh-autosuggestions/zsh-autosuggestions.zsh
source /usr/share/zsh-autocomplete/zsh-autocomplete.plugin.zsh
# Select all suggestion instead of top on result only
zstyle ':autocomplete:tab:*' insert-unambiguous yes
zstyle ':autocomplete:tab:*' widget-style menu-select
zstyle ':autocomplete:*' min-input 2
bindkey $key[Up] up-line-or-history
bindkey $key[Down] down-line-or-history
##################################################
# Fish like syntax highlighting
# Requires "zsh-syntax-highlighting" from apt
source /usr/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# Save type history for completion and easier life
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt histignorealldups sharehistory
#setopt appendhistory
# Custom Aliases
alias cat='/bin/bat'
alias catn='/bin/cat'
alias catln='/bin/bat --paging=never'
# Manual aliases
alias ll='lsd -lh --group-dirs=first'
alias la='lsd -a --group-dirs=first'
alias l='lsd --group-dirs=first'
alias lla='lsd -lha --group-dirs=first'
alias ls='lsd --group-dirs=first'
# Mac style copy/paste for linux machines
alias pbcopy='xclip -selection clipboard' # copy to clipboard, ctrl+c, ctrl+shift+c
alias pbpaste='xclip -selection clipboard -o' # paste from clipboard, ctrl+v, ctrl+shift+v
alias pbselect='xclip -selection primary -o' # paste from highlight, midle click, shift+insert
source ~/powerlevel10k/powerlevel10k.zsh-theme
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
source /usr/share/zsh-plugins/sudo.plugin.zsh
#plugin=(
# node
# )
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Change cursor shape for different vi modes.
# zle widget run on every keymap switch (registered below with `zle -N`):
# block-shaped cursor in vi normal mode, beam in insert mode.
function zle-keymap-select {
    # DECSCUSR escape: '\e[1 q' = block cursor (vicmd / explicit 'block').
    if [[ $KEYMAP == vicmd ]] || [[ $1 = 'block' ]]; then
        echo -ne '\e[1 q'
    # '\e[5 q' = beam cursor (insert mode keymaps, or explicit 'beam').
    elif [[ $KEYMAP == main ]] || [[ $KEYMAP == viins ]] || [[ $KEYMAP = '' ]] || [[ $1 = 'beam' ]]; then
        echo -ne '\e[5 q'
    fi
}
zle -N zle-keymap-select
# Start with beam shape cursor on zsh startup and after every command.
zle-line-init() { zle-keymap-select 'beam'}
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="$HOME/.sdkman"
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
| true
|
e6dafc29624cb2d8ecc592e1a45750d805434e12
|
Shell
|
neovraz/ssh_ubuntu_serve
|
/test.sh
|
UTF-8
| 187
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run install.sh and, depending on its exit status, install openssh-server.
script_path="install.sh"
bash "$script_path"
result=$?
echo "$result"
# Quoted numeric comparison: the old unquoted `[ $result = 0 ]` broke if
# $result were ever empty.
# NOTE(review): exit status 0 from install.sh is treated as "SSH already
# installed" — confirm that contract with install.sh.
if [ "$result" -eq 0 ]; then
    echo "SSH already installed"
else
    sudo apt-get install openssh-server
fi
| true
|
b9bc6852e2ef6ce6a50f4071dc6349cc85a292b2
|
Shell
|
pitakill/hashicorp-vagrant
|
/provision/hashi-stack.sh
|
UTF-8
| 2,849
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
export ENVOY_VERSION=1.13.1
export CONSUL_VERSION=1.7.5
export CONSUL_TEMPLATE_VERSION=0.25.0
export CONSUL_REPLICATE_VERSION=0.4.0
export CNI_PLUGINS=0.8.6
export NOMAD_VERSION=0.12.1
export VAULT_VERSION=1.4.3
# Install base
sudo apt-get update
sudo apt-get install -y \
software-properties-common \
unzip \
curl \
gnupg \
neovim
# Install Keys
curl -sL -s https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
sudo apt-get install -y \
docker-ce \
docker-ce-cli \
containerd.io \
apt-transport-https \
ca-certificates \
gnupg-agent \
software-properties-common \
jq \
dnsmasq
curl -L https://getenvoy.io/cli | sudo bash -s -- -b /usr/local/bin
getenvoy run standard:"${ENVOY_VERSION}" -- --version
sudo mv $HOME/.getenvoy/builds/standard/"${ENVOY_VERSION}"/linux_glibc/bin/envoy /usr/bin/envoy
cat >> ~/.bashrc <<"END"
# Coloring of hostname in prompt to keep track of what's what in demos, blue provides a little emphasis but not too much like red
NORMAL="\[\e[0m\]"
BOLD="\[\e[1m\]"
DARKGRAY="\[\e[90m\]"
BLUE="\[\e[34m\]"
PS1="$DARKGRAY\u@$BOLD$BLUE\h$DARKGRAY:\w\$ $NORMAL"
END
# Download hashi stack
curl -L https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -o consul.zip
curl -L https://releases.hashicorp.com/consul-template/${CONSUL_TEMPLATE_VERSION}/consul-template_${CONSUL_TEMPLATE_VERSION}_linux_amd64.zip -o consul-template.zip
curl -L https://releases.hashicorp.com/consul-replicate/${CONSUL_REPLICATE_VERSION}/consul-replicate_${CONSUL_REPLICATE_VERSION}_linux_amd64.zip -o consul-replicate.zip
curl -L https://github.com/containernetworking/plugins/releases/download/v${CNI_PLUGINS}/cni-plugins-linux-amd64-v${CNI_PLUGINS}.tgz -o cni-plugins.tgz
curl -L https://releases.hashicorp.com/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_linux_amd64.zip -o nomad.zip
curl -L https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip -o vault.zip
# Install consul
sudo unzip consul.zip
sudo chmod +x consul
sudo mv consul /usr/bin/consul
# Install consul-template
sudo unzip consul-template.zip
sudo chmod +x consul-template
sudo mv consul-template /usr/bin/consul-template
# Install consul-replicate
sudo unzip consul-replicate.zip
sudo chmod +x consul-replicate
sudo mv consul-replicate /usr/bin/consul-replicate
# Install momad
sudo unzip nomad.zip
sudo chmod +x nomad
sudo mv nomad /usr/bin/nomad
# Install CNI Plugin
sudo mkdir -p /opt/cni/bin
sudo tar -C /opt/cni/bin -xzf cni-plugins.tgz
# Install vault
sudo unzip vault.zip
sudo chmod +x vault
sudo mv vault /usr/bin/vault
# Add user to docker
sudo gpasswd -a $USER docker
| true
|
fdf141c75178b7226f1c521b05c6206cee7a66f6
|
Shell
|
dapphub/git-ci
|
/bin/git-submodule-bisect-prepare
|
UTF-8
| 884
| 3.734375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Emit a prefixed status line on stderr.
notif() { printf '%s\n' "== git submodule-bisect-prepare: $*" >&2; }
# Emit the message, then abort the script.
fatal() { notif "$@"; exit 1; }
submodule_dir="$1" && shift
source_branch="$1" && shift
target_branch="$1" && shift
notif "submodule: $submodule_dir"
notif "source_branch: $source_branch"
notif "target_branch: $target_branch"
current_commit=$( cd "$submodule_dir"
git rev-parse "$source_branch"
)
while true; do
commit_msg=$( cd "$submodule_dir"
git checkout "$current_commit" >&2
git log --pretty=format:'%h - %s' --max-count=1
)
git add "$submodule_dir"
git commit --allow-empty --message "submodule $submodule_dir: $commit_msg"
current_commit=$( cd "$submodule_dir"
git next "$current_commit" "$target_branch"
)
done
| true
|
37aa05d7860df2e91bc24c0f767f49c2982a9f64
|
Shell
|
AnjaWestram/Littorina_hybrid_zone_1
|
/ang10_clip_overlap.sh
|
UTF-8
| 784
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash -ve
# remove overlap between PE reads from one of the two reads (only for paired files...)
# request memory for job (default 6G, max 72G)
#$ -l mem=3G
#$ -l rmem=3G
# run time for job in hours:mins:sec (max 168:0:0, jobs with h_rt < 8:0:0 have priority)
#$ -l h_rt=02:30:00
##$ -P molecol
##$ -q molecol.q
#$ -t 1-567
##$ -tc 25
taskid=${SGE_TASK_ID}
files=`ls *_paired_dedup.bam` # all PE dedup files, e.g. ANG370_000_paired_dedup.bam
samples=`for n in $files;do echo $n | cut -d"_" -f 1,2;done | sort | uniq` # all samples, e.g. ANG370_000
sample=`echo $samples | cut -d" " -f $taskid` # get focal sample, e.g. ANG370_000
/home/bo1awx/programs/bamUtil_1.0.13/bamUtil/bin/bam clipOverlap --in $sample\_paired_dedup.bam --out $sample\_pairedNoOverl_dedup.bam --stats
| true
|
9de70a293a21e5fbb5cebeef71288536076b39a6
|
Shell
|
lowskill272/oslabs2019
|
/lab1/randomScript.sh
|
UTF-8
| 126
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Append 150 random 32-bit signed integers, space-separated on one line,
# to file1.txt.
nums=""
for ((i = 1; i < 151; i++)); do
    # od -An -N4 -i: four bytes from /dev/urandom rendered as one decimal
    # integer (path fixed from the old /dev//urandom double slash).
    num=$(od -An -N4 -i < /dev/urandom)
    nums="$num $nums"
done
# Intentionally unquoted: word splitting collapses od's padding into
# single spaces between the numbers.
# shellcheck disable=SC2086
echo $nums >> file1.txt
| true
|
d7f7c99ec019b967d61615c33f3adff7bccb302c
|
Shell
|
Silentsoul04/env
|
/linux/bin/user-terminal.sh
|
UTF-8
| 198
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Launch the user's preferred terminal; with arguments, run them inside it
# using whichever flag style the underlying emulator expects.
[ $# -eq 0 ] && exec user-terminal
# gnome-terminal separates the command with `--`; others here take -e.
if readlink "$(command -v user-terminal)" | grep -qi gnome-terminal; then
    exec user-terminal -- "${@}"
else
    exec user-terminal -e "${@}"
fi
| true
|
899012428c24d60510e25346487db7722882e82c
|
Shell
|
Abdullah-Mubark/Academic-Records-Blockchain-Network
|
/bc-config/scripts/createchannel.sh
|
UTF-8
| 759
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# import utils
. scripts/utils.sh
CHANNEL_NAME="mychannel"
# Create the application channel from channel.tx, with or without TLS
# depending on CORE_PEER_TLS_ENABLED. Command output goes to log.txt
# (`>&log.txt` redirects both stdout and stderr); res keeps the exit status
# for verifyResult.
createChannel() {
    # setGlobals 0 1: select the peer/org context (defined in scripts/utils.sh).
    setGlobals 0 1
    echo $CHANNEL_NAME
    if [ -z "$CORE_PEER_TLS_ENABLED" -o "$CORE_PEER_TLS_ENABLED" = "false" ]; then
        set -x
        peer channel create -o orderer.academicrecords.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/channel.tx >&log.txt
        res=$?
        set +x
    else
        set -x
        peer channel create -o orderer.academicrecords.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/channel.tx --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA >&log.txt
        res=$?
        set +x
    fi
    cat log.txt
    # Aborts the script with the given message if res is non-zero.
    verifyResult $res "Channel creation failed"
    echo "===================== Channel '$CHANNEL_NAME' created ===================== "
    echo
}
createChannel
| true
|
90d25b3b3c05dafe9255f65d03550bd62bf70287
|
Shell
|
ecylcje/rtl_433-hass-addons
|
/rtl_433_mqtt_autodiscovery/run.sh
|
UTF-8
| 1,088
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bashio
# Launch rtl_433_mqtt_hass.py, which publishes Home Assistant MQTT
# auto-discovery records for rtl_433 sensor topics.
# Prefer the broker managed by the Home Assistant Supervisor; otherwise
# fall back to user-configured external broker settings.
if bashio::services.available mqtt; then
echo "mqtt found in this Home Assistance instance."
MQTT_HOST=$(bashio::services mqtt "host")
MQTT_PORT=$(bashio::services mqtt "port")
export MQTT_USERNAME=$(bashio::services mqtt "username")
export MQTT_PASSWORD=$(bashio::services mqtt "password")
else
echo "Using an external mqtt broker."
MQTT_HOST=$(bashio::config "mqtt_host")
MQTT_PORT=$(bashio::config "mqtt_port")
export MQTT_USERNAME=$(bashio::config "mqtt_user")
export MQTT_PASSWORD=$(bashio::config "mqtt_password")
fi
# Topic and discovery settings from the add-on configuration.
RTL_TOPIC=$(bashio::config "rtl_topic")
DISCOVERY_PREFIX=$(bashio::config "discovery_prefix")
DISCOVERY_INTERVAL=$(bashio::config "discovery_interval")
# Optional flags passed straight through to the bridge script.
OTHER_ARGS=""
if bashio::config.true "mqtt_retain"; then
OTHER_ARGS="${OTHER_ARGS} --retain"
fi
if bashio::config.true "force_update"; then
OTHER_ARGS="${OTHER_ARGS} --force_update"
fi
echo "Starting rtl_433_mqtt_hass.py..."
# -u: unbuffered output so the add-on log streams in real time.
python3 -u /rtl_433_mqtt_hass.py -d -H $MQTT_HOST -p $MQTT_PORT -R "$RTL_TOPIC" -D "$DISCOVERY_PREFIX" -i $DISCOVERY_INTERVAL $OTHER_ARGS
| true
|
e3be4a28c2fd28709ce7f3c22112daec81be98c2
|
Shell
|
Nixalde/rsync
|
/rsync-script-local-diario
|
UTF-8
| 2,770
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Daily rotating backup: keeps ROTATIONS hard-link-seeded copies under
# TARGET and fills today's copy with rsync from SOURCES.
# (Shebang switched to bash: the original /bin/sh script relied on the
# bashism 'let' and GNU 'ls -X'.)

# Directories to back up.
SOURCES="/boot /home /usr /var /etc /sbin /bin"
# Directory that stores the rotated copies.
TARGET="/backup/rajoy-diaria"
# Number of rotations to keep. Make sure there is enough disk space.
ROTATIONS=6
# Verbose rsync output.
VERBOSE="-v"
# Timestamp naming today's backup directory.
BACKUP_DATE="$(date +%F_%H-%M)"

# Sanity checks.
if [ ! -x "$TARGET" ]; then
    echo "No existe el directorio o no tienes permisos de ejecucion"
    echo "Saliendo..."
    exit 2
fi
if [ ! "$ROTATIONS" -gt 1 ]; then
    echo "Tienes que asignar más de una rotación a ROTATIONS "
    echo "Saliendo..."
    exit 2
fi

# Walk existing backups newest-first: remember the newest, and — once the
# rotation limit is reached — the oldest (which will be recycled).
BACKUP_NUMBER=1
for backup in $(ls -dXr "$TARGET"/*/); do
    if [ "$BACKUP_NUMBER" -eq 1 ]; then
        NEWEST_BACKUP="$backup"
    fi
    if [ "$BACKUP_NUMBER" -eq "$ROTATIONS" ]; then
        OLDEST_BACKUP="$backup"
        break
    fi
    # 'let' is a bashism; POSIX arithmetic works everywhere.
    BACKUP_NUMBER=$((BACKUP_NUMBER + 1))
done

# Recycle the oldest backup if the limit was reached, otherwise create a
# fresh directory for today's copy.
if [ -n "${OLDEST_BACKUP:-}" ]; then
    mv "$OLDEST_BACKUP" "$TARGET/$BACKUP_DATE"
else
    mkdir "$TARGET/$BACKUP_DATE"
fi

# Seed today's copy with hard links to the most recent backup.
if [ -n "${NEWEST_BACKUP:-}" ]; then
    cp -al "${NEWEST_BACKUP}." "$TARGET/$BACKUP_DATE"
fi

# Verify the rotation produced a usable target directory.
if [ ! -d "$TARGET/$BACKUP_DATE" ]; then
    echo "No se puede escribir en el directorio asignado al backup"
    echo "Saliendo..."
    exit 2
fi

echo "Verificando origen..."
for source in $SOURCES; do
    echo "Comprobando $source..."
    if [ ! -x "$source" ]; then
        echo "Error con $source!"
        echo "El directorio no existe o no tienes permisos de ejecucion"
        exit 2
    fi
done

# Locate the latest full (weekly) backup for --compare-dest.
# BUG FIX: the original line was `LATEST_WEEK_BACKUP= "ls ..."`, which set
# the variable to empty and then tried to *execute* the quoted string as
# a command. It is now a real command substitution.
# NOTE(review): 'ls -Ft' lists the current directory — confirm whether it
# should list /backup/rajoy instead.
LATEST_WEEK_BACKUP=$(ls -Ft | grep '/backup/rajoy/$' | tail -1)

echo "Origen comprobado. Iniciando rsync..."
for source in $SOURCES; do
    # Recreate the directory hierarchy inside today's backup.
    if [ ! -d "$TARGET/$BACKUP_DATE/$source" ]; then
        mkdir -p "$TARGET/$BACKUP_DATE/$source"
    fi
    # Record the list of installed packages.
    dpkg-query -f '${binary:Package}\n' -W > /usr/local/bin/package-selections
    # Incremental copy relative to the latest weekly backup.
    if [ ! -d "/backup/rajoy/$BACKUP_DATE" ] && [ ! -d "/backup/rajoy-mensual/$BACKUP_DATE" ]; then
        rsync $VERBOSE --compare-dest="$LATEST_WEEK_BACKUP" -z -a --delete "$source/" "$TARGET/$BACKUP_DATE/$source/"
    fi
done
exit 0
| true
|
1d0b5d12a7210d76184ec44e857749da8762f779
|
Shell
|
shaundomingo/coreos-units
|
/clusterable-wordpress/redeploy-coreos.sh
|
UTF-8
| 353
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rotate the CoreOS etcd discovery token in user-data, then rebuild the
# Vagrant cluster from scratch.
unit_dir=$(pwd)
vagrant_dir=../../coreos-vagrant
cd ${vagrant_dir}
# Request a fresh discovery URL for a 3-node cluster.
discovery_url=$(curl "https://discovery.etcd.io/new?size=3")
echo "New CoreOS Discovery URL: ${discovery_url}"
# Escape '/' and '&' so the URL is safe on the sed replacement side.
escaped_url=$(echo $discovery_url | sed -e 's/[\/&]/\\&/g')
sed -i.bak "s/https:\/\/discovery.etcd.io\/.*/$escaped_url/g" user-data
# Tear down and rebuild the cluster with the new token.
vagrant destroy -f
vagrant box update
vagrant up
cd ${unit_dir}
| true
|
5b0262fcdcc915b55bfd78db53dd559938db1984
|
Shell
|
timdaman/quartermaster
|
/deploy/build_docker_images.sh
|
UTF-8
| 412
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build all Docker images, regenerating the Django static files first so
# they can be baked into the images.
set -eux

# Guard: relative paths below assume the repository root.
if [ ! -d quartermaster_server ]; then
    # BUG FIX: was 'ehco', which is not a command, so the guard message
    # never printed (it now also goes to stderr).
    echo "This script should be run from the repository root" >&2
    exit 1
fi

# Generate static files from Django
STATIC_FILES_DIR="$PWD/deploy/static"
rm -rf "$STATIC_FILES_DIR"
docker-compose build backend
docker-compose run -v "$STATIC_FILES_DIR":/deploy/static backend ./manage.py collectstatic --noinput

# Build everything else
docker-compose build
| true
|
ee7ae8cea2d3b237f95328d7708e4487b0ab06ee
|
Shell
|
nova-video-player/aos-AVP
|
/android-setup-light.sh
|
UTF-8
| 1,887
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Light Android build setup: detect the host OS, locate the Android SDK,
# NDK, JDK 17 and CMake, and prepend their tool directories to PATH.
# Intended to be sourced so the exports affect the calling shell.
case `uname` in
Linux)
READLINK=readlink
SED=sed
export CORES=$((`nproc`+1))
;;
Darwin)
# assumes brew install coreutils in order to support readlink -f on macOS
READLINK=greadlink
SED=gsed
export CORES=$((`sysctl -n hw.logicalcpu`+1))
;;
esac
# android sdk directory is changing
# Accept any of the conventional SDK variables; later assignments win.
[ -n "${ANDROID_HOME}" ] && androidSdk=${ANDROID_HOME}
[ -n "${ANDROID_SDK}" ] && androidSdk=${ANDROID_SDK}
[ -n "${ANDROID_SDK_ROOT}" ] && androidSdk=${ANDROID_SDK_ROOT}
# java8/java11/java17 paths
case `uname` in
Linux)
JAVA17=$(update-alternatives --list java | sed -nE -e 's/(.*java-17[^/]*).*/\1/p')
[ -z "$JAVA17" ] && JAVA17=$(update-alternatives --list java | sed -nE -e 's/(.*jdk-17[^/]*).*/\1/p')
;;
Darwin)
JAVA17=$(/usr/libexec/java_home -v 17)
;;
esac
[ -n "$JAVA17" ] && export PATH=${JAVA17}/bin:$PATH
export PATH=${androidSdk}/cmdline-tools/latest/bin:${androidSdk}/cmdline-tools/bin:${androidSdk}/tools/bin:$PATH
#${NDK_PATH}
#/opt/android-sdk/ndk/21.1.6352462/prebuilt/darwin-x86_64/
# android tools
#yes | sdkmanager 'cmdline-tools;latest' 'ndk;23.1.7779620' 'cmake;3.18.1' 'build-tools;30.0.3' > /dev/null
# Newest installed NDK wins (version sort, last entry).
[ -d "${androidSdk}/ndk" ] && NDK_PATH=$(ls -d ${androidSdk}/ndk/* | sort -V | tail -n 1)
echo NDK_PATH is ${NDK_PATH}
export ANDROID_NDK_HOME=${NDK_PATH}
export ANDROID_NDK_ROOT=${NDK_PATH}
# latest cmake
[ -d "${androidSdk}/cmake" ] && CMAKE_PATH=$(ls -d ${androidSdk}/cmake/* | sort -V | tail -n 1)
echo CMAKE_PATH is ${CMAKE_PATH}
export PATH=$CMAKE_PATH/bin:$PATH
# make sure we use first sdk/ndk and not host tools
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
PREBUILT=prebuilt/${OS}-$(uname -m)
export PATH=${NDK_PATH}/$PREBUILT/bin:$PATH
echo PREBUILT_PATH is ${NDK_PATH}/$PREBUILT
export PATH=${NDK_PATH}/toolchains/llvm/$PREBUILT/bin:$PATH
echo LLVM_PATH is ${NDK_PATH}/toolchains/llvm/$PREBUILT
| true
|
6f2cac72f1250b7e1a2e65a9fde4286b1887fe10
|
Shell
|
adrewasa/control
|
/start-server.sh
|
UTF-8
| 313
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Assemble the Java classpath from the local build jars plus imported
# dependency jars, then start the server UI.
cp ../base/build/libs/*.jar ../lib/import/
main_class=com.asa.computer.ui.server.ServerUi
cp_jars=$(find build/libs -name '*.jar')
import_path=./
for dep in $(find ../lib/import/ -name '*.jar'); do
    import_path="$import_path:$dep"
done
cp_jars="$cp_jars:$import_path"
java -classpath $cp_jars $main_class
| true
|
0d3852982a9d113c8003f1d62c932b7e6e34f5b0
|
Shell
|
brianchewson/bash-tools
|
/AllImgXmlRandomOrders.sh
|
UTF-8
| 1,232
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a slideshow-background XML file ($1) that cycles through the
# image files of the current directory in random order, with random
# per-image durations.
# (Shebang switched to bash: the script uses C-style for loops.)
set -e

XML=$1
RANDOM_ORDER_FILE=t.rand
P=$(pwd)

# Start fresh. -f so the first run (no existing output file) does not
# abort under 'set -e' — the original bare 'rm $XML' did exactly that.
rm -f "$XML"

# Static header: the animation starts at a fixed point in time.
echo "<background>" >> "$XML"
echo " <starttime>" >> "$XML"
echo " <year>2009</year>" >> "$XML"
echo " <month>08</month>" >> "$XML"
echo " <day>04</day>" >> "$XML"
echo " <hour>00</hour>" >> "$XML"
echo " <minute>00</minute>" >> "$XML"
echo " <second>00</second>" >> "$XML"
echo " </starttime>" >> "$XML"
echo "<!-- This animation will start at midnight. -->" >> "$XML"

gore=""
# Repeat the shuffled directory listing a random number of times.
SIZE=$(shuf -i 1-100 -n 1)
for (( i=0; i<SIZE; i++ )); do
    # Snapshot the directory, then drop xml files, scripts and the
    # helper file itself from the candidate list.
    ls -1 > ${RANDOM_ORDER_FILE}
    for FILE in xml \.sh ${RANDOM_ORDER_FILE}; do
        sed -i "/${FILE}/d" ${RANDOM_ORDER_FILE}
    done
    sort -R ${RANDOM_ORDER_FILE} -o ${RANDOM_ORDER_FILE}
    # For each file: a quick transition from the previous image, then a
    # static display of random duration.
    while read line; do
        echo " <transition>" >> "$XML"
        echo " <duration>0.1</duration>" >> "$XML"
        echo " <from>$P/$gore</from>" >> "$XML"
        echo " <to>$P/$line</to>" >> "$XML"
        echo " </transition>" >> "$XML"
        echo " <static>" >> "$XML"
        echo " <duration>$(shuf -i 1-100 -n 1)</duration>" >> "$XML"
        echo " <file>$P/$line</file>" >> "$XML"
        echo " </static>" >> "$XML"
        gore=$line
    done < ${RANDOM_ORDER_FILE}
done
echo "</background>" >> "$XML"
# Patch the very first (empty) <from> to point at the last image so the
# slideshow loops cleanly.
sed -i "s#/<#/$gore<#" "${XML}"
| true
|
db7715444eea7a33e88d56f261dd5c6c9d2368d5
|
Shell
|
ViniciusLisboa07/Atividade-Pr-tica-Shell-Linux
|
/3/entrada.sh
|
UTF-8
| 713
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive stopwatch: E starts counting, S exits; while running,
# pressing F (or f) stops the clock and hands H:M:S to ./saida.sh.
clear
# Elapsed seconds / minutes / hours.
SS=0
MM=0
HH=0
echo "--CRONÔMETRO--"
echo "E - INICIAR"
echo "S - SAIR"
echo "Obs: Digite F a qualquer momento para finalizar o cronômetro!"
echo
echo -n "Resposta: "
read EX
echo
if [ $EX = "E" ]||[ $EX = "e" ]
then
# One tick per iteration: 'sleep 1' always succeeds, so this loops
# until F is pressed.
while (sleep 1)
do
# Roll seconds over into minutes.
if [ $SS -gt 59 ]
then
SS=0
MM=$(echo "$MM + 1" | bc)
fi
# Roll minutes over into hours.
if [ $MM -gt 59 ]
then
MM=0
HH=$(echo "$HH +1" | bc)
fi
# Midnight rollover.
if [ $HH -gt 23 ]
then
MM=0
HH=0
# NOTE(review): this second HH=0 is redundant — it was probably meant
# to be SS=0; confirm the intended rollover behavior.
HH=0
fi
# \r redraws the counter in place on the same line.
echo -n -e "\rContando... $HH:$MM:$SS "
SS=$(echo "$SS+1" | bc)
# Wait up to 1 second for a single keypress.
read -n1 -t 1 FINALIZAR
case "$FINALIZAR" in
f) echo
./saida.sh $HH $MM $SS
exit
;;
F) echo
./saida.sh $HH $MM $SS
exit
;;
esac
done
elif [ $EX = "S" ]||[ $EX = "s" ]
then
exit
else
# Any other answer: restart the prompt.
./entrada.sh
fi
| true
|
55c09ece881953a0802d03cf1b0d075f4c5918a6
|
Shell
|
ARoS-NCSU/WolfBot-Software
|
/salt/files/etc/dhcp/dhclient-exit-hooks.d/hostname
|
UTF-8
| 938
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Filename: /etc/dhcp3/dhclient-exit-hooks.d/hostname
# Purpose: Used by dhclient-script to set the hostname of the system
# to match the DNS information for the host as provided by
# DHCP.
# Depends: dhcp3-client (should be in the base install)
# hostname (for hostname, again, should be in the base)
# bind9-host (for host)
# coreutils (for cut and echo)
#
# Note: this file is *sourced* by dhclient-script, which is why a bare
# 'return' is valid here and why $reason / $new_ip_address are in scope.
if [ "$reason" != BOUND ] && [ "$reason" != RENEW ] \
&& [ "$reason" != REBIND ] && [ "$reason" != REBOOT ]
then
return
fi
echo dhclient-exit-hooks.d/hostname: Dynamic IP address = $new_ip_address
#hostname=$(host $new_ip_address | cut -d ' ' -f 5)
# Resolve the new address via NSS (getent) rather than DNS-only lookup.
hostname=$(getent hosts $new_ip_address | awk '{print $2}')
echo $hostname > /etc/hostname
hostname $hostname
echo dhclient-exit-hooks.d/hostname: Dynamic Hostname = $hostname
# And that _should_ just about do it...
| true
|
0d8e36d1683d14438a4ff8c9552bae2ae471ba2a
|
Shell
|
sreesowmya/gittest
|
/argf.sh
|
UTF-8
| 504
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Demonstrates three ways of passing arguments to shell functions.

### function 1: print the arguments it was given
func1()
{
    echo "${FUNCNAME}()"
    echo "first argument is $1"
    echo "sec argument is $2"
    echo ""
    echo "total arguments passed are $#"
}

### function 2: add its two arguments
func2()
{
    echo "${FUNCNAME}()"
    a=$1
    b=$2
    echo "$a + $b=`expr $a + $b`"
}

### function 3: read two numbers interactively, forward them to func2
func3()
{
    echo "${FUNCNAME}()"
    echo "enter x"
    read x
    echo "enter y"
    read y
    func2 $x $y
    echo "end of script"
}

## sample invocations (disabled)
#func1
#func2
#func3
| true
|
600644bfbafcd511caba81f08e42b7fdd9b09be9
|
Shell
|
home-assistant/hassbian-scripts
|
/package/opt/hassbian/helpers/suite/helper
|
UTF-8
| 4,395
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Helper script for hassbian-config.
# BUG FIX: shebang was "#!bin/bash" (missing leading slash), which is
# not a valid interpreter path.
# Globals shared by the helper functions below.
declare packages
declare suitename
declare manifestkey
function hassbian.suite.helper.install.pip {
# Purpose: Install python packages in the homeassistant venv.
# Usage:
# hassbian.suite.helper.install.pip sampleclient
# hassbian.suite.helper.install.pip sampleclient someotherpackage
local logcheck
packages=$@
echo "Changing to homeassistant user..."
sudo -u homeassistant -H /bin/bash << EOF
echo "Changing to Home Assistant venv..."
source /srv/homeassistant/bin/activate
echo "Updating Python dependencies..."
python -m pip install --upgrade pip --no-cache-dir
python -m pip install --upgrade setuptools --no-cache-dir
python -m pip install --upgrade wheel --no-cache-dir
echo "Installing Python packages..."
python -m pip install --upgrade $packages
source "$HASSBIAN_HELPERS_DIR"/workaround
hassbian.workaround.check $packages
echo "Deactivating virtualenv..."
deactivate
EOF
}
function hassbian.suite.helper.install.apt {
# Purpose: Install apt packages.
# Usage:
# hassbian.suite.helper.install.apt sampleclient
# hassbian.suite.helper.install.apt sampleclient someotherpackage
packages=$@
echo "Updating apt..."
apt update
echo "Installing packages..."
apt install -y $packages
}
function hassbian.suite.helper.install.node {
# Purpose: Install node.
# Usage:
# hassbian.suite.helper.install.node sampleclient
local node
local nodeversion
nodeversion='11'
node=$(command -v npm)
if [ -z "${node}" ]; then
echo "Downloading and installing NodeJS..."
curl -sL https://deb.nodesource.com/setup_"$nodeversion".x | bash -
hassbian.suite.helper.install.apt nodejs
else
echo "NodeJS is installed."
fi
}
function hassbian.suite.helper.manifest {
# Purpose: return info from the suite manifest.
# Usage:
# hassbian.suite.helper.manifest [suite] [key]
# hassbian.suite.helper.manifest duckdns author
# Reads the requested key from the suite's JSON manifest with jq -r and
# prints the raw value (no trailing newline).
local manifestvalue
suitename="$1"
manifestkey="$2"
manifestvalue=$(jq ."$manifestkey" -r "$HASSBIAN_SUITE_DIR"/"$suitename"/manifest)
printf "%s" "$manifestvalue"
}
function hassbian.suite.helper.blockcheck {
# Check the manifest if the suite is blocked.
# If blocked, print the message and exit.
# Usage:
# hassbian.suite.helper.blockcheck [suite]
# hassbian.suite.helper.blockcheck duckdns
local blocked
local blocked_releasename
local blocked_message
local osrelease
suitename="$1"
blocked=$(hassbian.suite.helper.manifest "$suitename" blocked)
if [ "$blocked" = true ]; then
osrelease=$(hassbian.info.version.osreleasename)
blocked_releasename=$(hassbian.suite.helper.manifest "$suitename" blocked_releasename)
if [ "$blocked_releasename" == "ALL" ] || [ "$blocked_releasename" == "$osrelease" ]; then
blocked_message=$(hassbian.suite.helper.manifest "$suitename" blocked_message)
echo -e "$blocked_message"
exit
fi
fi
}
function hassbian.suite.helper.exist {
# Purpose: Check if a suite exist.
# Usage:
# hassbian.suite.helper.exist [suite]
# hassbian.suite.helper.exist duckdns
suitename="$1"
# Check if suite directory exist.
if [ ! -d "$HASSBIAN_SUITE_DIR/$suitename" ]; then
echo "$suitename does not exist."
exit 1
fi
# Check if suite manifest file exist.
if [ ! -f "$HASSBIAN_SUITE_DIR/$suitename/manifest" ]; then
echo "$suitename are missing manifest file."
exit 1
fi
}
function hassbian.suite.helper.pizerocheck {
# Check for suites not able to install on a Raspberry Pi Zero.
# Prints a notice and returns 0 when the suite is known not to work on
# Pi Zero boards (identified by their cpuinfo revision codes).
local REVCODE
suitename="$1"
# Read the revision code straight with grep (the former 'cat | grep'
# was a useless use of cat).
REVCODE=$(grep 'Revision' /proc/cpuinfo | \
awk '{print $3}' | sed 's/^ *//g' | sed 's/ *$//g')
if [ "$REVCODE" = "90092" ] || \
[ "$REVCODE" = "90093" ] || \
[ "$REVCODE" = "0x9000C1" ] || \
[ "$REVCODE" = "9000c1" ]; then
if [[ "$suitename" =~ ^(mosquitto|cloud9|zigbee2mqtt)$ ]]; then
echo "This suite can't be installed on Raspberry Pi Zero..."
return 0
fi
fi
}
function hassbian.suite.helper.action.success {
# Print message to the console on success action.
printf "\\n\\e[32mOperation completed...\\e[0m\\n\\n"
}
function hassbian.suite.helper.action.failed {
# Print message to the console on failed action.
printf "\\n\\e[31mOperation failed...\\e[0m\\n\\n"
}
[[ "$_" == "$0" ]] && echo "$ECHO_HELPER_WARNING"
| true
|
f2985982442b8d62f4f3d0c04c100a3de4bda889
|
Shell
|
angelbonet/smb-service-restart
|
/smb-restart.sh
|
UTF-8
| 382
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Author: Angel Bonet
# 10/06/2019
#
# Restart macOS file sharing by unloading and reloading the smbd launchd
# daemon, then print a one-line log record.
# NOTE(review): the admin password is embedded in the script — prefer a
# sudoers rule or prompting; verify before deploying.

# Stop service
echo yourAdminPassword | sudo -S launchctl unload -w /System/Library/LaunchDaemons/com.apple.smbd.plist
wait $!
# Start again service
echo yourAdminPassword | sudo -S launchctl load -w /System/Library/LaunchDaemons/com.apple.smbd.plist
wait $!
# End process, create log
echo $(date) " " $(whoami) " smb service restarted."
| true
|
715cfd05a22b78f507a71ff81ac625acfd91a3d6
|
Shell
|
AdamStepan/exfuse
|
/zsh-completions/exmkfs.sh
|
UTF-8
| 314
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/zsh
# zsh completion for the exmkfs command.
# Register _exmkfs as the completion function for 'exmkfs'.
compdef _exmkfs exmkfs
# Each _arguments spec is '--option[description]:message:action'; an
# empty action just names the expected argument, _files completes paths.
function _exmkfs() {
_arguments '--device[device name]:filename:_files' \
'--inodes[number of inodes]:number:' \
'--size[size of a device]:size:' \
'--create[create device]' \
'--log-level[log level]:level:(debug info warning error fatal)'
}
| true
|
74f1381f7f8a161108fb5af208a75d5039bd86d1
|
Shell
|
somlyr/mansion
|
/mansion_verify.sh
|
UTF-8
| 1,949
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Verify and decrypt a mansion package (.pkg): extract it, recover the
# AES key from the RSA signature with the supplied public key, then
# decrypt the payload into the output directory.
set -e

usage()
{
    echo "${0} Usage:"
    echo " -i <inputfile>, the encrypted mansion package(.pkg) wait to be dencrypted."
    echo " -o <outputdir>, the output dir for package verify and dencrpytion output."
    echo " default: inputfile's dir."
    echo " -k <pubkey>, the RSA public key for this dencryption."
}

OPTS="i:k:o:"
while getopts ${OPTS} OPT; do
    case ${OPT} in
    i)
        export MANSION_SRC_ROOT=$(dirname ${OPTARG})
        export MANSION_SRC_FILE=$(basename ${OPTARG})
        # Release file name = package name without the .pkg suffix.
        export MANSION_RLS_FILE=${MANSION_SRC_FILE%%.pkg}
        ;;
    o)
        export MANSION_RLS_ROOT=${OPTARG}
        ;;
    k)
        export MANSION_KEY_ROOT=$(dirname ${OPTARG})
        export MANSION_KEY_PUB=$(basename ${OPTARG})
        ;;
    *)
        echo "${0}: Unknown argument '${OPT}'" >&2
        usage
        exit 1
        ;;
    esac
done

if [ -z "${MANSION_SRC_ROOT}" -o \
     -z "${MANSION_SRC_FILE}" -o \
     -z "${MANSION_RLS_FILE}" -o \
     -z "${MANSION_KEY_ROOT}" -o \
     -z "${MANSION_KEY_PUB}" ]; then
    echo "${0}: Missing argument" >&2
    usage
    exit 1
fi

# Output directory defaults to the input file's directory.
if [ -z "${MANSION_RLS_ROOT}" ]; then
    export MANSION_RLS_ROOT=${MANSION_SRC_ROOT}
fi

# BUG FIX: the original echoed ${MANSION_RLS_PKG}, a variable that is
# never set anywhere; report the actual input file instead.
echo "Verify ${MANSION_SRC_FILE} for mansion......"
echo "--------------"
echo " Preparing the verify environment..."
TMP_BUILD_DIR=$(mktemp -d)
echo " Dencryped to: ${TMP_BUILD_DIR}/"
# NOTE(review): each step below ends in '|| true', so failures are
# silently ignored and the script always reports success — confirm this
# best-effort behavior is really intended for a *verify* tool.
tar -xf ${MANSION_SRC_ROOT}/${MANSION_SRC_FILE} -C ${TMP_BUILD_DIR} || true
openssl pkeyutl -verifyrecover -in ${TMP_BUILD_DIR}/${MANSION_RLS_FILE}.sig \
    -pubin -inkey ${MANSION_KEY_ROOT}/${MANSION_KEY_PUB} \
    -out ${TMP_BUILD_DIR}/${MANSION_RLS_FILE}.key || true
echo " Key ReGenerated."
openssl enc -aes-256-ecb -in ${TMP_BUILD_DIR}/${MANSION_RLS_FILE}.tgt \
    -out ${TMP_BUILD_DIR}/${MANSION_RLS_FILE} \
    -kfile ${TMP_BUILD_DIR}/${MANSION_RLS_FILE}.key -d || true
echo " Verify Ok: ${MANSION_RLS_FILE}"
mv -f ${TMP_BUILD_DIR}/${MANSION_RLS_FILE} ${MANSION_RLS_ROOT}/${MANSION_RLS_FILE} || true
echo " Clean over the verify enviroment..."
rm -rf ${TMP_BUILD_DIR}
echo " Done."
| true
|
5ec4e577a9fb28e054ffb5a53227654406218bb7
|
Shell
|
appcrawler/ksql-hybris-poc
|
/cleanup-ksql.sh
|
UTF-8
| 954
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
<<HEADER
*******************************************************************************************
Author: Steve Howard
Date: April 20, 2020
Purpose: Cleanup KSQL application objects
Revisions: 2020-04-20 - Initial copy (SDH)
*******************************************************************************************
HEADER
# (The here-doc above acts as a block comment: it is a redirection with
# no command, so its contents are never executed.)
# 1) Terminate every persistent/insert query first (they hold the
#    streams and tables open).
echo "list queries;" | \
ksql | \
awk '$1 ~ "^CSA|^CTA|InsertQuery" {print "terminate",$1";"}' > queries.txt
cat queries.txt | ksql
# 2) Drop every user stream (skipping the prompt and the processing log).
echo "list streams;" | \
ksql | \
awk '{if ($0 ~ "Stream Name") {prnt=1;} else if(prnt==1 && length($0) > 1 && $1 != "ksql>" && $1 != "KSQL_PROCESSING_LOG") {print "drop stream",$1";"}}' > streams.txt
cat streams.txt | ksql
# 3) Finally drop every user table.
echo "list tables;" | \
ksql | \
awk '{if ($0 ~ "Table Name") {prnt=1;} else if(prnt==1 && length($0) > 1 && $1 != "ksql>" && $1 != "KSQL_PROCESSING_LOG") {print "drop table",$1";"}}' > tables.txt
cat tables.txt | ksql
| true
|
d43ac6d26047061b7f40f6591e6c1cdea729da11
|
Shell
|
JulianCanoDev/holberton-system_engineering-devops
|
/0x07-networking_basics/5-is_the_host_on_the_network
|
UTF-8
| 135
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Ping a host five times to check whether it is reachable on the network.
if [ "$#" -eq 0 ]; then
    echo "Usage: 5-is_the_host_on_the_network {IP_ADDRESS}"
    exit 1
fi
ping -c 5 "$1"
| true
|
39842c814ec4843e4fef00c0b4cda818ff6df718
|
Shell
|
GimhanAkalanke/DEV_OPS
|
/bootstrap.sh
|
UTF-8
| 2,758
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a training VM: install git, clone the DEV_OPS repository and
# hand off to sysconfig.sh, logging every step to LOGFILE.

LOGFILE="/var/log/DEV_OPS_training.log"
SYSLOGFILE="/var/log/DEV_OPS_trainingSYS.log"
GITPATH="/opt/DEV_OPS"
GITUSER="GimhanAkalanke"
GITEMAIL="myinbox.gm@gmail.com"
# SECURITY(review): plaintext credentials in the script — move to a
# credential helper or deploy key before real use.
GITPW="Vcs_1234"
GITREPO="https://github.com/GimhanAkalanke/DEV_OPS.git"
#RTPW="vcs@1234"

#####EXIT CODES#####
##100### Changing to a directory failed
##101### Config file download from GIT failed

echo "DEV_OPS_Trainng Log" > ${LOGFILE}
echo "===================" >> ${LOGFILE}
echo "Installing GIT....." >> ${LOGFILE}
if yum install -y git
then
    echo "[NORMAL] GIT Install Completed" >> ${LOGFILE}
    # BUG FIX: 'git -version' is not a valid git option; use --version.
    git --version >> ${LOGFILE}
else
    echo "[WARNING] GIT Installation Failed" >> ${LOGFILE}
fi
echo "===================" >> ${LOGFILE}
echo "[NORMAL] Making ENV for GIT repository" >> ${LOGFILE}
if [ ! -d "$GITPATH" ]
then
    if mkdir -p $GITPATH
    then
        echo "[NORMAL] \"$GITPATH\" directory created" >> ${LOGFILE}
    else
        echo "[MAJOR] \"$GITPATH\" directory create failed" >> ${LOGFILE}
    fi
else
    echo "[WARNING] \"$GITPATH\" directory already exsists" >> ${LOGFILE}
fi
if cd $GITPATH
then
    echo "[NORMAL] Changed to $GITPATH"
else
    echo "[CRITICAL] Changing to $GITPATH failed.. Exiting..100"
    exit 100
fi
echo "[NORMAL] Making ENV for GIT repository Completed" >> ${LOGFILE}
echo "===================" >> ${LOGFILE}
echo "[NORMAL] GIT repository configuration" >> ${LOGFILE}
git config --global user.name $GITUSER ##No harm if failed at this point
git config --global user.email $GITEMAIL ##No harm if failed at this point
if git init
then
    if git remote add origin $GITREPO
    then
        echo "[NORMAL] GIT repository configuration Completed" >> ${LOGFILE}
    else
        echo "[MAJOR] GIT repository initializing Failed" >> ${LOGFILE}
    fi
else
    echo "[CRITICAL] GIT repository configuration Failed" >> ${LOGFILE}
fi
echo "===================" >> ${LOGFILE}
echo "[NORMAL] Pulling config files" >> ${LOGFILE}
if git pull https://$GITUSER:$GITPW@github.com/GimhanAkalanke/DEV_OPS.git master
then
    echo "[NORMAL] Pulling config files Completed" >> ${LOGFILE}
else
    echo "[CRITICAL] Pulling config files Failed.Cannot run further. Exiting....101" >> ${LOGFILE}
    exit 101
fi
chmod +x $GITPATH/sysconfig.sh
echo "[NORMAL] Changing to sysconfig.sh for system configuration" >> ${LOGFILE}
$GITPATH/sysconfig.sh &> $SYSLOGFILE
# BUG FIX: the original wrote 'if $? -eq 0', which tried to *execute*
# the numeric status as a command; wrap the comparison in [ ].
if [ $? -eq 0 ]
then
    echo "[NORMAL] System configuration completed successfully" >> ${LOGFILE}
else
    echo "[WARNING] System configuration failed. Rerun sysconfig.sh or configure manually. GOOD LUCK" >> ${LOGFILE}
fi
echo "===================" >> ${LOGFILE}
echo "====+END OF SR=====" >> ${LOGFILE}
|
c6340667a8186e803aa3436ef102e0a295227692
|
Shell
|
oznakn/dotfiles
|
/polybar/launch.sh
|
UTF-8
| 806
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Restart polybar, choosing the monitor layout from connected outputs.
# BUG FIX: shebang changed from 'sh' to 'bash' — the script uses [[ ]]
# conditionals, which are bashisms and fail under dash/POSIX sh.

# Terminate already running bar instances
killall -q polybar

# Wait until the processes have been shut down
while pgrep -u "$UID" -x polybar >/dev/null; do sleep 1; done

# WORK_MODE="left"

if type "xrandr"; then
    # External HDMI monitor attached: layout depends on WORK_MODE.
    if [[ $(xrandr --query | grep "HDMI2 connected" | wc -l) == 1 ]]; then
        if [[ "$WORK_MODE" == "left" ]]; then
            MONITOR="eDP1" polybar --reload main &
            TRAYPOS="right" MONITOR="HDMI2" polybar --reload main &
        elif [[ "$WORK_MODE" == "top" ]]; then
            MONITOR="HDMI2" polybar --reload main &
            TRAYPOS="right" MONITOR="eDP1" polybar --reload main &
        elif [[ "$WORK_MODE" == "dup" ]]; then
            # MONITOR="eDP1" polybar --reload main &
            TRAYPOS="right" MONITOR="HDMI2" polybar --reload main &
        fi
    else
        # Laptop panel only.
        TRAYPOS="right" MONITOR="eDP1" polybar --reload main &
    fi
fi
| true
|
a32b25677f44934a4339589ef7db0cc758826dd0
|
Shell
|
atong01/personal
|
/macfiles/macos
|
UTF-8
| 2,608
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Apply a set of macOS 'defaults' preferences, then restart the affected
# applications so the changes take effect.
# Thanks to Mathias Bynens! https://mths.be/macos

# Ask for the administrator password upfront
sudo -v

# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
# (background loop; exits when this script's PID ($$) disappears).
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &

# Reveal IP address, hostname, OS version, etc. when clicking the clock
# in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName

# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true

# Finder: show all files
# defaults write com.apple.finder AppleShowAllFiles -bool true

# Show the ~/Library folder
chflags nohidden $HOME/Library

# Dark menu bar and dock
defaults write $HOME/Library/Preferences/.GlobalPreferences.plist AppleInterfaceTheme -string "Dark"

# Show only open applications in the Dock
defaults write com.apple.dock static-only -bool true

# Privacy: don’t send search queries to Apple
defaults write com.apple.Safari UniversalSearchEnabled -bool false
defaults write com.apple.Safari SuppressSearchSuggestions -bool true

###############################################################################
# Mail                                                                        #
###############################################################################

# Disable send and reply animations in Mail.app
defaults write com.apple.mail DisableReplyAnimations -bool true
defaults write com.apple.mail DisableSendAnimations -bool true

# Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false

# Disable inline attachments (just show the icons)
defaults write com.apple.mail DisableInlineAttachmentViewing -bool true

# Show 24 hours a day
defaults write com.apple.ical "number of hours displayed" 24

# Week should start on Monday
defaults write com.apple.ical "first day of the week" 1

# Day starts at 9AM (minute 540 = 9 * 60)
defaults write com.apple.ical "first minute of work hours" 540

###############################################################################
# Kill affected applications                                                  #
###############################################################################

# Restart each touched app; killall errors (app not running) are discarded.
for app in "Activity Monitor" "Address Book" "Calendar" "Contacts" "cfprefsd" \
"Dock" "Finder" "Mail" "Messages" "Photos" "Safari" "SystemUIServer" \
"Terminal" "Tweetbot" "iCal"; do
killall "${app}" &> /dev/null
done

echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
a2dc8b572a10e4b1fd82e2937ca99045f712d81b
|
Shell
|
schwer-q/continuous-core
|
/pkg/lib/common.sh
|
UTF-8
| 2,668
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh -
# Common helpers and environment for the LFS package build scripts.
set -e        # abort on unhandled command failure
set +h        # do not hash command locations
# BUG FIX: POSIX test uses '=' for string equality; '==' is a bashism
# that errors out when /bin/sh is dash.
test "$DEBUG" = "yes" && set -x
CURDIR=$(pwd)
LC_ALL="POSIX"
export CURDIR LC_ALL
# Build-environment defaults, overridable from the caller's environment.
: ${DESTDIR="/"}
: ${HOME="/root"}
: ${LFS="/home/lfs"}
: ${LFS_TGT="$(uname -m)-lfs-linux-gnu"}
: ${PATH="/bin:/usr/bin:/sbin:/usr/sbin:/tools/bin"}
export DESTDIR HOME LFS LFS_TGT PATH
do_build() {
    # Run the package's _build step (skipped when NO_BUILD=yes).
    # BUG FIX: '=' instead of the bashism '==' for /bin/sh portability.
    test "$NO_BUILD" = "yes" && return 0
    # printf is portable; 'echo -e -n' is undefined under POSIX sh.
    printf '\tBuilding...'
    log_phase "build"
    if test "$USE_EXT_BUILD" = "yes"; then
        # Out-of-tree build directory next to the unpacked sources.
        PKG_BUILD="$(readlink -f ${PKG_SOURCES}/../${PKG_NAME}-build)"
        log_exec mkdir -pv $PKG_BUILD
    fi
    log_exec _build
    printf '\tdone.\n'
    return 0
}
do_clean() {
    # Remove the unpacked source tree and build dir (skipped when
    # NO_CLEAN=yes).
    # BUG FIX: '=' instead of the bashism '==' for /bin/sh portability.
    test "$NO_CLEAN" = "yes" && return 0
    # Message kept verbatim (including the original spelling).
    printf '\tCleanning...'
    log_phase "clean"
    cd $CURDIR
    log_exec rm -rf $PKG_SOURCES
    test -d "$PKG_BUILD" && log_exec rm -rf $PKG_BUILD
    printf '\tdone.\n'
    return 0
}
do_install() {
echo -e -n "\tInstalling..."
log_phase "install"
log_exec _install
if test -d ${DESTDIR}/usr/share/man; then
log_exec find ${DESTDIR}/usr/share/man -type f '(' ! -name '*.gz' ')' \
-exec gzip -f -9 '{}' ';'
fi
echo -e "\tdone."
return 0
}
do_unpack() {
test "$NO_UNPACK" == "yes" && return 0
echo -e -n "\tUnpacking..."
log_phase "unpack"
log "$PKG_ARCHIVE"
log_exec tar xf ${LFS}/sources/$PKG_ARCHIVE
cd $PKG_SOURCES
echo -e "\tdone."
return 0
}
dump_files() {
    # Replicate the entire contents of directory $1 into directory $2 by
    # streaming a tar archive between the two.
    src_dir=$1
    dst_dir=$2
    tar -cvf - -C $src_dir . | tar -xf - -C $dst_dir
}
log() {
    # Append one line to the per-package build log ($PKG_LOGFILE).
    printf '%s\n' "$*" >> $PKG_LOGFILE
}
log_exec() {
# Log the command line, then run it with stdout+stderr appended to the
# package log file.
log "$@"
cmd=$1
shift
args="$@"
# NOTE(review): $cmd/$args expand unquoted, so arguments containing
# spaces are re-split — confirm callers never pass such arguments.
$cmd $args >> $PKG_LOGFILE 2>&1
}
log_phase() {
touch $PKG_LOGFILE
_tmp=`echo "$1" | tr '[[:print:]]' '='`
log "===============================${_tmp}"
log "==========< phase: $1 >=========="
log "===============================${_tmp}"
log ""
}
log_start() {
test -e $PKG_LOGFILE && rm -f $PKG_LOGFILE
echo ""
echo "${PKG_NAME} ${PKG_VERSION}"
log "Build started on `date -u`"
start_time=`date '+%s'`
log=""
if test "$USE_EXT_BUILD" == "yes"; then
PKG_BUILD="`readlink -f ${PKG_SOURCES}/../${PKG_NAME}-build || echo ""`"
fi
return 0
}
log_stop() {
log "Build completed on `date -u`"
end_time=`date '+%s'`
_time=$(($end_time - $start_time))
day=$(($_time / 86400))
_time=$(($_time % 86400))
hour=$(($_time / 3600))
_time=$(($_time % 3600))
min=$(($_time / 60))
sec=$(($_time % 60))
log "Build time: ${day}:${hour}:${min}:${sec}"
xz -9e -f $PKG_LOGFILE
test -n "$NO_STATUS" && return 0
touch "${CURDIR}/status/${PKG_SCRIPT}-${PKG_VERSION}.done"
return 0
}
| true
|
935edea4107e4d450b2692f36d5c92765836dbb2
|
Shell
|
lucasfer97/adm-scripts
|
/scripts/openvidu_download_release.sh
|
UTF-8
| 300
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash -xe
# Download the GitHub release asset matching $RELEASE from the releases
# API endpoint $RELEASE_URL into $OUTPUT.
echo "##################### EXECUTE: openvidu_download_release #####################"
# Both the release tag and the releases API URL are required.
[ -n "$RELEASE" ] || exit 1
[ -n "$RELEASE_URL" ] || exit 1
# Pick the browser_download_url entry that matches the requested release.
asset_url=$(curl -s $RELEASE_URL | grep browser_download_url | cut -d '"' -f 4 | grep $RELEASE)
curl -L -o $OUTPUT $asset_url
| true
|
a41db662207c1a95c76fce7d8da98f637dda1905
|
Shell
|
punalpatel/kubo-ci
|
/docker-images/curl/run_tests
|
UTF-8
| 120
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Smoke-test for the image: verify the curl binary is on PATH.
# 'command -v' is the POSIX-recommended replacement for 'which'.
if command -v curl > /dev/null; then
    echo "Image is good"
else
    echo "curl is missing" >&2
    exit 1
fi
| true
|
083ae3e38f1c635d7344cd8a63765b8f77809448
|
Shell
|
33cn/plugin
|
/plugin/consensus/tendermint/tools/auto_deploy.sh
|
UTF-8
| 6,394
| 3.953125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC1078
# shellcheck disable=SC1079
# shellcheck disable=SC1117
# shellcheck disable=SC2002
# shellcheck disable=SC2003
# shellcheck disable=SC2086
# shellcheck disable=SC2091
# shellcheck disable=SC2116
# shellcheck disable=SC2129
# shellcheck disable=SC2140
# shellcheck disable=SC2162
# shellcheck disable=SC2181
package="chain33_tendermint_config.tar.gz"
log_file=".auto_deploy.log"
config_file="auto_deploy.config"
serverStr="servers"
InitLog() {
    # Start a fresh, empty log file for this run.
    rm -f ${log_file}
    touch ${log_file}
}
Log() {
    # Append a timestamped entry for this tool's log file.
    if [ -e ${log_file} ]; then
        touch ${log_file}
    fi
    local now
    now=$(date "+%Y-%m-%d %H:%M:%S")
    echo "[$now] $* ..." >>$log_file
}
GetInputFile() {
echo 'Please input the file: (such as "chain33 chain33-cli genesis.json" ...) '
read file
# todo: file detection
Log "The input file is ${file}"
}
PackageFiles() {
Log "Begin to package the files: ${file}"
tar zcf "${package}" "$file"
}
GetUserNamePasswdAndPath() {
echo "Which way to get environment? 1) Input 2) Config file"
read choice
if [ "${choice}" -eq 1 ]; then
echo 'Please input the username, password and path of the destination: (such as "ubuntu 123456 /home/ubuntu/chain33")'
read destInfo
username=$(echo "${destInfo}" | awk -F ' ' '{print $1}')
password=$(echo "${destInfo}" | awk -F ' ' '{print $2}')
remote_dir=$(echo "${destInfo}" | awk -F ' ' '{print $3}')
echo 'Please input ip list of your destination: (such as "192.168.3.143 192.168.3.144 192.168.3.145 192.168.3.146")'
read -a iplist
index=0
CreateNewConfigFile
for ip in "${iplist[@]}"; do
index=$((index + 1))
{
echo "[servers.${index}]"
echo "userName:${username}"
echo "password:${password}"
echo "hostIp:${ip}"
echo "path:${remote_dir}"
} >>${config_file}
done
Log "The dest ip is ${ip} and path is ${remote_dir}"
elif [ "${choice}" -eq 2 ]; then
ShowConfigInfo
echo "Does the config of destination right?(yes/no)"
read input
if [ "X${input}" = "Xno" ]; then
echo "The config file is wrong. You can config it manually."
return 1
fi
elif [ "${choice}" -eq 3 ]; then
echo "Wrong input..."
return 2
fi
ShowConfigInfo
}
SendFileAndDecompressFile() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
username=${info}
getInfoByIndexAndKey "$index" "password"
password=${info}
getInfoByIndexAndKey "$index" "hostIp"
ip=${info}
getInfoByIndexAndKey "$index" "path"
remote_dir=${info}
ExpectCmd "scp ${package} ${username}@${ip}:${remote_dir}"
if [ $? -ne 0 ]; then
Log "Send file failed, this tool will stoped..."
return 1
fi
ExpectCmd "ssh ${username}@${ip} tar zxf ${remote_dir}/${package} -C ${remote_dir}"
if [ $? -ne 0 ]; then
Log "Decompress file failed, this tool will stoped..."
return 2
fi
fi
done
}
ExpectCmd() {
# Run a command under expect(1), auto-answering the SSH host-key
# "yes/no" prompt and supplying $password at the password prompt.
cmd=$*
expect -c "
spawn ${cmd}
expect {
\"yes\" { send \"yes\\r\"; exp_continue }
\"password\" { send \"${password}\\r\"}
}
expect eof"
}
CreateNewConfigFile() {
    # Discard any previous config file and start an empty one.
    rm -f ${config_file}
    touch ${config_file}
}
ShowConfigInfo() {
if [ ! -f ${config_file} ]; then
Log "Config file is not existed."
return 1
fi
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
getInfoByIndexAndKey "$index" "password"
echo "servers.$index: password->$info"
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
getInfoByIndexAndKey "$index" "path"
echo "servers.$index: path->$info"
fi
done
}
getSections() {
# Collect every "[...]" section header of the config file into the
# global $sections (one header per line).
sections=$(sed -n '/^[# ]*\[.*\][ ]*/p' ${config_file})
}
getInfoByIndex() {
index=$1
nextIndex=$((index + 1))
info=$(sed <${config_file} -n "/^[# ]*\[servers.${index}/,/^[# ]*\[servers.${nextIndex}/p")
}
getInfoByIndexAndKey() {
# Read the value of key $2 inside section [servers.$1]: slice the file
# between this section header and the next, match the key line
# case-insensitively, and return the text after the first ':' in $info.
index=$1
nextIndex=$((index + 1))
key=$2
info=$(sed <${config_file} -n "/^[# ]*\[servers.${index}/,/^[# ]*\[servers.${nextIndex}/p" | grep -i "$key" | awk -F ':' '{print $2}')
}
help() {
echo "***************************************************************************************************"
echo "*"
echo "* This tool can send file to specified path."
echo "* And you should input the file first(It doesn't support get file auto-matically now)"
echo "* Then it will pack those file into a package and send to the environment."
echo "*"
echo "* Note: You should move the file to the current directory, otherwise the packing process will be failed."
echo "*"
echo "***************************************************************************************************"
}
main() {
    # Help for this tool
    help
    # Init log file
    InitLog
    # Ask for the file(s) to transfer
    GetInputFile
    # Package the files
    PackageFiles
    if [ $? -ne 0 ]; then
        Log "Pachage file err, this tool will be stoped..."
        exit
    fi
    # Input the IP and path of the destination
    GetUserNamePasswdAndPath
    if [ $? -ne 0 ]; then
        Log "GetUserNamePasswdAndPath err, this tool will be stoped..."
        exit
    fi
    # Send and decompress the package.
    # BUG FIX: capture the status once — the old code re-read $? in the
    # elif branch, where it already held the exit status of the first
    # '[ ]' test instead of SendFileAndDecompressFile's.
    SendFileAndDecompressFile
    rc=$?
    if [ $rc -eq 1 ]; then
        echo "Send file err and exit soon..."
        exit
    elif [ $rc -eq 2 ]; then
        echo "Decompress file err and exit soon..."
    fi
}
main
| true
|
2bde99198c9a426c08a13c6500b71a770e8636f7
|
Shell
|
Nitrokey/nitrokey-3-firmware
|
/utils/nrf-builder/wait_for_usb.sh
|
UTF-8
| 184
| 3.046875
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
VID=$1
PID=$2
sleep 1
for i in `seq 1 30`
do
lsusb -d $VID:$PID >/dev/null || (echo -ne "." && sleep 1)
done
lsusb -d $VID:$PID || (echo "Device not found" && exit 1)
| true
|
bd35f906b24ac51118a229ed18bb2d338a84582e
|
Shell
|
jstty/docker-taiga
|
/build.sh
|
UTF-8
| 5,838
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
export PWD=$(pwd)
ENV_FILE="./env.sh"
# if Mac OSX then set env to docker machine so docker commands work
if [ "$(uname)" == "Darwin" ]; then
eval "$(docker-machine env default)"
fi
echo "--------------------------------------------"
echo "Taiga Setup Wizard"
echo "--------------------------------------------"
# error <msg> — print the message to stderr and abort the whole script by
# sending SIGINT to the top-level shell ($$), which also works when called
# from inside a $(...) command-substitution (e.g. from prompt()).
error () {
echo "$1" >&2
kill -INT $$
}
yesNoPrompt () {
    # Ask yes/no question $2 with default answer $1 ("Yes" or "No");
    # echo the normalized answer to stdout. One keypress, no Enter needed.
    read -p "$2 [Yes/No] (default: $1)?" -n 1 choice
    echo "" >&2
    if [[ "$choice" == [yY] ]]; then
        echo "Yes"
    elif [[ "$choice" == [nN] ]]; then
        echo "No"
    else
        echo "$1"   # anything else falls back to the default
    fi
}
# prompt <default> <question> [--required <example>]
#   Interactively ask for a value and echo it to stdout.
#   - Without --required: empty input falls back to <default>.
#   - With --required and an empty <default>: empty input aborts via error().
#   - With --required and a non-empty <default>: Enter keeps <default>.
prompt () {
    if [ -z "$3" ]; then
        read -p "$2 (default: $1)?" var
        if [ -z "$var" ]; then
            var=$1
        fi
    elif [ "$3" = "--required" ]; then
        if [ -z "$1" ]; then
            read -p "$2 (ex. $4)?" var
            if [ -z "$var" ]; then
                error "$2 is require"
            fi
        else
            read -p "$2 (enter to use: $1)?" var
            if [ -z "$var" ]; then
                var=$1
            fi
        fi
    fi
    # BUG FIX: the original 'echo $var' word-split the value, collapsing
    # internal whitespace and expanding globs; quote it instead.
    printf '%s\n' "$var"
    return 0
}
CLEANUP_CONTAINERS=$(yesNoPrompt "No" "Do you want to stop and remove Taiga services")
if [ "$CLEANUP_CONTAINERS" == "Yes" ]; then
# # Stop all containers
# docker stop $(docker ps -a -q)
# # Delete all containers
# docker rm $(docker ps -a -q)
# Stop service containers
docker-compose stop
# Delete service containers
docker-compose rm
fi
CLEANUP_IMAGES=$(yesNoPrompt "No" "Do you want to remove Taiga images before we start")
if [ "$CLEANUP_IMAGES" == "Yes" ]; then
# # Delete all images
# docker rmi $(docker images -q)
# Delete taiga images
docker rmi taiga-events
docker rmi taiga
fi
if [ -f "${ENV_FILE}" ]; then
USE_ENV=$(yesNoPrompt "Yes" "Do you want to use the 'env.sh' settings")
fi
if [ "${USE_ENV}" == "Yes" ]; then
source ${ENV_FILE}
else
if [ "$(uname)" == "Darwin" ]; then
HOST_IP=$(docker-machine ip)
else
HOST_IP=$(/sbin/ip route|awk '/default/ { print $3 }')
fi
DEFAULT_DATA_DIR="./data"
export TAIGA_DATA_DIR=$(prompt "${TAIGA_DATA_DIR-$DEFAULT_DATA_DIR}" "Frontend Hostname" --required "${DEFAULT_DATA_DIR}" )
# create dir's
mkdir -p $TAIGA_DATA_DIR
mkdir -p $TAIGA_DATA_DIR/media
mkdir -p $TAIGA_DATA_DIR/db
export TAIGA_PORT=$(prompt "${TAIGA_PORT-8000}" "Frontend Port")
export TAIGA_HOST=$(prompt "$TAIGA_HOST" "Frontend Hostname" --required "${HOST_IP}" )
TAIGA_SSL_Q=$(yesNoPrompt "No" "Use SSL")
if [ "$TAIGA_SSL_Q" == "Yes" ]; then
export TAIGA_SSL="True"
else
export TAIGA_SSL="False"
fi
if [ "${EMAIL_USE_HOSTIP}" == "False" ]; then
EMAIL_USE_HOSTIP_DEFAULT="No"
else
EMAIL_USE_HOSTIP_DEFAULT="Yes"
fi
export EMAIL_USE_HOSTIP_P=$(yesNoPrompt "${EMAIL_USE_HOSTIP_DEFAULT}" "Use Host Machine as Email server")
if [ "$EMAIL_USE_HOSTIP_P" == "Yes" ]; then
export EMAIL_USE_HOSTIP="True"
export EMAIL_HOST=''
else
export EMAIL_USE_HOSTIP="False"
export EMAIL_HOST=$(prompt "${HOST_IP}" "Email Hostname")
fi
export EMAIL_PORT=$(prompt "25" "Email Port")
export EMAIL_HOST_USER=$(prompt "" "Email Login Username")
export EMAIL_HOST_PASSWORD=$(prompt "" "Email Login Password")
export TAIGA_DB_NAME=$(prompt "taiga" "Taiga DB Name")
export TAIGA_DB_USER=$(prompt "taiga" "Taiga DB Username")
export TAIGA_DB_PASSWORD=$(prompt "password" "Taiga DB Password")
EMAIL_USETLS=$(yesNoPrompt "No" "Email use TLS")
if [ "$EMAIL_USETLS" == "Yes" ]; then
export EMAIL_USE_TLS="True"
else
export EMAIL_USE_TLS="False"
fi
GITHUB=$(yesNoPrompt "No" "Github Integration")
if [ "$GITHUB" == "Yes" ]; then
export GITHUB_URL=$(prompt "https://github.com/" "Github URL")
export GITHUB_API_URL=$(prompt "https://api.github.com/" "Github API URL")
export GITHUB_API_CLIENT_ID=$(prompt "$GITHUB_API_CLIENT_ID" "Github API Client ID" --required "yourClientId")
export GITHUB_API_CLIENT_SECRET=$(prompt "$GITHUB_API_CLIENT_SECRET" "Github API Client Secret" --required "yourClientSecret")
else
export GITHUB_URL=""
export GITHUB_API_URL=""
export GITHUB_API_CLIENT_ID=""
export GITHUB_API_CLIENT_SECRET=""
fi
fi
echo "--------------------------------------------"
echo "Hostname: $TAIGA_HOST"
echo "Port: $TAIGA_PORT"
echo "Data Path: $TAIGA_DATA_DIR"
echo "Use SSL: $TAIGA_SSL"
echo "Email use HostIP: $EMAIL_USE_HOSTIP"
echo "Email use TLS: $EMAIL_USE_TLS"
echo "Email Hostname: $EMAIL_HOST"
echo "Email Port: $EMAIL_PORT"
echo "Email Login User: $EMAIL_HOST_USER"
echo "Email Login Password: $EMAIL_HOST_PASSWORD"
echo "Taiga DB Name: $TAIGA_DB_NAME"
echo "Taiga DB Username: $TAIGA_DB_USER"
echo "Taiga DB Password: $TAIGA_DB_PASSWORD"
echo "Github URL: $GITHUB_URL"
echo "Github API URL: $GITHUB_API_URL"
echo "Github API Client ID: $GITHUB_API_CLIENT_ID"
echo "Github API Client Secret: $GITHUB_API_CLIENT_SECRET"
echo "--------------------------------------------"
if [ "$USE_ENV" != "Yes" ]; then
cat >${ENV_FILE} <<EOL
export TAIGA_HOST=$TAIGA_HOST
export TAIGA_PORT=$TAIGA_PORT
export TAIGA_DATA_DIR=$TAIGA_DATA_DIR
export EMAIL_USE_HOSTIP=$EMAIL_USE_HOSTIP
export EMAIL_USE_TLS=$EMAIL_USE_TLS
export EMAIL_HOST=$EMAIL_HOST
export EMAIL_PORT=$EMAIL_PORT
export EMAIL_HOST_USER=$EMAIL_HOST_USER
export EMAIL_HOST_PASSWORD=$EMAIL_HOST_PASSWORD
export TAIGA_DB_NAME=$TAIGA_DB_NAME
export TAIGA_DB_USER=$TAIGA_DB_USER
export TAIGA_DB_PASSWORD=$TAIGA_DB_PASSWORD
export GITHUB_URL=$GITHUB_URL
export GITHUB_API_URL=$GITHUB_API_URL
export GITHUB_API_CLIENT_ID=$GITHUB_API_CLIENT_ID
export GITHUB_API_CLIENT_SECRET=$GITHUB_API_CLIENT_SECRET
EOL
# change file permin so it's executable
chmod +x ${ENV_FILE}
fi
# convert to absolute path
export TAIGA_DATA_DIR=`cd "$TAIGA_DATA_DIR"; pwd`
echo "--------------------------------------------"
echo "Build Services..."
echo "--------------------------------------------"
docker-compose build
echo "--------------------------------------------"
echo "Creating Containers..."
echo "--------------------------------------------"
docker-compose create
| true
|
249436e053e257304a7ed1222d43d5a019ea44b8
|
Shell
|
mtan93/auocloud-auowp
|
/upgrade.sh
|
UTF-8
| 579
| 2.578125
| 3
|
[] |
no_license
|
# Replace the legacy worker plugins on a WordPress site under ~/public_html
# with mainwp-child, using WP-CLI.
echo Working out HOME directory
cd ~ || exit 1              # bail out rather than operate on the wrong tree
sleep 1s
echo Setting WP Path
HMEPATH="$(pwd)"
sleep 1s
echo Deactivating Old Workers
echo
# Paths are quoted so a HOME containing spaces cannot word-split the --path
# argument (the original passed $HMEPATH unquoted).
wp plugin deactivate worker --path="$HMEPATH/public_html/" && wp plugin delete worker --path="$HMEPATH/public_html/"
wp plugin deactivate auocloudworker --path="$HMEPATH/public_html/" && wp plugin delete auocloudworker --path="$HMEPATH/public_html/"
sleep 1s
echo Installing New Workers
wp plugin install https://downloads.wordpress.org/plugin/mainwp-child.4.0.7.1.zip --path="$HMEPATH/public_html/"
wp plugin activate mainwp-child --path="$HMEPATH/public_html/"
| true
|
e112dbcf955f2b7215477f592c2ee40a8f43ceda
|
Shell
|
reisritter/Algoritmos-Shell
|
/1133.sh
|
UTF-8
| 109
| 2.984375
| 3
|
[] |
no_license
|
# URI 1133: read x and y, then print every integer n in [x, y) whose
# remainder mod 5 is 2 or 3, one per line.

# print_special_remainders <start> <end> — emit qualifying numbers in the
# half-open range [start, end). Extracted so the logic is unit-testable.
print_special_remainders() {
    local start=$1 end=$2 n
    for (( n = start; n < end; n++ )); do
        if [ $((n % 5)) -eq 2 ] || [ $((n % 5)) -eq 3 ]; then
            echo "$n"
        fi
    done
}

read x
read y
print_special_remainders "$x" "$y"
| true
|
e706b7de247aedc19171d9b6b91a28f8c087c96a
|
Shell
|
fortytwoio/docker-php
|
/bin/start-php.sh
|
UTF-8
| 686
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Container entrypoint: render the PHP-FPM pool config from environment
# variables (with sane defaults) and exec php-fpm in the foreground.
set -eu # exit on error or undefined variable
# Defaults
export PHP_MAX_CHILDREN=${PHP_MAX_CHILDREN:-"24"}
export PHP_START_SERVERS=${PHP_START_SERVERS:-"10"}
export PHP_MIN_SPARE_SERVERS=${PHP_MIN_SPARE_SERVERS:-"8"}
export PHP_MAX_SPARE_SERVERS=${PHP_MAX_SPARE_SERVERS:-"12"}
export PHP_MAX_REQUESTS=${PHP_MAX_REQUESTS:-"200"}
# Config file template from environment variables
#envsubst < /etc/php5/fpm/php.ini.tpl > /etc/php5/fpm/php.ini
#envsubst < /etc/php5/fpm/php-fpm.conf.tpl > /etc/php5/fpm/php-fpm.conf
envsubst < /etc/php5/fpm/pool.d/www.conf.tpl > /etc/php5/fpm/pool.d/www.conf
mkdir -p /var/log/php
# Launch. BUG FIX: "$@" is quoted so extra container arguments containing
# spaces are forwarded intact (the original bare $@ word-split them).
exec /usr/sbin/php5-fpm --nodaemonize "$@"
| true
|
5fe047ba65ddafd2250a7a338e87e699e1676249
|
Shell
|
bm4cs/dots-stow
|
/stow-home/bin/bin/dwmbar
|
UTF-8
| 114
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
print_date(){
	# Render the current time as "Wkd DD Mon HH:MM" for the dwm bar.
	fmt='+%a %d %b %H:%M'
	date "$fmt"
}
# Main loop: dwm displays the X root window's name as its status bar, so
# refresh it with the formatted date once a minute, forever.
while true
do
xsetroot -name "$(print_date)"
sleep 1m
done
| true
|
da278834720e21dc07250e7897fb39476014e1da
|
Shell
|
cupnes/megadrive_test
|
/tools/bin_splitter.sh
|
UTF-8
| 926
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Demultiplex a binary into two half-size files: bytes at odd offsets go to
# <name>_upper.<suffix>, bytes at even offsets to <name>_lower.<suffix>.
# One dd invocation per input byte — simple and dependency-free, but O(n)
# process spawns, so slow on large inputs.
set -uex
# set -ue
# Split as follows (translated from the original Japanese):
# | upper 8 bits | lower 8 bits |
# | (odd bytes)  | (even bytes) |
# |--------------|--------------|
# 0x0000 | 0xNN | 0xMM |
# 0x0002 | 0xAA | 0xBB |
IN_FILE=$1
# Derive the base name and extension (rev|cut|rev keeps everything before
# the last dot as the name, the rest as the suffix).
name=$(echo $IN_FILE | rev | cut -d'.' -f2- | rev)
suff=$(echo $IN_FILE | rev | cut -d'.' -f1 | rev)
out_file_upper="${name}_upper.$suff"
out_file_lower="${name}_lower.$suff"
file_size=$(stat -c '%s' $IN_FILE)
for i in $(seq 0 $((file_size - 1))); do
if [ $((i % 2)) -eq 1 ]; then
# odd input offset -> upper-byte file, at output offset i/2
# i=1: seek=0
# i=3: seek=1
# i=5: seek=2
dd if=$IN_FILE of=$out_file_upper bs=1 ibs=1 obs=1 count=1 skip=$i seek=$((i / 2))
else
# even input offset -> lower-byte file, at output offset i/2
# i=0: seek=0
# i=2: seek=1
# i=4: seek=2
dd if=$IN_FILE of=$out_file_lower bs=1 ibs=1 obs=1 count=1 skip=$i seek=$((i / 2))
fi
done
| true
|
db10bd0e66bf86bfbd69534503a097ded3a01612
|
Shell
|
ShiKaiWi/Compiler-for-SimpleC
|
/bin/run
|
UTF-8
| 876
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
cd "$(dirname "$0")"
cd ../
make
mkdir test/build
echo -e "bin/c1c test/test1"
bin/c1c test/test1.c1
as --32 test/test1.c1.out -o test/test1.o
ld -melf_i386 test/test1.o -o test/build/test1
test/build/test1
echo $?
echo -e "the output is the result of 'test1.c1'"
echo -e "please press any key to continue:"
read wt
echo -e "bin/c1c test/test2"
bin/c1c test/test2.c1
as --32 test/test2.c1.out -o test/test2.o
ld -melf_i386 test/test2.o -o test/build/test2
test/build/test2
echo $?
echo -e "the output is the result of 'test2.c1'"
echo -e "please press any key to continue:"
read wt
echo -e "bin/c1c test/test3"
bin/c1c test/test3.c1
as --32 test/test3.c1.out -o test/test3.o
ld -melf_i386 test/test3.o -o test/build/test3
test/build/test3
echo $?
echo -e "the output is the result of 'test3.c1'"
echo -e "please press any key to end"
cd test
rm *.o
read wt
| true
|
b85117b00e4a77797fdd30f6de8beb28cb318a67
|
Shell
|
darthsuogles/drgscl
|
/lib_wisper_fetch.sh
|
UTF-8
| 770
| 3.515625
| 4
|
[] |
no_license
|
# Internet resource acquisition
# A browser-like header/user-agent pair so picky servers serve us normally.
REQ_HEADER="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/601.5.17 (KHTML, like Gecko) Version/9.1 Safari/601.5.17"

# Download with curl, presenting the browser user agent.
function _fetch_with_curl {
    curl -A "${USER_AGENT}" "$@"
}

# Download with wget, presenting the browser header and user agent.
function _fetch_with_wget {
    wget --header="${REQ_HEADER}" --user-agent="${USER_AGENT}" "$@"
}

# _wisper_fetch <wget|curl> [args...] — dispatch to the requested
# downloader; on an unrecognized command, log it and fall back to trying
# curl, then wget. Returns non-zero if every attempt fails.
function _wisper_fetch {
    [[ $# -ge 1 ]] || quit_with "usage: _wisper_fetch <wget|curl> ..."
    local cmd=$1; shift
    case "${cmd}" in
	curl) _fetch_with_curl "$@" && return 0 ;;
	wget) _fetch_with_wget "$@" && return 0 ;;
	# BUG FIX: the original arm was '\?)', which matches only a literal
	# "?" — any other unknown command matched nothing and the function
	# returned 0 without fetching anything. Also quote "$@".
	*) log_errs "do not recognize download command ";
	   _fetch_with_curl "$@" || _fetch_with_wget "$@" || return 1 ;;
    esac
}
| true
|
f8e091225e679617f0c60aa1b2f35c779c9ebca0
|
Shell
|
jyotirepo/SCB-shellscript
|
/utilities.sh
|
UTF-8
| 1,721
| 4.59375
| 5
|
[] |
no_license
|
#!/bin/bash
# This is a utilities script for common tasks in Linux environments.
# Function to display a menu and read user choice
display_menu() {
# Clear the screen, print the option list, and store the user's selection
# in the GLOBAL variable 'choice' (read by the dispatch loop at the bottom
# of this script).
clear
echo "Utilities Script Menu"
echo "0. System Info"
echo "1. Update System"
echo "2. Check Disk Usage"
echo "3. List Running Processes"
echo "4. Memory Usage"
echo "5. Package Installation"
echo "6. Exit"
echo -n "Enter your choice: "
read choice
}
# Function to view system info
system_info() {
    # Print a short host summary: hostname, kernel release, uptime, and the
    # number of logged-in sessions.
    printf '%s\n' \
        "System Information:" \
        "Hostname: $(hostname)" \
        "Kernel: $(uname -r)" \
        "Uptime: $(uptime)" \
        "Logged-in Users: $(who | wc -l)"
}
# Function to update the system
update_system() {
# Refresh/apply package updates via yum.
# NOTE(review): 'yum update' without -y prompts interactively, and on yum
# 'update' already installs upgrades, making the following 'upgrade -y'
# largely redundant — likely meant a single 'yum update -y'. Confirm intent.
echo "Starting Yum update and upgrade"
sudo yum update
sudo yum upgrade -y
echo "System updated!"
}
# Function to check disk usage
check_disk_usage() {
    # Show per-filesystem usage with filesystem type, human-readable sizes.
    # FIX: corrected the garbled banner text ("Dispalying Display Usage").
    echo "Displaying Disk Usage"
    df -Th
}
# Function to list running processes
list_processes() {
    # Dump a full process listing (all users, with command lines).
    # FIX: corrected the banner typos ("current runnig process").
    echo "Displaying currently running processes"
    ps aux
}
# Function to show kernel info
# Function to show kernel info
memory_info() {
    # Report RAM and swap usage in human-readable units.
    printf '%s\n' "Displaying Memory usage"
    free -h
}
# Enter package name to install
# Prompt for a package name and install it with yum.
package_info() {
    # -r keeps backslashes literal; the name is quoted when passed to yum so
    # it cannot be word-split or glob-expanded (original passed it bare).
    read -r -p "Enter Package Name to Install : " package_name
    echo "deploying $package_name"
    sudo yum install "$package_name" -y
}
# Main script loop
# Event loop: show the menu, dispatch on the global 'choice' that
# display_menu fills in, then wait for Enter before redrawing.
while true; do
display_menu
case $choice in
0) system_info ;;
1) update_system ;;
2) check_disk_usage ;;
3) list_processes ;;
4) memory_info ;;
5) package_info ;;
6) echo "Exiting. Goodbye!"; exit ;;
*) echo "Invalid choice. Please select a valid option." ;;
esac
echo -n "Press Enter to continue..."
read -r
done
| true
|
c3fc6acf5cc595ee4f2ef5ede4c24f2a949b1b70
|
Shell
|
olback/dotfiles
|
/bin/new-project.sh
|
UTF-8
| 1,210
| 4.21875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
set -e
#echo $@
SELF=$0
LANG=$1
function usage {
    # Print invocation help; $SELF is the script's own path ($0).
    printf '%s [lang] [name] <options>\n' "$SELF"
}
function generate_rust_project {
    # Scaffold a new Rust crate named $1 via cargo; any extra arguments are
    # forwarded. FIX: "$@" is quoted so names/flags containing spaces are
    # passed to cargo as single arguments (original used bare $@).
    echo "Generating new Rust Project \"$1\""
    cargo new "$@"
}
function generate_c_project {
# Scaffold a minimal C project in a new directory $1: a hello-world main.c,
# a pkg-config-aware Makefile, a .gitignore for the built binary, and a git
# repo if the directory is not already inside one.
echo "Generating new C Project \"$1\""
mkdir $1
cd $1
# Create main file
printf "#include <stdio.h>\n\n int main(int argc, char **argv, char **env) {\n printf(\"Hello, World!\\\n\");\n return 0;\n}\n" > main.c
# Create Makefile
# (single quotes keep $(PKGS)/$(CC) as literal make variables; the project
# name $1 is spliced in unquoted segments. NOTE(review): "CLFAGS" looks
# like a typo for CFLAGS, but it is used consistently — confirm intent.)
printf 'PKGS=\nCLFAGS=-Wall -Wextra `pkg-config --cflags $(PKGS)`\nLIBS=`pkg-config --libs $(PKGS)`\nSRC=main.c\n\n'$1': $(SRC)\n\t$(CC) $(CLFAGS) -o '$1' $(SRC) $(LIBS)\n\n' > Makefile
# Ignore exe
printf "$1\n" > .gitignore
# Init git
# Temporarily tolerate the non-zero status 'git status' returns outside a
# repository (script runs under set -e).
set +e
git status &> /dev/null
if [ $? != 0 ]; then
git init
fi
set -e
}
# --- argument validation & language dispatch ---
if [ $# -lt 2 ]; then
    usage;
    exit 1
fi

# Refuse to clobber an existing directory. FIX: "$2" is quoted so names
# containing spaces (or empty values) do not break the test.
if [ -d "$2" ]; then
    echo "Directory \"$2\" already exists"
    exit 1
fi

# Pop lang
shift

# NOTE(review): this script stores the language in LANG (set near the top),
# which shadows the well-known locale environment variable — consider
# renaming it, but that assignment is outside this hunk.
case "$LANG" in
    rust)
        generate_rust_project "$@"
        ;;
    c)
        generate_c_project "$@"
        ;;
    *)
        echo "Unknown language \"$LANG\""
        usage;
        exit 1
        ;;
esac
| true
|
605cf4a3b596fce1800aba00582ba20d7b2127c0
|
Shell
|
jpuritz/BIO_594_2019
|
/Final_Assignment/Chille_Final_Assignment/Scripts/bwa.sh
|
UTF-8
| 763
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Map paired-end reads to the A. digitifera reference with bwa mem, mark and
# drop duplicates with samblaster, filter to mapped reads, then
# coordinate-sort and index the resulting BAMs.
F=/home/echille/finalproject/mapping
# Sample prefixes, derived by stripping the R1 filename suffix.
array1=($(ls *pass_1.fastq.gz_paired_qtrim.fq.gz | sed 's/pass_1.fastq.gz_paired_qtrim.fq.gz//g'))
bwa index GCF_000222465.1_Adig_1.1_genomic.fna
echo "done index $(date)"
for i in ${array1[@]}; do
# -B/-O adjust mismatch and gap-open penalties; -R attaches a read group per
# sample; samtools view -q 1 keeps only reads with MAPQ >= 1.
bwa mem $F/GCF_000222465.1_Adig_1.1_genomic.fna ${i}pass_1.fastq.gz_paired_qtrim.fq.gz ${i}pass_2_paired_qtrim.fq.gz -t 8 -a -M -B 3 -O 5 -R "@RG\tID:${i}\tSM:${i}\tPL:Illumina" 2> bwa.${i}.log | samblaster -M --removeDups | samtools view -@4 -q 1 -SbT $F/GCF_000222465.1_Adig_1.1_genomic.fna - > ${i}.bam
echo "done ${i}"
done
array2=($(ls *.bam | sed 's/.bam//g'))
#now sort the bam files with samtools sort
# NOTE(review): sorting in-place (-o same file as input) works with recent
# samtools but is risky if interrupted — confirm a temp output is not needed.
for i in ${array2[@]}; do
samtools sort -@8 ${i}.bam -o ${i}.bam && samtools index ${i}.bam
done
| true
|
455d448255c48ebff7fd412f368f93b2e9c333e9
|
Shell
|
mdchambers/groucho
|
/pdb/generateScoreReport.sh
|
UTF-8
| 294
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect repacked.fasc score files from every output* directory into
# fasc.analysis/, extract fields 2 and 33 from each, numeric-sort by the
# second kept field, and aggregate into out.fasc.
startdir=`pwd`
mkdir fasc.analysis
for x in output*; do
cp ${x}/repacked/repacked.fasc fasc.analysis/${x}.fasc
done
cd fasc.analysis
for x in *fasc; do
# Squeeze runs of spaces so cut's single-space delimiter lines up.
cat $x | tr -s ' ' | cut -d' ' -f 2,33 | sort -nk 2,2 > ${x}.temp
done
for x in *temp; do
# NOTE(review): this reads AND appends to the same out.fasc (which does not
# exist on the first iteration) — the aggregation looks broken; confirm the
# intended target (e.g. a separate accumulator file) before trusting output.
paste $x out.fasc >> out.fasc
done
| true
|
79c770a523ca562af39b7f64f606bf48ad404cde
|
Shell
|
Rud5G/Ajasta
|
/docker/dev/create-changelog-diff.sh
|
UTF-8
| 1,231
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Produce a Liquibase diff changelog between a pristine schema (rebuilt from
# the current changelog into a throwaway 'reference' DB) and the live
# 'ajasta' DB, then register the generated file in master.xml.

# Rebuild an empty 'reference' database inside the mysql container.
docker exec -it $(docker-compose ps -q mysql | head -1) /bin/bash -c " \
export MYSQL_PWD=root;
echo 'DROP DATABASE IF EXISTS reference' | mysql -uroot; \
echo 'CREATE DATABASE reference' | mysql -uroot; \
"

# Apply the existing changelog to the reference DB.
docker-compose run liquibase \
--username=root \
--password=root \
--url=jdbc:mysql://mysql/reference \
--changeLogFile=/var/www/html/changelog/master.xml \
update

TS=$(date -u +%Y%m%d-%H%M%S)

# Diff reference vs. live schema into a timestamped changelog file.
docker-compose run liquibase \
--username=root \
--password=root \
--url=jdbc:mysql://mysql/reference?tinyInt1isBit=false \
--referenceUrl=jdbc:mysql://mysql/ajasta?tinyInt1isBit=false \
--referenceUsername=root \
--referencePassword=root \
--changeLogFile=/var/www/html/changelog/changelog-$TS.xml \
diffChangeLog

# BUG FIX: the original sed script was "/pattern/ <include .../>" — an
# address with no sed command, which sed rejects. Use the 'i\' (insert)
# command to place the <include> element before </databaseChangeLog>.
sed -i "/<\/databaseChangeLog>/i\\
  <include relativeToChangelogFile=\"true\" file=\"/changelog-$TS.xml\"/>" changelog/master.xml

# Hand ownership of the generated files back to the invoking user.
docker exec -it $(docker-compose ps -q application | head -1) chown -R $(id -u):$(id -g) /var/www/html/changelog

# Drop the throwaway reference database.
docker exec -it $(docker-compose ps -q mysql | head -1) /bin/bash -c " \
export MYSQL_PWD=root;
echo 'DROP DATABASE reference' | mysql -uroot; \
"
| true
|
be870f738581a59d7b9203c840271782265b6a76
|
Shell
|
EricALBRECHT/Huawei_E3531_Stick_configuration
|
/sms/sendSMS.sh
|
UTF-8
| 537
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Send an SMS through a Huawei HiLink modem (192.168.8.1) web API.
# Usage: sendSMS.sh <number> <message>
NUMBER=$1
MESSAGE=$2
./session.sh          # obtain session cookie -> session.txt
./token.sh            # obtain CSRF token -> token.txt
LENGTH=${#MESSAGE}
TIME=$(date +"%Y-%m-%d %T")
TOKEN=$(<token.txt)
# NOTE(review): $MESSAGE/$NUMBER are interpolated into XML unescaped — a
# message containing '<', '>' or '&' will produce an invalid request body.
SMS="<request><Index>-1</Index><Phones><Phone>$NUMBER</Phone></Phones><Sca/><Content>$MESSAGE</Content><Length>$LENGTH</Length><Reserved>1</Reserved><Date>$TIME</Date></request>"
# FIX: the original unquoted 'echo $SMS' word-split the payload and could
# glob-expand it; print it verbatim instead.
printf '%s\n' "$SMS"
curl -v -b session.txt -c session.txt -H "X-Requested-With: XMLHttpRequest" --data "$SMS" http://192.168.8.1/api/sms/send-sms --header "__RequestVerificationToken: $TOKEN" --header "Content-Type:text/xml"
| true
|
f271c3d5dabc12e757aa9841ff3d53cbff687472
|
Shell
|
m-martinez/docker-postgres-nondurable
|
/update.sh
|
UTF-8
| 705
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Updates docker files for major postgres versions
#
# Usage:
# > ./update.sh
#
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
fi
versions=( "${versions[@]%/}" )
read -r -d '' generated_warning <<EOF
#
# THIS FILE IS GENERATED BY "update.sh"
# DO NOT EDIT
#
EOF
for version in "${versions[@]}"; do
for variant in '' 'alpine'; do
if [ "$variant" == "" ]; then
template="Dockerfile-debian.template"
else
template="Dockerfile-$variant.template"
fi
echo "$generated_warning" > "$version/$variant/Dockerfile"
sed -e 's/%%PG_VERSION%%/'"$version"'/g' \
"$template" >> "$version/$variant/Dockerfile"
done
done
| true
|
c273b61d9a76a9b79a29dc440095d97add526286
|
Shell
|
kdlong/SelectorTools
|
/Templates/CondorSubmit/wrapRunSelector.sh
|
UTF-8
| 469
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Condor job wrapper. NOTE(review): this file appears to be a TEMPLATE —
# '$$' presumably renders to a literal '$' and ${tarball} /
# ${CMSSW_RELEASE_BASE} are filled in at submit time; confirm against the
# submission script before editing the escapes.
# Following implementation by N. Smith, Fermilab
# https://gitlab.cern.ch/ncsmith/monoZ/tree/master/selector
tar xvzf ${tarball}
CMSSW_RELEASE_BASE="${CMSSW_RELEASE_BASE}"
source /cvmfs/cms.cern.ch/cmsset_default.sh
pushd $$CMSSW_RELEASE_BASE
eval `scramv1 runtime -sh`
popd
export LD_LIBRARY_PATH=$$PWD/lib:$$LD_LIBRARY_PATH
export ROOT_INCLUDE_PATH=$$PWD:$$ROOT_INCLUDE_PATH
./Analysis/SelectorTools/Utilities/scripts/makeHistFile.py $$@ || exit $$?
| true
|
fb911f370495e4941f1cf4793baee1e8eef37978
|
Shell
|
intel-analytics/BigDL
|
/python/dev/add_type_hint.sh
|
UTF-8
| 1,296
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2022 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
hint_module_name="friesian"
if [ "$1" ]; then
hint_module_name=$1
echo hint_module_name:${hint_module_name}
fi
hint_submodule_name="feature"
if [ "$2" ]; then
hint_submodule_name=$2
echo hint_submodule_name:${hint_submodule_name}
fi
cd "`dirname $0`"
export MT_DB_PATH="$(pwd)/${hint_module_name}_hint.sqlite3"
echo $MT_DB_PATH
export PYSPARK_PYTHON=python
export PYSPARK_DRIVER_PYTHON=python
cd ../${hint_module_name}
echo "Automatically Add Type Hint"
if [ -f $MT_DB_PATH ];then
rm $MT_DB_PATH
fi
for file in $(find test/bigdl/${hint_module_name}/${hint_submodule_name} -name test_*.py)
do
echo $file
monkeytype run $file
done
cd -
unset MT_DB_PATH
| true
|
0955558d3f9148c496ebbcea75edc3141b508d01
|
Shell
|
brewingcode/dotfiles
|
/bin/git-author-rewrite
|
UTF-8
| 785
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# git-author-rewrite [OLD_EMAIL NEW_EMAIL NEW_NAME [REF]]
# Rewrite history so commits authored/committed as OLD_EMAIL become
# NEW_NAME <NEW_EMAIL>. Each value may also be supplied via an environment
# variable of the same name; REF defaults to HEAD.
export OLD_EMAIL="${OLD_EMAIL:-$1}"
export NEW_EMAIL="${NEW_EMAIL:-$2}"
export NEW_NAME="${NEW_NAME:-$3}"
export REF="${4:-HEAD}"
[ -z "$OLD_EMAIL" ] && { echo "define OLD_EMAIL and re-run" >&2; exit 1; }
[ -z "$NEW_EMAIL" ] && { echo "define NEW_EMAIL and re-run" >&2; exit 3; }
[ -z "$NEW_NAME" ] && { echo "define NEW_NAME and re-run" >&2; exit 2; }
echo "correcting $OLD_EMAIL -> $NEW_NAME <$NEW_EMAIL>"
# Grace period so the user can Ctrl-C before history is rewritten.
sleep 5
# The env-filter body is single-quoted on purpose: filter-branch evaluates
# it per commit, with the exported variables above in its environment.
git filter-branch --env-filter '
if [ "$GIT_COMMITTER_EMAIL" = "$OLD_EMAIL" ]
then
export GIT_COMMITTER_NAME="$NEW_NAME"
export GIT_COMMITTER_EMAIL="$NEW_EMAIL"
fi
if [ "$GIT_AUTHOR_EMAIL" = "$OLD_EMAIL" ]
then
export GIT_AUTHOR_NAME="$NEW_NAME"
export GIT_AUTHOR_EMAIL="$NEW_EMAIL"
fi
' --tag-name-filter cat -- --branches --tags "$REF"
| true
|
67a8440c8792a8945b67dd49c4c3154e45400c6e
|
Shell
|
hashgraph/hedera-services
|
/hedera-node/docker/update-env.sh
|
UTF-8
| 370
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# This scripts create a '.env' file that is used for docker & docker-compose as an input of environment variables.
# This script is called by gradle and get the current project version as an input param
if [ $# -lt 1 ]; then
echo "USAGE: $0 <TAG>"
exit 1
fi
# Overwrite .env with the image tag; REGISTRY_PREFIX is written empty here —
# presumably set elsewhere when pushing to a remote registry (confirm).
echo "TAG=$1" > .env
echo "REGISTRY_PREFIX=" >> .env
echo "VERSION/TAG UPDATED TO $1"
| true
|
a04d93cdf53ae02230ac9823534c7c9911f39202
|
Shell
|
danielshahaf/modernish
|
/libexec/modernish/var/setlocal.mm
|
UTF-8
| 9,695
| 3.4375
| 3
|
[
"ISC",
"CC0-1.0"
] |
permissive
|
#! /module/for/moderni/sh
# --- { setlocal...endlocal } ---
# A pair of aliases for a { setlocal ... endlocal } code block. Local variables
# and local shell options are supported, with those specified becoming local
# and the rest remaining global. The exit status of the block is the exit
# status of the last command. Positional parameters are passed into the
# block but changes are lost when exiting from it. Use 'return' (not
# 'break') to safely break out from the block and automatically restore the
# global state. (That's because, internally, the block is a temporary shell
# function.)
#
# ksh93 (AT&T ksh) compatibility note:
# Unfortunately, on AT&T ksh, we have to put up with BUG_FNSUBSH breakage. That
# is, if a script is to be compatible with AT&T ksh, setlocal/endlocal cannot
# be used within non-forked subshells, because unsetting/redefining the
# temporary function is impossible. The program would silently execute the
# WRONG code if not for a test implemented below that checks if unsetting
# a dummy function defined in the main shell succeeds. If the function
# cannot be redefined, modernish kills the program rather than allowing the
# shell to execute the wrong code.
# Note that background subshells are forked and this does not apply there.
# Command substitution is also forked if output redirection occurs within
# it; modernish adds a dummy output redirection to the alias, which makes it
# possible to use setlocal in command substitutions on ksh93.
# (Luckily, AT&T ksh also has LEPIPEMAIN, meaning, the last element of a pipe is
# executed in the main shell. This means you can still pipe the output of a
# command into a { setlocal...endlocal } block with no problem, provided that
# block is the last element of the pipe.)
# All of the above applies only to ksh93 and not to any other shell.
# However, this does mean portable scripts should NOT use setlocal in
# subshells other than background jobs and command substitutions.
#
# Usage:
# { setlocal <item> [ <item> ... ]
# <command> [ <command> ... ]
# endlocal }
# where <item> is a variable name, variable assignment, or shell
# option. Unlike with 'push', variables are unset or assigned, and
# shell options are set (e.g. -f) or unset (e.g. +f), after pushing
# their original values/settings onto the stack.
#
# Usage example:
# { setlocal IFS=',' +f -C somevar='Something'
# commands
# if <errorcondition>; then return 1; fi
# morecommands
# endlocal }
#
# There are also a few convenience/readability synonyms:
# setlocal --dosplit = setlocal IFS=" $CCt$CCn"
# setlocal --nosplit = setlocal IFS=''
# setlocal --split='STRING' = setlocal IFS='STRING'
# setlocal --doglob = setlocal +f
# setlocal --noglob = setlocal -f
#
# Nesting { setlocal...endlocal } blocks also works; redefining the temporary
# function while another instance of it is running is not a problem because
# shells create an internal working copy of a function before executing it.
#
# WARNING: Don't pop any of the local variables or settings within the
# block; (at least not unless you locally push them first); this will screw
# up the main stack and 'endlocal' will be unable to restore the global
# state properly.
#
# WARNING: For the same reason, never use 'continue' or 'break' within
# { setlocal..endlocal } unless the *entire* loop is within the setlocal block!
# A few shells (ksh, mksh) disallow this because they don't allow 'break' to
# interrupt the temporary shell function, but on others this will silently
# result in stack corruption and non-restoration of global variables and
# shell options. There is no way to block this.
#
# TODO? implement a key option for push/pop, and use it here to protect
# globals from being accidentially popped within a { setlocal..endlocal } block.
#
# TODO: support local traps.
#
# --- begin license ---
# Copyright (c) 2016 Martijn Dekker <martijn@inlv.org>, Groningen, Netherlands
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# --- end license ---
# The aliases below pass $LINENO on to the handling functions for use in error messages, so they can report
# the line number of the 'setlocal' or 'endlocal' where the error occurred. But on shells with BUG_LNNOALIAS
# (pdksh, mksh) this is pointless as the number is always zero when $LINENO is expanded from an alias.
if not thisshellhas LINENO || thisshellhas BUG_LNNOALIAS; then
_Msh_sL_LINENO="''"
else
_Msh_sL_LINENO='"${LINENO-}"'
fi
# The pair of aliases.
if thisshellhas ANONFUNC; then
# zsh: an anonymous function is very convenient here; anonymous
# functions are basically the native zsh equivalent of setlocal.
alias setlocal='{ () { _Msh_doSetLocal '"${_Msh_sL_LINENO}"
alias endlocal='} "$@"; _Msh_doEndLocal "$?" '"${_Msh_sL_LINENO}; };"
else
if thisshellhas BUG_FNSUBSH; then
if not thisshellhas KSH93FUNC; then
putln "var/setlocal: You're on a shell with BUG_FNSUBSH that is not ksh93! This" \
" is not known to exist and cannot be handled. Please report." 1>&2
return 1
fi
# ksh93: Due to BUG_FNSUBSH, this shell cannot unset or
# redefine a function within a non-forked subshell. 'unset -f' and function
# redefinitions in non-forked subshells are silently ignored without error,
# and the wrong code, i.e. that from the main shell, is
# re-executed! It's better to kill the program than to execute
# the wrong code. (The functions below must be defined using
# the 'function' keyword, or ksh93 will segfault.)
eval 'function _Msh_sL_BUG_FNSUBSH_dummyFn { :; }
function _Msh_sL_ckSub {
unset -f _Msh_sL_BUG_FNSUBSH_dummyFn
if isset -f _Msh_sL_BUG_FNSUBSH_dummyFn; then
die "setlocal: FATAL: Detected use of '\''setlocal'\'' in subshell on ksh93 with BUG_FNSUBSH."
return
fi
function _Msh_sL_BUG_FNSUBSH_dummyFn { :; }
}'
alias setlocal='{ : 1>&-; _Msh_sL_ckSub && _Msh_sL_temp() { _Msh_doSetLocal '"${_Msh_sL_LINENO}"
# ^^^^^^ Make use of a ksh93 quirk: if this is a command substitution subshell, a dummy
# output redirection within it will cause it to be forked, undoing BUG_FNSUBSH.
# It has no effect in the main shell or in non-forked non-cmd.subst. subshells.
else
alias setlocal='{ _Msh_sL_temp() { _Msh_doSetLocal '"${_Msh_sL_LINENO}"
fi
alias endlocal='} || die; _Msh_sL_temp "$@"; _Msh_doEndLocal "$?" '"${_Msh_sL_LINENO}; };"
fi 2>/dev/null
# Internal functions that do the work. Not for direct use.
_Msh_doSetLocal() {
# line number for error message if we die (if shell has $LINENO)
_Msh_sL_LN=$1
shift
unset -v _Msh_sL
# Validation; gather arguments for 'push' in ${_Msh_sL}.
for _Msh_sL_A do
case "${_Msh_sL_A}" in
( --dosplit | --nosplit | --split=* )
_Msh_sL_V='IFS'
;;
( --doglob | --noglob )
_Msh_sL_V='-f'
;;
( [-+]["$ASCIIALNUM"] )
_Msh_sL_V="-${_Msh_sL_A#[-+]}"
;;
( *=* )
_Msh_sL_V=${_Msh_sL_A%%=*}
;;
( * )
_Msh_sL_V=${_Msh_sL_A}
;;
esac
case "${_Msh_sL_V}" in
( -["$ASCIIALNUM"] )
# shell option: ok
;;
( '' | [0123456789]* | *[!"$ASCIIALNUM"_]* | *__[VS]* )
die "setlocal${_Msh_sL_LN:+ (line $_Msh_sL_LN)}: invalid variable name or shell option: ${_Msh_sL_V}" || return
;;
esac
_Msh_sL="${_Msh_sL+${_Msh_sL} }${_Msh_sL_V}"
done
# Push the global values/settings onto the stack.
# (Since our input is now safely validated, abuse 'eval' for
# field splitting so we don't have to bother with $IFS.)
eval "push ${_Msh_sL-} _Msh_sL" || return
# Apply local values/settings.
for _Msh_sL_A do
case "${_Msh_sL_A}" in
( --dosplit )
IFS=" ${CCt}${CCn}"
;;
( --nosplit )
IFS=''
;;
( --split=* )
IFS=${_Msh_sL_A#--split=}
;;
( --doglob )
set +f
;;
( --noglob )
set -f
;;
( [-+]["$ASCIIALNUM"] )
# 'command' disables 'special built-in' properties, incl. exit shell on error,
# except on shells with BUG_CMDSPEXIT
command set "${_Msh_sL_A}" \
|| die "setlocal${_Msh_sL_LN:+ (line $_Msh_sL_LN)}: 'set ${_Msh_sL_A}' failed" || return
;;
( *=* )
eval "${_Msh_sL_A%%=*}=\${_Msh_sL_A#*=}"
;;
( * )
unset -v "${_Msh_sL_A}"
;;
esac
done
unset -v _Msh_sL _Msh_sL_V _Msh_sL_A _Msh_sL_o _Msh_sL_LN
}
_Msh_doEndLocal() {
# Unsetting the temp function makes ksh93 "AJM 93u+ 2012-08-01", the
# latest release version as of 2016, segfault if { setlocal...endlocal }
# blocks are nested.
# So we don't do this:
#unset -f _Msh_sL_temp
pop _Msh_sL || die "endlocal${2:+ (line $2)}: stack corrupted (failed to pop arguments)" || return
if isset _Msh_sL; then
eval "pop ${_Msh_sL}" || die "endlocal${2:+ (line $2)}: stack corrupted (failed to pop globals)" || return
unset -v _Msh_sL
fi
return "$1"
}
| true
|
4b14564465319c85b498408f0b19dccf6745f34f
|
Shell
|
kaaaaassim/CROMi-X_DEODEX_5.4
|
/system/bin/ps3service
|
UTF-8
| 1,014
| 3.3125
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Start PS3 pairing
# Pair a PS3 game-pad over Bluetooth (sixpair), then bounce the Android
# Bluetooth service so the pairing takes effect.
# Check Bluetooth Status
LOG_NAME="BlueZ - PS3 Game-pad"
LOG_TAG="ps3service"
# logi <msg...> — write an info-level entry to the Android log.
logi ()
{
/system/bin/log -t $LOG_TAG -p i "$LOG_NAME $@"
}
/system/xbin/ps3bttest is_enabled > /dev/null
btstatus=$?
logi "getBtStatus(): btstatus = $btstatus"
case $btstatus in
1)
logi "BT is enabled"
/system/bin/sixpair
sleep 2
#Re-start the bluetooth service after pairing
# BUG FIX: the original read "service call bluetooth 5; > /dev/null" — the
# ';' ended the command, so the redirection applied to an empty command and
# the service output was never silenced. Same fix applied below for code 3.
service call bluetooth 5 > /dev/null
btresult=0
# Poll until Bluetooth reports enabled again (ps3bttest exit code 1).
while [ $btresult != 1 ];
do
sleep 1;
/system/xbin/ps3bttest is_enabled > /dev/null
btresult=$?
logi "getBtStatus(): btresult = $btresult"
if [ "$btresult" = "0" ];then
logi "Bluetooth turn off success"
sleep 1;
service call bluetooth 3 > /dev/null
fi
done
;;
*)
logi "BT function is disabled, we do not enable bluetooth PS3 Game-pad"
;;
esac
| true
|
f7420d69a7a9067f937fafa9f173bf7d030ab791
|
Shell
|
kism/dotfiles
|
/_zsh/.zshrc
|
UTF-8
| 3,390
| 3.25
| 3
|
[] |
no_license
|
# zsh Settings -- antigen plugin manager, aliases, PATH and ssh-key loading.
source ~/.antigen/antigen.zsh
# Load the oh-my-zsh's library.
antigen use oh-my-zsh
# Bundles from the default repo (robbyrussell's oh-my-zsh).
antigen bundle git
antigen bundle pip
antigen bundle virtualenv
antigen bundle command-not-found
# Syntax highlighting bundle.
antigen bundle zsh-users/zsh-syntax-highlighting
# fish like completion
# antigen bundle zsh-users/zsh-completions
# antigen bundle zsh-users/zsh-autosuggestions
# Load the theme.
antigen theme kism/zsh-bira-mod
# Tell Antigen that you're done.
antigen apply
# Alias
# please: re-run the previous command (fc -ln -1) under sudo.
alias please='sudo $(fc -ln -1)'
alias sudp='sudo'
alias tmux='tmux -u'
alias sl='ls'
alias nano='vim'
alias bim='echo -e "\033[0;31m\033[0;41mB\033[0mim"'
# screen is deliberately neutered; the trailing '#' comments out any args.
alias screen='echo no #'
alias cgrep='grep --color=always -e "^" -e'
alias youtube-dl='yt-dlp -o "%(upload_date)s %(title)s [%(id)s].%(ext)s"'
alias vim=nvim
alias view="nvim -R"
alias whom=who
# Set editor
export EDITOR="$(which vim)"
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
    PATH="$HOME/.local/bin:$PATH"
fi
# Load up ssh keys into keychain if it is on this system
if type keychain > /dev/null; then
    sshkeylist=('id_rsa' 'id_ed25519')
    # zsh expands an unquoted array to its elements, so this iterates keys.
    for i in $sshkeylist; do
        if [[ -e ~/.ssh/$i ]] ; then
            eval `keychain -q --eval --agents ssh $i`
        fi
    done
fi
function get_mercury_retrograde() {
    # Report whether Mercury is in retrograde, with a local cache refreshed
    # at most every ~10 hours (600 minutes).
    RESULT=""
    RETROGRADETEMPFILE=~/.config/mercuryretrograde
    if ! [ -f $RETROGRADETEMPFILE ]; then
        mkdir -p ~/.config/ > /dev/null
        # Backdate the cache so the very first run always refreshes it.
        touch -a -m -t 197001010000.00 $RETROGRADETEMPFILE
    fi
    if type curl > /dev/null; then
        if [[ $(find "$RETROGRADETEMPFILE" -mmin +600 -print) ]]; then
            # Fix: fetch into a scratch file and move it into place only on
            # success; the original wrote straight into the cache, so a
            # failed request truncated it and the stale answer was lost.
            if curl -s https://mercuryretrogradeapi.com > "${RETROGRADETEMPFILE}.tmp" 2>/dev/null; then
                mv "${RETROGRADETEMPFILE}.tmp" "$RETROGRADETEMPFILE"
            else
                rm -f "${RETROGRADETEMPFILE}.tmp"
                # Refresh the mtime so we don't hammer the API on every prompt.
                touch "$RETROGRADETEMPFILE"
            fi
        fi
        # The API body contains "false" when Mercury is prograde.
        if grep -q false "$RETROGRADETEMPFILE"; then
            RESULT="☿ \033[0;32mPrograde\033[0m"
        else
            RESULT="☿ \033[0;31mRetrograde\033[0m"
        fi
    fi
    echo -e " $RESULT"
}
function get_ssh_keys_loaded() {
    # Count the ssh identities keychain currently holds; prints nothing
    # (and succeeds) when the keychain tool is not installed.
    type keychain > /dev/null || return 0
    keychain -l | grep -v "The agent has no identities." | wc -l | xargs
}
# Keybinds -- escape sequences differ per terminal, hence the urxvt variants.
## ctrl+arrows
bindkey "\e[1;5C" forward-word
bindkey "\e[1;5D" backward-word
### urxvt
bindkey "\eOc" forward-word
bindkey "\eOd" backward-word
## ctrl+delete
bindkey "\e[3;5~" kill-word
### urxvt
bindkey "\e[3^" kill-word
## ctrl+backspace
bindkey '^H' backward-kill-word
## ctrl+shift+delete
bindkey "\e[3;6~" kill-line
### urxvt
bindkey "\e[3@" kill-line
# Absolutely filthy way of checking if we are in windows terminal, I hate this but it works for me
# (WSL kernels report "WSL" in uname -r; emoji spacing differs there.)
SPACING=" "
SPACING2=" "
if [[ $(uname -r) == *WSL* ]]; then
    SPACING=""
    SPACING2=""
fi
# Unbreak ansible on macos
if [[ "$OSTYPE" == darwin* ]]; then
    export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
fi
# Startup welcome message, only if we are in an interactive shell
# (the "\c" in echo -e suppresses the trailing newline of each fragment).
if [[ -o interactive ]]; then
    if test -f /etc/os-release; then
        . /etc/os-release
        echo -e "$PRETTY_NAME, \c"
    elif type sw_vers > /dev/null; then
        echo -e "$(sw_vers | grep -E "ProductName|ProductVersion" | awk '{print $2}' | tr '\n' ' ' | sed 's/.$//'), \c"
    fi
    echo -e "$(uname -s -r), \c"
    echo -e "🗝️$SPACING$(get_ssh_keys_loaded),$SPACING2\c"
    get_mercury_retrograde
fi
| true
|
4ff2b4abfa2082d57cd024e7d85634cbfbd93423
|
Shell
|
molleweide/dorothy
|
/sources/edit.sh
|
UTF-8
| 217
| 2.734375
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/usr/bin/env sh
eval "$(setup-editor-commands)"
# Open the given arguments in the editor appropriate for this session:
# the configured GUI editor locally, the terminal editor over SSH.
# Both editor variables may themselves contain arguments, hence the eval.
edit () {
	if ! is-ssh; then
		eval "${GUI_EDITOR:?GUI_EDITOR must be configured}" "$@"
	else
		eval "${TERMINAL_EDITOR:?TERMINAL_EDITOR must be configured}" "$@"
	fi
}
| true
|
d42a31eab1d5c96bf78614cd667b419c7ed8db12
|
Shell
|
Stackdriver/stackdriver-prometheus-sidecar
|
/bench/run.sh
|
UTF-8
| 1,151
| 3.078125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/usr/bin/env bash
# Benchmark harness: runs Prometheus, a test server, and the sidecar
# side by side, tagging each process's output with a prefix.
set -e

pushd "$( dirname "${BASH_SOURCE[0]}" )"

go build github.com/Stackdriver/stackdriver-prometheus-sidecar/cmd/stackdriver-prometheus-sidecar

# kill 0 signals the whole process group, tearing down all children.
trap 'kill 0' SIGTERM

echo "Starting Prometheus"

prometheus \
  --storage.tsdb.min-block-duration=15m \
  --storage.tsdb.retention=48h 2>&1 | sed -e "s/^/[prometheus] /" &

echo "Starting server"

go run main.go --latency=30ms 2>&1 | sed -e "s/^/[server] /" &

sleep 2

echo "Starting sidecar"

./stackdriver-prometheus-sidecar \
  --config-file="sidecar.yml" \
  --stackdriver.project-id=test \
  --web.listen-address="0.0.0.0:9091" \
  --stackdriver.generic.location="test-cluster" \
  --stackdriver.generic.namespace="test-namespace" \
  --stackdriver.api-address="http://127.0.0.1:9092/?auth=false" \
  2>&1 | sed -e "s/^/[sidecar] /" &

# Optionally run a previously built sidecar for comparison on port 9093.
if [ -n "${SIDECAR_OLD}" ]; then
  echo "Starting old sidecar"

  ${SIDECAR_OLD} \
    --log.level=debug \
    --stackdriver.project-id=test \
    --web.listen-address="0.0.0.0:9093" \
    --stackdriver.debug \
    --stackdriver.api-address="http://127.0.0.1:9092/?auth=false" \
    2>&1 | sed -e "s/^/[sidecar-old] /" &
fi

# Block until all background processes exit.
wait

popd
| true
|
dea236933ad80fdce2be8da32648739b0fc0f7f9
|
Shell
|
second-state/oasis-ssvm-runtime
|
/scripts/check_artifacts.sh
|
UTF-8
| 773
| 3.453125
| 3
|
[
"GPL-3.0-only"
] |
permissive
|
#! /bin/bash
# Verify that the Oasis Core binaries this test suite needs are present
# and executable, resolving their paths from OASIS_CORE_ROOT_PATH.

# Path to Oasis Core root.
OASIS_CORE_ROOT_PATH=${OASIS_CORE_ROOT_PATH:-${WORKDIR}}

# Path to the Oasis node.
OASIS_NODE=${OASIS_NODE:-${OASIS_CORE_ROOT_PATH}/go/oasis-node/oasis-node}
# Path to oasis-net-runner.
OASIS_NET_RUNNER=${OASIS_NET_RUNNER:-${OASIS_CORE_ROOT_PATH}/go/oasis-net-runner/oasis-net-runner}
# Path to the runtime loader.
OASIS_CORE_RUNTIME_LOADER=${OASIS_CORE_RUNTIME_LOADER:-${OASIS_CORE_ROOT_PATH}/target/default/debug/oasis-core-runtime-loader}

# check_executable VARNAME -- exit 1 unless the path stored in the variable
# named VARNAME is executable.  ${!1} is bash indirect expansion: it yields
# the value of the variable whose name is in $1.
function check_executable() {
    if [[ ! -x ${!1} ]]; then
        echo "$1 not found at: '${!1}'. Make sure to set $1 or OASIS_CORE_ROOT_PATH env variable"
        exit 1
    fi
}

check_executable OASIS_NODE
check_executable OASIS_NET_RUNNER
check_executable OASIS_CORE_RUNTIME_LOADER
| true
|
7b50a18b0e1eb7317ccb8628f2474e34de8b97b7
|
Shell
|
n1k0/oh-my-zsh
|
/themes/n1k0.zsh-theme
|
UTF-8
| 757
| 2.953125
| 3
|
[] |
no_license
|
# oh-my-zsh prompt theme: user@host exit-code cwd (git info), then # or $.
# Git segment decoration consumed by git_prompt_info.
ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[green]%} ("
ZSH_THEME_GIT_PROMPT_SUFFIX=")%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="*"
ZSH_THEME_GIT_PROMPT_UNTRACKED="?"
ZSH_THEME_GIT_PROMPT_CLEAN=""

local light_grey='\e[0;37m'
# %(?..) shows "(exit-code) " in bold red only after a failing command.
local return_code="%(?..%{$fg_bold[red]%}(%?%) %{$reset_color%})"

PROMPT=' '
PROMPT=$PROMPT'%{$fg_bold[$light_grey]%}%n%{$reset_color%}'
PROMPT=$PROMPT'%{$fg[white]%}@%{$reset_color%}'
PROMPT=$PROMPT'%{$fg_bold[grey]%}%m%{$reset_color%} '
# Fix: this line previously assigned to the misspelled variable "PROMTP",
# so the arrow and the exit-code indicator never appeared in the prompt.
PROMPT=$PROMPT'→ ${return_code}'
PROMPT=$PROMPT'%{${fg_bold[yellow]}%}%~%{$reset_color%}'
PROMPT=$PROMPT'$(git_prompt_info)
'
if [ "$(whoami)" = "root" ]; then
    PROMPT=$PROMPT'%{${fg[red]}%}#%{${reset_color}%} %b';
else
    PROMPT=$PROMPT'%{${fg[white]}%}\$%{${reset_color}%} %b';
fi
| true
|
ae625737e8d1ab9f6dc4a004a290e416c8b7877c
|
Shell
|
leeonky/Owl
|
/platform/software/packages/401.docker_tools/docker_tools.sh
|
UTF-8
| 952
| 3.765625
| 4
|
[] |
no_license
|
#! /bin/bash
# Docker housekeeping helpers.  "docker_tools.sh retain REPO TAG" removes
# every image of repository REPO except REPO:TAG, stopping and removing
# containers that use the doomed images first.
DOCKER_BIN=docker

# retain_images REPO TAG -- keep only REPO:TAG; stop/rm containers running
# other REPO:* tags, then rmi those images.
retain_images() {
    local image_list="$($DOCKER_BIN images)"
    # All REPO:<tag> combinations except the one to keep.
    local target_images="$(echo "$image_list" | grep "^$1 " | awk '{print $1":"$2}' | grep -v $1:$2)"
    if [ "" != "$target_images" ]; then
        # First pass: stop running containers using the doomed tags.
        local all_containers="$($DOCKER_BIN ps)"
        local target_containers="$(echo "$all_containers" | grep " $1:" | grep -v " $1:$2 "| awk '{print $1}')"
        if [ "" != "$target_containers" ]; then
            echo "$DOCKER_BIN stop $target_containers"
            $DOCKER_BIN stop $target_containers
        fi
        # Second pass: remove the (now stopped) containers; ps -a also
        # catches containers that were already stopped.
        all_containers="$($DOCKER_BIN ps -a)"
        target_containers="$(echo "$all_containers" | grep " $1:" | grep -v " $1:$2 "| awk '{print $1}')"
        if [ "" != "$target_containers" ]; then
            echo "$DOCKER_BIN rm $target_containers"
            $DOCKER_BIN rm $target_containers
        fi
        echo "$DOCKER_BIN rmi $target_images"
        $DOCKER_BIN rmi $target_images
    fi
}

# Dispatch on the first argument; remaining args go to the sub-command.
docker_tool_option=$1
shift 1
case $docker_tool_option in
    retain)
        retain_images "$@"
        ;;
esac
| true
|
0362d495cf66ed00f6eefed3e5e044aef5539c2a
|
Shell
|
hasanalanya/Master_Bash_Shell_Scripting
|
/functions.sh
|
UTF-8
| 278
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Small demo of defining and calling shell functions.

# Print today's date framed by a greeting.
mydate(){
    printf '%s\n' "today is: "
    date
    printf '%s\n' "have a great day!"
}

printf '%s\n' "start here"
mydate
printf '%s\n' "----------------------"

# Greet the two people passed as positional arguments.
hello2(){
    printf 'hello %s\n' "$1"
    printf 'hello also to %s\n' "$2"
}

hello2 "Hasan" "Huseyin"
printf '%s\n' "return value of my function is $?"
printf '%s\n' "move on..."
| true
|
7e3d6ec6810ca399913d294097a79deebd32f044
|
Shell
|
metwork-framework/github_force_common_files
|
/src/_force.sh
|
UTF-8
| 2,259
| 3.59375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Force-sync the "common files" of a metwork repository from the
# cookiecutter templates in RESOURCES_DIR, then commit/merge/push.
#
# Usage: _force.sh REPO-NAME BRANCH INTEGRATION_LEVEL [DEBUG] [RESOURCES_DIR]
set -e

ORG=metwork-framework

if test "${3:-}" = ""; then
    echo "_force.sh REPO-NAME BRANCH INTEGRATION_LEVEL [DEBUG] [RESOURCES_DIR]"
    exit 1
fi
if test "${4:-}" = "DEBUG"; then
    DEBUG=1
else
    DEBUG=0
fi
if test "${5:-}" = ""; then
    RESOURCES_DIR=~/tmp/resources
else
    RESOURCES_DIR="$5"
fi
if ! test -d "${RESOURCES_DIR}"; then
    echo "${RESOURCES_DIR} does not exist"
    exit 1
fi

REPONAME=$1
BRANCH=$2
INTEGRATION_LEVEL=$3

# Must run inside a loaded mfserv plugin environment.
if test "${MFSERV_CURRENT_PLUGIN_NAME:-}" = ""; then
    echo "ERROR: load the plugin environnement before"
    exit 1
fi

# Cookiecutter context for this repository.
# NOTE(review): assumes ~/tmp already exists -- confirm.
cat >~/tmp/force.yaml <<EOF
default_context:
  repo: "${REPONAME}"
  integration_level: "${INTEGRATION_LEVEL}"
EOF

TMPREPO=${TMPDIR:-/tmp}/force_$$
rm -Rf "${TMPREPO}"
mkdir -p "${TMPREPO}"
cd "${TMPREPO}"
git clone "git@github.com:${ORG}/${REPONAME}"
cd "${REPONAME}"
git checkout -b common_files_force --track "origin/${BRANCH}"
REPO_TOPICS=$(metwork_topics.py --json "${ORG}" "${REPONAME}")
export REPO_TOPICS
export REPO_HOME="${TMPREPO}/${REPONAME}"
cookiecutter --no-input --config-file ~/tmp/force.yaml "${RESOURCES_DIR}/cookiecutter"
# Overlay the rendered template onto the working copy.
# Fix: these two lines used "| true" (piping into true) where "|| true" was
# clearly intended; the pipe only masked the exit status by accident.
cp -Rf "_${REPONAME}"/* . || true
cp -Rf "_${REPONAME}"/.* . 2>/dev/null || true
rm -Rf "_${REPONAME}"
# Delete files flagged for removal: first the real files (marker name minus
# the .forcedelete suffix), then the marker files themselves.
# Fix: xargs -r skips running "rm -rf" with no operands -- without it, an
# empty match list makes rm fail and set -e aborts the whole script.
find . -type f -name "*.forcedelete" | sed 's/\.forcedelete$//g' | xargs -r rm -rf
find . -type f -name "*.forcedelete" -exec rm -f {} \;
git add -u
git add --all
# Only commit/push when the sync actually changed something.
N=$(git diff --cached | wc -l)
if test "${N}" -gt 0; then
    if test ${DEBUG} -eq 1; then
        echo "***** DEBUG *****"
        echo "***** DIFF FOR REPO ${REPONAME} (BRANCH: ${BRANCH}, INTEGRATION_LEVEL: ${INTEGRATION_LEVEL}) *****"
        git status
        git diff --cached
        echo
        echo
    else
        git commit -m "chore: sync common files from resources repository"
        git push -u origin -f common_files_force
        SHA=$(git rev-parse HEAD)
        metwork_valid_merge_logic_status.py "${REPONAME}" "${SHA}"
        if test "${BRANCH}" = "master"; then
            git checkout master
        else
            git checkout -b "${BRANCH}" --track "origin/${BRANCH}"
        fi
        git merge common_files_force
        git push -u origin "${BRANCH}"
        git push origin --delete common_files_force
    fi
fi
rm -Rf "${TMPREPO}"
echo "DONE"
| true
|
d614996fc3430724da266631014c4248c0d4bb3d
|
Shell
|
ncliang/kafka-docker-playground
|
/troubleshooting/timeout-while-waiting-for-command-topic/cleanup-queries.sh
|
UTF-8
| 2,029
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Reset a ksqlDB server: terminate every query, then drop every stream and
# table, via the /ksql REST endpoint.
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${DIR}/../../scripts/utils.sh

# https://rmoff.net/2019/03/25/terminate-all-ksql-queries/
log "TERMINATE all queries, if applicable"
# The inner 'foo' deliberately closes and reopens the single-quoted JSON so
# that xargs -Ifoo can splice each query id into the TERMINATE statement.
curl -s -X "POST" "http://localhost:8088/ksql" \
         -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
         -d '{"ksql": "SHOW QUERIES;"}' | \
  jq '.[].queries[].id' | \
  xargs -Ifoo curl -s -X "POST" "http://localhost:8088/ksql" \
           -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
           -d '{"ksql": "TERMINATE 'foo';"}' | jq . > /tmp/out.txt 2>&1
# The API reports failures in the JSON body, not via HTTP status.
if [[ $(cat /tmp/out.txt) =~ "statement_error" ]]
then
     logerror "Cannot terminate all queries, check the errors below:"
     cat /tmp/out.txt
     exit 1
fi

log "DROP all streams, if applicable"
curl -s -X "POST" "http://localhost:8088/ksql" \
         -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
         -d '{"ksql": "SHOW STREAMS;"}' | \
  jq '.[].streams[].name' | \
  xargs -Ifoo curl -s -X "POST" "http://localhost:8088/ksql" \
           -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
           -d '{"ksql": "DROP STREAM 'foo';"}' | jq . > /tmp/out.txt 2>&1
if [[ $(cat /tmp/out.txt) =~ "statement_error" ]]
then
     logerror "Cannot drop all streams, check the errors below:"
     cat /tmp/out.txt
     exit 1
fi

log "DROP all tables, if applicable"
curl -s -X "POST" "http://localhost:8088/ksql" \
         -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
         -d '{"ksql": "SHOW TABLES;"}' | \
  jq '.[].tables[].name' | \
  xargs -Ifoo curl -s -X "POST" "http://localhost:8088/ksql" \
           -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
           -d '{"ksql": "DROP TABLE 'foo';"}' | jq . > /tmp/out.txt 2>&1
if [[ $(cat /tmp/out.txt) =~ "statement_error" ]]
then
     logerror "Cannot drop all tables, check the errors below:"
     cat /tmp/out.txt
     exit 1
fi
| true
|
a8ace44ccd6402aba4a6158c0468a63982fad623
|
Shell
|
drewmoseley/meta-mender
|
/meta-mender-raspberrypi/recipes-mender/update-firmware-state-script/files/ArtifactInstall_Leave_50.in
|
UTF-8
| 921
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Mender state script: after an artifact is installed, copy the Raspberry Pi
# firmware files from the just-written (inactive) rootfs partition onto the
# boot (/uboot) partition.  The @@...@@ tokens appear to be build-time
# placeholders substituted when this .in template is rendered.
set -e

# Lazily unmount the inactive partition on any exit path.
trap_exit() {
    umount -l /tmp/inactive_part
}

trap trap_exit EXIT

MENDER_ROOTFS_PART_A="@@MENDER_ROOTFS_PART_A@@"
MENDER_ROOTFS_PART_B="@@MENDER_ROOTFS_PART_B@@"

# Whichever rootfs partition is NOT currently mounted is the one that was
# just written; that is where the new firmware lives.
if mount | grep ${MENDER_ROOTFS_PART_A}; then
    inactive_part="${MENDER_ROOTFS_PART_B}"
else
    inactive_part="${MENDER_ROOTFS_PART_A}"
fi

mkdir -p /tmp/inactive_part
mount -o ro ${inactive_part} /tmp/inactive_part

# These are dangerous operations and if they fail (partial copy) it might
# render the device unusable.

# Copy 'core' firmware files first
find /tmp/inactive_part/boot/firmware/ -maxdepth 1 -type f | xargs -I {} cp -v {} /uboot/

# Synchronize before trying to copy the rest of the files
sync

# Copy overlays
find /tmp/inactive_part/boot/firmware/overlays/ -maxdepth 1 -type f | xargs -I {} cp -v {} /uboot/overlays/

# Synchronize to ensure all files are written before leaving the ArtifactInstall state
sync

exit 0
| true
|
fd545b6cffafa7567b9f6bc7515f62ca46e1e45f
|
Shell
|
AlexanderP/deb-script
|
/tesseractbuild.sh
|
UTF-8
| 16,392
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x
# Coloured status helpers: bright green for progress, bright red for trouble.
# %b interprets backslash escapes in the argument, matching echo -e.
green() { printf '\e[1;32m%b\e[0;39;49m\n' "$1"; }
red() { printf '\e[1;31m%b\e[0;39;49m\n' "$1"; }
# Print a red error message and abort the whole script.
error(){
    red "Error! $1"
    exit 1
}
#-------------------------------- Variables -----------------------------
export DEBFULLNAME="Ivan Petrov" # gpg name
export DEBEMAIL="example@mail.ru" # gpg mail
PPANAME=test #name ppa in dput.cf
DIST_PPA="trusty xenial artful bionic" #Distributions for assembly in PPA and pbuilder
DIST_DEB="jessie stretch buster sid" #Distributions for assembly in PPA and pbuilder
PDIR=${HOME}/pbuilder #Path pbulder dir
DIR1="${HOME}/tesseract-build" #Build dir

# Command-line flags; each one just toggles a mode variable read later.
while getopts "d:uptlj:c" Option
do
case $Option in
    d) DISTRIB=$OPTARG ;; #Specifying build distributions
    u) UPDATEI=1 ;; #pbulder update
    p) PPA_BUILD=1 ;; #Build in PPA.
    l) UBUNTUBUILD=1 ;; #Build ubuntu dist in pbuilder
    j) PARALLEL2=$OPTARG ;; #number of threads assembly
    c) CREATE=1 ;; #Creating the pbuilder environment
esac
done
shift $((OPTIND - 1))

# Derived working directories under the build root.
DIR=$DIR1/reps
ORIGDIR=${DIR}/OrigSource
BUILDDIR=$DIR1/build
DEBDIR=$DIR1/deb-result
SOURCEDIR=$DEBDIR/source
LOGDIR=$DIR1/log
LOGFILE=$LOGDIR/log
LOGFILE1=$LOGDIR/logp
DSCDIR=$DIR/dsc-file
LINTIANDIR=$DIR1/lintianlog

test -d $BUILDDIR || mkdir -p $BUILDDIR
test -d $DIR || mkdir -p $DIR
test -d $LOGDIR || mkdir -p $LOGDIR
test -d $DEBDIR || mkdir -p $DEBDIR
test -d $SOURCEDIR || mkdir -p $SOURCEDIR
test -d $ORIGDIR || mkdir -p $ORIGDIR
test -d $LINTIANDIR || mkdir -p $LINTIANDIR

# A local pbuilder work dir is only needed when not uploading to a PPA.
if [ -z $PPA_BUILD ]
then
    test -d $PDIR || error "path to the working directory of pbuilder is incorrectly set"
fi
touch $LOGFILE
sourceclean(){
    # Strip VCS metadata and Python bytecode from the tree rooted at the
    # current directory, before it is packed into the .orig tarball.
    # -depth removes directories children-first so find never descends
    # into an already-deleted directory.
    # Fix: the flag was duplicated ("-depth -depth") on every line; once
    # is enough and behaviour is identical.
    find . -depth -type d -name .svn -exec rm -rf \{\} \;
    find . -depth -type d -name .git -exec rm -rf \{\} \;
    find . -depth -type d -name .hg -exec rm -rf \{\} \;
    find . -depth -type d -name .bzr -exec rm -rf \{\} \;
    find . -depth -type f -name .hgtags -exec rm -rf \{\} \;
    find . -depth -type f -iname "*.pyc" -exec rm -rf \{\} \;
}
createorighqgit(){
    # Build (or reuse) the orig tarball via testorig, overlay the debian/
    # packaging dir, then synthesise a changelog entry describing the git
    # state and the commits since the previously packaged commit.
    testorig
    cp -r ${DIR}/${PKG_NAME}-debian/debian $BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian
    # Only when a fresh orig tarball was created: ship the generated
    # ChangeLog as a quilt patch for the tesseract-* packages.
    if [ ! -f "${ORIGDIR}/${ORIGTAR}" ]
    then
        case ${PKG_NAME} in
            tesseract-*)
                mkdir "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian/patches"
                cp -f ${DIFFTMP} "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian/patches/ChangeLog.diff"
                echo ChangeLog.diff > "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian/patches/series"
                ;;
        esac
    fi
    cd $BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/
    # Commit hash recorded by the previous package build, and its line
    # number in the freshly generated short log (GITTMPFILE).
    OLDCOMMIT=$(cat debian/changelog |grep '* Commit' | sed 1q |awk -F ': ' '{print $2}')
    OLDCOMMIT7=$(echo $OLDCOMMIT | cut -c 1-7)
    LINE=$(cat $GITTMPFILE | grep -n $OLDCOMMIT7 | sed 1q |awk -F ':' '{print $1}')
    dch -v "${VERSION}${SVNGITBZR}${dat}-1" -D "unstable" --force-distribution "Compile"
    dch -a "URL: $(echo $GITREPS | sed 's/.*git\:/git\:/g' | sed 's/.*http/http/g' | awk '{print $1}')"
    dch -a "Branch: $branch"
    dch -a "Commit: $commit"
    dch -a "Date: $datct"
    # Append every commit subject newer than OLDCOMMIT to the changelog,
    # consuming GITTMPFILE one line at a time.
    if [ ! -z $OLDCOMMIT ]
    then
        dch -a "git changelog:"
        var0=1
        LIMIT=$(($LINE))
        while [ "$var0" -lt "$LIMIT" ]
        do
            dch -a "    $(cat $GITTMPFILE | sed 1q)"
            sed -i '1 d' $GITTMPFILE
            var0=$(($var0+1))
        done
    fi
}
testorig(){
    # Ensure an orig tarball and a matching unpacked source tree exist in
    # BUILDDIR.  Reuses a cached tarball from ORIGDIR when present,
    # otherwise exports the sources (git clone or rsync) and packs one.
    ORIGTAR=${PKG_NAME}_${VERSION1}${SVNGITBZR}${dat}.orig.tar.xz
    if [ -f "${ORIGDIR}/${ORIGTAR}" ]
    then
        green "orig.tar.xz уже создан."
        rm -fr $BUILDDIR/${PKG_NAME}-${VERSION1}*
        rm -fr $BUILDDIR/${PKG_NAME}_${VERSION1}*
        cp "${ORIGDIR}/${ORIGTAR}" ${BUILDDIR}/
        cd ${BUILDDIR}
        tar xJf "${BUILDDIR}/${ORIGTAR}"
    else
        cd $DIR
        rm -fr $BUILDDIR/${PKG_NAME}-${VERSION1}*
        rm -fr $BUILDDIR/${PKG_NAME}_${VERSION1}*
        # Export strategy depends on the package.
        case ${PKG_NAME} in
            aegisub|tesseract|mypaint|libmypaint|audacity)
                cd "$BUILDDIR" && git clone --depth=10 "${GITREPS}" "${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}"
                ;;
            tesseract-*)
                # Copy the already-cloned repo, dropping EXCLUDE-listed
                # paths, and bundle the generated git ChangeLog plus a
                # quilt patch (DIFFTMP) that adds it.
                rsync -rpav -c --delete-during --progress --exclude-from ${EXCLUDE} ${PKG_NAME}/ "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}"
                cp "${GITCLTMPFILE}" "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/ChangeLog"
                DIFFTMP=$(mktemp)
                cd "$BUILDDIR"
                cat > ${DIFFTMP} << EOF
Description: Add git ChangeLog
Author: ${DEBFULLNAME} ${DEBEMAIL}
Last-Update: 2018-02-17
$(diff -Naur /dev/null "${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/ChangeLog")
EOF
                ;;
        esac
        test -d "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian" && rm -fr "$BUILDDIR/${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}/debian"
        # tesseract keeps its VCS metadata in the tarball on purpose.
        case ${PKG_NAME} in
            tesseract)
                echo "sourceclean disabled"
                ;;
            *)
                sourceclean
                ;;
        esac
        tar -cpf "${PKG_NAME}_${VERSION1}${SVNGITBZR}${dat}.orig.tar" "${PKG_NAME}-${VERSION1}${SVNGITBZR}${dat}"
        xz -9 "${PKG_NAME}_${VERSION1}${SVNGITBZR}${dat}.orig.tar"
    fi
}
gitupdate(){
    # Clone (or pull) PKG_NAME from GITREPS into DIR, then capture git
    # metadata used elsewhere: dat (commit-count + abbrev hash used in the
    # version string), branch, commit, commit date, tag and a short log.
    if [ -d "$DIR/${PKG_NAME}/.git" ]
    then
        cd "$DIR/${PKG_NAME}/" || error
        git pull || error
    else
        cd "$DIR" || error
        git clone $GITREPS ${PKG_NAME} || error
        cd "$DIR/${PKG_NAME}/"
    fi
    abbrevcommit=$(git log -1 --abbrev-commit | grep -i "^commit" | awk '{print $2}')
    numcommit=$(git log | grep "^Date:" | wc -l)
    dat="${numcommit}-${abbrevcommit}"
    # tesseract-* packages additionally get a full generated ChangeLog.
    case ${PKG_NAME} in
        tesseract-*)
            GITCLTMPFILE=$(mktemp)
            cat >> ${GITCLTMPFILE} << EOF
$(git log --date="short" --no-merges --pretty=format:"%cd - %s (commit: %h)")
EOF
            ;;
    esac
    GITTMPFILE=$(mktemp)
    branch=$(git branch | grep "\*" | sed 's/\* //g')
    commit=$(git log -1 | grep -i "^commit" | awk '{print $2}')
    datct=$(git log -n 1 --format=%ct)
    tag=$(git describe --tags --dirty)
    # Last 1000 commit subjects, consumed later by createorighqgit.
    git log -1000 --pretty=format:"%h - %s" > $GITTMPFILE
}
# build_it DIST -- run pdebuild for amd64 in the pbuilder chroot for DIST,
# classify the result from the build log, run lintian on the .changes file,
# rename/move the artifacts and append a status line to LOGFILE.
build_it () {
    local OLDDIR=`pwd`
    local SOURCE=$(dpkg-parsechangelog | awk '/^Source: / {print $2}')
    local a=$(date +%s)
    local DIST=$1
    local ARCH="amd64"
    local tmplogfile="/tmp/$SOURCE-$DIST-$ARCH-$(date +%Y%m%d-%s).log"
    local tmplintianfile="$LINTIANDIR/$SOURCE-$DIST-$ARCH.lintian"
    DIST=$1 ARCH="amd64" pdebuild 2>&1 | tee $tmplogfile
    # Failure classification is scraped from the captured build log.
    local ext=$(cat $tmplogfile | grep "Failed autobuilding" | wc -l)
    local ext2=$(cat $tmplogfile | grep "FAILED" | wc -l)
    local ext3=$(cat $tmplogfile | grep "pbuilder-satisfydepends failed" | wc -l)
    local b=$(date +%s)
    local time=$((b-a))
    cd $PDIR/${DIST}-${ARCH}/result || error
    test -f ${ORIGTAR} || cp ${BUILDDIR}/${ORIGTAR} .
    test -f $SOURCE*.changes && lintian -IE --pedantic $SOURCE*.changes 2>&1 | sort -u > $tmplintianfile
    if [ -f $tmplintianfile ]
    then
        local ERROR=$(cat $tmplintianfile | grep -i "^E:" | grep -v lzma | wc -l)
        local WARNING=$(cat $tmplintianfile | grep -i "^W:" | grep -v lzma | wc -l)
    fi
    if [[ $ext != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    elif [[ $ext2 != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Patch Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Patch Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    elif [[ $ext3 != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Dependency Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Dependency Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    else
        green "$(date +'%Y.%m.%d %H:%M:%S') - Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time W:${WARNING} E:${ERROR}"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time W:${WARNING} E:${ERROR}" >> $LOGFILE
        echo "${SOURCE} ${DIST} ${PARALLEL} ${ARCH} ${time}" >> $LOGFILE1
        ext4=0
    fi
    # Tag the artifacts with the distribution name before collecting them.
    find . -name "*.deb" -exec rename "s/${ARCH}.deb/${DIST}_${ARCH}.deb/g" \{\} \;
    find . -name "*.deb" -exec rename "s/all.deb/${DIST}_all.deb/g" \{\} \;
    if [[ $ext4 = "0" ]]
    then
        mv *.deb $DEBDIR
        cp *.orig*.tar.* $SOURCEDIR
        mv *.debian.tar.* $SOURCEDIR
        mv *.dsc $SOURCEDIR
        test -f *.diff.* && mv *.diff.* $SOURCEDIR
    fi
    if [ ! -f "${ORIGDIR}/${ORIGTAR}" ]
    then
        mv ${PKG_NAME}_*.orig*.tar.* $ORIGDIR
    fi
    cp $tmplogfile $LOGDIR
    rm -rf *
    cd $OLDDIR
}
# build_it_32 DIST -- near-duplicate of build_it for the i386 chroot;
# pdebuild runs under linux32 so the build sees an i386 personality.
build_it_32 () {
    local OLDDIR=`pwd`
    local SOURCE=$(dpkg-parsechangelog | awk '/^Source: / {print $2}')
    local a=$(date +%s)
    local DIST=$1
    local ARCH="i386"
    local tmplogfile="/tmp/$SOURCE-$DIST-$ARCH-$(date +%Y%m%d-%s).log"
    local tmplintianfile="$LINTIANDIR/$SOURCE-$DIST-$ARCH.lintian"
    DIST=$1 ARCH="i386" linux32 pdebuild 2>&1 | tee $tmplogfile
    local ext=$(cat $tmplogfile | grep "Failed autobuilding" | wc -l)
    local ext2=$(cat $tmplogfile | grep "FAILED" | wc -l)
    local ext3=$(cat $tmplogfile | grep "pbuilder-satisfydepends failed" | wc -l)
    local b=$(date +%s)
    local time=$((b-a))
    cd $PDIR/${DIST}-${ARCH}/result || error
    test -f ${ORIGTAR} || cp ${BUILDDIR}/${ORIGTAR} .
    test -f $SOURCE*.changes && lintian -IE --pedantic $SOURCE*.changes 2>&1 | sort -u > $tmplintianfile
    if [ -f $tmplintianfile ]
    then
        local ERROR=$(cat $tmplintianfile | grep -i "^E:" | grep -v lzma | wc -l)
        local WARNING=$(cat $tmplintianfile | grep -i "^W:" | grep -v lzma | wc -l)
    fi
    if [[ $ext != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    elif [[ $ext2 != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Patch Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Patch Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    elif [[ $ext3 != "0" ]]
    then
        red "$(date +'%Y.%m.%d %H:%M:%S') - Dependency Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Dependency Error. Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time" >> $LOGFILE
        ext4=1
    else
        green "$(date +'%Y.%m.%d %H:%M:%S') - Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time W:${WARNING} E:${ERROR}"
        echo "$(date +'%Y.%m.%d %H:%M:%S') - Package: $SOURCE Distribution: ${DIST} Architecture: ${ARCH} Build time: $time W:${WARNING} E:${ERROR}" >> $LOGFILE
        echo "${SOURCE} ${DIST} ${PARALLEL} ${ARCH} ${time}" >> $LOGFILE1
        ext4=0
    fi
    find . -name "*.deb" -exec rename "s/${ARCH}.deb/${DIST}_${ARCH}.deb/g" \{\} \;
    find . -name "*.deb" -exec rename "s/all.deb/${DIST}_all.deb/g" \{\} \;
    if [[ $ext4 = "0" ]]
    then
        mv *.deb $DEBDIR
        cp *.orig*.tar.* $SOURCEDIR
        mv *.debian.tar.* $SOURCEDIR
        mv *.dsc $SOURCEDIR
        test -f *.diff.* && mv *.diff.* $SOURCEDIR
    fi
    if [ ! -f "${ORIGDIR}/${ORIGTAR}" ]
    then
        mv ${PKG_NAME}_*.orig*.tar.* $ORIGDIR
    fi
    cp $tmplogfile $LOGDIR
    rm -rf *
    cd $OLDDIR
}
build(){
    # Dispatch: upload sources to the PPA when -p was given, otherwise
    # build every selected distribution locally in pbuilder.
    if [ -n "${PPA_BUILD:-}" ]; then
        ppa_pkg
    else
        parallelbuild
        local d
        for d in ${DISTRIBUTIONS}; do
            amd64i386 "${d}"
        done
    fi
}
amd64i386(){
    # Build distribution $1: always i386; additionally amd64 when
    # debian/control declares arch-dependent ("any") packages.  The &&
    # means the amd64 build only runs if the i386 build succeeded.
    cd ${BUILDDIR}
    cd ${PKG_NAME}-${VERSION1}*
    if [ $(cat debian/control | grep "^Architecture" | grep "any" | sort -u | wc -l) -eq 1 ]
    then
        build_it_32 $1 && build_it $1
    else
        build_it_32 $1
    fi
}
parallelbuild(){
    # Export DEB_BUILD_OPTIONS with the parallelism requested via -j,
    # defaulting to two build jobs when no -j flag was given.
    if [ -n "${PARALLEL2:-}" ]; then
        PARALLEL=${PARALLEL2}
        DEBFLAGS="parallel=${PARALLEL}"
    else
        DEBFLAGS="parallel=2"
    fi
    export DEB_BUILD_OPTIONS="$DEBFLAGS"
}
dscgit(){
    # Refresh (clone or pull) both Debian packaging repositories.
    # Note: gitupdate reads the PKG_NAME/GITREPS globals set here, and
    # they keep the values of the last iteration afterwards.
    local repo
    for repo in tesseract-debian tesseract-lang-debian; do
        PKG_NAME=${repo}
        GITREPS="https://github.com/AlexanderP/${repo}.git"
        gitupdate
    done
}
dist(){
    # Resolve the distribution list: -d overrides, otherwise the Debian
    # set; -l additionally appends the Ubuntu/PPA series.
    DISTRIBUTIONS="${DISTRIB:-${DIST_DEB}}"
    if [ -n "$UBUNTUBUILD" ]; then
        DISTRIBUTIONS="$DISTRIBUTIONS ${DIST_PPA}"
    fi
    green "$(date +'%Y.%m.%d %H:%M:%S') - Build the package $PKG_NAME for distributions $DISTRIBUTIONS"
}
# update_it -- refresh every pbuilder chroot (all DIST x i386/amd64 pairs,
# or only the -d selection), logging each refresh to LOGFILE.
update_it () {
    if [ -z "$DISTRIB" ]
    then
        DISTRIBUTIONS="${DIST_DEB} ${DIST_PPA}"
    else
        DISTRIBUTIONS="$DISTRIB"
    fi
    for i in $DISTRIBUTIONS; do
        for j in i386 amd64; do
            # DIST/ARCH are read by ~/.pbuilderrc to pick the chroot.
            export DIST=$i
            export ARCH=$j
            sudo -E pbuilder --update --override-config --configfile ~/.pbuilderrc
            echo "$(date +'%Y.%m.%d %H:%M:%S') - Обновление - $i - $j" >> $LOGFILE
        done
    done
}
# create_it -- same loop as update_it, but creates the chroots from scratch.
create_it () {
    if [ -z "$DISTRIB" ]
    then
        DISTRIBUTIONS="${DIST_DEB} ${DIST_PPA}"
    else
        DISTRIBUTIONS="$DISTRIB"
    fi
    for i in $DISTRIBUTIONS; do
        for j in i386 amd64; do
            export DIST=$i
            export ARCH=$j
            sudo -E pbuilder create --configfile ~/.pbuilderrc
            echo "$(date +'%Y.%m.%d %H:%M:%S') - Создан - $i - $j" >> $LOGFILE
        done
    done
}
# tesseractgit -- full pipeline for the tesseract engine snapshot package:
# set package metadata, pull git, create orig tarball + changelog, build.
tesseractgit(){
    PKG_NAME=tesseract
    SVNGITBZR="~git"
    GITREPS="git://github.com/tesseract-ocr/tesseract.git"
    VERSION='4.00'
    DSC_NAME=${PKG_NAME}
    VERSION1=${VERSION}
    dist
    gitupdate
    createorighqgit
    build
}
# tesseractlanggit -- same pipeline for the language-data package; uses an
# rsync exclude file so .git is dropped from the exported tree.
tesseractlanggit(){
    PKG_NAME=tesseract-lang
    SVNGITBZR="~git"
    GITREPS="https://github.com/tesseract-ocr/tessdata_fast.git"
    VERSION='4.00'
    EXCLUDE=/tmp/${PKG_NAME}_exlude.txt
    DSC_NAME=${PKG_NAME}
    VERSION1=${VERSION}
    cat > ${EXCLUDE} << EOF
.git
EOF
    dist
    gitupdate
    createorighqgit
    build
    unset EXCLUDE
}
dist_ppa(){
    # Resolve the PPA target series: -d overrides the default list.
    DIST="${DISTRIB:-${DIST_PPA}}"
    green "Сборка пакета $PKG_NAME под дистрибутивы $DIST"
}
# dchppa_pkg -- for each PPA series, restore the pristine changelog, add a
# backport entry versioned ...ppa1~<series>1 and build a source package.
# SOURCEUP toggles after the first series so only the first upload includes
# the full source (-sa); later ones ship just the diff (-sd).
dchppa_pkg(){
    for i in ${DIST_PPA}
    do
        cp -f ${TMPFILE} debian/changelog
        dch -b --force-distribution --distribution "$i" -v "${NEW_VER}ppa1~${i}1" \
        "Automated backport upload; no source changes."
        [ -z $(echo $SOURCEUP | grep YES) ] && debuild --no-lintian -S -d -sa
        [ -z $(echo $SOURCEUP | grep YES) ] || debuild --no-lintian -S -d -sd
        SOURCEUP=YES
    done
}
# ppa_pkg -- build source packages for every PPA series and dput them,
# then restore the original changelog and stash the orig tarball.
ppa_pkg(){
    TMPFILE=$(mktemp)
    NEW_VER=$(dpkg-parsechangelog | awk '/^Version: / {print $2}')
    PKG_NAME=$(dpkg-parsechangelog | awk '/^Source: / {print $2}')
    # Keep a pristine copy of the changelog to restore between series.
    cp debian/changelog ${TMPFILE}
    if [ -z "${DIST_PPA}" ]
    then
        dist_ppa
        DIST_PPA=${DIST}
    fi
    dchppa_pkg
    unset SOURCEUP
    for i in ${DIST_PPA}
    do
        dput ${PPANAME} ../${PKG_NAME}_*${i}1_source.changes
        sleep 3
    done
    cp -f ${TMPFILE} debian/changelog
    unset DIST_PPA
    # Cache the orig tarball if we do not have it yet.
    if [ ! -f "${ORIGDIR}/${PKG_NAME}_${VERSION1}${SVNGITBZR}${dat}.orig.tar.xz" ]
    then
        cp ../${PKG_NAME}_*.orig*.tar.* $ORIGDIR
    fi
}
# Top-level dispatch: -u refreshes the chroots, -c creates them; either
# exits immediately.  Otherwise refresh the packaging repos (the actual
# package pipelines are currently commented out).
if [ ! -z "$UPDATEI" ]
then
    update_it
    exit 0
fi
if [ ! -z "$CREATE" ]
then
    create_it
    exit 0
fi
dscgit
#tesseractgit
#tesseractlanggit
| true
|
6bacb3e321df144d319d41f1c4c865f3ec458f7c
|
Shell
|
an3ol/caravel
|
/.travisCI/lvs/maglef-lvs-check.sh
|
UTF-8
| 1,580
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
# CI check: run the abstract (maglef) LVS inside the OpenLane container and
# fail (exit 2) unless the netgen summary reports zero total errors.
export IMAGE_NAME=efabless/openlane:$OPENLANE_TAG
export CARAVEL_PATH=$(pwd)
cd ../
export PDK_ROOT=$(pwd)/pdks
cd $CARAVEL_PATH
export PDKPATH=$PDK_ROOT/sky130A
make uncompress
# MAGLEF LVS
echo "Running Abstract (maglef) LVS:"
# NOTE(review): "docker run -it" asks for a TTY; in some CI environments
# this fails with "the input device is not a TTY" -- confirm it works here.
docker run -it -v $CARAVEL_PATH:$CARAVEL_PATH -e CARAVEL_PATH=$CARAVEL_PATH -v $PDK_ROOT:$PDK_ROOT -e PDK_ROOT=$PDK_ROOT -u $(id -u $USER):$(id -g $USER) $IMAGE_NAME bash -c "cd $CARAVEL_PATH; make lvs-maglef-caravel"
lvs_report=$CARAVEL_PATH/spi/lvs/tmp/caravel.maglef.lvs.summary.log
if [ -f $lvs_report ]; then
    # Extract the digits from the last "Total errors =" line; an absent
    # match is treated as zero errors.
    lvs_total_errors=$(grep "Total errors =" $lvs_report -s | tail -1 | sed -r 's/[^0-9]*//g')
    if ! [[ $lvs_total_errors ]]; then lvs_total_errors=0; fi
else
    # No summary file means netgen itself did not run to completion.
    echo "lvs check failed due to netgen failure";
    exit 2;
fi
echo "Maglef LVS summary:"
cat $lvs_report
echo "Total Count: $lvs_total_errors"
if [[ $lvs_total_errors -eq 0 ]]; then exit 0; fi
exit 2
| true
|
576ffbb6cfdd7e2aa6acb2d593c89d4aaf644c9c
|
Shell
|
digideskio/rapp-platform-scripts
|
/setup/2_ros_setup.sh
|
UTF-8
| 1,944
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -i
##
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Authors: Manos Tsardoulias
# Contact: etsardou@iti.gr
##
##
# Installation of ros-indigo-desktop.
# Adds the ROS apt repository and signing key, installs ros-indigo-desktop
# plus a few extra packages, initialises rosdep, and wires the ROS
# environment into ~/.bashrc.  Requires sudo rights.
##
# Presumably defines redirect_all, used below to silence command output --
# confirm against redirect_output.sh.
source redirect_output.sh
# Setup sources list
echo -e "\e[1m\e[103m\e[31m [RAPP] ROS - Setup sources list \e[0m"
sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu trusty main" > \
/etc/apt/sources.list.d/ros-latest.list'
# Setup keys
echo -e "\e[1m\e[103m\e[31m [RAPP] ROS - Setup keys \e[0m"
wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -O - 2>/dev/null | \
sudo apt-key add - &> /dev/null
# Installation
echo -e "\e[1m\e[103m\e[31m [RAPP] ROS - Installing \e[0m"
redirect_all sudo apt-get update
redirect_all sudo apt-get install -y ros-indigo-desktop
# Initialize rosdep
echo -e "\e[1m\e[103m\e[31m [RAPP] ROS - Initializing rosdep \e[0m"
redirect_all sudo rosdep init
redirect_all rosdep update
# Setup environment
echo -e "\e[1m\e[103m\e[31m [RAPP] Setup ROS environment \e[0m"
append="source /opt/ros/indigo/setup.bash --extend"
# Append the source line to ~/.bashrc only if it is not already present.
grep -q "${append}" ~/.bashrc || echo -e \
"\n# Load ROS environment variables\n${append}" \
>> ~/.bashrc
# Install rosbridge_server. This will allow third party clients (web clients)
# to connect to ROS.
redirect_all sudo apt-get install -y ros-indigo-rosbridge-server
redirect_all sudo apt-get install -y ros-indigo-global-planner
redirect_all sudo apt-get install -y ros-indigo-map-server
| true
|
19c8d5e59f58b38908c30f67f8dcee1ad41cd51f
|
Shell
|
Samraksh/Tuscarora
|
/TuscaroraFW/Scripts/Simulations/runSim.sh
|
UTF-8
| 514
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Expand simulation parameter combinations into runOrDebug.sh command lines,
# append them to the jobs file, then start runJobs.sh in the background to
# consume that file.
set -x
echo "Running $0 $@"
jobsdir=~/Samraksh/TuscaroraPrivate/TuscaroraFW/
jobsfile=jobs
# Unique per-invocation suffix: timestamp with '-' and ':' stripped, plus user.
suffix="-$(date +%F_%T | sed s/[-:]//g;)-$(whoami)"
echo "suffix = $suffix"
# iterate.sh presumably expands the arguments into one line per parameter
# combination -- confirm against iterate.sh.  `parallel -kq` keeps input
# order (-k) and shell-quotes the command (-q); the generated command lines
# are appended to the jobs file rather than executed here.
./iterate.sh echo "$@" | \
parallel -kq echo "cd $jobsdir; ./runOrDebug.sh --suffix $suffix {}" | \
cat - >> $jobsfile
cat $jobsfile
# to run ssh in background, see http://stackoverflow.com/a/2831449/268040
# ssh -n -f c2e@l1 "cd $jobsdir; nohup ./runJobs.sh >> runJobs.out 2>&1 &"
# Consume the jobs file locally, logging to runJobs.out.
./runJobs.sh >> runJobs.out 2>&1 &
| true
|
4b705cf5b7f2e048bedd8b88af05c791e43564e5
|
Shell
|
cloudnautique/stats-collection
|
/scripts/release
|
UTF-8
| 665
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Stage release artifacts into build/ and build the cloudnautique/rldc
# Docker image for a given release version.
# Usage: scripts/release <REL_VER>
# Expects the python package to have been built into dist/ beforehand.
set -x
cd $(dirname $0)/..
REL_VER=$1
if [ -z "${REL_VER}" ]; then
echo "Need a RELEASE Version of RLDataCollectors" 1>&2
exit 1
fi
# Start from a clean build/ staging directory.
if [ -e build ]; then
rm -rf build
fi
mkdir -p build
cp tools/release/Dockerfile.tmpl ./build/
cp tools/bin/wrap_cron ./build/
cp scripts/bootstrap ./build
cp -r crontabs build
cp -r services build
if [ -e dist ]; then
cp -r dist *egg-info ./build/
# Substitute the release version into the Dockerfile template.
sed -e "s/VERSION/${REL_VER}/g" ./build/Dockerfile.tmpl > ./build/Dockerfile
else
echo "Python package rldc-${REL_VER}*.tar.gz not found" 1>&2
exit 1
fi
cd build
# Tag the image with the current git branch name.
docker build --rm -t cloudnautique/rldc:$(git rev-parse --abbrev-ref HEAD) .
| true
|
b82dee5b8925c6bd2c966ddd7663428f03f3c6f8
|
Shell
|
amyq7526110/ebook
|
/chapter12/guidang.sh
|
UTF-8
| 2,868
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Archiving commands (notes; this file contains no executable statements).
#
# tar
#   The standard Unix archiving tool.  Originally a tape archive program,
#   it has since been developed into a general-purpose packager that can
#   handle archives of all types on all kinds of devices, including tape
#   devices, regular files, and even stdout (see Example 3-4).  GNU tar
#   accepts various compression filters, e.g.
#   `tar czvf archive_name.tar.gz *`, can process archives recursively,
#   and can gzip-compress all files under a directory -- except the dot
#   files in the current directory ($PWD).
#
# Some useful tar options:
#   1.  -c       create (a new archive)
#   2.  -x       extract files (from an existing archive)
#   3.  --delete delete files (from an existing archive)
#       NOTE: this option cannot be used on tape devices.
#   4.  -r       append files to the end of an existing archive
#   5.  -A       append tar files to the end of an existing archive
#   6.  -t       list the contents of an existing archive
#   7.  -u       update an archive
#   8.  -d       compare an archive against the specified file system
#   9.  -z       gzip-compress the archive
#                (compress or decompress, depending on -c or -x)
#   10. -j       bzip2-compress the archive
#
#   NOTE: recovering data from a corrupted gzip-compressed tar file is
#   very difficult, so always keep multiple backups when archiving
#   important files.
#
# shar
#   Shell archiving tool.  All files in a shell archive are uncompressed,
#   and the archive is essentially a shell script starting with #!/bin/sh
#   that contains all the commands needed to unpack itself.  Shar archives
#   are still used in Internet newsgroups; otherwise shar has long been
#   replaced by tar/gzip.  The unshar command unpacks shar archives.
#
# ar
#   Tool for creating and manipulating archives, mainly used when
#   packaging binary object files into libraries.
#
# rpm
#   The Red Hat Package Manager.  rpm provides a way to package source or
#   binary files; beyond that it includes an install command and verifies
#   package integrity.  A simple `rpm -i package_name.rpm`
#   command also takes many other options.
#   NOTE: `rpm -qf` lists which package a file belongs to.
#   NOTE: `rpm -qa` lists all rpm packages installed on a given system.
#   bash$ rpm -qa docbook-utils
#   docbook-utils-0.6.9-2
#   bash$ rpm -qa docbook | grep docbook
#
# cpio
#   This special-purpose archive copy command (copy input/output) is now
#   rarely seen, having been superseded by tar/gzip.  It is still used in
#   a few special situations, such as copying a directory tree.
| true
|
b24d0e5510686d8fa12b305589a38fa0a3456eb9
|
Shell
|
zhiqiangxu/avflow
|
/third_party/ffmpeg/build.sh
|
UTF-8
| 990
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Patch the vendored FFmpeg checkout (remote/) to register the custom qrpc
# protocol, then configure, build and install it into build/.
set -e
set -x
echo "Doing build..."
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
BUILD_DIR="$DIR/build"
pushd "$DIR/remote"
# Drop any leftover local modifications so the patches below apply cleanly.
git reset --hard
git clean -f
# Copy the qrpc protocol sources into libavformat.
cp ../qrpc/qrpc* libavformat/
# BSD sed (macOS) requires an explicit '' argument after -i; GNU sed does
# not -- hence two branches performing otherwise identical edits.
if [ `uname` = "Darwin" ]; then
# Declare the qrpc protocol before the generated protocol list is included,
# and add the qrpc objects to the libavformat Makefile.
sed -i '' '/#include "libavformat\/protocol_list.c"/i \
extern const URLProtocol ff_qrpc_protocol;\
' libavformat/protocols.c
sed -i '' '/+= tcp.o/a \
OBJS += qrpc.o qrpcpkt.o\
' libavformat/Makefile
else
sed -i '/#include "libavformat\/protocol_list.c"/i \
extern const URLProtocol ff_qrpc_protocol;\
' libavformat/protocols.c
sed -i '/+= tcp.o/a \
OBJS += qrpc.o qrpcpkt.o\
' libavformat/Makefile
fi
./configure \
--prefix="$BUILD_DIR" \
--pkg-config-flags="--static" \
--extra-libs="-lpthread -lm" \
--bindir="$BUILD_DIR/bin" \
--enable-debug \
--disable-stripping \
--enable-libx264 \
--enable-gpl \
&& make -j 4 && make install
# Leave the checkout pristine again after the build.
git reset --hard
git clean -f
popd
| true
|
bb5e45d36a7a2303cfb5766063a55ebeccbb9602
|
Shell
|
claf/blur
|
/plot_vols.sh
|
UTF-8
| 513
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Render the task-reply times in Vols.data to a PDF scatter plot:
# gnuplot produces an intermediate EPS, epstopdf converts it, and the
# EPS is removed afterwards.
data_file="Vols.data"
eps_out="plot_vol.eps"
pdf_out="plot_vol.pdf"
echo "Generating ${pdf_out}"
echo "Warning : hard coded binaries order!!!"
# Drive gnuplot through a here-document; the shell expands the file names.
gnuplot << EOF
set terminal postscript eps color enhanced
set output '${eps_out}'
set xlabel 'Number of tasks replied'
set ylabel 'Time'
set title 'Steal'
set grid
plot '${data_file}' title 'Tasks reply time'
EOF
# Convert the intermediate EPS to PDF, then drop the EPS.
epstopdf --nocompress --outfile="${pdf_out}" "${eps_out}"
rm "${eps_out}"
echo "Output file: ${pdf_out}"
| true
|
df4c63812a7fd5cc4250208ca88181292678d971
|
Shell
|
landrytiano/skipsi
|
/job/job.sh
|
UTF-8
| 196
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll the clock and hit the training endpoint once per day at $RUNAT.
#
# BUG FIX: the original loop slept a full 60 seconds per iteration; added
# to the time curl itself takes, each cycle exceeded one minute, so the
# target minute could drift past unobserved and the job would silently be
# skipped on some days.  Polling every 20 seconds and remembering the last
# day we fired guarantees exactly one run per day.
RUNAT="08:01"
last_run_day=""
while true; do
  now=$(/bin/date +%H:%M)
  today=$(/bin/date +%F)
  if [[ "$now" == "$RUNAT" && "$today" != "$last_run_day" ]]; then
    curl localhost/php-fann/examples/logic_gates/simple_train.php
    last_run_day=$today
  fi
  sleep 20
done
| true
|
4aa40b3dd33c87c8665375288fd751dd9325628a
|
Shell
|
mhwombat/bin
|
/pdf2isbn
|
UTF-8
| 1,033
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
#: Extract the ISBN from a PDF.
#:
#: Usage:
#:
#:     pdf2isbn [filename]
#:
#: If no filename is specified, read from stdin.
#:
#: Strategies are tried in order, from most reliable (document metadata)
#: to most speculative (pattern-matching the text of the first 10 pages).
#:
#: Requirements: pdfinfo (install poppler_utils), pdftotext (install xpdf or python's pdftotext).
#: NOTE: the sed 'I' (case-insensitive) flag used below is a GNU extension.

# ISBN from the plain pdfinfo metadata listing.
method1() {
  pdfinfo "$1" | sed -n 's/.*ISBN:* *\([0-9\-]\+\)/\1/Ip'
}

# ISBN from the XMP metadata block (pdfinfo -meta).
method2() {
  pdfinfo -meta "$1" | sed -n 's/.*>ISBN:* *\(.*\)<.*/\1/p'
}

# A line mentioning "isbn" (any case) in the first 10 pages of text.
method3() {
  pdftotext -l 10 "$1" - | sed -n 's/.*isbn/ISBN/ip'
}

# Anything starting with the 978 EAN prefix in the first 10 pages.
method4() {
  pdftotext -l 10 "$1" - | sed -n 's/.*\(978\)/\1/ip'
}

# Last resort: anything starting with "0-" in the first 10 pages.
method5() {
  pdftotext -l 10 "$1" - | sed -n 's/.*0-/0-/ip'
}

ISBN=$(method1 "$1")
if [ -z "${ISBN}" ]; then
  # echo "Trying method 2 for $1"
  ISBN=$(method2 "$1")
fi
if [ -z "${ISBN}" ]; then
  # echo "Trying method 3 for $1"
  ISBN=$(method3 "$1")
fi
if [ -z "${ISBN}" ]; then
  # echo "Trying method 4 for $1"
  ISBN=$(method4 "$1")
fi
if [ -z "${ISBN}" ]; then
  # echo "Trying method 5 for $1"
  # BUG FIX: this branch previously re-ran method4, leaving method5 dead code.
  ISBN=$(method5 "$1")
fi
if [ -z "${ISBN}" ]; then
  ISBN="???"
fi
echo "ISBN for $1 is ${ISBN}"
| true
|
c5084a7118d44764d9153ee3717e2a794c388eb2
|
Shell
|
initialed85/mac_os_scripts
|
/user_template/create_user_template.sh
|
UTF-8
| 2,154
| 3.796875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Replace the macOS English user template with a copy of an existing user's
# home directory, so newly created accounts start from that user's setup.
# The old template is backed up to ./English.lproj.
# Must run as root.  Usage: sudo ./create_user_template.sh <username>
echo -ne "info: $0 started\n\n"
TARGET_USER_TEMPLATE=/System/Library/User\ Template/English.lproj
TARGET_USER_TEMPLATE_BACKUP=./English.lproj
if [[ "$USER" != "root" ]] ; then
echo "error: must be run as root/sudo"
exit 1
fi
TARGET_USER=$1
if [[ "$TARGET_USER" == "" ]] ; then
echo "error: must have target user specified as argument"
exit 1
fi
if [[ ! -d "/Users/$TARGET_USER" ]] ; then
echo "error: $TARGET_USER is unknown"
exit 1
fi
TARGET_USER_HOME=/Users/$TARGET_USER
echo "info: backing up current user template"
mv -fv "$TARGET_USER_TEMPLATE" "$TARGET_USER_TEMPLATE_BACKUP"
echo ""
echo "info: dittoing the user $TARGET_USER"
# ditto preserves macOS metadata/resource forks, unlike plain cp.
ditto -v "$TARGET_USER_HOME" "$TARGET_USER_TEMPLATE"
echo ""
echo "info: deleting some unrequired files from the new template"
# Strip per-user state that must not be inherited by new accounts
# (recent-items lists, keychains, bash history/profile files).
rm -frv "$TARGET_USER_TEMPLATE/Library/Application Support/com.apple.sharedfilelist"
rm -frv "$TARGET_USER_TEMPLATE/Library/Keychains/"*
rm -frv "$TARGET_USER_TEMPLATE/Library/Keychains/".* 2>/dev/null
rm -frv "$TARGET_USER_TEMPLATE/".bash_*
echo ""
echo "info: modifying com.apple.dock.plist as required"
# Remove the source user's hard-coded Downloads-folder entry from the Dock
# plist: convert to XML, strip the _CFURLString entry with an inline Python
# regex, then write the result back.
plutil -convert xml1 -o com.apple.dock.plist-before "$TARGET_USER_TEMPLATE/Library/Preferences/com.apple.dock.plist"
cat com.apple.dock.plist-before | python -c "import sys; import re; open('com.apple.dock.plist-after', 'w').write(re.sub(r'\n\s+<key>_CFURLString</key>\n\s+<string>file:///Users/$TARGET_USER/Downloads/</string>\n', '\n', sys.stdin.read()))"
plutil -convert xml1 -o "$TARGET_USER_TEMPLATE/Library/Preferences/com.apple.dock.plist" com.apple.dock.plist-after
# Verify the Downloads reference is gone (grep returning non-zero = success).
defaults read "$TARGET_USER_TEMPLATE/Library/Preferences/com.apple.dock.plist" | grep \"_CFURLString\" -A 1 | grep "/Downloads/"
if [[ $? -ne 0 ]]; then
echo "info: seems to have worked (no mention of Downloads)"
else
echo "warning: failed to modify com.apple.dock.plist- strange behaviour may occur!"
fi
rm -fr com.apple.dock.plist-before
rm -fr com.apple.dock.plist-after
echo ""
echo "info: fixing permissions on the new template"
chown -fR root:wheel "$TARGET_USER_TEMPLATE"
chmod -fR 755 "$TARGET_USER_TEMPLATE"
echo ""
echo -ne "info: $0 finished\n\n"
| true
|
a94e5c60f4c2b5c7be14a98743dabde863840605
|
Shell
|
damphat/centos-apps
|
/upstart/add-node.sh
|
UTF-8
| 870
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Register an Express/Node project under /myprojects as an Upstart service
# supervised by `supervisor`.
# Usage: ./add-node.sh <PROJECT_NAME> <PORT>
if [ $# != 2 ]; then
  echo 'Usage: ./add-node.sh <PROJECT_NAME> <PORT>'
  echo
  echo 'Example: ./add-node.sh damphat 3000'
  exit 1
fi

# Sanitise '*' out of the name so it is safe in paths and unit names.
PROJECT_NAME=${1//\*/_}
PORT=$2

if [ ! -f "/myprojects/$PROJECT_NAME/app.js" ]; then
  echo "/myprojects/$PROJECT_NAME/app.js not exist"
  exit 2
fi

# BUG FIX: this was `$(cd ... && npm install)`, which captured npm's stdout
# and then tried to execute it as a shell command.  A plain subshell
# installs the dependencies without changing this script's working dir.
(cd "/myprojects/$PROJECT_NAME" && npm install)

# Ensure the supervisor process manager is available.
command -v supervisor >/dev/null || npm install -g supervisor

# Generate the Upstart job at /etc/init/<name>.conf.
cat > "/etc/init/$PROJECT_NAME.conf" <<_EOF_
#!upstart
description "expressjs project=$PROJECT_NAME port=$PORT"
author "by centos-apps/upstart/add-node.sh"
start on startup
stop on shutdown
script
cd /myprojects/$PROJECT_NAME
export PORT=$PORT
export NODE_ENV=production
supervisor -w -- app.js # "-w" disable watch because of server high load
end script
_EOF_

# Restart the service so the new configuration takes effect.
stop "$PROJECT_NAME"
start "$PROJECT_NAME"
| true
|
8801d5c6bfb16e83229481ff84bcf14296374708
|
Shell
|
aweimeow/dotfiles
|
/zsh/install.sh
|
UTF-8
| 1,155
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Install oh-my-zsh, the custom aweimeow theme, two plugins, and .zshrc.
# Idempotent: each step is skipped when its target already exists.

# Change directory to the directory of this script
cd "$(dirname "$0")"

if [ ! -d "$HOME/.oh-my-zsh" ]; then
  sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
  echo "[zsh] oh-my-zsh installed."
fi

# BUG FIX: the existence check used the misspelled path "aweimeow.zsh-thme",
# which never exists, so the theme was re-copied on every run instead of
# only when missing.
if [ ! -f "$HOME/.oh-my-zsh/themes/aweimeow.zsh-theme" ]; then
  cp ./aweimeow.zsh-theme "$HOME/.oh-my-zsh/themes/";
  echo "[zsh] oh-my-zsh theme installed."
fi

if [ -f "$HOME/.zshrc" ]; then
  # BUG FIX: plain `echo` in bash does not interpret \033 escapes, so the
  # yellow warning printed literal "\033[..." text; printf renders it.
  printf '\033[1;33m[WARN] %s exists, overwrite.\033[0m\n' "$HOME/.zshrc"
fi

# Install zsh-syntax-highlighting and zsh-autosuggestions
if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting" ]; then
  git clone --depth 1 https://github.com/zsh-users/zsh-syntax-highlighting/ "$HOME/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting"
  echo "[zsh] zsh-syntax-highlighting installed."
fi

if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/zsh-autosuggestions" ]; then
  git clone --depth 1 https://github.com/zsh-users/zsh-autosuggestions "$HOME/.oh-my-zsh/custom/plugins/zsh-autosuggestions"
  echo "[zsh] zsh-autosuggestions installed."
fi

cp .zshrc "$HOME/.zshrc"
echo "[zsh] .zshrc installed."
| true
|
e2be6fdee598192d850689204430c622fadd3430
|
Shell
|
l1kw1d/hot-fuzz
|
/run_fuzzer_container.sh
|
UTF-8
| 707
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Run the fuzzer inside a Docker container: mount the fuzz library, a
# results directory and fuzzer.py into the image, forward all script
# arguments to fuzzer.py, and propagate the container's exit status.
cmdname=$(basename $0)
cmddir="$(dirname $0)"
function usage {
cat << EOF
Usage:
Run the fuzzer from a docker container
$cmdname [fuzzer args]
EOF
exit 1
}
if [ $# -eq 0 ]; then
usage
fi
# Fuzz definitions directory, overridable via LIB_DIR; resolved to an
# absolute path for the docker -v mount.
fuzz_lib=$( cd ${LIB_DIR:-"fuzz/"}; pwd)
pushd $cmddir
if [ ! -d results ]; then
mkdir results
fi
# Container name suffix: caller-supplied IMAGE_IDENTIFIER or a timestamp.
identifier=${IMAGE_IDENTIFIER:-`date "+%y-%m-%d.%H%M%S"`}
image=${IMAGE:-"hot-fuzz"}
results="$(pwd)/results/"
fuzzer="$(pwd)/fuzzer.py"
echo "=== Launching fuzzer container"
docker run -e DOCKER=1 -v $results:/hotfuzz/results/ -v $fuzz_lib:/hotfuzz/fuzz/ -v $fuzzer:/hotfuzz/fuzzer.py --rm -t --name=image-${identifier} ${image} python3 fuzzer.py "$@"
# Capture docker's exit status before popd so callers see the fuzzer's result.
success=$?
popd
exit $success
| true
|
df920e55f5d2574a54cdb514de1362251cc5f8dc
|
Shell
|
afortiorama/panda-client
|
/scripts/pcontainer
|
UTF-8
| 508
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch pandatools.PcontainerScript with a sanitised Python environment.
# The caller's settings are preserved in *_ORIG variables so the launched
# script can restore them if needed.
export LD_LIBRARY_PATH_ORIG=${LD_LIBRARY_PATH}
export LD_LIBRARY_PATH=
export PYTHONPATH_ORIG=${PYTHONPATH}
export PYTHONPATH=${PANDA_PYTHONPATH}
export PYTHONHOME_ORIG=${PYTHONHOME}
unset PYTHONHOME

# Scan the arguments for the -3 flag, which selects the python3 interpreter.
for arg in "$@"; do
  if [[ "$arg" == "-3" ]]; then
    PANDA_PY3=1
  fi
done

# Forward all arguments to the wrapper module under the chosen interpreter
# (-u unbuffered, -W ignore suppresses warnings).
if [ -z "$PANDA_PY3" ]; then
  /usr/bin/python -u -W ignore -c "import pandatools.PcontainerScript" "$@"
else
  /usr/bin/python3 -u -W ignore -c "import pandatools.PcontainerScript" "$@"
fi
| true
|
cc9ab0440495f56de811b7128ebdca4b958acfd9
|
Shell
|
JohnCremona/apocrita_scripts
|
/tidyout
|
UTF-8
| 344
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive finished xrun output files into xrun.done/.
#
# Filenames look like xrun.out.<JOBID>.<TASKID>.  A file is considered
# finished when qstat no longer lists user mpx017's job <JOBID> with
# task <TASKID> (column 10 of qstat output).
for f in xrun.out.*; do
g=${f#xrun\.out\.}   # strip the "xrun.out." prefix, leaving "<JOBID>.<TASKID>"
#echo $g
J=${g%\.*}   # job id: everything before the last dot
T=${g#*\.}   # task id: everything after the first dot
#qstat | grep mpx017 | awk --assign J=${J} --assign T=${T} '($1==J)&&($10==T)' | wc -l
# If no running qstat entry matches this job+task, move the file to
# xrun.done/ and report it.
if [ `qstat | grep mpx017 | awk --assign J=${J} --assign T=${T} '($1==J)&&($10==T)' | wc -l` == 0 ]; then mv ${f} xrun.done/xrun.done.${g}; echo xrun.done.$g; fi
done
| true
|
d96ec85869bde7133efed0f9f13aea7b953db38a
|
Shell
|
f/do-sshuttle
|
/do-sshuttle
|
UTF-8
| 1,249
| 3.609375
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Transparent proxying through a DigitalOcean droplet: power the droplet
# on, tunnel all IPv4 traffic through it with sshuttle, and power it off
# again when sshuttle exits.  Requires doctl, sshuttle and python (2.x
# syntax is used below) on PATH.
DROPLET_NAME="do-sshuttle-server"
DROPLET_FILE=/tmp/droplet-vpn.json
echo "do-shuttle v0.0.1"
echo "Fatih Kadir Akın <fatihkadirakin@gmail.com>"
echo "Transparent Proxying over DigitalOcean Droplets"
prefix="[ds]"
echo
# Cache the droplet description in /tmp so repeated runs skip the API call.
if [ ! -f $DROPLET_FILE ]; then
echo "$prefix <--- Getting $DROPLET_NAME Droplet information..."
DROPLET_INFO=`doctl compute droplet list $DROPLET_NAME --output json`
echo $DROPLET_INFO > $DROPLET_FILE
fi
# Extract the droplet's public IPv4 address and id from the cached JSON.
DROPLET_IP=`cat $DROPLET_FILE | python -c 'import sys, json; print json.load(sys.stdin)[0]["networks"]["v4"][0]["ip_address"]'`
DROPLET_ID=`cat $DROPLET_FILE | python -c 'import sys, json; print json.load(sys.stdin)[0]["id"]'`
echo "$prefix ---> Powering on $DROPLET_NAME (root@$DROPLET_IP) Droplet..."
doctl compute droplet-action power-on $DROPLET_ID > /dev/null
echo "$prefix ---> Power-on Request sent..."
echo "$prefix ---> Allow server 10 seconds to boot..."
sleep 10
# Route the whole IPv4 space (0.0.0.0/0) through the droplet; this blocks
# until the user stops sshuttle.
echo "$prefix ---> Proxying network via sshuttle..."
sshuttle -r root@$DROPLET_IP 0.0.0.0/0 > /dev/null
echo "$prefix ---> sshuttle stopped..."
echo "$prefix ---> Powering off $DROPLET_NAME (root@$DROPLET_IP) Droplet..."
doctl compute droplet-action power-off $DROPLET_ID > /dev/null
echo "$prefix ---> Power-off Request sent..."
echo "$prefix ---> Bye."
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.