blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e1e22ca94f2dd06f8af02b335bad5e076f844d3d
|
Shell
|
clade/RedPitaya
|
/OS/buildroot/overlay/etc/init.d/connman-config
|
UTF-8
| 1,011
| 3.875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Report the network configuration selected in /opt/etc/network/config and,
# when a static configuration is requested, apply it via connman.
# Expects the config file to define NETWORK, IP, NETMASK and GATEWAY.

# '.' instead of 'source': this script runs under /bin/sh, and 'source' is a
# bashism that fails on POSIX shells such as dash/busybox ash.
. /opt/etc/network/config

# printf instead of 'echo -n' (-n is not portable under /bin/sh).
printf "Network: "

# Configure static network configuration if selected
if [ "x$NETWORK" != "xstatic" ]; then
    echo "DHCP"
    echo "To change your network configuration, edit /opt/etc/network/config."
else
    echo ""
    # Discover ethernet service/adapter name.
    # FIX: the original sed expression had a stray quote ('s'/.../...')
    # which made this command substitution a shell syntax error.
    ETH0=$(connmanctl services | grep ethernet | sed -e 's/.*ethernet_/ethernet_/')
    if [ "x$ETH0" = "x" ]; then
        echo "Wired ethernet adapter not found."
        exit 255
    fi
    NETMASK_STR=$NETMASK
    GATEWAY_STR=$GATEWAY
    if [ "x$IP" = "x" ]; then
        echo "ERROR: Static network selected, but IP not defined."
        echo "Please edit /opt/etc/network/config to fix."
        exit 255
    fi
    # *_STR values are display-only fallbacks; the connmanctl call below
    # still receives the raw (possibly empty, hence unquoted) NETMASK and
    # GATEWAY values, exactly as the original script did.
    [ "x$NETMASK" = "x" ] && NETMASK_STR=default
    [ "x$GATEWAY" = "x" ] && GATEWAY_STR=none
    echo " IP: $IP"
    echo " NETMASK: $NETMASK_STR"
    echo " GATEWAY: $GATEWAY_STR"
    connmanctl config "$ETH0" --ipv4 manual "$IP" $NETMASK $GATEWAY
fi
| true
|
c41416251669e306fb96f3048cb8f055cf1a63dc
|
Shell
|
hnw/dotfiles
|
/gcp/path.zsh
|
UTF-8
| 314
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Append the Go App Engine SDK to PATH when it is installed.
GO_GAE_SDK_PATH="$HOME/Library/go_appengine"
if [ -d "$GO_GAE_SDK_PATH" ]; then PATH="$PATH:$GO_GAE_SDK_PATH"; fi

# Pull in the Google Cloud SDK's PATH setup when it is installed.
GOOGLE_CLOUD_SDK_ZSH_INC="$HOME/Library/google-cloud-sdk/path.zsh.inc"
if [ -f "$GOOGLE_CLOUD_SDK_ZSH_INC" ]; then source "$GOOGLE_CLOUD_SDK_ZSH_INC"; fi
| true
|
1dc264193182bd6a66730f96743e8e67fb7398b2
|
Shell
|
amerlyq/airy
|
/zsh/zshenv
|
UTF-8
| 1,188
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
# vim: ft=zsh
# ~/.zshenv -- always sourced by zsh (before ~/.zshrc).
# WARNING: Available even in Zsh scripts!
# :> But as I mostly writing portable scripts in bash, it's not a problem.
# WARNING: It should not contain commands that produce output or assume the
# shell is attached to a tty.
# So it can't be used to source aliases to be available in ranger.
# For bash use analogue BASH_ENV=$HOME/.zshenv before launching
# source r.sh-perf-trace
export SHELL=/usr/bin/zsh # NEED: when zsh run from bash
# If you want move it to ZDOTDIR also, then next line must reside in /etc/zshenv
# FIXME: zsh must keep history inside ~/.local/share -- otherwise it won't be saved in btrfs snapshots
# Relocate the zsh dotfile directory into ~/.cache/zsh when that directory
# already exists (opt-in by creating the directory).
[[ ! -d ~/.cache/zsh ]] || ZDOTDIR=~/.cache/zsh
# in $ man zshall --> there is recommended to use '[[ -o rcs ]]' ?
# Ensure that a non-login, non-interactive shell has a defined environment.
# BUT: it will slow down scripts startup
# CHECK:BUG? all manually launched scripts have SHLVL>1
# --> so these lines for systemd-like services without inherited environment?
if [[ -o rcs && ! -o LOGIN && $SHLVL -eq 1 && -s ${ZDOTDIR:-$HOME}/.zprofile ]]
then source "${ZDOTDIR:-$HOME}/.zprofile"; fi
| true
|
51a1d5d6305fd6d629e2a945892d4666a381aef0
|
Shell
|
clinsign/modal_progress_hud
|
/scripts/runTests.sh
|
UTF-8
| 339
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run `flutter test` with coverage in every immediate subdirectory that is a
# Dart/Flutter package, merging each package's lcov report into one lcov.info.

# Run the tests for one package.
#   $1 - package directory, as produced by find (e.g. "./name")
#   $2 - absolute path of the directory collecting the merged lcov.info
runTests () {
  cd "$1" || return
  if [ -f "pubspec.yaml" ]
  then
    flutter test --coverage
    # Rewrite SF: paths to be relative to the repo root ("${1:2}" strips the
    # leading "./"), then append to the combined report.
    sed "s/^SF:lib/SF:${1:2}\/lib/g" coverage/lcov.info >> "$2/lcov.info"
  fi
}
export -f runTests

# if running locally, start from a clean combined report
if [ -f "lcov.info" ]; then
  rm lcov.info
fi

# Visit each top-level directory; bash -c is needed so the exported function
# is visible to find's -exec.  FIX: `pwd` was an unquoted backtick expansion
# inside the single-quoted command and broke on paths containing spaces.
find . -maxdepth 1 -type d -exec bash -c 'runTests "$0" "$(pwd)"' {} \;
| true
|
a1c268672057694360d4132ec87a0098e1420084
|
Shell
|
bopopescu/UnitedSafety
|
/rootfs/rootfs/etc-act/redstone/TRUtest/modbus_message_priority.tst
|
UTF-8
| 2,644
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
#---------------------------------------------------------------------------------------
# modbus_message_priority
#
# test that only modbus machine hour can be transmitted over Iridium
#
# Procedure:
# 1) write confirmation that test worked if only modbus machine hours can be repored to CAMS.
#
# Failure modes:
# 2) No any data from this unit reported to CAMS
# 2) Not only machine hours be transmitted to CAMS
#
#---------------------------------------------------------------------------------------
# TTlog will log to the screen and the TTresult.txt file
source TTlog.sh
#TThelper.sh include the get_val, set_val, clear_val functions.
source TThelper.sh
# must define TESTNAME for TTlog.sh to output the script name
#---------------------------------------------------------------------------------------
TESTNAME="MODBUS_MESSAGE_PRIORITY"
# Persisted stage marker (via TThelper's get_val/set_val): empty on the first
# run, 'reboot' once the configuration phase below has completed.
stage=`get_val stage`
write_log "Current Stage $stage"
# startup stage
if [ -z "$stage" ]; then
set_val stage 'reboot'
# Switch the unit to CAMS packetizing with modbus + iridium monitoring.
feature unset packetizer-calamps
feature unset packetizer-iridium
db-config set feature packetizer-cams 1
db-config set feature modbus-monitor 1
db-config set feature iridium-monitor 1
# Modbus RTU serial parameters and reporting intervals.
db-config set modbus protocol rtu
db-config set modbus baudrate 38400
db-config set modbus periodic_seconds 60
db-config set modbus periodic_overiridium_seconds 300
db-config set modbus q_delay_seconds 60
db-config set modbus parity N
db-config set modbus data_bits 8
db-config set modbus stop_bits 1
# Iridium transmission budget and position-report interval.
db-config set Iridium byteLimit 20480
db-config set Iridium LimitiTimePeriod 86500
db-config set PositionUpdate IridiumReportTime 60
# CAMS endpoint, forced over Iridium with priority filtering enabled.
db-config set packetizer-cams ForceIridium On
db-config set packetizer-cams host testcams3.gpscams.com
db-config set packetizer-cams port 51001
db-config set packetizer-cams UseCompression 0
db-config set packetizer-cams IridiumEnable 1
db-config set packetizer-cams IridiumPriorityLevel 9
db-config set packetizer-cams retry_limit 1
db-config set packetizer-cams CellFailModeEnable 1
db-config set packetizer-cams iridium_timeout 60
db-config set packetizer-cams IridiumDataLimitPriority 1
db-config set packetizer-cams m_keepalive 60
db-config set packetizer-cams timeout 60
# Two Murphy slaves driven from the default template.
db-config set modbus-db template_Murphy --file=/etc/redstone/defaultMurphy.xml
db-config set modbus-db slave1 Murphy
db-config set modbus-db slave2 Murphy
write_log "$TESTNAME: going to reboot now."
# Reboot so the new configuration takes effect; the next run of this script
# lands in the 'reboot' branch below.
reboot
sleep 3
exit
elif [ "$stage" == "reboot" ]; then
# Give the unit time to start reporting, then ask the operator to confirm.
sleep 120
GetYNResponse "Modbus Message Priority Test" "Do you see the machine hours showing on CAMS?" "Test Failed"
fi
| true
|
26fb8b145890c9904764db9899a408f9010755c5
|
Shell
|
evolbeginner/SEQ2TREE
|
/pep_sep.sh
|
UTF-8
| 141
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
# For every FASTA file in ../pep-dupli, create <name>/pep/ under pep-sep/
# (<name> = file name without the .fas suffix) and copy the file there.
mkdir pep-sep;
cd pep-sep || exit 1
for i in ../pep-dupli/*; do
  b=$(basename "$i")
  c=${b%.fas}        # strip the .fas extension
  mkdir -p "$c/pep"
  cp "$i" "$c/pep"
done
cd -
| true
|
e362e7d205ed796692ab92f586d97352f750bfb3
|
Shell
|
griff/cfg
|
/lib/dur.sh
|
UTF-8
| 2,854
| 3.65625
| 4
|
[] |
no_license
|
# durdn/cfg related commands {{{
# Dispatcher for the dotfiles ("cfg") tooling; $1 selects a subcommand,
# remaining arguments are subcommand-specific.
function dur {
case $1 in
# list|li <user> <password>: list the user's bitbucket repositories.
list|li)
curl --user $2:$3 https://api.bitbucket.org/1.0/user/repositories 2> /dev/null | grep "name" | sed -e 's/\"//g' | col 2 | sort | uniq | column
;;
# clone|cl <repo>: clone one of durdn's bitbucket repositories over ssh.
clone|cl)
git clone git@bitbucket.org:durdn/$2.git
;;
# install|i: run the local installer from the checked-out config.
install|i)
bash $HOME/.cfg/install.sh
;;
# reinstall|re: re-run the installer straight from GitHub.
reinstall|re)
curl -Ls https://raw.github.com/griff/cfg/master/install.sh | bash
;;
# check|chk: fetch origin/master at most once a day, fast-forward or push
# as needed, and reinstall when the local master moved.
check|chk)
if [ $(whoami) = "root" ];
then
home="/root";
else
home="$HOME";
fi
# Subshell so the cd does not leak into the caller's working directory.
(
cd $home/.cfg
# NOTE(review): "git check" is not a builtin git command -- presumably a
# custom alias/extension guarding against uncommitted changes; confirm.
if git check -q; then
branch_orig_hash="$(git show-ref -s --verify refs/heads/master 2> /dev/null)"
# ~/.cfg-check is a timestamp file: fetch when it is missing or older
# than 1440 minutes (one day).
if [ ! -f $home/.cfg-check ]; then
echo ".cfg fetch"
git fetch -q origin master
touch $home/.cfg-check
elif [ -n "$(find $home/.cfg-check -mmin +1440)" ]; then
echo ".cfg fetch"
git fetch -q origin master
touch $home/.cfg-check
fi
# Commits ahead of / behind the upstream tracking branch.
ahead=$(git rev-list --right-only --boundary @{u}... | egrep "^-" | wc -l)
behind=$(git rev-list --left-only --boundary @{u}... | egrep "^-" | wc -l)
if [ $ahead -gt 0 -o $behind -gt 0 ]; then
if [ $ahead -gt 0 -a $behind -eq 0 ]; then
echo ".cfg ahead by $ahead. Pushing..."
git push -q origin master
elif [ $ahead -eq 0 -a $behind -gt 0 ]; then
echo ".cfg behind by $behind. Merging..."
if ! git merge --ff-only -q origin/master 2> /dev/null; then
echo ".cfg could not be fast-forwarded"
fi
else
# Diverged: both ahead and behind.
echo ".cfg could not be fast-forwarded"
fi
fi
# Reinstall only when master actually changed during this check.
branch_hash="$(git show-ref -s --verify refs/heads/master 2> /dev/null)"
if [ "$branch_orig_hash" != "$branch_hash" ]; then
echo ".cfg has been updated. Reinstalling..."
bash $home/.cfg/install.sh
fi
else
echo ".cfg has uncommitted changes"
fi
)
;;
# move|mv: mirror the current repository to durdn's bitbucket account.
move|mv)
git remote add bitbucket git@bitbucket.org:durdn/$(basename $(pwd)).git
git push --all bitbucket
;;
trackall|tr)
#track all remote branches of a project
for remote in $(git branch -r | grep -v master ); do git checkout --track $remote ; done
;;
fun|f)
#list all custom bash functions defined
typeset -F | col 3 | grep -v _ | xargs | fold -sw 60
;;
def|d)
#show definition of function $1
typeset -f $2
;;
# Anything else (including help|h) prints usage.
# NOTE(review): the help text advertises a [cr]eate command that has no
# case arm above.
help|h|*)
echo "[dur]dn shell automation tools - (c) 2011 Nicola Paolucci nick@durdn.com"
echo "commands available:"
echo " [cr]eate, [li]st, [cl]one"
echo " [i]nstall,[m]o[v]e, [re]install"
echo " [f]fun lists all bash functions defined in .bashrc"
echo " [def] <fun> lists definition of function defined in .bashrc"
echo " [tr]ackall], [h]elp"
;;
esac
}
| true
|
1a0a7c4a27506136f89ffc27b3aab6e6b964257f
|
Shell
|
kathan/dts-manager
|
/phpmd.sh
|
UTF-8
| 265
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run PHP Mess Detector over the project and write an HTML report, replacing
# any report left over from a previous run.

# Directory containing this script.
# FIX: the original used CUR_DIR=${0}, i.e. the script *path* rather than its
# directory, so "$CUR_DIR/phpmd_results.html" never existed, the rm never ran,
# and results were appended to the old report forever.
CUR_DIR=$(cd "$(dirname "$0")" && pwd)
if [ -e "$CUR_DIR/phpmd_results.html" ]
then
	rm "$CUR_DIR/phpmd_results.html"
fi
# NOTE(review): output is written relative to the current directory -- the
# script appears intended to be run from its own directory.
./vendor/bin/phpmd . html 'unusedcode,design' --exclude log/,phpMyAdmin/,vendor/,templates/,templates_c/,includes/smarty/,includes/smarty-master/ >> phpmd_results.html
| true
|
144a53d4374784bb82c62d5e3f65498ad37bc510
|
Shell
|
chance-nelson/dotfiles
|
/bash/.bashrc
|
UTF-8
| 2,418
| 3.140625
| 3
|
[
"Unlicense"
] |
permissive
|
# Stop here for non-interactive shells; everything below is interactive-only.
[[ $- != *i* ]] && return
# Path Inserts
# Prepend personal script/binary directories to PATH when they exist.
if [ -d "$HOME/.bin" ] ;
then PATH="$HOME/.bin:$PATH"
fi
if [ -d "$HOME/.local/bin" ] ;
then PATH="$HOME/.local/bin:$PATH"
fi
if [ -d "$HOME/go/bin" ] ;
then PATH="$HOME/go/bin:$PATH"
fi
if [ -d "$HOME/scripts" ] ;
then PATH="$HOME/scripts:$PATH"
fi
# PS1
# Emit "[branch<status>]" for the current git branch (status symbols come
# from parse_git_dirty), or nothing when outside a repository.  Used by PS1.
function parse_git_branch() {
  BRANCH=$(git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/')
  if [ -n "${BRANCH}" ]; then
    STAT=$(parse_git_dirty)
    echo "[${BRANCH}${STAT}]"
  else
    echo ""
  fi
}
# Summarise the current repo state as a symbol string: ">" renamed, "*" ahead
# of upstream, "+" new file, "?" untracked, "x" deleted, "!" modified.
# Prints " <symbols>" (leading space) or nothing when there is nothing to show.
function parse_git_dirty {
  status=$(git status 2>&1 | tee)
  # Each probe records grep's exit status: "0" means the marker text appears
  # in the `git status` output.
  dirty=$(echo -n "${status}" 2> /dev/null | grep "modified:" &> /dev/null; echo "$?")
  untracked=$(echo -n "${status}" 2> /dev/null | grep "Untracked files" &> /dev/null; echo "$?")
  ahead=$(echo -n "${status}" 2> /dev/null | grep "Your branch is ahead of" &> /dev/null; echo "$?")
  newfile=$(echo -n "${status}" 2> /dev/null | grep "new file:" &> /dev/null; echo "$?")
  renamed=$(echo -n "${status}" 2> /dev/null | grep "renamed:" &> /dev/null; echo "$?")
  deleted=$(echo -n "${status}" 2> /dev/null | grep "deleted:" &> /dev/null; echo "$?")
  bits=''
  # Prepend in this order so "!" (modified) ends up first in the result.
  [ "${renamed}" = "0" ]   && bits=">${bits}"
  [ "${ahead}" = "0" ]     && bits="*${bits}"
  [ "${newfile}" = "0" ]   && bits="+${bits}"
  [ "${untracked}" = "0" ] && bits="?${bits}"
  [ "${deleted}" = "0" ]   && bits="x${bits}"
  [ "${dirty}" = "0" ]     && bits="!${bits}"
  if [ -n "${bits}" ]; then
    echo " ${bits}"
  else
    echo ""
  fi
}
# [hostname:directory](git)$
# Escaped backticks defer parse_git_branch to prompt-display time.
export PS1="[\h:\w]\`parse_git_branch\`\\$ "
# Aliases
alias ls='ls --color=auto'
alias la='ls -a'
alias ll='ls -la'
alias l='ls'
alias l.="ls -A | egrep '^\.'"
# Per-repo git identity shortcut.
alias setconfig="git config user.name 'Chance Nelson'; git config user.email chance-nelson@nau.edu"
alias gi="git init"
alias gs="git status"
alias ga="git add"
alias gc="git commit"
alias gd="git diff"
alias gp="git push"
alias pull="git pull"
alias grm="git rm"
# Activate a ./env Python virtualenv.
alias e="source env/bin/activate"
alias klogs="kubectl logs --follow"
alias mkube="minikube kubectl --"
# Exports
export EDITOR="vim"
# Completion
# Standard Debian-style bash-completion hookup (skipped in posix mode).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
| true
|
1bec07c2eb74d988efced1a091ee00cd827f3b50
|
Shell
|
frankzhuo/kaola
|
/script/hadoop19/bank/test1.sh
|
UTF-8
| 210
| 2.8125
| 3
|
[] |
no_license
|
# Compute how long to sleep until 05:00 tomorrow and report it, in seconds
# (with an "s" suffix) and in whole hours.
TODAY_0D_F=$(date +%Y-%m-%d)                    # today, YYYY-MM-DD
time1=$(date +%s -d"${TODAY_0D_F} 05:00:00")    # today 05:00 as epoch seconds
time_exc=$((time1 + 1 * 24 * 60 * 60))          # same moment tomorrow
time_now=$(date +%s)
time_sleep=$((time_exc - time_now))
echo ${time_sleep}s
echo $((time_sleep / 3600))
| true
|
e503ca7d8fff6be6008b26f4cc2a11fe6541842d
|
Shell
|
Legun/scripts
|
/repn
|
UTF-8
| 539
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# repn N1 N2 file -- write <file> to test.txt once per N in N1..N2, with
# every occurrence of "NUM" replaced by _N.
if [ -z "$1" -o -z "$2" -o -z "$3" ]
then
  echo "repn: Format isn't correct!"
  echo "repn: Use repn N1 N2 file"
  echo "repn: where N1 - the first number in file"
  echo "repn: N2 - the final number"
  exit
fi
if [ -f "$3" ]
then :
else
  echo "File $3 doesn't exist"
  exit
fi
PAR=$1
MAX=$2
#cp $3 tmpr1.txt
# NOTE(review): tmpr2.txt is appended to -- a leftover file from an aborted
# previous run would be folded into the output.
while [ "$PAR" -le "$MAX" ]
do
  sed "s/NUM/_$PAR/g" "$3" > tmpr1.txt
  PAR=$((PAR + 1))    # arithmetic expansion instead of the external `expr`
  cat tmpr1.txt >> tmpr2.txt
done
mv tmpr2.txt test.txt
#sed s/__//g tmpr3.txt > test.txt
rm tmpr1.txt
| true
|
39d5ef01a5812afc592c29b17089f1d512e49c59
|
Shell
|
diessica/eos-setup
|
/dot/gem/pack.sh
|
UTF-8
| 542
| 3.765625
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Install a fixed set of Ruby gems (sass, compass, haml) if any of them is
# missing.  Relies on helpers defined elsewhere in the dotfiles framework
# (type_exists, e_header, msg_ok, msg_run, e_success, e_error) -- this file
# is meant to be sourced, not run standalone.
run_gem() {
if type_exists "gem"; then
e_header "Installing gems..."
# list of gems
local packages="sass compass haml"
# if doesn't exist yet, then install
# NOTE(review): `which` succeeds only when *all* listed commands resolve, so
# one missing gem triggers reinstalling the whole set.
if which ${packages[@]} &> /dev/null; then
msg_ok "${packages[@]}"
else
msg_run "${packages[@]}" "sudo gem install ${packages[@]}"
sudo gem install -g ${packages[@]}
fi
# NOTE(review): [[ $? ]] is always true (non-empty string), so "Done" is
# printed unconditionally; presumably [[ $? -eq 0 ]] was intended -- confirm.
[[ $? ]] && e_success "Done"
else
printf "\n"
e_error "Error: gem not found."
printf "Aborting...\n"
exit
fi
}
| true
|
dfd8d1266d14b2a988f5c8d11249be179bd4c7da
|
Shell
|
cleanwater-super/wptools4xserver
|
/replaceWpConfigs.sh
|
UTF-8
| 360
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Rewrite path and database references after moving a WordPress install:
# patches .htaccess (oldpath -> newpath) and wp-config.php (olddb -> newdb)
# in the current directory.
if [ $# -ne 4 ]; then
  echo "argument count is $#" 1>&2
  echo "type wordpress root oldpath newpath olddb newdb"
  exit 1
fi
OLDPATH=$1
NEWPATH=$2
OLDDB=$3
NEWDB=$4
sed -i -e "s:/$OLDPATH/:/$NEWPATH/:g" .htaccess
# BSD/macOS sed treats "-i -e" as "-i with backup suffix e", leaving a
# ".htaccess-e" backup; -f so the cleanup is a no-op under GNU sed, where the
# backup file does not exist and plain rm would fail.
rm -f .htaccess-e
sed -i -e "s|define('DB_NAME', '$OLDDB');|define('DB_NAME', '$NEWDB');|" wp-config.php
rm -f wp-config.php-e
| true
|
b2ae721b9298d75d88ca899448b6fc283a5f70e6
|
Shell
|
jooleecro/bnk-kube-monit
|
/kube/metric/create_monit_metric.sh
|
UTF-8
| 551
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the Prometheus-based monitoring stack to the cluster using the YAML
# manifests defined in config.env, then show the resulting resources.
set -e
set -o pipefail

SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
# Quoted: an unquoted ${SCRIPT_DIR} would break on paths containing spaces.
cd "${SCRIPT_DIR}"

source ./config.env            # defines the YML_* manifests and NAME_SPACE
source ../common/function.env  # defines `exe`
source ../common/logger.env    # defines `info`

info "create monit log"

exe kubectl create -f - <<< "${YML_CLUSTER_ROLE}"
exe kubectl create -f - <<< "${YML_CONFIG_MAP}"
exe kubectl create -f - <<< "${YML_DEPLOYMENT_PROMETHEUS}"
exe kubectl create -f - <<< "${YML_SERVICE_PROMETHEUS}"
#exe kubectl create -f - <<< "${YML_DEPLOYMENT_GRAFANA}"

exe kubectl get all -n "${NAME_SPACE}"
exe kubectl get all -n kube-system
| true
|
b3842a94e9c15df17017885f007d2c86ae4bbdbc
|
Shell
|
kawsark/tradebot-ci
|
/ansible/deploy_tradebotwebui.sh
|
UTF-8
| 778
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#Use this script to trigger ansible deployment of tradebot webui application
#Assumes azure cli is installed and authenticated
export ansible_hosts_file=azure-ansible-hosts
export tradebot_resource_group=tradebotRG-production
export tradebot_vmss=tradebotwebuivmss-production
# Collect the '"ipAddress": ...' lines for every instance of the VMSS.
az vmss list-instance-public-ips -g $tradebot_resource_group -n $tradebot_vmss | grep \"ipAddress\"\: > azure_ip_addr.txt
#az vm list-ip-addresses -g tradebotresourcegroup | grep \"ipAddress\"\: > azure_ip_addr.txt
echo Deploying application to servers: $(cat azure_ip_addr.txt)
# Write the inventory skeleton; the [azure] section is filled in below.
cat > $ansible_hosts_file <<EOL
[local]
localhost
[azure]
EOL
# Field 2 of each grepped line is the quoted IP; the second awk strips the
# surrounding double quotes.
awk '{print $2}' azure_ip_addr.txt | awk -F\" '{print $2}' >> $ansible_hosts_file
ansible-playbook -i $ansible_hosts_file deploy_tradebotwebui.yml
| true
|
5e519999d7235154e5b9569b2cde6e4f8833e617
|
Shell
|
dschapman/.dotfiles
|
/.zsh_personal
|
UTF-8
| 619
| 2.625
| 3
|
[] |
no_license
|
#!/bin/zsh
# This file is for relatively portable configurations on top of what I do in my .zshrc
# Enable 24-bit color (see https://stackoverflow.com/questions/14672875/true-color-24-bit-in-terminal-emacs)
# TERM=xterm-24bit
EMACS="*term*"; export EMACS
EDITOR=emacs; export EDITOR
PAGER=less; export PAGER
# less: raw colors, quit on one screen, smart case search, keep screen.
LESS=-RFiX; export LESS
##############################
# Aliases
##############################
alias ll='ls -AlFG'
alias l='ls -lahF'
alias 'e'='emacs'
alias 'ce'='carton exec'
alias 'j'='jobs -l'
# Print this machine's IP on interface en0 (macOS).
alias 'whoamip'='ipconfig getifaddr en0'
# Remove editor backup files (*~) in the current directory.
alias 'r~'='rm -f *~ .*~'
# asdf version manager (Homebrew install location).
. /usr/local/opt/asdf/asdf.sh
| true
|
7b6f1f03b2ba109c41a9a9658a5b7ac76fe235e5
|
Shell
|
rangandutta/tempawareOS
|
/TemperatureMonitor/FreqLevels.sh~
|
UTF-8
| 305
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# Enumerate the CPU frequency levels reported by sysctl ("levels" line,
# fields of the form <freq>/<power>) into ./freq_levels, one per line, then
# snapshot the process-ID list twice for later comparison.
i=2
freq=0
rm -f ./freq_levels   # -f: do not fail when the file does not exist yet
while [ ! -z "$freq" ]
do
# NOTE(review): the initial "0" is written before the first sysctl read, so
# freq_levels always begins with a 0 line -- preserved from the original;
# confirm whether that sentinel is intentional.
echo "$freq" >> freq_levels
# Field $i of the "levels" line, keeping only the frequency half.
freq=$(sysctl -a | grep levels | cut -d ' ' -f "$i" | cut -d '/' -f 1)
i=$((i + 1))          # arithmetic expansion instead of the external `expr`
done
freq=$(cat ./freq_levels)
ps -auxf | awk '{print $2;}'> prev_list
ps -auxf | awk '{print $2;}'> present_list
| true
|
cc5c0192340f4c561626461f267500be612768a5
|
Shell
|
koraynilay/linux-custom-scripts
|
/archlinux_updates/check_upds_cycle.sh
|
UTF-8
| 579
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Poll hourly for Arch Linux package updates; when checkupdates reports some,
# raise a dunst notification whose "update" action launches
# mapunmap_updates.sh.
while true;do
checkupdates
exit_code=$?
# Exit code 2 is treated as "no updates pending" by this script.
if [ $exit_code -ne 2 ];then
# dunstify -A adds a clickable action and prints the chosen action id;
# bc normalises that output to a number ("0" presumably being the default
# action -- confirm against dunstify's behaviour).
act=$(dunstify -a Updates -A "update,up" "Updates are available" | bc)
if [ $act -eq 0 ];then
mapunmap_updates.sh
fi
else
echo no_updates
fi
sleep 3600
done
# ! [[ -z $act ]] && termite --hold --title="archlinux_updates_script" -e "zsh -c 'printf \"Updates available:\n\n$ups\n\n\";~/linux-custom-scripts/ans_updates.sh'"
# read ans; echo $ans;if [ \"$ans\" = \"y\" ];then pacman -Syu;fi;sleep 10'"
# termite --hold -e 'sh -c "printf \"Updates available:\n\n\"; checkupdates"'
| true
|
96fde8854112a256c6ea96cd8eec8497f0d55ef6
|
Shell
|
mandrews/denv
|
/bin/docker_push.sh
|
UTF-8
| 730
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Tag local denv/* images for GitHub Packages and push them.  Entries look
# like "<image>[/<tag>]"; DENV_LATEST_IMAGES entries are pushed as :latest,
# DENV_TAGGED_IMAGES keep their own tag.
set -o errexit

# Work from the repository root (one level above this script).
pushd "$(cd "$(dirname "$0")" ; pwd -P )/.." > /dev/null

if [[ -f .env ]]; then
  # shellcheck disable=SC1091
  source .env
fi

# Tag denv/<image>:<tag> for docker.pkg.github.com and push it.
#   $1 - "<image>[/<tag>]" entry (defaults: image "base", tag "latest")
#   $2 - destination tag; empty means "reuse the source tag"
push_image() {
  local entry=$1 dest_tag=$2
  local arr image tag
  IFS='/' read -ra arr <<< "$entry"
  image=${arr[0]:-base}
  tag=${arr[1]:-latest}
  dest_tag=${dest_tag:-$tag}
  docker tag "denv/$image:$tag" "docker.pkg.github.com/$GITHUB_USERNAME/denv/$image:$dest_tag"
  docker push "docker.pkg.github.com/$GITHUB_USERNAME/denv/$image:$dest_tag"
}

for DIR in $DENV_LATEST_IMAGES; do
  push_image "$DIR" latest
done
for DIR in $DENV_TAGGED_IMAGES; do
  push_image "$DIR" ""
done
| true
|
229d2e65b019e4952622927bf45e81fd270f156f
|
Shell
|
eldin77/midonet
|
/tests/mmm/scripts/perf/init
|
UTF-8
| 1,192
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create per-instance jmxtrans log directories and hand them to the jmxtrans
# user (dot form of chown user.group).
for n in 1 2 3; do
if test ! -d /var/log/jmxtrans.$n; then
mkdir -p /var/log/jmxtrans.$n
fi
chown -R jmxtrans.jmxtrans /var/log/jmxtrans.$n
done
# Absolute path of the perf tree, resolved relative to the current directory.
perfdir="$(readlink -f $(pwd)/../../../perf)"
mkdir -p jmxtrans/json
# Rewrite the hard-coded developer paths in the shipped jmxtrans configs so
# they point at this checkout and at the shared log location.
sed -e "s%/home/midokura/code/qa/perf%$perfdir%;
s%/tmp/midonet-perftests%/var/log/jmxtrans/midonet-perftests%" \
"$perfdir/jmxtrans/json/midolman.json" >jmxtrans/json/midolman.json
sed -e "s%/home/midokura/code/qa/perf%$(pwd)%" \
"$perfdir/profiles.d/default/jmxtrans/default" >jmxtrans.conf
echo export PIDFILE=/var/run/jmxtrans.pid >>jmxtrans.conf
| true
|
8f96e4ad04d57f22b668c398c93e1d8656129e48
|
Shell
|
andrewmeyer/usefulscripts
|
/pcheck_updates
|
UTF-8
| 1,702
| 3.9375
| 4
|
[] |
no_license
|
#! /bin/bash
#this program checks for updates and then notifies nagios
# Passive-check reporter: runs the Debian package-update plugin and submits
# the result to an NSCA daemon.
HOST=192.168.1.121
PORT=5667
DELIM=^
CONFIG=/etc/send_nsca.cfg
NSCA=/usr/sbin/send_nsca
SV_DESC="Update Watchdog"
# NOTE(review): /bin/SNOP.sh appears to print this host's IP for a given
# interface -- confirm; see also the "WHERE ARE MY HANDS?" check below.
IP=`/bin/SNOP.sh eth0`
#Usage: send_nsca -H <host_address> [-p port] [-to to_sec] [-d delim] [-c config_file]
REPORT="$NSCA -H $HOST -p $PORT -d $DELIM -c $CONFIG"
CHECK=/usr/lib/nagios/plugins/check_debian_packages
#Options:
# <host_address> = The IP address of the host running the NSCA daemon
# [port] = The port on which the daemon is running - default is 5667
# [to_sec] = Number of seconds before connection attempt times out.
# (default timeout is 10 seconds)
# [delim] = Delimiter to use when parsing input (defaults to a tab)
# [config_file] = Name of config file to use
APT=/usr/bin/aptitude
# Preflight: every external tool must exist and be executable; exit 5 so the
# failure is distinguishable from plugin return codes.
if [ ! -x "$CHECK" ]; then
echo "FATAL: $CHECK cannot be found or is not executable!"
exit 5
fi
if [ ! -x /bin/SNOP.sh ];then
echo "FATAL: WHERE ARE MY HANDS?"
exit 5
fi
if [ ! -x "$NSCA" ]; then
echo "FATAL: $NSCA cannot be found or is not executable!"
exit 5
fi
if [ ! -x "$APT" ]; then
echo "FATAL: $APT cannot be found or is not executable!"
exit 5
fi
##see if we are running as root
if [ `whoami` != "root" ] ;then
echo "SCRIPT MUST BE RUN AS ROOT"
exit 5
fi
# Refresh the package lists quietly before checking.
$APT update> /dev/null 2>&1
###running the check and grabing the result and exit code
OUTPUT=`$CHECK`
RETC=$?
###ACUTALL UPLOAD
# NSCA passive-check line, using $DELIM instead of tabs (see -d above).
RESULT=$IP$DELIM$SV_DESC$DELIM$RETC$DELIM$OUTPUT
#<host_name>[tab]<svc_description>[tab]<return_code>[tab]<plugin_output>[newline]
echo ""
echo "`date`"
echo "$RESULT"
echo "$RESULT" | $REPORT
| true
|
f74cb0ee881c70e0b19f0c1293dc784d5355cb01
|
Shell
|
spurdow/mapbox-gl-native
|
/scripts/check-cxx11abi.sh
|
UTF-8
| 430
| 3.65625
| 4
|
[
"BSD-3-Clause",
"IJG",
"LicenseRef-scancode-warranty-disclaimer",
"Zlib",
"curl",
"NCSA",
"LicenseRef-scancode-openssl",
"OpenSSL",
"LicenseRef-scancode-ssleay-windows",
"JSON",
"ISC",
"MIT",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"blessing",
"Libpng"
] |
permissive
|
#!/usr/bin/env bash
# Print "-cxx11abi" when the system libstdc++ uses the new (cxx11) ABI,
# otherwise print nothing.  Only meaningful on Linux.
set -e
set -o pipefail
if [ ! `uname -s` = 'Linux' ]; then
echo ""
exit 0
fi
# check-cxx11abi.dat is a binary just so we can use the loader
# to take care of finding the libstdc++ which can be tricky.
LIBSTDCPP=$(ldd $(dirname $0)/check-cxx11abi.dat |grep libstdc++ |cut -d' ' -f3)
# Count demangled symbols containing ::__cxx11:: -- presence indicates the
# library was built with the new libstdc++ ABI.
if [ $(readelf -Ws $LIBSTDCPP |c++filt |grep -c ::__cxx11::) -gt 0 ]; then
echo "-cxx11abi"
else
echo ""
fi
| true
|
00297cce4dc34f0fd03ccc45977e66bfeaa481a7
|
Shell
|
bkees/dotfiles-1
|
/home/.zshrc
|
UTF-8
| 3,277
| 2.765625
| 3
|
[] |
no_license
|
# Load the prezto framework when present.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Aliases
alias l="ls -F"
alias ll="ls -AGlFth"
alias grep='grep --color=auto'
alias df='df -H'
alias fig='find . | grep'
alias reload="source ~/.zshrc"
alias netest="ping 8.8.8.8"
alias simple="python -m SimpleHTTPServer"
# mosh to slytherin and attach (or start) a tmux session.
alias slytherin='mosh slytherin -- tmux attach -d || tmux new'
# Tunnel all traffic through host $1 via sshuttle.
beammeupscotty() {
/Users/alex/Projects/sshuttle/src/sshuttle -r $1 0.0.0.0/0 -vv
}
get_youtube_mp3() {
youtube-dl --extract-audio --audio-format mp3 $1
}
# Global alias: most recently modified file in the current directory.
alias -g lastm='*(om[1])'
# Python
# Prefer IPython shells when installed.
if command -v ipython > /dev/null; then
alias python="ipython"
fi
if command -v ipython3 > /dev/null; then
alias python3="ipython3"
fi
export PYTHONSTARTUP=$HOME/.pythonrc
# Suffix aliases
alias -s log=less
alias -s html=open
# Important files
alias zshrc="vim ~/.zshrc"
alias vimrc="vim ~/.vimrc"
# Reopen the last edited file (vim mark '0).
alias vimlast="vim -c \"normal '0\""
alias syslog="vim /var/log/syslog"
alias bashar="open ~/Dropbox/bashar.pdf"
alias devdocs="open http://devdocs.io"
# Shorthands
alias e="exit"
alias h='history -fd -100'
alias hgrep='history -fd 0 | grep'
alias sr='ssh -l root'
# cd & ls
alias lc="cl"
# Change into $1 when it is a directory, then list it with the `l` alias.
cl() {
  if [ ! -d "$1" ]; then
    return 0
  fi
  cd "$1"
  l
}
# mkdir & ls
alias cm="mc"
# Create a directory ("$*" joins all arguments, so spaces are allowed),
# enter it, and print the resulting working directory.
mc() {
  mkdir -p "$*" \
    && cd "$*" \
    && pwd
}
# Analyze history data
# Top 30 full command lines from the zsh history (extended-history format is
# "timestamp;command", hence the ';' field split).
analyze_history(){
cut -f2 -d";" ~/.zsh_history | sort | uniq -c | sort -nr | head -n 30
}
# Top 30 command *names* (first word only).
analyze_commands(){
cut -f2 -d";" ~/.zsh_history | cut -d' ' -f1 | sort | uniq -c | sort -nr | head -n 30
}
# Exports
export EDITOR="vim"
export LC_ALL="en_US.UTF-8"
export LANG="en_US"
export PATH=/usr/local/bin:$PATH # Brew path
export PATH=/usr/local/sbin:$PATH # Brew second path
export PATH=$PATH:$HOME/dotfiles/scripts
export PATH=$PATH:/Applications/Postgres.app/Contents/Versions/9.4/bin
export TERM='xterm-256color'
# Remove annoying messages
unsetopt correctall
# FASD for faster switching between directories
eval "$(fasd --init auto)"
alias v='f -e vim'
# alt-left and alt-right for switching words in terminal
# taken from https://github.com/solnic/dotfiles/blob/master/home/zsh/key-bindings.zsh
bindkey -e
bindkey '^H' delete-word # iterm
bindkey '^[[3~' delete-char # tmux
bindkey '^[[1;9D' backward-word # iterm
bindkey '^[^[[D' backward-word # tmux os x
bindkey '^[[1;3D' backward-word # tmux ubuntu
bindkey '^[[1;9C' forward-word # iterm
bindkey '^[^[[C' forward-word # tmux os x
bindkey '^[[1;3C' forward-word # tmux ubuntu
# History configurations
HISTSIZE=10000
SAVEHIST=10000
setopt HIST_VERIFY
setopt SHARE_HISTORY # share history between sessions
setopt EXTENDED_HISTORY # add timestamps to history
setopt APPEND_HISTORY # adds history
setopt INC_APPEND_HISTORY SHARE_HISTORY # adds history incrementally and share it across sessions
setopt HIST_IGNORE_ALL_DUPS # don't record dupes in history
setopt HIST_REDUCE_BLANKS
setopt interactivecomments # allow # in a comment
# Source awscli completion
[ -f /usr/local/share/zsh/site-functions/_aws ] && source /usr/local/share/zsh/site-functions/_aws
# Source configuration for local machine if it exists
[ -f ~/.zshrclocal ] && source ~/.zshrclocal
# test -e ${HOME}/.iterm2_shell_integration.zsh && source ${HOME}/.iterm2_shell_integration.zsh
| true
|
1387d9f7a90445a097329458f6f028b913bc88ce
|
Shell
|
rumblefishdev/keep-ecdsa
|
/solidity/scripts/lcl-provision-tbtc.sh
|
UTF-8
| 1,612
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e

# Fetch the address of tbtc contract migrated from keep-network/tbtc project.
# The `tbtc` contracts have to be migrated before running this script.
# It requires `TBTC_SOL_ARTIFACTS_PATH` variable to pointing to a directory where
# contracts artifacts after migrations are located. It also expects NETWORK_ID
# variable to be set to the ID of the network where contract were deployed.
#
# Sample command:
# TBTC_SOL_ARTIFACTS_PATH=~/go/src/github.com/keep-network/tbtc/solidity/build/contracts \
# NETWORK_ID=1801 \
# ./lcl-provision-tbtc.sh

TBTC_CONTRACT_DATA="TBTCSystem.json"
TBTC_PROPERTY="TBTCSystemAddress"
DESTINATION_FILE=$(realpath "$(dirname "$0")/../migrations/external-contracts.js")
ADDRESS_REGEXP=^0[xX][0-9a-fA-F]{40}$

# Query to get address of the deployed contract for the first network on the list.
# FIX: this previously interpolated ${NETWORKID}, which is never set -- the
# documented variable (see header) is NETWORK_ID, so the jq query always
# produced "null".
JSON_QUERY=".networks.\"${NETWORK_ID}\".address"

SED_SUBSTITUTION_REGEXP="['\"][a-zA-Z0-9]*['\"]"

FAILED=false

# Read the deployed TBTCSystem address from the truffle artifact and patch it
# into external-contracts.js; flags FAILED on a malformed address.
function fetch_tbtc_contract_address() {
  echo "Fetching value for ${TBTC_PROPERTY}..."
  local contractDataPath=$(realpath "$TBTC_SOL_ARTIFACTS_PATH/$TBTC_CONTRACT_DATA")
  echo $contractDataPath
  local ADDRESS=$(cat ${contractDataPath} | jq "${JSON_QUERY}" | tr -d '"')

  # Reject anything that is not a 0x-prefixed 40-hex-digit address.
  if [[ ! $ADDRESS =~ $ADDRESS_REGEXP ]]; then
    echo "Invalid address: ${ADDRESS}"
    FAILED=true
  else
    echo "Found value for ${TBTC_PROPERTY} = ${ADDRESS}"
    sed -i -e "/${TBTC_PROPERTY}/s/${SED_SUBSTITUTION_REGEXP}/\"${ADDRESS}\"/" "$DESTINATION_FILE"
  fi
}

fetch_tbtc_contract_address

if $FAILED; then
  echo "Failed to fetch tbtc external contract address!"
  exit 1
fi
| true
|
7938a658e2421650e9e6879f2245990e7e41887b
|
Shell
|
yoonseongduk/script
|
/initial/000_scp_to_target.sh
|
UTF-8
| 4,851
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################################
# #
# Script Name: 000_scp_to_target.sh #
# #
# Description: xxxxxxxxxxxxxxxxxxxx script #
# #
# Modified: #
# #
# 2015.11.10 SAL_SUM:55938 #
# add get_file function #
# 2015.10.15 SAL_SUM:55303 #
# modify by root #
# 2013.08.23 SAL_SUM:57236 #
# created by root #
# #
# Licensed Materials - Property of LG CNS #
# #
# (C) COPYRIGHT LG CNS Co., Ltd. 2009 #
# All Rights Reserved #
# #
#################################################################################
# P1: source server IP, "help"/"-h", or empty (v1 default); P2 captured but
# presumably unused here -- confirm against callers.
typeset P1="${1}"
typeset P2="${2}"
# Staging directory for the provisioning scripts on both ends.
typeset indd='/isc/sorc001/root/shell/initial'
typeset hostname_v1="testwebwas01"
typeset hostname_v0="GET_HOSTNAME"
#
# Filled in by set_v1/set_v0 below (and get_hostname later).
typeset source_ip="NULL"
typeset source_hostname="NULL"
typeset source_vv="NULL"
# Point the source_* variables at the fixed V1 management server.
function set_v1 {
  source_vv="V1"
  source_ip="1.255.151.20"
  source_hostname="${hostname_v1}"
}

# Point the source_* variables at the V0 server given on the command line.
function set_v0 {
  source_vv="V0"
  source_ip="${P1}"
  source_hostname="${hostname_v0}"
}
# Print usage examples for this script.
function _help {
  printf '%s\n' \
    './000_scp_to_target.sh # original use (v1)' \
    './000_scp_to_target.sh 1.255.151.20 # v0' \
    './000_scp_to_target.sh 1.255.151.20 testwebwas01 # v0,hostname'
  return
}
## set initial value ##
# Choose the source server from $1: empty or the known v1 IP -> v1 defaults;
# "help"/"-h" -> usage; anything not shaped like a dotted quad -> usage and
# error; otherwise treat $1 as a v0 server address.
if [[ "${P1}" = "" ]] ; then
set_v1
elif [[ "${P1}" = "1.255.151.20" ]] ; then
set_v1
elif [[ "${P1}" = "help" ]] ; then
_help
exit 0
elif [[ "${P1}" = "-h" ]] ; then
_help
exit 0
elif [[ "${P1}" != [0-9]*.[0-9]*.[0-9]*.[0-9]* ]] ; then
# Shell pattern match (not a regex): a loose dotted-quad shape check.
_help
exit 1
else
set_v0
fi
# Exported so the scripts fetched and executed later can see them.
export source_ip
export source_hostname
export source_vv
# Emit the hard-coded RSA private key used to authenticate against the
# management server.
# SECURITY NOTE(review): a private key embedded in a provisioning script is
# shared by every host this script touches and by anyone who can read it;
# consider rotating the key and distributing it via a secrets channel.
function 000_root_idrsa {
cat <<- EOF
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAuXVvaN6p3qbyYYePOeuIqow5dOJF7geZFJeVZfNMTVUwi50J
sjO+x/fVjANLGZ2fH85xsWzzB/vap4ToDZCF2rY1EA2gKt+Wders0mXzaS5Ijf/F
46Qa2XksLMaFni2BzD4SsIaxfIn62giEV0UEWFP2HF9rmR9LtLKDwHUnAS8WMRba
fg2ZiYwvW3Smf9KZiDVxf8t7TPIKRsEjCJQxGNt4y8RCy4G5h+9wKjh0ai8NMj+P
7d9NB6TwvgdKZog8uAnOqRABTF2kp9/ikRZpK7euJ/7mPbU1Q67v+mnBnnCBkCy0
E/mMZwYwPpWh3g2Gwq0MsoY+4H3ItUkOkMNPRwIDAQABAoIBAGyiapk+XLjMiAwz
MOWXn11veDSMWrQchUH3rQ6kHpzp+t69JTHad7WA6fjy8OnXV590+UoZ7J5Pm/wm
sRtI/e9obdqycJDMmcEG1KRGDfgdoWh0W4GF3ihnf0XXH+vQ8kgmUCJRM+Qkmule
tc684devjul9x3RRTbJSIzT4KtnYsLM/Ps+SYhgn8aSE8yGkq6eQCZH1+i9sbcGU
cnCefhxJ6jYV3jFR6ae57lxIGvs01czf4yEfUujI5xOLlWLt2dOaJ0GFHE7V8usH
uZdsJpUkNgD0aCwqYBy/udiv0+tGXKz1HB8DuhnRw2GzaR+RfrGPZzRC/bpexjId
fCYBCVkCgYEA4c/vKRSToJ3iT+hM+YGGvhLt0MqRcQWbAQKxXF8ebD2YMOzbu3yV
VRv0CK4H6kZoL66s0gsDNWzmMbOs4RMoCUCUPUpippACMnGPwVCZNeZP6O33RK/v
gdU2B8sOI9vaxAT2nd08j66Xkx4DDie2DPH1DWCaVP9i1tbNSKTO5vMCgYEA0kB3
TwuI8FkMSEpEF8es4hCsS7Qdt70wjPaqLhx0E7NWfwo09IuFIt2M+Nay9/L6Npn2
MmZEh1DU9BsiMh6qb/DhfjhIoiZhE3jJqs9x7+BkIN8wDh1KK5rpqyvivodfbNhf
85pjRihnsnatudfk0/KKhqCuy9ChabKNk5PNM10CgYAHbbvEcjTZF9iWIGJH4wb3
wc+pCsD2IuUSh3AmRcrObMhQ87nW1SZkgmLo1jDUeDR9PRXaYxpb65U3FV4emW67
vzAhRA0yxZIM7sd36Jrhtw3x78IutEsAzm7Ums2ASH9N64vhbaHMaEX8RQR8trm6
e0tAgWkSWsR5pN9kAGf+GQKBgCaod8du5YTbuqhoD1EYA6+lRSi+O2CXRqAEkpHb
5XPh25uMMuRr6tTYS51NTKbOZDc548yshRkkQaOIgacZgFkIv01H6AL4b4z2/o2L
ivWCGqb4ootV01dlKmWwsgY6Oc93kVznHC3ALCDiNVsgWRCnXdUXIkyTVbprfCCQ
YoB5AoGBAIvUpNxrr+HRFvrQSJW9xoMGcUkJeCD2I6PYdcVYSqHztT57jfUZYWyH
NfwrRCKhzbJE+BEOPYULE+rQkxo0CIYZ32YZP+2pJiJVzHI192KcFxNAiaKdzrq3
jILhwDcwGVjMALrsrQleO1l4X83Q/qNMpMSQyh4pPaP3iqdRjn0h
-----END RSA PRIVATE KEY-----
EOF
return
}
# Emit the matching public key so it can be appended to authorized_keys.
function 000_root_authkey {
cat <<- EOF
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5dW9o3qnepvJhh48564iqjDl04kXuB5kUl5Vl80xNVTCLnQmyM77H99WMA0sZnZ8fznGxbPMH+9qnhOgNkIXatjUQDaAq35Z16uzSZfNpLkiN/8XjpBrZeSwsxoWeLYHMPhKwhrF8ifraCIRXRQRYU/YcX2uZH0u0soPAdScBLxYxFtp+DZmJjC9bdKZ/0pmINXF/y3tM8gpGwSMIlDEY23jLxELLgbmH73AqOHRqLw0yP4/t300HpPC+B0pmiDy4Cc6pEAFMXaSn3+KRFmkrt64n/uY9tTVDru/6acGecIGQLLQT+YxnBjA+laHeDYbCrQyyhj7gfci1SQ6Qw09H root@LCNBSPTPRX01H
EOF
return
}
function get_file_and_run {
scp -i /root/.ssh/id_rsa.v1 ${source_ip}:${indd}/${1} ${indd}
cd ${indd}
./${1}
cd - > /dev/null
}
function get_file {
scp -i /root/.ssh/id_rsa.v1 ${source_ip}:${indd}/${1} ${indd}
}
# Print the management server's hostname (second field of its `uname -a`).
# Fix: quote ${source_ip} to avoid word-splitting.
function get_hostname {
ssh -i /root/.ssh/id_rsa.v1 "${source_ip}" "uname -a" | awk '{print $2}'
}
## main ##
# Provision this host from the management server: install SSH trust both
# ways, register the server in /etc/hosts, then pull and run a fixed list
# of setup scripts.
# SECURITY NOTE(review): the private key written below is embedded in this
# script in cleartext; anyone with the script has root SSH access.
mkdir -p /root/.ssh/; cd /root/.ssh/
# Append the server's public key only if it is not present yet.
y=$( cat authorized_keys | grep 'root@LCNBSPTPRX01H' | wc -l )
if [[ $y -eq 0 ]] ; then
000_root_authkey >> authorized_keys
fi
# Write the embedded private key and lock down permissions.
000_root_idrsa > id_rsa.v1
chown root:root id_rsa.v1
chmod 0600 authorized_keys
chmod 0600 id_rsa.v1
source_hostname=$( get_hostname )
export source_hostname
# Register the management server in /etc/hosts if not already there.
y=$( grep -w "^${source_ip}" /etc/hosts | wc -l )
if [[ $y -eq 0 ]] ; then
echo "# Pasta management server ${source_vv}" >> /etc/hosts
echo "${source_ip} ${source_hostname} ${source_vv}" >> /etc/hosts
fi
cd - > /dev/null
# Stage this bootstrap script itself in ${indd} for reference.
mkdir -p $indd
if [[ ! -f ${indd}/000_scp_to_target.sh ]] ; then
cp ./000_scp_to_target.sh ${indd}/
fi
# Pull payloads, then fetch-and-execute the numbered setup scripts in order.
get_file rexx_up.sh
get_file sal_package.tar
get_file_and_run 001_adduser_pasta.sh
get_file_and_run 002_etc_hosts.sh
get_file_and_run 003_ssh_setup_xadmop01.sh
get_file_and_run 004_ssh_setup_root.sh
get_file_and_run 005_nmon_sh_copy.sh
get_file_and_run 006_sal_install.sh
get_file_and_run 007_sysstat_install.sh
get_file_and_run 008_rba_workload_install.sh
get_file_and_run 009_crontab_edit_setup.sh
get_file_and_run 010_crontab_log_enable.sh
get_file_and_run 011_change_expire_date_xadmop01.sh
get_file_and_run 012_iplinfo.sh
# These two are staged only; they are run later / on demand.
get_file 013_audit_script.sh
get_file 016_dumpxml.sh
get_file_and_run 020_centos_install_util.sh
get_file_and_run 014_nmon_logput_set.sh
get_file_and_run 015_bmon_report_set.sh
# SAL_SUM:42696:2017.09.01 Do not delete this line
| true
|
889bc1e0daf30adcc7301afd0bd7ce9cd04786c2
|
Shell
|
mrkmg/git-stream
|
/support/hooks/post-stream-feature-finish
|
UTF-8
| 192
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# post-stream-feature-finish hook: invoked after a feature branch has been
# finished by git-stream.
#
# Positional arguments:
#   $1 - feature name
#   $2 - feature branch
#   $3 - working branch
#
# The stock hook only captures its arguments and succeeds; add any custom
# post-finish behaviour below.
NAME=${1}
BRANCH=${2}
WORKING=${3}
# Custom logic goes here.
exit 0
| true
|
6d326ca75e68af5b8ec81b4d1a8791c481f35419
|
Shell
|
marionnyaboke/tbrucei_gcn
|
/scripts/analysis/download-genome-and-annotation-files.sh
|
UTF-8
| 1,896
| 2.5625
| 3
|
[] |
no_license
|
# Fetch the T. brucei and Glossina reference genomes plus annotation into
# data/scratch/. Order matters: gffread and gunzip below consume files
# downloaded earlier in this script.
#Downloading T. brucei genome
wget https://tritrypdb.org/common/downloads/release-43/TbruceiTREU927/fasta/data/TriTrypDB-43_TbruceiTREU927_Genome.fasta \
-P ../../data/scratch/tbrucei/
#Downloading the GFF file
wget https://tritrypdb.org/common/downloads/release-43/TbruceiTREU927/gff/data/TriTrypDB-43_TbruceiTREU927.gff \
-P ../../data/scratch/tbrucei/
# convert the tbrucei gene annotation from GFF format to GTF (required by some downstream tools)
# uses gffread from cufflinks
gffread ../../data/scratch/tbrucei/TriTrypDB-43_TbruceiTREU927.gff \
-T -o ../../data/scratch/tbrucei/TriTrypDB-43_TbruceiTREU927.gtf
# Download T. brucei annotated transcripts (for use in UTR motif discovery)
wget https://tritrypdb.org/common/downloads/release-43/TbruceiTREU927/fasta/data/TriTrypDB-43_TbruceiTREU927_AnnotatedTranscripts.fasta \
-P ../../data/scratch/tbrucei/
# Downloading Glossina genome --Moved to new loaction after VEuPathDB creation
# wget https://www.vectorbase.org/download/glossina-morsitans-yalescaffoldsgmory1fagz \
# -P ../../data/scratch/glossina/
# Downloading GTF file --Moved to new loaction after VEuPathDB creation
# wget https://www.vectorbase.org/download/glossina-morsitans-yalebasefeaturesgmory19gtfgz \
# -P ../../data/scratch/glossina/
# Downloading Glossina genome
wget https://vectorbase.org/common/downloads/Pre-VEuPathDB%20VectorBase%20files/Glossina-morsitans-Yale_SCAFFOLDS_GmorY1.fa.gz \
-P ../../data/scratch/glossina/
# Downloading GTF file
wget https://vectorbase.org/common/downloads/Pre-VEuPathDB%20VectorBase%20files/Glossina-morsitans-Yale_BASEFEATURES_GmorY1.9.gtf.gz \
-P ../../data/scratch/glossina/
# unzip Glossina genome file
gunzip ../../data/scratch/glossina/Glossina-morsitans-Yale_SCAFFOLDS_GmorY1.fa.gz
# unzip Glossina annotation file file
gunzip ../../data/scratch/glossina/Glossina-morsitans-Yale_BASEFEATURES_GmorY1.9.gtf.gz
| true
|
92eaa584fae8bccb8edb4e76984e3d225ca567da
|
Shell
|
adnam/sweet-home
|
/home/bin/fixme
|
UTF-8
| 338
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Scan the tree for maintenance markers (FIXME, XXX, TODO) in Python and
# PHP sources. For each marker: first print matching file names, then the
# matches with 5 lines of context.
#
# Fix: the per-extension find invocations were copy-pasted; loop over the
# extensions instead, and quote the marker variable.
for SCANS in FIXME XXX TODO; do
    for ext in py php; do
        find . -name "*.${ext}" \
            -exec grep -i --files-with-matches "$SCANS" {} \; \
            -exec grep -i --line-number --color=always -C 5 "$SCANS" {} \;
    done
done
| true
|
c839ae78bcb6e8f440ea1afd3959defba943a06b
|
Shell
|
knowbodynos/stringmods
|
/install/install_cohomcalg
|
UTF-8
| 667
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install cohomCalg
# Registers the cohomCalg install location on Mathematica's $Path by
# appending to the user's Kernel/init.m (once).
echo "Installing cohomCalg package to \${USER_LOCAL}/bin..."
# Locate the Mathematica install dir (strip the last two path components
# from the `math` binary's path).
mathpath=$(which math | rev | cut -d'/' -f1,2 --complement | rev)
# Probe the license manager; head -c -1 drops the trailing newline.
mathmsg=$(${mathpath}/monitorlm 2>&1 | head -c -1)
# Only touch init.m when a MathLM server is reachable — assumes math can
# actually start a kernel in that case (TODO confirm).
if [[ "${mathmsg}" != "Could not find a MathLM server." ]]
then
initfile=$(math -noprompt -run "WriteString[\$Output,\$UserBaseDirectory<>\"/Kernel/init.m\"];Exit[];" 2>/dev/null)
if ! grep -q "cohomCalg" ${initfile}
then
echo "AppendTo[\$Path, Environment[\"USER_LOCAL\"]<>\"/bin/cohomCalg\"]" >> ${initfile}
fi
# echo "AppendTo[\$Path, Environment[\"CRUNCH_ROOT\"]<>\"/packages/Mathematica/MongoLink\"]" >> ${initfile}
fi
| true
|
bc2bec9b781b5521154918a383f523c2ef59f09f
|
Shell
|
qiufeihai/feihai
|
/install/docker_webhook.sh
|
UTF-8
| 3,401
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# https://github.com/adnanh/webhook
# docker install webhook
#
# Print a magenta banner line around the given message.
log() {
    printf '%b\n' "\e[1;35m------------------------ $@ ------------------------------\e[0m"
}
# Report a fatal error on stderr and terminate with status 1.
die() {
    local reason=$1
    printf 'error: %s.\n' "$reason" >&2
    exit 1
}
# Succeed when string $1 matches shell glob pattern $2.
# $2 is deliberately left unquoted in the case arm so it acts as a pattern.
glob() {
    case "$1" in
        $2) return 0 ;;
    esac
    return 1
}
# Ask a yes/no question and succeed on y/Y.
# Requires an interactive terminal: stty switches it to non-canonical mode
# so dd can read a single keypress without waiting for Enter.
yn() {
    printf '%s [y/n]: ' "$1"
    stty -icanon
    answer=$(dd ibs=1 count=1 2>/dev/null)
    stty icanon
    printf '\n'
    glob "$answer" '[yY]'
}
# Accumulated docker command line, built up one fragment at a time.
cmd_str=""

# add_cmd_arg "args" — append one fragment to cmd_str (space-separated).
add_cmd_arg() {
    local fragment=$1
    [ -z "$fragment" ] && die "缺少参数arg"
    cmd_str="$cmd_str $fragment"
}
# Usage: add_cmd_arg_prompt "<prompt>" "args {{default_value}}" "input_var_name"
# Prompts the user, substitutes their answer (or the {{default}}) into the
# argument template, and appends it to cmd_str. The answer is also pushed
# onto the __input array and, when input_var_name is given, assigned to a
# variable of that name.
# NOTE(review): the eval below executes input_var_name as code — callers
# must pass a trusted literal name, never user input.
add_cmd_arg_prompt() {
local prompt=$1
local arg=$2
local input_var_name=$3
# Extract the {{default}} embedded in the argument template, if any.
local default_input=$(echo $arg | sed -nE 's/^.*\{\{(.*)\}\}.*$/\1/p')
[ -z "$prompt" ] && die "缺少参数prompt"
[ -z "$arg" ] && die "缺少参数arg"
# echo $default_input
prompt=${prompt}${default_input:+(默认:$default_input)}:
read -p $prompt input
# ${input:=default} falls back to the default when the user hit Enter.
cmd_str=$cmd_str' '${arg//\{\{*\}\}/${input:=$default_input}}
__input[${#__input[*]}]=$input;
[ ! -z "$input_var_name" ] && eval "$input_var_name=$input"
# echo $cmd_str
}
# add_cmd_arg_yn "<yes/no question>" "args"
# Ask the user whether to add the given argument; append it to cmd_str
# only on a yes answer (interactive, via yn above).
add_cmd_arg_yn() {
local prompt=$1
local arg=$2
[ -z "$prompt" ] && die "缺少参数prompt"
[ -z "$arg" ] && die "缺少参数arg"
yn $prompt && {
cmd_str=$cmd_str' '$arg
}
# echo $cmd_str
}
# add_cmd_arg_yn_prompt "<yes/no question>" "<prompt>" "args {{default}}" "input_var_name"
# Ask first; only when the user answers yes, fall through to
# add_cmd_arg_prompt with the remaining arguments.
# NOTE(review): $@ is unquoted here, so arguments containing spaces will
# be re-split before reaching add_cmd_arg_prompt — confirm intent.
add_cmd_arg_yn_prompt() {
local prompt=$1
[ -z "$prompt" ] && die "缺少参数yn_prompt"
yn $prompt && {
shift;
add_cmd_arg_prompt $@
}
}
# Interactively assemble the `docker run` command for the webhook container.
add_cmd_arg "docker run -d --restart always"
add_cmd_arg_prompt "请输入容器名称" "--name {{webhook}}"
add_cmd_arg_prompt "请输入端口" "-p {{9001}}:9000"
add_cmd_arg_prompt "请输入数据目录" "-v {{/mnt/webhook}}:/etc/webhook"
add_cmd_arg "almir/webhook -verbose -hooks=/etc/webhook/hooks.json -hotreload"
echo $cmd_str
# NOTE(review): exec replaces this shell with the docker command, so
# nothing below this line ever runs — the sample hook config write looks
# like it was meant to happen BEFORE the exec. Confirm intent.
# (cmd_str is deliberately unquoted so it word-splits into arguments.)
exec $cmd_str
# Sample GitHub hook definition (currently unreachable, see note above).
cat > /mnt/webhook/hooks.github.json <<EOF
// see: https://github.com/adnanh/webhook/blob/master/docs/Hook-Definition.md
[
{
"id": "my_id",
"execute-command": "/etc/webhook/my_script.sh",
"command-working-directory": "/etc/webhook",
"include-command-output-in-response": true,
"include-command-output-in-response-on-error": true,
"pass-arguments-to-command":
[
{
"source": "payload",
"name": "head_commit.id"
},
{
"source": "payload",
"name": "pusher.name"
},
{
"source": "payload",
"name": "pusher.email"
}
],
"trigger-rule":
{
"and":
[
{
"match":
{
"type": "payload-hash-sha1",
"secret": "mysecret",
"parameter":
{
"source": "header",
"name": "X-Hub-Signature"
}
}
},
{
"match":
{
"type": "value",
"value": "refs/heads/master",
"parameter":
{
"source": "payload",
"name": "ref"
}
}
}
]
}
}
]
EOF
| true
|
6959c26a25293634aee3967af2a0c1283ec4ea58
|
Shell
|
spring-cloud-samples/eureka-release-train-interop
|
/scripts/scenario_ilford_tester.sh
|
UTF-8
| 2,831
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Interop scenario: Hoxton Eureka server + tester against an Ilford client,
# then swap in an Ilford Eureka server. Helper functions (java_jar,
# wait_for_new_boot_app_to_boot_on_port, check_app_presence_in_discovery,
# send_test_request, kill_app) come from the sourced common.sh.
source common.sh || source scripts/common.sh || echo "No common.sh script found..."

# Fail fast on errors, trace failures through functions and pipelines.
set -o errexit
set -o errtrace
set -o pipefail

cat <<EOF
This Bash file will show you the scenario in which Eureka Server is in hoxton version and the Client is ilford.
We will use a hoxton Eureka Tester app to use a load balanced RestTemplate to find the "client" application.
We will do it in the following way:

01) Run eureka-hoxton-server (Eureka Server)
02) Wait for the app (eureka-hoxton-server) to boot (port: 8761)
03) Run eureka-ilford-client (App registers in Eureka)
04) Wait for the app (eureka-ilford-client) to boot (port: 8778)
05) Wait for the app (eureka-ilford-client) to register in Eureka Server
06) Run eureka-hoxton-tester (Will call the client app from Eureka)
07) Wait for the app (eureka-hoxton-tester) to boot (port: 7779)
08) Wait for the app (eureka-hoxton-tester) to register in Eureka Server
09) Now we have a hoxton Eureka Server, hoxton app that will call a ilford app
10) Call localhost:7779/check to make the tester send a request to the client that will find the server
11) Assert that the flow is working
12) Kill eureka-hoxton-server
13) Run eureka-ilford-server (Eureka Server)
14) Wait for the app (eureka-ilford-server) to boot (port: 8761)
15) Wait for the app (eureka-ilford-client) to register in Eureka Server
16) Wait for the app (eureka-hoxton-tester) to register in Eureka Server
17) Now we have a ilford Eureka Server, hoxton app that will call a ilford app
18) Call localhost:7779/check to make the tester send a request to the client that will find the server
19) Assert that the flow is working
20) Kill eureka-ilford-server
21) Kill eureka-ilford-tester
22) Kill eureka-hoxton-client
EOF

# Phase 1: Hoxton Eureka server, Ilford client, Hoxton tester.
serverPort="8761"
java_jar eureka-hoxton-server "-Dserver.port=${serverPort}"
wait_for_new_boot_app_to_boot_on_port "${serverPort}"

clientPort="8778"
java_jar eureka-ilford-client "-Dserver.port=${clientPort}"
wait_for_new_boot_app_to_boot_on_port "${clientPort}"
check_app_presence_in_discovery CLIENT

testerPort="7779"
java_jar eureka-hoxton-tester "-Dserver.port=${testerPort}"
wait_for_new_boot_app_to_boot_on_port "${testerPort}"
check_app_presence_in_discovery TESTER

send_test_request "${testerPort}"

echo -e "\n\nhoxton app successfully communicated with a ilford app via a hoxton Eureka"

# Phase 2: swap the Eureka server to Ilford; client and tester stay up.
kill_app eureka-hoxton-server

echo "Sleeping for 30 seconds"
sleep 30

java_jar eureka-ilford-server "-Dserver.port=${serverPort}"
wait_for_new_boot_app_to_boot_on_port "${serverPort}"
check_app_presence_in_discovery CLIENT
check_app_presence_in_discovery TESTER

send_test_request "${testerPort}"

echo -e "\n\nhoxton app successfully communicated with a ilford app via a ilford Eureka"

# Teardown.
kill_app eureka-ilford-server
kill_app eureka-hoxton-tester
kill_app eureka-ilford-client
| true
|
c7377bc6baff4dafc55d0f7f6dc0a4a06b2a6e13
|
Shell
|
dxAdam/Automation_Scripts
|
/util/alias_setup.sh
|
UTF-8
| 903
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install helper scripts into ~/.alias_scripts and register aliases for
# them in ~/.bashrc.
# NOTE(review): paths are built from /home/$USER, which breaks for
# accounts whose home lives elsewhere (e.g. root) — consider $HOME.

DEST="/home/$USER/.alias_scripts"
BASHRC="/home/$USER/.bashrc"

# -p: do not fail when the directory already exists (script is re-runnable).
mkdir -p "$DEST"

for script in check_raid.sh cpubench.sh gwe.sh resetsound.sh git_setup.sh dpkg-sort.sh; do
    cp "$script" "$DEST"
done

# Register one alias per installed script. gwe.sh is installed but its
# alias stays disabled, matching the previous behaviour.
{
    echo "alias check-raid='$DEST/check_raid.sh'"
    echo "alias cpubench='$DEST/cpubench.sh'"
    #echo "alias gwe='$DEST/gwe.sh'"
    echo "alias git-setup='$DEST/git_setup.sh'"
    echo "alias reset-sound='$DEST/resetsound.sh'"
    echo "alias dpkg-sort='$DEST/dpkg-sort.sh'"
    echo "alias python='python3'"
} >> "$BASHRC"

echo -e "\nrun\n source ~/.bashrc"
| true
|
f4427e1191a29a227131f6770bb79d46486c3d38
|
Shell
|
ToniWestbrook/paladin
|
/sample_data/make_test.sh
|
UTF-8
| 533
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#
# Smoke-test a PALADIN installation: download the sample read set and
# reference, index and prepare the reference, run an alignment, and
# report success when the expected output file is non-empty.
curl -O https://s3.amazonaws.com/paladin.aligner/test.fq
curl -O https://s3.amazonaws.com/paladin.aligner/paladin_test.faa
../paladin index -r3 paladin_test.faa
../paladin prepare -r1 -f paladin_test.faa
../paladin align -t4 paladin_test.faa test.fq -o test
# -s: true only if the alignment produced a non-empty report.
if [ -s test_uniprot.tsv ];
then
    # Fix: bash `echo` does not interpret \n without -e; printf does,
    # so the blank lines actually render.
    printf '\n\nPALADIN HAS BEEN SUCCESSFULLY INSTALLED\n\n'
else
    printf '\n\nOOPS: SOMETHING WENT WRONG WITH INSTALLATION, OR YOU ARE NOT CONNECTED TO THE INTERNET\n\n'
fi
rm paladin_test.faa*
| true
|
19a27dcd60b893d9ea1c7af2f66ed283ce9d9709
|
Shell
|
flutter/devtools
|
/tool/build_release.sh
|
UTF-8
| 1,570
| 3.4375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Build a release web bundle of DevTools. Optionally reuses the PATH
# Flutter SDK (--no-update-flutter) or refreshes perfetto assets
# (--update-perfetto).

# Contains a path to this script, relative to the directory it was called from.
RELATIVE_PATH_TO_SCRIPT="${BASH_SOURCE[0]}"

# The directory that this script is located in.
TOOL_DIR=`dirname "${RELATIVE_PATH_TO_SCRIPT}"`

# The devtools root directory is assumed to be the parent of this directory.
DEVTOOLS_DIR="${TOOL_DIR}/.."

pushd $TOOL_DIR

if [[ $1 = "--no-update-flutter" ]]
then
  # Use the Flutter SDK that is already on the user's PATH.
  FLUTTER_EXE=`which flutter`
  echo "Using the Flutter SDK that is already on PATH: $FLUTTER_EXE"
else
  # Use the Flutter SDK from flutter-sdk/.
  FLUTTER_DIR="`pwd`/flutter-sdk"
  PATH="$FLUTTER_DIR/bin":$PATH

  # Make sure the flutter sdk is on the correct branch.
  ./update_flutter_sdk.sh
fi

popd

# echo on
set -ex

echo "Flutter Path: $(which flutter)"
echo "Flutter Version: $(flutter --version)"

if [[ $1 = "--update-perfetto" ]]; then
  $TOOL_DIR/update_perfetto.sh
fi

# Refresh pub dependencies for the packages the app build depends on.
pushd $DEVTOOLS_DIR/packages/devtools_shared
flutter pub get
popd

pushd $DEVTOOLS_DIR/packages/devtools_extensions
flutter pub get
popd

# Clean and rebuild the release web bundle.
pushd $DEVTOOLS_DIR/packages/devtools_app

flutter clean
rm -rf build/web

flutter pub get

flutter build web \
  --web-renderer canvaskit \
  --pwa-strategy=offline-first \
  --release \
  --no-tree-shake-icons

# Ensure permissions are set correctly on canvaskit binaries.
chmod 0755 build/web/canvaskit/canvaskit.*

popd
| true
|
0c263d94870bcf9d8bed834e6a2d8a57c9acd05e
|
Shell
|
youngchansjtu/iptables
|
/block_test.sh
|
UTF-8
| 811
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Block or unblock outbound traffic to the given IPs via iptables DROP
# rules on the OUTPUT and FORWARD chains. Requires root.
ACTION=$1
shift
# NOTE(review): $@ collapses into a single space-joined string here; the
# later unquoted `for ip in $IPSET` re-splits it, so IPs must not contain
# whitespace (they don't).
IPSET=$@
IPTABLES="/sbin/iptables"
# Print the command-line usage synopsis.
function print_usage() {
    printf '%s\n' "Usage: ./block_test.sh [add|remove] ip1 ip2 ..."
}
# Validate the action, then add or remove a DROP rule per IP.
if [ "$ACTION" != "add" -a "$ACTION" != "remove" ]; then
	print_usage
	exit 1
fi

for ip in $IPSET
do
    case $ACTION in
        add)
            $IPTABLES -t filter -A OUTPUT -d $ip -j DROP
            $IPTABLES -t filter -A FORWARD -d $ip -j DROP
            ;;
        remove)
            # Look up the rule's line number in each chain, then delete by
            # number. NOTE(review): if an IP matches more than one rule,
            # grep returns several numbers and the delete fails — confirm
            # only one rule per IP is ever installed.
            oid=$($IPTABLES -L OUTPUT -n --line-numbers | grep $ip | awk '{print $1}')
            fid=$($IPTABLES -L FORWARD -n --line-numbers | grep $ip | awk '{print $1}')
            $IPTABLES -t filter -D OUTPUT $oid
            $IPTABLES -t filter -D FORWARD $fid
            ;;
    esac
done

# Show the resulting rule sets.
$IPTABLES -t filter -L OUTPUT -n
$IPTABLES -t filter -L FORWARD -n
| true
|
083d83eba9fdb643a176a02443756af66a6e005e
|
Shell
|
T3kton/disks
|
/deps/xfsprogs
|
UTF-8
| 494
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#SOURCE: https://www.kernel.org/pub/linux/utils/fs/xfs/xfsprogs/xfsprogs-4.19.0.tar.xz
#FILE: xfsprogs-4.19.0.tar.xz
#HASH: e6aa93d892df76a1675755b88e9ac6b2793eb619

# Dependency recipe for xfsprogs: build/install/binaries are callbacks
# invoked by the disks build framework. Positional args appear to be:
# build $1=source tarball; install $1=target root, $3=build dir — TODO
# confirm against the framework's calling convention.

set -e
set -x

build()
{
  # Unpack into the current dir (dropping the top-level dir) and build.
  tar --strip-components=1 -xJf $1
  ./configure --prefix=/
  make
}

install()
{
  # Copy only the tools the initramfs needs into $1/sbin.
  cp -f $3/fsck/xfs_fsck.sh $1/sbin/fsck.xfs
  cp -f $3/repair/xfs_repair $1/sbin
  cp -f $3/growfs/xfs_growfs $1/sbin
  cp -f $3/mkfs/mkfs.xfs $1/sbin
}

binaries()
{
  # List the installed binaries for the framework's manifest.
  ls $1/sbin/xfs_*
  ls $1/sbin/mkfs.xfs
}
| true
|
7421d79f81a46884eb6df55d4605b55382e5e684
|
Shell
|
mvendra/mvtools
|
/git/git_aliases.sh
|
UTF-8
| 2,658
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
alias gichepi="git cherry-pick"
# DIFF
alias gidif_setgui="git config --global diff.external meldiff.py"
alias gidif_setcmd="git config --global --unset diff.external"
alias gidif="git diff"
alias gidif_noext="gidif --no-ext"
# SHOW
alias gisho="git show"
# Show the given commit (default: HEAD) in the configured GUI diff tool
# by diffing it against its first parent.
# Note: still fails on a repository's root commit, which has no parent.
# Fixes: default via ${1:-HEAD} instead of an unquoted [ -z $1 ] test,
# a local variable instead of leaking HASH into the shell, and quoting.
gishogui(){
    local hash=${1:-HEAD}
    git difftool -y --no-prompt "$hash~1" "$hash"
}
gikill(){
# removes $1 commits from the repository
# saves a backup of each commit as a patch
# on this user's defined temp folder (MVTOOLS_TEMP_PATH)
# will not run if there is no pre-existing temp folder
# HEAD is patch-0, deepest ($1) commit
# is patch-$1

# Refuse to run on a dirty working tree.
R=`git status -s`
if [[ ! -z $R ]]; then
echo "HEAD is not clear. Aborting."
return
fi

TEMP_FOLDER="$MVTOOLS_TEMP_PATH"
if [ ! -d $TEMP_FOLDER ]; then
echo "No [$TEMP_FOLDER] (MVTOOLS_TEMP_PATH envvar) folder found. Aborting."
return
fi

NUM_COMMITS=`git log --oneline | wc -l`
RANGE=$1
if [ -z $1 ]; then
RANGE=1
fi

# Cannot delete more commits than exist.
#if ((RANGE > NUM_COMMITS)); then
if [ "$RANGE" -gt "$NUM_COMMITS" ]; then
echo "Requested deletion of more commits than there are. Aborting."
return
fi

# binary patches safety
# ($? below is egrep's status — the last command in the pipeline.)
gisho --oneline | egrep "Binary files (.)* differ"
if [ $? -eq 0 ]; then
echo "The last commit was binary. The backup patch will be ineffective. Press any key to proceed."
read
fi

# backs up all commits to be deleted
MAX=$RANGE
(( MAX-- ))
for i in `seq 0 $MAX`; do
HASH=`get_git_hash.py`
FN="$TEMP_FOLDER/gikill_backup_"
FN+=$HASH
FN+="_"
FN+="$i.patch"
git show HEAD~$i > $FN
done

# carries out the removal
git reset --hard HEAD~$RANGE
}
alias gista="git status"
alias giadd="git add"
alias giunadd="git reset HEAD" # mvtodo: does not work before the first commit
alias gicom="git_commit_with_prechecks.py"
alias gilog="git log"
alias gipus="git push"
alias gipul="git pull --ff-only"
alias giclo="git clone"
alias gibra="git branch"
alias giche="git checkout"
alias gires="git reset"
alias gimer="git merge"
alias gifet="git fetch"
alias gitag="git tag"
alias gisub="git submodule"
alias gibis="git bisect"
alias giapp="git apply"
alias gides="git describe"
alias giini="git init"
alias gicon="git config"
alias gireb="git rebase"
alias girem="git remote -v"
alias gigre="git grep -n"
alias gihel="git help"
alias gibla="git blame"
alias gistash="git stash"
alias giver="git version"
alias gicle="git clean"
alias gifsck="git fsck"
alias gilsu="git ls-files --exclude-standard --others"
alias gimv="git mv"
alias girm="git rm"
| true
|
82872d566f312d5cf18d114d26f56c95c08ffb48
|
Shell
|
ivanviso/ASO
|
/Noviembre/22.sh
|
UTF-8
| 261
| 2.859375
| 3
|
[] |
no_license
|
# Report which fortnight ("quincena") of the month and which half of the
# year today falls in.
#
# Bug fixes vs. the original:
#  * `date +%w` is the weekday (0-6), never > 15, so the fortnight test
#    could never trigger; the day of the month is `date +%d`.
#  * %d/%m are zero-padded ("08", "09"), which is an invalid octal value
#    in [[ -gt ]] arithmetic; force base 10 with 10#.
day=$((10#$(date +%d)))
if [[ $day -gt 15 ]]; then
    echo "estamos en la segunda quincena del mes"
else
    echo "estamos en la primera quincena del mes"
fi
month=$((10#$(date +%m)))
if [[ $month -gt 6 ]]; then
    echo "estamos en la segunda mitad del ano"
else
    echo "estamos en la primera mitad del ano"
fi
| true
|
6efb692530061ab508b0247e88009ad849aad0f1
|
Shell
|
NethajiChowdary/ShellPrograms
|
/Day7Arrays/largesmall.sh
|
UTF-8
| 632
| 3.65625
| 4
|
[] |
no_license
|
# Find the second-smallest and second-largest values in an array of
# three-digit random numbers.
#
# Bug fixes vs. the original:
#  * `m2 = $x` (spaces around =) runs a command named "m2", it is not an
#    assignment.
#  * the running extremum must be demoted into m2 BEFORE being
#    overwritten; the original did it after, so m2 always mirrored m1.

# Print the second-smallest distinct argument (all assumed < 1000).
second_smallest() {
    local m1=1000 m2=1000 x
    for x in "$@"
    do
        if [ "$x" -lt "$m1" ]
        then
            m2=$m1
            m1=$x
        elif [ "$x" -lt "$m2" ] && [ "$x" -ne "$m1" ]
        then
            m2=$x
        fi
    done
    echo "Second Smallest number is "$m2
}

# Print the second-largest distinct argument (all assumed >= 0).
second_largest() {
    local m1=0 m2=0 x
    for x in "$@"
    do
        if [ "$x" -gt "$m1" ]
        then
            m2=$m1
            m1=$x
        elif [ "$x" -gt "$m2" ] && [ "$x" -ne "$m1" ]
        then
            m2=$x
        fi
    done
    echo "Second largest number is "$m2
}

# Fill the array with 10 random three-digit numbers and report both values.
for (( i=1;i<=10;i++ ))
do
    array[$i]=$(shuf -i 100-999 -n 1 )
done
echo "${array[@]}"
second_smallest "${array[@]}"
second_largest "${array[@]}"
| true
|
b9f8fcbfa9015457865d497dbe6778e47fcc8d79
|
Shell
|
jcdubacq/shellselftests
|
/exo01/enonce.sh
|
UTF-8
| 2,647
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Exercise definition for the shell self-test framework. intro/setup/
# setupfinal/normaltests/finaltests are callbacks; runandcapture,
# nostderr, thistest, filecheck and filenotexists come from the sourced
# ../common/framework.sh.

# Describe the exercise to the student (copy jpg files, move notes*.txt,
# delete tmp* case-insensitively, rename mire.gif to Images/MIRE.GIF).
intro () {
    echo "Cet exercice vise à tester les fonctions de copie, déplacement, suppression. Des fichiers vont être installés dans le répertoire de l'exercice. Votre script devra copier les fichiers se terminant par \"jpg\" dans un répertoire (à créer) \"Images\", déplacer les fichiers intitulés \"notes\" suivi de quelque chose et terminés par .txt dans un répertoire (à créer) notes, et enfin effacer tous les fichiers qui auront comme début tmp, TMP ou toute autre combinaison majuscules/minuscules. Quant au fichier mire.gif, il doit être renommé en MIRE.GIF et mis dans le dossier \"Images\"."
}

# Create the base fixture files; each file's content names the file so its
# md5 can be recomputed in the checks below.
setup () {
    for i in orange.jpg prune.jpg marmotte.jpg notes12.txt notes24avril.txt note.txt notthingham.txt tmp41.xls TMP18.doc TPM.txt TMP.txt mire.gif; do
	echo "Contenu du fichier $i" > $i
    done
}

# Extended fixture for the final grading run: extra files plus one base
# file removed.
setupfinal () {
    setup
    for i in antitest.JPG amour.jpg antitest.txt note92.txt notes221.txt tmp.xls; do
	echo "Contenu du fichier $i" > $i
    done
    rm TMP18.doc
}

# Checks run against the base fixture.
normaltests () {
    runandcapture
    nostderr
    thistest Vérification des copies
    for i in orange.jpg prune.jpg marmotte.jpg; do
	SIGN=$(echo "Contenu du fichier $i"|md5sum|cut -c1-32)
	filecheck $SIGN $i
	filecheck $SIGN Images/$i
    done
    thistest Vérification du déplacement
    for i in notes12.txt notes24avril.txt; do
	SIGN=$(echo "Contenu du fichier $i"|md5sum|cut -c1-32)
	filenotexists $i
	filecheck $SIGN notes/$i
    done
    thistest Vérification de la suppression
    for i in TMP18.doc tmp41.xls TMP.txt; do
	filenotexists $i
    done
    thistest Vérification du renommage
    filenotexists mire.gif
    SIGN=$(echo "Contenu du fichier mire.gif"|md5sum|cut -c1-32)
    filecheck $SIGN Images/MIRE.GIF
}

# Checks run against the extended fixture.
finaltests () {
    runandcapture
    nostderr
    thistest Vérification des copies
    for i in orange.jpg prune.jpg marmotte.jpg amour.jpg; do
	SIGN=$(echo "Contenu du fichier $i"|md5sum|cut -c1-32)
	filecheck $SIGN $i
	filecheck $SIGN Images/$i
    done
    thistest Vérification du déplacement
    for i in notes12.txt notes24avril.txt notes221.txt; do
	SIGN=$(echo "Contenu du fichier $i"|md5sum|cut -c1-32)
	filenotexists $i
	filecheck $SIGN notes/$i
    done
    thistest Vérification de la suppression
    for i in tmp41.xls TMP.txt tmp.xls; do
	filenotexists $i
    done
    thistest Vérification du renommage
    filenotexists mire.gif
    SIGN=$(echo "Contenu du fichier mire.gif"|md5sum|cut -c1-32)
    filecheck $SIGN Images/MIRE.GIF
}

# The framework drives the callbacks defined above.
. ../common/framework.sh
| true
|
fa89a245d481b358d11f62aeba49a22f546acb9b
|
Shell
|
kecorbin/consul-hcs-vm-demo
|
/8-deploy-vm-environments/setup-workstation
|
UTF-8
| 2,062
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# This is the setup script for the workstation container. Use it to set the stage for your terraform training, demo, or lab.
# Derives Consul HCS connection data from Azure + prior terraform state,
# writes terraform.tfvars for the VM stage, installs a `jump` SSH helper,
# and initialises the vms terraform workspace.
set -e

mkdir -p /root/policies

# Export the Consul endpoint for interactive shells.
consul_lb=$(terraform output -state /root/terraform/hcs/terraform.tfstate consul_url)
echo "export CONSUL_HTTP_ADDR=${consul_lb}" >> ~/.bashrc

# Pull the managed cluster's config and CA bundle out of the Azure
# managed-app resource (base64-encoded JSON in the resource properties).
endpoint=$(az resource show --ids "/subscriptions/$(az account show | jq -r .id)/resourceGroups/$(terraform output --state ../vnet/terraform.tfstate resource_group_name)/providers/Microsoft.Solutions/applications/hcs/customconsulClusters/hashicorp-consul-cluster" --api-version 2018-09-01-preview | jq -r .properties.consulConfigFile | base64 -d | jq -r .retry_join[0])

consulconfig=$(az resource show --ids "/subscriptions/$(az account show | jq -r .id)/resourceGroups/$(terraform output -state /root/terraform/vnet/terraform.tfstate resource_group_name)/providers/Microsoft.Solutions/applications/hcs/customconsulClusters/hashicorp-consul-cluster" --api-version 2018-09-01-preview | jq -r .properties.consulConfigFile | base64 -d)

ca_cert=$(az resource show --ids "/subscriptions/$(az account show | jq -r .id)/resourceGroups/$(terraform output -state /root/terraform/vnet/terraform.tfstate resource_group_name)/providers/Microsoft.Solutions/applications/hcs/customconsulClusters/hashicorp-consul-cluster" --api-version 2018-09-01-preview | jq -r .properties.consulCaFile | base64 -d)

token=$(vault kv get -field=master_token secret/consul)

# Unquoted EOF: the heredoc interpolates the values gathered above.
cat << EOF > /root/terraform/vms/terraform.tfvars
ssh_public_key = "$(cat ~/.ssh/id_rsa.pub)"
endpoint = "${endpoint}"
consulconfig = <<-EOT
${consulconfig}
EOT
ca_cert = <<-EOT
${ca_cert}
EOT
consul_token = "${token}"
EOF

# Helper to SSH to a VM through the bastion host; \$ escapes defer
# expansion until the generated script runs.
cat << EOF > /usr/local/bin/jump
#!/bin/bash
host=\$1
bastion_ip=\$(terraform output -state /root/terraform/vnet/terraform.tfstate bastion_ip)
echo "Connecting to \$host via bastion host at \$bastion_ip"
exec ssh -q -A -J azure-user@\$bastion_ip azure-user@\${host}
EOF
chmod +x /usr/local/bin/jump

set-workdir /root/terraform/vms
cd /root/terraform/vms

terraform init

exit 0
| true
|
9cbebb9dfc0d408555387ad9c6b293b12be95e91
|
Shell
|
hades13/apv5sdk-v15
|
/apv5sdk-v15/rootfs-db12x_f1e.optbuild/usr/sbin/set_user_passwd
|
UTF-8
| 548
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Update the admin user's name and password: rewrite /tmp/passwd with the
# crypted password, update the XML config in the background, and persist
# the passwd file to /jffs when a copy exists there.
CONFIG_XML=/etc/config/config.xml
CRYPT=/sbin/crypt
#
#usage
#
usage()
{
	echo "set_usr_passwd [usrname] [passwd]"
	exit 1
}

if [ ! $# -eq 2 ];then
	usage
fi

usrname=$1
passwd=$2

# Fix: `local` is only valid inside functions; at top level it is an
# error under dash/ash (the /bin/sh on these devices).
pass_encoded=$($CRYPT "$passwd")

# Update the XML configuration concurrently with the passwd rewrite.
/usr/sbin/write_to_xml "$usrname" "$passwd" &

# Replace the name and crypted password fields on the line that contains
# the :admin: marker.
awk_program="BEGIN{FS=\":\"; OFS=\":\";}{if(\$0~/:admin:/){\$1=\"$usrname\"; \$2=\"$pass_encoded\";} print \$0;}"
awk "$awk_program" /tmp/passwd > /tmp/passwd_tmp
mv /tmp/passwd_tmp /tmp/passwd

if [ -e /jffs/passwd ];then
	cp -p /tmp/passwd /jffs/
fi
| true
|
dd446c6d01d60ee4192f7b1892008b36ceda1f40
|
Shell
|
g2graman/OrbisChallenge2014
|
/scripts/install
|
UTF-8
| 329
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Ensure pip, protobuf and autopep8 are available for the dev kit.
#
# Fix: presence was tested by RUNNING each tool with no arguments, but
# pip/autopep8 exit non-zero when invoked bare even if installed, so the
# script re-installed them every time. `command -v` tests presence only.
DEVKIT_DIR=OrbisChallengeDevKit-Python

if ! command -v pip >/dev/null 2>&1; then
    cd "$DEVKIT_DIR" || exit 1
    sudo -H python get-pip.py
    cd ..
fi

if [[ "$(pip list | grep protobuf)" == "" ]]; then
    sudo -H pip install protobuf
fi

if ! command -v autopep8 >/dev/null 2>&1; then
    sudo -H pip install --upgrade autopep8
fi
| true
|
c8841c775828560be07b9b39960a12af5922f901
|
Shell
|
houzy/ivim
|
/chvimrc.sh
|
UTF-8
| 2,468
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Switch between vim configuration sets by relinking ~/.vimrc and ~/.vim.
# macOS needs GNU readlink (greadlink, from coreutils) for -f support.
if [[ "$OSTYPE" == "darwin"* ]]; then
	app_dir=$(dirname $(greadlink -f $0))
else
	app_dir=$(dirname $(readlink -f $0))
fi
# debug_mode=1 makes debug() report failure locations.
debug_mode='0'
fork_maintainer='0'
# Write one line (with backslash-escape interpretation) to stderr.
msg() {
    local line=$1
    printf '%b\n' "$line" >&2
}

# Print a green check-marked message, but only when the last tracked
# operation ($ret) succeeded.
success() {
    if [ "$ret" -ne '0' ]; then
        return 0
    fi
    msg "\e[32m[✔]\e[0m ${1}${2}"
}

# Print a red cross-marked message and abort the script.
error() {
    msg "\e[31m[✘]\e[0m ${1}${2}"
    exit 1
}

# In debug mode, point at the function/line where a failure occurred.
debug() {
    if [ "$debug_mode" -eq '1' ] && [ "$ret" -gt '1' ]; then
        msg "An error occurred in function \"${FUNCNAME[$i+1]}\" on line ${BASH_LINENO[$i+1]}, we're sorry for that."
    fi
}
# Symlink $1 to $2, but only when the source exists. The result is stored
# in the global $ret for success()/debug(). Note: the `if` form matters —
# when the source is missing the if-statement's status is 0, so ret stays
# 0 (skipping is not an error).
lnif() {
    if [ -e "$1" ]; then
        ln -sf "$1" "$2"
    fi
    ret="$?"
    debug
}
# Link the chosen vim configuration into $HOME.
# Arguments: $1 - 1 for the full ivim set, anything else for the mini set.
# $1 is also passed to success() for the status line.
create_symlinks() {
    echo 'create links'
    endpath="$app_dir"
    if [ $1 = 1 ]; then
        lnif "$endpath/vimrc"               "$HOME/.vimrc"
        lnif "$endpath/vimrc.local"         "$HOME/.vimrc.local"
        lnif "$endpath/vimrc.ivim.local"   "$HOME/.vimrc.ivim.local"
        lnif "$endpath/vimrc.bundles.local" "$HOME/.vimrc.bundles.local"
    else
        lnif "$endpath/vimrc_mini" "$HOME/.vimrc"
    fi
    lnif "$endpath/.vim"           "$HOME/.vim"

    # Useful for fork maintainers
    if [ -e "$endpath/gvimrc.local" ]; then
        ln -sf "$endpath/gvimrc.local" "$HOME/.gvimrc.local"
    fi

    ret="$?"
    success "$1"
    debug
}
# Remove every vim-related link/file this tool may have created in $HOME,
# so a fresh configuration set can be linked in.
function clean_symlinks() {
    echo 'clean old links'
    local name
    for name in \
        .vimrc .vimrc.bundles .vimrc.before .vim \
        .vimrc.local .vimrc.ivim.local .vimrc.before.local .vimrc.bundles.local \
        .vimrc.fork .vimrc.bundles.fork .vimrc.before.fork \
        .gvimrc.local; do
        rm -f "$HOME/$name"
    done
    ret="$?"
    success "$1"
    debug
}
# Dispatch on the requested configuration set: relink $HOME to the chosen
# vimrc variant.
case $1 in
	vimrc )
		echo 'using vimrc'
		clean_symlinks
		ln -sf "$HOME/vimrc/.vimrc" "$HOME/.vimrc"
		ln -sf "$HOME/vimrc/.vim" "$HOME/.vim"
		;;
	vundle )
		echo 'using vimrcVundle'
		clean_symlinks
		ln -sf "$HOME/vimrcVundle/.vimrc" "$HOME/.vimrc"
		ln -sf "$HOME/vimrcVundle/.vim" "$HOME/.vim"
		;;
	ivim )
		echo 'using vimrcivim'
		clean_symlinks
		# 1 selects the full ivim link set.
		create_symlinks 1
		;;
	ivimmini )
		echo 'using vimrcivimmini'
		clean_symlinks
		create_symlinks 0
		;;
	*)
		echo 'input: vimrc, vundle or ivim'
		;;
esac
| true
|
facb1901eb15def447a45303378096254aea4324
|
Shell
|
shivamkm07/CS633_Assignments
|
/Assignment2/r5/run.sh
|
UTF-8
| 596
| 2.84375
| 3
|
[] |
no_license
|
#! /bin/bash
#
# Sweep the benchmark over process counts (P), processes-per-node (ppn)
# and data sizes (d), 10 repetitions, appending results to ./output.
rm -f output data
touch output
#chmod u+x create_hostfile
#make clean
#make
for i in {1..10}
do
    for P in 4 16
    do
        for ppn in 1 8
        do
            # One node group per 6 nodes (integer division), plus one.
            # ($(( )) replaces the legacy `let`.)
            ng=$((P / 6 + 1))
            python group_nodes.py 6 $ng $ppn
            for d in 16 256 2048
            do
                echo "i "$i " P "$P" ppn "$ppn" d "$d
                # Fix: -np needs the evaluated product; the original
                # passed the literal string "4*1" to mpirun.
                mpirun -np $((P * ppn)) -f group_hostfile ./exec $d >> output
            done
        done
    done
done
#
## ./create_hostfile $P 8 > /dev/null
#
#	for N in 256 1024 4096 16384 65536 262144 1048576
#	do
#		mpirun -np $P -f hosts ./exec $N 50 >> output
#	done
# done
#done
#python3 plot.py
| true
|
dd1cf06ab592a7dada549fe855415286277b6131
|
Shell
|
rayiik/wireguard-go
|
/build_linux.sh
|
UTF-8
| 469
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Cross-build wireguard-go for Linux (default arch: amd64), installing the
# GUI build dependencies on apt-based systems the first time
# (.deps/prepared marks a prepared tree).
if [ -z "$ARCH" ]; then
    ARCH=amd64
fi
export ARCH=$ARCH

mkdir -p "$ARCH"

if [ ! -f .deps/prepared ]; then
    # command -v is the robust presence test for apt-get (and, unlike
    # `which`, does not print the path as a side effect).
    if command -v apt-get >/dev/null 2>&1; then
        sudo apt-get update
        sudo apt-get install gcc libgl1-mesa-dev xorg-dev libgtk-3-dev libappindicator3-dev -y
    fi
fi

GOOS=linux \
GOARCH=$ARCH \
BIN_OUTPUT=$ARCH/wireguard \
GUIWRAPPER_BIN_OUTPUT=../$ARCH/guiwrapper \
TRAYWRAPPER_BIN_OUTPUT=../$ARCH/traywrapper \
./build.sh

cp util/icon/logo.png "$ARCH/"
| true
|
8c1c0b4fd5aea1c210e975b89f6147d984f1c5be
|
Shell
|
hurley25/vim-set
|
/.bashrc
|
UTF-8
| 1,663
| 2.8125
| 3
|
[] |
no_license
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
	. /etc/bashrc
fi
# User specific aliases and functions
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
alias cls='clear'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias fuck='$(thefuck $(fc -ln -1))'
export LS_COLORS='no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:bd=40;33;01:cd=40;33;01:or=01;05;37;41:mi=01;05;37;41:ex=01;32:*.cmd=01;32:*.exe=01;32:*.com=01;32:*.btm=01;32:*.bat=01;32:*.sh=01;32:*.csh=01;32 :*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;3 1:*.bz2=01;31:*.bz=01;31:*.tz=01;31:*.rpm=01;31:*.cpio=01;31:*.jpg=01;35:*.gif=01;35:*.bmp=01;35:*.xbm =01;35:*.xpm=01;35:*.png=01;35:*.tif=01;35:'
#export LANG=zh_CN.UTF-8
export TBLIB_ROOT=/opt/csr/common/
alias ch='ps aux | grep 62102 | grep tair'
# The "nothing to commit, working directory clean" line differs between
# git versions. If the dirty marker misbehaves, run `git status` on a
# clean branch and copy its last line into the comparison below.
function parse_git_dirty {
  [[ $(git status 2> /dev/null | tail -n1) != "nothing to commit (working directory clean)" ]] && echo "*"
}
# Current branch name, wrapped in brackets, with * appended when dirty.
function parse_git_branch {
  git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/[\1$(parse_git_dirty)]/"
}
# PS1 can be changed freely; the key part is keeping
# \[\033[01;33m\]$(parse_git_branch)\[\033[00m\] inside PS1. To only add
# the branch to your current prompt, echo $PS1 and splice it in yourself.
PS1='\[\033[01;32m\]\u@\h\[\033[00m\] \[\033[01;34m\]\W\[\033[00m\] \[\033[01;33m\]$(parse_git_branch)\[\033[00m\]$ '
| true
|
cbfc7b6ccfca3c7724b3869e9a59c4b40a0c8f01
|
Shell
|
fredwang222/wallig
|
/Projects/Scarecrow/trunk/trunk/Script/createmakefile.sh
|
UTF-8
| 5,289
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the project's top-level Makefile from the environment:
#   ARCH        - architecture directory under System/
#   TARGET      - target name looked up in System/System.conf (for ARCH_DEFINE)
#   DRV_PATH    - base directory of the drivers
#   DRVIER_LIST - space-separated driver names (sic: "DRVIER" is the spelling
#                 the callers export, kept for compatibility)
#   LIB_PATH    - base directory of the libraries
#   LIB_LIST    - space-separated library names
#   PRJ_NAME    - basename of the produced ELF image
# Fix: the original first line was "# !/bin/bash" (a plain comment, not a
# shebang), so the script ran under whatever shell happened to invoke it.
MAKFILE=Makefile
# Truncate/create the Makefile, then append every subsequent fragment.
echo "#$MAKFILE generated with prjmake.sh" >$MAKFILE
echo "">>$MAKFILE
echo 'export PROJECT_PATH=$(CURDIR)'>>$MAKFILE
echo 'export CONFIG_PATH=Config'>>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# system variables">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "export ARCH_PATH=System/"$ARCH>>$MAKFILE
echo 'include $(ARCH_PATH)/arch.mk'>>$MAKFILE
# ARCH_DEFINE comes from the DEF=... field of the TARGET's line in System.conf.
echo "export ARCH_DEFINE="`grep $TARGET System/System.conf | grep 'DEF=' | sed s/.*DEF=//`>>$MAKFILE
echo 'ARCH_OBJ=$(wildcard $(ARCH_PATH)/obj/*.o)'>>$MAKFILE
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# driver variables">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "#drivers">>$MAKFILE
echo "export DRV_PATH="$DRV_PATH>>$MAKFILE
# One <DRV>_PATH / <DRV>_OBJ pair per driver.
for DRV in $DRVIER_LIST
do
echo "#"$DRV>>$MAKFILE
echo "export "$DRV'_PATH=$(DRV_PATH)/'$DRV>>$MAKFILE
echo $DRV'_OBJ=$('$DRV'_PATH)/obj/'$DRV'.a'>>$MAKFILE
done
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# Lib variables">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "#Library">>$MAKFILE
echo "export LIB_PATH="$LIB_PATH>>$MAKFILE
# One <LIB>_PATH / <LIB>_OBJ pair per library.
for LIB in $LIB_LIST
do
echo "#"$LIB>>$MAKFILE
echo "export "$LIB'_PATH=$(LIB_PATH)/'$LIB>>$MAKFILE
echo $LIB'_OBJ=$('$LIB'_PATH)/obj/'$LIB'.a'>>$MAKFILE
done
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# project variables">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "#project">>$MAKFILE
echo 'PROJECT_SRC=$(wildcard src/*.c)'>>$MAKFILE
echo 'PROJECT_OBJ:=$(patsubst %.c,%.o,$(PROJECT_SRC))'>>$MAKFILE
echo 'PROJECT_EXE='$PRJ_NAME'.elf'>>$MAKFILE
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# inlcude dir">>$MAKFILE
echo "######################################################################">>$MAKFILE
# Build the -I include list: project roots, every driver/lib dir, arch headers.
echo -n "export IDIR=-I. -I.. ">>$MAKFILE
for DRV in $DRVIER_LIST
do
echo -n '-I$(PROJECT_PATH)/$('$DRV'_PATH) '>>$MAKFILE
done
for LIB in $LIB_LIST
do
echo -n '-I$(PROJECT_PATH)/$('$LIB'_PATH) '>>$MAKFILE
done
echo -n '-I$(PROJECT_PATH)/$(ARCH_PATH) -I$(PROJECT_PATH)/$(ARCH_PATH)/lib/inc '>>$MAKFILE
echo " ">>$MAKFILE
echo " ">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "#all target">>$MAKFILE
echo "######################################################################">>$MAKFILE
# "all" depends on every driver, every lib, the arch layer and the ELF.
echo -n 'all: '>>$MAKFILE
for DRV in $DRVIER_LIST
do
echo -n $DRV" ">>$MAKFILE
done
for LIB in $LIB_LIST
do
echo -n $LIB" ">>$MAKFILE
done
echo -n "Arch ">>$MAKFILE
echo -n '$(PROJECT_EXE) '>>$MAKFILE
echo " ">>$MAKFILE
echo " ">>$MAKFILE
# Link rule: ELF from project objects + arch objects + driver/lib archives,
# then derive the raw binary and a disassembly listing.
echo -n '$(PROJECT_EXE): $(PROJECT_OBJ) $(ARCH_OBJ) '>>$MAKFILE
for DRV in $DRVIER_LIST
do
echo -n '$('$DRV'_OBJ) '>>$MAKFILE
done
for LIB in $LIB_LIST
do
echo -n '$('$LIB'_OBJ) '>>$MAKFILE
done
echo " ">>$MAKFILE
echo '	$(CC) -o $@ $^ $(LDFLAGS) -lm'>>$MAKFILE
echo '	@echo Creat binary file:'>>$MAKFILE
echo '	arm-none-eabi-objcopy -O binary $@ $(patsubst %.elf,%.bin,$@) '>>$MAKFILE
echo '	@echo Creat disassembly file:'>>$MAKFILE
echo '	arm-none-eabi-objdump -h -S -C -r $@ > $(patsubst %.elf,%.lss,$@)'>>$MAKFILE
echo " ">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "#project target">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "src/%.o:src/%.c">>$MAKFILE
echo '	$(CC) -o $@ -c $< $(CFLAGS) $(IDIR)'>>$MAKFILE
echo " ">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# system targets">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "Arch:">>$MAKFILE
echo '	@(cd $(ARCH_PATH) && $(MAKE))'>>$MAKFILE
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# driver targets">>$MAKFILE
echo "######################################################################">>$MAKFILE
# Each driver/lib builds by recursing into its own directory.
for DRV in $DRVIER_LIST
do
echo $DRV":">>$MAKFILE
echo '	@(cd $('$DRV'_PATH) && $(MAKE))'>>$MAKFILE
done
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# Lib targets">>$MAKFILE
echo "######################################################################">>$MAKFILE
for LIB in $LIB_LIST
do
echo $LIB":">>$MAKFILE
echo '	@(cd $('$LIB'_PATH) && $(MAKE))'>>$MAKFILE
done
echo "">>$MAKFILE
echo "######################################################################">>$MAKFILE
echo "# clean target">>$MAKFILE
echo "######################################################################">>$MAKFILE
# "clean" removes project objects and recurses into each driver/lib dir.
echo "clean:">>$MAKFILE
echo '	rm -rf $(PROJECT_OBJ)'>>$MAKFILE
for DRV in $DRVIER_LIST
do
echo '	@(cd $('$DRV'_PATH) && $(MAKE) $@)'>>$MAKFILE
done
for LIB in $LIB_LIST
do
echo '	@(cd $('$LIB'_PATH) && $(MAKE) $@)'>>$MAKFILE
done
| true
|
04e4d1fed854646c0abebf611c53d364f5bee6a3
|
Shell
|
erija952/project-euler
|
/bash/p2.sh
|
UTF-8
| 262
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Project Euler #2: sum the even-valued Fibonacci terms below a limit.
#
# Usage: p2.sh [limit]
#   limit - as in the original interface, terms strictly below (limit - 1)
#           are considered; when omitted, the bound defaults to 4000000.
#
# Prints "The sum is <sum>" on stdout.
#
# Fix: the original evaluated `let max=$1-1` before checking the argument
# count, so $1 was expanded even when absent; the bound is now computed
# only in the branch that actually uses it, with $(( )) arithmetic.
if [ $# -eq 1 ]; then
  max=$(($1 - 1))
else
  max=4000000
fi
f1=1
f2=2
sum=2
# Walk the Fibonacci sequence from (1, 2), accumulating the even terms.
while [ "$f2" -lt "$max" ]; do
  prev=$f2
  f2=$((f1 + f2))
  f1=$prev
  if [ $((f2 % 2)) -eq 0 ]; then
    sum=$((sum + f2))
  fi
done
echo The sum is $sum
| true
|
7914f4034bc52cfc28e39fd3ab58744186386ff4
|
Shell
|
ACenterA/acentera-aws-serverless-cms-todo
|
/backend/go/sam-package.sh
|
UTF-8
| 5,818
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the ACenterA CMS plugin with AWS SAM for stage $1 (dev/qa/prod),
# then rewrite the plugin template so its artifact URIs point at the
# freshly packaged S3 objects.
# Expects PLUGINNAME (and optionally S3_BUCKET, HOME_PWD) in the environment
# — TODO confirm; neither is set in this script.
# set -e
# set -x
PROGNAME=$(basename -- "${0}")
PROJROOT=$(d=$(dirname -- "${0}"); cd "${d}/.." && pwd)
if [[ -z "$1" ]]; then
echo "Next 1st parameter dev, qa, or prod for stage"
else
STAGE=$1
DT=$(date +%Y)
SEMVER=0.0.18
S3PREFIX="packaged/$DT/0.0.1/acentera-${PLUGINNAME}"
BUCKETNAME=${S3_BUCKET:-"lambda-at-edge-dev-serverlessdeploymentbucket-1gmbbmp4ajnba"}
# Optional per-stage AWS credentials/overrides.
if [ -e .${STAGE}.aws ]; then
source .${STAGE}.aws
fi
# Instantiate the SAM template for this stage/plugin/version.
cp -f template.yml .template.yml.$1
# First update the Path: .... no hard-coded value ideally
sed -ri "s~<%PLUGIN_NAME%>~${PLUGINNAME}~g" .template.yml.$1
sed -ri "s~<%STAGE%>~${STAGE}~g" .template.yml.$1
sed -ri "s~<%SEMVER%>~${SEMVER}~g" .template.yml.$1
[[ -e packaged-template.yml.$1 ]] && rm -f packaged-template.yml.$1
# Stage the shared native library where the Go lambda build expects it.
mkdir -p /go/src/github.com/myplugin/gofaas/shared/
/bin/cp -f /go/src/github.com/acenteracms/acenteralib/aws.so /go/src/github.com/myplugin/gofaas/shared/.
# Smoke-test one lambda locally before packaging.
echo "TEST INVOKE"
sam local invoke --template .template.yml.$1 "ModelLambda" --docker-volume-basedir "${HOME_PWD}/" -e event.json
sam package --debug --template-file .template.yml.$1 --output-template-file packaged-template-acentera.yaml.${STAGE} --s3-bucket ${BUCKETNAME} --s3-prefix ${S3PREFIX}
echo "############################################################"
echo "#################        CMS       ##############################"
echo "############################################################"
# Instantiate the plugin template, then splice in the S3 URIs that
# `sam package` produced above, via yq/jq round-trips.
cp -f template.plugin.yml .template.plugin.yml.$1
sed -ri "s~<%STAGE%>~${STAGE}~g" .template.plugin.yml.$1
sed -ri "s~${PLUGINNAME}~<%PLUGIN_NAME%>~g" .template.plugin.yml.$1
sed -ri "s~<%PLUGIN_NAME%>~${PLUGINNAME}~g" .template.plugin.yml.$1
sed -ri "s~<%SEMVER%>~${SEMVER}~g" .template.plugin.yml.$1
cat packaged-template-acentera.yaml.${STAGE}
# NOTE(review): the two lines below read the hard-coded ".prod" packaged
# template instead of ".${STAGE}" — looks unintentional; verify.
NEW_LICENCEFILE=$(cat packaged-template-acentera.yaml.prod | yq . | jq -r '.Metadata["AWS::ServerlessRepo::Application"].LicenseUrl' | sed -r 's~(s3://.*/packaged/.*)~\1~g')
NEW_READMEFILE=$(cat packaged-template-acentera.yaml.prod | yq . | jq -r '.Metadata["AWS::ServerlessRepo::Application"].ReadmeUrl' | sed -r 's~(s3://.*/packaged/.*)~\1~g')
NEW_S3FILE=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.RequestsLayer.Properties.ContentUri')
# NEW_S3FILE=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.RequestsLayer.Properties.ContentUri' | sed -r 's~s3://(.*)/(packaged/.*)~\2~g')
echo $NEW_S3FILE
# cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_S3FILE '.Resources.RequestsLayerCMS.Properties.Content.S3Key = $LayerBIN' > .template.plugin.yml.$1.tmp
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_S3FILE '.Resources.RequestsLayerCMS.Properties.ContentUri = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# Inline the GraphQL schema into the AppSync resource.
SCHEMA_CONTENT=$(cat schema.graphql)
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN "$SCHEMA_CONTENT" '.Resources.AppSyncSchema.Properties.Definition = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# LicenceFile
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_LICENCEFILE '.Metadata["AWS::ServerlessRepo::Application"].LicenseUrl = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# ReadmeFile
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_READMEFILE '.Metadata["AWS::ServerlessRepo::Application"].ReadmeUrl = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# NEW_S3BUCKET=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.RequestsLayer.Properties.ContentUri' | sed -r 's~s3://(.*)/(packaged/.*)~\1~g')
# echo $NEW_S3BUCKET
# cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_S3BUCKET '.Resources.RequestsLayerCMS.Properties.Content.S3Bucket = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# Propagate each lambda's packaged CodeUri into the plugin template.
NEW_AS=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.ApiApp.Properties.CodeUri')
echo $NEW_AS
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_AS '.Resources.ApiApp.Properties.CodeUri = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
NEW_AS=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.ApiPluginSettings.Properties.CodeUri')
echo $NEW_AS
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_AS '.Resources.ApiPluginSettings.Properties.CodeUri = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
NEW_AS=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.ModelLambda.Properties.CodeUri')
echo $NEW_AS
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_AS '.Resources.ModelLambda.Properties.CodeUri = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
NEW_AS=$(cat packaged-template-acentera.yaml.${STAGE} | yq . | jq -r '.Resources.PublicWebsite.Properties.CodeUri')
echo $NEW_AS
cat .template.plugin.yml.$1 | yq . | jq --arg LayerBIN $NEW_AS '.Resources.PublicWebsite.Properties.CodeUri = $LayerBIN' > .template.plugin.yml.$1.tmp
cp -f .template.plugin.yml.$1.tmp .template.plugin.yml.$1
# Final template for publishing.
cp -f .template.plugin.yml.$1 output.yml
# sam publish --debug --template packaged-template-acentera-ecseks-resources.yaml.$1 --region us-east-1
fi
| true
|
22c57bf5620c5ee746607191c98e868d13894ca6
|
Shell
|
Hiraji/sample-project
|
/bin/sr-jenkins-catkin
|
UTF-8
| 2,443
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Jenkins build script to make and test a ros package. The package, with its
# stack is also expected to be checked out into $WORKSPACE, which is what
# jenkins does.
# * Creates an overlay in $WORKSPACE of the installed ros and shadow dependancies.
# * Fixes test out put to be under jenkins workspace so jenkins can see it.
# * Fixes rostest rubbish xml out put (https://code.ros.org/trac/ros/ticket/3770)
# * Put ros logs into $WORKSPACE/logs so jenkins can see them.
# --mda
#
set -e   # fail on errors
#set -x  # echo commands run
# Exactly one argument: the ROS distro name (e.g. "hydro").
if [ ! "$#" == 1 ]; then
echo Wrong number of arguments
echo usage: $0 ROSVERSION
exit 3
fi
export BUILD_ROS_VERSION=$1
# sr-build-tools needs to be checked out somewhere.
# sr-jenkins-slave should have done this for us.
export SR_BUILD_TOOLS="/opt/shadow/sr-build-tools"
if [ -z "$WORKSPACE" ]; then
echo WORKSPACE is not set. Can\'t run. Not being run by Jenkins?
exit 1
fi
# Print a visually distinct section banner to the build log.
header() {
echo
echo "*********************************************************************"
echo $@
echo "*********************************************************************"
}
header Starting build
# Enter the ros environment
ros_dir="/opt/ros/$BUILD_ROS_VERSION"
if [ ! -d "$ros_dir" ]; then
echo Ros directory $ros_dir not found!
echo Are you sure this is a valid version of ROS?
echo Has this ros version been installed, ie slave bootstrapped properly?
exit 10
fi
source "$ros_dir/setup.bash"
# Set up the workspace: one catkin workspace per job/ros-version/branch,
# recreated from scratch on every build.
SR_WORKSPACE="$HOME/shadow-ros-$BUILD_ROS_VERSION-$(basename "$WORKSPACE")"
[ -n "$GIT_BRANCH" ] && SR_WORKSPACE="$SR_WORKSPACE-$GIT_BRANCH"
header Installing ros workspace $SR_WORKSPACE
if [ -e "$SR_WORKSPACE" ]; then
rm -rf "$SR_WORKSPACE"
fi
mkdir -p "$SR_WORKSPACE/src"
cd "$SR_WORKSPACE/src"
catkin_init_workspace
# Add the package to test and build (symlinked from the Jenkins checkout).
header Building
cd "$SR_WORKSPACE/src"
ln -s "$WORKSPACE" .
cd ..
catkin_make
# NOTE(review): under set -e this line is only reached when catkin_make
# succeeded, so build_res is always 0 and is never used afterwards.
build_res=$?
source "devel/setup.bash"
# Build and run the tests
header Building Tests
cd "$SR_WORKSPACE/build"
make tests
header Running Tests
# Temporarily allow failure so the test exit code can be captured and
# reported at the end instead of aborting the script.
set +e
make test
test_res=$?
set -e
# Cleanup the XML creating test_results/_hudson
echo Clean up test XML
rosrun rosunit clean_junit_xml.py
# Put the tests where Jenkins can find them
[ -d "$WORKSPACE/test_results" ] && rm -rf "$WORKSPACE/test_results"
cp -a "$SR_WORKSPACE/build/test_results" "$WORKSPACE/"
header Build Complete
# Exit with the test status so Jenkins marks the build accordingly.
exit $test_res
| true
|
972ea883dc2109f8536caf6f167ecd14e3fb325c
|
Shell
|
chrissanders/pivotmap
|
/pivotmap.sh
|
UTF-8
| 382
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Read CSV edge pairs on stdin and emit an HTML force-directed graph built
# from the d3chart/forceopacity.html template.
# Usage: cat sample.csv | ./pivotmap.sh > map.html
genmap () {
# Prepend a "source,target" header to the piped CSV (the sed replacement
# contains a literal newline via the trailing backslash) and stash it.
sed '1 s/^/source,target\
/' >> temp.test
# Splice the stashed CSV into the template where "dataplaceholder" sits:
# blank the marker on its own line, then read temp.test in after it.
sed '/dataplaceholder/{
s/dataplaceholder//g
r temp.test
}' d3chart/forceopacity.html
rm temp.test
}
if [ "$1" == "-h" ]; then
echo -e "Usage: cat sample.csv | ./pivotmap.sh > map.html"
echo -e "Examine sample.csv or so.csv for an example of how your input should be formatted."
exit 0
fi
genmap
| true
|
8d9b3f3fcbb9c18d6356007899d53e319711bcd1
|
Shell
|
RahulP5/pyresistors
|
/dist/AppImage/AppRun
|
UTF-8
| 622
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# AppImage entry point for pyresistors: point the dynamic linker, Qt and
# Python at the bundled trees, then hand control to the application.
ARCH=$(uname -m)
SELF_DIR="$(dirname "$(readlink -f "$0")")"
# Bundled library directories, highest priority first.
bundled_libs="$SELF_DIR/usr/lib:$SELF_DIR/usr/lib/$ARCH-linux-gnu:$SELF_DIR/lib/$ARCH-linux-gnu"
# Prepend to any caller-supplied LD_LIBRARY_PATH rather than clobbering it.
if [ -n "$LD_LIBRARY_PATH" ]; then
  export LD_LIBRARY_PATH="$bundled_libs:$LD_LIBRARY_PATH"
else
  export LD_LIBRARY_PATH="$bundled_libs"
fi
export QT_QPA_PLATFORM_PLUGIN_PATH="$SELF_DIR/usr/lib/qt5/plugins"
export PYTHONPATH="$SELF_DIR/usr/lib/python3.6:$SELF_DIR/usr/lib/python3.6/lib-dynload:$SELF_DIR/usr/lib/python3/dist-packages"
export PATH="$SELF_DIR/usr/bin:$PATH"
# Replace this shell with the application process.
exec python3 "$SELF_DIR/usr/share/pyresistors/pyresistors.py"
| true
|
d55a90c5a4045a4715e74cd9b624368b789be0ef
|
Shell
|
syscl/M3800
|
/tools/mnt
|
UTF-8
| 907
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/sh
# (c) syscl 2016-2019
# Handy script to mount EFI/ESP partition in macOS
#================================= GLOBAL VARS ==================================
BOLD="\033[1m"
RED="\033[1;31m"
GREEN="\033[1;32m"
BLUE="\033[1;34m"
OFF="\033[m"
# EFI identifier, e.g. disk0s1
gESPID=""
if [[ $# -eq 1 ]]; then
    # Identifier supplied on the command line; normalise to lower case.
    gESPID=$(echo "$@" | tr '[:upper:]' '[:lower:]')
else
    # Interactive: show the partition table and ask for the identifier.
    diskutil list
    printf "Enter EFI's IDENTIFIER, e.g. ${RED}disk0s1${OFF}"
    read -p ": " gESPID
fi
# On macOS 10.14 Mojave and later we must be a sudoer to mount the EFI
# partition.
let requireRootMinVer=14
# get system version, e.g. "10.14.6" or "11.2.3"
gProductVer="$(sw_vers -productVersion)"
# Fix: the original used ${gProductVer:3:2}, which only works for "10.xx"
# strings and broke on macOS 11 (Big Sur) and newer. Split the version on
# dots instead.
gMAJOR_VER=${gProductVer%%.*}
gMINOR_VER=$(echo "$gProductVer" | cut -d. -f2)
if [ "$gMAJOR_VER" -gt 10 ] || [ "$gMINOR_VER" -ge $requireRootMinVer ]; then
    # 10.14+ and all of macOS 11+
    sudo diskutil mount ${gESPID}
else
    diskutil mount ${gESPID}
fi
# we must hold the return value from diskutil
gErrorStatus=$?
exit ${gErrorStatus}
| true
|
75d224b7511e3831a1867b05c249cbf12a50f9e9
|
Shell
|
VNeddy/filecrush
|
/test/run.sh
|
UTF-8
| 3,549
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Test driver for filecrush against a Hadoop/Hive cluster.
# Requires BEELINE_CONNECT (a beeline JDBC URL) in the environment; the
# set -u below makes its absence fail fast.
set -u
set -o errexit
set -o pipefail
# beeline invocation with the connection URL baked in.
BLINE='beeline -u "'"$BEELINE_CONNECT"'"'
DB_NAME=aarau1
# Hive warehouse location of the source tables (t_<name>).
SRC_BASE_DIR=/user/hive/warehouse/$DB_NAME.db
# HDFS staging areas for the crush input and output tables.
INPUT_BASE_DIR=/user/aarau1/input
OUTPUT_BASE_DIR=/user/aarau1/output
# Avro (de)serialization comes from the Hive exec jar; expose it to both
# the hadoop classpath and the filecrush job's -libjars.
export AVRO_JAR=/opt/cloudera/parcels/CDH/jars/hive-exec-1.1.0-cdh5.7.2.jar
export HADOOP_CLASSPATH=$AVRO_JAR
export LIBJARS=/home/aarau1/filecrush/target/filecrush-2.2.2-SNAPSHOT.jar,$AVRO_JAR
# Print the HDFS staging directory that holds the input table for the
# given dataset name ($INPUT_BASE_DIR/t_input_<name>).
get_input_table_dir() {
  local dataset=$1
  echo "$INPUT_BASE_DIR/t_input_$dataset"
}
# Print the HDFS directory where crushed output for <input> lands.
# With clone_mode "yes" the input table's full path is mirrored underneath
# the output directory (matching filecrush --clone layout); otherwise the
# plain output directory is printed.
get_output_table_dir() {
  local input=$1
  local clone_mode=$2
  local out_dir=$OUTPUT_BASE_DIR/t_output_$input
  if [ "$clone_mode" = "yes" ]; then
    echo "$out_dir/$(get_input_table_dir "$input")"
  else
    echo "$out_dir"
  fi
}
# Stage input data for one test dataset.
#   $1 - dataset name; files are copied from the warehouse table t_<name>
#        into $INPUT_BASE_DIR/t_input_<name>
#   $2 - "yes" to additionally append a single large sorted copy of the
#        data (one reducer), producing one big file alongside the originals
function prepare_input() {
local input=$1
local big_file=$2
local src_tbl=t_$input
local dst_tbl=t_input_$input
local dst_dir=$INPUT_BASE_DIR/$dst_tbl
# Reset the staging directory and copy the source table's files in.
hdfs dfs -rm -R -f -skipTrash $dst_dir
hdfs dfs -mkdir -p $dst_dir
hdfs dfs -cp $SRC_BASE_DIR/$src_tbl/* $dst_dir/
# Optional Hive statement that appends the single big sorted copy.
local create_big_file_cmd=""
if [ "$big_file" == "yes" ]; then
create_big_file_cmd="set mapreduce.job.reduces = 1; insert into table $dst_tbl select * from $dst_tbl sort by 1;"
fi
# Register an external table over the staged directory (schema cloned from
# the source table), then optionally run the big-file insert.
$BLINE <<EOF
use $DB_NAME;
drop table if exists $dst_tbl;
create external table $dst_tbl like $src_tbl location 'hdfs://$dst_dir';
$create_big_file_cmd
EOF
}
# Run one filecrush job over a staged input table and register the result.
#   $1 - compression codec passed to --compress
#   $2 - --max-file-blocks value
#   $3 - number of reducers
#   $4 - input dataset name; its suffix after the last "_" is used as the
#        input/output format (e.g. "foo_avro" -> avro)
#   $5 - "yes" to crush in --clone mode (input tree mirrored under output)
function run() {
local compress=$1
local max_file_blocks=$2
local reducers=$3
local input=$4
local clone_mode=$5
local format=${input##*_}
local in_tbl=t_input_$input
local in_dir=$INPUT_BASE_DIR/$in_tbl
local out_tbl=t_output_$input
local out_dir=$OUTPUT_BASE_DIR/$out_tbl
# In clone mode the crushed files appear under out_dir + full input path.
if [ "$clone_mode" == "yes" ]; then
local tbl_out_dir=$out_dir/$in_dir
else
local tbl_out_dir=$out_dir
fi
local clone_option=""
if [ "$clone_mode" = "yes" ]; then
clone_option="--clone"
fi
# Clear leftovers from previous runs, then run the crush job.
hdfs dfs -rm -f -R -skipTrash $out_dir .staging "crush*" test tmp input/crushed* 2>/dev/null
hdfs dfs -mkdir -p $out_dir $tbl_out_dir
hadoop jar ./target/filecrush-2.2.2-SNAPSHOT.jar com.m6d.filecrush.crush.Crush \
-Dmapreduce.reduce.maxattempts=1 \
-Dmapreduce.job.reduces=$reducers \
-libjars $LIBJARS \
$in_dir $out_dir 20161016000000 \
--compress $compress \
--max-file-blocks $max_file_blocks \
--input-format $format \
--output-format $format \
--threshold 0.007 \
--verbose \
$clone_option \
2>&1 | tee job.log &&
# NOTE(review): the '#' lines inside this here-doc are expanded by the
# shell and sent to beeline verbatim; ${crush.timestamp} etc. are not
# valid bash substitutions and may abort here — confirm this path works.
$BLINE <<EOF
# \
# --regex '.*/input2\b.*' \
# --replacement 'crushed_file-${crush.timestamp}-${crush.task.num}-${crush.file.num}' \
# \
# --regex '.*/input2\b.*' \
# --replacement 'crushed_file-${crush.timestamp}-${crush.task.num}-${crush.file.num}' \
# --input-format avro \
# --output-format avro \
# -libjars $LIBJARS $INPUT_BASE_DIR/input $INPUT_BASE_DIR/output 20161016000000 \
use $DB_NAME;
drop table if exists $out_tbl;
create external table $out_tbl like $in_tbl location 'hdfs://$tbl_out_dir';
EOF
}
# Command dispatch: no arguments prints usage; "prepare"/"get_in_dir"/
# "get_out_dir" route to the helpers above; anything else is treated as
# the five positional crush-job parameters.
if [ $# -eq 0 ]; then
  echo "Syntax: $0 <compression> <max_file_blocks> <reducers> <input>"
  echo "Syntax: $0 prepare <input> <big_file? yes/no>"
  exit 1
fi
case "$1" in
  prepare)     prepare_input $2 $3 ;;
  get_in_dir)  get_input_table_dir $2 ;;
  get_out_dir) get_output_table_dir $2 $3 ;;
  *)           run $1 $2 $3 $4 $5 ;;
esac
| true
|
cc1e11d0c4c4941ca12510f84204376ffe340f1a
|
Shell
|
MarkNik1/distributed-systems
|
/tools/create_exam-projects.sh
|
UTF-8
| 3,935
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##
## Configure the GitLab access parameters
##
export GITLAB_URL="https://gitlab.fbi.h-da.de"
export GITLAB_USR=""
export GITLAB_TKN=""
##
## Configure the namespace-id, i.e. the
## id of the path (groups and sub-groups)
## of the newly created project. For the
## path /distributed-systems/lab 2017/2018
## this value is 150.
##
export NAMESPACE_ID=8975
##
## A project description.
##
export PROJECT_DESCRIPTION="Alternative Pr\üfungsform in Verteilte Systeme im Wintersemester 2020\/2021."
##
## Configure the json templates used for
## creation and edit of projects. You may
## need to modify the template.
##
export CREATE_TMP="create-project-template.json"
##
## Parse the command-line arguments to get
## username and private GitLab token.
##
## Fix: the long-option list previously declared "key:" although the case
## statement below handles "--token", so "--token" was rejected by getopt
## while "--key" was accepted but silently ignored. Also use the documented
## short form -l for getopt's long-option flag instead of "--l".
##
OPTS=`getopt -o u:t:i:h -l user:,token:,namespace-id:,help -n 'create_lab-projects.sh' -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- "$OPTS"
while true; do
  case "$1" in
    -u|--user)
      case "$2" in
        *) GITLAB_USR=$2; shift 2 ;;
      esac ;;
    -t|--token)
      case "$2" in
        *) GITLAB_TKN=$2; shift 2 ;;
      esac ;;
    -i|--namespace-id)
      case "$2" in
        *) NAMESPACE_ID=$2; shift 2 ;;
      esac ;;
    -h|--help)
      # NOTE(review): prints usage but deliberately(?) keeps running —
      # matches the original behaviour; confirm it should not exit here.
      echo "Usage:"
      echo "  test.sh -u <USERNAME> -t <PRIVATE_GITLAB_TOKEN> -i <NAMESPACE_ID>"
      shift ;;
    --) shift; break ;;
    *) break ;;
  esac
done
##
## Encode strings to URLs. Substituting
## special chars like ä,ö,ü,etc.
##
##
## Percent-encode a string for use in a URL component. Unreserved
## characters (RFC 3986: alphanumerics and . ~ _ -) pass through;
## every other byte is emitted as %XX (LANG=C forces byte semantics).
##
urlencode() {
    local LANG=C
    local idx ch
    for (( idx = 0; idx < ${#1}; idx++ )); do
        ch=${1:$idx:1}
        case "$ch" in
            [a-zA-Z0-9.~_-]) printf '%s' "$ch" ;;
            *) printf '%%%02X' "'$ch" ;;
        esac
    done
}
export -f urlencode
##
## Extract user- and project-name.
##
# Create one GitLab project for a CSV row: $1/$2 = first/last name,
# $3 = email (local part becomes the project name). Invoked per-row via
# "csvtool call" at the bottom of the file; uses the exported GITLAB_*,
# NAMESPACE_ID, PROJECT_DESCRIPTION and CREATE_TMP settings.
function create() {
##
## Extract the user name from the CSV-file.
##
user_name=$(urlencode "$1 $2")
# Strip a percent-encoded UTF-8 BOM that Moodle/Excel exports prepend.
user_name=${user_name#"%EF%BB%BF"}
[[ -z ${user_name} ]] && exit 0
##
## Extract the project name from the CSV-file.
##
project_name=${3%@*}
[[ -z ${project_name} ]] && exit 0
##
## Search for user name in GitLab. If no user
## is found, return an provide an error message.
##
curl -s --header "PRIVATE-TOKEN: ${GITLAB_TKN}" ${GITLAB_URL}/api/v4/users?search=${user_name} | python -m json.tool > /tmp/user.json
if jq -e '..|select(type == "array" and length == 0)' < "/tmp/user.json" > /dev/null
then
echo -e "\033[0;31mNo GitLab-user found for \"$1 $2\"\033[0m"
exit 0
fi
##
## Let's create the GitLab repo.
##
echo "Creating Repo for \"$1 $2\""
##
## Modify the template
##
sed -e "s/%PROJECT_NAME%/${project_name}/g" -e "s/%PROJECT_PATH%/${project_name}/g" -e "s/%PROJECT_DESCRIPTION%/${PROJECT_DESCRIPTION}/g" -e "s/%NAMESPACE_ID%/${NAMESPACE_ID}/g" ${CREATE_TMP} > /tmp/${CREATE_TMP}
##
## Create a new project in a given namespace,
## that is identified by its id.
##
curl -s --request POST --header "PRIVATE-TOKEN: ${GITLAB_TKN}" --header "Content-Type: application/json" --data @/tmp/${CREATE_TMP} ${GITLAB_URL}/api/v4/projects | python -m json.tool > /tmp/create-project-response.json
##
## Get the id of the recently created project.
##
project_id=`cat /tmp/create-project-response.json | jq -r '.id'`
##
## Add (active) users to recently created project.
##
for user_id in `cat /tmp/user.json | jq -r '.[] | select(.state=="active") .id'`; do
##
## Add a user to a project.
## access_level=40 grants the Maintainer role.
##
curl -s --request POST --header "PRIVATE-TOKEN: ${GITLAB_TKN}" --data "user_id=${user_id}&access_level=40" "${GITLAB_URL}/api/v4/projects/${project_id}/members" > /dev/null
done
}
# Exported so csvtool's child shell can see the function.
# NOTE(review): exported bash functions are only visible when the child
# process is bash — confirm csvtool invokes bash, not /bin/sh.
export -f create
##
## Create repositories for all students in the
## given CSV-file.
##
csvtool -t , call create courseid_13752_participants.csv
| true
|
a9909cc9d1e64b129247ae2aacfbd72686fa0113
|
Shell
|
woohyeok-choi/nginx-https
|
/docker-entrypoint.sh
|
UTF-8
| 2,694
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# Timestamped logging helpers. Everything goes to stderr so stdout stays
# clean for the container runtime.
# Fix: the original defined these with the bash-only "function name()"
# syntax under a /bin/sh shebang, which fails on POSIX shells such as
# dash/ash (the usual /bin/sh in slim/alpine images); plain name() is
# portable.
info() {
>&2 echo "[$(date "+%Y-%m-%d %H:%M:%S")][Info]" "$@"
}
warning() {
>&2 echo "[$(date "+%Y-%m-%d %H:%M:%S")][Warning]" "$@"
}
error() {
>&2 echo "[$(date "+%Y-%m-%d %H:%M:%S")][Error]" "$@"
}
info "NGinx reverse proxy setting"
# Read deployment settings from a Docker secret (INI file); SECRET_NAME and
# optionally INI_SECTION come from the container environment.
SECRET_FILE="/run/secrets/${SECRET_NAME}"
if [ -z ${INI_SECTION} ]; then
INI_SECTION=''
fi
if [ -f ${SECRET_FILE} ]; then
SERVER_DOMAIN=$(crudini --get ${SECRET_FILE} "${INI_SECTION}" server_domain)
SERVER_EMAIL=$(crudini --get ${SECRET_FILE} "${INI_SECTION}" server_email)
IS_DEBUG=$(crudini --get ${SECRET_FILE} "${INI_SECTION}" debug)
fi
# A domain is mandatory; without it neither the vhost nor the certificate
# can be configured.
if [ -z ${SERVER_DOMAIN} ]; then
error "You should specify your domain."
exit 1
fi
# Write the reverse-proxy vhosts: HTTP on 80 and gRPC (http2) on 50051,
# pulling location/upstream fragments from /home/conf/nginx/.
info "Generate a default configuration"
cat <<EOF > /etc/nginx/conf.d/default.conf
client_max_body_size 20m;
server {
listen 80;
access_log /var/log/nginx/proxy.http.access.log main;
error_log /var/log/nginx/proxy.http.error.log warn;
server_name ${SERVER_DOMAIN};
include /home/conf/nginx/http.loc*.conf;
}
server {
listen 50051 http2;
access_log /var/log/nginx/proxy.grpc.access.log main;
error_log /var/log/nginx/proxy.grpc.error.log warn;
include /home/conf/nginx/grpc.loc*.conf;
}
include /home/conf/nginx/ups*.conf;
EOF
info "Print default configuration..."
cat /etc/nginx/conf.d/default.conf
# No email -> plain HTTP only; run nginx in the foreground and stop here.
if [ -z ${SERVER_EMAIL} ]; then
info "There is no email for Https certification. Setting Complete."
info "Start NGinx on Background."
nginx -g 'daemon off;'
exit 0
fi
# HTTPS path: certbot's nginx plugin needs a running server to validate
# the domain, so start nginx as a daemon first.
info "Try to get Https certification with an account, ${SERVER_EMAIL}"
info "Start temporarily NGinx on Foreground"
nginx -g 'daemon on;'
sleep 10s
# NOTE(review): unquoted ${IS_DEBUG} breaks this test when the value is
# empty (crudini key absent) — confirm the secret always defines "debug".
if [ ${IS_DEBUG} = "true" ]; then
info "Get Https certificate with staging mode -- This certificate should be used only for development!"
certbot --staging --nginx --redirect --email ${SERVER_EMAIL} --agree-tos --no-eff-email -d ${SERVER_DOMAIN}
else
info "Get Https certificate..."
certbot --nginx --redirect --email ${SERVER_EMAIL} --agree-tos --no-eff-email -d ${SERVER_DOMAIN}
fi
if [ $? -ne 0 ]; then
error "There are some problems when getting a certificate; maybe a rate limit or invalid email."
exit 1
fi
# Restart cleanly in the foreground so the container tracks nginx itself.
info "Stop NGinx on Foreground"
nginx -s quit
sleep 10s
# Install a renewal job. NOTE(review): the generated file is a crontab
# entry ("min hour dom mon dow cmd"), not a shell script, although it is
# chmod +x'ed; it fires at 08:00 on the 12th of every month — verify both
# points are intended.
info "Generate cronjob for renewing certification."
mkdir -p /var/log/cronjob/
cat <<EOF > /home/renew-cert.sh
0 8 12 * * certbot renew --nginx
EOF
chmod +x /home/renew-cert.sh
crontab /home/renew-cert.sh
info "Start cron.."
crond -b -l 2 -L /var/log/cron/cronjob.log
info "start NGinx on background..."
nginx -g 'daemon off;'
info "Setting complete!"
exit 0
| true
|
95e0c671a6383eb14966bd5a64006778f4c22b1f
|
Shell
|
montali/PDF2SearchablePDF
|
/mac_install.sh
|
UTF-8
| 978
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# macOS installer for pdf2searchablepdf:
#  - installs the poppler and tesseract dependencies via Homebrew
#  - appends a "pdf2searchablepdf" alias to ~/.bash_profile that points at
#    the pdf2searchablepdf.sh script sitting next to this installer
# Because the alias references this checkout's absolute path, re-run this
# installer after moving the repository.
brew install poppler tesseract
# Absolute directory containing this script (and the executable).
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
path2exec="${dir}/pdf2searchablepdf.sh" # path to the executable bash script
# echo "path2exec = \"$path2exec\""
# Fix: single-quote the alias value. The original appended
#   alias pdf2searchablepdf=/unquoted/path
# which breaks as soon as the checkout path contains a space.
printf "alias pdf2searchablepdf='%s'\n" "$path2exec" >> ~/.bash_profile
echo "Done: alias inserted in ~/.bash_profile"
| true
|
b590fdc2957877877d1a9b9231d5abdf6542d0a0
|
Shell
|
guilhermeDias13/devcloud-scripts
|
/distributed_training.sh
|
UTF-8
| 2,177
| 2.9375
| 3
|
[] |
no_license
|
#PBS -l walltime=09:30:00
#PBS -o distr_train__${PBS_JOBID}-o.txt
#PBS -e distr_train__${PBS_JOBID}-e.txt
# Orchestrates a distributed PPO training run on Intel DevCloud:
# submits <nodes> agent jobs (ppn workers each), waits for allocation and
# agent registration via files under ~/distributed_devcloud/, then submits
# the central RL-algorithm job with the full hyper-parameter set.
### Inputs ###
nodes=$1
ppn=$2
hash=$3
# Agent #
max_v=$4
rw_fac=$5
col_vel=$6
kp=$7
xw=$8
yw=$9
zw=${10}
# Neural Net #
sched=${11}
hid_size=${12}
num_hid_layers=${13}
expl_rate=${14}
# PPO #
max_timesteps=${15}
timesteps_per_ab=${16}
clip_param=${17}
ent_coeff=${18}
epochs=${19}
lr=${20}
batch_s=${21}
gamma=${22}
lambd=${23}
workers=$[nodes*ppn]
echo "nodes: ${nodes}"
echo "ppn: ${ppn}"
echo "workers: ${workers}"
#qsub each of the jobs
# Agents append one line to ~/distributed_devcloud/nodes as they come up;
# before submitting node i, wait until the previous node's ppn agents have
# all registered (i*ppn lines).
touch ~/distributed_devcloud/nodes
lines_nodes=0
time=0
expected=0
for ((i=0; i < ${nodes}; i++))
do
expected=$[i*ppn]
while [ $lines_nodes -ne $expected ]
do
lines_nodes=`wc -l < ~/distributed_devcloud/nodes`
# NOTE(review): time advances 15 per sleep of 15s, so /600 prints tenths
# of a minute at best — /60 would give minutes; verify intended units.
echo "waiting for agents to write.. time:" $(($time/600))"min"
echo "$lines_nodes/${expected} already up."
sleep 15
let "time=time+15"
done
echo "all agents haven written!"
qsub -F "${ppn} ${i} ${hash} ${max_v} ${rw_fac} ${col_vel} ${kp} ${xw} ${yw} ${zw} ${sched} ${hid_size} ${num_hid_layers} ${expl_rate} ${max_timesteps} ${timesteps_per_ab} ${clip_param} ${ent_coeff} ${epochs} ${lr} ${batch_s} ${gamma} ${lambd} " nodes_full_agents.sh;
sleep 35;
done;
#wait for all nodes to allocate
# Poll qstat until every submitted agent job is in the running state.
allocated=0
time=0
while [ $allocated -ne $nodes ]
do
allocated=`qstat -r | grep "R " | wc -l`
echo "waiting for allocated nodes.. time:" $(($time/600))"min"
echo "$allocated/${nodes} already allocated."
sleep 10
let "time=time+10"
done
echo "all nodes allocated."
#wait for all agents to connect
# Each connected agent drops an agent_*.txt marker file.
up_agents=0
time=0
while [ $up_agents -ne $workers ]
do
up_agents=`ls -lR ~/distributed_devcloud/agent_*.txt | wc -l`
echo "waiting for agents to connect.. time:" $(($time/600))"min"
echo "$up_agents/${workers} already up."
sleep 15
let "time=time+15"
done
echo "all agents up!"
echo "total of ${workers} workers."
# All workers ready: launch the central training job with the same
# hyper-parameters.
qsub -F "${hash} ${workers} ${max_v} ${rw_fac} ${col_vel} ${kp} ${xw} ${yw} ${zw} ${sched} ${hid_size} ${num_hid_layers} ${expl_rate} ${max_timesteps} ${timesteps_per_ab} ${clip_param} ${ent_coeff} ${epochs} ${lr} ${batch_s} ${gamma} ${lambd}" rl_algo_with_agents.sh
| true
|
56920f16439abbc2fb86fdb457b74a11fd990dd5
|
Shell
|
phoronix-test-suite/test-profiles
|
/pts/build-php-1.3.0/install.sh
|
UTF-8
| 329
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build a private libxml2 into $HOME/libxml2 (a PHP build prerequisite),
# then emit the "time-compile-php" driver that the benchmark harness runs.
# $NUM_CPU_JOBS is supplied by the calling environment — TODO confirm.
mkdir $HOME/libxml2
tar -zxvf libxml2-2.6.31.tar.gz
cd libxml2-2.6.31/
./configure --prefix=$HOME/libxml2 > /dev/null
make -s -j $NUM_CPU_JOBS
make install
cd ..
# Remove the build tree and the docs/share tree that are not needed at
# run time.
rm -rf libxml2-2.6.31/
rm -rf libxml2/share/
# Write the timed step: only PHP's "make" is measured. \$NUM_CPU_JOBS is
# escaped so it expands when time-compile-php itself runs, not here.
echo "#!/bin/sh
cd php-5.2.9/
make -s -j \$NUM_CPU_JOBS 2>&1" > time-compile-php
chmod +x time-compile-php
| true
|
949aaa320ed73034e7eea96fc4735eb6b2f6f9ec
|
Shell
|
meditans/settings
|
/symLinksRoot.hs
|
UTF-8
| 598
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Warning: run this script as root!
########################################
### Initial settings
########################################
# Find the directory this script lives in.
scriptDir=$( cd "$( dirname "$0" )" && pwd )
# Find the home directory of the invoking user.
# homeDir=$(getent passwd $SUDO_USER | cut -d: -f6)
# The nick of the user invoking the script.
# user=$SUDO_USER
########################################
### Global nixOS configuration
########################################
# Replace /etc/nixos with a symlink into this repository's nixos/ dir.
rm -r /etc/nixos
ln -s "$scriptDir/nixos" /etc/nixos
| true
|
60076eab050a76c607d0ab779994d58ac19abc23
|
Shell
|
alexandre-mazel/electronoos
|
/raspberry/system/alexscripts/main.sh
|
UTF-8
| 380
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
# Boot hook: log every invocation's timestamp and, on the first run since
# boot, launch the connection-statistics logger.
file_bootlog="/home/pi/boot_time.txt"
# Marker in /tmp — cleared on reboot, so its presence means "already ran
# since this boot".
file_first="/tmp/alexscript_started"
date >> "$file_bootlog"
if [ -f "$file_first" ]
then
exit 0
else
echo start > "$file_first"
echo "first time" >> "$file_bootlog"
fi
#bash -c "/usr/bin/python2.7 /home/pi/dev/git/electronoos/quick_scripts/stat_connected.py" &
# NOTE(review): runs in the foreground (the commented variant above was
# backgrounded) and relies on the target script's shebang/exec bit.
/home/pi/dev/git/electronoos/quick_scripts/stat_connected.py
| true
|
7bd223efcff803b3390e1516cd64742147554955
|
Shell
|
tlkw79/AstroPi3
|
/install_driverRTL8822BU.sh
|
UTF-8
| 620
| 3.234375
| 3
|
[] |
no_license
|
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~"
echo "Attempting to Install Realtek drivers for Wifi Dongle using chipset RTL8822BU"
# Must run as root: the script installs packages and a DKMS kernel module.
if [ "$(whoami)" != "root" ]; then
	# BUG FIX: the original called an undefined 'display' helper here, which
	# would itself fail with "command not found"; use plain echo.
	echo "Please run this script with sudo due to the fact that it must do a number of sudo tasks. Exiting now."
	exit 1
fi
sudo apt-get -y install git
sudo apt-get -y install dkms
# BUG FIX: added -y for consistency — without it this step blocks on a prompt.
sudo apt-get -y install raspberrypi-kernel-headers
git clone https://github.com/drwilco/RTL8822BU.git
# Guard the cd: if the clone failed we must not run make in the wrong directory.
cd RTL8822BU || exit 1
sudo make dkms-install
make
sudo make install
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~"
echo "Script Execution Complete. You may need to restart for the wifi dongle to work well."
| true
|
0396aca43a840c000187337da46c106a320eef23
|
Shell
|
Silverpeas/silverpeas-native-packages
|
/deb/build-silverpeas.sh
|
UTF-8
| 4,450
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a Debian package for Silverpeas <version>:
# stages an install tree under ./tmp, embeds JBoss 6.1.0, rewrites a few
# configuration files, then assembles the .deb with dpkg-deb under fakeroot.
# Requires:
#  app-arch/dpkg
#  sys-apps/fakeroot
set -e
if [ -z "$1" ]; then
  echo "Usage: $0 <version> [<package version>]"
  echo "If the package release isn't set, then it is set at 1 by default"
  exit 1
fi
VER=$1
PKG_VER=$2
test "Z$PKG_VER" == "Z" && PKG_VER=1
SILVERPEAS_PKG=debian/silverpeas
# ${ROOT} is the fakeroot staging tree mirroring the target filesystem layout.
ROOT=`pwd`/tmp
export SILVERPEAS_HOME=${ROOT}/opt/silverpeas
export JBOSS_HOME=${SILVERPEAS_HOME}/jboss-6.1.0.Final
SILVERPEAS_DATA=${ROOT}/var/data/silverpeas
SILVERPEAS_DOC=${ROOT}/usr/share/doc/silverpeas
# prepare fresh directories
rm -rf ${ROOT}
mkdir -p ${ROOT}/DEBIAN
mkdir -p ${ROOT}/opt
mkdir -p ${SILVERPEAS_DATA}/import
mkdir -p ${SILVERPEAS_DOC}
chmod -R 755 ${ROOT}
# changelog: prepend an entry for ${VER} unless one already exists.
test -e debian/changelog || touch debian/changelog
res=0
grep "silverpeas (${VER})" debian/changelog >& /dev/null || res=1
if [ $res -ne 0 ]; then
  DATE=`date -R`
  echo "silverpeas (${VER}) stable; urgency=low
  * See the release note in https://www.silverpeas.org/docs/core/releasenotes.html for more details
    about the ${VER} release.
 -- Silverpeas Development Team <silverpeas-dev@googlegroups.com> ${DATE}
" | cat - debian/changelog > /tmp/changelog && mv /tmp/changelog debian/changelog
fi
# prepare silverpeas
tar xzf ../files/silverpeas-${VER}-jboss6.tar.gz
mv silverpeas-${VER}-jboss6 ${SILVERPEAS_HOME}
# prepare jboss: unpack the distribution and drop unused server profiles.
unzip ../files/jboss-as-distribution-6.1.0.Final.zip -d ${SILVERPEAS_HOME}/
pushd ${JBOSS_HOME}/server
rm -rf all jbossweb-standalone minimal standard
popd
# Run every hook script shipped in ../scripts/ (executed in listing order).
for script in `ls ../scripts/`; do
  ../scripts/${script}
done
# Fix EOL in configuration files (strip CR from CRLF line endings).
for i in ${SILVERPEAS_HOME}/bin/*.sh; do
  echo "dos2unix $i"
  awk '{ sub("\r$", ""); print }' $i > $i.new
  mv $i.new $i
  chmod +x $i
done
pushd ${SILVERPEAS_HOME}/bin
mvn clean install
./appBuilder.sh
# Move the generated data tree into the package's /var/data/silverpeas.
mv ../data/* ${SILVERPEAS_DATA}/
rm -rf ../data
popd
if [ ! -e "log" ]; then
  mkdir log
fi
mv ${SILVERPEAS_HOME}/log/* log/
pushd ${SILVERPEAS_HOME}
# Force the active kmelia export formats shipped with the package.
sed -e "s/kmelia\.export\.formats\.active.*/kmelia.export.formats.active = zip pdf odt doc/g" properties/org/silverpeas/kmelia/settings/kmeliaSettings.properties > /tmp/kmeliaSettings.properties
mv /tmp/kmeliaSettings.properties properties/org/silverpeas/kmelia/settings/kmeliaSettings.properties
popd
# lintian overrides
# mkdir -p tmp/usr/share/lintian/overrides/
# cp -T debian/silverpeas.lintian-overrides tmp/usr/share/lintian/overrides/silverpeas
# license
cp debian/copyright ${SILVERPEAS_DOC}/
# conffiles
cp -T ${SILVERPEAS_PKG}/conffiles ${ROOT}/DEBIAN/conffiles
# configuration
cp ../files/config.properties ${SILVERPEAS_HOME}/setup/settings/config-silverpeas.properties
# Inject -Dapp=silverpeas into the JBoss start/debug scripts (idempotent:
# skipped when the marker is already present).
res=0
grep "app=silverpeas" ${SILVERPEAS_HOME}/bin/silverpeas_start_jboss.sh >& /dev/null || res=1
if [ $res -ne 0 ]; then
  sed 's/#export JBOSS_CLASSPATH/export JAVA_OPTS="-Dapp=silverpeas $JAVA_OPTS"/' ${SILVERPEAS_HOME}/bin/silverpeas_start_jboss.sh > silverpeas_start_jboss.sh.new
  mv silverpeas_start_jboss.sh.new ${SILVERPEAS_HOME}/bin/silverpeas_start_jboss.sh
  sed 's/#export JBOSS_CLASSPATH/export JAVA_OPTS="-Dapp=silverpeas $JAVA_OPTS"/' ${SILVERPEAS_HOME}/bin/silverpeas_debug_jboss.sh > silverpeas_debug_jboss.sh.new
  mv silverpeas_debug_jboss.sh.new ${SILVERPEAS_HOME}/bin/silverpeas_debug_jboss.sh
fi
# set java path: hard-wire $JAVA_HOME to /usr in the shipped scripts.
for i in ${SILVERPEAS_HOME}/bin/*.sh; do
  sed "s/\$JAVA_HOME/\/usr/g" $i > $i.new
  mv $i.new $i
  chmod +x $i
done
# init.d
mkdir -p ${ROOT}/etc/init.d/
cp -T ${SILVERPEAS_PKG}/silverpeas.init ${ROOT}/etc/init.d/silverpeas
chmod 755 ${ROOT}/etc/init.d/silverpeas
cp -T ${SILVERPEAS_PKG}/openoffice.init ${ROOT}/etc/init.d/openoffice
chmod 755 ${ROOT}/etc/init.d/openoffice
#environment
mkdir -p ${ROOT}/etc/profile.d/
cp -T ../files/silverpeas.sh ${ROOT}/etc/profile.d/silverpeas.sh
cp -T ../files/jboss.sh ${ROOT}/etc/profile.d/jboss.sh
# preinst, postinst, prerm and postrm (Debian maintainer scripts)
cp -T ${SILVERPEAS_PKG}/silverpeas.preinst ${ROOT}/DEBIAN/preinst
chmod 755 ${ROOT}/DEBIAN/preinst
cp -T ${SILVERPEAS_PKG}/silverpeas.postinst ${ROOT}/DEBIAN/postinst
chmod 755 ${ROOT}/DEBIAN/postinst
cp -T ${SILVERPEAS_PKG}/silverpeas.prerm ${ROOT}/DEBIAN/prerm
chmod 755 ${ROOT}/DEBIAN/prerm
cp -T ${SILVERPEAS_PKG}/silverpeas.postrm ${ROOT}/DEBIAN/postrm
chmod 755 ${ROOT}/DEBIAN/postrm
# Generate DEBIAN/control with the requested version, then build the .deb.
cp -f ${SILVERPEAS_PKG}/control debian/control
dpkg-gencontrol -v"${VER}-${PKG_VER}" -c${SILVERPEAS_PKG}/control -Ptmp
rm -f debian/control
fakeroot dpkg-deb -b ${ROOT} silverpeas_${VER}-${PKG_VER}_all.deb
| true
|
37bca060565af730a2a977716d1d7c722160a16e
|
Shell
|
michaeljbraus/Cladophora-Microbiota-2014
|
/pairedends.sh
|
UTF-8
| 267
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merge raw paired-end reads with PEAR, one run per sample name listed
# (one per line) in ./names-tube.txt.
# PEAR v0.9.9 [May 13, 2016]
# -p keeps re-runs from failing when the output directory already exists.
mkdir -p ../clado-data-pear
# BUG FIX: read the name list line by line instead of `for name in $(cat …)`,
# which word-split on any whitespace and glob-expanded each token.
while IFS= read -r name; do
	pear -m 500 -f ../clado-data/*"${name}"*R1.fastq -r ../clado-data/*"${name}"*R2.fastq -o "../clado-data-pear/pear-${name}" -j 10
done < ./names-tube.txt
| true
|
d8104404d0032719f61f8f21b7cb561861c6dcc0
|
Shell
|
greatwall-cloud/k8s-deploy
|
/001.docker-ce_17/install.sh
|
UTF-8
| 2,685
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Offline/arm64 node bootstrap for a Kubernetes deployment: DNS, swap,
# local .deb installs (curl, docker.io 17.03, helpers), docker registry
# mirror, CN apt mirrors, and ceph/kubernetes apt repositories.
cd $(dirname $0)
# Add a public DNS resolver.
# NOTE(review): -s is true only when the file exists AND is non-empty, so this
# overwrites a populated resolv.conf but leaves an empty one alone — confirm
# the condition is not meant to be inverted.
if [ -s /etc/resolv.conf ] ; then
	echo "nameserver 114.114.114.114" > /etc/resolv.conf
fi
# Turn swap off now and comment it out of fstab (kubelet requires no swap).
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
#install curl
dpkg -i libcurl3-gnutls_7.47.0-1ubuntu2.8_arm64.deb
dpkg -i curl_7.47.0-1ubuntu2.8_arm64.deb
#install sshpass
dpkg -i sshpass_1.05-1_arm64.deb
#install bash-completion
dpkg -i bash-completion_1%3a2.1-4.2ubuntu1.1_all.deb
#install ntp
###########################
#https transport for apt
dpkg -i apt-transport-https_1.2.27_arm64.deb
#install docker 17.03 and its dependency chain from local arm64 packages
systemctl stop docker
dpkg -i less_481-2.1ubuntu0.2_arm64.deb
dpkg -i bridge-utils_1.5-9ubuntu1_arm64.deb
dpkg -i cgroupfs-mount_1.2_all.deb
dpkg -i docker.io_17.03.2-0ubuntu2~16.04.1_arm64.deb
dpkg -i liberror-perl_0.17-1.2_all.deb
dpkg -i git-man_1%3a2.7.4-0ubuntu1.4_all.deb
dpkg -i git_1%3a2.7.4-0ubuntu1.4_arm64.deb
dpkg -i netcat-traditional_1.10-41_arm64.deb
dpkg -i netcat_1.10-41_all.deb
dpkg -i ubuntu-fan_0.12.8~16.04.2_all.deb
# Point docker at an Aliyun registry mirror.
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://36hvcnma.mirror.aliyuncs.com"]
}
EOF
sudo systemctl restart docker
# Replace apt sources with the USTC ubuntu-ports mirror (arm64, xenial).
cat > /etc/apt/sources.list <<EOF
# 默认注释了源码仓库,如有需要可自行取消注释
deb https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial main main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-updates main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-updates main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-backports main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-backports main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-security main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-security main restricted universe multiverse
# 预发布软件源,不建议启用
# deb https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-proposed main restricted universe multiverse
# deb-src https://mirrors.ustc.edu.cn/ubuntu-ports/ xenial-proposed main restricted universe multiverse
EOF
# Register the Aliyun ceph (jewel) apt repository and key.
wget -q -O- 'https://mirrors.aliyun.com/ceph/keys/release.asc' | sudo apt-key add -
echo 'deb https://mirrors.aliyun.com/ceph/debian-jewel/ xenial main' > /etc/apt/sources.list.d/ceph.list
# Register the Aliyun kubernetes apt repository and key.
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
echo 'deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list
| true
|
39d643e5b914e7a65842208574f91495f7caaae7
|
Shell
|
peteraba/dotfiles
|
/zshrc
|
UTF-8
| 5,633
| 2.640625
| 3
|
[
"Unlicense"
] |
permissive
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
export PATH=~/.local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="lig"
# ZSH_THEME="omega-minimal"
# ZSH_THEME="schminitz"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
HIST_STAMPS="yyy.mm.dd"
# Would you like to use another custom folder than $ZSH/custom?
ZSH_CUSTOM=$HOME/dotfiles/zsh
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# Append the distro-specific oh-my-zsh package-manager plugin to $plugins,
# chosen by probing which package-manager binary is on PATH.
# (zsh semantics: `plugins+=name` appends one element to the array.)
function add_linux_plugins() {
  if [[ -n "$(which pacman)" ]]; then
    plugins+=archlinux
  elif [[ -n "$(which apt-get)" ]]; then
    plugins+=debian
    plugins+=ubuntu
  elif [[ -n "$(which dnf)" ]]; then
    # plugins+=fedora
    plugins+=dnf
  elif [[ -n "$(which zypper)" ]]; then
    plugins+=suse
  elif [[ -n "$(which yum)" ]]; then
    plugins+=yum
  fi
}
# Append macOS-related oh-my-zsh plugins to $plugins: brew and/or macports
# when their binaries exist, plus the generic mac plugin unconditionally.
function add_osx_plugins() {
  if [[ -n "$(which brew)" ]]; then
    plugins+=brew
  fi
  if [[ -n "$(which macports)" ]]; then
    plugins+=macports
  fi
  plugins+=mac
}
# Dispatch to the Linux or macOS plugin helper based on $OSTYPE.
function add_os_plugins() {
  if [[ "$OSTYPE" == "linux-gnu" ]]; then
    add_linux_plugins
  elif [[ "$OSTYPE" == darwin* ]]; then
    # BUG FIX: on macOS $OSTYPE carries a version suffix (e.g. "darwin22.0"),
    # so the original exact match "darwin" never succeeded and the macOS
    # plugins were never loaded; match the prefix instead.
    add_osx_plugins
  fi
}
# Append an assortment of extra plugins being tried out; disabled candidates
# are kept as commented lines for easy toggling.
function add_experimental_plugins() {
  plugins+=cargo
  # plugins+=chucknorris
  plugins+=command-not-found
  plugins+=emoji
  plugins+=emoji-clock
  plugins+=gitignore
  # plugins+=lol
  plugins+=pj
  # plugins+=rand-quote
  plugins+=rust
  plugins+=rvm
  # plugins+=supervisorctl
  # plugins+=systemadmin
  # plugins+=systemd
  # plugins+=terraform
  # plugins+=tmux
  # plugins+=tmux-cssh
  plugins+=urltools
  plugins+=vault
  plugins+=web-search
}
plugins=(aws common-aliases copydir copyfile dirhistory docker fasd git git-extras git-prompt git_remote_branch golang httpie history last-working-dir ssh-agent sudo vi-mode yarn)
add_os_plugins
add_experimental_plugins
source $ZSH/oh-my-zsh.sh
local current_path='%5~'
if [ -n "$(git_current_branch)" ]; then
current_path="$(current_repository)"
current_path="$current_path[(w)1]"
fi
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Custom aliases
alias dc='docker-compose'
alias make="make --no-print-directory"
alias sloc="source ~/dotfiles/local.zsh"
alias vloc="vim ~/dotfiles/local.zsh"
alias cloc="cat ~/dotfiles/local.zsh"
# SSH-agent settings
# zstyle :omz:plugins:ssh-agent agent-forwarding on
# zstyle :omz:plugins:ssh-agent identities id_rsa id_rsa2 id_github
zstyle :omz:plugins:ssh-agent lifetime 2h
# Preferred editor for local and remote sessions
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='vim'
fi
# Reload oh-my-zsh, optionally set a new theme
# Re-source oh-my-zsh in place; when given an argument, switch to that
# theme first.
ohmyzsh() {
  [ $# -gt 0 ] && ZSH_THEME="$1"
  echo "Reloading oh-my-zsh. Theme '$ZSH_THEME' to be loaded..."
  source $ZSH/oh-my-zsh.sh
}
# Start up GVM (Go Version Manager)
[[ -s "${HOME}/.gvm/scripts/gvm" ]] && source "${HOME}/.gvm/scripts/gvm"
# Start up RSVM (Rust Version Manager)
[[ -s "${HOME}/.rsvm/rsvm.sh" ]] && . "${HOME}/.rsvm/rsvm.sh"
# Load configs custom to this machine
[[ -s "${HOME}/dotfiles/local.zsh" ]] && source "${HOME}/dotfiles/local.zsh"
# Start mons deamon if mons is installed and not running
which mons > /dev/null
if [ $? -eq 0 ]; then
ps -a | grep mons
if [ $? -eq 1 ]; then
mons -m &!
fi
fi
# Set up thefuck aliases if installed
if [[ -n "$(which thefuck > /dev/null)" ]]; then
eval $(thefuck --alias)
fi
TERMINAL="i3-sensible-terminal"
HOME_HOST="paba-ml"
# Using the lig theme the remote branch will be grepped and if this is not empty, than the branch will be desplayed in red
LIG_DANGEROUS_BRANCH=""
# git push, preceded by a delay-and-warn when the origin URL matches
# $LIG_DANGEROUS_BRANCH (set near the top of this file).
gp()
{
    # BUG FIX: the pattern was single-quoted ('${LIG_DANGEROUS_BRANCH}'), so
    # grep searched for that literal string and the warning never fired.
    # Use -F (fixed string) and -- to treat the value verbatim.
    if [ -n "${LIG_DANGEROUS_BRANCH}" ] && [ -n "$(git remote get-url origin | grep -F -- "${LIG_DANGEROUS_BRANCH}")" ]; then
        echo ""
        echo "Pushing to a dangerous remote branch!!!"
        echo ""
        sleep 1
        echo "The push will continue in 3 seconds..."
        sleep 3
    fi
    git push
}
| true
|
2fc73b5da3a6a91222bc94835175b6f1867862bd
|
Shell
|
akagisho/zabbix-ansible
|
/roles/zabbix/files/usr/lib/zabbix/externalscripts/external_ssl_cert.sh
|
UTF-8
| 378
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Zabbix external check: print the number of whole days until the TLS
# certificate served by $1 (port $2, default 443) expires.
HOST=$1
PORT=${2:-443}
# Fetch the peer certificate (SNI via -servername) and pull out its
# notAfter date; all connection noise is discarded.
expiry=$(openssl s_client -connect ${HOST}:${PORT} -servername ${HOST} < /dev/null 2> /dev/null \
  | openssl x509 -enddate -noout 2> /dev/null \
  | cut -d'=' -f2)
if [ -z "$expiry" ]; then
  # Zabbix convention for "this item cannot be collected".
  echo "ZBX_NOTSUPPORTED"
  exit 1
fi
# Convert the expiry date to epoch seconds and print whole days remaining.
date +"%s" --date="${expiry}" \
  | gawk '{printf("%d\n",($0-systime())/86400-1/86400+1)}'
| true
|
7528cc08e3926b058bdf3ba61dc5721b56368838
|
Shell
|
Naruto/simon-speck-c
|
/scripts/speck/build_android.sh
|
UTF-8
| 1,038
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -xe
# Cross-compile libspeck.so for every Android ABI via the NDK's CMake
# toolchain and stage the results under libs/android/<abi>/.
# Requires NDK_ROOT to point at an installed Android NDK.
SCRIPTDIR=`dirname $0`
SCRIPTDIR=`cd $SCRIPTDIR && pwd -P`
BASEDIR=${SCRIPTDIR}/../..
BASEDIR=`cd ${BASEDIR} && pwd -P`
pushd ${BASEDIR} > /dev/null
SYSTEM_VERSION=21
# android_ndk_build <api-level> <abi> [extra-cmake-flags]
# Rebuilds ./build from scratch for one ABI and copies the .so into place.
# $OTHER_FLAG is deliberately unquoted so multiple flags word-split into
# separate cmake arguments.
android_ndk_build() {
	SYSTEM_VERSION=$1
	ARCH=$2
	OTHER_FLAG=$3
	# build
	/bin/rm -rf build
	/bin/mkdir build
	pushd build > /dev/null
	cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang -DCMAKE_SYSTEM_VERSION=${SYSTEM_VERSION} -DCMAKE_ANDROID_ARCH_ABI=${ARCH} -DCMAKE_ANDROID_NDK=${NDK_ROOT} $OTHER_FLAG ..
	cmake --build .
	popd > /dev/null
	# deploy
	/bin/mkdir -p libs/android/${ARCH}
	/bin/cp build/libspeck.so libs/android/${ARCH}
}
# NEON is enabled only on the ABIs that support it.
android_ndk_build ${SYSTEM_VERSION} "armeabi"
android_ndk_build ${SYSTEM_VERSION} "armeabi-v7a" "-DCMAKE_ANDROID_ARM_NEON=ON -DENABLE_NEON=ON"
android_ndk_build ${SYSTEM_VERSION} "x86"
android_ndk_build ${SYSTEM_VERSION} "x86_64"
android_ndk_build ${SYSTEM_VERSION} "arm64-v8a" "-DENABLE_NEON=ON"
popd > /dev/null
| true
|
0f825ba9fcc4e695ace107170e950f4c120f29c2
|
Shell
|
bestjae/fg_jnl
|
/skt_script/micro/dsm.sh
|
UTF-8
| 432
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Issue an NVMe Dataset Management (DSM) command and report pass/fail.
# Positional arguments (matching the assignments below):
#   $1  option (-d, -w, -r)
#   $2  device name (e.g. /dev/nvme0n1)
#   $3  starting LBA
#   $4  number of blocks
argc=$#
option=$1
dev_name=$2
start_block=$3
block_size=$4
if [ $argc -lt 4 ]; then
    # BUG FIX: the original usage line listed [dev_name] before [option],
    # contradicting the positional parsing above; the order now matches.
    echo "Usage : ./dsm.sh [option(-d, -w, -r)] [dev_name] [start_block] [block_size]"
    exit 1
fi
nvme dsm "${dev_name}" "${option}" --blocks="${block_size}" --slbs="${start_block}" > /dev/null 2> /dev/null
# Capture nvme's exit status directly (the original round-tripped it
# through an `echo $?` subshell).
ret=$?
if [ $ret -eq 0 ]; then
    echo "${dev_name} dsm command pass"
else
    echo "${dev_name} dsm command fail"
fi
| true
|
b2cf89f611cc941f79bd4b7cbc90138e876174dc
|
Shell
|
florez/PhenoAnalyzer
|
/PhenoCodes/ntuple_creator.sh
|
UTF-8
| 3,827
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Creates Delphes ntuples from previously generated MadGraph samples.
# This script must live in the directory where the ntuples should be written.
prueba1=0
# Counter over the ParamCards
cont1=0
# Loop over the ParamCards; adjust the directory where they are stored.
for i in $(ls /home/jgodoy/PhenoThesisProject/ParamCards); do
	# ParamCard name with the .dat suffix stripped
	archivo=${i/.dat/}
	# Check that a simulation exists for this ParamCard (directory of
	# MadGraph sample folders).
	if test -e /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo; then
		# Create a folder named after the ParamCard to hold its ntuples
		mkdir $archivo
		# Counter over the runs of this ParamCard (*run_01...)
		cont2=1
		# Flag for "more runs exist": 0 true, 1 false.
		prueba2=0
		# Iterate over the runs while more exist.  Runs 1-9 are zero-padded
		# (run_0N); from 10 on the unpadded name (run_NN) is used.
		while [ "$prueba2" -eq "0" ]; do
			if [ "$cont2" -lt "10" ]; then
				# Check the .hep file exists for this ParamCard/run.
				# NOTE(review): `test -e path*` relies on the glob expanding
				# to exactly one match — confirm each run has a single .hep(.gz).
				if test -e /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_0$cont2/tag_1_pythia_events.hep*; then
					# Report success on the console
					echo existe run $cont2 para $archivo
					# Decompress the .hep file
					gunzip /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_0$cont2/tag_1_pythia_events.hep.gz
					# Copy the decompressed .hep into the per-ParamCard folder
					# created above (source path first, destination second).
					cp /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_0$cont2/tag_1_pythia_events.hep /home/jgodoy/PhenoThesisProject/Simulations/ntuple_delphes/$archivo
					temp="$archivo"_run_"$cont2"
					# Run Delphes in the background; the ntuple is named after
					# the .hep file with a .root extension.
					./../../Programs/Delphes-3.2.0/DelphesSTDHEP /home/jgodoy/PhenoThesisProject/Programs/Delphes-3.2.0/cards/delphes_card_CMS.tcl $temp.root $archivo/tag_1_pythia_events.hep &
					let cont2=$cont2+1
				else
					let prueba2=1
					echo cambia prueba 2 a falso
				fi
			else
				if test -e /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_$cont2/tag_1_pythia_events.hep*; then
					echo existe run $cont2 para $cont1
					gunzip /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_$cont2/tag_1_pythia_events.hep.gz
					cp /home/jgodoy/PhenoThesisProject/Simulations/mg_files/$archivo/Events/run_$cont2/tag_1_pythia_events.hep /home/jgodoy/PhenoThesisProject/Simulations/ntuple_delphes/$archivo
					# NOTE(review): unlike the run<10 branch, this name uses
					# literal '+' separators — confirm which form is intended.
					temp="$archivo"+"_run_"+"$cont2"
					./../../Programs/Delphes-3.2.0/DelphesSTDHEP /home/jgodoy/PhenoThesisProject/Programs/Delphes-3.2.0/cards/delphes_card_CMS.tcl $temp.root $archivo/tag_1_pythia_events.hep &
					let cont2=$cont2+1
				else
					let prueba2=1
					echo prueba 2 falsa
				fi
			fi
		done
		let cont1=$cont1+1
	else
		let prueba1=1
		echo prueba1 falsa
	fi
done
# Wait for all backgrounded Delphes jobs before moving results.
wait
# Move the created ntuples into their respective folders and delete the
# copied .hep files.
for i in $(ls /home/jgodoy/PhenoThesisProject/ParamCards);do
	mv ${i/.dat/}*.root ${i/.dat/}
	rm ${i/.dat/}/*.hep
done
echo "Se crearon las ntuplas"
| true
|
580731716e08f203a8a33485de6a2dadeea76683
|
Shell
|
AlexanderBirks/reverse_review
|
/reverse_review.sh
|
UTF-8
| 529
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Copies a target app directory out of $HOME, zips it, POSTs the archive to a
# remote endpoint, then removes the original.
# Set bash script to exit immediately if any commands fail.
set -e
# Enter name of app to steal here...
TARGET_NAME="enter app name here"
# Copy the target onto the Desktop.
cp -r "${HOME}/${TARGET_NAME}" "${HOME}/Desktop/${TARGET_NAME}"
# Compress
# NOTE(review): the zip source path is relative to the current directory,
# not the Desktop copy above — this only works when run from $HOME; confirm.
zip -r "${TARGET_NAME}.zip" "${TARGET_NAME}" -x "*.DS_Store"
# Post
# BUG FIX: plain -d strips CR/LF from the payload, which corrupts a binary
# zip archive; --data-binary posts the file byte-for-byte.
curl -X POST --data-binary @"${TARGET_NAME}.zip" http://hostname/resource
# Delete
if [ -d "${HOME}/${TARGET_NAME}" ]; then
	rm -rf "${HOME}/${TARGET_NAME}"
fi
# spam owner
say "Thank you !"
# haven't actually tried it though
| true
|
9d018dc7acdef15d309d9292db3c639c39d908f7
|
Shell
|
IPHC/IPHCTools
|
/Dewey/run_wms.zsh
|
UTF-8
| 849
| 3
| 3
|
[] |
no_license
|
#!/bin/env zsh
# Submit a collection of 100 grid jobs through glite-WMS.
# Each collection name may be used only once; submitted collections are
# recorded (name + WMS job URL) in ./jobs.txt, which doubles as the guard.
usern="mbuttign"
COLL="Res1000Inv800"
if [[ -f jobs.txt ]]; then
    CollNameAlreadyTaken=`cat jobs.txt | grep ${COLL}`
    if [[ -n ${CollNameAlreadyTaken} ]]; then
        echo "This Collection name is already taken, please choose another one"
    else
        # Delegate a proxy for the user, then generate one JDL per job by
        # appending the job index to the wmsjob.sh Arguments line of j.jdl.
        glite-wms-job-delegate-proxy -d ${usern}
        rm -rf ${COLL}
        mkdir ${COLL}
        for i in {1..100}
        do
            cat j.jdl | sed "s%Arguments = \"wmsjob.sh%Arguments = \"wmsjob.sh ${i}%g" > ${COLL}/j_${i}.jdl
        done
        # Keep only the WMS endpoint URL from the submission output and log it.
        outp=$(glite-wms-job-submit -a --collection ${COLL} 2>&1 | grep "https://sbgwms1.in2p3.fr:9000")
        echo "${COLL} $outp" >> jobs.txt
    fi
else
    echo "Please first create an empty jobs.txt."
fi
| true
|
9994edb0162edb5a3b8e1df5145f5500225c1517
|
Shell
|
gonzopancho/launchd_xml
|
/launchd/etc/launchd/scripts/fsck.sh
|
UTF-8
| 1,067
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh -x
#
# Removed dependency from /etc/rc.
# Boot-time filesystem check: runs "fsck -F -p" (preen) and reacts to its
# exit status by continuing the boot, halting it, rebooting, or retrying
# with an interactive-style "yes to everything" pass.
stop_boot()
{
	# Terminate the process (which may include the parent /etc/rc)
	# if booting directly to multiuser mode.
	#
	if [ "$autoboot" = yes ]; then
		kill -TERM $$
	fi
	exit 1
}
fsck_start()
{
	if [ "$autoboot" = no ]; then
		echo "Fast boot: skipping disk checks."
	elif [ ! -r /etc/fstab ]; then
		echo "Warning! No /etc/fstab: skipping disk checks."
	elif [ "$autoboot" = yes ]; then
		# During fsck ignore SIGQUIT
		trap : 3
		echo "Starting file system checks now:"
		fsck -F -p
		# Dispatch on fsck's exit status.  The codes follow traditional
		# fsck preen conventions — TODO confirm against this platform's
		# fsck(8) man page.
		case $? in
		0)
			;;
		2)
			stop_boot
			;;
		4)
			# Root filesystem was modified: the system must reboot.
			echo "Rebooting..."
			reboot
			echo "Reboot failed; help!"
			stop_boot
			;;
		8)
			# Preen could not fix everything: retry, answering yes to all.
			echo "File system preen failed, trying fsck -y."
			fsck -y
			case $? in
			0)
				;;
			*)
				echo "Automatic file system check failed; help!"
				stop_boot
				;;
			esac
			;;
		12)
			echo "Boot interrupted."
			stop_boot
			;;
		130)
			stop_boot
			;;
		*)
			echo "Unknown error; help!"
			stop_boot
			;;
		esac
	fi
}
# start here
autoboot="yes"
fsck_start
exit 0
| true
|
12162ac923112ba8b4ad7de50f6770efb4325db9
|
Shell
|
jsherwani/bash
|
/cordova.bash
|
UTF-8
| 3,760
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Fill COMPREPLY with the platforms added to the current cordova project,
# filtered by the partial word in $1.
platforms() {
	local listing
	get_cordova || return
	listing="$(${CORDOVA_BIN} platform ls | tr -d "[]',")"
	COMPREPLY=( $(compgen -W "$listing" -- $1) )
}
# Fill COMPREPLY with the plugins installed in the current cordova project,
# filtered by the partial word in $1.
plugins() {
	local listing
	get_cordova || return
	listing="$(${CORDOVA_BIN} plugin ls | tr -d "[]',")"
	COMPREPLY=( $(compgen -W "$listing" -- $1) )
}
# Locate the cordova executable and cache it in CORDOVA_BIN.
# Resolution order: existing cache, the command word being completed
# (tilde-expanded via eval), then the first 'cordova' on PATH.
# Returns non-zero when nothing executable is found.
get_cordova() {
	local candidate
	[[ -n "${CORDOVA_BIN}" ]] && return 0
	candidate=$(eval echo ${COMP_WORDS[0]})
	if [[ -x $candidate ]]; then
		CORDOVA_BIN=$candidate
		return 0
	fi
	if candidate=$(which cordova); then
		CORDOVA_BIN=$candidate
		return 0
	fi
	return 1
}
# Walk upward from the current directory looking for the project root,
# identified by a .cordova marker directory.  Prints the root and returns 0,
# or returns 1 when no ancestor below / contains .cordova.
get_top_level_dir() {
	local path
	path=$(pwd)
	while [[ $path != '/' ]]; do
		if [[ -d $path/.cordova ]]; then
			# BUG FIX: quote the expansion so paths containing whitespace
			# are printed verbatim instead of being word-split by echo.
			echo "$path"
			return 0
		fi
		path=$(dirname "$path")
	done
	return 1
}
# Programmable-completion entry point for the cordova CLI.
# Completes the top-level subcommand first, then dispatches per-subcommand:
# directories for 'create'/'plugin add', known ABIs for 'platform add', and
# the project's current platforms/plugins elsewhere.
_cordova()
{
	local cur opts
	COMPREPLY=()
	cur="${COMP_WORDS[COMP_CWORD]}"
	# Skip over any initial command line switches
	local i=1
	while [[ $i -lt ${#COMP_WORDS[*]} ]] && [[ "${COMP_WORDS[${i}]}" == -* ]]; do
		i=$((i+1));
	done
	# For the first word, supply all of the valid top-level commands
	if [[ ${COMP_CWORD} -eq $i ]]; then
		opts="create platform plugin prepare compile build emulate serve"
		COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
		return 0
	fi
	case "${COMP_WORDS[$i]}" in
		create)
			if [[ ${COMP_CWORD} -eq $((i+1)) ]]; then
				COMPREPLY=( $(compgen -d -- ${cur}) )
				return 0
			fi
			;;
		platform)
			if [[ ${COMP_CWORD} -eq $((i+1)) ]]; then
				opts="add rm remove ls"
				COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
				return 0
			fi
			case "${COMP_WORDS[$((i+1))]}" in
				add)
					opts="ios android windows blackberry browser"
					COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
					return 0;
					;;
				rm|remove)
					platforms ${cur}
					return 0
					;;
			esac
			;;
		plugin)
			if [[ ${COMP_CWORD} -eq $((i+1)) ]]; then
				opts="add rm remove ls"
				COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
				return 0
			fi
			case "${COMP_WORDS[$((i+1))]}" in
				add)
					# BUG FIX: the original ran `compgen nospace -d -- ${cur}`;
					# "nospace" is not a compgen option, so it was consumed as
					# the word to complete and the real word was ignored.
					# Complete plugin directories the same way 'create' does.
					COMPREPLY=( $(compgen -d -- ${cur}) )
					return 0;
					;;
				rm|remove)
					plugins ${cur}
					return 0
					;;
			esac
			;;
		prepare|compile|build|emulate)
			platforms ${cur}
			return 0
			;;
		serve)
			if [[ ${COMP_CWORD} -eq $((i+1)) ]]; then
				platforms ${cur}
				return 0
			fi
			;;
	esac
}
complete -F _cordova cordova
| true
|
706da60fcefb8b418a6fb304f2cc2e1ef6e78137
|
Shell
|
jaingaurav001/kind-helm-chartmuseum
|
/tests/test-deployment-helm-repo.sh
|
UTF-8
| 1,220
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Make sure the Kind local container and chart repo works as expected.
# End-to-end: register the chartmuseum repo, build/push a test image to the
# local registry, package and upload a chart, install it, curl the deployed
# service through the node port, then clean up.
set -ex
# add the repo to helm with:
helm repo add chartmuseum-demo http://localhost/
# list the repositories
helm repo list
# build image
docker build -t helloworld:latest .
# tag and push the image to the local registry on port 5000
docker tag helloworld:latest localhost:5000/helloworld:0.1.0
docker push localhost:5000/helloworld:0.1.0
# package chart
helm package ./helloworld-chart
# add chart to the repositories (chartmuseum upload API)
curl --data-binary "@helloworld-chart-0.1.0.tgz" http://localhost/api/charts
# update the repo with:
helm repo update
# list the available packages on the repository:
helm search repo chartmuseum-demo
# install our package from the chartmuseum repo:
helm install helloworld chartmuseum-demo/helloworld-chart
# Resolve the service's node port and a node address, wait for the
# deployment to become available, then hit the service directly.
export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services helloworld-helloworld-chart)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
kubectl wait --for=condition=available deployment helloworld-helloworld-chart --timeout=120s
curl http://$NODE_IP:$NODE_PORT
# cleanup
helm delete helloworld
helm repo remove chartmuseum-demo
| true
|
24e586b24faaeac6c20ecccf8eb6b3e61056e700
|
Shell
|
chrisguest75/shell_examples
|
/59_monitor_folder/inotify/notified.sh
|
UTF-8
| 195
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Consumer for inotifywait output: expects lines of the form
#   "<watched-path> <event-name> <filename>"
# on stdin (e.g. piped from `inotifywait -m ... <dir>`), and reports each one.
echo "Waiting..."
# BUG FIX: -r stops read from interpreting backslash escapes, so file names
# containing backslashes arrive intact.
while read -r path action file; do
	echo "The file '$file' appeared in directory '$path' via '$action'"
	# do something with the file
done
echo "Exiting"
| true
|
ce917fa4b016959a839370cda8c1cba3efad6b1c
|
Shell
|
atdt/murmurhash3_mysql
|
/build.sh
|
UTF-8
| 406
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the MurmurHash3 MySQL UDF and install it into MySQL's plugin dir.
# This is a kludge and should be re-done as a proper Makefile.
# Ask the server for its plugin_dir (mysql -p will prompt for the root
# password interactively).
LIBDIR=$(mysql -u root -p <<< "show variables like 'plugin_dir';" | tail -1 | awk '{ print $2 }')
gcc -fPIC -Wall -I/usr/include/mysql -I. -shared murmur3_mysql_udf.c lib/murmur3/murmur3.c -o "${LIBDIR}/murmur3_mysql_udf.so"
# Hand the shared object to the mysql user and group in one step; the
# original ran separate recursive chgrp/chown on a single file.
chown mysql:mysql "${LIBDIR}/murmur3_mysql_udf.so"
| true
|
e057d94316dbbd7dd634e3bb18dde85d52c0fc40
|
Shell
|
MazzaLuca/Face-Away-PC-Lock
|
/Dist/ubuntu/bin/face-ls
|
UTF-8
| 268
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Jonas Bertossa
# List the faces stored in the face-recognition dataset.
# BUG FIX: the original piped `ls` through plain `wc` and took field 2 —
# the *word* count — which over-counts entries whose names contain spaces;
# count lines instead.  2>/dev/null keeps a missing dataset dir quiet
# (cnt then stays 0 and the "empty" message is shown).
cnt=$(ls /etc/facelock/Dataset/ 2>/dev/null | wc -l)
if [[ "$cnt" -gt 0 ]] ; then
	echo "Elenco facce nel dataset:"
	ls -d /etc/facelock/Dataset/* | sed 's/.*\///g'
else
	echo "Il dataset è vuoto"
fi
| true
|
6e792eee3ee2dd3f2e35a42757538e5ce9f7df2f
|
Shell
|
lumjjb/tornjak
|
/sample-keys/gen_domain.sh
|
UTF-8
| 674
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate a self-signed certificate whose SAN list contains the domain
# given as $1 plus the example.com aliases; signs with ./key.pem and
# writes ./out.pem.
usage() {
	echo "Takes in domain name as input"
	exit 1
}
if [[ -z $1 ]]; then
	usage
fi
export DOMAIN=$1
echo "Generating certs..."
openssl req -new -x509 -sha256 -key key.pem -subj "/C=US/ST=CA/O=Acme, Inc./CN=example.com" -extensions SAN -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS:${DOMAIN},DNS:example.com,DNS:www.example.com")) -out out.pem
#echo "Creating k8s secrets in tornjak namesapce..."
#kubectl -n tornjak create secret generic tornjak-certs --from-file=key.pem --from-file=cert.pem=$DOMAIN.pem
# Modify tornjak server k8s manifest to add Volume from secret "tornjak-certs" to mount point "/opt/spire/sample-keys"
| true
|
a508077a86e9ab2212f6e74dcc24e39e6fbd57e1
|
Shell
|
barryk/arch-osx
|
/fftw/PKGBUILD
|
UTF-8
| 1,310
| 2.875
| 3
|
[] |
no_license
|
# PKGBUILD autocreated by ABStoOSX 0.2
# $Id
# ArchLinux Maintainer: Ronald van Haren <ronald.archlinux.org>
# Contributor: damir <damir@archlinux.org>
pkgname=fftw
pkgver=3.2.1
pkgrel=1
pkgdesc="A library for computing the discrete Fourier transform (DFT)"
arch=('macx86')
license=('GPL2')
url="http://www.fftw.org/"
depends=( )
options=(!libtool)
source=(http://www.fftw.org/${pkgname}-${pkgver}.tar.gz)
install=fftw.install
# notes:
# http://www.fftw.org/fftw2_doc/fftw_6.html#SEC69
# http://www.fftw.org/faq/section2.html#singleprec
# http://www.fftw.org/fftw3_doc/Precision.html#Precision
# FFTW ships one library per precision, so the tree is configured and built
# three times (double, long double, single) into the same prefix; the
# explicit `|| return 1` checks are the PKGBUILD-era error convention.
build() {
  cd ${srcdir}/${pkgname}-${pkgver}
  # build & install double precision
  ./configure --prefix=/opt/arch \
    --enable-shared || return 1
  make || return 1
  make DESTDIR=${pkgdir} install || return 1
  make clean
  # build & install long double precission
  ./configure --prefix=/opt/arch \
    --enable-long-double --enable-shared || return 1
  make || return 1
  make DESTDIR=${pkgdir} install || return 1
  make clean
  # build + install single precision
  ./configure --prefix=/opt/arch \
    --enable-float --enable-shared || return 1
  make || return 1
  make DESTDIR=${pkgdir} install || return 1
  # handle info files: drop the dir index and compress the rest
  rm -f $pkgdir/opt/arch/share/info/dir
  gzip $pkgdir/opt/arch/share/info/*
}
| true
|
968537da7b704400d2556173fadb7847fc9e52a2
|
Shell
|
marcusandre/dotfiles-linux
|
/zsh/.zshrc
|
UTF-8
| 3,323
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# .zshrc.sensible
# Sensible zsh defaults: emacs keybindings, quiet terminal, 8-bit output.
bindkey -e
setopt NO_BEEP
setopt PRINT_EIGHT_BIT
setopt NO_FLOW_CONTROL
setopt LIST_PACKED
# Report resource usage for any command that runs longer than 60 seconds.
REPORTTIME=60
# Characters treated as part of a word for word-wise motion/deletion.
WORDCHARS="*?_-.[]~&;$%^+"
# Terminfo-based bindings so Home/End/Delete behave as expected.
[[ -n $terminfo[khome] ]] && bindkey $terminfo[khome] beginning-of-line
[[ -n $terminfo[kend] ]] && bindkey $terminfo[kend] end-of-line
[[ -n $terminfo[kdch1] ]] && bindkey $terminfo[kdch1] delete-char
# path
[ -d ~/bin ] && export PATH=$PATH:~/bin
[ -d ~/go/bin ] && export PATH=$PATH:~/go/bin
[ -d ~/.yarn/bin ] && export PATH=$PATH:~/.yarn/bin
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
[ -f ~/.cargo/env ] && source ~/.cargo/env
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
# paste urls
# Auto-quote special characters when pasting URLs at the prompt.
autoload -U url-quote-magic
zle -N self-insert url-quote-magic
# globs
setopt EXTENDED_GLOB
disable -p '^'
# dir stack
setopt AUTO_PUSHD
setopt PUSHD_IGNORE_DUPS
setopt PUSHD_MINUS
# continue disowned jobs
setopt AUTO_CONTINUE
# history
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_SPACE
setopt INC_APPEND_HISTORY
setopt EXTENDED_HISTORY
SAVEHIST=9000
HISTSIZE=9000
HISTFILE=~/.zsh_history
# completion
zmodload zsh/complist
autoload -Uz compinit && compinit
zstyle ':completion:*' menu select=2
# Colorize completion listings using the system dircolors database.
eval $(dircolors)
zstyle ':completion:*' list-colors ''
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
export EDITOR=vim
export VISUAL=$EDITOR
# prompt
# setopt PROMPT_SUBST
# PS1='%B%m%(?.. %??)%(1j. %j&.)%b %~%B%(!.%F{red}.%F{yellow})%#${SSH_CONNECTION:+%#} %b%f'
# PS2='%_%B%(!.%F{red}.%F{yellow})> %b%f'
# Number of trailing directory components shown by gitpwd (below).
NDIRS=2
# Print an abbreviated current directory (the last NDIRS components) for the
# prompt, annotating the segment that is the git repo root with "@branch".
# Heavy use of zsh parameter expansion flags; see zshexpn(1).
gitpwd() {
  local -a segs splitprefix; local gitprefix branch
  # Split the (tilde-abbreviated) PWD on "/" into segments, reversed.
  segs=("${(Oas:/:)${(D)PWD}}")
  # Shorten long segments: keep the first 10 and last 5 chars joined by "…".
  segs=("${(@)segs/(#b)(?(#c10))??*(?(#c5))/${(j:\u2026:)match}}")
  if gitprefix=$(git rev-parse --show-prefix 2>/dev/null); then
    splitprefix=("${(s:/:)gitprefix}")
    # Resolve the branch name; fall back to a rev name for detached HEAD.
    if ! branch=$(git symbolic-ref -q --short HEAD); then
      branch=$(git name-rev --name-only HEAD 2>/dev/null)
      [[ $branch = *\~* ]] || branch+="~0" # distinguish detached HEAD
    fi
    # If the repo root is deeper than what we display, print it separately;
    # otherwise tag the matching segment in place.
    if (( $#splitprefix > NDIRS )); then
      print -n "${segs[$#splitprefix]}@$branch "
    else
      segs[$#splitprefix]+=@$branch
    fi
  fi
  # Emit a leading "/" when the truncated path starts at the filesystem root.
  (( $#segs == NDIRS+1 )) && [[ $segs[-1] == "" ]] && print -n /
  print "${(j:/:)${(@Oa)segs[1,NDIRS]}}"
}
# Non-breaking space used as the prompt terminator (handy for search/copy).
nbsp=$'\u00A0'
# Install the prompt: terminal-title hooks plus a PS1 that embeds gitpwd.
cnprompt6() {
  case "$TERM" in
    xterm*|rxvt*)
      # Set the terminal title to host:cwd at the prompt, and to the running
      # command (sanitized of tabs/newlines) just before execution.
      precmd() { [[ -t 1 ]] && print -Pn "\e]0;%m: %~\a" }
      preexec() { [[ -t 1 ]] && print -n "\e]0;$HOST: ${(q)2//[$'\t\n\r']/ }\a" }
  esac
  setopt PROMPT_SUBST
  PS1='%B%m${TENV:+ [$TENV]}%(?.. %??)%(1j. %j&.)%b $(gitpwd)%B%(!.%F{red}.%F{yellow})%#${SSH_CONNECTION:+%#}$nbsp%b%f'
  RPROMPT=''
}
cnprompt6
# aliases
alias ..="cd .."
alias gb="git branch -a --color -v"
alias gd="git diff --color"
alias gl='git l'
alias gll='git ll'
alias gp="git push"
alias gr='cd "$(git root)"'  # jump to repo root ("root" is a git alias)
alias ll="exa -lFa --git --git-ignore -I '.git$'"
alias ns="cat package.json | jq '.scripts'"  # list npm scripts
alias rf="rm -fr"
alias s="git status -sb"
alias t="tree -a -I '.git|node_modules'"
alias lip="ip a | grep 192 | awk '{print \$2}' | cut -d '/' -f 1"  # local 192.x IP
alias wttr='curl https://de.wttr.in/Gruenstadt'
alias rate='curl eur.rate.sx'
alias latencies='curl cheat.sh/latencies'
md () {
  # Create a directory (parents included) and enter it in one step.
  # Fixed: only cd when mkdir succeeded (the original cd'd unconditionally);
  # "--" protects against directory names starting with "-".
  mkdir -p -- "$1" && cd -- "$1"
}
# opam configuration
test -r /home/m/.opam/opam-init/init.zsh && . /home/m/.opam/opam-init/init.zsh > /dev/null 2> /dev/null || true
| true
|
0ce98a2a78948b64477f298cbc950cfb8867451b
|
Shell
|
danvk/git-helpers
|
/mv/git-unbak
|
UTF-8
| 377
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# un-"bak"-up a git-tracked file: for every tracked file matching the given
# pathspecs whose extension is ".bak", rename it back by stripping the suffix.
# Files named exactly "BUILD" are skipped.
for arg in "$@"; do
  # Read matches one per line so paths containing spaces survive intact
  # (the original unquoted $(git ls-files $arg) word-split them).
  while IFS= read -r expanded_arg; do
    if [ "$(basename -- "$expanded_arg")" == "BUILD" ]; then
      continue
    fi
    extension=${expanded_arg##*.}
    if [ "$extension" == "bak" ]; then
      echo "Moving $expanded_arg -> ${expanded_arg%.bak}"
      git mv -- "$expanded_arg" "${expanded_arg%.bak}"
    fi
  done < <(git ls-files -- "$arg")
done
| true
|
0b3bde1d4757adc833f44e7f9867b9d0895c7f3f
|
Shell
|
ericdward100/mapr_installation
|
/install_mapr_chef.sh
|
UTF-8
| 3,294
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Drive a full MapR cluster install: run chef-client on every node, wait for
# the runs to finish, prompt for a license, then stop services in dependency
# order, reboot all nodes, and wait for warden/CLDB to come back up.

#Node list:
nodes="ip-172-16-2-225.ec2.internal ip-172-16-2-16.ec2.internal ip-172-16-2-176.ec2.internal ip-172-16-2-108.ec2.internal ip-172-16-2-37.ec2.internal ip-172-16-2-79.ec2.internal"

log_date=$(date +%F_%H-%M)

# Prepare the log directory, archiving logs from any previous run.
if [ ! -d ~/mapr_install_logs ]; then
  echo "Making mapr_install directory"
  mkdir -p ~/mapr_install_logs/bak
else
  if [[ "$(ls ~/mapr_install_logs/*.install.log 2>/dev/null)" != "" ]]; then
    echo "Moving old installation logs"
    mv ~/mapr_install_logs/*.install.log ~/mapr_install_logs/bak
  fi
fi

# Kick off chef-client on every node in parallel, one log file per node.
for i in $nodes; do
  echo "Starting chef-client run for node $i"
  nohup ssh $i chef-client >>~/mapr_install_logs/$i.$log_date.install.log 2> ~/mapr_install_logs/error.$i.$log_date.install.log < /dev/null &
done

sleep 10

# Poll each node until its chef-client run has finished.
for i in $nodes; do
  while [[ "$(ssh $i ps -ef|grep chef-client|grep -v grep|grep -v ssh)" != "" ]]; do
    sleep 20;
    echo "Waiting for $i to finish chef-client"
  done
done

# A MapR license must be applied before the cluster can be restarted.
applied="no"
while [[ "$applied" != "y" ]]; do
  echo -n "Have you applied a MapR license ('y' if so, 'q' to quit script): "
  read applied
  case $applied in
    q*)
      echo -e "\n\nQuitting script...\n\n"
      exit
  esac
done

echo -e "\n\n###IMPORTANT!! THIS NEXT STEP WILL REBOOT THE SERVERS###"
echo -e "###IMPORTANT!! THIS NEXT STEP WILL REBOOT THE SERVERS###\n\n"

rb="no"
while [[ "$rb" != "y" ]]; do
  echo -n "Reboot all servers?('y' if so, 'q' to quit script): "
  read rb
  case $rb in
    q*)
      # Fixed: the \n\n escapes were outside the quotes in the original,
      # which printed stray characters instead of blank lines.
      echo -e "\n\nQuitting script...\n\n"
      exit
  esac
done

echo -e "\n\nRebooting all servers!!!\n\n"
echo -e "Waiting for all servers to come back\n\n"

# Stop wardens on non-CLDB nodes first, then CLDB nodes, then zookeeper,
# so services shut down in dependency order.
for i in $nodes; do
  if [[ "$(ssh $i ls /opt/mapr/roles|grep cldb)" != "cldb" ]]; then
    ssh $i service mapr-warden stop
  fi
done
for i in $nodes; do
  if [[ "$(ssh $i ls /opt/mapr/roles|grep cldb)" == "cldb" ]]; then
    ssh $i service mapr-warden stop
  fi
done
for i in $nodes; do
  if [[ "$(ssh $i ls /opt/mapr/roles|grep zookeeper)" == "zookeeper" ]]; then
    ssh $i service mapr-zookeeper stop
  fi
done

for i in $nodes; do
  ssh $i reboot
done

# Wait until every node answers over ssh again.
for i in $nodes; do
  while [[ "$(ssh $i uname -m 2>/dev/null)" != "x86_64" ]]; do
    sleep 20
    echo "Sleeping 20 seconds for host $i"
  done
done

node_count=$(echo $nodes|wc -w)
echo "node_count == $node_count"

# Wait for mapr-warden on every node.
warden_up=0
echo "Nodes = $nodes"
for i in $nodes; do
  while [[ "$(ssh $i service mapr-warden status)" != "WARDEN running as process "[0-9]*\. ]]; do
    sleep 5
    echo "sleeping for $i warden"
  done
  echo "Warden on $i is up"
  # Fixed: += on a string-initialized variable concatenated digits ("0111...")
  # instead of counting; use arithmetic.
  warden_up=$((warden_up + 1))
done

test=$(echo $nodes|awk '{print $1}')
echo -e "\n\nLooking for active CLDB\n\n"
# maprcli prints a two-line header when a CLDB master exists; loop until then.
while [[ "$(ssh $test maprcli node cldbmaster|awk '{print $1}')" != "cldbmaster
ServerID:" ]]; do
  sleep 5
  echo -e "Waiting for active CLDB..."
done
echo -e "\n\nFound active CLDB!\n\n"

echo -e "Restarting all Wardens\n\n"
for i in $nodes; do
  ssh $i service mapr-warden restart
done

test=$(echo $nodes|awk '{print $1}')
echo -e "\n\nLooking for active CLDB\n\n"
while [[ "$(ssh $test maprcli node cldbmaster|awk '{print $1}')" != "cldbmaster
ServerID:" ]]; do
  sleep 5
  echo -e "Waiting for active CLDB..."
done
echo -e "\n\nFound active CLDB!\n\n"

echo -e "\n\n\nAll Done Here!!!!!\n\n\n"
| true
|
935026aa2b3430702a01a748fa2718126500ab67
|
Shell
|
leusic38/dotfiles
|
/bin/.local/bin/update
|
UTF-8
| 162
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
#mise a jour des depots
# Refresh package databases and upgrade the system, then optionally the AUR.
yay -Syy
sudo pacman -Syu
default="Y"
read -e -p 'update systeme [Y/n]' yes
# Apply the default answer when the user just presses Enter.
# Fixed: the original wrote yes="{$yes:-$default}" (brace misplaced), which
# produced a literal "{n:-Y}"-style string, so answering "n" could never
# actually skip the AUR upgrade.
yes="${yes:-$default}"
# NOTE(review): "read -e -p" is a bashism under a /bin/sh shebang; works where
# /bin/sh is bash (typical on Arch) -- confirm or switch the shebang to bash.
[ "$yes" != "n" ] && yay -Sua
| true
|
076e3183f23cbd234cd3fbb949f4b89fa3b6cc1e
|
Shell
|
fesc2000/sr-4080
|
/etc/init.d/fan
|
UTF-8
| 1,008
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# This script is not used when systemd is running
### BEGIN INIT INFO
# Provides:          fan
# Required-Start:
# Required-Stop:
# Should-Stop:
# X-Stop-After:      umountroot
# Default-Start:     2 3 5
# Default-Stop:      0 6
# Short-Description: PWM/gpio80 fan control
# Description:       PWM/gpio80 fan control
### END INIT INFO
#
set -eu
. /lib/lsb/init-functions

PIDFILE=/var/run/pwm.pid
DAEMON=/root/bin/pwm
# Daemon arguments: the gpio value file plus two numbers ("30 10000").
# NOTE(review): their meaning (duty cycle / period?) is not visible here --
# confirm against the pwm daemon's documentation.
ARGS="/sys/class/gpio/gpio80/value 30 10000"

case "${1:-}" in
  start|restart|force-reload)
	# Export gpio80 as an output driven high (fan on) if not yet exported.
	if [ ! -d /sys/class/gpio/gpio80 ]; then
	    echo 80 > /sys/class/gpio/export
	    echo out > /sys/class/gpio/gpio80/direction
	    echo 1 > /sys/class/gpio/gpio80/value
	fi
	# Bail out if the export above did not take effect.
	if [ ! -d /sys/class/gpio/gpio80 ]; then
	    exit 1
	fi
	# $ARGS is deliberately unquoted: it must split into three arguments.
	start-stop-daemon --start --background --make-pidfile --pidfile $PIDFILE --exec $DAEMON $ARGS
	;;
  stop)
	start-stop-daemon --stop --pidfile $PIDFILE
	;;
  *)
	# Fixed: the usage line previously advertised only "stop" although
	# start/restart/force-reload are supported above.
	echo "Usage: ${0:-} {start|stop|restart|force-reload}" >&2
	exit 1;;
esac

exit 0
| true
|
36c2d0d57b2e37a62f450f33125982e0b39be920
|
Shell
|
vicjicaman/repoflow-tool
|
/start.sh
|
UTF-8
| 1,740
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch the three RepoFlow local services (graph API, web UI, plugin server)
# on localhost, installing the pinned npm module versions first.
export REPOFLOW_WORKSPACE=~/repoflow/workspace
export PLUGIN_REPOFLOW_WORKSPACE=${REPOFLOW_WORKSPACE}
# Host/port assignments for each service.
export REPOFLOW_GRAPHQL_SERVICE_HOST=localhost
export REPOFLOW_GRAPHQL_SERVICE_PORT=4081
export REPOFLOW_WEB_SERVICE_HOST=localhost
export REPOFLOW_WEB_SERVICE_PORT=5081
export REPOFLOW_PLUGIN_SERVICE_HOST=localhost
export REPOFLOW_PLUGIN_SERVICE_PORT=7081
# Derived URLs consumed by the services below.
export REPOFLOW_PLUGIN_SERVER=http://${REPOFLOW_PLUGIN_SERVICE_HOST}:${REPOFLOW_PLUGIN_SERVICE_PORT}
export REPOFLOW_GRAPHQL_SERVER=http://${REPOFLOW_GRAPHQL_SERVICE_HOST}:${REPOFLOW_GRAPHQL_SERVICE_PORT}
export REPOFLOW_GRAPHQL_URL=${REPOFLOW_GRAPHQL_SERVER}/graphql
export REPOFLOW_EVENTS_URL=${REPOFLOW_GRAPHQL_SERVER}/events
export REPOFLOW_PLUGINS_URL=${REPOFLOW_PLUGIN_SERVER}/
# Pinned module versions (the trailing comments are consumed by tooling).
export REPOFLOW_GRAPHQL_VERSION=1.73.9-master #module @nebulario/nodeflow-local-graph
export REPOFLOW_WEB_VERSION=1.71.13-master #module @nebulario/nodeflow-local-server
export REPOFLOW_PLUGIN_VERSION=1.70.3-master #module @nebulario/repoflow-plugin-server
echo "Starting..."
# Generate a minimal package.json pinning the three modules, then install.
echo "{\"dependencies\":{\"@nebulario/nodeflow-local-server\":\"${REPOFLOW_WEB_VERSION}\",\"@nebulario/nodeflow-local-graph\":\"${REPOFLOW_GRAPHQL_VERSION}\",\"@nebulario/repoflow-plugin-server\":\"${REPOFLOW_PLUGIN_VERSION}\"}}" > package.json
export NODE_ENV="production"
yarn install --production=true
# Start all three services in the background and wait on all of them.
echo "Starting graph at port ${REPOFLOW_GRAPHQL_SERVICE_PORT}"
node ./node_modules/@nebulario/nodeflow-local-graph/dist/index.js &
echo "Starting web at port ${REPOFLOW_WEB_SERVICE_PORT}"
node ./node_modules/@nebulario/nodeflow-local-server/dist/index.js &
echo "Starting plugins at port ${REPOFLOW_PLUGIN_SERVICE_PORT}"
node ./node_modules/@nebulario/repoflow-plugin-server/dist/index.js &
wait
| true
|
d511819700ba3a4a31318449b672eed03631c78a
|
Shell
|
alfonz19/optlock
|
/test.sh
|
UTF-8
| 722
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Exercise the optlock demo service: reset an entity, then issue a slow
# (busyWait=true) update concurrently with a fast one to provoke an
# optimistic-locking conflict, printing the entity state at each step.
if [[ $# -ne 1 ]]; then
echo supply uuid;
exit 1;
fi
echo resetting
# Reset the entity to a known state (a=1, b=1).
curl -X PUT http://localhost:8080/optlock/$1/?a=1\&b=1\&busyWait=false
echo entity before test:
curl -X GET http://localhost:8080/optlock/$1
echo waiting 5s before calling test.
sleep 5s;
echo testing.
# Slow update runs in the background while it "busy waits" server-side.
curl -X PUT http://localhost:8080/optlock/$1/?a=2\&b=3\&busyWait=true &
echo entity after first PUT:
curl -X GET http://localhost:8080/optlock/$1
sleep 3s
# Fast update lands while the slow one is still in flight.
curl -X PUT http://localhost:8080/optlock/$1/?a=200\&b=1\&busyWait=false
echo entity after second PUT:
curl -X GET http://localhost:8080/optlock/$1
echo waiting for first job.
wait
echo entity after first PUT is finished:
curl -X GET http://localhost:8080/optlock/$1
| true
|
1e82655646d646ff9b3b4a75681194e2d9e1d15b
|
Shell
|
cjfiscus/2022_Fiscus_Dissertation
|
/chapter_1/analysis_scripts/SX/001b_countkmers_assem.sh
|
UTF-8
| 1,357
| 2.78125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash -l
# SLURM job: download two ENA assemblies and count canonical 12-mers in each
# with jellyfish, writing gzipped text dumps to RESULTS_DIR.
#SBATCH --nodes=1
#SBATCH --ntasks=4
#SBATCH --mem=32G
#SBATCH --output=pl%j.stdout
#SBATCH --error=pl%j.stderr
#SBATCH --mail-user=cfisc004@ucr.edu
#SBATCH --mail-type=ALL
#SBATCH --time=3-00:00:00
#SBATCH --job-name="count"
#SBATCH -p batch
# software dependencies
## axel 2.16.1 (https://github.com/axel-download-accelerator/axel/releases)
## jellyfish 2.2.9;
# load required modules (slurm)
module load jellyfish/2.2.9
# SET VARIABLES
WORKINGDIR=../
RESULTS_DIR=/rhome/cfisc004/bigdata/projects/SX/results/
TEMP_DIR=../data/
THREADS=4
#### PIPELINE #####
# make temp directory and go there
# NOTE(review): $NAME is never assigned in this script, so TEMP_DIR stays
# ../data/ -- confirm whether NAME was meant to come from the environment.
TEMP_DIR="$TEMP_DIR""$NAME"
mkdir -pv "$TEMP_DIR"
cd "$TEMP_DIR"
## download assemblies
# (the files are FASTA but stored under .fastq.gz names; jellyfish accepts both)
axel -n "$THREADS" http://ftp.ebi.ac.uk/pub/databases/ena/wgs/public/om/OMOK01.fasta.gz -o Sequel_assem.fastq.gz
axel -n "$THREADS" ftp://ftp.ebi.ac.uk/pub/databases/ena/wgs/public/om/OMOL01.fasta.gz -o MinION_assem.fastq.gz
# count k-mers
# -C canonical, -m 12 k-mer length, -s 500M hash size, -t 4 threads;
# dump as tab-separated "kmer<TAB>count" and compress.
jellyfish count -C -m 12 -s 500M -t 4 -o "$RESULTS_DIR"/Sequel_assem.jf <(zcat Sequel_assem.fastq.gz)
jellyfish dump -tc "$RESULTS_DIR"/Sequel_assem.jf | gzip > "$RESULTS_DIR"/Sequel_assem.txt.gz
jellyfish count -C -m 12 -s 500M -t 4 -o "$RESULTS_DIR"/MinION_assem.jf <(zcat MinION_assem.fastq.gz)
jellyfish dump -tc "$RESULTS_DIR"/MinION_assem.jf | gzip > "$RESULTS_DIR"/MinION_assem.txt.gz
# remove the intermediate binary hash files, keeping only the text dumps
rm "$RESULTS_DIR"/*.jf
| true
|
93dff24ab234359824398b15dd46d60eba823b76
|
Shell
|
rooprob/dotfiles
|
/.bin/xkeybind-action
|
UTF-8
| 1,825
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# xkeybind-action <action>
#
# Map keys (eg macOS command key shortcuts into various Linux desktop apps)
#
# Script to call from ~/.xbindkeysrc
#
# "xkeybind-action copy"
#   Mod4 + c
# "xkeybind-action paste"
#   Mod4 + v
#
#
# Author: Robert Fielding <fieldingrp@gmail.com>

action=$1

# applications operating Ctrl-Shift-C style
TYPE1='^Gnome-terminal|ubuntu-terminal-app$'
# applications operating Ctrl-C style
# NOTE(review): TYPE2 is defined but never referenced below (pick_type only
# tests TYPE1) -- possibly left over from an earlier revision.
TYPE2='^Google-chrome$'

# Identify the currently focused window and its WM_CLASS (quotes stripped).
window_id=$(xdotool getactivewindow)
window_cls=$(xprop -id ${window_id} |awk '/WM_CLASS/{print $4}'|sed 's,",,g')
# Basename of this script, used as the syslog tag.
script="${0##*/}"
pick_type() {
    # Decide how keystrokes are delivered, based on the focused window class:
    # terminal-style apps (TYPE1) get Ctrl-Shift-<key> via xdotool (needed
    # for gnome v3 apps), everything else gets plain Ctrl-<key> via xvkbd.
    action_suffix=""
    if [[ ! $window_cls =~ $TYPE1 ]]; then
        action_command="xvkbd -xsendevent -text \C"
        return
    fi
    action_command="xdotool key --window $window_id --clearmodifiers ctrl+shift+"
}
raw_send_keys() {
    # Execute the assembled key-injection command, logging before and after.
    # Fixed: the original "cmd=$@" flattened the arguments into one string
    # and let the later unquoted expansion re-split AND glob-expand them;
    # an array preserves each word exactly.
    local -a cmd=("$@")
    logger -t "$script" "($action, $window_cls, $window_id) ${cmd[*]}"
    # sleep required:
    # xdotool --clearmodifiers will re-establish Mod key afterwards,
    # confusing the user if lingered. 200ms is enough time to avoid linger.
    sleep 0.2
    "${cmd[@]}"
    rc=$?
    logger -t "$script" "($action, $window_cls, $window_id) returned $rc"
}
# Wrap the key in the prepared command prefix/suffix and execute it.
send_key() {
    key=$1
    # Word-splitting of the assembled command string is intentional here:
    # raw_send_keys must receive the program and its options as separate words.
    raw_send_keys ${action_command}${key}${action_suffix}
}
# Log the given reason to syslog and terminate the script successfully
# (exit 0 so xbindkeys does not treat a skipped event as a failure).
exit_early() {
    logger -t $script "($action, $window_cls, $window_id) $@"
    exit 0
}
# Nothing sensible can be done without a focused window; bail out quietly.
[ -z "$window_id" ] && exit_early "no window_id"
[ -z "$window_cls" ] && exit_early "no window class"

# map macOS shortcuts, see ~/.xbindkeysrc
# Translate the requested action into the letter of the shortcut to send.
case "$action" in
    "newtab") key="t" ;;
    "closetab") key="w" ;;
    "copy") key="c" ;;
    "paste") key="v" ;;
    "cut") key="x" ;;
    *) exit_early "no action" ;;
esac

# setup which keyboard command
pick_type

# fire!
send_key $key
| true
|
35b878165a3c47bc7e480f366ec9e148614526a7
|
Shell
|
BintangDiLangit/dpkg_Ubuntu18.04
|
/info/console-setup.postrm
|
UTF-8
| 610
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
if [ purge = "$1" ]; then
rm -f /etc/default/console-setup
fi
if [ remove = "$1" -o purge = "$1" ]; then
if [ ! -f /bin/setupcon ]; then
rm -f /etc/console-setup/cached_*
fi
fi
# Automatically added by dh_installdeb/11.1.6ubuntu2
dpkg-maintscript-helper rm_conffile /etc/init/console-font.conf 1.142ubuntu8~ console-setup -- "$@"
# End automatically added section
# Automatically added by dh_installdebconf/11.1.6ubuntu2
if [ "$1" = purge ] && [ -e /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
db_purge
fi
# End automatically added section
| true
|
843dd2013e155e0a2e90ddc602bd6d0c7104300e
|
Shell
|
eoziolor/fhet
|
/scripts/mapping-err/mapping_301_350.sh
|
UTF-8
| 896
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Align paired-end reads for samples BU000301..BU000350 with bwa mem,
# mark duplicates/split/discordant reads with samblaster, and write a
# coordinate-sorted BAM per sample.
BWA=/data/oziolore/programs/bwa/bwa
SBL=/data/oziolore/programs/samblaster/samblaster
outdir=/data/oziolore/fhet/data/align/
genome=/data/oziolore/fhet/data/genome/reference_funhe.fna
stools=/data/oziolore/upload/samtools-1.3/samtools

for i in {301..350}
do
# Paired FASTQ inputs for this sample.
fq1=/data/oziolore/fhet/data/unmapped/combined1P_BU000$i.fastq.gz
fq2=/data/oziolore/fhet/data/unmapped/combined2P_BU000$i.fastq.gz
# Sample id ("BU...") extracted from the filename; population looked up
# from a two-column table keyed on that id.
sam=$(echo $fq1 | grep -oP "BU[0-9]+")
pop=$(cat /data/oziolore/fhet/data/pop_samples | grep $sam | cut -f 2)
# Read-group string for bwa -R; the \\t sequences become literal "\t"
# characters which bwa expands into tabs.
rg=$(echo \@RG\\tID:$sam.combined\\tPL:Illumina\\tPU:x\\tLB:combined\\tSM:$sam.$pop)
outdir=/data/oziolore/fhet/data/align
outroot=$sam\_$pop
# The command line is assembled with escaped spaces and expanded unquoted
# below -- fragile but intentional here (paths contain no whitespace).
cmdline=$BWA\ mem\ $genome\ -t\ 2\ -R\ $rg\ $fq1\ $fq2
# samblaster: -e writes discordant (-d) and split (-s) reads separately;
# samtools converts to BAM and coordinate-sorts.
$cmdline | $SBL -e -d $outdir/$outroot.disc.sam -s $outdir/$outroot.split.sam | \
$stools view -S -h -u - | \
$stools sort -T $outdir/$outroot - > $outdir/$outroot.bam
done
| true
|
a053c25c4ff9496730228d7d35efea643afa18c0
|
Shell
|
ledutu/review-book
|
/scripts/create-db.sh
|
UTF-8
| 4,522
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Le Duc Tung
# Username: ledutu
#
# Seed the review-book database through the local HTTP API.
#   Usage: create-db.sh TYPE TIMES LOCALE
#     TYPE    resource to create (user, book, blog, ..., or "all")
#     TIMES   number of records to generate
#     LOCALE  locale for the generated data
# Fixed from the original: the shebang was "#!bin/bash" (invalid relative
# path) and every response check used unquoted [ $RESPONSE ], which errors
# out ("too many arguments") whenever the API returns more than one word.
# The near-identical per-resource functions now share one worker while
# keeping their exact names and output messages.

API_URL=http://localhost:3000/api/db
TYPE=$1
TIMES=$2
LOCALE=$3
HASH=ledutu

# Generic worker: GET ${API_URL}/$1?times=...&locale=... and report progress
# using the exact messages supplied as $2 (start), $3 (ok), $4 (failure).
# Exits the whole script with status 1 on failure.
createResource() {
    local endpoint=$1 start_msg=$2 ok_msg=$3 fail_msg=$4
    local response
    echo "$start_msg"
    response=$(wget -qO- "${API_URL}/${endpoint}?times=${TIMES}&locale=${LOCALE}")
    if [ -n "$response" ]
    then
        echo "$ok_msg"
    else
        echo "$fail_msg"
        exit 1
    fi
}

createUser() {
    createResource user \
        "Creating User database......" \
        "Create user database successful" \
        "Create user database fail, please check and try again"
}

createBookCategory() {
    createResource book-category \
        "Creating Book Category database......" \
        "Create book category database successful" \
        "Create book category database fail, please check and try again"
}

createBlogTag() {
    createResource blog-tag \
        "Creating Blog Tag" \
        "Create blog tag database successful" \
        "Create blog tag database fail, please check and try again"
}

createBook() {
    createResource book \
        "Creating Book database......" \
        "Create book database successful" \
        "Create book database fail, please check and try again"
}

createBlog() {
    createResource blog \
        "Creating Blog database......" \
        "Create Blog database successful" \
        "Create Blog database fail, please check and try again"
}

createBookComment() {
    createResource book-comment \
        "Creating Book Comment database......" \
        "Create Book Comment database successful" \
        "Create Book Comment database fail, please check and try again"
}

createBlogComment() {
    createResource blog-comment \
        "Creating Blog Comment database......" \
        "Create Blog Comment database successful" \
        "Create Blog Comment database fail, please check and try again"
}

createBookVote() {
    createResource create-book-vote \
        "Creating Book vote database......" \
        "Create Book vote database successful" \
        "Create Book vote database fail, please check and try again"
}

createBlogVote() {
    createResource create-blog-vote \
        "Creating Blog vote database......" \
        "Create Blog vote database successful" \
        "Create Blog vote database fail, please check and try again"
}

# Recompute aggregate vote counts; takes no times/locale parameters.
calculateVote() {
    local response
    echo "Calculating book and blog database......"
    response=$(wget -qO- "${API_URL}/calculate-vote")
    if [ -n "$response" ]
    then
        echo "Calculating book and blog database successful"
    else
        echo "Calculating book and blog database fail, please check and try again"
        exit 1
    fi
}

# Create every resource in dependency order, then recompute votes.
createAll() {
    createUser
    createBookCategory
    createBlogTag
    createBook
    createBlog
    createBookComment
    createBlogComment
    createBookVote
    createBlogVote
    calculateVote
}

# Dispatch on the requested TYPE.
main() {
    case $TYPE in
    "user")
        createUser
        ;;
    "book-category")
        createBookCategory
        ;;
    "blog-tag")
        createBlogTag
        ;;
    "book")
        createBook
        ;;
    "blog")
        createBlog
        ;;
    "book-comment")
        createBookComment
        ;;
    "blog-comment")
        createBlogComment
        ;;
    "book-vote")
        createBookVote
        ;;
    "blog-vote")
        createBlogVote
        ;;
    "calculate-vote")
        calculateVote
        ;;
    "all")
        createAll
        ;;
    *)
        echo 'Your key is wrong, please try again'
        ;;
    esac
}

main
| true
|
b7b8739791ecb229f3aca70ebdafc8e4fb7eabef
|
Shell
|
aspirecsl-labs/dockerised-crafter
|
/scripts/container-executor.sh
|
UTF-8
| 2,636
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Print usage help for the current $command against the Crafter $INTERFACE
# (authoring/delivery) container. Reads globals: command, INTERFACE,
# CMD_PREFIX.
usage() {
    local CMD_SUMMARY
    # One-line summary that matches the command being documented.
    case $command in
    backup)
        CMD_SUMMARY="Backup the data in the Crafter ${INTERFACE} container"
        ;;
    login)
        CMD_SUMMARY="Login to the Crafter ${INTERFACE} container"
        ;;
    recovery)
        CMD_SUMMARY="Start the Crafter ${INTERFACE} container in recovery mode (CLI access)."
        ;;
    status)
        CMD_SUMMARY="Show the status of the specified crafter container"
        ;;
    version)
        CMD_SUMMARY="Show the crafter version of the specified container"
        ;;
    esac
    echo ""
    echo "Usage: ${CMD_PREFIX:-$(basename "$0")} [OVERRIDES]"
    echo ""
    echo "$CMD_SUMMARY"
    echo ""
    echo "Overrides:"
    echo "Allow users to override the defaults"
    echo "  Overrides are specified as \"name1=value1,name2=value2,...,nameN=valueN\""
    echo "  Supported overrides are:-"
    echo "    container: The id or name of the container to manage. Example \"container=869efc01315c\" or \"container=awesome_alice\""
    echo "    version:   The Crafter version to use instead of default. Example \"version=3.1.7\" "
}
# Guard: the wrapper "crafter" entry point must have exported the execution
# context before this script runs.
if [ -z "$INTERFACE" ] || [ -z "$CRAFTER_HOME" ] || [ -z "$CRAFTER_SCRIPTS_HOME" ]; then
  echo "Failed to setup the execution context!"
  echo "Are you running this script directly?"
  echo ""
  echo "Use 'crafter authoring container command' to run a command on the Crafter authoring container"
  echo "Use 'crafter delivery container command' to run a command on the Crafter delivery container"
  exit 9
fi

# source=<repo_root>/scripts/lib.sh
# shellcheck disable=SC1090
source "$CRAFTER_SCRIPTS_HOME"/lib.sh

command=$1

# Parse "name1=value1,name2=value2" overrides into shell variables
# (defined in lib.sh); bad syntax shows usage.
if ! enumerateKeyValuePairs "$2"; then
  usage
  exit 1
fi

IMAGE=aspirecsl/crafter-cms-${INTERFACE}
# shellcheck disable=SC2154
# version may be specified as an option from the command line
if [ -n "$version" ]; then
  eval IMAGE_REFERENCE="${IMAGE}:${version}"
else
  eval IMAGE_REFERENCE="${IMAGE}"
fi

# Without an explicit container override, require exactly one running
# container of the selected image.
if [ -z "$container" ]; then
  if ! container=$(getUniqueRunningContainer "${INTERFACE}" "${IMAGE_REFERENCE}"); then
    exit 1
  fi
fi

case $command in
login)
  # Interactive shell inside the container.
  docker exec -it "$container" "/docker-entrypoint.sh" /bin/bash
  ;;
backup | recovery | status | version)
  if [ "$command" = 'status' ]; then
    echo -e "\n------------------------------------------------------------------------"
    echo "Crafter ${INTERFACE} container status"
    echo "------------------------------------------------------------------------"
    # One-shot resource snapshot before delegating to the entrypoint.
    docker stats --no-stream --format "table {{.CPUPerc}}\t{{.MemPerc}}\t{{.MemUsage}}" "$container"
  fi
  docker exec -it "$container" "/docker-entrypoint.sh" "$command"
  ;;
*)
  usage
  exit 1
  ;;
esac

exit 0
| true
|
fca9c147b5e1fd0a6cdeddea44302c29e7479f95
|
Shell
|
Tedhumeetschas/bobsrepo
|
/cheat-sheet.sh
|
UTF-8
| 4,405
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------
# ECHO COMMAND
#-------------------------------------------------
#echo "Hello World!"
#-------------------------------------------------
# VARIABLES
#-------------------------------------------------
# Upper case by convention
# Letters, numbers, underscores
#NAME="Ted"
#echo "My name is $NAME"
#echo "My name is ${NAME}"
#-------------------------------------------------
# USER INPUT
#-------------------------------------------------
#read -p "Enter your name: " NAME
#echo "Hello $NAME, nice to meet you!"
#------------------------------------------------
# SIMPLE IF STATEMENT
#------------------------------------------------
#if [ "$NAME" == "Ted" ]
#then
# echo "Your name is Ted"
#fi
#-------------------------------------------------
# IF-ELSE
#-------------------------------------------------
#if [ "$NAME" == "Ted" ]
#then
# echo "Your name is Ted"
#else
# echo "Your name is not Ted"
#fi
#-------------------------------------------------
# ELSE-IF (elif)
#-------------------------------------------------
#if [ "$NAME" == "Ted" ]
#then
# echo "Your name is Ted"
#elif [ "$NAME" == "Jack" ]
#then
# echo "Your name is not Ted or Jack"
#else
# echo "Your name is not Ted"
#fi
#-------------------------------------------------
# COMPARISON
#-------------------------------------------------
#NUM1=3
#NUM2=5
#if [ "$NUM1" -gt "$NUM2" ]
#then
# echo "$NUM1 is greater than $NUM2"
#else
# echo "$NUM1 is less than $NUM2"
#fi
##########
# val1 -eq val2 Returns true if the values are equal
# val1 -ne val2 Returns true if the values are not equal
# val1 -gt val2 Returns true if val1 is greater than val2
# val1 -ge val2 Returns true if val1 is greater than or equal to val2
# val1 -lt val2 Returns true if val1 is less than val2
# val1 -le val2 Returns true if val1 is less than or equal to val2
##########
#-------------------------------------------------
# FILE CONDITIONS
#-------------------------------------------------
#FILE="test.txt"
#if [ -f "$FILE" ]
#then
# echo "$FILE is a file"
#else
# echo "$FILE is not a file"
#fi
##########
# -d file True if the file is a directory
# -e file True if the file exists (note that this is not particularly portable, thus -f is generally used)
# -f file True if the the provided string is a file
# -g file True if the group ID is set on a file
# -r file True if the file is readable
# -s file True if the file has a non-zero size
# -u file True if the user id is set on file
# -w file True if the file is writable
# -x file True if the file is executable
##########
#-------------------------------------------------
# CASE STATEMENTS
#-------------------------------------------------
#read -p "Are you 21 or over? Y/N" ANSWER
#case "$ANSWER" in
# [yY] | [yY][eE][sS])
# echo "You can have a beer :)"
# ;;
# [nN] | [nN][oO])
# echo "Sorry, no drinking"
# ;;
# *)
# echo "Please enter y/yes or n/no"
# ;;
#esac
#-------------------------------------------------
# SIMPLE FOR LOOP
#-------------------------------------------------
#NAMES="Brad Kevin Alice Mark"
#for NAME in $NAMES
#do
#	echo "Hello $NAME"
#done
#-------------------------------------------------
# FOR LOOP TO RENAME FILES
#-------------------------------------------------
#FILES=$(ls *.txt)
#NEW="new"
#for FILE in $FILES
# do
#		echo "Renaming $FILE to $NEW-$FILE"
# mv $FILE $NEW-$FILE
#done
#-------------------------------------------------
# WHILE LOOP - READ THROUGH A FILE LINE BY LINE
#-------------------------------------------------
#LINE=1
#while read -r CURRENT_LINE
# do
#		echo "$LINE: $CURRENT_LINE"
# ((LINE++))
#done < "./new-1.txt"
#-------------------------------------------------
# FUNCTIONS
#-------------------------------------------------
#function sayhello() {
# echo "Hello World"
#}
#
#sayhello
#-------------------------------------------------
# FUNCTIONS WITH PARAMS
#-------------------------------------------------
#function greet() {
# echo "Hello, I am $1 and $2"
#}
#
#greet "Ted" "48"
#-------------------------------------------------
# CREATE FOLDER AND WRITE TO A FILE
#-------------------------------------------------
#mkdir hello
#touch "hello/world.txt"
#echo "Hello World" >> "hello/world.txt"
#echo "Created hello/world.txt"
| true
|
51bd9b308c6b96926a0768365a9da108c5709340
|
Shell
|
jmellicker/installStuff
|
/setupServer.sh
|
UTF-8
| 482
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision a fresh Ubuntu/Debian server: UTC clock, Python 2.7 (for Cloud 9),
# Node.js 8 + PM2, NGINX and htop.

# set server time to UTC
timedatectl set-timezone UTC
hwclock --systohc

# install Python 2.7 (Cloud 9 needs it)
apt-get update -y
apt-get install python2.7 -y

# install NodeJS (v8.x via the NodeSource setup script)
cd ~
curl -sL https://deb.nodesource.com/setup_8.x -o nodesource_setup.sh
sudo bash nodesource_setup.sh
sudo apt-get install nodejs
# build-essential is needed to compile native npm addons
sudo apt-get install build-essential -y

# install PM2
npm install pm2 -g

# install NGINX
apt-get install -y nginx

#install htop
apt-get install htop -y
| true
|
e3816387c86e1aceddd3a504c3474bbc152277f2
|
Shell
|
hsandt/pico-boots
|
/scripts/build_cartridge.sh
|
UTF-8
| 14,078
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configuration
# Paths to the pico-boots engine sources and helper scripts, resolved
# relative to this script's location.
picoboots_src_path="$(dirname "$0")/../src"
picoboots_scripts_path="$(dirname "$0")"
# Print the long-form description of this build script, then the usage text.
help() {
  echo "Build .p8 file from a main source file with picotool.
It may be used to build an actual game or an integration test runner.
The game file may require any scripts by its relative path from the game source root directory,
and any engine scripts by its relative path from pico-boots source directory.

If --minify-level MINIFY_LEVEL is passed with MINIFY_LEVEL >= 1,
the lua code of the output cartridge is minified using the local luamin installed via npm.

System dependencies:
- picotool (p8tool must be in PATH)

Local dependencies:
- luamin#feature/newline-separator (installed via npm install/update inside npm folder)
"
  usage
}
# Print the command-line synopsis, positional arguments and options.
usage() {
  echo "Usage: build_game.sh GAME_SRC_PATH RELATIVE_MAIN_FILEPATH [REQUIRED_RELATIVE_DIRPATH]

ARGUMENTS
  GAME_SRC_PATH                 Path to the game source root.
                                Path is relative to the current working directory.
                                All 'require's should be relative to that directory.
                                Ex: 'src'

  RELATIVE_MAIN_FILEPATH        Path to main lua file.
                                Path is relative to GAME_SRC_PATH,
                                and contains the extension '.lua'.
                                Ex: 'main.lua'

  REQUIRED_RELATIVE_DIRPATH     Optional path to directory containing files to require.
                                Path is relative to the game source directory.
                                If it is set, pre-build will add require statements for any module
                                found recursively under this directory, in the main source file.
                                This is used with itest_main.lua to inject itests via auto-registration
                                on require.
                                Do not put files containing non-PICO-8 compatible code in this folder!
                                (in particular advanced Lua and busted-specific functions meant for
                                headless unit tests)
                                Ex: 'itests'

OPTIONS
  -p, --output-path OUTPUT_PATH Path to build output directory.
                                Path is relative to the current working directory.
                                (default: '.')

  -o, --output-basename OUTPUT_BASENAME
                                Basename of the p8 file to build.
                                If CONFIG is set, '_{CONFIG}' is appended.
                                Finally, '.p8' is appended.
                                (default: 'game')

  -c, --config CONFIG           Build config. Since preprocessor symbols are passed separately,
                                this is only used to determine the intermediate and output paths.
                                If no config is passed, we assume the project has a single config
                                and we don't use intermediate sub-folder not output file suffix.
                                (default: '')

  -s, --symbols SYMBOLS_STRING  String containing symbols to define for the preprocess step
                                (parsing #if [symbol]), separated by ','.
                                Ex: -s symbol1,symbol2 ...
                                (default: no symbols defined)

  -d, --data DATA_FILEPATH      Path to data p8 file containing gfx, gff, map, sfx and music sections.
                                Path is relative to the current working directory,
                                and contains the extension '.p8'.
                                (default: '')

  -M, --metadata METADATA_FILEPATH
                                Path the file containing cartridge metadata. Title and author are added
                                manually with the options below, so in practice, it should only contain
                                the label picture for export.
                                Path is relative to the current working directory,
                                and contains the extension '.p8'.
                                (default: '')

  -t, --title TITLE             Game title to insert in the cartridge metadata header
                                (default: '')

  -a, --author AUTHOR           Author name to insert in the cartridge metadata header
                                (default: '')

  -m, --minify-level MINIFY_LEVEL
                                Minify the output cartridge __lua__ section, using newlines as separator
                                for minimum readability.
                                MINIFY_LEVEL values:
                                  0: no minification
                                  1: basic minification
                                  2: aggressive minification (minify member names and table key strings)
                                CAUTION: when using level 2, make sure to use the [\"key\"] syntax
                                for any key you need to preserve during minification (see README.md)
                                (default: 0)

  -h, --help                    Show this help message
"
}
# Default parameters
output_path='.'
output_basename='game'
config=''
symbols_string=''
data_filepath=''
metadata_filepath=''
title=''
author=''
minify_level=0
# Read arguments
# Every value-taking option needs the same "a value must follow" validation;
# it is factored into a helper instead of being repeated in each case arm.

# Abort with a usage message if a value-taking option is missing its value.
# Arguments: $1 - option name as typed on the command line
#            $2 - number of remaining arguments (caller's $#, including $1)
# Outputs:   error message on stdout (matches historical behavior)
# Exits:     1 when no value follows the option
check_option_value() {
  if [[ "$2" -lt 2 ]]; then
    echo "Missing argument for $1"
    usage
    exit 1
  fi
}

positional_args=()
while [[ $# -gt 0 ]]; do
  case $1 in
    -p | --output-path )
      check_option_value "$1" $#
      output_path="$2"
      shift 2 # past option and value
      ;;
    -o | --output-basename )
      check_option_value "$1" $#
      output_basename="$2"
      shift 2
      ;;
    -c | --config )
      check_option_value "$1" $#
      config="$2"
      shift 2
      ;;
    -s | --symbols )
      check_option_value "$1" $#
      symbols_string="$2"
      shift 2
      ;;
    -d | --data )
      check_option_value "$1" $#
      data_filepath="$2"
      shift 2
      ;;
    -M | --metadata )
      check_option_value "$1" $#
      metadata_filepath="$2"
      shift 2
      ;;
    -t | --title )
      check_option_value "$1" $#
      title="$2"
      shift 2
      ;;
    -a | --author )
      check_option_value "$1" $#
      author="$2"
      shift 2
      ;;
    -m | --minify-level )
      check_option_value "$1" $#
      minify_level="$2"
      shift 2
      ;;
    -h | --help )
      help
      exit 0
      ;;
    -* ) # unknown option
      echo "Unknown option: '$1'"
      usage
      exit 1
      ;;
    * ) # store positional argument for later
      positional_args+=("$1")
      shift # past argument
      ;;
  esac
done
# --- Validate positional arguments and derive output paths ---

# Expect: GAME_SRC_PATH RELATIVE_MAIN_FILEPATH [REQUIRED_RELATIVE_DIRPATH]
arg_count=${#positional_args[@]}
if (( arg_count < 2 || arg_count > 3 )); then
  echo "Wrong number of positional arguments: found ${#positional_args[@]}, expected 2 or 3."
  echo "Passed positional arguments: ${positional_args[@]}"
  usage
  exit 1
fi

game_src_path="${positional_args[0]}"
relative_main_filepath="${positional_args[1]}"
required_relative_dirpath="${positional_args[2]}" # optional (only set for itest builds)

# Output file name is "<basename>[_<config>].p8"
if [[ -n "$config" ]] ; then
  output_filename="${output_basename}_${config}.p8"
else
  output_filename="$output_basename.p8"
fi
output_filepath="$output_path/$output_filename"

# Split symbols string into a array by splitting on ','
# https://stackoverflow.com/questions/918886/how-do-i-split-a-string-on-a-delimiter-in-bash
IFS=',' read -ra symbols <<< "$symbols_string"

echo "Building '$game_src_path/$relative_main_filepath' -> '$output_filepath'"

# clean up any existing output file
rm -f "$output_filepath"
echo ""
echo "Pre-build..."

# Copy metadata.p8 to future output file path. When generating the .p8, p8tool will preserve the __label__ present
# at the output file path, so this is effectively a way to setup the label.
# However, title and author are lost during the process and must be manually added to the header with add_metadata.py

# Create directory for output file if it doesn't exist yet
# (the command substitution is quoted so paths containing spaces don't get word-split)
mkdir -p "$(dirname "$output_filepath")"

if [[ -n "$data_filepath" ]] ; then
  if [[ -f "$metadata_filepath" ]]; then
    cp_label_cmd="cp \"$metadata_filepath\" \"$output_filepath\""
    echo "> $cp_label_cmd"
    bash -c "$cp_label_cmd"
    if [[ $? -ne 0 ]]; then
      echo ""
      echo "Copy label step failed, STOP."
      exit 1
    fi
  fi
fi
# if config is passed, use intermediate sub-folder
intermediate_path='intermediate'
if [[ -n "$config" ]] ; then
  intermediate_path+="/$config"
fi

# create intermediate directory to prepare source copy
# (rsync can create the 'pico-boots' and 'src' sub-folders itself)
mkdir -p "$intermediate_path"

# Copy framework and game source to intermediate directory
# to apply pre-build steps without modifying the original files.
# Check the exit status of BOTH rsync calls: previously only the game-src
# copy was checked, so a failed framework copy went undetected.
rsync -rl --del "$picoboots_src_path/" "$intermediate_path/pico-boots"
if [[ $? -ne 0 ]]; then
  echo ""
  echo "Copy source to intermediate step failed, STOP."
  exit 1
fi

rsync -rl --del "$game_src_path/" "$intermediate_path/src"
if [[ $? -ne 0 ]]; then
  echo ""
  echo "Copy source to intermediate step failed, STOP."
  exit 1
fi
# Apply preprocessing directives for given symbols (separated by space, so don't surround array var with quotes)
preprocess_itest_cmd="\"$picoboots_scripts_path/preprocess.py\" \"$intermediate_path\" --symbols ${symbols[@]}"
echo "> $preprocess_itest_cmd"
if ! bash -c "$preprocess_itest_cmd"; then
  echo ""
  echo "Preprocess step failed, STOP."
  exit 1
fi
# If building an itest main, add itest require statements
if [[ -n "$required_relative_dirpath" ]] ; then
  # All three path arguments are escape-quoted so they survive re-parsing by
  # `bash -c` even when paths contain spaces. (The second argument's quotes
  # were previously unescaped, leaving that path unquoted in the command.)
  add_require_itest_cmd="\"$picoboots_scripts_path/add_require.py\" \"$intermediate_path/src/$relative_main_filepath\" \"$intermediate_path/src\" \"$required_relative_dirpath\""
  echo "> $add_require_itest_cmd"
  bash -c "$add_require_itest_cmd"
  if [[ $? -ne 0 ]]; then
    echo ""
    echo "Add require step failed, STOP."
    exit 1
  fi
fi
echo ""
echo "Build..."

# picotool uses require paths relative to the requiring scripts, so for project source we need to indicate the full path
# support both requiring game modules and pico-boots modules
lua_path="$(pwd)/$intermediate_path/src/?.lua;$(pwd)/$intermediate_path/pico-boots/?.lua"

# if passing data, add each data section to the cartridge
# (initialize to an empty string so the interpolation below never expands an
# unset variable, e.g. if this script is later run under `set -u`)
data_options=""
if [[ -n "$data_filepath" ]] ; then
  data_options="--gfx \"$data_filepath\" --gff \"$data_filepath\" --map \"$data_filepath\" --sfx \"$data_filepath\" --music \"$data_filepath\""
fi

# Build the game from the main script.
# $data_options is deliberately interpolated without escaped quotes so that
# `bash -c` splits it back into separate command-line options.
build_cmd="p8tool build --lua \"$intermediate_path/src/$relative_main_filepath\" --lua-path=\"$lua_path\" $data_options \"$output_filepath\""
echo "> $build_cmd"
if [[ "$config" == "release" ]]; then
  # We are building for release, so capture warnings mentioning
  # token count over limit.
  # (faster than running `p8tool stats` on the output file later)
  # Indeed, users should be able to play our cartridge with vanilla PICO-8.
  # Both stdout and stderr are captured so the warning scan below sees everything.
  error=$(bash -c "$build_cmd 2>&1")
  # Store exit code for fail check later
  build_exit_code="$?"
  # Now still print the error for user, this includes real errors that will fail and exit below
  # and warnings on token/character count
  >&2 echo "$error"
  # Emphasize error on token count now, with extra comments
  # regex must be stored in string, then expanded
  # it doesn't support \d
  token_regex="token count ([0-9]+)"
  if [[ "$error" =~ $token_regex ]]; then
    # Token count above 8192 was detected by p8tool
    # However, p8tool count is wrong as it ignores the latest counting rules
    # which are more flexible. So just in case, we still not fail the build and
    # only print a warning.
    # BASH_REMATCH[1] holds the first capture group of the =~ match above.
    token_count=${BASH_REMATCH[1]}
    echo "token count of $token_count detected, but p8tool counts more tokens than PICO-8, so this is only an issue beyond ~8700 tokens."
  fi
else
  # Debug build is often over limit anyway, so don't check warnings
  # (they will still be output normally)
  bash -c "$build_cmd"
  # Store exit code for fail check below (just to be uniform with 'release' case)
  build_exit_code="$?"
fi
if [[ "$build_exit_code" -ne 0 ]]; then
  echo ""
  echo "Build step failed, STOP."
  exit 1
fi
echo ""
echo "Post-build..."

if [[ "$minify_level" -gt 0 ]]; then
  # Escape-quote the script path like the preprocess step does, so a
  # pico-boots checkout path containing spaces survives `bash -c` re-parsing.
  minify_cmd="\"$picoboots_scripts_path/minify.py\" \"$output_filepath\""
  if [[ "$minify_level" -ge 2 ]]; then
    minify_cmd+=" --aggressive-minify"
  fi
  echo "> $minify_cmd"
  bash -c "$minify_cmd"
  if [[ $? -ne 0 ]]; then
    echo "Minification failed, STOP."
    exit 1
  fi
fi
if [[ -n "$title" || -n "$author" ]] ; then
  # Add metadata to cartridge
  # Since label has been setup during Prebuild, we don't need to add it with add_metadata.py anymore
  # Therefore, for the `label_filepath` argument just pass the none value "-"
  # Escape-quote the script path like the preprocess step does, so a
  # pico-boots checkout path containing spaces survives `bash -c` re-parsing.
  add_header_cmd="\"$picoboots_scripts_path/add_metadata.py\" \"$output_filepath\" \"-\" \"$title\" \"$author\""
  echo "> $add_header_cmd"
  bash -c "$add_header_cmd"
  if [[ $? -ne 0 ]]; then
    echo ""
    echo "Add metadata failed, STOP."
    exit 1
  fi
fi

echo ""
echo "Build succeeded: '$output_filepath'"
| true
|
53f0b32b0f757f28700720e230676e204a7f684c
|
Shell
|
NanteRuth/Mating-type-discovery-workflow
|
/change_id.pbs
|
UTF-8
| 948
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -l nodes=1:ppn=14
#PBS -l walltime=80:00:00
#PBS -q long
#PBS -o /nlustre/users/nante/dothistroma/masters/mapping_genomes/mating_types/changed_index_fasta
#PBS -e /nlustre/users/nante/dothistroma/masters/mapping_genomes/mating_types/changed_index_fasta
#PBS -m abe
#PBS -M ruth.nante@fabi.up.ac.za
#PBS -N change_SRR
# Purpose: for each strain listed in the info sheet, copy its fasta file to the
# output directory while appending a running index suffix (_0, _1, ...) to every
# token ending in ']' — presumably sequence identifiers (TODO confirm against
# the actual fasta headers).
cd $PBS_O_WORKDIR
# Tokens ending with this character receive an index suffix
sub=']'
# Input fasta directory and output directory
dir="/nlustre/users/nante/dothistroma/masters/mapping_genomes/All_fasta_files/"
dir2=/nlustre/users/nante/dothistroma/masters/mapping_genomes/mating_types/changed_index_fasta/
# Info sheet columns: Strain Mean SD (Mean and SD are read but not used here)
while read Strain Mean SD;
do
# Strip the fastq suffix to recover the strain name used for the fasta file
base=$(basename $Strain "_R1.fastq.gz")
# Ensure the output file exists.
# NOTE(review): output below uses '>>', so re-running this job appends a second
# copy of every record instead of overwriting — confirm that is intended.
touch $dir2${base}.fasta
# Running index appended to matching tokens
x=0
File=$dir${base}.fasta
# NOTE(review): $(cat $File) word-splits on ALL whitespace, so each
# whitespace-separated token (not each input line) is processed and written on
# its own output line; the unquoted expansion also glob-expands tokens
# containing * or ?. Verify this matches the structure of these fasta files.
lines=$(cat $File)
for line in $lines
do
if [[ "$line" == *"$sub" ]]
then
# Append _<x> to tokens ending in ']' and bump the counter
N="_${x}"
line="$line$N"
#let "x=x+1"
x=$((x+1))
fi
#echo $line
echo $line >> $dir2$base.fasta
done
done < /nlustre/users/nante/dothistroma/masters/mapping_genomes/Assembly_all_masurca/Ruth_dothistroma_info_sheet.txt
| true
|
f0fc41aff28b494725f4857d543e24b8a1c83e9c
|
Shell
|
andy12838729/tf-gcp-gke-config-module
|
/postbuildscripts/5_build_tb_base/5b-new_manual_builder.sh
|
UTF-8
| 1,759
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap the cicd/ssp namespaces on a GKE cluster: namespaces, config map,
# service-account secrets, storage classes, and a token secret usable by Jenkins.
# All kubectl API traffic is routed through the local proxy
export HTTPS_PROXY="localhost:3128"
# Resolve the directory containing this script; yaml/json inputs live next to it
MYSELF="$(realpath "$0")"
MYDIR="${MYSELF%/*}"
# creates namespaces cicd / ssp
kubectl apply -f $MYDIR/namespaces.yaml
# create config map
kubectl apply -f $MYDIR/configmap.yaml
# Install the same service-account config secret in both namespaces
kubectl create secret generic ec-service-account -n cicd --from-file=$MYDIR/ec-service-account-config.json
kubectl create secret generic ec-service-account -n ssp --from-file=$MYDIR/ec-service-account-config.json
# set basic auth
#kubectl create secret generic dac-user-pass -n cicd --from-literal=username=dac --from-literal=password='bad_password' --type=kubernetes.io/basic-auth
#kubectl create secret generic dac-user-pass -n ssp --from-literal=username=dac --from-literal=password='bad_password' --type=kubernetes.io/basic-auth
# point to folder
#kubectl create secret generic gcr-folder -n cicd --from-literal=folder=940339059902
# deploy apps
kubectl apply -f $MYDIR/storageclasses.yaml
#kubectl apply -f $MYDIR/jenkins-master.yaml
# kubectl --namespace istio-system get service istio-private-ingressgateway
# ==== Create K8s SA for jenkins ====
echo "---- Create K8s SA for jenkins ----"
# Extract the token-secret name from the service account's describe output.
# NOTE(review): parsing `kubectl describe` text is fragile (column positions and
# the 'FNR == 3' line index below assume a fixed output layout) — consider
# `kubectl get ... -o jsonpath` instead; confirm against the cluster version.
tokenId=$(kubectl describe serviceaccount cicd-service-account -n=cicd | grep Token | awk '{print $2}')
echo "tokenId: $tokenId"
token=$(kubectl describe secret $tokenId --namespace=cicd | grep token | awk 'FNR == 3 {print $2}')
echo "token: $token"
# The token is briefly written to disk so it can be loaded via --from-file,
# then the file is removed below (note: the token is also echoed above).
echo $token > $MYDIR/cicd-service-account-token.txt
kubectl create secret generic cicd-service-account-token -n cicd --from-file=$MYDIR/cicd-service-account-token.txt
rm $MYDIR/cicd-service-account-token.txt
# Grant cluster-admin-level 'admin' role to the cicd service account
kubectl create clusterrolebinding cicd-role-binding --clusterrole=admin --serviceaccount cicd:cicd-service-account
#kubectl delete secret generic cicd-service-account-token
| true
|
54d381162f3e50d5b791baeccb32e11427714a6f
|
Shell
|
Shuttles/8.Linux
|
/6.Project/1.服务器集群监控系统/1.Scripts/996.my_Script/3.Disk.sh
|
UTF-8
| 492
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit disk-usage monitoring lines: "<timestamp> 1 <mount> <totalMB> <freeMB> <used%>"
# for non-tmpfs filesystems, sampled by the loop below.
Time=`date +"%Y:%m:%d__%H:%M:%S"`
DiskSum=0
DiskLeft=0
# NOTE(review): the loop body recomputes identical values on both iterations,
# so two identical lines are printed — confirm whether two samples were intended.
for (( i = 0; i < 2; i++ )); do
# NOTE(review): with `df -T` the columns are
# Filesystem Type 1M-blocks Used Avail Use% Mounted-on, so $2 is the *type*
# column, not the size; also `printf` without a newline concatenates the values
# of ALL filesystems into one string when more than one row survives the
# filters. Verify these indices against the target system's df output.
DiskSum=`df -T -m -x tmpfs -x devtmpfs | tail -n +2 | awk '{printf $2}'`
DiskLeft=`df -T -m -x tmpfs -x devtmpfs | tail -n +2 | awk '{printf $4}'`
# NOTE(review): in awk, "%d%%"$5 concatenates $5 into the printf FORMAT string
# (no argument list), which does not print the percentage as presumably
# intended — likely meant `printf "%s", $5` or similar; confirm.
DiskUsed=`df -T -m -x tmpfs -x devtmpfs | tail -n +2 | awk '{printf "%d%%"$5}'`
DiskMount=`df -T -m -x tmpfs -x devtmpfs | tail -n +2 | awk '{printf $6}'`
echo "${Time} 1 ${DiskMount} ${DiskSum} ${DiskLeft} ${DiskUsed}"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.