blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f15210b28fc6d0d0cfc0c37b15dff8b638f63586
|
Shell
|
davidoae/CLIfairly
|
/map_files/map_functions/pallet_town_functions.sh
|
UTF-8
| 1,014
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Map logic for Pallet Town. FIX: the original shebang was "#!bin/bash"
# (missing the leading slash), which fails unless a ./bin/bash exists.
# Expects globals newx/newy (the tile the character is about to step on)
# and the helpers change_conf_value, get_new_map_info_set_starting_pos
# and stop, all defined elsewhere in the game scripts.

# Decide what happens when the character moves onto tile (newx, newy).
map_function_conditions(){
  # map swaps - always have to come before stops
  # enter leftmost house: door tiles (7,9) and (8,9)
  if [[ "$newy" -eq 9 && ( "$newx" -eq 7 || "$newx" -eq 8 ) ]]; then
    change_conf_value "character_files/character.cfg" "current_map_char_is_on" 2
    get_new_map_info_set_starting_pos 6 4
  # go to route blahblah: top edge, x in 21..24
  elif [[ "$newy" -eq 1 && "$newx" -ge 21 && "$newx" -le 24 ]]; then
    change_conf_value "character_files/character.cfg" "current_map_char_is_on" 3
    get_new_map_info_set_starting_pos 16 12
  fi
  # stop being able to walk through the rightmost house (roof and walls)
  if [[ "$newy" -eq 6 && "$newx" -ge 8 && "$newx" -le 13 ]]; then
    stop
  elif [[ "$newy" -eq 7 && "$newx" -ge 7 && "$newx" -le 14 ]]; then
    stop
  elif [[ "$newy" -ge 8 && "$newy" -le 9 && "$newx" -ge 6 && "$newx" -le 15 ]]; then
    stop
  fi
}
# Handle an interaction at the character's current tile (globals: x, y).
interaction(){
  # signpost tile sits at (31, 9)
  if [ "$y" -eq 9 ] && [ "$x" -eq 31 ]; then
    echo "Sign post: \"Welcome to pallet town!\""
  fi
}
| true
|
8744ab53388865c8a0a04f9e370ed58bec676a83
|
Shell
|
unRob/dotfiles
|
/.milpa/commands/computar/provision/dependencies.sh
|
UTF-8
| 3,462
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision this machine's dependencies: Xcode CLT (macOS), homebrew,
# Brewfile bundles, vscode extensions, iTerm preferences, and asdf tools.
# Relies on milpa's @milpa.* helpers plus DOTFILES_PATH and COMPUTAR_PROFILE
# being set in the environment — assumed provided by milpa; TODO confirm.
@milpa.load_util user-input
base="$DOTFILES_PATH/brewfiles/"
brewfile="$HOME/.Brewfile"
# Detect the OS; $os selects which Brewfile fragment gets merged below.
case "$(uname -s)" in
Darwin)
os="macos"
# Command Line Tools must exist before homebrew can build anything.
if ! xcode-select --version >/dev/null; then
@milpa.log "Installing Command Line Tools (CLT) for Xcode, click on the thing!"
xcode-select --install
@milpa.confirm "Make sure CLT are installed, then"
fi
;;
Linux) os="linux" ;;
esac
if ! command -v brew >/dev/null; then
@milpa.log info "Installing homebrew..."
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" || @milpa.fail "could not install homebrew"
@milpa.log info "------------------------------------------------"
@milpa.log success "homebrew installed"
fi
# Concatenate a header, the per-OS Brewfile and the per-profile Brewfile
# into the user's ~/.Brewfile.
echo "# Automatically generated by milpa computar provision dependencies" | cat - "$base/$os.Brewfile" "$base/$COMPUTAR_PROFILE.Brewfile" > "$brewfile" || @milpa.fail "Could not create profile Brewfile at $brewfile"
@milpa.log info "Ensuring brew dependencies are installed"
if ! brew bundle check --file "$brewfile"; then
@milpa.log info "Installing brew dependencies"
brew bundle install --no-lock --file "$brewfile" || @milpa.fail "Could not install dependencies"
@milpa.log success "Brew dependencies installed"
else
@milpa.log success "Brew dependencies up to date"
fi
@milpa.log info "Installing vscode extensions"
# Install each extension listed in vscode.extensions, skipping installed ones.
while read -r extension; do
if code --list-extensions 2>/dev/null | grep -m1 "^${extension}\$" >/dev/null; then
@milpa.log success "extension $extension already installed"
continue
fi
code --install-extension "$extension" || @milpa.fail "Could not install vscode extension $extension"
@milpa.log success "Installed extension $extension"
done < <(grep -v '^#' "${DOTFILES_PATH}/vscode.extensions")
if [[ "$os" == "macos" ]]; then
# Point iTerm2 at the dotfiles prefs folder, then restart the prefs daemon.
if [[ "$(defaults read com.googlecode.iterm2 PrefsCustomFolder)" != "$DOTFILES_PATH" ]]; then
@milpa.log "Configuring iTerm"
defaults write com.googlecode.iterm2 PrefsCustomFolder "$DOTFILES_PATH"
killall cfprefsd
@milpa.log success "iterm preference folder configured"
fi
fi
if [[ ! -d "${HOME}/.asdf" ]]; then
@milpa.log info "Installing asdf version manager..."
git clone https://github.com/asdf-vm/asdf.git "${HOME}/.asdf" --branch v0.8.0 || @milpa.fail "Could not clone asdf-vm"
@milpa.log success "Installed asdf-vm"
# shellcheck disable=1091
source "$HOME/.asdf/asdf.sh"
else
@milpa.log success "asdf installed"
fi
@milpa.log info "installing tools from .tool-versions..."
# First pass: make sure an asdf plugin exists for every tool listed.
while read -r plugin; do
if asdf plugin list | grep -m1 "$plugin" >/dev/null; then
@milpa.log success "asdf plugin for $plugin already installed"
continue
fi
@milpa.log info "Installing $plugin asdf plugin..."
asdf plugin-add "$plugin" || @milpa.fail "Could not install asdf plugin $plugin"
done < <(cut -d ' ' -f 1 "${HOME}/.tool-versions")
# shellcheck disable=2164
cd "$HOME";
# Second pass: install each tool at its pinned version.
while read -r plugin version; do
if asdf list "$plugin" | grep -m1 "^\s*${version}\$" >/dev/null; then
@milpa.log success "asdf: $plugin version $version is already installed"
continue
fi
@milpa.log info "Installing $plugin version $version..."
asdf install "$plugin" "$version" || @milpa.fail "Could not install $plugin version $version"
@milpa.log success "$plugin version $version installed"
done <"${HOME}/.tool-versions"
@milpa.log complete "Computar has dependencies provisioned!"
| true
|
2e6f98d9a084b3435e49ee6577231e13b2e29cc5
|
Shell
|
shmuel4/mqtt-controler
|
/runme.sh
|
UTF-8
| 216
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# B.H.
#
# (Re)start this project's Docker container. The container/image name is
# derived from the directory this script lives in.
# FIXES: backticks replaced with $(...); all expansions quoted so paths
# with spaces work; cd failure aborts instead of running docker elsewhere.
script=$(readlink -f "$0")
dir=$(dirname "$script")
project=$(basename "$dir")
cd "$dir" || exit 1
# Best effort: stop/rm fail harmlessly when no container exists yet.
docker stop "$project"
docker rm "$project"
docker run -d --restart always \
	-p 8091:8091 --name "$project" "$project"
| true
|
9cd3aee80247762035468e5529918d14552c3c7d
|
Shell
|
mrmiywj/SimpleDB
|
/CSE444-lab6/src/bin/startSimpleDB.sh
|
UTF-8
| 6,302
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Resolve the directory holding this script (following symlink chains)
# and print the SimpleDB root, i.e. that directory's parent ("<bin>/..").
# Side effect kept from the original: assigns the global "script".
function setSimpleDBRoot
{
  local src
  local listing
  local target
  local bindir
  src="${BASH_SOURCE-$0}"
  # Walk the symlink chain until we reach the real file.
  while [ -h "$src" ]; do
    listing=$(ls -ld "$src")
    target=$(expr "$listing" : '.*-> \(.*\)$')
    if expr "$target" : '.*/.*' > /dev/null; then
      src="$target"
    else
      src=$(dirname "$src")/"$target"
    fi
  done
  # convert relative path to absolute path
  bindir=$(dirname "$src")
  script=$(basename "$src")
  bindir=$(cd "$bindir"; pwd)
  echo "$bindir/.."
}
# True when running on a Linux kernel.
function isLinux
{
  [ "$(uname)" = "Linux" ]
}
# True when running on macOS (Darwin kernel).
function isMac
{
  [ "$(uname)" = "Darwin" ]
}
# True when running under Cygwin (uname begins with "CYGWIN").
# Note: assigns the global "os" as a side effect, kept for compatibility.
function isCygwin
{
  os="$(uname)"
  [ "${os:0:6}" = "CYGWIN" ]
}
# True when an X server is usable (probed by running a no-op xterm).
# Cygwin is treated as headless unconditionally.
function xEnabled
{
  if isCygwin
  then
    # FIX: "return false" is invalid ("false" is not numeric and raises a
    # shell error); return a plain non-zero status instead.
    return 1
  fi
  # FIX: the original "2>&1 > /dev/null" sent stderr to the terminal and
  # only stdout to /dev/null; discard both streams.
  (xterm -e "") > /dev/null 2>&1
}
#--------------------------------------------init--------------------------------------
# Entry point: split catalog data across workers, rsync code+data to each
# worker host, start a Worker per host:port (in terminals when X works),
# then exec the Server in this process.
SIMPLEDB_ROOT=${SIMPLEDB_ROOT="$(setSimpleDBRoot)"}
osName="$(uname)"
if [ "$osName" = Darwin ] || [ "${osName:0:6}" = CYGWIN ] || [ "$osName" = Linux ]
#if [ "$osName" = Darwin ] || [ "$osName" = Linux ]
then
true
else
echo "Unsupported OS. Currently only Linux, Mac and Windows with \
Cygwin are supported"
#echo "Unsupported OS. Currently only Linux and Mac OS \
#are supported"
exit;
fi
if [ $# -lt 1 ]
then
echo "Usage: ./startSimpleDB.sh catalogFile [-explain] [-f queryFile]"
exit 1
fi
# Absolutize the catalog path (dirname is resolved via cd;pwd).
catalogFile=$(cd $(dirname $1);pwd)/$(basename $1)
shift
if isCygwin
then
catalogFile="$(cygpath --windows $catalogFile)"
fi
CLASSPATH_SEPARATOR=":"
# Parse conf/workers.conf ("host:port" per line) and conf/server.conf,
# stripping whitespace, comments and blank lines. The GNU/BSD sed flag
# split below assumes Mac sed lacks \+ — hence the -E variants.
if isLinux || isCygwin
then
workerHosts=$(cat "$SIMPLEDB_ROOT/conf/workers.conf" | sed 's/[ \t]\+//g' | \
sed 's/#.*$//g' | sed '/^$/d' | sed 's/:[0-9]\+$//g' | sort | uniq)
workers=$(cat "$SIMPLEDB_ROOT/conf/workers.conf" | sed 's/[ \t]\+//g' | \
sed 's/#.*$//g' | sed '/^$/d')
serverAddr=$(cat "$SIMPLEDB_ROOT/conf/server.conf" | sed 's/[ \t]\+//g' | \
sed 's/#.*$//g' | sed '/^$/d')
if isCygwin
then
CLASSPATH_SEPARATOR=";"
fi
elif isMac
then
workerHosts=$(cat "$SIMPLEDB_ROOT/conf/workers.conf" | sed -E 's/[ ]+//g' | \
sed -E 's/#.*$//g' | sed -E '/^$/d' | sed -E 's/:[0-9]+//g' | sort | uniq)
workers=$(cat "$SIMPLEDB_ROOT/conf/workers.conf" | sed -E 's/[ ]+//g' | \
sed -E 's/#.*$//g' | sed -E '/^$/d')
serverAddr=$(cat "$SIMPLEDB_ROOT/conf/server.conf" | sed -E 's/[ ]+//g' | \
sed -E 's/#.*$//g' | sed -E '/^$/d')
fi
#--------------------------------------------data splitting--------------------------------------
echo "Start splitting data files to worker partitions"
echo "catalogFile is : $catalogFile"
#HeapFileSplitter should store the splitted data files to SIMPLEDB_ROOT/data
#The number of splits equals to the number of workers
#For each worker host:port
#The data for this worker lies in SIMPLEDB_ROOT/data/host_port
#The filename of the catalog is fixed as catalog.schema
cd "$SIMPLEDB_ROOT"; java -classpath "bin/src${CLASSPATH_SEPARATOR}lib/*" \
simpledb.HeapFileSplitter $catalogFile
chmod -R u+rw,g+rw,o+rw data
chmod -R u+rw,g+rw,o+rw lib
chmod -R u+rw,g+rw,o+rw conf
chmod -R u+rw,g+rw,o+rw bin
#--------------------------------------------sync workers--------------------------------------
echo "Start copying simpledb files to workers"
for host in $workerHosts
do
echo "Copying to $host"
# NOTE(review): java_home is macOS-only; assumes workers are Macs — confirm.
ssh $host "export JAVA_HOME=`/usr/libexec/java_home -v '1.6*'`; java -version"
ssh $host "mkdir -p /tmp/simpledb/data;mkdir /tmp/simpledb/bin;mkdir /tmp/simpledb/lib;mkdir /tmp/simpledb/conf"
rsync -a "$SIMPLEDB_ROOT/bin/" $host:/tmp/simpledb/bin
rsync -a "$SIMPLEDB_ROOT/lib/" $host:/tmp/simpledb/lib
rsync -a "$SIMPLEDB_ROOT/conf/" $host:/tmp/simpledb/conf
echo "Done"
done
echo "Finish copying simpledb files"
#--------------------------------------------sync data--------------------------------------
echo "Starting copying data files to workers"
for worker in $workers
do
# Split "host:port" into its two halves (sed syntax differs on Mac).
if isMac
then
host=$(echo $worker | sed -E 's/:[0-9]+$//g')
port=$(echo $worker | sed -E 's/^[^:]+://g')
else
host=$(echo $worker | sed 's/:[0-9]\+$//g')
port=$(echo $worker | sed 's/^[^:]\+://g')
fi
echo "Copying to $host:$port"
ssh $host "mkdir -p /tmp/simpledb/data/$port"
rsync -a "$SIMPLEDB_ROOT/data/${host}_${port}/" $host:/tmp/simpledb/data/$port
echo "Done"
done
echo "Finish copying data files"
#--------------------------------------------start workers--------------------------------------
echo "Starting workers"
terminal=xterm
titleOption=-title
if ! [ -z "$(which gnome-terminal)" ]
then
terminal=gnome-terminal
titleOption=-t
#elif ! [ -z "$(which konsole)" ]
#then
# terminal=konsole
fi
for worker in $workers
do
if isMac
then
host=$(echo $worker | sed -E 's/:[0-9]+$//g')
port=$(echo $worker | sed -E 's/^[^:]+://g')
else
host=$(echo $worker | sed 's/:[0-9]\+$//g')
port=$(echo $worker | sed 's/^[^:]\+://g')
fi
if isLinux || isCygwin
then
# Headless: run the worker over ssh in the background, prefixing each
# output line with "host:port$ " so streams stay distinguishable.
if ! xEnabled
then
(exec ssh $host "cd /tmp/simpledb; java -version; java -classpath \
\"bin/src${CLASSPATH_SEPARATOR}lib/*\" simpledb.parallel.Worker ${host}:$port $serverAddr" 2>&1 | \
sed "s/^/$host:$port\\$ /g" ) &
else
# X available: open each worker in its own terminal window.
${terminal} ${titleOption} "Worker: $host:$port. Do not close this window when SimpleDB is running." -e \
"bash -c \"ssh $host \\\"cd /tmp/simpledb; java -classpath \
\\\\\\\"bin/src${CLASSPATH_SEPARATOR}lib/*\\\\\\\" \
simpledb.parallel.Worker ${host}:$port $serverAddr\\\" | \
sed \\\"s/^/$host:$port\\\\$ /g\\\" \" " &
fi
else
#mac
# Drive Terminal.app via AppleScript; the echo sets the window title.
osascript -e "tell app \"Terminal\"
do script \"echo -e \\\"\\\\033]0;Worker: $host:$port. Do not close this window when SimpleDB is running.\\\\007\\\"; ssh $host \\\"cd /tmp/simpledb; export JAVA_HOME=`/usr/libexec/java_home -v '1.6*'`; java -version;java -classpath \\\\\\\"bin/src:lib/*\\\\\\\" simpledb.parallel.Worker ${host}:$port $serverAddr \\\" \"
end tell"
fi
done
#outputStyle="X"
#if ! xEnabled
#then
# outputStyle="T"
#fi
javaOptions=
if isCygwin
then
javaOptions=-Djline.terminal=jline.UnixTerminal
fi
#--------------------------------------------start server--------------------------------------
echo "Finish starting workers, now starting the server"
cd "$SIMPLEDB_ROOT"
# Replace this shell with the server; remaining CLI args are forwarded.
exec java $javaOptions -classpath "bin/src${CLASSPATH_SEPARATOR}lib/*" simpledb.parallel.Server $catalogFile $*
| true
|
1474d577a0e464d660d560ee1e35e6482d985c9d
|
Shell
|
mcarifio/sshfs
|
/sshfs.sh
|
UTF-8
| 1,068
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Mike Carifio <mike@carif.io>
# TODO mike@carif.io: figure out afuse to make this script unnecessary
# exit on error
# set -e
# Print the option summary.
# FIX: the original used "echo <<EOF" — echo ignores stdin, so the
# here-document was silently discarded and usage printed a blank line.
function usage {
cat <<EOF
-v verbose
-h help
-? help
-d mount all mount points in a directory
EOF
}
# Placeholder for emitting a bash completion spec (still "to be specified").
# NOTE(review): this shadows the bash builtin "complete" — confirm intended.
function complete {
  printf '%s\n' "bash completion spec tbs"
}
# No-op EXIT-trap handler; kept as an extension point for future cleanup.
function on_exit {
:
}
# Report an error message on stderr and terminate the script with status 1.
function on_error {
  echo $* >&2
  exit 1
}
trap on_exit EXIT
here=$(dirname "${BASH_SOURCE}")
me=$(basename "${BASH_SOURCE}")
# read default values (optional <script>.defaults alongside the script)
[[ -r $0.defaults ]] && source "$0.defaults" || :
# parse arguments
OPTIND=1
let verbose=0
let usage=0
mounts=${here}/mnt
while getopts "h?v-d:" opt; do
case "$opt" in
h|\?)
usage
;;
v) let verbose=1
;;
# FIX: an option's argument is $OPTARG, not $1 — "-d dir" previously
# ignored the directory and always used the default.
d) mounts=$OPTARG
;;
*) shift ${OPTIND}
on_error "'$1' is not a valid argument"
esac
done
shift $((OPTIND-1))
[[ "$1" = "--" ]] && shift
if [[ -d $mounts ]] ; then
# Mount every entry under $mounts: each subdirectory's name is used as
# the ssh host (alias) and mounted onto that subdirectory.
for d in "${mounts}"/*; do sshfs "$(basename "${d}"):" "${d}" || echo "$d already mounted?" ; done
else
on_error "'${mounts}' is not a directory."
fi
| true
|
399d3d3ea476263cab379b7804e5358912285657
|
Shell
|
fulong/wechat_work_webhook
|
/tools/auxiliary_completion.bash
|
UTF-8
| 770
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Enable bash tab-completion for the auxiliary tool's arguments. This script
# is run by the "install" command and can also be executed manually.
_auxiliary()
{
COMPREPLY=()
local cur=${COMP_WORDS[COMP_CWORD]};
local cmd=${COMP_WORDS[COMP_CWORD-1]};
first_cmd="build install debug redebug project"
case $cmd in
	'./linux_cmake_template_auxiliary.sh')
		COMPREPLY=( $(compgen -W '${first_cmd[@]}' -- $cur) ) ;;
	'build')
		local pro="distclean clean all rebuild"
		COMPREPLY=( $(compgen -W '${pro[@]}' -- $cur) ) ;;
	'project')
		local pro="release version changelog commit"
		COMPREPLY=( $(compgen -W '${pro[@]}' -- $cur) )
		;;
	# FIX: the original default arm was written '*' (quoted), which only
	# matches a literal asterisk; an unquoted * is the catch-all pattern.
	*)
		;;
esac
return 0
}
complete -F _auxiliary ./linux_cmake_template_auxiliary.sh
| true
|
10772419f7dd5437d49ecb824c8b7ab6996dd21b
|
Shell
|
pkerichang/hammer
|
/src/tools/get-makefile-vars
|
UTF-8
| 1,692
| 4.375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# get-makefile-vars
#
# Copyright 2017 Edward Wang <edward.c.wang@compdigitec.com>
#
# Get the expanded variable definitions from a Makefile.
#
# Usage:
# make <...> -pn | get-makefile-vars -
set -e
# The file containing the make -pn results ("-" means read stdin).
if [[ $1 == "-" ]]; then
in_data=/dev/stdin
else
in_data=$1
fi
# Commands adapted from https://stackoverflow.com/q/7117978
tmpfile=$(mktemp)
# Get the list of variables.
# This will output something like:
# VAR1 := foo
# VAR2 = $(VAR1) bar
# FIX: read from "$in_data" — the original used $1 here, so the "-"/stdin
# convention advertised in the usage line never actually worked.
grep -A1 "^# makefile" "$in_data" | grep -v "^#\|^--" | sort -u > "$tmpfile"
# Remove MAKEFILE_LIST and MAKEFLAGS to prevent the previous Make run
# from interfering.
sed -i "s/^MAKEFILE_LIST.*//g" "$tmpfile"
sed -i "s/^MAKEFLAGS.*//g" "$tmpfile"
# Re-run the list of variables and expand all definitions.
# Filter .VARIABLES to exclude variables not defined in this Makefile.
# e.g. the above example would have VAR2 = foo bar
tmpfile2=$(mktemp)
# Add a dummy target for Make to chew on and suppress an unwanted message.
echo '.PHONY: DUMMY_RULE' >> "$tmpfile2"
echo 'DUMMY_RULE:' >> "$tmpfile2"
echo -e "\t@echo > /dev/null" >> "$tmpfile2"
cat >> "$tmpfile2" <<"EOF"
VARS_OLD := $(.VARIABLES)
# Escape any rogue '%' characters, or filter_out will to match it and weird
# things can happen.
# e.g. if %D is present in .VARIABLES, then any variable ending in the letter D
# will be filtered out without warning (!!!)
VARS_OLD := $(subst %,\%,$(VARS_OLD))
EOF
cat "$tmpfile" >> "$tmpfile2"
cat >> "$tmpfile2" <<"EOF"
$(foreach v, \
$(filter-out $(VARS_OLD) VARS_OLD,$(.VARIABLES)), \
$(info $(v) = $($(v))))
EOF
make -f "$tmpfile2" DUMMY_RULE
# Clean up temporaries.
rm "$tmpfile"
rm "$tmpfile2"
| true
|
aa2ff14e578882432318adca4bf0db9031109785
|
Shell
|
euginetheninth/cs2150Labs
|
/lab07/postLab/averagetime.sh
|
UTF-8
| 577
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Derek Johnson dej3tc
# 10/31/19
# Prompt the user for a number between 1 and 9, feed it to the timer
# program (counter.cpp) five times, then report total and average run time.
read -e -p "Input a number between 1 and 9: " number
quit="quit"
# FIX: quoted POSIX "=" comparison — the old unquoted [ $number == $quit ]
# errored out on empty input or input containing spaces.
if [ "$number" = "$quit" ]
then
	exit
fi
clang++ counter.cpp
i=1
RUNNING_TIME=0
# Run 5 timed iterations; each ./a.out run prints its time on its last line.
while [ "$i" -lt 6 ]; do
	echo "Running iteration " $i"..."
	RUN=$(./a.out "$number" | tail -1)
	RUNNING_TIME=$((RUNNING_TIME+RUN))
	echo "time taken: "$RUN "ms"
	i=$((i+1))
done
AVE=$((RUNNING_TIME / 5))
echo "5 iterations took" $RUNNING_TIME "ms"
echo "Average time was" $AVE "ms"
| true
|
7ab2a65823e1020dcf847bbe910f7d3e16e9a904
|
Shell
|
joshdorsey/temperature-monitor
|
/log.sh
|
UTF-8
| 1,162
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Append the current CPU package temperature to a log and periodically
# regenerate the history graphs. getTime comes from the sourced time.sh.
pushd ~htpc/temperature > /dev/null
. time.sh
# Log Data
declare -r LOGFILE=temperature.log
# Pull the "Package id 0" reading out of lm-sensors' raw (-u) output.
# NOTE(review): "-d\ " escapes a space as cut's delimiter — fragile; confirm
# the escaped-space survives editing.
temp=$(sensors -u | grep -A 1 'Package id 0:' | grep _input | cut -s -d\ -f 4)
time=$(getTime)
if [ ! -r $LOGFILE ]; then
echo "Couldn't find temperature log, creating it..."
echo "Time, Temperature" > $LOGFILE
fi
echo "${time}, ${temp}" >> $LOGFILE
# Build Graphs
# Graphs are rebuilt when graphs/updated is older than GRAPH_UPDATE_RATE
# seconds (or missing).
declare -r GRAPH_UPDATE_RATE=4
needToUpdate=''
if [ ! -r graphs/updated ]; then
needToUpdate='Yes'
fi
# NOTE(review): when graphs/updated is missing, this date -r fails and the
# age arithmetic below errors (needToUpdate is already 'Yes' then) — confirm
# the noise is acceptable.
lastModified=$(date +%s -r graphs/updated)
age=$(("$(date +%s)" - "$lastModified"))
if [ "$age" -gt "$GRAPH_UPDATE_RATE" ]; then
needToUpdate='Yes'
fi
if [ ! -z "$needToUpdate" ]; then
now=$(getTime)
fiveMinutes=$(getTime -d '5 minutes ago')
thirtyMinutes=$(getTime -d '30 minutes ago')
oneHour=$(getTime -d '1 hour ago')
threeHours=$(getTime -d '3 hours ago')
touch graphs/updated
./graph.sh "$fiveMinutes" "$now" graphs/last-five-minutes.png
./graph.sh "$thirtyMinutes" "$now" graphs/last-thirty-minutes.png
./graph.sh "$oneHour" "$now" graphs/last-hour.png
./graph.sh "$threeHours" "$now" graphs/last-three-hours.png
fi
popd > /dev/null
| true
|
70d14a93b14e9aa1f6dfd29e3ff01f5d3cc3191d
|
Shell
|
frennkie/dotfiles
|
/zshrc
|
UTF-8
| 1,910
| 3.171875
| 3
|
[] |
no_license
|
# zsh startup configuration: prompt, completion, helper functions, PATH.
export ZSH=~/.zsh
# Source git zshrc prompt style
source ~/dotfiles/zsh/lib/zsh-git-prompt/zshrc.sh
# Load all of the config files in ~/oh-my-zsh that end in .zsh
for config_file ($ZSH/lib/*.zsh) source $config_file
# Load and run compinit
autoload -U compinit
compinit -i
# this is a fix for vi in OS X
#alias vim="stty stop '' -ixoff; vim"
# this adds an extra line break before every new prompt
# NOTE(review): this precmd is redefined below whenever TERM != linux,
# so the extra blank line is lost in that (common) case — confirm intended.
precmd() { print "" }
export TERM='screen-256color'
if test "$TERM" != linux; then
# show current command, directory, and user in terminal title
precmd() { print -Pn "\e]2;$0 (%~) %n@%m\a" 2>/dev/null }
preexec() { print -Pn "\e]2;$1 (%~) %n@%m\a" 2>/dev/null }
fi
# generate random passwd (32 chars)
genpasswd() {
openssl rand -base64 32
}
# generate a 4-word passphrase from the bundled dictionary
genpassphrase() {
# echo $(grep "^[^']\{3,5\}$" /usr/share/dict/words|shuf -n5)
echo $(grep "^[^']\{1,10\}$" ~/dotfiles/all.dic|shuf -n4)
}
# purge all dpkg packages left in the "rc" (removed, config present) state
dpkgclean() {
dpkg --list |grep "^rc" | cut -d " " -f 3 | xargs sudo dpkg --purge
}
# avoid most common annoying correction:
alias sudo='nocorrect sudo'
# SSH/GPG Agent
export GPG_TTY="$(tty)"
export SSH_AUTH_SOCK="/run/user/$UID/gnupg/S.gpg-agent.ssh"
# golang stuff
# NOTE(review): GOPATH/GOBIN are exported twice below, and this first PATH
# line appends $GOPATH (the source tree) rather than $GOBIN — the later
# "$PATH:$GOBIN" line looks like the intended one; confirm and deduplicate.
export GOPATH="$HOME/work/go" # add GOPATH (golang)
export GOBIN="$GOPATH/bin" # add GOBIN (binary)
export PATH="$PATH:$GOPATH" # add GOPATH to PATH
export PATH="$PATH:$HOME/local/bin/" # add tmux dir
alias tmux="TERM=xterm-256color $HOME/local/bin/tmux"
export GOPATH="$HOME/work/go" # add GOPATH (golang)
export GOBIN="$GOPATH/bin" # add GOBIN (binary)
export PATH="$PATH:$GOBIN" # add go bin dir
# The next line updates PATH for the Google Cloud SDK.
if [ -f '/tmp/google-cloud-sdk/path.zsh.inc' ]; then . '/tmp/google-cloud-sdk/path.zsh.inc'; fi
# The next line enables shell command completion for gcloud.
if [ -f '/tmp/google-cloud-sdk/completion.zsh.inc' ]; then . '/tmp/google-cloud-sdk/completion.zsh.inc'; fi
| true
|
98db6a3732a8b5d097d88e7b937e83b1010cd8ac
|
Shell
|
softbaseas/oracle7-deploy
|
/includes/main.sh
|
UTF-8
| 2,425
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive provisioning menu for an Oracle 7 deployment: each choice
# delegates to a helper script under ./includes/.
# setVars.sh presumably defines getFQDN/getIP and the fqdn/ip1/externalIP/
# internalIP/subdomain variables used below — TODO confirm.
source ./includes/setVars.sh
while true; do
echo "";
echo "Choose one of the following possibilities:"
echo " 0 - Stop webnm"
echo " 1 - Setup network"
echo " 2 - Change hosts files"
echo " 3 - Change formsweb"
echo " 4 - Change Oracle virtual hosts"
echo " 5 - Deploy ORDS (prerequisite: ORDS already configured)"
echo " 6 - Generate SSL certificates using Lets Encrypt"
echo " 7 - Setup Lets Encrypt automatic renewal"
echo " 8 - Create Apache (httpd) virtual hosts"
echo " 9 - Create nodemanager Service"
#echo " a - Start webnm" # Removed because it has to be started from web first time
echo " a - Start admin server using Nodemanager"
#echo " full - Full Configuration"
echo " q - Exit"
read -p "Choice: " choice
# Each arm lazily resolves fqdn/ip1 before invoking its helper script.
case $choice in
[0]* ) # Stop webm service
echo "Stopping. Can take several minutes..."
systemctl stop webnm
systemctl status webnm;;
[1]* ) # Setup network
if [ -z "$fqdn" ]; then getFQDN; fi
if [ -z "$ip1" ]; then getIP; fi
./includes/linux/setup_network.sh $fqdn $externalIP $internalIP;;
[2]* ) # Setup /etc/hosts
if [ -z "$fqdn" ]; then getFQDN; fi
if [ -z "$ip1" ]; then getIP; fi
./includes/linux/setup_hosts.sh $fqdn $subdomain $externalIP;;
[3]* ) # Configure formsweb
if [ -z "$fqdn" ]; then getFQDN; fi
./includes/oracle/formsweb.sh $fqdn;;
[4]* ) # Change Oracle vhosts
if [ -z "$fqdn" ]; then getFQDN; fi
./includes/oracle/vhosts.sh $fqdn;;
[5]* ) # configure ords
./includes/oracle/ords.sh;;
[6]* ) # Generate SSL Certificates using Lets Encrypt
if [ -z "$fqdn" ]; then getFQDN; fi
./includes/linux/letsencrypt.sh $fqdn;;
[7]* ) # Enable Lets Encrypt Automatic Renewal
./includes/linux/le-autorenew.sh;;
[8]* ) # Create httpd vHosts
if [ -z "$fqdn" ]; then getFQDN; fi
./includes/linux/apache_add_vhost.sh $fqdn;;
[9]* ) # create nodemanager service
./includes/oracle/nodemanager_service.sh;;
#[a]* ) # Start webnm
# echo "Starting. Can take several minutes..."
# systemctl start webnm
# systemctl status webnm;;
[a]* ) # Start adminserver using nodemanager
./includes/oracle/nodemanager.sh ;;
[q]* ) echo "Exiting"; break;;
esac
done
| true
|
e3546fb7657e138bd57e5f8105a37f27d812859f
|
Shell
|
jianzuoyi/Chicken
|
/20.processing_for_gatk/Bat_merge.sh
|
UTF-8
| 385
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#set -vex
# Batch-command generator: for every sample directory listed in
# all.dir.fofn, PRINT (not execute) a command that concatenates the
# sample's paired fastq files into merge_data/<sample>/.
OUTDIR=/its1/GB_BT2/jianzuoyi/projects/Chicken/20.processing_for_gatk/merge_data
while read dir
do
# Sample name taken from path component 10 when split on '/' —
# presumably matches this project's directory layout; TODO confirm.
sm=$(echo $dir | awk -F '/' '{print $10}')
# echo $sm
out_dir=${OUTDIR}/${sm}
read1=${sm}_1.fq.gz
read2=${sm}_2.fq.gz
echo "mkdir -p $out_dir;cd $dir; cat *_1.fq.gz > ${out_dir}/$read1;cat *_2.fq.gz > ${out_dir}/$read2"
done < all.dir.fofn
| true
|
736717ececfac553109de34903d7e6815b3696cd
|
Shell
|
animuxOS/Genesis64
|
/usr/share/kino/scripts/dvdauthor/qdvdauthor.sh
|
UTF-8
| 382
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# A Kino script that invokes qdvdauthor on a generated dvdauthor xml file.

# Describe this script to Kino: a title line plus whether qdvdauthor is
# on PATH ("Status: Active" / "Status: Inactive").
usage()
{
	echo "Title: Open in 'Q' DVD-Author"
	if which qdvdauthor > /dev/null
	then
		echo Status: Active
	else
		echo Status: Inactive
	fi
}

# Launch qdvdauthor in the background on the generated xml file ($1).
# The second argument (output) is part of the Kino interface but unused.
execute()
{
	xml="$1"
	output="$2"
	qdvdauthor -a -d "$xml" &
}

# Kino queries metadata with "--usage" (or no argument), otherwise runs us.
[ "$1" = "--usage" ] || [ -z "$1" ] && usage "$@" || execute "$@"
| true
|
9aff5a61d277ca075c3f7bdc7da35804cee67883
|
Shell
|
victoriza/docker-kafka-zookeeper
|
/assets/scripts/kafka_standalone.sh
|
UTF-8
| 693
| 2.59375
| 3
|
[] |
no_license
|
#install java
# Provision a standalone Kafka broker: install a JRE, download the Kafka
# tarball into /opt, then run the broker in the foreground.
# NOTE(review): apt-get is called without sudo — assumes root (container
# build step, presumably); confirm.
apt-get update
apt-get install openjdk-8-jre -y
#update-alternatives --config java
#note that default broker.id = 0
# ZOOKEEPER_VERSION is exported but not referenced in this script —
# presumably consumed by a companion zookeeper script; TODO confirm.
export ZOOKEEPER_VERSION=3.4.11
export SCALA_VERSION=2.12
export KAFKA_VERSION=1.0.0
export KAFKA_HOME=/opt/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION"
export KAFKA_DOWNLOAD_URL=https://archive.apache.org/dist/kafka/"$KAFKA_VERSION"/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION".tgz
wget -q $KAFKA_DOWNLOAD_URL -O /tmp/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION".tgz
tar xfz /tmp/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION".tgz -C /opt && rm /tmp/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION".tgz
# Run Kafka
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
| true
|
ad116b2d020687509de65e8937aa0949af015524
|
Shell
|
jpiscionere/BOSS-Clustering
|
/Scripts/calculate_wp_nojack.sh
|
UTF-8
| 537
| 2.546875
| 3
|
[] |
no_license
|
#! /bin/bash
# Compute wp (projected correlation function, presumably) per bin from the
# pair-count outputs, normalising by each bin's number density nden[i].
#nden[1]=0.0002569
#nden[2]=0.0003243
#nden[3]=0.0002327
#nden[4]=0.0000942
#nden[5]=0.0001848
nden[1]=0.0002494737166611
nden[2]=0.0003150319712696
nden[3]=0.0002238406773599
nden[4]=0.0000912964784524
nden[5]=0.0001847687542961
i=1
for bin in bin1 bin2 bin3 bin4 bin_all
do
nden1=${nden[$i]}
echo $nden1
# Side-by-side paste of the four pair-count files so awk can combine columns.
paste ${bin}_DsDi_nojack.out ${bin}_DsRi_nojack.out ${bin}_RsDi_nojack.out ${bin}_RsRi_nojack.out >tmp
# wp = col2/col5 - col8/col11; the shell splices $nden1 into the awk
# program (the quote breaks '...'$nden1'...' are deliberate).
awk '{wp=$2/$5-$8/$11; {print $1,wp,wp/'$nden1'}}'<tmp >wp_${bin}_nojack.out
i=`expr $i + 1`
done
| true
|
25ae0887837d26927f3ffc369fc6606e5a71a7d0
|
Shell
|
hufh/osaris
|
/modules/unstable_coh_metric/UCM-batch.sh
|
UTF-8
| 1,735
| 3.3125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# OSARIS Unstable Coherence Metric batch step: clip the high-coherence grid,
# cut both grids to a common extent, subtract, clip negatives to NaN, and
# append a timing/status line to the PP-UCM-stats report.
# Args: 1=work path, 2=output path, 3=corr grid, 4=high-corr grid,
#       5=high-corr threshold, 6=GMT boundary box (-R), 7=swath number.
start=`date +%s`
echo; echo "Starting UCM processing ..."; echo
UCM_work_PATH=$1
UCM_output_PATH=$2
corr_file=$3
high_corr_file=$4
high_corr_threshold=$5
boundary_box=$6
swath=$7
cd $UCM_work_PATH/input
echo; echo "Grdinfo high corr file:"
gmt grdinfo $high_corr_file
echo "Extracting high coherence areas (threshold: $high_corr_threshold)"
gmt grdclip $high_corr_file -GHC_$high_corr_file -R$boundary_box -V -Sb$high_corr_threshold/NaN;
echo "Now working on:"; echo "Corr file: $corr_file"; echo "High corr file: $high_corr_file"
echo "Cutting files to same extent ..."
gmt grdcut $corr_file -G$UCM_work_PATH/cut_files/$corr_file -R$boundary_box -V
gmt grdcut HC_$high_corr_file -G$UCM_work_PATH/cut_files/HC_$high_corr_file -R$boundary_box -V
# cut2same_extent
echo; echo "Processing Unstable Coherence Metric ..."
cd $UCM_work_PATH/cut_files
# Output name is built from date substrings embedded in the input filenames.
UCM_file="${high_corr_file:5:8}-${high_corr_file:15:8}---${corr_file:5:8}-${corr_file:15:8}_F${swath}-UCM.grd"
# NOTE(review): the echoed command strings below use $work_PATH and
# $output_PATH, which are never set in this script, so the log lines do not
# match the commands actually run ($UCM_work_PATH/$UCM_output_PATH) — confirm.
echo "gmt grdmath $high_corr_file $corr_file SUB -V1 = $work_PATH/UCM/temp/$UCM_file"
gmt grdmath HC_$high_corr_file $corr_file SUB -V1 = $UCM_work_PATH/temp/$UCM_file
cd $UCM_work_PATH/temp
echo "gmt grdclip $UCM_file -G$output_PATH/UCM/$UCM_file -Sb0/NaN"
gmt grdclip $UCM_file -G$UCM_output_PATH/$UCM_file -Sb0/NaN
echo; echo
if [ -f $UCM_output_PATH/$UCM_file ]; then status_UCM=1; else status_UCM=0; fi
end=`date +%s`
runtime=$((end-start))
# NOTE(review): $output_PATH here is also unset in this script — presumably
# exported by the calling OSARIS environment; TODO confirm.
echo "${high_corr_file:7:8}-${high_corr_file:30:8} ${corr_file:7:8}-${corr_file:30:8} $SLURM_JOB_ID $runtime $status_UCM" >> $output_PATH/Reports/PP-UCM-stats.tmp
printf 'Processing finished in %02dd %02dh:%02dm:%02ds\n' $(($runtime/86400)) $(($runtime%86400/3600)) $(($runtime%3600/60)) $(($runtime%60))
| true
|
1ebfdec38dfe764577d115a8c3210c51030a20cc
|
Shell
|
mamihackl/MalletDT
|
/build_dt.sh
|
UTF-8
| 296
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Train a decision tree (DTlearner.py) and classify with it
# (DTclassifier.py), recording the wall-clock time to a time.* log file.
# Args (positional): 1=train data, 2=test data, 3/4=learner params used in
# the filename suffix, 5=model file prefix, 6=system-output prefix —
# inferred from usage below; TODO confirm.
ul='_'
ext=$3$ul$4
# NOTE(review): $acc is never assigned in this script, so accfile is just
# "$ext" unless acc is exported by the caller — looks like a bug; confirm.
accfile=$acc$ext
modelfile=$5$ext
sysfile=$6$ext
timelog='time.'
START=$SECONDS
./DTlearner.py $1 $3 $4 $modelfile
./DTclassifier.py $1 $2 $modelfile $sysfile
END=$SECONDS
DIFF=$(($END-$START))
# NOTE(review): the doubled '' are empty strings — the message itself is
# unquoted; presumably plain double quotes were intended.
echo ''$accfile: Processing time $DIFF seconds'' > $timelog$accfile
| true
|
6c94d9dcafc17a16b1c205b8f9ab13c42fefb42b
|
Shell
|
zalando/skipper
|
/skptesting/profile-proxy.sh
|
UTF-8
| 494
| 2.765625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Profile skipper as a proxy: start a static backend and a pprof-enabled
# skipper in front of it, run the benchmark, then clean up.
# log/lorem/skp/skp-pprof/bench/cleanup are presumably defined in the
# sourced benchmark.inc — TODO confirm.
if [ "$1" == -help ]; then
# NOTE(review): "log" is used here before benchmark.inc is sourced below,
# so the -help branch only works if log is inherited — confirm.
log profile-proxy.sh [duration] [connections]
exit 0
fi
source $GOPATH/src/github.com/zalando/skipper/skptesting/benchmark.inc
trap cleanup SIGINT
log [generating content]
lorem
log [content generated]
log; log [starting servers]
# ngx nginx-static.conf
skp :9980 static.eskip
skp-pprof :9090 proxy.eskip
log [servers started, wait 1 sec]
sleep 1
log; log [profiling skipper]
bench :9090
log [profiling skipper done]
cleanup
log; log [all done]
| true
|
3d868c1f6363341fac702ad2df47e7c79b0de799
|
Shell
|
computezrmle/boinc-scripts
|
/vbox/tty2monitor/dump_atlas_logs
|
UTF-8
| 1,656
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ATLAS logs don't grant read access to other accounts but atlas
# Hence we need to dump the logs to a location where the monitoring user can read them
source_location="/home/atlas/RunAtlas"
target_location="/home/montty2/RunAtlas"
main_log_name="log.EVNTtoHITS"
athena_log_name="AthenaMP.log"
athena_workers_dir="athenaMP-workers-EVNTtoHITS-sim"
# trigger 1: until main log exists
while :
do
main_log="$(find -L ${source_location} -name "${main_log_name}")"
[[ "${main_log}" ]] && break
# check all 17 s (we are not in a hurry)
sleep 17
done
# tail complete file starting at line 1
# Background mirror of the main log into the monitoring user's directory.
tail -f -n +1 ${main_log} >${target_location}/${main_log_name} 2>/dev/null &
# check if ATLAS is running singlecore or multicore
# wait until maxEvents appears in main_log
# --pid ${$} ties the inner tail's lifetime to this script's own PID;
# grep -m 1 returns as soon as the marker line shows up.
grep -E -m 1 -s "^.*maxEvents =[^0-9]*[0-9]+" <(tail -F -n +1 --pid ${$} ${target_location}/${main_log_name} 2>/dev/null) 2>/dev/null
# it's a multicore if ATHENA_PROC_NUMBER is in the log before maxEvents
# it's a singlecore if ATHENA_PROC_NUMBER is missing
pattrn="^.*ATHENA_PROC_NUMBER set to[^0-9]*"
# sed "0,/…/": only the first match is transformed/printed (-n + p).
n_workers="$(sed -e "0,/${pattrn}/ s/${pattrn}\([0-9]\+\).*/\1/p" -n ${target_location}/${main_log_name})"
[[ ! "${n_workers}" ]] && n_workers="1"
# singlecore logs to main_log
# a tail for main_log is already running
# now start the tails for multicore
if (( n_workers > 1 ))
then
# One background tail per AthenaMP worker log, mirrored per-worker.
for (( i=0; i<n_workers; ++i ))
do
mkdir -p ${target_location}/worker_${i}
tail -F -n +1 $(dirname ${main_log})/${athena_workers_dir}/worker_${i}/${athena_log_name} >${target_location}/worker_${i}/${athena_log_name} 2>/dev/null &
done
fi
| true
|
5e276f23b72e6648560e6eabe0b096015822a97b
|
Shell
|
landrylaboratory/Gene_duplication_2019
|
/scripts_for_simulations/006_gather_simulations.sh
|
UTF-8
| 2,638
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script will go to a folder with the output of the simulations and gather the fixed and proposed mutations in tables
# in a subfolder called "Final_results"
# $1 = path to the directory with results from simulations
# $2 = number of replicates in that simulation
# FIX: the original first line was "##!/bin/bash" — a plain comment, not a
# shebang — so the interpreter depended on the caller's shell.
# Abort if the results directory can't be entered; everything below writes
# relative paths.
cd "$1" || exit 1
num_reps=$2
mkdir -p Final_results
# Gather the fixed mutations for all replicates: keep the header row from
# replicate 1 and append every replicate's data rows in numeric order.
cat <(head -n 1 1/all_deltaGs_results_heterodimer_subs.tab) <(ls */all_deltaGs_results_heterodimer_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_heterodimer_all_reps.tab
cat <(head -n 1 1/all_deltaGs_results_homodimer_A_subs.tab) <(ls */all_deltaGs_results_homodimer_A_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_homodimer_A_all_reps.tab
cat <(head -n 1 1/all_deltaGs_results_homodimer_B_subs.tab) <(ls */all_deltaGs_results_homodimer_B_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_homodimer_B_all_reps.tab
# Gather the data for proposed mutations for each replicate:
# the original deltaGs plus line 2 (sed '2q;d') of every proposal's table.
for replicate in $(seq 1 "${num_reps}")
do
cd "$replicate" || exit 1
cat <(cat ../original_deltaGs*) <(ls */heterodimer/deltaGs.tab | sort -g | xargs -I {} sed '2q;d' {}) > all_deltaGs_proposed_heterodimer.txt
cat <(cat ../original_deltaGs*) <(ls */homodimer_A/deltaGs.tab | sort -g | xargs -I {} sed '2q;d' {}) > all_deltaGs_proposed_homodimer_A.txt
cat <(cat ../original_deltaGs*) <(ls */homodimer_B/deltaGs.tab | sort -g | xargs -I {} sed '2q;d' {}) > all_deltaGs_proposed_homodimer_B.txt
paste all_deltaGs_proposed_heterodimer.txt all_substitutions_heterodimer_header.tab > all_deltaGs_proposed_heterodimer_subs.tab
paste all_deltaGs_proposed_homodimer_A.txt all_substitutions_homodimer_A_header.tab > all_deltaGs_proposed_homodimer_A_subs.tab
paste all_deltaGs_proposed_homodimer_B.txt all_substitutions_homodimer_B_header.tab > all_deltaGs_proposed_homodimer_B_subs.tab
cd ..
done
# Gather the proposed mutations for all replicates
cat <(head -n 1 1/all_deltaGs_proposed_heterodimer_subs.tab) <(ls */all_deltaGs_proposed_heterodimer_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_proposed_heterodimer_all_reps.tab
cat <(head -n 1 1/all_deltaGs_proposed_homodimer_A_subs.tab) <(ls */all_deltaGs_proposed_homodimer_A_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_proposed_homodimer_A_all_reps.tab
cat <(head -n 1 1/all_deltaGs_proposed_homodimer_B_subs.tab) <(ls */all_deltaGs_proposed_homodimer_B_subs.tab | sort -g | xargs -I {} tail -n +2 {}) > Final_results/all_deltaGs_proposed_homodimer_B_all_reps.tab
| true
|
1b7cfc613324fd4054ec8958ce42964fad2d5bbc
|
Shell
|
jormao/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/100-read_and_cut
|
UTF-8
| 234
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bash script that displays the content of the file /etc/passwd.
# script should only display:
# username
# user id
# Home directory path for the user
# FIX: the original ran cut on the loop's shared stdin, so "read" swallowed
# the first line of /etc/passwd (it was never displayed) and cut consumed
# the rest in one shot. Feed each line that read captured to cut instead.
while IFS= read -r line
do
	echo "$line" | cut -d ":" -f 1,3,6
done < /etc/passwd
| true
|
3f9c2c86becc1f7e92ee2be12d231668e5df35d0
|
Shell
|
dameyerdave/sshutils
|
/sufw
|
UTF-8
| 4,899
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# require daemon
# require tcptunnel on remotehost
# Print the command-line help text to stdout and abort the whole script
# with exit status 1. Invoked for missing or unrecognised arguments and
# from the default case arm of the action dispatcher below.
function usage {
echo "USAGE: $(basename $0) start|stop|status -h host -l localport -r remoteport [-u user] [-n name] [-k key] [-t]"
echo ""
echo " <connection name>: <localhost>:<localport> o==o <user>@<remotehost>:<remoteport> o== <tcptunnel>:<remoteport> "
echo ""
echo " -h host: the destination host to connect to"
echo " -l localport: the port on the local host to use for the ssh tunnel"
echo " -r remoteport: the remote port to use for the ssh tunnel"
echo " -u user: the user to authenticate at the destination host"
echo " -n name: the name of the ssh tunnel"
echo " -k key: the key to authenticate at the destination host"
echo " -t: open tcptunnel on remote host"
# Abort: callers never continue past usage.
exit 1
}
# Parse one "ps -ef" line of a running tunnel daemon
# ("daemon -n NAME ... ssh -R RPORT:localhost:LPORT -i KEY USER@HOST -N")
# and print the extracted settings joined by '#':
#   name#user#host#keyfile#localport#remoteport
function parse_daemon {
    local line="$1"
    local name user host key lport rport
    name=$(sed -E 's,.*-n ([^ ]+) .*,\1,' <<< "$line")
    user=$(sed -E 's,.* ([^@]+)@.*,\1,' <<< "$line")
    host=$(sed -E 's,.*@([^ ]+) .*,\1,' <<< "$line")
    key=$(sed -E 's,.*-i ([^ ]+) .*,\1,' <<< "$line")
    lport=$(sed -E 's,.*-R [^:]+:[^:]+:([^ ]+).*,\1,' <<< "$line")
    rport=$(sed -E 's,.*-R ([^:]+):.*,\1,' <<< "$line")
    printf '%s#%s#%s#%s#%s#%s\n' "$name" "$user" "$host" "$key" "$lport" "$rport"
}
USER="$(whoami)"
ACTION="$1"
OWNNAME=''
OWNKEY=''
TCPTUNNEL=0
IP=''
shift
if [ "${ACTION}" == "" ]; then
usage
fi
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-u)
USER="$2"
shift 2
;;
-h)
HOST="$2"
if [[ "$HOST" =~ [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ ]]; then
IP=$HOST
else
IP=$(host ${HOST} | cut -d' ' -f4)
fi
shift 2
;;
-l)
LPORT="$2"
shift 2
;;
-r)
RPORT="$2"
shift 2
;;
-n)
OWNNAME="$2"
shift 2
;;
-k)
OWNKEY="$2"
shift 2
;;
-t)
TCPTUNNEL=1
shift 1
;;
*)
usage
;;
esac
done
if [ "${OWNKEY}" == "" ]; then
KEYFILE=~/.ssh/${USER}
else
KEYFILE=~/.ssh/${OWNKEY}
fi
if [ ! -f $KEYFILE -a "${ACTION}" == "start" ]; then
echo "Keyfile ${KEYFILE} does not exist!"
echo "\tUse suinit to create one."
exit 1
fi
if [ "${OWNNAME}" == "" -a "${ACTION}" != "status" ]; then
if [ "${LPORT}" != "" -a "${RPORT}" != "" -a "${HOST}" != "" ]; then
NAME="${LPORT}_${HOST}_${RPORT}"
else
echo "Some parameters are missing!"
echo "Give at least -h host -l localport -r remoteport OR -n name"
exit 1
fi
else
NAME="${OWNNAME}"
fi
case $ACTION in
start)
if [ "${LPORT}" != "" -a "${RPORT}" != "" -a "${HOST}" != "" ]; then
if ssh -R ${RPORT}:localhost:${LPORT} -i ${KEYFILE} ${USER}@${HOST} 'exit'; then
daemon -n "${NAME}" -U -r -X "ssh -R ${RPORT}:localhost:${LPORT} -i ${KEYFILE} ${USER}@${HOST} -N"
until daemon --running -n "${NAME}"; do
sleep 1
done
echo "Started SSH tunnel '${NAME}': localhost:${LPORT} <- ${USER}@${HOST}:${RPORT}."
if [ ${TCPTUNNEL} -eq 1 ]; then
if [ "${IP}" == "" ]; then
echo "Cannot resolv hostname ${HOST}!"
exit 1
fi
if ! ssh -i ${KEYFILE} ${USER}@${HOST} "ps -ef | grep 'tcptunnel --local-port=${RPORT}' | grep -v grep" 2>&1 >/dev/null; then
if ! ssh -i ${KEYFILE} ${USER}@${HOST} 'test -x "$(command -v tcptunnel)"'; then
echo "Cannot start tcptunnel because its not installed on the remote host!"
exit 1
fi
ssh -i ${KEYFILE} ${USER}@${HOST} "nohup tcptunnel --local-port=${RPORT} --remote-port=${RPORT} --remote-host=localhost --bind-address=${IP} --fork --stay-alive >/dev/null 2>/dev/null </dev/null &"
echo "Started tcp tunnel on remote host."
else
echo "Tcp tunnel on remote host already running."
fi
fi
else
echo "Error creating SSH tunnel!"
fi
else
echo "Some parameters are missing!"
echo "Give at least -h host -l localport -r remoteport"
exit 1
fi
;;
stop)
if daemon --running -n "${NAME}"; then
DAEMON=$(ps -ef | grep "daemon -n ${NAME}" | grep -v grep)
SPEC=$(parse_daemon "${DAEMON}")
KEYFILE=$(echo "$SPEC" | cut -d'#' -f4)
USER=$(echo "$SPEC" | cut -d'#' -f2)
HOST=$(echo "$SPEC" | cut -d'#' -f3)
RPORT=$(echo "$SPEC" | cut -d'#' -f6)
if ssh -i ${KEYFILE} ${USER}@${HOST} "ps -ef | grep 'tcptunnel --local-port=${RPORT}' | grep -v grep" 2>&1 >/dev/null; then
TCPTUNPID=$(ssh -i ${KEYFILE} ${USER}@${HOST} "ps -ef | grep 'tcptunnel --local-port=${RPORT}' | grep -v grep | awk '{print \$2}'")
ssh -i ${KEYFILE} ${USER}@${HOST} "kill -9 ${TCPTUNPID}"
echo "Stoped tcptunnel on remote host"
fi
daemon --stop -n "${NAME}"
until ! daemon --running -n "${NAME}"; do
sleep 1
done
echo "Stoped SSH tunnel '${NAME}'"
else
echo "SSH tunnel '${NAME}' not found!"
exit 1
fi
;;
status)
ps -ef | grep "daemon -n" | grep -v grep | while read line
do
echo $(parse_daemon "$line" | cut -d'#' -f1)
done
;;
*)
usage
;;
esac
exit 0
| true
|
aeec88a50abfac812d03b10626c9d5d7f02ce489
|
Shell
|
dawxoje/Scripting
|
/Bash/B3_I10_Colección_Scripts_1/sc16.sh
|
UTF-8
| 480
| 3.375
| 3
|
[] |
no_license
|
#! /usr/bin/sh
# Ask the user for a Spanish postal code and classify it:
#   280[0-4][0-9] | 2805[0-4]  -> city of Madrid
#   28xxx                      -> province of Madrid
#   00000-52999                -> Spain
#   anything else              -> not a Spanish postal code
# Fix: `echo "...\c"` is not portable (bash prints the characters
# literally); printf is the portable way to prompt without a newline.
printf "Introduzca un código postal: "
# -r keeps backslashes in the typed input literal.
read -r cp
case "$cp" in
    280[0-4][0-9] | 2805[0-4] )
        echo "$cp es un código postal de Madrid"
        ;;
    28[0-9][0-9][0-9] )
        echo "$cp es un código postal de la provincia de Madrid"
        ;;
    [0-4][0-9][0-9][0-9][0-9] | 5[0-2][0-9][0-9][0-9] )
        echo "$cp es un código postal de España"
        ;;
    *)
        echo "$cp no es un código postal de España"
        ;;
esac
| true
|
39613d678886c61fc465ab4ace391da5ad76aa97
|
Shell
|
sophia-jihye/coursera-ml
|
/sophia.sh
|
UTF-8
| 129
| 2.796875
| 3
|
[] |
no_license
|
# Stage, commit and push everything in one step.
# Usage: ./sophia.sh "commit message"
if [ -z "$1" ]; then
    # Report the problem on stderr, not stdout.
    # NOTE(review): this exits with status 0; change to `exit 1` if callers
    # need a failure code for a missing argument.
    echo "ERROR: One parameter is required. [COMMIT MESSAGE]" >&2
    exit
fi
git add .
# Fix: quote "$1" so commit messages containing spaces are passed as a
# single argument (unquoted, only the first word reached git).
git commit -m "$1"
git push
| true
|
548dee73d75dfa7c2e5b373aa0c373a665ced758
|
Shell
|
twotigers0608/test_python
|
/jenkins_android/clear_BAT.sh
|
UTF-8
| 6,566
| 3.515625
| 4
|
[] |
no_license
|
#/bin/sh
# Clear BAT script
failed_cve=
function tc_boot_first_boot ()
{
dmesg > dmesg.log
}
function tc_generics_check_kernel_warning () {
dmesg | grep -i warning | wc -l
}
function tc_generics_check_kernel_version () {
cat /proc/version
}
function tc_generics_kernel_cmdline () {
cat /proc/cmdline
}
function tc_generics_partitions () {
df |tee df.txt
}
function tc_generics_mount () {
mount |tee mount.txt
}
function tc_generics_cpuinfo () {
cat /proc/cpuinfo > cpuinfo.txt
}
function tc_wifi_driver_loaded() {
dmesg |grep network |grep "network logging started"
}
function tc_wlan_enable() {
phy=$(rfkill list |grep phy |cut -c 1,1)
rfkill unblock $phy
ifconfig wlan0 up
iw wlan0 scan
}
function tc_bluetooth_enable() {
hci=$(rfkill list |grep hci |cut -c 1,1)
rfkill unblock $hci
hciconfig up
}
function tc_kernel_config_check() {
kv=$(uname -a | cut -d " " -f 3)
export http_proxy=http://child-prc.intel.com:913
export https_proxy=http://child-prc.intel.com:913
git clone https://github.com/clearlinux/kernel-config-checker.git
cd kernel-config-checker
python setup.py build
python setup.py install
cd ../
zcat /proc/config.gz | kcc >kccr-$kv.txt
cat kccr-$kv.txt | grep "is not set but is required to be set to y" || cat kccr-$kv.txt | grep "is set but is required to be not set"
}
function tc_spectre_meltdown_check() {
swupd bundle-add binutils\
lz4\
c-basic\
gzip\
xz\
sysadmin-basic\
os-core\
binutils
git clone https://github.com/speed47/spectre-meltdown-checker
cd spectre-meltdown-checker
git am 0001-kernel_decompress-continue-to-try-other-decompress-t.patch
cd ../
bash spectre-meltdown-checker/spectre-meltdown-checker.sh -v > spectre-meltdown-check.log
failed_cve=$(cat spectre-meltdown-check.log | grep 'SUMMARY'|awk -F ' ' '{for(i=1; i<=NF; i++) {print $i}}' | grep 'KO')
}
# run tests
if [ $# = 1 ]
then
$1
else
echo " running full BAT takes approximately 1.5 minutes
firstboot
check kernel version
check cpu info
check kernel cmdline
partitions
mounted file systems
dmesg kernel warnings
check wifi and firmware loaded
enable wifi
enable bluetooth
"
echo
echo
# LAVAFy results log
touch clear_bat.json
> clear_bat.json
echo -e "{" >> clear_bat.json
tc_boot_first_boot
ret=$?
echo -n ......first boot test...
TESTCASE='tc-boot-first_boot'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_check_kernel_version
ret=$?
echo -n ......kernel version test...
TESTCASE='tc-generics-kernel_version'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_cpuinfo
ret=$?
echo -n ......cpuinfo test...
TESTCASE='tc-generics-cpuinfo'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_kernel_cmdline
ret=$?
echo -n ......check kernel cmdline test...
TESTCASE='tc-generics-kernel_cmdline'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_partitions
ret=$?
echo -n ......check partition test...
TESTCASE='tc-generics-partitions'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_mount
ret=$?
echo -n .........check mounts test...
TESTCASE='tc-generics-mounts'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_generics_check_kernel_warning
ret=$?
echo -n .........check kernel warnings test...
TESTCASE='tc-generics-check_kernel_warning'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_wifi_driver_loaded
ret=$?
echo -n .........check wifi driver and firmware loaded...
TESTCASE='tc_wifi_driver_loaded'
if [ $ret == 0 ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
tc_spectre_meltdown_check
ret=$?
echo -n .........check spectre and meltdown...
TESTCASE='tc_spectre_meltdown_check'
if [ $ret == 0 ]
then
if [ -z "$failed_cve" ]
then
echo PASS
RESULT='pass'
else
echo FAILED!
echo $failed_cve
RESULT='fail'
fi
else
echo FAILED!
RESULT='fail'
fi
echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
echo
echo
# tc_wlan_enable
# ret=$?
# echo -n .........enable wlan...
# TESTCASE='tc_wlan_enable'
# if [ $ret == 0 ]
# then
# echo PASS
# RESULT='pass'
# else
# echo FAILED!
# RESULT='fail'
# fi
# echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
# echo
# echo
# tc_bluetooth_enable
# ret=$?
# echo -n .........enable bluetooth...
# TESTCASE='tc_bluetooth_enable'
# if [ $ret == 0 ]
# then
# echo PASS
# RESULT='pass'
# else
# echo FAILED!
# RESULT='fail'
# fi
# echo -e "\"$TESTCASE\": [\"$RESULT\"]" >> clear_bat.json
#
#
# echo -e "}" >> clear_bat.json
# tc_kernel_config_check
# ret=$?
# echo -n .........check kernel config...
# TESTCASE='tc_kernel_config_check'
# if [ $ret == 0 ]
# then
# echo FAILED!
# RESULT='fail'
# else
# echo PASS
# RESULT='pass'
# fi
# echo -e "\"$TESTCASE\": [\"$RESULT\"]," >> clear_bat.json
# echo
# echo
fi
| true
|
99cfa8d266089c553e66af9c02592eb0c6d7cb89
|
Shell
|
esa-shine/shine-testbed
|
/shaka-packager/segments-encrypt-contents.v2.sh
|
UTF-8
| 2,527
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
echo -e "\n script per creare contenuti cifrati DASH-enabled usando shaka-packager. Prende in ingresso la durata di ogni segmento ed il nome del file mp4 da segmentare e cifrare."
# NOME_VIDEO_EXTENDED=$1
SEGMENT_DURATION=$1
for i in $( ls /shaka_packager/media/mp4); do
#rimuovo l'estensione dal file..
NOME_VIDEO=$(echo $i | cut -f 1 -d ".")
#echo "$NOME_VIDEO"
#controllo se e' stato inserito anche il parametro per la durata di segmenti; altrimenti, di default, il packager crea contenuti con durata di segmenti 10s.
if [ -z "$SEGMENT_DURATION" ]
then
echo "non e' stata inserita la durata di segmenti! Di default 10 s. Creo i contenuti cifrati..."
packager input=/shaka_packager/media/mp4/$NOME_VIDEO.mp4,stream=audio,output=/storagedash-s2/$NOME_VIDEO-Audio.mp4 input=/shaka_packager/media/mp4/$NOME_VIDEO.mp4,stream=video,output=/storagedash-s2/$NOME_VIDEO-Video.mp4 --profile on-demand --enable_widevine_encryption --key_server_url "https://license.uat.widevine.com/cenc/getcontentkey/widevine_test" --content_id "3031323334353637" --signer "widevine_test" --aes_signing_key "1ae8ccd0e7985cc0b6203a55855a1034afc252980e970ca90e5202689f947ab9" --aes_signing_iv "d58ce954203b7c9a9a9d467f59839249" --mpd_output mpd-s2/$NOME_VIDEO.mpd
echo -e "Contenuto $NOME_VIDEO.mp4 cifrato correttamente! Le tracce audio/video cifrate sono $NOME_VIDEO-Audio.mp4 e $NOME_VIDEO-Video.mp4. L'mpd da mettere nel frontend e' $NOME_VIDEO.mpd"
else
echo "e' stata inserita la durata di segmenti= $SEGMENT_DURATION. Creo i contenuti cifrati..."
packager input=/shaka_packager/media/mp4/$NOME_VIDEO.mp4,stream=audio,output=/storagedash-s2/$NOME_VIDEO-Audio-$SEGMENT_DURATION-s.mp4 input=/shaka_packager/media/mp4/$NOME_VIDEO.mp4,stream=video,output=/storagedash-s2/$NOME_VIDEO-Video-$SEGMENT_DURATION-s.mp4 --profile on-demand --enable_widevine_encryption --segment_duration $SEGMENT_DURATION --fragment_duration $SEGMENT_DURATION --key_server_url "https://license.uat.widevine.com/cenc/getcontentkey/widevine_test" --content_id "3031323334353637" --signer "widevine_test" --aes_signing_key "1ae8ccd0e7985cc0b6203a55855a1034afc252980e970ca90e5202689f947ab9" --aes_signing_iv "d58ce954203b7c9a9a9d467f59839249" --mpd_output mpd-s2/$NOME_VIDEO-$SEGMENT_DURATION-s.mpd
echo -e "\n Contenuto $NOME_VIDEO.mp4 cifrato correttamente! Le tracce audio/video cifrate sono $NOME_VIDEO-Audio-$SEGMENT_DURATION-s.mp4 e $NOME_VIDEO-Video-$SEGMENT_DURATION-s.mp4. L'mpd da mettere nel frontend e' $NOME_VIDEO-$SEGMENT_DURATION-s.mpd"
fi
#echo "creo e segmento il file $NOME_VIDEO.mp4 con durata di segmento= $SEGMENT_DURATION"
done
| true
|
5146ac20a0231367515ab50ef60a6703eb15a3e8
|
Shell
|
ShoupingShan/Shell
|
/train3/exam_while.sh
|
UTF-8
| 704
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/bash
# Exercise script: squares via a while loop, then two multiplication
# tables demonstrating the difference between `break` and `break 2`.
i=1
while [[ "$i" -lt 10 ]]
do
let "square=i*i"
echo "$square"
let "i++"
done
# Multiplication table; `break` stops only the inner loop once j reaches 5.
for ((i=1;i<=9;i++))
do
for ((j=1;j<=i;j++))
do
let "product=i*j"
printf "$i*$j=$product"
if [[ "$product" -gt 9 ]]
then
printf " "
else
printf " "
fi
if [[ "$j" -eq 5 ]]
then
break
fi
done
echo
done
# "break 2" exits two levels of nested loops at once
for ((i=1;i<=9;i++))
do
for ((j=1;j<=i;j++))
do
let "product=i*j"
printf "$i*$j=$product"
if [[ "$product" -gt 9 ]]
then
printf " "
else
printf " "
fi
if [[ "$j" -eq 5 ]]
then
break 2
fi
done
echo # skipped on the pass where "break 2" fires (both loops exit first)
done
echo
| true
|
e6ab1b16808afd98121bdaeb30986a8e88552a73
|
Shell
|
husiana/azurehpc
|
/ci/build.sh
|
UTF-8
| 4,243
| 3.90625
| 4
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
#!/bin/bash
PROJECT_DIR=$1
show_logs=${AZHPC_SHOW_LOGS,,}
if [ "$SYSTEM_DEBUG" = "true" ]; then
set -x
AZHPC_OPTION="--debug"
show_logs="true"
printenv
fi
if [ "$AZHPC_CONFIG" = "" ]; then
echo "variable AZHPC_CONFIG is required"
exit 1
fi
if [ "$AZHPC_PIPELINE_DIR" = "" ]; then
echo "variable AZHPC_PIPELINE_DIR is required"
exit 1
fi
if [ "$AZHPC_VARIABLES_LOCATION" = "" ]; then
echo "variable AZHPC_VARIABLES_LOCATION is required"
exit 1
fi
if [ "$AZHPC_RESOURCEGROUP" = "" ]; then
echo "variable AZHPC_RESOURCEGROUP is required"
exit 1
fi
echo "********************************************************************"
echo "* INIT CONFIG VARIABLES *"
echo "********************************************************************"
# AZHPC_UUID is set when creating the RG unique name when starting the pipeline
export AZHPC_VARIABLES_UUID=${AZHPC_UUID-azhpc}
azhpc_variables=$(printenv | grep AZHPC_VARIABLES)
init_variables="-v resource_group=$AZHPC_RESOURCEGROUP"
for item in $azhpc_variables; do
key=$(echo $item | cut -d '=' -f1)
value=$(echo $item | cut -d '=' -f2)
variable=${key#AZHPC_VARIABLES_}
variable=${variable,,}
init_variables+=",$variable=$value"
done
echo $init_variables
. install.sh
conf_dir=$(dirname $AZHPC_CONFIG)
if [ "$PROJECT_DIR" = "" ]; then
PROJECT_DIR=${conf_dir##*/}
fi
config_file=$(basename $AZHPC_CONFIG)
# clean up project dir
if [ -d $PROJECT_DIR ]; then
ls -al $PROJECT_DIR
# rm -rf $PROJECT_DIR
fi
echo "Calling azhpc-init"
azhpc-init $AZHPC_OPTION -c $BUILD_REPOSITORY_LOCALPATH/$conf_dir -d $PROJECT_DIR $init_variables || exit 1
pushd $PROJECT_DIR
jq '.' $config_file
echo "********************************************************************"
echo "* BUILD RESOURCES *"
echo "********************************************************************"
echo "Calling azhpc-build"
export PATH=$PATH:$HOME/bin # add that path for any CycleCloud calls
azhpc-build -c $config_file $AZHPC_OPTION
return_code=$?
cat deploy*.json
ls -al
if [[ "$return_code" -ne "0" ]] || [[ "$show_logs" == "true" ]]; then
config_file_no_path=${config_file##*/}
config_file_no_path_or_extension=${config_file_no_path%.*}
tmp_dir=azhpc_install_$config_file_no_path_or_extension
if [ -d $tmp_dir ]; then
echo "============"
echo "Dumping logs"
echo "============"
echo ""
cat $tmp_dir/install/*.log
grep -A4 "\[FAILURE\]" $tmp_dir/install/*.log
fi
if [ "$return_code" -ne "0" ]; then
exit $return_code
fi
fi
# Dump resource status only if install_from is set
install_from=$(jq -r '.install_from' $config_file)
if [ "$install_from" != "" ]; then
echo "********************************************************************"
echo "* RESOURCES UPTIME *"
echo "********************************************************************"
azhpc-status -c $config_file $AZHPC_OPTION
else
echo "Exiting as no scripts need to be copied on remote VMs"
exit 0
fi
echo "********************************************************************"
echo "* COPY SCRIPTS *"
echo "********************************************************************"
# Copy scripts
if [ "$AZHPC_SCRIPT_REMOTE_DEST" = "" ]; then
export AZHPC_SCRIPT_REMOTE_DEST="hpcuser@headnode:/apps"
fi
# Copy Applications run scripts
echo "Copy Applications run scripts to $AZHPC_SCRIPT_REMOTE_DEST"
azhpc-scp $debug_option -c $config_file -- -r $BUILD_REPOSITORY_LOCALPATH/apps/. $AZHPC_SCRIPT_REMOTE_DEST || exit 1
# Copy pipeline library scripts
echo "Copy pipeline library scripts to $AZHPC_SCRIPT_REMOTE_DEST"
azhpc-scp $debug_option -c $config_file -- -r $BUILD_REPOSITORY_LOCALPATH/ci/scripts/. $AZHPC_SCRIPT_REMOTE_DEST/ci || exit 1
# List remote files
echo "List files copied to $AZHPC_SCRIPT_REMOTE_DEST"
remote_dir=$(echo $AZHPC_SCRIPT_REMOTE_DEST | cut -d ':' -f2)
azhpc-run $debug_option -c $config_file ls -al $remote_dir
azhpc-run $debug_option -c $config_file ls -al $remote_dir/ci
| true
|
ef9716b1681fb9e311ee1c056eceac58ed4d5b30
|
Shell
|
Giappo/jap
|
/cluster_scripts/run_on_cluster.bash
|
UTF-8
| 2,498
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --time=00:04:58 --partition=short
my_email=$3
chosen_partition=$4
cluster_folder=$5
account=$6
projects_folder_name=$7
package_name=$8
function_name=$9
cd /$cluster_folder/$account/jap_scripts/
args_file=$1
fun_file=$2
args_string=${args_file%.*}
echo ${args_file}
echo ${args_string}
echo ${fun_file}
args_file=$( printf $args_file )
fun_file=$( printf $fun_file )
R_file_name=R-${args_string}.R
bash_file_name=bash-${args_string}.bash
job_name=${args_string}
#log_name=${args_string}.log
out_name=${args_string}.RData
log_name=/${cluster_folder}/${account}/${projects_folder_name}/${package_name}/${function_name}/logs/${args_string}.log
cluster_folder=$( printf $cluster_folder )
account=$( printf $account )
projects_folder_name=$( printf $projects_folder_name )
package_name=$( printf $package_name )
function_name=$( printf $function_name )
rm $R_file_name #remove previous versions
rm $bash_file_name #remove previous versions
echo "args <- commandArgs(TRUE)" > $R_file_name
echo "print(args)" >> $R_file_name
echo "load(file.path(\"\", \"${cluster_folder}\", \"${account}\", \"jap_scripts\", \"${fun_file}\"))" >> $R_file_name
echo "x <- fun_list\$run_function_from_file(args_file = args)" >> $R_file_name
echo "setwd(dir = file.path(\"\", \"${cluster_folder}\", \"${account}\", \"${projects_folder_name}\", \"${package_name}\", \"${function_name}\", \"results\"))" >> $R_file_name
echo "print(x)" >> $R_file_name
#echo "save(x, file = file.path(getwd(), \"${out_name}\"))" >> $R_file_name
#echo "save(x, file = file.path(\"\", \"${cluster_folder}\", \"${account}\", \"${package_name}\", \"${function_name}\", \"results\", \"${out_name}\"))" >> $R_file_name
echo "save(x, file = file.path(getwd(), \"${out_name}\"))" >> $R_file_name
echo "#!/bin/bash" > $bash_file_name
#echo "#SBATCH --time=71:58:58" >> $bash_file_name
#echo "#SBATCH --output=${log_name}" >> $bash_file_name
echo "module load R" >> $bash_file_name
echo "Rscript ${R_file_name} ${args_file}" >> $bash_file_name
echo "rm ${R_file_name}" >> $bash_file_name
echo "rm ${bash_file_name}" >> $bash_file_name
echo "rm ${args_file}" >> $bash_file_name
echo "rm ${fun_file}" >> $bash_file_name
#NEVER ASK FOR MORE THAN 9GB OF MEMORY!
sbatch --partition=$chosen_partition \
--time=71:58:58 \
--mem=9GB \
--job-name=$job_name \
--mail-type=FAIL,TIME_LIMIT \
--mail-user=$my_email \
--output=${log_name} \
$bash_file_name
cd /$cluster_folder/$USER/
# ls | find . -name "slurm*" | xargs rm
| true
|
90ea16043c2e5bf0bdc0dfdac0e1cff763cf4e40
|
Shell
|
usergenic/dotfiles
|
/node/dot-bash_profile_node
|
UTF-8
| 276
| 2.671875
| 3
|
[] |
no_license
|
#! /bin/bash
# nvm
export NVM_DIR=$HOME/.nvm
# This loads nvm
[ -s $HOME/homebrew/opt/nvm/nvm.sh ] && \
. $HOME/homebrew/opt/nvm/nvm.sh
# This loads nvm bash_completion
[ -s $HOME/homebrew/opt/nvm/etc/bash_completion ] && \
. $HOME/homebrew/opt/nvm/etc/bash_completion
| true
|
d1017073e30a04f679bfe9b95162f23e12e46da2
|
Shell
|
elongeau/dotfiles
|
/vm.sh
|
UTF-8
| 833
| 3.625
| 4
|
[] |
no_license
|
# Variables pour la gestion des VMs
VBOX_HOME=/cygdrive/c/Program\ Files/Oracle/VirtualBox
VM_UBUNTU="ubuntu LVM"
alias VBoxHeadless='$VBOX_HOME/VBoxHeadless.exe'
alias VBoxManage='$VBOX_HOME/VBoxManage.exe'
# cette fonction permet de démarrer la VM en arrière plan et d'afficher une progression dans l'attente de son démarrage
startVM() {
VBoxHeadless -s "$VM_UBUNTU" >> /dev/null &
echo "Démarrage en cours de $VM_UBUNTU"
runningvm="$(VBoxManage list runningvms | cut -d'"' -f2)"
while [ -z "$runningvm" ]
do
runningvm="$(VBoxManage list runningvms | cut -d'"' -f2)"
echo -n "."
sleep 1
done
echo
echo "VM $VM_UBUNTU demarre"
}
alias vm-start='startVM'
alias vm-stop='VBoxManage controlvm "$VM_UBUNTU" poweroff'
| true
|
4a92e49352847a83b36a348a1441db7d3e25091b
|
Shell
|
yfang1644/FArm_distro
|
/script/ORBit2
|
UTF-8
| 907
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
PKG_NAME=$1
PKG_VERSION=2.14.19
PKG_DEPENDS="libIDL"
PKG_MAINTAINER="Michael Meeks <michael@ximian.com>"
PKG_SECTION="gnome"
PKG_SHORTDESC="ORBit2 is a CORBA 2.4-compliant Object Request Broker (ORB) featuring mature C, C++ and Python bindings."
buildpkg() {
../configure ${TARGET_CONFIGURE_OPTS} \
ac_cv_alignof_CORBA_octet=1 \
ac_cv_alignof_CORBA_boolean=1 \
ac_cv_alignof_CORBA_char=1 \
ac_cv_alignof_CORBA_wchar=2 \
ac_cv_alignof_CORBA_short=2 \
ac_cv_alignof_CORBA_long=4 \
ac_cv_alignof_CORBA_long_long=8 \
ac_cv_alignof_CORBA_float=4 \
ac_cv_alignof_CORBA_double=8 \
ac_cv_alignof_CORBA_long_double=8 \
ac_cv_alignof_CORBA_struct=1 \
ac_cv_alignof_CORBA_pointer=4 \
--with-idl-compiler=/usr/bin/orbit-idl-2
sed -i 's/-DG_DISABLE_DEPRECATED//g' linc2/src/Makefile
make $MAKEFLAGS
make DESTDIR=$INSTALL_PKG install
}
| true
|
2b540f855137262742f1c6104c91974bb54ead4e
|
Shell
|
ternence-li/castle-games
|
/macosx/tools/update-framework.sh
|
UTF-8
| 919
| 3.859375
| 4
|
[
"Zlib",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/sh
# usage: update-framework [path]
#
# e.g. update-framework ./path/to/FreeType.framework
#
# WARNING: recommend doing this from someplace version controlled, because it deletes files.
#
# fixes a pre-mavericks macOS framework to follow the convention specified here:
# https://developer.apple.com/library/archive/technotes/tn2206/_index.html#//apple_ref/doc/uid/DTS40007919-CH1-TNTAG201
#
set -e
if [ -z "$1" ]
then
echo "#usage: update-framework [path]"
exit 1
fi
if git rev-parse --git-dir > /dev/null 2>&1; then
echo "inside a git repo, continuing"
else
echo "not inside a git repo, exiting"
exit 1
fi
pushd $1
echo "ln -s Versions/A Versions/Current"
pushd Versions
rm -rf Current
ln -s A Current
popd
echo "Removing invalid root level stuff"
find . ! -path "./Versions" ! -path . -maxdepth 1 -exec rm -r {} \;
echo "ln -s Versions/Current/* ."
ln -s Versions/Current/* .
popd
| true
|
ee0dbb0284a5a8fe281afc77d0116f95c7a8289b
|
Shell
|
feyfree/bash_draft
|
/read_file.sh
|
UTF-8
| 91
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the contents of /etc/hosts line by line.
filename='/etc/hosts'
# IFS= preserves leading/trailing whitespace; -r keeps backslashes literal.
# The `|| [ -n "$myline" ]` clause also emits a final line that lacks a
# trailing newline, which a plain `while read` loop would silently drop.
while IFS= read -r myline || [ -n "$myline" ]
do
    echo "$myline"
done < "$filename"
| true
|
513334986021f8aa3745909a69f610842aa26e53
|
Shell
|
huashuolee/borqs_stress
|
/atBorqs/presto/msr.sh
|
UTF-8
| 178
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Watch adb logcat for "success" lines and tap the screen when one shows up.
# Optional $1 selects the adb device (-s <serial>).
ss=""
[[ -n $1 ]] && ss="-s $1"
while true;
do
    # Fix: `result = "..."` (spaces around =) is not an assignment in shell;
    # it tried to run a command named "result", so $result was never set
    # and the tap below could never fire.
    result="$(adb $ss logcat | grep "success")"
    # NOTE(review): `adb logcat` streams indefinitely, so this assignment
    # only returns when the stream ends; `logcat -d` (one-shot dump) may be
    # what was intended -- confirm against the device workflow.
    if [ -n "$result" ]; then
        sleep 1
        adb $ss shell input tap 731 525
    fi
done
| true
|
a4a9a133a05143181fae9fade3c429315bfabb43
|
Shell
|
dead-beef/markovchain
|
/test
|
UTF-8
| 192
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
if [ -d env ]; then
. env/bin/activate
fi
if which coverage >/dev/null 2>&1; then
coverage run --include 'markovchain/*' -m pytest && coverage report -m
else
pytest
fi
| true
|
86db78bd76d4d603b78139578a5bd0d2e02a5999
|
Shell
|
kushimoto/my-nvim-adjusting-space
|
/init.sh
|
UTF-8
| 150
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Write the current directory name into .env as APP_NAME and tell the
# user which command to run next.
cat <<EOF > .env
APP_NAME=${PWD##*/}
EOF
echo
echo "Please execute the following command."
# Fix: bash's echo does not expand "\n", so the original printed a literal
# backslash-n; printf emits the message plus the intended blank line.
printf '%s\n\n' "$ (sudo) docker-compose up -d"
| true
|
354c3460237c180cc7ec61b887ef1f5b7150f727
|
Shell
|
selivanovm/dotfiles
|
/i3/status.sh
|
UTF-8
| 183
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
i3status -c ~/.i3/status.conf | while :
do
read line
mem=`free -t -m | grep "Total" | tail -n 1 | awk '{ print ""$3"MB"; }'`
echo "$mem | $line" || exit 1
done
| true
|
1d6ee5a4d02d07447fc7dd633e06c6ad861ea5ac
|
Shell
|
lacraig2/JHU_Comp_Arch_Hacking_Book_Project
|
/run.sh
|
UTF-8
| 813
| 3.96875
| 4
|
[] |
no_license
|
# This example supports x86, x86_64, and ARM.
#
# It checks if you are on the right architecture
# when running the ARM example and uses QEMU
# if your machine isn't ARM.
# The first argument is the architecture.
# The second argument is the string to be supplied.
arch=$(arch)
if [[ "$#" -lt 1 ]]; then
arg1=$(arch)
else
arg1=$1
fi
if [[ $arg1 == "amd64" || $arg1 == "x86_64" ]]; then
echo "[INFO] You selected an amd64 demo"
./demo_amd64 $2
elif [[ $arg1 == "arm" ]]; then
echo "You selected an arm demo"
if [[ $arch == "arm" ]]; then
echo "[INFO] Running on native arm machine"
./demo $2
else
echo "[INFO] Running in qemu-arm"
qemu-arm -L /usr/arm-linux-gnueabihf/ ./demo_arm $2
fi
else
echo "Defaulted to i386"
./demo_i386 $2
fi
| true
|
8265315d25fe31ed5dcbcb2bdd33ce1ed64d2a5f
|
Shell
|
vklimov1976/bash
|
/task04.sh
|
UTF-8
| 222
| 2.671875
| 3
|
[] |
no_license
|
# Dispatch the installation depending on the $base and $dm selection flags
# (1 = selected), both expected to be set by the surrounding script, as are
# the installDMBase/installBase/installDM functions.
if [ "$base" -eq 1 ] && [ "$dm" -eq 1 ]; then
    installDMBase
elif [ "$base" -ne 1 ] && [ "$dm" -eq 1 ]; then
    # Fix: original `[ $base -ne 1]` was missing the space before `]`,
    # which is a runtime error in test(1).
    # NOTE(review): installing the *base* when $base is NOT selected looks
    # inverted -- confirm the mapping against installBase/installDM callers.
    installBase
elif [ "$base" -eq 1 ] && [ "$dm" -ne 1 ]; then
    # Fix: this condition duplicated the first branch (-eq/-eq), making
    # installDM unreachable; it now fires when only $base is selected.
    installDM
else
    echo '==> Installing nothing'
fi
| true
|
60de4c6f107cac9f0ef60877df2099fd865e281f
|
Shell
|
verdude/random
|
/thechosenones/monitor-eb-target-group-health.sh
|
UTF-8
| 1,478
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -ueo pipefail
profile=""
environment=""
function usage() {
echo "Usage:"
echo " Required Arguments:"
echo " -e <environment name> # required. Elastic Beanstalk env."
echo
echo " Optional Arguments:"
echo " -p <aws profile name> # optional"
echo " -x # optional. verbose mode."
echo " -h"
echo
echo " Required programs:"
echo " - jq"
echo " - awscli"
exit ${1:-0}
}
while getopts :e:hp:x flag
do
case ${flag} in
x) set -x;;
p) profile="--profile ${OPTARG}";;
e) environment="${OPTARG}";;
h) usage;;
:) echo "arg required for: -${OPTARG}"; usage 1;;
?) echo "invalid arg: -${OPTARG}"; usage 1;;
esac
done
if [[ -z "$environment" ]]; then
usage 1
fi
lbarn=$(aws $profile elasticbeanstalk describe-environment-resources --environment-name $environment | jq .EnvironmentResources.LoadBalancers[0].Name | tr -d '"')
targetgrouparn=$(aws $profile elbv2 describe-target-groups --load-balancer-arn $lbarn | jq .TargetGroups[0].TargetGroupArn | tr -d '""')
truncate -s 0 target-deploy.log
while true; do
sclear="true"
for x in {1..10}; do
state=$(aws $profile elbv2 describe-target-health --target-group-arn $targetgrouparn | jq .TargetHealthDescriptions[0].TargetHealth.State)
if [[ -n "${sclear}" ]]; then
clear
sclear=""
fi
printf "$(date) - ${state}\n" | tee -a target-deploy.log > /dev/null
echo ${state}
done
done
| true
|
2dbcb9f79c1718d12a9d4418b286da7dd084cae2
|
Shell
|
drio/py.analysis
|
/pipelines/split_mapping/test/phix_test.sh
|
UTF-8
| 487
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke test: download a small phiX dataset and run every stage of the
# sapi.py split-mapping pipeline against it.
# Fix: the original `[ $(which sapi.py) ] || (echo ...; exit 1)` only
# exited the subshell, so the script kept running without sapi.py;
# `command -v` in a brace group aborts the script itself.
command -v sapi.py >/dev/null 2>&1 || { echo "sapi.py not found" >&2; exit 1; }
curl -L http://cl.ly/1f0q3X2L373U/phix.tar.bz2 | tar -jx
end="-x"
bam="$(pwd)/phix/phix.bam"
fa="$(pwd)/phix/phix.fa"
# $end is deliberately unquoted: it carries a flag (-x) for sapi.py.
sapi.py -i FOO -b "$bam" fastqc $end
sapi.py -i FOO -b "$bam" init -n 40000 $end
sapi.py -i FOO -b "$bam" -n 40000 splits $end
sapi.py -i FOO -b "$bam" -f "$fa" sais $end
sapi.py -i FOO -b "$bam" -f "$fa" sampe $end
sapi.py -i FOO -b "$bam" -f "$fa" merge $end
sapi.py -i FOO dups $end
sapi.py -i FOO stats $end
| true
|
3839f45f2259dad69f8ac9e5ea56f62eba91fd4b
|
Shell
|
ilventu/aur-mirror
|
/mcobj-git/PKGBUILD
|
UTF-8
| 1,227
| 3.234375
| 3
|
[] |
no_license
|
# Maintainer: Limao Luo <luolimao+AUR@gmail.com>
_pkgname=mcobj
pkgname=$_pkgname-git
pkgver=20120930
pkgrel=1
pkgdesc="Exports minecraft worlds to .obj or .prt"
arch=(i686 x86_64)
url=https://github.com/quag/$_pkgname
license=(custom)
makedepends=($_pkgname-lib go git)
optdepends=(minecraft)
provides=($_pkgname=0.14)
conflicts=($_pkgname)
options=(!strip)
_gitroot=http://github.com/quag/$_pkgname.git
_gitname=$_pkgname
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [[ -d $_gitname/.git ]] ; then
pushd $_gitname && git pull
msg2 "The local files are updated."
popd
else
git clone $_gitroot
fi
msg2 "GIT checkout done or server timeout"
rm -rf $_gitname-build/
cp -r $_gitname/ $_gitname-build/
cd $_gitname-build/cmd/$_pkgname/
msg "Building..."
go build -o ../../$_pkgname
}
package() {
cd "$srcdir"/$_gitname-build/
install -Dm755 $_pkgname "$pkgdir"/usr/share/$_pkgname/$_pkgname
install -Dm644 blocks.json "$pkgdir"/usr/share/$_pkgname/blocks.json
install -Dm644 LICENSE "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
install -d "$pkgdir"/usr/bin/
ln -s /usr/share/$_pkgname/$_pkgname "$pkgdir"/usr/bin/$_pkgname
}
| true
|
7bb5fa064f0223ec978a5bc79ea420de9a9cec99
|
Shell
|
javaskater/Utilitaires
|
/CMS/Drupal/LOCAL/composerInstallD8WithConf.sh
|
UTF-8
| 18,475
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#The drush drupal8 installer drops by himself the database if exists
## Anyway if we need to create ti from scratch, just put true to the following variable
INSTALL_DATABASE="True"
LOCALE="fr"
# On http://docs.drush.org/en/master/install/ they note that:
## Drush 9 (coming soon!) only supports one install method.
## It will require that your Drupal site be built with Composer
## ( which is for the coming Drupal8 releases the preferred method ... )
### and Drush be listed as a dependency.
## that script's directory ...
SCRIPT_DIR=$(pwd)
USER=$(whoami)
APACHE="www-data"
#absolute path of Drupal's instance
DRU_INSTALL_DIR="$HOME/RIF"
DRU_INSTANCE="d8devextranet"
DRU_SOURCES_DIR="${DRU_INSTALL_DIR}/${DRU_INSTANCE}"
DRU_HOME="${DRU_SOURCES_DIR}/web"
DRU_NAME=$(basename $DRU_SOURCES_DIR)
DRU_COMPOSER_MODULES="${DRU_HOME}/modules/contrib"
DRU_PERSONAL_MODULES="${DRU_HOME}/modules/custom"
DRU_THEMES="${DRU_HOME}/themes"
# parameters required by the Drupal installation script
ADMIN_PASSWD="admin"
SITE_NAME="Randonneurs Ile de France"
# Adding parameters to the default settings.php file
## the path to the private files/medias must be rwx for www-data
### uncomment the following variable if you put your images/files in a private location (not public like default)
#PRIVATE_FILE_IMAGE_PATH="$HOME/Images/RIF"
## The Proxy server for Drupal to access internet (updates, localisation updates, adding module through GUI)
### uncomment the following variable (and change for the right parameters) if your Drupal installation stays behind such a proxy server
#PROXY="http://proxy.mycompany:itsport"
MYSQL_ROOT="root"
MYSQL_ROOTPASSWD="root"
MYSQL_DATABASE=$DRU_INSTANCE
DIR=${PWD%/}
DAT=$(date +%Y%m%d_%H%M%S)
FLOG="$DIR/${DRU_INSTANCE}-$DAT.log"
DRUPAL_ARCHIVE="${DRU_INSTALL_DIR}/${DRU_INSTANCE}-${DAT}"
#for aliases definiton we need...
shopt -s expand_aliases #cf. answer 4 from https://stackoverflow.com/questions/24054154/how-to-create-an-aliases-in-shell-scripts
#before launching that script follow underneath instructions from https://getcomposer.org/download/ to install composer.phar under your $HOME dir:
#those instructions are:
## curl https://getcomposer.org/installer -o composer-setup.php
## php -r "if (hash_file('SHA384', 'composer-setup.php') === '669656bab3166a7aff8a7506b8cb2d1c292f042046c5a994c43155c0be6190fa0355160742ab2e1c88d40d5be660b410') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
## php composer-setup.php
## php -r "unlink('composer-setup.php');"
# now that composer.phar is under $HOME, I can define the following alias :
alias local_composer="php $HOME/composer.phar"
#once Drupal will have been installed by composer, we have to define the following alias ...
## which can be used only when we are under $DRU_HOME
##the local drush command
alias local_drush="php ../vendor/drush/drush/drush.php"
#une fois la console drupal installée localement par composer (partie vendor), on pourra passer la commande :
## attention les commmandes de la console se passent depuis le répertoire des sources
## et non depuis le sous-répertoire web !!!
alias local_drupal="php vendor/drupal/console/bin/drupal.php"
# Drop and recreate the Drupal MySQL database plus a dedicated MySQL user.
# Globals read: MYSQL_ROOT, MYSQL_ROOTPASSWD, MYSQL_DATABASE.
# NOTE(review): the database name, user name and password are all the same
# value (${MYSQL_DATABASE}) — fine for a local dev box, not for production.
# Side effect: stores the mysql client's exit status in the global 'resmysql'.
function mysql_database_creation(){
echo "calling the $0 / ${FUNCNAME[0]} function"
# The SQL statements below are fed to the mysql client via an unquoted
# here-document, so ${MYSQL_DATABASE} is expanded by the shell first.
mysql -u${MYSQL_ROOT} -p${MYSQL_ROOTPASSWD} -h localhost 2>&1 <<EOF
DROP DATABASE IF EXISTS ${MYSQL_DATABASE};
DROP USER IF EXISTS ${MYSQL_DATABASE}@localhost;
CREATE USER '${MYSQL_DATABASE}'@'localhost' IDENTIFIED BY '${MYSQL_DATABASE}';
CREATE DATABASE IF NOT EXISTS ${MYSQL_DATABASE} CHARACTER SET utf8 COLLATE utf8_general_ci;
GRANT ALL ON \`${MYSQL_DATABASE}\`.* TO \`${MYSQL_DATABASE}\`@localhost;
EOF
# keep the mysql exit status for possible later inspection by the caller
resmysql=$?
}
function kernel(){
echo "calling the $0 / ${FUNCNAME[0]} function"
old_dir=$(pwd)
if [ -d "$DRU_SOURCES_DIR" ]; then
echo "+ ${DRU_SOURCES_DIR} exists (prvious installation), we supress those old sources before getting the new ones"
sudo rm -rf $DRU_SOURCES_DIR
fi
echo "We install the latest Drupal8 sources unser ${DRU_SOURCES_DIR} unsig composer"
cd $DRU_INSTALL_DIR
local_composer create-project drupal-composer/drupal-project:8.x-dev $DRU_INSTANCE --stability dev --no-interaction 2>&1
echo "we now launch Drupal automatic installation using the local drush present in the vendor directory"
#you have to be under DRUPAL root to launch our drush commands
cd $DRU_HOME
# Some remarks about the site-install (si) drush command:
## that command drop the existing Drupal Tables in the Database if necessary !!!!
## Here we chose the standard profile.
### the possible profiles match the directories' names present under $DRU_HOME/web/core/profiles
local_drush si -y --notify --db-url="mysql://${MYSQL_DATABASE}:${MYSQL_DATABASE}@127.0.0.1:3306/${MYSQL_DATABASE}" standard --site-name="$SITE_NAME" --account-pass="$ADMIN_PASSWD" 2>&1
cd $old_dir
}
function add_another_language(){
newlang=$1 # must be fr or de or es see https://docs.drupalconsole.com/en/commands/locale-language-add.html
echo "calling the $0 / ${FUNCNAME[0]} function for adding ${newlang} to Drupal"
LOCALE_DRUSH="locale"
old_dir=$(pwd)
cd "${DRU_HOME}"
#In the case of an automatic installation this module is not active by default
echo " -- activating the $LOCALE_DRUSH module present but not activated by default"
local_drush en -y ${LOCALE_DRUSH} 2>&1
cd "${DRU_SOURCES_DIR}"
echo " - adding the ${newlang} as Drupal interface language"
#see. https://docs.drupalconsole.com/en/commands/locale-language-add.html
local_drupal ${LOCALE_DRUSH}:language:add ${newlang} 2>&1
echo " - rebuilding the cache..."
cd "${DRU_HOME}"
local_drush cr 2>&1
cd ${old_dir}
}
function set_language_as_default(){
default_lang=$1 # must be fr or de or es see https://docs.drupalconsole.com/en/commands/locale-language-add.html
echo "calling the $0 / ${FUNCNAME[0]} function for setting ${default_lang} as the Drupal default language"
old_dir=$(pwd)
cd "$DRU_SOURCES_DIR"
echo " - settig ${newlang} as the default Drupal interface language"
local_drupal co system.site langcode ${default_lang} 2>&1
local_drupal co system.site default_langcode ${default_lang} 2>&1
echo " - rebuilding the cache..."
cd "${DRU_HOME}"
local_drush cr 2>&1
cd ${old_dir}
}
function update_interface_translations(){
echo "calling the $0 / ${FUNCNAME[0]} function"
LOCALE_DRUSH="locale"
old_dir=$(pwd)
cd "${DRU_HOME}"
local_drush locale-check 2>&1
local_drush locale-update 2>&1
local_drush cr
cd ${old_dir}
}
function complementary_modules(){
echo "calling the $0 / ${FUNCNAME[0]} function"
#We have to download module code using composer, because Drupal's kernel itself has been downloaded using composer
MEDIA_ENTITY_DRUSH="media_entity_image"
MEDIA_ENTITY_COMPOSER="drupal/${MEDIA_ENTITY_DRUSH}"
old_dir=$(pwd)
#composer.json (created by the composer download of drupal sources), is present at $DRU_SOURCES_DIR
##we need to change directory there to complement it with our required complmentary modules ...
cd "$DRU_SOURCES_DIR"
echo "+ we need $MEDIA_ENTITY_COMPOSER (we download it using composer)"
echo "we are at: $(pwd)"
local_composer require $MEDIA_ENTITY_COMPOSER 2>&1
#you have to be under DRUPAL root to launch our drush commands
cd "${DRU_HOME}"
echo "+ we activate $MEDIA_ENTITY_DRUSH (and its dependencies)"
local_drush en -y $MEDIA_ENTITY_DRUSH 2>&1
cd $old_dir
}
function developper_modules(){
echo "calling the $0 / ${FUNCNAME[0]} function"
#We have to download module code using composer, because Drupal's kernel itself has been downloaded using composer
#Getting the active configuration key-values pairs on your admin dasboard
CONFIG_INSPECT_DRUSH="config_inspector"
CONFIG_INSPECT_COMPOSER="drupal/${CONFIG_INSPECT_DRUSH}"
#changing user without having to logout and login again
MASQUERADE_DRUSH="masquerade"
MASQUERADE_COMPOSER="drupal/${MASQUERADE_DRUSH}"
#delete all users or all entities of a specific content type
DELETE_ALL_DRUSH="delete_all"
DELETE_ALL_COMPOSER="drupal/${DELETE_ALL_DRUSH}"
#Developpers' tools suite ...
DEVEL_DRUSH="devel"
DEVEL_COMPOSER="drupal/${DEVEL_DRUSH}"
##We will use DEVEL_GENERATE from the suite for automatically generating content of any content type
DEVEL_GENERATE_DRUSH="devel_generate"
##We will use DEVEL_GENERATE from the suite for graphically twig debugging using the 'kint($my_variable);' command
DEVEL_KINT_DRUSH="kint"
##We will use WEPROFILER from the suite to get a developper's ToolBar at the bottom of the screen, analogous to the Symfony app_dev.php toolbar
DEVEL_WEBPROFILER_DRUSH="webprofiler"
#We will use EXAMPLES from the suite to get a Suite of well written modules (each modules does only one thing and does it well)
EXAMPLES_DRUSH="examples"
EXAMPLES_COMPOSER="drupal/${EXAMPLES_DRUSH}"
#composer.json (created by the composer download of drupal sources), is present at $DRU_SOURCES_DIR
##we need to change directory there to complement it with our required complmentary modules...
old_dir=$(pwd)
cd "$DRU_SOURCES_DIR"
echo "+ we need $CONFIG_INSPECT_COMPOSER (we download it using composer)"
local_composer require $CONFIG_INSPECT_COMPOSER 2>&1
echo "+ we need $DEVEL_COMPOSER (we download it using composer)"
local_composer require $DEVEL_COMPOSER 2>&1
echo "+ we need $EXAMPLES_COMPOSER (we download it using composer)"
local_composer require $EXAMPLES_COMPOSER 2>&1
echo "+ we need $MASQUERADE_COMPOSER (we download it using composer)"
local_composer require $MASQUERADE_COMPOSER 2>&1
echo "+ we need $DELETE_ALL_COMPOSER (we download it using composer)"
local_composer require $DELETE_ALL_COMPOSER 2>&1
#you have to be under DRUPAL root to launch our drush commands
cd "${DRU_HOME}"
echo "+ we activate $CONFIG_INSPECT_DRUSH (and its dependencies)"
local_drush en -y $CONFIG_INSPECT_DRUSH 2>&1
echo "+ we activate $DEVEL_GENERATE_DRUSH (and its dependencies)"
local_drush en -y $DEVEL_GENERATE_DRUSH 2>&1
echo "+ we activate $DEVEL_KINT_DRUSH (and its dependencies)"
local_drush en -y $DEVEL_KINT_DRUSH 2>&1
echo "+ we activate $DEVEL_WEBPROFILER_DRUSH (and its dependencies)"
local_drush en -y $DEVEL_WEBPROFILER_DRUSH 2>&1
echo "+ we activate $EXAMPLES_DRUSH (and its dependencies)"
local_drush en -y $EXAMPLES_DRUSH 2>&1
echo "+ we activate $MASQUERADE_DRUSH (and its dependencies)"
local_drush en -y $MASQUERADE_DRUSH 2>&1
echo "+ we activate $DELETE_ALL_DRUSH (and its dependencies)"
local_drush en -y $DELETE_ALL_DRUSH 2>&1
cd $old_dir
}
function personal_devs(){
echo "calling the $0 / ${FUNCNAME[0]} function"
old_dir=$(pwd)
IMPORT_MODULE="rif_imports"
cd "$DRU_SOURCES_DIR"
GIT_IMPORT_MODULE="https://github.com/javaskater/${IMPORT_MODULE}.git"
echo "+ we clone $GIT_IMPORT_MODULE into $DRU_PERSONAL_MODULES"
mkdir $DRU_PERSONAL_MODULES && cd $DRU_PERSONAL_MODULES
git clone $GIT_IMPORT_MODULE 2>&1
#you have to be under DRUPAL root to launch our drush commands
cd "$DRU_HOME"
echo "+ we activate $IMPORT_MODULE and its dependencies (configuration modules)"
local_drush en -y $IMPORT_MODULE 2>&1
cd $old_dir
}
function featuring(){
echo "calling the $0 / ${FUNCNAME[0]} function"
old_dir=$(pwd)
#my module need the following one that I have to download via composer before enabling the whole
FEATURES_DRUSH="features"
FEATURES_UI_DRUSH="${FEATURES_DRUSH}_ui"
FEATURES_COMPOSER="drupal/${FEATURES_DRUSH}"
cd "$DRU_SOURCES_DIR"
echo "+ we need ${FEATURES_DRUSH} (we download it using composer)"
local_composer require $FEATURES_COMPOSER 2>&1
#you have to be under DRUPAL root to launch our drush commands
cd "$DRU_HOME"
echo "+ we then activate ${FEATURES_DRUSH} using drush"
local_drush en -y $FEATURES_DRUSH 2>&1
echo "+ we then activate ${FEATURES_UI_DRUSH} using drush"
local_drush en -y $FEATURES_UI_DRUSH 2>&1
cd $old_dir
}
#the kernel search module makes Drupal wuse intesively the Database
#for our dev environment we need to deactivate it .
function search_deactivate(){
echo "calling the $0 / ${FUNCNAME[0]} function"
old_dir=$(pwd)
#the kernel search module
SEARCH_DRUSH="search"
#you have to be under DRUPAL root to launch our drush commands
cd "$DRU_HOME"
local_drush pm-uninstall -y $SEARCH_DRUSH 2>&1
cd $old_dir
}
function drupal_themings(){
echo "calling the $0 / ${FUNCNAME[0]} function"
old_dir=$(pwd)
#Main Drupal8 frontend theme based on Bootstrap (comes with a Starter Kit)
BOOTSTRAP_THEME_DRUSH="bootstrap"
BOOTSTRAP_THEME_COMPOSER="drupal/${BOOTSTRAP_THEME_DRUSH}"
#lightweight backend theme
## To avoid conflict between the default admin toolbar and the adminimal admin toolbar we need the followinc module
### see. explanations on https://www.drupal.org/project/adminimal_admin_toolbar
ADMINIMAL_TOOLBAR_DRUSH="adminimal_admin_toolbar"
ADMINIMAL_TOOLBAR_COMPOSER="drupal/${ADMINIMAL_TOOLBAR_DRUSH}"
ADMINIMAL_THEME_DRUSH="adminimal_theme"
ADMINIMAL_THEME_COMPOSER="drupal/${ADMINIMAL_THEME_DRUSH}"
cd "$DRU_SOURCES_DIR"
echo "+ we need ${BOOTSTRAP_THEME_DRUSH} (we download it using composer)"
local_composer require $BOOTSTRAP_THEME_COMPOSER 2>&1
echo "+ we need ${ADMINIMAL_TOOLBAR_DRUSH} (we download it using composer)"
local_composer require $ADMINIMAL_TOOLBAR_COMPOSER 2>&1
echo "+ we need ${ADMINIMAL_THEME_DRUSH} (we download it using composer)"
local_composer require $ADMINIMAL_THEME_COMPOSER 2>&1
cd "$DRU_HOME"
echo "+ we activate $BOOTSTRAP_THEME_DRUSH"
local_drush en -y $BOOTSTRAP_THEME_DRUSH 2>&1
echo "+ we activate $ADMINIMAL_THEME_DRUSH"
local_drush en -y $ADMINIMAL_THEME_DRUSH 2>&1
echo "+ we activate $ADMINIMAL_TOOLBAR_DRUSH"
local_drush en -y $ADMINIMAL_TOOLBAR_DRUSH 2>&1
## vset does not work in Drupal 8, instead we have to cchange the cconfiguration
## defined at http://d8devextranet.ovh/admin/config/development/configuration/inspect/system.theme/raw
## unsing drupal cconsole ...
cd "$DRU_SOURCES_DIR"
echo "+ we define $BOOTSTRAP_THEME_DRUSH as the default frontend theme"
local_drupal co system.theme default $BOOTSTRAP_THEME_DRUSH 2>&1
echo "+ we define $ADMINIMAL_THEME_DRUSH as the default backend theme"
local_drupal co system.theme admin $ADMINIMAL_THEME_DRUSH 2>&1
cd "$DRU_HOME"
local_drush cr 2>&1
cd $old_dir
}
function tunings(){
echo "calling the $0 / ${FUNCNAME[0]} function"
SETTINGS_FILE="${DRU_HOME}/sites/default/settings.php"
chmod u+w $SETTINGS_FILE
echo "" >> $SETTINGS_FILE
echo "/* " >> $SETTINGS_FILE
echo "* paramètres ajoutés par la fonction ${FUNCNAME[0]} du script $0" >> $SETTINGS_FILE
echo "*/" >> $SETTINGS_FILE
#We don't want our attached file be in the public directory by default see de https://www.drupal.org/node/2392959 (bottom of the webpage)
if [ -n "${PRIVATE_FILE_IMAGE_PATH}" ]
then
echo "\$settings['file_private_path'] = '${PRIVATE_FILE_IMAGE_PATH}';" >> $SETTINGS_FILE
fi
#If Drupal has to access internet through a proxy server, wee need to add its address here ....
if [ -n "${PROXY}" ]
then
echo "\$settings['http_client_config']['proxy']['http'] = '${PROXY}';" >> $SETTINGS_FILE
echo "\$settings['http_client_config']['proxy']['https'] = '${PROXY}';" >> $SETTINGS_FILE
echo "\$settings['http_client_config']['proxy']['no'] = ['127.0.0.1', 'localhost', '*.dgfip'];" >> $SETTINGS_FILE
fi
chmod u-w $SETTINGS_FILE
}
function display_drupal_available_console_commands(){
echo "calling the $0 / ${FUNCNAME[0]}"
old_dir=$(pwd)
cd "${DRU_HOME}"
echo "1/ displaying the list of available drush commands:"
local_drush help 2>&1
# attention les commmandes de la console se passent depuis le répertoire des sources
## et non depuis le sous-répertoire web !!!
cd "${DRU_SOURCES_DIR}"
echo "2/ on affiche la liste des commandes proposées par la console Drupal:"
local_drupal list 2>&1
cd $old_dir
}
# Create a timestamped backup of the Drupal instance: a mysqldump of the
# site database plus a tarball of the source tree, both stored under
# $DRUPAL_ARCHIVE and finally bundled as ${archive_name}.tgz.
# Globals read: DRUPAL_ARCHIVE, DRU_HOME, DRU_NAME, MYSQL_ROOT,
#               MYSQL_ROOTPASSWD, MYSQL_DATABASE.
# Exits non-zero when the database to back up does not exist.
function backup_instance(){
    echo "calling the $0 / ${FUNCNAME[0]}"
    archive_name="$(basename "$DRUPAL_ARCHIVE")"
    archive_install_dir="$(dirname "$DRUPAL_ARCHIVE")"
    old_dir=$(pwd)
    cd "$DRU_HOME" || exit 1
    drupal_code_dirname=$DRU_NAME
    echo "+ clean up cache before backuping"
    # flush Drupal caches so the dump does not carry stale cache tables
    local_drush cr 2>&1
    # start from a clean archive directory
    if [ -d "$DRUPAL_ARCHIVE" ]; then
        rm -rf "$DRUPAL_ARCHIVE"
    fi
    mkdir -p "$DRUPAL_ARCHIVE"
    cd "$DRUPAL_ARCHIVE" || exit 1
    echo "backupin Mysql Database: $MYSQL_DATABASE"
    if mysql -u"${MYSQL_ROOT}" -p"${MYSQL_ROOTPASSWD}" -e 'show databases;' 2>/dev/null | grep -i "${MYSQL_DATABASE}"; then
        echo "Mysql Database $MYSQL_DATABASE exists, we can backup it"
        mysqldump -u"${MYSQL_ROOT}" -p"${MYSQL_ROOTPASSWD}" "${MYSQL_DATABASE}" -h localhost > "${MYSQL_DATABASE}.sql"
    else
        echo "Mysql Database $MYSQL_DATABASE does not exists, we cannot backup it. Giving up"
        # BUG FIX: 'exit -1' is not a valid POSIX exit status (bash maps it
        # to 255, strict shells reject it); use a plain 1 instead.
        exit 1
    fi
    echo "Backuping Drupal8 Source Code ...."
    cd "$archive_install_dir" || exit 1
    tar czf "${drupal_code_dirname}.tgz" "$drupal_code_dirname" && mv -v "${drupal_code_dirname}.tgz" "$archive_name"
    echo "backuping ${archive_install_dir} as ${archive_name}.tgz"
    tar czf "${archive_name}.tgz" "$archive_name" -C "${archive_install_dir}" 2>&1
    cd "$old_dir"
}
# Orchestrate the full install: database creation (optional), Drupal kernel,
# locale setup, contrib/custom modules, themes, settings tuning, interface
# translation update, a backup, and a final ownership/permission fix so the
# Apache group can write into the docroot.
function main(){
    echo "calling the $0 / ${FUNCNAME[0]} function"
    old_dir=$(pwd)
    # BUG FIX: quote the variable — the original '[ $INSTALL_DATABASE == "True" ]'
    # raises a test error when the variable is empty or unset.
    if [ "$INSTALL_DATABASE" = "True" ]; then
        mysql_database_creation
    fi
    kernel
    search_deactivate
    add_another_language "${LOCALE}"
    set_language_as_default "${LOCALE}"
    complementary_modules
    drupal_themings
    developper_modules
    personal_devs
    featuring
    tunings
    update_interface_translations
    display_drupal_available_console_commands
    backup_instance
    cd "$DRU_HOME"
    local_drush cr 2>&1
    # the web server group needs write access on the docroot
    sudo chown -R "$USER:$APACHE" "$DRU_HOME" 2>&1
    sudo chmod -R g+w "$DRU_HOME" 2>&1
    cd "$old_dir"
}
main | tee $FLOG
| true
|
b60549d4b4bd60b2d4205431ffaf44836b95813a
|
Shell
|
geodesicsolutions-community/geocore-community
|
/contrib/build-release.sh
|
UTF-8
| 1,846
| 3.46875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/bash
# builds the software for release - should be run using composer.
# this is quick and dirty - hopefully we'll switch to use github actions at some point.
composer install --no-dev --optimize-autoloader
# make folder if not exists
mkdir build
# remove existing files if exists
rm build/geocore-ce.zip build/fusion.zip build/marquee.zip build/tempo.zip
# add license to base folder and all contents of src to base folder, minus a few things
zip build/geocore-ce.zip LICENSE
cd src/
# add files in src as the base folder.. but:
# exclude (some are added back partially further down):
# - config.php
# - templates_c
# - user_images
# - _geocache
# - geo_templates
# - addons/exporter/exports
# - .DS_Store files (Mac OS file)
zip ../build/geocore-ce.zip -r * -x config.php "templates_c/*" "user_images/*" "_geocache/*" "geo_templates/*" \
"addons/exporter/exports/*" "*.DS_Store"
# add the starting files needed for _geocache
zip ../build/geocore-ce.zip _geocache/index.php _geocache/.htaccess
# Add empty folders for user_images, templates_c
zip ../build/geocore-ce.zip user_images templates_c
# Add the almost empty folder for the exporter addon with the README.md included
zip ../build/geocore-ce.zip addons/exporter/exports/README.md
# Add the default template and min.php in geo_templates (Note: we exclude the extra template sets for now)
zip ../build/geocore-ce.zip geo_templates/min.php -r geo_templates/default/* -x "*.DS_Store"
# Make a download specificaly for the extra template sets - done so they can be uploaded using the manager if desired
cd geo_templates
zip ../../build/fusion.zip -r fusion -x "*.DS_Store"
zip ../../build/marquee.zip -r marquee -x "*.DS_Store"
zip ../../build/tempo.zip -r tempo -x "*.DS_Store"
echo
echo --- Build complete! Check the zips in the build/ folder ---
echo
| true
|
7cb88fb2f13f7af808e307abf314c00e4d63827b
|
Shell
|
efectn-forks/sayfalar
|
/update.sh
|
UTF-8
| 640
| 3.25
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Build man/odt/html pages from the reStructuredText sources in rst/,
# then generate an index.html linking every produced HTML page.
# FIX: the original parsed 'ls' output through sed pipelines, which breaks
# on unusual filenames; iterate the globs directly and use parameter
# expansion to strip the directory prefix and extension.
for j in man odt
do
	rm -rf "$j"
	mkdir -p "$j"
	for f in rst/*.rst
	do
		[ -e "$f" ] || continue	# guard: glob left literal when no match
		i=${f##*/}	# strip leading "rst/"
		i=${i%.rst}	# strip ".rst" suffix
		"rst2$j" "rst/$i.rst" > "$j/$i.$j"
	done
done
rm -rf html
mkdir -p html
for f in rst/*.rst
do
	[ -e "$f" ] || continue
	i=${f##*/}
	i=${i%.rst}
	rst2html --link-stylesheet "rst/$i.rst" > "html/$i.html"
done
# point every generated page at the shared stylesheet
sed -i 's|href=.*.css|href=\"main.css|g' html/*.html
cat main.css > html/main.css
echo "<head><title>Sayfalar</title></head><body>" > index.html
for g in html/*.html
do
	[ -e "$g" ] || continue
	i=${g##*/}
	i=${i%.html}
	echo -e "=> <a href=\"html/$i.html\">$i</a><br>" >> index.html
done
echo "</body>" >> index.html
| true
|
791a47a48ff0f32a3e24ccd981a30ce242ae514f
|
Shell
|
Kitware/VTK
|
/Utilities/KWIML/update.sh
|
UTF-8
| 625
| 2.90625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Sync the vendored KWIML sources from upstream into the VTK tree.
# The variables below are the configuration consumed by update-common.sh,
# which implements the actual import (git archive + subtree merge).
set -e
set -x
shopt -s dotglob
readonly name="KWIML"
readonly ownership="KWIML Upstream <kwrobot@kitware.com>"
readonly subtree="Utilities/KWIML/vtkkwiml"
readonly repo="https://gitlab.kitware.com/utils/kwiml.git"
readonly tag="master"
readonly paths="
"
# Hook called by update-common.sh after checkout: archive the upstream tree
# and add two forward-include shims for the source-tree layout.
extract_source () {
    git_archive
    cat > "$extractdir/$name-reduced/abi.h" <<EOF
/* Forward include for source-tree layout. */
#include "include/kwiml/abi.h"
EOF
    cat > "$extractdir/$name-reduced/int.h" <<EOF
/* Forward include for source-tree layout. */
#include "include/kwiml/int.h"
EOF
}
# update-common.sh drives the whole import, using the config and hook above
. "${BASH_SOURCE%/*}/../../ThirdParty/update-common.sh"
| true
|
3cc81e369c48bd0d1171ace653e135b991b7ea8d
|
Shell
|
led-spb/automate-scripts
|
/bin/check-mounts
|
UTF-8
| 346
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Check that every mount point listed in $1 (space-separated) is mounted
# read-write according to /proc/mounts, and print only the entries whose
# status changed since the previous run.  State is kept in the current
# directory (.mounts.status = current run, .mounts.total = previous run).
touch .mounts.total
mounts="$1"
# Print "Mount <m> has NORM status" when <m> appears in /proc/mounts with
# rw, options, otherwise "Mount <m> has FAIL status".
mount_ok(){
awk -v mount="$1" 'BEGIN{status="FAIL"} ($2==mount && $4 ~ /rw,/) {status="NORM"} END{print "Mount",mount,"has",status,"status"}' /proc/mounts
}
# note: $mounts is intentionally unquoted so the list word-splits
for mnt in $mounts; do
mount_ok $mnt
done | sort >.mounts.status
# emit lines present in the new status but absent from the previous one
comm -2 -3 .mounts.status .mounts.total
cp -f .mounts.status .mounts.total
| true
|
7e194fe4f476477caeb23156b6a34807027650e6
|
Shell
|
benji07/hooks
|
/bin/bootstrap
|
UTF-8
| 243
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the git-install-hook helper by symlinking it into ~/bin.
if [ ! -d ~/bin ]; then
    mkdir ~/bin
    # the directory must also be added to $PATH by the user
    echo "You must add the ~/bin directory to your path"
fi
# FIX: quote the path expansions — the original unquoted $(dirname $0) and
# $DIR broke when the script lives in a directory containing spaces.
DIR=$(cd "$(dirname "$0")"; pwd)
ln -snf "$DIR/git-install-hook" ~/bin/git-install-hook
| true
|
fc0bd32307541df6580fda8e6412bf3c7ac0eae3
|
Shell
|
docker-archive/docker-snap
|
/prep-docker-build.sh
|
UTF-8
| 1,056
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# should be sourced from snapcraft.yaml while building Docker
# current working directory should be the Docker source directory
# SNAPDIR should be set to the root of this Git repo
# (the directory of snapcraft.yml)
for patch in "$SNAPDIR"/patches/*.patch; do
echo "Applying $(basename "$patch") ..."
patch \
--batch \
--forward \
--strip 1 \
--input "$patch"
echo
done
# aww, can't use "git ls-remote" on launchpad:
# fatal: unable to access 'https://github.com/docker/docker.git/': Could not resolve host: github.com
# Loïc: you can, but only during the pull phase
DOCKER_GITCOMMIT="$(
git ls-remote --tags \
https://github.com/docker/docker.git \
"refs/tags/v$(< VERSION)^{}" \
| cut -b1-7 \
|| echo "v$(< VERSION)"
)-snap"
if git rev-parse &> /dev/null; then
DOCKER_GITCOMMIT+="-$(git rev-parse --short HEAD)"
fi
export DOCKER_GITCOMMIT
export BUILDTIME="$(
date --rfc-3339 ns 2>/dev/null | sed -e 's/ /T/' \
|| date -u
)"
export DOCKER_BUILDTAGS='
apparmor
seccomp
selinux
'
# pkcs11
export AUTO_GOPATH=1
| true
|
c95e3105e150d97c2b3fcd69fc1bae38b9851c75
|
Shell
|
Millennial-Polymath/alx-system_engineering-devops
|
/0x05-processes_and_signals/7-highlander
|
UTF-8
| 201
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This script catches SIGTERM signal and echoes, "I am invicible"
# SIGTERM handler: announce immortality instead of dying.
sig()
{
    echo "I am invincible!!!"
}
# FIX: register the handler once, BEFORE the loop.  The original re-armed
# the trap at the end of each iteration, so SIGTERM delivered during the
# first two seconds (before the first 'trap' ran) would kill the script.
trap "sig" SIGTERM
while :
do
    echo "To infinity and beyond"
    sleep 2
done
| true
|
b59168893cfc47a987e40ef85ad0f64bd93f0b16
|
Shell
|
cutefishos-ubuntu/pacstall-programs
|
/packages/dotdrop/dotdrop.pacscript
|
UTF-8
| 810
| 2.515625
| 3
|
[] |
no_license
|
# Pacstall package script ("pacscript") for dotdrop, a dotfiles manager.
# The fields and hook functions below follow the pacstall packaging format.
name="dotdrop"
version="1.7.1"
# upstream release archive, pinned by version and verified by the sha256 below
url="https://github.com/deadc0de6/dotdrop/archive/refs/tags/v${version}.zip"
hash="165347e1950c99dfb442ea19b670d1ad2769610682a1e2bf9abf7f51b64cb61c"
maintainer="TwilightBlood <hwengerstickel@pm.me>"
depends="python3 python3-docopt python3-setools python3-jinja2 python3-ruamel.yaml python3-magic"
# no source patching needed before build
prepare() {
true
}
# pure-Python package: nothing to compile
build() {
true
}
# Install into the stow directory; $STOWDIR is provided by pacstall.
# Installs the Python package plus bash/zsh/fish shell completions.
install() {
python3 setup.py install --root="$STOWDIR/$name/" --optimize=1
sudo install -Dm644 completion/dotdrop-completion.bash "$STOWDIR/"$name"/usr/share/bash-completion/completions/${name}"
sudo install -Dm644 completion/_dotdrop-completion.zsh "$STOWDIR/"$name"/usr/share/zsh/site-functions/_${name}"
sudo install -Dm644 completion/dotdrop.fish "$STOWDIR/"$name"/usr/share/fish/completions/${name}.fish"
}
# vim:set ft=sh ts=2 sw=2 et:
| true
|
5a262ac5ada03ed949f8e0d92a0cfbeb76f37857
|
Shell
|
cyrilvj94/ShellScriptExamples
|
/2_For_While_funcs/while_4.sh
|
UTF-8
| 416
| 3.484375
| 3
|
[] |
no_license
|
#! /bin/bash
#Gambler problem
# Gambler's ruin simulation: start with $100, flip a fair coin each round,
# win $1 on HEADS (1), lose $1 on TAILS (0); stop at ruin ($0) or at the
# goal ($200).

# Echo 0 or 1 with equal probability (one fair coin flip).
function simulate_coin_flip
{
    echo $((RANDOM%2))
}

money=100
declare -a lst
lst[0]=TAILS
lst[1]=HEADS
# FIX: the original condition 'while [ $money ]' only tested that $money is
# a non-empty string and was therefore always true by accident; make the
# intended infinite loop explicit (termination happens via the break below).
while true
do
    # modernized test: [[ ]] with || instead of the deprecated '-o' operator
    if [[ $money -eq 0 || $money -eq 200 ]]
    then
        echo Betting ended final money left $money
        break
    fi
    result=$(simulate_coin_flip)
    echo "Flipping coin : " ${lst[result]}
    case $result in
        0)((money--));;
        1)((money++));;
    esac
    echo Money left $money
done
| true
|
33801bcbb76f8865d59cad1f3cf871b823fb0122
|
Shell
|
kennedyj/symphony
|
/bin/check-cert-dates
|
UTF-8
| 180
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the validity period (notBefore / notAfter) of a PEM certificate.
# Usage: check-cert-dates <certificate-file>
if [ -z "$1" ]
then
    echo "usage: $0 certificate"
    exit 1
fi
# FIX: message typo "stat" -> "start"
echo " - output the start and end dates for the certificate"
openssl x509 -dates -noout -in "$1"
| true
|
d24ad2806dbd232c8001ac6531bb912a2c7fec00
|
Shell
|
davinci2016/calssification_person_dataset
|
/tools/__get_person_from_voc0712.sh
|
UTF-8
| 622
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the list of Pascal VOC0712 images that contain at least one "person"
# object, by grepping the annotation XML files for the person tag.
# Output: $TARGET_PATH/VOC0712.txt with one annotation filename stem per line.
VOC_PATH=/home/davinci/dnn/data/voc/VOCdevkit/VOC0712
TARGET_PATH=/home/davinci/dnn/data/my_person/voc0712
year="VOC0712"
mkdir $TARGET_PATH
mkdir $TARGET_PATH/Annotations/
mkdir $TARGET_PATH/JPEGImages/
cd $VOC_PATH/Annotations/
# collect "<file>.xml:<match>" lines for every annotation containing a person
grep -H -R "<name>person</name>" > $TARGET_PATH/temp.txt
cd $TARGET_PATH
# de-duplicate: an image with several persons matches several times
cat temp.txt | sort | uniq > $year.txt
# strip the ".xml:...<name>person</name>" suffix, leaving bare image IDs
# NOTE(review): assumes the matched tag is indented with exactly two tabs
# in the VOC XML files — confirm against the dataset before reuse
find -name $year.txt | xargs perl -pi -e 's|.xml:\t\t<name>person</name>||g'
#cat $year.txt | xargs -i cp $VOC_PATH/Annotations/{}.xml $TARGET_PATH/Annotations/
#cat $year.txt | xargs -i cp $VOC_PATH/JPEGImages/{}.jpg $TARGET_PATH/JPEGImages/
#rm temp.txt
| true
|
51c31d1c93abf406650478a6f91a02ea07534041
|
Shell
|
reo11/dotfiles
|
/.bin/lib/dotsinstaller/gnome-terminal-config-restore.sh
|
UTF-8
| 2,865
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -ue
if ! builtin command -v gnome-terminal > /dev/null 2>&1;then
echo "Not found gnome-terminal"
exit 0
fi
if ! builtin command -v dbus-launch > /dev/null 2>&1;then
echo "Not found dbus-launch"
exit 0
fi
if ! builtin command -v gsettings > /dev/null 2>&1;then
echo "Not found gsettings"
exit 0
fi
GNOME_TERMINAL_VERSION="$(gnome-terminal --version | tr " " "\n" | sed -e 's/[^0-9.]//g' | \grep -v "^$" | head -n 1)"
# gnome-terminal version < 3.8
if ! [[ ("$(echo "$GNOME_TERMINAL_VERSION" | cut -d"." -f1)" = "3" && \
"$(echo "$GNOME_TERMINAL_VERSION" | cut -d"." -f2)" -ge 8) || \
"$(echo "$GNOME_TERMINAL_VERSION" | cut -d"." -f1)" -ge 4 ]];then
echo "Old gnome-terminal can not set a config"
exit 0
fi
## use gsettings
dbus-launch gsettings set org.gnome.Terminal.Legacy.Settings menu-accelerator-enabled false
dbus-launch gsettings set org.gnome.Terminal.Legacy.Settings default-show-menubar false
dbus-launch gsettings set org.gnome.Terminal.Legacy.Settings schema-version 'uint32 3'
dbus-launch gsettings set org.gnome.Terminal.Legacy.Settings shortcuts-enabled false
profile=$(dbus-launch gsettings get org.gnome.Terminal.ProfilesList default)
profile=${profile:1:-1}
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ background-color 'rgb(0,0,0)'
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ font 'Ubuntu Mono 9'
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ foreground-color 'rgb(170,170,170)'
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ palette "['rgb(0,0,0)', 'rgb(204,0,0)', 'rgb(78,154,6)', 'rgb(196,160,0)', 'rgb(52,101,164)', 'rgb(117,80,123)', 'rgb(6,152,154)', 'rgb(211,215,207)', 'rgb(85,87,83)', 'rgb(239,41,41)', 'rgb(138,226,52)', 'rgb(252,233,79)', 'rgb(114,159,207)', 'rgb(173,127,168)', 'rgb(52,226,226)', 'rgb(238,238,236)']"
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ scroll-on-output false
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ use-system-font false
dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ use-theme-colors false
#dbus-launch gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/ use-theme-transparency false
## use dconf
#REPO_ROOT_DIR=$(builtin cd $(dirname "${BASH_SOURCE[0]:-$0}")/.. && pwd)
# backup
#dbus-launch dconf dump /org/gnome/terminal/ > gnome-terminal.conf
# restore
#dbus-launch dconf load /org/gnome/terminal/ < ${REPO_ROOT_DIR}/.i3/app-config/gnome-terminal.conf
| true
|
319fad87ad9da820e7c6120a3c2fd9965f7c7b1f
|
Shell
|
mahesh-km/monitoring-server
|
/Collecting Log/Client_side/collecting-log~
|
UTF-8
| 1,993
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#script for collecting log from local systems to the server;
#Run in host machines.
LOG_PATH="/mnt/logs"
LOG="/mnt/logs/log-to-server.log"
DATE=`date +%d-%m-%Y`
TIME=`date +%H:%M:%S`
TIME_STAMP=`date +%Y-%m-%d-%H-%M-%S`
#TIME_STAMP=`date +%d%m%Y%m%s`
NAME=`hostname`
RSYNC_OPTIONS="-razv"
DESTINATION=${NAME}/${TIME_STAMP}
RSYNC_DEST="etomer@mmonit.vyoma-media.com:/opt/ddis/logs/${DESTINATION}"
HOST_FOLDER="/opt/ddis/logs/${NAME}"
FLAG_CHECK="/opt/ddis/logs/${NAME}/collect-log"
mkdir -p /mnt/logs/Remote_log
BASE_PATH="/home/vyoma/pads/indavest/vyoma"
echo "Attempt for log collection "${TIME_STAMP} | tee ${LOG}
#checking for [hostname] folder.if not found , will create .
ssh -p 2222 etomer@mmonit.vyoma-media.com -i /root/.ssh/ssh-key 'HOST_FOLDER="'"$HOST_FOLDER"'"; test -d "$HOST_FOLDER" || mkdir -p "$HOST_FOLDER"' | tee -a ${LOG}
#checking for flag file is existing inside the host folder.
ssh -p 2222 etomer@mmonit.vyoma-media.com -i /root/.ssh/ssh-key "ls $FLAG_CHECK"
if [ $? -eq 0 ]
then
for f in /mnt/logs/*.log
do
size=`wc -c < ${f}`
if [ ${size} -gt 1048576 ]
then
echo "Log file found more than 1mb - " ${f} | tee -a ${LOG}
tail -n 1000 ${f} >> ${f}.big | tee -a ${LOG}
mv -fv ${f}.big /mnt/logs/Remote_log | tee -a ${LOG}
#cat /dev/null > ${f}
else
cp -fv ${f} /mnt/logs/Remote_log | tee -a ${LOG}
fi
done
#rsync log file to the host folder with timestamp.
rsync ${RSYNC_OPTIONS} /mnt/logs/Remote_log/* -e 'ssh -p 2222 -i /root/.ssh/ssh-key' ${RSYNC_DEST} | tee -a ${LOG}
#remove the flag file after log collection.
ssh -p 2222 etomer@mmonit.vyoma-media.com -i /root/.ssh/ssh-key rm -rf ${FLAG_CHECK} | tee -a ${LOG}
#clearing log after log collection
rm -rf /mnt/logs/Remote_log/* | tee -a ${LOG}
# rm -rf /mnt/logs/*.big | tee -a ${LOG}
# cat /dev/null > ${BASE_PATH}/logs/audit-screen1.log
# cat /dev/null > ${BASE_PATH}/logs/audit-screen2.log
cat /dev/null > /mnt/logs/system-uptime.log | tee -a ${LOG} #clearing after log collected
else
echo "flag not exists or connection error!" | tee -a ${LOG}
fi
#end
| true
|
e22b3cd84c0c286c439982b4d1c7a3d7c491d90b
|
Shell
|
jhefferson144/MyVBoxWizar
|
/dialog.sh
|
UTF-8
| 204
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show a dialog(1) radiolist asking the user to pick a number of CPUs for a
# VirtualBox VM; the selection is written to the file 'salida'.
NCORE="dialog --backtitle \"Num CPU\" --radiolist \"Seleccione num cpus:\" 10 40 4"
# append one radiolist entry (tag, item, state) per selectable CPU count
for i in `seq 1 1 2`; do NCORE="${NCORE} ${i} ${i} off"; done
# BUG FIX: the original used typographic quotes (”…”), which became literal
# characters inside the command string, and invoked 'Eval' with a capital E,
# which is not a command.  Use plain double quotes and the 'eval' builtin.
NCORE="${NCORE} 2> salida"
eval ${NCORE}
| true
|
a272c0a10bd808234909ec6c01ae9711bfbdfe88
|
Shell
|
rgl/kubernetes-ubuntu-vagrant
|
/provision-docker.sh
|
UTF-8
| 3,145
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
source /vagrant/lib.sh
# NB execute apt-cache madison docker-ce to known the available versions.
docker_version="${1:-20.10.8}"; shift || true
# prevent apt-get et al from asking questions.
# NB even with this, you'll still get some warnings that you can ignore:
# dpkg-preconfigure: unable to re-open stdin: No such file or directory
export DEBIAN_FRONTEND=noninteractive
# install docker.
# see https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#install-using-the-repository
apt-get install -y apt-transport-https software-properties-common
wget -qO- https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
docker_version="$(apt-cache madison docker-ce | awk "/$docker_version~/{print \$3}")"
apt-get install -y "docker-ce=$docker_version" "docker-ce-cli=$docker_version" containerd.io
apt-mark hold docker-ce docker-ce-cli
# stop docker and containerd.
systemctl stop docker
systemctl stop containerd
# use the systemd cgroup driver.
# NB by default docker uses the containerd runc runtime.
cgroup_driver='systemd'
# configure containerd.
# see https://kubernetes.io/docs/setup/cri/
cat >/etc/sysctl.d/99-kubernetes-cri.conf <<'EOF'
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
containerd config default >/etc/containerd/config.toml
cp -p /etc/containerd/config.toml{,.orig}
if [ "$cgroup_driver" = 'systemd' ]; then
patch -d / -p0 </vagrant/containerd-config.toml.patch
else
patch -d / -R -p0 </vagrant/containerd-config.toml.patch
fi
diff -u /etc/containerd/config.toml{.orig,} || true
systemctl restart containerd
# configure it.
# see https://kubernetes.io/docs/setup/cri/
cat >/etc/docker/daemon.json <<EOF
{
"experimental": false,
"debug": false,
"exec-opts": [
"native.cgroupdriver=$cgroup_driver"
],
"features": {
"buildkit": true
},
"log-driver": "journald",
"labels": [
"os=linux"
],
"hosts": [
"fd://",
"tcp://0.0.0.0:2375"
],
"default-runtime": "runc",
"containerd": "/run/containerd/containerd.sock"
}
EOF
# start docker without any command line flags as its entirely configured from daemon.json.
install -d /etc/systemd/system/docker.service.d
cat >/etc/systemd/system/docker.service.d/override.conf <<'EOF'
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd
EOF
systemctl daemon-reload
systemctl start docker
systemctl cat docker
# validate that docker is using the expected cgroup driver.
docker_cgroup_driver="$(docker info -f '{{.CgroupDriver}}')"
if [ "$docker_cgroup_driver" != "$cgroup_driver" ]; then
echo "ERROR: Cgroup driver MUST be $cgroup_driver, but its $docker_cgroup_driver"
exit 1
fi
# let the vagrant user manage docker.
usermod -aG docker vagrant
# kick the tires.
docker version
docker info
docker network ls
ip link
bridge link
docker run --rm hello-world
docker run --rm alpine cat /etc/resolv.conf
docker run --rm alpine ping -c1 8.8.8.8
| true
|
578bc672abc5fd3d203c42d4626b273dcccc3904
|
Shell
|
flaviosakakibara/hyperledger-fabric
|
/start-kafka.sh
|
UTF-8
| 505
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bring up the Hyperledger Fabric stack on OpenShift in dependency order:
# zookeepers -> kafkas -> orderers -> peers, pausing between tiers so each
# tier is ready before its consumers start.

# Apply every resource file under ./dc whose name contains "$1",
# printing each filename as it is applied.
# (Globbing replaces the fragile `ls | grep` pipeline, which breaks on
# filenames with whitespace.)
start_tier() {
    local pattern=$1 f
    for f in ./dc/*"$pattern"*; do
        [ -e "$f" ] || continue   # skip the literal glob when nothing matches
        echo "${f##*/}"
        oc create -f "$f"
    done
}

echo "Iniciando zookeepers"
start_tier zookeeper
sleep 40
echo "Iniciando kafkas"
start_tier kafka
sleep 25
echo "Iniciando orderers"
start_tier orderer
sleep 25
echo "Iniciando peers"
start_tier peer
| true
|
e21c66a1cf4fd8d7714b9dae22447a35d0e146bf
|
Shell
|
mmgaggle/bench-cephstore
|
/test.sh
|
UTF-8
| 1,694
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Destructive fio disk benchmark: partitions DEVICE, creates an XFS
# filesystem, runs fio across block sizes / IO modes / iterations, then an
# OpenSSL AES benchmark. Results land in /tmp/<timestamp>-fio/.
set -o nounset
set -o errexit

DEVICE=$1
DEVICE_BASENAME=$(basename ${DEVICE})
PREFIX=$(date +%d%m%y-%H%M%S)

echo " + Results will be stored at /tmp/${PREFIX}-fio"
mkdir /tmp/${PREFIX}-fio

echo " + Disable disk cache on ${DEVICE}"
hdparm -qW0 ${DEVICE}

echo " + Create disk partitions"
parted --script -- ${DEVICE} mklabel gpt unit MB mkpart primary 1 -0

echo " + Creating XFS filesystem"
mkfs.xfs -qfd su=64k,sw=1 -i size=2048 ${DEVICE}1

echo " + Mount data partition"
mkdir -p /mnt/${DEVICE_BASENAME}1
mount ${DEVICE}1 /mnt/${DEVICE_BASENAME}1

# Run a single fio job against the mounted test partition and append the
# output to the per-iteration results directory.
#   $1 description  $2 rw mode  $3 ioengine  $4 block size  $5 iteration
function fio_file {
  DESCRIPTION=$1
  MODE=$2
  IOENGINE=$3
  BLOCK_SIZE=$4
  ITERATION=$5   # fixed: was assigned as ITERATIONS but used as ITERATION below

  echo ${DESCRIPTION}
  echo " + FSync and drop linux page cache"
  sync && echo 3 > /proc/sys/vm/drop_caches
  echo " + Begin test"
  fio --directory /mnt/${DEVICE_BASENAME}1 \
  --name=${DEVICE} \
  --direct=1 \
  --rw=${MODE} \
  --bs=${BLOCK_SIZE} \
  --ioengine=${IOENGINE}\
  --iodepth=16 \
  --numjobs 1 \
  --time_based \
  --runtime 300 \
  --size 1G \
  --group_reporting \
  | tee -a /tmp/${PREFIX}-fio/${ITERATION}/fio-${MODE}-${IOENGINE}-${BLOCK_SIZE}.log
}

# Single DEVICE benchmark
for iteration in $( seq 1 3 );do
  for block_size in 4096 8192 16384 65536 4194304;do
    for mode in 'write' 'read' 'rw' 'randwrite' 'randread' 'randrw';do
      # -p: the directory already exists after the first inner pass; a plain
      # mkdir would abort the script under 'set -o errexit'
      mkdir -p /tmp/${PREFIX}-fio/${iteration}
      # fixed: was 'fio_block', which is undefined — the function is fio_file
      fio_file "Running ${block_size} ${mode} workload against ${DEVICE}" ${mode} "libaio" ${block_size} ${iteration}
    done
  done
done

echo " -> Running OpenSSL AES CBC benchmarks"
openssl speed aes-128-cbc aes-192-cbc aes-256-cbc

# fixed: the filesystem was mounted at /mnt/${DEVICE_BASENAME}1, not
# /mnt/${DEVICE}1
umount /mnt/${DEVICE_BASENAME}1
| true
|
d7f6f68258a74bd367151158dd35cf7884692ec6
|
Shell
|
PsymonLi/sw
|
/nic/apollo/test/scale/run_scale_test_mock.sh
|
UTF-8
| 1,627
| 2.71875
| 3
|
[] |
no_license
|
#! /bin/bash
# Run the apollo scale test against mock ASIC / IPC backends; keep the log
# only when the test fails.
set -e

export ASIC="${ASIC:-capri}"
export NICDIR=$(pwd)
export PDSPKG_TOPDIR=$NICDIR
export NON_PERSISTENT_LOG_DIR=${NICDIR}
export ZMQ_SOC_DIR=${NICDIR}
export ASIC_MOCK_MODE=1
export ASIC_MOCK_MEMORY_MODE=1
export IPC_MOCK_MODE=1
export SKIP_VERIFY=1
export BUILD_DIR=${NICDIR}/build/x86_64/apollo/${ASIC}
export GEN_TEST_RESULTS_DIR=${BUILD_DIR}/gtest_results
export CONFIG_PATH=${NICDIR}/conf
#export GDB='gdb --args'

cfgfile=scale_cfg.json
if [[ "$1" == --cfg ]]; then
    cfgfile=$2
fi

export PATH=${PATH}:${BUILD_DIR}/bin
rm -f $NICDIR/conf/pipeline.json
ln -s $NICDIR/conf/apollo/pipeline.json $NICDIR/conf/pipeline.json

# Capture the test's exit status explicitly: under 'set -e' the original
# aborted before its status check ever ran, and the check itself inspected
# the following 'rm -f', not the test.
status=0
apollo_scale_test -c hal.json -i ${NICDIR}/apollo/test/scale/$cfgfile --gtest_output="xml:${GEN_TEST_RESULTS_DIR}/apollo_scale_test.xml" > apollo_scale_test.log || status=$?
rm -f $NICDIR/conf/pipeline.json

if [ $status -eq 0 ]
then
    rm -f apollo_scale_test.log
else
    # Show the tail of the log for debugging, then propagate the failure.
    tail -100 apollo_scale_test.log
    exit $status
fi
| true
|
67774bd559b6b3b303bd4baa4cb0fd478b3c469d
|
Shell
|
wgoodall01/dotfiles
|
/lib/install_fzf.sh
|
UTF-8
| 432
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Idempotently install fzf into ~/.fzf, logging to $LOGS/fzf_install and
# aborting via the externally defined fatal() helper on failure.
install_fzf(){
	printf "[fzf ] installing fzf... "
	if [[ -d ~/.fzf ]]; then
		printf "already installed.\n"
		return 0
	fi
	git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf &>>$LOGS/fzf_install \
		|| fatal "Failed to download fzf. check \$LOGS/fzf_install"
	~/.fzf/install --all --xdg &>>$LOGS/fzf_install \
		|| fatal "Failed to install fzf. check \$LOGS/fzf_install"
	printf "done.\n"
}
| true
|
411287c77def18a3ef44976ae30dcca3d8f2e122
|
Shell
|
tday981/general_scripts
|
/sep
|
UTF-8
| 1,087
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively print a single column from a file, with an optional custom
# field separator and optional output file.
echo -n "Any special separator? (y/n) "
read answer
if [ "$answer" = "y" ]; then
	echo -n "What is the field separator? "
	read sep
	echo -n "Which input file? "
	read file
	echo -n "Which column number do you want? "
	read number
	echo -n "Do you want to output to a file? (y/n) "
	read out
	if [ "$out" = "y" ]; then
		echo -n "What's the name of the output file? "
		read outfile
		# -v passes the column number safely instead of splicing user
		# input into the awk program text
		awk -F "$sep" -v n="$number" '{ print $n }' "$file" > "$outfile"
	else
		awk -F "$sep" -v n="$number" '{ print $n }' "$file"
	fi
elif [ "$answer" = "n" ]; then
	echo -n "Which file? "
	read file
	echo -n "Which column number? "
	read number
	echo -n "Do you want to output to a file? (y/n) "
	read out
	if [ "$out" = "y" ]; then
		echo -n "What's the name of the output file? "
		read outfile
		# mktemp avoids clobbering a user file named temp.txt in the cwd
		tmp=$(mktemp)
		awk -v n="$number" '{ print $n }' "$file" > "$tmp"
		sort "$tmp" > "$outfile"
		rm -f "$tmp"
	else
		awk -v n="$number" '{ print $n }' "$file"
	fi
else
	echo "Please enter yes or no!"
fi
| true
|
42088fd6a91c51fdf152f10c5554d8973a685cc9
|
Shell
|
thomas-barthelemy/docker-symfony-app
|
/php70/app/init
|
UTF-8
| 1,217
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
script_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
application_dir="/var/app"
parameters_file="${application_dir}/app/config/parameters.yml"
secret=`apg -a 1 -M nl -n 1 -m 40 -E ghijklmnopqrstuvwxyz`
: ${database_host:=${DB_PORT_5432_TCP_ADDR}}
: ${database_port:=${DB_PORT_5432_TCP_PORT}}
: ${database_user:=postgres}
: ${database_password:=postgres}
## Updating parameters.yml
sed -i "s/^\\( *database_host:\\).*/\\1 ${database_host}/" "$parameters_file"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
sed -i "s/^\\( *database_port:\\).*/\\1 ${database_port}/" "$parameters_file"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
sed -i "s/^\\( *database_user:\\).*/\\1 ${database_user}/" "$parameters_file"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
sed -i "s/^\\( *database_password:\\).*/\\1 ${database_password}/" "$parameters_file"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
sed -i "s/^\\( *secret:\\).*/\\1 ${secret}/" "$parameters_file"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
## Checking app_dev
"$script_dir/app-make-app-accessible" "$application_dir"
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
exit $RETVAL
fi
| true
|
5697e335609e94cf7ab1891631a2896c14b44aa6
|
Shell
|
lifeonmarspt/golds-tools
|
/bin/spreadsheets-jwt
|
UTF-8
| 832
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a signed RS256 JWT for the Google Sheets read-only scope, using a
# service-account key stored in pass. The token is printed on stdout.
set -e

# JWT segments must be *base64url without padding* (RFC 7515 §2); plain
# base64 output ('+', '/', '=') is rejected by Google's token endpoint.
b64url() {
  base64 | tr -d '\n=' | tr '+/' '-_'
}

PKEY="$(pass ${LIFEONMARS_PASSWORD_STORE_DIR}all/golds/spreadsheets-api | jq -Sjc .private_key)"
ISSUER="$(pass ${LIFEONMARS_PASSWORD_STORE_DIR}all/golds/spreadsheets-api | jq -Sjc .client_email)"

NOW="$(date +"%s")"
EXP=$(( NOW + 3600 ))

HEADER="$(
  jq -Sjcn '{"alg":"RS256","typ":"JWT"}' | b64url
)"

CLAIMS="$(
  jq -Sjcn \
    --argjson exp "$EXP" \
    --argjson now "$NOW" \
    --arg iss "$ISSUER" \
    '{
      "scope": "https://www.googleapis.com/auth/spreadsheets.readonly",
      "aud": "https://www.googleapis.com/oauth2/v4/token",
      "iss": $iss,
      "exp": $exp,
      "iat": $now
    }' | b64url
)"
# --argjson keeps exp/iat numeric; --arg would emit them as JSON strings,
# which the JWT claims spec (RFC 7519) does not allow for NumericDate.

SIGNATURE="$(
  echo -n "$HEADER.$CLAIMS" |
  openssl dgst -sha256 -sign <(echo "$PKEY") |
  b64url
)"

echo "$HEADER.$CLAIMS.$SIGNATURE"
| true
|
dc2179d3fe28d1177e047d828f43f430c3227f77
|
Shell
|
dnkennedy/CloudComputeExample
|
/abcd-test_aws.sh
|
UTF-8
| 2,055
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "abcd-test_aws.sh: Running dcanlabs/abcd-hcp-pipeline in test mode container on ABCD S3 BIDS case on aws"

# This is the main script, that lives on the aws instance itself, that, given
# the S3 path to an anatomic case, manages its fetch, kwyk run, and 'post' to
# the ReproNim S3 results location
# At the moment it is expecting to run via the AWS-RunShellScript System
# Management functions, as user = root

# Check usage, 1 argument expected.
if [ $# -ne 1 ]; then
    echo "Illegal number of parameters provided"
    echo "Expected usage: abcd-test_aws.sh Output_Basename"
    echo "I would terminate"
    exit 10
fi

basenam=$1
bucket=abcd-test/output
localdir=abcd-test

# We are using aws 'profile' for credential management.
# We expect the .aws/configuration file to be pushed from your local system
# move creds from user ubuntu to root
#cp ~ubuntu/.aws/credentials /root/.aws/credentials

# Clear Prior BIDS directory, if present...
if [ -d ~ubuntu/BIDS ] ; then
    echo "BIDS Directory exists, removing it"
    rm -r ~ubuntu/BIDS
fi

# Fetch Case
echo "ABCD Fetching BIDS"
python3 ~ubuntu/nda-abcd-s3-downloader/download.py -o ~ubuntu/BIDS \
   -s ~ubuntu/$localdir/subj.txt \
   -i ~ubuntu/$localdir/datastructure_manifest.txt \
   -l ~ubuntu/nda-abcd-s3-downloader/log/ \
   -d ~ubuntu/$localdir/subsets.txt

# Prepare output directory
if [ -d ~ubuntu/DCAN ] ; then
    echo "DCAN Directory exists, removing it"
    # fixed: was 'rm -r sudo ~ubuntu/DCAN', which passed "sudo" to rm as a
    # file to delete instead of running rm under sudo
    rm -r ~ubuntu/DCAN
fi
mkdir ~ubuntu/DCAN

# Run Container
docker run --rm -v /home/ubuntu/BIDS:/bids_input:ro \
   -v /home/ubuntu/DCAN:/output -v /home/ubuntu/$localdir/license.txt:/license \
   dcanlabs/abcd-hcp-pipeline /bids_input /output \
   --freesurfer-license=/license --print-commands-only >>\
   /home/ubuntu/DCAN/log

# Transfer data out
echo "Copying result to s3://abcd_test/output/$basenam"
aws s3 cp ~ubuntu/DCAN s3://${bucket}/$basenam --recursive --profile reprodnk

# cleanup original tmp
sudo rm -r ~ubuntu/DCAN
sudo rm -r ~ubuntu/BIDS

# the end
echo "Done, thanks!"
#echo "Terminating"
#poweroff
exit
| true
|
b9ffb157baf17e3d729a90d32414ec347a5050f0
|
Shell
|
tapaswenipathak/linux-kernel-stats
|
/scripts/Extended Scripts/spinlock_log.sh
|
UTF-8
| 1,176
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Displays all git logs for spinlock and related keywords
# Contributor: kavita23meena.2002@gmail.com

SRCDIR=~/linux-stable/linux-stable
# Abort if the kernel tree is missing — otherwise git would run in the
# wrong directory.
cd "$SRCDIR" || exit 1

spinlock_keywords=("spinlock"
"spinlock_t"
"rwlock_t"
"ticket_spin_lock"
"raw_spinlock_t"
"qspinlock"
"ticket_spin_trylock"
"ticket_spin_unlock"
"read_lock"
"ticket_spin_is_locked"
"read_trylock"
"read_unlock"
"write_lock"
"write_trylock"
"write_unlock"
"raw_spin_lock"
"raw_spin_trylock"
"raw_spin_unlock"
"raw_spin_lock_irq"
"raw_spin_lock_irqsave"
"raw_spin_unlock_irq"
"raw_spin_unlock_irqrestore"
"spin_lock"
"spin_trylock"
"spin_unlock"
"spin_lock_irq"
"spin_lock_irqsave"
"spin_unlock_irq"
"spin_unlock_irqrestore"
)

# Print every commit mentioning one of the keywords in the current checkout.
log_keywords() {
    local k
    for k in "${spinlock_keywords[@]}"; do
        git log --all --grep="$k"
    done
}

# Major releases v3.0 .. v6.0
for ((i=3; i<=6; i++)); do
    git checkout -fq "v$i.0"
    log_keywords
done

# Extended point releases (previously two duplicated inline loops)
for ver_name in v5.17.1 v5.19.15; do
    git checkout "$ver_name"
    log_keywords
done
| true
|
6cedca8c9c5a030009e93f1e062079f02135383a
|
Shell
|
chrisfu/dotfiles
|
/bootstrap.sh
|
UTF-8
| 197
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Install zgen. Note: git clone creates ~/.zgen as a *directory*, so test
# with -d; the original '-f' (regular file) test never matched and tried to
# re-clone on every run.
if [ ! -d ~/.zgen ]
then
  git clone https://github.com/tarjoilija/zgen.git ~/.zgen
fi

# Setup dotfile symlinks
cd stow || exit 1   # bail out rather than stow from the wrong directory
for app in */
do
  stow -t ~/ "$app"
done
| true
|
3f7c0b4bffda96dca488b87de9263eba9b862a99
|
Shell
|
thuanle123/cmps101-pt.s19.grading
|
/pa1/pa1-perf-check.sh
|
UTF-8
| 1,746
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/bash
SRCDIR=https://raw.githubusercontent.com/legendddhgf/cmps101-pt.s18.grading/master/pa1
NUMTESTS=3
PNTSPERTEST=5
let MAXPTS=$NUMTESTS*$PNTSPERTEST
if [ ! -e backup ]; then
echo "WARNING: a backup has been created for you in the \"backup\" folder"
mkdir backup
fi
cp *.java Makefile backup # copy all files of importance into backup
for NUM in $(seq 1 $NUMTESTS); do
curl $SRCDIR/infile$NUM.txt > infile$NUM.txt
curl $SRCDIR/model-outfile$NUM.txt > model-outfile$NUM.txt
done
curl $SRCDIR/ModelListTest.java > ModelListTest.java
rm -f *.class
javac -Xlint Lex.java List.java
echo "Main-class: Lex" > Manifest
jar cvfm Lex Manifest *.class
rm Manifest
chmod +x Lex
echo ""
echo ""
lextestspassed=$(expr 0)
echo "Please be warned that the following tests discard all output to stdout/stderr"
echo "Lex tests: If nothing between '=' signs, then test is passed"
echo "Press enter to continue"
read verbose
for NUM in $(seq 1 $NUMTESTS); do
rm -f outfile$NUM.txt
timeout 5 Lex infile$NUM.txt outfile$NUM.txt &> garbage >> garbage
diff -bBwu outfile$NUM.txt model-outfile$NUM.txt &> diff$NUM.txt >> diff$NUM.txt
echo "Test $NUM:"
echo "=========="
cat diff$NUM.txt
echo "=========="
if [ -e diff$NUM.txt ] && [[ ! -s diff$NUM.txt ]]; then
let lextestspassed+=1
fi
done
echo ""
echo ""
let lextestpoints=5*lextestspassed
echo "Passed $lextestspassed / $NUMTESTS Lex tests"
echo "This gives a total of $lextestpoints / $MAXPTS points"
echo ""
echo ""
echo ""
echo "Press Enter To Continue with ListTest Results"
read verbose
javac ModelListTest.java List.java
timeout 5 java ModelListTest -v > ListTest-out.txt &>> ListTest-out.txt
cat ListTest-out.txt
rm *.class ModelListTest.java garbage
| true
|
3f9fca3f27433366abdf9cc995ff055745e5a683
|
Shell
|
salemove/xmpp-visitor-app
|
/cobrowse.sh
|
UTF-8
| 586
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Start a tmate-backed cobrowsing session and run the XMPP chat client
# inside it, handing the tmate web URL to the operator.
set -e

if [ "$#" -ne 3 ]; then
  echo "Usage: $0 UserJID Password OperatorJID"
  exit 1
fi

echo "Starting cobrowsing session for you in 2 seconds"
sleep 2

cwd=$(pwd)
sock=/tmp/tmate.sock

# Start tmate session and wait until it is ready before querying it.
tmate -S "$sock" new-session -d
tmate -S "$sock" wait tmate-ready

# Web URL that the remote party uses to join the session.
web_url=$(tmate -S "$sock" display -p '#{tmate_web}')

tmate -S "$sock" send-keys "cd $cwd; ./chat/cli.rb $* $web_url" Enter
tmate -S "$sock" split-window "/bin/bash -l"
tmate -S "$sock" select-layout main-vertical
tmate -S "$sock" attach-session
| true
|
998e8409e9b9e07afdf000717106f37b65cbb583
|
Shell
|
thirdwing/Rtools
|
/gcc463/scripts/buildcrossfromnative.sh
|
UTF-8
| 1,185
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# common settings
echo "Executing preliminary common steps"
export BUILD_CROSS_FROM_NATIVE="true"
. ./scripts/common.sh || exit 1
# Projects to be built, in the right order
PREGCC_STEPS="mingw-w64-headers binutils gmp mpfr mpc"
POSTGCC_STEPS="cleanup zipping"
cd $BUILD_DIR
mkdir -p $PREGCC_STEPS
mkdir -p mingw-w64-crt
mkdir -p winpthreads
#mkdir -p gcc-posix
mkdir -p $POSTGCC_STEPS
cd $TOP_DIR
# Build
MAKE_OPTS='-j8'
# prepare for GCC
for step in $PREGCC_STEPS
do
echo "-> $step"
cd $BUILD_DIR/$step
. $SCRIPTS/$step.sh || exit 1
done
# point PATH to new tools
export PATH=$PREFIX/bin:$PATH
# build GCC C compiler
echo "-> GCC: C compiler"
cd $BUILD_DIR/gcc
. $SCRIPTS/gcc-c.sh || exit 1
# build mingw-w64 crt
echo "-> MinGW-w64 CRT"
cd $BUILD_DIR/mingw-w64-crt
. $SCRIPTS/mingw-w64-crt.sh || exit 1
# build winpthreads
echo "-> Winpthreads"
cd $BUILD_DIR/winpthreads
. $SCRIPTS/winpthreads.sh || exit 1
# build the rest of GCC
echo "-> GCC: Full compiler suite"
cd $BUILD_DIR/gcc
. $SCRIPTS/gcc.sh || exit 1
# build the rest
for step in $POSTGCC_STEPS
do
echo "-> $step"
cd $BUILD_DIR/$step
. $SCRIPTS/$step.sh || exit 1
done
| true
|
e0c83ddbe7a65ecd1487d0fc3383f35e1c9cc676
|
Shell
|
cncf/devstats
|
/devel/drop_psql_db.sh
|
UTF-8
| 291
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Drop a PostgreSQL database by name, first terminating any sessions
# connected to it (Postgres refuses to drop a database with active
# connections).
db_name="$1"
if [ -z "$db_name" ]
then
  echo "$0: you need to provide db name"
  exit 1
fi
echo "Dropping $db_name"
# Kill every backend attached to the target database, then drop it.
./devel/db.sh psql postgres -c "select pg_terminate_backend(pid) from pg_stat_activity where datname = '$db_name'"
./devel/db.sh psql postgres -c "drop database if exists $db_name"
echo "Dropped $db_name"
| true
|
a592f5c16d2c4e0bfc76943d6600e9f68082700a
|
Shell
|
philipz/weave
|
/build/build.sh
|
UTF-8
| 1,617
| 4.28125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
GOPATH=/home/go
export GOPATH
WEAVE_SRC=$GOPATH/src/github.com/weaveworks/weave
if [ $# -eq 0 -o "$1" = "tests" ] ; then
# No arguments. Expect that the weave repo will be bind-mounted
# into $GOPATH
if ! [ -e $WEAVE_SRC ] ; then
cat 2>&1 <<EOF
No container arguments supplied, and nothing at ${WEAVE_SRC}. Please
either bind-mount the golang workspace containing weave with the
docker run -v option, e.g.:
$ docker run -v <host gopath>:${GOPATH} \\
-v /var/run/docker.sock:/var/run/docker.sock weaveworks/weave-build
Or supply git clone arguments to retrieve it, e.g.:
$ docker run -v /var/run/docker.sock:/var/run/docker.sock \\
weaveworks/weave-build https://github.com/weaveworks/weave.git
EOF
exit 1
fi
# If we run make directly, any files created on the bind mount
# will have awkward ownership. So we switch to a user with the
# same user and group IDs as source directory. We have to set a
# few things up so that sudo works without complaining later on.
uid=$(stat --format="%u" $WEAVE_SRC)
gid=$(stat --format="%g" $WEAVE_SRC)
echo "weave:x:$uid:$gid::$WEAVE_SRC:/bin/sh" >>/etc/passwd
echo "weave:*:::::::" >>/etc/shadow
echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
if [ "$1" = "tests" ] ; then
su weave -c "PATH=$PATH make -C $WEAVE_SRC tests"
else
su weave -c "PATH=$PATH make -C $WEAVE_SRC build"
fi
else
# There are arguments to pass to git-clone
mkdir -p ${WEAVE_SRC%/*}
git clone "$@" $WEAVE_SRC
make -C $WEAVE_SRC build
fi
| true
|
8f3b9c9e503629b122d82f988d935495946604a8
|
Shell
|
larueli/wordpress-nonroot
|
/deploy.sh
|
UTF-8
| 151
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Unpack WordPress into the web root, but only on the first run.
if [ -f /var/www/html/index.php ]; then
    : # index.php already present — site deployed, nothing to do
else
    cd /tmp
    unzip wordpress-${WORDPRESS_VERSION}.zip
    mv -f wordpress/* /var/www/html/
fi
| true
|
c47cc1176a9921ba3067d9420fe360d612f3b04b
|
Shell
|
DESY-CMS-SUS/cmg-cmssw
|
/CMGTools/TTHAnalysis/python/plotter/susy-1lep/make_binned_plots.sh
|
UTF-8
| 10,690
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ "$1" == "SingleLepAFS" ]]; then
shift # shift register
T="/afs/cern.ch/work/k/kirschen/public/PlotExampleSamples/V3";
FT="/afs/cern.ch/work/k/kirschen/public/PlotExampleSamples/PHYS14_V3_FriendsRefinedIds"
J=4;
elif [[ "$HOSTNAME" == *"lxplus"* ]] ; then
T="/afs/cern.ch/work/k/kirschen/public/PlotExampleSamples/V3";
FT="/afs/cern.ch/work/a/alobanov/public/SUSY/CMG/CMGtuples/FriendTrees/phys14_v3_btagCSVv2"
J=4;
elif [[ "$1" == "DESYV3" ]] ; then
shift # shift register
T="/nfs/dust/cms/group/susy-desy/Run2/MC/CMGtuples/Phys14_v3/ForCMGplot";
FT="/nfs/dust/cms/group/susy-desy/Run2/MC/CMGtuples/Phys14_v3/Phys14_V3_Friend_CSVbtag"
J=8;
elif [[ "$HOSTNAME" == *"naf"* ]] ; then
T="/nfs/dust/cms/group/susy-desy/Run2/MC/CMGtuples/Phys14_v3/ForCMGplot";
FT="/nfs/dust/cms/group/susy-desy/Run2/MC/CMGtuples/Phys14_v3/Phys14_V3_Friend_CSVbtag"
J=8;
else
echo "Didn't specify location!"
echo "Usage: ./susy-1lep/make_binned_plots.sh location analysis STValue"
echo "e.g. ./susy-1lep/make_binned_plots.sh SingleLepAFS 1l-makeBinnedPlots STInc
"
exit 0
fi
LUMI=3.0
OUTDIR="susy_cards_1l_4fb_test"
OPTIONS=" -P $T -j $J -l $LUMI -f --s2v --tree treeProducerSusySingleLepton --print-dir $OUTDIR --noStackSig --showIndivSigShapes --legendWidth 0.3 --lspam \"PHYS14\" --print png"
# Get current plotter dir
#PLOTDIR="$CMSSW_BASE/src/CMGTools/TTHAnalysis/python/plotter/"
PLOTDIR=$(pwd -P)
PLOTDIR=${PLOTDIR/plotter/plotterX}
PLOTDIR=$(echo $PLOTDIR | cut -d 'X' -f 1 )
PLOTDEFINITION="1l_TopnessBasics.txt"
# Append FriendTree dir
OPTIONS=" $OPTIONS -F sf/t $FT/evVarFriend_{cname}.root "
function makeBinnedPlots_1l {
local EXPR=$1; local BINS=$2; local SYSTS=$3; local OUT=$4; local GO=$5
CutFlowCard="1l_CardsFullCutFlow.txt"
# CutFlowCard="2l_CardsFullCutFlow.txt"
EXTRALABEL=""
# b-jet cuts
case $nB in
0B) GO="${GO} -R 1nB 0nB nBJetMedium30==0 "; EXTRALABEL="${EXTRALABEL} nB=0\n" ;;
1B) GO="${GO} -R 1nB 1nB nBJetMedium30==1 "; EXTRALABEL="${EXTRALABEL} nB=1\n" ;;
2B) GO="${GO} -R 1nB 2nB nBJetMedium30==2 "; EXTRALABEL="${EXTRALABEL} nB=2\n" ;;
2Btop) GO="${GO} -R 1nB 2nB nBJetMedium30==2&&Topness>5 "; EXTRALABEL="${EXTRALABEL} nB=2(+topness)\n" ;;
1p) GO="${GO} -R 1nB 1nBp nBJetMedium30>=1 "; EXTRALABEL="${EXTRALABEL} nB#geq1\n" ;;
2p) GO="${GO} -R 1nB 2nBp nBJetMedium30>=2 "; EXTRALABEL="${EXTRALABEL} nB#geq2\n" ;;
3p) GO="${GO} -R 1nB 3nBp nBJetMedium30>=3 "; EXTRALABEL="${EXTRALABEL} nB#geq3\n" ;;
esac;
# ST categories
case $ST in
STInc) GO="${GO} -R st200 st200Inf ST>200 "; EXTRALABEL="${EXTRALABEL} ST>200 GeV\n" ;;
ST0) GO="${GO} -R st200 st200250 ST>200&&ST<250 "; EXTRALABEL="${EXTRALABEL} 200<ST<250 GeV\n" ;;
ST1) GO="${GO} -R st200 st250350 ST>250&&ST<350 "; EXTRALABEL="${EXTRALABEL} 250<ST<350 GeV\n" ;;
ST2) GO="${GO} -R st200 st350450 ST>350&&ST<450 "; EXTRALABEL="${EXTRALABEL} 350<ST<450 GeV\n" ;;
ST3) GO="${GO} -R st200 st450550 ST>450&&ST<550 "; EXTRALABEL="${EXTRALABEL} 450<ST<550 GeV\n" ;;
ST4) GO="${GO} -R st200 st550700 ST>550&&ST<700 "; EXTRALABEL="${EXTRALABEL} 550<ST<700 GeV\n" ;;
ST5) GO="${GO} -R st200 st700Inf ST>700 "; EXTRALABEL="${EXTRALABEL} ST>700 GeV\n" ;;
STDynDP0) GO="${GO} -R st200 st200250 ST>200&&ST<250 -R dp1 dp10 fabs(DeltaPhiLepW)>1.0 "; EXTRALABEL="${EXTRALABEL} 200<ST<250 GeV\n #Delta#phi>1.0\n" ;;
STDynDP1) GO="${GO} -R st200 st250350 ST>250&&ST<350 -R dp1 dp10 fabs(DeltaPhiLepW)>1.0 "; EXTRALABEL="${EXTRALABEL} 250<ST<350 GeV\n #Delta#phi>1.0\n" ;;
STDynDP2) GO="${GO} -R st200 st350450 ST>350&&ST<450 -R dp1 dp075 fabs(DeltaPhiLepW)>0.75 "; EXTRALABEL="${EXTRALABEL} 350<ST<450 GeV\n #Delta#phi>0.75\n" ;;
STDynDP3) GO="${GO} -R st200 st450550 ST>450&&ST<550 -R dp1 dp075 fabs(DeltaPhiLepW)>0.75 "; EXTRALABEL="${EXTRALABEL} 450<ST<550 GeV\n #Delta#phi>0.75\n" ;;
STDynDP4) GO="${GO} -R st200 st550700 ST>550&&ST<700 -R dp1 dp05 fabs(DeltaPhiLepW)>0.5 "; EXTRALABEL="${EXTRALABEL} 550<ST<700 GeV\n #Delta#phi>0.5\n" ;;
STDynDP5) GO="${GO} -R st200 st700Inf ST>700 -R dp1 dp05 fabs(DeltaPhiLepW)>0.5 "; EXTRALABEL="${EXTRALABEL} ST>700 GeV\n #Delta#phi>0.5\n" ;;
esac;
if [[ "$CutFlowCard" == "2l_CardsFullCutFlow.txt" ]]; then
case $nJ in
23j) GO="${GO} -R geq6j 23j nCentralJet30>=1&&nCentralJet30<=2"; EXTRALABEL="${EXTRALABEL} 2-3 jets\n" ;;
45j) GO="${GO} -R geq6j 45j nCentralJet30>=3&&nCentralJet30<=4"; EXTRALABEL="${EXTRALABEL} 4-5 jets\n" ;;
68j) GO="${GO} -R geq6j 67j nCentralJet30>=5&&nCentralJet30<=7"; EXTRALABEL="${EXTRALABEL} 6-8 jets\n" ;;
6Infj) GO="${GO} -R geq6j geq6j nCentralJet30>=5"; EXTRALABEL="${EXTRALABEL} #geq6 jets\n" ;;
9Infj) GO="${GO} -R geq6j geq8j nCentralJet30>=8"; EXTRALABEL="${EXTRALABEL} #geq9 jets\n" ;;
esac;
else
case $nJ in
23j) GO="${GO} -R geq6j 23j nCentralJet30>=2&&nCentralJet30<=3"; EXTRALABEL="${EXTRALABEL} 2-3 jets\n" ;;
45j) GO="${GO} -R geq6j 45j nCentralJet30>=4&&nCentralJet30<=5"; EXTRALABEL="${EXTRALABEL} 4-5 jets\n" ;;
68j) GO="${GO} -R geq6j 67j nCentralJet30>=6&&nCentralJet30<=8"; EXTRALABEL="${EXTRALABEL} 6-8 jets\n" ;;
6Infj) GO="${GO} -R geq6j geq6j nCentralJet30>=6"; EXTRALABEL="${EXTRALABEL} #geq6 jets\n" ;;
9Infj) GO="${GO} -R geq6j geq8j nCentralJet30>=9"; EXTRALABEL="${EXTRALABEL} #geq9 jets\n" ;;
68TTj) GO="${GO} -R geq6j 68TTj nCentralJet30+2*nHighPtTopTagPlusTau23>=6&&nCentralJet30+2*nHighPtTopTagPlusTau23<9"; EXTRALABEL="${EXTRALABEL} 6-8 TT enh. jets\n" ;;
9InfTTj) GO="${GO} -R geq6j 9InfTTj nCentralJet30+2*nHighPtTopTagPlusTau23>=9"; EXTRALABEL="${EXTRALABEL} #geq9 TT enh. jets\n" ;;
esac;
fi
# jet multiplicities
# HT and "R&D" categories
case $HT in
HTInc) GO="${GO} -R ht500 ht500Inf HT>500"; EXTRALABEL="${EXTRALABEL} HT>500 GeV\n" ;;
HT0) GO="${GO} -R ht500 ht500750 HT>500&&HT<=750"; EXTRALABEL="${EXTRALABEL} 500<HT<750 GeV\n" ;;
HT1) GO="${GO} -R ht500 ht7501250 HT>750&&HT<=1250"; EXTRALABEL="${EXTRALABEL} 750<HT<1250 GeV\n" ;;
HT2) GO="${GO} -R ht500 ht1250Inf HT>1250"; EXTRALABEL="${EXTRALABEL} HT>1250 GeV\n" ;;
esac;
# "R&D" categories
case $RD in
Def) GO="${GO} "; EXTRALABEL="${EXTRALABEL} baseline\n" ;;
DPhi10) GO="${GO} "; EXTRALABEL="${EXTRALABEL} #Delta#phi>1.0\n" ;;
DPhi075) GO="${GO} -R dp1 dp075 fabs(DeltaPhiLepW)>0.75 "; EXTRALABEL="${EXTRALABEL} #Delta#phi>0.75\n" ;;
DPhi05) GO="${GO} -R dp1 dp05 fabs(DeltaPhiLepW)>0.5 "; EXTRALABEL="${EXTRALABEL} #Delta#phi>0.5\n" ;;
DPhi00) GO="${GO} -R dp1 dp00 fabs(DeltaPhiLepW)>0.0 "; EXTRALABEL="${EXTRALABEL} #Delta#phi>0.0\n" ;;
Stop) GO="${GO} -R dp1 dp05 fabs(DeltaPhiLepW)>0.5 -A dp1 stopness (TopVarsMETovTopMin[0]-0.5)/0.5+(TopVarsMtopMin[0]-175)/175>1.25"; EXTRALABEL="${EXTRALABEL} #Delta#phi>0.5+STop\n" ;;
Top) GO="${GO} -R dp1 dp05 fabs(DeltaPhiLepW)>0.5 -A dp1 stopness (TopVarsMETovTopMin[0]-0.5)/0.5+(TopVarsMtopMin[0]-175)/175>1.25&&Topness>5"; EXTRALABEL="${EXTRALABEL} #Delta#phi>0.5+STop+Top\n" ;;
LowLepPtStop) GO="${GO} -R 1tl 1tllowpt nTightLeps==1&&LepGood1_pt<=25 -R dp1 dp00 fabs(DeltaPhiLepW)>0.0 -A dp1 stopness (TopVarsMETovTopMin[0]-0.5)/0.5+(TopVarsMtopMin[0]-175)/175>1.25"; EXTRALABEL="${EXTRALABEL} soft lept.\n#Delta#phi>0.0+STop\n" ;;
LowLepPtTop) GO="${GO} -R 1tl 1tllowpt nTightLeps==1&&LepGood1_pt<=25 -R dp1 dp00 fabs(DeltaPhiLepW)>0.0 -A dp1 stopness (TopVarsMETovTopMin[0]-0.5)/0.5+(TopVarsMtopMin[0]-175)/175>1.25&&Topness>5"; EXTRALABEL="${EXTRALABEL} soft lept.\n#Delta#phi>0.0+STop+Top\n" ;;
HTLowLepPtDPhi) GO="${GO} -R 1tl 1tllowpt nTightLeps==1&&LepGood1_pt<=25"; EXTRALABEL="${EXTRALABEL} soft lept.\n#Delta#phi>1.0\n" ;;
TTYes) GO="${GO} -A dp1 TopTag nHighPtTopTagPlusTau23>=1"; EXTRALABEL="${EXTRALABEL} TopTag\n" ;;
TTNo) GO="${GO} -A dp1 TopTag nHighPtTopTagPlusTau23==0"; EXTRALABEL="${EXTRALABEL} no TopTag\n" ;;
esac;
echo $EXTRALABEL
if [[ "$PRETEND" == "1" ]]; then
echo "making plots using python $PLOTDIR/mcPlots.py $PLOTDIR/mca-Phys14_1l.txt $PLOTDIR/susy-1lep/$CutFlowCard $PLOTDIR/susy-1lep/$PLOTDEFINITION -o $OUTDIR/$OUT $GO --extraLabel \"$EXTRALABEL\";"
else
echo "python $PLOTDIR/mcPlots.py $PLOTDIR/mca-Phys14_1l.txt $PLOTDIR/susy-1lep/$CutFlowCard $PLOTDIR/susy-1lep/$PLOTDEFINITION -o $OUTDIR/$OUT $GO --extraLabel \"$EXTRALABEL\";"
python $PLOTDIR/mcPlots.py $PLOTDIR/mca-Phys14_1l.txt $PLOTDIR/susy-1lep/$CutFlowCard $PLOTDIR/susy-1lep/$PLOTDEFINITION -o $OUT $GO --extraLabel "$EXTRALABEL";
echo " -- done at $(date)";
fi;
}
if [[ "$1" == "--pretend" ]]; then
PRETEND=1; shift;
echo "# Pretending to run"
fi;
if [[ "$1" == "1l-makeBinnedPlots" ]]; then
SYSTS="syst/susyDummy.txt"
CnC_expr="1" #not used as of now
CnC_bins="[0.5,1.5]"
STValue="$2"
echo "$STValue"
echo "Making individual datacards"
for ST in "$STValue"; do for nJ in 6Infj; do for nB in 1p; do for HT in HT1; do for RD in DPhi00 DPhi05 DPhi075 DPhi10; do
# for ST in "$STValue"; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 3p; do for HT in HT0 HT1; do for RD in DPhi00 DPhi05 DPhi075 DPhi10; do
# for ST in ST0 ST1 ST2 ST3 ST4 STInc; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 3p; do for HT in HT0 HT1; do for RD in DPhi00 DPhi05 DPhi075 DPhi10; do
#for baseline analysis:
# for ST in "$STValue"; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 3p; do for HT in HT0 HT1 HT2; do for RD in Def; do
# for ST in STDynDP0 STDynDP1 STDynDP2 STDynDP3 STDynDP4 STDynDP5; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 3p; do for HT in HT0 HT1 HT2; do for RD in Def; do
#for dphi>0.5, single topness, topness, and soft lepton analysis:
# for ST in "$STValue"; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 3p; do for HT in HT0 HT1 HT2; do for RD in Def; do
# for ST in ST0 ST1 ST2 ST3 ST4 ST5; do for nJ in 45j 68j 6Infj 9Infj; do for nB in 1p 1B 2B 2Btop 3p; do for HT in HT0 HT1 HT2; do for RD in DPhi05 Stop LowLepPtStop; do
echo " --- CnC2015X_${nB}_${ST}_${nJ}_${HT}_${RD} ---"
makeBinnedPlots_1l $CnC_expr $CnC_bins $SYSTS CnC2015X_${nB}_${ST}_${nJ}_${HT}_${RD} "$OPTIONS";
done; done; done; done; done;
# done;
exit 0
fi
echo "Done at $(date)";
| true
|
556766c18daa8f0354f8015e28bde0ab988a158f
|
Shell
|
avodvudStudy/Semestr_5
|
/Git/zadaniaBash/opisKatalogu
|
UTF-8
| 247
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Describe a directory tree: for each directory print its name, each regular
# file's basename, and the count of regular files; recurse into
# subdirectories.
#
# Fixed: the original iterated over `ls $1` basenames but tested `-d $nazwa`
# relative to the *current* directory, so subdirectory detection and
# recursion only worked when cwd happened to equal $1. Globbing "$1"/* keeps
# full paths and is whitespace-safe.
if [ -d "$1" ]; then
	count=0
	echo "nazwa katalogu $1"
	for nazwa in "$1"/*; do
		[ -e "$nazwa" ] || continue   # empty directory: glob stays literal
		if [ -d "$nazwa" ]; then
			# "$0" instead of ./opisKatalogu so recursion works from any cwd
			bash "$0" "$nazwa"
		else
			count=$((count+1))
			echo "${nazwa##*/}"
		fi
	done
	echo ""
	echo $count
	echo ""
fi
| true
|
a280fb7360a85a5ae7c0b2b73b3bd1dd4729c3c6
|
Shell
|
itlonewolf/knowledge
|
/script.sh
|
UTF-8
| 1,947
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
##實現顯示指定包名APP的ERROR以上級別的日誌,建議崩潰之後,重新開啓此腳本
show_error_log(){
#先清空屏幕日誌
adb logcat -c
echo 請輸入包名
read package_name
pid=`adb shell ps | grep $package_name | awk '{print $2}'`
adb logcat -v time *:E | grep -E --color=auto $pid
}
start_drozer(){
adb forward tcp:31415 tcp:31415
drozer console connect
}
show_v_log(){
echo 請輸入包名
read package_name
pid=`adb shell ps | grep $package_name | awk '{print $2}'`
#先清空屏幕日誌
adb logcat -c
adb logcat -v time *:V | grep -E --color=auto $pid
}
chmod_file_and_subfile(){
echo 请输入文件夹路径
read file_path
sudo chmod 777 -R $file_path
}
start_uml_util(){
cd /home/yee/tools/SOFTWARE/astah
sh astah
}
# Show logs of all levels for a single logcat tag.
show_log_with_tag(){
echo 請輸入tag
read tag
# Clear the buffer, then restrict output to the requested tag only (-s)
adb logcat -c
adb logcat -v time -s $tag
}
# Build a release APK with ant and install it, instrumented with the
# NBS/TingYun class-rewriter java agent.
release_install_sixfoot(){
cd /home/yee/dev/git_repo/xingzongex
# Translated from the original Chinese note: /home/yee/tools/sdk must be
# the SDK directory — the jar has to live inside the SDK tree, not in an
# arbitrary location.
export ANT_OPTS="-javaagent:/home/yee/tools/sdk/nbs.newlens.class.rewriter.jar"
ant clean release install
}
# Watch the NBSAgent tag to verify the TingYun agent was embedded.
isTingyunEncoded(){
echo 判断听云是否成功嵌入
adb logcat -v time -s NBSAgent:V
}
# Interactive menu: let the user pick one of the helper actions defined above.
options=(error debug tag tingyun all drozer ant_install_sixfoot chmod_file uml_util)
echo "显示什么?"
select choice in ${options[*]}; do
break;
done
echo You have selected $choice
# Dispatch on the selected option name.
case "$choice" in
${options[0]})
show_error_log
;;
${options[1]})
# NOTE(review): the 'debug' option has no action — confirm whether this
# arm was intentionally left empty.
;;
${options[2]})
show_log_with_tag
;;
${options[3]})
isTingyunEncoded
;;
${options[4]})
show_v_log
;;
${options[5]})
start_drozer
;;
${options[6]})
release_install_sixfoot
;;
${options[7]})
chmod_file_and_subfile
;;
${options[8]})
start_uml_util
;;
*)
echo invalid option
;;
esac
# scribes.desktop
| true
|
b4604baa9631cdbffaaa83c830fafbf6d930ecf3
|
Shell
|
jsfr/SteinerExact
|
/experiments/helpers/convert_defs.sh
|
UTF-8
| 592
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/zsh
# Convert Steiner-tree problem instances from STP format to JSON.
#   $1 - source directory: one subdirectory per instance, holding .stp files
#   $2 - destination directory: mirrored layout, .json files
# Note: relies on zsh's builtin `echo` interpreting `\n` escapes by default.
from=$1
to=$2
for instance in `ls $from`; do
rm -rf $to/$instance
mkdir $to/$instance
for file in `ls $from/$instance`; do
{
echo -n '{\n "nodes": ';
# The "Nodes N" line in the STP header supplies the node count
grep 'Nodes' $from/$instance/$file | sed 's/Nodes //' | tr -d '\n'
echo ',\n "points": [';
# "D x y z ..." lines hold the point coordinates; turn each into a JSON
# array row: comma-separate values, strip the trailing comma, wrap in
# brackets, and drop the comma after the final row ($s/,$//).
grep -E "^(D)+ [ \.0-9\-]*$" $from/$instance/$file | \
cut -d " " -f 3- | \
sed -e 's/\(\w\) /\1, /g;s/, $//;s/^.*$/\ [ \0 \],/;$s/,$//';
echo ' ]\n}';
# Write to the instance's mirror directory, swapping the .stp suffix
} > $to/$instance/${file/stp/json}
done
done
| true
|
ac2f10249d0ef958fb0357bdf88b8a45c728bfcc
|
Shell
|
davidcawork/Investigacion-Resultados
|
/ssoo_p2_datos/auto_sim.sh
|
UTF-8
| 314
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the bubble-sort benchmark a given number of times, regenerating the
# input data before each run and storing one result file per run.
#   $1 - number of repetitions to perform
NUM_REPETICIONES=$1
RUN=1
while (( RUN <= NUM_REPETICIONES )); do
	echo -e "Ejecutandose la prueba $RUN del bubble sort simple\n\n"
	make datos
	./practica2_std > ./Resultados/data/data_bubble_sort_opt_$RUN.txt
	(( RUN++ ))
done
| true
|
11718c9f3abdeb720f9d034953ba134c3ed2606d
|
Shell
|
mdp/docker-tor-hidden-services
|
/start-tor
|
UTF-8
| 854
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: build a torrc for a hidden service from
# docker-link environment variables, optionally install a user-supplied
# private key, fix file permissions, and start tor as the unprivileged user.
USERNAME="toruser"
USERPATH="/home/toruser"
# Matches docker-link variables of the form <PORT>_TCP=tcp://<ip>:<port>
REGEX='\([0-9]*\)_TCP=tcp://\([0-9]\{1,3\}\.\)\{3\}\([0-9]\)\{1,3\}\:\([0-9]*\)'
# Rewrite every match as a "HiddenServicePort <port> <ip>:<port>" line
MATCH="$(env | grep -o $REGEX | sed -e "s/_TCP=tcp:\/\// /" -e "s/:/ /" | awk '{ printf "HiddenServicePort %s %s:%s\n", $1, $2, $3 }')"
if [ ! -z "$MATCH" ]; then
echo "$MATCH" >> "$USERPATH/torrc"
fi
# If the user mounted a private key, install it for the hidden service
if [ -f /root/hidden_service.key ]; then
printf "\nUsing a user supplied private key \n\n"
cp /root/hidden_service.key $USERPATH/tor/hidden_service/private_key
chmod 600 $USERPATH/tor/hidden_service/private_key
fi
# Deal with all the permissions issues for tor
chown -R $USERNAME:$USERNAME $USERPATH/tor
chmod 700 $USERPATH/tor/hidden_service
printf "\n--- Using the following torrc config ---\n"
cat $USERPATH/torrc
printf "\n\n -------- \n\n"
# Run tor in the foreground as the unprivileged user
su -c "/usr/bin/tor -f $USERPATH/torrc" -m "$USERNAME"
| true
|
b8080310b754916d4f22a92f198fb88be8ac819c
|
Shell
|
karser/EasyQuickImport
|
/docker/bin/copy-env.sh
|
UTF-8
| 2,147
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate the docker-compose `.env` and the application `.app_env` files
# from their .dist templates, substituting CI variables (GitLab CI names)
# with local fallbacks.
APPLICATION=${CI_PROJECT_DIR:-..}
APP_ENV=${APP_ENV:-prod}
ENVIRONMENT=${ENVIRONMENT:-local}
DOCKER_ENV=${DOCKER_ENV:-prod}
COMPOSE_PROJECT_NAME=easyimport
# Prefer the CI tag/branch; fall back to querying git directly
GIT_TAG=${CI_COMMIT_TAG:-$(git describe --tags --exact-match || true)}
GIT_BRANCH=${CI_COMMIT_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}
DATE_ISO=$(date -I'seconds')
# Version string: "<tag-or-branch>-<timestamp>"
VERSION=${GIT_TAG:-$GIT_BRANCH}-${DATE_ISO}
echo "APP_ENV: ${APP_ENV} VERSION: ${VERSION}"
TAG=${CI_COMMIT_REF_SLUG:-latest}
PUID=$(id -u)
PGID=$(id -g)
REGISTRY=${CI_REGISTRY}
REGISTRY_IMAGE=${CI_REGISTRY_IMAGE}
REGISTRY_USER=${CI_REGISTRY_USER}
REGISTRY_PASSWORD=${CI_REGISTRY_PASSWORD}
# Pick the compose file matching the docker environment
case "$DOCKER_ENV" in
"prod")
COMPOSE_FILE=docker-compose.prod.yml
;;
"test")
COMPOSE_FILE=docker-compose.test.yml
;;
esac
# docker env file
# ('#' is used as the sed delimiter because values may contain '/')
sed -e" \
s#^DOCKER_ENV=.*#DOCKER_ENV=$DOCKER_ENV#; \
s#APP_ENV=.*#APP_ENV=$APP_ENV#; \
s#ENVIRONMENT=.*#ENVIRONMENT=$ENVIRONMENT#; \
s#APPLICATION=.*#APPLICATION=$APPLICATION#; \
s#PUID=.*#PUID=$PUID#; \
s#PGID=.*#PGID=$PGID#; \
s#REGISTRY=.*#REGISTRY=$REGISTRY#; \
s#REGISTRY_IMAGE=.*#REGISTRY_IMAGE=$REGISTRY_IMAGE#; \
s#REGISTRY_USER=.*#REGISTRY_USER=$REGISTRY_USER#; \
s#REGISTRY_PASSWORD=.*#REGISTRY_PASSWORD=$REGISTRY_PASSWORD#; \
s#TAG=.*#TAG=$TAG#; \
s#COMPOSE_FILE=.*#COMPOSE_FILE=$COMPOSE_FILE#; \
s#COMPOSE_PROJECT_NAME=.*#COMPOSE_PROJECT_NAME=$COMPOSE_PROJECT_NAME#; \
" .env.dist > .env
# Optional overrides are only applied when the variable is set
if [ ! -z "$VIRTUAL_HOST" ] ; then
sed -i " \
s#^VIRTUAL_HOST=.*#VIRTUAL_HOST=$VIRTUAL_HOST#; \
" .env
fi
# app env file
sed -e" \
s#^APP_ENV=.*#APP_ENV=$APP_ENV#; \
s#^VERSION=.*#VERSION=$VERSION#; \
s#COMPOSE_FILE=.*#COMPOSE_FILE=$COMPOSE_FILE#; \
" ${APPLICATION}/.env > .app_env
if [ ! -z "$DATABASE_URL" ] ; then
sed -i " \
s#^DATABASE_URL=.*#DATABASE_URL=$DATABASE_URL#; \
" .app_env
fi
if [ ! -z "$ENVIRONMENT" ] ; then
sed -i " \
s#^DEPLOYMENT=.*#DEPLOYMENT=$ENVIRONMENT#; \
" .app_env
fi
if [ ! -z "$MAILER_DSN" ] ; then
sed -i " \
s#^MAILER_DSN=.*#MAILER_DSN=$MAILER_DSN#; \
" .app_env
fi
| true
|
0c60e227390c65a6746a415d3e0c295b05951fc8
|
Shell
|
nsnycde/docker-arm-mysql
|
/install-mysql.bash
|
UTF-8
| 615
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Image-build step: install MySQL non-interactively inside a Docker layer,
# preseeding the root password and trimming apt caches afterwards.
set -e
export DEBIAN_FRONTEND=noninteractive
## Setup the root user password
# NOTE(review): the root password is hard-coded to the literal "password";
# acceptable only for throwaway/dev images — confirm.
debconf-set-selections <<< "mysql-server mysql-server/root_password password password"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password password"
## Update the apt lists
apt-get update
## Install MySQL (major version selected via the MYSQL_MAJOR build arg/env)
apt-get install -y mysql-server-${MYSQL_MAJOR} tzdata
## Clean up any mess
apt-get clean autoclean
apt-get autoremove -y
rm -rf /var/lib/apt/lists/*
## Empty out the default MySQL data directory, its for our entrypoint script
rm -rf /var/lib/mysql
mkdir -p /var/lib/mysql
| true
|
4f00fd0392bc628e563daff7e391f024235fa9e1
|
Shell
|
11uhafnk/quadrube
|
/autobuild.sh
|
UTF-8
| 2,084
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# autobuild: watch the Go sources and rebuild/run (or deploy remotely with
# -r) whenever any watched file's change time differs from the last pass.
clear
ERROR_STATE=0
MODULE_NAME=test
ROOT_PATH=$GOPATH/src
# MODULE_BIN_NAME='bin/ldc_search.bin'
#PATH_TO_LIB="../lib"
# PATH_TO_WUI="../wui"
LOG_FILE="logs/run.log"
CNTR_FILE="logs/build_counter.bld"
# Visual separators printed around each build's output
CUT_LINE_BEGIN="--------------------------->8---------------------------------"
CUT_LINE_END="---------------------------8<---------------------------------"
# Persisted build counter (one number in the file)
read BUILD_CNT < $CNTR_FILE
# if [ $? -ne 0 ] ; then
# echo 'First run...'
# BUILD_CNT=0
# echo $BUILD_CNT > $CNTR_FILE
# fi
SERVER=as-nginx1
RUNFILE="quadrube"
RESULTFILE="result.csv"
RUN="./$RUNFILE"
CODE="."
# Files to watch for changes (all .go sources under the tree)
SPY=$(find . -name '*.go')
# SPY="$SPY DCpp-global.json DCpp-local.json"
# Copy the freshly built binary to the server, run it there, fetch the
# result file back, then delete the remote copies.
# Fix: the third line was missing its `&& \`, so the trailing `ssh ...`
# words were passed as extra arguments to `scp` and the remote cleanup
# never ran as its own command.
function remoteRun {
scp $RUNFILE $SERVER: && \
ssh $SERVER $RUN && \
scp $SERVER:$RESULTFILE ./ && \
ssh $SERVER rm -f $RUNFILE $RESULTFILE
}
echo spy: $SPY
# Stop any previously running instance before entering the watch loop
killall $RUN
# NOTE(review): `cat ''` always errors (empty filename); presumably meant
# to truncate $LOG_FILE — `: > $LOG_FILE` would do that. Confirm intent.
cat '' > $LOG_FILE
# Poll once per second; rebuild whenever the newest change time of any
# watched file differs from the one recorded on the previous pass.
while true
do
ATIME=`stat -c %Z $SPY 2>/dev/null`
if [[ "$ATIME" != "$LTIME" ]]
then
clear
echo -e "$(tput bold)autobuild: [qonetime].[\E[32m'$MODULE_NAME'\E[39m], please wait...$(tput sgr0)"
echo -e "\E[36m"
echo "Last build number: "$BUILD_CNT
# Kill the currently running binary, if any
pgrep -f -x $RUN > /dev/null 2>&1
if [ $? -eq 0 ] ; then
pkill -f -x $RUN
killall $RUN
fi
# Only log/advance the build counter when the previous build succeeded
if [ $ERROR_STATE -ne 0 ];
then
echo $CUT_LINE_BEGIN
else
echo $CUT_LINE_BEGIN
echo $(date +%c)' '$MODULE_NAME' build number: '$BUILD_CNT
echo $(date +%c)' '$MODULE_NAME' build number: '$BUILD_CNT >> logs/build.bld
echo $BUILD_CNT > $CNTR_FILE
fi
time go build $CODE
if [ $? -eq 0 ]; then
ERROR_STATE=0
BUILD_CNT=$((BUILD_CNT+1))
echo $CUT_LINE_END
echo -e "\E[0;39m"
# -r: deploy and run on the remote server; otherwise run locally
if [[ $1 = "-r" ]]; then
remoteRun
else
$RUN &
fi
else
ERROR_STATE=1
fi
LTIME=$ATIME
fi
# echo -e "\E[32m"
# echo 'TICK: '$ATIME' ? '$LTIME
sleep 1
done
| true
|
086fa671db3d7f500622730eda8480820190febb
|
Shell
|
thongdong7/autoimport
|
/bin/publish.sh
|
UTF-8
| 713
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Publish pipeline: bootstrap the lerna monorepo, compile the core package,
# publish the npm package (patch bump), then publish the VS Code extension.
set -e
# Absolute path of this script's directory; run everything from the repo root
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/..
# TMP_FOLDER=/tmp/autoimport
# rm -rf $TMP_FOLDER || true
# echo "Copy soure code to another folder"
# cp -R $DIR/.. $TMP_FOLDER
# cd $TMP_FOLDER
lerna clean --yes
echo Bootstrap project...
lerna bootstrap
echo Compile to ES5
cd packages/core
yarn
yarn run compile
cd ../..
echo Publish the npm package...
lerna publish --yes --cd-version patch
echo Publish vscode package...
cd packages/vscode
# Need to install the package from npm as `vsce` does not support linked package.
rm -Rf node_modules/ package-lock.json
npm install --no-optional
# Remove stale extension packages; ignore the error when none exist
rm *.vsix || true
vsce publish
# Restore the linked workspace state for local development
lerna clean --yes
lerna bootstrap
| true
|
43a12415b49e79f448e1bf142cbf314036012735
|
Shell
|
StuPro-TOSCAna/TOSCAna
|
/utils/jenkins/deploy-to-local-docker.sh
|
UTF-8
| 1,421
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# IMPORTANT
# This script is meant to be executed after the Build process has finished.
# It builds a minimal docker image to deploy the application on the local docker daemon
# (application will be running on port 9001)
# Make the Working Directory
echo "Creating Working Directory"
mkdir server/target/docker_deploy
# Copying Dockerfile to working Directory
echo "Copying Dockerfile"
cp utils/jenkins/Dockerfile server/target/docker_deploy
cp docker/alpine-dind/toscana-dind-entrypoint.sh server/target/docker_deploy
cp docker/alpine-dind/install-deps.sh server/target/docker_deploy
cp docker/alpine-dind/cleanup.sh server/target/docker_deploy
# Copying server.jar in working Directory
echo "Copying server.jar"
cp server/target/server-1.0-SNAPSHOT.jar server/target/docker_deploy/server.jar
echo "Navigating into Working Directory"
cd server/target/docker_deploy
# Tear down any previous deployment; `|| true` keeps the script going when
# no container/image exists yet
echo "Stopping old container (if running)"
docker stop toscana || true
echo "Deleting Container"
docker rm toscana || true
echo "Deleting Docker image (if present)"
docker rmi toscana/toscana:alpine-build || true
echo "Building Docker image"
docker build . -t toscana/toscana:alpine-build
echo "Running Docker image"
# Bind only on localhost:9001; --privileged is required for docker-in-docker
docker run -d -p 127.0.0.1:9001:8080 --privileged \
-v toscana_data:/toscana/data --restart=unless-stopped \
--name=toscana toscana/toscana:alpine-build
cd ..
echo "Removing Working Directory"
rm -r docker_deploy
| true
|
94880a390ba0e4e5b4d09cef5ac77125cd99d217
|
Shell
|
milliQan-sw/milliqanOffline
|
/Run3Detector/compile.sh
|
UTF-8
| 1,056
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile the milliqanOffline macro: stamp the current git tag into a
# temporary copy of OfflineFactory.cc, then build with g++.
#   $1 - name of the output binary
if [ $# != 1 ]
then echo "Please provide exactly one argument, the desired macro name."
exit
fi
NAME=$1
# Short tag (nearest annotated tag) and long descriptive tag (tag-commits-hash)
SHORT_TAG=`git describe --tag --abbrev=0`
LONG_TAG=`git describe --tags --long`
#echo $SHORT_TAG
echo "milliqanOffline version $LONG_TAG"
# Substitute the version placeholders into a throwaway copy of the source
sed "s/shorttagplaceholder/$SHORT_TAG/g" src/OfflineFactory.cc > src/OfflineFactory_temporary_for_compile.cc
sed -i "s/longtagplaceholder/$LONG_TAG/g" src/OfflineFactory_temporary_for_compile.cc
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# setup.sh is expected to define MILLIDAQDIR, ROOT_INCLUDE_PATH, etc.
source ${SCRIPT_DIR}/setup.sh
g++ -o $NAME src/runOfflineFactory.cc ./src/jsoncpp.cpp ./src/OfflineFactory_temporary_for_compile.cc ${MILLIDAQDIR}/src/ConfigurationReader.cc ${MILLIDAQDIR}/libMilliDAQ.so -lpython2.7 `root-config --cflags --glibs` -Wno-narrowing -I$SCRIPT_DIR -I$MILLIDAQDIR -I$ROOT_INCLUDE_PATH # same as above but with correct local file path
if [ $? -eq 0 ]; then
echo "Compiled macro $NAME"
else
echo "FAILED to compile macro $NAME"
fi
# Always remove the stamped temporary source
rm ./src/OfflineFactory_temporary_for_compile.cc
| true
|
5101be7f7c206b40d672c1e683338f29196f922d
|
Shell
|
bhavani125/Assignment-problems
|
/class/salary20days.sh
|
UTF-8
| 346
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Simulate an employee's salary over a 20-day period.
# Each day the employee is randomly full-time (8 h), part-time (4 h) or
# absent (0 h); the salary is total hours worked times the hourly wage.
#
# Fix: the original computed totalsalary AFTER the loop from the last
# day's workHrs only — hours are now accumulated across all 20 days.
isfulltime=1
isparttime=2
empWorkperHr=100
totalworkHrs=0
for ((day=0;day<20;day++))
do
	# 0 = absent, 1 = full time, 2 = part time
	randomnumber=$((RANDOM%3))
	if [ $randomnumber -eq $isfulltime ]
	then
		workHrs=8
	elif [ $randomnumber -eq $isparttime ]
	then
		workHrs=4
	else
		workHrs=0
	fi
	# Accumulate this day's hours into the running total
	totalworkHrs=$((totalworkHrs+workHrs))
done
totalsalary=$(($empWorkperHr*$totalworkHrs))
echo "employee salary:-"$totalsalary
| true
|
1065649aefc711d3ac7af6485f39fec36d96b76c
|
Shell
|
tro3373/dotfiles
|
/bin/url_encode
|
UTF-8
| 277
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# URL-encode the arguments (or stdin, when piped) and print the encoded
# string. Uses curl's --data-urlencode against an empty URL and extracts
# the encoded query from %{url_effective}; `cut -c 3-` strips the leading
# "?@" (curl is given the data via "@-", i.e. read from stdin).
#
# Fix: corrected the user-facing typo "Speciry url" -> "Specify url" and
# quoted the stderr message.
main() {
	if [[ $# -eq 0 && ! -p /dev/stdin ]]; then
		echo "Specify url" 1>&2
		exit 1
	fi
	# Feed either stdin or the joined arguments into curl
	if [[ -p /dev/stdin ]]; then
		cat -
	else
		echo "$@"
	fi | curl -Gso /dev/null -w '%{url_effective}' --data-urlencode @- "" | cut -c 3-
}
main "$@"
| true
|
292d704d400495679fc0c37c7e9f622e2aa2596d
|
Shell
|
kristianmandrup/ruby-rails-dev-bashing
|
/generator_helpers.sh
|
UTF-8
| 639
| 3.15625
| 3
|
[] |
no_license
|
# Scaffold a Rails 3 generator and package it as a "jewel" gem.
#   $1 - generator name (gem will be named gem_<name>)
# Relies on external helpers: jewel_build, and the `script/generate` and
# `mate` commands being available in the current project/environment.
function rails3_generator {
generator_name=$1
gem_name=gem_$generator_name
shift 1
script/generate generator $generator_name
cd lib/generators
# create jewel
jewel_build $gem_name
cd lib
# remove default rb file
# create directory structure for generator
rm *.rb
mkdir generators
# go to top dir
cd ../..
# # move generator into jewel
mv $generator_name $gem_name/lib/generators
echo "Set in Rakefile: gem.files=Dir['lib/**/*.rb']"
echo "Add template files to /templates dir"
echo "When done: $ jewel_install"
# NOTE(review): "$name" below is never set in this function — probably
# meant $gem_name; confirm before relying on this hint text.
echo "Then add: gem '$name' to Gemfile and then to install"
echo "$ gem bundle"
mate $gem_name
}
| true
|
897e77a2ceb05ac86af75550e7b69b66bf6f7e23
|
Shell
|
thm-projects/arsnova-router
|
/src/nuc/pipeline/download_click.sh
|
UTF-8
| 368
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Fetch the latest arsnova.click build artifact if its build number differs
# from the last one we processed; exit 42 when nothing new is available.
CLICK_PIPE="https://git.thm.de/arsnova/arsnova.click/-/jobs/artifacts/staging/download?job=build"
# download.sh provides get_build_no and download_build
. "./download.sh"
# NOTE(review): the build number is passed back via the exit status ($?),
# which is limited to 0-255 — confirm build numbers stay in that range.
get_build_no $CLICK_PIPE
CLICK_BUILD_NO=$?
# Build number of the previously downloaded artifact
LAST_CLICK_BUILD=`cat click.build`
if [ "$LAST_CLICK_BUILD" != "$CLICK_BUILD_NO" ]
then
download_build $CLICK_BUILD_NO $CLICK_PIPE "click"
sh update_click.sh $CLICK_BUILD_NO
else
# 42: sentinel status meaning "no new build to download"
exit 42
fi
| true
|
cc631d6056d70a8789a38c7b2f2db7cb02b1a62d
|
Shell
|
yinghai9989/OpenStackDeploy
|
/DeployScripts/ceph-cluster-install.sh
|
UTF-8
| 3,711
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Drive a two-part Ceph cluster installation from a deploy host:
# sync /etc/hosts across nodes, run part 1 on the admin node (inside a
# screen session), distribute the admin node's ssh key, run part 2, and
# finally record the chosen config-file prefixes.
#   $1 - run date, used to name per-run log files
CMD_PATH=.
DST_PATH=./conf_orig
CONF_DEPLOY_DIR=./conf_deploy
RUN_DATE=$1
# Pick the localized message strings (STR_* variables) by configured locale
MY_LOCALE=$($CMD_PATH/get-conf-data.sh ./locale.txt LOCALE)
if [ $MY_LOCALE = 'CN' ]; then
source ./locale_cn.txt
else
source ./locale_en.txt
fi
#Start ceph cluster installation
mkdir -p $CONF_DEPLOY_DIR
#Modify /etc/hosts for ceph nodes
# Client/server nodes get the server host entries appended (old copies of
# those entries are removed first to avoid duplicates)
echo "Modify /etc/hosts for ceph nodes"
for MY_IP in $(cat ./Ceph-Install/conf/ceph-client-server-nodes-ext-ip.txt); do
echo $MY_IP
rsync -vaI root@$MY_IP:/etc/hosts ./
./Ceph-Install/delete-file2-in-file1.sh ./hosts ./Ceph-Install/conf/ceph-server-nodes-hosts.txt
cat ./Ceph-Install/conf/ceph-server-nodes-hosts.txt >> ./hosts
rsync -vaI ./hosts root@$MY_IP:/etc/
done
# The admin node additionally gets the client host entries
for MY_IP in $(cat ./Ceph-Install/conf/ceph-admin-node-ext-ip.txt); do
echo $MY_IP
rsync -vaI root@$MY_IP:/etc/hosts ./
./Ceph-Install/delete-file2-in-file1.sh ./hosts ./Ceph-Install/conf/ceph-server-nodes-hosts.txt
cat ./Ceph-Install/conf/ceph-server-nodes-hosts.txt >> ./hosts
./Ceph-Install/delete-file2-in-file1.sh ./hosts ./Ceph-Install/conf/ceph-client-nodes-hosts.txt
cat ./Ceph-Install/conf/ceph-client-nodes-hosts.txt >> ./hosts
rsync -vaI ./hosts root@$MY_IP:/etc/
done
rm -f ./hosts
echo $STR_BEGIN_CEPH_CLUSTER_INSTALL_PART_1
# Run install part 1 on the admin node inside a detached screen session
screen -dmS niu -U -t sleeping $CMD_PATH/sleep-x-seconds.sh 10
$CMD_PATH/check-screen-started.sh
MY_IP=$(head -n 1 ./Ceph-Install/conf/ceph-admin-node-ext-ip.txt)
screen -S niu -U -X screen -U -t $MY_IP $CMD_PATH/run-on-ceph-node.expect $MY_IP ceph-install-part-1.sh $RUN_DATE-ceph-install-part-1-$MY_IP.log
$CMD_PATH/check-screen-ended.sh
echo $STR_GET_LOG_FILE_FROM_SERVERS
rsync -va $MY_IP:/root/Ceph-Install/log/$RUN_DATE-ceph-install-part-1-$MY_IP.log $CMD_PATH/log/
#Copy ssh public key of ceph admin node to ceph client and server nodes
echo "Copy ssh public key of ceph admin node to ceph client and server nodes"
CEPH_ADMIN_NODE_IP=$(head -n 1 ./Ceph-Install/conf/ceph-admin-node-ext-ip.txt)
rsync -vaI root@$CEPH_ADMIN_NODE_IP:/root/.ssh/id_rsa.pub ./
for MY_IP in $(cat ./Ceph-Install/conf/ceph-client-server-nodes-ext-ip.txt); do
rsync -vaI ./id_rsa.pub root@$MY_IP:/root/
ssh root@$MY_IP "touch /root/.ssh/authorized_keys;cat /root/id_rsa.pub >> /root/.ssh/authorized_keys;rm -f /root/id_rsa.pub;"
done
rm -f ./id_rsa.pub
echo $STR_BEGIN_CEPH_CLUSTER_INSTALL_PART_2
# Run install part 2 the same way as part 1
screen -dmS niu -U -t sleeping $CMD_PATH/sleep-x-seconds.sh 10
$CMD_PATH/check-screen-started.sh
MY_IP=$(head -n 1 ./Ceph-Install/conf/ceph-admin-node-ext-ip.txt)
screen -S niu -U -X screen -U -t $MY_IP $CMD_PATH/run-on-ceph-node.expect $MY_IP ceph-install-part-2.sh $RUN_DATE-ceph-install-part-2-$MY_IP.log
$CMD_PATH/check-screen-ended.sh
echo $STR_GET_LOG_FILE_FROM_SERVERS
rsync -va $MY_IP:/root/Ceph-Install/log/$RUN_DATE-ceph-install-part-2-$MY_IP.log $CMD_PATH/log/
#
##########################################################################################
# Record the selected config-file prefixes for this run in a history CSV
PREFIX_CEPH_ADMIN_NODE=$($CMD_PATH/get-max-prefix.sh $DST_PATH ceph-admin-node.txt)
PREFIX_CEPH_MON_NODE=$($CMD_PATH/get-max-prefix.sh $DST_PATH ceph-mon-node.txt)
PREFIX_CEPH_OSD_NODE=$($CMD_PATH/get-max-prefix.sh $DST_PATH ceph-osd-node.txt)
PREFIX_CEPH_MDS_NODE=$($CMD_PATH/get-max-prefix.sh $DST_PATH ceph-mds-node.txt)
HISTROY_FILE=ceph-install-prefix-history.txt
if [ ! -e $CONF_DEPLOY_DIR/$HISTROY_FILE ]; then
echo "ceph-admin,ceph-mon,ceph-osd,ceph-mds" > $CONF_DEPLOY_DIR/$HISTROY_FILE
fi
echo "$PREFIX_CEPH_ADMIN_NODE,$PREFIX_CEPH_MON_NODE,$PREFIX_CEPH_OSD_NODE,$PREFIX_CEPH_MDS_NODE" >> $CONF_DEPLOY_DIR/$HISTROY_FILE
##########################################################################################
#
exit 0
| true
|
dc31e4e7a16161d666a45467cb9abd1be31ac2ee
|
Shell
|
openstack/tacker
|
/tools/gen_vim_config.sh
|
UTF-8
| 11,670
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Uncomment for debugging.
#set -x
# Default values for optional inputs.
VIMC_DEFAULT_PROJ=admin                 # project the VIM is registered in
VIMC_DEFAULT_OS_DOMAIN=Default          # OpenStack project/user domain
VIMC_DEFAULT_TYPE=openstack             # VIM type when -t is omitted
VIMC_DEFAULT_OUTPUT=vim_config.yaml     # output file when -o is omitted
#######################################
# Find token from first entry of secrets.
# Returns:
# Secret token retrieved from kubectl.
#######################################
function k8s_token() {
  # NOTES:
  # - Service account tokens are no longer automatically generated
  #   for each ServiceAccount in Kubernetes 1.24,
  #   so it is necessary to manually register Secret.
  # (Creation errors — e.g. the Secret already exists — are ignored.)
  kubectl create -f - <<EOF &>/dev/null
apiVersion: v1
kind: Secret
metadata:
  name: default-token-k8svim
  annotations:
    kubernetes.io/service-account.name: "default"
type: kubernetes.io/service-account-token
EOF
  # NOTE(review): assumes the first listed secret is the service-account
  # token just created — confirm in clusters with pre-existing secrets.
  local _secret=$(kubectl get secret -o jsonpath="{.items[0].metadata.name}")
  echo $(kubectl get secret ${_secret} -o jsonpath="{.data.token}" |
    base64 --decode)
}
#######################################
# Get endpoint of k8s.
# Returns:
# URL of endpoint retrieved from kubectl.
#######################################
function k8s_endpoints() {
  # Dots inside the annotation key must be escaped for the jsonpath query
  local _attr="'kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint'"
  _attr=${_attr//\./\\.}
  # NOTE(review): reads the annotation from the first pod across all
  # namespaces — assumes that pod is the kube-apiserver; confirm.
  local _ep=$(kubectl get pods -A -o \
    jsonpath="{.items[0].metadata.annotations[$_attr]}")
  echo "https://${_ep}"
}
#######################################
# Get cert from first entry of secrets.
# Returns:
# Contents of CA sert retrieved from kubectl.
#######################################
function k8s_ssl_ca_cert() {
  # NOTE(review): like k8s_token, this takes the first listed secret and
  # decodes its ca.crt field — assumes that secret carries the cluster CA.
  local _secret=$(kubectl get secret -o jsonpath="{.items[0].metadata.name}")
  echo $(kubectl get secrets $_secret -o jsonpath="{.data.ca\.crt}" |
    base64 --decode)
}
#######################################
# Setup contents of config from given params and output to a file.
# Globals:
# VIMC_OS_CERT_VERIFY
# VIMC_OUTPUT
# VIMC_ENDPOINT
# VIMC_OS_USER
# VIMC_OS_PASSWORD
# VIMC_PROJ
# VIMC_OS_PROJ_DOMAIN
# VIMC_OS_USER_DOMAIN
# Outputs:
# Writes contents of config for OpenStack VIM to a file, ${VIMC_OUTPUT}.
#######################################
function setup_os_config() {
  local _cert_verify=
  # VIMC_OS_CERT_VERIFY holds the literal string "true" or "false", which
  # is executed as the shell builtin of the same name to branch here.
  if "${VIMC_OS_CERT_VERIFY}"; then
    _cert_verify=True
  else
    _cert_verify=False
  fi
  # Emit the OpenStack VIM config; the heredoc body must stay unindented
  # so the YAML keys start at column 0.
  cat << EOF > ${VIMC_OUTPUT}
auth_url: "${VIMC_ENDPOINT}"
username: "${VIMC_OS_USER}"
password: "${VIMC_OS_PASSWORD}"
project_name: "${VIMC_PROJ}"
domain_name: "${VIMC_OS_PROJ_DOMAIN}"
project_domain_name: "${VIMC_OS_PROJ_DOMAIN}"
user_domain_name: "${VIMC_OS_USER_DOMAIN}"
cert_verify: "${_cert_verify}"
EOF
}
#######################################
# Setup contents of config from given params and output to a file.
# Globals:
# VIMC_K8S_USE_CERT
# VIMC_K8S_USE_HELM
# VIMC_OUTPUT
# VIMC_ENDPOINT
# VIMC_K8S_TOKEN
# VIMC_PROJ
# Outputs:
# Write contents of config for OpenStack Kubernetes to a file, ${VIMC_OUTPUT}.
#######################################
function setup_k8s_config() {
  # Blanks inside the certificate body must become literal `\n`, but the
  # "BEGIN/END CERTIFICATE" header and footer must keep their spaces.
  # So: strip header/footer, replace remaining blanks with a temporary
  # delimiter, reattach header/footer, then turn the delimiter into `\n`.
  local _cert_header="-----BEGIN CERTIFICATE-----"
  local _cert_footer="-----END CERTIFICATE-----"
  # Delimiter used temporarily for replacing blanks.
  local _delim=":"
  local _extra=""
  if "${VIMC_K8S_USE_CERT}"; then
    local _k8s_cert=`k8s_ssl_ca_cert`
    _k8s_cert=`echo ${_k8s_cert} | sed "s/${_cert_header}//"`
    _k8s_cert=`echo ${_k8s_cert} | sed "s/${_cert_footer}//"`
    _k8s_cert=`echo ${_k8s_cert} | sed -e "s/ /${_delim}/g"`
    _k8s_cert=`echo \
      "${_cert_header}${_delim}${_k8s_cert}${_delim}${_cert_footer}"`
    _k8s_cert=`echo ${_k8s_cert} | sed -e "s/${_delim}/\\n/g"`
  else
    _k8s_cert="None"
  fi
  # Helm support is expressed as an extra two-line YAML fragment
  if "${VIMC_K8S_USE_HELM}"; then
    _extra="extra:"$'\n'"  use_helm: true"
  fi
  cat << EOF > ${VIMC_OUTPUT}
auth_url: "${VIMC_ENDPOINT}"
bearer_token: "${VIMC_K8S_TOKEN}"
ssl_ca_cert: "${_k8s_cert}"
project_name: "${VIMC_PROJ}"
type: "kubernetes"
${_extra}
EOF
}
#######################################
# Show help message.
# Outputs:
# Writes help message to stdout.
#######################################
function show_help() {
  # Usage text is emitted verbatim; keep the heredoc body unindented.
  cat << EOS
Generate config file for registering Kubernetes VIM
usage:
  $(basename $0) [-t VIM_TYPE] [-o OUTPUT_FILE] [-e ENDPOINT]
    [-p PROJCT_NAME] [-u USER_NAME] [--token TOKEN] [-c] [-h]
options:
  All of options are optional.
  1) Common options
    -t|--type VIM_TYPE
      type of VIM.
        * 'openstack' or 'os' for OpenStack
        * 'kubernetes' or 'k8s' for Kubernetes
    -o|--output OUTPUT_FILE
      name of output file, default is '${VIMC_DEFAULT_OUTPUT}'.
    -e|--endpoint ENDPOINT
      endpoint consists of url and port, such as 'https://127.0.0.1:6443'.
    -p|--project PROJECT_NAME
      name of project in which VIM is registered, default value is
      '${VIMC_DEFAULT_PROJ}'.
    -h|--help
      show this message.
  2) Options for OpenStack VIM
    --os-user USER_NAME
      name of OpenStack user, value of 'OS_USERNAME' is used by default.
    --os-password PASSWORD
      password of OpenStack user, value of 'OS_PASSWORD' is used by default.
    --os-project-domain PROJ_DOMAIN
      name of project domain, value of 'OS_PROJECT_DOMAIN_ID' is used by
      default.
    --os-user-domain USER_DOMAIN
      name of user domain, value of 'OS_USER_DOMAIN_ID' is used by default.
    --os-disable-cert-verify
      use this option only if you set 'cert_verify' to False to disable
      verifying against system certificates for keystone.
  3) Options for Kubernetes VIM
    --k8s-token TOKEN
      bearer token.
    --k8s-use-cert
      use SSL CA cert.
    --k8s-use-helm
      configure VIM to use helm for deploying CNFs.
EOS
}
#######################################
# Main function for OpenStack VIM config.
# Globals:
# VIMC_ENDPOINT
# VIMC_OS_USER
# VIMC_OS_PASSWORD
# VIMC_OS_PROJ_DOMAIN
# VIMC_OS_USER_DOMAIN
#######################################
function os_main() {
  # Each value falls back from the explicit CLI option to the standard
  # OS_* environment variable; endpoint/user/password are mandatory,
  # domains fall back to the script default.
  VIMC_ENDPOINT=${VIMC_ENDPOINT:-${OS_AUTH_URL}}
  if [ ! ${VIMC_ENDPOINT} ]; then
    clean_exit 1 \
      "Error: Set 'OS_AUTH_URL' or use '--endpoint'."
  fi
  VIMC_OS_USER=${VIMC_OS_USER:-${OS_USERNAME}}
  if [ ! ${VIMC_OS_USER} ]; then
    clean_exit 1 \
      "Error: No username found. Set 'OS_USERNAME' or use '--os-user'."
  fi
  VIMC_OS_PASSWORD=${VIMC_OS_PASSWORD:-${OS_PASSWORD}}
  if [ ! ${VIMC_OS_PASSWORD} ]; then
    clean_exit 1 \
      "Error: No password found. Set 'OS_PASSWORD' or use '--os-password'."
  fi
  VIMC_OS_PROJ_DOMAIN=${VIMC_OS_PROJ_DOMAIN:-${OS_PROJECT_DOMAIN_ID}}
  if [ ! ${VIMC_OS_PROJ_DOMAIN} ]; then
    VIMC_OS_PROJ_DOMAIN=${VIMC_DEFAULT_OS_DOMAIN}
  fi
  VIMC_OS_USER_DOMAIN=${VIMC_OS_USER_DOMAIN:-${OS_USER_DOMAIN_ID}}
  if [ ! ${VIMC_OS_USER_DOMAIN} ]; then
    VIMC_OS_USER_DOMAIN=${VIMC_DEFAULT_OS_DOMAIN}
  fi
  setup_os_config
}
#######################################
# Main function for Kubernetes VIM config.
# Globals:
# VIMC_K8S_TOKEN
# VIMC_ENDPOINT
#######################################
function k8s_main() {
  # Grant the default service account cluster-admin; errors (e.g. the
  # binding already exists) are silenced.
  kubectl create clusterrolebinding cluster-admin-binding \
    --clusterrole cluster-admin --serviceaccount=default:default \
    &>/dev/null
  # CLI-provided token/endpoint win; otherwise query the cluster
  VIMC_K8S_TOKEN=${VIMC_K8S_TOKEN:-`k8s_token`}
  VIMC_ENDPOINT=${VIMC_ENDPOINT:-`k8s_endpoints`}
  setup_k8s_config
}
#######################################
# Re-wind OPTIND and clean all other variables as finalization.
# Globals:
# OPTIND
# PREV_OPTIND
# VIMC_DEFAULT_TYPE
# VIMC_DEFAULT_OUTPUT
# VIMC_DEFAULT_PROJ
# VIMC_TYPE
# VIMC_OUTPUT
# VIMC_ENDPOINT
# VIMC_PROJ
# VIMC_OS_USER
# VIMC_OS_PASSWORD
# VIMC_OS_PROJ_DOMAIN
# VIMC_OS_USER_DOMAIN
# VIMC_OS_CERT_VERIFY
# VIMC_K8S_TOKEN
# VIMC_K8S_USE_CERT
# VIMC_K8S_USE_HELM
#######################################
function cleanup() {
  # Restore getopts state so the script can be sourced repeatedly, then
  # blank every VIMC_* variable (set-to-empty, not unset).
  OPTIND=${PREV_OPTIND}
  VIMC_DEFAULT_TYPE=
  VIMC_DEFAULT_OUTPUT=
  VIMC_DEFAULT_PROJ=
  VIMC_TYPE=
  VIMC_OUTPUT=
  VIMC_ENDPOINT=
  VIMC_PROJ=
  VIMC_OS_USER=
  VIMC_OS_PASSWORD=
  VIMC_OS_PROJ_DOMAIN=
  VIMC_OS_USER_DOMAIN=
  VIMC_OS_CERT_VERIFY=
  VIMC_K8S_TOKEN=
  VIMC_K8S_USE_CERT=
  VIMC_K8S_USE_HELM=
}
#######################################
# Ensure cleanup before exit.
# Arguments:
# Exit code (optional).
# Error message to be output to stderr (optional).
#######################################
# Run cleanup, optionally print an error message to stderr, and exit.
#   $1 - exit code (optional; omitted -> bare `exit`, preserving status)
#   $2 - error message to print to stderr (optional)
function clean_exit() {
  cleanup
  if [[ $2 != "" ]]; then
    # Fix: quote the message so its whitespace is preserved and any glob
    # characters in it are not expanded (was an unquoted `echo $2`).
    echo "$2" >&2
  fi
  # $1 intentionally unquoted/undefaulted: with no argument this becomes
  # a bare `exit`, keeping the current exit status.
  exit $1
}
#######################################
# Main procedure is started from here.
#######################################
PREV_OPTIND=${OPTIND}
OPTIND=1
# The trailing "-:" in the optstring enables GNU-style long options:
# "--name[=value]" arrives as opt='-' with OPTARG='name[=value]', and is
# re-split below into the option name and its argument.
while getopts t:o:e:p:ch-: opt; do
  optarg=${OPTARG}
  if [[ "${opt}" = - ]]; then
    # Long option: extract name, then the value either after '=' or from
    # the next positional parameter (unless that looks like another flag).
    opt="-${OPTARG%%=*}"
    optarg="${OPTARG/${OPTARG%%=*}/}"
    optarg="${optarg#=}"
    if [[ -z "$optarg" ]] && [[ ! "${!OPTIND}" = -* ]]; then
      optarg="${!OPTIND}"
      shift
    fi
  fi
  case "-${opt}" in
    -t|--type)
      VIMC_TYPE=${optarg};
      ;;
    -o|--output)
      VIMC_OUTPUT=${optarg};
      ;;
    -e|--endpoint)
      VIMC_ENDPOINT=${optarg};
      ;;
    -p|--project)
      VIMC_PROJ=${optarg};
      ;;
    --os-user)
      VIMC_OS_USER=${optarg};
      ;;
    --os-password)
      VIMC_OS_PASSWORD=${optarg};
      ;;
    --os-project-domain)
      VIMC_OS_PROJ_DOMAIN=${optarg};
      ;;
    --os-user-domain)
      VIMC_OS_USER_DOMAIN=${optarg};
      ;;
    --os-disable-cert-verify)
      VIMC_OS_CERT_VERIFY=false;
      ;;
    --k8s-token)
      VIMC_K8S_TOKEN=${optarg};
      ;;
    --k8s-use-cert)
      VIMC_K8S_USE_CERT=true;
      ;;
    --k8s-use-helm)
      VIMC_K8S_USE_HELM=true;
      ;;
    -h|--help)
      show_help;
      clean_exit;
      ;;
    --*)
      clean_exit 1 "Error: Illegal option '${opt##-}'.";
      ;;
  esac
done
# Apply defaults for anything not provided on the command line
VIMC_TYPE=${VIMC_TYPE:-${VIMC_DEFAULT_TYPE}}
VIMC_OUTPUT=${VIMC_OUTPUT:-${VIMC_DEFAULT_OUTPUT}}
VIMC_PROJ=${VIMC_PROJ:-${VIMC_DEFAULT_PROJ}}
VIMC_OS_CERT_VERIFY=${VIMC_OS_CERT_VERIFY:-true}
VIMC_K8S_USE_CERT=${VIMC_K8S_USE_CERT:-false}
# Dispatch to the VIM-type-specific main routine
if [[ ${VIMC_TYPE} == "openstack" || ${VIMC_TYPE} == "os" ]]; then
  os_main
  echo "Config for OpenStack VIM '${VIMC_OUTPUT}' generated."
elif [[ ${VIMC_TYPE} == "kubernetes" || ${VIMC_TYPE} == "k8s" ]]; then
  k8s_main
  echo "Config for Kubernetes VIM '${VIMC_OUTPUT}' generated."
else
  echo "Error: No type matched with '${VIMC_TYPE}'." >&2
fi
cleanup
set +x
| true
|
f30fd588543a3ba829cc52a9525a43e1fc00ceeb
|
Shell
|
simifalaye/dotfiles
|
/config/shell/.config/shell/zsh/you-should-use/@interactive.zsh
|
UTF-8
| 314
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
# shellcheck shell=zsh
# Load plugin: clone zsh-you-should-use on first use (into $ZPLUGDIR),
# byte-compile it for faster startup, then source it.
plugin_dir=${ZPLUGDIR}/zsh-you-should-use
if [[ ! -e ${plugin_dir} ]]; then
  git clone --depth=1 https://github.com/MichaelAquilina/zsh-you-should-use.git ${plugin_dir}
  # zcompile-many is a helper expected to be defined elsewhere in this config
  zcompile-many ${plugin_dir}/you-should-use.plugin.zsh
fi
source ${plugin_dir}/you-should-use.plugin.zsh
| true
|
a3df8acd3a5fa2b46466fac551244a48670184ff
|
Shell
|
anjijava16/GCP_Data_Enginner_Utils
|
/DataEnginner_GCP/gsutil/gsutil_s5cmd_linux_comparison.sh
|
UTF-8
| 4,897
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Benchmark transcript: gsutil vs s5cmd upload/download throughput against GCS,
# run on GCE VMs (n2-standard-4 and n2-standard-80) with a local NVMe SSD as
# scratch space. The recorded wall-clock times / MB/s next to each command are
# measurement data and are kept verbatim.
# Using Debian GNU/Linux 10 (buster)
# Create local SSD file system and mount
sudo mkfs.ext4 -F /dev/nvme0n1
sudo mkdir -p /mnt/disks/nvme0n1
sudo mount /dev/nvme0n1 /mnt/disks/nvme0n1
sudo chmod a+w /mnt/disks/nvme0n1
cd /mnt/disks/nvme0n1
# Update
sudo apt-get update
sudo apt-get -y upgrade
##### Testing gsutil with a large file #####
# Create large dataset
fallocate -l 30G temp_30GB_file
# Test upload from local SSD
time gsutil cp temp_30GB_file gs://doit-speed-test-bucket/
# n2-standard-4: 2m21.893s, 216.50 MB/s
# n2-standard-80: 2m11.676s, 233.30 MB/s
time gsutil -m cp temp_30GB_file gs://doit-speed-test-bucket/
# n2-standard-4: 2m48.710s, 182.09 MB/s
# n2-standard-80: 2m29.348s, 205.69 MB/s
time gsutil -o GSUtil:parallel_composite_upload_threshold=150M cp temp_30GB_file gs://doit-speed-test-bucket/
# n2-standard-4: 1m40.104s, 306.88 MB/s
# n2-standard-80: 0m52.145s, 589.13 MB/s
time gsutil -m -o GSUtil:parallel_composite_upload_threshold=150M cp temp_30GB_file gs://doit-speed-test-bucket/
# n2-standard-4: 1m44.579s, 293.75 MB/s
# n2-standard-80: 0m51.154s, 600.54 MB/s
# Test download to local SSD
time gsutil cp gs://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 8m3.186s, 63.58 MB/s
# n2-standard-80: 6m13.585, 82.23 MB/s
time gsutil -m cp gs://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 7m57.881s, 64.28 MB/s
# n2-standard-80: 6m20.131s, 80.81 MB/s
##### Testing s5cmd with a large file #####
# Install required packages
sudo apt-get -y install git wget unzip
# Install and configure AWS CLI with GCS Interoperability credentials
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
rm -rf aws*
aws configure
# Enter the Access and Secret Access key for the service account
# Install go
wget https://golang.org/dl/go1.15.linux-amd64.tar.gz
# /usr/local is root-owned: extraction needs sudo (the original omitted it and
# would fail when the rest of the script is run as a regular user).
sudo tar -C /usr/local -xzf go1.15.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
rm -f go1.15.linux-amd64.tar.gz
go version
# Install s5cmd
# Note: GCS multipart upload isn't supported as GCS Interoperability doesn't support S3's multipart upload API, causing large file uploads to fail unless you provide '-p=1000000':
# https://github.com/peak/s5cmd/issues/217
# https://github.com/peak/s5cmd/issues/29
# NOTE: 'go get <pkg>' installs binaries under go 1.15; go >= 1.17 requires
# 'go install github.com/peak/s5cmd@latest' instead.
go get github.com/peak/s5cmd
# Test upload from local SSD
time s5cmd --endpoint-url https://storage.googleapis.com cp -c=1 -p=1000000 temp_30GB_file s3://doit-speed-test-bucket/
# n2-standard-4: 6m7.459s, 83.60 MB/s
# n2-standard-80: 6m50.272s, 74.88 MB/s
time s5cmd --endpoint-url https://storage.googleapis.com cp -p=1000000 temp_30GB_file s3://doit-speed-test-bucket/
# n2-standard-4: 7m18.682s, 70.03 MB/s
# n2-standard-80: 6m48.380s, 75.22 MB/s
# Test download to local SSD
time s5cmd --endpoint-url https://storage.googleapis.com cp -c=1 -p=1000000 s3://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 1m56.170s, 264.44 MB/s
# n2-standard-80: 1m46.196s, 289.28 MB/s
time s5cmd --endpoint-url https://storage.googleapis.com cp -c=1 s3://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 3m21.380s, 152.55 MB/s
# n2-standard-80: 3m45.414s, 136.28 MB/s
time s5cmd --endpoint-url https://storage.googleapis.com cp -p=1000000 s3://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 2m33.148s, 200.59 MB/s
# n2-standard-80: 2m48.071s, 182.78 MB/s
time s5cmd --endpoint-url https://storage.googleapis.com cp s3://doit-speed-test-bucket/temp_30GB_file .
# n2-standard-4: 1m46.378s, 288.78 MB/s
# n2-standard-80: 2m1.116s, 253.64 MB/s
##### Testing gsutil with many small files #####
# Create ~50,001 small files, each 630 KBs in size
mkdir parts
split -b 644245 temp_30GB_file
mv x* parts/
# Test upload from local SSD
nohup bash -c 'time gsutil cp -r parts/* gs://doit-speed-test-bucket/smallparts/' &
# n2-standard-4: 71m30.420s, 7.16 MB/s
# n2-standard-80: 69m32.803s, 7.36 MB/s
nohup bash -c 'time gsutil -m cp -r parts/* gs://doit-speed-test-bucket/smallparts/' &
# n2-standard-4: 9m7.045s, 56.16 MB/s
# n2-standard-80: 3m41.081s, 138.95 MB/s
# Test download to local SSD
nohup bash -c 'time gsutil cp -r gs://doit-speed-test-bucket/smallparts/ parts/' &
# n2-standard-4: 61m24.516s, 8.34 MB/s
# n2-standard-80: 56m54.841s, 9.00 MB/s
nohup bash -c 'time gsutil -m cp -r gs://doit-speed-test-bucket/smallparts/ parts/' &
# n2-standard-4: 7m42.249s, 66.46 MB/s
# n2-standard-80: 3m38.421s, 140.65 MB/s
##### Testing s5cmd download with many small files #####
# Test download to local SSD
nohup bash -c 'time s5cmd --endpoint-url https://storage.googleapis.com cp s3://doit-speed-test-bucket/smallparts/* parts/' &
# n2-standard-4: 1m15.615s, 406.27 MB/s
# n2-standard-80: 1m31.592s, 335.40 MB/s
nohup bash -c 'time s5cmd --endpoint-url https://storage.googleapis.com cp -c=80 s3://doit-speed-test-bucket/smallparts/x* parts/' &
# n2-standard-80: 1m29.837s, 341.95 MB/s
| true
|
cfdffa87e62e0c2cc63b3f003c04a58e88566c89
|
Shell
|
wychen/dom-distiller
|
/create-hook-symlinks
|
UTF-8
| 177
| 3.328125
| 3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Install hook scripts by making symlinks to $GIT_ROOT/hooks.
# Run from the repository root; existing hooks with the same name are replaced.
HOOK_DIR=.git/hooks
# Glob instead of parsing `ls` output, which breaks on names with whitespace.
for hook in hooks/*; do
	[ -e "$hook" ] || continue   # no match: glob stays literal, skip it
	ln -s -f "../../hooks/${hook##*/}" "$HOOK_DIR/${hook##*/}"
done
| true
|
521a236b4ed2940c342273179b30f96eeb6a8ac1
|
Shell
|
shinobee/dotfiles
|
/.bashrc.ps
|
UTF-8
| 618
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install this repo's prompt/dircolors config into $HOME (first run only),
# then set up a git-aware colored prompt and colorized `ls`.
#
# Directory containing this script (BASH_SOURCE under bash, %N under zsh).
DIR_ThisScript="$(cd "$(dirname "${BASH_SOURCE:-${(%):-%N}}")"; pwd)"
#
# Copy .bashrc.ps into $HOME unless it is already installed.
FILE_BASH_PS=${HOME}/.bashrc.ps
if [ ! -e "${FILE_BASH_PS}" ] ; then
    cp -p "${DIR_ThisScript}/.bashrc.ps" "${FILE_BASH_PS}"
fi
#
# Copy .dircolors into $HOME unless it is already installed.
FILE_DIR_COLORS=${HOME}/.dircolors
if [ ! -e "${FILE_DIR_COLORS}" ] ; then
    cp -p "${DIR_ThisScript}/.dircolors" "${FILE_DIR_COLORS}"
fi
#
# git prompt helpers (__git_ps1 used in PS1 below).
source "${HOME}/.bashrc.git"
# [change color] blue'34' --> cyan'36'
## prompt
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;36m\]\w$(__git_ps1) \$\[\033[00m\] '
## ls
# Read the installed copy explicitly; the original evaluated `dircolors
# .dircolors`, which only resolved when the current directory was $HOME.
eval "$(dircolors "${FILE_DIR_COLORS}")"
alias ls='ls --color=auto'
#
| true
|
c5635fb0fe66ea9220dc83d482a0dd3a50a51ee4
|
Shell
|
TheConnMan/PhotoPlaylists
|
/version-bump.sh
|
UTF-8
| 807
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Version Bumper
# Sets app.version in application.properties on the dev branch, commits,
# merges dev into master with a merge commit, and tags the release.
# Usage: version-bump.sh <version> <tag-message>
if [ "$#" -ne 2 ]; then
	echo "Two arguments required, version name and tag message respectively"
else
	echo -e "\nChecking out dev."
	git checkout dev
	echo -e "\nEditing application.properties."
	# In-place edit via a temp copy (portable across GNU/BSD sed -i quirks).
	mv application.properties application.properties.temp
	sed "s/app.version=.*/app.version=$1/g" < application.properties.temp > application.properties
	rm application.properties.temp
	echo -e "\nCommitting updated version."
	git add application.properties
	git commit -m "Version bump to $1."
	git checkout master
	git merge --no-ff dev
	echo -e "\nTagging version $1."
	git tag -a "v$1" -m "$2"
	git checkout dev
	echo -e "\nSUCCESS"
	echo "To push changes run 'git push --all' and 'git push --tags'."
fi
| true
|
f99c7f6270679a789a5e5a850f59f9611b73d321
|
Shell
|
nirecom/install-kubernetes
|
/2-server-common.sh
|
UTF-8
| 985
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install Kubernetes (kubeadm/kubelet/kubectl) on a Debian/Ubuntu node.
# Intended to be run as a regular user with sudo rights.
# ref. https://qiita.com/nnagashima/items/d7deb00d086b6e276eea
# Letting iptables see bridged traffic
# ref. https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#troubleshooting
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
# Skip the install when kubeadm is already present (command -v is the
# portable existence check; the original used `type`).
if command -v kubeadm >/dev/null 2>&1; then
    echo kubeadm exists. Skipping installation of Kubernetes tools...
else
    echo Install Kubernetes .....
    sudo apt update
    sudo apt install -y apt-transport-https curl
    curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt install -y kubeadm kubelet kubectl kubernetes-cni
    # Pin the packages so unattended upgrades don't skew cluster versions.
    sudo apt-mark hold kubelet kubeadm kubectl
fi
# kubelet requires swap to be disabled; needs root like every step above
# (the original ran plain `swapoff -a`, which fails for a non-root user).
sudo swapoff -a
|
0bfce887b65015a2f1b48823d1d5715c4eef3ae6
|
Shell
|
adiblol/audiotools
|
/jamendo-dl
|
UTF-8
| 1,285
| 3.578125
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Download Jamendo album (single tracks not supported yet) in FLAC format (YA RLY!)
#
# Usage: jamendo-dl <album-url>
# The URL must contain ".../list/a<ID>/<name>"; tracks are saved into
# "./<ID>_<name>/NN_track_name.flac" and tagged with metaflac.
inurl="$1"
# Pull the numeric album id and a "<id>_<name>" folder name out of the URL.
albumid="$(echo "$inurl" | sed -e 's/.\+\/list\/a\([0-9]\+\)\/.\+$/\1/')"
foldername="$(echo "$inurl" | sed -e 's/.\+\/list\/a\([0-9]\+\)\/\(.\+\)$/\1_\2/')"
# get metadata (the API returns plain-text single values)
artist_name="$(wget -O - -q "http://api.jamendo.com/get2/artist_name/album/plain/?album_id=$albumid")"
album_name="$(wget -O - -q "http://api.jamendo.com/get2/name/album/plain/?album_id=$albumid")"
targetdir="./$foldername"
mkdir -p "$targetdir"
trackn=1
# Track list arrives as "<url> <name>" lines; the trailing echo guarantees a
# final newline so the last entry is not dropped by `read`.
( wget -O - -q "http://api.jamendo.com/get2/stream+name/track/plain/album_track/?n=100&order=numalbum_asc&streamencoding=flac&album_id=$albumid"; echo '' ) | while read -r track_url track_name; do
	#sleep 2
	if [ -z "$track_url" ]; then
		break
	fi
	# Sanitize the track name so it is safe in a filename.
	track_name_safe="$(echo "$track_name" | tr -c 'a-zA-Z0-9' '_')"
	# Zero-pad the track number to two digits.
	tracknp=$trackn
	if [ "$trackn" -lt 10 ]; then
		tracknp=0$trackn
	fi
	trackfile="$targetdir/${tracknp}_${track_name_safe}.flac"
	# Quote the path: $targetdir comes from the URL and may contain spaces.
	wget -c -O "$trackfile" "$track_url"
	# set metadata
	metaflac --set-tag="TITLE=$track_name" --set-tag="ARTIST=$artist_name"\
		--set-tag="ALBUM=$album_name" --set-tag="TRACKNUMBER=$trackn" "$trackfile"
	trackn=$((trackn+1))
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.